xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision 8c3c4172fbd442a68f7b879958acb6794236aee0)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <qdf_types.h>
21 #include <qdf_lock.h>
22 #include <qdf_net_types.h>
23 #include <qdf_lro.h>
24 #include <qdf_module.h>
25 #include <hal_hw_headers.h>
26 #include <hal_api.h>
27 #include <hif.h>
28 #include <htt.h>
29 #include <wdi_event.h>
30 #include <queue.h>
31 #include "dp_types.h"
32 #include "dp_internal.h"
33 #include "dp_tx.h"
34 #include "dp_tx_desc.h"
35 #include "dp_rx.h"
36 #ifdef DP_RATETABLE_SUPPORT
37 #include "dp_ratetable.h"
38 #endif
39 #include <cdp_txrx_handle.h>
40 #include <wlan_cfg.h>
41 #include <wlan_utility.h>
42 #include "cdp_txrx_cmn_struct.h"
43 #include "cdp_txrx_stats_struct.h"
44 #include "cdp_txrx_cmn_reg.h"
45 #include <qdf_util.h>
46 #include "dp_peer.h"
47 #include "htt_stats.h"
48 #include "dp_htt.h"
49 #ifdef WLAN_SUPPORT_RX_FISA
50 #include <dp_fisa_rx.h>
51 #endif
52 #include "htt_ppdu_stats.h"
53 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
54 #include "cfg_ucfg_api.h"
55 
56 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
57 #include "cdp_txrx_flow_ctrl_v2.h"
58 #else
59 
/**
 * cdp_dump_flow_pool_info() - no-op stub used when TX flow control v2
 *			       is compiled out
 * @soc: CDP SoC handle (unused)
 *
 * Return: none
 */
static inline void
cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
{
}
65 #endif
66 #ifdef WIFI_MONITOR_SUPPORT
67 #include <dp_mon.h>
68 #endif
69 #include "dp_ipa.h"
70 #ifdef FEATURE_WDS
71 #include "dp_txrx_wds.h"
72 #endif
73 #ifdef WLAN_SUPPORT_MSCS
74 #include "dp_mscs.h"
75 #endif
76 #ifdef WLAN_SUPPORT_MESH_LATENCY
77 #include "dp_mesh_latency.h"
78 #endif
79 #ifdef ATH_SUPPORT_IQUE
80 #include "dp_txrx_me.h"
81 #endif
82 #if defined(DP_CON_MON)
83 #ifndef REMOVE_PKT_LOG
84 #include <pktlog_ac_api.h>
85 #include <pktlog_ac.h>
86 #endif
87 #endif
88 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
89 #include <dp_swlm.h>
90 #endif
91 
#ifdef WLAN_FEATURE_STATS_EXT
/* Lock protecting the REO HW stats request bookkeeping in dp_soc */
#define INIT_RX_HW_STATS_LOCK(_soc) \
	qdf_spinlock_create(&(_soc)->rx_hw_stats_lock)
#define DEINIT_RX_HW_STATS_LOCK(_soc) \
	qdf_spinlock_destroy(&(_soc)->rx_hw_stats_lock)
#else
#define INIT_RX_HW_STATS_LOCK(_soc)  /* no op */
#define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */
#endif

#if defined(DP_PEER_EXTENDED_API) || defined(WLAN_DP_PENDING_MEM_FLUSH)
/* Force a peer's reference count to exactly one */
#define SET_PEER_REF_CNT_ONE(_peer) \
	qdf_atomic_set(&(_peer)->ref_cnt, 1)
#else
#define SET_PEER_REF_CNT_ONE(_peer)
#endif

#ifdef WLAN_SYSFS_DP_STATS
/* sysfs event wait time for firmware stat request, in milliseconds */
#define WLAN_SYSFS_STAT_REQ_WAIT_MS 3000
#endif

#ifdef QCA_DP_ENABLE_TX_COMP_RING4
/* Fixed ring number used when the 4th TX completion ring is enabled */
#define TXCOMP_RING4_NUM 3
#else
#define TXCOMP_RING4_NUM WBM2SW_TXCOMP_RING4_NUM
#endif

#ifdef WLAN_MCAST_MLO
/* MLO multicast builds use the V2 TCL metadata layout for the pdev id */
#define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
		HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
#else
#define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
		HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
#endif
127 
/* HAL REO dest ring count must match the CDP-visible RX ring count */
QDF_COMPILE_TIME_ASSERT(max_rx_rings_check,
			MAX_REO_DEST_RINGS == CDP_MAX_RX_RINGS);

/* HAL TCL data ring count must match the CDP-visible TX completion count */
QDF_COMPILE_TIME_ASSERT(max_tx_rings_check,
			MAX_TCL_DATA_RINGS == CDP_MAX_TX_COMP_RINGS);

/* Trace wrappers for the DP init module */
#define dp_init_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_INIT, params)
#define dp_init_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_INIT, params)
#define dp_init_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_INIT, params)
#define dp_init_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_INIT, ## params)
#define dp_init_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_INIT, params)

/* Trace wrappers for the DP vdev module */
#define dp_vdev_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_VDEV, params)
#define dp_vdev_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_VDEV, params)
#define dp_vdev_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_VDEV, params)
#define dp_vdev_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_VDEV, ## params)
#define dp_vdev_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_VDEV, params)
147 
148 void dp_configure_arch_ops(struct dp_soc *soc);
149 qdf_size_t dp_get_soc_context_size(uint16_t device_id);
150 
151 /*
152  * The max size of cdp_peer_stats_param_t is limited to 16 bytes.
153  * If the buffer size is exceeding this size limit,
154  * dp_txrx_get_peer_stats is to be used instead.
155  */
156 QDF_COMPILE_TIME_ASSERT(cdp_peer_stats_param_t_max_size,
157 			(sizeof(cdp_peer_stats_param_t) <= 16));
158 
159 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
160 /*
161  * If WLAN_CFG_INT_NUM_CONTEXTS is changed, HIF_NUM_INT_CONTEXTS
162  * also should be updated accordingly
163  */
164 QDF_COMPILE_TIME_ASSERT(num_intr_grps,
165 			HIF_NUM_INT_CONTEXTS == WLAN_CFG_INT_NUM_CONTEXTS);
166 
167 /*
168  * HIF_EVENT_HIST_MAX should always be power of 2
169  */
170 QDF_COMPILE_TIME_ASSERT(hif_event_history_size,
171 			(HIF_EVENT_HIST_MAX & (HIF_EVENT_HIST_MAX - 1)) == 0);
172 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
173 
174 /*
175  * If WLAN_CFG_INT_NUM_CONTEXTS is changed,
176  * WLAN_CFG_INT_NUM_CONTEXTS_MAX should also be updated
177  */
178 QDF_COMPILE_TIME_ASSERT(wlan_cfg_num_int_ctxs,
179 			WLAN_CFG_INT_NUM_CONTEXTS_MAX >=
180 			WLAN_CFG_INT_NUM_CONTEXTS);
181 
182 static QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl);
183 static QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl);
184 
185 static void dp_pdev_srng_deinit(struct dp_pdev *pdev);
186 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev);
187 static void dp_pdev_srng_free(struct dp_pdev *pdev);
188 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev);
189 
190 static void dp_soc_srng_deinit(struct dp_soc *soc);
191 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc);
192 static void dp_soc_srng_free(struct dp_soc *soc);
193 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc);
194 
195 static void dp_soc_cfg_init(struct dp_soc *soc);
196 static void dp_soc_cfg_attach(struct dp_soc *soc);
197 
198 static inline
199 QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
200 				struct cdp_pdev_attach_params *params);
201 
202 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id);
203 
204 static QDF_STATUS
205 dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
206 		   HTC_HANDLE htc_handle,
207 		   qdf_device_t qdf_osdev,
208 		   uint8_t pdev_id);
209 
210 static QDF_STATUS
211 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id, int force);
212 
213 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc);
214 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc);
215 
216 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
217 		  struct hif_opaque_softc *hif_handle);
218 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
219 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc,
220 				       uint8_t pdev_id,
221 				       int force);
222 static struct dp_soc *
223 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
224 	      struct cdp_soc_attach_params *params);
225 static inline QDF_STATUS dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl,
226 					      uint8_t vdev_id,
227 					      uint8_t *peer_mac_addr,
228 					      enum cdp_peer_type peer_type);
229 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
230 				       uint8_t vdev_id,
231 				       uint8_t *peer_mac, uint32_t bitmap);
232 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
233 				bool unmap_only);
234 #ifdef ENABLE_VERBOSE_DEBUG
235 bool is_dp_verbose_debug_enabled;
236 #endif
237 
238 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
239 static bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
240 static void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
241 			   bool enable);
242 static inline void
243 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
244 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats);
245 static inline void
246 dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
247 #endif
248 
249 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
250 						uint8_t index);
251 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index);
252 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index);
253 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
254 						 uint8_t index);
255 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
256 					    enum hal_ring_type ring_type,
257 					    int ring_num);
258 
/* Poll period (ms) for the interrupt-poll and lmac reap timers */
#define DP_INTR_POLL_TIMER_MS	5

/* Monitor vdev timer state flags */
#define MON_VDEV_TIMER_INIT 0x1
#define MON_VDEV_TIMER_RUNNING 0x2

/* Total number of per-MCS stats slots (6 groups of MAX_MCS entries) */
#define DP_MCS_LENGTH (6*MAX_MCS)

#define DP_CURR_FW_STATS_AVAIL 19
#define DP_HTT_DBG_EXT_STATS_MAX 256
#define DP_MAX_SLEEP_TIME 100
/* Drain wait (ms) on suspend; emulation platforms need a longer wait */
#ifndef QCA_WIFI_3_0_EMU
#define SUSPEND_DRAIN_WAIT 500
#else
#define SUSPEND_DRAIN_WAIT 3000
#endif

#ifdef IPA_OFFLOAD
/* Exclude IPA rings from the interrupt context */
#define TX_RING_MASK_VAL	0xb
#define RX_RING_MASK_VAL	0x7
#else
#define TX_RING_MASK_VAL	0xF
#define RX_RING_MASK_VAL	0xF
#endif

#define STR_MAXLEN	64

/* Common prefix for SRNG setup failure log messages */
#define RNG_ERR		"SRNG setup failed for"
287 
/**
 * default_dscp_tid_map - Default DSCP-TID mapping
 *
 * Indexed by the 6-bit DSCP value; eight consecutive DSCP codepoints
 * share one TID.
 *
 * DSCP        TID
 * 000000      0
 * 001000      1
 * 010000      2
 * 011000      3
 * 100000      4
 * 101000      5
 * 110000      6
 * 111000      7
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7,
};
311 
/**
 * default_pcp_tid_map - Default PCP-TID mapping
 *
 * Indexed by the 3-bit 802.1p PCP value; identity mapping to TID.
 *
 * PCP     TID
 * 000      0
 * 001      1
 * 010      2
 * 011      3
 * 100      4
 * 101      5
 * 110      6
 * 111      7
 */
static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
	0, 1, 2, 3, 4, 5, 6, 7,
};
328 
/**
 * dp_cpu_ring_map - CPU to TX ring map
 *
 * One row per NSS CPU ring-map selection (DP_NSS_CPU_RING_MAP_MAX),
 * one column per interrupt context (WLAN_CFG_INT_NUM_CONTEXTS_MAX);
 * each cell holds the TX ring id serviced from that context.
 */
uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	/* Extra row used only by the TX packet-capture-enhanced build */
	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
#endif
};

qdf_export_symbol(dp_cpu_ring_map);
345 
/**
 * enum dp_stats_type - selects which column of dp_stats_mapping_table
 *			to use (firmware vs host statistics)
 */
enum dp_stats_type {
	STATS_FW = 0,
	STATS_HOST = 1,
	STATS_TYPE_MAX = 2,
};

/**
 * enum dp_fw_stats - General Firmware statistics options
 *
 * TXRX_FW_STATS_INVALID marks table rows with no FW counterpart.
 */
enum dp_fw_stats {
	TXRX_FW_STATS_INVALID	= -1,
};
362 
/**
 * dp_stats_mapping_table - Firmware and Host statistics
 * currently supported
 *
 * Each row pairs an HTT firmware stats id (column STATS_FW) with a
 * host stats id (column STATS_HOST); *_INVALID marks the side that a
 * given request does not apply to.
 * NOTE(review): row order appears to be an index contract with the
 * cdp stats request ids — confirm before reordering.
 */
const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
	/* Last ENUM for HTT FW STATS */
	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
	{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_FSE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_HAL_REG_WRITE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_REO_HW_DESC_DUMP},
	{HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT, TXRX_HOST_STATS_INVALID}
};
406 
407 /* MCL specific functions */
408 #if defined(DP_CON_MON)
409 
410 #ifdef DP_CON_MON_MSI_ENABLED
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * With DP_CON_MON_MSI_ENABLED, monitor mode rings are serviced from the
 * regular interrupt contexts rather than from a polling timer, so the
 * rx mon ring mask configured for this context is returned as-is.
 * (The previous comment here described the polled variant and wrongly
 * claimed this function returns 0.)
 *
 * Return: configured rx mon ring mask for @intr_ctx_num
 */
static inline uint32_t
dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
}
431 #else
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle (unused)
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *        (unused)
 *
 * For MCL, monitor mode rings are being processed in timer contexts (polled).
 * This function is returning 0, since in interrupt mode(softirq based RX),
 * we donot want to process monitor mode rings in a softirq.
 *
 * So, in case packet log is enabled for SAP/STA/P2P modes,
 * regular interrupt processing will not process monitor mode rings. It would be
 * done in a separate timer context.
 *
 * Return: 0
 */
static inline uint32_t
dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return 0;
}
452 #endif
453 
454 /**
455  * dp_get_num_rx_contexts() - get number of RX contexts
456  * @soc_hdl: cdp opaque soc handle
457  *
458  * Return: number of RX contexts
459  */
460 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
461 {
462 	int i;
463 	int num_rx_contexts = 0;
464 
465 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
466 
467 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
468 		if (wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i))
469 			num_rx_contexts++;
470 
471 	return num_rx_contexts;
472 }
473 
474 #else
475 
476 /**
477  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
478  * @soc: pointer to dp_soc handle
479  * @intr_ctx_num: interrupt context number for which mon mask is needed
480  *
481  * Return: mon mask value
482  */
483 static inline
484 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
485 {
486 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
487 }
488 
489 /**
490  * dp_soc_reset_mon_intr_mask() - reset mon intr mask
491  * @soc: pointer to dp_soc handle
492  *
493  * Return:
494  */
495 void dp_soc_reset_mon_intr_mask(struct dp_soc *soc)
496 {
497 	int i;
498 
499 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
500 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
501 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
502 	}
503 }
504 
505 qdf_export_symbol(dp_soc_reset_mon_intr_mask);
506 
/*
 * dp_service_lmac_rings()- timer to reap lmac rings
 * @arg: SoC Handle (opaque pointer to struct dp_soc)
 *
 * For each lmac with a pdev mapped: run monitor processing, reap the
 * rxdma error ring from every interrupt context, and replenish RX
 * buffers when RXDMA_BUF is not NSS-offloaded. Re-arms itself.
 *
 * Return: none
 */
static void dp_service_lmac_rings(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;
	int ring = 0, i;
	struct dp_pdev *pdev = NULL;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	/* Process LMAC interrupts */
	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
		int mac_for_pdev = ring;
		struct dp_srng *rx_refill_buf_ring;

		/* Skip lmac ids with no pdev mapped */
		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
		if (!pdev)
			continue;

		rx_refill_buf_ring = &soc->rx_refill_buf_ring[mac_for_pdev];

		dp_monitor_process(soc, NULL, mac_for_pdev,
				   QCA_NAPI_BUDGET);

		/* Reap the rxdma error ring for every intr context */
		for (i = 0;
		     i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
			dp_rxdma_err_process(&soc->intr_ctx[i], soc,
					     mac_for_pdev,
					     QCA_NAPI_BUDGET);

		/* Host refills RX buffers only when the RXDMA_BUF ring
		 * is not offloaded to NSS for this mac
		 */
		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF,
						  mac_for_pdev))
			dp_rx_buffers_replenish(soc, mac_for_pdev,
						rx_refill_buf_ring,
						&soc->rx_desc_buf[mac_for_pdev],
						0, &desc_list, &tail);
	}

	/* Re-arm so lmac rings keep getting polled */
	qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
}
552 
553 #endif
554 
555 #ifdef FEATURE_MEC
/**
 * dp_peer_mec_flush_entries() - detach every MEC entry from the soc hash
 *				 table and free them outside the lock
 * @soc: Datapath soc handle
 *
 * Return: none
 */
void dp_peer_mec_flush_entries(struct dp_soc *soc)
{
	unsigned int index;
	struct dp_mec_entry *mecentry, *mecentry_next;

	/* Entries are collected here so they can be freed after the
	 * mec_lock is dropped
	 */
	TAILQ_HEAD(, dp_mec_entry) free_list;
	TAILQ_INIT(&free_list);

	/* Nothing to do if the hash table was never initialized */
	if (!soc->mec_hash.mask)
		return;

	if (!soc->mec_hash.bins)
		return;

	/* ... or if it holds no entries */
	if (!qdf_atomic_read(&soc->mec_cnt))
		return;

	qdf_spin_lock_bh(&soc->mec_lock);
	for (index = 0; index <= soc->mec_hash.mask; index++) {
		if (!TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
			TAILQ_FOREACH_SAFE(mecentry, &soc->mec_hash.bins[index],
					   hash_list_elem, mecentry_next) {
			    dp_peer_mec_detach_entry(soc, mecentry, &free_list);
			}
		}
	}
	qdf_spin_unlock_bh(&soc->mec_lock);

	/* Actual frees happen outside the lock */
	dp_peer_mec_free_list(soc, &free_list);
}
586 
587 /**
588  * dp_print_mec_entries() - Dump MEC entries in table
589  * @soc: Datapath soc handle
590  *
591  * Return: none
592  */
593 static void dp_print_mec_stats(struct dp_soc *soc)
594 {
595 	int i;
596 	uint32_t index;
597 	struct dp_mec_entry *mecentry = NULL, *mec_list;
598 	uint32_t num_entries = 0;
599 
600 	DP_PRINT_STATS("MEC Stats:");
601 	DP_PRINT_STATS("   Entries Added   = %d", soc->stats.mec.added);
602 	DP_PRINT_STATS("   Entries Deleted = %d", soc->stats.mec.deleted);
603 
604 	if (!qdf_atomic_read(&soc->mec_cnt))
605 		return;
606 
607 	mec_list = qdf_mem_malloc(sizeof(*mecentry) * DP_PEER_MAX_MEC_ENTRY);
608 	if (!mec_list) {
609 		dp_peer_warn("%pK: failed to allocate mec_list", soc);
610 		return;
611 	}
612 
613 	DP_PRINT_STATS("MEC Table:");
614 	for (index = 0; index <= soc->mec_hash.mask; index++) {
615 		qdf_spin_lock_bh(&soc->mec_lock);
616 		if (TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
617 			qdf_spin_unlock_bh(&soc->mec_lock);
618 			continue;
619 		}
620 
621 		TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index],
622 			      hash_list_elem) {
623 			qdf_mem_copy(&mec_list[num_entries], mecentry,
624 				     sizeof(*mecentry));
625 			num_entries++;
626 		}
627 		qdf_spin_unlock_bh(&soc->mec_lock);
628 	}
629 
630 	if (!num_entries) {
631 		qdf_mem_free(mec_list);
632 		return;
633 	}
634 
635 	for (i = 0; i < num_entries; i++) {
636 		DP_PRINT_STATS("%6d mac_addr = " QDF_MAC_ADDR_FMT
637 			       " is_active = %d pdev_id = %d vdev_id = %d",
638 			       i,
639 			       QDF_MAC_ADDR_REF(mec_list[i].mac_addr.raw),
640 			       mec_list[i].is_active,
641 			       mec_list[i].pdev_id,
642 			       mec_list[i].vdev_id);
643 	}
644 	qdf_mem_free(mec_list);
645 }
646 #else
/* FEATURE_MEC disabled: dumping the MEC table is a no-op */
static void dp_print_mec_stats(struct dp_soc *soc)
{
}
650 #endif
651 
652 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
653 				 uint8_t vdev_id,
654 				 uint8_t *peer_mac,
655 				 uint8_t *mac_addr,
656 				 enum cdp_txrx_ast_entry_type type,
657 				 uint32_t flags)
658 {
659 	int ret = -1;
660 	QDF_STATUS status = QDF_STATUS_SUCCESS;
661 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
662 						       peer_mac, 0, vdev_id,
663 						       DP_MOD_ID_CDP);
664 
665 	if (!peer) {
666 		dp_peer_debug("Peer is NULL!");
667 		return ret;
668 	}
669 
670 	status = dp_peer_add_ast((struct dp_soc *)soc_hdl,
671 				 peer,
672 				 mac_addr,
673 				 type,
674 				 flags);
675 	if ((status == QDF_STATUS_SUCCESS) ||
676 	    (status == QDF_STATUS_E_ALREADY) ||
677 	    (status == QDF_STATUS_E_AGAIN))
678 		ret = 0;
679 
680 	dp_hmwds_ast_add_notify(peer, mac_addr,
681 				type, status, false);
682 
683 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
684 
685 	return ret;
686 }
687 
688 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
689 						uint8_t vdev_id,
690 						uint8_t *peer_mac,
691 						uint8_t *wds_macaddr,
692 						uint32_t flags)
693 {
694 	int status = -1;
695 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
696 	struct dp_ast_entry  *ast_entry = NULL;
697 	struct dp_peer *peer;
698 
699 	if (soc->ast_offload_support)
700 		return status;
701 
702 	peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
703 				      peer_mac, 0, vdev_id,
704 				      DP_MOD_ID_CDP);
705 
706 	if (!peer) {
707 		dp_peer_debug("Peer is NULL!");
708 		return status;
709 	}
710 
711 	qdf_spin_lock_bh(&soc->ast_lock);
712 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
713 						    peer->vdev->pdev->pdev_id);
714 
715 	if (ast_entry) {
716 		status = dp_peer_update_ast(soc,
717 					    peer,
718 					    ast_entry, flags);
719 	}
720 	qdf_spin_unlock_bh(&soc->ast_lock);
721 
722 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
723 
724 	return status;
725 }
726 
727 /*
728  * dp_peer_reset_ast_entries() - Deletes all HMWDS entries for a peer
729  * @soc_handle:		Datapath SOC handle
730  * @peer:		DP peer
731  * @arg:		callback argument
732  *
733  * Return: None
734  */
735 static void
736 dp_peer_reset_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
737 {
738 	struct dp_ast_entry *ast_entry = NULL;
739 	struct dp_ast_entry *tmp_ast_entry;
740 
741 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) {
742 		if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
743 		    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
744 			dp_peer_del_ast(soc, ast_entry);
745 	}
746 }
747 
/*
 * dp_wds_reset_ast_wifi3() - Delete HMWDS AST entries, either all entries
 *			      of one peer or a single entry by MAC
 * @soc_hdl:		Datapath SOC handle
 * @wds_macaddr:	WDS entry MAC Address (used when @peer_mac_addr
 *			is NULL)
 * @peer_mac_addr:	peer MAC Address (takes precedence when non-NULL)
 * @vdev_id:		id of vdev handle
 *
 * No-op failure when AST handling is offloaded to the target.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
					 uint8_t *wds_macaddr,
					 uint8_t *peer_mac_addr,
					 uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_peer *peer;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;

	if (soc->ast_offload_support)
		return QDF_STATUS_E_FAILURE;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	pdev = vdev->pdev;

	if (peer_mac_addr) {
		/* Peer given: drop every HMWDS entry of that peer */
		peer = dp_peer_find_hash_find(soc, peer_mac_addr,
					      0, vdev->vdev_id,
					      DP_MOD_ID_CDP);
		if (!peer) {
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
			return QDF_STATUS_E_FAILURE;
		}

		qdf_spin_lock_bh(&soc->ast_lock);
		dp_peer_reset_ast_entries(soc, peer, NULL);
		qdf_spin_unlock_bh(&soc->ast_lock);
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	} else if (wds_macaddr) {
		/* Otherwise drop the single entry matching wds_macaddr
		 * on this pdev, if it is an HMWDS type
		 */
		qdf_spin_lock_bh(&soc->ast_lock);
		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
							    pdev->pdev_id);

		if (ast_entry) {
			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
				dp_peer_del_ast(soc, ast_entry);
		}
		qdf_spin_unlock_bh(&soc->ast_lock);
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}
806 
807 /*
808  * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entry
809  * @soc:		Datapath SOC handle
810  * @vdev_id:		id of vdev object
811  *
812  * Return: QDF_STATUS
813  */
814 static QDF_STATUS
815 dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
816 			     uint8_t vdev_id)
817 {
818 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
819 
820 	if (soc->ast_offload_support)
821 		return QDF_STATUS_SUCCESS;
822 
823 	qdf_spin_lock_bh(&soc->ast_lock);
824 
825 	dp_soc_iterate_peer(soc, dp_peer_reset_ast_entries, NULL,
826 			    DP_MOD_ID_CDP);
827 	qdf_spin_unlock_bh(&soc->ast_lock);
828 
829 	return QDF_STATUS_SUCCESS;
830 }
831 
832 /*
833  * dp_peer_flush_ast_entries() - Delete all wds and hmwds ast entries of a peer
834  * @soc:		Datapath SOC
835  * @peer:		Datapath peer
836  * @arg:		arg to callback
837  *
838  * Return: None
839  */
840 static void
841 dp_peer_flush_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
842 {
843 	struct dp_ast_entry *ase = NULL;
844 	struct dp_ast_entry *temp_ase;
845 
846 	DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
847 		if ((ase->type ==
848 			CDP_TXRX_AST_TYPE_STATIC) ||
849 			(ase->type ==
850 			 CDP_TXRX_AST_TYPE_SELF) ||
851 			(ase->type ==
852 			 CDP_TXRX_AST_TYPE_STA_BSS))
853 			continue;
854 		dp_peer_del_ast(soc, ase);
855 	}
856 }
857 
858 /*
859  * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entry
860  * @soc:		Datapath SOC handle
861  *
862  * Return: None
863  */
864 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
865 {
866 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
867 
868 	qdf_spin_lock_bh(&soc->ast_lock);
869 
870 	dp_soc_iterate_peer(soc, dp_peer_flush_ast_entries, NULL,
871 			    DP_MOD_ID_CDP);
872 
873 	qdf_spin_unlock_bh(&soc->ast_lock);
874 	dp_peer_mec_flush_entries(soc);
875 }
876 
/**
 * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
 *                                       and return ast entry information
 *                                       of first ast entry found in the
 *                                       table with given mac address
 *
 * @soc_hdl : data path soc handle
 * @ast_mac_addr : AST entry mac address
 * @ast_entry_info : out param, filled with the entry's type, ids and
 *                   owning peer's mac address
 *
 * return : true if ast entry found with ast_mac_addr
 *          false if ast entry not found (also when AST handling is
 *          offloaded to the target)
 */
static bool dp_peer_get_ast_info_by_soc_wifi3
	(struct cdp_soc_t *soc_hdl,
	 uint8_t *ast_mac_addr,
	 struct cdp_ast_entry_info *ast_entry_info)
{
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_peer *peer = NULL;

	if (soc->ast_offload_support)
		return false;

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
	/* Entries whose delete completed (no pending callback) are
	 * treated as gone
	 */
	if ((!ast_entry) ||
	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}

	/* Hold the owning peer so its mac stays valid while copying */
	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
				     DP_MOD_ID_AST);
	if (!peer) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}

	ast_entry_info->type = ast_entry->type;
	ast_entry_info->pdev_id = ast_entry->pdev_id;
	ast_entry_info->vdev_id = ast_entry->vdev_id;
	ast_entry_info->peer_id = ast_entry->peer_id;
	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
		     &peer->mac_addr.raw[0],
		     QDF_MAC_ADDR_SIZE);
	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
	qdf_spin_unlock_bh(&soc->ast_lock);
	return true;
}
929 
/**
 * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
 *                                          and return ast entry information
 *                                          if mac address and pdev_id matches
 *
 * @soc_hdl : data path soc handle
 * @ast_mac_addr : AST entry mac address
 * @pdev_id : pdev_id to match
 * @ast_entry_info : out param, filled with the entry's type, ids and
 *                   owning peer's mac address
 *
 * return : true if ast entry found with ast_mac_addr
 *          false if ast entry not found (also when AST handling is
 *          offloaded to the target)
 */
static bool dp_peer_get_ast_info_by_pdevid_wifi3
		(struct cdp_soc_t *soc_hdl,
		 uint8_t *ast_mac_addr,
		 uint8_t pdev_id,
		 struct cdp_ast_entry_info *ast_entry_info)
{
	struct dp_ast_entry *ast_entry;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_peer *peer = NULL;

	if (soc->ast_offload_support)
		return false;

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr,
						    pdev_id);

	/* Entries whose delete completed (no pending callback) are
	 * treated as gone
	 */
	if ((!ast_entry) ||
	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}

	/* Hold the owning peer so its mac stays valid while copying */
	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
				     DP_MOD_ID_AST);
	if (!peer) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}

	ast_entry_info->type = ast_entry->type;
	ast_entry_info->pdev_id = ast_entry->pdev_id;
	ast_entry_info->vdev_id = ast_entry->vdev_id;
	ast_entry_info->peer_id = ast_entry->peer_id;
	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
		     &peer->mac_addr.raw[0],
		     QDF_MAC_ADDR_SIZE);
	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
	qdf_spin_unlock_bh(&soc->ast_lock);
	return true;
}
985 
/**
 * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table
 *                            with given mac address
 *
 * @soc_handle : data path soc handle
 * @mac_addr : AST entry mac address
 * @callback : callback function to called on ast delete response from FW
 * @cookie : argument to be passed to callback
 *
 * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
 *          is sent
 *          QDF_STATUS_E_INVAL false if ast entry not found
 *          NOTE(review): error paths return -QDF_STATUS_E_INVAL (negated
 *          enum value), not QDF_STATUS_E_INVAL itself — callers appear to
 *          test only for != QDF_STATUS_SUCCESS; confirm before changing.
 */
static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
					       uint8_t *mac_addr,
					       txrx_ast_free_cb callback,
					       void *cookie)

{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_ast_entry *ast_entry = NULL;
	txrx_ast_free_cb cb = NULL;
	void *arg = NULL;

	if (soc->ast_offload_support)
		return -QDF_STATUS_E_INVAL;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return -QDF_STATUS_E_INVAL;
	}

	/* Capture any previously registered callback so it can be
	 * notified (outside the lock) that its delete was superseded
	 */
	if (ast_entry->callback) {
		cb = ast_entry->callback;
		arg = ast_entry->cookie;
	}

	ast_entry->callback = callback;
	ast_entry->cookie = cookie;

	/*
	 * if delete_in_progress is set AST delete is sent to target
	 * and host is waiting for response should not send delete
	 * again
	 */
	if (!ast_entry->delete_in_progress)
		dp_peer_del_ast(soc, ast_entry);

	qdf_spin_unlock_bh(&soc->ast_lock);
	/* Invoke the displaced callback outside ast_lock */
	if (cb) {
		cb(soc->ctrl_psoc,
		   dp_soc_to_cdp_soc(soc),
		   arg,
		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
	}
	return QDF_STATUS_SUCCESS;
}
1045 
1046 /**
1047  * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
1048  *                                   table if mac address and pdev_id matches
1049  *
1050  * @soc : data path soc handle
1051  * @ast_mac_addr : AST entry mac address
1052  * @pdev_id : pdev id
1053  * @callback : callback function to called on ast delete response from FW
1054  * @cookie : argument to be passed to callback
1055  *
1056  * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
1057  *          is sent
1058  *          QDF_STATUS_E_INVAL false if ast entry not found
1059  */
1060 
1061 static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
1062 						uint8_t *mac_addr,
1063 						uint8_t pdev_id,
1064 						txrx_ast_free_cb callback,
1065 						void *cookie)
1066 
1067 {
1068 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1069 	struct dp_ast_entry *ast_entry;
1070 	txrx_ast_free_cb cb = NULL;
1071 	void *arg = NULL;
1072 
1073 	if (soc->ast_offload_support)
1074 		return -QDF_STATUS_E_INVAL;
1075 
1076 	qdf_spin_lock_bh(&soc->ast_lock);
1077 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);
1078 
1079 	if (!ast_entry) {
1080 		qdf_spin_unlock_bh(&soc->ast_lock);
1081 		return -QDF_STATUS_E_INVAL;
1082 	}
1083 
1084 	if (ast_entry->callback) {
1085 		cb = ast_entry->callback;
1086 		arg = ast_entry->cookie;
1087 	}
1088 
1089 	ast_entry->callback = callback;
1090 	ast_entry->cookie = cookie;
1091 
1092 	/*
1093 	 * if delete_in_progress is set AST delete is sent to target
1094 	 * and host is waiting for response should not sent delete
1095 	 * again
1096 	 */
1097 	if (!ast_entry->delete_in_progress)
1098 		dp_peer_del_ast(soc, ast_entry);
1099 
1100 	qdf_spin_unlock_bh(&soc->ast_lock);
1101 
1102 	if (cb) {
1103 		cb(soc->ctrl_psoc,
1104 		   dp_soc_to_cdp_soc(soc),
1105 		   arg,
1106 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1107 	}
1108 	return QDF_STATUS_SUCCESS;
1109 }
1110 
1111 /**
1112  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs
1113  * @ring_num: ring num of the ring being queried
1114  * @grp_mask: the grp_mask array for the ring type in question.
1115  *
1116  * The grp_mask array is indexed by group number and the bit fields correspond
1117  * to ring numbers.  We are finding which interrupt group a ring belongs to.
1118  *
1119  * Return: the index in the grp_mask array with the ring number.
1120  * -QDF_STATUS_E_NOENT if no entry is found
1121  */
1122 static int dp_srng_find_ring_in_mask(int ring_num, uint8_t *grp_mask)
1123 {
1124 	int ext_group_num;
1125 	uint8_t mask = 1 << ring_num;
1126 
1127 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
1128 	     ext_group_num++) {
1129 		if (mask & grp_mask[ext_group_num])
1130 			return ext_group_num;
1131 	}
1132 
1133 	return -QDF_STATUS_E_NOENT;
1134 }
1135 
1136 /**
1137  * dp_is_msi_group_number_invalid() - check msi_group_number valid or not
1138  * @msi_group_number: MSI group number.
1139  * @msi_data_count: MSI data count.
1140  *
1141  * Return: true if msi_group_number is invalid.
1142  */
1143 #ifdef WLAN_ONE_MSI_VECTOR
1144 static bool dp_is_msi_group_number_invalid(int msi_group_number,
1145 					   int msi_data_count)
1146 {
1147 	return false;
1148 }
1149 #else
1150 static bool dp_is_msi_group_number_invalid(int msi_group_number,
1151 					   int msi_data_count)
1152 {
1153 	return msi_group_number > msi_data_count;
1154 }
1155 #endif
1156 
1157 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1158 /**
1159  * dp_is_reo_ring_num_in_nf_grp1() - Check if the current reo ring is part of
1160  *				rx_near_full_grp1 mask
1161  * @soc: Datapath SoC Handle
1162  * @ring_num: REO ring number
1163  *
1164  * Return: 1 if the ring_num belongs to reo_nf_grp1,
1165  *	   0, otherwise.
1166  */
1167 static inline int
1168 dp_is_reo_ring_num_in_nf_grp1(struct dp_soc *soc, int ring_num)
1169 {
1170 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_1 & (1 << ring_num));
1171 }
1172 
1173 /**
1174  * dp_is_reo_ring_num_in_nf_grp2() - Check if the current reo ring is part of
1175  *				rx_near_full_grp2 mask
1176  * @soc: Datapath SoC Handle
1177  * @ring_num: REO ring number
1178  *
1179  * Return: 1 if the ring_num belongs to reo_nf_grp2,
1180  *	   0, otherwise.
1181  */
1182 static inline int
1183 dp_is_reo_ring_num_in_nf_grp2(struct dp_soc *soc, int ring_num)
1184 {
1185 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_2 & (1 << ring_num));
1186 }
1187 
1188 /**
1189  * dp_srng_get_near_full_irq_mask() - Get near-full irq mask for a particular
1190  *				ring type and number
1191  * @soc: Datapath SoC handle
1192  * @ring_type: SRNG type
1193  * @ring_num: ring num
1194  *
1195  * Return: near ful irq mask pointer
1196  */
1197 static inline
1198 uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
1199 					enum hal_ring_type ring_type,
1200 					int ring_num)
1201 {
1202 	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
1203 	uint8_t wbm2_sw_rx_rel_ring_id;
1204 	uint8_t *nf_irq_mask = NULL;
1205 
1206 	switch (ring_type) {
1207 	case WBM2SW_RELEASE:
1208 		wbm2_sw_rx_rel_ring_id =
1209 			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
1210 		if (ring_num != wbm2_sw_rx_rel_ring_id) {
1211 			nf_irq_mask = &soc->wlan_cfg_ctx->
1212 					int_tx_ring_near_full_irq_mask[0];
1213 		}
1214 		break;
1215 	case REO_DST:
1216 		if (dp_is_reo_ring_num_in_nf_grp1(soc, ring_num))
1217 			nf_irq_mask =
1218 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_1_mask[0];
1219 		else if (dp_is_reo_ring_num_in_nf_grp2(soc, ring_num))
1220 			nf_irq_mask =
1221 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_2_mask[0];
1222 		else
1223 			qdf_assert(0);
1224 		break;
1225 	default:
1226 		break;
1227 	}
1228 
1229 	return nf_irq_mask;
1230 }
1231 
1232 /**
1233  * dp_srng_set_msi2_ring_params() - Set the msi2 addr/data in the ring params
1234  * @soc: Datapath SoC handle
1235  * @ring_params: srng params handle
1236  * @msi2_addr: MSI2 addr to be set for the SRNG
1237  * @msi2_data: MSI2 data to be set for the SRNG
1238  *
1239  * Return: None
1240  */
1241 static inline
1242 void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
1243 				  struct hal_srng_params *ring_params,
1244 				  qdf_dma_addr_t msi2_addr,
1245 				  uint32_t msi2_data)
1246 {
1247 	ring_params->msi2_addr = msi2_addr;
1248 	ring_params->msi2_data = msi2_data;
1249 }
1250 
1251 /**
1252  * dp_srng_msi2_setup() - Setup MSI2 details for near full IRQ of an SRNG
1253  * @soc: Datapath SoC handle
1254  * @ring_params: ring_params for SRNG
1255  * @ring_type: SENG type
1256  * @ring_num: ring number for the SRNG
1257  * @nf_msi_grp_num: near full msi group number
1258  *
1259  * Return: None
1260  */
1261 static inline void
1262 dp_srng_msi2_setup(struct dp_soc *soc,
1263 		   struct hal_srng_params *ring_params,
1264 		   int ring_type, int ring_num, int nf_msi_grp_num)
1265 {
1266 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
1267 	int msi_data_count, ret;
1268 
1269 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1270 					  &msi_data_count, &msi_data_start,
1271 					  &msi_irq_start);
1272 	if (ret)
1273 		return;
1274 
1275 	if (nf_msi_grp_num < 0) {
1276 		dp_init_info("%pK: ring near full IRQ not part of an ext_group; ring_type: %d,ring_num %d",
1277 			     soc, ring_type, ring_num);
1278 		ring_params->msi2_addr = 0;
1279 		ring_params->msi2_data = 0;
1280 		return;
1281 	}
1282 
1283 	if (dp_is_msi_group_number_invalid(nf_msi_grp_num, msi_data_count)) {
1284 		dp_init_warn("%pK: 2 msi_groups will share an msi for near full IRQ; msi_group_num %d",
1285 			     soc, nf_msi_grp_num);
1286 		QDF_ASSERT(0);
1287 	}
1288 
1289 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
1290 
1291 	ring_params->nf_irq_support = 1;
1292 	ring_params->msi2_addr = addr_low;
1293 	ring_params->msi2_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
1294 	ring_params->msi2_data = (nf_msi_grp_num % msi_data_count)
1295 		+ msi_data_start;
1296 	ring_params->flags |= HAL_SRNG_MSI_INTR;
1297 }
1298 
1299 /* Percentage of ring entries considered as nearly full */
1300 #define DP_NF_HIGH_THRESH_PERCENTAGE	75
1301 /* Percentage of ring entries considered as critically full */
1302 #define DP_NF_CRIT_THRESH_PERCENTAGE	90
1303 /* Percentage of ring entries considered as safe threshold */
1304 #define DP_NF_SAFE_THRESH_PERCENTAGE	50
1305 
1306 /**
1307  * dp_srng_configure_nf_interrupt_thresholds() - Configure the thresholds for
1308  *			near full irq
1309  * @soc: Datapath SoC handle
1310  * @ring_params: ring params for SRNG
1311  * @ring_type: ring type
1312  */
1313 static inline void
1314 dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
1315 					  struct hal_srng_params *ring_params,
1316 					  int ring_type)
1317 {
1318 	if (ring_params->nf_irq_support) {
1319 		ring_params->high_thresh = (ring_params->num_entries *
1320 					    DP_NF_HIGH_THRESH_PERCENTAGE) / 100;
1321 		ring_params->crit_thresh = (ring_params->num_entries *
1322 					    DP_NF_CRIT_THRESH_PERCENTAGE) / 100;
1323 		ring_params->safe_thresh = (ring_params->num_entries *
1324 					    DP_NF_SAFE_THRESH_PERCENTAGE) /100;
1325 	}
1326 }
1327 
1328 /**
1329  * dp_srng_set_nf_thresholds() - Set the near full thresholds to srng data
1330  *			structure from the ring params
1331  * @soc: Datapath SoC handle
1332  * @srng: SRNG handle
1333  * @ring_params: ring params for a SRNG
1334  *
1335  * Return: None
1336  */
1337 static inline void
1338 dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
1339 			  struct hal_srng_params *ring_params)
1340 {
1341 	srng->crit_thresh = ring_params->crit_thresh;
1342 	srng->safe_thresh = ring_params->safe_thresh;
1343 }
1344 
#else
/*
 * WLAN_FEATURE_NEAR_FULL_IRQ is compiled out: the stubs below keep the
 * callers unconditional while doing nothing.
 */
static inline
uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
					enum hal_ring_type ring_type,
					int ring_num)
{
	return NULL;
}

static inline
void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
				  struct hal_srng_params *ring_params,
				  qdf_dma_addr_t msi2_addr,
				  uint32_t msi2_data)
{
}

static inline void
dp_srng_msi2_setup(struct dp_soc *soc,
		   struct hal_srng_params *ring_params,
		   int ring_type, int ring_num, int nf_msi_grp_num)
{
}

static inline void
dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
					  struct hal_srng_params *ring_params,
					  int ring_type)
{
}

static inline void
dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
			  struct hal_srng_params *ring_params)
{
}
#endif
1382 
/**
 * dp_srng_calculate_msi_group() - Find the interrupt ext_group numbers that
 *			service a given ring
 * @soc: Datapath SoC handle
 * @ring_type: SRNG type
 * @ring_num: ring number within the ring type
 * @reg_msi_grp_num: [out] msi group number for the regular interrupt; may be
 *		     negative (-QDF_STATUS_E_NOENT) when no group mask covers
 *		     the ring
 * @nf_irq_support: true when near-full irq is supported for this ring
 * @nf_msi_grp_num: [out] msi group number for the near-full interrupt;
 *		    written only when nf_irq_support is set and the ring has
 *		    a near-full mask
 *
 * Return: QDF_STATUS_SUCCESS when the ring type maps to a host interrupt
 *	   group mask, -QDF_STATUS_E_NOENT for rings that raise no host
 *	   interrupt (SW-to-HW, unused and CE rings)
 */
static int dp_srng_calculate_msi_group(struct dp_soc *soc,
				       enum hal_ring_type ring_type,
				       int ring_num,
				       int *reg_msi_grp_num,
				       bool nf_irq_support,
				       int *nf_msi_grp_num)
{
	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
	uint8_t *grp_mask, *nf_irq_mask = NULL;
	bool nf_irq_enabled = false;
	uint8_t wbm2_sw_rx_rel_ring_id;

	switch (ring_type) {
	case WBM2SW_RELEASE:
		wbm2_sw_rx_rel_ring_id =
			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
		if (ring_num == wbm2_sw_rx_rel_ring_id) {
			/* dp_rx_wbm_err_process - soc->rx_rel_ring */
			grp_mask = &cfg_ctx->int_rx_wbm_rel_ring_mask[0];
			ring_num = 0;
		} else { /* dp_tx_comp_handler - soc->tx_comp_ring */
			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
			nf_irq_mask = dp_srng_get_near_full_irq_mask(soc,
								     ring_type,
								     ring_num);
			if (nf_irq_mask)
				nf_irq_enabled = true;

			/*
			 * Using ring 4 as 4th tx completion ring since ring 3
			 * is Rx error ring
			 */
			if (ring_num == WBM2SW_TXCOMP_RING4_NUM)
				ring_num = TXCOMP_RING4_NUM;
		}
	break;

	case REO_EXCEPTION:
		/* dp_rx_err_process - &soc->reo_exception_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
	break;

	case REO_DST:
		/* dp_rx_process - soc->reo_dest_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
		nf_irq_mask = dp_srng_get_near_full_irq_mask(soc, ring_type,
							     ring_num);
		if (nf_irq_mask)
			nf_irq_enabled = true;
	break;

	case REO_STATUS:
		/* dp_reo_status_ring_handler - soc->reo_status_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
	break;

	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
	case RXDMA_MONITOR_STATUS:
	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
	case RXDMA_MONITOR_DST:
		/* dp_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
	break;
	case TX_MONITOR_DST:
		/* dp_tx_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_tx_mon_ring_mask[0];
	break;
	case RXDMA_DST:
		/* dp_rxdma_err_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
	break;

	case RXDMA_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
	break;

	case RXDMA_MONITOR_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
	break;

	case TX_MONITOR_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2txmon_ring_mask[0];
	break;

	case TCL_DATA:
	/* CMD_CREDIT_RING is used as command in 8074 and credit in 9000 */
	case TCL_CMD_CREDIT:
	case REO_CMD:
	case SW2WBM_RELEASE:
	case WBM_IDLE_LINK:
		/* normally empty SW_TO_HW rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case TCL_STATUS:
	case REO_REINJECT:
		/* misc unused rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case CE_SRC:
	case CE_DST:
	case CE_DST_STATUS:
		/* CE_rings - currently handled by hif */
	default:
		return -QDF_STATUS_E_NOENT;
	break;
	}

	*reg_msi_grp_num = dp_srng_find_ring_in_mask(ring_num, grp_mask);

	if (nf_irq_support && nf_irq_enabled) {
		*nf_msi_grp_num = dp_srng_find_ring_in_mask(ring_num,
							    nf_irq_mask);
	}

	return QDF_STATUS_SUCCESS;
}
1501 
1502 /*
1503  * dp_get_num_msi_available()- API to get number of MSIs available
1504  * @dp_soc: DP soc Handle
1505  * @interrupt_mode: Mode of interrupts
1506  *
1507  * Return: Number of MSIs available or 0 in case of integrated
1508  */
1509 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
1510 static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
1511 {
1512 	return 0;
1513 }
1514 #else
1515 /*
1516  * dp_get_num_msi_available()- API to get number of MSIs available
1517  * @dp_soc: DP soc Handle
1518  * @interrupt_mode: Mode of interrupts
1519  *
1520  * Return: Number of MSIs available or 0 in case of integrated
1521  */
1522 static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
1523 {
1524 	int msi_data_count;
1525 	int msi_data_start;
1526 	int msi_irq_start;
1527 	int ret;
1528 
1529 	if (interrupt_mode == DP_INTR_INTEGRATED) {
1530 		return 0;
1531 	} else if (interrupt_mode == DP_INTR_MSI || interrupt_mode ==
1532 		   DP_INTR_POLL) {
1533 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1534 						  &msi_data_count,
1535 						  &msi_data_start,
1536 						  &msi_irq_start);
1537 		if (ret) {
1538 			qdf_err("Unable to get DP MSI assignment %d",
1539 				interrupt_mode);
1540 			return -EINVAL;
1541 		}
1542 		return msi_data_count;
1543 	}
1544 	qdf_err("Interrupt mode invalid %d", interrupt_mode);
1545 	return -EINVAL;
1546 }
1547 #endif
1548 
/**
 * dp_srng_msi_setup() - Populate MSI address/data in ring params for a SRNG
 * @soc: Datapath SoC handle
 * @ring_params: ring params to be populated
 * @ring_type: SRNG type
 * @ring_num: ring number within the ring type
 *
 * Looks up the MSI assignment for the ring's interrupt ext_group and fills
 * msi_addr/msi_data; when the ring supports a near-full interrupt, also
 * configures the MSI2 parameters via dp_srng_msi2_setup().
 */
static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
			      *ring_params, int ring_type, int ring_num)
{
	int reg_msi_grp_num;
	/*
	 * nf_msi_grp_num needs to be initialized with negative value,
	 * to avoid configuring near-full msi for WBM2SW3 ring
	 */
	int nf_msi_grp_num = -1;
	int msi_data_count;
	int ret;
	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
	bool nf_irq_support;

	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					    &msi_data_count, &msi_data_start,
					    &msi_irq_start);

	/* No MSI assignment available: leave ring params untouched */
	if (ret)
		return;

	nf_irq_support = hal_srng_is_near_full_irq_supported(soc->hal_soc,
							     ring_type,
							     ring_num);
	ret = dp_srng_calculate_msi_group(soc, ring_type, ring_num,
					  &reg_msi_grp_num,
					  nf_irq_support,
					  &nf_msi_grp_num);
	/* Ring raises no host interrupt at all: clear MSI and MSI2 */
	if (ret < 0) {
		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
		return;
	}

	/* No regular-interrupt group; MSI2 may still need configuring */
	if (reg_msi_grp_num < 0) {
		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		goto configure_msi2;
	}

	if (dp_is_msi_group_number_invalid(reg_msi_grp_num, msi_data_count)) {
		dp_init_warn("%pK: 2 msi_groups will share an msi; msi_group_num %d",
			     soc, reg_msi_grp_num);
		QDF_ASSERT(0);
	}

	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	/* Wrap the group number onto the available MSI data values */
	ring_params->msi_data = (reg_msi_grp_num % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;

	dp_debug("ring type %u ring_num %u msi->data %u msi_addr %llx",
		 ring_type, ring_num, ring_params->msi_data,
		 (uint64_t)ring_params->msi_addr);

configure_msi2:
	if (!nf_irq_support) {
		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
		return;
	}

	dp_srng_msi2_setup(soc, ring_params, ring_type, ring_num,
			   nf_msi_grp_num);
}
1621 
1622 #ifdef FEATURE_AST
1623 /**
1624  * dp_print_peer_ast_entries() - Dump AST entries of peer
1625  * @soc: Datapath soc handle
1626  * @peer: Datapath peer
1627  * @arg: argument to iterate function
1628  *
1629  * return void
1630  */
1631 static void
1632 dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
1633 {
1634 	struct dp_ast_entry *ase, *tmp_ase;
1635 	uint32_t num_entries = 0;
1636 	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
1637 			"NONE", "STATIC", "SELF", "WDS", "HMWDS", "BSS",
1638 			"DA", "HMWDS_SEC"};
1639 
1640 	DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
1641 	    DP_PRINT_STATS("%6d mac_addr = "QDF_MAC_ADDR_FMT
1642 		    " peer_mac_addr = "QDF_MAC_ADDR_FMT
1643 		    " peer_id = %u"
1644 		    " type = %s"
1645 		    " next_hop = %d"
1646 		    " is_active = %d"
1647 		    " ast_idx = %d"
1648 		    " ast_hash = %d"
1649 		    " delete_in_progress = %d"
1650 		    " pdev_id = %d"
1651 		    " vdev_id = %d",
1652 		    ++num_entries,
1653 		    QDF_MAC_ADDR_REF(ase->mac_addr.raw),
1654 		    QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1655 		    ase->peer_id,
1656 		    type[ase->type],
1657 		    ase->next_hop,
1658 		    ase->is_active,
1659 		    ase->ast_idx,
1660 		    ase->ast_hash_value,
1661 		    ase->delete_in_progress,
1662 		    ase->pdev_id,
1663 		    ase->vdev_id);
1664 	}
1665 }
1666 
1667 /**
1668  * dp_print_ast_stats() - Dump AST table contents
1669  * @soc: Datapath soc handle
1670  *
1671  * return void
1672  */
1673 void dp_print_ast_stats(struct dp_soc *soc)
1674 {
1675 	DP_PRINT_STATS("AST Stats:");
1676 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
1677 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
1678 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
1679 	DP_PRINT_STATS("	Entries MAP ERR  = %d", soc->stats.ast.map_err);
1680 	DP_PRINT_STATS("	Entries Mismatch ERR  = %d",
1681 		       soc->stats.ast.ast_mismatch);
1682 
1683 	DP_PRINT_STATS("AST Table:");
1684 
1685 	qdf_spin_lock_bh(&soc->ast_lock);
1686 
1687 	dp_soc_iterate_peer(soc, dp_print_peer_ast_entries, NULL,
1688 			    DP_MOD_ID_GENERIC_STATS);
1689 
1690 	qdf_spin_unlock_bh(&soc->ast_lock);
1691 }
1692 #else
1693 void dp_print_ast_stats(struct dp_soc *soc)
1694 {
1695 	DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST");
1696 	return;
1697 }
1698 #endif
1699 
1700 /**
1701  * dp_print_peer_info() - Dump peer info
1702  * @soc: Datapath soc handle
1703  * @peer: Datapath peer handle
1704  * @arg: argument to iter function
1705  *
1706  * return void
1707  */
1708 static void
1709 dp_print_peer_info(struct dp_soc *soc, struct dp_peer *peer, void *arg)
1710 {
1711 	struct dp_txrx_peer *txrx_peer = NULL;
1712 
1713 	txrx_peer = dp_get_txrx_peer(peer);
1714 	if (!txrx_peer)
1715 		return;
1716 
1717 	DP_PRINT_STATS(" peer id = %d"
1718 		       " peer_mac_addr = "QDF_MAC_ADDR_FMT
1719 		       " nawds_enabled = %d"
1720 		       " bss_peer = %d"
1721 		       " wds_enabled = %d"
1722 		       " tx_cap_enabled = %d"
1723 		       " rx_cap_enabled = %d",
1724 		       peer->peer_id,
1725 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1726 		       txrx_peer->nawds_enabled,
1727 		       txrx_peer->bss_peer,
1728 		       txrx_peer->wds_enabled,
1729 		       peer->monitor_peer ?
1730 					peer->monitor_peer->tx_cap_enabled : 0,
1731 		       peer->monitor_peer ?
1732 					peer->monitor_peer->rx_cap_enabled : 0);
1733 }
1734 
1735 /**
1736  * dp_print_peer_table() - Dump all Peer stats
1737  * @vdev: Datapath Vdev handle
1738  *
1739  * return void
1740  */
1741 static void dp_print_peer_table(struct dp_vdev *vdev)
1742 {
1743 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
1744 	dp_vdev_iterate_peer(vdev, dp_print_peer_info, NULL,
1745 			     DP_MOD_ID_GENERIC_STATS);
1746 }
1747 
1748 #ifdef WLAN_DP_PER_RING_TYPE_CONFIG
1749 /**
1750  * dp_srng_configure_interrupt_thresholds() - Retrieve interrupt
1751  * threshold values from the wlan_srng_cfg table for each ring type
1752  * @soc: device handle
1753  * @ring_params: per ring specific parameters
1754  * @ring_type: Ring type
1755  * @ring_num: Ring number for a given ring type
1756  *
1757  * Fill the ring params with the interrupt threshold
1758  * configuration parameters available in the per ring type wlan_srng_cfg
1759  * table.
1760  *
1761  * Return: None
1762  */
1763 static void
1764 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
1765 				       struct hal_srng_params *ring_params,
1766 				       int ring_type, int ring_num,
1767 				       int num_entries)
1768 {
1769 	uint8_t wbm2_sw_rx_rel_ring_id;
1770 
1771 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);
1772 
1773 	if (ring_type == REO_DST) {
1774 		ring_params->intr_timer_thres_us =
1775 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1776 		ring_params->intr_batch_cntr_thres_entries =
1777 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1778 	} else if (ring_type == WBM2SW_RELEASE &&
1779 		   (ring_num == wbm2_sw_rx_rel_ring_id)) {
1780 		ring_params->intr_timer_thres_us =
1781 				wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1782 		ring_params->intr_batch_cntr_thres_entries =
1783 				wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1784 	} else {
1785 		ring_params->intr_timer_thres_us =
1786 				soc->wlan_srng_cfg[ring_type].timer_threshold;
1787 		ring_params->intr_batch_cntr_thres_entries =
1788 				soc->wlan_srng_cfg[ring_type].batch_count_threshold;
1789 	}
1790 	ring_params->low_threshold =
1791 			soc->wlan_srng_cfg[ring_type].low_threshold;
1792 	if (ring_params->low_threshold)
1793 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1794 
1795 	dp_srng_configure_nf_interrupt_thresholds(soc, ring_params, ring_type);
1796 }
#else
/**
 * dp_srng_configure_interrupt_thresholds() - Set interrupt timer/batch
 * thresholds in ring params based on ring type and number
 * @soc: device handle
 * @ring_params: per ring specific parameters
 * @ring_type: Ring type
 * @ring_num: Ring number for a given ring type
 * @num_entries: number of entries in the ring
 *
 * Return: None
 */
static void
dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
				       struct hal_srng_params *ring_params,
				       int ring_type, int ring_num,
				       int num_entries)
{
	uint8_t wbm2_sw_rx_rel_ring_id;

	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);

	if (ring_type == REO_DST) {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
	} else if (ring_type == WBM2SW_RELEASE &&
		   (ring_num < wbm2_sw_rx_rel_ring_id ||
		   ring_num == WBM2SW_TXCOMP_RING4_NUM)) {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
	} else {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
	}

	/* These rings do not require an interrupt to host; zero out the
	 * thresholds.
	 */
	switch (ring_type) {
	case REO_REINJECT:
	case REO_CMD:
	case TCL_DATA:
	case TCL_CMD_CREDIT:
	case TCL_STATUS:
	case WBM_IDLE_LINK:
	case SW2WBM_RELEASE:
	case PPE2TCL:
	case SW2RXDMA_NEW:
		ring_params->intr_timer_thres_us = 0;
		ring_params->intr_batch_cntr_thres_entries = 0;
		break;
	}

	/* Enable low threshold interrupts for rx buffer rings (regular and
	 * monitor buffer rings.
	 * TODO: See if this is required for any other ring
	 */
	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
	    (ring_type == RXDMA_MONITOR_STATUS ||
	    (ring_type == TX_MONITOR_BUF))) {
		/* TODO: Setting low threshold to 1/8th of ring size
		 * see if this needs to be configurable
		 */
		ring_params->low_threshold = num_entries >> 3;
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
		ring_params->intr_batch_cntr_thres_entries = 0;
	}

	/* During initialisation monitor rings are only filled with
	 * MON_BUF_MIN_ENTRIES entries. So low threshold needs to be set to
	 * a value less than that. Low threshold value is reconfigured again
	 * to 1/8th of the ring size when monitor vap is created.
	 */
	if (ring_type == RXDMA_MONITOR_BUF)
		ring_params->low_threshold = MON_BUF_MIN_ENTRIES >> 1;

	/* In case of PCI chipsets, we dont have PPDU end interrupts,
	 * so MONITOR STATUS ring is reaped by receiving MSI from srng.
	 * Keep batch threshold as 4 so that an interrupt is received for
	 * every 4 entries in the MONITOR_STATUS ring
	 */
	if ((ring_type == RXDMA_MONITOR_STATUS) &&
	    (soc->intr_mode == DP_INTR_MSI))
		ring_params->intr_batch_cntr_thres_entries = 4;
}
#endif
1878 
1879 #ifdef DP_MEM_PRE_ALLOC
1880 
1881 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
1882 			   size_t ctxt_size)
1883 {
1884 	void *ctxt_mem;
1885 
1886 	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_context) {
1887 		dp_warn("dp_prealloc_get_context null!");
1888 		goto dynamic_alloc;
1889 	}
1890 
1891 	ctxt_mem = soc->cdp_soc.ol_ops->dp_prealloc_get_context(ctxt_type);
1892 
1893 	if (ctxt_mem)
1894 		goto end;
1895 
1896 dynamic_alloc:
1897 	dp_info("Pre-alloc of ctxt failed. Dynamic allocation");
1898 	ctxt_mem = qdf_mem_malloc(ctxt_size);
1899 end:
1900 	return ctxt_mem;
1901 }
1902 
1903 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
1904 			 void *vaddr)
1905 {
1906 	QDF_STATUS status;
1907 
1908 	if (soc->cdp_soc.ol_ops->dp_prealloc_put_context) {
1909 		status = soc->cdp_soc.ol_ops->dp_prealloc_put_context(
1910 								ctxt_type,
1911 								vaddr);
1912 	} else {
1913 		dp_warn("dp_prealloc_get_context null!");
1914 		status = QDF_STATUS_E_NOSUPPORT;
1915 	}
1916 
1917 	if (QDF_IS_STATUS_ERROR(status)) {
1918 		dp_info("Context not pre-allocated");
1919 		qdf_mem_free(vaddr);
1920 	}
1921 }
1922 
1923 static inline
1924 void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
1925 					   struct dp_srng *srng,
1926 					   uint32_t ring_type)
1927 {
1928 	void *mem;
1929 
1930 	qdf_assert(!srng->is_mem_prealloc);
1931 
1932 	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_consistent) {
1933 		dp_warn("dp_prealloc_get_consistent is null!");
1934 		goto qdf;
1935 	}
1936 
1937 	mem =
1938 		soc->cdp_soc.ol_ops->dp_prealloc_get_consistent
1939 						(&srng->alloc_size,
1940 						 &srng->base_vaddr_unaligned,
1941 						 &srng->base_paddr_unaligned,
1942 						 &srng->base_paddr_aligned,
1943 						 DP_RING_BASE_ALIGN, ring_type);
1944 
1945 	if (mem) {
1946 		srng->is_mem_prealloc = true;
1947 		goto end;
1948 	}
1949 qdf:
1950 	mem =  qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
1951 						&srng->base_vaddr_unaligned,
1952 						&srng->base_paddr_unaligned,
1953 						&srng->base_paddr_aligned,
1954 						DP_RING_BASE_ALIGN);
1955 end:
1956 	dp_info("%s memory %pK dp_srng %pK ring_type %d alloc_size %d num_entries %d",
1957 		srng->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", mem,
1958 		srng, ring_type, srng->alloc_size, srng->num_entries);
1959 	return mem;
1960 }
1961 
1962 static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
1963 					       struct dp_srng *srng)
1964 {
1965 	if (srng->is_mem_prealloc) {
1966 		if (!soc->cdp_soc.ol_ops->dp_prealloc_put_consistent) {
1967 			dp_warn("dp_prealloc_put_consistent is null!");
1968 			QDF_BUG(0);
1969 			return;
1970 		}
1971 		soc->cdp_soc.ol_ops->dp_prealloc_put_consistent
1972 						(srng->alloc_size,
1973 						 srng->base_vaddr_unaligned,
1974 						 srng->base_paddr_unaligned);
1975 
1976 	} else {
1977 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1978 					srng->alloc_size,
1979 					srng->base_vaddr_unaligned,
1980 					srng->base_paddr_unaligned, 0);
1981 	}
1982 }
1983 
1984 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
1985 				   enum dp_desc_type desc_type,
1986 				   struct qdf_mem_multi_page_t *pages,
1987 				   size_t element_size,
1988 				   uint16_t element_num,
1989 				   qdf_dma_context_t memctxt,
1990 				   bool cacheable)
1991 {
1992 	if (!soc->cdp_soc.ol_ops->dp_get_multi_pages) {
1993 		dp_warn("dp_get_multi_pages is null!");
1994 		goto qdf;
1995 	}
1996 
1997 	pages->num_pages = 0;
1998 	pages->is_mem_prealloc = 0;
1999 	soc->cdp_soc.ol_ops->dp_get_multi_pages(desc_type,
2000 						element_size,
2001 						element_num,
2002 						pages,
2003 						cacheable);
2004 	if (pages->num_pages)
2005 		goto end;
2006 
2007 qdf:
2008 	qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size,
2009 				  element_num, memctxt, cacheable);
2010 end:
2011 	dp_info("%s desc_type %d element_size %d element_num %d cacheable %d",
2012 		pages->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc",
2013 		desc_type, (int)element_size, element_num, cacheable);
2014 }
2015 
2016 void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
2017 				  enum dp_desc_type desc_type,
2018 				  struct qdf_mem_multi_page_t *pages,
2019 				  qdf_dma_context_t memctxt,
2020 				  bool cacheable)
2021 {
2022 	if (pages->is_mem_prealloc) {
2023 		if (!soc->cdp_soc.ol_ops->dp_put_multi_pages) {
2024 			dp_warn("dp_put_multi_pages is null!");
2025 			QDF_BUG(0);
2026 			return;
2027 		}
2028 
2029 		soc->cdp_soc.ol_ops->dp_put_multi_pages(desc_type, pages);
2030 		qdf_mem_zero(pages, sizeof(*pages));
2031 	} else {
2032 		qdf_mem_multi_pages_free(soc->osdev, pages,
2033 					 memctxt, cacheable);
2034 	}
2035 }
2036 
2037 #else
2038 
2039 static inline
2040 void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
2041 					   struct dp_srng *srng,
2042 					   uint32_t ring_type)
2043 
2044 {
2045 	void *mem;
2046 
2047 	mem = qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
2048 					       &srng->base_vaddr_unaligned,
2049 					       &srng->base_paddr_unaligned,
2050 					       &srng->base_paddr_aligned,
2051 					       DP_RING_BASE_ALIGN);
2052 	if (mem)
2053 		qdf_mem_set(srng->base_vaddr_unaligned, 0, srng->alloc_size);
2054 
2055 	return mem;
2056 }
2057 
2058 static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
2059 					       struct dp_srng *srng)
2060 {
2061 	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2062 				srng->alloc_size,
2063 				srng->base_vaddr_unaligned,
2064 				srng->base_paddr_unaligned, 0);
2065 }
2066 
2067 #endif /* DP_MEM_PRE_ALLOC */
2068 
2069 /*
2070  * dp_srng_free() - Free SRNG memory
2071  * @soc  : Data path soc handle
2072  * @srng : SRNG pointer
2073  *
2074  * return: None
2075  */
2076 void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng)
2077 {
2078 	if (srng->alloc_size && srng->base_vaddr_unaligned) {
2079 		if (!srng->cached) {
2080 			dp_srng_mem_free_consistent(soc, srng);
2081 		} else {
2082 			qdf_mem_free(srng->base_vaddr_unaligned);
2083 		}
2084 		srng->alloc_size = 0;
2085 		srng->base_vaddr_unaligned = NULL;
2086 	}
2087 	srng->hal_srng = NULL;
2088 }
2089 
2090 qdf_export_symbol(dp_srng_free);
2091 
2092 #ifdef DISABLE_MON_RING_MSI_CFG
2093 /*
2094  * dp_skip_msi_cfg() - Check if msi cfg has to be skipped for ring_type
2095  * @ring_type: sring type
2096  *
2097  * Return: True if msi cfg should be skipped for srng type else false
2098  */
2099 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
2100 {
2101 	if (ring_type == RXDMA_MONITOR_STATUS)
2102 		return true;
2103 
2104 	return false;
2105 }
2106 #else
2107 #ifdef DP_CON_MON_MSI_ENABLED
2108 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
2109 {
2110 	if (soc->cdp_soc.ol_ops->get_con_mode &&
2111 	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE) {
2112 		if (ring_type == REO_DST)
2113 			return true;
2114 	} else if (ring_type == RXDMA_MONITOR_STATUS) {
2115 		return true;
2116 	}
2117 
2118 	return false;
2119 }
2120 #else
/*
 * dp_skip_msi_cfg() - Check if MSI cfg has to be skipped for ring_type
 * @soc: DP SOC handle (unused)
 * @ring_type: sring type (unused)
 *
 * MSI is never skipped in this build configuration.
 *
 * Return: false always
 */
static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
{
	return false;
}
2125 #endif /* DP_CON_MON_MSI_ENABLED */
2126 #endif /* DISABLE_MON_RING_MSI_CFG */
2127 
2128 /*
2129  * dp_srng_init() - Initialize SRNG
2130  * @soc  : Data path soc handle
2131  * @srng : SRNG pointer
2132  * @ring_type : Ring Type
2133  * @ring_num: Ring number
2134  * @mac_id: mac_id
2135  *
2136  * return: QDF_STATUS
2137  */
2138 QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng,
2139 			int ring_type, int ring_num, int mac_id)
2140 {
2141 	hal_soc_handle_t hal_soc = soc->hal_soc;
2142 	struct hal_srng_params ring_params;
2143 
2144 	if (srng->hal_srng) {
2145 		dp_init_err("%pK: Ring type: %d, num:%d is already initialized",
2146 			    soc, ring_type, ring_num);
2147 		return QDF_STATUS_SUCCESS;
2148 	}
2149 
2150 	/* memset the srng ring to zero */
2151 	qdf_mem_zero(srng->base_vaddr_unaligned, srng->alloc_size);
2152 
2153 	qdf_mem_zero(&ring_params, sizeof(struct hal_srng_params));
2154 	ring_params.ring_base_paddr = srng->base_paddr_aligned;
2155 	ring_params.ring_base_vaddr = srng->base_vaddr_aligned;
2156 
2157 	ring_params.num_entries = srng->num_entries;
2158 
2159 	dp_info("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
2160 		ring_type, ring_num,
2161 		(void *)ring_params.ring_base_vaddr,
2162 		(void *)ring_params.ring_base_paddr,
2163 		ring_params.num_entries);
2164 
2165 	if (soc->intr_mode == DP_INTR_MSI && !dp_skip_msi_cfg(soc, ring_type)) {
2166 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
2167 		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
2168 				 ring_type, ring_num);
2169 	} else {
2170 		ring_params.msi_data = 0;
2171 		ring_params.msi_addr = 0;
2172 		dp_srng_set_msi2_ring_params(soc, &ring_params, 0, 0);
2173 		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
2174 				 ring_type, ring_num);
2175 	}
2176 
2177 	dp_srng_configure_interrupt_thresholds(soc, &ring_params,
2178 					       ring_type, ring_num,
2179 					       srng->num_entries);
2180 
2181 	dp_srng_set_nf_thresholds(soc, srng, &ring_params);
2182 
2183 	if (srng->cached)
2184 		ring_params.flags |= HAL_SRNG_CACHED_DESC;
2185 
2186 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
2187 					mac_id, &ring_params);
2188 
2189 	if (!srng->hal_srng) {
2190 		dp_srng_free(soc, srng);
2191 		return QDF_STATUS_E_FAILURE;
2192 	}
2193 
2194 	return QDF_STATUS_SUCCESS;
2195 }
2196 
2197 qdf_export_symbol(dp_srng_init);
2198 
2199 /*
2200  * dp_srng_alloc() - Allocate memory for SRNG
2201  * @soc  : Data path soc handle
2202  * @srng : SRNG pointer
2203  * @ring_type : Ring Type
2204  * @num_entries: Number of entries
2205  * @cached: cached flag variable
2206  *
2207  * return: QDF_STATUS
2208  */
2209 QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
2210 			 int ring_type, uint32_t num_entries,
2211 			 bool cached)
2212 {
2213 	hal_soc_handle_t hal_soc = soc->hal_soc;
2214 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
2215 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
2216 
2217 	if (srng->base_vaddr_unaligned) {
2218 		dp_init_err("%pK: Ring type: %d, is already allocated",
2219 			    soc, ring_type);
2220 		return QDF_STATUS_SUCCESS;
2221 	}
2222 
2223 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
2224 	srng->hal_srng = NULL;
2225 	srng->alloc_size = num_entries * entry_size;
2226 	srng->num_entries = num_entries;
2227 	srng->cached = cached;
2228 
2229 	if (!cached) {
2230 		srng->base_vaddr_aligned =
2231 		    dp_srng_aligned_mem_alloc_consistent(soc,
2232 							 srng,
2233 							 ring_type);
2234 	} else {
2235 		srng->base_vaddr_aligned = qdf_aligned_malloc(
2236 					&srng->alloc_size,
2237 					&srng->base_vaddr_unaligned,
2238 					&srng->base_paddr_unaligned,
2239 					&srng->base_paddr_aligned,
2240 					DP_RING_BASE_ALIGN);
2241 	}
2242 
2243 	if (!srng->base_vaddr_aligned)
2244 		return QDF_STATUS_E_NOMEM;
2245 
2246 	return QDF_STATUS_SUCCESS;
2247 }
2248 
2249 qdf_export_symbol(dp_srng_alloc);
2250 
2251 /*
2252  * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
2253  * @soc: DP SOC handle
2254  * @srng: source ring structure
2255  * @ring_type: type of ring
2256  * @ring_num: ring number
2257  *
2258  * Return: None
2259  */
2260 void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
2261 		    int ring_type, int ring_num)
2262 {
2263 	if (!srng->hal_srng) {
2264 		dp_init_err("%pK: Ring type: %d, num:%d not setup",
2265 			    soc, ring_type, ring_num);
2266 		return;
2267 	}
2268 
2269 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
2270 	srng->hal_srng = NULL;
2271 }
2272 
2273 qdf_export_symbol(dp_srng_deinit);
2274 
2275 /* TODO: Need this interface from HIF */
2276 void *hif_get_hal_handle(struct hif_opaque_softc *hif_handle);
2277 
2278 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
2279 int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
2280 			 hal_ring_handle_t hal_ring_hdl)
2281 {
2282 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2283 	uint32_t hp, tp;
2284 	uint8_t ring_id;
2285 
2286 	if (!int_ctx)
2287 		return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
2288 
2289 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
2290 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2291 
2292 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
2293 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_START);
2294 
2295 	return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
2296 }
2297 
2298 void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
2299 			hal_ring_handle_t hal_ring_hdl)
2300 {
2301 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2302 	uint32_t hp, tp;
2303 	uint8_t ring_id;
2304 
2305 	if (!int_ctx)
2306 		return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
2307 
2308 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
2309 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2310 
2311 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
2312 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_END);
2313 
2314 	return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
2315 }
2316 
2317 static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
2318 					      uint8_t hist_group_id)
2319 {
2320 	hif_record_event(dp_soc->hif_handle, hist_group_id,
2321 			 0, 0, 0, HIF_EVENT_TIMER_ENTRY);
2322 }
2323 
2324 static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
2325 					     uint8_t hist_group_id)
2326 {
2327 	hif_record_event(dp_soc->hif_handle, hist_group_id,
2328 			 0, 0, 0, HIF_EVENT_TIMER_EXIT);
2329 }
2330 #else
2331 
/* Event history disabled: timer-entry recording is a no-op */
static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
					      uint8_t hist_group_id)
{
}
2336 
/* Event history disabled: timer-exit recording is a no-op */
static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
					     uint8_t hist_group_id)
{
}
2341 
2342 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
2343 
2344 /*
2345  * dp_should_timer_irq_yield() - Decide if the bottom half should yield
2346  * @soc: DP soc handle
2347  * @work_done: work done in softirq context
2348  * @start_time: start time for the softirq
2349  *
2350  * Return: enum with yield code
2351  */
2352 enum timer_yield_status
2353 dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
2354 			  uint64_t start_time)
2355 {
2356 	uint64_t cur_time = qdf_get_log_timestamp();
2357 
2358 	if (!work_done)
2359 		return DP_TIMER_WORK_DONE;
2360 
2361 	if (cur_time - start_time > DP_MAX_TIMER_EXEC_TIME_TICKS)
2362 		return DP_TIMER_TIME_EXHAUST;
2363 
2364 	return DP_TIMER_NO_YIELD;
2365 }
2366 
2367 qdf_export_symbol(dp_should_timer_irq_yield);
2368 
2369 #ifdef DP_CON_MON_MSI_ENABLED
2370 static int dp_process_rxdma_dst_ring(struct dp_soc *soc,
2371 				     struct dp_intr *int_ctx,
2372 				     int mac_for_pdev,
2373 				     int total_budget)
2374 {
2375 	if (dp_soc_get_con_mode(soc) == QDF_GLOBAL_MONITOR_MODE)
2376 		return dp_monitor_process(soc, int_ctx, mac_for_pdev,
2377 					  total_budget);
2378 	else
2379 		return dp_rxdma_err_process(int_ctx, soc, mac_for_pdev,
2380 					    total_budget);
2381 }
2382 #else
/*
 * dp_process_rxdma_dst_ring() - Service the RXDMA destination ring; with
 * monitor-mode MSI support compiled out it is always the error ring.
 */
static int dp_process_rxdma_dst_ring(struct dp_soc *soc,
				     struct dp_intr *int_ctx,
				     int mac_for_pdev,
				     int total_budget)
{
	return dp_rxdma_err_process(int_ctx, soc,
				    mac_for_pdev, total_budget);
}
2391 #endif
2392 
2393 /**
2394  * dp_process_lmac_rings() - Process LMAC rings
2395  * @int_ctx: interrupt context
2396  * @total_budget: budget of work which can be done
2397  *
2398  * Return: work done
2399  */
2400 static int dp_process_lmac_rings(struct dp_intr *int_ctx, int total_budget)
2401 {
2402 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2403 	struct dp_soc *soc = int_ctx->soc;
2404 	uint32_t remaining_quota = total_budget;
2405 	struct dp_pdev *pdev = NULL;
2406 	uint32_t work_done  = 0;
2407 	int budget = total_budget;
2408 	int ring = 0;
2409 
2410 	/* Process LMAC interrupts */
2411 	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
2412 		int mac_for_pdev = ring;
2413 
2414 		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
2415 		if (!pdev)
2416 			continue;
2417 		if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
2418 			work_done = dp_monitor_process(soc, int_ctx,
2419 						       mac_for_pdev,
2420 						       remaining_quota);
2421 			if (work_done)
2422 				intr_stats->num_rx_mon_ring_masks++;
2423 			budget -= work_done;
2424 			if (budget <= 0)
2425 				goto budget_done;
2426 			remaining_quota = budget;
2427 		}
2428 
2429 		if (int_ctx->tx_mon_ring_mask & (1 << mac_for_pdev)) {
2430 			work_done = dp_tx_mon_process(soc, int_ctx,
2431 						      mac_for_pdev,
2432 						      remaining_quota);
2433 			if (work_done)
2434 				intr_stats->num_tx_mon_ring_masks++;
2435 			budget -= work_done;
2436 			if (budget <= 0)
2437 				goto budget_done;
2438 			remaining_quota = budget;
2439 		}
2440 
2441 		if (int_ctx->rxdma2host_ring_mask &
2442 				(1 << mac_for_pdev)) {
2443 			work_done = dp_process_rxdma_dst_ring(soc, int_ctx,
2444 							      mac_for_pdev,
2445 							      remaining_quota);
2446 			if (work_done)
2447 				intr_stats->num_rxdma2host_ring_masks++;
2448 			budget -=  work_done;
2449 			if (budget <= 0)
2450 				goto budget_done;
2451 			remaining_quota = budget;
2452 		}
2453 
2454 		if (int_ctx->host2rxdma_ring_mask & (1 << mac_for_pdev)) {
2455 			union dp_rx_desc_list_elem_t *desc_list = NULL;
2456 			union dp_rx_desc_list_elem_t *tail = NULL;
2457 			struct dp_srng *rx_refill_buf_ring;
2458 			struct rx_desc_pool *rx_desc_pool;
2459 
2460 			rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
2461 			if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
2462 				rx_refill_buf_ring =
2463 					&soc->rx_refill_buf_ring[mac_for_pdev];
2464 			else
2465 				rx_refill_buf_ring =
2466 					&soc->rx_refill_buf_ring[pdev->lmac_id];
2467 
2468 			intr_stats->num_host2rxdma_ring_masks++;
2469 			dp_rx_buffers_lt_replenish_simple(soc, mac_for_pdev,
2470 							  rx_refill_buf_ring,
2471 							  rx_desc_pool,
2472 							  0,
2473 							  &desc_list,
2474 							  &tail);
2475 		}
2476 
2477 	}
2478 
2479 	if (int_ctx->host2rxdma_mon_ring_mask)
2480 		dp_rx_mon_buf_refill(int_ctx);
2481 
2482 	if (int_ctx->host2txmon_ring_mask)
2483 		dp_tx_mon_buf_refill(int_ctx);
2484 
2485 budget_done:
2486 	return total_budget - budget;
2487 }
2488 
2489 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
2490 /**
2491  * dp_service_near_full_srngs() - Bottom half handler to process the near
2492  *				full IRQ on a SRNG
2493  * @dp_ctx: Datapath SoC handle
2494  * @dp_budget: Number of SRNGs which can be processed in a single attempt
2495  *		without rescheduling
2496  *
2497  * Return: remaining budget/quota for the soc device
2498  */
2499 static uint32_t dp_service_near_full_srngs(void *dp_ctx, uint32_t dp_budget)
2500 {
2501 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2502 	struct dp_soc *soc = int_ctx->soc;
2503 
2504 	/*
2505 	 * dp_service_near_full_srngs arch ops should be initialized always
2506 	 * if the NEAR FULL IRQ feature is enabled.
2507 	 */
2508 	return soc->arch_ops.dp_service_near_full_srngs(soc, int_ctx,
2509 							dp_budget);
2510 }
2511 #endif
2512 
2513 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2514 
2515 /*
2516  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
2517  * @dp_ctx: DP SOC handle
2518  * @budget: Number of frames/descriptors that can be processed in one shot
2519  *
2520  * Return: remaining budget/quota for the soc device
2521  */
2522 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
2523 {
2524 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2525 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2526 	struct dp_soc *soc = int_ctx->soc;
2527 	int ring = 0;
2528 	int index;
2529 	uint32_t work_done  = 0;
2530 	int budget = dp_budget;
2531 	uint8_t tx_mask = int_ctx->tx_ring_mask;
2532 	uint8_t rx_mask = int_ctx->rx_ring_mask;
2533 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
2534 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
2535 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
2536 	uint32_t remaining_quota = dp_budget;
2537 
2538 	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x\n",
2539 			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
2540 			 reo_status_mask,
2541 			 int_ctx->rx_mon_ring_mask,
2542 			 int_ctx->host2rxdma_ring_mask,
2543 			 int_ctx->rxdma2host_ring_mask);
2544 
2545 	/* Process Tx completion interrupts first to return back buffers */
2546 	for (index = 0; index < soc->num_tx_comp_rings; index++) {
2547 		if (!(1 << wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) & tx_mask))
2548 			continue;
2549 		work_done = dp_tx_comp_handler(int_ctx,
2550 					       soc,
2551 					       soc->tx_comp_ring[index].hal_srng,
2552 					       index, remaining_quota);
2553 		if (work_done) {
2554 			intr_stats->num_tx_ring_masks[index]++;
2555 			dp_verbose_debug("tx mask 0x%x index %d, budget %d, work_done %d",
2556 					 tx_mask, index, budget,
2557 					 work_done);
2558 		}
2559 		budget -= work_done;
2560 		if (budget <= 0)
2561 			goto budget_done;
2562 
2563 		remaining_quota = budget;
2564 	}
2565 
2566 	/* Process REO Exception ring interrupt */
2567 	if (rx_err_mask) {
2568 		work_done = dp_rx_err_process(int_ctx, soc,
2569 					      soc->reo_exception_ring.hal_srng,
2570 					      remaining_quota);
2571 
2572 		if (work_done) {
2573 			intr_stats->num_rx_err_ring_masks++;
2574 			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
2575 					 work_done, budget);
2576 		}
2577 
2578 		budget -=  work_done;
2579 		if (budget <= 0) {
2580 			goto budget_done;
2581 		}
2582 		remaining_quota = budget;
2583 	}
2584 
2585 	/* Process Rx WBM release ring interrupt */
2586 	if (rx_wbm_rel_mask) {
2587 		work_done = dp_rx_wbm_err_process(int_ctx, soc,
2588 						  soc->rx_rel_ring.hal_srng,
2589 						  remaining_quota);
2590 
2591 		if (work_done) {
2592 			intr_stats->num_rx_wbm_rel_ring_masks++;
2593 			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
2594 					 work_done, budget);
2595 		}
2596 
2597 		budget -=  work_done;
2598 		if (budget <= 0) {
2599 			goto budget_done;
2600 		}
2601 		remaining_quota = budget;
2602 	}
2603 
2604 	/* Process Rx interrupts */
2605 	if (rx_mask) {
2606 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
2607 			if (!(rx_mask & (1 << ring)))
2608 				continue;
2609 			work_done = soc->arch_ops.dp_rx_process(int_ctx,
2610 						  soc->reo_dest_ring[ring].hal_srng,
2611 						  ring,
2612 						  remaining_quota);
2613 			if (work_done) {
2614 				intr_stats->num_rx_ring_masks[ring]++;
2615 				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
2616 						 rx_mask, ring,
2617 						 work_done, budget);
2618 				budget -=  work_done;
2619 				if (budget <= 0)
2620 					goto budget_done;
2621 				remaining_quota = budget;
2622 			}
2623 		}
2624 	}
2625 
2626 	if (reo_status_mask) {
2627 		if (dp_reo_status_ring_handler(int_ctx, soc))
2628 			int_ctx->intr_stats.num_reo_status_ring_masks++;
2629 	}
2630 
2631 	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
2632 		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
2633 		if (work_done) {
2634 			budget -=  work_done;
2635 			if (budget <= 0)
2636 				goto budget_done;
2637 			remaining_quota = budget;
2638 		}
2639 	}
2640 
2641 	qdf_lro_flush(int_ctx->lro_ctx);
2642 	intr_stats->num_masks++;
2643 
2644 budget_done:
2645 	return dp_budget - budget;
2646 }
2647 
2648 #else /* QCA_HOST_MODE_WIFI_DISABLED */
2649 
2650 /*
2651  * dp_service_srngs() - Top level handler for DP Monitor Ring interrupts
2652  * @dp_ctx: DP SOC handle
2653  * @budget: Number of frames/descriptors that can be processed in one shot
2654  *
2655  * Return: remaining budget/quota for the soc device
2656  */
2657 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
2658 {
2659 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2660 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2661 	struct dp_soc *soc = int_ctx->soc;
2662 	uint32_t remaining_quota = dp_budget;
2663 	uint32_t work_done  = 0;
2664 	int budget = dp_budget;
2665 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
2666 
2667 	if (reo_status_mask) {
2668 		if (dp_reo_status_ring_handler(int_ctx, soc))
2669 			int_ctx->intr_stats.num_reo_status_ring_masks++;
2670 	}
2671 
2672 	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
2673 		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
2674 		if (work_done) {
2675 			budget -=  work_done;
2676 			if (budget <= 0)
2677 				goto budget_done;
2678 			remaining_quota = budget;
2679 		}
2680 	}
2681 
2682 	qdf_lro_flush(int_ctx->lro_ctx);
2683 	intr_stats->num_masks++;
2684 
2685 budget_done:
2686 	return dp_budget - budget;
2687 }
2688 
2689 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2690 
/* dp_interrupt_timer()- timer poll for interrupts
 *
 * @arg: SoC Handle
 *
 * Polls the monitor-destination rings of every MAC until the budget is
 * spent, the rings drain, or the maximum execution time elapses, then
 * re-arms itself (quickly if work remains, at the normal poll interval
 * otherwise). When full poll mode is configured it instead services all
 * datapath interrupt contexts.
 *
 * Return:
 *
 */
static void dp_interrupt_timer(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *) arg;
	struct dp_pdev *pdev = soc->pdev_list[0];
	enum timer_yield_status yield = DP_TIMER_NO_YIELD;
	uint32_t work_done  = 0, total_work_done = 0;
	int budget = 0xffff, i;
	uint32_t remaining_quota = budget;
	uint64_t start_time;
	uint32_t lmac_id = DP_MON_INVALID_LMAC_ID;
	uint8_t dp_intr_id = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
	uint32_t lmac_iter;
	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
	enum reg_wifi_band mon_band;

	/*
	 * this logic makes all data path interfacing rings (UMAC/LMAC)
	 * and Monitor rings polling mode when NSS offload is disabled
	 */
	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx) &&
	    !wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
		if (qdf_atomic_read(&soc->cmn_init_done)) {
			/* Service every interrupt context, then re-arm */
			for (i = 0; i < wlan_cfg_get_num_contexts(
						soc->wlan_cfg_ctx); i++)
				dp_service_srngs(&soc->intr_ctx[i], 0xffff);

			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
		}
		return;
	}

	/* SoC not fully initialized yet: do nothing and do not re-arm */
	if (!qdf_atomic_read(&soc->cmn_init_done))
		return;

	/* Resolve the LMAC / interrupt context of the monitored band */
	if (dp_monitor_is_chan_band_known(pdev)) {
		mon_band = dp_monitor_get_chan_band(pdev);
		lmac_id = pdev->ch_band_lmac_id_mapping[mon_band];
		if (qdf_likely(lmac_id != DP_MON_INVALID_LMAC_ID)) {
			dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id];
			dp_srng_record_timer_entry(soc, dp_intr_id);
		}
	}

	start_time = qdf_get_log_timestamp();
	dp_is_hw_dbs_enable(soc, &max_mac_rings);

	while (yield == DP_TIMER_NO_YIELD) {
		for (lmac_iter = 0; lmac_iter < max_mac_rings; lmac_iter++) {
			/* Only the monitored LMAC is processed; packets on
			 * the other MACs are dropped to keep their rings
			 * from backing up.
			 */
			if (lmac_iter == lmac_id)
				work_done = dp_monitor_process(soc,
						&soc->intr_ctx[dp_intr_id],
						lmac_iter, remaining_quota);
			else
				work_done =
					dp_monitor_drop_packets_for_mac(pdev,
							     lmac_iter,
							     remaining_quota);
			if (work_done) {
				budget -=  work_done;
				if (budget <= 0) {
					yield = DP_TIMER_WORK_EXHAUST;
					goto budget_done;
				}
				remaining_quota = budget;
				total_work_done += work_done;
			}
		}

		yield = dp_should_timer_irq_yield(soc, total_work_done,
						  start_time);
		total_work_done = 0;
	}

budget_done:
	/* Re-arm quickly while work remains, otherwise at the poll period */
	if (yield == DP_TIMER_WORK_EXHAUST ||
	    yield == DP_TIMER_TIME_EXHAUST)
		qdf_timer_mod(&soc->int_timer, 1);
	else
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

	if (lmac_id != DP_MON_INVALID_LMAC_ID)
		dp_srng_record_timer_exit(soc, dp_intr_id);
}
2781 
2782 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
2783 static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
2784 					struct dp_intr *intr_ctx)
2785 {
2786 	if (intr_ctx->rx_mon_ring_mask)
2787 		return true;
2788 
2789 	return false;
2790 }
2791 #else
/* Event history disabled: monitor masks are never considered valid */
static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
					struct dp_intr *intr_ctx)
{
	return false;
}
2797 #endif
2798 
2799 /*
2800  * dp_soc_attach_poll() - Register handlers for DP interrupts
2801  * @txrx_soc: DP SOC handle
2802  *
2803  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
2804  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
2805  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
2806  *
2807  * Return: 0 for success, nonzero for failure.
2808  */
2809 static QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc)
2810 {
2811 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2812 	int i;
2813 	int lmac_id = 0;
2814 
2815 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
2816 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
2817 	soc->intr_mode = DP_INTR_POLL;
2818 
2819 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
2820 		soc->intr_ctx[i].dp_intr_id = i;
2821 		soc->intr_ctx[i].tx_ring_mask =
2822 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
2823 		soc->intr_ctx[i].rx_ring_mask =
2824 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
2825 		soc->intr_ctx[i].rx_mon_ring_mask =
2826 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
2827 		soc->intr_ctx[i].rx_err_ring_mask =
2828 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
2829 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
2830 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
2831 		soc->intr_ctx[i].reo_status_ring_mask =
2832 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
2833 		soc->intr_ctx[i].rxdma2host_ring_mask =
2834 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
2835 		soc->intr_ctx[i].soc = soc;
2836 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
2837 
2838 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
2839 			hif_event_history_init(soc->hif_handle, i);
2840 			soc->mon_intr_id_lmac_map[lmac_id] = i;
2841 			lmac_id++;
2842 		}
2843 	}
2844 
2845 	qdf_timer_init(soc->osdev, &soc->int_timer,
2846 			dp_interrupt_timer, (void *)soc,
2847 			QDF_TIMER_TYPE_WAKE_APPS);
2848 
2849 	return QDF_STATUS_SUCCESS;
2850 }
2851 
2852 /**
2853  * dp_soc_set_interrupt_mode() - Set the interrupt mode in soc
2854  * soc: DP soc handle
2855  *
2856  * Set the appropriate interrupt mode flag in the soc
2857  */
2858 static void dp_soc_set_interrupt_mode(struct dp_soc *soc)
2859 {
2860 	uint32_t msi_base_data, msi_vector_start;
2861 	int msi_vector_count, ret;
2862 
2863 	soc->intr_mode = DP_INTR_INTEGRATED;
2864 
2865 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
2866 	    (dp_is_monitor_mode_using_poll(soc) &&
2867 	     soc->cdp_soc.ol_ops->get_con_mode &&
2868 	     soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)) {
2869 		soc->intr_mode = DP_INTR_POLL;
2870 	} else {
2871 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
2872 						  &msi_vector_count,
2873 						  &msi_base_data,
2874 						  &msi_vector_start);
2875 		if (ret)
2876 			return;
2877 
2878 		soc->intr_mode = DP_INTR_MSI;
2879 	}
2880 }
2881 
2882 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc);
2883 #if defined(DP_INTR_POLL_BOTH)
2884 /*
2885  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
2886  * @txrx_soc: DP SOC handle
2887  *
2888  * Call the appropriate attach function based on the mode of operation.
2889  * This is a WAR for enabling monitor mode.
2890  *
2891  * Return: 0 for success. nonzero for failure.
2892  */
2893 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
2894 {
2895 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2896 
2897 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
2898 	    (dp_is_monitor_mode_using_poll(soc) &&
2899 	     soc->cdp_soc.ol_ops->get_con_mode &&
2900 	     soc->cdp_soc.ol_ops->get_con_mode() ==
2901 	     QDF_GLOBAL_MONITOR_MODE)) {
2902 		dp_info("Poll mode");
2903 		return dp_soc_attach_poll(txrx_soc);
2904 	} else {
2905 		dp_info("Interrupt  mode");
2906 		return dp_soc_interrupt_attach(txrx_soc);
2907 	}
2908 }
2909 #else
2910 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
2911 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
2912 {
2913 	return dp_soc_attach_poll(txrx_soc);
2914 }
2915 #else
2916 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
2917 {
2918 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2919 
2920 	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx))
2921 		return dp_soc_attach_poll(txrx_soc);
2922 	else
2923 		return dp_soc_interrupt_attach(txrx_soc);
2924 }
2925 #endif
2926 #endif
2927 
/*
 * dp_soc_interrupt_map_calculate_integrated() - Build the IRQ id map for
 *	one interrupt context in integrated (non-MSI) interrupt mode
 * @soc: DP SOC handle
 * @intr_ctx_num: interrupt context number whose ring masks are consulted
 * @irq_id_map: output array of IRQ ids serviced by this context
 * @num_irq_r: output count of entries written to @irq_id_map
 *
 * For every bit set in each configured ring mask, the matching hardware
 * IRQ id is appended to the map. The per-ring IRQ ids are laid out
 * contiguously in descending order from the ring-1 id, hence the
 * "base - j" arithmetic below.
 */
static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
{
	int j;
	int num_irq = 0;

	int tx_mask =
		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask =
		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask =
		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);

	soc->intr_mode = DP_INTR_INTEGRATED;

	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {

		if (tx_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(wbm2host_tx_completions_ring1 - j);
		}

		if (rx_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(reo2host_destination_ring1 - j);
		}

		if (rxdma2host_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				rxdma2host_destination_ring_mac1 - j;
		}

		if (host2rxdma_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				host2rxdma_host_buf_ring_mac1 -	j;
		}

		if (host2rxdma_mon_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				host2rxdma_monitor_ring1 - j;
		}

		/* Monitor Rx fans out to three distinct IRQ sources */
		if (rx_mon_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				ppdu_end_interrupts_mac1 - j;
			irq_id_map[num_irq++] =
				rxdma2host_monitor_status_ring_mac1 - j;
			irq_id_map[num_irq++] =
				rxdma2host_monitor_destination_mac1 - j;
		}

		/* These rings are single instances shared across contexts */
		if (rx_wbm_rel_ring_mask & (1 << j))
			irq_id_map[num_irq++] = wbm2host_rx_release;

		if (rx_err_ring_mask & (1 << j))
			irq_id_map[num_irq++] = reo2host_exception;

		if (reo_status_ring_mask & (1 << j))
			irq_id_map[num_irq++] = reo2host_status;

	}
	*num_irq_r = num_irq;
}
3003 
3004 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
3005 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
3006 		int msi_vector_count, int msi_vector_start)
3007 {
3008 	int tx_mask = wlan_cfg_get_tx_ring_mask(
3009 					soc->wlan_cfg_ctx, intr_ctx_num);
3010 	int rx_mask = wlan_cfg_get_rx_ring_mask(
3011 					soc->wlan_cfg_ctx, intr_ctx_num);
3012 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
3013 					soc->wlan_cfg_ctx, intr_ctx_num);
3014 	int tx_mon_mask = wlan_cfg_get_tx_mon_ring_mask(
3015 					soc->wlan_cfg_ctx, intr_ctx_num);
3016 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
3017 					soc->wlan_cfg_ctx, intr_ctx_num);
3018 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
3019 					soc->wlan_cfg_ctx, intr_ctx_num);
3020 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
3021 					soc->wlan_cfg_ctx, intr_ctx_num);
3022 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
3023 					soc->wlan_cfg_ctx, intr_ctx_num);
3024 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
3025 					soc->wlan_cfg_ctx, intr_ctx_num);
3026 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
3027 					soc->wlan_cfg_ctx, intr_ctx_num);
3028 	int rx_near_full_grp_1_mask =
3029 		wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
3030 						     intr_ctx_num);
3031 	int rx_near_full_grp_2_mask =
3032 		wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
3033 						     intr_ctx_num);
3034 	int tx_ring_near_full_mask =
3035 		wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
3036 						    intr_ctx_num);
3037 
3038 	int host2txmon_ring_mask =
3039 		wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx,
3040 						  intr_ctx_num);
3041 	unsigned int vector =
3042 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
3043 	int num_irq = 0;
3044 
3045 	soc->intr_mode = DP_INTR_MSI;
3046 
3047 	if (tx_mask | rx_mask | rx_mon_mask | tx_mon_mask | rx_err_ring_mask |
3048 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask |
3049 	    host2rxdma_ring_mask | host2rxdma_mon_ring_mask |
3050 	    rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
3051 	    tx_ring_near_full_mask | host2txmon_ring_mask)
3052 		irq_id_map[num_irq++] =
3053 			pld_get_msi_irq(soc->osdev->dev, vector);
3054 
3055 	*num_irq_r = num_irq;
3056 }
3057 
3058 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
3059 				    int *irq_id_map, int *num_irq)
3060 {
3061 	int msi_vector_count, ret;
3062 	uint32_t msi_base_data, msi_vector_start;
3063 
3064 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
3065 					    &msi_vector_count,
3066 					    &msi_base_data,
3067 					    &msi_vector_start);
3068 	if (ret)
3069 		return dp_soc_interrupt_map_calculate_integrated(soc,
3070 				intr_ctx_num, irq_id_map, num_irq);
3071 
3072 	else
3073 		dp_soc_interrupt_map_calculate_msi(soc,
3074 				intr_ctx_num, irq_id_map, num_irq,
3075 				msi_vector_count, msi_vector_start);
3076 }
3077 
3078 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
3079 /**
3080  * dp_soc_near_full_interrupt_attach() - Register handler for DP near fill irq
3081  * @soc: DP soc handle
3082  * @num_irq: IRQ number
3083  * @irq_id_map: IRQ map
3084  * intr_id: interrupt context ID
3085  *
3086  * Return: 0 for success. nonzero for failure.
3087  */
3088 static inline int
3089 dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
3090 				  int irq_id_map[], int intr_id)
3091 {
3092 	return hif_register_ext_group(soc->hif_handle,
3093 				      num_irq, irq_id_map,
3094 				      dp_service_near_full_srngs,
3095 				      &soc->intr_ctx[intr_id], "dp_nf_intr",
3096 				      HIF_EXEC_NAPI_TYPE,
3097 				      QCA_NAPI_DEF_SCALE_BIN_SHIFT);
3098 }
3099 #else
/* Stub: WLAN_FEATURE_NEAR_FULL_IRQ disabled; nothing to register. */
static inline int
dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
				  int *irq_id_map, int intr_id)
{
	return 0;
}
3106 #endif
3107 
3108 /*
3109  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
3110  * @txrx_soc: DP SOC handle
3111  *
3112  * Return: none
3113  */
3114 static void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
3115 {
3116 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3117 	int i;
3118 
3119 	if (soc->intr_mode == DP_INTR_POLL) {
3120 		qdf_timer_free(&soc->int_timer);
3121 	} else {
3122 		hif_deconfigure_ext_group_interrupts(soc->hif_handle);
3123 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
3124 		hif_deregister_exec_group(soc->hif_handle, "dp_nf_intr");
3125 	}
3126 
3127 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
3128 		soc->intr_ctx[i].tx_ring_mask = 0;
3129 		soc->intr_ctx[i].rx_ring_mask = 0;
3130 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
3131 		soc->intr_ctx[i].rx_err_ring_mask = 0;
3132 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
3133 		soc->intr_ctx[i].reo_status_ring_mask = 0;
3134 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
3135 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
3136 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
3137 		soc->intr_ctx[i].rx_near_full_grp_1_mask = 0;
3138 		soc->intr_ctx[i].rx_near_full_grp_2_mask = 0;
3139 		soc->intr_ctx[i].tx_ring_near_full_mask = 0;
3140 		soc->intr_ctx[i].tx_mon_ring_mask = 0;
3141 		soc->intr_ctx[i].host2txmon_ring_mask = 0;
3142 
3143 		hif_event_history_deinit(soc->hif_handle, i);
3144 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
3145 	}
3146 
3147 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
3148 		    sizeof(soc->mon_intr_id_lmac_map),
3149 		    DP_MON_INVALID_LMAC_ID);
3150 }
3151 
3152 /*
3153  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
3154  * @txrx_soc: DP SOC handle
3155  *
3156  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
3157  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
3158  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
3159  *
3160  * Return: 0 for success. nonzero for failure.
3161  */
3162 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
3163 {
3164 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3165 
3166 	int i = 0;
3167 	int num_irq = 0;
3168 	int rx_err_ring_intr_ctxt_id = HIF_MAX_GROUP;
3169 	int lmac_id = 0;
3170 
3171 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
3172 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
3173 
3174 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
3175 		int ret = 0;
3176 
3177 		/* Map of IRQ ids registered with one interrupt context */
3178 		int irq_id_map[HIF_MAX_GRP_IRQ];
3179 
3180 		int tx_mask =
3181 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
3182 		int rx_mask =
3183 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
3184 		int rx_mon_mask =
3185 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
3186 		int tx_mon_ring_mask =
3187 			wlan_cfg_get_tx_mon_ring_mask(soc->wlan_cfg_ctx, i);
3188 		int rx_err_ring_mask =
3189 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
3190 		int rx_wbm_rel_ring_mask =
3191 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
3192 		int reo_status_ring_mask =
3193 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
3194 		int rxdma2host_ring_mask =
3195 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
3196 		int host2rxdma_ring_mask =
3197 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
3198 		int host2rxdma_mon_ring_mask =
3199 			wlan_cfg_get_host2rxdma_mon_ring_mask(
3200 				soc->wlan_cfg_ctx, i);
3201 		int rx_near_full_grp_1_mask =
3202 			wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
3203 							     i);
3204 		int rx_near_full_grp_2_mask =
3205 			wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
3206 							     i);
3207 		int tx_ring_near_full_mask =
3208 			wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
3209 							    i);
3210 		int host2txmon_ring_mask =
3211 			wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx, i);
3212 
3213 		soc->intr_ctx[i].dp_intr_id = i;
3214 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
3215 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
3216 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
3217 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
3218 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
3219 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
3220 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
3221 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
3222 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
3223 			 host2rxdma_mon_ring_mask;
3224 		soc->intr_ctx[i].rx_near_full_grp_1_mask =
3225 						rx_near_full_grp_1_mask;
3226 		soc->intr_ctx[i].rx_near_full_grp_2_mask =
3227 						rx_near_full_grp_2_mask;
3228 		soc->intr_ctx[i].tx_ring_near_full_mask =
3229 						tx_ring_near_full_mask;
3230 		soc->intr_ctx[i].tx_mon_ring_mask = tx_mon_ring_mask;
3231 		soc->intr_ctx[i].host2txmon_ring_mask = host2txmon_ring_mask;
3232 
3233 		soc->intr_ctx[i].soc = soc;
3234 
3235 		num_irq = 0;
3236 
3237 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
3238 					       &num_irq);
3239 
3240 		if (rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
3241 		    tx_ring_near_full_mask) {
3242 			dp_soc_near_full_interrupt_attach(soc, num_irq,
3243 							  irq_id_map, i);
3244 		} else {
3245 			ret = hif_register_ext_group(soc->hif_handle,
3246 				num_irq, irq_id_map, dp_service_srngs,
3247 				&soc->intr_ctx[i], "dp_intr",
3248 				HIF_EXEC_NAPI_TYPE,
3249 				QCA_NAPI_DEF_SCALE_BIN_SHIFT);
3250 		}
3251 
3252 		dp_debug(" int ctx %u num_irq %u irq_id_map %u %u",
3253 			 i, num_irq, irq_id_map[0], irq_id_map[1]);
3254 
3255 		if (ret) {
3256 			dp_init_err("%pK: failed, ret = %d", soc, ret);
3257 			dp_soc_interrupt_detach(txrx_soc);
3258 			return QDF_STATUS_E_FAILURE;
3259 		}
3260 
3261 		hif_event_history_init(soc->hif_handle, i);
3262 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
3263 
3264 		if (rx_err_ring_mask)
3265 			rx_err_ring_intr_ctxt_id = i;
3266 
3267 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
3268 			soc->mon_intr_id_lmac_map[lmac_id] = i;
3269 			lmac_id++;
3270 		}
3271 	}
3272 
3273 	hif_configure_ext_group_interrupts(soc->hif_handle);
3274 	if (rx_err_ring_intr_ctxt_id != HIF_MAX_GROUP)
3275 		hif_config_irq_clear_cpu_affinity(soc->hif_handle,
3276 						  rx_err_ring_intr_ctxt_id, 0);
3277 
3278 	return QDF_STATUS_SUCCESS;
3279 }
3280 
3281 #define AVG_MAX_MPDUS_PER_TID 128
3282 #define AVG_TIDS_PER_CLIENT 2
3283 #define AVG_FLOWS_PER_TID 2
3284 #define AVG_MSDUS_PER_FLOW 128
3285 #define AVG_MSDUS_PER_MPDU 4
3286 
3287 /*
3288  * dp_hw_link_desc_pool_banks_free() - Free h/w link desc pool banks
3289  * @soc: DP SOC handle
3290  * @mac_id: mac id
3291  *
3292  * Return: none
3293  */
3294 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id)
3295 {
3296 	struct qdf_mem_multi_page_t *pages;
3297 
3298 	if (mac_id != WLAN_INVALID_PDEV_ID) {
3299 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3300 	} else {
3301 		pages = &soc->link_desc_pages;
3302 	}
3303 
3304 	if (!pages) {
3305 		dp_err("can not get link desc pages");
3306 		QDF_ASSERT(0);
3307 		return;
3308 	}
3309 
3310 	if (pages->dma_pages) {
3311 		wlan_minidump_remove((void *)
3312 				     pages->dma_pages->page_v_addr_start,
3313 				     pages->num_pages * pages->page_size,
3314 				     soc->ctrl_psoc,
3315 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3316 				     "hw_link_desc_bank");
3317 		dp_desc_multi_pages_mem_free(soc, DP_HW_LINK_DESC_TYPE,
3318 					     pages, 0, false);
3319 	}
3320 }
3321 
3322 qdf_export_symbol(dp_hw_link_desc_pool_banks_free);
3323 
3324 /*
3325  * dp_hw_link_desc_pool_banks_alloc() - Allocate h/w link desc pool banks
3326  * @soc: DP SOC handle
3327  * @mac_id: mac id
3328  *
3329  * Allocates memory pages for link descriptors, the page size is 4K for
3330  * MCL and 2MB for WIN. if the mac_id is invalid link descriptor pages are
3331  * allocated for regular RX/TX and if the there is a proper mac_id link
3332  * descriptors are allocated for RX monitor mode.
3333  *
3334  * Return: QDF_STATUS_SUCCESS: Success
3335  *	   QDF_STATUS_E_FAILURE: Failure
3336  */
3337 QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc, uint32_t mac_id)
3338 {
3339 	hal_soc_handle_t hal_soc = soc->hal_soc;
3340 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
3341 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
3342 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
3343 	uint32_t num_mpdus_per_link_desc = hal_num_mpdus_per_link_desc(hal_soc);
3344 	uint32_t num_msdus_per_link_desc = hal_num_msdus_per_link_desc(hal_soc);
3345 	uint32_t num_mpdu_links_per_queue_desc =
3346 		hal_num_mpdu_links_per_queue_desc(hal_soc);
3347 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
3348 	uint32_t *total_link_descs, total_mem_size;
3349 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
3350 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
3351 	uint32_t num_entries;
3352 	struct qdf_mem_multi_page_t *pages;
3353 	struct dp_srng *dp_srng;
3354 	uint8_t minidump_str[MINIDUMP_STR_SIZE];
3355 
3356 	/* Only Tx queue descriptors are allocated from common link descriptor
3357 	 * pool Rx queue descriptors are not included in this because (REO queue
3358 	 * extension descriptors) they are expected to be allocated contiguously
3359 	 * with REO queue descriptors
3360 	 */
3361 	if (mac_id != WLAN_INVALID_PDEV_ID) {
3362 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3363 		/* dp_monitor_get_link_desc_pages returns NULL only
3364 		 * if monitor SOC is  NULL
3365 		 */
3366 		if (!pages) {
3367 			dp_err("can not get link desc pages");
3368 			QDF_ASSERT(0);
3369 			return QDF_STATUS_E_FAULT;
3370 		}
3371 		dp_srng = &soc->rxdma_mon_desc_ring[mac_id];
3372 		num_entries = dp_srng->alloc_size /
3373 			hal_srng_get_entrysize(soc->hal_soc,
3374 					       RXDMA_MONITOR_DESC);
3375 		total_link_descs = dp_monitor_get_total_link_descs(soc, mac_id);
3376 		qdf_str_lcopy(minidump_str, "mon_link_desc_bank",
3377 			      MINIDUMP_STR_SIZE);
3378 	} else {
3379 		num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3380 			AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
3381 
3382 		num_mpdu_queue_descs = num_mpdu_link_descs /
3383 			num_mpdu_links_per_queue_desc;
3384 
3385 		num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3386 			AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
3387 			num_msdus_per_link_desc;
3388 
3389 		num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3390 			AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
3391 
3392 		num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
3393 			num_tx_msdu_link_descs + num_rx_msdu_link_descs;
3394 
3395 		pages = &soc->link_desc_pages;
3396 		total_link_descs = &soc->total_link_descs;
3397 		qdf_str_lcopy(minidump_str, "link_desc_bank",
3398 			      MINIDUMP_STR_SIZE);
3399 	}
3400 
3401 	/* If link descriptor banks are allocated, return from here */
3402 	if (pages->num_pages)
3403 		return QDF_STATUS_SUCCESS;
3404 
3405 	/* Round up to power of 2 */
3406 	*total_link_descs = 1;
3407 	while (*total_link_descs < num_entries)
3408 		*total_link_descs <<= 1;
3409 
3410 	dp_init_info("%pK: total_link_descs: %u, link_desc_size: %d",
3411 		     soc, *total_link_descs, link_desc_size);
3412 	total_mem_size =  *total_link_descs * link_desc_size;
3413 	total_mem_size += link_desc_align;
3414 
3415 	dp_init_info("%pK: total_mem_size: %d",
3416 		     soc, total_mem_size);
3417 
3418 	dp_set_max_page_size(pages, max_alloc_size);
3419 	dp_desc_multi_pages_mem_alloc(soc, DP_HW_LINK_DESC_TYPE,
3420 				      pages,
3421 				      link_desc_size,
3422 				      *total_link_descs,
3423 				      0, false);
3424 	if (!pages->num_pages) {
3425 		dp_err("Multi page alloc fail for hw link desc pool");
3426 		return QDF_STATUS_E_FAULT;
3427 	}
3428 
3429 	wlan_minidump_log(pages->dma_pages->page_v_addr_start,
3430 			  pages->num_pages * pages->page_size,
3431 			  soc->ctrl_psoc,
3432 			  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3433 			  "hw_link_desc_bank");
3434 
3435 	return QDF_STATUS_SUCCESS;
3436 }
3437 
3438 /*
3439  * dp_hw_link_desc_ring_free() - Free h/w link desc rings
3440  * @soc: DP SOC handle
3441  *
3442  * Return: none
3443  */
3444 static void dp_hw_link_desc_ring_free(struct dp_soc *soc)
3445 {
3446 	uint32_t i;
3447 	uint32_t size = soc->wbm_idle_scatter_buf_size;
3448 	void *vaddr = soc->wbm_idle_link_ring.base_vaddr_unaligned;
3449 	qdf_dma_addr_t paddr;
3450 
3451 	if (soc->wbm_idle_scatter_buf_base_vaddr[0]) {
3452 		for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
3453 			vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
3454 			paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
3455 			if (vaddr) {
3456 				qdf_mem_free_consistent(soc->osdev,
3457 							soc->osdev->dev,
3458 							size,
3459 							vaddr,
3460 							paddr,
3461 							0);
3462 				vaddr = NULL;
3463 			}
3464 		}
3465 	} else {
3466 		wlan_minidump_remove(soc->wbm_idle_link_ring.base_vaddr_unaligned,
3467 				     soc->wbm_idle_link_ring.alloc_size,
3468 				     soc->ctrl_psoc,
3469 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3470 				     "wbm_idle_link_ring");
3471 		dp_srng_free(soc, &soc->wbm_idle_link_ring);
3472 	}
3473 }
3474 
3475 /*
3476  * dp_hw_link_desc_ring_alloc() - Allocate hw link desc rings
3477  * @soc: DP SOC handle
3478  *
3479  * Allocate memory for WBM_IDLE_LINK srng ring if the number of
3480  * link descriptors is less then the max_allocated size. else
3481  * allocate memory for wbm_idle_scatter_buffer.
3482  *
3483  * Return: QDF_STATUS_SUCCESS: success
3484  *         QDF_STATUS_E_NO_MEM: No memory (Failure)
3485  */
3486 static QDF_STATUS dp_hw_link_desc_ring_alloc(struct dp_soc *soc)
3487 {
3488 	uint32_t entry_size, i;
3489 	uint32_t total_mem_size;
3490 	qdf_dma_addr_t *baseaddr = NULL;
3491 	struct dp_srng *dp_srng;
3492 	uint32_t ring_type;
3493 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
3494 	uint32_t tlds;
3495 
3496 	ring_type = WBM_IDLE_LINK;
3497 	dp_srng = &soc->wbm_idle_link_ring;
3498 	tlds = soc->total_link_descs;
3499 
3500 	entry_size = hal_srng_get_entrysize(soc->hal_soc, ring_type);
3501 	total_mem_size = entry_size * tlds;
3502 
3503 	if (total_mem_size <= max_alloc_size) {
3504 		if (dp_srng_alloc(soc, dp_srng, ring_type, tlds, 0)) {
3505 			dp_init_err("%pK: Link desc idle ring setup failed",
3506 				    soc);
3507 			goto fail;
3508 		}
3509 
3510 		wlan_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
3511 				  soc->wbm_idle_link_ring.alloc_size,
3512 				  soc->ctrl_psoc,
3513 				  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3514 				  "wbm_idle_link_ring");
3515 	} else {
3516 		uint32_t num_scatter_bufs;
3517 		uint32_t num_entries_per_buf;
3518 		uint32_t buf_size = 0;
3519 
3520 		soc->wbm_idle_scatter_buf_size =
3521 			hal_idle_list_scatter_buf_size(soc->hal_soc);
3522 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
3523 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
3524 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
3525 					soc->hal_soc, total_mem_size,
3526 					soc->wbm_idle_scatter_buf_size);
3527 
3528 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
3529 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3530 				  FL("scatter bufs size out of bounds"));
3531 			goto fail;
3532 		}
3533 
3534 		for (i = 0; i < num_scatter_bufs; i++) {
3535 			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
3536 			buf_size = soc->wbm_idle_scatter_buf_size;
3537 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
3538 				qdf_mem_alloc_consistent(soc->osdev,
3539 							 soc->osdev->dev,
3540 							 buf_size,
3541 							 baseaddr);
3542 
3543 			if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
3544 				QDF_TRACE(QDF_MODULE_ID_DP,
3545 					  QDF_TRACE_LEVEL_ERROR,
3546 					  FL("Scatter lst memory alloc fail"));
3547 				goto fail;
3548 			}
3549 		}
3550 		soc->num_scatter_bufs = num_scatter_bufs;
3551 	}
3552 	return QDF_STATUS_SUCCESS;
3553 
3554 fail:
3555 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
3556 		void *vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
3557 		qdf_dma_addr_t paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
3558 
3559 		if (vaddr) {
3560 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
3561 						soc->wbm_idle_scatter_buf_size,
3562 						vaddr,
3563 						paddr, 0);
3564 			vaddr = NULL;
3565 		}
3566 	}
3567 	return QDF_STATUS_E_NOMEM;
3568 }
3569 
3570 qdf_export_symbol(dp_hw_link_desc_pool_banks_alloc);
3571 
3572 /*
3573  * dp_hw_link_desc_ring_init() - Initialize hw link desc rings
3574  * @soc: DP SOC handle
3575  *
3576  * Return: QDF_STATUS_SUCCESS: success
3577  *         QDF_STATUS_E_FAILURE: failure
3578  */
3579 static QDF_STATUS dp_hw_link_desc_ring_init(struct dp_soc *soc)
3580 {
3581 	struct dp_srng *dp_srng = &soc->wbm_idle_link_ring;
3582 
3583 	if (dp_srng->base_vaddr_unaligned) {
3584 		if (dp_srng_init(soc, dp_srng, WBM_IDLE_LINK, 0, 0))
3585 			return QDF_STATUS_E_FAILURE;
3586 	}
3587 	return QDF_STATUS_SUCCESS;
3588 }
3589 
3590 /*
3591  * dp_hw_link_desc_ring_deinit() - Reset hw link desc rings
3592  * @soc: DP SOC handle
3593  *
3594  * Return: None
3595  */
3596 static void dp_hw_link_desc_ring_deinit(struct dp_soc *soc)
3597 {
3598 	dp_srng_deinit(soc, &soc->wbm_idle_link_ring, WBM_IDLE_LINK, 0);
3599 }
3600 
3601 /*
3602  * dp_hw_link_desc_ring_replenish() - Replenish hw link desc rings
3603  * @soc: DP SOC handle
3604  * @mac_id: mac id
3605  *
3606  * Return: None
3607  */
3608 void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id)
3609 {
3610 	uint32_t cookie = 0;
3611 	uint32_t page_idx = 0;
3612 	struct qdf_mem_multi_page_t *pages;
3613 	struct qdf_mem_dma_page_t *dma_pages;
3614 	uint32_t offset = 0;
3615 	uint32_t count = 0;
3616 	uint32_t desc_id = 0;
3617 	void *desc_srng;
3618 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
3619 	uint32_t *total_link_descs_addr;
3620 	uint32_t total_link_descs;
3621 	uint32_t scatter_buf_num;
3622 	uint32_t num_entries_per_buf = 0;
3623 	uint32_t rem_entries;
3624 	uint32_t num_descs_per_page;
3625 	uint32_t num_scatter_bufs = 0;
3626 	uint8_t *scatter_buf_ptr;
3627 	void *desc;
3628 
3629 	num_scatter_bufs = soc->num_scatter_bufs;
3630 
3631 	if (mac_id == WLAN_INVALID_PDEV_ID) {
3632 		pages = &soc->link_desc_pages;
3633 		total_link_descs = soc->total_link_descs;
3634 		desc_srng = soc->wbm_idle_link_ring.hal_srng;
3635 	} else {
3636 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3637 		/* dp_monitor_get_link_desc_pages returns NULL only
3638 		 * if monitor SOC is  NULL
3639 		 */
3640 		if (!pages) {
3641 			dp_err("can not get link desc pages");
3642 			QDF_ASSERT(0);
3643 			return;
3644 		}
3645 		total_link_descs_addr =
3646 				dp_monitor_get_total_link_descs(soc, mac_id);
3647 		total_link_descs = *total_link_descs_addr;
3648 		desc_srng = soc->rxdma_mon_desc_ring[mac_id].hal_srng;
3649 	}
3650 
3651 	dma_pages = pages->dma_pages;
3652 	do {
3653 		qdf_mem_zero(dma_pages[page_idx].page_v_addr_start,
3654 			     pages->page_size);
3655 		page_idx++;
3656 	} while (page_idx < pages->num_pages);
3657 
3658 	if (desc_srng) {
3659 		hal_srng_access_start_unlocked(soc->hal_soc, desc_srng);
3660 		page_idx = 0;
3661 		count = 0;
3662 		offset = 0;
3663 		pages = &soc->link_desc_pages;
3664 		while ((desc = hal_srng_src_get_next(soc->hal_soc,
3665 						     desc_srng)) &&
3666 			(count < total_link_descs)) {
3667 			page_idx = count / pages->num_element_per_page;
3668 			if (desc_id == pages->num_element_per_page)
3669 				desc_id = 0;
3670 
3671 			offset = count % pages->num_element_per_page;
3672 			cookie = LINK_DESC_COOKIE(desc_id, page_idx,
3673 						  soc->link_desc_id_start);
3674 
3675 			hal_set_link_desc_addr(soc->hal_soc, desc, cookie,
3676 					       dma_pages[page_idx].page_p_addr
3677 					       + (offset * link_desc_size),
3678 					       soc->idle_link_bm_id);
3679 			count++;
3680 			desc_id++;
3681 		}
3682 		hal_srng_access_end_unlocked(soc->hal_soc, desc_srng);
3683 	} else {
3684 		/* Populate idle list scatter buffers with link descriptor
3685 		 * pointers
3686 		 */
3687 		scatter_buf_num = 0;
3688 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
3689 					soc->hal_soc,
3690 					soc->wbm_idle_scatter_buf_size);
3691 
3692 		scatter_buf_ptr = (uint8_t *)(
3693 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
3694 		rem_entries = num_entries_per_buf;
3695 		pages = &soc->link_desc_pages;
3696 		page_idx = 0; count = 0;
3697 		offset = 0;
3698 		num_descs_per_page = pages->num_element_per_page;
3699 
3700 		while (count < total_link_descs) {
3701 			page_idx = count / num_descs_per_page;
3702 			offset = count % num_descs_per_page;
3703 			if (desc_id == pages->num_element_per_page)
3704 				desc_id = 0;
3705 
3706 			cookie = LINK_DESC_COOKIE(desc_id, page_idx,
3707 						  soc->link_desc_id_start);
3708 			hal_set_link_desc_addr(soc->hal_soc,
3709 					       (void *)scatter_buf_ptr,
3710 					       cookie,
3711 					       dma_pages[page_idx].page_p_addr +
3712 					       (offset * link_desc_size),
3713 					       soc->idle_link_bm_id);
3714 			rem_entries--;
3715 			if (rem_entries) {
3716 				scatter_buf_ptr += link_desc_size;
3717 			} else {
3718 				rem_entries = num_entries_per_buf;
3719 				scatter_buf_num++;
3720 				if (scatter_buf_num >= num_scatter_bufs)
3721 					break;
3722 				scatter_buf_ptr = (uint8_t *)
3723 					(soc->wbm_idle_scatter_buf_base_vaddr[
3724 					 scatter_buf_num]);
3725 			}
3726 			count++;
3727 			desc_id++;
3728 		}
3729 		/* Setup link descriptor idle list in HW */
3730 		hal_setup_link_idle_list(soc->hal_soc,
3731 			soc->wbm_idle_scatter_buf_base_paddr,
3732 			soc->wbm_idle_scatter_buf_base_vaddr,
3733 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
3734 			(uint32_t)(scatter_buf_ptr -
3735 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
3736 			scatter_buf_num-1])), total_link_descs);
3737 	}
3738 }
3739 
3740 qdf_export_symbol(dp_link_desc_ring_replenish);
3741 
3742 #ifdef IPA_OFFLOAD
3743 #define USE_1_IPA_RX_REO_RING 1
3744 #define USE_2_IPA_RX_REO_RINGS 2
3745 #define REO_DST_RING_SIZE_QCA6290 1023
3746 #ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
3747 #define REO_DST_RING_SIZE_QCA8074 1023
3748 #define REO_DST_RING_SIZE_QCN9000 2048
3749 #else
3750 #define REO_DST_RING_SIZE_QCA8074 8
3751 #define REO_DST_RING_SIZE_QCN9000 8
3752 #endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
3753 
3754 #ifdef IPA_WDI3_TX_TWO_PIPES
3755 #ifdef DP_MEMORY_OPT
/* With DP_MEMORY_OPT the IPA alternate TX ring pair is managed on demand:
 * thin wrappers around the generic ring-pair helpers at the alt-ring index.
 */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return dp_init_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
	dp_deinit_tx_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return dp_alloc_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
	dp_free_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}
3775 
3776 #else /* !DP_MEMORY_OPT */
/* Stubs: DP_MEMORY_OPT disabled; no alternate TX ring init/deinit needed. */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}
3785 
/* Stub: DP_MEMORY_OPT disabled; nothing to allocate.
 * Fix: the original "return 0" was missing its semicolon -- a hard
 * compile error whenever IPA_WDI3_TX_TWO_PIPES is set without
 * DP_MEMORY_OPT.
 */
static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}
3790 
/* Stub: DP_MEMORY_OPT disabled; nothing to free. */
static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}
3794 #endif /* DP_MEMORY_OPT */
3795 
/* Initialize the hal state of the IPA alternate TCL data ring. */
static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
	hal_tx_init_data_ring(soc->hal_soc,
			      soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng);
}
3801 
3802 #else /* !IPA_WDI3_TX_TWO_PIPES */
/* Stubs: IPA_WDI3_TX_TWO_PIPES disabled; no alternate TX pipe exists. */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}

static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
}
3824 
3825 #endif /* IPA_WDI3_TX_TWO_PIPES */
3826 
3827 #else
3828 
3829 #define REO_DST_RING_SIZE_QCA6290 1024
3830 
/* Stubs: IPA_OFFLOAD disabled; no IPA rings are created at all. */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}

static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
}
3852 
3853 #endif /* IPA_OFFLOAD */
3854 
3855 /*
3856  * dp_soc_reset_ring_map() - Reset cpu ring map
3857  * @soc: Datapath soc handler
3858  *
3859  * This api resets the default cpu ring map
3860  */
3861 
3862 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
3863 {
3864 	uint8_t i;
3865 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
3866 
3867 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
3868 		switch (nss_config) {
3869 		case dp_nss_cfg_first_radio:
3870 			/*
3871 			 * Setting Tx ring map for one nss offloaded radio
3872 			 */
3873 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
3874 			break;
3875 
3876 		case dp_nss_cfg_second_radio:
3877 			/*
3878 			 * Setting Tx ring for two nss offloaded radios
3879 			 */
3880 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
3881 			break;
3882 
3883 		case dp_nss_cfg_dbdc:
3884 			/*
3885 			 * Setting Tx ring map for 2 nss offloaded radios
3886 			 */
3887 			soc->tx_ring_map[i] =
3888 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
3889 			break;
3890 
3891 		case dp_nss_cfg_dbtc:
3892 			/*
3893 			 * Setting Tx ring map for 3 nss offloaded radios
3894 			 */
3895 			soc->tx_ring_map[i] =
3896 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
3897 			break;
3898 
3899 		default:
3900 			dp_err("tx_ring_map failed due to invalid nss cfg");
3901 			break;
3902 		}
3903 	}
3904 }
3905 
3906 /*
3907  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
3908  * @dp_soc - DP soc handle
3909  * @ring_type - ring type
3910  * @ring_num - ring_num
3911  *
3912  * return 0 or 1
3913  */
3914 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
3915 {
3916 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
3917 	uint8_t status = 0;
3918 
3919 	switch (ring_type) {
3920 	case WBM2SW_RELEASE:
3921 	case REO_DST:
3922 	case RXDMA_BUF:
3923 	case REO_EXCEPTION:
3924 		status = ((nss_config) & (1 << ring_num));
3925 		break;
3926 	default:
3927 		break;
3928 	}
3929 
3930 	return status;
3931 }
3932 
3933 /*
3934  * dp_soc_disable_unused_mac_intr_mask() - reset interrupt mask for
3935  *					  unused WMAC hw rings
3936  * @dp_soc - DP Soc handle
3937  * @mac_num - wmac num
3938  *
3939  * Return: Return void
3940  */
3941 static void dp_soc_disable_unused_mac_intr_mask(struct dp_soc *soc,
3942 						int mac_num)
3943 {
3944 	uint8_t *grp_mask = NULL;
3945 	int group_number;
3946 
3947 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
3948 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
3949 	wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
3950 					  group_number, 0x0);
3951 
3952 	grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
3953 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
3954 	wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
3955 				      group_number, 0x0);
3956 
3957 	grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
3958 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
3959 	wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
3960 					  group_number, 0x0);
3961 
3962 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
3963 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
3964 	wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
3965 					      group_number, 0x0);
3966 }
3967 
3968 /*
3969  * dp_soc_reset_intr_mask() - reset interrupt mask
3970  * @dp_soc - DP Soc handle
3971  *
3972  * Return: Return void
3973  */
3974 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
3975 {
3976 	uint8_t j;
3977 	uint8_t *grp_mask = NULL;
3978 	int group_number, mask, num_ring;
3979 
3980 	/* number of tx ring */
3981 	num_ring = soc->num_tcl_data_rings;
3982 
3983 	/*
3984 	 * group mask for tx completion  ring.
3985 	 */
3986 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
3987 
3988 	/* loop and reset the mask for only offloaded ring */
3989 	for (j = 0; j < WLAN_CFG_NUM_TCL_DATA_RINGS; j++) {
3990 		/*
3991 		 * Group number corresponding to tx offloaded ring.
3992 		 */
3993 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
3994 		if (group_number < 0) {
3995 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
3996 				      soc, WBM2SW_RELEASE, j);
3997 			continue;
3998 		}
3999 
4000 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
4001 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j) &&
4002 		    (!mask)) {
4003 			continue;
4004 		}
4005 
4006 		/* reset the tx mask for offloaded ring */
4007 		mask &= (~(1 << j));
4008 
4009 		/*
4010 		 * reset the interrupt mask for offloaded ring.
4011 		 */
4012 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
4013 	}
4014 
4015 	/* number of rx rings */
4016 	num_ring = soc->num_reo_dest_rings;
4017 
4018 	/*
4019 	 * group mask for reo destination ring.
4020 	 */
4021 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
4022 
4023 	/* loop and reset the mask for only offloaded ring */
4024 	for (j = 0; j < WLAN_CFG_NUM_REO_DEST_RING; j++) {
4025 		/*
4026 		 * Group number corresponding to rx offloaded ring.
4027 		 */
4028 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
4029 		if (group_number < 0) {
4030 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4031 				      soc, REO_DST, j);
4032 			continue;
4033 		}
4034 
4035 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
4036 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j) &&
4037 		    (!mask)) {
4038 			continue;
4039 		}
4040 
4041 		/* reset the interrupt mask for offloaded ring */
4042 		mask &= (~(1 << j));
4043 
4044 		/*
4045 		 * set the interrupt mask to zero for rx offloaded radio.
4046 		 */
4047 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
4048 	}
4049 
4050 	/*
4051 	 * group mask for Rx buffer refill ring
4052 	 */
4053 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
4054 
4055 	/* loop and reset the mask for only offloaded ring */
4056 	for (j = 0; j < MAX_PDEV_CNT; j++) {
4057 		int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
4058 
4059 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
4060 			continue;
4061 		}
4062 
4063 		/*
4064 		 * Group number corresponding to rx offloaded ring.
4065 		 */
4066 		group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask);
4067 		if (group_number < 0) {
4068 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4069 				      soc, REO_DST, lmac_id);
4070 			continue;
4071 		}
4072 
4073 		/* set the interrupt mask for offloaded ring */
4074 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
4075 				group_number);
4076 		mask &= (~(1 << lmac_id));
4077 
4078 		/*
4079 		 * set the interrupt mask to zero for rx offloaded radio.
4080 		 */
4081 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
4082 			group_number, mask);
4083 	}
4084 
4085 	grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
4086 
4087 	for (j = 0; j < num_ring; j++) {
4088 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_EXCEPTION, j)) {
4089 			continue;
4090 		}
4091 
4092 		/*
4093 		 * Group number corresponding to rx err ring.
4094 		 */
4095 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
4096 		if (group_number < 0) {
4097 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4098 				      soc, REO_EXCEPTION, j);
4099 			continue;
4100 		}
4101 
4102 		wlan_cfg_set_rx_err_ring_mask(soc->wlan_cfg_ctx,
4103 					      group_number, 0);
4104 	}
4105 }
4106 
4107 #ifdef IPA_OFFLOAD
4108 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap0,
4109 			 uint32_t *remap1, uint32_t *remap2)
4110 {
4111 	uint32_t ring[8] = {REO_REMAP_SW1, REO_REMAP_SW2, REO_REMAP_SW3};
4112 	int target_type;
4113 
4114 	target_type = hal_get_target_type(soc->hal_soc);
4115 
4116 	switch (target_type) {
4117 	case TARGET_TYPE_KIWI:
4118 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4119 					      soc->num_reo_dest_rings -
4120 					      USE_2_IPA_RX_REO_RINGS, remap1,
4121 					      remap2);
4122 		break;
4123 
4124 	default:
4125 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4126 					      soc->num_reo_dest_rings -
4127 					      USE_1_IPA_RX_REO_RING, remap1,
4128 					      remap2);
4129 		break;
4130 	}
4131 
4132 	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
4133 
4134 	return true;
4135 }
4136 
4137 #ifdef IPA_WDI3_TX_TWO_PIPES
4138 static bool dp_ipa_is_alt_tx_ring(int index)
4139 {
4140 	return index == IPA_TX_ALT_RING_IDX;
4141 }
4142 
4143 static bool dp_ipa_is_alt_tx_comp_ring(int index)
4144 {
4145 	return index == IPA_TX_ALT_COMP_RING_IDX;
4146 }
4147 #else /* !IPA_WDI3_TX_TWO_PIPES */
/* IPA_WDI3_TX_TWO_PIPES disabled: there is no alternate Tx pipe, so no
 * ring index ever qualifies.
 */
static bool dp_ipa_is_alt_tx_ring(int index)
{
	return false;
}

static bool dp_ipa_is_alt_tx_comp_ring(int index)
{
	return false;
}
4157 #endif /* IPA_WDI3_TX_TWO_PIPES */
4158 
4159 /**
4160  * dp_ipa_get_tx_ring_size() - Get Tx ring size for IPA
4161  *
4162  * @tx_ring_num: Tx ring number
4163  * @tx_ipa_ring_sz: Return param only updated for IPA.
4164  * @soc_cfg_ctx: dp soc cfg context
4165  *
4166  * Return: None
4167  */
4168 static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz,
4169 				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4170 {
4171 	if (!soc_cfg_ctx->ipa_enabled)
4172 		return;
4173 
4174 	if (tx_ring_num == IPA_TCL_DATA_RING_IDX)
4175 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_ring_size(soc_cfg_ctx);
4176 	else if (dp_ipa_is_alt_tx_ring(tx_ring_num))
4177 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_alt_ring_size(soc_cfg_ctx);
4178 }
4179 
4180 /**
4181  * dp_ipa_get_tx_comp_ring_size() - Get Tx comp ring size for IPA
4182  *
4183  * @tx_comp_ring_num: Tx comp ring number
4184  * @tx_comp_ipa_ring_sz: Return param only updated for IPA.
4185  * @soc_cfg_ctx: dp soc cfg context
4186  *
4187  * Return: None
4188  */
4189 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
4190 					 int *tx_comp_ipa_ring_sz,
4191 				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4192 {
4193 	if (!soc_cfg_ctx->ipa_enabled)
4194 		return;
4195 
4196 	if (tx_comp_ring_num == IPA_TCL_DATA_RING_IDX)
4197 		*tx_comp_ipa_ring_sz =
4198 				wlan_cfg_ipa_tx_comp_ring_size(soc_cfg_ctx);
4199 	else if (dp_ipa_is_alt_tx_comp_ring(tx_comp_ring_num))
4200 		*tx_comp_ipa_ring_sz =
4201 				wlan_cfg_ipa_tx_alt_comp_ring_size(soc_cfg_ctx);
4202 }
4203 #else
4204 static uint8_t dp_reo_ring_selection(uint32_t value, uint32_t *ring)
4205 {
4206 	uint8_t num = 0;
4207 
4208 	switch (value) {
4209 	case 0xF:
4210 		num = 4;
4211 		ring[0] = REO_REMAP_SW1;
4212 		ring[1] = REO_REMAP_SW2;
4213 		ring[2] = REO_REMAP_SW3;
4214 		ring[3] = REO_REMAP_SW4;
4215 		break;
4216 	case 0xE:
4217 		num = 3;
4218 		ring[0] = REO_REMAP_SW2;
4219 		ring[1] = REO_REMAP_SW3;
4220 		ring[2] = REO_REMAP_SW4;
4221 		break;
4222 	case 0xD:
4223 		num = 3;
4224 		ring[0] = REO_REMAP_SW1;
4225 		ring[1] = REO_REMAP_SW3;
4226 		ring[2] = REO_REMAP_SW4;
4227 		break;
4228 	case 0xC:
4229 		num = 2;
4230 		ring[0] = REO_REMAP_SW3;
4231 		ring[1] = REO_REMAP_SW4;
4232 		break;
4233 	case 0xB:
4234 		num = 3;
4235 		ring[0] = REO_REMAP_SW1;
4236 		ring[1] = REO_REMAP_SW2;
4237 		ring[2] = REO_REMAP_SW4;
4238 		break;
4239 	case 0xA:
4240 		num = 2;
4241 		ring[0] = REO_REMAP_SW2;
4242 		ring[1] = REO_REMAP_SW4;
4243 		break;
4244 	case 0x9:
4245 		num = 2;
4246 		ring[0] = REO_REMAP_SW1;
4247 		ring[1] = REO_REMAP_SW4;
4248 		break;
4249 	case 0x8:
4250 		num = 1;
4251 		ring[0] = REO_REMAP_SW4;
4252 		break;
4253 	case 0x7:
4254 		num = 3;
4255 		ring[0] = REO_REMAP_SW1;
4256 		ring[1] = REO_REMAP_SW2;
4257 		ring[2] = REO_REMAP_SW3;
4258 		break;
4259 	case 0x6:
4260 		num = 2;
4261 		ring[0] = REO_REMAP_SW2;
4262 		ring[1] = REO_REMAP_SW3;
4263 		break;
4264 	case 0x5:
4265 		num = 2;
4266 		ring[0] = REO_REMAP_SW1;
4267 		ring[1] = REO_REMAP_SW3;
4268 		break;
4269 	case 0x4:
4270 		num = 1;
4271 		ring[0] = REO_REMAP_SW3;
4272 		break;
4273 	case 0x3:
4274 		num = 2;
4275 		ring[0] = REO_REMAP_SW1;
4276 		ring[1] = REO_REMAP_SW2;
4277 		break;
4278 	case 0x2:
4279 		num = 1;
4280 		ring[0] = REO_REMAP_SW2;
4281 		break;
4282 	case 0x1:
4283 		num = 1;
4284 		ring[0] = REO_REMAP_SW1;
4285 		break;
4286 	}
4287 	return num;
4288 }
4289 
4290 bool dp_reo_remap_config(struct dp_soc *soc,
4291 			 uint32_t *remap0,
4292 			 uint32_t *remap1,
4293 			 uint32_t *remap2)
4294 {
4295 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4296 	uint32_t reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
4297 	uint8_t target_type, num;
4298 	uint32_t ring[4];
4299 	uint32_t value;
4300 
4301 	target_type = hal_get_target_type(soc->hal_soc);
4302 
4303 	switch (offload_radio) {
4304 	case dp_nss_cfg_default:
4305 		value = reo_config & 0xF;
4306 		num = dp_reo_ring_selection(value, ring);
4307 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4308 					      num, remap1, remap2);
4309 
4310 		break;
4311 	case dp_nss_cfg_first_radio:
4312 		value = reo_config & 0xE;
4313 		num = dp_reo_ring_selection(value, ring);
4314 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4315 					      num, remap1, remap2);
4316 
4317 		break;
4318 	case dp_nss_cfg_second_radio:
4319 		value = reo_config & 0xD;
4320 		num = dp_reo_ring_selection(value, ring);
4321 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4322 					      num, remap1, remap2);
4323 
4324 		break;
4325 	case dp_nss_cfg_dbdc:
4326 	case dp_nss_cfg_dbtc:
4327 		/* return false if both or all are offloaded to NSS */
4328 		return false;
4329 
4330 	}
4331 
4332 	dp_debug("remap1 %x remap2 %x offload_radio %u",
4333 		 *remap1, *remap2, offload_radio);
4334 	return true;
4335 }
4336 
/* IPA_OFFLOAD disabled: ring sizes from the soc cfg are used as-is, so
 * these hooks intentionally leave the output parameters untouched.
 */
static void dp_ipa_get_tx_ring_size(int ring_num, int *tx_ipa_ring_sz,
				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}

static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
					 int *tx_comp_ipa_ring_sz,
				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}
4347 #endif /* IPA_OFFLOAD */
4348 
4349 /*
4350  * dp_reo_frag_dst_set() - configure reo register to set the
4351  *                        fragment destination ring
4352  * @soc : Datapath soc
4353  * @frag_dst_ring : output parameter to set fragment destination ring
4354  *
4355  * Based on offload_radio below fragment destination rings is selected
4356  * 0 - TCL
4357  * 1 - SW1
4358  * 2 - SW2
4359  * 3 - SW3
4360  * 4 - SW4
4361  * 5 - Release
4362  * 6 - FW
4363  * 7 - alternate select
4364  *
4365  * return: void
4366  */
4367 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
4368 {
4369 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4370 
4371 	switch (offload_radio) {
4372 	case dp_nss_cfg_default:
4373 		*frag_dst_ring = REO_REMAP_TCL;
4374 		break;
4375 	case dp_nss_cfg_first_radio:
4376 		/*
4377 		 * This configuration is valid for single band radio which
4378 		 * is also NSS offload.
4379 		 */
4380 	case dp_nss_cfg_dbdc:
4381 	case dp_nss_cfg_dbtc:
4382 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
4383 		break;
4384 	default:
4385 		dp_init_err("%pK: dp_reo_frag_dst_set invalid offload radio config", soc);
4386 		break;
4387 	}
4388 }
4389 
4390 #ifdef ENABLE_VERBOSE_DEBUG
4391 static void dp_enable_verbose_debug(struct dp_soc *soc)
4392 {
4393 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
4394 
4395 	soc_cfg_ctx = soc->wlan_cfg_ctx;
4396 
4397 	if (soc_cfg_ctx->per_pkt_trace & dp_verbose_debug_mask)
4398 		is_dp_verbose_debug_enabled = true;
4399 
4400 	if (soc_cfg_ctx->per_pkt_trace & hal_verbose_debug_mask)
4401 		hal_set_verbose_debug(true);
4402 	else
4403 		hal_set_verbose_debug(false);
4404 }
4405 #else
/* ENABLE_VERBOSE_DEBUG not set: verbose per-packet tracing compiled out */
static void dp_enable_verbose_debug(struct dp_soc *soc)
{
}
4409 #endif
4410 
4411 #ifdef WLAN_FEATURE_STATS_EXT
/* Create the rx_hw_stats completion event on the soc */
static inline void dp_create_ext_stats_event(struct dp_soc *soc)
{
	qdf_event_create(&soc->rx_hw_stats_event);
}
4416 #else
/* WLAN_FEATURE_STATS_EXT disabled: no rx_hw_stats event required */
static inline void dp_create_ext_stats_event(struct dp_soc *soc)
{
}
4420 #endif
4421 
4422 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index)
4423 {
4424 	int tcl_ring_num, wbm_ring_num;
4425 
4426 	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
4427 						index,
4428 						&tcl_ring_num,
4429 						&wbm_ring_num);
4430 
4431 	if (tcl_ring_num == -1 || wbm_ring_num == -1) {
4432 		dp_err("incorrect tcl/wbm ring num for index %u", index);
4433 		return;
4434 	}
4435 
4436 	wlan_minidump_remove(soc->tcl_data_ring[index].base_vaddr_unaligned,
4437 			     soc->tcl_data_ring[index].alloc_size,
4438 			     soc->ctrl_psoc,
4439 			     WLAN_MD_DP_SRNG_TCL_DATA,
4440 			     "tcl_data_ring");
4441 	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
4442 	dp_srng_deinit(soc, &soc->tcl_data_ring[index], TCL_DATA,
4443 		       tcl_ring_num);
4444 
4445 	wlan_minidump_remove(soc->tx_comp_ring[index].base_vaddr_unaligned,
4446 			     soc->tx_comp_ring[index].alloc_size,
4447 			     soc->ctrl_psoc,
4448 			     WLAN_MD_DP_SRNG_TX_COMP,
4449 			     "tcl_comp_ring");
4450 	dp_srng_deinit(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4451 		       wbm_ring_num);
4452 }
4453 
4454 /**
4455  * dp_init_tx_ring_pair_by_index() - The function inits tcl data/wbm completion
4456  * ring pair
4457  * @soc: DP soc pointer
4458  * @index: index of soc->tcl_data or soc->tx_comp to initialize
4459  *
4460  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
4461  */
4462 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
4463 						uint8_t index)
4464 {
4465 	int tcl_ring_num, wbm_ring_num;
4466 	uint8_t bm_id;
4467 
4468 	if (index >= MAX_TCL_DATA_RINGS) {
4469 		dp_err("unexpected index!");
4470 		QDF_BUG(0);
4471 		goto fail1;
4472 	}
4473 
4474 	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
4475 						index,
4476 						&tcl_ring_num,
4477 						&wbm_ring_num);
4478 
4479 	if (tcl_ring_num == -1 || wbm_ring_num == -1) {
4480 		dp_err("incorrect tcl/wbm ring num for index %u", index);
4481 		goto fail1;
4482 	}
4483 
4484 	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
4485 	if (dp_srng_init(soc, &soc->tcl_data_ring[index], TCL_DATA,
4486 			 tcl_ring_num, 0)) {
4487 		dp_err("dp_srng_init failed for tcl_data_ring");
4488 		goto fail1;
4489 	}
4490 	wlan_minidump_log(soc->tcl_data_ring[index].base_vaddr_unaligned,
4491 			  soc->tcl_data_ring[index].alloc_size,
4492 			  soc->ctrl_psoc,
4493 			  WLAN_MD_DP_SRNG_TCL_DATA,
4494 			  "tcl_data_ring");
4495 
4496 	if (dp_srng_init(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4497 			 wbm_ring_num, 0)) {
4498 		dp_err("dp_srng_init failed for tx_comp_ring");
4499 		goto fail1;
4500 	}
4501 
4502 	bm_id = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_ring_num);
4503 
4504 	soc->arch_ops.tx_implicit_rbm_set(soc, tcl_ring_num, bm_id);
4505 	wlan_minidump_log(soc->tx_comp_ring[index].base_vaddr_unaligned,
4506 			  soc->tx_comp_ring[index].alloc_size,
4507 			  soc->ctrl_psoc,
4508 			  WLAN_MD_DP_SRNG_TX_COMP,
4509 			  "tcl_comp_ring");
4510 
4511 	return QDF_STATUS_SUCCESS;
4512 
4513 fail1:
4514 	return QDF_STATUS_E_FAILURE;
4515 }
4516 
/* Free the TCL data / WBM completion srng pair allocated for @index */
static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index)
{
	dp_debug("index %u", index);
	dp_srng_free(soc, &soc->tcl_data_ring[index]);
	dp_srng_free(soc, &soc->tx_comp_ring[index]);
}
4523 
4524 /**
4525  * dp_alloc_tx_ring_pair_by_index() - The function allocs tcl data/wbm2sw
4526  * ring pair for the given "index"
4527  * @soc: DP soc pointer
4528  * @index: index of soc->tcl_data or soc->tx_comp to initialize
4529  *
4530  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
4531  */
4532 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
4533 						 uint8_t index)
4534 {
4535 	int tx_ring_size;
4536 	int tx_comp_ring_size;
4537 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
4538 	int cached = 0;
4539 
4540 	if (index >= MAX_TCL_DATA_RINGS) {
4541 		dp_err("unexpected index!");
4542 		QDF_BUG(0);
4543 		goto fail1;
4544 	}
4545 
4546 	dp_debug("index %u", index);
4547 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
4548 	dp_ipa_get_tx_ring_size(index, &tx_ring_size, soc_cfg_ctx);
4549 
4550 	if (dp_srng_alloc(soc, &soc->tcl_data_ring[index], TCL_DATA,
4551 			  tx_ring_size, cached)) {
4552 		dp_err("dp_srng_alloc failed for tcl_data_ring");
4553 		goto fail1;
4554 	}
4555 
4556 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
4557 	dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size, soc_cfg_ctx);
4558 	/* Enable cached TCL desc if NSS offload is disabled */
4559 	if (!wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
4560 		cached = WLAN_CFG_DST_RING_CACHED_DESC;
4561 
4562 	if (dp_srng_alloc(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4563 			  tx_comp_ring_size, cached)) {
4564 		dp_err("dp_srng_alloc failed for tx_comp_ring");
4565 		goto fail1;
4566 	}
4567 
4568 	return QDF_STATUS_SUCCESS;
4569 
4570 fail1:
4571 	return QDF_STATUS_E_FAILURE;
4572 }
4573 
/**
 * dp_lro_hash_setup() - send the LRO/GRO toeplitz hash config to target FW
 * @soc: DP SoC handle
 * @pdev: DP pdev handle whose pdev_id is passed to FW
 *
 * Builds a cdp_lro_hash_config with freshly randomized toeplitz seeds
 * and, when LRO or GRO is enabled in the soc cfg, the TCP flag/mask
 * settings, then delivers it via the ol_ops->lro_hash_config callback.
 *
 * Return: QDF_STATUS_SUCCESS on success, error code otherwise
 */
static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct cdp_lro_hash_config lro_hash;
	QDF_STATUS status;

	/* Nothing to configure when LRO, GRO and RX hash are all off */
	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		dp_err("LRO, GRO and RX hash disabled");
		return QDF_STATUS_E_FAILURE;
	}

	qdf_mem_zero(&lro_hash, sizeof(lro_hash));

	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
		lro_hash.lro_enable = 1;
		/* Match only the ACK bit; all other flags must be clear */
		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
	}

	/* Random toeplitz seeds for the ipv4/ipv6 flow hashes */
	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
			     (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
			      LRO_IPV4_SEED_ARR_SZ));
	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
			     (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
			      LRO_IPV6_SEED_ARR_SZ));

	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);

	/* Runtime guard in addition to the assert above (release builds) */
	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
		QDF_BUG(0);
		dp_err("lro_hash_config not configured");
		return QDF_STATUS_E_FAILURE;
	}

	status = soc->cdp_soc.ol_ops->lro_hash_config(soc->ctrl_psoc,
						      pdev->pdev_id,
						      &lro_hash);
	if (!QDF_IS_STATUS_SUCCESS(status)) {
		dp_err("failed to send lro_hash_config to FW %u", status);
		return status;
	}

	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
		lro_hash.lro_enable, lro_hash.tcp_flag,
		lro_hash.tcp_flag_mask);

	dp_info("toeplitz_hash_ipv4:");
	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			   lro_hash.toeplitz_hash_ipv4,
			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
			   LRO_IPV4_SEED_ARR_SZ));

	dp_info("toeplitz_hash_ipv6:");
	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			   lro_hash.toeplitz_hash_ipv6,
			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
			   LRO_IPV6_SEED_ARR_SZ));

	return status;
}
4638 
4639 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/*
 * dp_reap_timer_init() - initialize the reap timer (MCL, single pdev)
 * @soc: data path SoC handle
 *
 * Return: void
 */
static void dp_reap_timer_init(struct dp_soc *soc)
{
	/*
	 * Timer to reap rxdma status rings.
	 * Needed until we enable ppdu end interrupts
	 */
	dp_monitor_reap_timer_init(soc);
	/* Timer used by the monitor vdev path */
	dp_monitor_vdev_timer_init(soc);
}
4655 
/*
 * dp_reap_timer_deinit() - de-initialize the reap timer
 * @soc: data path SoC handle
 *
 * NOTE(review): dp_reap_timer_init() also initializes the monitor vdev
 * timer, which is not torn down here — confirm it is deinitialized
 * elsewhere.
 *
 * Return: void
 */
static void dp_reap_timer_deinit(struct dp_soc *soc)
{
	dp_monitor_reap_timer_deinit(soc);
}
4666 #else
4667 /* WIN use case */
4668 static void dp_reap_timer_init(struct dp_soc *soc)
4669 {
4670 	/* Configure LMAC rings in Polled mode */
4671 	if (soc->lmac_polled_mode) {
4672 		/*
4673 		 * Timer to reap lmac rings.
4674 		 */
4675 		qdf_timer_init(soc->osdev, &soc->lmac_reap_timer,
4676 			       dp_service_lmac_rings, (void *)soc,
4677 			       QDF_TIMER_TYPE_WAKE_APPS);
4678 		soc->lmac_timer_init = 1;
4679 		qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
4680 	}
4681 }
4682 
4683 static void dp_reap_timer_deinit(struct dp_soc *soc)
4684 {
4685 	if (soc->lmac_timer_init) {
4686 		qdf_timer_stop(&soc->lmac_reap_timer);
4687 		qdf_timer_free(&soc->lmac_reap_timer);
4688 		soc->lmac_timer_init = 0;
4689 	}
4690 }
4691 #endif
4692 
4693 #ifdef QCA_HOST2FW_RXBUF_RING
4694 /*
4695  * dp_rxdma_ring_alloc() - allocate the RXDMA rings
4696  * @soc: data path SoC handle
4697  * @pdev: Physical device handle
4698  *
4699  * Return: 0 - success, > 0 - failure
4700  */
4701 static int dp_rxdma_ring_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
4702 {
4703 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
4704 	int max_mac_rings;
4705 	int i;
4706 	int ring_size;
4707 
4708 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
4709 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
4710 	ring_size =  wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx);
4711 
4712 	for (i = 0; i < max_mac_rings; i++) {
4713 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
4714 		if (dp_srng_alloc(soc, &pdev->rx_mac_buf_ring[i],
4715 				  RXDMA_BUF, ring_size, 0)) {
4716 			dp_init_err("%pK: failed rx mac ring setup", soc);
4717 			return QDF_STATUS_E_FAILURE;
4718 		}
4719 	}
4720 	return QDF_STATUS_SUCCESS;
4721 }
4722 
4723 /*
4724  * dp_rxdma_ring_setup() - configure the RXDMA rings
4725  * @soc: data path SoC handle
4726  * @pdev: Physical device handle
4727  *
4728  * Return: 0 - success, > 0 - failure
4729  */
4730 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
4731 {
4732 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
4733 	int max_mac_rings;
4734 	int i;
4735 
4736 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
4737 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
4738 
4739 	for (i = 0; i < max_mac_rings; i++) {
4740 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
4741 		if (dp_srng_init(soc, &pdev->rx_mac_buf_ring[i],
4742 				 RXDMA_BUF, 1, i)) {
4743 			dp_init_err("%pK: failed rx mac ring setup", soc);
4744 			return QDF_STATUS_E_FAILURE;
4745 		}
4746 	}
4747 	return QDF_STATUS_SUCCESS;
4748 }
4749 
4750 /*
4751  * dp_rxdma_ring_cleanup() - Deinit the RXDMA rings and reap timer
4752  * @soc: data path SoC handle
4753  * @pdev: Physical device handle
4754  *
4755  * Return: void
4756  */
4757 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
4758 {
4759 	int i;
4760 
4761 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
4762 		dp_srng_deinit(soc, &pdev->rx_mac_buf_ring[i], RXDMA_BUF, 1);
4763 
4764 	dp_reap_timer_deinit(soc);
4765 }
4766 
4767 /*
4768  * dp_rxdma_ring_free() - Free the RXDMA rings
4769  * @pdev: Physical device handle
4770  *
4771  * Return: void
4772  */
4773 static void dp_rxdma_ring_free(struct dp_pdev *pdev)
4774 {
4775 	int i;
4776 
4777 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
4778 		dp_srng_free(pdev->soc, &pdev->rx_mac_buf_ring[i]);
4779 }
4780 
4781 #else
/* QCA_HOST2FW_RXBUF_RING not set: per-mac rx buffer rings are unused,
 * so allocation and setup are no-ops that report success.
 */
static int dp_rxdma_ring_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

/* Still tears down the reap timer started in dp_reap_timer_init() */
static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	dp_reap_timer_deinit(soc);
}

static void dp_rxdma_ring_free(struct dp_pdev *pdev)
{
}
4800 #endif
4801 
4802 /**
4803  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
4804  * @pdev - DP_PDEV handle
4805  *
4806  * Return: void
4807  */
4808 static inline void
4809 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
4810 {
4811 	uint8_t map_id;
4812 	struct dp_soc *soc = pdev->soc;
4813 
4814 	if (!soc)
4815 		return;
4816 
4817 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
4818 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
4819 			     default_dscp_tid_map,
4820 			     sizeof(default_dscp_tid_map));
4821 	}
4822 
4823 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
4824 		hal_tx_set_dscp_tid_map(soc->hal_soc,
4825 					default_dscp_tid_map,
4826 					map_id);
4827 	}
4828 }
4829 
/**
 * dp_pcp_tid_map_setup(): Initialize the pcp-tid maps
 * @pdev - DP_PDEV handle
 *
 * Copies the default pcp-tid map into the soc and programs it as the
 * default map in HW.
 *
 * Return: void
 */
static inline void
dp_pcp_tid_map_setup(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	/* defensive: nothing to do without a soc */
	if (!soc)
		return;

	qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
		     sizeof(default_pcp_tid_map));
	hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
}
4848 
4849 #ifdef IPA_OFFLOAD
4850 /**
4851  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
4852  * @soc: data path instance
4853  * @pdev: core txrx pdev context
4854  *
4855  * Return: QDF_STATUS_SUCCESS: success
4856  *         QDF_STATUS_E_RESOURCES: Error return
4857  */
4858 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
4859 					   struct dp_pdev *pdev)
4860 {
4861 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
4862 	int entries;
4863 
4864 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
4865 		soc_cfg_ctx = soc->wlan_cfg_ctx;
4866 		entries =
4867 			wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
4868 
4869 		/* Setup second Rx refill buffer ring */
4870 		if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
4871 				  entries, 0)) {
4872 			dp_init_err("%pK: dp_srng_alloc failed second"
4873 				    "rx refill ring", soc);
4874 			return QDF_STATUS_E_FAILURE;
4875 		}
4876 	}
4877 
4878 	return QDF_STATUS_SUCCESS;
4879 }
4880 
4881 /**
4882  * dp_init_ipa_rx_refill_buf_ring - Init second Rx refill buffer ring
4883  * @soc: data path instance
4884  * @pdev: core txrx pdev context
4885  *
4886  * Return: QDF_STATUS_SUCCESS: success
4887  *         QDF_STATUS_E_RESOURCES: Error return
4888  */
4889 static int dp_init_ipa_rx_refill_buf_ring(struct dp_soc *soc,
4890 					  struct dp_pdev *pdev)
4891 {
4892 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
4893 		if (dp_srng_init(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
4894 				 IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id)) {
4895 			dp_init_err("%pK: dp_srng_init failed second"
4896 				    "rx refill ring", soc);
4897 			return QDF_STATUS_E_FAILURE;
4898 		}
4899 	}
4900 	return QDF_STATUS_SUCCESS;
4901 }
4902 
/**
 * dp_deinit_ipa_rx_refill_buf_ring - deinit second Rx refill buffer ring
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * Return: void
 */
static void dp_deinit_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					     struct dp_pdev *pdev)
{
	/* The second refill ring exists only when IPA is enabled */
	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		dp_srng_deinit(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 0);
}
4916 
/**
 * dp_free_ipa_rx_refill_buf_ring - free second Rx refill buffer ring
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * Return: void
 */
static void dp_free_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					   struct dp_pdev *pdev)
{
	/* The second refill ring exists only when IPA is enabled */
	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		dp_srng_free(soc, &pdev->rx_refill_buf_ring2);
}
4930 #else
/* IPA_OFFLOAD disabled: the second Rx refill ring does not exist, so
 * all four hooks are no-ops (alloc/init report success).
 */
static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					   struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static int dp_init_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					  struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_deinit_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					     struct dp_pdev *pdev)
{
}

static void dp_free_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					   struct dp_pdev *pdev)
{
}
4952 #endif
4953 
4954 #ifdef DP_TX_HW_DESC_HISTORY
4955 /**
4956  * dp_soc_tx_hw_desc_history_attach - Attach TX HW descriptor history
4957  *
4958  * @soc: DP soc handle
4959  *
4960  * Return: None
4961  */
4962 static void dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc)
4963 {
4964 	soc->tx_hw_desc_history = dp_context_alloc_mem(
4965 			soc, DP_TX_HW_DESC_HIST_TYPE,
4966 			sizeof(*soc->tx_hw_desc_history));
4967 	if (soc->tx_hw_desc_history)
4968 		soc->tx_hw_desc_history->index = 0;
4969 }
4970 
/* Release the TX HW descriptor history allocated at attach time */
static void dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc)
{
	dp_context_free_mem(soc, DP_TX_HW_DESC_HIST_TYPE,
			    soc->tx_hw_desc_history);
}
4976 
4977 #else /* DP_TX_HW_DESC_HISTORY */
/* DP_TX_HW_DESC_HISTORY disabled: history hooks compile to no-ops */
static inline void
dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc)
{
}

static inline void
dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc)
{
}
4987 #endif /* DP_TX_HW_DESC_HISTORY */
4988 
4989 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
4990 #ifndef RX_DEFRAG_DO_NOT_REINJECT
4991 /**
4992  * dp_soc_rx_reinject_ring_history_attach - Attach the reo reinject ring
4993  *					    history.
4994  * @soc: DP soc handle
4995  *
4996  * Return: None
4997  */
4998 static void dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
4999 {
5000 	soc->rx_reinject_ring_history =
5001 		dp_context_alloc_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
5002 				     sizeof(struct dp_rx_reinject_history));
5003 	if (soc->rx_reinject_ring_history)
5004 		qdf_atomic_init(&soc->rx_reinject_ring_history->index);
5005 }
5006 #else /* RX_DEFRAG_DO_NOT_REINJECT */
/* Stub: defrag frames are not reinjected in this build, so no history. */
static inline void
dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
{
}
5011 #endif /* RX_DEFRAG_DO_NOT_REINJECT */
5012 
5013 /**
5014  * dp_soc_rx_history_attach() - Attach the ring history record buffers
5015  * @soc: DP soc structure
5016  *
5017  * This function allocates the memory for recording the rx ring, rx error
5018  * ring and the reinject ring entries. There is no error returned in case
5019  * of allocation failure since the record function checks if the history is
5020  * initialized or not. We do not want to fail the driver load in case of
5021  * failure to allocate memory for debug history.
5022  *
5023  * Returns: None
5024  */
5025 static void dp_soc_rx_history_attach(struct dp_soc *soc)
5026 {
5027 	int i;
5028 	uint32_t rx_ring_hist_size;
5029 	uint32_t rx_refill_ring_hist_size;
5030 
5031 	rx_ring_hist_size = sizeof(*soc->rx_ring_history[0]);
5032 	rx_refill_ring_hist_size = sizeof(*soc->rx_refill_ring_history[0]);
5033 
5034 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
5035 		soc->rx_ring_history[i] = dp_context_alloc_mem(
5036 				soc, DP_RX_RING_HIST_TYPE, rx_ring_hist_size);
5037 		if (soc->rx_ring_history[i])
5038 			qdf_atomic_init(&soc->rx_ring_history[i]->index);
5039 	}
5040 
5041 	soc->rx_err_ring_history = dp_context_alloc_mem(
5042 			soc, DP_RX_ERR_RING_HIST_TYPE, rx_ring_hist_size);
5043 	if (soc->rx_err_ring_history)
5044 		qdf_atomic_init(&soc->rx_err_ring_history->index);
5045 
5046 	dp_soc_rx_reinject_ring_history_attach(soc);
5047 
5048 	for (i = 0; i < MAX_PDEV_CNT; i++) {
5049 		soc->rx_refill_ring_history[i] = dp_context_alloc_mem(
5050 						soc,
5051 						DP_RX_REFILL_RING_HIST_TYPE,
5052 						rx_refill_ring_hist_size);
5053 
5054 		if (soc->rx_refill_ring_history[i])
5055 			qdf_atomic_init(&soc->rx_refill_ring_history[i]->index);
5056 	}
5057 }
5058 
5059 static void dp_soc_rx_history_detach(struct dp_soc *soc)
5060 {
5061 	int i;
5062 
5063 	for (i = 0; i < MAX_REO_DEST_RINGS; i++)
5064 		dp_context_free_mem(soc, DP_RX_RING_HIST_TYPE,
5065 				    soc->rx_ring_history[i]);
5066 
5067 	dp_context_free_mem(soc, DP_RX_ERR_RING_HIST_TYPE,
5068 			    soc->rx_err_ring_history);
5069 
5070 	/*
5071 	 * No need for a featurized detach since qdf_mem_free takes
5072 	 * care of NULL pointer.
5073 	 */
5074 	dp_context_free_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
5075 			    soc->rx_reinject_ring_history);
5076 
5077 	for (i = 0; i < MAX_PDEV_CNT; i++)
5078 		dp_context_free_mem(soc, DP_RX_REFILL_RING_HIST_TYPE,
5079 				    soc->rx_refill_ring_history[i]);
5080 }
5081 
5082 #else
/* Stub: rx ring history feature disabled at build time. */
static inline void dp_soc_rx_history_attach(struct dp_soc *soc)
{
}
5086 
/* Stub: rx ring history feature disabled at build time. */
static inline void dp_soc_rx_history_detach(struct dp_soc *soc)
{
}
5090 #endif
5091 
5092 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
5093 /**
5094  * dp_soc_tx_history_attach() - Attach the ring history record buffers
5095  * @soc: DP soc structure
5096  *
5097  * This function allocates the memory for recording the tx tcl ring and
5098  * the tx comp ring entries. There is no error returned in case
5099  * of allocation failure since the record function checks if the history is
5100  * initialized or not. We do not want to fail the driver load in case of
5101  * failure to allocate memory for debug history.
5102  *
5103  * Returns: None
5104  */
5105 static void dp_soc_tx_history_attach(struct dp_soc *soc)
5106 {
5107 	uint32_t tx_tcl_hist_size;
5108 	uint32_t tx_comp_hist_size;
5109 
5110 	tx_tcl_hist_size = sizeof(*soc->tx_tcl_history);
5111 	soc->tx_tcl_history = dp_context_alloc_mem(soc, DP_TX_TCL_HIST_TYPE,
5112 						   tx_tcl_hist_size);
5113 	if (soc->tx_tcl_history)
5114 		qdf_atomic_init(&soc->tx_tcl_history->index);
5115 
5116 	tx_comp_hist_size = sizeof(*soc->tx_comp_history);
5117 	soc->tx_comp_history = dp_context_alloc_mem(soc, DP_TX_COMP_HIST_TYPE,
5118 						    tx_comp_hist_size);
5119 	if (soc->tx_comp_history)
5120 		qdf_atomic_init(&soc->tx_comp_history->index);
5121 }
5122 
5123 /**
5124  * dp_soc_tx_history_detach() - Detach the ring history record buffers
5125  * @soc: DP soc structure
5126  *
5127  * This function frees the memory for recording the tx tcl ring and
5128  * the tx comp ring entries.
5129  *
5130  * Returns: None
5131  */
5132 static void dp_soc_tx_history_detach(struct dp_soc *soc)
5133 {
5134 	dp_context_free_mem(soc, DP_TX_TCL_HIST_TYPE, soc->tx_tcl_history);
5135 	dp_context_free_mem(soc, DP_TX_COMP_HIST_TYPE, soc->tx_comp_history);
5136 }
5137 
5138 #else
/* Stub: tx descriptor history feature disabled at build time. */
static inline void dp_soc_tx_history_attach(struct dp_soc *soc)
{
}
5142 
/* Stub: tx descriptor history feature disabled at build time. */
static inline void dp_soc_tx_history_detach(struct dp_soc *soc)
{
}
5146 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
5147 
5148 /*
5149 * dp_pdev_attach_wifi3() - attach txrx pdev
5150 * @txrx_soc: Datapath SOC handle
5151 * @params: Params for PDEV attach
5152 *
5153 * Return: QDF_STATUS
5154 */
5155 static inline
5156 QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
5157 				struct cdp_pdev_attach_params *params)
5158 {
5159 	qdf_size_t pdev_context_size;
5160 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5161 	struct dp_pdev *pdev = NULL;
5162 	uint8_t pdev_id = params->pdev_id;
5163 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5164 	int nss_cfg;
5165 
5166 	pdev_context_size =
5167 		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_PDEV);
5168 	if (pdev_context_size)
5169 		pdev = dp_context_alloc_mem(soc, DP_PDEV_TYPE, pdev_context_size);
5170 
5171 	if (!pdev) {
5172 		dp_init_err("%pK: DP PDEV memory allocation failed",
5173 			    soc);
5174 		goto fail0;
5175 	}
5176 	wlan_minidump_log(pdev, sizeof(*pdev), soc->ctrl_psoc,
5177 			  WLAN_MD_DP_PDEV, "dp_pdev");
5178 
5179 	soc_cfg_ctx = soc->wlan_cfg_ctx;
5180 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
5181 
5182 	if (!pdev->wlan_cfg_ctx) {
5183 		dp_init_err("%pK: pdev cfg_attach failed", soc);
5184 		goto fail1;
5185 	}
5186 
5187 	/*
5188 	 * set nss pdev config based on soc config
5189 	 */
5190 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
5191 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
5192 					 (nss_cfg & (1 << pdev_id)));
5193 
5194 	pdev->soc = soc;
5195 	pdev->pdev_id = pdev_id;
5196 	soc->pdev_list[pdev_id] = pdev;
5197 
5198 	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
5199 	soc->pdev_count++;
5200 
5201 	/* Allocate memory for pdev srng rings */
5202 	if (dp_pdev_srng_alloc(pdev)) {
5203 		dp_init_err("%pK: dp_pdev_srng_alloc failed", soc);
5204 		goto fail2;
5205 	}
5206 
5207 	/* Setup second Rx refill buffer ring */
5208 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev)) {
5209 		dp_init_err("%pK: dp_srng_alloc failed rxrefill2 ring",
5210 			    soc);
5211 		goto fail3;
5212 	}
5213 
5214 	/* Allocate memory for pdev rxdma rings */
5215 	if (dp_rxdma_ring_alloc(soc, pdev)) {
5216 		dp_init_err("%pK: dp_rxdma_ring_alloc failed", soc);
5217 		goto fail4;
5218 	}
5219 
5220 	/* Rx specific init */
5221 	if (dp_rx_pdev_desc_pool_alloc(pdev)) {
5222 		dp_init_err("%pK: dp_rx_pdev_attach failed", soc);
5223 		goto fail4;
5224 	}
5225 
5226 	if (dp_monitor_pdev_attach(pdev)) {
5227 		dp_init_err("%pK: dp_monitor_pdev_attach failed", soc);
5228 		goto fail5;
5229 	}
5230 
5231 	soc->arch_ops.txrx_pdev_attach(pdev, params);
5232 
5233 	return QDF_STATUS_SUCCESS;
5234 fail5:
5235 	dp_rx_pdev_desc_pool_free(pdev);
5236 fail4:
5237 	dp_rxdma_ring_free(pdev);
5238 	dp_free_ipa_rx_refill_buf_ring(soc, pdev);
5239 fail3:
5240 	dp_pdev_srng_free(pdev);
5241 fail2:
5242 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
5243 fail1:
5244 	soc->pdev_list[pdev_id] = NULL;
5245 	qdf_mem_free(pdev);
5246 fail0:
5247 	return QDF_STATUS_E_FAILURE;
5248 }
5249 
5250 /**
5251  * dp_pdev_flush_pending_vdevs() - Flush all delete pending vdevs in pdev
5252  * @pdev: Datapath PDEV handle
5253  *
5254  * This is the last chance to flush all pending dp vdevs/peers,
5255  * some peer/vdev leak case like Non-SSR + peer unmap missing
5256  * will be covered here.
5257  *
5258  * Return: None
5259  */
5260 static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
5261 {
5262 	struct dp_soc *soc = pdev->soc;
5263 	struct dp_vdev *vdev_arr[MAX_VDEV_CNT] = {0};
5264 	uint32_t i = 0;
5265 	uint32_t num_vdevs = 0;
5266 	struct dp_vdev *vdev = NULL;
5267 
5268 	if (TAILQ_EMPTY(&soc->inactive_vdev_list))
5269 		return;
5270 
5271 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
5272 	TAILQ_FOREACH(vdev, &soc->inactive_vdev_list,
5273 		      inactive_list_elem) {
5274 		if (vdev->pdev != pdev)
5275 			continue;
5276 
5277 		vdev_arr[num_vdevs] = vdev;
5278 		num_vdevs++;
5279 		/* take reference to free */
5280 		dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CDP);
5281 	}
5282 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
5283 
5284 	for (i = 0; i < num_vdevs; i++) {
5285 		dp_vdev_flush_peers((struct cdp_vdev *)vdev_arr[i], 0);
5286 		dp_vdev_unref_delete(soc, vdev_arr[i], DP_MOD_ID_CDP);
5287 	}
5288 }
5289 
5290 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
5291 /**
5292  * dp_vdev_stats_hw_offload_target_config() - Send HTT command to FW
5293  *                                          for enable/disable of HW vdev stats
5294  * @soc: Datapath soc handle
5295  * @pdev_id: INVALID_PDEV_ID for all pdevs or 0,1,2 for individual pdev
5296  * @enable: flag to reprsent enable/disable of hw vdev stats
5297  *
5298  * Return: none
5299  */
5300 static void dp_vdev_stats_hw_offload_target_config(struct dp_soc *soc,
5301 						   uint8_t pdev_id,
5302 						   bool enable)
5303 {
5304 	/* Check SOC level config for HW offload vdev stats support */
5305 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
5306 		dp_debug("%pK: HW vdev offload stats is disabled", soc);
5307 		return;
5308 	}
5309 
5310 	/* Send HTT command to FW for enable of stats */
5311 	dp_h2t_hw_vdev_stats_config_send(soc, pdev_id, enable, false, 0);
5312 }
5313 
5314 /**
5315  * dp_vdev_stats_hw_offload_target_clear() - Clear HW vdev stats on target
5316  * @soc: Datapath soc handle
5317  * @pdev_id: pdev_id (0,1,2)
5318  * @bitmask: bitmask with vdev_id(s) for which stats are to be cleared on HW
5319  *
5320  * Return: none
5321  */
5322 static
5323 void dp_vdev_stats_hw_offload_target_clear(struct dp_soc *soc, uint8_t pdev_id,
5324 					   uint64_t vdev_id_bitmask)
5325 {
5326 	/* Check SOC level config for HW offload vdev stats support */
5327 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
5328 		dp_debug("%pK: HW vdev offload stats is disabled", soc);
5329 		return;
5330 	}
5331 
5332 	/* Send HTT command to FW for reset of stats */
5333 	dp_h2t_hw_vdev_stats_config_send(soc, pdev_id, true, true,
5334 					 vdev_id_bitmask);
5335 }
5336 #else
/* Stub: HW vdev stats offload not compiled in. */
static void
dp_vdev_stats_hw_offload_target_config(struct dp_soc *soc, uint8_t pdev_id,
				       bool enable)
{
}
5342 
/* Stub: HW vdev stats offload not compiled in. */
static
void dp_vdev_stats_hw_offload_target_clear(struct dp_soc *soc, uint8_t pdev_id,
					   uint64_t vdev_id_bitmask)
{
}
5348 #endif /*QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT */
5349 
/**
 * dp_pdev_deinit() - Deinit txrx pdev
 * @txrx_pdev: Datapath PDEV handle
 * @force: Force deinit
 *
 * Tears down pdev-scoped state. The sequence below is order-sensitive
 * (e.g. pending vdevs are flushed before tx descriptors); re-entry is
 * guarded by pdev->pdev_deinit.
 *
 * Return: None
 */
static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
	qdf_nbuf_t curr_nbuf, next_nbuf;

	/* Guard against a repeated deinit call */
	if (pdev->pdev_deinit)
		return;

	dp_tx_me_exit(pdev);
	dp_rx_fst_detach(pdev->soc, pdev);
	dp_rx_pdev_buffers_free(pdev);
	dp_rx_pdev_desc_pool_deinit(pdev);
	dp_pdev_bkp_stats_detach(pdev);
	qdf_event_destroy(&pdev->fw_peer_stats_event);
	if (pdev->sojourn_buf)
		qdf_nbuf_free(pdev->sojourn_buf);

	/* Flush delete-pending vdevs/peers before flushing tx descriptors */
	dp_pdev_flush_pending_vdevs(pdev);
	dp_tx_desc_flush(pdev, NULL, true);

	qdf_spinlock_destroy(&pdev->tx_mutex);
	qdf_spinlock_destroy(&pdev->vdev_list_lock);

	dp_monitor_pdev_deinit(pdev);

	dp_pdev_srng_deinit(pdev);

	dp_ipa_uc_detach(pdev->soc, pdev);
	dp_deinit_ipa_rx_refill_buf_ring(pdev->soc, pdev);
	dp_rxdma_ring_cleanup(pdev->soc, pdev);

	/* Drain the invalid-peer MSDU queue and reset its head/tail */
	curr_nbuf = pdev->invalid_peer_head_msdu;
	while (curr_nbuf) {
		next_nbuf = qdf_nbuf_next(curr_nbuf);
		dp_rx_nbuf_free(curr_nbuf);
		curr_nbuf = next_nbuf;
	}
	pdev->invalid_peer_head_msdu = NULL;
	pdev->invalid_peer_tail_msdu = NULL;

	dp_wdi_event_detach(pdev);
	/* Mark deinit complete so subsequent calls are no-ops */
	pdev->pdev_deinit = 1;
}
5400 
5401 /**
5402  * dp_pdev_deinit_wifi3() - Deinit txrx pdev
5403  * @psoc: Datapath psoc handle
5404  * @pdev_id: Id of datapath PDEV handle
5405  * @force: Force deinit
5406  *
5407  * Return: QDF_STATUS
5408  */
5409 static QDF_STATUS
5410 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
5411 		     int force)
5412 {
5413 	struct dp_pdev *txrx_pdev;
5414 
5415 	txrx_pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
5416 						       pdev_id);
5417 
5418 	if (!txrx_pdev)
5419 		return QDF_STATUS_E_FAILURE;
5420 
5421 	dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force);
5422 
5423 	return QDF_STATUS_SUCCESS;
5424 }
5425 
5426 /*
5427  * dp_pdev_post_attach() - Do post pdev attach after dev_alloc_name
5428  * @txrx_pdev: Datapath PDEV handle
5429  *
5430  * Return: None
5431  */
5432 static void dp_pdev_post_attach(struct cdp_pdev *txrx_pdev)
5433 {
5434 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
5435 
5436 	dp_monitor_tx_capture_debugfs_init(pdev);
5437 
5438 	if (dp_pdev_htt_stats_dbgfs_init(pdev)) {
5439 		dp_init_err("%pK: Failed to initialize pdev HTT stats debugfs", pdev->soc);
5440 	}
5441 }
5442 
5443 /*
5444  * dp_pdev_post_attach_wifi3() - attach txrx pdev post
5445  * @psoc: Datapath soc handle
5446  * @pdev_id: pdev id of pdev
5447  *
5448  * Return: QDF_STATUS
5449  */
5450 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *soc,
5451 				     uint8_t pdev_id)
5452 {
5453 	struct dp_pdev *pdev;
5454 
5455 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
5456 						  pdev_id);
5457 
5458 	if (!pdev) {
5459 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
5460 			    (struct dp_soc *)soc, pdev_id);
5461 		return QDF_STATUS_E_FAILURE;
5462 	}
5463 
5464 	dp_pdev_post_attach((struct cdp_pdev *)pdev);
5465 	return QDF_STATUS_SUCCESS;
5466 }
5467 
/*
 * dp_pdev_detach() - Complete rest of pdev detach
 * @txrx_pdev: Datapath PDEV handle
 * @force: Force deinit
 *
 * Frees everything allocated in dp_pdev_attach_wifi3() (descriptor
 * pools, rings, cfg context, the pdev itself) and unregisters the pdev
 * from the soc. NOTE(review): presumably runs after dp_pdev_deinit() —
 * confirm with the caller sequence.
 *
 * Return: None
 */
static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
{
	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
	struct dp_soc *soc = pdev->soc;

	dp_pdev_htt_stats_dbgfs_deinit(pdev);
	dp_rx_pdev_desc_pool_free(pdev);
	dp_monitor_pdev_detach(pdev);
	dp_rxdma_ring_free(pdev);
	dp_free_ipa_rx_refill_buf_ring(soc, pdev);
	dp_pdev_srng_free(pdev);

	/* Unregister from the soc before the pdev memory goes away */
	soc->pdev_count--;
	soc->pdev_list[pdev->pdev_id] = NULL;

	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
	wlan_minidump_remove(pdev, sizeof(*pdev), soc->ctrl_psoc,
			     WLAN_MD_DP_PDEV, "dp_pdev");
	dp_context_free_mem(soc, DP_PDEV_TYPE, pdev);
}
5495 
5496 /*
5497  * dp_pdev_detach_wifi3() - detach txrx pdev
5498  * @psoc: Datapath soc handle
5499  * @pdev_id: pdev id of pdev
5500  * @force: Force detach
5501  *
5502  * Return: QDF_STATUS
5503  */
5504 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
5505 				       int force)
5506 {
5507 	struct dp_pdev *pdev;
5508 
5509 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
5510 						  pdev_id);
5511 
5512 	if (!pdev) {
5513 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
5514 			    (struct dp_soc *)psoc, pdev_id);
5515 		return QDF_STATUS_E_FAILURE;
5516 	}
5517 
5518 	dp_pdev_detach((struct cdp_pdev *)pdev, force);
5519 	return QDF_STATUS_SUCCESS;
5520 }
5521 
/*
 * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
 * @soc: DP SOC handle
 *
 * Drains the freelist under its lock, unmapping and freeing each cached
 * hardware queue descriptor, then destroys the list and the lock itself.
 */
static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
{
	struct reo_desc_list_node *desc;
	struct dp_rx_tid *rx_tid;

	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
	while (qdf_list_remove_front(&soc->reo_desc_freelist,
		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
		rx_tid = &desc->rx_tid;
		/* Undo the DMA mapping before freeing the qdesc memory */
		qdf_mem_unmap_nbytes_single(soc->osdev,
			rx_tid->hw_qdesc_paddr,
			QDF_DMA_BIDIRECTIONAL,
			rx_tid->hw_qdesc_alloc_size);
		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
		qdf_mem_free(desc);
	}
	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
	/* List is empty now; tear down the list and its lock */
	qdf_list_destroy(&soc->reo_desc_freelist);
	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
}
5546 
5547 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
/*
 * dp_reo_desc_deferred_freelist_create() - Initialize the resources used
 *                                          for deferred reo desc list
 * @soc: Datapath soc handle
 *
 * Creates the lock and list used for deferred REO qdesc destruction.
 * The init flag is set last, so users only ever observe a fully
 * constructed list.
 *
 * Return: void
 */
static void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
{
	qdf_spinlock_create(&soc->reo_desc_deferred_freelist_lock);
	qdf_list_create(&soc->reo_desc_deferred_freelist,
			REO_DESC_DEFERRED_FREELIST_SIZE);
	soc->reo_desc_deferred_freelist_init = true;
}
5562 
5563 /*
5564  * dp_reo_desc_deferred_freelist_destroy() - loop the deferred free list &
5565  *                                           free the leftover REO QDESCs
5566  * @psoc: Datapath soc handle
5567  *
5568  * Return: void
5569  */
5570 static void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
5571 {
5572 	struct reo_desc_deferred_freelist_node *desc;
5573 
5574 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
5575 	soc->reo_desc_deferred_freelist_init = false;
5576 	while (qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
5577 	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
5578 		qdf_mem_unmap_nbytes_single(soc->osdev,
5579 					    desc->hw_qdesc_paddr,
5580 					    QDF_DMA_BIDIRECTIONAL,
5581 					    desc->hw_qdesc_alloc_size);
5582 		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
5583 		qdf_mem_free(desc);
5584 	}
5585 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
5586 
5587 	qdf_list_destroy(&soc->reo_desc_deferred_freelist);
5588 	qdf_spinlock_destroy(&soc->reo_desc_deferred_freelist_lock);
5589 }
5590 #else
/* Stub: deferred REO qdesc destroy feature not compiled in. */
static inline void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
{
}
5594 
/* Stub: deferred REO qdesc destroy feature not compiled in. */
static inline void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
{
}
5598 #endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
5599 
5600 /*
5601  * dp_soc_reset_txrx_ring_map() - reset tx ring map
5602  * @soc: DP SOC handle
5603  *
5604  */
5605 static void dp_soc_reset_txrx_ring_map(struct dp_soc *soc)
5606 {
5607 	uint32_t i;
5608 
5609 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
5610 		soc->tx_ring_map[i] = 0;
5611 }
5612 
/*
 * dp_soc_print_inactive_objects() - prints inactive peer and vdev list
 * @soc: DP SOC handle
 *
 * Debug aid for leaked references at teardown: for every peer/vdev still
 * on the inactive lists, prints the per-module reference counts, then
 * asserts. Returns silently only when both lists are empty.
 */
static void dp_soc_print_inactive_objects(struct dp_soc *soc)
{
	struct dp_peer *peer = NULL;
	struct dp_peer *tmp_peer = NULL;
	struct dp_vdev *vdev = NULL;
	struct dp_vdev *tmp_vdev = NULL;
	int i = 0;
	uint32_t count;

	if (TAILQ_EMPTY(&soc->inactive_peer_list) &&
	    TAILQ_EMPTY(&soc->inactive_vdev_list))
		return;

	/* Dump every module still holding a reference on each peer */
	TAILQ_FOREACH_SAFE(peer, &soc->inactive_peer_list,
			   inactive_list_elem, tmp_peer) {
		for (i = 0; i < DP_MOD_ID_MAX; i++) {
			count = qdf_atomic_read(&peer->mod_refs[i]);
			if (count)
				DP_PRINT_STATS("peer %pK Module id %u ==> %u",
					       peer, i, count);
		}
	}

	/* Likewise for vdevs */
	TAILQ_FOREACH_SAFE(vdev, &soc->inactive_vdev_list,
			   inactive_list_elem, tmp_vdev) {
		for (i = 0; i < DP_MOD_ID_MAX; i++) {
			count = qdf_atomic_read(&vdev->mod_refs[i]);
			if (count)
				DP_PRINT_STATS("vdev %pK Module id %u ==> %u",
					       vdev, i, count);
		}
	}
	/* Any leftover inactive object is a leak: assert unconditionally */
	QDF_BUG(0);
}
5652 
/**
 * dp_soc_deinit() - Deinitialize txrx SOC
 * @txrx_soc: Opaque DP SOC handle
 *
 * Tears down all soc-level state in roughly the reverse order of init:
 * arch/monitor hooks, peer tables, stats work, REO freelists, locks,
 * tx descriptor pools, srngs, and finally the HTT handle. The order is
 * significant; do not reorder without auditing the init path.
 *
 * Return: None
 */
static void dp_soc_deinit(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	struct htt_soc *htt_soc = soc->htt_handle;
	struct dp_mon_ops *mon_ops;

	/* Mark common init undone first so other paths see teardown */
	qdf_atomic_set(&soc->cmn_init_done, 0);

	soc->arch_ops.txrx_soc_deinit(soc);

	mon_ops = dp_mon_ops_get(soc);
	if (mon_ops && mon_ops->mon_soc_deinit)
		mon_ops->mon_soc_deinit(soc);

	/* free peer tables & AST tables allocated during peer_map_attach */
	if (soc->peer_map_attach_success) {
		dp_peer_find_detach(soc);
		soc->arch_ops.txrx_peer_map_detach(soc);
		soc->peer_map_attach_success = FALSE;
	}

	/* Drain and disable the HTT stats worker before destroying its lock */
	qdf_flush_work(&soc->htt_stats.work);
	qdf_disable_work(&soc->htt_stats.work);

	qdf_spinlock_destroy(&soc->htt_stats.lock);

	dp_soc_reset_txrx_ring_map(soc);

	dp_reo_desc_freelist_destroy(soc);
	dp_reo_desc_deferred_freelist_destroy(soc);

	DEINIT_RX_HW_STATS_LOCK(soc);

	qdf_spinlock_destroy(&soc->ast_lock);

	dp_peer_mec_spinlock_destroy(soc);

	qdf_nbuf_queue_free(&soc->htt_stats.msg);

	qdf_nbuf_queue_free(&soc->invalid_buf_queue);

	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);

	qdf_spinlock_destroy(&soc->vdev_map_lock);

	/* Flush pending REO commands before destroying the command lock */
	dp_reo_cmdlist_destroy(soc);
	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);

	dp_soc_tx_desc_sw_pools_deinit(soc);

	dp_soc_srng_deinit(soc);

	dp_hw_link_desc_ring_deinit(soc);

	/* Asserts if any peer/vdev reference is still outstanding */
	dp_soc_print_inactive_objects(soc);
	qdf_spinlock_destroy(&soc->inactive_peer_list_lock);
	qdf_spinlock_destroy(&soc->inactive_vdev_list_lock);

	htt_soc_htc_dealloc(soc->htt_handle);

	htt_soc_detach(htt_soc);

	/* Free wbm sg list and reset flags in down path */
	dp_rx_wbm_sg_list_deinit(soc);

	wlan_minidump_remove(soc, sizeof(*soc), soc->ctrl_psoc,
			     WLAN_MD_DP_SOC, "dp_soc");
}
5727 
/**
 * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
 * @txrx_soc: Opaque DP SOC handle
 *
 * CDP-facing wrapper around dp_soc_deinit().
 *
 * Return: None
 */
static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc)
{
	dp_soc_deinit(txrx_soc);
}
5738 
/*
 * dp_soc_detach() - Detach rest of txrx SOC
 * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
 *
 * Frees all remaining soc-level allocations (descriptor pools, srngs,
 * link descriptor banks, cfg context, debug histories) and finally the
 * soc structure itself. Expected to follow dp_soc_deinit().
 *
 * Return: None
 */
static void dp_soc_detach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	soc->arch_ops.txrx_soc_detach(soc);

	dp_sysfs_deinitialize_stats(soc);
	dp_soc_swlm_detach(soc);
	dp_soc_tx_desc_sw_pools_free(soc);
	dp_soc_srng_free(soc);
	dp_hw_link_desc_ring_free(soc);
	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
	dp_soc_tx_hw_desc_history_detach(soc);
	dp_soc_tx_history_detach(soc);
	dp_soc_rx_history_detach(soc);

	/* Monitor soc is torn down here only when not a separate module */
	if (!dp_monitor_modularized_enable()) {
		dp_mon_soc_detach_wrapper(soc);
	}

	qdf_mem_free(soc->cdp_soc.ops);
	qdf_mem_free(soc);
}
5769 
/*
 * dp_soc_detach_wifi3() - Detach txrx SOC
 * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
 *
 * CDP-facing wrapper around dp_soc_detach().
 *
 * Return: None
 */
static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc)
{
	dp_soc_detach(txrx_soc);
}
5780 
5781 /*
5782  * dp_rxdma_ring_config() - configure the RX DMA rings
5783  *
5784  * This function is used to configure the MAC rings.
5785  * On MCL host provides buffers in Host2FW ring
5786  * FW refills (copies) buffers to the ring and updates
5787  * ring_idx in register
5788  *
5789  * @soc: data path SoC handle
5790  *
5791  * Return: zero on success, non-zero on failure
5792  */
5793 #ifdef QCA_HOST2FW_RXBUF_RING
5794 static inline void
5795 dp_htt_setup_rxdma_err_dst_ring(struct dp_soc *soc, int mac_id,
5796 				int lmac_id)
5797 {
5798 	if (soc->rxdma_err_dst_ring[lmac_id].hal_srng)
5799 		htt_srng_setup(soc->htt_handle, mac_id,
5800 			       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
5801 			       RXDMA_DST);
5802 }
5803 
static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (pdev) {
			int mac_id;
			bool dbs_enable = 0;
			int max_mac_rings =
				 wlan_cfg_get_num_mac_rings
				(pdev->wlan_cfg_ctx);
			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);

			/* Hand this pdev's primary rx refill ring to FW */
			htt_srng_setup(soc->htt_handle, i,
				       soc->rx_refill_buf_ring[lmac_id]
				       .hal_srng,
				       RXDMA_BUF);

			/* Second refill ring (IPA) only if initialized */
			if (pdev->rx_refill_buf_ring2.hal_srng)
				htt_srng_setup(soc->htt_handle, i,
					       pdev->rx_refill_buf_ring2
					       .hal_srng,
					       RXDMA_BUF);

			/* Optional ctrl-plane query for DBS 2x2 capability */
			if (soc->cdp_soc.ol_ops->
				is_hw_dbs_2x2_capable) {
				dbs_enable = soc->cdp_soc.ol_ops->
					is_hw_dbs_2x2_capable(
							(void *)soc->ctrl_psoc);
			}

			/* Without DBS only one MAC ring is programmed */
			if (dbs_enable) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
				QDF_TRACE_LEVEL_ERROR,
				FL("DBS enabled max_mac_rings %d"),
					 max_mac_rings);
			} else {
				max_mac_rings = 1;
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					 QDF_TRACE_LEVEL_ERROR,
					 FL("DBS disabled, max_mac_rings %d"),
					 max_mac_rings);
			}

			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
					 FL("pdev_id %d max_mac_rings %d"),
					 pdev->pdev_id, max_mac_rings);

			/* Program the per-MAC rings for this pdev */
			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev =
					dp_get_mac_id_for_pdev(mac_id,
							       pdev->pdev_id);
				/*
				 * Obtain lmac id from pdev to access the LMAC
				 * ring in soc context
				 */
				lmac_id =
				dp_get_lmac_id_for_pdev_id(soc,
							   mac_id,
							   pdev->pdev_id);
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					 QDF_TRACE_LEVEL_ERROR,
					 FL("mac_id %d"), mac_for_pdev);

				htt_srng_setup(soc->htt_handle, mac_for_pdev,
					 pdev->rx_mac_buf_ring[mac_id]
						.hal_srng,
					 RXDMA_BUF);

				if (!soc->rxdma2sw_rings_not_supported)
					dp_htt_setup_rxdma_err_dst_ring(soc,
						mac_for_pdev, lmac_id);

				/* Configure monitor mode rings */
				status = dp_monitor_htt_srng_setup(soc, pdev,
								   lmac_id,
								   mac_for_pdev);
				if (status != QDF_STATUS_SUCCESS) {
					dp_err("Failed to send htt monitor messages to target");
					return status;
				}

			}
		}
	}

	dp_reap_timer_init(soc);
	return status;
}
5896 #else
/* This is only for WIN: FW does not refill host rings here, so only the
 * refill, monitor and error-destination rings are handed to the target.
 */
static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	int mac_for_pdev;
	int lmac_id;

	/* Configure monitor mode rings */
	dp_monitor_soc_htt_srng_setup(soc);

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev =  soc->pdev_list[i];

		if (!pdev)
			continue;

		/* On WIN the pdev id maps 1:1 onto the HTT mac id */
		mac_for_pdev = i;
		lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);

		if (soc->rx_refill_buf_ring[lmac_id].hal_srng)
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				       soc->rx_refill_buf_ring[lmac_id].
				       hal_srng, RXDMA_BUF);

		/* Configure monitor mode rings */
		dp_monitor_htt_srng_setup(soc, pdev,
					  lmac_id,
					  mac_for_pdev);
		if (!soc->rxdma2sw_rings_not_supported)
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
				       RXDMA_DST);
	}

	dp_reap_timer_init(soc);
	return status;
}
5935 #endif
5936 
5937 /*
5938  * dp_rx_target_fst_config() - configure the RXOLE Flow Search Engine
5939  *
5940  * This function is used to configure the FSE HW block in RX OLE on a
5941  * per pdev basis. Here, we will be programming parameters related to
5942  * the Flow Search Table.
5943  *
5944  * @soc: data path SoC handle
5945  *
5946  * Return: zero on success, non-zero on failure
5947  */
5948 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
5949 static QDF_STATUS
5950 dp_rx_target_fst_config(struct dp_soc *soc)
5951 {
5952 	int i;
5953 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5954 
5955 	for (i = 0; i < MAX_PDEV_CNT; i++) {
5956 		struct dp_pdev *pdev = soc->pdev_list[i];
5957 
5958 		/* Flow search is not enabled if NSS offload is enabled */
5959 		if (pdev &&
5960 		    !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
5961 			status = dp_rx_flow_send_fst_fw_setup(pdev->soc, pdev);
5962 			if (status != QDF_STATUS_SUCCESS)
5963 				break;
5964 		}
5965 	}
5966 	return status;
5967 }
5968 #elif defined(WLAN_SUPPORT_RX_FISA)
5969 /**
5970  * dp_rx_target_fst_config() - Configure RX OLE FSE engine in HW
5971  * @soc: SoC handle
5972  *
5973  * Return: Success
5974  */
5975 static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
5976 {
5977 	/* Check if it is enabled in the INI */
5978 	if (!soc->fisa_enable) {
5979 		dp_err("RX FISA feature is disabled");
5980 		return QDF_STATUS_E_NOSUPPORT;
5981 	}
5982 
5983 	return dp_rx_flow_send_fst_fw_setup(soc, soc->pdev_list[0]);
5984 }
5985 
5986 #define FISA_MAX_TIMEOUT 0xffffffff
5987 #define FISA_DISABLE_TIMEOUT 0
5988 static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
5989 {
5990 	struct dp_htt_rx_fisa_cfg fisa_config;
5991 
5992 	fisa_config.pdev_id = 0;
5993 	fisa_config.fisa_timeout = FISA_MAX_TIMEOUT;
5994 
5995 	return dp_htt_rx_fisa_config(soc->pdev_list[0], &fisa_config);
5996 }
5997 #else /* !WLAN_SUPPORT_RX_FISA */
/* Stub: neither RX flow tag nor FISA is compiled in; nothing to program. */
static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
6002 #endif /* !WLAN_SUPPORT_RX_FISA */
6003 
6004 #ifndef WLAN_SUPPORT_RX_FISA
/* Stub: FISA not compiled in. */
static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
6009 
/* Stub: FISA not compiled in. */
static QDF_STATUS dp_rx_dump_fisa_stats(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
6014 
/* Stub: FISA not compiled in. */
static void dp_rx_dump_fisa_table(struct dp_soc *soc)
{
}
6018 
/* Stub: FISA not compiled in. */
static void dp_suspend_fse_cache_flush(struct dp_soc *soc)
{
}
6022 
/* Stub: FISA not compiled in. */
static void dp_resume_fse_cache_flush(struct dp_soc *soc)
{
}
6026 #endif /* !WLAN_SUPPORT_RX_FISA */
6027 
6028 #ifndef WLAN_DP_FEATURE_SW_LATENCY_MGR
/* Stub: SW latency manager not compiled in; no stats to print. */
static inline QDF_STATUS dp_print_swlm_stats(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
6033 #endif /* !WLAN_DP_FEATURE_SW_LATENCY_MGR */
6034 
/*
 * dp_soc_attach_target_wifi3() - SOC initialization in the target
 * @cdp_soc: Opaque Datapath SOC handle
 *
 * Sends the HTT configuration sequence to the target: srng setup, ring
 * selection config, FSE/FISA setup, then initializes soc stats, runtime
 * PM bookkeeping, HW vdev stats and the HTT stats work queue. Any HTT
 * send failure aborts the sequence and is returned to the caller.
 *
 * Return: zero on success, non-zero on failure
 */
static QDF_STATUS
dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	htt_soc_attach_target(soc->htt_handle);

	status = dp_rxdma_ring_config(soc);
	if (status != QDF_STATUS_SUCCESS) {
		dp_err("Failed to send htt srng setup messages to target");
		return status;
	}

	status = soc->arch_ops.dp_rxdma_ring_sel_cfg(soc);
	if (status != QDF_STATUS_SUCCESS) {
		dp_err("Failed to send htt ring config message to target");
		return status;
	}

	/* E_NOSUPPORT (feature disabled) is tolerated; real errors abort */
	status = dp_rx_target_fst_config(soc);
	if (status != QDF_STATUS_SUCCESS &&
	    status != QDF_STATUS_E_NOSUPPORT) {
		dp_err("Failed to send htt fst setup config message to target");
		return status;
	}

	/* FISA config only follows a successful FST setup */
	if (status == QDF_STATUS_SUCCESS) {
		status = dp_rx_fisa_config(soc);
		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt FISA config message to target");
			return status;
		}
	}

	DP_STATS_INIT(soc);

	dp_runtime_init(soc);

	/* Enable HW vdev offload stats if feature is supported */
	dp_vdev_stats_hw_offload_target_config(soc, INVALID_PDEV_ID, true);

	/* initialize work queue for stats processing */
	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);

	return QDF_STATUS_SUCCESS;
}
6088 
6089 /*
6090  * dp_vdev_id_map_tbl_add() - Add vdev into vdev_id table
6091  * @soc: SoC handle
6092  * @vdev: vdev handle
6093  * @vdev_id: vdev_id
6094  *
6095  * Return: None
6096  */
6097 static void dp_vdev_id_map_tbl_add(struct dp_soc *soc,
6098 				   struct dp_vdev *vdev,
6099 				   uint8_t vdev_id)
6100 {
6101 	QDF_ASSERT(vdev_id <= MAX_VDEV_CNT);
6102 
6103 	qdf_spin_lock_bh(&soc->vdev_map_lock);
6104 
6105 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
6106 			QDF_STATUS_SUCCESS) {
6107 		dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK vdev_id %u",
6108 			     soc, vdev, vdev_id);
6109 		qdf_spin_unlock_bh(&soc->vdev_map_lock);
6110 		return;
6111 	}
6112 
6113 	if (!soc->vdev_id_map[vdev_id])
6114 		soc->vdev_id_map[vdev_id] = vdev;
6115 	else
6116 		QDF_ASSERT(0);
6117 
6118 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
6119 }
6120 
6121 /*
6122  * dp_vdev_id_map_tbl_remove() - remove vdev from vdev_id table
6123  * @soc: SoC handle
6124  * @vdev: vdev handle
6125  *
6126  * Return: None
6127  */
6128 static void dp_vdev_id_map_tbl_remove(struct dp_soc *soc,
6129 				      struct dp_vdev *vdev)
6130 {
6131 	qdf_spin_lock_bh(&soc->vdev_map_lock);
6132 	QDF_ASSERT(soc->vdev_id_map[vdev->vdev_id] == vdev);
6133 
6134 	soc->vdev_id_map[vdev->vdev_id] = NULL;
6135 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
6136 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
6137 }
6138 
6139 /*
6140  * dp_vdev_pdev_list_add() - add vdev into pdev's list
6141  * @soc: soc handle
6142  * @pdev: pdev handle
6143  * @vdev: vdev handle
6144  *
6145  * return: none
6146  */
6147 static void dp_vdev_pdev_list_add(struct dp_soc *soc,
6148 				  struct dp_pdev *pdev,
6149 				  struct dp_vdev *vdev)
6150 {
6151 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
6152 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
6153 			QDF_STATUS_SUCCESS) {
6154 		dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK",
6155 			     soc, vdev);
6156 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6157 		return;
6158 	}
6159 	/* add this vdev into the pdev's list */
6160 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
6161 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6162 }
6163 
6164 /*
6165  * dp_vdev_pdev_list_remove() - remove vdev from pdev's list
6166  * @soc: SoC handle
6167  * @pdev: pdev handle
6168  * @vdev: VDEV handle
6169  *
6170  * Return: none
6171  */
6172 static void dp_vdev_pdev_list_remove(struct dp_soc *soc,
6173 				     struct dp_pdev *pdev,
6174 				     struct dp_vdev *vdev)
6175 {
6176 	uint8_t found = 0;
6177 	struct dp_vdev *tmpvdev = NULL;
6178 
6179 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
6180 	TAILQ_FOREACH(tmpvdev, &pdev->vdev_list, vdev_list_elem) {
6181 		if (tmpvdev == vdev) {
6182 			found = 1;
6183 			break;
6184 		}
6185 	}
6186 
6187 	if (found) {
6188 		TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
6189 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
6190 	} else {
6191 		dp_vdev_debug("%pK: vdev:%pK not found in pdev:%pK vdevlist:%pK",
6192 			      soc, vdev, pdev, &pdev->vdev_list);
6193 		QDF_ASSERT(0);
6194 	}
6195 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6196 }
6197 
6198 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
6199 /*
6200  * dp_vdev_init_rx_eapol() - initializing osif_rx_eapol
6201  * @vdev: Datapath VDEV handle
6202  *
6203  * Return: None
6204  */
6205 static inline void dp_vdev_init_rx_eapol(struct dp_vdev *vdev)
6206 {
6207 	vdev->osif_rx_eapol = NULL;
6208 }
6209 
6210 /*
6211  * dp_vdev_register_rx_eapol() - Register VDEV operations for rx_eapol
6212  * @vdev: DP vdev handle
6213  * @txrx_ops: Tx and Rx operations
6214  *
6215  * Return: None
6216  */
6217 static inline void dp_vdev_register_rx_eapol(struct dp_vdev *vdev,
6218 					     struct ol_txrx_ops *txrx_ops)
6219 {
6220 	vdev->osif_rx_eapol = txrx_ops->rx.rx_eapol;
6221 }
6222 #else
/* EAPOL-over-control-port compiled out: nothing to initialize */
static inline void dp_vdev_init_rx_eapol(struct dp_vdev *vdev)
{
}
6226 
/* EAPOL-over-control-port compiled out: nothing to register */
static inline void dp_vdev_register_rx_eapol(struct dp_vdev *vdev,
					     struct ol_txrx_ops *txrx_ops)
{
}
6231 #endif
6232 
6233 #ifdef WLAN_FEATURE_11BE_MLO
6234 static inline void dp_vdev_save_mld_addr(struct dp_vdev *vdev,
6235 					 struct cdp_vdev_info *vdev_info)
6236 {
6237 	if (vdev_info->mld_mac_addr)
6238 		qdf_mem_copy(&vdev->mld_mac_addr.raw[0],
6239 			     vdev_info->mld_mac_addr, QDF_MAC_ADDR_SIZE);
6240 }
6241 #else
/* 11BE MLO compiled out: vdev carries no MLD address to record */
static inline void dp_vdev_save_mld_addr(struct dp_vdev *vdev,
					 struct cdp_vdev_info *vdev_info)
{

}
6247 #endif
6248 
6249 /*
6250 * dp_vdev_attach_wifi3() - attach txrx vdev
6251 * @txrx_pdev: Datapath PDEV handle
6252 * @pdev_id: PDEV ID for vdev creation
6253 * @vdev_info: parameters used for vdev creation
6254 *
6255 * Return: status
6256 */
6257 static QDF_STATUS dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc,
6258 				       uint8_t pdev_id,
6259 				       struct cdp_vdev_info *vdev_info)
6260 {
6261 	int i = 0;
6262 	qdf_size_t vdev_context_size;
6263 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
6264 	struct dp_pdev *pdev =
6265 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
6266 						   pdev_id);
6267 	struct dp_vdev *vdev;
6268 	uint8_t *vdev_mac_addr = vdev_info->vdev_mac_addr;
6269 	uint8_t vdev_id = vdev_info->vdev_id;
6270 	enum wlan_op_mode op_mode = vdev_info->op_mode;
6271 	enum wlan_op_subtype subtype = vdev_info->subtype;
6272 	uint8_t vdev_stats_id = vdev_info->vdev_stats_id;
6273 
6274 	vdev_context_size =
6275 		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_VDEV);
6276 	vdev = qdf_mem_malloc(vdev_context_size);
6277 
6278 	if (!pdev) {
6279 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
6280 			    cdp_soc, pdev_id);
6281 		qdf_mem_free(vdev);
6282 		goto fail0;
6283 	}
6284 
6285 	if (!vdev) {
6286 		dp_init_err("%pK: DP VDEV memory allocation failed",
6287 			    cdp_soc);
6288 		goto fail0;
6289 	}
6290 
6291 	wlan_minidump_log(vdev, sizeof(*vdev), soc->ctrl_psoc,
6292 			  WLAN_MD_DP_VDEV, "dp_vdev");
6293 
6294 	vdev->pdev = pdev;
6295 	vdev->vdev_id = vdev_id;
6296 	vdev->vdev_stats_id = vdev_stats_id;
6297 	vdev->opmode = op_mode;
6298 	vdev->subtype = subtype;
6299 	vdev->osdev = soc->osdev;
6300 
6301 	vdev->osif_rx = NULL;
6302 	vdev->osif_rsim_rx_decap = NULL;
6303 	vdev->osif_get_key = NULL;
6304 	vdev->osif_tx_free_ext = NULL;
6305 	vdev->osif_vdev = NULL;
6306 
6307 	vdev->delete.pending = 0;
6308 	vdev->safemode = 0;
6309 	vdev->drop_unenc = 1;
6310 	vdev->sec_type = cdp_sec_type_none;
6311 	vdev->multipass_en = false;
6312 	dp_vdev_init_rx_eapol(vdev);
6313 	qdf_atomic_init(&vdev->ref_cnt);
6314 	for (i = 0; i < DP_MOD_ID_MAX; i++)
6315 		qdf_atomic_init(&vdev->mod_refs[i]);
6316 
6317 	/* Take one reference for create*/
6318 	qdf_atomic_inc(&vdev->ref_cnt);
6319 	qdf_atomic_inc(&vdev->mod_refs[DP_MOD_ID_CONFIG]);
6320 	vdev->num_peers = 0;
6321 #ifdef notyet
6322 	vdev->filters_num = 0;
6323 #endif
6324 	vdev->lmac_id = pdev->lmac_id;
6325 
6326 	qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE);
6327 
6328 	dp_vdev_save_mld_addr(vdev, vdev_info);
6329 
6330 	/* TODO: Initialize default HTT meta data that will be used in
6331 	 * TCL descriptors for packets transmitted from this VDEV
6332 	 */
6333 
6334 	qdf_spinlock_create(&vdev->peer_list_lock);
6335 	TAILQ_INIT(&vdev->peer_list);
6336 	dp_peer_multipass_list_init(vdev);
6337 	if ((soc->intr_mode == DP_INTR_POLL) &&
6338 	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
6339 		if ((pdev->vdev_count == 0) ||
6340 		    (wlan_op_mode_monitor == vdev->opmode))
6341 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
6342 	} else if (dp_soc_get_con_mode(soc) == QDF_GLOBAL_MISSION_MODE &&
6343 		   soc->intr_mode == DP_INTR_MSI &&
6344 		   wlan_op_mode_monitor == vdev->opmode) {
6345 		/* Timer to reap status ring in mission mode */
6346 		dp_monitor_vdev_timer_start(soc);
6347 	}
6348 
6349 	dp_vdev_id_map_tbl_add(soc, vdev, vdev_id);
6350 
6351 	if (wlan_op_mode_monitor == vdev->opmode) {
6352 		if (dp_monitor_vdev_attach(vdev) == QDF_STATUS_SUCCESS) {
6353 			dp_monitor_pdev_set_mon_vdev(vdev);
6354 			dp_monitor_vdev_set_monitor_mode_buf_rings(pdev);
6355 			return QDF_STATUS_SUCCESS;
6356 		}
6357 		return QDF_STATUS_E_FAILURE;
6358 	}
6359 
6360 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
6361 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
6362 	vdev->dscp_tid_map_id = 0;
6363 	vdev->mcast_enhancement_en = 0;
6364 	vdev->igmp_mcast_enhanc_en = 0;
6365 	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
6366 	vdev->prev_tx_enq_tstamp = 0;
6367 	vdev->prev_rx_deliver_tstamp = 0;
6368 	vdev->skip_sw_tid_classification = DP_TX_HW_DSCP_TID_MAP_VALID;
6369 
6370 	dp_vdev_pdev_list_add(soc, pdev, vdev);
6371 	pdev->vdev_count++;
6372 
6373 	if (wlan_op_mode_sta != vdev->opmode &&
6374 	    wlan_op_mode_ndi != vdev->opmode)
6375 		vdev->ap_bridge_enabled = true;
6376 	else
6377 		vdev->ap_bridge_enabled = false;
6378 	dp_init_info("%pK: wlan_cfg_ap_bridge_enabled %d",
6379 		     cdp_soc, vdev->ap_bridge_enabled);
6380 
6381 	dp_tx_vdev_attach(vdev);
6382 
6383 	dp_monitor_vdev_attach(vdev);
6384 	if (!pdev->is_lro_hash_configured) {
6385 		if (QDF_IS_STATUS_SUCCESS(dp_lro_hash_setup(soc, pdev)))
6386 			pdev->is_lro_hash_configured = true;
6387 		else
6388 			dp_err("LRO hash setup failure!");
6389 	}
6390 
6391 	dp_info("Created vdev %pK ("QDF_MAC_ADDR_FMT")", vdev,
6392 		QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
6393 	DP_STATS_INIT(vdev);
6394 
6395 	if (QDF_IS_STATUS_ERROR(soc->arch_ops.txrx_vdev_attach(soc, vdev)))
6396 		goto fail0;
6397 
6398 	if (wlan_op_mode_sta == vdev->opmode)
6399 		dp_peer_create_wifi3((struct cdp_soc_t *)soc, vdev_id,
6400 				     vdev->mac_addr.raw, CDP_LINK_PEER_TYPE);
6401 	return QDF_STATUS_SUCCESS;
6402 
6403 fail0:
6404 	return QDF_STATUS_E_FAILURE;
6405 }
6406 
6407 #ifndef QCA_HOST_MODE_WIFI_DISABLED
6408 /**
6409  * dp_vdev_register_tx_handler() - Register Tx handler
6410  * @vdev: struct dp_vdev *
6411  * @soc: struct dp_soc *
6412  * @txrx_ops: struct ol_txrx_ops *
6413  */
6414 static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
6415 					       struct dp_soc *soc,
6416 					       struct ol_txrx_ops *txrx_ops)
6417 {
6418 	/* Enable vdev_id check only for ap, if flag is enabled */
6419 	if (vdev->mesh_vdev)
6420 		txrx_ops->tx.tx = dp_tx_send_mesh;
6421 	else if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
6422 		 (vdev->opmode == wlan_op_mode_ap))
6423 		txrx_ops->tx.tx = dp_tx_send_vdev_id_check;
6424 	else
6425 		txrx_ops->tx.tx = dp_tx_send;
6426 
6427 	/* Avoid check in regular exception Path */
6428 	if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
6429 	    (vdev->opmode == wlan_op_mode_ap))
6430 		txrx_ops->tx.tx_exception = dp_tx_send_exception_vdev_id_check;
6431 	else
6432 		txrx_ops->tx.tx_exception = dp_tx_send_exception;
6433 
6434 	dp_info("Configure tx_vdev_id_chk_handler Feature Flag: %d and mode:%d for vdev_id:%d",
6435 		wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx),
6436 		vdev->opmode, vdev->vdev_id);
6437 }
6438 #else /* QCA_HOST_MODE_WIFI_DISABLED */
/* Host-mode WiFi disabled: the host does not own the Tx datapath */
static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
					       struct dp_soc *soc,
					       struct ol_txrx_ops *txrx_ops)
{
}
6444 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
6445 
6446 /**
6447  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
6448  * @soc: Datapath soc handle
6449  * @vdev_id: id of Datapath VDEV handle
6450  * @osif_vdev: OSIF vdev handle
6451  * @txrx_ops: Tx and Rx operations
6452  *
6453  * Return: DP VDEV handle on success, NULL on failure
6454  */
6455 static QDF_STATUS dp_vdev_register_wifi3(struct cdp_soc_t *soc_hdl,
6456 					 uint8_t vdev_id,
6457 					 ol_osif_vdev_handle osif_vdev,
6458 					 struct ol_txrx_ops *txrx_ops)
6459 {
6460 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6461 	struct dp_vdev *vdev =	dp_vdev_get_ref_by_id(soc, vdev_id,
6462 						      DP_MOD_ID_CDP);
6463 
6464 	if (!vdev)
6465 		return QDF_STATUS_E_FAILURE;
6466 
6467 	vdev->osif_vdev = osif_vdev;
6468 	vdev->osif_rx = txrx_ops->rx.rx;
6469 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
6470 	vdev->osif_rx_flush = txrx_ops->rx.rx_flush;
6471 	vdev->osif_gro_flush = txrx_ops->rx.rx_gro_flush;
6472 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
6473 	vdev->osif_fisa_rx = txrx_ops->rx.osif_fisa_rx;
6474 	vdev->osif_fisa_flush = txrx_ops->rx.osif_fisa_flush;
6475 	vdev->osif_get_key = txrx_ops->get_key;
6476 	dp_monitor_vdev_register_osif(vdev, txrx_ops);
6477 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
6478 	vdev->tx_comp = txrx_ops->tx.tx_comp;
6479 	vdev->stats_cb = txrx_ops->rx.stats_rx;
6480 #ifdef notyet
6481 #if ATH_SUPPORT_WAPI
6482 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
6483 #endif
6484 #endif
6485 #ifdef UMAC_SUPPORT_PROXY_ARP
6486 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
6487 #endif
6488 	vdev->me_convert = txrx_ops->me_convert;
6489 	vdev->get_tsf_time = txrx_ops->get_tsf_time;
6490 
6491 	dp_vdev_register_rx_eapol(vdev, txrx_ops);
6492 
6493 	dp_vdev_register_tx_handler(vdev, soc, txrx_ops);
6494 
6495 	dp_init_info("%pK: DP Vdev Register success", soc);
6496 
6497 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6498 	return QDF_STATUS_SUCCESS;
6499 }
6500 
6501 void dp_peer_delete(struct dp_soc *soc,
6502 		    struct dp_peer *peer,
6503 		    void *arg)
6504 {
6505 	if (!peer->valid)
6506 		return;
6507 
6508 	dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
6509 			     peer->vdev->vdev_id,
6510 			     peer->mac_addr.raw, 0);
6511 }
6512 
6513 /**
6514  * dp_vdev_flush_peers() - Forcibily Flush peers of vdev
6515  * @vdev: Datapath VDEV handle
6516  * @unmap_only: Flag to indicate "only unmap"
6517  *
6518  * Return: void
6519  */
6520 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, bool unmap_only)
6521 {
6522 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6523 	struct dp_pdev *pdev = vdev->pdev;
6524 	struct dp_soc *soc = pdev->soc;
6525 	struct dp_peer *peer;
6526 	uint32_t i = 0;
6527 
6528 
6529 	if (!unmap_only)
6530 		dp_vdev_iterate_peer_lock_safe(vdev, dp_peer_delete, NULL,
6531 					       DP_MOD_ID_CDP);
6532 
6533 	for (i = 0; i < soc->max_peer_id ; i++) {
6534 		peer = __dp_peer_get_ref_by_id(soc, i, DP_MOD_ID_CDP);
6535 
6536 		if (!peer)
6537 			continue;
6538 
6539 		if (peer->vdev != vdev) {
6540 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6541 			continue;
6542 		}
6543 
6544 		dp_info("peer: "QDF_MAC_ADDR_FMT" is getting unmap",
6545 			QDF_MAC_ADDR_REF(peer->mac_addr.raw));
6546 
6547 		dp_rx_peer_unmap_handler(soc, i,
6548 					 vdev->vdev_id,
6549 					 peer->mac_addr.raw, 0,
6550 					 DP_PEER_WDS_COUNT_INVALID);
6551 		SET_PEER_REF_CNT_ONE(peer);
6552 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6553 	}
6554 
6555 }
6556 
6557 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
6558 /*
6559  * dp_txrx_alloc_vdev_stats_id()- Allocate vdev_stats_id
6560  * @soc_hdl: Datapath soc handle
6561  * @vdev_stats_id: Address of vdev_stats_id
6562  *
6563  * Return: QDF_STATUS
6564  */
6565 static QDF_STATUS dp_txrx_alloc_vdev_stats_id(struct cdp_soc_t *soc_hdl,
6566 					      uint8_t *vdev_stats_id)
6567 {
6568 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6569 	uint8_t id = 0;
6570 
6571 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
6572 		*vdev_stats_id = CDP_INVALID_VDEV_STATS_ID;
6573 		return QDF_STATUS_E_FAILURE;
6574 	}
6575 
6576 	while (id < CDP_MAX_VDEV_STATS_ID) {
6577 		if (!qdf_atomic_test_and_set_bit(id, &soc->vdev_stats_id_map)) {
6578 			*vdev_stats_id = id;
6579 			return QDF_STATUS_SUCCESS;
6580 		}
6581 		id++;
6582 	}
6583 
6584 	*vdev_stats_id = CDP_INVALID_VDEV_STATS_ID;
6585 	return QDF_STATUS_E_FAILURE;
6586 }
6587 
6588 /*
6589  * dp_txrx_reset_vdev_stats_id() - Reset vdev_stats_id in dp_soc
6590  * @soc_hdl: Datapath soc handle
6591  * @vdev_stats_id: vdev_stats_id to reset in dp_soc
6592  *
6593  * Return: none
6594  */
6595 static void dp_txrx_reset_vdev_stats_id(struct cdp_soc_t *soc_hdl,
6596 					uint8_t vdev_stats_id)
6597 {
6598 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6599 
6600 	if ((!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) ||
6601 	    (vdev_stats_id >= CDP_MAX_VDEV_STATS_ID))
6602 		return;
6603 
6604 	qdf_atomic_clear_bit(vdev_stats_id, &soc->vdev_stats_id_map);
6605 }
6606 #else
/* No-op when QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT is compiled out */
static void dp_txrx_reset_vdev_stats_id(struct cdp_soc_t *soc,
					uint8_t vdev_stats_id)
{}
6610 #endif
6611 /*
6612  * dp_vdev_detach_wifi3() - Detach txrx vdev
6613  * @cdp_soc: Datapath soc handle
6614  * @vdev_id: VDEV Id
6615  * @callback: Callback OL_IF on completion of detach
6616  * @cb_context:	Callback context
6617  *
6618  */
6619 static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc,
6620 				       uint8_t vdev_id,
6621 				       ol_txrx_vdev_delete_cb callback,
6622 				       void *cb_context)
6623 {
6624 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
6625 	struct dp_pdev *pdev;
6626 	struct dp_neighbour_peer *peer = NULL;
6627 	struct dp_peer *vap_self_peer = NULL;
6628 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
6629 						     DP_MOD_ID_CDP);
6630 
6631 	if (!vdev)
6632 		return QDF_STATUS_E_FAILURE;
6633 
6634 	soc->arch_ops.txrx_vdev_detach(soc, vdev);
6635 
6636 	pdev = vdev->pdev;
6637 
6638 	vap_self_peer = dp_sta_vdev_self_peer_ref_n_get(soc, vdev,
6639 							DP_MOD_ID_CONFIG);
6640 	if (vap_self_peer) {
6641 		qdf_spin_lock_bh(&soc->ast_lock);
6642 		if (vap_self_peer->self_ast_entry) {
6643 			dp_peer_del_ast(soc, vap_self_peer->self_ast_entry);
6644 			vap_self_peer->self_ast_entry = NULL;
6645 		}
6646 		qdf_spin_unlock_bh(&soc->ast_lock);
6647 
6648 		dp_peer_delete_wifi3((struct cdp_soc_t *)soc, vdev->vdev_id,
6649 				     vap_self_peer->mac_addr.raw, 0);
6650 		dp_peer_unref_delete(vap_self_peer, DP_MOD_ID_CONFIG);
6651 	}
6652 
6653 	/*
6654 	 * If Target is hung, flush all peers before detaching vdev
6655 	 * this will free all references held due to missing
6656 	 * unmap commands from Target
6657 	 */
6658 	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
6659 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, false);
6660 	else if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
6661 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, true);
6662 
6663 	/* indicate that the vdev needs to be deleted */
6664 	vdev->delete.pending = 1;
6665 	dp_rx_vdev_detach(vdev);
6666 	/*
6667 	 * move it after dp_rx_vdev_detach(),
6668 	 * as the call back done in dp_rx_vdev_detach()
6669 	 * still need to get vdev pointer by vdev_id.
6670 	 */
6671 	dp_vdev_id_map_tbl_remove(soc, vdev);
6672 
6673 	dp_monitor_neighbour_peer_list_remove(pdev, vdev, peer);
6674 
6675 	dp_txrx_reset_vdev_stats_id(cdp_soc, vdev->vdev_stats_id);
6676 
6677 	dp_tx_vdev_multipass_deinit(vdev);
6678 
6679 	if (vdev->vdev_dp_ext_handle) {
6680 		qdf_mem_free(vdev->vdev_dp_ext_handle);
6681 		vdev->vdev_dp_ext_handle = NULL;
6682 	}
6683 	vdev->delete.callback = callback;
6684 	vdev->delete.context = cb_context;
6685 
6686 	if (vdev->opmode != wlan_op_mode_monitor)
6687 		dp_vdev_pdev_list_remove(soc, pdev, vdev);
6688 
6689 	pdev->vdev_count--;
6690 	/* release reference taken above for find */
6691 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6692 
6693 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
6694 	TAILQ_INSERT_TAIL(&soc->inactive_vdev_list, vdev, inactive_list_elem);
6695 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
6696 
6697 	/* release reference taken at dp_vdev_create */
6698 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
6699 
6700 	return QDF_STATUS_SUCCESS;
6701 }
6702 
6703 #ifdef WLAN_FEATURE_11BE_MLO
6704 /**
6705  * is_dp_peer_can_reuse() - check if the dp_peer match condition to be reused
6706  * @vdev: Target DP vdev handle
6707  * @peer: DP peer handle to be checked
6708  * @peer_mac_addr: Target peer mac address
6709  * @peer_type: Target peer type
6710  *
6711  * Return: true - if match, false - not match
6712  */
6713 static inline
6714 bool is_dp_peer_can_reuse(struct dp_vdev *vdev,
6715 			  struct dp_peer *peer,
6716 			  uint8_t *peer_mac_addr,
6717 			  enum cdp_peer_type peer_type)
6718 {
6719 	if (peer->bss_peer && (peer->vdev == vdev) &&
6720 	    (peer->peer_type == peer_type) &&
6721 	    (qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw,
6722 			 QDF_MAC_ADDR_SIZE) == 0))
6723 		return true;
6724 
6725 	return false;
6726 }
6727 #else
6728 static inline
6729 bool is_dp_peer_can_reuse(struct dp_vdev *vdev,
6730 			  struct dp_peer *peer,
6731 			  uint8_t *peer_mac_addr,
6732 			  enum cdp_peer_type peer_type)
6733 {
6734 	if (peer->bss_peer && (peer->vdev == vdev) &&
6735 	    (qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw,
6736 			 QDF_MAC_ADDR_SIZE) == 0))
6737 		return true;
6738 
6739 	return false;
6740 }
6741 #endif
6742 
6743 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
6744 						uint8_t *peer_mac_addr,
6745 						enum cdp_peer_type peer_type)
6746 {
6747 	struct dp_peer *peer;
6748 	struct dp_soc *soc = vdev->pdev->soc;
6749 
6750 	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
6751 	TAILQ_FOREACH(peer, &soc->inactive_peer_list,
6752 		      inactive_list_elem) {
6753 
6754 		/* reuse bss peer only when vdev matches*/
6755 		if (is_dp_peer_can_reuse(vdev, peer,
6756 					 peer_mac_addr, peer_type)) {
6757 			/* increment ref count for cdp_peer_create*/
6758 			if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG) ==
6759 						QDF_STATUS_SUCCESS) {
6760 				TAILQ_REMOVE(&soc->inactive_peer_list, peer,
6761 					     inactive_list_elem);
6762 				qdf_spin_unlock_bh
6763 					(&soc->inactive_peer_list_lock);
6764 				return peer;
6765 			}
6766 		}
6767 	}
6768 
6769 	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
6770 	return NULL;
6771 }
6772 
6773 #ifdef FEATURE_AST
6774 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
6775 					       struct dp_pdev *pdev,
6776 					       uint8_t *peer_mac_addr)
6777 {
6778 	struct dp_ast_entry *ast_entry;
6779 
6780 	if (soc->ast_offload_support)
6781 		return;
6782 
6783 	qdf_spin_lock_bh(&soc->ast_lock);
6784 	if (soc->ast_override_support)
6785 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
6786 							    pdev->pdev_id);
6787 	else
6788 		ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
6789 
6790 	if (ast_entry && ast_entry->next_hop && !ast_entry->delete_in_progress)
6791 		dp_peer_del_ast(soc, ast_entry);
6792 
6793 	qdf_spin_unlock_bh(&soc->ast_lock);
6794 }
6795 #endif
6796 
6797 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
6798 /*
6799  * dp_peer_hw_txrx_stats_init() - Initialize hw_txrx_stats_en in dp_peer
6800  * @soc: Datapath soc handle
6801  * @peer: Datapath peer handle
6802  *
6803  * Return: none
6804  */
6805 static inline
6806 void dp_peer_hw_txrx_stats_init(struct dp_soc *soc,
6807 				struct dp_txrx_peer *txrx_peer)
6808 {
6809 	txrx_peer->hw_txrx_stats_en =
6810 		wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx);
6811 }
6812 #else
/* HW vdev-stats offload compiled out: never enable HW stats per peer */
static inline
void dp_peer_hw_txrx_stats_init(struct dp_soc *soc,
				struct dp_txrx_peer *txrx_peer)
{
	txrx_peer->hw_txrx_stats_en = 0;
}
6819 #endif
6820 
6821 static QDF_STATUS dp_txrx_peer_detach(struct dp_soc *soc, struct dp_peer *peer)
6822 {
6823 	struct dp_txrx_peer *txrx_peer;
6824 	struct dp_pdev *pdev;
6825 
6826 	/* dp_txrx_peer exists for mld peer and legacy peer */
6827 	if (peer->txrx_peer) {
6828 		txrx_peer = peer->txrx_peer;
6829 		peer->txrx_peer = NULL;
6830 		pdev = txrx_peer->vdev->pdev;
6831 
6832 		dp_peer_defrag_rx_tids_deinit(txrx_peer);
6833 		/*
6834 		 * Deallocate the extended stats contenxt
6835 		 */
6836 		dp_peer_delay_stats_ctx_dealloc(soc, txrx_peer);
6837 		dp_peer_rx_bufq_resources_deinit(txrx_peer);
6838 		dp_peer_jitter_stats_ctx_dealloc(pdev, txrx_peer);
6839 
6840 		qdf_mem_free(txrx_peer);
6841 	}
6842 
6843 	return QDF_STATUS_SUCCESS;
6844 }
6845 
6846 static QDF_STATUS dp_txrx_peer_attach(struct dp_soc *soc, struct dp_peer *peer)
6847 {
6848 	struct dp_txrx_peer *txrx_peer;
6849 	struct dp_pdev *pdev;
6850 
6851 	txrx_peer = (struct dp_txrx_peer *)qdf_mem_malloc(sizeof(*txrx_peer));
6852 
6853 	if (!txrx_peer)
6854 		return QDF_STATUS_E_NOMEM; /* failure */
6855 
6856 	txrx_peer->peer_id = HTT_INVALID_PEER;
6857 	/* initialize the peer_id */
6858 	txrx_peer->vdev = peer->vdev;
6859 	pdev = peer->vdev->pdev;
6860 
6861 	DP_STATS_INIT(txrx_peer);
6862 
6863 	dp_wds_ext_peer_init(txrx_peer);
6864 	dp_peer_rx_bufq_resources_init(txrx_peer);
6865 	dp_peer_hw_txrx_stats_init(soc, txrx_peer);
6866 	/*
6867 	 * Allocate peer extended stats context. Fall through in
6868 	 * case of failure as its not an implicit requirement to have
6869 	 * this object for regular statistics updates.
6870 	 */
6871 	if (dp_peer_delay_stats_ctx_alloc(soc, txrx_peer) !=
6872 					  QDF_STATUS_SUCCESS)
6873 		dp_warn("peer delay_stats ctx alloc failed");
6874 
6875 	/*
6876 	 * Alloctate memory for jitter stats. Fall through in
6877 	 * case of failure as its not an implicit requirement to have
6878 	 * this object for regular statistics updates.
6879 	 */
6880 	if (dp_peer_jitter_stats_ctx_alloc(pdev, txrx_peer) !=
6881 					   QDF_STATUS_SUCCESS)
6882 		dp_warn("peer jitter_stats ctx alloc failed");
6883 
6884 	dp_set_peer_isolation(txrx_peer, false);
6885 
6886 	dp_peer_defrag_rx_tids_init(txrx_peer);
6887 	dp_txrx_peer_attach_add(soc, peer, txrx_peer);
6888 
6889 	return QDF_STATUS_SUCCESS;
6890 }
6891 
/* Reset all per-txrx_peer statistics when a peer object is reused */
static inline
void dp_txrx_peer_stats_clr(struct dp_txrx_peer *txrx_peer)
{
	if (!txrx_peer)
		return;

	/* Cumulative tx/rx counters kept outside the DP_STATS block */
	txrx_peer->tx_failed = 0;
	txrx_peer->comp_pkt.num = 0;
	txrx_peer->comp_pkt.bytes = 0;
	txrx_peer->to_stack.num = 0;
	txrx_peer->to_stack.bytes = 0;

	DP_STATS_CLR(txrx_peer);
	dp_peer_delay_stats_ctx_clr(txrx_peer);
	dp_peer_jitter_stats_ctx_clr(txrx_peer);
}
6908 
6909 /*
6910  * dp_peer_create_wifi3() - attach txrx peer
6911  * @soc_hdl: Datapath soc handle
6912  * @vdev_id: id of vdev
6913  * @peer_mac_addr: Peer MAC address
6914  * @peer_type: link or MLD peer type
6915  *
6916  * Return: 0 on success, -1 on failure
6917  */
6918 static QDF_STATUS
6919 dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
6920 		     uint8_t *peer_mac_addr, enum cdp_peer_type peer_type)
6921 {
6922 	struct dp_peer *peer;
6923 	int i;
6924 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6925 	struct dp_pdev *pdev;
6926 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
6927 	struct dp_vdev *vdev = NULL;
6928 
6929 	if (!peer_mac_addr)
6930 		return QDF_STATUS_E_FAILURE;
6931 
6932 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
6933 
6934 	if (!vdev)
6935 		return QDF_STATUS_E_FAILURE;
6936 
6937 	pdev = vdev->pdev;
6938 	soc = pdev->soc;
6939 
6940 	/*
6941 	 * If a peer entry with given MAC address already exists,
6942 	 * reuse the peer and reset the state of peer.
6943 	 */
6944 	peer = dp_peer_can_reuse(vdev, peer_mac_addr, peer_type);
6945 
6946 	if (peer) {
6947 		qdf_atomic_init(&peer->is_default_route_set);
6948 		dp_peer_cleanup(vdev, peer);
6949 
6950 		dp_peer_vdev_list_add(soc, vdev, peer);
6951 		dp_peer_find_hash_add(soc, peer);
6952 
6953 		dp_peer_rx_tids_create(peer);
6954 		if (IS_MLO_DP_MLD_PEER(peer))
6955 			dp_mld_peer_init_link_peers_info(peer);
6956 
6957 		qdf_spin_lock_bh(&soc->ast_lock);
6958 		dp_peer_delete_ast_entries(soc, peer);
6959 		qdf_spin_unlock_bh(&soc->ast_lock);
6960 
6961 		if ((vdev->opmode == wlan_op_mode_sta) &&
6962 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
6963 		     QDF_MAC_ADDR_SIZE)) {
6964 			ast_type = CDP_TXRX_AST_TYPE_SELF;
6965 		}
6966 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
6967 
6968 		peer->valid = 1;
6969 		dp_local_peer_id_alloc(pdev, peer);
6970 
6971 		qdf_spinlock_create(&peer->peer_info_lock);
6972 
6973 		DP_STATS_INIT(peer);
6974 
6975 		/*
6976 		 * In tx_monitor mode, filter may be set for unassociated peer
6977 		 * when unassociated peer get associated peer need to
6978 		 * update tx_cap_enabled flag to support peer filter.
6979 		 */
6980 		if (!IS_MLO_DP_MLD_PEER(peer)) {
6981 			dp_monitor_peer_tx_capture_filter_check(pdev, peer);
6982 			dp_monitor_peer_reset_stats(soc, peer);
6983 		}
6984 
6985 		if (peer->txrx_peer) {
6986 			dp_peer_rx_bufq_resources_init(peer->txrx_peer);
6987 			dp_txrx_peer_stats_clr(peer->txrx_peer);
6988 			dp_set_peer_isolation(peer->txrx_peer, false);
6989 			dp_wds_ext_peer_init(peer->txrx_peer);
6990 			dp_peer_hw_txrx_stats_init(soc, peer->txrx_peer);
6991 		}
6992 
6993 		dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
6994 
6995 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6996 		return QDF_STATUS_SUCCESS;
6997 	} else {
6998 		/*
6999 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
7000 		 * need to remove the AST entry which was earlier added as a WDS
7001 		 * entry.
7002 		 * If an AST entry exists, but no peer entry exists with a given
7003 		 * MAC addresses, we could deduce it as a WDS entry
7004 		 */
7005 		dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
7006 	}
7007 
7008 #ifdef notyet
7009 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
7010 		soc->mempool_ol_ath_peer);
7011 #else
7012 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
7013 #endif
7014 	wlan_minidump_log(peer,
7015 			  sizeof(*peer),
7016 			  soc->ctrl_psoc,
7017 			  WLAN_MD_DP_PEER, "dp_peer");
7018 	if (!peer) {
7019 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7020 		return QDF_STATUS_E_FAILURE; /* failure */
7021 	}
7022 
7023 	qdf_mem_zero(peer, sizeof(struct dp_peer));
7024 
7025 	/* store provided params */
7026 	peer->vdev = vdev;
7027 
7028 	/* initialize the peer_id */
7029 	peer->peer_id = HTT_INVALID_PEER;
7030 
7031 	qdf_mem_copy(
7032 		&peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE);
7033 
7034 	DP_PEER_SET_TYPE(peer, peer_type);
7035 	if (IS_MLO_DP_MLD_PEER(peer)) {
7036 		if (dp_txrx_peer_attach(soc, peer) !=
7037 				QDF_STATUS_SUCCESS)
7038 			goto fail; /* failure */
7039 
7040 		dp_mld_peer_init_link_peers_info(peer);
7041 	} else if (dp_monitor_peer_attach(soc, peer) !=
7042 				QDF_STATUS_SUCCESS)
7043 		dp_warn("peer monitor ctx alloc failed");
7044 
7045 	TAILQ_INIT(&peer->ast_entry_list);
7046 
7047 	/* get the vdev reference for new peer */
7048 	dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CHILD);
7049 
7050 	if ((vdev->opmode == wlan_op_mode_sta) &&
7051 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
7052 			 QDF_MAC_ADDR_SIZE)) {
7053 		ast_type = CDP_TXRX_AST_TYPE_SELF;
7054 	}
7055 	qdf_spinlock_create(&peer->peer_state_lock);
7056 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
7057 	qdf_spinlock_create(&peer->peer_info_lock);
7058 
7059 	/* reset the ast index to flowid table */
7060 	dp_peer_reset_flowq_map(peer);
7061 
7062 	qdf_atomic_init(&peer->ref_cnt);
7063 
7064 	for (i = 0; i < DP_MOD_ID_MAX; i++)
7065 		qdf_atomic_init(&peer->mod_refs[i]);
7066 
7067 	/* keep one reference for attach */
7068 	qdf_atomic_inc(&peer->ref_cnt);
7069 	qdf_atomic_inc(&peer->mod_refs[DP_MOD_ID_CONFIG]);
7070 
7071 	dp_peer_vdev_list_add(soc, vdev, peer);
7072 
7073 	/* TODO: See if hash based search is required */
7074 	dp_peer_find_hash_add(soc, peer);
7075 
7076 	/* Initialize the peer state */
7077 	peer->state = OL_TXRX_PEER_STATE_DISC;
7078 
7079 	dp_info("vdev %pK created peer %pK ("QDF_MAC_ADDR_FMT") ref_cnt: %d",
7080 		vdev, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
7081 		qdf_atomic_read(&peer->ref_cnt));
7082 	/*
7083 	 * For every peer MAp message search and set if bss_peer
7084 	 */
7085 	if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
7086 			QDF_MAC_ADDR_SIZE) == 0 &&
7087 			(wlan_op_mode_sta != vdev->opmode)) {
7088 		dp_info("vdev bss_peer!!");
7089 		peer->bss_peer = 1;
7090 		if (peer->txrx_peer)
7091 			peer->txrx_peer->bss_peer = 1;
7092 	}
7093 
7094 	if (wlan_op_mode_sta == vdev->opmode &&
7095 	    qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
7096 			QDF_MAC_ADDR_SIZE) == 0) {
7097 		peer->sta_self_peer = 1;
7098 	}
7099 
7100 	dp_peer_rx_tids_create(peer);
7101 
7102 	peer->valid = 1;
7103 	dp_local_peer_id_alloc(pdev, peer);
7104 	DP_STATS_INIT(peer);
7105 
7106 	dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
7107 
7108 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7109 
7110 	return QDF_STATUS_SUCCESS;
7111 fail:
7112 	qdf_mem_free(peer);
7113 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7114 
7115 	return QDF_STATUS_E_FAILURE;
7116 }
7117 
7118 static QDF_STATUS dp_peer_legacy_setup(struct dp_soc *soc, struct dp_peer *peer)
7119 {
7120 	/* txrx_peer might exist already in peer reuse case */
7121 	if (peer->txrx_peer)
7122 		return QDF_STATUS_SUCCESS;
7123 
7124 	if (dp_txrx_peer_attach(soc, peer) !=
7125 				QDF_STATUS_SUCCESS) {
7126 		dp_err("peer txrx ctx alloc failed");
7127 		return QDF_STATUS_E_FAILURE;
7128 	}
7129 
7130 	return QDF_STATUS_SUCCESS;
7131 }
7132 
7133 #ifdef WLAN_FEATURE_11BE_MLO
7134 QDF_STATUS dp_peer_mlo_setup(
7135 			struct dp_soc *soc,
7136 			struct dp_peer *peer,
7137 			uint8_t vdev_id,
7138 			struct cdp_peer_setup_info *setup_info)
7139 {
7140 	struct dp_peer *mld_peer = NULL;
7141 
7142 	/* Non-MLO connection, do nothing */
7143 	if (!setup_info || !setup_info->mld_peer_mac)
7144 		return QDF_STATUS_SUCCESS;
7145 
7146 	/* To do: remove this check if link/mld peer mac_addr allow to same */
7147 	if (!qdf_mem_cmp(setup_info->mld_peer_mac, peer->mac_addr.raw,
7148 			 QDF_MAC_ADDR_SIZE)) {
7149 		dp_peer_err("Same mac addres for link/mld peer");
7150 		return QDF_STATUS_E_FAILURE;
7151 	}
7152 
7153 	/* if this is the first link peer */
7154 	if (setup_info->is_first_link)
7155 		/* create MLD peer */
7156 		dp_peer_create_wifi3((struct cdp_soc_t *)soc,
7157 				     vdev_id,
7158 				     setup_info->mld_peer_mac,
7159 				     CDP_MLD_PEER_TYPE);
7160 
7161 	peer->first_link = setup_info->is_first_link;
7162 	peer->primary_link = setup_info->is_primary_link;
7163 	mld_peer = dp_peer_find_hash_find(soc,
7164 					  setup_info->mld_peer_mac,
7165 					  0, DP_VDEV_ALL, DP_MOD_ID_CDP);
7166 	if (mld_peer) {
7167 		if (setup_info->is_first_link) {
7168 			/* assign rx_tid to mld peer */
7169 			mld_peer->rx_tid = peer->rx_tid;
7170 			/* no cdp_peer_setup for MLD peer,
7171 			 * set it for addba processing
7172 			 */
7173 			qdf_atomic_set(&mld_peer->is_default_route_set, 1);
7174 		} else {
7175 			/* free link peer origial rx_tids mem */
7176 			dp_peer_rx_tids_destroy(peer);
7177 			/* assign mld peer rx_tid to link peer */
7178 			peer->rx_tid = mld_peer->rx_tid;
7179 		}
7180 
7181 		if (setup_info->is_primary_link &&
7182 		    !setup_info->is_first_link) {
7183 			/*
7184 			 * if first link is not the primary link,
7185 			 * then need to change mld_peer->vdev as
7186 			 * primary link dp_vdev is not same one
7187 			 * during mld peer creation.
7188 			 */
7189 
7190 			/* relase the ref to original dp_vdev */
7191 			dp_vdev_unref_delete(soc, mld_peer->vdev,
7192 					     DP_MOD_ID_CHILD);
7193 			/*
7194 			 * get the ref to new dp_vdev,
7195 			 * increase dp_vdev ref_cnt
7196 			 */
7197 			mld_peer->vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
7198 							       DP_MOD_ID_CHILD);
7199 		}
7200 
7201 		/* associate mld and link peer */
7202 		dp_link_peer_add_mld_peer(peer, mld_peer);
7203 		dp_mld_peer_add_link_peer(mld_peer, peer);
7204 
7205 		mld_peer->txrx_peer->mld_peer = 1;
7206 		dp_peer_unref_delete(mld_peer, DP_MOD_ID_CDP);
7207 	} else {
7208 		peer->mld_peer = NULL;
7209 		dp_err("mld peer" QDF_MAC_ADDR_FMT "not found!",
7210 		       QDF_MAC_ADDR_REF(setup_info->mld_peer_mac));
7211 		return QDF_STATUS_E_FAILURE;
7212 	}
7213 
7214 	return QDF_STATUS_SUCCESS;
7215 }
7216 
7217 /*
7218  * dp_mlo_peer_authorize() - authorize MLO peer
7219  * @soc: soc handle
7220  * @peer: pointer to link peer
7221  *
7222  * return void
7223  */
7224 static void dp_mlo_peer_authorize(struct dp_soc *soc,
7225 				  struct dp_peer *peer)
7226 {
7227 	int i;
7228 	struct dp_peer *link_peer = NULL;
7229 	struct dp_peer *mld_peer = peer->mld_peer;
7230 	struct dp_mld_link_peers link_peers_info;
7231 
7232 	if (!mld_peer)
7233 		return;
7234 
7235 	/* get link peers with reference */
7236 	dp_get_link_peers_ref_from_mld_peer(soc, mld_peer,
7237 					    &link_peers_info,
7238 					    DP_MOD_ID_CDP);
7239 
7240 	for (i = 0; i < link_peers_info.num_links; i++) {
7241 		link_peer = link_peers_info.link_peers[i];
7242 
7243 		if (!link_peer->authorize) {
7244 			dp_release_link_peers_ref(&link_peers_info,
7245 						  DP_MOD_ID_CDP);
7246 			mld_peer->authorize = false;
7247 			return;
7248 		}
7249 	}
7250 
7251 	/* if we are here all link peers are authorized,
7252 	 * authorize ml_peer also
7253 	 */
7254 	mld_peer->authorize = true;
7255 
7256 	/* release link peers reference */
7257 	dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
7258 }
7259 #endif
7260 
7261 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
7262 				  enum cdp_host_reo_dest_ring *reo_dest,
7263 				  bool *hash_based)
7264 {
7265 	struct dp_soc *soc;
7266 	struct dp_pdev *pdev;
7267 
7268 	pdev = vdev->pdev;
7269 	soc = pdev->soc;
7270 	/*
7271 	 * hash based steering is disabled for Radios which are offloaded
7272 	 * to NSS
7273 	 */
7274 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
7275 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
7276 
7277 	/*
7278 	 * Below line of code will ensure the proper reo_dest ring is chosen
7279 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
7280 	 */
7281 	*reo_dest = pdev->reo_dest;
7282 }
7283 
7284 #ifdef IPA_OFFLOAD
7285 /**
7286  * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
7287  * @vdev: Virtual device
7288  *
7289  * Return: true if the vdev is of subtype P2P
7290  *	   false if the vdev is of any other subtype
7291  */
7292 static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
7293 {
7294 	if (vdev->subtype == wlan_op_subtype_p2p_device ||
7295 	    vdev->subtype == wlan_op_subtype_p2p_cli ||
7296 	    vdev->subtype == wlan_op_subtype_p2p_go)
7297 		return true;
7298 
7299 	return false;
7300 }
7301 
7302 /*
7303  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
7304  * @vdev: Datapath VDEV handle
7305  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
7306  * @hash_based: pointer to hash value (enabled/disabled) to be populated
7307  *
7308  * If IPA is enabled in ini, for SAP mode, disable hash based
7309  * steering, use default reo_dst ring for RX. Use config values for other modes.
7310  * Return: None
7311  */
7312 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
7313 				       struct cdp_peer_setup_info *setup_info,
7314 				       enum cdp_host_reo_dest_ring *reo_dest,
7315 				       bool *hash_based,
7316 				       uint8_t *lmac_peer_id_msb)
7317 {
7318 	struct dp_soc *soc;
7319 	struct dp_pdev *pdev;
7320 
7321 	pdev = vdev->pdev;
7322 	soc = pdev->soc;
7323 
7324 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
7325 
7326 	/* For P2P-GO interfaces we do not need to change the REO
7327 	 * configuration even if IPA config is enabled
7328 	 */
7329 	if (dp_is_vdev_subtype_p2p(vdev))
7330 		return;
7331 
7332 	/*
7333 	 * If IPA is enabled, disable hash-based flow steering and set
7334 	 * reo_dest_ring_4 as the REO ring to receive packets on.
7335 	 * IPA is configured to reap reo_dest_ring_4.
7336 	 *
7337 	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
7338 	 * value enum value is from 1 - 4.
7339 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
7340 	 */
7341 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
7342 		if (vdev->opmode == wlan_op_mode_ap) {
7343 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
7344 			*hash_based = 0;
7345 		} else if (vdev->opmode == wlan_op_mode_sta &&
7346 			   dp_ipa_is_mdm_platform()) {
7347 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
7348 		}
7349 	}
7350 }
7351 
7352 #else
7353 
7354 /*
7355  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
7356  * @vdev: Datapath VDEV handle
7357  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
7358  * @hash_based: pointer to hash value (enabled/disabled) to be populated
7359  *
7360  * Use system config values for hash based steering.
7361  * Return: None
7362  */
7363 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
7364 				       struct cdp_peer_setup_info *setup_info,
7365 				       enum cdp_host_reo_dest_ring *reo_dest,
7366 				       bool *hash_based,
7367 				       uint8_t *lmac_peer_id_msb)
7368 {
7369 	struct dp_soc *soc = vdev->pdev->soc;
7370 
7371 	soc->arch_ops.peer_get_reo_hash(vdev, setup_info, reo_dest, hash_based,
7372 					lmac_peer_id_msb);
7373 }
7374 #endif /* IPA_OFFLOAD */
7375 
7376 /*
7377  * dp_peer_setup_wifi3() - initialize the peer
7378  * @soc_hdl: soc handle object
7379  * @vdev_id : vdev_id of vdev object
7380  * @peer_mac: Peer's mac address
7381  * @peer_setup_info: peer setup info for MLO
7382  *
7383  * Return: QDF_STATUS
7384  */
7385 static QDF_STATUS
7386 dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
7387 		    uint8_t *peer_mac,
7388 		    struct cdp_peer_setup_info *setup_info)
7389 {
7390 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7391 	struct dp_pdev *pdev;
7392 	bool hash_based = 0;
7393 	enum cdp_host_reo_dest_ring reo_dest;
7394 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7395 	struct dp_vdev *vdev = NULL;
7396 	struct dp_peer *peer =
7397 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
7398 					       DP_MOD_ID_CDP);
7399 	struct dp_peer *mld_peer = NULL;
7400 	enum wlan_op_mode vdev_opmode;
7401 	uint8_t lmac_peer_id_msb = 0;
7402 
7403 	if (!peer)
7404 		return QDF_STATUS_E_FAILURE;
7405 
7406 	vdev = peer->vdev;
7407 	if (!vdev) {
7408 		status = QDF_STATUS_E_FAILURE;
7409 		goto fail;
7410 	}
7411 
7412 	/* save vdev related member in case vdev freed */
7413 	vdev_opmode = vdev->opmode;
7414 	pdev = vdev->pdev;
7415 	dp_peer_setup_get_reo_hash(vdev, setup_info,
7416 				   &reo_dest, &hash_based,
7417 				   &lmac_peer_id_msb);
7418 
7419 	dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u",
7420 		pdev->pdev_id, vdev->vdev_id,
7421 		vdev->opmode, hash_based, reo_dest);
7422 
7423 	/*
7424 	 * There are corner cases where the AD1 = AD2 = "VAPs address"
7425 	 * i.e both the devices have same MAC address. In these
7426 	 * cases we want such pkts to be processed in NULL Q handler
7427 	 * which is REO2TCL ring. for this reason we should
7428 	 * not setup reo_queues and default route for bss_peer.
7429 	 */
7430 	if (!IS_MLO_DP_MLD_PEER(peer))
7431 		dp_monitor_peer_tx_init(pdev, peer);
7432 
7433 	if (!setup_info)
7434 		if (dp_peer_legacy_setup(soc, peer) !=
7435 				QDF_STATUS_SUCCESS) {
7436 			status = QDF_STATUS_E_RESOURCES;
7437 			goto fail;
7438 		}
7439 
7440 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
7441 		status = QDF_STATUS_E_FAILURE;
7442 		goto fail;
7443 	}
7444 
7445 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
7446 		/* TODO: Check the destination ring number to be passed to FW */
7447 		soc->cdp_soc.ol_ops->peer_set_default_routing(
7448 				soc->ctrl_psoc,
7449 				peer->vdev->pdev->pdev_id,
7450 				peer->mac_addr.raw,
7451 				peer->vdev->vdev_id, hash_based, reo_dest,
7452 				lmac_peer_id_msb);
7453 	}
7454 
7455 	qdf_atomic_set(&peer->is_default_route_set, 1);
7456 
7457 	status = dp_peer_mlo_setup(soc, peer, vdev->vdev_id, setup_info);
7458 	if (QDF_IS_STATUS_ERROR(status)) {
7459 		dp_peer_err("peer mlo setup failed");
7460 		qdf_assert_always(0);
7461 	}
7462 
7463 	if (vdev_opmode != wlan_op_mode_monitor) {
7464 		/* In case of MLD peer, switch peer to mld peer and
7465 		 * do peer_rx_init.
7466 		 */
7467 		if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
7468 		    IS_MLO_DP_LINK_PEER(peer)) {
7469 			if (setup_info && setup_info->is_first_link) {
7470 				mld_peer = DP_GET_MLD_PEER_FROM_PEER(peer);
7471 				if (mld_peer)
7472 					dp_peer_rx_init(pdev, mld_peer);
7473 				else
7474 					dp_peer_err("MLD peer null. Primary link peer:%pK", peer);
7475 			}
7476 		} else {
7477 			dp_peer_rx_init(pdev, peer);
7478 		}
7479 	}
7480 
7481 	if (!IS_MLO_DP_MLD_PEER(peer))
7482 		dp_peer_ppdu_delayed_ba_init(peer);
7483 
7484 fail:
7485 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7486 	return status;
7487 }
7488 
7489 /*
7490  * dp_cp_peer_del_resp_handler - Handle the peer delete response
7491  * @soc_hdl: Datapath SOC handle
7492  * @vdev_id: id of virtual device object
7493  * @mac_addr: Mac address of the peer
7494  *
7495  * Return: QDF_STATUS
7496  */
7497 static QDF_STATUS dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl,
7498 					      uint8_t vdev_id,
7499 					      uint8_t *mac_addr)
7500 {
7501 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7502 	struct dp_ast_entry  *ast_entry = NULL;
7503 	txrx_ast_free_cb cb = NULL;
7504 	void *cookie;
7505 
7506 	if (soc->ast_offload_support)
7507 		return QDF_STATUS_E_INVAL;
7508 
7509 	qdf_spin_lock_bh(&soc->ast_lock);
7510 
7511 	ast_entry =
7512 		dp_peer_ast_hash_find_by_vdevid(soc, mac_addr,
7513 						vdev_id);
7514 
7515 	/* in case of qwrap we have multiple BSS peers
7516 	 * with same mac address
7517 	 *
7518 	 * AST entry for this mac address will be created
7519 	 * only for one peer hence it will be NULL here
7520 	 */
7521 	if ((!ast_entry || !ast_entry->delete_in_progress) ||
7522 	    (ast_entry->peer_id != HTT_INVALID_PEER)) {
7523 		qdf_spin_unlock_bh(&soc->ast_lock);
7524 		return QDF_STATUS_E_FAILURE;
7525 	}
7526 
7527 	if (ast_entry->is_mapped)
7528 		soc->ast_table[ast_entry->ast_idx] = NULL;
7529 
7530 	DP_STATS_INC(soc, ast.deleted, 1);
7531 	dp_peer_ast_hash_remove(soc, ast_entry);
7532 
7533 	cb = ast_entry->callback;
7534 	cookie = ast_entry->cookie;
7535 	ast_entry->callback = NULL;
7536 	ast_entry->cookie = NULL;
7537 
7538 	soc->num_ast_entries--;
7539 	qdf_spin_unlock_bh(&soc->ast_lock);
7540 
7541 	if (cb) {
7542 		cb(soc->ctrl_psoc,
7543 		   dp_soc_to_cdp_soc(soc),
7544 		   cookie,
7545 		   CDP_TXRX_AST_DELETED);
7546 	}
7547 	qdf_mem_free(ast_entry);
7548 
7549 	return QDF_STATUS_SUCCESS;
7550 }
7551 
7552 /*
7553  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
7554  * @txrx_soc: cdp soc handle
7555  * @ac: Access category
7556  * @value: timeout value in millisec
7557  *
7558  * Return: void
7559  */
7560 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
7561 				    uint8_t ac, uint32_t value)
7562 {
7563 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
7564 
7565 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
7566 }
7567 
7568 /*
7569  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
7570  * @txrx_soc: cdp soc handle
7571  * @ac: access category
7572  * @value: timeout value in millisec
7573  *
7574  * Return: void
7575  */
7576 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
7577 				    uint8_t ac, uint32_t *value)
7578 {
7579 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
7580 
7581 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
7582 }
7583 
7584 /*
7585  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
7586  * @txrx_soc: cdp soc handle
7587  * @pdev_id: id of physical device object
7588  * @val: reo destination ring index (1 - 4)
7589  *
7590  * Return: QDF_STATUS
7591  */
7592 static QDF_STATUS
7593 dp_set_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id,
7594 		     enum cdp_host_reo_dest_ring val)
7595 {
7596 	struct dp_pdev *pdev =
7597 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
7598 						   pdev_id);
7599 
7600 	if (pdev) {
7601 		pdev->reo_dest = val;
7602 		return QDF_STATUS_SUCCESS;
7603 	}
7604 
7605 	return QDF_STATUS_E_FAILURE;
7606 }
7607 
7608 /*
7609  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
7610  * @txrx_soc: cdp soc handle
7611  * @pdev_id: id of physical device object
7612  *
7613  * Return: reo destination ring index
7614  */
7615 static enum cdp_host_reo_dest_ring
7616 dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id)
7617 {
7618 	struct dp_pdev *pdev =
7619 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
7620 						   pdev_id);
7621 
7622 	if (pdev)
7623 		return pdev->reo_dest;
7624 	else
7625 		return cdp_host_reo_dest_ring_unknown;
7626 }
7627 
7628 #ifdef WLAN_SUPPORT_SCS
7629 /*
7630  * dp_enable_scs_params - Enable/Disable SCS procedures
7631  * @soc - Datapath soc handle
7632  * @peer_mac - STA Mac address
7633  * @vdev_id - ID of the vdev handle
7634  * @active - Flag to set SCS active/inactive
7635  * return type - QDF_STATUS - Success/Invalid
7636  */
7637 static QDF_STATUS
7638 dp_enable_scs_params(struct cdp_soc_t *soc_hdl, struct qdf_mac_addr
7639 		     *peer_mac,
7640 		     uint8_t vdev_id,
7641 		     bool is_active)
7642 {
7643 	struct dp_peer *peer;
7644 	QDF_STATUS status = QDF_STATUS_E_INVAL;
7645 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7646 
7647 	peer = dp_peer_find_hash_find(soc, peer_mac->bytes, 0, vdev_id,
7648 				      DP_MOD_ID_CDP);
7649 
7650 	if (!peer) {
7651 		dp_err("Peer is NULL!");
7652 		goto fail;
7653 	}
7654 
7655 	peer->scs_is_active = is_active;
7656 	status = QDF_STATUS_SUCCESS;
7657 
7658 fail:
7659 	if (peer)
7660 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7661 	return status;
7662 }
7663 
7664 /*
7665  * @brief dp_copy_scs_params - SCS Parameters sent by STA
7666  * is copied from the cdp layer to the dp layer
7667  * These parameters are then used by the peer
7668  * for traffic classification.
7669  *
7670  * @param peer - peer struct
7671  * @param scs_params - cdp layer params
7672  * @idx - SCS_entry index obtained from the
7673  * node database with a given SCSID
7674  * @return void
7675  */
7676 void
7677 dp_copy_scs_params(struct dp_peer *peer,
7678 		   struct cdp_scs_params *scs_params,
7679 		   uint8_t idx)
7680 {
7681 	uint8_t tidx = 0;
7682 	uint8_t tclas_elem;
7683 
7684 	peer->scs[idx].scsid = scs_params->scsid;
7685 	peer->scs[idx].access_priority =
7686 		scs_params->access_priority;
7687 	peer->scs[idx].tclas_elements =
7688 		scs_params->tclas_elements;
7689 	peer->scs[idx].tclas_process =
7690 		scs_params->tclas_process;
7691 
7692 	tclas_elem = peer->scs[idx].tclas_elements;
7693 
7694 	while (tidx < tclas_elem) {
7695 		qdf_mem_copy(&peer->scs[idx].tclas[tidx],
7696 			     &scs_params->tclas[tidx],
7697 			     sizeof(struct cdp_tclas_tuple));
7698 		tidx++;
7699 	}
7700 }
7701 
7702 /*
7703  * @brief dp_record_scs_params() - Copying the SCS params to a
7704  * peer based database.
7705  *
7706  * @soc - Datapath soc handle
7707  * @peer_mac - STA Mac address
7708  * @vdev_id - ID of the vdev handle
7709  * @scs_params - Structure having SCS parameters obtained
7710  * from handshake
7711  * @idx - SCS_entry index obtained from the
7712  * node database with a given SCSID
7713  * @scs_sessions - Total # of SCS sessions active
7714  *
7715  * @details
7716  * SCS parameters sent by the STA in
7717  * the SCS Request to the AP. The AP makes a note of these
7718  * parameters while sending the MSDUs to the STA, to
7719  * send the downlink traffic with correct User priority.
7720  *
7721  * return type - QDF_STATUS - Success/Invalid
7722  */
7723 static QDF_STATUS
7724 dp_record_scs_params(struct cdp_soc_t *soc_hdl, struct qdf_mac_addr
7725 		     *peer_mac,
7726 		     uint8_t vdev_id,
7727 		     struct cdp_scs_params *scs_params,
7728 		     uint8_t idx,
7729 		     uint8_t scs_sessions)
7730 {
7731 	struct dp_peer *peer;
7732 	QDF_STATUS status = QDF_STATUS_E_INVAL;
7733 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7734 
7735 	peer = dp_peer_find_hash_find(soc, peer_mac->bytes, 0, vdev_id,
7736 				      DP_MOD_ID_CDP);
7737 
7738 	if (!peer) {
7739 		dp_err("Peer is NULL!");
7740 		goto fail;
7741 	}
7742 
7743 	if (idx >= IEEE80211_SCS_MAX_NO_OF_ELEM)
7744 		goto fail;
7745 
7746 	/* SCS procedure for the peer is activated
7747 	 * as soon as we get this information from
7748 	 * the control path, unless explicitly disabled.
7749 	 */
7750 	peer->scs_is_active = 1;
7751 	dp_copy_scs_params(peer, scs_params, idx);
7752 	status = QDF_STATUS_SUCCESS;
7753 	peer->no_of_scs_sessions = scs_sessions;
7754 
7755 fail:
7756 	if (peer)
7757 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7758 	return status;
7759 }
7760 #endif
7761 
7762 #ifdef WLAN_SUPPORT_MSCS
7763 /*
7764  * dp_record_mscs_params - MSCS parameters sent by the STA in
7765  * the MSCS Request to the AP. The AP makes a note of these
7766  * parameters while comparing the MSDUs sent by the STA, to
7767  * send the downlink traffic with correct User priority.
7768  * @soc - Datapath soc handle
7769  * @peer_mac - STA Mac address
7770  * @vdev_id - ID of the vdev handle
7771  * @mscs_params - Structure having MSCS parameters obtained
7772  * from handshake
7773  * @active - Flag to set MSCS active/inactive
7774  * return type - QDF_STATUS - Success/Invalid
7775  */
7776 static QDF_STATUS
7777 dp_record_mscs_params(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
7778 		      uint8_t vdev_id, struct cdp_mscs_params *mscs_params,
7779 		      bool active)
7780 {
7781 	struct dp_peer *peer;
7782 	QDF_STATUS status = QDF_STATUS_E_INVAL;
7783 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7784 
7785 	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
7786 				      DP_MOD_ID_CDP);
7787 
7788 	if (!peer) {
7789 		dp_err("Peer is NULL!");
7790 		goto fail;
7791 	}
7792 	if (!active) {
7793 		dp_info("MSCS Procedure is terminated");
7794 		peer->mscs_active = active;
7795 		goto fail;
7796 	}
7797 
7798 	if (mscs_params->classifier_type == IEEE80211_TCLAS_MASK_CLA_TYPE_4) {
7799 		/* Populate entries inside IPV4 database first */
7800 		peer->mscs_ipv4_parameter.user_priority_bitmap =
7801 			mscs_params->user_pri_bitmap;
7802 		peer->mscs_ipv4_parameter.user_priority_limit =
7803 			mscs_params->user_pri_limit;
7804 		peer->mscs_ipv4_parameter.classifier_mask =
7805 			mscs_params->classifier_mask;
7806 
7807 		/* Populate entries inside IPV6 database */
7808 		peer->mscs_ipv6_parameter.user_priority_bitmap =
7809 			mscs_params->user_pri_bitmap;
7810 		peer->mscs_ipv6_parameter.user_priority_limit =
7811 			mscs_params->user_pri_limit;
7812 		peer->mscs_ipv6_parameter.classifier_mask =
7813 			mscs_params->classifier_mask;
7814 		peer->mscs_active = 1;
7815 		dp_info("\n\tMSCS Procedure request based parameters for "QDF_MAC_ADDR_FMT"\n"
7816 			"\tClassifier_type = %d\tUser priority bitmap = %x\n"
7817 			"\tUser priority limit = %x\tClassifier mask = %x",
7818 			QDF_MAC_ADDR_REF(peer_mac),
7819 			mscs_params->classifier_type,
7820 			peer->mscs_ipv4_parameter.user_priority_bitmap,
7821 			peer->mscs_ipv4_parameter.user_priority_limit,
7822 			peer->mscs_ipv4_parameter.classifier_mask);
7823 	}
7824 
7825 	status = QDF_STATUS_SUCCESS;
7826 fail:
7827 	if (peer)
7828 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7829 	return status;
7830 }
7831 #endif
7832 
7833 /*
7834  * dp_get_sec_type() - Get the security type
7835  * @soc: soc handle
7836  * @vdev_id: id of dp handle
7837  * @peer_mac: mac of datapath PEER handle
7838  * @sec_idx:    Security id (mcast, ucast)
7839  *
7840  * return sec_type: Security type
7841  */
7842 static int dp_get_sec_type(struct cdp_soc_t *soc, uint8_t vdev_id,
7843 			   uint8_t *peer_mac, uint8_t sec_idx)
7844 {
7845 	int sec_type = 0;
7846 	struct dp_peer *peer =
7847 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
7848 						       peer_mac, 0, vdev_id,
7849 						       DP_MOD_ID_CDP);
7850 
7851 	if (!peer) {
7852 		dp_cdp_err("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
7853 		return sec_type;
7854 	}
7855 
7856 	if (!peer->txrx_peer) {
7857 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7858 		dp_peer_debug("%pK: txrx peer is NULL!\n", soc);
7859 		return sec_type;
7860 	}
7861 	sec_type = peer->txrx_peer->security[sec_idx].sec_type;
7862 
7863 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7864 	return sec_type;
7865 }
7866 
7867 /*
7868  * dp_peer_authorize() - authorize txrx peer
7869  * @soc: soc handle
7870  * @vdev_id: id of dp handle
7871  * @peer_mac: mac of datapath PEER handle
7872  * @authorize
7873  *
7874  */
7875 static QDF_STATUS
7876 dp_peer_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
7877 		  uint8_t *peer_mac, uint32_t authorize)
7878 {
7879 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7880 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7881 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac,
7882 							      0, vdev_id,
7883 							      DP_MOD_ID_CDP);
7884 
7885 	if (!peer) {
7886 		dp_cdp_debug("%pK: Peer is NULL!\n", soc);
7887 		status = QDF_STATUS_E_FAILURE;
7888 	} else {
7889 		peer->authorize = authorize ? 1 : 0;
7890 		if (peer->txrx_peer)
7891 			peer->txrx_peer->authorize = peer->authorize;
7892 
7893 		if (!peer->authorize)
7894 			dp_peer_flush_frags(soc_hdl, vdev_id, peer_mac);
7895 
7896 		dp_mlo_peer_authorize(soc, peer);
7897 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7898 	}
7899 
7900 	return status;
7901 }
7902 
7903 /*
7904  * dp_peer_get_authorize() - get peer authorize status
7905  * @soc: soc handle
7906  * @vdev_id: id of dp handle
7907  * @peer_mac: mac of datapath PEER handle
7908  *
7909  * Retusn: true is peer is authorized, false otherwise
7910  */
7911 static bool
7912 dp_peer_get_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
7913 		      uint8_t *peer_mac)
7914 {
7915 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7916 	bool authorize = false;
7917 	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
7918 						      0, vdev_id,
7919 						      DP_MOD_ID_CDP);
7920 
7921 	if (!peer) {
7922 		dp_cdp_debug("%pK: Peer is NULL!\n", soc);
7923 		return authorize;
7924 	}
7925 
7926 	authorize = peer->authorize;
7927 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7928 
7929 	return authorize;
7930 }
7931 
7932 /**
7933  * dp_vdev_unref_delete() - check and process vdev delete
7934  * @soc : DP specific soc pointer
7935  * @vdev: DP specific vdev pointer
7936  * @mod_id: module id
7937  *
7938  */
7939 void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev,
7940 			  enum dp_mod_id mod_id)
7941 {
7942 	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
7943 	void *vdev_delete_context = NULL;
7944 	uint8_t vdev_id = vdev->vdev_id;
7945 	struct dp_pdev *pdev = vdev->pdev;
7946 	struct dp_vdev *tmp_vdev = NULL;
7947 	uint8_t found = 0;
7948 
7949 	QDF_ASSERT(qdf_atomic_dec_return(&vdev->mod_refs[mod_id]) >= 0);
7950 
7951 	/* Return if this is not the last reference*/
7952 	if (!qdf_atomic_dec_and_test(&vdev->ref_cnt))
7953 		return;
7954 
7955 	/*
7956 	 * This should be set as last reference need to released
7957 	 * after cdp_vdev_detach() is called
7958 	 *
7959 	 * if this assert is hit there is a ref count issue
7960 	 */
7961 	QDF_ASSERT(vdev->delete.pending);
7962 
7963 	vdev_delete_cb = vdev->delete.callback;
7964 	vdev_delete_context = vdev->delete.context;
7965 
7966 	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")- its last peer is done",
7967 		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
7968 
7969 	if (wlan_op_mode_monitor == vdev->opmode) {
7970 		dp_monitor_vdev_delete(soc, vdev);
7971 		goto free_vdev;
7972 	}
7973 
7974 	/* all peers are gone, go ahead and delete it */
7975 	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
7976 			FLOW_TYPE_VDEV, vdev_id);
7977 	dp_tx_vdev_detach(vdev);
7978 	dp_monitor_vdev_detach(vdev);
7979 
7980 free_vdev:
7981 	qdf_spinlock_destroy(&vdev->peer_list_lock);
7982 
7983 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
7984 	TAILQ_FOREACH(tmp_vdev, &soc->inactive_vdev_list,
7985 		      inactive_list_elem) {
7986 		if (tmp_vdev == vdev) {
7987 			found = 1;
7988 			break;
7989 		}
7990 	}
7991 	if (found)
7992 		TAILQ_REMOVE(&soc->inactive_vdev_list, vdev,
7993 			     inactive_list_elem);
7994 	/* delete this peer from the list */
7995 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
7996 
7997 	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")",
7998 		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
7999 	wlan_minidump_remove(vdev, sizeof(*vdev), soc->ctrl_psoc,
8000 			     WLAN_MD_DP_VDEV, "dp_vdev");
8001 	qdf_mem_free(vdev);
8002 	vdev = NULL;
8003 
8004 	if (vdev_delete_cb)
8005 		vdev_delete_cb(vdev_delete_context);
8006 }
8007 
8008 qdf_export_symbol(dp_vdev_unref_delete);
8009 
8010 /*
8011  * dp_peer_unref_delete() - unref and delete peer
8012  * @peer_handle:    Datapath peer handle
8013  * @mod_id:         ID of module releasing reference
8014  *
8015  */
8016 void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id mod_id)
8017 {
8018 	struct dp_vdev *vdev = peer->vdev;
8019 	struct dp_pdev *pdev = vdev->pdev;
8020 	struct dp_soc *soc = pdev->soc;
8021 	uint16_t peer_id;
8022 	struct dp_peer *tmp_peer;
8023 	bool found = false;
8024 
8025 	if (mod_id > DP_MOD_ID_RX)
8026 		QDF_ASSERT(qdf_atomic_dec_return(&peer->mod_refs[mod_id]) >= 0);
8027 
8028 	/*
8029 	 * Hold the lock all the way from checking if the peer ref count
8030 	 * is zero until the peer references are removed from the hash
8031 	 * table and vdev list (if the peer ref count is zero).
8032 	 * This protects against a new HL tx operation starting to use the
8033 	 * peer object just after this function concludes it's done being used.
8034 	 * Furthermore, the lock needs to be held while checking whether the
8035 	 * vdev's list of peers is empty, to make sure that list is not modified
8036 	 * concurrently with the empty check.
8037 	 */
8038 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
8039 		peer_id = peer->peer_id;
8040 
8041 		/*
8042 		 * Make sure that the reference to the peer in
8043 		 * peer object map is removed
8044 		 */
8045 		QDF_ASSERT(peer_id == HTT_INVALID_PEER);
8046 
8047 		dp_peer_debug("Deleting peer %pK ("QDF_MAC_ADDR_FMT")", peer,
8048 			      QDF_MAC_ADDR_REF(peer->mac_addr.raw));
8049 
8050 		wlan_minidump_remove(peer, sizeof(*peer), soc->ctrl_psoc,
8051 				     WLAN_MD_DP_PEER, "dp_peer");
8052 
8053 		qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
8054 		TAILQ_FOREACH(tmp_peer, &soc->inactive_peer_list,
8055 			      inactive_list_elem) {
8056 			if (tmp_peer == peer) {
8057 				found = 1;
8058 				break;
8059 			}
8060 		}
8061 		if (found)
8062 			TAILQ_REMOVE(&soc->inactive_peer_list, peer,
8063 				     inactive_list_elem);
8064 		/* delete this peer from the list */
8065 		qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
8066 		DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
8067 		dp_peer_update_state(soc, peer, DP_PEER_STATE_FREED);
8068 
8069 		/* cleanup the peer data */
8070 		dp_peer_cleanup(vdev, peer);
8071 
8072 		if (!IS_MLO_DP_MLD_PEER(peer))
8073 			dp_monitor_peer_detach(soc, peer);
8074 
8075 		qdf_spinlock_destroy(&peer->peer_state_lock);
8076 
8077 		dp_txrx_peer_detach(soc, peer);
8078 		qdf_mem_free(peer);
8079 
8080 		/*
8081 		 * Decrement ref count taken at peer create
8082 		 */
8083 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CHILD);
8084 	}
8085 }
8086 
8087 qdf_export_symbol(dp_peer_unref_delete);
8088 
8089 /*
8090  * dp_txrx_peer_unref_delete() - unref and delete peer
8091  * @handle: Datapath txrx ref handle
8092  * @mod_id: Module ID of the caller
8093  *
8094  */
8095 void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle,
8096 			       enum dp_mod_id mod_id)
8097 {
8098 	dp_peer_unref_delete((struct dp_peer *)handle, mod_id);
8099 }
8100 
8101 qdf_export_symbol(dp_txrx_peer_unref_delete);
8102 
/*
 * dp_peer_delete_wifi3() - Delete txrx peer
 * @soc_hdl: soc handle
 * @vdev_id: id of dp handle
 * @peer_mac: mac of datapath PEER handle
 * @bitmap: bitmap indicating special handling of request.
 *
 */
static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
				       uint8_t vdev_id,
				       uint8_t *peer_mac, uint32_t bitmap)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
						      0, vdev_id,
						      DP_MOD_ID_CDP);
	struct dp_vdev *vdev = NULL;

	/* Peer can be null for monitor vap mac address */
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid peer\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	/* A second delete on the same peer: drop the lookup ref taken above
	 * and report it as already deleted.
	 */
	if (!peer->valid) {
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
		dp_err("Invalid peer: "QDF_MAC_ADDR_FMT,
			QDF_MAC_ADDR_REF(peer_mac));
		return QDF_STATUS_E_ALREADY;
	}

	vdev = peer->vdev;

	if (!vdev) {
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
		return QDF_STATUS_E_FAILURE;
	}

	/* Mark invalid first so concurrent lookups stop using this peer */
	peer->valid = 0;

	dp_init_info("%pK: peer %pK (" QDF_MAC_ADDR_FMT ")",
		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw));

	dp_local_peer_id_free(peer->vdev->pdev, peer);

	/* Drop all rx packets before deleting peer */
	dp_clear_peer_internal(soc, peer);

	qdf_spinlock_destroy(&peer->peer_info_lock);
	dp_peer_multipass_list_remove(peer);

	/* remove the reference to the peer from the hash table */
	dp_peer_find_hash_remove(soc, peer);

	dp_peer_vdev_list_remove(soc, vdev, peer);

	dp_peer_mlo_delete(peer);

	/* Park the peer on the inactive list; the final unref
	 * (dp_peer_unref_delete) removes it from there before freeing.
	 */
	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
	TAILQ_INSERT_TAIL(&soc->inactive_peer_list, peer,
			  inactive_list_elem);
	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);

	/*
	 * Remove the reference added during peer_attach.
	 * The peer will still be left allocated until the
	 * PEER_UNMAP message arrives to remove the other
	 * reference, added by the PEER_MAP message.
	 */
	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
	/*
	 * Remove the reference taken above
	 */
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}
8181 
/*
 * dp_get_vdev_mac_addr_wifi3() - Get MAC address of a vdev
 * @soc_hdl: Datapath soc handle
 * @vdev_id: virtual interface id
 *
 * Return: MAC address on success, NULL on failure.
 *
 */
8190 static uint8_t *dp_get_vdev_mac_addr_wifi3(struct cdp_soc_t *soc_hdl,
8191 					   uint8_t vdev_id)
8192 {
8193 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8194 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8195 						     DP_MOD_ID_CDP);
8196 	uint8_t *mac = NULL;
8197 
8198 	if (!vdev)
8199 		return NULL;
8200 
8201 	mac = vdev->mac_addr.raw;
8202 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8203 
8204 	return mac;
8205 }
8206 
/*
 * dp_vdev_set_wds() - Enable/disable WDS on the vdev
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP VDEV handle
 * @val: value
 *
 * Return: QDF_STATUS
 */
8215 static int dp_vdev_set_wds(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8216 			   uint32_t val)
8217 {
8218 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8219 	struct dp_vdev *vdev =
8220 		dp_vdev_get_ref_by_id((struct dp_soc *)soc, vdev_id,
8221 				      DP_MOD_ID_CDP);
8222 
8223 	if (!vdev)
8224 		return QDF_STATUS_E_FAILURE;
8225 
8226 	vdev->wds_enabled = val;
8227 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8228 
8229 	return QDF_STATUS_SUCCESS;
8230 }
8231 
8232 static int dp_get_opmode(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
8233 {
8234 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8235 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8236 						     DP_MOD_ID_CDP);
8237 	int opmode;
8238 
8239 	if (!vdev) {
8240 		dp_err("vdev for id %d is NULL", vdev_id);
8241 		return -EINVAL;
8242 	}
8243 	opmode = vdev->opmode;
8244 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8245 
8246 	return opmode;
8247 }
8248 
/**
 * dp_get_os_rx_handles_from_vdev_wifi3() - Get os rx handles for a vdev
 * @soc_hdl: ol_txrx_soc_handle handle
 * @vdev_id: vdev id for which os rx handles are needed
 * @stack_fn_p: pointer to stack function pointer
 * @osif_vdev_p: pointer to ol_osif_vdev_handle
 *
 * Return: void
 */
8258 static
8259 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_soc_t *soc_hdl,
8260 					  uint8_t vdev_id,
8261 					  ol_txrx_rx_fp *stack_fn_p,
8262 					  ol_osif_vdev_handle *osif_vdev_p)
8263 {
8264 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8265 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8266 						     DP_MOD_ID_CDP);
8267 
8268 	if (qdf_unlikely(!vdev)) {
8269 		*stack_fn_p = NULL;
8270 		*osif_vdev_p = NULL;
8271 		return;
8272 	}
8273 	*stack_fn_p = vdev->osif_rx_stack;
8274 	*osif_vdev_p = vdev->osif_vdev;
8275 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8276 }
8277 
8278 /**
8279  * dp_get_ctrl_pdev_from_vdev() - Get control pdev of vdev
8280  * @soc_hdl: datapath soc handle
8281  * @vdev_id: virtual device/interface id
8282  *
8283  * Return: Handle to control pdev
8284  */
8285 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(
8286 						struct cdp_soc_t *soc_hdl,
8287 						uint8_t vdev_id)
8288 {
8289 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8290 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8291 						     DP_MOD_ID_CDP);
8292 	struct dp_pdev *pdev;
8293 
8294 	if (!vdev)
8295 		return NULL;
8296 
8297 	pdev = vdev->pdev;
8298 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8299 	return pdev ? (struct cdp_cfg *)pdev->wlan_cfg_ctx : NULL;
8300 }
8301 
8302 /**
8303  * dp_get_tx_pending() - read pending tx
8304  * @pdev_handle: Datapath PDEV handle
8305  *
8306  * Return: outstanding tx
8307  */
8308 static int32_t dp_get_tx_pending(struct cdp_pdev *pdev_handle)
8309 {
8310 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8311 
8312 	return qdf_atomic_read(&pdev->num_tx_outstanding);
8313 }
8314 
8315 /**
8316  * dp_get_peer_mac_from_peer_id() - get peer mac
8317  * @pdev_handle: Datapath PDEV handle
8318  * @peer_id: Peer ID
8319  * @peer_mac: MAC addr of PEER
8320  *
8321  * Return: QDF_STATUS
8322  */
8323 static QDF_STATUS dp_get_peer_mac_from_peer_id(struct cdp_soc_t *soc,
8324 					       uint32_t peer_id,
8325 					       uint8_t *peer_mac)
8326 {
8327 	struct dp_peer *peer;
8328 
8329 	if (soc && peer_mac) {
8330 		peer = dp_peer_get_ref_by_id((struct dp_soc *)soc,
8331 					     (uint16_t)peer_id,
8332 					     DP_MOD_ID_CDP);
8333 		if (peer) {
8334 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
8335 				     QDF_MAC_ADDR_SIZE);
8336 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8337 			return QDF_STATUS_SUCCESS;
8338 		}
8339 	}
8340 
8341 	return QDF_STATUS_E_FAILURE;
8342 }
8343 
8344 #ifdef MESH_MODE_SUPPORT
8345 static
8346 void dp_vdev_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
8347 {
8348 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
8349 
8350 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
8351 	vdev->mesh_vdev = val;
8352 	if (val)
8353 		vdev->skip_sw_tid_classification |=
8354 			DP_TX_MESH_ENABLED;
8355 	else
8356 		vdev->skip_sw_tid_classification &=
8357 			~DP_TX_MESH_ENABLED;
8358 }
8359 
/*
 * dp_vdev_set_mesh_rx_filter() - to set the mesh rx filter
 * @vdev_hdl: virtual device object
 * @val: value to be set
 *
 * Return: void
 */
static
void dp_vdev_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;

	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
	/* Stored on the vdev; consumed by the mesh rx path elsewhere */
	vdev->mesh_rx_filter = val;
}
8375 #endif
8376 
8377 /*
8378  * dp_vdev_set_hlos_tid_override() - to set hlos tid override
8379  * @vdev_hdl: virtual device object
8380  * @val: value to be set
8381  *
8382  * Return: void
8383  */
8384 static
8385 void dp_vdev_set_hlos_tid_override(struct dp_vdev *vdev, uint32_t val)
8386 {
8387 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
8388 	if (val)
8389 		vdev->skip_sw_tid_classification |=
8390 			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
8391 	else
8392 		vdev->skip_sw_tid_classification &=
8393 			~DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
8394 }
8395 
8396 /*
8397  * dp_vdev_get_hlos_tid_override() - to get hlos tid override flag
8398  * @vdev_hdl: virtual device object
8399  * @val: value to be set
8400  *
8401  * Return: 1 if this flag is set
8402  */
8403 static
8404 uint8_t dp_vdev_get_hlos_tid_override(struct cdp_vdev *vdev_hdl)
8405 {
8406 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
8407 
8408 	return !!(vdev->skip_sw_tid_classification &
8409 			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED);
8410 }
8411 
8412 #ifdef VDEV_PEER_PROTOCOL_COUNT
8413 static void dp_enable_vdev_peer_protocol_count(struct cdp_soc_t *soc_hdl,
8414 					       int8_t vdev_id,
8415 					       bool enable)
8416 {
8417 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8418 	struct dp_vdev *vdev;
8419 
8420 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
8421 	if (!vdev)
8422 		return;
8423 
8424 	dp_info("enable %d vdev_id %d", enable, vdev_id);
8425 	vdev->peer_protocol_count_track = enable;
8426 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8427 }
8428 
8429 static void dp_enable_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
8430 						   int8_t vdev_id,
8431 						   int drop_mask)
8432 {
8433 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8434 	struct dp_vdev *vdev;
8435 
8436 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
8437 	if (!vdev)
8438 		return;
8439 
8440 	dp_info("drop_mask %d vdev_id %d", drop_mask, vdev_id);
8441 	vdev->peer_protocol_count_dropmask = drop_mask;
8442 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8443 }
8444 
8445 static int dp_is_vdev_peer_protocol_count_enabled(struct cdp_soc_t *soc_hdl,
8446 						  int8_t vdev_id)
8447 {
8448 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8449 	struct dp_vdev *vdev;
8450 	int peer_protocol_count_track;
8451 
8452 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
8453 	if (!vdev)
8454 		return 0;
8455 
8456 	dp_info("enable %d vdev_id %d", vdev->peer_protocol_count_track,
8457 		vdev_id);
8458 	peer_protocol_count_track =
8459 		vdev->peer_protocol_count_track;
8460 
8461 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8462 	return peer_protocol_count_track;
8463 }
8464 
8465 static int dp_get_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
8466 					       int8_t vdev_id)
8467 {
8468 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8469 	struct dp_vdev *vdev;
8470 	int peer_protocol_count_dropmask;
8471 
8472 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
8473 	if (!vdev)
8474 		return 0;
8475 
8476 	dp_info("drop_mask %d vdev_id %d", vdev->peer_protocol_count_dropmask,
8477 		vdev_id);
8478 	peer_protocol_count_dropmask =
8479 		vdev->peer_protocol_count_dropmask;
8480 
8481 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8482 	return peer_protocol_count_dropmask;
8483 }
8484 
8485 #endif
8486 
8487 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data)
8488 {
8489 	uint8_t pdev_count;
8490 
8491 	for (pdev_count = 0; pdev_count < MAX_PDEV_CNT; pdev_count++) {
8492 		if (soc->pdev_list[pdev_count] &&
8493 		    soc->pdev_list[pdev_count] == data)
8494 			return true;
8495 	}
8496 	return false;
8497 }
8498 
8499 /**
8500  * dp_rx_bar_stats_cb(): BAR received stats callback
8501  * @soc: SOC handle
8502  * @cb_ctxt: Call back context
8503  * @reo_status: Reo status
8504  *
8505  * return: void
8506  */
void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
	union hal_reo_status *reo_status)
{
	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);

	/* The pdev may have been detached while the REO command was in
	 * flight; validate it against the soc's pdev list before use.
	 */
	if (!dp_check_pdev_exists(soc, pdev)) {
		dp_err_rl("pdev doesn't exist");
		return;
	}

	if (!qdf_atomic_read(&soc->cmn_init_done))
		return;

	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
		DP_PRINT_STATS("REO stats failure %d",
			       queue_status->header.status);
		/* Signal completion even on failure so waiters make progress */
		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
		return;
	}

	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);

}
8532 
/**
 * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
 * @vdev: DP VDEV handle
 * @vdev_stats: buffer to hold the aggregated vdev stats
 *
 * return: void
 */
8539 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
8540 			     struct cdp_vdev_stats *vdev_stats)
8541 {
8542 	struct dp_soc *soc = NULL;
8543 
8544 	if (!vdev || !vdev->pdev)
8545 		return;
8546 
8547 	soc = vdev->pdev->soc;
8548 
8549 	dp_update_vdev_ingress_stats(vdev);
8550 
8551 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
8552 
8553 	dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, vdev_stats,
8554 			     DP_MOD_ID_GENERIC_STATS);
8555 
8556 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
8557 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
8558 			     vdev_stats, vdev->vdev_id,
8559 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
8560 #endif
8561 }
8562 
void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
{
	struct dp_vdev *vdev = NULL;
	struct dp_soc *soc;
	/* Scratch buffer reused for each vdev; atomic alloc because the
	 * vdev list is later walked under a BH-disabling spinlock.
	 */
	struct cdp_vdev_stats *vdev_stats =
			qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));

	if (!vdev_stats) {
		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
			   pdev->soc);
		return;
	}

	soc = pdev->soc;

	/* Reset pdev aggregates before re-accumulating from the vdevs */
	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
	qdf_mem_zero(&pdev->stats.rx_i, sizeof(pdev->stats.rx_i));

	if (dp_monitor_is_enable_mcopy_mode(pdev))
		dp_monitor_invalid_peer_update_pdev_stats(soc, pdev);

	/* Walk all vdevs under the list lock and fold their stats in */
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {

		dp_aggregate_vdev_stats(vdev, vdev_stats);
		dp_update_pdev_stats(pdev, vdev_stats);
		dp_update_pdev_ingress_stats(pdev, vdev);
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	qdf_mem_free(vdev_stats);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	/* Publish the aggregated pdev stats to WDI subscribers */
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
#endif
}
8601 
8602 /**
8603  * dp_vdev_getstats() - get vdev packet level stats
8604  * @vdev_handle: Datapath VDEV handle
8605  * @stats: cdp network device stats structure
8606  *
8607  * Return: QDF_STATUS
8608  */
static QDF_STATUS dp_vdev_getstats(struct cdp_vdev *vdev_handle,
				   struct cdp_dev_stats *stats)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	struct cdp_vdev_stats *vdev_stats;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	pdev = vdev->pdev;
	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	soc = pdev->soc;

	/* Scratch buffer for aggregation; heap-allocated, presumably to
	 * avoid a large stack frame - confirm struct size if changing.
	 */
	vdev_stats = qdf_mem_malloc(sizeof(struct cdp_vdev_stats));

	if (!vdev_stats) {
		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
			   soc);
		return QDF_STATUS_E_FAILURE;
	}

	dp_aggregate_vdev_stats(vdev, vdev_stats);

	stats->tx_packets = vdev_stats->tx.comp_pkt.num;
	stats->tx_bytes = vdev_stats->tx.comp_pkt.bytes;

	stats->tx_errors = vdev_stats->tx.tx_failed;
	/* Sum every host-side tx drop source into one counter */
	stats->tx_dropped = vdev_stats->tx_i.dropped.dropped_pkt.num +
			    vdev_stats->tx_i.sg.dropped_host.num +
			    vdev_stats->tx_i.mcast_en.dropped_map_error +
			    vdev_stats->tx_i.mcast_en.dropped_self_mac +
			    vdev_stats->tx_i.mcast_en.dropped_send_fail +
			    vdev_stats->tx.nawds_mcast_drop;

	/* With HW vdev-stats offload, rx counters come from the rx_i
	 * (ingress) block instead of the to-stack counters.
	 */
	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
		stats->rx_packets = vdev_stats->rx.to_stack.num;
		stats->rx_bytes = vdev_stats->rx.to_stack.bytes;
	} else {
		stats->rx_packets = vdev_stats->rx_i.reo_rcvd_pkt.num +
				    vdev_stats->rx_i.null_q_desc_pkt.num +
				    vdev_stats->rx_i.routed_eapol_pkt.num;
		stats->rx_bytes = vdev_stats->rx_i.reo_rcvd_pkt.bytes +
				  vdev_stats->rx_i.null_q_desc_pkt.bytes +
				  vdev_stats->rx_i.routed_eapol_pkt.bytes;
	}

	stats->rx_errors = vdev_stats->rx.err.mic_err +
			   vdev_stats->rx.err.decrypt_err +
			   vdev_stats->rx.err.fcserr +
			   vdev_stats->rx.err.pn_err +
			   vdev_stats->rx.err.oor_err +
			   vdev_stats->rx.err.jump_2k_err +
			   vdev_stats->rx.err.rxdma_wifi_parse_err;

	stats->rx_dropped = vdev_stats->rx.mec_drop.num +
			    vdev_stats->rx.multipass_rx_pkt_drop +
			    vdev_stats->rx.peer_unauth_rx_pkt_drop +
			    vdev_stats->rx.policy_check_drop +
			    vdev_stats->rx.nawds_mcast_drop;

	qdf_mem_free(vdev_stats);

	return QDF_STATUS_SUCCESS;
}
8677 
/**
 * dp_pdev_getstats() - get pdev packet level stats
 * @pdev_handle: Datapath PDEV handle
 * @stats: cdp network device stats structure
 *
 * Return: void
 */
static void dp_pdev_getstats(struct cdp_pdev *pdev_handle,
			     struct cdp_dev_stats *stats)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	/* Refresh pdev aggregates from all vdevs/peers first */
	dp_aggregate_pdev_stats(pdev);

	stats->tx_packets = pdev->stats.tx.comp_pkt.num;
	stats->tx_bytes = pdev->stats.tx.comp_pkt.bytes;

	stats->tx_errors = pdev->stats.tx.tx_failed;
	/* Sum every host-side tx drop source into one counter */
	stats->tx_dropped = pdev->stats.tx_i.dropped.dropped_pkt.num +
			    pdev->stats.tx_i.sg.dropped_host.num +
			    pdev->stats.tx_i.mcast_en.dropped_map_error +
			    pdev->stats.tx_i.mcast_en.dropped_self_mac +
			    pdev->stats.tx_i.mcast_en.dropped_send_fail +
			    pdev->stats.tx.nawds_mcast_drop +
			    pdev->stats.tso_stats.dropped_host.num;

	/* With HW vdev-stats offload, rx counters come from the rx_i
	 * (ingress) block instead of the to-stack counters.
	 */
	if (!wlan_cfg_get_vdev_stats_hw_offload_config(pdev->soc->wlan_cfg_ctx)) {
		stats->rx_packets = pdev->stats.rx.to_stack.num;
		stats->rx_bytes = pdev->stats.rx.to_stack.bytes;
	} else {
		stats->rx_packets = pdev->stats.rx_i.reo_rcvd_pkt.num +
				    pdev->stats.rx_i.null_q_desc_pkt.num +
				    pdev->stats.rx_i.routed_eapol_pkt.num;
		stats->rx_bytes = pdev->stats.rx_i.reo_rcvd_pkt.bytes +
				  pdev->stats.rx_i.null_q_desc_pkt.bytes +
				  pdev->stats.rx_i.routed_eapol_pkt.bytes;
	}

	stats->rx_errors = pdev->stats.err.ip_csum_err +
		pdev->stats.err.tcp_udp_csum_err +
		pdev->stats.rx.err.mic_err +
		pdev->stats.rx.err.decrypt_err +
		pdev->stats.rx.err.fcserr +
		pdev->stats.rx.err.pn_err +
		pdev->stats.rx.err.oor_err +
		pdev->stats.rx.err.jump_2k_err +
		pdev->stats.rx.err.rxdma_wifi_parse_err;
	stats->rx_dropped = pdev->stats.dropped.msdu_not_done +
		pdev->stats.dropped.mec +
		pdev->stats.dropped.mesh_filter +
		pdev->stats.dropped.wifi_parse +
		pdev->stats.dropped.mon_rx_drop +
		pdev->stats.dropped.mon_radiotap_update_err +
		pdev->stats.rx.mec_drop.num +
		pdev->stats.rx.multipass_rx_pkt_drop +
		pdev->stats.rx.peer_unauth_rx_pkt_drop +
		pdev->stats.rx.policy_check_drop +
		pdev->stats.rx.nawds_mcast_drop;
}
8737 
8738 /**
8739  * dp_get_device_stats() - get interface level packet stats
8740  * @soc: soc handle
8741  * @id : vdev_id or pdev_id based on type
8742  * @stats: cdp network device stats structure
8743  * @type: device type pdev/vdev
8744  *
8745  * Return: QDF_STATUS
8746  */
8747 static QDF_STATUS dp_get_device_stats(struct cdp_soc_t *soc_hdl, uint8_t id,
8748 				      struct cdp_dev_stats *stats,
8749 				      uint8_t type)
8750 {
8751 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8752 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
8753 	struct dp_vdev *vdev;
8754 
8755 	switch (type) {
8756 	case UPDATE_VDEV_STATS:
8757 		vdev = dp_vdev_get_ref_by_id(soc, id, DP_MOD_ID_CDP);
8758 
8759 		if (vdev) {
8760 			status = dp_vdev_getstats((struct cdp_vdev *)vdev,
8761 						  stats);
8762 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8763 		}
8764 		return status;
8765 	case UPDATE_PDEV_STATS:
8766 		{
8767 			struct dp_pdev *pdev =
8768 				dp_get_pdev_from_soc_pdev_id_wifi3(
8769 						(struct dp_soc *)soc,
8770 						 id);
8771 			if (pdev) {
8772 				dp_pdev_getstats((struct cdp_pdev *)pdev,
8773 						 stats);
8774 				return QDF_STATUS_SUCCESS;
8775 			}
8776 		}
8777 		break;
8778 	default:
8779 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
8780 			"apstats cannot be updated for this input "
8781 			"type %d", type);
8782 		break;
8783 	}
8784 
8785 	return QDF_STATUS_E_FAILURE;
8786 }
8787 
8788 const
8789 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
8790 {
8791 	switch (ring_type) {
8792 	case REO_DST:
8793 		return "Reo_dst";
8794 	case REO_EXCEPTION:
8795 		return "Reo_exception";
8796 	case REO_CMD:
8797 		return "Reo_cmd";
8798 	case REO_REINJECT:
8799 		return "Reo_reinject";
8800 	case REO_STATUS:
8801 		return "Reo_status";
8802 	case WBM2SW_RELEASE:
8803 		return "wbm2sw_release";
8804 	case TCL_DATA:
8805 		return "tcl_data";
8806 	case TCL_CMD_CREDIT:
8807 		return "tcl_cmd_credit";
8808 	case TCL_STATUS:
8809 		return "tcl_status";
8810 	case SW2WBM_RELEASE:
8811 		return "sw2wbm_release";
8812 	case RXDMA_BUF:
8813 		return "Rxdma_buf";
8814 	case RXDMA_DST:
8815 		return "Rxdma_dst";
8816 	case RXDMA_MONITOR_BUF:
8817 		return "Rxdma_monitor_buf";
8818 	case RXDMA_MONITOR_DESC:
8819 		return "Rxdma_monitor_desc";
8820 	case RXDMA_MONITOR_STATUS:
8821 		return "Rxdma_monitor_status";
8822 	case RXDMA_MONITOR_DST:
8823 		return "Rxdma_monitor_destination";
8824 	case WBM_IDLE_LINK:
8825 		return "WBM_hw_idle_link";
8826 	default:
8827 		dp_err("Invalid ring type");
8828 		break;
8829 	}
8830 	return "Invalid";
8831 }
8832 
8833 /*
8834  * dp_print_napi_stats(): NAPI stats
8835  * @soc - soc handle
8836  */
void dp_print_napi_stats(struct dp_soc *soc)
{
	/* NAPI counters are owned by HIF; delegate the dump to it */
	hif_print_napi_stats(soc->hif_handle);
}
8841 
/**
 * dp_txrx_host_peer_stats_clr(): Reinitialize the txrx peer stats
 * @soc: Datapath soc
 * @peer: Datapath peer
 * @arg: argument to iter function
 *
 * Return: none
 */
static inline void
dp_txrx_host_peer_stats_clr(struct dp_soc *soc,
			    struct dp_peer *peer,
			    void *arg)
{
	struct dp_txrx_peer *txrx_peer = NULL;
	struct dp_peer *tgt_peer = NULL;
	struct cdp_interface_peer_stats peer_stats_intf;

	/* Zeroed stats payload is what gets published to WDI below */
	qdf_mem_zero(&peer_stats_intf, sizeof(struct cdp_interface_peer_stats));

	DP_STATS_CLR(peer);
	/* Clear monitor peer stats */
	dp_monitor_peer_reset_stats(soc, peer);

	/* Clear MLD peer stats only when link peer is primary */
	if (dp_peer_is_primary_link_peer(peer)) {
		tgt_peer = dp_get_tgt_peer_from_peer(peer);
		if (tgt_peer) {
			DP_STATS_CLR(tgt_peer);
			txrx_peer = tgt_peer->txrx_peer;
			dp_txrx_peer_stats_clr(txrx_peer);
		}
	}

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	/* Notify subscribers that this peer's stats are now cleared */
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, peer->vdev->pdev->soc,
			     &peer_stats_intf,  peer->peer_id,
			     UPDATE_PEER_STATS, peer->vdev->pdev->pdev_id);
#endif
}
8881 
8882 /**
8883  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
8884  * @vdev: DP_VDEV handle
8885  * @dp_soc: DP_SOC handle
8886  *
8887  * Return: QDF_STATUS
8888  */
static inline QDF_STATUS
dp_txrx_host_stats_clr(struct dp_vdev *vdev, struct dp_soc *soc)
{
	if (!vdev || !vdev->pdev)
		return QDF_STATUS_E_FAILURE;

	/*
	 * if NSS offload is enabled, then send message
	 * to NSS FW to clear the stats. Once NSS FW clears the statistics
	 * then clear host statistics.
	 */
	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
		if (soc->cdp_soc.ol_ops->nss_stats_clr)
			soc->cdp_soc.ol_ops->nss_stats_clr(soc->ctrl_psoc,
							   vdev->vdev_id);
	}

	/* Also request the target-side (HW offload) vdev stats reset */
	dp_vdev_stats_hw_offload_target_clear(soc, vdev->pdev->pdev_id,
					      vdev->vdev_id);

	/* Clear host counters at pdev, soc and vdev scope */
	DP_STATS_CLR(vdev->pdev);
	DP_STATS_CLR(vdev->pdev->soc);
	DP_STATS_CLR(vdev);

	hif_clear_napi_stats(vdev->pdev->soc->hif_handle);

	/* Clear per-peer stats for every peer on this vdev */
	dp_vdev_iterate_peer(vdev, dp_txrx_host_peer_stats_clr, NULL,
			     DP_MOD_ID_GENERIC_STATS);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	/* Publish the (now cleared) vdev stats to WDI subscribers */
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
			     &vdev->stats,  vdev->vdev_id,
			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
#endif
	return QDF_STATUS_SUCCESS;
}
8925 
8926 /**
8927  * dp_get_peer_calibr_stats()- Get peer calibrated stats
8928  * @peer: Datapath peer
8929  * @peer_stats: buffer for peer stats
8930  *
8931  * Return: none
8932  */
static inline
void dp_get_peer_calibr_stats(struct dp_peer *peer,
			      struct cdp_peer_stats *peer_stats)
{
	/* Straight field-by-field copy of the calibrated/last-rate
	 * snapshot values kept on the peer object.
	 */
	peer_stats->tx.last_per = peer->stats.tx.last_per;
	peer_stats->tx.tx_bytes_success_last =
					peer->stats.tx.tx_bytes_success_last;
	peer_stats->tx.tx_data_success_last =
					peer->stats.tx.tx_data_success_last;
	peer_stats->tx.tx_byte_rate = peer->stats.tx.tx_byte_rate;
	peer_stats->tx.tx_data_rate = peer->stats.tx.tx_data_rate;
	peer_stats->tx.tx_data_ucast_last = peer->stats.tx.tx_data_ucast_last;
	peer_stats->tx.tx_data_ucast_rate = peer->stats.tx.tx_data_ucast_rate;
	peer_stats->tx.inactive_time = peer->stats.tx.inactive_time;
	peer_stats->rx.rx_bytes_success_last =
					peer->stats.rx.rx_bytes_success_last;
	peer_stats->rx.rx_data_success_last =
					peer->stats.rx.rx_data_success_last;
	peer_stats->rx.rx_byte_rate = peer->stats.rx.rx_byte_rate;
	peer_stats->rx.rx_data_rate = peer->stats.rx.rx_data_rate;
}
8954 
8955 /**
8956  * dp_get_peer_basic_stats()- Get peer basic stats
8957  * @peer: Datapath peer
8958  * @peer_stats: buffer for peer stats
8959  *
8960  * Return: none
8961  */
8962 static inline
8963 void dp_get_peer_basic_stats(struct dp_peer *peer,
8964 			     struct cdp_peer_stats *peer_stats)
8965 {
8966 	struct dp_txrx_peer *txrx_peer;
8967 
8968 	txrx_peer = peer->txrx_peer;
8969 	if (!txrx_peer)
8970 		return;
8971 
8972 	peer_stats->tx.comp_pkt.num += txrx_peer->comp_pkt.num;
8973 	peer_stats->tx.comp_pkt.bytes += txrx_peer->comp_pkt.bytes;
8974 	peer_stats->tx.tx_failed += txrx_peer->tx_failed;
8975 	peer_stats->rx.to_stack.num += txrx_peer->to_stack.num;
8976 	peer_stats->rx.to_stack.bytes += txrx_peer->to_stack.bytes;
8977 }
8978 
8979 /**
8980  * dp_get_peer_per_pkt_stats()- Get peer per pkt stats
8981  * @peer: Datapath peer
8982  * @peer_stats: buffer for peer stats
8983  *
8984  * Return: none
8985  */
8986 static inline
8987 void dp_get_peer_per_pkt_stats(struct dp_peer *peer,
8988 			       struct cdp_peer_stats *peer_stats)
8989 {
8990 	struct dp_txrx_peer *txrx_peer;
8991 	struct dp_peer_per_pkt_stats *per_pkt_stats;
8992 
8993 	txrx_peer = peer->txrx_peer;
8994 	if (!txrx_peer)
8995 		return;
8996 
8997 	per_pkt_stats = &txrx_peer->stats.per_pkt_stats;
8998 	DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats);
8999 }
9000 
9001 /**
9002  * dp_get_peer_extd_stats()- Get peer extd stats
9003  * @peer: Datapath peer
9004  * @peer_stats: buffer for peer stats
9005  *
9006  * Return: none
9007  */
9008 #ifdef QCA_ENHANCED_STATS_SUPPORT
9009 #ifdef WLAN_FEATURE_11BE_MLO
static inline
void dp_get_peer_extd_stats(struct dp_peer *peer,
			    struct cdp_peer_stats *peer_stats)
{
	struct dp_soc *soc = peer->vdev->pdev->soc;

	/* For an MLD peer, monitor stats are kept per link peer on each
	 * link's own soc; take refs on all link peers, accumulate, then
	 * release the refs.
	 */
	if (IS_MLO_DP_MLD_PEER(peer)) {
		uint8_t i;
		struct dp_peer *link_peer;
		struct dp_soc *link_peer_soc;
		struct dp_mld_link_peers link_peers_info;

		dp_get_link_peers_ref_from_mld_peer(soc, peer,
						    &link_peers_info,
						    DP_MOD_ID_CDP);
		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			link_peer_soc = link_peer->vdev->pdev->soc;
			dp_monitor_peer_get_stats(link_peer_soc, link_peer,
						  peer_stats,
						  UPDATE_PEER_STATS);
		}
		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
	} else {
		dp_monitor_peer_get_stats(soc, peer, peer_stats,
					  UPDATE_PEER_STATS);
	}
}
9038 #else
9039 static inline
9040 void dp_get_peer_extd_stats(struct dp_peer *peer,
9041 			    struct cdp_peer_stats *peer_stats)
9042 {
9043 	struct dp_soc *soc = peer->vdev->pdev->soc;
9044 
9045 	dp_monitor_peer_get_stats(soc, peer, peer_stats, UPDATE_PEER_STATS);
9046 }
9047 #endif
9048 #else
9049 static inline
9050 void dp_get_peer_extd_stats(struct dp_peer *peer,
9051 			    struct cdp_peer_stats *peer_stats)
9052 {
9053 	struct dp_txrx_peer *txrx_peer;
9054 	struct dp_peer_extd_stats *extd_stats;
9055 
9056 	txrx_peer = peer->txrx_peer;
9057 	if (!txrx_peer)
9058 		return;
9059 
9060 	extd_stats = &txrx_peer->stats.extd_stats;
9061 	DP_UPDATE_EXTD_STATS(peer_stats, extd_stats);
9062 }
9063 #endif
9064 
9065 /**
9066  * dp_get_peer_stats()- Get peer stats
9067  * @peer: Datapath peer
9068  * @peer_stats: buffer for peer stats
9069  *
9070  * Return: none
9071  */
static inline
void dp_get_peer_stats(struct dp_peer *peer, struct cdp_peer_stats *peer_stats)
{
	/* Fill the caller's buffer with every stats category in turn:
	 * calibrated, basic, per-packet, then extended stats.
	 */
	dp_get_peer_calibr_stats(peer, peer_stats);
	dp_get_peer_basic_stats(peer, peer_stats);
	dp_get_peer_per_pkt_stats(peer, peer_stats);
	dp_get_peer_extd_stats(peer, peer_stats);
}
9083 
9084 /*
9085  * dp_get_host_peer_stats()- function to print peer stats
9086  * @soc: dp_soc handle
9087  * @mac_addr: mac address of the peer
9088  *
9089  * Return: QDF_STATUS
9090  */
9091 static QDF_STATUS
9092 dp_get_host_peer_stats(struct cdp_soc_t *soc, uint8_t *mac_addr)
9093 {
9094 	struct dp_peer *peer = NULL;
9095 	struct cdp_peer_stats *peer_stats = NULL;
9096 
9097 	if (!mac_addr) {
9098 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9099 			  "%s: NULL peer mac addr\n", __func__);
9100 		return QDF_STATUS_E_FAILURE;
9101 	}
9102 
9103 	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
9104 				      mac_addr, 0,
9105 				      DP_VDEV_ALL,
9106 				      DP_MOD_ID_CDP);
9107 	if (!peer) {
9108 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9109 			  "%s: Invalid peer\n", __func__);
9110 		return QDF_STATUS_E_FAILURE;
9111 	}
9112 
9113 	peer_stats = qdf_mem_malloc(sizeof(struct cdp_peer_stats));
9114 	if (!peer_stats) {
9115 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9116 			  "%s: Memory allocation failed for cdp_peer_stats\n",
9117 			  __func__);
9118 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9119 		return QDF_STATUS_E_NOMEM;
9120 	}
9121 
9122 	qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats));
9123 
9124 	dp_get_peer_stats(peer, peer_stats);
9125 	dp_print_peer_stats(peer, peer_stats);
9126 
9127 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
9128 
9129 	qdf_mem_free(peer_stats);
9130 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9131 
9132 	return QDF_STATUS_SUCCESS;
9133 }
9134 
9135 /**
9136  * dp_txrx_stats_help() - Helper function for Txrx_Stats
9137  *
9138  * Return: None
9139  */
/* Print the usage menu for the txrx_stats iwpriv command.  The option
 * numbers presumably index dp_stats_mapping_table (see
 * dp_print_host_stats) — confirm; option 19 is not listed here.
 */
static void dp_txrx_stats_help(void)
{
	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
	dp_info("stats_option:");
	dp_info("  1 -- HTT Tx Statistics");
	dp_info("  2 -- HTT Rx Statistics");
	dp_info("  3 -- HTT Tx HW Queue Statistics");
	dp_info("  4 -- HTT Tx HW Sched Statistics");
	dp_info("  5 -- HTT Error Statistics");
	dp_info("  6 -- HTT TQM Statistics");
	dp_info("  7 -- HTT TQM CMDQ Statistics");
	dp_info("  8 -- HTT TX_DE_CMN Statistics");
	dp_info("  9 -- HTT Tx Rate Statistics");
	dp_info(" 10 -- HTT Rx Rate Statistics");
	dp_info(" 11 -- HTT Peer Statistics");
	dp_info(" 12 -- HTT Tx SelfGen Statistics");
	dp_info(" 13 -- HTT Tx MU HWQ Statistics");
	dp_info(" 14 -- HTT RING_IF_INFO Statistics");
	dp_info(" 15 -- HTT SRNG Statistics");
	dp_info(" 16 -- HTT SFM Info Statistics");
	dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
	dp_info(" 18 -- HTT Peer List Details");
	dp_info(" 20 -- Clear Host Statistics");
	dp_info(" 21 -- Host Rx Rate Statistics");
	dp_info(" 22 -- Host Tx Rate Statistics");
	dp_info(" 23 -- Host Tx Statistics");
	dp_info(" 24 -- Host Rx Statistics");
	dp_info(" 25 -- Host AST Statistics");
	dp_info(" 26 -- Host SRNG PTR Statistics");
	dp_info(" 27 -- Host Mon Statistics");
	dp_info(" 28 -- Host REO Queue Statistics");
	dp_info(" 29 -- Host Soc cfg param Statistics");
	dp_info(" 30 -- Host pdev cfg param Statistics");
	dp_info(" 31 -- Host FISA stats");
	dp_info(" 32 -- Host Register Work stats");
}
9176 
9177 /**
9178  * dp_print_host_stats()- Function to print the stats aggregated at host
9179  * @vdev_handle: DP_VDEV handle
9180  * @req: host stats type
9181  * @soc: dp soc handler
9182  *
9183  * Return: 0 on success, print error message in case of failure
9184  */
static int
dp_print_host_stats(struct dp_vdev *vdev,
		    struct cdp_txrx_stats_req *req,
		    struct dp_soc *soc)
{
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
	/* Map the generic stats id in the request to a host-stats selector */
	enum cdp_host_txrx_stats type =
			dp_stats_mapping_table[req->stats][STATS_HOST];

	/* Fold per-vdev counters into the pdev totals before any of the
	 * printers below read them.
	 */
	dp_aggregate_pdev_stats(pdev);

	switch (type) {
	case TXRX_CLEAR_STATS:
		dp_txrx_host_stats_clr(vdev, soc);
		break;
	case TXRX_RX_RATE_STATS:
		dp_print_rx_rates(vdev);
		break;
	case TXRX_TX_RATE_STATS:
		dp_print_tx_rates(vdev);
		break;
	case TXRX_TX_HOST_STATS:
		dp_print_pdev_tx_stats(pdev);
		dp_print_soc_tx_stats(pdev->soc);
		break;
	case TXRX_RX_HOST_STATS:
		dp_print_pdev_rx_stats(pdev);
		dp_print_soc_rx_stats(pdev->soc);
		break;
	case TXRX_AST_STATS:
		dp_print_ast_stats(pdev->soc);
		dp_print_mec_stats(pdev->soc);
		dp_print_peer_table(vdev);
		break;
	case TXRX_SRNG_PTR_STATS:
		dp_print_ring_stats(pdev);
		break;
	case TXRX_RX_MON_STATS:
		dp_monitor_print_pdev_rx_mon_stats(pdev);
		break;
	case TXRX_REO_QUEUE_STATS:
		/* Prints host peer stats for req->peer_addr, including the
		 * per-tid rx reorder queue info dumped via
		 * dp_peer_rxtid_stats() inside dp_get_host_peer_stats().
		 */
		dp_get_host_peer_stats((struct cdp_soc_t *)pdev->soc,
				       req->peer_addr);
		break;
	case TXRX_SOC_CFG_PARAMS:
		dp_print_soc_cfg_params(pdev->soc);
		break;
	case TXRX_PDEV_CFG_PARAMS:
		dp_print_pdev_cfg_params(pdev);
		break;
	case TXRX_NAPI_STATS:
		dp_print_napi_stats(pdev->soc);
		break;
	case TXRX_SOC_INTERRUPT_STATS:
		dp_print_soc_interrupt_stats(pdev->soc);
		break;
	case TXRX_SOC_FSE_STATS:
		dp_rx_dump_fisa_table(pdev->soc);
		break;
	case TXRX_HAL_REG_WRITE_STATS:
		hal_dump_reg_write_stats(pdev->soc->hal_soc);
		hal_dump_reg_write_srng_stats(pdev->soc->hal_soc);
		break;
	case TXRX_SOC_REO_HW_DESC_DUMP:
		dp_get_rx_reo_queue_info((struct cdp_soc_t *)pdev->soc,
					 vdev->vdev_id);
		break;
	default:
		/* Unknown selector: print the usage menu instead of failing */
		dp_info("Wrong Input For TxRx Host Stats");
		dp_txrx_stats_help();
		break;
	}
	return 0;
}
9259 
9260 /*
9261  * dp_pdev_tid_stats_ingress_inc
9262  * @pdev: pdev handle
9263  * @val: increase in value
9264  *
9265  * Return: void
9266  */
static void
dp_pdev_tid_stats_ingress_inc(struct dp_pdev *pdev, uint32_t val)
{
	/* Bump the count of frames received from the network stack */
	pdev->stats.tid_stats.ingress_stack += val;
}
9272 
9273 /*
9274  * dp_pdev_tid_stats_osif_drop
9275  * @pdev: pdev handle
9276  * @val: increase in value
9277  *
9278  * Return: void
9279  */
static void
dp_pdev_tid_stats_osif_drop(struct dp_pdev *pdev, uint32_t val)
{
	/* Bump the count of frames dropped at the OS interface layer */
	pdev->stats.tid_stats.osif_drop += val;
}
9285 
9286 /*
9287  * dp_get_fw_peer_stats()- function to print peer stats
9288  * @soc: soc handle
9289  * @pdev_id : id of the pdev handle
9290  * @mac_addr: mac address of the peer
9291  * @cap: Type of htt stats requested
9292  * @is_wait: if set, wait on completion from firmware response
9293  *
9294  * Currently Supporting only MAC ID based requests Only
9295  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
9296  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
9297  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
9298  *
9299  * Return: QDF_STATUS
9300  */
9301 static QDF_STATUS
9302 dp_get_fw_peer_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
9303 		     uint8_t *mac_addr,
9304 		     uint32_t cap, uint32_t is_wait)
9305 {
9306 	int i;
9307 	uint32_t config_param0 = 0;
9308 	uint32_t config_param1 = 0;
9309 	uint32_t config_param2 = 0;
9310 	uint32_t config_param3 = 0;
9311 	struct dp_pdev *pdev =
9312 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9313 						   pdev_id);
9314 
9315 	if (!pdev)
9316 		return QDF_STATUS_E_FAILURE;
9317 
9318 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
9319 	config_param0 |= (1 << (cap + 1));
9320 
9321 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
9322 		config_param1 |= (1 << i);
9323 	}
9324 
9325 	config_param2 |= (mac_addr[0] & 0x000000ff);
9326 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
9327 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
9328 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
9329 
9330 	config_param3 |= (mac_addr[4] & 0x000000ff);
9331 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
9332 
9333 	if (is_wait) {
9334 		qdf_event_reset(&pdev->fw_peer_stats_event);
9335 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
9336 					  config_param0, config_param1,
9337 					  config_param2, config_param3,
9338 					  0, DBG_STATS_COOKIE_DP_STATS, 0);
9339 		qdf_wait_single_event(&pdev->fw_peer_stats_event,
9340 				      DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
9341 	} else {
9342 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
9343 					  config_param0, config_param1,
9344 					  config_param2, config_param3,
9345 					  0, DBG_STATS_COOKIE_DEFAULT, 0);
9346 	}
9347 
9348 	return QDF_STATUS_SUCCESS;
9349 
9350 }
9351 
/* This struct definition will be removed from here
 * once it gets added to the FW headers. */
/* Host-to-target HTT stats request; fields map one-to-one onto the
 * arguments of dp_h2t_ext_stats_msg_send() in dp_get_htt_stats().
 */
struct httstats_cmd_req {
    uint32_t    config_param0;	/* stats-type specific config word 0 */
    uint32_t    config_param1;	/* stats-type specific config word 1 */
    uint32_t    config_param2;	/* stats-type specific config word 2 */
    uint32_t    config_param3;	/* stats-type specific config word 3 */
    int cookie;			/* caller cookie forwarded with the request */
    u_int8_t    stats_id;	/* HTT_DBG_EXT_STATS_* request type */
};
9362 
9363 /*
 * dp_get_htt_stats: function to process the httstats request
9365  * @soc: DP soc handle
9366  * @pdev_id: id of pdev handle
9367  * @data: pointer to request data
9368  * @data_len: length for request data
9369  *
9370  * return: QDF_STATUS
9371  */
9372 static QDF_STATUS
9373 dp_get_htt_stats(struct cdp_soc_t *soc, uint8_t pdev_id, void *data,
9374 		 uint32_t data_len)
9375 {
9376 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
9377 	struct dp_pdev *pdev =
9378 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9379 						   pdev_id);
9380 
9381 	if (!pdev)
9382 		return QDF_STATUS_E_FAILURE;
9383 
9384 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
9385 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
9386 				req->config_param0, req->config_param1,
9387 				req->config_param2, req->config_param3,
9388 				req->cookie, DBG_STATS_COOKIE_DEFAULT, 0);
9389 
9390 	return QDF_STATUS_SUCCESS;
9391 }
9392 
9393 /**
9394  * dp_set_pdev_tidmap_prty_wifi3(): update tidmap priority in pdev
9395  * @pdev: DP_PDEV handle
9396  * @prio: tidmap priority value passed by the user
9397  *
9398  * Return: QDF_STATUS_SUCCESS on success
9399  */
9400 static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct dp_pdev *pdev,
9401 						uint8_t prio)
9402 {
9403 	struct dp_soc *soc = pdev->soc;
9404 
9405 	soc->tidmap_prty = prio;
9406 
9407 	hal_tx_set_tidmap_prty(soc->hal_soc, prio);
9408 	return QDF_STATUS_SUCCESS;
9409 }
9410 
9411 /*
9412  * dp_get_peer_param: function to get parameters in peer
9413  * @cdp_soc: DP soc handle
9414  * @vdev_id: id of vdev handle
9415  * @peer_mac: peer mac address
9416  * @param: parameter type to be set
9417  * @val : address of buffer
9418  *
9419  * Return: val
9420  */
static QDF_STATUS dp_get_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
				    uint8_t *peer_mac,
				    enum cdp_peer_param_type param,
				    cdp_config_param_type *val)
{
	/* Placeholder: no peer parameter is readable yet.  All arguments
	 * are ignored, *val is left untouched, and success is returned
	 * unconditionally.
	 */
	return QDF_STATUS_SUCCESS;
}
9428 
9429 /*
9430  * dp_set_peer_param: function to set parameters in peer
9431  * @cdp_soc: DP soc handle
9432  * @vdev_id: id of vdev handle
9433  * @peer_mac: peer mac address
9434  * @param: parameter type to be set
9435  * @val: value of parameter to be set
9436  *
9437  * Return: 0 for success. nonzero for failure.
9438  */
9439 static QDF_STATUS dp_set_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
9440 				    uint8_t *peer_mac,
9441 				    enum cdp_peer_param_type param,
9442 				    cdp_config_param_type val)
9443 {
9444 	struct dp_peer *peer =
9445 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
9446 						       peer_mac, 0, vdev_id,
9447 						       DP_MOD_ID_CDP);
9448 	struct dp_txrx_peer *txrx_peer;
9449 
9450 	if (!peer)
9451 		return QDF_STATUS_E_FAILURE;
9452 
9453 	txrx_peer = peer->txrx_peer;
9454 	if (!txrx_peer) {
9455 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9456 		return QDF_STATUS_E_FAILURE;
9457 	}
9458 
9459 	switch (param) {
9460 	case CDP_CONFIG_NAWDS:
9461 		txrx_peer->nawds_enabled = val.cdp_peer_param_nawds;
9462 		break;
9463 	case CDP_CONFIG_ISOLATION:
9464 		dp_set_peer_isolation(txrx_peer, val.cdp_peer_param_isolation);
9465 		break;
9466 	case CDP_CONFIG_IN_TWT:
9467 		txrx_peer->in_twt = !!(val.cdp_peer_param_in_twt);
9468 		break;
9469 	default:
9470 		break;
9471 	}
9472 
9473 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9474 
9475 	return QDF_STATUS_SUCCESS;
9476 }
9477 
9478 /*
9479  * dp_get_pdev_param: function to get parameters from pdev
9480  * @cdp_soc: DP soc handle
9481  * @pdev_id: id of pdev handle
9482  * @param: parameter type to be get
9483  * @value : buffer for value
9484  *
9485  * Return: status
9486  */
9487 static QDF_STATUS dp_get_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
9488 				    enum cdp_pdev_param_type param,
9489 				    cdp_config_param_type *val)
9490 {
9491 	struct cdp_pdev *pdev = (struct cdp_pdev *)
9492 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
9493 						   pdev_id);
9494 	if (!pdev)
9495 		return QDF_STATUS_E_FAILURE;
9496 
9497 	switch (param) {
9498 	case CDP_CONFIG_VOW:
9499 		val->cdp_pdev_param_cfg_vow =
9500 				((struct dp_pdev *)pdev)->delay_stats_flag;
9501 		break;
9502 	case CDP_TX_PENDING:
9503 		val->cdp_pdev_param_tx_pending = dp_get_tx_pending(pdev);
9504 		break;
9505 	case CDP_FILTER_MCAST_DATA:
9506 		val->cdp_pdev_param_fltr_mcast =
9507 				dp_monitor_pdev_get_filter_mcast_data(pdev);
9508 		break;
9509 	case CDP_FILTER_NO_DATA:
9510 		val->cdp_pdev_param_fltr_none =
9511 				dp_monitor_pdev_get_filter_non_data(pdev);
9512 		break;
9513 	case CDP_FILTER_UCAST_DATA:
9514 		val->cdp_pdev_param_fltr_ucast =
9515 				dp_monitor_pdev_get_filter_ucast_data(pdev);
9516 		break;
9517 	default:
9518 		return QDF_STATUS_E_FAILURE;
9519 	}
9520 
9521 	return QDF_STATUS_SUCCESS;
9522 }
9523 
9524 /*
9525  * dp_set_pdev_param: function to set parameters in pdev
9526  * @cdp_soc: DP soc handle
9527  * @pdev_id: id of pdev handle
9528  * @param: parameter type to be set
9529  * @val: value of parameter to be set
9530  *
9531  * Return: 0 for success. nonzero for failure.
9532  */
static QDF_STATUS dp_set_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
				    enum cdp_pdev_param_type param,
				    cdp_config_param_type val)
{
	int target_type;
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
						   pdev_id);
	enum reg_wifi_band chan_band;

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	/* Refresh the per-band LMAC id mapping for this target before
	 * applying the requested parameter.
	 */
	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_QCA6750:
		/* 2G, 5G and 6G all map to MAC0 on this target */
		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC0_LMAC_ID;
		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
		break;
	case TARGET_TYPE_KIWI:
		/* 2G, 5G and 6G all map to MAC0 on this target */
		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC0_LMAC_ID;
		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
		break;
	default:
		/* Other targets: 2G on MAC1, 5G/6G on MAC0 */
		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC1_LMAC_ID;
		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
		break;
	}

	/* Cases that return directly propagate the monitor layer's
	 * status; cases that break report QDF_STATUS_SUCCESS below.
	 */
	switch (param) {
	case CDP_CONFIG_TX_CAPTURE:
		return dp_monitor_config_debug_sniffer(pdev,
						val.cdp_pdev_param_tx_capture);
	case CDP_CONFIG_DEBUG_SNIFFER:
		return dp_monitor_config_debug_sniffer(pdev,
						val.cdp_pdev_param_dbg_snf);
	case CDP_CONFIG_BPR_ENABLE:
		return dp_monitor_set_bpr_enable(pdev,
						 val.cdp_pdev_param_bpr_enable);
	case CDP_CONFIG_PRIMARY_RADIO:
		pdev->is_primary = val.cdp_pdev_param_primary_radio;
		break;
	case CDP_CONFIG_CAPTURE_LATENCY:
		pdev->latency_capture_enable = val.cdp_pdev_param_cptr_latcy;
		break;
	case CDP_INGRESS_STATS:
		dp_pdev_tid_stats_ingress_inc(pdev,
					      val.cdp_pdev_param_ingrs_stats);
		break;
	case CDP_OSIF_DROP:
		dp_pdev_tid_stats_osif_drop(pdev,
					    val.cdp_pdev_param_osif_drop);
		break;
	case CDP_CONFIG_ENH_RX_CAPTURE:
		return dp_monitor_config_enh_rx_capture(pdev,
						val.cdp_pdev_param_en_rx_cap);
	case CDP_CONFIG_ENH_TX_CAPTURE:
		return dp_monitor_config_enh_tx_capture(pdev,
						val.cdp_pdev_param_en_tx_cap);
	case CDP_CONFIG_HMMC_TID_OVERRIDE:
		pdev->hmmc_tid_override_en = val.cdp_pdev_param_hmmc_tid_ovrd;
		break;
	case CDP_CONFIG_HMMC_TID_VALUE:
		pdev->hmmc_tid = val.cdp_pdev_param_hmmc_tid;
		break;
	case CDP_CHAN_NOISE_FLOOR:
		pdev->chan_noise_floor = val.cdp_pdev_param_chn_noise_flr;
		break;
	case CDP_TIDMAP_PRTY:
		dp_set_pdev_tidmap_prty_wifi3(pdev,
					      val.cdp_pdev_param_tidmap_prty);
		break;
	case CDP_FILTER_NEIGH_PEERS:
		dp_monitor_set_filter_neigh_peers(pdev,
					val.cdp_pdev_param_fltr_neigh_peers);
		break;
	case CDP_MONITOR_CHANNEL:
		dp_monitor_set_chan_num(pdev, val.cdp_pdev_param_monitor_chan);
		break;
	case CDP_MONITOR_FREQUENCY:
		/* Derive the band from the frequency and set both */
		chan_band = wlan_reg_freq_to_band(val.cdp_pdev_param_mon_freq);
		dp_monitor_set_chan_freq(pdev, val.cdp_pdev_param_mon_freq);
		dp_monitor_set_chan_band(pdev, chan_band);
		break;
	case CDP_CONFIG_BSS_COLOR:
		dp_monitor_set_bsscolor(pdev, val.cdp_pdev_param_bss_color);
		break;
	case CDP_SET_ATF_STATS_ENABLE:
		dp_monitor_set_atf_stats_enable(pdev,
					val.cdp_pdev_param_atf_stats_enable);
		break;
	case CDP_CONFIG_SPECIAL_VAP:
		dp_monitor_pdev_config_scan_spcl_vap(pdev,
					val.cdp_pdev_param_config_special_vap);
		dp_monitor_vdev_set_monitor_mode_buf_rings(pdev);
		break;
	case CDP_RESET_SCAN_SPCL_VAP_STATS_ENABLE:
		dp_monitor_pdev_reset_scan_spcl_vap_stats_enable(pdev,
				val.cdp_pdev_param_reset_scan_spcl_vap_stats_enable);
		break;
	case CDP_CONFIG_ENHANCED_STATS_ENABLE:
		pdev->enhanced_stats_en = val.cdp_pdev_param_enhanced_stats_enable;
		break;
	case CDP_ISOLATION:
		pdev->isolation = val.cdp_pdev_param_isolation;
		break;
	default:
		return QDF_STATUS_E_INVAL;
	}
	return QDF_STATUS_SUCCESS;
}
9648 
9649 #ifdef QCA_PEER_EXT_STATS
9650 static void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
9651 					  qdf_nbuf_t nbuf)
9652 {
9653 	struct dp_peer *peer = NULL;
9654 	uint16_t peer_id, ring_id;
9655 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
9656 	struct dp_peer_delay_stats *delay_stats = NULL;
9657 
9658 	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
9659 	if (peer_id > soc->max_peer_id)
9660 		return;
9661 
9662 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
9663 	if (qdf_unlikely(!peer))
9664 		return;
9665 
9666 	if (qdf_unlikely(!peer->txrx_peer)) {
9667 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9668 		return;
9669 	}
9670 
9671 	if (qdf_likely(peer->txrx_peer->delay_stats)) {
9672 		delay_stats = peer->txrx_peer->delay_stats;
9673 		ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
9674 		dp_rx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
9675 					nbuf);
9676 	}
9677 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9678 }
9679 #else
/* Stub when QCA_PEER_EXT_STATS is disabled: per-peer rx delay
 * accounting is compiled out, so this intentionally does nothing.
 */
static inline void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
						 qdf_nbuf_t nbuf)
{
}
9684 #endif
9685 
9686 /*
9687  * dp_calculate_delay_stats: function to get rx delay stats
9688  * @cdp_soc: DP soc handle
9689  * @vdev_id: id of DP vdev handle
9690  * @nbuf: skb
9691  *
9692  * Return: QDF_STATUS
9693  */
9694 static QDF_STATUS
9695 dp_calculate_delay_stats(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
9696 			 qdf_nbuf_t nbuf)
9697 {
9698 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
9699 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9700 						     DP_MOD_ID_CDP);
9701 
9702 	if (!vdev)
9703 		return QDF_STATUS_SUCCESS;
9704 
9705 	if (vdev->pdev->delay_stats_flag)
9706 		dp_rx_compute_delay(vdev, nbuf);
9707 	else
9708 		dp_rx_update_peer_delay_stats(soc, nbuf);
9709 
9710 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9711 	return QDF_STATUS_SUCCESS;
9712 }
9713 
9714 /*
9715  * dp_get_vdev_param: function to get parameters from vdev
9716  * @cdp_soc : DP soc handle
9717  * @vdev_id: id of DP vdev handle
9718  * @param: parameter type to get value
9719  * @val: buffer address
9720  *
9721  * return: status
9722  */
static QDF_STATUS dp_get_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
				    enum cdp_vdev_param_type param,
				    cdp_config_param_type *val)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	/* Copy the requested vdev parameter into the caller's buffer;
	 * the vdev reference is dropped on every exit path.
	 */
	switch (param) {
	case CDP_ENABLE_WDS:
		val->cdp_vdev_param_wds = vdev->wds_enabled;
		break;
	case CDP_ENABLE_MEC:
		val->cdp_vdev_param_mec = vdev->mec_enabled;
		break;
	case CDP_ENABLE_DA_WAR:
		/* DA WAR state lives on the soc, not the vdev */
		val->cdp_vdev_param_da_war = vdev->pdev->soc->da_war_enabled;
		break;
	case CDP_ENABLE_IGMP_MCAST_EN:
		val->cdp_vdev_param_igmp_mcast_en = vdev->igmp_mcast_enhanc_en;
		break;
	case CDP_ENABLE_MCAST_EN:
		val->cdp_vdev_param_mcast_en = vdev->mcast_enhancement_en;
		break;
	case CDP_ENABLE_HLOS_TID_OVERRIDE:
		val->cdp_vdev_param_hlos_tid_override =
			    dp_vdev_get_hlos_tid_override((struct cdp_vdev *)vdev);
		break;
	case CDP_ENABLE_PEER_AUTHORIZE:
		val->cdp_vdev_param_peer_authorize =
			    vdev->peer_authorize;
		break;
#ifdef WLAN_SUPPORT_MESH_LATENCY
	case CDP_ENABLE_PEER_TID_LATENCY:
		val->cdp_vdev_param_peer_tid_latency_enable =
			vdev->peer_tid_latency_enabled;
		break;
	case CDP_SET_VAP_MESH_TID:
		val->cdp_vdev_param_mesh_tid =
				vdev->mesh_tid_latency_config.latency_tid;
		break;
#endif
	default:
		dp_cdp_err("%pK: param value %d is wrong",
			   soc, param);
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_E_FAILURE;
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}
9778 
9779 /*
9780  * dp_set_vdev_param: function to set parameters in vdev
9781  * @cdp_soc : DP soc handle
9782  * @vdev_id: id of DP vdev handle
9783  * @param: parameter type to get value
9784  * @val: value
9785  *
9786  * return: QDF_STATUS
9787  */
static QDF_STATUS
dp_set_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
		  enum cdp_vdev_param_type param, cdp_config_param_type val)
{
	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
	struct dp_vdev *vdev =
		dp_vdev_get_ref_by_id(dsoc, vdev_id, DP_MOD_ID_CDP);
	uint32_t var = 0;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	/* Apply the parameter to the vdev, then (at the bottom) mirror it
	 * into the arch-specific layer and refresh the tx search flags.
	 * Unknown params fall through to that common tail unchanged.
	 */
	switch (param) {
	case CDP_ENABLE_WDS:
		dp_cdp_err("%pK: wds_enable %d for vdev(%pK) id(%d)\n",
			   dsoc, val.cdp_vdev_param_wds, vdev, vdev->vdev_id);
		vdev->wds_enabled = val.cdp_vdev_param_wds;
		break;
	case CDP_ENABLE_MEC:
		dp_cdp_err("%pK: mec_enable %d for vdev(%pK) id(%d)\n",
			   dsoc, val.cdp_vdev_param_mec, vdev, vdev->vdev_id);
		vdev->mec_enabled = val.cdp_vdev_param_mec;
		break;
	case CDP_ENABLE_DA_WAR:
		dp_cdp_err("%pK: da_war_enable %d for vdev(%pK) id(%d)\n",
			   dsoc, val.cdp_vdev_param_da_war, vdev, vdev->vdev_id);
		/* DA WAR is soc-wide; flush the AST table when it changes */
		vdev->pdev->soc->da_war_enabled = val.cdp_vdev_param_da_war;
		dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
					     vdev->pdev->soc));
		break;
	case CDP_ENABLE_NAWDS:
		vdev->nawds_enabled = val.cdp_vdev_param_nawds;
		break;
	case CDP_ENABLE_MCAST_EN:
		vdev->mcast_enhancement_en = val.cdp_vdev_param_mcast_en;
		break;
	case CDP_ENABLE_IGMP_MCAST_EN:
		vdev->igmp_mcast_enhanc_en = val.cdp_vdev_param_igmp_mcast_en;
		break;
	case CDP_ENABLE_PROXYSTA:
		vdev->proxysta_vdev = val.cdp_vdev_param_proxysta;
		break;
	case CDP_UPDATE_TDLS_FLAGS:
		vdev->tdls_link_connected = val.cdp_vdev_param_tdls_flags;
		break;
	case CDP_CFG_WDS_AGING_TIMER:
		/* 0 stops AST aging; a new non-zero value re-arms the timer */
		var = val.cdp_vdev_param_aging_tmr;
		if (!var)
			qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
		else if (var != vdev->wds_aging_timer_val)
			qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, var);

		vdev->wds_aging_timer_val = var;
		break;
	case CDP_ENABLE_AP_BRIDGE:
		/* AP bridging is never enabled on a STA vdev */
		if (wlan_op_mode_sta != vdev->opmode)
			vdev->ap_bridge_enabled = val.cdp_vdev_param_ap_brdg_en;
		else
			vdev->ap_bridge_enabled = false;
		break;
	case CDP_ENABLE_CIPHER:
		vdev->sec_type = val.cdp_vdev_param_cipher_en;
		break;
	case CDP_ENABLE_QWRAP_ISOLATION:
		vdev->isolation_vdev = val.cdp_vdev_param_qwrap_isolation;
		break;
	case CDP_UPDATE_MULTIPASS:
		vdev->multipass_en = val.cdp_vdev_param_update_multipass;
		break;
	case CDP_TX_ENCAP_TYPE:
		vdev->tx_encap_type = val.cdp_vdev_param_tx_encap;
		break;
	case CDP_RX_DECAP_TYPE:
		vdev->rx_decap_type = val.cdp_vdev_param_rx_decap;
		break;
	case CDP_TID_VDEV_PRTY:
		vdev->tidmap_prty = val.cdp_vdev_param_tidmap_prty;
		break;
	case CDP_TIDMAP_TBL_ID:
		vdev->tidmap_tbl_id = val.cdp_vdev_param_tidmap_tbl_id;
		break;
#ifdef MESH_MODE_SUPPORT
	case CDP_MESH_RX_FILTER:
		dp_vdev_set_mesh_rx_filter((struct cdp_vdev *)vdev,
					   val.cdp_vdev_param_mesh_rx_filter);
		break;
	case CDP_MESH_MODE:
		dp_vdev_set_mesh_mode((struct cdp_vdev *)vdev,
				      val.cdp_vdev_param_mesh_mode);
		break;
#endif
	case CDP_ENABLE_HLOS_TID_OVERRIDE:
		dp_info("vdev_id %d enable hlod tid override %d", vdev_id,
			val.cdp_vdev_param_hlos_tid_override);
		dp_vdev_set_hlos_tid_override(vdev,
				val.cdp_vdev_param_hlos_tid_override);
		break;
#ifdef QCA_SUPPORT_WDS_EXTENDED
	case CDP_CFG_WDS_EXT:
		vdev->wds_ext_enabled = val.cdp_vdev_param_wds_ext;
		break;
#endif
	case CDP_ENABLE_PEER_AUTHORIZE:
		vdev->peer_authorize = val.cdp_vdev_param_peer_authorize;
		break;
#ifdef WLAN_SUPPORT_MESH_LATENCY
	case CDP_ENABLE_PEER_TID_LATENCY:
		dp_info("vdev_id %d enable peer tid latency %d", vdev_id,
			val.cdp_vdev_param_peer_tid_latency_enable);
		vdev->peer_tid_latency_enabled =
			val.cdp_vdev_param_peer_tid_latency_enable;
		break;
	case CDP_SET_VAP_MESH_TID:
		dp_info("vdev_id %d enable peer tid latency %d", vdev_id,
			val.cdp_vdev_param_mesh_tid);
		vdev->mesh_tid_latency_config.latency_tid
				= val.cdp_vdev_param_mesh_tid;
		break;
#endif
#ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE
	case CDP_SKIP_BAR_UPDATE_AP:
		dp_info("vdev_id %d skip BAR update: %u", vdev_id,
			val.cdp_skip_bar_update);
		vdev->skip_bar_update = val.cdp_skip_bar_update;
		vdev->skip_bar_update_last_ts = 0;
		break;
#endif
	default:
		break;
	}

	/* Propagate to the arch layer, refresh tx search flags, then
	 * release the vdev reference taken above.
	 */
	dsoc->arch_ops.txrx_set_vdev_param(dsoc, vdev, param, val);
	dp_tx_vdev_update_search_flags((struct dp_vdev *)vdev);
	dp_vdev_unref_delete(dsoc, vdev, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}
9925 
9926 /*
9927  * dp_set_psoc_param: function to set parameters in psoc
9928  * @cdp_soc : DP soc handle
9929  * @param: parameter type to be set
9930  * @val: value of parameter to be set
9931  *
9932  * return: QDF_STATUS
9933  */
9934 static QDF_STATUS
9935 dp_set_psoc_param(struct cdp_soc_t *cdp_soc,
9936 		  enum cdp_psoc_param_type param, cdp_config_param_type val)
9937 {
9938 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
9939 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->wlan_cfg_ctx;
9940 
9941 	switch (param) {
9942 	case CDP_ENABLE_RATE_STATS:
9943 		soc->rdkstats_enabled = val.cdp_psoc_param_en_rate_stats;
9944 		break;
9945 	case CDP_SET_NSS_CFG:
9946 		wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx,
9947 					    val.cdp_psoc_param_en_nss_cfg);
9948 		/*
9949 		 * TODO: masked out based on the per offloaded radio
9950 		 */
9951 		switch (val.cdp_psoc_param_en_nss_cfg) {
9952 		case dp_nss_cfg_default:
9953 			break;
9954 		case dp_nss_cfg_first_radio:
9955 		/*
9956 		 * This configuration is valid for single band radio which
9957 		 * is also NSS offload.
9958 		 */
9959 		case dp_nss_cfg_dbdc:
9960 		case dp_nss_cfg_dbtc:
9961 			wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
9962 			wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
9963 			wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
9964 			wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
9965 			break;
9966 		default:
9967 			dp_cdp_err("%pK: Invalid offload config %d",
9968 				   soc, val.cdp_psoc_param_en_nss_cfg);
9969 		}
9970 
9971 			dp_cdp_err("%pK: nss-wifi<0> nss config is enabled"
9972 				   , soc);
9973 		break;
9974 	case CDP_SET_PREFERRED_HW_MODE:
9975 		soc->preferred_hw_mode = val.cdp_psoc_param_preferred_hw_mode;
9976 		break;
9977 	case CDP_IPA_ENABLE:
9978 		soc->wlan_cfg_ctx->ipa_enabled = val.cdp_ipa_enabled;
9979 		break;
9980 	case CDP_SET_VDEV_STATS_HW_OFFLOAD:
9981 		wlan_cfg_set_vdev_stats_hw_offload_config(wlan_cfg_ctx,
9982 				val.cdp_psoc_param_vdev_stats_hw_offload);
9983 		break;
9984 	case CDP_SAWF_ENABLE:
9985 		wlan_cfg_set_sawf_config(wlan_cfg_ctx, val.cdp_sawf_enabled);
9986 		break;
9987 	default:
9988 		break;
9989 	}
9990 
9991 	return QDF_STATUS_SUCCESS;
9992 }
9993 
9994 /*
9995  * dp_get_psoc_param: function to get parameters in soc
9996  * @cdp_soc : DP soc handle
9997  * @param: parameter type to be set
9998  * @val: address of buffer
9999  *
10000  * return: status
10001  */
10002 static QDF_STATUS dp_get_psoc_param(struct cdp_soc_t *cdp_soc,
10003 				    enum cdp_psoc_param_type param,
10004 				    cdp_config_param_type *val)
10005 {
10006 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
10007 
10008 	if (!soc)
10009 		return QDF_STATUS_E_FAILURE;
10010 
10011 	switch (param) {
10012 	case CDP_CFG_PEER_EXT_STATS:
10013 		val->cdp_psoc_param_pext_stats =
10014 			wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
10015 		break;
10016 	default:
10017 		dp_warn("Invalid param");
10018 		break;
10019 	}
10020 
10021 	return QDF_STATUS_SUCCESS;
10022 }
10023 
10024 /*
10025  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
10026  * @soc: DP_SOC handle
10027  * @vdev_id: id of DP_VDEV handle
10028  * @map_id:ID of map that needs to be updated
10029  *
10030  * Return: QDF_STATUS
10031  */
static QDF_STATUS dp_set_vdev_dscp_tid_map_wifi3(ol_txrx_soc_handle cdp_soc,
						 uint8_t vdev_id,
						 uint8_t map_id)
{
	cdp_config_param_type val;
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	if (vdev) {
		vdev->dscp_tid_map_id = map_id;
		val.cdp_vdev_param_dscp_tid_map_id = map_id;
		/* Let the arch-specific layer apply the new map id */
		soc->arch_ops.txrx_set_vdev_param(soc,
						  vdev,
						  CDP_UPDATE_DSCP_TO_TID_MAP,
						  val);
		/* Update flag for transmit tid classification: SW
		 * classification is skipped only when the map id fits the
		 * HW DSCP-TID map range.
		 */
		if (vdev->dscp_tid_map_id < soc->num_hw_dscp_tid_map)
			vdev->skip_sw_tid_classification |=
				DP_TX_HW_DSCP_TID_MAP_VALID;
		else
			vdev->skip_sw_tid_classification &=
				~DP_TX_HW_DSCP_TID_MAP_VALID;
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_SUCCESS;
	}

	return QDF_STATUS_E_FAILURE;
}
10060 
#ifdef DP_RATETABLE_SUPPORT
/* dp_txrx_get_ratekbps() - resolve a rate (in kbps) from rate parameters
 * @preamb: preamble type
 * @mcs: MCS index
 * @htflag: HT flag (not used by this implementation; kept for API symmetry)
 * @gintval: guard interval value
 *
 * Delegates to dp_getrateindex() with no puncturing; rix/ratecode are
 * computed by the callee but only its return value (rate in kbps,
 * presumably — confirm against dp_ratetable) is propagated.
 */
static int dp_txrx_get_ratekbps(int preamb, int mcs,
				int htflag, int gintval)
{
	uint32_t rix;
	uint16_t ratecode;
	enum PUNCTURED_MODES punc_mode = NO_PUNCTURE;

	return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1,
			       (uint8_t)preamb, 1, punc_mode,
			       &rix, &ratecode);
}
#else
/* Stub when the rate table is not compiled in: always report 0 kbps. */
static int dp_txrx_get_ratekbps(int preamb, int mcs,
				int htflag, int gintval)
{
	return 0;
}
#endif
10080 
10081 /* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
10082  * @soc: DP soc handle
10083  * @pdev_id: id of DP pdev handle
10084  * @pdev_stats: buffer to copy to
10085  *
10086  * return : status success/failure
10087  */
10088 static QDF_STATUS
10089 dp_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
10090 		       struct cdp_pdev_stats *pdev_stats)
10091 {
10092 	struct dp_pdev *pdev =
10093 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10094 						   pdev_id);
10095 	if (!pdev)
10096 		return QDF_STATUS_E_FAILURE;
10097 
10098 	dp_aggregate_pdev_stats(pdev);
10099 
10100 	qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats));
10101 	return QDF_STATUS_SUCCESS;
10102 }
10103 
10104 /* dp_txrx_update_vdev_me_stats(): Update vdev ME stats sent from CDP
10105  * @vdev: DP vdev handle
10106  * @buf: buffer containing specific stats structure
10107  *
10108  * Returns: void
10109  */
10110 static void dp_txrx_update_vdev_me_stats(struct dp_vdev *vdev,
10111 					 void *buf)
10112 {
10113 	struct cdp_tx_ingress_stats *host_stats = NULL;
10114 
10115 	if (!buf) {
10116 		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
10117 		return;
10118 	}
10119 	host_stats = (struct cdp_tx_ingress_stats *)buf;
10120 
10121 	DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt,
10122 			 host_stats->mcast_en.mcast_pkt.num,
10123 			 host_stats->mcast_en.mcast_pkt.bytes);
10124 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error,
10125 		     host_stats->mcast_en.dropped_map_error);
10126 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_self_mac,
10127 		     host_stats->mcast_en.dropped_self_mac);
10128 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_send_fail,
10129 		     host_stats->mcast_en.dropped_send_fail);
10130 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast,
10131 		     host_stats->mcast_en.ucast);
10132 	DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc,
10133 		     host_stats->mcast_en.fail_seg_alloc);
10134 	DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail,
10135 		     host_stats->mcast_en.clone_fail);
10136 }
10137 
10138 /* dp_txrx_update_vdev_igmp_me_stats(): Update vdev IGMP ME stats sent from CDP
10139  * @vdev: DP vdev handle
10140  * @buf: buffer containing specific stats structure
10141  *
10142  * Returns: void
10143  */
10144 static void dp_txrx_update_vdev_igmp_me_stats(struct dp_vdev *vdev,
10145 					      void *buf)
10146 {
10147 	struct cdp_tx_ingress_stats *host_stats = NULL;
10148 
10149 	if (!buf) {
10150 		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
10151 		return;
10152 	}
10153 	host_stats = (struct cdp_tx_ingress_stats *)buf;
10154 
10155 	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_rcvd,
10156 		     host_stats->igmp_mcast_en.igmp_rcvd);
10157 	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_ucast_converted,
10158 		     host_stats->igmp_mcast_en.igmp_ucast_converted);
10159 }
10160 
10161 /* dp_txrx_update_vdev_host_stats(): Update stats sent through CDP
10162  * @soc: DP soc handle
10163  * @vdev_id: id of DP vdev handle
10164  * @buf: buffer containing specific stats structure
10165  * @stats_id: stats type
10166  *
10167  * Returns: QDF_STATUS
10168  */
10169 static QDF_STATUS dp_txrx_update_vdev_host_stats(struct cdp_soc_t *soc_hdl,
10170 						 uint8_t vdev_id,
10171 						 void *buf,
10172 						 uint16_t stats_id)
10173 {
10174 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10175 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10176 						     DP_MOD_ID_CDP);
10177 
10178 	if (!vdev) {
10179 		dp_cdp_err("%pK: Invalid vdev handle", soc);
10180 		return QDF_STATUS_E_FAILURE;
10181 	}
10182 
10183 	switch (stats_id) {
10184 	case DP_VDEV_STATS_PKT_CNT_ONLY:
10185 		break;
10186 	case DP_VDEV_STATS_TX_ME:
10187 		dp_txrx_update_vdev_me_stats(vdev, buf);
10188 		dp_txrx_update_vdev_igmp_me_stats(vdev, buf);
10189 		break;
10190 	default:
10191 		qdf_info("Invalid stats_id %d", stats_id);
10192 		break;
10193 	}
10194 
10195 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10196 	return QDF_STATUS_SUCCESS;
10197 }
10198 
10199 /* dp_txrx_get_peer_stats - will return cdp_peer_stats
10200  * @soc: soc handle
10201  * @vdev_id: id of vdev handle
10202  * @peer_mac: mac of DP_PEER handle
10203  * @peer_stats: buffer to copy to
10204  * return : status success/failure
10205  */
10206 static QDF_STATUS
10207 dp_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
10208 		       uint8_t *peer_mac, struct cdp_peer_stats *peer_stats)
10209 {
10210 	QDF_STATUS status = QDF_STATUS_SUCCESS;
10211 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
10212 						       peer_mac, 0, vdev_id,
10213 						       DP_MOD_ID_CDP);
10214 
10215 	qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats));
10216 
10217 	if (!peer)
10218 		return QDF_STATUS_E_FAILURE;
10219 
10220 	dp_get_peer_stats(peer, peer_stats);
10221 
10222 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10223 
10224 	return status;
10225 }
10226 
10227 /* dp_txrx_get_peer_stats_param - will return specified cdp_peer_stats
10228  * @param soc - soc handle
10229  * @param vdev_id - vdev_id of vdev object
10230  * @param peer_mac - mac address of the peer
10231  * @param type - enum of required stats
10232  * @param buf - buffer to hold the value
10233  * return : status success/failure
10234  */
10235 static QDF_STATUS
10236 dp_txrx_get_peer_stats_param(struct cdp_soc_t *soc, uint8_t vdev_id,
10237 			     uint8_t *peer_mac, enum cdp_peer_stats_type type,
10238 			     cdp_peer_stats_param_t *buf)
10239 {
10240 	QDF_STATUS ret;
10241 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
10242 						      peer_mac, 0, vdev_id,
10243 						      DP_MOD_ID_CDP);
10244 
10245 	if (!peer) {
10246 		dp_peer_err("%pK: Invalid Peer for Mac " QDF_MAC_ADDR_FMT,
10247 			    soc, QDF_MAC_ADDR_REF(peer_mac));
10248 		return QDF_STATUS_E_FAILURE;
10249 	}
10250 
10251 	if (type >= cdp_peer_per_pkt_stats_min &&
10252 	    type < cdp_peer_per_pkt_stats_max) {
10253 		ret = dp_txrx_get_peer_per_pkt_stats_param(peer, type, buf);
10254 	} else if (type >= cdp_peer_extd_stats_min &&
10255 		   type < cdp_peer_extd_stats_max) {
10256 		ret = dp_txrx_get_peer_extd_stats_param(peer, type, buf);
10257 	} else {
10258 		dp_err("%pK: Invalid stat type requested", soc);
10259 		ret = QDF_STATUS_E_FAILURE;
10260 	}
10261 
10262 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10263 
10264 	return ret;
10265 }
10266 
/* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
 * @soc: soc handle
 * @vdev_id: id of vdev handle
 * @peer_mac: mac of DP_PEER handle
 *
 * Clears the DP stats and the txrx-peer stats of the addressed peer.
 * In the MLO build, when the resolved peer is an MLD peer, the clear is
 * additionally fanned out to every link peer (each on its own soc).
 *
 * return : QDF_STATUS
 */
#ifdef WLAN_FEATURE_11BE_MLO
static QDF_STATUS
dp_txrx_reset_peer_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			 uint8_t *peer_mac)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_peer *peer =
			dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
						       vdev_id, DP_MOD_ID_CDP);

	if (!peer)
		return QDF_STATUS_E_FAILURE;

	DP_STATS_CLR(peer);
	dp_txrx_peer_stats_clr(peer->txrx_peer);

	if (IS_MLO_DP_MLD_PEER(peer)) {
		uint8_t i;
		struct dp_peer *link_peer;
		struct dp_soc *link_peer_soc;
		struct dp_mld_link_peers link_peers_info;

		/* take refs on all link peers, clear each, then release */
		dp_get_link_peers_ref_from_mld_peer(soc, peer,
						    &link_peers_info,
						    DP_MOD_ID_CDP);
		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			/* each link peer may live on a different soc */
			link_peer_soc = link_peer->vdev->pdev->soc;

			DP_STATS_CLR(link_peer);
			dp_monitor_peer_reset_stats(link_peer_soc, link_peer);
		}

		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
	} else {
		dp_monitor_peer_reset_stats(soc, peer);
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return status;
}
#else
static QDF_STATUS
dp_txrx_reset_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
			 uint8_t *peer_mac)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
						      peer_mac, 0, vdev_id,
						      DP_MOD_ID_CDP);

	if (!peer)
		return QDF_STATUS_E_FAILURE;

	DP_STATS_CLR(peer);
	dp_txrx_peer_stats_clr(peer->txrx_peer);
	dp_monitor_peer_reset_stats((struct dp_soc *)soc, peer);

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return status;
}
#endif
10339 
10340 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
10341  * @vdev_handle: DP_VDEV handle
10342  * @buf: buffer for vdev stats
10343  *
10344  * return : int
10345  */
10346 static int dp_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
10347 				  void *buf, bool is_aggregate)
10348 {
10349 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10350 	struct cdp_vdev_stats *vdev_stats;
10351 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10352 						     DP_MOD_ID_CDP);
10353 
10354 	if (!vdev)
10355 		return 1;
10356 
10357 	vdev_stats = (struct cdp_vdev_stats *)buf;
10358 
10359 	if (is_aggregate) {
10360 		dp_aggregate_vdev_stats(vdev, buf);
10361 	} else {
10362 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
10363 	}
10364 
10365 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10366 	return 0;
10367 }
10368 
10369 /*
10370  * dp_get_total_per(): get total per
10371  * @soc: DP soc handle
10372  * @pdev_id: id of DP_PDEV handle
10373  *
10374  * Return: % error rate using retries per packet and success packets
10375  */
10376 static int dp_get_total_per(struct cdp_soc_t *soc, uint8_t pdev_id)
10377 {
10378 	struct dp_pdev *pdev =
10379 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10380 						   pdev_id);
10381 
10382 	if (!pdev)
10383 		return 0;
10384 
10385 	dp_aggregate_pdev_stats(pdev);
10386 	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
10387 		return 0;
10388 	return ((pdev->stats.tx.retries * 100) /
10389 		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
10390 }
10391 
10392 /*
10393  * dp_txrx_stats_publish(): publish pdev stats into a buffer
10394  * @soc: DP soc handle
10395  * @pdev_id: id of DP_PDEV handle
10396  * @buf: to hold pdev_stats
10397  *
10398  * Return: int
10399  */
10400 static int
10401 dp_txrx_stats_publish(struct cdp_soc_t *soc, uint8_t pdev_id,
10402 		      struct cdp_stats_extd *buf)
10403 {
10404 	struct cdp_txrx_stats_req req = {0,};
10405 	struct dp_pdev *pdev =
10406 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10407 						   pdev_id);
10408 
10409 	if (!pdev)
10410 		return TXRX_STATS_LEVEL_OFF;
10411 
10412 	dp_aggregate_pdev_stats(pdev);
10413 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
10414 	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
10415 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
10416 				req.param1, req.param2, req.param3, 0,
10417 				req.cookie_val, 0);
10418 
10419 	msleep(DP_MAX_SLEEP_TIME);
10420 
10421 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
10422 	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
10423 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
10424 				req.param1, req.param2, req.param3, 0,
10425 				req.cookie_val, 0);
10426 
10427 	msleep(DP_MAX_SLEEP_TIME);
10428 	qdf_mem_copy(buf, &pdev->stats, sizeof(struct cdp_pdev_stats));
10429 
10430 	return TXRX_STATS_LEVEL;
10431 }
10432 
10433 /**
10434  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
10435  * @soc: soc handle
10436  * @pdev_id: id of DP_PDEV handle
10437  * @map_id: ID of map that needs to be updated
10438  * @tos: index value in map
10439  * @tid: tid value passed by the user
10440  *
10441  * Return: QDF_STATUS
10442  */
10443 static QDF_STATUS
10444 dp_set_pdev_dscp_tid_map_wifi3(struct cdp_soc_t *soc_handle,
10445 			       uint8_t pdev_id,
10446 			       uint8_t map_id,
10447 			       uint8_t tos, uint8_t tid)
10448 {
10449 	uint8_t dscp;
10450 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
10451 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10452 
10453 	if (!pdev)
10454 		return QDF_STATUS_E_FAILURE;
10455 
10456 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
10457 	pdev->dscp_tid_map[map_id][dscp] = tid;
10458 
10459 	if (map_id < soc->num_hw_dscp_tid_map)
10460 		hal_tx_update_dscp_tid(soc->hal_soc, tid,
10461 				       map_id, dscp);
10462 	else
10463 		return QDF_STATUS_E_FAILURE;
10464 
10465 	return QDF_STATUS_SUCCESS;
10466 }
10467 
10468 #ifdef WLAN_SYSFS_DP_STATS
10469 /*
10470  * dp_sysfs_event_trigger(): Trigger event to wait for firmware
10471  * stats request response.
10472  * @soc: soc handle
10473  * @cookie_val: cookie value
10474  *
10475  * @Return: QDF_STATUS
10476  */
10477 static QDF_STATUS
10478 dp_sysfs_event_trigger(struct dp_soc *soc, uint32_t cookie_val)
10479 {
10480 	QDF_STATUS status = QDF_STATUS_SUCCESS;
10481 	/* wait for firmware response for sysfs stats request */
10482 	if (cookie_val == DBG_SYSFS_STATS_COOKIE) {
10483 		if (!soc) {
10484 			dp_cdp_err("soc is NULL");
10485 			return QDF_STATUS_E_FAILURE;
10486 		}
10487 		/* wait for event completion */
10488 		status = qdf_wait_single_event(&soc->sysfs_config->sysfs_txrx_fw_request_done,
10489 					       WLAN_SYSFS_STAT_REQ_WAIT_MS);
10490 		if (status == QDF_STATUS_SUCCESS)
10491 			dp_cdp_info("sysfs_txrx_fw_request_done event completed");
10492 		else if (status == QDF_STATUS_E_TIMEOUT)
10493 			dp_cdp_warn("sysfs_txrx_fw_request_done event expired");
10494 		else
10495 			dp_cdp_warn("sysfs_txrx_fw_request_done event erro code %d", status);
10496 	}
10497 
10498 	return status;
10499 }
10500 #else /* WLAN_SYSFS_DP_STATS */
10501 /*
10502  * dp_sysfs_event_trigger(): Trigger event to wait for firmware
10503  * stats request response.
10504  * @soc: soc handle
10505  * @cookie_val: cookie value
10506  *
10507  * @Return: QDF_STATUS
10508  */
10509 static QDF_STATUS
10510 dp_sysfs_event_trigger(struct dp_soc *soc, uint32_t cookie_val)
10511 {
10512 	return QDF_STATUS_SUCCESS;
10513 }
10514 #endif /* WLAN_SYSFS_DP_STATS */
10515 
10516 /**
10517  * dp_fw_stats_process(): Process TXRX FW stats request.
10518  * @vdev_handle: DP VDEV handle
10519  * @req: stats request
10520  *
10521  * return: QDF_STATUS
10522  */
10523 static QDF_STATUS
10524 dp_fw_stats_process(struct dp_vdev *vdev,
10525 		    struct cdp_txrx_stats_req *req)
10526 {
10527 	struct dp_pdev *pdev = NULL;
10528 	struct dp_soc *soc = NULL;
10529 	uint32_t stats = req->stats;
10530 	uint8_t mac_id = req->mac_id;
10531 	uint32_t cookie_val = DBG_STATS_COOKIE_DEFAULT;
10532 
10533 	if (!vdev) {
10534 		DP_TRACE(NONE, "VDEV not found");
10535 		return QDF_STATUS_E_FAILURE;
10536 	}
10537 
10538 	pdev = vdev->pdev;
10539 	if (!pdev) {
10540 		DP_TRACE(NONE, "PDEV not found");
10541 		return QDF_STATUS_E_FAILURE;
10542 	}
10543 
10544 	soc = pdev->soc;
10545 	if (!soc) {
10546 		DP_TRACE(NONE, "soc not found");
10547 		return QDF_STATUS_E_FAILURE;
10548 	}
10549 
10550 	/* In case request is from host sysfs for displaying stats on console */
10551 	if (req->cookie_val == DBG_SYSFS_STATS_COOKIE)
10552 		cookie_val = DBG_SYSFS_STATS_COOKIE;
10553 
10554 	/*
10555 	 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
10556 	 * from param0 to param3 according to below rule:
10557 	 *
10558 	 * PARAM:
10559 	 *   - config_param0 : start_offset (stats type)
10560 	 *   - config_param1 : stats bmask from start offset
10561 	 *   - config_param2 : stats bmask from start offset + 32
10562 	 *   - config_param3 : stats bmask from start offset + 64
10563 	 */
10564 	if (req->stats == CDP_TXRX_STATS_0) {
10565 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
10566 		req->param1 = 0xFFFFFFFF;
10567 		req->param2 = 0xFFFFFFFF;
10568 		req->param3 = 0xFFFFFFFF;
10569 	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
10570 		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
10571 	}
10572 
10573 	if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT) {
10574 		dp_h2t_ext_stats_msg_send(pdev,
10575 					  HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT,
10576 					  req->param0, req->param1, req->param2,
10577 					  req->param3, 0, cookie_val,
10578 					  mac_id);
10579 	} else {
10580 		dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
10581 					  req->param1, req->param2, req->param3,
10582 					  0, cookie_val, mac_id);
10583 	}
10584 
10585 	dp_sysfs_event_trigger(soc, cookie_val);
10586 
10587 	return QDF_STATUS_SUCCESS;
10588 }
10589 
10590 /**
10591  * dp_txrx_stats_request - function to map to firmware and host stats
10592  * @soc: soc handle
10593  * @vdev_id: virtual device ID
10594  * @req: stats request
10595  *
10596  * Return: QDF_STATUS
10597  */
10598 static
10599 QDF_STATUS dp_txrx_stats_request(struct cdp_soc_t *soc_handle,
10600 				 uint8_t vdev_id,
10601 				 struct cdp_txrx_stats_req *req)
10602 {
10603 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_handle);
10604 	int host_stats;
10605 	int fw_stats;
10606 	enum cdp_stats stats;
10607 	int num_stats;
10608 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10609 						     DP_MOD_ID_CDP);
10610 	QDF_STATUS status = QDF_STATUS_E_INVAL;
10611 
10612 	if (!vdev || !req) {
10613 		dp_cdp_err("%pK: Invalid vdev/req instance", soc);
10614 		status = QDF_STATUS_E_INVAL;
10615 		goto fail0;
10616 	}
10617 
10618 	if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) {
10619 		dp_err("Invalid mac id request");
10620 		status = QDF_STATUS_E_INVAL;
10621 		goto fail0;
10622 	}
10623 
10624 	stats = req->stats;
10625 	if (stats >= CDP_TXRX_MAX_STATS) {
10626 		status = QDF_STATUS_E_INVAL;
10627 		goto fail0;
10628 	}
10629 
10630 	/*
10631 	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
10632 	 *			has to be updated if new FW HTT stats added
10633 	 */
10634 	if (stats > CDP_TXRX_STATS_HTT_MAX)
10635 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
10636 
10637 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
10638 
10639 	if (stats >= num_stats) {
10640 		dp_cdp_err("%pK : Invalid stats option: %d", soc, stats);
10641 		status = QDF_STATUS_E_INVAL;
10642 		goto fail0;
10643 	}
10644 
10645 	req->stats = stats;
10646 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
10647 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
10648 
10649 	dp_info("stats: %u fw_stats_type: %d host_stats: %d",
10650 		stats, fw_stats, host_stats);
10651 
10652 	if (fw_stats != TXRX_FW_STATS_INVALID) {
10653 		/* update request with FW stats type */
10654 		req->stats = fw_stats;
10655 		status = dp_fw_stats_process(vdev, req);
10656 	} else if ((host_stats != TXRX_HOST_STATS_INVALID) &&
10657 			(host_stats <= TXRX_HOST_STATS_MAX))
10658 		status = dp_print_host_stats(vdev, req, soc);
10659 	else
10660 		dp_cdp_info("%pK: Wrong Input for TxRx Stats", soc);
10661 fail0:
10662 	if (vdev)
10663 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10664 	return status;
10665 }
10666 
10667 /*
10668  * dp_txrx_dump_stats() -  Dump statistics
10669  * @value - Statistics option
10670  */
10671 static QDF_STATUS dp_txrx_dump_stats(struct cdp_soc_t *psoc, uint16_t value,
10672 				     enum qdf_stats_verbosity_level level)
10673 {
10674 	struct dp_soc *soc =
10675 		(struct dp_soc *)psoc;
10676 	QDF_STATUS status = QDF_STATUS_SUCCESS;
10677 
10678 	if (!soc) {
10679 		dp_cdp_err("%pK: soc is NULL", soc);
10680 		return QDF_STATUS_E_INVAL;
10681 	}
10682 
10683 	switch (value) {
10684 	case CDP_TXRX_PATH_STATS:
10685 		dp_txrx_path_stats(soc);
10686 		dp_print_soc_interrupt_stats(soc);
10687 		hal_dump_reg_write_stats(soc->hal_soc);
10688 		break;
10689 
10690 	case CDP_RX_RING_STATS:
10691 		dp_print_per_ring_stats(soc);
10692 		break;
10693 
10694 	case CDP_TXRX_TSO_STATS:
10695 		dp_print_tso_stats(soc, level);
10696 		break;
10697 
10698 	case CDP_DUMP_TX_FLOW_POOL_INFO:
10699 		if (level == QDF_STATS_VERBOSITY_LEVEL_HIGH)
10700 			cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
10701 		else
10702 			dp_tx_dump_flow_pool_info_compact(soc);
10703 		break;
10704 
10705 	case CDP_DP_NAPI_STATS:
10706 		dp_print_napi_stats(soc);
10707 		break;
10708 
10709 	case CDP_TXRX_DESC_STATS:
10710 		/* TODO: NOT IMPLEMENTED */
10711 		break;
10712 
10713 	case CDP_DP_RX_FISA_STATS:
10714 		dp_rx_dump_fisa_stats(soc);
10715 		break;
10716 
10717 	case CDP_DP_SWLM_STATS:
10718 		dp_print_swlm_stats(soc);
10719 		break;
10720 
10721 	default:
10722 		status = QDF_STATUS_E_INVAL;
10723 		break;
10724 	}
10725 
10726 	return status;
10727 
10728 }
10729 
10730 #ifdef WLAN_SYSFS_DP_STATS
10731 static
10732 void dp_sysfs_get_stat_type(struct dp_soc *soc, uint32_t *mac_id,
10733 			    uint32_t *stat_type)
10734 {
10735 	qdf_spinlock_acquire(&soc->sysfs_config->rw_stats_lock);
10736 	*stat_type = soc->sysfs_config->stat_type_requested;
10737 	*mac_id   = soc->sysfs_config->mac_id;
10738 
10739 	qdf_spinlock_release(&soc->sysfs_config->rw_stats_lock);
10740 }
10741 
10742 static
10743 void dp_sysfs_update_config_buf_params(struct dp_soc *soc,
10744 				       uint32_t curr_len,
10745 				       uint32_t max_buf_len,
10746 				       char *buf)
10747 {
10748 	qdf_spinlock_acquire(&soc->sysfs_config->sysfs_write_user_buffer);
10749 	/* set sysfs_config parameters */
10750 	soc->sysfs_config->buf = buf;
10751 	soc->sysfs_config->curr_buffer_length = curr_len;
10752 	soc->sysfs_config->max_buffer_length = max_buf_len;
10753 	qdf_spinlock_release(&soc->sysfs_config->sysfs_write_user_buffer);
10754 }
10755 
10756 static
10757 QDF_STATUS dp_sysfs_fill_stats(ol_txrx_soc_handle soc_hdl,
10758 			       char *buf, uint32_t buf_size)
10759 {
10760 	uint32_t mac_id = 0;
10761 	uint32_t stat_type = 0;
10762 	uint32_t fw_stats = 0;
10763 	uint32_t host_stats = 0;
10764 	enum cdp_stats stats;
10765 	struct cdp_txrx_stats_req req;
10766 	struct dp_soc *soc = NULL;
10767 
10768 	if (!soc_hdl) {
10769 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
10770 		return QDF_STATUS_E_INVAL;
10771 	}
10772 
10773 	soc = cdp_soc_t_to_dp_soc(soc_hdl);
10774 
10775 	if (!soc) {
10776 		dp_cdp_err("%pK: soc is NULL", soc);
10777 		return QDF_STATUS_E_INVAL;
10778 	}
10779 
10780 	dp_sysfs_get_stat_type(soc, &mac_id, &stat_type);
10781 
10782 	stats = stat_type;
10783 	if (stats >= CDP_TXRX_MAX_STATS) {
10784 		dp_cdp_info("sysfs stat type requested is invalid");
10785 		return QDF_STATUS_E_INVAL;
10786 	}
10787 	/*
10788 	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
10789 	 *			has to be updated if new FW HTT stats added
10790 	 */
10791 	if (stats > CDP_TXRX_MAX_STATS)
10792 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
10793 
10794 	/* build request */
10795 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
10796 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
10797 
10798 	req.stats = stat_type;
10799 	req.mac_id = mac_id;
10800 	/* request stats to be printed */
10801 	qdf_mutex_acquire(&soc->sysfs_config->sysfs_read_lock);
10802 
10803 	if (fw_stats != TXRX_FW_STATS_INVALID) {
10804 		/* update request with FW stats type */
10805 		req.cookie_val = DBG_SYSFS_STATS_COOKIE;
10806 	} else if ((host_stats != TXRX_HOST_STATS_INVALID) &&
10807 			(host_stats <= TXRX_HOST_STATS_MAX)) {
10808 		req.cookie_val = DBG_STATS_COOKIE_DEFAULT;
10809 		soc->sysfs_config->process_id = qdf_get_current_pid();
10810 		soc->sysfs_config->printing_mode = PRINTING_MODE_ENABLED;
10811 	}
10812 
10813 	dp_sysfs_update_config_buf_params(soc, 0, buf_size, buf);
10814 
10815 	dp_txrx_stats_request(soc_hdl, mac_id, &req);
10816 	soc->sysfs_config->process_id = 0;
10817 	soc->sysfs_config->printing_mode = PRINTING_MODE_DISABLED;
10818 
10819 	dp_sysfs_update_config_buf_params(soc, 0, 0, NULL);
10820 
10821 	qdf_mutex_release(&soc->sysfs_config->sysfs_read_lock);
10822 	return QDF_STATUS_SUCCESS;
10823 }
10824 
10825 static
10826 QDF_STATUS dp_sysfs_set_stat_type(ol_txrx_soc_handle soc_hdl,
10827 				  uint32_t stat_type, uint32_t mac_id)
10828 {
10829 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10830 
10831 	if (!soc_hdl) {
10832 		dp_cdp_err("%pK: soc is NULL", soc);
10833 		return QDF_STATUS_E_INVAL;
10834 	}
10835 
10836 	qdf_spinlock_acquire(&soc->sysfs_config->rw_stats_lock);
10837 
10838 	soc->sysfs_config->stat_type_requested = stat_type;
10839 	soc->sysfs_config->mac_id = mac_id;
10840 
10841 	qdf_spinlock_release(&soc->sysfs_config->rw_stats_lock);
10842 
10843 	return QDF_STATUS_SUCCESS;
10844 }
10845 
10846 static
10847 QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl)
10848 {
10849 	struct dp_soc *soc;
10850 	QDF_STATUS status;
10851 
10852 	if (!soc_hdl) {
10853 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
10854 		return QDF_STATUS_E_INVAL;
10855 	}
10856 
10857 	soc = soc_hdl;
10858 
10859 	soc->sysfs_config = qdf_mem_malloc(sizeof(struct sysfs_stats_config));
10860 	if (!soc->sysfs_config) {
10861 		dp_cdp_err("failed to allocate memory for sysfs_config no memory");
10862 		return QDF_STATUS_E_NOMEM;
10863 	}
10864 
10865 	status = qdf_event_create(&soc->sysfs_config->sysfs_txrx_fw_request_done);
10866 	/* create event for fw stats request from sysfs */
10867 	if (status != QDF_STATUS_SUCCESS) {
10868 		dp_cdp_err("failed to create event sysfs_txrx_fw_request_done");
10869 		qdf_mem_free(soc->sysfs_config);
10870 		soc->sysfs_config = NULL;
10871 		return QDF_STATUS_E_FAILURE;
10872 	}
10873 
10874 	qdf_spinlock_create(&soc->sysfs_config->rw_stats_lock);
10875 	qdf_mutex_create(&soc->sysfs_config->sysfs_read_lock);
10876 	qdf_spinlock_create(&soc->sysfs_config->sysfs_write_user_buffer);
10877 
10878 	return QDF_STATUS_SUCCESS;
10879 }
10880 
10881 static
10882 QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl)
10883 {
10884 	struct dp_soc *soc;
10885 	QDF_STATUS status;
10886 
10887 	if (!soc_hdl) {
10888 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
10889 		return QDF_STATUS_E_INVAL;
10890 	}
10891 
10892 	soc = soc_hdl;
10893 	if (!soc->sysfs_config) {
10894 		dp_cdp_err("soc->sysfs_config is NULL");
10895 		return QDF_STATUS_E_FAILURE;
10896 	}
10897 
10898 	status = qdf_event_destroy(&soc->sysfs_config->sysfs_txrx_fw_request_done);
10899 	if (status != QDF_STATUS_SUCCESS)
10900 		dp_cdp_err("Failed to detroy event sysfs_txrx_fw_request_done ");
10901 
10902 	qdf_mutex_destroy(&soc->sysfs_config->sysfs_read_lock);
10903 	qdf_spinlock_destroy(&soc->sysfs_config->rw_stats_lock);
10904 	qdf_spinlock_destroy(&soc->sysfs_config->sysfs_write_user_buffer);
10905 
10906 	qdf_mem_free(soc->sysfs_config);
10907 
10908 	return QDF_STATUS_SUCCESS;
10909 }
10910 
#else /* WLAN_SYSFS_DP_STATS */

/* No-op stub when sysfs DP stats support is compiled out. */
static
QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl)
{
	return QDF_STATUS_SUCCESS;
}

/* No-op stub when sysfs DP stats support is compiled out. */
static
QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* WLAN_SYSFS_DP_STATS */
10925 
10926 /**
10927  * dp_txrx_clear_dump_stats() - clear dumpStats
10928  * @soc- soc handle
10929  * @value - stats option
10930  *
10931  * Return: 0 - Success, non-zero - failure
10932  */
10933 static
10934 QDF_STATUS dp_txrx_clear_dump_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
10935 				    uint8_t value)
10936 {
10937 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10938 	QDF_STATUS status = QDF_STATUS_SUCCESS;
10939 
10940 	if (!soc) {
10941 		dp_err("soc is NULL");
10942 		return QDF_STATUS_E_INVAL;
10943 	}
10944 
10945 	switch (value) {
10946 	case CDP_TXRX_TSO_STATS:
10947 		dp_txrx_clear_tso_stats(soc);
10948 		break;
10949 
10950 	default:
10951 		status = QDF_STATUS_E_INVAL;
10952 		break;
10953 	}
10954 
10955 	return status;
10956 }
10957 
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_update_flow_control_parameters() - copy TX flow-control thresholds
 *                            from the CDP config into the soc wlan_cfg
 * @soc: soc handle
 * @params: CDP config parameter block
 *
 * Return: void
 */
static inline
void dp_update_flow_control_parameters(struct dp_soc *soc,
				struct cdp_config_params *params)
{
	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
					params->tx_flow_stop_queue_threshold;
	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
					params->tx_flow_start_queue_offset;
}
#else
/* No-op stub when TX flow control v2 is compiled out. */
static inline
void dp_update_flow_control_parameters(struct dp_soc *soc,
				struct cdp_config_params *params)
{
}
#endif
10983 
10984 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
10985 /* Max packet limit for TX Comp packet loop (dp_tx_comp_handler) */
10986 #define DP_TX_COMP_LOOP_PKT_LIMIT_MAX 1024
10987 
10988 /* Max packet limit for RX REAP Loop (dp_rx_process) */
10989 #define DP_RX_REAP_LOOP_PKT_LIMIT_MAX 1024
10990 
10991 static
10992 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
10993 					struct cdp_config_params *params)
10994 {
10995 	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit =
10996 				params->tx_comp_loop_pkt_limit;
10997 
10998 	if (params->tx_comp_loop_pkt_limit < DP_TX_COMP_LOOP_PKT_LIMIT_MAX)
10999 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = true;
11000 	else
11001 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = false;
11002 
11003 	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit =
11004 				params->rx_reap_loop_pkt_limit;
11005 
11006 	if (params->rx_reap_loop_pkt_limit < DP_RX_REAP_LOOP_PKT_LIMIT_MAX)
11007 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = true;
11008 	else
11009 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = false;
11010 
11011 	soc->wlan_cfg_ctx->rx_hp_oos_update_limit =
11012 				params->rx_hp_oos_update_limit;
11013 
11014 	dp_info("tx_comp_loop_pkt_limit %u tx_comp_enable_eol_data_check %u rx_reap_loop_pkt_limit %u rx_enable_eol_data_check %u rx_hp_oos_update_limit %u",
11015 		soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit,
11016 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check,
11017 		soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit,
11018 		soc->wlan_cfg_ctx->rx_enable_eol_data_check,
11019 		soc->wlan_cfg_ctx->rx_hp_oos_update_limit);
11020 }
11021 
/**
 * dp_update_soft_irq_limits() - override the per-loop TX completion and
 *	RX reap packet limits
 * @soc: datapath soc handle
 * @tx_limit: max packets reaped per TX completion loop
 * @rx_limit: max packets reaped per RX processing loop
 *
 * Return: none
 */
static void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
				      uint32_t rx_limit)
{
	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit = tx_limit;
	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit = rx_limit;
}
11028 
11029 #else
/* Stub: WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT is compiled out; soft-irq
 * reap limits stay at their defaults.
 */
static inline
void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
					struct cdp_config_params *params)
{ }
11034 
/* Stub: WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT is compiled out; limit
 * overrides are ignored.
 */
static inline
void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
			       uint32_t rx_limit)
{
}
11040 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
11041 
11042 /**
11043  * dp_update_config_parameters() - API to store datapath
11044  *                            config parameters
11045  * @soc: soc handle
11046  * @cfg: ini parameter handle
11047  *
11048  * Return: status
11049  */
11050 static
11051 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
11052 				struct cdp_config_params *params)
11053 {
11054 	struct dp_soc *soc = (struct dp_soc *)psoc;
11055 
11056 	if (!(soc)) {
11057 		dp_cdp_err("%pK: Invalid handle", soc);
11058 		return QDF_STATUS_E_INVAL;
11059 	}
11060 
11061 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
11062 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
11063 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
11064 	soc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload =
11065 				params->p2p_tcp_udp_checksumoffload;
11066 	soc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload =
11067 				params->nan_tcp_udp_checksumoffload;
11068 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
11069 				params->tcp_udp_checksumoffload;
11070 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
11071 	soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
11072 	soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
11073 
11074 	dp_update_rx_soft_irq_limit_params(soc, params);
11075 	dp_update_flow_control_parameters(soc, params);
11076 
11077 	return QDF_STATUS_SUCCESS;
11078 }
11079 
/* WDS ops table registered with CDP; vendor-extension hooks are only
 * present when WDS_VENDOR_EXTENSION is compiled in.
 */
static struct cdp_wds_ops dp_ops_wds = {
	.vdev_set_wds = dp_vdev_set_wds,
#ifdef WDS_VENDOR_EXTENSION
	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
#endif
};
11087 
11088 /*
11089  * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
11090  * @soc_hdl - datapath soc handle
11091  * @vdev_id - virtual interface id
11092  * @callback - callback function
11093  * @ctxt: callback context
11094  *
11095  */
11096 static void
11097 dp_txrx_data_tx_cb_set(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
11098 		       ol_txrx_data_tx_cb callback, void *ctxt)
11099 {
11100 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11101 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11102 						     DP_MOD_ID_CDP);
11103 
11104 	if (!vdev)
11105 		return;
11106 
11107 	vdev->tx_non_std_data_callback.func = callback;
11108 	vdev->tx_non_std_data_callback.ctxt = ctxt;
11109 
11110 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11111 }
11112 
11113 /**
11114  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
11115  * @soc: datapath soc handle
11116  * @pdev_id: id of datapath pdev handle
11117  *
11118  * Return: opaque pointer to dp txrx handle
11119  */
11120 static void *dp_pdev_get_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id)
11121 {
11122 	struct dp_pdev *pdev =
11123 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11124 						   pdev_id);
11125 	if (qdf_unlikely(!pdev))
11126 		return NULL;
11127 
11128 	return pdev->dp_txrx_handle;
11129 }
11130 
11131 /**
11132  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
11133  * @soc: datapath soc handle
11134  * @pdev_id: id of datapath pdev handle
11135  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
11136  *
11137  * Return: void
11138  */
11139 static void
11140 dp_pdev_set_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id,
11141 			   void *dp_txrx_hdl)
11142 {
11143 	struct dp_pdev *pdev =
11144 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11145 						   pdev_id);
11146 
11147 	if (!pdev)
11148 		return;
11149 
11150 	pdev->dp_txrx_handle = dp_txrx_hdl;
11151 }
11152 
11153 /**
11154  * dp_vdev_get_dp_ext_handle() - get dp handle from vdev
11155  * @soc: datapath soc handle
11156  * @vdev_id: vdev id
11157  *
11158  * Return: opaque pointer to dp txrx handle
11159  */
11160 static void *dp_vdev_get_dp_ext_handle(ol_txrx_soc_handle soc_hdl,
11161 				       uint8_t vdev_id)
11162 {
11163 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11164 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11165 						     DP_MOD_ID_CDP);
11166 	void *dp_ext_handle;
11167 
11168 	if (!vdev)
11169 		return NULL;
11170 	dp_ext_handle = vdev->vdev_dp_ext_handle;
11171 
11172 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11173 	return dp_ext_handle;
11174 }
11175 
11176 /**
11177  * dp_vdev_set_dp_ext_handle() - set dp handle in vdev
11178  * @soc: datapath soc handle
11179  * @vdev_id: vdev id
11180  * @size: size of advance dp handle
11181  *
11182  * Return: QDF_STATUS
11183  */
11184 static QDF_STATUS
11185 dp_vdev_set_dp_ext_handle(ol_txrx_soc_handle soc_hdl, uint8_t vdev_id,
11186 			  uint16_t size)
11187 {
11188 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11189 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11190 						     DP_MOD_ID_CDP);
11191 	void *dp_ext_handle;
11192 
11193 	if (!vdev)
11194 		return QDF_STATUS_E_FAILURE;
11195 
11196 	dp_ext_handle = qdf_mem_malloc(size);
11197 
11198 	if (!dp_ext_handle) {
11199 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11200 		return QDF_STATUS_E_FAILURE;
11201 	}
11202 
11203 	vdev->vdev_dp_ext_handle = dp_ext_handle;
11204 
11205 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11206 	return QDF_STATUS_SUCCESS;
11207 }
11208 
11209 /**
11210  * dp_vdev_inform_ll_conn() - Inform vdev to add/delete a latency critical
11211  *			      connection for this vdev
11212  * @soc_hdl: CDP soc handle
11213  * @vdev_id: vdev ID
11214  * @action: Add/Delete action
11215  *
11216  * Returns: QDF_STATUS.
11217  */
11218 static QDF_STATUS
11219 dp_vdev_inform_ll_conn(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
11220 		       enum vdev_ll_conn_actions action)
11221 {
11222 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11223 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11224 						     DP_MOD_ID_CDP);
11225 
11226 	if (!vdev) {
11227 		dp_err("LL connection action for invalid vdev %d", vdev_id);
11228 		return QDF_STATUS_E_FAILURE;
11229 	}
11230 
11231 	switch (action) {
11232 	case CDP_VDEV_LL_CONN_ADD:
11233 		vdev->num_latency_critical_conn++;
11234 		break;
11235 
11236 	case CDP_VDEV_LL_CONN_DEL:
11237 		vdev->num_latency_critical_conn--;
11238 		break;
11239 
11240 	default:
11241 		dp_err("LL connection action invalid %d", action);
11242 		break;
11243 	}
11244 
11245 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11246 	return QDF_STATUS_SUCCESS;
11247 }
11248 
11249 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
11250 /**
11251  * dp_soc_set_swlm_enable() - Enable/Disable SWLM if initialized.
11252  * @soc_hdl: CDP Soc handle
11253  * @value: Enable/Disable value
11254  *
11255  * Returns: QDF_STATUS
11256  */
11257 static QDF_STATUS dp_soc_set_swlm_enable(struct cdp_soc_t *soc_hdl,
11258 					 uint8_t value)
11259 {
11260 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11261 
11262 	if (!soc->swlm.is_init) {
11263 		dp_err("SWLM is not initialized");
11264 		return QDF_STATUS_E_FAILURE;
11265 	}
11266 
11267 	soc->swlm.is_enabled = !!value;
11268 
11269 	return QDF_STATUS_SUCCESS;
11270 }
11271 
11272 /**
11273  * dp_soc_is_swlm_enabled() - Check if SWLM is enabled.
11274  * @soc_hdl: CDP Soc handle
11275  *
11276  * Returns: QDF_STATUS
11277  */
11278 static uint8_t dp_soc_is_swlm_enabled(struct cdp_soc_t *soc_hdl)
11279 {
11280 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11281 
11282 	return soc->swlm.is_enabled;
11283 }
11284 #endif
11285 
11286 /**
11287  * dp_display_srng_info() - Dump the srng HP TP info
11288  * @soc_hdl: CDP Soc handle
11289  *
11290  * This function dumps the SW hp/tp values for the important rings.
11291  * HW hp/tp values are not being dumped, since it can lead to
11292  * READ NOC error when UMAC is in low power state. MCC does not have
11293  * device force wake working yet.
11294  *
11295  * Return: none
11296  */
11297 static void dp_display_srng_info(struct cdp_soc_t *soc_hdl)
11298 {
11299 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11300 	hal_soc_handle_t hal_soc = soc->hal_soc;
11301 	uint32_t hp, tp, i;
11302 
11303 	dp_info("SRNG HP-TP data:");
11304 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
11305 		hal_get_sw_hptp(hal_soc, soc->tcl_data_ring[i].hal_srng,
11306 				&tp, &hp);
11307 		dp_info("TCL DATA ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
11308 
11309 		hal_get_sw_hptp(hal_soc, soc->tx_comp_ring[i].hal_srng,
11310 				&tp, &hp);
11311 		dp_info("TX comp ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
11312 	}
11313 
11314 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
11315 		hal_get_sw_hptp(hal_soc, soc->reo_dest_ring[i].hal_srng,
11316 				&tp, &hp);
11317 		dp_info("REO DST ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
11318 	}
11319 
11320 	hal_get_sw_hptp(hal_soc, soc->reo_exception_ring.hal_srng, &tp, &hp);
11321 	dp_info("REO exception ring: hp=0x%x, tp=0x%x", hp, tp);
11322 
11323 	hal_get_sw_hptp(hal_soc, soc->rx_rel_ring.hal_srng, &tp, &hp);
11324 	dp_info("WBM RX release ring: hp=0x%x, tp=0x%x", hp, tp);
11325 
11326 	hal_get_sw_hptp(hal_soc, soc->wbm_desc_rel_ring.hal_srng, &tp, &hp);
11327 	dp_info("WBM desc release ring: hp=0x%x, tp=0x%x", hp, tp);
11328 }
11329 
11330 /**
11331  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
11332  * @soc_handle: datapath soc handle
11333  *
11334  * Return: opaque pointer to external dp (non-core DP)
11335  */
11336 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
11337 {
11338 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
11339 
11340 	return soc->external_txrx_handle;
11341 }
11342 
11343 /**
11344  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
11345  * @soc_handle: datapath soc handle
11346  * @txrx_handle: opaque pointer to external dp (non-core DP)
11347  *
11348  * Return: void
11349  */
11350 static void
11351 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
11352 {
11353 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
11354 
11355 	soc->external_txrx_handle = txrx_handle;
11356 }
11357 
11358 /**
11359  * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping
11360  * @soc_hdl: datapath soc handle
11361  * @pdev_id: id of the datapath pdev handle
11362  * @lmac_id: lmac id
11363  *
11364  * Return: QDF_STATUS
11365  */
11366 static QDF_STATUS
11367 dp_soc_map_pdev_to_lmac
11368 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
11369 	 uint32_t lmac_id)
11370 {
11371 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11372 
11373 	wlan_cfg_set_hw_mac_idx(soc->wlan_cfg_ctx,
11374 				pdev_id,
11375 				lmac_id);
11376 
11377 	/*Set host PDEV ID for lmac_id*/
11378 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
11379 			      pdev_id,
11380 			      lmac_id);
11381 
11382 	return QDF_STATUS_SUCCESS;
11383 }
11384 
11385 /**
11386  * dp_soc_handle_pdev_mode_change() - Update pdev to lmac mapping
11387  * @soc_hdl: datapath soc handle
11388  * @pdev_id: id of the datapath pdev handle
11389  * @lmac_id: lmac id
11390  *
11391  * In the event of a dynamic mode change, update the pdev to lmac mapping
11392  *
11393  * Return: QDF_STATUS
11394  */
11395 static QDF_STATUS
11396 dp_soc_handle_pdev_mode_change
11397 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
11398 	 uint32_t lmac_id)
11399 {
11400 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11401 	struct dp_vdev *vdev = NULL;
11402 	uint8_t hw_pdev_id, mac_id;
11403 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc,
11404 								  pdev_id);
11405 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
11406 
11407 	if (qdf_unlikely(!pdev))
11408 		return QDF_STATUS_E_FAILURE;
11409 
11410 	pdev->lmac_id = lmac_id;
11411 	pdev->target_pdev_id =
11412 		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
11413 	dp_info(" mode change %d %d\n", pdev->pdev_id, pdev->lmac_id);
11414 
11415 	/*Set host PDEV ID for lmac_id*/
11416 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
11417 			      pdev->pdev_id,
11418 			      lmac_id);
11419 
11420 	hw_pdev_id =
11421 		dp_get_target_pdev_id_for_host_pdev_id(soc,
11422 						       pdev->pdev_id);
11423 
11424 	/*
11425 	 * When NSS offload is enabled, send pdev_id->lmac_id
11426 	 * and pdev_id to hw_pdev_id to NSS FW
11427 	 */
11428 	if (nss_config) {
11429 		mac_id = pdev->lmac_id;
11430 		if (soc->cdp_soc.ol_ops->pdev_update_lmac_n_target_pdev_id)
11431 			soc->cdp_soc.ol_ops->
11432 				pdev_update_lmac_n_target_pdev_id(
11433 				soc->ctrl_psoc,
11434 				&pdev_id, &mac_id, &hw_pdev_id);
11435 	}
11436 
11437 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
11438 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
11439 		DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
11440 					       hw_pdev_id);
11441 		vdev->lmac_id = pdev->lmac_id;
11442 	}
11443 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
11444 
11445 	return QDF_STATUS_SUCCESS;
11446 }
11447 
11448 /**
11449  * dp_soc_set_pdev_status_down() - set pdev down/up status
11450  * @soc: datapath soc handle
11451  * @pdev_id: id of datapath pdev handle
11452  * @is_pdev_down: pdev down/up status
11453  *
11454  * Return: QDF_STATUS
11455  */
11456 static QDF_STATUS
11457 dp_soc_set_pdev_status_down(struct cdp_soc_t *soc, uint8_t pdev_id,
11458 			    bool is_pdev_down)
11459 {
11460 	struct dp_pdev *pdev =
11461 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11462 						   pdev_id);
11463 	if (!pdev)
11464 		return QDF_STATUS_E_FAILURE;
11465 
11466 	pdev->is_pdev_down = is_pdev_down;
11467 	return QDF_STATUS_SUCCESS;
11468 }
11469 
11470 /**
11471  * dp_get_cfg_capabilities() - get dp capabilities
11472  * @soc_handle: datapath soc handle
11473  * @dp_caps: enum for dp capabilities
11474  *
11475  * Return: bool to determine if dp caps is enabled
11476  */
11477 static bool
11478 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
11479 			enum cdp_capabilities dp_caps)
11480 {
11481 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
11482 
11483 	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
11484 }
11485 
11486 #ifdef FEATURE_AST
11487 static QDF_STATUS
11488 dp_peer_teardown_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
11489 		       uint8_t *peer_mac)
11490 {
11491 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11492 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11493 	struct dp_peer *peer =
11494 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
11495 					       DP_MOD_ID_CDP);
11496 
11497 	/* Peer can be null for monitor vap mac address */
11498 	if (!peer) {
11499 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
11500 			  "%s: Invalid peer\n", __func__);
11501 		return QDF_STATUS_E_FAILURE;
11502 	}
11503 
11504 	dp_peer_update_state(soc, peer, DP_PEER_STATE_LOGICAL_DELETE);
11505 
11506 	qdf_spin_lock_bh(&soc->ast_lock);
11507 	dp_peer_delete_ast_entries(soc, peer);
11508 	qdf_spin_unlock_bh(&soc->ast_lock);
11509 
11510 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11511 	return status;
11512 }
11513 #endif
11514 
11515 #ifndef WLAN_SUPPORT_RX_TAG_STATISTICS
11516 /**
11517  * dp_dump_pdev_rx_protocol_tag_stats - dump the number of packets tagged for
11518  * given protocol type (RX_PROTOCOL_TAG_ALL indicates for all protocol)
11519  * @soc: cdp_soc handle
11520  * @pdev_id: id of cdp_pdev handle
11521  * @protocol_type: protocol type for which stats should be displayed
11522  *
11523  * Return: none
11524  */
11525 static inline void
11526 dp_dump_pdev_rx_protocol_tag_stats(struct cdp_soc_t  *soc, uint8_t pdev_id,
11527 				   uint16_t protocol_type)
11528 {
11529 }
11530 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
11531 
11532 #ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
11533 /**
11534  * dp_update_pdev_rx_protocol_tag - Add/remove a protocol tag that should be
11535  * applied to the desired protocol type packets
11536  * @soc: soc handle
11537  * @pdev_id: id of cdp_pdev handle
11538  * @enable_rx_protocol_tag - bitmask that indicates what protocol types
11539  * are enabled for tagging. zero indicates disable feature, non-zero indicates
11540  * enable feature
11541  * @protocol_type: new protocol type for which the tag is being added
11542  * @tag: user configured tag for the new protocol
11543  *
11544  * Return: Success
11545  */
11546 static inline QDF_STATUS
11547 dp_update_pdev_rx_protocol_tag(struct cdp_soc_t  *soc, uint8_t pdev_id,
11548 			       uint32_t enable_rx_protocol_tag,
11549 			       uint16_t protocol_type,
11550 			       uint16_t tag)
11551 {
11552 	return QDF_STATUS_SUCCESS;
11553 }
11554 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
11555 
11556 #ifndef WLAN_SUPPORT_RX_FLOW_TAG
11557 /**
11558  * dp_set_rx_flow_tag - add/delete a flow
11559  * @soc: soc handle
11560  * @pdev_id: id of cdp_pdev handle
11561  * @flow_info: flow tuple that is to be added to/deleted from flow search table
11562  *
11563  * Return: Success
11564  */
11565 static inline QDF_STATUS
11566 dp_set_rx_flow_tag(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
11567 		   struct cdp_rx_flow_info *flow_info)
11568 {
11569 	return QDF_STATUS_SUCCESS;
11570 }
11571 /**
11572  * dp_dump_rx_flow_tag_stats - dump the number of packets tagged for
11573  * given flow 5-tuple
11574  * @cdp_soc: soc handle
11575  * @pdev_id: id of cdp_pdev handle
11576  * @flow_info: flow 5-tuple for which stats should be displayed
11577  *
11578  * Return: Success
11579  */
11580 static inline QDF_STATUS
11581 dp_dump_rx_flow_tag_stats(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
11582 			  struct cdp_rx_flow_info *flow_info)
11583 {
11584 	return QDF_STATUS_SUCCESS;
11585 }
11586 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
11587 
/**
 * dp_peer_map_attach_wifi3() - allocate peer map/find tables
 * @soc_hdl: datapath soc handle
 * @max_peers: max number of peers supported by FW
 * @max_ast_index: max AST index supported
 * @peer_map_unmap_versions: peer map/unmap event version to use
 *
 * On peer-find attach failure the arch-specific peer map tables are
 * torn down again before returning.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
					   uint32_t max_peers,
					   uint32_t max_ast_index,
					   uint8_t peer_map_unmap_versions)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	QDF_STATUS status;

	soc->max_peers = max_peers;

	wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);

	/* Arch-specific (li/be) peer map table allocation */
	status = soc->arch_ops.txrx_peer_map_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status)) {
		dp_err("failure in allocating peer tables");
		return QDF_STATUS_E_FAILURE;
	}

	dp_info("max_peers %u, calculated max_peers %u max_ast_index: %u\n",
		max_peers, soc->max_peer_id, max_ast_index);

	status = dp_peer_find_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status)) {
		dp_err("Peer find attach failure");
		goto fail;
	}

	soc->peer_map_unmap_versions = peer_map_unmap_versions;
	soc->peer_map_attach_success = TRUE;

	return QDF_STATUS_SUCCESS;
fail:
	/* Unwind the arch-specific allocation on failure */
	soc->arch_ops.txrx_peer_map_detach(soc);

	return status;
}
11624 
/**
 * dp_soc_set_param() - set a soc-level datapath parameter
 * @soc_hdl: datapath soc handle
 * @param: parameter id (enum cdp_soc_param_t)
 * @value: value to set
 *
 * Unknown params are logged and ignored.
 *
 * Return: QDF_STATUS_SUCCESS always
 */
static QDF_STATUS dp_soc_set_param(struct cdp_soc_t  *soc_hdl,
				   enum cdp_soc_param_t param,
				   uint32_t value)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	switch (param) {
	case DP_SOC_PARAM_MSDU_EXCEPTION_DESC:
		soc->num_msdu_exception_desc = value;
		dp_info("num_msdu exception_desc %u",
			value);
		break;
	case DP_SOC_PARAM_CMEM_FSE_SUPPORT:
		/* honored only when the ini also allows FST in CMEM */
		if (wlan_cfg_is_fst_in_cmem_enabled(soc->wlan_cfg_ctx))
			soc->fst_in_cmem = !!value;
		dp_info("FW supports CMEM FSE %u", value);
		break;
	case DP_SOC_PARAM_MAX_AST_AGEOUT:
		soc->max_ast_ageout_count = value;
		dp_info("Max ast ageout count %u", soc->max_ast_ageout_count);
		break;
	case DP_SOC_PARAM_EAPOL_OVER_CONTROL_PORT:
		soc->eapol_over_control_port = value;
		dp_info("Eapol over control_port:%d",
			soc->eapol_over_control_port);
		break;
	default:
		dp_info("not handled param %d ", param);
		break;
	}

	return QDF_STATUS_SUCCESS;
}
11658 
/**
 * dp_soc_set_rate_stats_ctx() - store the rate stats context in soc
 * @soc_handle: datapath soc handle
 * @stats_ctx: opaque rate stats context provided by the caller
 *
 * Return: none
 */
static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
				      void *stats_ctx)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	soc->rate_stats_ctx = (struct cdp_soc_rate_stats_ctx *)stats_ctx;
}
11666 
11667 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
11668 /**
11669  * dp_peer_flush_rate_stats_req(): Flush peer rate stats
11670  * @soc: Datapath SOC handle
11671  * @peer: Datapath peer
11672  * @arg: argument to iter function
11673  *
11674  * Return: QDF_STATUS
11675  */
11676 static void
11677 dp_peer_flush_rate_stats_req(struct dp_soc *soc, struct dp_peer *peer,
11678 			     void *arg)
11679 {
11680 	if (peer->bss_peer)
11681 		return;
11682 
11683 	dp_wdi_event_handler(
11684 		WDI_EVENT_FLUSH_RATE_STATS_REQ,
11685 		soc, dp_monitor_peer_get_rdkstats_ctx(soc, peer),
11686 		peer->peer_id,
11687 		WDI_NO_VAL, peer->vdev->pdev->pdev_id);
11688 }
11689 
11690 /**
11691  * dp_flush_rate_stats_req(): Flush peer rate stats in pdev
11692  * @soc_hdl: Datapath SOC handle
11693  * @pdev_id: pdev_id
11694  *
11695  * Return: QDF_STATUS
11696  */
11697 static QDF_STATUS dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
11698 					  uint8_t pdev_id)
11699 {
11700 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11701 	struct dp_pdev *pdev =
11702 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11703 						   pdev_id);
11704 	if (!pdev)
11705 		return QDF_STATUS_E_FAILURE;
11706 
11707 	dp_pdev_iterate_peer(pdev, dp_peer_flush_rate_stats_req, NULL,
11708 			     DP_MOD_ID_CDP);
11709 
11710 	return QDF_STATUS_SUCCESS;
11711 }
11712 #else
/* Stub: per-packet info / WDI events compiled out; nothing to flush */
static inline QDF_STATUS
dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
			uint8_t pdev_id)
{
	return QDF_STATUS_SUCCESS;
}
11719 #endif
11720 
11721 static void *dp_peer_get_rdkstats_ctx(struct cdp_soc_t *soc_hdl,
11722 				      uint8_t vdev_id,
11723 				      uint8_t *mac_addr)
11724 {
11725 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11726 	struct dp_peer *peer;
11727 	void *rdkstats_ctx = NULL;
11728 
11729 	if (mac_addr) {
11730 		peer = dp_peer_find_hash_find(soc, mac_addr,
11731 					      0, vdev_id,
11732 					      DP_MOD_ID_CDP);
11733 		if (!peer)
11734 			return NULL;
11735 
11736 		if (!IS_MLO_DP_MLD_PEER(peer))
11737 			rdkstats_ctx = dp_monitor_peer_get_rdkstats_ctx(soc,
11738 									peer);
11739 
11740 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11741 	}
11742 
11743 	return rdkstats_ctx;
11744 }
11745 
11746 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
11747 static QDF_STATUS dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
11748 					   uint8_t pdev_id,
11749 					   void *buf)
11750 {
11751 	 dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS,
11752 			      (struct dp_soc *)soc, buf, HTT_INVALID_PEER,
11753 			      WDI_NO_VAL, pdev_id);
11754 	return QDF_STATUS_SUCCESS;
11755 }
11756 #else
/* Stub: per-packet info / WDI events compiled out; flush is a no-op */
static inline QDF_STATUS
dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
			 uint8_t pdev_id,
			 void *buf)
{
	return QDF_STATUS_SUCCESS;
}
11764 #endif
11765 
11766 static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle)
11767 {
11768 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
11769 
11770 	return soc->rate_stats_ctx;
11771 }
11772 
11773 /*
11774  * dp_get_cfg() - get dp cfg
11775  * @soc: cdp soc handle
11776  * @cfg: cfg enum
11777  *
11778  * Return: cfg value
11779  */
11780 static uint32_t dp_get_cfg(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg)
11781 {
11782 	struct dp_soc *dpsoc = (struct dp_soc *)soc;
11783 	uint32_t value = 0;
11784 
11785 	switch (cfg) {
11786 	case cfg_dp_enable_data_stall:
11787 		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
11788 		break;
11789 	case cfg_dp_enable_p2p_ip_tcp_udp_checksum_offload:
11790 		value = dpsoc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload;
11791 		break;
11792 	case cfg_dp_enable_nan_ip_tcp_udp_checksum_offload:
11793 		value = dpsoc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload;
11794 		break;
11795 	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
11796 		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
11797 		break;
11798 	case cfg_dp_disable_legacy_mode_csum_offload:
11799 		value = dpsoc->wlan_cfg_ctx->
11800 					legacy_mode_checksumoffload_disable;
11801 		break;
11802 	case cfg_dp_tso_enable:
11803 		value = dpsoc->wlan_cfg_ctx->tso_enabled;
11804 		break;
11805 	case cfg_dp_lro_enable:
11806 		value = dpsoc->wlan_cfg_ctx->lro_enabled;
11807 		break;
11808 	case cfg_dp_gro_enable:
11809 		value = dpsoc->wlan_cfg_ctx->gro_enabled;
11810 		break;
11811 	case cfg_dp_force_gro_enable:
11812 		value = dpsoc->wlan_cfg_ctx->force_gro_enabled;
11813 		break;
11814 	case cfg_dp_sg_enable:
11815 		value = dpsoc->wlan_cfg_ctx->sg_enabled;
11816 		break;
11817 	case cfg_dp_tx_flow_start_queue_offset:
11818 		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
11819 		break;
11820 	case cfg_dp_tx_flow_stop_queue_threshold:
11821 		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
11822 		break;
11823 	case cfg_dp_disable_intra_bss_fwd:
11824 		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
11825 		break;
11826 	case cfg_dp_pktlog_buffer_size:
11827 		value = dpsoc->wlan_cfg_ctx->pktlog_buffer_size;
11828 		break;
11829 	case cfg_dp_wow_check_rx_pending:
11830 		value = dpsoc->wlan_cfg_ctx->wow_check_rx_pending_enable;
11831 		break;
11832 	default:
11833 		value =  0;
11834 	}
11835 
11836 	return value;
11837 }
11838 
11839 #ifdef PEER_FLOW_CONTROL
11840 /**
11841  * dp_tx_flow_ctrl_configure_pdev() - Configure flow control params
11842  * @soc_handle: datapath soc handle
11843  * @pdev_id: id of datapath pdev handle
11844  * @param: ol ath params
11845  * @value: value of the flag
11846  * @buff: Buffer to be passed
11847  *
11848  * Implemented this function same as legacy function. In legacy code, single
11849  * function is used to display stats and update pdev params.
11850  *
11851  * Return: 0 for success. nonzero for failure.
11852  */
11853 static uint32_t dp_tx_flow_ctrl_configure_pdev(struct cdp_soc_t *soc_handle,
11854 					       uint8_t pdev_id,
11855 					       enum _dp_param_t param,
11856 					       uint32_t value, void *buff)
11857 {
11858 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
11859 	struct dp_pdev *pdev =
11860 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11861 						   pdev_id);
11862 
11863 	if (qdf_unlikely(!pdev))
11864 		return 1;
11865 
11866 	soc = pdev->soc;
11867 	if (!soc)
11868 		return 1;
11869 
11870 	switch (param) {
11871 #ifdef QCA_ENH_V3_STATS_SUPPORT
11872 	case DP_PARAM_VIDEO_DELAY_STATS_FC:
11873 		if (value)
11874 			pdev->delay_stats_flag = true;
11875 		else
11876 			pdev->delay_stats_flag = false;
11877 		break;
11878 	case DP_PARAM_VIDEO_STATS_FC:
11879 		qdf_print("------- TID Stats ------\n");
11880 		dp_pdev_print_tid_stats(pdev);
11881 		qdf_print("------ Delay Stats ------\n");
11882 		dp_pdev_print_delay_stats(pdev);
11883 		qdf_print("------ Rx Error Stats ------\n");
11884 		dp_pdev_print_rx_error_stats(pdev);
11885 		break;
11886 #endif
11887 	case DP_PARAM_TOTAL_Q_SIZE:
11888 		{
11889 			uint32_t tx_min, tx_max;
11890 
11891 			tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx);
11892 			tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
11893 
11894 			if (!buff) {
11895 				if ((value >= tx_min) && (value <= tx_max)) {
11896 					pdev->num_tx_allowed = value;
11897 				} else {
11898 					dp_tx_info("%pK: Failed to update num_tx_allowed, Q_min = %d Q_max = %d",
11899 						   soc, tx_min, tx_max);
11900 					break;
11901 				}
11902 			} else {
11903 				*(int *)buff = pdev->num_tx_allowed;
11904 			}
11905 		}
11906 		break;
11907 	default:
11908 		dp_tx_info("%pK: not handled param %d ", soc, param);
11909 		break;
11910 	}
11911 
11912 	return 0;
11913 }
11914 #endif
11915 
11916 /**
11917  * dp_set_pdev_pcp_tid_map_wifi3(): update pcp tid map in pdev
11918  * @psoc: dp soc handle
11919  * @pdev_id: id of DP_PDEV handle
11920  * @pcp: pcp value
11921  * @tid: tid value passed by the user
11922  *
11923  * Return: QDF_STATUS_SUCCESS on success
11924  */
11925 static QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
11926 						uint8_t pdev_id,
11927 						uint8_t pcp, uint8_t tid)
11928 {
11929 	struct dp_soc *soc = (struct dp_soc *)psoc;
11930 
11931 	soc->pcp_tid_map[pcp] = tid;
11932 
11933 	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
11934 	return QDF_STATUS_SUCCESS;
11935 }
11936 
11937 /**
11938  * dp_set_vdev_pcp_tid_map_wifi3(): update pcp tid map in vdev
11939  * @soc: DP soc handle
11940  * @vdev_id: id of DP_VDEV handle
11941  * @pcp: pcp value
11942  * @tid: tid value passed by the user
11943  *
11944  * Return: QDF_STATUS_SUCCESS on success
11945  */
11946 static QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc_hdl,
11947 						uint8_t vdev_id,
11948 						uint8_t pcp, uint8_t tid)
11949 {
11950 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11951 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11952 						     DP_MOD_ID_CDP);
11953 
11954 	if (!vdev)
11955 		return QDF_STATUS_E_FAILURE;
11956 
11957 	vdev->pcp_tid_map[pcp] = tid;
11958 
11959 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11960 	return QDF_STATUS_SUCCESS;
11961 }
11962 
11963 #if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
11964 static void dp_drain_txrx(struct cdp_soc_t *soc_handle)
11965 {
11966 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
11967 	uint32_t cur_tx_limit, cur_rx_limit;
11968 	uint32_t budget = 0xffff;
11969 	uint32_t val;
11970 	int i;
11971 
11972 	cur_tx_limit = soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit;
11973 	cur_rx_limit = soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit;
11974 
11975 	/* Temporarily increase soft irq limits when going to drain
11976 	 * the UMAC/LMAC SRNGs and restore them after polling.
11977 	 * Though the budget is on higher side, the TX/RX reaping loops
11978 	 * will not execute longer as both TX and RX would be suspended
11979 	 * by the time this API is called.
11980 	 */
11981 	dp_update_soft_irq_limits(soc, budget, budget);
11982 
11983 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
11984 		dp_service_srngs(&soc->intr_ctx[i], budget);
11985 
11986 	dp_update_soft_irq_limits(soc, cur_tx_limit, cur_rx_limit);
11987 
11988 	/* Do a dummy read at offset 0; this will ensure all
11989 	 * pendings writes(HP/TP) are flushed before read returns.
11990 	 */
11991 	val = HAL_REG_READ((struct hal_soc *)soc->hal_soc, 0);
11992 	dp_debug("Register value at offset 0: %u\n", val);
11993 }
11994 #endif
11995 
11996 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
/**
 * dp_set_pkt_capture_mode() - set packet capture mode in soc cfg
 * @soc_handle: datapath soc handle
 * @val: enable (true) / disable (false)
 *
 * Return: none
 */
static void
dp_set_pkt_capture_mode(struct cdp_soc_t *soc_handle, bool val)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	soc->wlan_cfg_ctx->pkt_capture_mode = val;
}
12004 #endif
12005 
/*
 * dp_ops_cmn - common CDP ops table; binds the converged datapath
 * (cdp_cmn_ops) callbacks to their wifi3.0 DP implementations.
 * Optional members are compiled in/out by the surrounding #ifdefs.
 */
static struct cdp_cmn_ops dp_ops_cmn = {
	/* soc/pdev/vdev/peer lifecycle */
	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
	.txrx_vdev_attach = dp_vdev_attach_wifi3,
	.txrx_vdev_detach = dp_vdev_detach_wifi3,
	.txrx_pdev_attach = dp_pdev_attach_wifi3,
	.txrx_pdev_post_attach = dp_pdev_post_attach_wifi3,
	.txrx_pdev_detach = dp_pdev_detach_wifi3,
	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
	.txrx_peer_create = dp_peer_create_wifi3,
	.txrx_peer_setup = dp_peer_setup_wifi3,
#ifdef FEATURE_AST
	.txrx_peer_teardown = dp_peer_teardown_wifi3,
#else
	.txrx_peer_teardown = NULL,
#endif
	/* AST (address search table) management */
	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
	.txrx_peer_get_ast_info_by_pdev =
		dp_peer_get_ast_info_by_pdevid_wifi3,
	.txrx_peer_ast_delete_by_soc =
		dp_peer_ast_entry_del_by_soc,
	.txrx_peer_ast_delete_by_pdev =
		dp_peer_ast_entry_del_by_pdev,
	.txrx_peer_delete = dp_peer_delete_wifi3,
	.txrx_vdev_register = dp_vdev_register_wifi3,
	.txrx_soc_detach = dp_soc_detach_wifi3,
	.txrx_soc_deinit = dp_soc_deinit_wifi3,
	.txrx_soc_init = dp_soc_init_wifi3,
#ifndef QCA_HOST_MODE_WIFI_DISABLED
	.txrx_tso_soc_attach = dp_tso_soc_attach,
	.txrx_tso_soc_detach = dp_tso_soc_detach,
	.tx_send = dp_tx_send,
	.tx_send_exc = dp_tx_send_exception,
#endif
	.txrx_pdev_init = dp_pdev_init_wifi3,
	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
	.txrx_ath_getstats = dp_get_device_stats,
	/* BA (block ack) session handling */
	.addba_requestprocess = dp_addba_requestprocess_wifi3,
	.addba_responsesetup = dp_addba_responsesetup_wifi3,
	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
	.delba_process = dp_delba_process_wifi3,
	.set_addba_response = dp_set_addba_response,
	.flush_cache_rx_queue = NULL,
	/* TODO: get API's for dscp-tid need to be added*/
	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
	.txrx_get_total_per = dp_get_total_per,
	.txrx_stats_request = dp_txrx_stats_request,
	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
	.display_stats = dp_txrx_dump_stats,
	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
	.txrx_intr_detach = dp_soc_interrupt_detach,
	.set_pn_check = dp_set_pn_check_wifi3,
	.set_key_sec_type = dp_set_key_sec_type_wifi3,
	.update_config_parameters = dp_update_config_parameters,
	/* TODO: Add other functions */
	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
	/* opaque handle get/set plumbing for upper layers */
	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
	.get_vdev_dp_ext_txrx_handle = dp_vdev_get_dp_ext_handle,
	.set_vdev_dp_ext_txrx_handle = dp_vdev_set_dp_ext_handle,
	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
	.map_pdev_to_lmac = dp_soc_map_pdev_to_lmac,
	.handle_mode_change = dp_soc_handle_pdev_mode_change,
	.set_pdev_status_down = dp_soc_set_pdev_status_down,
	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
	.set_soc_param = dp_soc_set_param,
	.txrx_get_os_rx_handles_from_vdev =
					dp_get_os_rx_handles_from_vdev_wifi3,
	.delba_tx_completion = dp_delba_tx_completion_wifi3,
	.get_dp_capabilities = dp_get_cfg_capabilities,
	.txrx_get_cfg = dp_get_cfg,
	.set_rate_stats_ctx = dp_soc_set_rate_stats_ctx,
	.get_rate_stats_ctx = dp_soc_get_rate_stats_ctx,
	.txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats,
	.txrx_flush_rate_stats_request = dp_flush_rate_stats_req,
	.txrx_peer_get_rdkstats_ctx = dp_peer_get_rdkstats_ctx,

	.set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3,
	.set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3,

	.txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler,
#ifdef QCA_MULTIPASS_SUPPORT
	.set_vlan_groupkey = dp_set_vlan_groupkey,
#endif
	.get_peer_mac_list = dp_get_peer_mac_list,
#ifdef QCA_SUPPORT_WDS_EXTENDED
	.get_wds_ext_peer_id = dp_wds_ext_get_peer_id,
	.set_wds_ext_peer_rx = dp_wds_ext_set_peer_rx,
#endif /* QCA_SUPPORT_WDS_EXTENDED */

#if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
	.txrx_drain = dp_drain_txrx,
#endif
#if defined(FEATURE_RUNTIME_PM)
	.set_rtpm_tput_policy = dp_set_rtpm_tput_policy_requirement,
#endif
#ifdef WLAN_SYSFS_DP_STATS
	.txrx_sysfs_fill_stats = dp_sysfs_fill_stats,
	.txrx_sysfs_set_stat_type = dp_sysfs_set_stat_type,
#endif /* WLAN_SYSFS_DP_STATS */
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
	.set_pkt_capture_mode = dp_set_pkt_capture_mode,
#endif
};
12119 
/*
 * dp_ops_ctrl - control-path CDP ops table (cdp_ctrl_ops): parameter
 * get/set for psoc/pdev/vdev/peer, WDI event subscription, security
 * type queries and feature-specific (tagging, multipass, TSF) hooks.
 */
static struct cdp_ctrl_ops dp_ops_ctrl = {
	.txrx_peer_authorize = dp_peer_authorize,
	.txrx_peer_get_authorize = dp_peer_get_authorize,
#ifdef VDEV_PEER_PROTOCOL_COUNT
	.txrx_enable_peer_protocol_count = dp_enable_vdev_peer_protocol_count,
	.txrx_set_peer_protocol_drop_mask =
		dp_enable_vdev_peer_protocol_drop_mask,
	.txrx_is_peer_protocol_count_enabled =
		dp_is_vdev_peer_protocol_count_enabled,
	.txrx_get_peer_protocol_drop_mask = dp_get_vdev_peer_protocol_drop_mask,
#endif
	/* generic parameter get/set per object level */
	.txrx_set_vdev_param = dp_set_vdev_param,
	.txrx_set_psoc_param = dp_set_psoc_param,
	.txrx_get_psoc_param = dp_get_psoc_param,
	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
	.txrx_get_sec_type = dp_get_sec_type,
	.txrx_wdi_event_sub = dp_wdi_event_sub,
	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
	.txrx_set_pdev_param = dp_set_pdev_param,
	.txrx_get_pdev_param = dp_get_pdev_param,
	.txrx_set_peer_param = dp_set_peer_param,
	.txrx_get_peer_param = dp_get_peer_param,
#ifdef VDEV_PEER_PROTOCOL_COUNT
	.txrx_peer_protocol_cnt = dp_peer_stats_update_protocol_cnt,
#endif
#ifdef WLAN_SUPPORT_MSCS
	.txrx_record_mscs_params = dp_record_mscs_params,
#endif
#ifdef WLAN_SUPPORT_SCS
	.txrx_enable_scs_params = dp_enable_scs_params,
	.txrx_record_scs_params = dp_record_scs_params,
#endif
	.set_key = dp_set_michael_key,
	.txrx_get_vdev_param = dp_get_vdev_param,
	.calculate_delay_stats = dp_calculate_delay_stats,
#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
	.txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag,
#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
	.txrx_dump_pdev_rx_protocol_tag_stats =
				dp_dump_pdev_rx_protocol_tag_stats,
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
	.txrx_set_rx_flow_tag = dp_set_rx_flow_tag,
	.txrx_dump_rx_flow_tag_stats = dp_dump_rx_flow_tag_stats,
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */
#ifdef QCA_MULTIPASS_SUPPORT
	.txrx_peer_set_vlan_id = dp_peer_set_vlan_id,
#endif /*QCA_MULTIPASS_SUPPORT*/
#ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
	.txrx_set_delta_tsf = dp_set_delta_tsf,
	.txrx_set_tsf_ul_delay_report = dp_set_tsf_ul_delay_report,
	.txrx_get_uplink_delay = dp_get_uplink_delay,
#endif
};
12176 
/*
 * dp_ops_me - mcast enhancement (ME) CDP ops; populated only when host
 * mode and IQUE support are compiled in, otherwise left empty.
 */
static struct cdp_me_ops dp_ops_me = {
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef ATH_SUPPORT_IQUE
	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
#endif
#endif
};
12186 
/*
 * dp_ops_host_stats - host statistics CDP ops table (cdp_host_stats_ops):
 * per-peer/vdev/pdev/soc stats retrieval, publish and reset hooks.
 */
static struct cdp_host_stats_ops dp_ops_host_stats = {
	.txrx_per_peer_stats = dp_get_host_peer_stats,
	.get_fw_peer_stats = dp_get_fw_peer_stats,
	.get_htt_stats = dp_get_htt_stats,
	.txrx_stats_publish = dp_txrx_stats_publish,
	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
	.txrx_get_soc_stats = dp_txrx_get_soc_stats,
	.txrx_get_peer_stats_param = dp_txrx_get_peer_stats_param,
	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
	.txrx_update_vdev_stats = dp_txrx_update_vdev_host_stats,
	.txrx_get_peer_delay_stats = dp_txrx_get_peer_delay_stats,
	.txrx_get_peer_jitter_stats = dp_txrx_get_peer_jitter_stats,
#ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
	.txrx_alloc_vdev_stats_id = dp_txrx_alloc_vdev_stats_id,
	.txrx_reset_vdev_stats_id = dp_txrx_reset_vdev_stats_id,
#endif
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	.get_peer_tx_capture_stats = dp_peer_get_tx_capture_stats,
	.get_pdev_tx_capture_stats = dp_pdev_get_tx_capture_stats,
#endif /* WLAN_TX_PKT_CAPTURE_ENH */
	/* TODO */
};
12212 
/* dp_ops_raw - raw-mode CDP ops; intentionally empty for now */
static struct cdp_raw_ops dp_ops_raw = {
	/* TODO */
};
12216 
#ifdef PEER_FLOW_CONTROL
/*
 * dp_ops_pflow - peer flow control CDP ops; single positional
 * initializer binds the first member of cdp_pflow_ops.
 */
static struct cdp_pflow_ops dp_ops_pflow = {
	dp_tx_flow_ctrl_configure_pdev,
};
#endif /* PEER_FLOW_CONTROL */
12222 
12223 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
/*
 * dp_ops_cfr - channel frequency response (CFR) CDP ops; filter and
 * mon-reap-timer hooks are intentionally NULL here.
 */
static struct cdp_cfr_ops dp_ops_cfr = {
	.txrx_cfr_filter = NULL,
	.txrx_get_cfr_rcc = dp_get_cfr_rcc,
	.txrx_set_cfr_rcc = dp_set_cfr_rcc,
	.txrx_get_cfr_dbg_stats = dp_get_cfr_dbg_stats,
	.txrx_clear_cfr_dbg_stats = dp_clear_cfr_dbg_stats,
	.txrx_enable_mon_reap_timer = NULL,
};
12232 #endif
12233 
12234 #ifdef WLAN_SUPPORT_MSCS
/* dp_ops_mscs - MSCS (mirrored stream classification) CDP ops */
static struct cdp_mscs_ops dp_ops_mscs = {
	.mscs_peer_lookup_n_get_priority = dp_mscs_peer_lookup_n_get_priority,
};
12238 #endif
12239 
12240 #ifdef WLAN_SUPPORT_MESH_LATENCY
/* dp_ops_mesh_latency - mesh latency parameter update CDP ops */
static struct cdp_mesh_latency_ops dp_ops_mesh_latency = {
	.mesh_latency_update_peer_parameter =
		dp_mesh_latency_update_peer_parameter,
};
12245 #endif
12246 
12247 #if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
12248 /**
12249  * dp_flush_ring_hptp() - Update ring shadow
12250  *			  register HP/TP address when runtime
12251  *                        resume
12252  * @opaque_soc: DP soc context
12253  *
12254  * Return: None
12255  */
12256 static
12257 void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
12258 {
12259 	if (hal_srng && hal_srng_get_clear_event(hal_srng,
12260 						 HAL_SRNG_FLUSH_EVENT)) {
12261 		/* Acquire the lock */
12262 		hal_srng_access_start(soc->hal_soc, hal_srng);
12263 
12264 		hal_srng_access_end(soc->hal_soc, hal_srng);
12265 
12266 		hal_srng_set_flush_last_ts(hal_srng);
12267 
12268 		dp_debug("flushed");
12269 	}
12270 }
12271 #endif
12272 
12273 #ifdef DP_TX_TRACKING
12274 
12275 #define DP_TX_COMP_MAX_LATENCY_MS 30000
12276 /**
12277  * dp_tx_comp_delay_check() - calculate time latency for tx completion per pkt
12278  * @timestamp - tx descriptor timestamp
12279  *
12280  * Calculate time latency for tx completion per pkt and trigger self recovery
12281  * when the delay is more than threshold value.
12282  *
12283  * Return: True if delay is more than threshold
12284  */
12285 static bool dp_tx_comp_delay_check(uint64_t timestamp)
12286 {
12287 	uint64_t time_latency, current_time;
12288 
12289 	if (!timestamp)
12290 		return false;
12291 
12292 	if (dp_tx_pkt_tracepoints_enabled()) {
12293 		current_time = qdf_ktime_to_ms(qdf_ktime_real_get());
12294 		time_latency = current_time - timestamp;
12295 		if (time_latency >= DP_TX_COMP_MAX_LATENCY_MS) {
12296 			dp_err_rl("enqueued: %llu ms, current : %llu ms",
12297 				  timestamp, current_time);
12298 			return true;
12299 		}
12300 	} else {
12301 		current_time = qdf_system_ticks();
12302 		time_latency = qdf_system_ticks_to_msecs(current_time -
12303 							 timestamp);
12304 		if (time_latency >= DP_TX_COMP_MAX_LATENCY_MS) {
12305 			dp_err_rl("enqueued: %u ms, current : %u ms",
12306 				  qdf_system_ticks_to_msecs(timestamp),
12307 				  qdf_system_ticks_to_msecs(current_time));
12308 			return true;
12309 		}
12310 	}
12311 
12312 	return false;
12313 }
12314 
12315 /**
12316  * dp_find_missing_tx_comp() - check for leaked descriptor in tx path
12317  * @soc - DP SOC context
12318  *
12319  * Parse through descriptors in all pools and validate magic number and
12320  * completion time. Trigger self recovery if magic value is corrupted.
12321  *
12322  * Return: None.
12323  */
12324 static void dp_find_missing_tx_comp(struct dp_soc *soc)
12325 {
12326 	uint8_t i;
12327 	uint32_t j;
12328 	uint32_t num_desc, page_id, offset;
12329 	uint16_t num_desc_per_page;
12330 	struct dp_tx_desc_s *tx_desc = NULL;
12331 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
12332 
12333 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
12334 		tx_desc_pool = &soc->tx_desc[i];
12335 		if (!(tx_desc_pool->pool_size) ||
12336 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
12337 		    !(tx_desc_pool->desc_pages.cacheable_pages))
12338 			continue;
12339 
12340 		num_desc = tx_desc_pool->pool_size;
12341 		num_desc_per_page =
12342 			tx_desc_pool->desc_pages.num_element_per_page;
12343 		for (j = 0; j < num_desc; j++) {
12344 			page_id = j / num_desc_per_page;
12345 			offset = j % num_desc_per_page;
12346 
12347 			if (qdf_unlikely(!(tx_desc_pool->
12348 					 desc_pages.cacheable_pages)))
12349 				break;
12350 
12351 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
12352 			if (tx_desc->magic == DP_TX_MAGIC_PATTERN_FREE) {
12353 				continue;
12354 			} else if (tx_desc->magic ==
12355 				   DP_TX_MAGIC_PATTERN_INUSE) {
12356 				if (dp_tx_comp_delay_check(
12357 							tx_desc->timestamp)) {
12358 					dp_err_rl("Tx completion not rcvd for id: %u",
12359 						  tx_desc->id);
12360 				}
12361 			} else {
12362 				dp_err_rl("tx desc %u corrupted, flags: 0x%x",
12363 				       tx_desc->id, tx_desc->flags);
12364 			}
12365 		}
12366 	}
12367 }
12368 #else
/* DP_TX_TRACKING disabled: tx descriptor leak scan compiles to a no-op */
static inline void dp_find_missing_tx_comp(struct dp_soc *soc)
{
}
12372 #endif
12373 
12374 #ifdef FEATURE_RUNTIME_PM
12375 /**
12376  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
12377  * @soc_hdl: Datapath soc handle
12378  * @pdev_id: id of data path pdev handle
12379  *
12380  * DP is ready to runtime suspend if there are no pending TX packets.
12381  *
12382  * Return: QDF_STATUS
12383  */
12384 static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
12385 {
12386 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12387 	struct dp_pdev *pdev;
12388 	uint8_t i;
12389 	int32_t tx_pending;
12390 
12391 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12392 	if (!pdev) {
12393 		dp_err("pdev is NULL");
12394 		return QDF_STATUS_E_INVAL;
12395 	}
12396 
12397 	/* Abort if there are any pending TX packets */
12398 	tx_pending = dp_get_tx_pending(dp_pdev_to_cdp_pdev(pdev));
12399 	if (tx_pending) {
12400 		dp_info_rl("%pK: Abort suspend due to pending TX packets %d",
12401 			   soc, tx_pending);
12402 		dp_find_missing_tx_comp(soc);
12403 		/* perform a force flush if tx is pending */
12404 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
12405 			hal_srng_set_event(soc->tcl_data_ring[i].hal_srng,
12406 					   HAL_SRNG_FLUSH_EVENT);
12407 			dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
12408 		}
12409 		qdf_atomic_set(&soc->tx_pending_rtpm, 0);
12410 
12411 		return QDF_STATUS_E_AGAIN;
12412 	}
12413 
12414 	if (dp_runtime_get_refcount(soc)) {
12415 		dp_init_info("refcount: %d", dp_runtime_get_refcount(soc));
12416 
12417 		return QDF_STATUS_E_AGAIN;
12418 	}
12419 
12420 	if (soc->intr_mode == DP_INTR_POLL)
12421 		qdf_timer_stop(&soc->int_timer);
12422 
12423 	dp_rx_fst_update_pm_suspend_status(soc, true);
12424 
12425 	return QDF_STATUS_SUCCESS;
12426 }
12427 
12428 #define DP_FLUSH_WAIT_CNT 10
12429 #define DP_RUNTIME_SUSPEND_WAIT_MS 10
12430 /**
12431  * dp_runtime_resume() - ensure DP is ready to runtime resume
12432  * @soc_hdl: Datapath soc handle
12433  * @pdev_id: id of data path pdev handle
12434  *
12435  * Resume DP for runtime PM.
12436  *
12437  * Return: QDF_STATUS
12438  */
12439 static QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
12440 {
12441 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12442 	int i, suspend_wait = 0;
12443 
12444 	if (soc->intr_mode == DP_INTR_POLL)
12445 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
12446 
12447 	/*
12448 	 * Wait until dp runtime refcount becomes zero or time out, then flush
12449 	 * pending tx for runtime suspend.
12450 	 */
12451 	while (dp_runtime_get_refcount(soc) &&
12452 	       suspend_wait < DP_FLUSH_WAIT_CNT) {
12453 		qdf_sleep(DP_RUNTIME_SUSPEND_WAIT_MS);
12454 		suspend_wait++;
12455 	}
12456 
12457 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
12458 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
12459 	}
12460 	qdf_atomic_set(&soc->tx_pending_rtpm, 0);
12461 
12462 	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
12463 	dp_rx_fst_update_pm_suspend_status(soc, false);
12464 
12465 	return QDF_STATUS_SUCCESS;
12466 }
12467 #endif /* FEATURE_RUNTIME_PM */
12468 
12469 /**
12470  * dp_tx_get_success_ack_stats() - get tx success completion count
12471  * @soc_hdl: Datapath soc handle
12472  * @vdevid: vdev identifier
12473  *
12474  * Return: tx success ack count
12475  */
12476 static uint32_t dp_tx_get_success_ack_stats(struct cdp_soc_t *soc_hdl,
12477 					    uint8_t vdev_id)
12478 {
12479 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12480 	struct cdp_vdev_stats *vdev_stats = NULL;
12481 	uint32_t tx_success;
12482 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12483 						     DP_MOD_ID_CDP);
12484 
12485 	if (!vdev) {
12486 		dp_cdp_err("%pK: Invalid vdev id %d", soc, vdev_id);
12487 		return 0;
12488 	}
12489 
12490 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
12491 	if (!vdev_stats) {
12492 		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats", soc);
12493 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12494 		return 0;
12495 	}
12496 
12497 	dp_aggregate_vdev_stats(vdev, vdev_stats);
12498 
12499 	tx_success = vdev_stats->tx.tx_success.num;
12500 	qdf_mem_free(vdev_stats);
12501 
12502 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12503 	return tx_success;
12504 }
12505 
12506 #ifdef WLAN_SUPPORT_DATA_STALL
12507 /**
12508  * dp_register_data_stall_detect_cb() - register data stall callback
12509  * @soc_hdl: Datapath soc handle
12510  * @pdev_id: id of data path pdev handle
12511  * @data_stall_detect_callback: data stall callback function
12512  *
12513  * Return: QDF_STATUS Enumeration
12514  */
12515 static
12516 QDF_STATUS dp_register_data_stall_detect_cb(
12517 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12518 			data_stall_detect_cb data_stall_detect_callback)
12519 {
12520 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12521 	struct dp_pdev *pdev;
12522 
12523 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12524 	if (!pdev) {
12525 		dp_err("pdev NULL!");
12526 		return QDF_STATUS_E_INVAL;
12527 	}
12528 
12529 	pdev->data_stall_detect_callback = data_stall_detect_callback;
12530 	return QDF_STATUS_SUCCESS;
12531 }
12532 
12533 /**
12534  * dp_deregister_data_stall_detect_cb() - de-register data stall callback
12535  * @soc_hdl: Datapath soc handle
12536  * @pdev_id: id of data path pdev handle
12537  * @data_stall_detect_callback: data stall callback function
12538  *
12539  * Return: QDF_STATUS Enumeration
12540  */
12541 static
12542 QDF_STATUS dp_deregister_data_stall_detect_cb(
12543 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12544 			data_stall_detect_cb data_stall_detect_callback)
12545 {
12546 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12547 	struct dp_pdev *pdev;
12548 
12549 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12550 	if (!pdev) {
12551 		dp_err("pdev NULL!");
12552 		return QDF_STATUS_E_INVAL;
12553 	}
12554 
12555 	pdev->data_stall_detect_callback = NULL;
12556 	return QDF_STATUS_SUCCESS;
12557 }
12558 
12559 /**
12560  * dp_txrx_post_data_stall_event() - post data stall event
12561  * @soc_hdl: Datapath soc handle
12562  * @indicator: Module triggering data stall
12563  * @data_stall_type: data stall event type
12564  * @pdev_id: pdev id
12565  * @vdev_id_bitmap: vdev id bitmap
12566  * @recovery_type: data stall recovery type
12567  *
12568  * Return: None
12569  */
12570 static void
12571 dp_txrx_post_data_stall_event(struct cdp_soc_t *soc_hdl,
12572 			      enum data_stall_log_event_indicator indicator,
12573 			      enum data_stall_log_event_type data_stall_type,
12574 			      uint32_t pdev_id, uint32_t vdev_id_bitmap,
12575 			      enum data_stall_log_recovery_type recovery_type)
12576 {
12577 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12578 	struct data_stall_event_info data_stall_info;
12579 	struct dp_pdev *pdev;
12580 
12581 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12582 	if (!pdev) {
12583 		dp_err("pdev NULL!");
12584 		return;
12585 	}
12586 
12587 	if (!pdev->data_stall_detect_callback) {
12588 		dp_err("data stall cb not registered!");
12589 		return;
12590 	}
12591 
12592 	dp_info("data_stall_type: %x pdev_id: %d",
12593 		data_stall_type, pdev_id);
12594 
12595 	data_stall_info.indicator = indicator;
12596 	data_stall_info.data_stall_type = data_stall_type;
12597 	data_stall_info.vdev_id_bitmap = vdev_id_bitmap;
12598 	data_stall_info.pdev_id = pdev_id;
12599 	data_stall_info.recovery_type = recovery_type;
12600 
12601 	pdev->data_stall_detect_callback(&data_stall_info);
12602 }
12603 #endif /* WLAN_SUPPORT_DATA_STALL */
12604 
12605 #ifdef WLAN_FEATURE_STATS_EXT
12606 /* rx hw stats event wait timeout in ms */
12607 #define DP_REO_STATUS_STATS_TIMEOUT 1500
12608 /**
12609  * dp_txrx_ext_stats_request - request dp txrx extended stats request
12610  * @soc_hdl: soc handle
12611  * @pdev_id: pdev id
12612  * @req: stats request
12613  *
12614  * Return: QDF_STATUS
12615  */
12616 static QDF_STATUS
12617 dp_txrx_ext_stats_request(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12618 			  struct cdp_txrx_ext_stats *req)
12619 {
12620 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12621 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12622 	int i = 0;
12623 	int tcl_ring_full = 0;
12624 
12625 	if (!pdev) {
12626 		dp_err("pdev is null");
12627 		return QDF_STATUS_E_INVAL;
12628 	}
12629 
12630 	dp_aggregate_pdev_stats(pdev);
12631 
12632 	for(i = 0 ; i < MAX_TCL_DATA_RINGS; i++)
12633 		tcl_ring_full += soc->stats.tx.tcl_ring_full[i];
12634 
12635 	req->tx_msdu_enqueue = pdev->stats.tx_i.processed.num;
12636 	req->tx_msdu_overflow = tcl_ring_full;
12637 	req->rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
12638 	req->rx_mpdu_delivered = soc->ext_stats.rx_mpdu_received;
12639 	req->rx_mpdu_missed = pdev->stats.err.reo_error;
12640 	/* only count error source from RXDMA */
12641 	req->rx_mpdu_error = pdev->stats.err.rxdma_error;
12642 
12643 	dp_info("ext stats: tx_msdu_enq = %u, tx_msdu_overflow = %u, "
12644 		"tx_mpdu_recieve = %u, rx_mpdu_delivered = %u, "
12645 		"rx_mpdu_missed = %u, rx_mpdu_error = %u",
12646 		req->tx_msdu_enqueue,
12647 		req->tx_msdu_overflow,
12648 		req->rx_mpdu_received,
12649 		req->rx_mpdu_delivered,
12650 		req->rx_mpdu_missed,
12651 		req->rx_mpdu_error);
12652 
12653 	return QDF_STATUS_SUCCESS;
12654 }
12655 
12656 /**
12657  * dp_rx_hw_stats_cb - request rx hw stats response callback
12658  * @soc: soc handle
12659  * @cb_ctxt: callback context
12660  * @reo_status: reo command response status
12661  *
12662  * Return: None
12663  */
12664 static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
12665 			      union hal_reo_status *reo_status)
12666 {
12667 	struct dp_req_rx_hw_stats_t *rx_hw_stats = cb_ctxt;
12668 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
12669 	bool is_query_timeout;
12670 
12671 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
12672 	is_query_timeout = rx_hw_stats->is_query_timeout;
12673 	/* free the cb_ctxt if all pending tid stats query is received */
12674 	if (qdf_atomic_dec_and_test(&rx_hw_stats->pending_tid_stats_cnt)) {
12675 		if (!is_query_timeout) {
12676 			qdf_event_set(&soc->rx_hw_stats_event);
12677 			soc->is_last_stats_ctx_init = false;
12678 		}
12679 
12680 		qdf_mem_free(rx_hw_stats);
12681 	}
12682 
12683 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
12684 		dp_info("REO stats failure %d",
12685 			queue_status->header.status);
12686 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
12687 		return;
12688 	}
12689 
12690 	if (!is_query_timeout) {
12691 		soc->ext_stats.rx_mpdu_received +=
12692 					queue_status->mpdu_frms_cnt;
12693 		soc->ext_stats.rx_mpdu_missed +=
12694 					queue_status->hole_cnt;
12695 	}
12696 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
12697 }
12698 
12699 /**
12700  * dp_request_rx_hw_stats - request rx hardware stats
12701  * @soc_hdl: soc handle
12702  * @vdev_id: vdev id
12703  *
12704  * Return: None
12705  */
12706 static QDF_STATUS
12707 dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
12708 {
12709 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12710 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12711 						     DP_MOD_ID_CDP);
12712 	struct dp_peer *peer = NULL;
12713 	QDF_STATUS status;
12714 	struct dp_req_rx_hw_stats_t *rx_hw_stats;
12715 	int rx_stats_sent_cnt = 0;
12716 	uint32_t last_rx_mpdu_received;
12717 	uint32_t last_rx_mpdu_missed;
12718 
12719 	if (!vdev) {
12720 		dp_err("vdev is null for vdev_id: %u", vdev_id);
12721 		status = QDF_STATUS_E_INVAL;
12722 		goto out;
12723 	}
12724 
12725 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_CDP);
12726 
12727 	if (!peer) {
12728 		dp_err("Peer is NULL");
12729 		status = QDF_STATUS_E_INVAL;
12730 		goto out;
12731 	}
12732 
12733 	rx_hw_stats = qdf_mem_malloc(sizeof(*rx_hw_stats));
12734 
12735 	if (!rx_hw_stats) {
12736 		dp_err("malloc failed for hw stats structure");
12737 		status = QDF_STATUS_E_INVAL;
12738 		goto out;
12739 	}
12740 
12741 	qdf_event_reset(&soc->rx_hw_stats_event);
12742 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
12743 	/* save the last soc cumulative stats and reset it to 0 */
12744 	last_rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
12745 	last_rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
12746 	soc->ext_stats.rx_mpdu_received = 0;
12747 
12748 	rx_stats_sent_cnt =
12749 		dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, rx_hw_stats);
12750 	if (!rx_stats_sent_cnt) {
12751 		dp_err("no tid stats sent successfully");
12752 		qdf_mem_free(rx_hw_stats);
12753 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
12754 		status = QDF_STATUS_E_INVAL;
12755 		goto out;
12756 	}
12757 	qdf_atomic_set(&rx_hw_stats->pending_tid_stats_cnt,
12758 		       rx_stats_sent_cnt);
12759 	rx_hw_stats->is_query_timeout = false;
12760 	soc->is_last_stats_ctx_init = true;
12761 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
12762 
12763 	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
12764 				       DP_REO_STATUS_STATS_TIMEOUT);
12765 
12766 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
12767 	if (status != QDF_STATUS_SUCCESS) {
12768 		dp_info("rx hw stats event timeout");
12769 		if (soc->is_last_stats_ctx_init)
12770 			rx_hw_stats->is_query_timeout = true;
12771 		/**
12772 		 * If query timeout happened, use the last saved stats
12773 		 * for this time query.
12774 		 */
12775 		soc->ext_stats.rx_mpdu_received = last_rx_mpdu_received;
12776 		soc->ext_stats.rx_mpdu_missed = last_rx_mpdu_missed;
12777 	}
12778 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
12779 
12780 out:
12781 	if (peer)
12782 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
12783 	if (vdev)
12784 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12785 
12786 	return status;
12787 }
12788 
12789 /**
12790  * dp_reset_rx_hw_ext_stats - Reset rx hardware ext stats
12791  * @soc_hdl: soc handle
12792  *
12793  * Return: None
12794  */
12795 static
12796 void dp_reset_rx_hw_ext_stats(struct cdp_soc_t *soc_hdl)
12797 {
12798 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12799 
12800 	soc->ext_stats.rx_mpdu_received = 0;
12801 	soc->ext_stats.rx_mpdu_missed = 0;
12802 }
12803 #endif /* WLAN_FEATURE_STATS_EXT */
12804 
12805 static
12806 uint32_t dp_get_tx_rings_grp_bitmap(struct cdp_soc_t *soc_hdl)
12807 {
12808 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12809 
12810 	return soc->wlan_cfg_ctx->tx_rings_grp_bitmap;
12811 }
12812 
12813 #ifdef DP_PEER_EXTENDED_API
/*
 * dp_ops_misc - miscellaneous CDP ops (cdp_misc_ops): runtime PM,
 * data stall detection, extended stats and SW latency manager hooks.
 */
static struct cdp_misc_ops dp_ops_misc = {
#ifdef FEATURE_WLAN_TDLS
	.tx_non_std = dp_tx_non_std,
#endif /* FEATURE_WLAN_TDLS */
	.get_opmode = dp_get_opmode,
#ifdef FEATURE_RUNTIME_PM
	.runtime_suspend = dp_runtime_suspend,
	.runtime_resume = dp_runtime_resume,
#endif /* FEATURE_RUNTIME_PM */
	.get_num_rx_contexts = dp_get_num_rx_contexts,
	.get_tx_ack_stats = dp_tx_get_success_ack_stats,
#ifdef WLAN_SUPPORT_DATA_STALL
	.txrx_data_stall_cb_register = dp_register_data_stall_detect_cb,
	.txrx_data_stall_cb_deregister = dp_deregister_data_stall_detect_cb,
	.txrx_post_data_stall_event = dp_txrx_post_data_stall_event,
#endif

#ifdef WLAN_FEATURE_STATS_EXT
	.txrx_ext_stats_request = dp_txrx_ext_stats_request,
	.request_rx_hw_stats = dp_request_rx_hw_stats,
	.reset_rx_hw_ext_stats = dp_reset_rx_hw_ext_stats,
#endif /* WLAN_FEATURE_STATS_EXT */
	.vdev_inform_ll_conn = dp_vdev_inform_ll_conn,
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
	.set_swlm_enable = dp_soc_set_swlm_enable,
	.is_swlm_enabled = dp_soc_is_swlm_enabled,
#endif
	.display_txrx_hw_info = dp_display_srng_info,
	.get_tx_rings_grp_bitmap = dp_get_tx_rings_grp_bitmap,
};
12844 #endif
12845 
12846 #ifdef DP_FLOW_CTL
/* dp_ops_flowctl - tx flow control (v2) CDP ops */
static struct cdp_flowctl_ops dp_ops_flowctl = {
	/* WIFI 3.0 DP implement as required. */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	.flow_pool_map_handler = dp_tx_flow_pool_map,
	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
	.register_pause_cb = dp_txrx_register_pause_cb,
	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
};
12857 
/* dp_ops_l_flowctl - legacy flow control ops; empty for wifi3.0 DP */
static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
12861 #endif
12862 
12863 #ifdef IPA_OFFLOAD
/*
 * dp_ops_ipa - IPA offload CDP ops (cdp_ipa_ops): resource/doorbell
 * setup, pipe enable/disable, autonomy and SMMU mapping hooks.
 */
static struct cdp_ipa_ops dp_ops_ipa = {
	.ipa_get_resource = dp_ipa_get_resource,
	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
	.ipa_iounmap_doorbell_vaddr = dp_ipa_iounmap_doorbell_vaddr,
	.ipa_op_response = dp_ipa_op_response,
	.ipa_register_op_cb = dp_ipa_register_op_cb,
	.ipa_deregister_op_cb = dp_ipa_deregister_op_cb,
	.ipa_get_stat = dp_ipa_get_stat,
	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
	.ipa_setup = dp_ipa_setup,
	.ipa_cleanup = dp_ipa_cleanup,
	.ipa_setup_iface = dp_ipa_setup_iface,
	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
	.ipa_enable_pipes = dp_ipa_enable_pipes,
	.ipa_disable_pipes = dp_ipa_disable_pipes,
	.ipa_set_perf_level = dp_ipa_set_perf_level,
	.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd,
	.ipa_tx_buf_smmu_mapping = dp_ipa_tx_buf_smmu_mapping,
	.ipa_tx_buf_smmu_unmapping = dp_ipa_tx_buf_smmu_unmapping
};
12886 #endif
12887 
12888 #ifdef DP_POWER_SAVE
/**
 * dp_bus_suspend() - prepare the DP layer for a bus suspend
 * @soc_hdl: datapath soc handle
 * @pdev_id: id of the pdev being suspended
 *
 * Waits up to SUSPEND_DRAIN_WAIT ms (polling every 50 ms) for pending
 * TX frames to drain; aborts the suspend if they do not. On success,
 * stops the poll-mode interrupt timer, reaps any pending monitor/pktlog
 * frames and suspends FSE cache flushing.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_INVAL for an
 *	   unknown pdev_id, QDF_STATUS_E_TIMEOUT if TX frames remain
 */
static QDF_STATUS dp_bus_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	int timeout = SUSPEND_DRAIN_WAIT;
	int drain_wait_delay = 50; /* 50 ms */
	int32_t tx_pending;

	if (qdf_unlikely(!pdev)) {
		dp_err("pdev is NULL");
		return QDF_STATUS_E_INVAL;
	}

	/* Abort if there are any pending TX packets */
	while ((tx_pending = dp_get_tx_pending((struct cdp_pdev *)pdev))) {
		/* Sleep first, then check the budget: at least one 50 ms
		 * sleep happens for every non-zero tx_pending reading.
		 */
		qdf_sleep(drain_wait_delay);
		if (timeout <= 0) {
			dp_info("TX frames are pending %d, abort suspend",
				tx_pending);
			/* Diagnostic: log completions that never arrived */
			dp_find_missing_tx_comp(soc);
			return QDF_STATUS_E_TIMEOUT;
		}
		timeout = timeout - drain_wait_delay;
	}

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_stop(&soc->int_timer);

	/* Stop monitor reap timer and reap any pending frames in ring */
	dp_monitor_pktlog_reap_pending_frames(pdev);

	dp_suspend_fse_cache_flush(soc);

	return QDF_STATUS_SUCCESS;
}
12924 
12925 static QDF_STATUS dp_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
12926 {
12927 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12928 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12929 	uint8_t i;
12930 
12931 	if (qdf_unlikely(!pdev)) {
12932 		dp_err("pdev is NULL");
12933 		return QDF_STATUS_E_INVAL;
12934 	}
12935 
12936 	if (soc->intr_mode == DP_INTR_POLL)
12937 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
12938 
12939 	/* Start monitor reap timer */
12940 	dp_monitor_pktlog_start_reap_timer(pdev);
12941 
12942 	dp_resume_fse_cache_flush(soc);
12943 
12944 	for (i = 0; i < soc->num_tcl_data_rings; i++)
12945 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
12946 
12947 	return QDF_STATUS_SUCCESS;
12948 }
12949 
12950 /**
12951  * dp_process_wow_ack_rsp() - process wow ack response
12952  * @soc_hdl: datapath soc handle
12953  * @pdev_id: data path pdev handle id
12954  *
12955  * Return: none
12956  */
12957 static void dp_process_wow_ack_rsp(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
12958 {
12959 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12960 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12961 
12962 	if (qdf_unlikely(!pdev)) {
12963 		dp_err("pdev is NULL");
12964 		return;
12965 	}
12966 
12967 	/*
12968 	 * As part of wow enable FW disables the mon status ring and in wow ack
12969 	 * response from FW reap mon status ring to make sure no packets pending
12970 	 * in the ring.
12971 	 */
12972 	dp_monitor_pktlog_reap_pending_frames(pdev);
12973 }
12974 
12975 /**
12976  * dp_process_target_suspend_req() - process target suspend request
12977  * @soc_hdl: datapath soc handle
12978  * @pdev_id: data path pdev handle id
12979  *
12980  * Return: none
12981  */
12982 static void dp_process_target_suspend_req(struct cdp_soc_t *soc_hdl,
12983 					  uint8_t pdev_id)
12984 {
12985 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12986 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12987 
12988 	if (qdf_unlikely(!pdev)) {
12989 		dp_err("pdev is NULL");
12990 		return;
12991 	}
12992 
12993 	/* Stop monitor reap timer and reap any pending frames in ring */
12994 	dp_monitor_pktlog_reap_pending_frames(pdev);
12995 }
12996 
/* Bus power-management ops (suspend/resume, WoW ack, target suspend) */
static struct cdp_bus_ops dp_ops_bus = {
	.bus_suspend = dp_bus_suspend,
	.bus_resume = dp_bus_resume,
	.process_wow_ack_rsp = dp_process_wow_ack_rsp,
	.process_target_suspend_req = dp_process_target_suspend_req
};
13003 #endif
13004 
13005 #ifdef DP_FLOW_CTL
/* Throttle ops: intentionally empty for WIFI 3.0 DP */
static struct cdp_throttle_ops dp_ops_throttle = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
13009 
/* Config ops: intentionally empty for WIFI 3.0 DP */
static struct cdp_cfg_ops dp_ops_cfg = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
13013 #endif
13014 
13015 #ifdef DP_PEER_EXTENDED_API
/* OCB ops: intentionally empty for WIFI 3.0 DP */
static struct cdp_ocb_ops dp_ops_ocb = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
13019 
/* Mobile stats ops: only the clear/dump-stats hook is implemented */
static struct cdp_mob_stats_ops dp_ops_mob_stats = {
	.clear_stats = dp_txrx_clear_dump_stats,
};
13023 
/* Peer management ops (registration, lookup, state, frag flush) */
static struct cdp_peer_ops dp_ops_peer = {
	.register_peer = dp_register_peer,
	.clear_peer = dp_clear_peer,
	.find_peer_exist = dp_find_peer_exist,
	.find_peer_exist_on_vdev = dp_find_peer_exist_on_vdev,
	.find_peer_exist_on_other_vdev = dp_find_peer_exist_on_other_vdev,
	.peer_state_update = dp_peer_state_update,
	.get_vdevid = dp_get_vdevid,
	.get_vdev_by_peer_addr = dp_get_vdev_by_peer_addr,
	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
	.get_peer_state = dp_get_peer_state,
	.peer_flush_frags = dp_peer_flush_frags,
};
13037 #endif
13038 
13039 static void dp_soc_txrx_ops_attach(struct dp_soc *soc)
13040 {
13041 	soc->cdp_soc.ops->cmn_drv_ops = &dp_ops_cmn;
13042 	soc->cdp_soc.ops->ctrl_ops = &dp_ops_ctrl;
13043 	soc->cdp_soc.ops->me_ops = &dp_ops_me;
13044 	soc->cdp_soc.ops->host_stats_ops = &dp_ops_host_stats;
13045 	soc->cdp_soc.ops->wds_ops = &dp_ops_wds;
13046 	soc->cdp_soc.ops->raw_ops = &dp_ops_raw;
13047 #ifdef PEER_FLOW_CONTROL
13048 	soc->cdp_soc.ops->pflow_ops = &dp_ops_pflow;
13049 #endif /* PEER_FLOW_CONTROL */
13050 #ifdef DP_PEER_EXTENDED_API
13051 	soc->cdp_soc.ops->misc_ops = &dp_ops_misc;
13052 	soc->cdp_soc.ops->ocb_ops = &dp_ops_ocb;
13053 	soc->cdp_soc.ops->peer_ops = &dp_ops_peer;
13054 	soc->cdp_soc.ops->mob_stats_ops = &dp_ops_mob_stats;
13055 #endif
13056 #ifdef DP_FLOW_CTL
13057 	soc->cdp_soc.ops->cfg_ops = &dp_ops_cfg;
13058 	soc->cdp_soc.ops->flowctl_ops = &dp_ops_flowctl;
13059 	soc->cdp_soc.ops->l_flowctl_ops = &dp_ops_l_flowctl;
13060 	soc->cdp_soc.ops->throttle_ops = &dp_ops_throttle;
13061 #endif
13062 #ifdef IPA_OFFLOAD
13063 	soc->cdp_soc.ops->ipa_ops = &dp_ops_ipa;
13064 #endif
13065 #ifdef DP_POWER_SAVE
13066 	soc->cdp_soc.ops->bus_ops = &dp_ops_bus;
13067 #endif
13068 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
13069 	soc->cdp_soc.ops->cfr_ops = &dp_ops_cfr;
13070 #endif
13071 #ifdef WLAN_SUPPORT_MSCS
13072 	soc->cdp_soc.ops->mscs_ops = &dp_ops_mscs;
13073 #endif
13074 #ifdef WLAN_SUPPORT_MESH_LATENCY
13075 	soc->cdp_soc.ops->mesh_latency_ops = &dp_ops_mesh_latency;
13076 #endif
13077 };
13078 
13079 /*
13080  * dp_soc_set_txrx_ring_map()
13081  * @dp_soc: DP handler for soc
13082  *
13083  * Return: Void
13084  */
13085 void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
13086 {
13087 	uint32_t i;
13088 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
13089 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
13090 	}
13091 }
13092 
13093 qdf_export_symbol(dp_soc_set_txrx_ring_map);
13094 
13095 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
13096 	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574)
13097 /**
13098  * dp_soc_attach_wifi3() - Attach txrx SOC
13099  * @ctrl_psoc: Opaque SOC handle from control plane
13100  * @params: SOC attach params
13101  *
13102  * Return: DP SOC handle on success, NULL on failure
13103  */
13104 struct cdp_soc_t *
13105 dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
13106 		    struct cdp_soc_attach_params *params)
13107 {
13108 	struct dp_soc *dp_soc = NULL;
13109 
13110 	dp_soc = dp_soc_attach(ctrl_psoc, params);
13111 
13112 	return dp_soc_to_cdp_soc_t(dp_soc);
13113 }
13114 
13115 static inline void dp_soc_set_def_pdev(struct dp_soc *soc)
13116 {
13117 	int lmac_id;
13118 
13119 	for (lmac_id = 0; lmac_id < MAX_NUM_LMAC_HW; lmac_id++) {
13120 		/*Set default host PDEV ID for lmac_id*/
13121 		wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
13122 				      INVALID_PDEV_ID, lmac_id);
13123 	}
13124 }
13125 
13126 static uint32_t
13127 dp_get_link_desc_id_start(uint16_t arch_id)
13128 {
13129 	switch (arch_id) {
13130 	case CDP_ARCH_TYPE_LI:
13131 		return LINK_DESC_ID_START_21_BITS_COOKIE;
13132 	case CDP_ARCH_TYPE_BE:
13133 		return LINK_DESC_ID_START_20_BITS_COOKIE;
13134 	default:
13135 		dp_err("unkonwn arch_id 0x%x", arch_id);
13136 		QDF_BUG(0);
13137 		return LINK_DESC_ID_START_21_BITS_COOKIE;
13138 	}
13139 }
13140 
13141 /**
13142  * dp_soc_attach() - Attach txrx SOC
13143  * @ctrl_psoc: Opaque SOC handle from control plane
13144  * @params: SOC attach params
13145  *
13146  * Return: DP SOC handle on success, NULL on failure
13147  */
13148 static struct dp_soc *
13149 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
13150 	      struct cdp_soc_attach_params *params)
13151 {
13152 	int int_ctx;
13153 	struct dp_soc *soc =  NULL;
13154 	uint16_t arch_id;
13155 	struct hif_opaque_softc *hif_handle = params->hif_handle;
13156 	qdf_device_t qdf_osdev = params->qdf_osdev;
13157 	struct ol_if_ops *ol_ops = params->ol_ops;
13158 	uint16_t device_id = params->device_id;
13159 
13160 	if (!hif_handle) {
13161 		dp_err("HIF handle is NULL");
13162 		goto fail0;
13163 	}
13164 	arch_id = cdp_get_arch_type_from_devid(device_id);
13165 	soc = qdf_mem_malloc(dp_get_soc_context_size(device_id));
13166 	if (!soc) {
13167 		dp_err("DP SOC memory allocation failed");
13168 		goto fail0;
13169 	}
13170 
13171 	dp_info("soc memory allocated %pK", soc);
13172 	soc->hif_handle = hif_handle;
13173 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
13174 	if (!soc->hal_soc)
13175 		goto fail1;
13176 
13177 	hif_get_cmem_info(soc->hif_handle,
13178 			  &soc->cmem_base,
13179 			  &soc->cmem_size);
13180 	int_ctx = 0;
13181 	soc->device_id = device_id;
13182 	soc->cdp_soc.ops =
13183 		(struct cdp_ops *)qdf_mem_malloc(sizeof(struct cdp_ops));
13184 	if (!soc->cdp_soc.ops)
13185 		goto fail1;
13186 
13187 	dp_soc_txrx_ops_attach(soc);
13188 	soc->cdp_soc.ol_ops = ol_ops;
13189 	soc->ctrl_psoc = ctrl_psoc;
13190 	soc->osdev = qdf_osdev;
13191 	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
13192 	hal_rx_get_tlv_size(soc->hal_soc, &soc->rx_pkt_tlv_size,
13193 			    &soc->rx_mon_pkt_tlv_size);
13194 	soc->idle_link_bm_id = hal_get_idle_link_bm_id(soc->hal_soc,
13195 						       params->mlo_chip_id);
13196 	soc->features.dmac_cmn_src_rxbuf_ring_enabled =
13197 		hal_dmac_cmn_src_rxbuf_ring_get(soc->hal_soc);
13198 	soc->arch_id = arch_id;
13199 	soc->link_desc_id_start =
13200 			dp_get_link_desc_id_start(soc->arch_id);
13201 	dp_configure_arch_ops(soc);
13202 
13203 	/* Reset wbm sg list and flags */
13204 	dp_rx_wbm_sg_list_reset(soc);
13205 
13206 	dp_soc_tx_hw_desc_history_attach(soc);
13207 	dp_soc_rx_history_attach(soc);
13208 	dp_soc_tx_history_attach(soc);
13209 	wlan_set_srng_cfg(&soc->wlan_srng_cfg);
13210 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
13211 	if (!soc->wlan_cfg_ctx) {
13212 		dp_err("wlan_cfg_ctx failed\n");
13213 		goto fail2;
13214 	}
13215 	dp_soc_cfg_attach(soc);
13216 
13217 	if (dp_hw_link_desc_pool_banks_alloc(soc, WLAN_INVALID_PDEV_ID)) {
13218 		dp_err("failed to allocate link desc pool banks");
13219 		goto fail3;
13220 	}
13221 
13222 	if (dp_hw_link_desc_ring_alloc(soc)) {
13223 		dp_err("failed to allocate link_desc_ring");
13224 		goto fail4;
13225 	}
13226 
13227 	if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_attach(soc,
13228 								 params))) {
13229 		dp_err("unable to do target specific attach");
13230 		goto fail5;
13231 	}
13232 
13233 	if (dp_soc_srng_alloc(soc)) {
13234 		dp_err("failed to allocate soc srng rings");
13235 		goto fail6;
13236 	}
13237 
13238 	if (dp_soc_tx_desc_sw_pools_alloc(soc)) {
13239 		dp_err("dp_soc_tx_desc_sw_pools_alloc failed");
13240 		goto fail7;
13241 	}
13242 
13243 	if (!dp_monitor_modularized_enable()) {
13244 		if (dp_mon_soc_attach_wrapper(soc)) {
13245 			dp_err("failed to attach monitor");
13246 			goto fail8;
13247 		}
13248 	}
13249 
13250 	if (dp_sysfs_initialize_stats(soc) != QDF_STATUS_SUCCESS) {
13251 		dp_err("failed to initialize dp stats sysfs file");
13252 		dp_sysfs_deinitialize_stats(soc);
13253 	}
13254 
13255 	dp_soc_swlm_attach(soc);
13256 	dp_soc_set_interrupt_mode(soc);
13257 	dp_soc_set_def_pdev(soc);
13258 
13259 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
13260 		qdf_dma_mem_stats_read(),
13261 		qdf_heap_mem_stats_read(),
13262 		qdf_skb_total_mem_stats_read());
13263 
13264 	return soc;
13265 fail8:
13266 	dp_soc_tx_desc_sw_pools_free(soc);
13267 fail7:
13268 	dp_soc_srng_free(soc);
13269 fail6:
13270 	soc->arch_ops.txrx_soc_detach(soc);
13271 fail5:
13272 	dp_hw_link_desc_ring_free(soc);
13273 fail4:
13274 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
13275 fail3:
13276 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
13277 fail2:
13278 	qdf_mem_free(soc->cdp_soc.ops);
13279 fail1:
13280 	qdf_mem_free(soc);
13281 fail0:
13282 	return NULL;
13283 }
13284 
13285 /**
13286  * dp_soc_init() - Initialize txrx SOC
13287  * @dp_soc: Opaque DP SOC handle
13288  * @htc_handle: Opaque HTC handle
13289  * @hif_handle: Opaque HIF handle
13290  *
13291  * Return: DP SOC handle on success, NULL on failure
13292  */
13293 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
13294 		  struct hif_opaque_softc *hif_handle)
13295 {
13296 	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
13297 	bool is_monitor_mode = false;
13298 	struct hal_reo_params reo_params;
13299 	uint8_t i;
13300 	int num_dp_msi;
13301 	struct dp_mon_ops *mon_ops;
13302 
13303 	wlan_minidump_log(soc, sizeof(*soc), soc->ctrl_psoc,
13304 			  WLAN_MD_DP_SOC, "dp_soc");
13305 
13306 	soc->hif_handle = hif_handle;
13307 
13308 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
13309 	if (!soc->hal_soc)
13310 		goto fail0;
13311 
13312 	if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_init(soc))) {
13313 		dp_err("unable to do target specific init");
13314 		goto fail0;
13315 	}
13316 
13317 	htt_soc = htt_soc_attach(soc, htc_handle);
13318 	if (!htt_soc)
13319 		goto fail1;
13320 
13321 	soc->htt_handle = htt_soc;
13322 
13323 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
13324 		goto fail2;
13325 
13326 	htt_set_htc_handle(htt_soc, htc_handle);
13327 
13328 	dp_soc_cfg_init(soc);
13329 
13330 	dp_monitor_soc_cfg_init(soc);
13331 	/* Reset/Initialize wbm sg list and flags */
13332 	dp_rx_wbm_sg_list_reset(soc);
13333 
13334 	/* Note: Any SRNG ring initialization should happen only after
13335 	 * Interrupt mode is set and followed by filling up the
13336 	 * interrupt mask. IT SHOULD ALWAYS BE IN THIS ORDER.
13337 	 */
13338 	dp_soc_set_interrupt_mode(soc);
13339 	if (soc->cdp_soc.ol_ops->get_con_mode &&
13340 	    soc->cdp_soc.ol_ops->get_con_mode() ==
13341 	    QDF_GLOBAL_MONITOR_MODE)
13342 		is_monitor_mode = true;
13343 
13344 	num_dp_msi = dp_get_num_msi_available(soc, soc->intr_mode);
13345 	if (num_dp_msi < 0) {
13346 		dp_init_err("%pK: dp_interrupt assignment failed", soc);
13347 		goto fail3;
13348 	}
13349 
13350 	wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, num_dp_msi,
13351 				     soc->intr_mode, is_monitor_mode);
13352 
13353 	/* initialize WBM_IDLE_LINK ring */
13354 	if (dp_hw_link_desc_ring_init(soc)) {
13355 		dp_init_err("%pK: dp_hw_link_desc_ring_init failed", soc);
13356 		goto fail3;
13357 	}
13358 
13359 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
13360 
13361 	if (dp_soc_srng_init(soc)) {
13362 		dp_init_err("%pK: dp_soc_srng_init failed", soc);
13363 		goto fail4;
13364 	}
13365 
13366 	if (htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
13367 			       htt_get_htc_handle(htt_soc),
13368 			       soc->hal_soc, soc->osdev) == NULL)
13369 		goto fail5;
13370 
13371 	/* Initialize descriptors in TCL Rings */
13372 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
13373 		hal_tx_init_data_ring(soc->hal_soc,
13374 				      soc->tcl_data_ring[i].hal_srng);
13375 	}
13376 
13377 	if (dp_soc_tx_desc_sw_pools_init(soc)) {
13378 		dp_init_err("%pK: dp_tx_soc_attach failed", soc);
13379 		goto fail6;
13380 	}
13381 
13382 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
13383 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
13384 	soc->cce_disable = false;
13385 	soc->max_ast_ageout_count = MAX_AST_AGEOUT_COUNT;
13386 
13387 	soc->sta_mode_search_policy = DP_TX_ADDR_SEARCH_ADDR_POLICY;
13388 	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
13389 	qdf_spinlock_create(&soc->vdev_map_lock);
13390 	qdf_atomic_init(&soc->num_tx_outstanding);
13391 	qdf_atomic_init(&soc->num_tx_exception);
13392 	soc->num_tx_allowed =
13393 		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);
13394 
13395 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
13396 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
13397 				CDP_CFG_MAX_PEER_ID);
13398 
13399 		if (ret != -EINVAL)
13400 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
13401 
13402 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
13403 				CDP_CFG_CCE_DISABLE);
13404 		if (ret == 1)
13405 			soc->cce_disable = true;
13406 	}
13407 
13408 	/*
13409 	 * Skip registering hw ring interrupts for WMAC2 on IPQ6018
13410 	 * and IPQ5018 WMAC2 is not there in these platforms.
13411 	 */
13412 	if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018 ||
13413 	    soc->disable_mac2_intr)
13414 		dp_soc_disable_unused_mac_intr_mask(soc, 0x2);
13415 
13416 	/*
13417 	 * Skip registering hw ring interrupts for WMAC1 on IPQ5018
13418 	 * WMAC1 is not there in this platform.
13419 	 */
13420 	if (soc->disable_mac1_intr)
13421 		dp_soc_disable_unused_mac_intr_mask(soc, 0x1);
13422 
13423 	/* Setup HW REO */
13424 	qdf_mem_zero(&reo_params, sizeof(reo_params));
13425 
13426 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
13427 		/*
13428 		 * Reo ring remap is not required if both radios
13429 		 * are offloaded to NSS
13430 		 */
13431 		if (soc->arch_ops.reo_remap_config(soc, &reo_params.remap0,
13432 						   &reo_params.remap1,
13433 						   &reo_params.remap2))
13434 			reo_params.rx_hash_enabled = true;
13435 		else
13436 			reo_params.rx_hash_enabled = false;
13437 	}
13438 
13439 	/* setup the global rx defrag waitlist */
13440 	TAILQ_INIT(&soc->rx.defrag.waitlist);
13441 	soc->rx.defrag.timeout_ms =
13442 		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
13443 	soc->rx.defrag.next_flush_ms = 0;
13444 	soc->rx.flags.defrag_timeout_check =
13445 		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
13446 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
13447 
13448 	/*
13449 	 * set the fragment destination ring
13450 	 */
13451 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
13452 
13453 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
13454 		reo_params.alt_dst_ind_0 = REO_REMAP_RELEASE;
13455 
13456 	hal_reo_setup(soc->hal_soc, &reo_params);
13457 
13458 	hal_reo_set_err_dst_remap(soc->hal_soc);
13459 
13460 	soc->features.pn_in_reo_dest = hal_reo_enable_pn_in_dest(soc->hal_soc);
13461 
13462 	mon_ops = dp_mon_ops_get(soc);
13463 	if (mon_ops && mon_ops->mon_soc_init)
13464 		mon_ops->mon_soc_init(soc);
13465 
13466 	qdf_atomic_set(&soc->cmn_init_done, 1);
13467 
13468 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
13469 
13470 	qdf_spinlock_create(&soc->ast_lock);
13471 	dp_peer_mec_spinlock_create(soc);
13472 
13473 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
13474 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
13475 	INIT_RX_HW_STATS_LOCK(soc);
13476 
13477 	qdf_nbuf_queue_init(&soc->invalid_buf_queue);
13478 	/* fill the tx/rx cpu ring map*/
13479 	dp_soc_set_txrx_ring_map(soc);
13480 
13481 	TAILQ_INIT(&soc->inactive_peer_list);
13482 	qdf_spinlock_create(&soc->inactive_peer_list_lock);
13483 	TAILQ_INIT(&soc->inactive_vdev_list);
13484 	qdf_spinlock_create(&soc->inactive_vdev_list_lock);
13485 	qdf_spinlock_create(&soc->htt_stats.lock);
13486 	/* initialize work queue for stats processing */
13487 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
13488 
13489 	dp_reo_desc_deferred_freelist_create(soc);
13490 
13491 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
13492 		qdf_dma_mem_stats_read(),
13493 		qdf_heap_mem_stats_read(),
13494 		qdf_skb_total_mem_stats_read());
13495 
13496 	soc->vdev_stats_id_map = 0;
13497 
13498 	return soc;
13499 fail6:
13500 	htt_soc_htc_dealloc(soc->htt_handle);
13501 fail5:
13502 	dp_soc_srng_deinit(soc);
13503 fail4:
13504 	dp_hw_link_desc_ring_deinit(soc);
13505 fail3:
13506 	htt_htc_pkt_pool_free(htt_soc);
13507 fail2:
13508 	htt_soc_detach(htt_soc);
13509 fail1:
13510 	soc->arch_ops.txrx_soc_deinit(soc);
13511 fail0:
13512 	return NULL;
13513 }
13514 
13515 /**
13516  * dp_soc_init_wifi3() - Initialize txrx SOC
13517  * @soc: Opaque DP SOC handle
13518  * @ctrl_psoc: Opaque SOC handle from control plane(Unused)
13519  * @hif_handle: Opaque HIF handle
13520  * @htc_handle: Opaque HTC handle
13521  * @qdf_osdev: QDF device (Unused)
13522  * @ol_ops: Offload Operations (Unused)
13523  * @device_id: Device ID (Unused)
13524  *
13525  * Return: DP SOC handle on success, NULL on failure
13526  */
13527 void *dp_soc_init_wifi3(struct cdp_soc_t *soc,
13528 			struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
13529 			struct hif_opaque_softc *hif_handle,
13530 			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
13531 			struct ol_if_ops *ol_ops, uint16_t device_id)
13532 {
13533 	return dp_soc_init((struct dp_soc *)soc, htc_handle, hif_handle);
13534 }
13535 
13536 #endif
13537 
13538 /*
13539  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
13540  *
13541  * @soc: handle to DP soc
13542  * @mac_id: MAC id
13543  *
13544  * Return: Return pdev corresponding to MAC
13545  */
13546 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
13547 {
13548 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
13549 		return (mac_id < MAX_PDEV_CNT) ? soc->pdev_list[mac_id] : NULL;
13550 
13551 	/* Typically for MCL as there only 1 PDEV*/
13552 	return soc->pdev_list[0];
13553 }
13554 
13555 /*
13556  * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
13557  * @soc:		DP SoC context
13558  * @max_mac_rings:	No of MAC rings
13559  *
13560  * Return: None
13561  */
13562 void dp_is_hw_dbs_enable(struct dp_soc *soc,
13563 				int *max_mac_rings)
13564 {
13565 	bool dbs_enable = false;
13566 	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
13567 		dbs_enable = soc->cdp_soc.ol_ops->
13568 		is_hw_dbs_2x2_capable((void *)soc->ctrl_psoc);
13569 
13570 	*max_mac_rings = (dbs_enable)?(*max_mac_rings):1;
13571 }
13572 
13573 qdf_export_symbol(dp_is_hw_dbs_enable);
13574 
13575 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
13576 /**
13577  * dp_get_cfr_rcc() - get cfr rcc config
13578  * @soc_hdl: Datapath soc handle
13579  * @pdev_id: id of objmgr pdev
13580  *
13581  * Return: true/false based on cfr mode setting
13582  */
13583 static
13584 bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
13585 {
13586 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13587 	struct dp_pdev *pdev = NULL;
13588 
13589 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13590 	if (!pdev) {
13591 		dp_err("pdev is NULL");
13592 		return false;
13593 	}
13594 
13595 	return pdev->cfr_rcc_mode;
13596 }
13597 
13598 /**
13599  * dp_set_cfr_rcc() - enable/disable cfr rcc config
13600  * @soc_hdl: Datapath soc handle
13601  * @pdev_id: id of objmgr pdev
13602  * @enable: Enable/Disable cfr rcc mode
13603  *
13604  * Return: none
13605  */
13606 static
13607 void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, bool enable)
13608 {
13609 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13610 	struct dp_pdev *pdev = NULL;
13611 
13612 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13613 	if (!pdev) {
13614 		dp_err("pdev is NULL");
13615 		return;
13616 	}
13617 
13618 	pdev->cfr_rcc_mode = enable;
13619 }
13620 
13621 /*
13622  * dp_get_cfr_dbg_stats - Get the debug statistics for CFR
13623  * @soc_hdl: Datapath soc handle
13624  * @pdev_id: id of data path pdev handle
13625  * @cfr_rcc_stats: CFR RCC debug statistics buffer
13626  *
13627  * Return: none
13628  */
13629 static inline void
13630 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
13631 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats)
13632 {
13633 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13634 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13635 
13636 	if (!pdev) {
13637 		dp_err("Invalid pdev");
13638 		return;
13639 	}
13640 
13641 	qdf_mem_copy(cfr_rcc_stats, &pdev->stats.rcc,
13642 		     sizeof(struct cdp_cfr_rcc_stats));
13643 }
13644 
13645 /*
13646  * dp_clear_cfr_dbg_stats - Clear debug statistics for CFR
13647  * @soc_hdl: Datapath soc handle
13648  * @pdev_id: id of data path pdev handle
13649  *
13650  * Return: none
13651  */
13652 static void dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl,
13653 				   uint8_t pdev_id)
13654 {
13655 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13656 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13657 
13658 	if (!pdev) {
13659 		dp_err("dp pdev is NULL");
13660 		return;
13661 	}
13662 
13663 	qdf_mem_zero(&pdev->stats.rcc, sizeof(pdev->stats.rcc));
13664 }
13665 #endif
13666 
13667 /**
13668  * dp_bucket_index() - Return index from array
13669  *
13670  * @delay: delay measured
13671  * @array: array used to index corresponding delay
13672  *
13673  * Return: index
13674  */
13675 static uint8_t dp_bucket_index(uint32_t delay, uint16_t *array)
13676 {
13677 	uint8_t i = CDP_DELAY_BUCKET_0;
13678 
13679 	for (; i < CDP_DELAY_BUCKET_MAX - 1; i++) {
13680 		if (delay >= array[i] && delay <= array[i + 1])
13681 			return i;
13682 	}
13683 
13684 	return (CDP_DELAY_BUCKET_MAX - 1);
13685 }
13686 
13687 /**
13688  * dp_fill_delay_buckets() - Fill delay statistics bucket for each
13689  *				type of delay
13690  *
13691  * @pdev: pdev handle
13692  * @delay: delay in ms
13693  * @tid: tid value
13694  * @mode: type of tx delay mode
13695  * @ring_id: ring number
13696  * Return: pointer to cdp_delay_stats structure
13697  */
13698 static struct cdp_delay_stats *
13699 dp_fill_delay_buckets(struct dp_pdev *pdev, uint32_t delay,
13700 		      uint8_t tid, uint8_t mode, uint8_t ring_id)
13701 {
13702 	uint8_t delay_index = 0;
13703 	struct cdp_tid_tx_stats *tstats =
13704 		&pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
13705 	struct cdp_tid_rx_stats *rstats =
13706 		&pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
13707 	/*
13708 	 * cdp_fw_to_hw_delay_range
13709 	 * Fw to hw delay ranges in milliseconds
13710 	 */
13711 	uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
13712 		0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
13713 
13714 	/*
13715 	 * cdp_sw_enq_delay_range
13716 	 * Software enqueue delay ranges in milliseconds
13717 	 */
13718 	uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
13719 		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
13720 
13721 	/*
13722 	 * cdp_intfrm_delay_range
13723 	 * Interframe delay ranges in milliseconds
13724 	 */
13725 	uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
13726 		0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};
13727 
13728 	/*
13729 	 * Update delay stats in proper bucket
13730 	 */
13731 	switch (mode) {
13732 	/* Software Enqueue delay ranges */
13733 	case CDP_DELAY_STATS_SW_ENQ:
13734 
13735 		delay_index = dp_bucket_index(delay, cdp_sw_enq_delay);
13736 		tstats->swq_delay.delay_bucket[delay_index]++;
13737 		return &tstats->swq_delay;
13738 
13739 	/* Tx Completion delay ranges */
13740 	case CDP_DELAY_STATS_FW_HW_TRANSMIT:
13741 
13742 		delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay);
13743 		tstats->hwtx_delay.delay_bucket[delay_index]++;
13744 		return &tstats->hwtx_delay;
13745 
13746 	/* Interframe tx delay ranges */
13747 	case CDP_DELAY_STATS_TX_INTERFRAME:
13748 
13749 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
13750 		tstats->intfrm_delay.delay_bucket[delay_index]++;
13751 		return &tstats->intfrm_delay;
13752 
13753 	/* Interframe rx delay ranges */
13754 	case CDP_DELAY_STATS_RX_INTERFRAME:
13755 
13756 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
13757 		rstats->intfrm_delay.delay_bucket[delay_index]++;
13758 		return &rstats->intfrm_delay;
13759 
13760 	/* Ring reap to indication to network stack */
13761 	case CDP_DELAY_STATS_REAP_STACK:
13762 
13763 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
13764 		rstats->to_stack_delay.delay_bucket[delay_index]++;
13765 		return &rstats->to_stack_delay;
13766 	default:
13767 		dp_debug("Incorrect delay mode: %d", mode);
13768 	}
13769 
13770 	return NULL;
13771 }
13772 
13773 /**
13774  * dp_update_delay_stats() - Update delay statistics in structure
13775  *				and fill min, max and avg delay
13776  *
13777  * @pdev: pdev handle
13778  * @delay: delay in ms
13779  * @tid: tid value
13780  * @mode: type of tx delay mode
13781  * @ring id: ring number
13782  * Return: none
13783  */
13784 void dp_update_delay_stats(struct dp_pdev *pdev, uint32_t delay,
13785 			   uint8_t tid, uint8_t mode, uint8_t ring_id)
13786 {
13787 	struct cdp_delay_stats *dstats = NULL;
13788 
13789 	/*
13790 	 * Delay ranges are different for different delay modes
13791 	 * Get the correct index to update delay bucket
13792 	 */
13793 	dstats = dp_fill_delay_buckets(pdev, delay, tid, mode, ring_id);
13794 	if (qdf_unlikely(!dstats))
13795 		return;
13796 
13797 	if (delay != 0) {
13798 		/*
13799 		 * Compute minimum,average and maximum
13800 		 * delay
13801 		 */
13802 		if (delay < dstats->min_delay)
13803 			dstats->min_delay = delay;
13804 
13805 		if (delay > dstats->max_delay)
13806 			dstats->max_delay = delay;
13807 
13808 		/*
13809 		 * Average over delay measured till now
13810 		 */
13811 		if (!dstats->avg_delay)
13812 			dstats->avg_delay = delay;
13813 		else
13814 			dstats->avg_delay = ((delay + dstats->avg_delay) / 2);
13815 	}
13816 }
13817 
13818 /**
13819  * dp_get_peer_mac_list(): function to get peer mac list of vdev
13820  * @soc: Datapath soc handle
13821  * @vdev_id: vdev id
13822  * @newmac: Table of the clients mac
13823  * @mac_cnt: No. of MACs required
13824  * @limit: Limit the number of clients
13825  *
13826  * return: no of clients
13827  */
13828 uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
13829 			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
13830 			      u_int16_t mac_cnt, bool limit)
13831 {
13832 	struct dp_soc *dp_soc = (struct dp_soc *)soc;
13833 	struct dp_vdev *vdev =
13834 		dp_vdev_get_ref_by_id(dp_soc, vdev_id, DP_MOD_ID_CDP);
13835 	struct dp_peer *peer;
13836 	uint16_t new_mac_cnt = 0;
13837 
13838 	if (!vdev)
13839 		return new_mac_cnt;
13840 
13841 	if (limit && (vdev->num_peers > mac_cnt))
13842 		return 0;
13843 
13844 	qdf_spin_lock_bh(&vdev->peer_list_lock);
13845 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
13846 		if (peer->bss_peer)
13847 			continue;
13848 		if (new_mac_cnt < mac_cnt) {
13849 			WLAN_ADDR_COPY(newmac[new_mac_cnt], peer->mac_addr.raw);
13850 			new_mac_cnt++;
13851 		}
13852 	}
13853 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
13854 	dp_vdev_unref_delete(dp_soc, vdev, DP_MOD_ID_CDP);
13855 	return new_mac_cnt;
13856 }
13857 
13858 #ifdef QCA_SUPPORT_WDS_EXTENDED
13859 uint16_t dp_wds_ext_get_peer_id(ol_txrx_soc_handle soc,
13860 				uint8_t vdev_id,
13861 				uint8_t *mac)
13862 {
13863 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
13864 						       mac, 0, vdev_id,
13865 						       DP_MOD_ID_CDP);
13866 	uint16_t peer_id = HTT_INVALID_PEER;
13867 
13868 	if (!peer) {
13869 		dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
13870 		return peer_id;
13871 	}
13872 
13873 	peer_id = peer->peer_id;
13874 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
13875 	return peer_id;
13876 }
13877 
13878 QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc,
13879 				  uint8_t vdev_id,
13880 				  uint8_t *mac,
13881 				  ol_txrx_rx_fp rx,
13882 				  ol_osif_peer_handle osif_peer)
13883 {
13884 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
13885 						       mac, 0, vdev_id,
13886 						       DP_MOD_ID_CDP);
13887 	QDF_STATUS status = QDF_STATUS_E_INVAL;
13888 
13889 	if (!peer) {
13890 		dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
13891 		return status;
13892 	}
13893 	if (!peer->txrx_peer) {
13894 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
13895 		return status;
13896 	}
13897 
13898 	if (rx) {
13899 		if (peer->txrx_peer->osif_rx) {
13900 			status = QDF_STATUS_E_ALREADY;
13901 		} else {
13902 			peer->txrx_peer->osif_rx = rx;
13903 			status = QDF_STATUS_SUCCESS;
13904 		}
13905 	} else {
13906 		if (peer->txrx_peer->osif_rx) {
13907 			peer->txrx_peer->osif_rx = NULL;
13908 			status = QDF_STATUS_SUCCESS;
13909 		} else {
13910 			status = QDF_STATUS_E_ALREADY;
13911 		}
13912 	}
13913 
13914 	peer->txrx_peer->wds_ext.osif_peer = osif_peer;
13915 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
13916 
13917 	return status;
13918 }
13919 #endif /* QCA_SUPPORT_WDS_EXTENDED */
13920 
13921 /**
13922  * dp_pdev_srng_deinit() - de-initialize all pdev srng ring including
13923  *			   monitor rings
13924  * @pdev: Datapath pdev handle
13925  *
13926  */
13927 static void dp_pdev_srng_deinit(struct dp_pdev *pdev)
13928 {
13929 	struct dp_soc *soc = pdev->soc;
13930 	uint8_t i;
13931 
13932 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled)
13933 		dp_srng_deinit(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
13934 			       RXDMA_BUF,
13935 			       pdev->lmac_id);
13936 
13937 	if (!soc->rxdma2sw_rings_not_supported) {
13938 		for (i = 0;
13939 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
13940 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
13941 								 pdev->pdev_id);
13942 
13943 			wlan_minidump_remove(soc->rxdma_err_dst_ring[lmac_id].
13944 							base_vaddr_unaligned,
13945 					     soc->rxdma_err_dst_ring[lmac_id].
13946 								alloc_size,
13947 					     soc->ctrl_psoc,
13948 					     WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
13949 					     "rxdma_err_dst");
13950 			dp_srng_deinit(soc, &soc->rxdma_err_dst_ring[lmac_id],
13951 				       RXDMA_DST, lmac_id);
13952 		}
13953 	}
13954 
13955 
13956 }
13957 
/**
 * dp_pdev_srng_init() - initialize all pdev srng rings including
 *			   monitor rings
 * @pdev: Datapath pdev handle
 *
 * return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_NOMEM on failure
 */
static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	uint32_t i;

	soc_cfg_ctx = soc->wlan_cfg_ctx;

	/* Per-pdev rx refill ring exists only when the common dmac source
	 * rx buffer ring feature is disabled.
	 */
	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
		if (dp_srng_init(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
				 RXDMA_BUF, 0, pdev->lmac_id)) {
			dp_init_err("%pK: dp_srng_init failed rx refill ring",
				    soc);
			goto fail1;
		}
	}

	/* LMAC RxDMA to SW Rings configuration */
	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
		/* Only valid for MCL */
		pdev = soc->pdev_list[0];

	if (!soc->rxdma2sw_rings_not_supported) {
		for (i = 0;
		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
								 pdev->pdev_id);
			struct dp_srng *srng =
				&soc->rxdma_err_dst_ring[lmac_id];

			/* Skip rings that are already initialized */
			if (srng->hal_srng)
				continue;

			if (dp_srng_init(soc, srng, RXDMA_DST, 0, lmac_id)) {
				dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring",
					    soc);
				goto fail1;
			}
			wlan_minidump_log(soc->rxdma_err_dst_ring[lmac_id].
						base_vaddr_unaligned,
					  soc->rxdma_err_dst_ring[lmac_id].
						alloc_size,
					  soc->ctrl_psoc,
					  WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
					  "rxdma_err_dst");
		}
	}
	return QDF_STATUS_SUCCESS;

fail1:
	/* Roll back any rings initialized so far */
	dp_pdev_srng_deinit(pdev);
	return QDF_STATUS_E_NOMEM;
}
14019 
14020 /**
14021  * dp_pdev_srng_free() - free all pdev srng rings including monitor rings
14022  * pdev: Datapath pdev handle
14023  *
14024  */
14025 static void dp_pdev_srng_free(struct dp_pdev *pdev)
14026 {
14027 	struct dp_soc *soc = pdev->soc;
14028 	uint8_t i;
14029 
14030 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled)
14031 		dp_srng_free(soc, &soc->rx_refill_buf_ring[pdev->lmac_id]);
14032 
14033 	if (!soc->rxdma2sw_rings_not_supported) {
14034 		for (i = 0;
14035 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
14036 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
14037 								 pdev->pdev_id);
14038 
14039 			dp_srng_free(soc, &soc->rxdma_err_dst_ring[lmac_id]);
14040 		}
14041 	}
14042 }
14043 
/**
 * dp_pdev_srng_alloc() - allocate memory for all pdev srng rings including
 *			  monitor rings
 * @pdev: Datapath pdev handle
 *
 * return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_NOMEM on failure
 */
static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	uint32_t ring_size;
	uint32_t i;

	soc_cfg_ctx = soc->wlan_cfg_ctx;

	/* Per-pdev rx refill ring is allocated only when the common dmac
	 * source rx buffer ring feature is disabled.
	 */
	ring_size = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
		if (dp_srng_alloc(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
				  RXDMA_BUF, ring_size, 0)) {
			dp_init_err("%pK: dp_srng_alloc failed rx refill ring",
				    soc);
			goto fail1;
		}
	}

	ring_size = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
	/* LMAC RxDMA to SW Rings configuration */
	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
		/* Only valid for MCL */
		pdev = soc->pdev_list[0];

	if (!soc->rxdma2sw_rings_not_supported) {
		for (i = 0;
		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
								 pdev->pdev_id);
			struct dp_srng *srng =
				&soc->rxdma_err_dst_ring[lmac_id];

			/* Skip rings that were already allocated */
			if (srng->base_vaddr_unaligned)
				continue;

			if (dp_srng_alloc(soc, srng, RXDMA_DST, ring_size, 0)) {
				dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring",
					    soc);
				goto fail1;
			}
		}
	}

	return QDF_STATUS_SUCCESS;
fail1:
	/* Roll back all rings allocated so far */
	dp_pdev_srng_free(pdev);
	return QDF_STATUS_E_NOMEM;
}
14101 
/**
 * dp_soc_srng_deinit() - de-initialize soc srng rings
 * @soc: Datapath soc handle
 *
 */
static void dp_soc_srng_deinit(struct dp_soc *soc)
{
	uint32_t i;

	/* Architecture-specific rings are de-initialized first */
	if (soc->arch_ops.txrx_soc_srng_deinit)
		soc->arch_ops.txrx_soc_srng_deinit(soc);

	/* Free the ring memories */
	/* Common rings */
	wlan_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
			     soc->wbm_desc_rel_ring.alloc_size,
			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_WBM_DESC_REL,
			     "wbm_desc_rel_ring");
	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);

	/* Tx data rings */
	for (i = 0; i < soc->num_tcl_data_rings; i++)
		dp_deinit_tx_pair_by_index(soc, i);

	/* IPA owns an extra TCL/WBM pair plus an alternate tx ring */
	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
		dp_deinit_tx_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
		dp_ipa_deinit_alt_tx_ring(soc);
	}

	/* TCL command and status rings */
	if (soc->init_tcl_cmd_cred_ring) {
		/* NOTE(review): minidump name "wbm_desc_rel_ring" looks like a
		 * copy-paste of the WBM block above; the matching
		 * wlan_minidump_log() in dp_soc_srng_init() uses the same
		 * string, so removal stays consistent — confirm intent.
		 */
		wlan_minidump_remove(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
				     soc->tcl_cmd_credit_ring.alloc_size,
				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_CMD,
				     "wbm_desc_rel_ring");
		dp_srng_deinit(soc, &soc->tcl_cmd_credit_ring,
			       TCL_CMD_CREDIT, 0);
	}

	/* NOTE(review): same "wbm_desc_rel_ring" label reuse as above */
	wlan_minidump_remove(soc->tcl_status_ring.base_vaddr_unaligned,
			     soc->tcl_status_ring.alloc_size,
			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_STATUS,
			     "wbm_desc_rel_ring");
	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);

	for (i = 0; i < soc->num_reo_dest_rings; i++) {
		/* TODO: Get number of rings and ring sizes
		 * from wlan_cfg
		 */
		wlan_minidump_remove(soc->reo_dest_ring[i].base_vaddr_unaligned,
				     soc->reo_dest_ring[i].alloc_size,
				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_DEST,
				     "reo_dest_ring");
		dp_srng_deinit(soc, &soc->reo_dest_ring[i], REO_DST, i);
	}

	/* REO reinjection ring */
	wlan_minidump_remove(soc->reo_reinject_ring.base_vaddr_unaligned,
			     soc->reo_reinject_ring.alloc_size,
			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_REINJECT,
			     "reo_reinject_ring");
	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);

	/* Rx release ring */
	wlan_minidump_remove(soc->rx_rel_ring.base_vaddr_unaligned,
			     soc->rx_rel_ring.alloc_size,
			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_RX_REL,
			     "reo_release_ring");
	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);

	/* Rx exception ring */
	/* TODO: Better to store ring_type and ring_num in
	 * dp_srng during setup
	 */
	wlan_minidump_remove(soc->reo_exception_ring.base_vaddr_unaligned,
			     soc->reo_exception_ring.alloc_size,
			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_EXCEPTION,
			     "reo_exception_ring");
	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);

	/* REO command and status rings */
	wlan_minidump_remove(soc->reo_cmd_ring.base_vaddr_unaligned,
			     soc->reo_cmd_ring.alloc_size,
			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_CMD,
			     "reo_cmd_ring");
	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
	wlan_minidump_remove(soc->reo_status_ring.base_vaddr_unaligned,
			     soc->reo_status_ring.alloc_size,
			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_STATUS,
			     "reo_status_ring");
	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
}
14194 
/**
 * dp_soc_srng_init() - Initialize soc level srng rings
 * @soc: Datapath soc handle
 *
 * return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_FAILURE on failure
 */
static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc)
{
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	uint8_t i;
	uint8_t wbm2_sw_rx_rel_ring_id;

	soc_cfg_ctx = soc->wlan_cfg_ctx;

	dp_enable_verbose_debug(soc);

	/* WBM descriptor release ring */
	if (dp_srng_init(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0)) {
		dp_init_err("%pK: dp_srng_init failed for wbm_desc_rel_ring", soc);
		goto fail1;
	}

	wlan_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
			  soc->wbm_desc_rel_ring.alloc_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_WBM_DESC_REL,
			  "wbm_desc_rel_ring");

	if (soc->init_tcl_cmd_cred_ring) {
		/* TCL command and status rings */
		if (dp_srng_init(soc, &soc->tcl_cmd_credit_ring,
				 TCL_CMD_CREDIT, 0, 0)) {
			dp_init_err("%pK: dp_srng_init failed for tcl_cmd_ring", soc);
			goto fail1;
		}

		/* NOTE(review): minidump name "wbm_desc_rel_ring" appears to
		 * be copy-pasted from the WBM block above; matched by the
		 * wlan_minidump_remove() in dp_soc_srng_deinit() — confirm
		 * whether "tcl_cmd_cred_ring" was intended.
		 */
		wlan_minidump_log(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
				  soc->tcl_cmd_credit_ring.alloc_size,
				  soc->ctrl_psoc,
				  WLAN_MD_DP_SRNG_TCL_CMD,
				  "wbm_desc_rel_ring");
	}

	if (dp_srng_init(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0)) {
		dp_init_err("%pK: dp_srng_init failed for tcl_status_ring", soc);
		goto fail1;
	}

	/* NOTE(review): same "wbm_desc_rel_ring" label reuse as above */
	wlan_minidump_log(soc->tcl_status_ring.base_vaddr_unaligned,
			  soc->tcl_status_ring.alloc_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_TCL_STATUS,
			  "wbm_desc_rel_ring");

	/* REO reinjection ring */
	if (dp_srng_init(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0)) {
		dp_init_err("%pK: dp_srng_init failed for reo_reinject_ring", soc);
		goto fail1;
	}

	wlan_minidump_log(soc->reo_reinject_ring.base_vaddr_unaligned,
			  soc->reo_reinject_ring.alloc_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_REO_REINJECT,
			  "reo_reinject_ring");

	/* Ring number for the WBM2SW rx release ring comes from soc cfg */
	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc_cfg_ctx);
	/* Rx release ring */
	if (dp_srng_init(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
			 wbm2_sw_rx_rel_ring_id, 0)) {
		dp_init_err("%pK: dp_srng_init failed for rx_rel_ring", soc);
		goto fail1;
	}

	wlan_minidump_log(soc->rx_rel_ring.base_vaddr_unaligned,
			  soc->rx_rel_ring.alloc_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_RX_REL,
			  "reo_release_ring");

	/* Rx exception ring */
	if (dp_srng_init(soc, &soc->reo_exception_ring,
			 REO_EXCEPTION, 0, MAX_REO_DEST_RINGS)) {
		dp_init_err("%pK: dp_srng_init failed - reo_exception", soc);
		goto fail1;
	}

	wlan_minidump_log(soc->reo_exception_ring.base_vaddr_unaligned,
			  soc->reo_exception_ring.alloc_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_REO_EXCEPTION,
			  "reo_exception_ring");

	/* REO command and status rings */
	if (dp_srng_init(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0)) {
		dp_init_err("%pK: dp_srng_init failed for reo_cmd_ring", soc);
		goto fail1;
	}

	wlan_minidump_log(soc->reo_cmd_ring.base_vaddr_unaligned,
			  soc->reo_cmd_ring.alloc_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_REO_CMD,
			  "reo_cmd_ring");

	/* REO command list/lock must be ready before REO status ring use */
	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
	TAILQ_INIT(&soc->rx.reo_cmd_list);
	qdf_spinlock_create(&soc->rx.reo_cmd_lock);

	if (dp_srng_init(soc, &soc->reo_status_ring, REO_STATUS, 0, 0)) {
		dp_init_err("%pK: dp_srng_init failed for reo_status_ring", soc);
		goto fail1;
	}

	wlan_minidump_log(soc->reo_status_ring.base_vaddr_unaligned,
			  soc->reo_status_ring.alloc_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_REO_STATUS,
			  "reo_status_ring");

	for (i = 0; i < soc->num_tcl_data_rings; i++) {
		if (dp_init_tx_ring_pair_by_index(soc, i))
			goto fail1;
	}

	/* IPA owns an extra TCL/WBM pair plus an alternate tx ring */
	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
		if (dp_init_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
			goto fail1;

		if (dp_ipa_init_alt_tx_ring(soc))
			goto fail1;
	}

	dp_create_ext_stats_event(soc);

	for (i = 0; i < soc->num_reo_dest_rings; i++) {
		/* Initialize REO destination ring */
		if (dp_srng_init(soc, &soc->reo_dest_ring[i], REO_DST, i, 0)) {
			dp_init_err("%pK: dp_srng_init failed for reo_dest_ringn", soc);
			goto fail1;
		}

		wlan_minidump_log(soc->reo_dest_ring[i].base_vaddr_unaligned,
				  soc->reo_dest_ring[i].alloc_size,
				  soc->ctrl_psoc,
				  WLAN_MD_DP_SRNG_REO_DEST,
				  "reo_dest_ring");
	}

	/* Architecture-specific rings are initialized last */
	if (soc->arch_ops.txrx_soc_srng_init) {
		if (soc->arch_ops.txrx_soc_srng_init(soc)) {
			dp_init_err("%pK: dp_srng_init failed for arch rings",
				    soc);
			goto fail1;
		}
	}

	return QDF_STATUS_SUCCESS;
fail1:
	/*
	 * Cleanup will be done as part of soc_detach, which will
	 * be called on pdev attach failure
	 */
	dp_soc_srng_deinit(soc);
	return QDF_STATUS_E_FAILURE;
}
14362 
14363 /**
14364  * dp_soc_srng_free() - free soc level srng rings
14365  * @soc: Datapath soc handle
14366  *
14367  */
14368 static void dp_soc_srng_free(struct dp_soc *soc)
14369 {
14370 	uint32_t i;
14371 
14372 	if (soc->arch_ops.txrx_soc_srng_free)
14373 		soc->arch_ops.txrx_soc_srng_free(soc);
14374 
14375 	dp_srng_free(soc, &soc->wbm_desc_rel_ring);
14376 
14377 	for (i = 0; i < soc->num_tcl_data_rings; i++)
14378 		dp_free_tx_ring_pair_by_index(soc, i);
14379 
14380 	/* Free IPA rings for TCL_TX and TCL_COMPL ring */
14381 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
14382 		dp_free_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
14383 		dp_ipa_free_alt_tx_ring(soc);
14384 	}
14385 
14386 	if (soc->init_tcl_cmd_cred_ring)
14387 		dp_srng_free(soc, &soc->tcl_cmd_credit_ring);
14388 
14389 	dp_srng_free(soc, &soc->tcl_status_ring);
14390 
14391 	for (i = 0; i < soc->num_reo_dest_rings; i++)
14392 		dp_srng_free(soc, &soc->reo_dest_ring[i]);
14393 
14394 	dp_srng_free(soc, &soc->reo_reinject_ring);
14395 	dp_srng_free(soc, &soc->rx_rel_ring);
14396 
14397 	dp_srng_free(soc, &soc->reo_exception_ring);
14398 
14399 	dp_srng_free(soc, &soc->reo_cmd_ring);
14400 	dp_srng_free(soc, &soc->reo_status_ring);
14401 }
14402 
/**
 * dp_soc_srng_alloc() - Allocate memory for soc level srng rings
 * @soc: Datapath soc handle
 *
 * return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_NOMEM on failure
 */
static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc)
{
	uint32_t entries;
	uint32_t i;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
	uint32_t tx_comp_ring_size, tx_ring_size, reo_dst_ring_size;

	soc_cfg_ctx = soc->wlan_cfg_ctx;

	/* sw2wbm link descriptor release ring */
	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
	if (dp_srng_alloc(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE,
			  entries, 0)) {
		dp_init_err("%pK: dp_srng_alloc failed for wbm_desc_rel_ring", soc);
		goto fail1;
	}

	/* 'entries' is re-fetched from cfg before each allocation below */
	entries = wlan_cfg_get_dp_soc_tcl_cmd_credit_ring_size(soc_cfg_ctx);
	/* TCL command and status rings */
	if (soc->init_tcl_cmd_cred_ring) {
		if (dp_srng_alloc(soc, &soc->tcl_cmd_credit_ring,
				  TCL_CMD_CREDIT, entries, 0)) {
			dp_init_err("%pK: dp_srng_alloc failed for tcl_cmd_ring", soc);
			goto fail1;
		}
	}

	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
	if (dp_srng_alloc(soc, &soc->tcl_status_ring, TCL_STATUS, entries,
			  0)) {
		dp_init_err("%pK: dp_srng_alloc failed for tcl_status_ring", soc);
		goto fail1;
	}

	/* REO reinjection ring */
	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
	if (dp_srng_alloc(soc, &soc->reo_reinject_ring, REO_REINJECT,
			  entries, 0)) {
		dp_init_err("%pK: dp_srng_alloc failed for reo_reinject_ring", soc);
		goto fail1;
	}

	/* Rx release ring */
	entries = wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx);
	if (dp_srng_alloc(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
			  entries, 0)) {
		dp_init_err("%pK: dp_srng_alloc failed for rx_rel_ring", soc);
		goto fail1;
	}

	/* Rx exception ring */
	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
	if (dp_srng_alloc(soc, &soc->reo_exception_ring, REO_EXCEPTION,
			  entries, 0)) {
		dp_init_err("%pK: dp_srng_alloc failed - reo_exception", soc);
		goto fail1;
	}

	/* REO command and status rings */
	entries = wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx);
	if (dp_srng_alloc(soc, &soc->reo_cmd_ring, REO_CMD, entries, 0)) {
		dp_init_err("%pK: dp_srng_alloc failed for reo_cmd_ring", soc);
		goto fail1;
	}

	entries = wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx);
	if (dp_srng_alloc(soc, &soc->reo_status_ring, REO_STATUS,
			  entries, 0)) {
		dp_init_err("%pK: dp_srng_alloc failed for reo_status_ring", soc);
		goto fail1;
	}

	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc_cfg_ctx);

	/* Disable cached desc if NSS offload is enabled */
	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
		cached = 0;

	for (i = 0; i < soc->num_tcl_data_rings; i++) {
		if (dp_alloc_tx_ring_pair_by_index(soc, i))
			goto fail1;
	}

	/* IPA rings for TCL_TX and TX_COMP will be allocated here */
	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
		if (dp_alloc_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
			goto fail1;

		if (dp_ipa_alloc_alt_tx_ring(soc))
			goto fail1;
	}

	for (i = 0; i < soc->num_reo_dest_rings; i++) {
		/* Setup REO destination ring */
		if (dp_srng_alloc(soc, &soc->reo_dest_ring[i], REO_DST,
				  reo_dst_ring_size, cached)) {
			dp_init_err("%pK: dp_srng_alloc failed for reo_dest_ring", soc);
			goto fail1;
		}
	}

	/* Architecture-specific rings are allocated last */
	if (soc->arch_ops.txrx_soc_srng_alloc) {
		if (soc->arch_ops.txrx_soc_srng_alloc(soc)) {
			dp_init_err("%pK: dp_srng_alloc failed for arch rings",
				    soc);
			goto fail1;
		}
	}

	return QDF_STATUS_SUCCESS;

fail1:
	/* Roll back every ring allocated so far */
	dp_soc_srng_free(soc);
	return QDF_STATUS_E_NOMEM;
}
14528 
/**
 * dp_soc_cfg_dump() - dump target specific soc configuration to the log
 * @soc: Datapath soc handle
 * @target_type: target type returned by hal_get_target_type()
 */
static void dp_soc_cfg_dump(struct dp_soc *soc, uint32_t target_type)
{
	dp_init_info("DP soc Dump for Target = %d", target_type);
	dp_init_info("ast_override_support = %d, da_war_enabled = %d,",
		     soc->ast_override_support, soc->da_war_enabled);

	/* Dump the rest of the soc cfg context fields */
	wlan_cfg_dp_soc_ctx_dump(soc->wlan_cfg_ctx);
}
14537 
/**
 * dp_soc_cfg_init() - initialize target specific configuration
 *		       during dp_soc_init
 * @soc: dp soc handle
 */
static void dp_soc_cfg_init(struct dp_soc *soc)
{
	uint32_t target_type;

	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_QCA6290:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA6290);
		soc->ast_override_support = 1;
		soc->da_war_enabled = false;
		break;
	case TARGET_TYPE_QCA6390:
	case TARGET_TYPE_QCA6490:
	case TARGET_TYPE_QCA6750:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA6290);
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
		soc->ast_override_support = 1;
		/* In global monitor mode clear the rx and rxdma2host
		 * interrupt ring masks for every interrupt context.
		 */
		if (soc->cdp_soc.ol_ops->get_con_mode &&
		    soc->cdp_soc.ol_ops->get_con_mode() ==
		    QDF_GLOBAL_MONITOR_MODE) {
			int int_ctx;

			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
			}
		}
		soc->wlan_cfg_ctx->rxdma1_enable = 0;
		break;
	case TARGET_TYPE_KIWI:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA6290);
		soc->ast_override_support = 1;

		/* Monitor mode: clear rx masks; rxdma2host masks are
		 * cleared only when monitor mode uses polling.
		 */
		if (soc->cdp_soc.ol_ops->get_con_mode &&
		    soc->cdp_soc.ol_ops->get_con_mode() ==
		    QDF_GLOBAL_MONITOR_MODE) {
			int int_ctx;

			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS;
			     int_ctx++) {
				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
				if (dp_is_monitor_mode_using_poll(soc))
					soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
			}
		}

		soc->wlan_cfg_ctx->rxdma1_enable = 0;
		soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev = 1;
		/* use only MAC0 status ring */
		soc->wlan_cfg_ctx->num_rxdma_status_rings_per_pdev = 1;
		break;
	case TARGET_TYPE_QCA8074:
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
		soc->da_war_enabled = true;
		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
		break;
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
	case TARGET_TYPE_QCA9574:
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
		soc->ast_override_support = 1;
		soc->per_tid_basize_max_tid = 8;
		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
		soc->da_war_enabled = false;
		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
		break;
	case TARGET_TYPE_QCN9000:
		soc->ast_override_support = 1;
		soc->da_war_enabled = false;
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
		soc->per_tid_basize_max_tid = 8;
		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
		soc->lmac_polled_mode = 0;
		soc->wbm_release_desc_rx_sg_support = 1;
		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
		break;
	case TARGET_TYPE_QCA5018:
	case TARGET_TYPE_QCN6122:
		soc->ast_override_support = 1;
		soc->da_war_enabled = false;
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
		soc->per_tid_basize_max_tid = 8;
		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS_11AX;
		soc->disable_mac1_intr = 1;
		soc->disable_mac2_intr = 1;
		soc->wbm_release_desc_rx_sg_support = 1;
		break;
	case TARGET_TYPE_QCN9224:
		soc->ast_override_support = 1;
		soc->da_war_enabled = false;
		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
		soc->per_tid_basize_max_tid = 8;
		soc->wbm_release_desc_rx_sg_support = 1;
		soc->rxdma2sw_rings_not_supported = 1;
		soc->wbm_sg_last_msdu_war = 1;
		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
		break;
	default:
		/* Unsupported targets are a hard configuration error */
		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
		qdf_assert_always(0);
		break;
	}
	dp_soc_cfg_dump(soc, target_type);
}
14653 
/**
 * dp_soc_cfg_attach() - set target specific configuration in
 *			 dp soc cfg.
 * @soc: dp soc handle
 */
static void dp_soc_cfg_attach(struct dp_soc *soc)
{
	int target_type;
	int nss_cfg = 0;

	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_QCA6290:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA6290);
		break;
	case TARGET_TYPE_QCA6390:
	case TARGET_TYPE_QCA6490:
	case TARGET_TYPE_QCA6750:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA6290);
		soc->wlan_cfg_ctx->rxdma1_enable = 0;
		break;
	case TARGET_TYPE_KIWI:
		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
					       REO_DST_RING_SIZE_QCA6290);
		soc->wlan_cfg_ctx->rxdma1_enable = 0;
		break;
	case TARGET_TYPE_QCA8074:
		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
		break;
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
	case TARGET_TYPE_QCA9574:
	case TARGET_TYPE_QCN6122:
	case TARGET_TYPE_QCA5018:
		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
		break;
	case TARGET_TYPE_QCN9000:
		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
		break;
	case TARGET_TYPE_QCN9224:
		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
		break;
	default:
		/* Unsupported targets are a hard configuration error */
		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
		qdf_assert_always(0);
		break;
	}

	if (soc->cdp_soc.ol_ops->get_soc_nss_cfg)
		nss_cfg = soc->cdp_soc.ol_ops->get_soc_nss_cfg(soc->ctrl_psoc);

	wlan_cfg_set_dp_soc_nss_cfg(soc->wlan_cfg_ctx, nss_cfg);

	/* With NSS offload host-side tx descriptors/pools and the TCL
	 * cmd/credit ring are not used; ring counts come from nss cfg.
	 */
	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
		wlan_cfg_set_num_tx_desc_pool(soc->wlan_cfg_ctx, 0);
		wlan_cfg_set_num_tx_ext_desc_pool(soc->wlan_cfg_ctx, 0);
		wlan_cfg_set_num_tx_desc(soc->wlan_cfg_ctx, 0);
		wlan_cfg_set_num_tx_ext_desc(soc->wlan_cfg_ctx, 0);
		soc->init_tcl_cmd_cred_ring = false;
		soc->num_tcl_data_rings =
			wlan_cfg_num_nss_tcl_data_rings(soc->wlan_cfg_ctx);
		soc->num_reo_dest_rings =
			wlan_cfg_num_nss_reo_dest_rings(soc->wlan_cfg_ctx);

	} else {
		soc->init_tcl_cmd_cred_ring = true;
		soc->num_tx_comp_rings =
			wlan_cfg_num_tx_comp_rings(soc->wlan_cfg_ctx);
		soc->num_tcl_data_rings =
			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
		soc->num_reo_dest_rings =
			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
	}

	soc->arch_ops.soc_cfg_attach(soc);
}
14735 
14736 static inline  void dp_pdev_set_default_reo(struct dp_pdev *pdev)
14737 {
14738 	struct dp_soc *soc = pdev->soc;
14739 
14740 	switch (pdev->pdev_id) {
14741 	case 0:
14742 		pdev->reo_dest =
14743 			wlan_cfg_radio0_default_reo_get(soc->wlan_cfg_ctx);
14744 		break;
14745 
14746 	case 1:
14747 		pdev->reo_dest =
14748 			wlan_cfg_radio1_default_reo_get(soc->wlan_cfg_ctx);
14749 		break;
14750 
14751 	case 2:
14752 		pdev->reo_dest =
14753 			wlan_cfg_radio2_default_reo_get(soc->wlan_cfg_ctx);
14754 		break;
14755 
14756 	default:
14757 		dp_init_err("%pK: Invalid pdev_id %d for reo selection",
14758 			    soc, pdev->pdev_id);
14759 		break;
14760 	}
14761 }
14762 
/*
 * dp_pdev_init() - Initialize the txrx pdev already allocated at @pdev_id
 * @txrx_soc: datapath soc handle (cast internally to struct dp_soc)
 * @htc_handle: HTC handle for host-target interface
 *              (not referenced in this body - TODO confirm intended use)
 * @qdf_osdev: QDF OS device handle
 *             (not referenced in this body - TODO confirm intended use)
 * @pdev_id: index into soc->pdev_list of the pdev to initialize
 *
 * Wires up the pdev's software state in a fixed order: WDI event
 * framework, SRNG rings, TCL/IPA ring descriptors, TX subsystem,
 * NSS offload config, locks and per-band lmac mappings, stats,
 * DSCP/PCP TID maps, REO destination, sojourn stats buffer, RXDMA
 * rings, IPA resources, RX flow search table, backpressure stats,
 * monitor pdev and finally RX descriptor pools/buffers.
 *
 * On any failure the goto chain below (fail7 .. fail0) unwinds the
 * steps that completed, in reverse order of initialization.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */
static QDF_STATUS dp_pdev_init(struct cdp_soc_t *txrx_soc,
				      HTC_HANDLE htc_handle,
				      qdf_device_t qdf_osdev,
				      uint8_t pdev_id)
{
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	int nss_cfg;
	void *sojourn_buf;
	QDF_STATUS ret;

	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	/* pdev memory was allocated earlier; this function only inits it */
	struct dp_pdev *pdev = soc->pdev_list[pdev_id];

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	pdev->soc = soc;
	pdev->pdev_id = pdev_id;

	/*
	 * Variable to prevent double pdev deinitialization during
	 * radio detach execution .i.e. in the absence of any vdev.
	 */
	pdev->pdev_deinit = 0;

	if (dp_wdi_event_attach(pdev)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "dp_wdi_evet_attach failed");
		goto fail0;
	}

	if (dp_pdev_srng_init(pdev)) {
		dp_init_err("%pK: Failed to initialize pdev srng rings", soc);
		goto fail1;
	}

	/* Initialize descriptors in TCL Rings used by IPA */
	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
		hal_tx_init_data_ring(soc->hal_soc,
				      soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng);
		dp_ipa_hal_tx_init_alt_data_ring(soc);
	}

	/*
	 * Initialize command/credit ring descriptor
	 * Command/CREDIT ring also used for sending DATA cmds
	 */
	if (soc->init_tcl_cmd_cred_ring)
		hal_tx_init_cmd_credit_ring(soc->hal_soc,
					    soc->tcl_cmd_credit_ring.hal_srng);

	dp_tx_pdev_init(pdev);

	/*
	 * set nss pdev config based on soc config
	 */
	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
	/* per-radio NSS enable: bit pdev_id of the soc-level NSS cfg mask */
	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
					 (nss_cfg & (1 << pdev_id)));
	pdev->target_pdev_id =
		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);

	/*
	 * Special-case mapping for the 2G PHYB hardware mode; overrides
	 * the id computed above for that lmac.
	 */
	if (soc->preferred_hw_mode == WMI_HOST_HW_MODE_2G_PHYB &&
	    pdev->lmac_id == PHYB_2G_LMAC_ID) {
		pdev->target_pdev_id = PHYB_2G_TARGET_PDEV_ID;
	}

	/* Reset the cpu ring map if radio is NSS offloaded */
	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
		dp_soc_reset_cpu_ring_map(soc);
		dp_soc_reset_intr_mask(soc);
	}

	/* vdev list starts empty; protected by vdev_list_lock hereafter */
	TAILQ_INIT(&pdev->vdev_list);
	qdf_spinlock_create(&pdev->vdev_list_lock);
	pdev->vdev_count = 0;

	qdf_spinlock_create(&pdev->tx_mutex);
	/* band-to-lmac mappings are filled in later; mark invalid for now */
	pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_INVALID_LMAC_ID;
	pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_INVALID_LMAC_ID;
	pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_INVALID_LMAC_ID;

	DP_STATS_INIT(pdev);

	dp_local_peer_id_pool_init(pdev);

	dp_dscp_tid_map_setup(pdev);
	dp_pcp_tid_map_setup(pdev);

	/* set the reo destination during initialization */
	dp_pdev_set_default_reo(pdev);

	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));

	/* network buffer used to carry per-TID sojourn stats to upper layers */
	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
			      TRUE);

	if (!pdev->sojourn_buf) {
		dp_init_err("%pK: Failed to allocate sojourn buf", soc);
		goto fail2;
	}
	sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
	qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));

	qdf_event_create(&pdev->fw_peer_stats_event);

	pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);

	if (dp_rxdma_ring_setup(soc, pdev)) {
		dp_init_err("%pK: RXDMA ring config failed", soc);
		goto fail3;
	}

	if (dp_init_ipa_rx_refill_buf_ring(soc, pdev))
		goto fail3;

	if (dp_ipa_ring_resource_setup(soc, pdev))
		goto fail4;

	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
		dp_init_err("%pK: dp_ipa_uc_attach failed", soc);
		goto fail4;
	}

	/* E_NOSUPPORT means FST is not enabled for this target; not fatal */
	ret = dp_rx_fst_attach(soc, pdev);
	if ((ret != QDF_STATUS_SUCCESS) &&
	    (ret != QDF_STATUS_E_NOSUPPORT)) {
		dp_init_err("%pK: RX Flow Search Table attach failed: pdev %d err %d",
			    soc, pdev_id, ret);
		goto fail5;
	}

	if (dp_pdev_bkp_stats_attach(pdev) != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_pdev_bkp_stats_attach failed"));
		goto fail6;
	}

	if (dp_monitor_pdev_init(pdev)) {
		dp_init_err("%pK: dp_monitor_pdev_init failed\n", soc);
		goto fail7;
	}

	/* initialize sw rx descriptors */
	dp_rx_pdev_desc_pool_init(pdev);
	/* allocate buffers and replenish the RxDMA ring */
	dp_rx_pdev_buffers_alloc(pdev);

	dp_init_tso_stats(pdev);

	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
		qdf_dma_mem_stats_read(),
		qdf_heap_mem_stats_read(),
		qdf_skb_total_mem_stats_read());

	return QDF_STATUS_SUCCESS;
	/* Error unwind: each label tears down what the step above it built */
fail7:
	dp_pdev_bkp_stats_detach(pdev);
fail6:
	dp_rx_fst_detach(soc, pdev);
fail5:
	dp_ipa_uc_detach(soc, pdev);
fail4:
	dp_deinit_ipa_rx_refill_buf_ring(soc, pdev);
fail3:
	dp_rxdma_ring_cleanup(soc, pdev);
	qdf_nbuf_free(pdev->sojourn_buf);
fail2:
	qdf_spinlock_destroy(&pdev->tx_mutex);
	qdf_spinlock_destroy(&pdev->vdev_list_lock);
	dp_pdev_srng_deinit(pdev);
fail1:
	dp_wdi_event_detach(pdev);
fail0:
	return QDF_STATUS_E_FAILURE;
}
14938 
14939 /*
14940  * dp_pdev_init_wifi3() - Init txrx pdev
14941  * @htc_handle: HTC handle for host-target interface
14942  * @qdf_osdev: QDF OS device
14943  * @force: Force deinit
14944  *
14945  * Return: QDF_STATUS
14946  */
14947 static QDF_STATUS dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
14948 				     HTC_HANDLE htc_handle,
14949 				     qdf_device_t qdf_osdev,
14950 				     uint8_t pdev_id)
14951 {
14952 	return dp_pdev_init(txrx_soc, htc_handle, qdf_osdev, pdev_id);
14953 }
14954 
14955