xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision 8b3dca18206e1a0461492f082fa6e270b092c035)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <wlan_ipa_obj_mgmt_api.h>
21 #include <qdf_types.h>
22 #include <qdf_lock.h>
23 #include <qdf_net_types.h>
24 #include <qdf_lro.h>
25 #include <qdf_module.h>
26 #include <hal_hw_headers.h>
27 #include <hal_api.h>
28 #include <hif.h>
29 #include <htt.h>
30 #include <wdi_event.h>
31 #include <queue.h>
32 #include "dp_types.h"
33 #include "dp_internal.h"
34 #include "dp_tx.h"
35 #include "dp_tx_desc.h"
36 #include "dp_rx.h"
37 #ifdef DP_RATETABLE_SUPPORT
38 #include "dp_ratetable.h"
39 #endif
40 #include <cdp_txrx_handle.h>
41 #include <wlan_cfg.h>
42 #include <wlan_utility.h>
43 #include "cdp_txrx_cmn_struct.h"
44 #include "cdp_txrx_stats_struct.h"
45 #include "cdp_txrx_cmn_reg.h"
46 #include <qdf_util.h>
47 #include "dp_peer.h"
48 #include "htt_stats.h"
49 #include "dp_htt.h"
50 #ifdef WLAN_SUPPORT_RX_FISA
51 #include <wlan_dp_fisa_rx.h>
52 #endif
53 #include "htt_ppdu_stats.h"
54 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
55 #include "cfg_ucfg_api.h"
56 #include <wlan_module_ids.h>
57 
58 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
59 #include "cdp_txrx_flow_ctrl_v2.h"
60 #else
61 
/**
 * cdp_dump_flow_pool_info() - no-op stub when TX flow control v2 is disabled
 * @soc: CDP SoC handle (unused)
 *
 * Return: none
 */
static inline void
cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
{
}
67 #endif
68 #ifdef WIFI_MONITOR_SUPPORT
69 #include <dp_mon.h>
70 #endif
71 #include "dp_ipa.h"
72 #ifdef FEATURE_WDS
73 #include "dp_txrx_wds.h"
74 #endif
75 #ifdef WLAN_SUPPORT_MSCS
76 #include "dp_mscs.h"
77 #endif
78 #ifdef WLAN_SUPPORT_MESH_LATENCY
79 #include "dp_mesh_latency.h"
80 #endif
81 #ifdef WLAN_SUPPORT_SCS
82 #include "dp_scs.h"
83 #endif
84 #ifdef ATH_SUPPORT_IQUE
85 #include "dp_txrx_me.h"
86 #endif
87 #if defined(DP_CON_MON)
88 #ifndef REMOVE_PKT_LOG
89 #include <pktlog_ac_api.h>
90 #include <pktlog_ac.h>
91 #endif
92 #endif
93 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
94 #include <wlan_dp_swlm.h>
95 #endif
96 #ifdef CONFIG_SAWF_DEF_QUEUES
97 #include "dp_sawf.h"
98 #endif
99 #ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
100 #include <target_if_dp.h>
101 #endif
102 
103 #ifdef WLAN_FEATURE_STATS_EXT
104 #define INIT_RX_HW_STATS_LOCK(_soc) \
105 	qdf_spinlock_create(&(_soc)->rx_hw_stats_lock)
106 #define DEINIT_RX_HW_STATS_LOCK(_soc) \
107 	qdf_spinlock_destroy(&(_soc)->rx_hw_stats_lock)
108 #else
109 #define INIT_RX_HW_STATS_LOCK(_soc)  /* no op */
110 #define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */
111 #endif
112 
113 #if defined(DP_PEER_EXTENDED_API) || defined(WLAN_DP_PENDING_MEM_FLUSH)
114 #define SET_PEER_REF_CNT_ONE(_peer) \
115 	qdf_atomic_set(&(_peer)->ref_cnt, 1)
116 #else
117 #define SET_PEER_REF_CNT_ONE(_peer)
118 #endif
119 
120 #ifdef WLAN_SYSFS_DP_STATS
121 /* sysfs event wait time for firmware stat request unit milliseconds */
122 #define WLAN_SYSFS_STAT_REQ_WAIT_MS 3000
123 #endif
124 
125 #ifdef QCA_DP_ENABLE_TX_COMP_RING4
126 #define TXCOMP_RING4_NUM 3
127 #else
128 #define TXCOMP_RING4_NUM WBM2SW_TXCOMP_RING4_NUM
129 #endif
130 
131 #ifdef QCA_DP_TX_FW_METADATA_V2
132 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
133 		HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
134 #else
135 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
136 		HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
137 #endif
138 
139 QDF_COMPILE_TIME_ASSERT(max_rx_rings_check,
140 			MAX_REO_DEST_RINGS == CDP_MAX_RX_RINGS);
141 
142 QDF_COMPILE_TIME_ASSERT(max_tx_rings_check,
143 			MAX_TCL_DATA_RINGS == CDP_MAX_TX_COMP_RINGS);
144 
145 #define dp_init_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_INIT, params)
146 #define dp_init_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_INIT, params)
147 #define dp_init_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_INIT, params)
148 #define dp_init_info(params...) \
149 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_INIT, ## params)
150 #define dp_init_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_INIT, params)
151 
152 #define dp_vdev_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_VDEV, params)
153 #define dp_vdev_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_VDEV, params)
154 #define dp_vdev_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_VDEV, params)
155 #define dp_vdev_info(params...) \
156 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_VDEV, ## params)
157 #define dp_vdev_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_VDEV, params)
158 
159 void dp_configure_arch_ops(struct dp_soc *soc);
160 qdf_size_t dp_get_soc_context_size(uint16_t device_id);
161 
162 /*
163  * The max size of cdp_peer_stats_param_t is limited to 16 bytes.
164  * If the buffer size is exceeding this size limit,
165  * dp_txrx_get_peer_stats is to be used instead.
166  */
167 QDF_COMPILE_TIME_ASSERT(cdp_peer_stats_param_t_max_size,
168 			(sizeof(cdp_peer_stats_param_t) <= 16));
169 
170 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
171 /*
172  * If WLAN_CFG_INT_NUM_CONTEXTS is changed, HIF_NUM_INT_CONTEXTS
173  * also should be updated accordingly
174  */
175 QDF_COMPILE_TIME_ASSERT(num_intr_grps,
176 			HIF_NUM_INT_CONTEXTS == WLAN_CFG_INT_NUM_CONTEXTS);
177 
178 /*
179  * HIF_EVENT_HIST_MAX should always be power of 2
180  */
181 QDF_COMPILE_TIME_ASSERT(hif_event_history_size,
182 			(HIF_EVENT_HIST_MAX & (HIF_EVENT_HIST_MAX - 1)) == 0);
183 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
184 
185 /*
186  * If WLAN_CFG_INT_NUM_CONTEXTS is changed,
187  * WLAN_CFG_INT_NUM_CONTEXTS_MAX should also be updated
188  */
189 QDF_COMPILE_TIME_ASSERT(wlan_cfg_num_int_ctxs,
190 			WLAN_CFG_INT_NUM_CONTEXTS_MAX >=
191 			WLAN_CFG_INT_NUM_CONTEXTS);
192 
193 static QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl);
194 static QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl);
195 
196 static void dp_pdev_srng_deinit(struct dp_pdev *pdev);
197 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev);
198 static void dp_pdev_srng_free(struct dp_pdev *pdev);
199 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev);
200 
201 static void dp_soc_srng_deinit(struct dp_soc *soc);
202 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc);
203 static void dp_soc_srng_free(struct dp_soc *soc);
204 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc);
205 
206 static void dp_soc_cfg_init(struct dp_soc *soc);
207 static void dp_soc_cfg_attach(struct dp_soc *soc);
208 
209 static inline
210 QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
211 				struct cdp_pdev_attach_params *params);
212 
213 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id);
214 
215 static QDF_STATUS
216 dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
217 		   HTC_HANDLE htc_handle,
218 		   qdf_device_t qdf_osdev,
219 		   uint8_t pdev_id);
220 
221 static QDF_STATUS
222 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id, int force);
223 
224 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc);
225 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc);
226 
227 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
228 		  struct hif_opaque_softc *hif_handle);
229 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
230 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc,
231 				       uint8_t pdev_id,
232 				       int force);
233 static struct dp_soc *
234 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
235 	      struct cdp_soc_attach_params *params);
236 static inline QDF_STATUS dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl,
237 					      uint8_t vdev_id,
238 					      uint8_t *peer_mac_addr,
239 					      enum cdp_peer_type peer_type);
240 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
241 				       uint8_t vdev_id,
242 				       uint8_t *peer_mac, uint32_t bitmap,
243 				       enum cdp_peer_type peer_type);
244 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
245 				bool unmap_only,
246 				bool mlo_peers_only);
247 #ifdef ENABLE_VERBOSE_DEBUG
248 bool is_dp_verbose_debug_enabled;
249 #endif
250 
251 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
252 static bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
253 static void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
254 			   bool enable);
255 static inline void
256 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
257 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats);
258 static inline void
259 dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
260 #endif
261 
262 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
263 						uint8_t index);
264 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index);
265 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index);
266 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
267 						 uint8_t index);
268 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
269 					    enum hal_ring_type ring_type,
270 					    int ring_num);
271 #ifdef FEATURE_AST
272 void dp_print_mlo_ast_stats(struct dp_soc *soc);
273 #endif
274 
275 #ifdef DP_UMAC_HW_RESET_SUPPORT
276 static QDF_STATUS dp_umac_reset_handle_pre_reset(struct dp_soc *soc);
277 static QDF_STATUS dp_umac_reset_handle_post_reset(struct dp_soc *soc);
278 static QDF_STATUS dp_umac_reset_handle_post_reset_complete(struct dp_soc *soc);
279 #endif
280 
281 #define DP_INTR_POLL_TIMER_MS	5
282 
283 #define MON_VDEV_TIMER_INIT 0x1
284 #define MON_VDEV_TIMER_RUNNING 0x2
285 
286 #define DP_MCS_LENGTH (6*MAX_MCS)
287 
288 #define DP_CURR_FW_STATS_AVAIL 19
289 #define DP_HTT_DBG_EXT_STATS_MAX 256
290 #define DP_MAX_SLEEP_TIME 100
291 #ifndef QCA_WIFI_3_0_EMU
292 #define SUSPEND_DRAIN_WAIT 500
293 #else
294 #define SUSPEND_DRAIN_WAIT 3000
295 #endif
296 
297 #ifdef IPA_OFFLOAD
298 /* Exclude IPA rings from the interrupt context */
299 #define TX_RING_MASK_VAL	0xb
300 #define RX_RING_MASK_VAL	0x7
301 #else
302 #define TX_RING_MASK_VAL	0xF
303 #define RX_RING_MASK_VAL	0xF
304 #endif
305 
306 #define STR_MAXLEN	64
307 
308 #define RNG_ERR		"SRNG setup failed for"
309 
/**
 * default_dscp_tid_map - Default DSCP-TID mapping
 *
 * The 64 DSCP codepoints are grouped in runs of 8; the top three bits
 * of the 6-bit DSCP select the TID:
 *
 * DSCP        TID
 * 000000      0
 * 001000      1
 * 010000      2
 * 011000      3
 * 100000      4
 * 101000      5
 * 110000      6
 * 111000      7
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7,
};
333 
/**
 * default_pcp_tid_map - Default PCP-TID mapping
 *
 * The 3-bit PCP value maps one-to-one onto the TID:
 *
 * PCP     TID
 * 000      0
 * 001      1
 * 010      2
 * 011      3
 * 100      4
 * 101      5
 * 110      6
 * 111      7
 */
static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
	0, 1, 2, 3, 4, 5, 6, 7,
};
350 
/**
 * dp_cpu_ring_map - Cpu to tx ring map
 *
 * Each row is one NSS-offload ring-map configuration
 * (DP_NSS_CPU_RING_MAP_MAX rows); each column gives the TX ring index
 * for an interrupt context. The extra row compiled in under
 * WLAN_TX_PKT_CAPTURE_ENH appears to pin all contexts to ring 1 —
 * NOTE(review): confirm intent against the enhanced-capture design.
 */
uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
#endif
};

qdf_export_symbol(dp_cpu_ring_map);
367 
/**
 * enum dp_stats_type - Select the type of statistics
 * @STATS_FW: firmware-resident statistics
 * @STATS_HOST: host-resident statistics
 * @STATS_TYPE_MAX: number of stats columns (array bound for the
 *		    mapping table below)
 */
enum dp_stats_type {
	STATS_FW = 0,
	STATS_HOST = 1,
	STATS_TYPE_MAX = 2,
};

/**
 * enum dp_fw_stats - General Firmware statistics options
 * @TXRX_FW_STATS_INVALID: no FW statistic corresponds to the request
 */
enum dp_fw_stats {
	TXRX_FW_STATS_INVALID	= -1,
};
384 
/**
 * dp_stats_mapping_table - Firmware and Host statistics
 * currently supported
 *
 * Each row maps a CDP stats request index to an HTT FW stats id
 * (column STATS_FW) and a host stats id (column STATS_HOST);
 * the *_INVALID value marks the side that does not apply to that row.
 */
const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
	/* Last ENUM for HTT FW STATS */
	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
	{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
	{TXRX_FW_STATS_INVALID, TXRX_NAPI_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_FSE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_HAL_REG_WRITE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_REO_HW_DESC_DUMP},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_WBM_IDLE_HPTP_DUMP},
	{TXRX_FW_STATS_INVALID, TXRX_SRNG_USAGE_WM_STATS},
	{HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_SOUNDING_INFO, TXRX_HOST_STATS_INVALID}
};
432 
433 /* MCL specific functions */
434 #if defined(DP_CON_MON)
435 
436 #ifdef DP_CON_MON_MSI_ENABLED
437 /**
438  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
439  * @soc: pointer to dp_soc handle
440  * @intr_ctx_num: interrupt context number for which mon mask is needed
441  *
442  * With MSI based monitor interrupts (DP_CON_MON_MSI_ENABLED), monitor
443  * mode rings are serviced in the regular interrupt context rather than
444  * a polled timer, so the configured RX monitor ring mask for this
445  * interrupt context is returned.
446  *
447  * Return: RX monitor ring mask from the soc wlan config context
448  */
452 static inline uint32_t
453 dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
454 {
455 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
456 }
457 #else
458 /**
459  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
460  * @soc: pointer to dp_soc handle
461  * @intr_ctx_num: interrupt context number for which mon mask is needed
462  *
463  * For MCL, monitor mode rings are being processed in timer contexts (polled).
464  * This function is returning 0, since in interrupt mode(softirq based RX),
465  * we do not want to process monitor mode rings in a softirq.
466  *
467  * So, in case packet log is enabled for SAP/STA/P2P modes,
468  * regular interrupt processing will not process monitor mode rings. It would be
469  * done in a separate timer context.
470  *
471  * Return: 0
472  */
static inline uint32_t
dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	/* Monitor rings are reaped from a timer context; never in softirq */
	return 0;
}
478 #endif
479 
480 #ifdef IPA_OFFLOAD
481 /**
482  * dp_get_num_rx_contexts() - get number of RX contexts
483  * @soc_hdl: cdp opaque soc handle
484  *
485  * Return: number of RX contexts
486  */
487 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
488 {
489 	int num_rx_contexts;
490 	uint32_t reo_ring_map;
491 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
492 
493 	reo_ring_map = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
494 
495 	switch (soc->arch_id) {
496 	case CDP_ARCH_TYPE_BE:
497 		/* 2 REO rings are used for IPA */
498 		reo_ring_map &=  ~(BIT(3) | BIT(7));
499 
500 		break;
501 	case CDP_ARCH_TYPE_LI:
502 		/* 1 REO ring is used for IPA */
503 		reo_ring_map &=  ~BIT(3);
504 		break;
505 	default:
506 		dp_err("unkonwn arch_id 0x%x", soc->arch_id);
507 		QDF_BUG(0);
508 	}
509 	/*
510 	 * qdf_get_hweight32 prefer over qdf_get_hweight8 in case map is scaled
511 	 * in future
512 	 */
513 	num_rx_contexts = qdf_get_hweight32(reo_ring_map);
514 
515 	return num_rx_contexts;
516 }
517 #else
518 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
519 {
520 	int num_rx_contexts;
521 	uint32_t reo_config;
522 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
523 
524 	reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
525 	/*
526 	 * qdf_get_hweight32 prefer over qdf_get_hweight8 in case map is scaled
527 	 * in future
528 	 */
529 	num_rx_contexts = qdf_get_hweight32(reo_config);
530 
531 	return num_rx_contexts;
532 }
533 #endif
534 
535 #else
536 
537 /**
538  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
539  * @soc: pointer to dp_soc handle
540  * @intr_ctx_num: interrupt context number for which mon mask is needed
541  *
542  * Return: mon mask value
543  */
544 static inline
545 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
546 {
547 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
548 }
549 
550 /**
551  * dp_soc_reset_mon_intr_mask() - reset mon intr mask
552  * @soc: pointer to dp_soc handle
553  *
554  * Return:
555  */
556 void dp_soc_reset_mon_intr_mask(struct dp_soc *soc)
557 {
558 	int i;
559 
560 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
561 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
562 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
563 	}
564 }
565 
566 qdf_export_symbol(dp_soc_reset_mon_intr_mask);
567 
/*
 * dp_service_lmac_rings() - timer handler to reap the per-LMAC rings
 * @arg: opaque SoC Handle (struct dp_soc *)
 *
 * Polled (timer-context) servicing of the monitor destination ring,
 * the RXDMA error ring and the RX buffer refill ring for every LMAC,
 * then re-arms the reap timer.
 *
 * Return: none
 */
static void dp_service_lmac_rings(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;
	int ring = 0, i;
	struct dp_pdev *pdev = NULL;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	/* Process LMAC interrupts */
	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
		int mac_for_pdev = ring;
		struct dp_srng *rx_refill_buf_ring;

		/* Skip LMAC ids with no pdev mapped to them */
		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
		if (!pdev)
			continue;

		rx_refill_buf_ring = &soc->rx_refill_buf_ring[mac_for_pdev];

		/* Reap the monitor destination ring for this mac */
		dp_monitor_process(soc, NULL, mac_for_pdev,
				   QCA_NAPI_BUDGET);

		/* Drain the RXDMA error ring via every interrupt context */
		for (i = 0;
		     i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
			dp_rxdma_err_process(&soc->intr_ctx[i], soc,
					     mac_for_pdev,
					     QCA_NAPI_BUDGET);

		/*
		 * Replenish RX buffers only when the ring is host-owned
		 * (not offloaded to NSS)
		 */
		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF,
						  mac_for_pdev))
			dp_rx_buffers_replenish(soc, mac_for_pdev,
						rx_refill_buf_ring,
						&soc->rx_desc_buf[mac_for_pdev],
						0, &desc_list, &tail, false);
	}

	/* Re-arm the reap timer for the next poll cycle */
	qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
}
613 
614 #endif
615 
616 #ifdef FEATURE_MEC
617 void dp_peer_mec_flush_entries(struct dp_soc *soc)
618 {
619 	unsigned int index;
620 	struct dp_mec_entry *mecentry, *mecentry_next;
621 
622 	TAILQ_HEAD(, dp_mec_entry) free_list;
623 	TAILQ_INIT(&free_list);
624 
625 	if (!soc->mec_hash.mask)
626 		return;
627 
628 	if (!soc->mec_hash.bins)
629 		return;
630 
631 	if (!qdf_atomic_read(&soc->mec_cnt))
632 		return;
633 
634 	qdf_spin_lock_bh(&soc->mec_lock);
635 	for (index = 0; index <= soc->mec_hash.mask; index++) {
636 		if (!TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
637 			TAILQ_FOREACH_SAFE(mecentry, &soc->mec_hash.bins[index],
638 					   hash_list_elem, mecentry_next) {
639 			    dp_peer_mec_detach_entry(soc, mecentry, &free_list);
640 			}
641 		}
642 	}
643 	qdf_spin_unlock_bh(&soc->mec_lock);
644 
645 	dp_peer_mec_free_list(soc, &free_list);
646 }
647 
648 /**
649  * dp_print_mec_entries() - Dump MEC entries in table
650  * @soc: Datapath soc handle
651  *
652  * Return: none
653  */
654 static void dp_print_mec_stats(struct dp_soc *soc)
655 {
656 	int i;
657 	uint32_t index;
658 	struct dp_mec_entry *mecentry = NULL, *mec_list;
659 	uint32_t num_entries = 0;
660 
661 	DP_PRINT_STATS("MEC Stats:");
662 	DP_PRINT_STATS("   Entries Added   = %d", soc->stats.mec.added);
663 	DP_PRINT_STATS("   Entries Deleted = %d", soc->stats.mec.deleted);
664 
665 	if (!qdf_atomic_read(&soc->mec_cnt))
666 		return;
667 
668 	mec_list = qdf_mem_malloc(sizeof(*mecentry) * DP_PEER_MAX_MEC_ENTRY);
669 	if (!mec_list) {
670 		dp_peer_warn("%pK: failed to allocate mec_list", soc);
671 		return;
672 	}
673 
674 	DP_PRINT_STATS("MEC Table:");
675 	for (index = 0; index <= soc->mec_hash.mask; index++) {
676 		qdf_spin_lock_bh(&soc->mec_lock);
677 		if (TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
678 			qdf_spin_unlock_bh(&soc->mec_lock);
679 			continue;
680 		}
681 
682 		TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index],
683 			      hash_list_elem) {
684 			qdf_mem_copy(&mec_list[num_entries], mecentry,
685 				     sizeof(*mecentry));
686 			num_entries++;
687 		}
688 		qdf_spin_unlock_bh(&soc->mec_lock);
689 	}
690 
691 	if (!num_entries) {
692 		qdf_mem_free(mec_list);
693 		return;
694 	}
695 
696 	for (i = 0; i < num_entries; i++) {
697 		DP_PRINT_STATS("%6d mac_addr = " QDF_MAC_ADDR_FMT
698 			       " is_active = %d pdev_id = %d vdev_id = %d",
699 			       i,
700 			       QDF_MAC_ADDR_REF(mec_list[i].mac_addr.raw),
701 			       mec_list[i].is_active,
702 			       mec_list[i].pdev_id,
703 			       mec_list[i].vdev_id);
704 	}
705 	qdf_mem_free(mec_list);
706 }
707 #else
static void dp_print_mec_stats(struct dp_soc *soc)
{
	/* FEATURE_MEC disabled: no MEC table exists, nothing to print */
}
711 #endif
712 
713 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
714 				 uint8_t vdev_id,
715 				 uint8_t *peer_mac,
716 				 uint8_t *mac_addr,
717 				 enum cdp_txrx_ast_entry_type type,
718 				 uint32_t flags)
719 {
720 	int ret = -1;
721 	QDF_STATUS status = QDF_STATUS_SUCCESS;
722 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
723 						       peer_mac, 0, vdev_id,
724 						       DP_MOD_ID_CDP);
725 
726 	if (!peer) {
727 		dp_peer_debug("Peer is NULL!");
728 		return ret;
729 	}
730 
731 	status = dp_peer_add_ast((struct dp_soc *)soc_hdl,
732 				 peer,
733 				 mac_addr,
734 				 type,
735 				 flags);
736 	if ((status == QDF_STATUS_SUCCESS) ||
737 	    (status == QDF_STATUS_E_ALREADY) ||
738 	    (status == QDF_STATUS_E_AGAIN))
739 		ret = 0;
740 
741 	dp_hmwds_ast_add_notify(peer, mac_addr,
742 				type, status, false);
743 
744 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
745 
746 	return ret;
747 }
748 
749 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
750 						uint8_t vdev_id,
751 						uint8_t *peer_mac,
752 						uint8_t *wds_macaddr,
753 						uint32_t flags)
754 {
755 	int status = -1;
756 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
757 	struct dp_ast_entry  *ast_entry = NULL;
758 	struct dp_peer *peer;
759 
760 	if (soc->ast_offload_support)
761 		return status;
762 
763 	peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
764 				      peer_mac, 0, vdev_id,
765 				      DP_MOD_ID_CDP);
766 
767 	if (!peer) {
768 		dp_peer_debug("Peer is NULL!");
769 		return status;
770 	}
771 
772 	qdf_spin_lock_bh(&soc->ast_lock);
773 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
774 						    peer->vdev->pdev->pdev_id);
775 
776 	if (ast_entry) {
777 		status = dp_peer_update_ast(soc,
778 					    peer,
779 					    ast_entry, flags);
780 	}
781 	qdf_spin_unlock_bh(&soc->ast_lock);
782 
783 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
784 
785 	return status;
786 }
787 
788 /*
789  * dp_peer_reset_ast_entries() - Deletes all HMWDS entries for a peer
790  * @soc_handle:		Datapath SOC handle
791  * @peer:		DP peer
792  * @arg:		callback argument
793  *
794  * Return: None
795  */
796 static void
797 dp_peer_reset_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
798 {
799 	struct dp_ast_entry *ast_entry = NULL;
800 	struct dp_ast_entry *tmp_ast_entry;
801 
802 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) {
803 		if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
804 		    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
805 			dp_peer_del_ast(soc, ast_entry);
806 	}
807 }
808 
/*
 * dp_wds_reset_ast_wifi3() - Delete HMWDS AST entries for a peer or address
 * @soc_hdl:		Datapath SOC handle
 * @wds_macaddr:	WDS entry MAC Address (used when peer_mac_addr is NULL)
 * @peer_mac_addr:	peer MAC address; when set, every HMWDS entry of
 *			that peer is deleted
 * @vdev_id:		id of vdev handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
					 uint8_t *wds_macaddr,
					 uint8_t *peer_mac_addr,
					 uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_peer *peer;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;

	/* Host AST table is not maintained when AST is FW-offloaded */
	if (soc->ast_offload_support)
		return QDF_STATUS_E_FAILURE;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	pdev = vdev->pdev;

	if (peer_mac_addr) {
		/* Peer given: delete every HMWDS entry on that peer */
		peer = dp_peer_find_hash_find(soc, peer_mac_addr,
					      0, vdev->vdev_id,
					      DP_MOD_ID_CDP);
		if (!peer) {
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
			return QDF_STATUS_E_FAILURE;
		}

		qdf_spin_lock_bh(&soc->ast_lock);
		dp_peer_reset_ast_entries(soc, peer, NULL);
		qdf_spin_unlock_bh(&soc->ast_lock);
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	} else if (wds_macaddr) {
		/* Only an address: delete the matching entry on this pdev */
		qdf_spin_lock_bh(&soc->ast_lock);
		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
							    pdev->pdev_id);

		if (ast_entry) {
			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
				dp_peer_del_ast(soc, ast_entry);
		}
		qdf_spin_unlock_bh(&soc->ast_lock);
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}
867 
868 /*
869  * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entry
870  * @soc:		Datapath SOC handle
871  * @vdev_id:		id of vdev object
872  *
873  * Return: QDF_STATUS
874  */
875 static QDF_STATUS
876 dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
877 			     uint8_t vdev_id)
878 {
879 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
880 
881 	if (soc->ast_offload_support)
882 		return QDF_STATUS_SUCCESS;
883 
884 	qdf_spin_lock_bh(&soc->ast_lock);
885 
886 	dp_soc_iterate_peer(soc, dp_peer_reset_ast_entries, NULL,
887 			    DP_MOD_ID_CDP);
888 	qdf_spin_unlock_bh(&soc->ast_lock);
889 
890 	return QDF_STATUS_SUCCESS;
891 }
892 
893 /*
894  * dp_peer_flush_ast_entries() - Delete all wds and hmwds ast entries of a peer
895  * @soc:		Datapath SOC
896  * @peer:		Datapath peer
897  * @arg:		arg to callback
898  *
899  * Return: None
900  */
901 static void
902 dp_peer_flush_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
903 {
904 	struct dp_ast_entry *ase = NULL;
905 	struct dp_ast_entry *temp_ase;
906 
907 	DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
908 		if ((ase->type ==
909 			CDP_TXRX_AST_TYPE_STATIC) ||
910 			(ase->type ==
911 			 CDP_TXRX_AST_TYPE_SELF) ||
912 			(ase->type ==
913 			 CDP_TXRX_AST_TYPE_STA_BSS))
914 			continue;
915 		dp_peer_del_ast(soc, ase);
916 	}
917 }
918 
919 /*
920  * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entry
921  * @soc:		Datapath SOC handle
922  *
923  * Return: None
924  */
925 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
926 {
927 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
928 
929 	qdf_spin_lock_bh(&soc->ast_lock);
930 
931 	dp_soc_iterate_peer(soc, dp_peer_flush_ast_entries, NULL,
932 			    DP_MOD_ID_CDP);
933 
934 	qdf_spin_unlock_bh(&soc->ast_lock);
935 	dp_peer_mec_flush_entries(soc);
936 }
937 
938 #if defined(IPA_WDS_EASYMESH_FEATURE) && defined(FEATURE_AST)
939 /*
940  * dp_peer_send_wds_disconnect() - Send Disconnect event to IPA for each peer
941  * @soc: Datapath SOC
942  * @peer: Datapath peer
943  *
944  * Return: None
945  */
946 static void
947 dp_peer_send_wds_disconnect(struct dp_soc *soc, struct dp_peer *peer)
948 {
949 	struct dp_ast_entry *ase = NULL;
950 	struct dp_ast_entry *temp_ase;
951 
952 	DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
953 		if (ase->type == CDP_TXRX_AST_TYPE_WDS) {
954 			soc->cdp_soc.ol_ops->peer_send_wds_disconnect(soc->ctrl_psoc,
955 								      ase->mac_addr.raw,
956 								      ase->vdev_id);
957 		}
958 	}
959 }
960 #elif defined(FEATURE_AST)
static void
dp_peer_send_wds_disconnect(struct dp_soc *soc, struct dp_peer *peer)
{
	/* IPA easymesh WDS disabled: no disconnect event to deliver */
}
965 #endif
966 
/**
 * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
 *                                       and return ast entry information
 *                                       of first ast entry found in the
 *                                       table with given mac address
 *
 * @soc_hdl : data path soc handle
 * @ast_mac_addr : AST entry mac address
 * @ast_entry_info : ast entry information to fill (out parameter)
 *
 * return : true if ast entry found with ast_mac_addr
 *          false if ast entry not found
 */
static bool dp_peer_get_ast_info_by_soc_wifi3
	(struct cdp_soc_t *soc_hdl,
	 uint8_t *ast_mac_addr,
	 struct cdp_ast_entry_info *ast_entry_info)
{
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_peer *peer = NULL;

	/* Host AST table is not maintained when AST is FW-offloaded */
	if (soc->ast_offload_support)
		return false;

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
	/*
	 * An entry with delete in progress and no pending callback is
	 * treated as already gone
	 */
	if ((!ast_entry) ||
	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}

	/* Hold a peer reference so peer fields remain valid while copying */
	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
				     DP_MOD_ID_AST);
	if (!peer) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}

	ast_entry_info->type = ast_entry->type;
	ast_entry_info->pdev_id = ast_entry->pdev_id;
	ast_entry_info->vdev_id = ast_entry->vdev_id;
	ast_entry_info->peer_id = ast_entry->peer_id;
	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
		     &peer->mac_addr.raw[0],
		     QDF_MAC_ADDR_SIZE);
	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
	qdf_spin_unlock_bh(&soc->ast_lock);
	return true;
}
1019 
1020 /**
1021  * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
1022  *                                          and return ast entry information
1023  *                                          if mac address and pdev_id matches
1024  *
1025  * @soc : data path soc handle
1026  * @ast_mac_addr : AST entry mac address
1027  * @pdev_id : pdev_id
1028  * @ast_entry_info : ast entry information
1029  *
1030  * return : true if ast entry found with ast_mac_addr
1031  *          false if ast entry not found
1032  */
1033 static bool dp_peer_get_ast_info_by_pdevid_wifi3
1034 		(struct cdp_soc_t *soc_hdl,
1035 		 uint8_t *ast_mac_addr,
1036 		 uint8_t pdev_id,
1037 		 struct cdp_ast_entry_info *ast_entry_info)
1038 {
1039 	struct dp_ast_entry *ast_entry;
1040 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
1041 	struct dp_peer *peer = NULL;
1042 
1043 	if (soc->ast_offload_support)
1044 		return false;
1045 
1046 	qdf_spin_lock_bh(&soc->ast_lock);
1047 
1048 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr,
1049 						    pdev_id);
1050 
1051 	if ((!ast_entry) ||
1052 	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
1053 		qdf_spin_unlock_bh(&soc->ast_lock);
1054 		return false;
1055 	}
1056 
1057 	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
1058 				     DP_MOD_ID_AST);
1059 	if (!peer) {
1060 		qdf_spin_unlock_bh(&soc->ast_lock);
1061 		return false;
1062 	}
1063 
1064 	ast_entry_info->type = ast_entry->type;
1065 	ast_entry_info->pdev_id = ast_entry->pdev_id;
1066 	ast_entry_info->vdev_id = ast_entry->vdev_id;
1067 	ast_entry_info->peer_id = ast_entry->peer_id;
1068 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
1069 		     &peer->mac_addr.raw[0],
1070 		     QDF_MAC_ADDR_SIZE);
1071 	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
1072 	qdf_spin_unlock_bh(&soc->ast_lock);
1073 	return true;
1074 }
1075 
1076 /**
1077  * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table
1078  *                            with given mac address
1079  *
1080  * @soc : data path soc handle
1081  * @ast_mac_addr : AST entry mac address
1082  * @callback : callback function to called on ast delete response from FW
1083  * @cookie : argument to be passed to callback
1084  *
1085  * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
1086  *          is sent
1087  *          QDF_STATUS_E_INVAL false if ast entry not found
1088  */
1089 static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
1090 					       uint8_t *mac_addr,
1091 					       txrx_ast_free_cb callback,
1092 					       void *cookie)
1093 
1094 {
1095 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1096 	struct dp_ast_entry *ast_entry = NULL;
1097 	txrx_ast_free_cb cb = NULL;
1098 	void *arg = NULL;
1099 
1100 	if (soc->ast_offload_support)
1101 		return -QDF_STATUS_E_INVAL;
1102 
1103 	qdf_spin_lock_bh(&soc->ast_lock);
1104 	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
1105 	if (!ast_entry) {
1106 		qdf_spin_unlock_bh(&soc->ast_lock);
1107 		return -QDF_STATUS_E_INVAL;
1108 	}
1109 
1110 	if (ast_entry->callback) {
1111 		cb = ast_entry->callback;
1112 		arg = ast_entry->cookie;
1113 	}
1114 
1115 	ast_entry->callback = callback;
1116 	ast_entry->cookie = cookie;
1117 
1118 	/*
1119 	 * if delete_in_progress is set AST delete is sent to target
1120 	 * and host is waiting for response should not send delete
1121 	 * again
1122 	 */
1123 	if (!ast_entry->delete_in_progress)
1124 		dp_peer_del_ast(soc, ast_entry);
1125 
1126 	qdf_spin_unlock_bh(&soc->ast_lock);
1127 	if (cb) {
1128 		cb(soc->ctrl_psoc,
1129 		   dp_soc_to_cdp_soc(soc),
1130 		   arg,
1131 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1132 	}
1133 	return QDF_STATUS_SUCCESS;
1134 }
1135 
1136 /**
1137  * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
1138  *                                   table if mac address and pdev_id matches
1139  *
1140  * @soc : data path soc handle
1141  * @ast_mac_addr : AST entry mac address
1142  * @pdev_id : pdev id
1143  * @callback : callback function to called on ast delete response from FW
1144  * @cookie : argument to be passed to callback
1145  *
1146  * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
1147  *          is sent
1148  *          QDF_STATUS_E_INVAL false if ast entry not found
1149  */
1150 
1151 static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
1152 						uint8_t *mac_addr,
1153 						uint8_t pdev_id,
1154 						txrx_ast_free_cb callback,
1155 						void *cookie)
1156 
1157 {
1158 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1159 	struct dp_ast_entry *ast_entry;
1160 	txrx_ast_free_cb cb = NULL;
1161 	void *arg = NULL;
1162 
1163 	if (soc->ast_offload_support)
1164 		return -QDF_STATUS_E_INVAL;
1165 
1166 	qdf_spin_lock_bh(&soc->ast_lock);
1167 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);
1168 
1169 	if (!ast_entry) {
1170 		qdf_spin_unlock_bh(&soc->ast_lock);
1171 		return -QDF_STATUS_E_INVAL;
1172 	}
1173 
1174 	if (ast_entry->callback) {
1175 		cb = ast_entry->callback;
1176 		arg = ast_entry->cookie;
1177 	}
1178 
1179 	ast_entry->callback = callback;
1180 	ast_entry->cookie = cookie;
1181 
1182 	/*
1183 	 * if delete_in_progress is set AST delete is sent to target
1184 	 * and host is waiting for response should not sent delete
1185 	 * again
1186 	 */
1187 	if (!ast_entry->delete_in_progress)
1188 		dp_peer_del_ast(soc, ast_entry);
1189 
1190 	qdf_spin_unlock_bh(&soc->ast_lock);
1191 
1192 	if (cb) {
1193 		cb(soc->ctrl_psoc,
1194 		   dp_soc_to_cdp_soc(soc),
1195 		   arg,
1196 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1197 	}
1198 	return QDF_STATUS_SUCCESS;
1199 }
1200 
1201 /**
1202  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs
1203  * @ring_num: ring num of the ring being queried
1204  * @grp_mask: the grp_mask array for the ring type in question.
1205  *
1206  * The grp_mask array is indexed by group number and the bit fields correspond
1207  * to ring numbers.  We are finding which interrupt group a ring belongs to.
1208  *
1209  * Return: the index in the grp_mask array with the ring number.
1210  * -QDF_STATUS_E_NOENT if no entry is found
1211  */
1212 static int dp_srng_find_ring_in_mask(int ring_num, uint8_t *grp_mask)
1213 {
1214 	int ext_group_num;
1215 	uint8_t mask = 1 << ring_num;
1216 
1217 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
1218 	     ext_group_num++) {
1219 		if (mask & grp_mask[ext_group_num])
1220 			return ext_group_num;
1221 	}
1222 
1223 	return -QDF_STATUS_E_NOENT;
1224 }
1225 
1226 /**
1227  * dp_is_msi_group_number_invalid() - check msi_group_number valid or not
1228  * @msi_group_number: MSI group number.
1229  * @msi_data_count: MSI data count.
1230  *
1231  * Return: true if msi_group_number is invalid.
1232  */
1233 #ifdef WLAN_ONE_MSI_VECTOR
static bool dp_is_msi_group_number_invalid(int msi_group_number,
					   int msi_data_count)
{
	/* Single shared MSI vector build (WLAN_ONE_MSI_VECTOR): every
	 * group maps to the one vector, so no group number is invalid.
	 */
	return false;
}
1239 #else
/* An MSI group number beyond the available MSI data vectors cannot be
 * mapped one-to-one and is therefore considered invalid.
 */
static bool dp_is_msi_group_number_invalid(int msi_group_number,
					   int msi_data_count)
{
	if (msi_group_number > msi_data_count)
		return true;

	return false;
}
1245 #endif
1246 
1247 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1248 /**
1249  * dp_is_reo_ring_num_in_nf_grp1() - Check if the current reo ring is part of
1250  *				rx_near_full_grp1 mask
1251  * @soc: Datapath SoC Handle
1252  * @ring_num: REO ring number
1253  *
1254  * Return: 1 if the ring_num belongs to reo_nf_grp1,
1255  *	   0, otherwise.
1256  */
1257 static inline int
1258 dp_is_reo_ring_num_in_nf_grp1(struct dp_soc *soc, int ring_num)
1259 {
1260 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_1 & (1 << ring_num));
1261 }
1262 
1263 /**
1264  * dp_is_reo_ring_num_in_nf_grp2() - Check if the current reo ring is part of
1265  *				rx_near_full_grp2 mask
1266  * @soc: Datapath SoC Handle
1267  * @ring_num: REO ring number
1268  *
1269  * Return: 1 if the ring_num belongs to reo_nf_grp2,
1270  *	   0, otherwise.
1271  */
1272 static inline int
1273 dp_is_reo_ring_num_in_nf_grp2(struct dp_soc *soc, int ring_num)
1274 {
1275 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_2 & (1 << ring_num));
1276 }
1277 
1278 /**
1279  * dp_srng_get_near_full_irq_mask() - Get near-full irq mask for a particular
1280  *				ring type and number
1281  * @soc: Datapath SoC handle
1282  * @ring_type: SRNG type
1283  * @ring_num: ring num
1284  *
1285  * Return: near ful irq mask pointer
1286  */
1287 static inline
1288 uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
1289 					enum hal_ring_type ring_type,
1290 					int ring_num)
1291 {
1292 	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
1293 	uint8_t wbm2_sw_rx_rel_ring_id;
1294 	uint8_t *nf_irq_mask = NULL;
1295 
1296 	switch (ring_type) {
1297 	case WBM2SW_RELEASE:
1298 		wbm2_sw_rx_rel_ring_id =
1299 			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
1300 		if (ring_num != wbm2_sw_rx_rel_ring_id) {
1301 			nf_irq_mask = &soc->wlan_cfg_ctx->
1302 					int_tx_ring_near_full_irq_mask[0];
1303 		}
1304 		break;
1305 	case REO_DST:
1306 		if (dp_is_reo_ring_num_in_nf_grp1(soc, ring_num))
1307 			nf_irq_mask =
1308 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_1_mask[0];
1309 		else if (dp_is_reo_ring_num_in_nf_grp2(soc, ring_num))
1310 			nf_irq_mask =
1311 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_2_mask[0];
1312 		else
1313 			qdf_assert(0);
1314 		break;
1315 	default:
1316 		break;
1317 	}
1318 
1319 	return nf_irq_mask;
1320 }
1321 
1322 /**
1323  * dp_srng_set_msi2_ring_params() - Set the msi2 addr/data in the ring params
1324  * @soc: Datapath SoC handle
1325  * @ring_params: srng params handle
1326  * @msi2_addr: MSI2 addr to be set for the SRNG
1327  * @msi2_data: MSI2 data to be set for the SRNG
1328  *
1329  * Return: None
1330  */
1331 static inline
1332 void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
1333 				  struct hal_srng_params *ring_params,
1334 				  qdf_dma_addr_t msi2_addr,
1335 				  uint32_t msi2_data)
1336 {
1337 	ring_params->msi2_addr = msi2_addr;
1338 	ring_params->msi2_data = msi2_data;
1339 }
1340 
1341 /**
1342  * dp_srng_msi2_setup() - Setup MSI2 details for near full IRQ of an SRNG
1343  * @soc: Datapath SoC handle
1344  * @ring_params: ring_params for SRNG
1345  * @ring_type: SENG type
1346  * @ring_num: ring number for the SRNG
1347  * @nf_msi_grp_num: near full msi group number
1348  *
1349  * Return: None
1350  */
1351 static inline void
1352 dp_srng_msi2_setup(struct dp_soc *soc,
1353 		   struct hal_srng_params *ring_params,
1354 		   int ring_type, int ring_num, int nf_msi_grp_num)
1355 {
1356 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
1357 	int msi_data_count, ret;
1358 
1359 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1360 					  &msi_data_count, &msi_data_start,
1361 					  &msi_irq_start);
1362 	if (ret)
1363 		return;
1364 
1365 	if (nf_msi_grp_num < 0) {
1366 		dp_init_info("%pK: ring near full IRQ not part of an ext_group; ring_type: %d,ring_num %d",
1367 			     soc, ring_type, ring_num);
1368 		ring_params->msi2_addr = 0;
1369 		ring_params->msi2_data = 0;
1370 		return;
1371 	}
1372 
1373 	if (dp_is_msi_group_number_invalid(nf_msi_grp_num, msi_data_count)) {
1374 		dp_init_warn("%pK: 2 msi_groups will share an msi for near full IRQ; msi_group_num %d",
1375 			     soc, nf_msi_grp_num);
1376 		QDF_ASSERT(0);
1377 	}
1378 
1379 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
1380 
1381 	ring_params->nf_irq_support = 1;
1382 	ring_params->msi2_addr = addr_low;
1383 	ring_params->msi2_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
1384 	ring_params->msi2_data = (nf_msi_grp_num % msi_data_count)
1385 		+ msi_data_start;
1386 	ring_params->flags |= HAL_SRNG_MSI_INTR;
1387 }
1388 
1389 /* Percentage of ring entries considered as nearly full */
1390 #define DP_NF_HIGH_THRESH_PERCENTAGE	75
1391 /* Percentage of ring entries considered as critically full */
1392 #define DP_NF_CRIT_THRESH_PERCENTAGE	90
1393 /* Percentage of ring entries considered as safe threshold */
1394 #define DP_NF_SAFE_THRESH_PERCENTAGE	50
1395 
1396 /**
1397  * dp_srng_configure_nf_interrupt_thresholds() - Configure the thresholds for
1398  *			near full irq
1399  * @soc: Datapath SoC handle
1400  * @ring_params: ring params for SRNG
1401  * @ring_type: ring type
1402  */
1403 static inline void
1404 dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
1405 					  struct hal_srng_params *ring_params,
1406 					  int ring_type)
1407 {
1408 	if (ring_params->nf_irq_support) {
1409 		ring_params->high_thresh = (ring_params->num_entries *
1410 					    DP_NF_HIGH_THRESH_PERCENTAGE) / 100;
1411 		ring_params->crit_thresh = (ring_params->num_entries *
1412 					    DP_NF_CRIT_THRESH_PERCENTAGE) / 100;
1413 		ring_params->safe_thresh = (ring_params->num_entries *
1414 					    DP_NF_SAFE_THRESH_PERCENTAGE) /100;
1415 	}
1416 }
1417 
1418 /**
1419  * dp_srng_set_nf_thresholds() - Set the near full thresholds to srng data
1420  *			structure from the ring params
1421  * @soc: Datapath SoC handle
1422  * @srng: SRNG handle
1423  * @ring_params: ring params for a SRNG
1424  *
1425  * Return: None
1426  */
1427 static inline void
1428 dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
1429 			  struct hal_srng_params *ring_params)
1430 {
1431 	srng->crit_thresh = ring_params->crit_thresh;
1432 	srng->safe_thresh = ring_params->safe_thresh;
1433 }
1434 
1435 #else
static inline
uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
					enum hal_ring_type ring_type,
					int ring_num)
{
	/* Near-full IRQ feature disabled: no ring has a near-full mask */
	return NULL;
}
1443 
static inline
void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
				  struct hal_srng_params *ring_params,
				  qdf_dma_addr_t msi2_addr,
				  uint32_t msi2_data)
{
	/* No-op: MSI2 is only programmed with WLAN_FEATURE_NEAR_FULL_IRQ */
}
1451 
static inline void
dp_srng_msi2_setup(struct dp_soc *soc,
		   struct hal_srng_params *ring_params,
		   int ring_type, int ring_num, int nf_msi_grp_num)
{
	/* No-op without WLAN_FEATURE_NEAR_FULL_IRQ */
}
1458 
static inline void
dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
					  struct hal_srng_params *ring_params,
					  int ring_type)
{
	/* No-op without WLAN_FEATURE_NEAR_FULL_IRQ */
}
1465 
static inline void
dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
			  struct hal_srng_params *ring_params)
{
	/* No-op without WLAN_FEATURE_NEAR_FULL_IRQ */
}
1471 #endif
1472 
/**
 * dp_srng_calculate_msi_group() - map a ring to its regular (and optionally
 *			near-full) interrupt ext_group
 * @soc: Datapath SoC handle
 * @ring_type: SRNG type of the ring being mapped
 * @ring_num: ring number within @ring_type; may be remapped internally
 *	(e.g. the rx release ring maps to bit 0, WBM2SW ring 4 to the
 *	4th tx-comp bit) before the mask lookup
 * @reg_msi_grp_num: out-param, group index for the regular interrupt
 *	(may be negative if the ring is in no group's mask)
 * @nf_irq_support: true if the caller wants the near-full group as well
 * @nf_msi_grp_num: out-param, group index for the near-full interrupt;
 *	written only when near-full is both requested and enabled for
 *	this ring
 *
 * Return: QDF_STATUS_SUCCESS when the ring type participates in host
 *	interrupt grouping, -QDF_STATUS_E_NOENT for rings that raise no
 *	host interrupt (SW-to-HW, unused, or CE-handled rings)
 */
static int dp_srng_calculate_msi_group(struct dp_soc *soc,
				       enum hal_ring_type ring_type,
				       int ring_num,
				       int *reg_msi_grp_num,
				       bool nf_irq_support,
				       int *nf_msi_grp_num)
{
	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
	uint8_t *grp_mask, *nf_irq_mask = NULL;
	bool nf_irq_enabled = false;
	uint8_t wbm2_sw_rx_rel_ring_id;

	switch (ring_type) {
	case WBM2SW_RELEASE:
		wbm2_sw_rx_rel_ring_id =
			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
		if (ring_num == wbm2_sw_rx_rel_ring_id) {
			/* dp_rx_wbm_err_process - soc->rx_rel_ring */
			grp_mask = &cfg_ctx->int_rx_wbm_rel_ring_mask[0];
			/* the rel-ring mask uses bit 0 regardless of id */
			ring_num = 0;
		} else { /* dp_tx_comp_handler - soc->tx_comp_ring */
			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
			nf_irq_mask = dp_srng_get_near_full_irq_mask(soc,
								     ring_type,
								     ring_num);
			if (nf_irq_mask)
				nf_irq_enabled = true;

			/*
			 * Using ring 4 as 4th tx completion ring since ring 3
			 * is Rx error ring
			 */
			if (ring_num == WBM2SW_TXCOMP_RING4_NUM)
				ring_num = TXCOMP_RING4_NUM;
		}
	break;

	case REO_EXCEPTION:
		/* dp_rx_err_process - &soc->reo_exception_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
	break;

	case REO_DST:
		/* dp_rx_process - soc->reo_dest_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
		nf_irq_mask = dp_srng_get_near_full_irq_mask(soc, ring_type,
							     ring_num);
		if (nf_irq_mask)
			nf_irq_enabled = true;
	break;

	case REO_STATUS:
		/* dp_reo_status_ring_handler - soc->reo_status_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
	break;

	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
	case RXDMA_MONITOR_STATUS:
	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
	case RXDMA_MONITOR_DST:
		/* dp_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
	break;
	case TX_MONITOR_DST:
		/* dp_tx_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_tx_mon_ring_mask[0];
	break;
	case RXDMA_DST:
		/* dp_rxdma_err_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
	break;

	case RXDMA_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
	break;

	case RXDMA_MONITOR_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
	break;

	case TX_MONITOR_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2txmon_ring_mask[0];
	break;

	case TCL_DATA:
	/* CMD_CREDIT_RING is used as command in 8074 and credit in 9000 */
	case TCL_CMD_CREDIT:
	case REO_CMD:
	case SW2WBM_RELEASE:
	case WBM_IDLE_LINK:
		/* normally empty SW_TO_HW rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case TCL_STATUS:
	case REO_REINJECT:
		/* misc unused rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case CE_SRC:
	case CE_DST:
	case CE_DST_STATUS:
		/* CE_rings - currently handled by hif */
	default:
		return -QDF_STATUS_E_NOENT;
	break;
	}

	*reg_msi_grp_num = dp_srng_find_ring_in_mask(ring_num, grp_mask);

	if (nf_irq_support && nf_irq_enabled) {
		*nf_msi_grp_num = dp_srng_find_ring_in_mask(ring_num,
							    nf_irq_mask);
	}

	return QDF_STATUS_SUCCESS;
}
1591 
1592 /*
1593  * dp_get_num_msi_available()- API to get number of MSIs available
1594  * @dp_soc: DP soc Handle
1595  * @interrupt_mode: Mode of interrupts
1596  *
1597  * Return: Number of MSIs available or 0 in case of integrated
1598  */
1599 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
1600 static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
1601 {
1602 	return 0;
1603 }
1604 #else
1605 /*
1606  * dp_get_num_msi_available()- API to get number of MSIs available
1607  * @dp_soc: DP soc Handle
1608  * @interrupt_mode: Mode of interrupts
1609  *
1610  * Return: Number of MSIs available or 0 in case of integrated
1611  */
1612 static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
1613 {
1614 	int msi_data_count;
1615 	int msi_data_start;
1616 	int msi_irq_start;
1617 	int ret;
1618 
1619 	if (interrupt_mode == DP_INTR_INTEGRATED) {
1620 		return 0;
1621 	} else if (interrupt_mode == DP_INTR_MSI || interrupt_mode ==
1622 		   DP_INTR_POLL) {
1623 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1624 						  &msi_data_count,
1625 						  &msi_data_start,
1626 						  &msi_irq_start);
1627 		if (ret) {
1628 			qdf_err("Unable to get DP MSI assignment %d",
1629 				interrupt_mode);
1630 			return -EINVAL;
1631 		}
1632 		return msi_data_count;
1633 	}
1634 	qdf_err("Interrupt mode invalid %d", interrupt_mode);
1635 	return -EINVAL;
1636 }
1637 #endif
1638 
/*
 * dp_srng_msi_setup() - program the MSI (and, where supported, MSI2
 * near-full) address/data for a ring based on its interrupt ext_group
 * @soc: Datapath SoC handle
 * @ring_params: ring params to be filled in
 * @ring_type: SRNG type
 * @ring_num: ring number within @ring_type
 *
 * Return: None (ring_params->msi_addr/msi_data are left zeroed when the
 * ring is not part of any ext_group or the MSI query fails)
 */
static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
			      *ring_params, int ring_type, int ring_num)
{
	int reg_msi_grp_num;
	/*
	 * nf_msi_grp_num needs to be initialized with negative value,
	 * to avoid configuring near-full msi for WBM2SW3 ring
	 */
	int nf_msi_grp_num = -1;
	int msi_data_count;
	int ret;
	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
	bool nf_irq_support;

	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					    &msi_data_count, &msi_data_start,
					    &msi_irq_start);

	if (ret)
		return;

	nf_irq_support = hal_srng_is_near_full_irq_supported(soc->hal_soc,
							     ring_type,
							     ring_num);
	ret = dp_srng_calculate_msi_group(soc, ring_type, ring_num,
					  &reg_msi_grp_num,
					  nf_irq_support,
					  &nf_msi_grp_num);
	/* Ring type raises no host interrupt: clear MSI and MSI2 alike */
	if (ret < 0) {
		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
		return;
	}

	/* No group claims this ring's regular IRQ, but its near-full IRQ
	 * may still be configured below.
	 */
	if (reg_msi_grp_num < 0) {
		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		goto configure_msi2;
	}

	/* Group beyond the MSI vector count: two groups end up sharing one
	 * MSI via the modulo mapping below. Flag loudly but continue.
	 */
	if (dp_is_msi_group_number_invalid(reg_msi_grp_num, msi_data_count)) {
		dp_init_warn("%pK: 2 msi_groups will share an msi; msi_group_num %d",
			     soc, reg_msi_grp_num);
		QDF_ASSERT(0);
	}

	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = (reg_msi_grp_num % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;

	dp_debug("ring type %u ring_num %u msi->data %u msi_addr %llx",
		 ring_type, ring_num, ring_params->msi_data,
		 (uint64_t)ring_params->msi_addr);

configure_msi2:
	if (!nf_irq_support) {
		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
		return;
	}

	dp_srng_msi2_setup(soc, ring_params, ring_type, ring_num,
			   nf_msi_grp_num);
}
1711 
1712 #ifdef FEATURE_AST
1713 /**
1714  * dp_print_mlo_ast_stats() - Print AST stats for MLO peers
1715  *
1716  * @soc : core DP soc context
1717  *
1718  * Return: void
1719  */
1720 void dp_print_mlo_ast_stats(struct dp_soc *soc)
1721 {
1722 	if (soc->arch_ops.print_mlo_ast_stats)
1723 		soc->arch_ops.print_mlo_ast_stats(soc);
1724 }
1725 
1726 /**
1727  * dp_print_peer_ast_entries() - Dump AST entries of peer
1728  * @soc: Datapath soc handle
1729  * @peer: Datapath peer
1730  * @arg: argument to iterate function
1731  *
1732  * return void
1733  */
1734 void
1735 dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
1736 {
1737 	struct dp_ast_entry *ase, *tmp_ase;
1738 	uint32_t num_entries = 0;
1739 	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
1740 			"NONE", "STATIC", "SELF", "WDS", "HMWDS", "BSS",
1741 			"DA", "HMWDS_SEC", "MLD"};
1742 
1743 	DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
1744 	    DP_PRINT_STATS("%6d mac_addr = "QDF_MAC_ADDR_FMT
1745 		    " peer_mac_addr = "QDF_MAC_ADDR_FMT
1746 		    " peer_id = %u"
1747 		    " type = %s"
1748 		    " next_hop = %d"
1749 		    " is_active = %d"
1750 		    " ast_idx = %d"
1751 		    " ast_hash = %d"
1752 		    " delete_in_progress = %d"
1753 		    " pdev_id = %d"
1754 		    " vdev_id = %d",
1755 		    ++num_entries,
1756 		    QDF_MAC_ADDR_REF(ase->mac_addr.raw),
1757 		    QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1758 		    ase->peer_id,
1759 		    type[ase->type],
1760 		    ase->next_hop,
1761 		    ase->is_active,
1762 		    ase->ast_idx,
1763 		    ase->ast_hash_value,
1764 		    ase->delete_in_progress,
1765 		    ase->pdev_id,
1766 		    ase->vdev_id);
1767 	}
1768 }
1769 
1770 /**
1771  * dp_print_ast_stats() - Dump AST table contents
1772  * @soc: Datapath soc handle
1773  *
1774  * return void
1775  */
1776 void dp_print_ast_stats(struct dp_soc *soc)
1777 {
1778 	DP_PRINT_STATS("AST Stats:");
1779 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
1780 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
1781 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
1782 	DP_PRINT_STATS("	Entries MAP ERR  = %d", soc->stats.ast.map_err);
1783 	DP_PRINT_STATS("	Entries Mismatch ERR  = %d",
1784 		       soc->stats.ast.ast_mismatch);
1785 
1786 	DP_PRINT_STATS("AST Table:");
1787 
1788 	qdf_spin_lock_bh(&soc->ast_lock);
1789 
1790 	dp_soc_iterate_peer(soc, dp_print_peer_ast_entries, NULL,
1791 			    DP_MOD_ID_GENERIC_STATS);
1792 
1793 	qdf_spin_unlock_bh(&soc->ast_lock);
1794 
1795 	dp_print_mlo_ast_stats(soc);
1796 }
1797 #else
/* Stub used when FEATURE_AST is compiled out: just report unavailability.
 * (Redundant trailing `return;` in a void function removed.)
 */
void dp_print_ast_stats(struct dp_soc *soc)
{
	DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST");
}
1803 #endif
1804 
1805 /**
1806  * dp_print_peer_info() - Dump peer info
1807  * @soc: Datapath soc handle
1808  * @peer: Datapath peer handle
1809  * @arg: argument to iter function
1810  *
1811  * return void
1812  */
1813 static void
1814 dp_print_peer_info(struct dp_soc *soc, struct dp_peer *peer, void *arg)
1815 {
1816 	struct dp_txrx_peer *txrx_peer = NULL;
1817 
1818 	txrx_peer = dp_get_txrx_peer(peer);
1819 	if (!txrx_peer)
1820 		return;
1821 
1822 	DP_PRINT_STATS(" peer id = %d"
1823 		       " peer_mac_addr = "QDF_MAC_ADDR_FMT
1824 		       " nawds_enabled = %d"
1825 		       " bss_peer = %d"
1826 		       " wds_enabled = %d"
1827 		       " tx_cap_enabled = %d"
1828 		       " rx_cap_enabled = %d",
1829 		       peer->peer_id,
1830 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1831 		       txrx_peer->nawds_enabled,
1832 		       txrx_peer->bss_peer,
1833 		       txrx_peer->wds_enabled,
1834 		       dp_monitor_is_tx_cap_enabled(peer),
1835 		       dp_monitor_is_rx_cap_enabled(peer));
1836 }
1837 
1838 /**
1839  * dp_print_peer_table() - Dump all Peer stats
1840  * @vdev: Datapath Vdev handle
1841  *
1842  * return void
1843  */
1844 static void dp_print_peer_table(struct dp_vdev *vdev)
1845 {
1846 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
1847 	dp_vdev_iterate_peer(vdev, dp_print_peer_info, NULL,
1848 			     DP_MOD_ID_GENERIC_STATS);
1849 }
1850 
1851 #ifdef WLAN_DP_PER_RING_TYPE_CONFIG
1852 /**
1853  * dp_srng_configure_interrupt_thresholds() - Retrieve interrupt
1854  * threshold values from the wlan_srng_cfg table for each ring type
1855  * @soc: device handle
1856  * @ring_params: per ring specific parameters
1857  * @ring_type: Ring type
1858  * @ring_num: Ring number for a given ring type
1859  *
1860  * Fill the ring params with the interrupt threshold
1861  * configuration parameters available in the per ring type wlan_srng_cfg
1862  * table.
1863  *
1864  * Return: None
1865  */
1866 static void
1867 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
1868 				       struct hal_srng_params *ring_params,
1869 				       int ring_type, int ring_num,
1870 				       int num_entries)
1871 {
1872 	uint8_t wbm2_sw_rx_rel_ring_id;
1873 
1874 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);
1875 
1876 	if (ring_type == REO_DST) {
1877 		ring_params->intr_timer_thres_us =
1878 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1879 		ring_params->intr_batch_cntr_thres_entries =
1880 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1881 	} else if (ring_type == WBM2SW_RELEASE &&
1882 		   (ring_num == wbm2_sw_rx_rel_ring_id)) {
1883 		ring_params->intr_timer_thres_us =
1884 				wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1885 		ring_params->intr_batch_cntr_thres_entries =
1886 				wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1887 	} else {
1888 		ring_params->intr_timer_thres_us =
1889 				soc->wlan_srng_cfg[ring_type].timer_threshold;
1890 		ring_params->intr_batch_cntr_thres_entries =
1891 				soc->wlan_srng_cfg[ring_type].batch_count_threshold;
1892 	}
1893 	ring_params->low_threshold =
1894 			soc->wlan_srng_cfg[ring_type].low_threshold;
1895 	if (ring_params->low_threshold)
1896 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1897 
1898 	dp_srng_configure_nf_interrupt_thresholds(soc, ring_params, ring_type);
1899 }
1900 #else
/*
 * dp_srng_configure_interrupt_thresholds() - fill interrupt mitigation
 * (timer / batch-counter) and low-threshold parameters for a ring when no
 * per-ring-type wlan_srng_cfg table is in use
 * @soc: device handle
 * @ring_params: per ring specific parameters to be filled
 * @ring_type: Ring type
 * @ring_num: Ring number for a given ring type
 * @num_entries: number of entries in the ring
 *
 * NOTE: later assignments in this function deliberately override earlier
 * ones (e.g. the RXDMA_MONITOR_BUF low_threshold set in the generic rx-buf
 * branch is lowered again further down). Keep the ordering intact.
 *
 * Return: None
 */
static void
dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
				       struct hal_srng_params *ring_params,
				       int ring_type, int ring_num,
				       int num_entries)
{
	uint8_t wbm2_sw_rx_rel_ring_id;

	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);

	if (ring_type == REO_DST) {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
	} else if (ring_type == WBM2SW_RELEASE &&
		   (ring_num < wbm2_sw_rx_rel_ring_id ||
		   ring_num == WBM2SW_TXCOMP_RING4_NUM)) {
		/* tx completion rings (below the rx release id, plus ring 4) */
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
	} else {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
	}

	/* These rings donot require interrupt to host. Make them zero */
	switch (ring_type) {
	case REO_REINJECT:
	case REO_CMD:
	case TCL_DATA:
	case TCL_CMD_CREDIT:
	case TCL_STATUS:
	case WBM_IDLE_LINK:
	case SW2WBM_RELEASE:
	case PPE2TCL:
	case SW2RXDMA_NEW:
		ring_params->intr_timer_thres_us = 0;
		ring_params->intr_batch_cntr_thres_entries = 0;
		break;
	}

	/* Enable low threshold interrupts for rx buffer rings (regular and
	 * monitor buffer rings.
	 * TODO: See if this is required for any other ring
	 */
	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
	    (ring_type == RXDMA_MONITOR_STATUS ||
	    (ring_type == TX_MONITOR_BUF))) {
		/* TODO: Setting low threshold to 1/8th of ring size
		 * see if this needs to be configurable
		 */
		ring_params->low_threshold = num_entries >> 3;
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
		ring_params->intr_batch_cntr_thres_entries = 0;
	}

	/* During initialisation monitor rings are only filled with
	 * MON_BUF_MIN_ENTRIES entries. So low threshold needs to be set to
	 * a value less than that. Low threshold value is reconfigured again
	 * to 1/8th of the ring size when monitor vap is created.
	 */
	if (ring_type == RXDMA_MONITOR_BUF)
		ring_params->low_threshold = MON_BUF_MIN_ENTRIES >> 1;

	/* In case of PCI chipsets, we dont have PPDU end interrupts,
	 * so MONITOR STATUS ring is reaped by receiving MSI from srng.
	 * Keep batch threshold as 8 so that interrupt is received for
	 * every 4 packets in MONITOR_STATUS ring
	 */
	if ((ring_type == RXDMA_MONITOR_STATUS) &&
	    (soc->intr_mode == DP_INTR_MSI))
		ring_params->intr_batch_cntr_thres_entries = 4;
}
1980 #endif
1981 
1982 #ifdef DP_MEM_PRE_ALLOC
1983 
1984 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
1985 			   size_t ctxt_size)
1986 {
1987 	void *ctxt_mem;
1988 
1989 	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_context) {
1990 		dp_warn("dp_prealloc_get_context null!");
1991 		goto dynamic_alloc;
1992 	}
1993 
1994 	ctxt_mem = soc->cdp_soc.ol_ops->dp_prealloc_get_context(ctxt_type,
1995 								ctxt_size);
1996 
1997 	if (ctxt_mem)
1998 		goto end;
1999 
2000 dynamic_alloc:
2001 	dp_info("Pre-alloc type %d, size %zu failed, need dynamic-alloc",
2002 		ctxt_type, ctxt_size);
2003 	ctxt_mem = qdf_mem_malloc(ctxt_size);
2004 end:
2005 	return ctxt_mem;
2006 }
2007 
2008 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
2009 			 void *vaddr)
2010 {
2011 	QDF_STATUS status;
2012 
2013 	if (soc->cdp_soc.ol_ops->dp_prealloc_put_context) {
2014 		status = soc->cdp_soc.ol_ops->dp_prealloc_put_context(
2015 								ctxt_type,
2016 								vaddr);
2017 	} else {
2018 		dp_warn("dp_prealloc_put_context null!");
2019 		status = QDF_STATUS_E_NOSUPPORT;
2020 	}
2021 
2022 	if (QDF_IS_STATUS_ERROR(status)) {
2023 		dp_info("Context type %d not pre-allocated", ctxt_type);
2024 		qdf_mem_free(vaddr);
2025 	}
2026 }
2027 
2028 static inline
2029 void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
2030 					   struct dp_srng *srng,
2031 					   uint32_t ring_type)
2032 {
2033 	void *mem;
2034 
2035 	qdf_assert(!srng->is_mem_prealloc);
2036 
2037 	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_consistent) {
2038 		dp_warn("dp_prealloc_get_consistent is null!");
2039 		goto qdf;
2040 	}
2041 
2042 	mem =
2043 		soc->cdp_soc.ol_ops->dp_prealloc_get_consistent
2044 						(&srng->alloc_size,
2045 						 &srng->base_vaddr_unaligned,
2046 						 &srng->base_paddr_unaligned,
2047 						 &srng->base_paddr_aligned,
2048 						 DP_RING_BASE_ALIGN, ring_type);
2049 
2050 	if (mem) {
2051 		srng->is_mem_prealloc = true;
2052 		goto end;
2053 	}
2054 qdf:
2055 	mem =  qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
2056 						&srng->base_vaddr_unaligned,
2057 						&srng->base_paddr_unaligned,
2058 						&srng->base_paddr_aligned,
2059 						DP_RING_BASE_ALIGN);
2060 end:
2061 	dp_info("%s memory %pK dp_srng %pK ring_type %d alloc_size %d num_entries %d",
2062 		srng->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", mem,
2063 		srng, ring_type, srng->alloc_size, srng->num_entries);
2064 	return mem;
2065 }
2066 
2067 static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
2068 					       struct dp_srng *srng)
2069 {
2070 	if (srng->is_mem_prealloc) {
2071 		if (!soc->cdp_soc.ol_ops->dp_prealloc_put_consistent) {
2072 			dp_warn("dp_prealloc_put_consistent is null!");
2073 			QDF_BUG(0);
2074 			return;
2075 		}
2076 		soc->cdp_soc.ol_ops->dp_prealloc_put_consistent
2077 						(srng->alloc_size,
2078 						 srng->base_vaddr_unaligned,
2079 						 srng->base_paddr_unaligned);
2080 
2081 	} else {
2082 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2083 					srng->alloc_size,
2084 					srng->base_vaddr_unaligned,
2085 					srng->base_paddr_unaligned, 0);
2086 	}
2087 }
2088 
2089 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
2090 				   enum dp_desc_type desc_type,
2091 				   struct qdf_mem_multi_page_t *pages,
2092 				   size_t element_size,
2093 				   uint32_t element_num,
2094 				   qdf_dma_context_t memctxt,
2095 				   bool cacheable)
2096 {
2097 	if (!soc->cdp_soc.ol_ops->dp_get_multi_pages) {
2098 		dp_warn("dp_get_multi_pages is null!");
2099 		goto qdf;
2100 	}
2101 
2102 	pages->num_pages = 0;
2103 	pages->is_mem_prealloc = 0;
2104 	soc->cdp_soc.ol_ops->dp_get_multi_pages(desc_type,
2105 						element_size,
2106 						element_num,
2107 						pages,
2108 						cacheable);
2109 	if (pages->num_pages)
2110 		goto end;
2111 
2112 qdf:
2113 	qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size,
2114 				  element_num, memctxt, cacheable);
2115 end:
2116 	dp_info("%s desc_type %d element_size %d element_num %d cacheable %d",
2117 		pages->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc",
2118 		desc_type, (int)element_size, element_num, cacheable);
2119 }
2120 
2121 void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
2122 				  enum dp_desc_type desc_type,
2123 				  struct qdf_mem_multi_page_t *pages,
2124 				  qdf_dma_context_t memctxt,
2125 				  bool cacheable)
2126 {
2127 	if (pages->is_mem_prealloc) {
2128 		if (!soc->cdp_soc.ol_ops->dp_put_multi_pages) {
2129 			dp_warn("dp_put_multi_pages is null!");
2130 			QDF_BUG(0);
2131 			return;
2132 		}
2133 
2134 		soc->cdp_soc.ol_ops->dp_put_multi_pages(desc_type, pages);
2135 		qdf_mem_zero(pages, sizeof(*pages));
2136 	} else {
2137 		qdf_mem_multi_pages_free(soc->osdev, pages,
2138 					 memctxt, cacheable);
2139 	}
2140 }
2141 
2142 #else
2143 
2144 static inline
2145 void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
2146 					   struct dp_srng *srng,
2147 					   uint32_t ring_type)
2148 
2149 {
2150 	void *mem;
2151 
2152 	mem = qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
2153 					       &srng->base_vaddr_unaligned,
2154 					       &srng->base_paddr_unaligned,
2155 					       &srng->base_paddr_aligned,
2156 					       DP_RING_BASE_ALIGN);
2157 	if (mem)
2158 		qdf_mem_set(srng->base_vaddr_unaligned, 0, srng->alloc_size);
2159 
2160 	return mem;
2161 }
2162 
/* DP_MEM_PRE_ALLOC disabled: SRNG memory is always dynamically allocated,
 * so release it straight back to the qdf consistent-memory allocator.
 */
static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
					       struct dp_srng *srng)
{
	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
				srng->alloc_size,
				srng->base_vaddr_unaligned,
				srng->base_paddr_unaligned, 0);
}
2171 
2172 #endif /* DP_MEM_PRE_ALLOC */
2173 
2174 #ifdef QCA_SUPPORT_WDS_EXTENDED
/* QCA_SUPPORT_WDS_EXTENDED: report the per-vdev WDS-extended flag */
static bool dp_vdev_is_wds_ext_enabled(struct dp_vdev *vdev)
{
	return vdev->wds_ext_enabled;
}
2179 #else
/* WDS-extended support compiled out: never enabled */
static bool dp_vdev_is_wds_ext_enabled(struct dp_vdev *vdev)
{
	return false;
}
2184 #endif
2185 
2186 void dp_pdev_update_fast_rx_flag(struct dp_soc *soc, struct dp_pdev *pdev)
2187 {
2188 	struct dp_vdev *vdev = NULL;
2189 	uint8_t rx_fast_flag = true;
2190 
2191 	if (wlan_cfg_is_rx_flow_tag_enabled(soc->wlan_cfg_ctx)) {
2192 		rx_fast_flag = false;
2193 		goto update_flag;
2194 	}
2195 
2196 	/* Check if protocol tagging enable */
2197 	if (pdev->is_rx_protocol_tagging_enabled) {
2198 		rx_fast_flag = false;
2199 		goto update_flag;
2200 	}
2201 
2202 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
2203 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
2204 		/* Check if any VDEV has NAWDS enabled */
2205 		if (vdev->nawds_enabled) {
2206 			rx_fast_flag = false;
2207 			break;
2208 		}
2209 
2210 		/* Check if any VDEV has multipass enabled */
2211 		if (vdev->multipass_en) {
2212 			rx_fast_flag = false;
2213 			break;
2214 		}
2215 
2216 		/* Check if any VDEV has mesh enabled */
2217 		if (vdev->mesh_vdev) {
2218 			rx_fast_flag = false;
2219 			break;
2220 		}
2221 
2222 		/* Check if any VDEV has WDS ext enabled */
2223 		if (dp_vdev_is_wds_ext_enabled(vdev)) {
2224 			rx_fast_flag = false;
2225 			break;
2226 		}
2227 	}
2228 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
2229 
2230 update_flag:
2231 	dp_init_info("Updated Rx fast flag to %u", rx_fast_flag);
2232 	pdev->rx_fast_flag = rx_fast_flag;
2233 }
2234 
2235 /*
2236  * dp_srng_free() - Free SRNG memory
2237  * @soc  : Data path soc handle
2238  * @srng : SRNG pointer
2239  *
2240  * return: None
2241  */
2242 void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng)
2243 {
2244 	if (srng->alloc_size && srng->base_vaddr_unaligned) {
2245 		if (!srng->cached) {
2246 			dp_srng_mem_free_consistent(soc, srng);
2247 		} else {
2248 			qdf_mem_free(srng->base_vaddr_unaligned);
2249 		}
2250 		srng->alloc_size = 0;
2251 		srng->base_vaddr_unaligned = NULL;
2252 	}
2253 	srng->hal_srng = NULL;
2254 }
2255 
2256 qdf_export_symbol(dp_srng_free);
2257 
2258 #ifdef DISABLE_MON_RING_MSI_CFG
2259 /*
2260  * dp_skip_msi_cfg() - Check if msi cfg has to be skipped for ring_type
2261  * @ring_type: sring type
2262  *
2263  * Return: True if msi cfg should be skipped for srng type else false
2264  */
2265 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
2266 {
2267 	if (ring_type == RXDMA_MONITOR_STATUS)
2268 		return true;
2269 
2270 	return false;
2271 }
2272 #else
2273 #ifdef DP_CON_MON_MSI_ENABLED
2274 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
2275 {
2276 	if (soc->cdp_soc.ol_ops->get_con_mode &&
2277 	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE) {
2278 		if (ring_type == REO_DST || ring_type == RXDMA_DST)
2279 			return true;
2280 	} else if (ring_type == RXDMA_MONITOR_STATUS) {
2281 		return true;
2282 	}
2283 
2284 	return false;
2285 }
2286 #else
/* Neither DISABLE_MON_RING_MSI_CFG nor DP_CON_MON_MSI_ENABLED is set:
 * MSI configuration is never skipped for any ring type.
 */
static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
{
	return false;
}
2291 #endif /* DP_CON_MON_MSI_ENABLED */
2292 #endif /* DISABLE_MON_RING_MSI_CFG */
2293 
2294 #ifdef DP_UMAC_HW_RESET_SUPPORT
2295 static bool dp_check_umac_reset_in_progress(struct dp_soc *soc)
2296 {
2297 	return !!soc->umac_reset_ctx.intr_ctx_bkp;
2298 }
2299 #else
/* DP_UMAC_HW_RESET_SUPPORT compiled out: a reset can never be in progress */
static bool dp_check_umac_reset_in_progress(struct dp_soc *soc)
{
	return false;
}
2304 #endif
2305 
2306 /*
2307  * dp_srng_init() - Initialize SRNG
2308  * @soc  : Data path soc handle
2309  * @srng : SRNG pointer
2310  * @ring_type : Ring Type
2311  * @ring_num: Ring number
2312  * @mac_id: mac_id
2313  *
2314  * return: QDF_STATUS
2315  */
QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng,
			int ring_type, int ring_num, int mac_id)
{
	bool idle_check;

	hal_soc_handle_t hal_soc = soc->hal_soc;
	struct hal_srng_params ring_params;

	/* Idempotent: a ring that is already set up is left untouched */
	if (srng->hal_srng) {
		dp_init_err("%pK: Ring type: %d, num:%d is already initialized",
			    soc, ring_type, ring_num);
		return QDF_STATUS_SUCCESS;
	}

	/* memset the srng ring to zero */
	qdf_mem_zero(srng->base_vaddr_unaligned, srng->alloc_size);

	qdf_mem_zero(&ring_params, sizeof(struct hal_srng_params));
	ring_params.ring_base_paddr = srng->base_paddr_aligned;
	ring_params.ring_base_vaddr = srng->base_vaddr_aligned;

	ring_params.num_entries = srng->num_entries;

	dp_info("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
		ring_type, ring_num,
		(void *)ring_params.ring_base_vaddr,
		(void *)ring_params.ring_base_paddr,
		ring_params.num_entries);

	/* Program MSI only in MSI interrupt mode and when this ring type
	 * is not excluded (see dp_skip_msi_cfg()); otherwise clear the
	 * MSI fields so the ring raises no MSI.
	 */
	if (soc->intr_mode == DP_INTR_MSI && !dp_skip_msi_cfg(soc, ring_type)) {
		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
				 ring_type, ring_num);
	} else {
		ring_params.msi_data = 0;
		ring_params.msi_addr = 0;
		dp_srng_set_msi2_ring_params(soc, &ring_params, 0, 0);
		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
				 ring_type, ring_num);
	}

	/* per-ring-type interrupt batch/timer and near-full thresholds */
	dp_srng_configure_interrupt_thresholds(soc, &ring_params,
					       ring_type, ring_num,
					       srng->num_entries);

	dp_srng_set_nf_thresholds(soc, srng, &ring_params);

	if (srng->cached)
		ring_params.flags |= HAL_SRNG_CACHED_DESC;

	/* NOTE(review): idle_check is derived from "UMAC reset in
	 * progress" — confirm hal_srng_setup()'s expected semantics.
	 */
	idle_check = dp_check_umac_reset_in_progress(soc);

	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
					mac_id, &ring_params, idle_check);

	if (!srng->hal_srng) {
		/* HAL setup failed: release the backing memory as well */
		dp_srng_free(soc, srng);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
2378 
2379 qdf_export_symbol(dp_srng_init);
2380 
2381 /*
2382  * dp_srng_alloc() - Allocate memory for SRNG
2383  * @soc  : Data path soc handle
2384  * @srng : SRNG pointer
2385  * @ring_type : Ring Type
2386  * @num_entries: Number of entries
2387  * @cached: cached flag variable
2388  *
2389  * return: QDF_STATUS
2390  */
2391 QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
2392 			 int ring_type, uint32_t num_entries,
2393 			 bool cached)
2394 {
2395 	hal_soc_handle_t hal_soc = soc->hal_soc;
2396 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
2397 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
2398 
2399 	if (srng->base_vaddr_unaligned) {
2400 		dp_init_err("%pK: Ring type: %d, is already allocated",
2401 			    soc, ring_type);
2402 		return QDF_STATUS_SUCCESS;
2403 	}
2404 
2405 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
2406 	srng->hal_srng = NULL;
2407 	srng->alloc_size = num_entries * entry_size;
2408 	srng->num_entries = num_entries;
2409 	srng->cached = cached;
2410 
2411 	if (!cached) {
2412 		srng->base_vaddr_aligned =
2413 		    dp_srng_aligned_mem_alloc_consistent(soc,
2414 							 srng,
2415 							 ring_type);
2416 	} else {
2417 		srng->base_vaddr_aligned = qdf_aligned_malloc(
2418 					&srng->alloc_size,
2419 					&srng->base_vaddr_unaligned,
2420 					&srng->base_paddr_unaligned,
2421 					&srng->base_paddr_aligned,
2422 					DP_RING_BASE_ALIGN);
2423 	}
2424 
2425 	if (!srng->base_vaddr_aligned)
2426 		return QDF_STATUS_E_NOMEM;
2427 
2428 	return QDF_STATUS_SUCCESS;
2429 }
2430 
2431 qdf_export_symbol(dp_srng_alloc);
2432 
2433 /*
2434  * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
2435  * @soc: DP SOC handle
2436  * @srng: source ring structure
2437  * @ring_type: type of ring
2438  * @ring_num: ring number
2439  *
2440  * Return: None
2441  */
void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
		    int ring_type, int ring_num)
{
	/* nothing to do for a ring that was never (or already de-) set up */
	if (!srng->hal_srng) {
		dp_init_err("%pK: Ring type: %d, num:%d not setup",
			    soc, ring_type, ring_num);
		return;
	}

	/* tear down the HAL ring; memory is released by dp_srng_free() */
	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
	srng->hal_srng = NULL;
}
2454 
2455 qdf_export_symbol(dp_srng_deinit);
2456 
2457 /* TODO: Need this interface from HIF */
2458 void *hif_get_hal_handle(struct hif_opaque_softc *hif_handle);
2459 
2460 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
2461 int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
2462 			 hal_ring_handle_t hal_ring_hdl)
2463 {
2464 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2465 	uint32_t hp, tp;
2466 	uint8_t ring_id;
2467 
2468 	if (!int_ctx)
2469 		return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
2470 
2471 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
2472 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2473 
2474 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
2475 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_START);
2476 
2477 	return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
2478 }
2479 
2480 void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
2481 			hal_ring_handle_t hal_ring_hdl)
2482 {
2483 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2484 	uint32_t hp, tp;
2485 	uint8_t ring_id;
2486 
2487 	if (!int_ctx)
2488 		return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
2489 
2490 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
2491 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2492 
2493 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
2494 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_END);
2495 
2496 	return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
2497 }
2498 
/* Log a timer-entry event for @hist_group_id into the hif event history */
static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
					      uint8_t hist_group_id)
{
	hif_record_event(dp_soc->hif_handle, hist_group_id,
			 0, 0, 0, HIF_EVENT_TIMER_ENTRY);
}
2505 
/* Log a timer-exit event for @hist_group_id into the hif event history */
static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
					     uint8_t hist_group_id)
{
	hif_record_event(dp_soc->hif_handle, hist_group_id,
			 0, 0, 0, HIF_EVENT_TIMER_EXIT);
}
2512 #else
2513 
/* Event history disabled: timer-entry recording is a no-op */
static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
					      uint8_t hist_group_id)
{
}
2518 
/* Event history disabled: timer-exit recording is a no-op */
static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
					     uint8_t hist_group_id)
{
}
2523 
2524 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
2525 
2526 /*
2527  * dp_should_timer_irq_yield() - Decide if the bottom half should yield
2528  * @soc: DP soc handle
2529  * @work_done: work done in softirq context
2530  * @start_time: start time for the softirq
2531  *
2532  * Return: enum with yield code
2533  */
2534 enum timer_yield_status
2535 dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
2536 			  uint64_t start_time)
2537 {
2538 	uint64_t cur_time = qdf_get_log_timestamp();
2539 
2540 	if (!work_done)
2541 		return DP_TIMER_WORK_DONE;
2542 
2543 	if (cur_time - start_time > DP_MAX_TIMER_EXEC_TIME_TICKS)
2544 		return DP_TIMER_TIME_EXHAUST;
2545 
2546 	return DP_TIMER_NO_YIELD;
2547 }
2548 
2549 qdf_export_symbol(dp_should_timer_irq_yield);
2550 
/*
 * dp_process_rxdma_dst_ring() - Process the RXDMA destination (error) ring
 * @soc: DP soc handle
 * @int_ctx: interrupt context
 * @mac_for_pdev: lmac id whose ring is reaped
 * @total_budget: max entries to process
 *
 * Thin wrapper over dp_rxdma_err_process().
 *
 * Return: number of entries processed
 */
static int dp_process_rxdma_dst_ring(struct dp_soc *soc,
				     struct dp_intr *int_ctx,
				     int mac_for_pdev,
				     int total_budget)
{
	return dp_rxdma_err_process(int_ctx, soc, mac_for_pdev,
				    total_budget);
}
2559 
2560 /**
2561  * dp_process_lmac_rings() - Process LMAC rings
2562  * @int_ctx: interrupt context
2563  * @total_budget: budget of work which can be done
2564  *
2565  * Return: work done
2566  */
static int dp_process_lmac_rings(struct dp_intr *int_ctx, int total_budget)
{
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_soc *soc = int_ctx->soc;
	uint32_t remaining_quota = total_budget;
	struct dp_pdev *pdev = NULL;
	uint32_t work_done  = 0;
	int budget = total_budget;
	int ring = 0;

	/* Process LMAC interrupts */
	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
		int mac_for_pdev = ring;

		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
		if (!pdev)
			continue;
		/* Rx monitor ring for this lmac */
		if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
			work_done = dp_monitor_process(soc, int_ctx,
						       mac_for_pdev,
						       remaining_quota);
			if (work_done)
				intr_stats->num_rx_mon_ring_masks++;
			budget -= work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}

		/* Tx monitor ring for this lmac */
		if (int_ctx->tx_mon_ring_mask & (1 << mac_for_pdev)) {
			work_done = dp_tx_mon_process(soc, int_ctx,
						      mac_for_pdev,
						      remaining_quota);
			if (work_done)
				intr_stats->num_tx_mon_ring_masks++;
			budget -= work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}

		/* RXDMA destination (error) ring */
		if (int_ctx->rxdma2host_ring_mask &
				(1 << mac_for_pdev)) {
			work_done = dp_process_rxdma_dst_ring(soc, int_ctx,
							      mac_for_pdev,
							      remaining_quota);
			if (work_done)
				intr_stats->num_rxdma2host_ring_masks++;
			budget -=  work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}

		/* Low-threshold replenish of the Rx refill buffer ring;
		 * this path consumes no budget.
		 */
		if (int_ctx->host2rxdma_ring_mask & (1 << mac_for_pdev)) {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;
			struct dp_srng *rx_refill_buf_ring;
			struct rx_desc_pool *rx_desc_pool;

			rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
			if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
				rx_refill_buf_ring =
					&soc->rx_refill_buf_ring[mac_for_pdev];
			else
				rx_refill_buf_ring =
					&soc->rx_refill_buf_ring[pdev->lmac_id];

			intr_stats->num_host2rxdma_ring_masks++;
			dp_rx_buffers_lt_replenish_simple(soc, mac_for_pdev,
							  rx_refill_buf_ring,
							  rx_desc_pool,
							  0,
							  &desc_list,
							  &tail);
		}

	}

	/* monitor buffer refills are not budgeted either */
	if (int_ctx->host2rxdma_mon_ring_mask)
		dp_rx_mon_buf_refill(int_ctx);

	if (int_ctx->host2txmon_ring_mask)
		dp_tx_mon_buf_refill(int_ctx);

budget_done:
	return total_budget - budget;
}
2655 
2656 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
2657 /**
2658  * dp_service_near_full_srngs() - Bottom half handler to process the near
2659  *				full IRQ on a SRNG
2660  * @dp_ctx: Datapath SoC handle
2661  * @dp_budget: Number of SRNGs which can be processed in a single attempt
2662  *		without rescheduling
2663  * @cpu: cpu id
2664  *
2665  * Return: remaining budget/quota for the soc device
2666  */
static
uint32_t dp_service_near_full_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
{
	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
	struct dp_soc *soc = int_ctx->soc;

	/*
	 * dp_service_near_full_srngs arch ops should be initialized always
	 * if the NEAR FULL IRQ feature is enabled.
	 */
	/* delegate entirely to the architecture-specific handler */
	return soc->arch_ops.dp_service_near_full_srngs(soc, int_ctx,
							dp_budget);
}
2680 #endif
2681 
2682 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2683 
2684 /*
2685  * dp_srng_get_cpu() - Get the smp processor id for srng processing
2686  *
2687  * Return: smp processor id
2688  */
static inline int dp_srng_get_cpu(void)
{
	/* host mode: ring servicing is tracked per current CPU */
	return smp_processor_id();
}
2693 
2694 /*
2695  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
2696  * @dp_ctx: DP SOC handle
2697  * @budget: Number of frames/descriptors that can be processed in one shot
2698  * @cpu: CPU on which this instance is running
2699  *
2700  * Return: remaining budget/quota for the soc device
2701  */
static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
{
	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_soc *soc = int_ctx->soc;
	int ring = 0;
	int index;
	uint32_t work_done  = 0;
	int budget = dp_budget;
	uint8_t tx_mask = int_ctx->tx_ring_mask;
	uint8_t rx_mask = int_ctx->rx_ring_mask;
	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
	uint32_t remaining_quota = dp_budget;

	/* mark this CPU as servicing rings; cleared on every exit path */
	qdf_atomic_set_bit(cpu, &soc->service_rings_running);

	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x\n",
			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
			 reo_status_mask,
			 int_ctx->rx_mon_ring_mask,
			 int_ctx->host2rxdma_ring_mask,
			 int_ctx->rxdma2host_ring_mask);

	/* Process Tx completion interrupts first to return back buffers */
	for (index = 0; index < soc->num_tx_comp_rings; index++) {
		if (!(1 << wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) & tx_mask))
			continue;
		work_done = dp_tx_comp_handler(int_ctx,
					       soc,
					       soc->tx_comp_ring[index].hal_srng,
					       index, remaining_quota);
		if (work_done) {
			intr_stats->num_tx_ring_masks[index]++;
			dp_verbose_debug("tx mask 0x%x index %d, budget %d, work_done %d",
					 tx_mask, index, budget,
					 work_done);
		}
		budget -= work_done;
		if (budget <= 0)
			goto budget_done;

		remaining_quota = budget;
	}

	/* Process REO Exception ring interrupt */
	if (rx_err_mask) {
		work_done = dp_rx_err_process(int_ctx, soc,
					      soc->reo_exception_ring.hal_srng,
					      remaining_quota);

		if (work_done) {
			intr_stats->num_rx_err_ring_masks++;
			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -=  work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx WBM release ring interrupt */
	if (rx_wbm_rel_mask) {
		work_done = dp_rx_wbm_err_process(int_ctx, soc,
						  soc->rx_rel_ring.hal_srng,
						  remaining_quota);

		if (work_done) {
			intr_stats->num_rx_wbm_rel_ring_masks++;
			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -=  work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx interrupts: one REO destination ring per set mask bit */
	if (rx_mask) {
		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
			if (!(rx_mask & (1 << ring)))
				continue;
			work_done = soc->arch_ops.dp_rx_process(int_ctx,
						  soc->reo_dest_ring[ring].hal_srng,
						  ring,
						  remaining_quota);
			if (work_done) {
				intr_stats->num_rx_ring_masks[ring]++;
				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
						 rx_mask, ring,
						 work_done, budget);
				budget -=  work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}
		}
	}

	if (reo_status_mask) {
		if (dp_reo_status_ring_handler(int_ctx, soc))
			int_ctx->intr_stats.num_reo_status_ring_masks++;
	}

	/* LMAC rings are polled here only when the vdev timer is not running */
	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
		if (work_done) {
			budget -=  work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}
	}

	qdf_lro_flush(int_ctx->lro_ctx);
	intr_stats->num_masks++;

budget_done:
	qdf_atomic_clear_bit(cpu, &soc->service_rings_running);

	if (soc->notify_fw_callback)
		soc->notify_fw_callback(soc);

	return dp_budget - budget;
}
2834 
2835 #else /* QCA_HOST_MODE_WIFI_DISABLED */
2836 
2837 /*
2838  * dp_srng_get_cpu() - Get the smp processor id for srng processing
2839  *
2840  * Return: smp processor id
2841  */
static inline int dp_srng_get_cpu(void)
{
	/* host mode disabled: CPU id is unused, always report 0 */
	return 0;
}
2846 
2847 /*
2848  * dp_service_srngs() - Top level handler for DP Monitor Ring interrupts
2849  * @dp_ctx: DP SOC handle
2850  * @budget: Number of frames/descriptors that can be processed in one shot
2851  *
2852  * Return: remaining budget/quota for the soc device
2853  */
static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
{
	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_soc *soc = int_ctx->soc;
	uint32_t remaining_quota = dp_budget;
	uint32_t work_done  = 0;
	int budget = dp_budget;
	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;

	/* host mode disabled: only REO status and LMAC rings are serviced */
	if (reo_status_mask) {
		if (dp_reo_status_ring_handler(int_ctx, soc))
			int_ctx->intr_stats.num_reo_status_ring_masks++;
	}

	/* LMAC rings are polled here only when the vdev timer is not running */
	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
		if (work_done) {
			budget -=  work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}
	}

	qdf_lro_flush(int_ctx->lro_ctx);
	intr_stats->num_masks++;

budget_done:
	return dp_budget - budget;
}
2885 
2886 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2887 
2888 /* dp_interrupt_timer()- timer poll for interrupts
2889  *
2890  * @arg: SoC Handle
2891  *
2892  * Return:
2893  *
2894  */
static void dp_interrupt_timer(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *) arg;
	struct dp_pdev *pdev = soc->pdev_list[0];
	enum timer_yield_status yield = DP_TIMER_NO_YIELD;
	uint32_t work_done  = 0, total_work_done = 0;
	int budget = 0xffff, i;
	uint32_t remaining_quota = budget;
	uint64_t start_time;
	uint32_t lmac_id = DP_MON_INVALID_LMAC_ID;
	uint8_t dp_intr_id = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
	uint32_t lmac_iter;
	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
	enum reg_wifi_band mon_band;
	int cpu = dp_srng_get_cpu();

	/*
	 * this logic makes all data path interfacing rings (UMAC/LMAC)
	 * and Monitor rings polling mode when NSS offload is disabled
	 */
	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx) &&
	    !wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
		if (qdf_atomic_read(&soc->cmn_init_done)) {
			/* service every interrupt context, then re-arm */
			for (i = 0; i < wlan_cfg_get_num_contexts(
						soc->wlan_cfg_ctx); i++)
				dp_service_srngs(&soc->intr_ctx[i], 0xffff,
						 cpu);

			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
		}
		return;
	}

	/* soc not fully initialized yet: do not re-arm the timer */
	if (!qdf_atomic_read(&soc->cmn_init_done))
		return;

	/* resolve the lmac/interrupt context of the current monitor band */
	if (dp_monitor_is_chan_band_known(pdev)) {
		mon_band = dp_monitor_get_chan_band(pdev);
		lmac_id = pdev->ch_band_lmac_id_mapping[mon_band];
		if (qdf_likely(lmac_id != DP_MON_INVALID_LMAC_ID)) {
			dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id];
			dp_srng_record_timer_entry(soc, dp_intr_id);
		}
	}

	start_time = qdf_get_log_timestamp();
	dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);

	/* reap monitor rings until the budget or time slice is exhausted */
	while (yield == DP_TIMER_NO_YIELD) {
		for (lmac_iter = 0; lmac_iter < max_mac_rings; lmac_iter++) {
			if (lmac_iter == lmac_id)
				work_done = dp_monitor_process(soc,
						&soc->intr_ctx[dp_intr_id],
						lmac_iter, remaining_quota);
			else
				work_done =
					dp_monitor_drop_packets_for_mac(pdev,
							     lmac_iter,
							     remaining_quota);
			if (work_done) {
				budget -=  work_done;
				if (budget <= 0) {
					yield = DP_TIMER_WORK_EXHAUST;
					goto budget_done;
				}
				remaining_quota = budget;
				total_work_done += work_done;
			}
		}

		yield = dp_should_timer_irq_yield(soc, total_work_done,
						  start_time);
		total_work_done = 0;
	}

budget_done:
	/* re-arm quickly when work remains, else at the normal poll period */
	if (yield == DP_TIMER_WORK_EXHAUST ||
	    yield == DP_TIMER_TIME_EXHAUST)
		qdf_timer_mod(&soc->int_timer, 1);
	else
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

	if (lmac_id != DP_MON_INVALID_LMAC_ID)
		dp_srng_record_timer_exit(soc, dp_intr_id);
}
2980 
2981 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
2982 static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
2983 					struct dp_intr *intr_ctx)
2984 {
2985 	if (intr_ctx->rx_mon_ring_mask)
2986 		return true;
2987 
2988 	return false;
2989 }
2990 #else
static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
					struct dp_intr *intr_ctx)
{
	/* event history disabled: no monitor intr-id/lmac mapping is kept */
	return false;
}
2996 #endif
2997 
2998 /*
2999  * dp_soc_attach_poll() - Register handlers for DP interrupts
3000  * @txrx_soc: DP SOC handle
3001  *
 * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
3003  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
3004  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
3005  *
3006  * Return: 0 for success, nonzero for failure.
3007  */
static QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;
	int lmac_id = 0;

	/* invalidate the monitor interrupt-id to lmac mapping table */
	qdf_mem_set(&soc->mon_intr_id_lmac_map,
		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
	soc->intr_mode = DP_INTR_POLL;

	/* populate every interrupt context with its configured ring masks */
	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].dp_intr_id = i;
		soc->intr_ctx[i].tx_ring_mask =
			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_ring_mask =
			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_mon_ring_mask =
			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_err_ring_mask =
			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].reo_status_ring_mask =
			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rxdma2host_ring_mask =
			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].soc = soc;
		soc->intr_ctx[i].lro_ctx = qdf_lro_init();

		/* map monitor-capable contexts to successive lmac ids */
		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
			hif_event_history_init(soc->hif_handle, i);
			soc->mon_intr_id_lmac_map[lmac_id] = i;
			lmac_id++;
		}
	}

	/* in poll mode all ring servicing is driven by dp_interrupt_timer() */
	qdf_timer_init(soc->osdev, &soc->int_timer,
			dp_interrupt_timer, (void *)soc,
			QDF_TIMER_TYPE_WAKE_APPS);

	return QDF_STATUS_SUCCESS;
}
3050 
3051 /**
3052  * dp_soc_set_interrupt_mode() - Set the interrupt mode in soc
3053  * soc: DP soc handle
3054  *
3055  * Set the appropriate interrupt mode flag in the soc
3056  */
3057 static void dp_soc_set_interrupt_mode(struct dp_soc *soc)
3058 {
3059 	uint32_t msi_base_data, msi_vector_start;
3060 	int msi_vector_count, ret;
3061 
3062 	soc->intr_mode = DP_INTR_INTEGRATED;
3063 
3064 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
3065 	    (dp_is_monitor_mode_using_poll(soc) &&
3066 	     soc->cdp_soc.ol_ops->get_con_mode &&
3067 	     soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)) {
3068 		soc->intr_mode = DP_INTR_POLL;
3069 	} else {
3070 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
3071 						  &msi_vector_count,
3072 						  &msi_base_data,
3073 						  &msi_vector_start);
3074 		if (ret)
3075 			return;
3076 
3077 		soc->intr_mode = DP_INTR_MSI;
3078 	}
3079 }
3080 
3081 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc);
3082 #if defined(DP_INTR_POLL_BOTH)
3083 /*
3084  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
3085  * @txrx_soc: DP SOC handle
3086  *
3087  * Call the appropriate attach function based on the mode of operation.
3088  * This is a WAR for enabling monitor mode.
3089  *
3090  * Return: 0 for success. nonzero for failure.
3091  */
3092 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
3093 {
3094 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3095 
3096 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
3097 	    (dp_is_monitor_mode_using_poll(soc) &&
3098 	     soc->cdp_soc.ol_ops->get_con_mode &&
3099 	     soc->cdp_soc.ol_ops->get_con_mode() ==
3100 	     QDF_GLOBAL_MONITOR_MODE)) {
3101 		dp_info("Poll mode");
3102 		return dp_soc_attach_poll(txrx_soc);
3103 	} else {
3104 		dp_info("Interrupt  mode");
3105 		return dp_soc_interrupt_attach(txrx_soc);
3106 	}
3107 }
3108 #else
3109 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
3110 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
3111 {
3112 	return dp_soc_attach_poll(txrx_soc);
3113 }
3114 #else
3115 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
3116 {
3117 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3118 
3119 	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx))
3120 		return dp_soc_attach_poll(txrx_soc);
3121 	else
3122 		return dp_soc_interrupt_attach(txrx_soc);
3123 }
3124 #endif
3125 #endif
3126 
#ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
/**
 * dp_soc_interrupt_map_calculate_wifi3_pci_legacy()
 * Calculate interrupt map for legacy (INTx) interrupts
 * @soc: DP soc handle
 * @intr_ctx_num: Interrupt context number
 * @irq_id_map: [out] IRQ map filled by this function
 * @num_irq_r: [out] Number of interrupts assigned for this context
 *
 * For every ring bit set in this context's cfg masks, append the
 * corresponding virtual irq id to @irq_id_map. Also forces the soc
 * interrupt mode to legacy virtual irq.
 *
 * Return: void
 */
static void dp_soc_interrupt_map_calculate_wifi3_pci_legacy(struct dp_soc *soc,
							    int intr_ctx_num,
							    int *irq_id_map,
							    int *num_irq_r)
{
	int j;
	int num_irq = 0;
	int tx_mask = wlan_cfg_get_tx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask = wlan_cfg_get_rx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	soc->intr_mode = DP_INTR_LEGACY_VIRTUAL_IRQ;
	/* The (base - j) arithmetic presumably relies on the hw irq id enum
	 * laying out per-ring irq ids in descending order from the named
	 * base id — TODO confirm against the irq enum definition.
	 */
	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
		if (tx_mask & (1 << j))
			irq_id_map[num_irq++] = (wbm2sw0_release - j);
		if (rx_mask & (1 << j))
			irq_id_map[num_irq++] = (reo2sw1_intr - j);
		if (rx_mon_mask & (1 << j))
			irq_id_map[num_irq++] = (rxmon2sw_p0_dest0 - j);
		if (rx_err_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (reo2sw0_intr - j);
		if (rx_wbm_rel_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (wbm2sw5_release - j);
		if (reo_status_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (reo_status - j);
		if (rxdma2host_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (rxdma2sw_dst_ring0 - j);
		if (host2rxdma_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (sw2rxdma_0 - j);
		if (host2rxdma_mon_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (sw2rxmon_src_ring - j);
	}
	*num_irq_r = num_irq;
}
#else
/**
 * dp_soc_interrupt_map_calculate_wifi3_pci_legacy()
 * Stub when legacy interrupt support is compiled out: assigns no irqs
 * and leaves @irq_id_map untouched.
 * @soc: DP soc handle
 * @intr_ctx_num: Interrupt context number
 * @irq_id_map: IRQ map (unused)
 * @num_irq_r: Number of interrupts assigned for this context (unused)
 *
 * Return: void
 */
static void dp_soc_interrupt_map_calculate_wifi3_pci_legacy(struct dp_soc *soc,
							    int intr_ctx_num,
							    int *irq_id_map,
							    int *num_irq_r)
{
}
#endif
3204 
/**
 * dp_soc_interrupt_map_calculate_integrated() - irq map for integrated mode
 * @soc: DP soc handle
 * @intr_ctx_num: Interrupt context number
 * @irq_id_map: [out] IRQ ids for the rings this context services
 * @num_irq_r: [out] Number of irq ids written to @irq_id_map
 *
 * For each ring bit set in this context's cfg masks, append the matching
 * integrated (SoC) irq id. Also forces soc->intr_mode to integrated.
 */
static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
{
	int j;
	int num_irq = 0;

	int tx_mask =
		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask =
		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask =
		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);

	soc->intr_mode = DP_INTR_INTEGRATED;

	/* The (base - j) arithmetic presumably relies on per-ring irq ids
	 * descending from the named base in the irq enum — TODO confirm.
	 */
	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {

		if (tx_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(wbm2host_tx_completions_ring1 - j);
		}

		if (rx_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(reo2host_destination_ring1 - j);
		}

		if (rxdma2host_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				rxdma2host_destination_ring_mac1 - j;
		}

		if (host2rxdma_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				host2rxdma_host_buf_ring_mac1 -	j;
		}

		if (host2rxdma_mon_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				host2rxdma_monitor_ring1 - j;
		}

		/* A monitor ring bit maps to three distinct irq sources */
		if (rx_mon_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				ppdu_end_interrupts_mac1 - j;
			irq_id_map[num_irq++] =
				rxdma2host_monitor_status_ring_mac1 - j;
			irq_id_map[num_irq++] =
				rxdma2host_monitor_destination_mac1 - j;
		}

		if (rx_wbm_rel_ring_mask & (1 << j))
			irq_id_map[num_irq++] = wbm2host_rx_release;

		if (rx_err_ring_mask & (1 << j))
			irq_id_map[num_irq++] = reo2host_exception;

		if (reo_status_ring_mask & (1 << j))
			irq_id_map[num_irq++] = reo2host_status;

	}
	*num_irq_r = num_irq;
}
3280 
3281 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
3282 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
3283 		int msi_vector_count, int msi_vector_start)
3284 {
3285 	int tx_mask = wlan_cfg_get_tx_ring_mask(
3286 					soc->wlan_cfg_ctx, intr_ctx_num);
3287 	int rx_mask = wlan_cfg_get_rx_ring_mask(
3288 					soc->wlan_cfg_ctx, intr_ctx_num);
3289 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
3290 					soc->wlan_cfg_ctx, intr_ctx_num);
3291 	int tx_mon_mask = wlan_cfg_get_tx_mon_ring_mask(
3292 					soc->wlan_cfg_ctx, intr_ctx_num);
3293 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
3294 					soc->wlan_cfg_ctx, intr_ctx_num);
3295 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
3296 					soc->wlan_cfg_ctx, intr_ctx_num);
3297 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
3298 					soc->wlan_cfg_ctx, intr_ctx_num);
3299 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
3300 					soc->wlan_cfg_ctx, intr_ctx_num);
3301 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
3302 					soc->wlan_cfg_ctx, intr_ctx_num);
3303 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
3304 					soc->wlan_cfg_ctx, intr_ctx_num);
3305 	int rx_near_full_grp_1_mask =
3306 		wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
3307 						     intr_ctx_num);
3308 	int rx_near_full_grp_2_mask =
3309 		wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
3310 						     intr_ctx_num);
3311 	int tx_ring_near_full_mask =
3312 		wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
3313 						    intr_ctx_num);
3314 
3315 	int host2txmon_ring_mask =
3316 		wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx,
3317 						  intr_ctx_num);
3318 	unsigned int vector =
3319 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
3320 	int num_irq = 0;
3321 
3322 	soc->intr_mode = DP_INTR_MSI;
3323 
3324 	if (tx_mask | rx_mask | rx_mon_mask | tx_mon_mask | rx_err_ring_mask |
3325 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask |
3326 	    host2rxdma_ring_mask | host2rxdma_mon_ring_mask |
3327 	    rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
3328 	    tx_ring_near_full_mask | host2txmon_ring_mask)
3329 		irq_id_map[num_irq++] =
3330 			pld_get_msi_irq(soc->osdev->dev, vector);
3331 
3332 	*num_irq_r = num_irq;
3333 }
3334 
3335 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
3336 				    int *irq_id_map, int *num_irq)
3337 {
3338 	int msi_vector_count, ret;
3339 	uint32_t msi_base_data, msi_vector_start;
3340 
3341 	if (pld_get_enable_intx(soc->osdev->dev)) {
3342 		return dp_soc_interrupt_map_calculate_wifi3_pci_legacy(soc,
3343 				intr_ctx_num, irq_id_map, num_irq);
3344 	}
3345 
3346 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
3347 					    &msi_vector_count,
3348 					    &msi_base_data,
3349 					    &msi_vector_start);
3350 	if (ret)
3351 		return dp_soc_interrupt_map_calculate_integrated(soc,
3352 				intr_ctx_num, irq_id_map, num_irq);
3353 
3354 	else
3355 		dp_soc_interrupt_map_calculate_msi(soc,
3356 				intr_ctx_num, irq_id_map, num_irq,
3357 				msi_vector_count, msi_vector_start);
3358 }
3359 
#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * dp_soc_near_full_interrupt_attach() - Register handler for DP near-full irq
 * @soc: DP soc handle
 * @num_irq: number of irq ids in @irq_id_map
 * @irq_id_map: IRQ map
 * @intr_id: interrupt context ID
 *
 * Return: 0 for success. nonzero for failure.
 */
static inline int
dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
				  int irq_id_map[], int intr_id)
{
	struct dp_intr *intr_ctx = &soc->intr_ctx[intr_id];

	return hif_register_ext_group(soc->hif_handle, num_irq, irq_id_map,
				      dp_service_near_full_srngs,
				      intr_ctx, "dp_nf_intr",
				      HIF_EXEC_NAPI_TYPE,
				      QCA_NAPI_DEF_SCALE_BIN_SHIFT);
}
#else
/* Near-full irq support compiled out: report success without registering */
static inline int
dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
				  int *irq_id_map, int intr_id)
{
	return 0;
}
#endif
3389 
3390 #ifdef DP_CON_MON_MSI_SKIP_SET
3391 static inline bool dp_skip_rx_mon_ring_mask_set(struct dp_soc *soc)
3392 {
3393 	return !!(soc->cdp_soc.ol_ops->get_con_mode() !=
3394 			QDF_GLOBAL_MONITOR_MODE);
3395 }
3396 #else
3397 static inline bool dp_skip_rx_mon_ring_mask_set(struct dp_soc *soc)
3398 {
3399 	return false;
3400 }
3401 #endif
3402 
3403 /*
3404  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
3405  * @txrx_soc: DP SOC handle
3406  *
3407  * Return: none
3408  */
3409 static void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
3410 {
3411 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3412 	int i;
3413 
3414 	if (soc->intr_mode == DP_INTR_POLL) {
3415 		qdf_timer_free(&soc->int_timer);
3416 	} else {
3417 		hif_deconfigure_ext_group_interrupts(soc->hif_handle);
3418 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
3419 		hif_deregister_exec_group(soc->hif_handle, "dp_nf_intr");
3420 	}
3421 
3422 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
3423 		soc->intr_ctx[i].tx_ring_mask = 0;
3424 		soc->intr_ctx[i].rx_ring_mask = 0;
3425 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
3426 		soc->intr_ctx[i].rx_err_ring_mask = 0;
3427 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
3428 		soc->intr_ctx[i].reo_status_ring_mask = 0;
3429 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
3430 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
3431 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
3432 		soc->intr_ctx[i].rx_near_full_grp_1_mask = 0;
3433 		soc->intr_ctx[i].rx_near_full_grp_2_mask = 0;
3434 		soc->intr_ctx[i].tx_ring_near_full_mask = 0;
3435 		soc->intr_ctx[i].tx_mon_ring_mask = 0;
3436 		soc->intr_ctx[i].host2txmon_ring_mask = 0;
3437 		soc->intr_ctx[i].umac_reset_intr_mask = 0;
3438 
3439 		hif_event_history_deinit(soc->hif_handle, i);
3440 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
3441 	}
3442 
3443 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
3444 		    sizeof(soc->mon_intr_id_lmac_map),
3445 		    DP_MON_INVALID_LMAC_ID);
3446 }
3447 
3448 /*
3449  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
3450  * @txrx_soc: DP SOC handle
3451  *
3452  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
3453  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
3454  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
3455  *
3456  * Return: 0 for success. nonzero for failure.
3457  */
3458 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
3459 {
3460 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3461 
3462 	int i = 0;
3463 	int num_irq = 0;
3464 	int rx_err_ring_intr_ctxt_id = HIF_MAX_GROUP;
3465 	int lmac_id = 0;
3466 	int napi_scale;
3467 
3468 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
3469 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
3470 
3471 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
3472 		int ret = 0;
3473 
3474 		/* Map of IRQ ids registered with one interrupt context */
3475 		int irq_id_map[HIF_MAX_GRP_IRQ];
3476 
3477 		int tx_mask =
3478 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
3479 		int rx_mask =
3480 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
3481 		int rx_mon_mask =
3482 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
3483 		int tx_mon_ring_mask =
3484 			wlan_cfg_get_tx_mon_ring_mask(soc->wlan_cfg_ctx, i);
3485 		int rx_err_ring_mask =
3486 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
3487 		int rx_wbm_rel_ring_mask =
3488 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
3489 		int reo_status_ring_mask =
3490 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
3491 		int rxdma2host_ring_mask =
3492 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
3493 		int host2rxdma_ring_mask =
3494 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
3495 		int host2rxdma_mon_ring_mask =
3496 			wlan_cfg_get_host2rxdma_mon_ring_mask(
3497 				soc->wlan_cfg_ctx, i);
3498 		int rx_near_full_grp_1_mask =
3499 			wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
3500 							     i);
3501 		int rx_near_full_grp_2_mask =
3502 			wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
3503 							     i);
3504 		int tx_ring_near_full_mask =
3505 			wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
3506 							    i);
3507 		int host2txmon_ring_mask =
3508 			wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx, i);
3509 		int umac_reset_intr_mask =
3510 			wlan_cfg_get_umac_reset_intr_mask(soc->wlan_cfg_ctx, i);
3511 
3512 		if (dp_skip_rx_mon_ring_mask_set(soc))
3513 			rx_mon_mask = 0;
3514 
3515 		soc->intr_ctx[i].dp_intr_id = i;
3516 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
3517 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
3518 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
3519 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
3520 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
3521 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
3522 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
3523 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
3524 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
3525 			 host2rxdma_mon_ring_mask;
3526 		soc->intr_ctx[i].rx_near_full_grp_1_mask =
3527 						rx_near_full_grp_1_mask;
3528 		soc->intr_ctx[i].rx_near_full_grp_2_mask =
3529 						rx_near_full_grp_2_mask;
3530 		soc->intr_ctx[i].tx_ring_near_full_mask =
3531 						tx_ring_near_full_mask;
3532 		soc->intr_ctx[i].tx_mon_ring_mask = tx_mon_ring_mask;
3533 		soc->intr_ctx[i].host2txmon_ring_mask = host2txmon_ring_mask;
3534 		soc->intr_ctx[i].umac_reset_intr_mask = umac_reset_intr_mask;
3535 
3536 		soc->intr_ctx[i].soc = soc;
3537 
3538 		num_irq = 0;
3539 
3540 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
3541 					       &num_irq);
3542 
3543 		if (rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
3544 		    tx_ring_near_full_mask) {
3545 			dp_soc_near_full_interrupt_attach(soc, num_irq,
3546 							  irq_id_map, i);
3547 		} else {
3548 			napi_scale = wlan_cfg_get_napi_scale_factor(
3549 							    soc->wlan_cfg_ctx);
3550 			if (!napi_scale)
3551 				napi_scale = QCA_NAPI_DEF_SCALE_BIN_SHIFT;
3552 
3553 			ret = hif_register_ext_group(soc->hif_handle,
3554 				num_irq, irq_id_map, dp_service_srngs,
3555 				&soc->intr_ctx[i], "dp_intr",
3556 				HIF_EXEC_NAPI_TYPE, napi_scale);
3557 		}
3558 
3559 		dp_debug(" int ctx %u num_irq %u irq_id_map %u %u",
3560 			 i, num_irq, irq_id_map[0], irq_id_map[1]);
3561 
3562 		if (ret) {
3563 			dp_init_err("%pK: failed, ret = %d", soc, ret);
3564 			dp_soc_interrupt_detach(txrx_soc);
3565 			return QDF_STATUS_E_FAILURE;
3566 		}
3567 
3568 		hif_event_history_init(soc->hif_handle, i);
3569 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
3570 
3571 		if (rx_err_ring_mask)
3572 			rx_err_ring_intr_ctxt_id = i;
3573 
3574 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
3575 			soc->mon_intr_id_lmac_map[lmac_id] = i;
3576 			lmac_id++;
3577 		}
3578 	}
3579 
3580 	hif_configure_ext_group_interrupts(soc->hif_handle);
3581 	if (rx_err_ring_intr_ctxt_id != HIF_MAX_GROUP)
3582 		hif_config_irq_clear_cpu_affinity(soc->hif_handle,
3583 						  rx_err_ring_intr_ctxt_id, 0);
3584 
3585 	return QDF_STATUS_SUCCESS;
3586 }
3587 
/* Average sizing assumptions used to dimension the common hw link
 * descriptor pool in dp_hw_link_desc_pool_banks_alloc().
 */
#define AVG_MAX_MPDUS_PER_TID 128
#define AVG_TIDS_PER_CLIENT 2
#define AVG_FLOWS_PER_TID 2
#define AVG_MSDUS_PER_FLOW 128
#define AVG_MSDUS_PER_MPDU 4
3593 
3594 /*
3595  * dp_hw_link_desc_pool_banks_free() - Free h/w link desc pool banks
3596  * @soc: DP SOC handle
3597  * @mac_id: mac id
3598  *
3599  * Return: none
3600  */
3601 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id)
3602 {
3603 	struct qdf_mem_multi_page_t *pages;
3604 
3605 	if (mac_id != WLAN_INVALID_PDEV_ID) {
3606 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3607 	} else {
3608 		pages = &soc->link_desc_pages;
3609 	}
3610 
3611 	if (!pages) {
3612 		dp_err("can not get link desc pages");
3613 		QDF_ASSERT(0);
3614 		return;
3615 	}
3616 
3617 	if (pages->dma_pages) {
3618 		wlan_minidump_remove((void *)
3619 				     pages->dma_pages->page_v_addr_start,
3620 				     pages->num_pages * pages->page_size,
3621 				     soc->ctrl_psoc,
3622 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3623 				     "hw_link_desc_bank");
3624 		dp_desc_multi_pages_mem_free(soc, DP_HW_LINK_DESC_TYPE,
3625 					     pages, 0, false);
3626 	}
3627 }
3628 
3629 qdf_export_symbol(dp_hw_link_desc_pool_banks_free);
3630 
3631 /*
3632  * dp_hw_link_desc_pool_banks_alloc() - Allocate h/w link desc pool banks
3633  * @soc: DP SOC handle
3634  * @mac_id: mac id
3635  *
3636  * Allocates memory pages for link descriptors, the page size is 4K for
3637  * MCL and 2MB for WIN. if the mac_id is invalid link descriptor pages are
3638  * allocated for regular RX/TX and if the there is a proper mac_id link
3639  * descriptors are allocated for RX monitor mode.
3640  *
3641  * Return: QDF_STATUS_SUCCESS: Success
3642  *	   QDF_STATUS_E_FAILURE: Failure
3643  */
3644 QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc, uint32_t mac_id)
3645 {
3646 	hal_soc_handle_t hal_soc = soc->hal_soc;
3647 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
3648 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
3649 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
3650 	uint32_t num_mpdus_per_link_desc = hal_num_mpdus_per_link_desc(hal_soc);
3651 	uint32_t num_msdus_per_link_desc = hal_num_msdus_per_link_desc(hal_soc);
3652 	uint32_t num_mpdu_links_per_queue_desc =
3653 		hal_num_mpdu_links_per_queue_desc(hal_soc);
3654 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
3655 	uint32_t *total_link_descs, total_mem_size;
3656 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
3657 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
3658 	uint32_t num_entries;
3659 	struct qdf_mem_multi_page_t *pages;
3660 	struct dp_srng *dp_srng;
3661 	uint8_t minidump_str[MINIDUMP_STR_SIZE];
3662 
3663 	/* Only Tx queue descriptors are allocated from common link descriptor
3664 	 * pool Rx queue descriptors are not included in this because (REO queue
3665 	 * extension descriptors) they are expected to be allocated contiguously
3666 	 * with REO queue descriptors
3667 	 */
3668 	if (mac_id != WLAN_INVALID_PDEV_ID) {
3669 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3670 		/* dp_monitor_get_link_desc_pages returns NULL only
3671 		 * if monitor SOC is  NULL
3672 		 */
3673 		if (!pages) {
3674 			dp_err("can not get link desc pages");
3675 			QDF_ASSERT(0);
3676 			return QDF_STATUS_E_FAULT;
3677 		}
3678 		dp_srng = &soc->rxdma_mon_desc_ring[mac_id];
3679 		num_entries = dp_srng->alloc_size /
3680 			hal_srng_get_entrysize(soc->hal_soc,
3681 					       RXDMA_MONITOR_DESC);
3682 		total_link_descs = dp_monitor_get_total_link_descs(soc, mac_id);
3683 		qdf_str_lcopy(minidump_str, "mon_link_desc_bank",
3684 			      MINIDUMP_STR_SIZE);
3685 	} else {
3686 		num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3687 			AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
3688 
3689 		num_mpdu_queue_descs = num_mpdu_link_descs /
3690 			num_mpdu_links_per_queue_desc;
3691 
3692 		num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3693 			AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
3694 			num_msdus_per_link_desc;
3695 
3696 		num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3697 			AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
3698 
3699 		num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
3700 			num_tx_msdu_link_descs + num_rx_msdu_link_descs;
3701 
3702 		pages = &soc->link_desc_pages;
3703 		total_link_descs = &soc->total_link_descs;
3704 		qdf_str_lcopy(minidump_str, "link_desc_bank",
3705 			      MINIDUMP_STR_SIZE);
3706 	}
3707 
3708 	/* If link descriptor banks are allocated, return from here */
3709 	if (pages->num_pages)
3710 		return QDF_STATUS_SUCCESS;
3711 
3712 	/* Round up to power of 2 */
3713 	*total_link_descs = 1;
3714 	while (*total_link_descs < num_entries)
3715 		*total_link_descs <<= 1;
3716 
3717 	dp_init_info("%pK: total_link_descs: %u, link_desc_size: %d",
3718 		     soc, *total_link_descs, link_desc_size);
3719 	total_mem_size =  *total_link_descs * link_desc_size;
3720 	total_mem_size += link_desc_align;
3721 
3722 	dp_init_info("%pK: total_mem_size: %d",
3723 		     soc, total_mem_size);
3724 
3725 	dp_set_max_page_size(pages, max_alloc_size);
3726 	dp_desc_multi_pages_mem_alloc(soc, DP_HW_LINK_DESC_TYPE,
3727 				      pages,
3728 				      link_desc_size,
3729 				      *total_link_descs,
3730 				      0, false);
3731 	if (!pages->num_pages) {
3732 		dp_err("Multi page alloc fail for hw link desc pool");
3733 		return QDF_STATUS_E_FAULT;
3734 	}
3735 
3736 	wlan_minidump_log(pages->dma_pages->page_v_addr_start,
3737 			  pages->num_pages * pages->page_size,
3738 			  soc->ctrl_psoc,
3739 			  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3740 			  "hw_link_desc_bank");
3741 
3742 	return QDF_STATUS_SUCCESS;
3743 }
3744 
3745 /*
3746  * dp_hw_link_desc_ring_free() - Free h/w link desc rings
3747  * @soc: DP SOC handle
3748  *
3749  * Return: none
3750  */
3751 static void dp_hw_link_desc_ring_free(struct dp_soc *soc)
3752 {
3753 	uint32_t i;
3754 	uint32_t size = soc->wbm_idle_scatter_buf_size;
3755 	void *vaddr = soc->wbm_idle_link_ring.base_vaddr_unaligned;
3756 	qdf_dma_addr_t paddr;
3757 
3758 	if (soc->wbm_idle_scatter_buf_base_vaddr[0]) {
3759 		for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
3760 			vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
3761 			paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
3762 			if (vaddr) {
3763 				qdf_mem_free_consistent(soc->osdev,
3764 							soc->osdev->dev,
3765 							size,
3766 							vaddr,
3767 							paddr,
3768 							0);
3769 				vaddr = NULL;
3770 			}
3771 		}
3772 	} else {
3773 		wlan_minidump_remove(soc->wbm_idle_link_ring.base_vaddr_unaligned,
3774 				     soc->wbm_idle_link_ring.alloc_size,
3775 				     soc->ctrl_psoc,
3776 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3777 				     "wbm_idle_link_ring");
3778 		dp_srng_free(soc, &soc->wbm_idle_link_ring);
3779 	}
3780 }
3781 
3782 /*
3783  * dp_hw_link_desc_ring_alloc() - Allocate hw link desc rings
3784  * @soc: DP SOC handle
3785  *
3786  * Allocate memory for WBM_IDLE_LINK srng ring if the number of
3787  * link descriptors is less then the max_allocated size. else
3788  * allocate memory for wbm_idle_scatter_buffer.
3789  *
3790  * Return: QDF_STATUS_SUCCESS: success
3791  *         QDF_STATUS_E_NO_MEM: No memory (Failure)
3792  */
3793 static QDF_STATUS dp_hw_link_desc_ring_alloc(struct dp_soc *soc)
3794 {
3795 	uint32_t entry_size, i;
3796 	uint32_t total_mem_size;
3797 	qdf_dma_addr_t *baseaddr = NULL;
3798 	struct dp_srng *dp_srng;
3799 	uint32_t ring_type;
3800 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
3801 	uint32_t tlds;
3802 
3803 	ring_type = WBM_IDLE_LINK;
3804 	dp_srng = &soc->wbm_idle_link_ring;
3805 	tlds = soc->total_link_descs;
3806 
3807 	entry_size = hal_srng_get_entrysize(soc->hal_soc, ring_type);
3808 	total_mem_size = entry_size * tlds;
3809 
3810 	if (total_mem_size <= max_alloc_size) {
3811 		if (dp_srng_alloc(soc, dp_srng, ring_type, tlds, 0)) {
3812 			dp_init_err("%pK: Link desc idle ring setup failed",
3813 				    soc);
3814 			goto fail;
3815 		}
3816 
3817 		wlan_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
3818 				  soc->wbm_idle_link_ring.alloc_size,
3819 				  soc->ctrl_psoc,
3820 				  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3821 				  "wbm_idle_link_ring");
3822 	} else {
3823 		uint32_t num_scatter_bufs;
3824 		uint32_t num_entries_per_buf;
3825 		uint32_t buf_size = 0;
3826 
3827 		soc->wbm_idle_scatter_buf_size =
3828 			hal_idle_list_scatter_buf_size(soc->hal_soc);
3829 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
3830 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
3831 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
3832 					soc->hal_soc, total_mem_size,
3833 					soc->wbm_idle_scatter_buf_size);
3834 
3835 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
3836 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3837 				  FL("scatter bufs size out of bounds"));
3838 			goto fail;
3839 		}
3840 
3841 		for (i = 0; i < num_scatter_bufs; i++) {
3842 			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
3843 			buf_size = soc->wbm_idle_scatter_buf_size;
3844 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
3845 				qdf_mem_alloc_consistent(soc->osdev,
3846 							 soc->osdev->dev,
3847 							 buf_size,
3848 							 baseaddr);
3849 
3850 			if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
3851 				QDF_TRACE(QDF_MODULE_ID_DP,
3852 					  QDF_TRACE_LEVEL_ERROR,
3853 					  FL("Scatter lst memory alloc fail"));
3854 				goto fail;
3855 			}
3856 		}
3857 		soc->num_scatter_bufs = num_scatter_bufs;
3858 	}
3859 	return QDF_STATUS_SUCCESS;
3860 
3861 fail:
3862 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
3863 		void *vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
3864 		qdf_dma_addr_t paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
3865 
3866 		if (vaddr) {
3867 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
3868 						soc->wbm_idle_scatter_buf_size,
3869 						vaddr,
3870 						paddr, 0);
3871 			vaddr = NULL;
3872 		}
3873 	}
3874 	return QDF_STATUS_E_NOMEM;
3875 }
3876 
3877 qdf_export_symbol(dp_hw_link_desc_pool_banks_alloc);
3878 
3879 /*
3880  * dp_hw_link_desc_ring_init() - Initialize hw link desc rings
3881  * @soc: DP SOC handle
3882  *
3883  * Return: QDF_STATUS_SUCCESS: success
3884  *         QDF_STATUS_E_FAILURE: failure
3885  */
3886 static QDF_STATUS dp_hw_link_desc_ring_init(struct dp_soc *soc)
3887 {
3888 	struct dp_srng *dp_srng = &soc->wbm_idle_link_ring;
3889 
3890 	if (dp_srng->base_vaddr_unaligned) {
3891 		if (dp_srng_init(soc, dp_srng, WBM_IDLE_LINK, 0, 0))
3892 			return QDF_STATUS_E_FAILURE;
3893 	}
3894 	return QDF_STATUS_SUCCESS;
3895 }
3896 
3897 /*
3898  * dp_hw_link_desc_ring_deinit() - Reset hw link desc rings
3899  * @soc: DP SOC handle
3900  *
3901  * Return: None
3902  */
3903 static void dp_hw_link_desc_ring_deinit(struct dp_soc *soc)
3904 {
3905 	dp_srng_deinit(soc, &soc->wbm_idle_link_ring, WBM_IDLE_LINK, 0);
3906 }
3907 
3908 /*
3909  * dp_hw_link_desc_ring_replenish() - Replenish hw link desc rings
3910  * @soc: DP SOC handle
3911  * @mac_id: mac id
3912  *
3913  * Return: None
3914  */
3915 void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id)
3916 {
3917 	uint32_t cookie = 0;
3918 	uint32_t page_idx = 0;
3919 	struct qdf_mem_multi_page_t *pages;
3920 	struct qdf_mem_dma_page_t *dma_pages;
3921 	uint32_t offset = 0;
3922 	uint32_t count = 0;
3923 	uint32_t desc_id = 0;
3924 	void *desc_srng;
3925 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
3926 	uint32_t *total_link_descs_addr;
3927 	uint32_t total_link_descs;
3928 	uint32_t scatter_buf_num;
3929 	uint32_t num_entries_per_buf = 0;
3930 	uint32_t rem_entries;
3931 	uint32_t num_descs_per_page;
3932 	uint32_t num_scatter_bufs = 0;
3933 	uint8_t *scatter_buf_ptr;
3934 	void *desc;
3935 
3936 	num_scatter_bufs = soc->num_scatter_bufs;
3937 
3938 	if (mac_id == WLAN_INVALID_PDEV_ID) {
3939 		pages = &soc->link_desc_pages;
3940 		total_link_descs = soc->total_link_descs;
3941 		desc_srng = soc->wbm_idle_link_ring.hal_srng;
3942 	} else {
3943 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3944 		/* dp_monitor_get_link_desc_pages returns NULL only
3945 		 * if monitor SOC is  NULL
3946 		 */
3947 		if (!pages) {
3948 			dp_err("can not get link desc pages");
3949 			QDF_ASSERT(0);
3950 			return;
3951 		}
3952 		total_link_descs_addr =
3953 				dp_monitor_get_total_link_descs(soc, mac_id);
3954 		total_link_descs = *total_link_descs_addr;
3955 		desc_srng = soc->rxdma_mon_desc_ring[mac_id].hal_srng;
3956 	}
3957 
3958 	dma_pages = pages->dma_pages;
3959 	do {
3960 		qdf_mem_zero(dma_pages[page_idx].page_v_addr_start,
3961 			     pages->page_size);
3962 		page_idx++;
3963 	} while (page_idx < pages->num_pages);
3964 
3965 	if (desc_srng) {
3966 		hal_srng_access_start_unlocked(soc->hal_soc, desc_srng);
3967 		page_idx = 0;
3968 		count = 0;
3969 		offset = 0;
3970 		pages = &soc->link_desc_pages;
3971 		while ((desc = hal_srng_src_get_next(soc->hal_soc,
3972 						     desc_srng)) &&
3973 			(count < total_link_descs)) {
3974 			page_idx = count / pages->num_element_per_page;
3975 			if (desc_id == pages->num_element_per_page)
3976 				desc_id = 0;
3977 
3978 			offset = count % pages->num_element_per_page;
3979 			cookie = LINK_DESC_COOKIE(desc_id, page_idx,
3980 						  soc->link_desc_id_start);
3981 
3982 			hal_set_link_desc_addr(soc->hal_soc, desc, cookie,
3983 					       dma_pages[page_idx].page_p_addr
3984 					       + (offset * link_desc_size),
3985 					       soc->idle_link_bm_id);
3986 			count++;
3987 			desc_id++;
3988 		}
3989 		hal_srng_access_end_unlocked(soc->hal_soc, desc_srng);
3990 	} else {
3991 		/* Populate idle list scatter buffers with link descriptor
3992 		 * pointers
3993 		 */
3994 		scatter_buf_num = 0;
3995 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
3996 					soc->hal_soc,
3997 					soc->wbm_idle_scatter_buf_size);
3998 
3999 		scatter_buf_ptr = (uint8_t *)(
4000 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
4001 		rem_entries = num_entries_per_buf;
4002 		pages = &soc->link_desc_pages;
4003 		page_idx = 0; count = 0;
4004 		offset = 0;
4005 		num_descs_per_page = pages->num_element_per_page;
4006 
4007 		while (count < total_link_descs) {
4008 			page_idx = count / num_descs_per_page;
4009 			offset = count % num_descs_per_page;
4010 			if (desc_id == pages->num_element_per_page)
4011 				desc_id = 0;
4012 
4013 			cookie = LINK_DESC_COOKIE(desc_id, page_idx,
4014 						  soc->link_desc_id_start);
4015 			hal_set_link_desc_addr(soc->hal_soc,
4016 					       (void *)scatter_buf_ptr,
4017 					       cookie,
4018 					       dma_pages[page_idx].page_p_addr +
4019 					       (offset * link_desc_size),
4020 					       soc->idle_link_bm_id);
4021 			rem_entries--;
4022 			if (rem_entries) {
4023 				scatter_buf_ptr += link_desc_size;
4024 			} else {
4025 				rem_entries = num_entries_per_buf;
4026 				scatter_buf_num++;
4027 				if (scatter_buf_num >= num_scatter_bufs)
4028 					break;
4029 				scatter_buf_ptr = (uint8_t *)
4030 					(soc->wbm_idle_scatter_buf_base_vaddr[
4031 					 scatter_buf_num]);
4032 			}
4033 			count++;
4034 			desc_id++;
4035 		}
4036 		/* Setup link descriptor idle list in HW */
4037 		hal_setup_link_idle_list(soc->hal_soc,
4038 			soc->wbm_idle_scatter_buf_base_paddr,
4039 			soc->wbm_idle_scatter_buf_base_vaddr,
4040 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
4041 			(uint32_t)(scatter_buf_ptr -
4042 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
4043 			scatter_buf_num-1])), total_link_descs);
4044 	}
4045 }
4046 
4047 qdf_export_symbol(dp_link_desc_ring_replenish);
4048 
4049 #ifdef IPA_OFFLOAD
4050 #define USE_1_IPA_RX_REO_RING 1
4051 #define USE_2_IPA_RX_REO_RINGS 2
4052 #define REO_DST_RING_SIZE_QCA6290 1023
4053 #ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
4054 #define REO_DST_RING_SIZE_QCA8074 1023
4055 #define REO_DST_RING_SIZE_QCN9000 2048
4056 #else
4057 #define REO_DST_RING_SIZE_QCA8074 8
4058 #define REO_DST_RING_SIZE_QCN9000 8
4059 #endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
4060 
4061 #ifdef IPA_WDI3_TX_TWO_PIPES
4062 #ifdef DP_MEMORY_OPT
/* Init the alternate IPA TX ring pair (DP_MEMORY_OPT: set up on demand) */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return dp_init_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

/* Deinit the alternate IPA TX ring pair */
static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
	dp_deinit_tx_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

/* Allocate memory for the alternate IPA TX ring pair */
static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return dp_alloc_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

/* Free memory of the alternate IPA TX ring pair */
static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
	dp_free_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}
4082 
4083 #else /* !DP_MEMORY_OPT */
/* No-op stubs: DP_MEMORY_OPT disabled, alt TX rings not managed here */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}
4092 
/* No-op stub: nothing to allocate when DP_MEMORY_OPT is disabled.
 * Fix: the return statement was missing its terminating semicolon, which
 * broke the build for IPA_WDI3_TX_TWO_PIPES && !DP_MEMORY_OPT.
 */
static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}
4097 
/* No-op stub: nothing to free when DP_MEMORY_OPT is disabled */
static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}
4101 #endif /* DP_MEMORY_OPT */
4102 
/* Program HAL with the alternate IPA TCL data ring (two TX pipes mode) */
static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
	hal_tx_init_data_ring(soc->hal_soc,
			      soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng);
}
4108 
4109 #else /* !IPA_WDI3_TX_TWO_PIPES */
/* No-op stubs: single IPA TX pipe build, no alternate TX ring exists */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}

static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
}
4131 
4132 #endif /* IPA_WDI3_TX_TWO_PIPES */
4133 
4134 #else
4135 
4136 #define REO_DST_RING_SIZE_QCA6290 1024
4137 
/* No-op stubs: IPA offload disabled, no IPA alt TX rings are managed */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}

static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
}
4159 
4160 #endif /* IPA_OFFLOAD */
4161 
4162 /*
4163  * dp_soc_reset_ring_map() - Reset cpu ring map
4164  * @soc: Datapath soc handler
4165  *
4166  * This api resets the default cpu ring map
4167  */
4168 
4169 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
4170 {
4171 	uint8_t i;
4172 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4173 
4174 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
4175 		switch (nss_config) {
4176 		case dp_nss_cfg_first_radio:
4177 			/*
4178 			 * Setting Tx ring map for one nss offloaded radio
4179 			 */
4180 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
4181 			break;
4182 
4183 		case dp_nss_cfg_second_radio:
4184 			/*
4185 			 * Setting Tx ring for two nss offloaded radios
4186 			 */
4187 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
4188 			break;
4189 
4190 		case dp_nss_cfg_dbdc:
4191 			/*
4192 			 * Setting Tx ring map for 2 nss offloaded radios
4193 			 */
4194 			soc->tx_ring_map[i] =
4195 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
4196 			break;
4197 
4198 		case dp_nss_cfg_dbtc:
4199 			/*
4200 			 * Setting Tx ring map for 3 nss offloaded radios
4201 			 */
4202 			soc->tx_ring_map[i] =
4203 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
4204 			break;
4205 
4206 		default:
4207 			dp_err("tx_ring_map failed due to invalid nss cfg");
4208 			break;
4209 		}
4210 	}
4211 }
4212 
4213 /*
4214  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
4215  * @dp_soc - DP soc handle
4216  * @ring_type - ring type
4217  * @ring_num - ring_num
4218  *
4219  * return 0 or 1
4220  */
4221 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
4222 {
4223 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4224 	uint8_t status = 0;
4225 
4226 	switch (ring_type) {
4227 	case WBM2SW_RELEASE:
4228 	case REO_DST:
4229 	case RXDMA_BUF:
4230 	case REO_EXCEPTION:
4231 		status = ((nss_config) & (1 << ring_num));
4232 		break;
4233 	default:
4234 		break;
4235 	}
4236 
4237 	return status;
4238 }
4239 
4240 /*
4241  * dp_soc_disable_unused_mac_intr_mask() - reset interrupt mask for
4242  *					  unused WMAC hw rings
4243  * @dp_soc - DP Soc handle
4244  * @mac_num - wmac num
4245  *
4246  * Return: Return void
4247  */
4248 static void dp_soc_disable_unused_mac_intr_mask(struct dp_soc *soc,
4249 						int mac_num)
4250 {
4251 	uint8_t *grp_mask = NULL;
4252 	int group_number;
4253 
4254 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
4255 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4256 	wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
4257 					  group_number, 0x0);
4258 
4259 	grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
4260 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4261 	wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
4262 				      group_number, 0x0);
4263 
4264 	grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
4265 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4266 	wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
4267 					  group_number, 0x0);
4268 
4269 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
4270 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4271 	wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
4272 					      group_number, 0x0);
4273 }
4274 
4275 #ifdef IPA_OFFLOAD
4276 #ifdef IPA_WDI3_VLAN_SUPPORT
4277 /*
4278  * dp_soc_reset_ipa_vlan_intr_mask() - reset interrupt mask for IPA offloaded
4279  * ring for vlan tagged traffic
4280  * @dp_soc - DP Soc handle
4281  *
4282  * Return: Return void
4283  */
4284 static void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
4285 {
4286 	uint8_t *grp_mask = NULL;
4287 	int group_number, mask;
4288 
4289 	if (!wlan_ipa_is_vlan_enabled())
4290 		return;
4291 
4292 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
4293 
4294 	group_number = dp_srng_find_ring_in_mask(IPA_ALT_REO_DEST_RING_IDX, grp_mask);
4295 	if (group_number < 0) {
4296 		dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4297 			      soc, REO_DST, IPA_ALT_REO_DEST_RING_IDX);
4298 		return;
4299 	}
4300 
4301 	mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
4302 
4303 	/* reset the interrupt mask for offloaded ring */
4304 	mask &= (~(1 << IPA_ALT_REO_DEST_RING_IDX));
4305 
4306 	/*
4307 	 * set the interrupt mask to zero for rx offloaded radio.
4308 	 */
4309 	wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
4310 }
4311 #else
/* Stub: IPA vlan support disabled, nothing to reset */
static inline
void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
{ }
4315 #endif /* IPA_WDI3_VLAN_SUPPORT */
4316 #else
/* Stub: IPA offload disabled, nothing to reset */
static inline
void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
{ }
4320 #endif /* IPA_OFFLOAD */
4321 
4322 /*
4323  * dp_soc_reset_intr_mask() - reset interrupt mask
4324  * @dp_soc - DP Soc handle
4325  *
4326  * Return: Return void
4327  */
4328 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
4329 {
4330 	uint8_t j;
4331 	uint8_t *grp_mask = NULL;
4332 	int group_number, mask, num_ring;
4333 
4334 	/* number of tx ring */
4335 	num_ring = soc->num_tcl_data_rings;
4336 
4337 	/*
4338 	 * group mask for tx completion  ring.
4339 	 */
4340 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
4341 
4342 	/* loop and reset the mask for only offloaded ring */
4343 	for (j = 0; j < WLAN_CFG_NUM_TCL_DATA_RINGS; j++) {
4344 		/*
4345 		 * Group number corresponding to tx offloaded ring.
4346 		 */
4347 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
4348 		if (group_number < 0) {
4349 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4350 				      soc, WBM2SW_RELEASE, j);
4351 			continue;
4352 		}
4353 
4354 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
4355 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j) &&
4356 		    (!mask)) {
4357 			continue;
4358 		}
4359 
4360 		/* reset the tx mask for offloaded ring */
4361 		mask &= (~(1 << j));
4362 
4363 		/*
4364 		 * reset the interrupt mask for offloaded ring.
4365 		 */
4366 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
4367 	}
4368 
4369 	/* number of rx rings */
4370 	num_ring = soc->num_reo_dest_rings;
4371 
4372 	/*
4373 	 * group mask for reo destination ring.
4374 	 */
4375 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
4376 
4377 	/* loop and reset the mask for only offloaded ring */
4378 	for (j = 0; j < WLAN_CFG_NUM_REO_DEST_RING; j++) {
4379 		/*
4380 		 * Group number corresponding to rx offloaded ring.
4381 		 */
4382 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
4383 		if (group_number < 0) {
4384 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4385 				      soc, REO_DST, j);
4386 			continue;
4387 		}
4388 
4389 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
4390 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j) &&
4391 		    (!mask)) {
4392 			continue;
4393 		}
4394 
4395 		/* reset the interrupt mask for offloaded ring */
4396 		mask &= (~(1 << j));
4397 
4398 		/*
4399 		 * set the interrupt mask to zero for rx offloaded radio.
4400 		 */
4401 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
4402 	}
4403 
4404 	/*
4405 	 * group mask for Rx buffer refill ring
4406 	 */
4407 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
4408 
4409 	/* loop and reset the mask for only offloaded ring */
4410 	for (j = 0; j < MAX_PDEV_CNT; j++) {
4411 		int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
4412 
4413 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
4414 			continue;
4415 		}
4416 
4417 		/*
4418 		 * Group number corresponding to rx offloaded ring.
4419 		 */
4420 		group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask);
4421 		if (group_number < 0) {
4422 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4423 				      soc, REO_DST, lmac_id);
4424 			continue;
4425 		}
4426 
4427 		/* set the interrupt mask for offloaded ring */
4428 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
4429 				group_number);
4430 		mask &= (~(1 << lmac_id));
4431 
4432 		/*
4433 		 * set the interrupt mask to zero for rx offloaded radio.
4434 		 */
4435 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
4436 			group_number, mask);
4437 	}
4438 
4439 	grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
4440 
4441 	for (j = 0; j < num_ring; j++) {
4442 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_EXCEPTION, j)) {
4443 			continue;
4444 		}
4445 
4446 		/*
4447 		 * Group number corresponding to rx err ring.
4448 		 */
4449 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
4450 		if (group_number < 0) {
4451 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4452 				      soc, REO_EXCEPTION, j);
4453 			continue;
4454 		}
4455 
4456 		wlan_cfg_set_rx_err_ring_mask(soc->wlan_cfg_ctx,
4457 					      group_number, 0);
4458 	}
4459 }
4460 
4461 #ifdef IPA_OFFLOAD
4462 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap0,
4463 			 uint32_t *remap1, uint32_t *remap2)
4464 {
4465 	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX] = {
4466 				REO_REMAP_SW1, REO_REMAP_SW2, REO_REMAP_SW3,
4467 				REO_REMAP_SW5, REO_REMAP_SW6, REO_REMAP_SW7};
4468 
4469 	switch (soc->arch_id) {
4470 	case CDP_ARCH_TYPE_BE:
4471 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4472 					      soc->num_reo_dest_rings -
4473 					      USE_2_IPA_RX_REO_RINGS, remap1,
4474 					      remap2);
4475 		break;
4476 
4477 	case CDP_ARCH_TYPE_LI:
4478 		if (wlan_ipa_is_vlan_enabled()) {
4479 			hal_compute_reo_remap_ix2_ix3(
4480 					soc->hal_soc, ring,
4481 					soc->num_reo_dest_rings -
4482 					USE_2_IPA_RX_REO_RINGS, remap1,
4483 					remap2);
4484 
4485 		} else {
4486 			hal_compute_reo_remap_ix2_ix3(
4487 					soc->hal_soc, ring,
4488 					soc->num_reo_dest_rings -
4489 					USE_1_IPA_RX_REO_RING, remap1,
4490 					remap2);
4491 		}
4492 
4493 		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);
4494 		break;
4495 	default:
4496 		dp_err("unkonwn arch_id 0x%x", soc->arch_id);
4497 		QDF_BUG(0);
4498 
4499 	}
4500 
4501 	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
4502 
4503 	return true;
4504 }
4505 
4506 #ifdef IPA_WDI3_TX_TWO_PIPES
/* Return true if @index is the alternate IPA TX (TCL) ring index */
static bool dp_ipa_is_alt_tx_ring(int index)
{
	return index == IPA_TX_ALT_RING_IDX;
}

/* Return true if @index is the alternate IPA TX completion ring index */
static bool dp_ipa_is_alt_tx_comp_ring(int index)
{
	return index == IPA_TX_ALT_COMP_RING_IDX;
}
4516 #else /* !IPA_WDI3_TX_TWO_PIPES */
/* Single TX pipe build: no index maps to an alternate IPA TX ring */
static bool dp_ipa_is_alt_tx_ring(int index)
{
	return false;
}

static bool dp_ipa_is_alt_tx_comp_ring(int index)
{
	return false;
}
4526 #endif /* IPA_WDI3_TX_TWO_PIPES */
4527 
4528 /**
4529  * dp_ipa_get_tx_ring_size() - Get Tx ring size for IPA
4530  *
4531  * @tx_ring_num: Tx ring number
4532  * @tx_ipa_ring_sz: Return param only updated for IPA.
4533  * @soc_cfg_ctx: dp soc cfg context
4534  *
4535  * Return: None
4536  */
4537 static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz,
4538 				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4539 {
4540 	if (!soc_cfg_ctx->ipa_enabled)
4541 		return;
4542 
4543 	if (tx_ring_num == IPA_TCL_DATA_RING_IDX)
4544 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_ring_size(soc_cfg_ctx);
4545 	else if (dp_ipa_is_alt_tx_ring(tx_ring_num))
4546 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_alt_ring_size(soc_cfg_ctx);
4547 }
4548 
4549 /**
4550  * dp_ipa_get_tx_comp_ring_size() - Get Tx comp ring size for IPA
4551  *
4552  * @tx_comp_ring_num: Tx comp ring number
4553  * @tx_comp_ipa_ring_sz: Return param only updated for IPA.
4554  * @soc_cfg_ctx: dp soc cfg context
4555  *
4556  * Return: None
4557  */
4558 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
4559 					 int *tx_comp_ipa_ring_sz,
4560 				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4561 {
4562 	if (!soc_cfg_ctx->ipa_enabled)
4563 		return;
4564 
4565 	if (tx_comp_ring_num == IPA_TCL_DATA_RING_IDX)
4566 		*tx_comp_ipa_ring_sz =
4567 				wlan_cfg_ipa_tx_comp_ring_size(soc_cfg_ctx);
4568 	else if (dp_ipa_is_alt_tx_comp_ring(tx_comp_ring_num))
4569 		*tx_comp_ipa_ring_sz =
4570 				wlan_cfg_ipa_tx_alt_comp_ring_size(soc_cfg_ctx);
4571 }
4572 #else
/**
 * dp_reo_ring_selection() - Map a REO ring bitmask to a remap ring list
 * @value: bitmask of REO destination rings to use; bit i selects SW(i+1)
 * @ring: output array filled with REO_REMAP_SW* ids
 *
 * Only the explicitly listed mask values are supported; any other mask
 * triggers an error and returns 0 entries.
 *
 * Return: number of entries written to @ring
 */
static uint8_t dp_reo_ring_selection(uint32_t value, uint32_t *ring)
{
	uint8_t num = 0;

	switch (value) {
	/* should we have all the different possible ring configs */
	case 0xFF:
		num = 8;
		ring[0] = REO_REMAP_SW1;
		ring[1] = REO_REMAP_SW2;
		ring[2] = REO_REMAP_SW3;
		ring[3] = REO_REMAP_SW4;
		ring[4] = REO_REMAP_SW5;
		ring[5] = REO_REMAP_SW6;
		ring[6] = REO_REMAP_SW7;
		ring[7] = REO_REMAP_SW8;
		break;

	case 0x3F:
		num = 6;
		ring[0] = REO_REMAP_SW1;
		ring[1] = REO_REMAP_SW2;
		ring[2] = REO_REMAP_SW3;
		ring[3] = REO_REMAP_SW4;
		ring[4] = REO_REMAP_SW5;
		ring[5] = REO_REMAP_SW6;
		break;

	case 0xF:
		num = 4;
		ring[0] = REO_REMAP_SW1;
		ring[1] = REO_REMAP_SW2;
		ring[2] = REO_REMAP_SW3;
		ring[3] = REO_REMAP_SW4;
		break;
	case 0xE:
		num = 3;
		ring[0] = REO_REMAP_SW2;
		ring[1] = REO_REMAP_SW3;
		ring[2] = REO_REMAP_SW4;
		break;
	case 0xD:
		num = 3;
		ring[0] = REO_REMAP_SW1;
		ring[1] = REO_REMAP_SW3;
		ring[2] = REO_REMAP_SW4;
		break;
	case 0xC:
		num = 2;
		ring[0] = REO_REMAP_SW3;
		ring[1] = REO_REMAP_SW4;
		break;
	case 0xB:
		num = 3;
		ring[0] = REO_REMAP_SW1;
		ring[1] = REO_REMAP_SW2;
		ring[2] = REO_REMAP_SW4;
		break;
	case 0xA:
		num = 2;
		ring[0] = REO_REMAP_SW2;
		ring[1] = REO_REMAP_SW4;
		break;
	case 0x9:
		num = 2;
		ring[0] = REO_REMAP_SW1;
		ring[1] = REO_REMAP_SW4;
		break;
	case 0x8:
		num = 1;
		ring[0] = REO_REMAP_SW4;
		break;
	case 0x7:
		num = 3;
		ring[0] = REO_REMAP_SW1;
		ring[1] = REO_REMAP_SW2;
		ring[2] = REO_REMAP_SW3;
		break;
	case 0x6:
		num = 2;
		ring[0] = REO_REMAP_SW2;
		ring[1] = REO_REMAP_SW3;
		break;
	case 0x5:
		num = 2;
		ring[0] = REO_REMAP_SW1;
		ring[1] = REO_REMAP_SW3;
		break;
	case 0x4:
		num = 1;
		ring[0] = REO_REMAP_SW3;
		break;
	case 0x3:
		num = 2;
		ring[0] = REO_REMAP_SW1;
		ring[1] = REO_REMAP_SW2;
		break;
	case 0x2:
		num = 1;
		ring[0] = REO_REMAP_SW2;
		break;
	case 0x1:
		num = 1;
		ring[0] = REO_REMAP_SW1;
		break;
	default:
		dp_err("unkonwn reo ring map 0x%x", value);
		QDF_BUG(0);
	}
	return num;
}
4684 
4685 bool dp_reo_remap_config(struct dp_soc *soc,
4686 			 uint32_t *remap0,
4687 			 uint32_t *remap1,
4688 			 uint32_t *remap2)
4689 {
4690 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4691 	uint32_t reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
4692 	uint8_t num;
4693 	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX];
4694 	uint32_t value;
4695 
4696 	switch (offload_radio) {
4697 	case dp_nss_cfg_default:
4698 		value = reo_config & WLAN_CFG_NUM_REO_RINGS_MAP_MAX;
4699 		num = dp_reo_ring_selection(value, ring);
4700 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4701 					      num, remap1, remap2);
4702 		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);
4703 
4704 		break;
4705 	case dp_nss_cfg_first_radio:
4706 		value = reo_config & 0xE;
4707 		num = dp_reo_ring_selection(value, ring);
4708 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4709 					      num, remap1, remap2);
4710 
4711 		break;
4712 	case dp_nss_cfg_second_radio:
4713 		value = reo_config & 0xD;
4714 		num = dp_reo_ring_selection(value, ring);
4715 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4716 					      num, remap1, remap2);
4717 
4718 		break;
4719 	case dp_nss_cfg_dbdc:
4720 	case dp_nss_cfg_dbtc:
4721 		/* return false if both or all are offloaded to NSS */
4722 		return false;
4723 
4724 	}
4725 
4726 	dp_debug("remap1 %x remap2 %x offload_radio %u",
4727 		 *remap1, *remap2, offload_radio);
4728 	return true;
4729 }
4730 
/* IPA disabled: TX ring sizes keep their configured defaults */
static void dp_ipa_get_tx_ring_size(int ring_num, int *tx_ipa_ring_sz,
				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}

static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
					 int *tx_comp_ipa_ring_sz,
				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}
4741 #endif /* IPA_OFFLOAD */
4742 
4743 /*
4744  * dp_reo_frag_dst_set() - configure reo register to set the
4745  *                        fragment destination ring
4746  * @soc : Datapath soc
4747  * @frag_dst_ring : output parameter to set fragment destination ring
4748  *
4749  * Based on offload_radio below fragment destination rings is selected
4750  * 0 - TCL
4751  * 1 - SW1
4752  * 2 - SW2
4753  * 3 - SW3
4754  * 4 - SW4
4755  * 5 - Release
4756  * 6 - FW
4757  * 7 - alternate select
4758  *
4759  * return: void
4760  */
4761 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
4762 {
4763 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4764 
4765 	switch (offload_radio) {
4766 	case dp_nss_cfg_default:
4767 		*frag_dst_ring = REO_REMAP_TCL;
4768 		break;
4769 	case dp_nss_cfg_first_radio:
4770 		/*
4771 		 * This configuration is valid for single band radio which
4772 		 * is also NSS offload.
4773 		 */
4774 	case dp_nss_cfg_dbdc:
4775 	case dp_nss_cfg_dbtc:
4776 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
4777 		break;
4778 	default:
4779 		dp_init_err("%pK: dp_reo_frag_dst_set invalid offload radio config", soc);
4780 		break;
4781 	}
4782 }
4783 
4784 #ifdef ENABLE_VERBOSE_DEBUG
4785 static void dp_enable_verbose_debug(struct dp_soc *soc)
4786 {
4787 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
4788 
4789 	soc_cfg_ctx = soc->wlan_cfg_ctx;
4790 
4791 	if (soc_cfg_ctx->per_pkt_trace & dp_verbose_debug_mask)
4792 		is_dp_verbose_debug_enabled = true;
4793 
4794 	if (soc_cfg_ctx->per_pkt_trace & hal_verbose_debug_mask)
4795 		hal_set_verbose_debug(true);
4796 	else
4797 		hal_set_verbose_debug(false);
4798 }
4799 #else
4800 static void dp_enable_verbose_debug(struct dp_soc *soc)
4801 {
4802 }
4803 #endif
4804 
4805 #ifdef WLAN_FEATURE_STATS_EXT
4806 static inline void dp_create_ext_stats_event(struct dp_soc *soc)
4807 {
4808 	qdf_event_create(&soc->rx_hw_stats_event);
4809 }
4810 #else
4811 static inline void dp_create_ext_stats_event(struct dp_soc *soc)
4812 {
4813 }
4814 #endif
4815 
4816 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index)
4817 {
4818 	int tcl_ring_num, wbm_ring_num;
4819 
4820 	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
4821 						index,
4822 						&tcl_ring_num,
4823 						&wbm_ring_num);
4824 
4825 	if (tcl_ring_num == -1) {
4826 		dp_err("incorrect tcl ring num for index %u", index);
4827 		return;
4828 	}
4829 
4830 	wlan_minidump_remove(soc->tcl_data_ring[index].base_vaddr_unaligned,
4831 			     soc->tcl_data_ring[index].alloc_size,
4832 			     soc->ctrl_psoc,
4833 			     WLAN_MD_DP_SRNG_TCL_DATA,
4834 			     "tcl_data_ring");
4835 	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
4836 	dp_srng_deinit(soc, &soc->tcl_data_ring[index], TCL_DATA,
4837 		       tcl_ring_num);
4838 
4839 	if (wbm_ring_num == INVALID_WBM_RING_NUM)
4840 		return;
4841 
4842 	wlan_minidump_remove(soc->tx_comp_ring[index].base_vaddr_unaligned,
4843 			     soc->tx_comp_ring[index].alloc_size,
4844 			     soc->ctrl_psoc,
4845 			     WLAN_MD_DP_SRNG_TX_COMP,
4846 			     "tcl_comp_ring");
4847 	dp_srng_deinit(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4848 		       wbm_ring_num);
4849 }
4850 
4851 /**
4852  * dp_init_tx_ring_pair_by_index() - The function inits tcl data/wbm completion
4853  * ring pair
4854  * @soc: DP soc pointer
4855  * @index: index of soc->tcl_data or soc->tx_comp to initialize
4856  *
4857  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
4858  */
4859 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
4860 						uint8_t index)
4861 {
4862 	int tcl_ring_num, wbm_ring_num;
4863 	uint8_t bm_id;
4864 
4865 	if (index >= MAX_TCL_DATA_RINGS) {
4866 		dp_err("unexpected index!");
4867 		QDF_BUG(0);
4868 		goto fail1;
4869 	}
4870 
4871 	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
4872 						index,
4873 						&tcl_ring_num,
4874 						&wbm_ring_num);
4875 
4876 	if (tcl_ring_num == -1) {
4877 		dp_err("incorrect tcl ring num for index %u", index);
4878 		goto fail1;
4879 	}
4880 
4881 	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
4882 	if (dp_srng_init(soc, &soc->tcl_data_ring[index], TCL_DATA,
4883 			 tcl_ring_num, 0)) {
4884 		dp_err("dp_srng_init failed for tcl_data_ring");
4885 		goto fail1;
4886 	}
4887 	wlan_minidump_log(soc->tcl_data_ring[index].base_vaddr_unaligned,
4888 			  soc->tcl_data_ring[index].alloc_size,
4889 			  soc->ctrl_psoc,
4890 			  WLAN_MD_DP_SRNG_TCL_DATA,
4891 			  "tcl_data_ring");
4892 
4893 	if (wbm_ring_num == INVALID_WBM_RING_NUM)
4894 		goto set_rbm;
4895 
4896 	if (dp_srng_init(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4897 			 wbm_ring_num, 0)) {
4898 		dp_err("dp_srng_init failed for tx_comp_ring");
4899 		goto fail1;
4900 	}
4901 
4902 	wlan_minidump_log(soc->tx_comp_ring[index].base_vaddr_unaligned,
4903 			  soc->tx_comp_ring[index].alloc_size,
4904 			  soc->ctrl_psoc,
4905 			  WLAN_MD_DP_SRNG_TX_COMP,
4906 			  "tcl_comp_ring");
4907 set_rbm:
4908 	bm_id = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_ring_num);
4909 
4910 	soc->arch_ops.tx_implicit_rbm_set(soc, tcl_ring_num, bm_id);
4911 
4912 	return QDF_STATUS_SUCCESS;
4913 
4914 fail1:
4915 	return QDF_STATUS_E_FAILURE;
4916 }
4917 
4918 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index)
4919 {
4920 	dp_debug("index %u", index);
4921 	dp_srng_free(soc, &soc->tcl_data_ring[index]);
4922 	dp_srng_free(soc, &soc->tx_comp_ring[index]);
4923 }
4924 
4925 /**
4926  * dp_alloc_tx_ring_pair_by_index() - The function allocs tcl data/wbm2sw
4927  * ring pair for the given "index"
4928  * @soc: DP soc pointer
4929  * @index: index of soc->tcl_data or soc->tx_comp to initialize
4930  *
4931  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
4932  */
4933 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
4934 						 uint8_t index)
4935 {
4936 	int tx_ring_size;
4937 	int tx_comp_ring_size;
4938 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
4939 	int cached = 0;
4940 
4941 	if (index >= MAX_TCL_DATA_RINGS) {
4942 		dp_err("unexpected index!");
4943 		QDF_BUG(0);
4944 		goto fail1;
4945 	}
4946 
4947 	dp_debug("index %u", index);
4948 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
4949 	dp_ipa_get_tx_ring_size(index, &tx_ring_size, soc_cfg_ctx);
4950 
4951 	if (dp_srng_alloc(soc, &soc->tcl_data_ring[index], TCL_DATA,
4952 			  tx_ring_size, cached)) {
4953 		dp_err("dp_srng_alloc failed for tcl_data_ring");
4954 		goto fail1;
4955 	}
4956 
4957 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
4958 	dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size, soc_cfg_ctx);
4959 	/* Enable cached TCL desc if NSS offload is disabled */
4960 	if (!wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
4961 		cached = WLAN_CFG_DST_RING_CACHED_DESC;
4962 
4963 	if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) ==
4964 	    INVALID_WBM_RING_NUM)
4965 		return QDF_STATUS_SUCCESS;
4966 
4967 	if (dp_srng_alloc(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4968 			  tx_comp_ring_size, cached)) {
4969 		dp_err("dp_srng_alloc failed for tx_comp_ring");
4970 		goto fail1;
4971 	}
4972 
4973 	return QDF_STATUS_SUCCESS;
4974 
4975 fail1:
4976 	return QDF_STATUS_E_FAILURE;
4977 }
4978 
4979 static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
4980 {
4981 	struct cdp_lro_hash_config lro_hash;
4982 	QDF_STATUS status;
4983 
4984 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
4985 	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
4986 	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
4987 		dp_err("LRO, GRO and RX hash disabled");
4988 		return QDF_STATUS_E_FAILURE;
4989 	}
4990 
4991 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
4992 
4993 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
4994 	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
4995 		lro_hash.lro_enable = 1;
4996 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
4997 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
4998 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
4999 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
5000 	}
5001 
5002 	soc->arch_ops.get_rx_hash_key(soc, &lro_hash);
5003 
5004 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
5005 
5006 	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
5007 		QDF_BUG(0);
5008 		dp_err("lro_hash_config not configured");
5009 		return QDF_STATUS_E_FAILURE;
5010 	}
5011 
5012 	status = soc->cdp_soc.ol_ops->lro_hash_config(soc->ctrl_psoc,
5013 						      pdev->pdev_id,
5014 						      &lro_hash);
5015 	if (!QDF_IS_STATUS_SUCCESS(status)) {
5016 		dp_err("failed to send lro_hash_config to FW %u", status);
5017 		return status;
5018 	}
5019 
5020 	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
5021 		lro_hash.lro_enable, lro_hash.tcp_flag,
5022 		lro_hash.tcp_flag_mask);
5023 
5024 	dp_info("toeplitz_hash_ipv4:");
5025 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5026 			   lro_hash.toeplitz_hash_ipv4,
5027 			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
5028 			   LRO_IPV4_SEED_ARR_SZ));
5029 
5030 	dp_info("toeplitz_hash_ipv6:");
5031 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5032 			   lro_hash.toeplitz_hash_ipv6,
5033 			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
5034 			   LRO_IPV6_SEED_ARR_SZ));
5035 
5036 	return status;
5037 }
5038 
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/*
 * dp_reap_timer_init() - initialize the reap timer
 * @soc: data path SoC handle
 *
 * Single-pdev (MCL) build variant: arms the monitor reap/vdev timers.
 *
 * Return: void
 */
static void dp_reap_timer_init(struct dp_soc *soc)
{
	/*
	 * Timer to reap rxdma status rings.
	 * Needed until we enable ppdu end interrupts
	 */
	dp_monitor_reap_timer_init(soc);
	dp_monitor_vdev_timer_init(soc);
}

/*
 * dp_reap_timer_deinit() - de-initialize the reap timer
 * @soc: data path SoC handle
 *
 * Return: void
 */
static void dp_reap_timer_deinit(struct dp_soc *soc)
{
	dp_monitor_reap_timer_deinit(soc);
}
#else
/* WIN use case: LMAC rings may be serviced by a polling timer instead */
static void dp_reap_timer_init(struct dp_soc *soc)
{
	/* Configure LMAC rings in Polled mode */
	if (soc->lmac_polled_mode) {
		/*
		 * Timer to reap lmac rings.
		 */
		qdf_timer_init(soc->osdev, &soc->lmac_reap_timer,
			       dp_service_lmac_rings, (void *)soc,
			       QDF_TIMER_TYPE_WAKE_APPS);
		soc->lmac_timer_init = 1;
		qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
	}
}

static void dp_reap_timer_deinit(struct dp_soc *soc)
{
	/* Stop and free the polling timer only if init above armed it */
	if (soc->lmac_timer_init) {
		qdf_timer_stop(&soc->lmac_reap_timer);
		qdf_timer_free(&soc->lmac_reap_timer);
		soc->lmac_timer_init = 0;
	}
}
#endif
5092 
#ifdef QCA_HOST2FW_RXBUF_RING
/*
 * dp_rxdma_ring_alloc() - allocate the RXDMA rings
 * @soc: data path SoC handle
 * @pdev: Physical device handle
 *
 * Allocates one host2FW Rx mac buffer ring per configured mac ring.
 *
 * Return: QDF_STATUS_SUCCESS - success, QDF_STATUS_E_FAILURE - failure
 */
static int dp_rxdma_ring_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
	int max_mac_rings;
	int i;
	int ring_size;

	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
	ring_size =  wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx);

	for (i = 0; i < max_mac_rings; i++) {
		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
		if (dp_srng_alloc(soc, &pdev->rx_mac_buf_ring[i],
				  RXDMA_BUF, ring_size, 0)) {
			dp_init_err("%pK: failed rx mac ring setup", soc);
			return QDF_STATUS_E_FAILURE;
		}
	}
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rxdma_ring_setup() - configure the RXDMA rings
 * @soc: data path SoC handle
 * @pdev: Physical device handle
 *
 * Initializes each mac buffer ring previously allocated by
 * dp_rxdma_ring_alloc().
 *
 * Return: QDF_STATUS_SUCCESS - success, QDF_STATUS_E_FAILURE - failure
 */
static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
	int max_mac_rings;
	int i;

	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);

	for (i = 0; i < max_mac_rings; i++) {
		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
		/* ring_num 1 with mac_id i — presumably the per-mac
		 * host2FW rxbuf ring index; confirm against dp_srng_init
		 * callers before changing.
		 */
		if (dp_srng_init(soc, &pdev->rx_mac_buf_ring[i],
				 RXDMA_BUF, 1, i)) {
			dp_init_err("%pK: failed rx mac ring setup", soc);
			return QDF_STATUS_E_FAILURE;
		}
	}
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_rxdma_ring_cleanup() - Deinit the RXDMA rings and reap timer
 * @soc: data path SoC handle
 * @pdev: Physical device handle
 *
 * Note: iterates over MAX_RX_MAC_RINGS (not the configured count);
 * dp_srng_deinit on never-initialized rings is expected to be benign.
 *
 * Return: void
 */
static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	int i;

	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
		dp_srng_deinit(soc, &pdev->rx_mac_buf_ring[i], RXDMA_BUF, 1);

	dp_reap_timer_deinit(soc);
}

/*
 * dp_rxdma_ring_free() - Free the RXDMA rings
 * @pdev: Physical device handle
 *
 * Return: void
 */
static void dp_rxdma_ring_free(struct dp_pdev *pdev)
{
	int i;

	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
		dp_srng_free(pdev->soc, &pdev->rx_mac_buf_ring[i]);
}

#else
/* QCA_HOST2FW_RXBUF_RING disabled: mac buffer rings do not exist, only
 * the reap timer teardown remains meaningful.
 */
static int dp_rxdma_ring_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	dp_reap_timer_deinit(soc);
}

static void dp_rxdma_ring_free(struct dp_pdev *pdev)
{
}
#endif
5201 
5202 /**
5203  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
5204  * @pdev - DP_PDEV handle
5205  *
5206  * Return: void
5207  */
5208 static inline void
5209 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
5210 {
5211 	uint8_t map_id;
5212 	struct dp_soc *soc = pdev->soc;
5213 
5214 	if (!soc)
5215 		return;
5216 
5217 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
5218 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
5219 			     default_dscp_tid_map,
5220 			     sizeof(default_dscp_tid_map));
5221 	}
5222 
5223 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
5224 		hal_tx_set_dscp_tid_map(soc->hal_soc,
5225 					default_dscp_tid_map,
5226 					map_id);
5227 	}
5228 }
5229 
5230 /**
5231  * dp_pcp_tid_map_setup(): Initialize the pcp-tid maps
5232  * @pdev - DP_PDEV handle
5233  *
5234  * Return: void
5235  */
5236 static inline void
5237 dp_pcp_tid_map_setup(struct dp_pdev *pdev)
5238 {
5239 	struct dp_soc *soc = pdev->soc;
5240 
5241 	if (!soc)
5242 		return;
5243 
5244 	qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
5245 		     sizeof(default_pcp_tid_map));
5246 	hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
5247 }
5248 
5249 #ifdef IPA_OFFLOAD
5250 /**
5251  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
5252  * @soc: data path instance
5253  * @pdev: core txrx pdev context
5254  *
5255  * Return: QDF_STATUS_SUCCESS: success
5256  *         QDF_STATUS_E_RESOURCES: Error return
5257  */
5258 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5259 					   struct dp_pdev *pdev)
5260 {
5261 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5262 	int entries;
5263 
5264 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5265 		soc_cfg_ctx = soc->wlan_cfg_ctx;
5266 		entries =
5267 			wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
5268 
5269 		/* Setup second Rx refill buffer ring */
5270 		if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
5271 				  entries, 0)) {
5272 			dp_init_err("%pK: dp_srng_alloc failed second"
5273 				    "rx refill ring", soc);
5274 			return QDF_STATUS_E_FAILURE;
5275 		}
5276 	}
5277 
5278 	return QDF_STATUS_SUCCESS;
5279 }
5280 
5281 #ifdef IPA_WDI3_VLAN_SUPPORT
5282 static int dp_setup_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5283 					       struct dp_pdev *pdev)
5284 {
5285 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5286 	int entries;
5287 
5288 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5289 	    wlan_ipa_is_vlan_enabled()) {
5290 		soc_cfg_ctx = soc->wlan_cfg_ctx;
5291 		entries =
5292 			wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
5293 
5294 		/* Setup second Rx refill buffer ring */
5295 		if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF,
5296 				  entries, 0)) {
5297 			dp_init_err("%pK: alloc failed for 3rd rx refill ring",
5298 				    soc);
5299 			return QDF_STATUS_E_FAILURE;
5300 		}
5301 	}
5302 
5303 	return QDF_STATUS_SUCCESS;
5304 }
5305 
5306 static int dp_init_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5307 					      struct dp_pdev *pdev)
5308 {
5309 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5310 	    wlan_ipa_is_vlan_enabled()) {
5311 		if (dp_srng_init(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF,
5312 				 IPA_RX_ALT_REFILL_BUF_RING_IDX,
5313 				 pdev->pdev_id)) {
5314 			dp_init_err("%pK: init failed for 3rd rx refill ring",
5315 				    soc);
5316 			return QDF_STATUS_E_FAILURE;
5317 		}
5318 	}
5319 
5320 	return QDF_STATUS_SUCCESS;
5321 }
5322 
5323 static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5324 						 struct dp_pdev *pdev)
5325 {
5326 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5327 	    wlan_ipa_is_vlan_enabled())
5328 		dp_srng_deinit(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF, 0);
5329 }
5330 
5331 static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5332 					       struct dp_pdev *pdev)
5333 {
5334 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5335 	    wlan_ipa_is_vlan_enabled())
5336 		dp_srng_free(soc, &pdev->rx_refill_buf_ring3);
5337 }
#else
/* IPA_WDI3_VLAN_SUPPORT disabled: the third Rx refill ring does not
 * exist, so all alt-refill-ring operations are no-ops.
 */
static int dp_setup_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
					       struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static int dp_init_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
					      struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
						 struct dp_pdev *pdev)
{
}

static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
					       struct dp_pdev *pdev)
{
}
#endif
5361 
5362 /**
5363  * dp_deinit_ipa_rx_refill_buf_ring - deinit second Rx refill buffer ring
5364  * @soc: data path instance
5365  * @pdev: core txrx pdev context
5366  *
5367  * Return: void
5368  */
5369 static void dp_deinit_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5370 					     struct dp_pdev *pdev)
5371 {
5372 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
5373 		dp_srng_deinit(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 0);
5374 }
5375 
5376 /**
5377  * dp_init_ipa_rx_refill_buf_ring - Init second Rx refill buffer ring
5378  * @soc: data path instance
5379  * @pdev: core txrx pdev context
5380  *
5381  * Return: QDF_STATUS_SUCCESS: success
5382  *         QDF_STATUS_E_RESOURCES: Error return
5383  */
5384 static int dp_init_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5385 					  struct dp_pdev *pdev)
5386 {
5387 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5388 		if (dp_srng_init(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
5389 				 IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id)) {
5390 			dp_init_err("%pK: dp_srng_init failed second"
5391 				    "rx refill ring", soc);
5392 			return QDF_STATUS_E_FAILURE;
5393 		}
5394 	}
5395 
5396 	if (dp_init_ipa_rx_alt_refill_buf_ring(soc, pdev)) {
5397 		dp_deinit_ipa_rx_refill_buf_ring(soc, pdev);
5398 		return QDF_STATUS_E_FAILURE;
5399 	}
5400 
5401 	return QDF_STATUS_SUCCESS;
5402 }
5403 
5404 /**
5405  * dp_free_ipa_rx_refill_buf_ring - free second Rx refill buffer ring
5406  * @soc: data path instance
5407  * @pdev: core txrx pdev context
5408  *
5409  * Return: void
5410  */
5411 static void dp_free_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5412 					   struct dp_pdev *pdev)
5413 {
5414 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
5415 		dp_srng_free(soc, &pdev->rx_refill_buf_ring2);
5416 }
#else
/* IPA_OFFLOAD disabled: the extra IPA Rx refill rings do not exist, so
 * all of these are no-op stubs that report success.
 */
static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					   struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static int dp_init_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					  struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_deinit_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					     struct dp_pdev *pdev)
{
}

static void dp_free_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					   struct dp_pdev *pdev)
{
}

static int dp_setup_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
					       struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
						 struct dp_pdev *pdev)
{
}

static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
					       struct dp_pdev *pdev)
{
}
#endif
5456 
#ifdef DP_TX_HW_DESC_HISTORY
/**
 * dp_soc_tx_hw_desc_history_attach - Attach TX HW descriptor history
 *
 * @soc: DP soc handle
 *
 * Return: None
 */
static void dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc)
{
	dp_soc_frag_history_attach(soc, &soc->tx_hw_desc_history,
				   DP_TX_HW_DESC_HIST_MAX_SLOTS,
				   DP_TX_HW_DESC_HIST_PER_SLOT_MAX,
				   sizeof(struct dp_tx_hw_desc_evt),
				   true, DP_TX_HW_DESC_HIST_TYPE);
}

/**
 * dp_soc_tx_hw_desc_history_detach - Detach TX HW descriptor history
 *
 * @soc: DP soc handle
 *
 * Return: None
 */
static void dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc)
{
	dp_soc_frag_history_detach(soc, &soc->tx_hw_desc_history,
				   DP_TX_HW_DESC_HIST_MAX_SLOTS,
				   true, DP_TX_HW_DESC_HIST_TYPE);
}

#else /* DP_TX_HW_DESC_HISTORY */
/* Feature disabled: history attach/detach are no-ops */
static inline void
dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc)
{
}

static inline void
dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc)
{
}
#endif /* DP_TX_HW_DESC_HISTORY */
5492 
#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
#ifndef RX_DEFRAG_DO_NOT_REINJECT
/**
 * dp_soc_rx_reinject_ring_history_attach - Attach the reo reinject ring
 *					    history.
 * @soc: DP soc handle
 *
 * Allocation failure is tolerated: the record path checks the pointer.
 *
 * Return: None
 */
static void dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
{
	soc->rx_reinject_ring_history =
		dp_context_alloc_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
				     sizeof(struct dp_rx_reinject_history));
	if (soc->rx_reinject_ring_history)
		qdf_atomic_init(&soc->rx_reinject_ring_history->index);
}
#else /* RX_DEFRAG_DO_NOT_REINJECT */
/* Reinject path disabled: no reinject history is kept */
static inline void
dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
{
}
#endif /* RX_DEFRAG_DO_NOT_REINJECT */

/**
 * dp_soc_rx_history_attach() - Attach the ring history record buffers
 * @soc: DP soc structure
 *
 * This function allocates the memory for recording the rx ring, rx error
 * ring and the reinject ring entries. There is no error returned in case
 * of allocation failure since the record function checks if the history is
 * initialized or not. We do not want to fail the driver load in case of
 * failure to allocate memory for debug history.
 *
 * Returns: None
 */
static void dp_soc_rx_history_attach(struct dp_soc *soc)
{
	int i;
	uint32_t rx_ring_hist_size;
	uint32_t rx_refill_ring_hist_size;

	rx_ring_hist_size = sizeof(*soc->rx_ring_history[0]);
	rx_refill_ring_hist_size = sizeof(*soc->rx_refill_ring_history[0]);

	/* One history buffer per REO destination ring */
	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
		soc->rx_ring_history[i] = dp_context_alloc_mem(
				soc, DP_RX_RING_HIST_TYPE, rx_ring_hist_size);
		if (soc->rx_ring_history[i])
			qdf_atomic_init(&soc->rx_ring_history[i]->index);
	}

	soc->rx_err_ring_history = dp_context_alloc_mem(
			soc, DP_RX_ERR_RING_HIST_TYPE, rx_ring_hist_size);
	if (soc->rx_err_ring_history)
		qdf_atomic_init(&soc->rx_err_ring_history->index);

	dp_soc_rx_reinject_ring_history_attach(soc);

	/* One refill-ring history per pdev */
	for (i = 0; i < MAX_PDEV_CNT; i++) {
		soc->rx_refill_ring_history[i] = dp_context_alloc_mem(
						soc,
						DP_RX_REFILL_RING_HIST_TYPE,
						rx_refill_ring_hist_size);

		if (soc->rx_refill_ring_history[i])
			qdf_atomic_init(&soc->rx_refill_ring_history[i]->index);
	}
}

/**
 * dp_soc_rx_history_detach() - Free all rx ring history record buffers
 * @soc: DP soc structure
 *
 * Returns: None
 */
static void dp_soc_rx_history_detach(struct dp_soc *soc)
{
	int i;

	for (i = 0; i < MAX_REO_DEST_RINGS; i++)
		dp_context_free_mem(soc, DP_RX_RING_HIST_TYPE,
				    soc->rx_ring_history[i]);

	dp_context_free_mem(soc, DP_RX_ERR_RING_HIST_TYPE,
			    soc->rx_err_ring_history);

	/*
	 * No need for a featurized detach since qdf_mem_free takes
	 * care of NULL pointer.
	 */
	dp_context_free_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
			    soc->rx_reinject_ring_history);

	for (i = 0; i < MAX_PDEV_CNT; i++)
		dp_context_free_mem(soc, DP_RX_REFILL_RING_HIST_TYPE,
				    soc->rx_refill_ring_history[i]);
}

#else
/* Feature disabled: no rx ring history is kept */
static inline void dp_soc_rx_history_attach(struct dp_soc *soc)
{
}

static inline void dp_soc_rx_history_detach(struct dp_soc *soc)
{
}
#endif
5595 
#ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
/**
 * dp_soc_mon_status_ring_history_attach() - Attach the monitor status
 *					     buffer record history.
 * @soc: DP soc handle
 *
 * This function allocates memory to track the event for a monitor
 * status buffer, before its parsed and freed.
 *
 * Return: None
 */
static void dp_soc_mon_status_ring_history_attach(struct dp_soc *soc)
{
	soc->mon_status_ring_history = dp_context_alloc_mem(soc,
				DP_MON_STATUS_BUF_HIST_TYPE,
				sizeof(struct dp_mon_status_ring_history));
	/* Allocation failure is non-fatal; recording is simply skipped */
	if (!soc->mon_status_ring_history) {
		dp_err("Failed to alloc memory for mon status ring history");
		return;
	}
}

/**
 * dp_soc_mon_status_ring_history_detach() - Detach the monitor status buffer
 *					     record history.
 * @soc: DP soc handle
 *
 * Return: None
 */
static void dp_soc_mon_status_ring_history_detach(struct dp_soc *soc)
{
	/* Freeing a NULL history pointer is handled by the allocator */
	dp_context_free_mem(soc, DP_MON_STATUS_BUF_HIST_TYPE,
			    soc->mon_status_ring_history);
}
#else
/* Feature disabled: no monitor status history is kept */
static void dp_soc_mon_status_ring_history_attach(struct dp_soc *soc)
{
}

static void dp_soc_mon_status_ring_history_detach(struct dp_soc *soc)
{
}
#endif
5639 
#ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
/**
 * dp_soc_tx_history_attach() - Attach the ring history record buffers
 * @soc: DP soc structure
 *
 * This function allocates the memory for recording the tx tcl ring and
 * the tx comp ring entries. There is no error returned in case
 * of allocation failure since the record function checks if the history is
 * initialized or not. We do not want to fail the driver load in case of
 * failure to allocate memory for debug history.
 *
 * Returns: None
 */
static void dp_soc_tx_history_attach(struct dp_soc *soc)
{
	dp_soc_frag_history_attach(soc, &soc->tx_tcl_history,
				   DP_TX_TCL_HIST_MAX_SLOTS,
				   DP_TX_TCL_HIST_PER_SLOT_MAX,
				   sizeof(struct dp_tx_desc_event),
				   true, DP_TX_TCL_HIST_TYPE);
	dp_soc_frag_history_attach(soc, &soc->tx_comp_history,
				   DP_TX_COMP_HIST_MAX_SLOTS,
				   DP_TX_COMP_HIST_PER_SLOT_MAX,
				   sizeof(struct dp_tx_desc_event),
				   true, DP_TX_COMP_HIST_TYPE);
}

/**
 * dp_soc_tx_history_detach() - Detach the ring history record buffers
 * @soc: DP soc structure
 *
 * This function frees the memory for recording the tx tcl ring and
 * the tx comp ring entries.
 *
 * Returns: None
 */
static void dp_soc_tx_history_detach(struct dp_soc *soc)
{
	dp_soc_frag_history_detach(soc, &soc->tx_tcl_history,
				   DP_TX_TCL_HIST_MAX_SLOTS,
				   true, DP_TX_TCL_HIST_TYPE);
	dp_soc_frag_history_detach(soc, &soc->tx_comp_history,
				   DP_TX_COMP_HIST_MAX_SLOTS,
				   true, DP_TX_COMP_HIST_TYPE);
}

#else
/* Feature disabled: no tx descriptor history is kept */
static inline void dp_soc_tx_history_attach(struct dp_soc *soc)
{
}

static inline void dp_soc_tx_history_detach(struct dp_soc *soc)
{
}
#endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
5695 
5696 /*
5697 * dp_pdev_attach_wifi3() - attach txrx pdev
5698 * @txrx_soc: Datapath SOC handle
5699 * @params: Params for PDEV attach
5700 *
5701 * Return: QDF_STATUS
5702 */
5703 static inline
5704 QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
5705 				struct cdp_pdev_attach_params *params)
5706 {
5707 	qdf_size_t pdev_context_size;
5708 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5709 	struct dp_pdev *pdev = NULL;
5710 	uint8_t pdev_id = params->pdev_id;
5711 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5712 	int nss_cfg;
5713 
5714 	pdev_context_size =
5715 		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_PDEV);
5716 	if (pdev_context_size)
5717 		pdev = dp_context_alloc_mem(soc, DP_PDEV_TYPE, pdev_context_size);
5718 
5719 	if (!pdev) {
5720 		dp_init_err("%pK: DP PDEV memory allocation failed",
5721 			    soc);
5722 		goto fail0;
5723 	}
5724 	wlan_minidump_log(pdev, sizeof(*pdev), soc->ctrl_psoc,
5725 			  WLAN_MD_DP_PDEV, "dp_pdev");
5726 
5727 	soc_cfg_ctx = soc->wlan_cfg_ctx;
5728 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
5729 
5730 	if (!pdev->wlan_cfg_ctx) {
5731 		dp_init_err("%pK: pdev cfg_attach failed", soc);
5732 		goto fail1;
5733 	}
5734 
5735 	/*
5736 	 * set nss pdev config based on soc config
5737 	 */
5738 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
5739 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
5740 					 (nss_cfg & (1 << pdev_id)));
5741 
5742 	pdev->soc = soc;
5743 	pdev->pdev_id = pdev_id;
5744 	soc->pdev_list[pdev_id] = pdev;
5745 
5746 	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
5747 	soc->pdev_count++;
5748 
5749 	/* Allocate memory for pdev srng rings */
5750 	if (dp_pdev_srng_alloc(pdev)) {
5751 		dp_init_err("%pK: dp_pdev_srng_alloc failed", soc);
5752 		goto fail2;
5753 	}
5754 
5755 	/* Setup second Rx refill buffer ring */
5756 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev)) {
5757 		dp_init_err("%pK: dp_srng_alloc failed rxrefill2 ring",
5758 			    soc);
5759 		goto fail3;
5760 	}
5761 
5762 	/* Allocate memory for pdev rxdma rings */
5763 	if (dp_rxdma_ring_alloc(soc, pdev)) {
5764 		dp_init_err("%pK: dp_rxdma_ring_alloc failed", soc);
5765 		goto fail4;
5766 	}
5767 
5768 	/* Rx specific init */
5769 	if (dp_rx_pdev_desc_pool_alloc(pdev)) {
5770 		dp_init_err("%pK: dp_rx_pdev_attach failed", soc);
5771 		goto fail4;
5772 	}
5773 
5774 	if (dp_monitor_pdev_attach(pdev)) {
5775 		dp_init_err("%pK: dp_monitor_pdev_attach failed", soc);
5776 		goto fail5;
5777 	}
5778 
5779 	soc->arch_ops.txrx_pdev_attach(pdev, params);
5780 
5781 	/* Setup third Rx refill buffer ring */
5782 	if (dp_setup_ipa_rx_alt_refill_buf_ring(soc, pdev)) {
5783 		dp_init_err("%pK: dp_srng_alloc failed rxrefill3 ring",
5784 			    soc);
5785 		goto fail6;
5786 	}
5787 
5788 	return QDF_STATUS_SUCCESS;
5789 
5790 fail6:
5791 	dp_monitor_pdev_detach(pdev);
5792 fail5:
5793 	dp_rx_pdev_desc_pool_free(pdev);
5794 fail4:
5795 	dp_rxdma_ring_free(pdev);
5796 	dp_free_ipa_rx_refill_buf_ring(soc, pdev);
5797 fail3:
5798 	dp_pdev_srng_free(pdev);
5799 fail2:
5800 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
5801 fail1:
5802 	soc->pdev_list[pdev_id] = NULL;
5803 	qdf_mem_free(pdev);
5804 fail0:
5805 	return QDF_STATUS_E_FAILURE;
5806 }
5807 
5808 /**
5809  * dp_pdev_flush_pending_vdevs() - Flush all delete pending vdevs in pdev
5810  * @pdev: Datapath PDEV handle
5811  *
5812  * This is the last chance to flush all pending dp vdevs/peers,
5813  * some peer/vdev leak case like Non-SSR + peer unmap missing
5814  * will be covered here.
5815  *
5816  * Return: None
5817  */
5818 static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
5819 {
5820 	struct dp_soc *soc = pdev->soc;
5821 	struct dp_vdev *vdev_arr[MAX_VDEV_CNT] = {0};
5822 	uint32_t i = 0;
5823 	uint32_t num_vdevs = 0;
5824 	struct dp_vdev *vdev = NULL;
5825 
5826 	if (TAILQ_EMPTY(&soc->inactive_vdev_list))
5827 		return;
5828 
5829 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
5830 	TAILQ_FOREACH(vdev, &soc->inactive_vdev_list,
5831 		      inactive_list_elem) {
5832 		if (vdev->pdev != pdev)
5833 			continue;
5834 
5835 		vdev_arr[num_vdevs] = vdev;
5836 		num_vdevs++;
5837 		/* take reference to free */
5838 		dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CDP);
5839 	}
5840 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
5841 
5842 	for (i = 0; i < num_vdevs; i++) {
5843 		dp_vdev_flush_peers((struct cdp_vdev *)vdev_arr[i], 0, 0);
5844 		dp_vdev_unref_delete(soc, vdev_arr[i], DP_MOD_ID_CDP);
5845 	}
5846 }
5847 
#ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
/**
 * dp_vdev_stats_hw_offload_target_config() - Send HTT command to FW
 *                                          for enable/disable of HW vdev stats
 * @soc: Datapath soc handle
 * @pdev_id: INVALID_PDEV_ID for all pdevs or 0,1,2 for individual pdev
 * @enable: flag to represent enable/disable of hw vdev stats
 *
 * Return: none
 */
static void dp_vdev_stats_hw_offload_target_config(struct dp_soc *soc,
						   uint8_t pdev_id,
						   bool enable)
{
	/* Check SOC level config for HW offload vdev stats support */
	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
		dp_debug("%pK: HW vdev offload stats is disabled", soc);
		return;
	}

	/* Send HTT command to FW for enable of stats */
	dp_h2t_hw_vdev_stats_config_send(soc, pdev_id, enable, false, 0);
}

/**
 * dp_vdev_stats_hw_offload_target_clear() - Clear HW vdev stats on target
 * @soc: Datapath soc handle
 * @pdev_id: pdev_id (0,1,2)
 * @vdev_id_bitmask: bitmask with vdev_id(s) for which stats are to be
 *                   cleared on HW
 *
 * Return: none
 */
static
void dp_vdev_stats_hw_offload_target_clear(struct dp_soc *soc, uint8_t pdev_id,
					   uint64_t vdev_id_bitmask)
{
	/* Check SOC level config for HW offload vdev stats support */
	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
		dp_debug("%pK: HW vdev offload stats is disabled", soc);
		return;
	}

	/* Send HTT command to FW for reset of stats */
	dp_h2t_hw_vdev_stats_config_send(soc, pdev_id, true, true,
					 vdev_id_bitmask);
}
#else
/* Feature disabled: HW vdev stats config/clear are no-ops */
static void
dp_vdev_stats_hw_offload_target_config(struct dp_soc *soc, uint8_t pdev_id,
				       bool enable)
{
}

static
void dp_vdev_stats_hw_offload_target_clear(struct dp_soc *soc, uint8_t pdev_id,
					   uint64_t vdev_id_bitmask)
{
}
#endif /*QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT */
5907 
5908 /**
5909  * dp_pdev_deinit() - Deinit txrx pdev
5910  * @txrx_pdev: Datapath PDEV handle
5911  * @force: Force deinit
5912  *
5913  * Return: None
5914  */
5915 static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
5916 {
5917 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
5918 	qdf_nbuf_t curr_nbuf, next_nbuf;
5919 
5920 	if (pdev->pdev_deinit)
5921 		return;
5922 
5923 	dp_tx_me_exit(pdev);
5924 	dp_rx_fst_detach(pdev->soc, pdev);
5925 	dp_rx_pdev_buffers_free(pdev);
5926 	dp_rx_pdev_desc_pool_deinit(pdev);
5927 	dp_pdev_bkp_stats_detach(pdev);
5928 	qdf_event_destroy(&pdev->fw_peer_stats_event);
5929 	qdf_event_destroy(&pdev->fw_stats_event);
5930 	qdf_event_destroy(&pdev->fw_obss_stats_event);
5931 	if (pdev->sojourn_buf)
5932 		qdf_nbuf_free(pdev->sojourn_buf);
5933 
5934 	dp_pdev_flush_pending_vdevs(pdev);
5935 	dp_tx_desc_flush(pdev, NULL, true);
5936 
5937 	qdf_spinlock_destroy(&pdev->tx_mutex);
5938 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
5939 
5940 	dp_monitor_pdev_deinit(pdev);
5941 
5942 	dp_pdev_srng_deinit(pdev);
5943 
5944 	dp_ipa_uc_detach(pdev->soc, pdev);
5945 	dp_deinit_ipa_rx_alt_refill_buf_ring(pdev->soc, pdev);
5946 	dp_deinit_ipa_rx_refill_buf_ring(pdev->soc, pdev);
5947 	dp_rxdma_ring_cleanup(pdev->soc, pdev);
5948 
5949 	curr_nbuf = pdev->invalid_peer_head_msdu;
5950 	while (curr_nbuf) {
5951 		next_nbuf = qdf_nbuf_next(curr_nbuf);
5952 		dp_rx_nbuf_free(curr_nbuf);
5953 		curr_nbuf = next_nbuf;
5954 	}
5955 	pdev->invalid_peer_head_msdu = NULL;
5956 	pdev->invalid_peer_tail_msdu = NULL;
5957 
5958 	dp_wdi_event_detach(pdev);
5959 	pdev->pdev_deinit = 1;
5960 }
5961 
5962 /**
5963  * dp_pdev_deinit_wifi3() - Deinit txrx pdev
5964  * @psoc: Datapath psoc handle
5965  * @pdev_id: Id of datapath PDEV handle
5966  * @force: Force deinit
5967  *
5968  * Return: QDF_STATUS
5969  */
5970 static QDF_STATUS
5971 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
5972 		     int force)
5973 {
5974 	struct dp_pdev *txrx_pdev;
5975 
5976 	txrx_pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
5977 						       pdev_id);
5978 
5979 	if (!txrx_pdev)
5980 		return QDF_STATUS_E_FAILURE;
5981 
5982 	dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force);
5983 
5984 	return QDF_STATUS_SUCCESS;
5985 }
5986 
5987 /*
5988  * dp_pdev_post_attach() - Do post pdev attach after dev_alloc_name
5989  * @txrx_pdev: Datapath PDEV handle
5990  *
5991  * Return: None
5992  */
5993 static void dp_pdev_post_attach(struct cdp_pdev *txrx_pdev)
5994 {
5995 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
5996 
5997 	dp_monitor_tx_capture_debugfs_init(pdev);
5998 
5999 	if (dp_pdev_htt_stats_dbgfs_init(pdev)) {
6000 		dp_init_err("%pK: Failed to initialize pdev HTT stats debugfs", pdev->soc);
6001 	}
6002 }
6003 
6004 /*
6005  * dp_pdev_post_attach_wifi3() - attach txrx pdev post
6006  * @psoc: Datapath soc handle
6007  * @pdev_id: pdev id of pdev
6008  *
6009  * Return: QDF_STATUS
6010  */
6011 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *soc,
6012 				     uint8_t pdev_id)
6013 {
6014 	struct dp_pdev *pdev;
6015 
6016 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
6017 						  pdev_id);
6018 
6019 	if (!pdev) {
6020 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
6021 			    (struct dp_soc *)soc, pdev_id);
6022 		return QDF_STATUS_E_FAILURE;
6023 	}
6024 
6025 	dp_pdev_post_attach((struct cdp_pdev *)pdev);
6026 	return QDF_STATUS_SUCCESS;
6027 }
6028 
6029 /*
6030  * dp_pdev_detach() - Complete rest of pdev detach
6031  * @txrx_pdev: Datapath PDEV handle
6032  * @force: Force deinit
6033  *
6034  * Return: None
6035  */
6036 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
6037 {
6038 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
6039 	struct dp_soc *soc = pdev->soc;
6040 
6041 	dp_pdev_htt_stats_dbgfs_deinit(pdev);
6042 	dp_rx_pdev_desc_pool_free(pdev);
6043 	dp_monitor_pdev_detach(pdev);
6044 	dp_rxdma_ring_free(pdev);
6045 	dp_free_ipa_rx_refill_buf_ring(soc, pdev);
6046 	dp_free_ipa_rx_alt_refill_buf_ring(soc, pdev);
6047 	dp_pdev_srng_free(pdev);
6048 
6049 	soc->pdev_count--;
6050 	soc->pdev_list[pdev->pdev_id] = NULL;
6051 
6052 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
6053 	wlan_minidump_remove(pdev, sizeof(*pdev), soc->ctrl_psoc,
6054 			     WLAN_MD_DP_PDEV, "dp_pdev");
6055 	dp_context_free_mem(soc, DP_PDEV_TYPE, pdev);
6056 }
6057 
6058 /*
6059  * dp_pdev_detach_wifi3() - detach txrx pdev
6060  * @psoc: Datapath soc handle
6061  * @pdev_id: pdev id of pdev
6062  * @force: Force detach
6063  *
6064  * Return: QDF_STATUS
6065  */
6066 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
6067 				       int force)
6068 {
6069 	struct dp_pdev *pdev;
6070 	struct dp_soc *soc = (struct dp_soc *)psoc;
6071 
6072 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
6073 						  pdev_id);
6074 
6075 	if (!pdev) {
6076 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
6077 			    (struct dp_soc *)psoc, pdev_id);
6078 		return QDF_STATUS_E_FAILURE;
6079 	}
6080 
6081 	soc->arch_ops.txrx_pdev_detach(pdev);
6082 
6083 	dp_pdev_detach((struct cdp_pdev *)pdev, force);
6084 	return QDF_STATUS_SUCCESS;
6085 }
6086 
6087 /*
6088  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
6089  * @soc: DP SOC handle
6090  */
6091 #ifndef DP_UMAC_HW_RESET_SUPPORT
6092 static inline
6093 #endif
6094 void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
6095 {
6096 	struct reo_desc_list_node *desc;
6097 	struct dp_rx_tid *rx_tid;
6098 
6099 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
6100 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
6101 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
6102 		rx_tid = &desc->rx_tid;
6103 		qdf_mem_unmap_nbytes_single(soc->osdev,
6104 			rx_tid->hw_qdesc_paddr,
6105 			QDF_DMA_BIDIRECTIONAL,
6106 			rx_tid->hw_qdesc_alloc_size);
6107 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
6108 		qdf_mem_free(desc);
6109 	}
6110 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
6111 	qdf_list_destroy(&soc->reo_desc_freelist);
6112 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
6113 }
6114 
#ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
/*
 * dp_reo_desc_deferred_freelist_create() - Initialize the resources used
 *                                          for deferred reo desc list
 * @soc: Datapath soc handle
 *
 * Return: void
 */
static void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
{
	qdf_spinlock_create(&soc->reo_desc_deferred_freelist_lock);
	qdf_list_create(&soc->reo_desc_deferred_freelist,
			REO_DESC_DEFERRED_FREELIST_SIZE);
	/* Publish readiness only after the lock and list are usable */
	soc->reo_desc_deferred_freelist_init = true;
}
6130 
6131 /*
6132  * dp_reo_desc_deferred_freelist_destroy() - loop the deferred free list &
6133  *                                           free the leftover REO QDESCs
6134  * @psoc: Datapath soc handle
6135  *
6136  * Return: void
6137  */
6138 static void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
6139 {
6140 	struct reo_desc_deferred_freelist_node *desc;
6141 
6142 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
6143 	soc->reo_desc_deferred_freelist_init = false;
6144 	while (qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
6145 	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
6146 		qdf_mem_unmap_nbytes_single(soc->osdev,
6147 					    desc->hw_qdesc_paddr,
6148 					    QDF_DMA_BIDIRECTIONAL,
6149 					    desc->hw_qdesc_alloc_size);
6150 		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
6151 		qdf_mem_free(desc);
6152 	}
6153 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
6154 
6155 	qdf_list_destroy(&soc->reo_desc_deferred_freelist);
6156 	qdf_spinlock_destroy(&soc->reo_desc_deferred_freelist_lock);
6157 }
6158 #else
6159 static inline void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
6160 {
6161 }
6162 
6163 static inline void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
6164 {
6165 }
6166 #endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
6167 
6168 /*
6169  * dp_soc_reset_txrx_ring_map() - reset tx ring map
6170  * @soc: DP SOC handle
6171  *
6172  */
6173 static void dp_soc_reset_txrx_ring_map(struct dp_soc *soc)
6174 {
6175 	uint32_t i;
6176 
6177 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
6178 		soc->tx_ring_map[i] = 0;
6179 }
6180 
6181 /*
6182  * dp_soc_print_inactive_objects() - prints inactive peer and vdev list
6183  * @soc: DP SOC handle
6184  *
6185  */
6186 static void dp_soc_print_inactive_objects(struct dp_soc *soc)
6187 {
6188 	struct dp_peer *peer = NULL;
6189 	struct dp_peer *tmp_peer = NULL;
6190 	struct dp_vdev *vdev = NULL;
6191 	struct dp_vdev *tmp_vdev = NULL;
6192 	int i = 0;
6193 	uint32_t count;
6194 
6195 	if (TAILQ_EMPTY(&soc->inactive_peer_list) &&
6196 	    TAILQ_EMPTY(&soc->inactive_vdev_list))
6197 		return;
6198 
6199 	TAILQ_FOREACH_SAFE(peer, &soc->inactive_peer_list,
6200 			   inactive_list_elem, tmp_peer) {
6201 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
6202 			count = qdf_atomic_read(&peer->mod_refs[i]);
6203 			if (count)
6204 				DP_PRINT_STATS("peer %pK Module id %u ==> %u",
6205 					       peer, i, count);
6206 		}
6207 	}
6208 
6209 	TAILQ_FOREACH_SAFE(vdev, &soc->inactive_vdev_list,
6210 			   inactive_list_elem, tmp_vdev) {
6211 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
6212 			count = qdf_atomic_read(&vdev->mod_refs[i]);
6213 			if (count)
6214 				DP_PRINT_STATS("vdev %pK Module id %u ==> %u",
6215 					       vdev, i, count);
6216 		}
6217 	}
6218 	QDF_BUG(0);
6219 }
6220 
6221 /**
6222  * dp_soc_deinit() - Deinitialize txrx SOC
6223  * @txrx_soc: Opaque DP SOC handle
6224  *
6225  * Return: None
6226  */
6227 static void dp_soc_deinit(void *txrx_soc)
6228 {
6229 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6230 	struct htt_soc *htt_soc = soc->htt_handle;
6231 
6232 	qdf_atomic_set(&soc->cmn_init_done, 0);
6233 
6234 	soc->arch_ops.txrx_soc_deinit(soc);
6235 
6236 	dp_monitor_soc_deinit(soc);
6237 
6238 	/* free peer tables & AST tables allocated during peer_map_attach */
6239 	if (soc->peer_map_attach_success) {
6240 		dp_peer_find_detach(soc);
6241 		soc->arch_ops.txrx_peer_map_detach(soc);
6242 		soc->peer_map_attach_success = FALSE;
6243 	}
6244 
6245 	qdf_flush_work(&soc->htt_stats.work);
6246 	qdf_disable_work(&soc->htt_stats.work);
6247 
6248 	qdf_spinlock_destroy(&soc->htt_stats.lock);
6249 
6250 	dp_soc_reset_txrx_ring_map(soc);
6251 
6252 	dp_reo_desc_freelist_destroy(soc);
6253 	dp_reo_desc_deferred_freelist_destroy(soc);
6254 
6255 	DEINIT_RX_HW_STATS_LOCK(soc);
6256 
6257 	qdf_spinlock_destroy(&soc->ast_lock);
6258 
6259 	dp_peer_mec_spinlock_destroy(soc);
6260 
6261 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
6262 
6263 	qdf_nbuf_queue_free(&soc->invalid_buf_queue);
6264 
6265 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
6266 
6267 	qdf_spinlock_destroy(&soc->vdev_map_lock);
6268 
6269 	dp_reo_cmdlist_destroy(soc);
6270 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
6271 
6272 	dp_soc_tx_desc_sw_pools_deinit(soc);
6273 
6274 	dp_soc_srng_deinit(soc);
6275 
6276 	dp_hw_link_desc_ring_deinit(soc);
6277 
6278 	dp_soc_print_inactive_objects(soc);
6279 	qdf_spinlock_destroy(&soc->inactive_peer_list_lock);
6280 	qdf_spinlock_destroy(&soc->inactive_vdev_list_lock);
6281 
6282 	htt_soc_htc_dealloc(soc->htt_handle);
6283 
6284 	htt_soc_detach(htt_soc);
6285 
6286 	/* Free wbm sg list and reset flags in down path */
6287 	dp_rx_wbm_sg_list_deinit(soc);
6288 
6289 	wlan_minidump_remove(soc, sizeof(*soc), soc->ctrl_psoc,
6290 			     WLAN_MD_DP_SOC, "dp_soc");
6291 }
6292 
6293 /**
6294  * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
6295  * @txrx_soc: Opaque DP SOC handle
6296  *
6297  * Return: None
6298  */
6299 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc)
6300 {
6301 	dp_soc_deinit(txrx_soc);
6302 }
6303 
6304 /*
6305  * dp_soc_detach() - Detach rest of txrx SOC
6306  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
6307  *
6308  * Return: None
6309  */
6310 static void dp_soc_detach(struct cdp_soc_t *txrx_soc)
6311 {
6312 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6313 
6314 	soc->arch_ops.txrx_soc_detach(soc);
6315 
6316 	dp_runtime_deinit();
6317 
6318 	dp_sysfs_deinitialize_stats(soc);
6319 	dp_soc_swlm_detach(soc);
6320 	dp_soc_tx_desc_sw_pools_free(soc);
6321 	dp_soc_srng_free(soc);
6322 	dp_hw_link_desc_ring_free(soc);
6323 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
6324 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
6325 	dp_soc_tx_hw_desc_history_detach(soc);
6326 	dp_soc_tx_history_detach(soc);
6327 	dp_soc_mon_status_ring_history_detach(soc);
6328 	dp_soc_rx_history_detach(soc);
6329 
6330 	if (!dp_monitor_modularized_enable()) {
6331 		dp_mon_soc_detach_wrapper(soc);
6332 	}
6333 
6334 	qdf_mem_free(soc->cdp_soc.ops);
6335 	qdf_mem_free(soc);
6336 }
6337 
6338 /*
6339  * dp_soc_detach_wifi3() - Detach txrx SOC
6340  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
6341  *
6342  * Return: None
6343  */
6344 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc)
6345 {
6346 	dp_soc_detach(txrx_soc);
6347 }
6348 
6349 /*
6350  * dp_rxdma_ring_config() - configure the RX DMA rings
6351  *
6352  * This function is used to configure the MAC rings.
6353  * On MCL host provides buffers in Host2FW ring
6354  * FW refills (copies) buffers to the ring and updates
6355  * ring_idx in register
6356  *
6357  * @soc: data path SoC handle
6358  *
6359  * Return: zero on success, non-zero on failure
6360  */
6361 #ifdef QCA_HOST2FW_RXBUF_RING
6362 static inline void
6363 dp_htt_setup_rxdma_err_dst_ring(struct dp_soc *soc, int mac_id,
6364 				int lmac_id)
6365 {
6366 	if (soc->rxdma_err_dst_ring[lmac_id].hal_srng)
6367 		htt_srng_setup(soc->htt_handle, mac_id,
6368 			       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
6369 			       RXDMA_DST);
6370 }
6371 
#ifdef IPA_WDI3_VLAN_SUPPORT
/* Send the HTT setup message for the third RX refill ring (IPA WDI3
 * VLAN support), if that ring was allocated.
 * @soc: data path SoC handle
 * @pdev: physical device handle
 * @idx: pdev/mac index to program
 */
static inline
void dp_rxdma_setup_refill_ring3(struct dp_soc *soc,
				 struct dp_pdev *pdev,
				 uint8_t idx)
{
	/* Only set up the ring when it was actually allocated */
	if (pdev->rx_refill_buf_ring3.hal_srng)
		htt_srng_setup(soc->htt_handle, idx,
			       pdev->rx_refill_buf_ring3.hal_srng,
			       RXDMA_BUF);
}
#else
/* No-op when IPA WDI3 VLAN support is not compiled in */
static inline
void dp_rxdma_setup_refill_ring3(struct dp_soc *soc,
				 struct dp_pdev *pdev,
				 uint8_t idx)
{ }
#endif
6390 
/* HOST2FW variant: host provides RX buffers via Host2FW rings, so both
 * the soc refill rings and the per-mac buffer rings are programmed here.
 */
static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (pdev) {
			int mac_id;
			int max_mac_rings =
				 wlan_cfg_get_num_mac_rings
				(pdev->wlan_cfg_ctx);
			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);

			/* Host-owned RX refill ring for this pdev's lmac */
			htt_srng_setup(soc->htt_handle, i,
				       soc->rx_refill_buf_ring[lmac_id]
				       .hal_srng,
				       RXDMA_BUF);

			/* The second refill ring is optional */
			if (pdev->rx_refill_buf_ring2.hal_srng)
				htt_srng_setup(soc->htt_handle, i,
					       pdev->rx_refill_buf_ring2
					       .hal_srng,
					       RXDMA_BUF);

			dp_rxdma_setup_refill_ring3(soc, pdev, i);

			dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);
			dp_err("pdev_id %d max_mac_rings %d",
			       pdev->pdev_id, max_mac_rings);

			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev =
					dp_get_mac_id_for_pdev(mac_id,
							       pdev->pdev_id);
				/*
				 * Obtain lmac id from pdev to access the LMAC
				 * ring in soc context
				 */
				lmac_id =
				dp_get_lmac_id_for_pdev_id(soc,
							   mac_id,
							   pdev->pdev_id);
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					 QDF_TRACE_LEVEL_ERROR,
					 FL("mac_id %d"), mac_for_pdev);

				/* Host2FW ring: host supplies RX buffers,
				 * FW refills and updates the ring index
				 */
				htt_srng_setup(soc->htt_handle, mac_for_pdev,
					 pdev->rx_mac_buf_ring[mac_id]
						.hal_srng,
					 RXDMA_BUF);

				if (!soc->rxdma2sw_rings_not_supported)
					dp_htt_setup_rxdma_err_dst_ring(soc,
						mac_for_pdev, lmac_id);

				/* Configure monitor mode rings */
				status = dp_monitor_htt_srng_setup(soc, pdev,
								   lmac_id,
								   mac_for_pdev);
				if (status != QDF_STATUS_SUCCESS) {
					dp_err("Failed to send htt monitor messages to target");
					return status;
				}

			}
		}
	}

	dp_reap_timer_init(soc);
	return status;
}
6464 #else
/* This is only for WIN */
static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	int mac_for_pdev;
	int lmac_id;

	/* Configure monitor mode rings */
	dp_monitor_soc_htt_srng_setup(soc);

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev =  soc->pdev_list[i];

		if (!pdev)
			continue;

		/* On WIN the pdev index maps directly to the target mac id */
		mac_for_pdev = i;
		lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);

		if (soc->rx_refill_buf_ring[lmac_id].hal_srng)
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				       soc->rx_refill_buf_ring[lmac_id].
				       hal_srng, RXDMA_BUF);

		/* Configure monitor mode rings */
		dp_monitor_htt_srng_setup(soc, pdev,
					  lmac_id,
					  mac_for_pdev);
		if (!soc->rxdma2sw_rings_not_supported)
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
				       RXDMA_DST);
	}

	dp_reap_timer_init(soc);
	return status;
}
#endif
6504 
6505 /*
6506  * dp_rx_target_fst_config() - configure the RXOLE Flow Search Engine
6507  *
6508  * This function is used to configure the FSE HW block in RX OLE on a
6509  * per pdev basis. Here, we will be programming parameters related to
6510  * the Flow Search Table.
6511  *
6512  * @soc: data path SoC handle
6513  *
6514  * Return: zero on success, non-zero on failure
6515  */
6516 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
6517 static QDF_STATUS
6518 dp_rx_target_fst_config(struct dp_soc *soc)
6519 {
6520 	int i;
6521 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6522 
6523 	for (i = 0; i < MAX_PDEV_CNT; i++) {
6524 		struct dp_pdev *pdev = soc->pdev_list[i];
6525 
6526 		/* Flow search is not enabled if NSS offload is enabled */
6527 		if (pdev &&
6528 		    !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
6529 			status = dp_rx_flow_send_fst_fw_setup(pdev->soc, pdev);
6530 			if (status != QDF_STATUS_SUCCESS)
6531 				break;
6532 		}
6533 	}
6534 	return status;
6535 }
6536 #elif defined(WLAN_SUPPORT_RX_FISA)
6537 /**
6538  * dp_rx_target_fst_config() - Configure RX OLE FSE engine in HW
6539  * @soc: SoC handle
6540  *
6541  * Return: Success
6542  */
6543 static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
6544 {
6545 	QDF_STATUS status;
6546 	struct dp_rx_fst *fst = soc->rx_fst;
6547 
6548 	/* Check if it is enabled in the INI */
6549 	if (!soc->fisa_enable) {
6550 		dp_err("RX FISA feature is disabled");
6551 		return QDF_STATUS_E_NOSUPPORT;
6552 	}
6553 
6554 	status = dp_rx_flow_send_fst_fw_setup(soc, soc->pdev_list[0]);
6555 	if (QDF_IS_STATUS_ERROR(status)) {
6556 		dp_err("dp_rx_flow_send_fst_fw_setup failed %d",
6557 		       status);
6558 		return status;
6559 	}
6560 
6561 	if (soc->fst_cmem_base) {
6562 		soc->fst_in_cmem = true;
6563 		dp_rx_fst_update_cmem_params(soc, fst->max_entries,
6564 					     soc->fst_cmem_base & 0xffffffff,
6565 					     soc->fst_cmem_base >> 32);
6566 	}
6567 	return status;
6568 }
6569 
6570 #define FISA_MAX_TIMEOUT 0xffffffff
6571 #define FISA_DISABLE_TIMEOUT 0
6572 static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
6573 {
6574 	struct dp_htt_rx_fisa_cfg fisa_config;
6575 
6576 	fisa_config.pdev_id = 0;
6577 	fisa_config.fisa_timeout = FISA_MAX_TIMEOUT;
6578 
6579 	return dp_htt_rx_fisa_config(soc->pdev_list[0], &fisa_config);
6580 }
6581 
#else /* !WLAN_SUPPORT_RX_FISA */
/* FST configuration is a no-op without flow tag or FISA support */
static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* !WLAN_SUPPORT_RX_FISA */

#ifndef WLAN_SUPPORT_RX_FISA
/* Stubs so callers need not guard each FISA call site with #ifdefs */
static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_rx_dump_fisa_stats(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_rx_dump_fisa_table(struct dp_soc *soc)
{
}

static void dp_suspend_fse_cache_flush(struct dp_soc *soc)
{
}

static void dp_resume_fse_cache_flush(struct dp_soc *soc)
{
}
#endif /* !WLAN_SUPPORT_RX_FISA */
6612 
#ifndef WLAN_DP_FEATURE_SW_LATENCY_MGR
/* Stub: SW latency manager stats are unavailable without the feature */
static inline QDF_STATUS dp_print_swlm_stats(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* !WLAN_DP_FEATURE_SW_LATENCY_MGR */
6619 
6620 #ifdef WLAN_SUPPORT_PPEDS
6621 /*
6622  * dp_soc_target_ppe_rxole_rxdma_cfg() - Configure the RxOLe and RxDMA for PPE
6623  * @soc: DP Tx/Rx handle
6624  *
6625  * Return: QDF_STATUS
6626  */
6627 static
6628 QDF_STATUS dp_soc_target_ppe_rxole_rxdma_cfg(struct dp_soc *soc)
6629 {
6630 	struct dp_htt_rxdma_rxole_ppe_config htt_cfg = {0};
6631 	QDF_STATUS status;
6632 
6633 	/*
6634 	 * Program RxDMA to override the reo destination indication
6635 	 * with REO2PPE_DST_IND, when use_ppe is set to 1 in RX_MSDU_END,
6636 	 * thereby driving the packet to REO2PPE ring.
6637 	 * If the MSDU is spanning more than 1 buffer, then this
6638 	 * override is not done.
6639 	 */
6640 	htt_cfg.override = 1;
6641 	htt_cfg.reo_destination_indication = REO2PPE_DST_IND;
6642 	htt_cfg.multi_buffer_msdu_override_en = 0;
6643 
6644 	/*
6645 	 * Override use_ppe to 0 in RxOLE for the following
6646 	 * cases.
6647 	 */
6648 	htt_cfg.intra_bss_override = 1;
6649 	htt_cfg.decap_raw_override = 1;
6650 	htt_cfg.decap_nwifi_override = 1;
6651 	htt_cfg.ip_frag_override = 1;
6652 
6653 	status = dp_htt_rxdma_rxole_ppe_cfg_set(soc, &htt_cfg);
6654 	if (status != QDF_STATUS_SUCCESS)
6655 		dp_err("RxOLE and RxDMA PPE config failed %d", status);
6656 
6657 	return status;
6658 }
#else
/* PPE DS not compiled in: nothing to program, report success */
static inline
QDF_STATUS dp_soc_target_ppe_rxole_rxdma_cfg(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* WLAN_SUPPORT_PPEDS */
6666 
#ifdef DP_UMAC_HW_RESET_SUPPORT
/* Register a callback for each stage of the UMAC HW reset sequence:
 * pre-reset, post-reset start, and post-reset complete.
 */
static void dp_register_umac_reset_handlers(struct dp_soc *soc)
{
	dp_umac_reset_register_rx_action_callback(soc,
		dp_umac_reset_handle_pre_reset, UMAC_RESET_ACTION_DO_PRE_RESET);

	dp_umac_reset_register_rx_action_callback(soc,
					dp_umac_reset_handle_post_reset,
					UMAC_RESET_ACTION_DO_POST_RESET_START);

	dp_umac_reset_register_rx_action_callback(soc,
				dp_umac_reset_handle_post_reset_complete,
				UMAC_RESET_ACTION_DO_POST_RESET_COMPLETE);

}
#else
/* No-op when UMAC HW reset support is not compiled in */
static void dp_register_umac_reset_handlers(struct dp_soc *soc)
{
}
#endif
6687 /*
6688  * dp_soc_attach_target_wifi3() - SOC initialization in the target
6689  * @cdp_soc: Opaque Datapath SOC handle
6690  *
6691  * Return: zero on success, non-zero on failure
6692  */
6693 static QDF_STATUS
6694 dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
6695 {
6696 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
6697 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6698 	struct hal_reo_params reo_params;
6699 
6700 	htt_soc_attach_target(soc->htt_handle);
6701 
6702 	status = dp_soc_target_ppe_rxole_rxdma_cfg(soc);
6703 	if (status != QDF_STATUS_SUCCESS) {
6704 		dp_err("Failed to send htt RxOLE and RxDMA messages to target");
6705 		return status;
6706 	}
6707 
6708 	status = dp_rxdma_ring_config(soc);
6709 	if (status != QDF_STATUS_SUCCESS) {
6710 		dp_err("Failed to send htt srng setup messages to target");
6711 		return status;
6712 	}
6713 
6714 	status = soc->arch_ops.dp_rxdma_ring_sel_cfg(soc);
6715 	if (status != QDF_STATUS_SUCCESS) {
6716 		dp_err("Failed to send htt ring config message to target");
6717 		return status;
6718 	}
6719 
6720 	status = dp_soc_umac_reset_init(soc);
6721 	if (status != QDF_STATUS_SUCCESS &&
6722 	    status != QDF_STATUS_E_NOSUPPORT) {
6723 		dp_err("Failed to initialize UMAC reset");
6724 		return status;
6725 	}
6726 
6727 	dp_register_umac_reset_handlers(soc);
6728 
6729 	status = dp_rx_target_fst_config(soc);
6730 	if (status != QDF_STATUS_SUCCESS &&
6731 	    status != QDF_STATUS_E_NOSUPPORT) {
6732 		dp_err("Failed to send htt fst setup config message to target");
6733 		return status;
6734 	}
6735 
6736 	if (status == QDF_STATUS_SUCCESS) {
6737 		status = dp_rx_fisa_config(soc);
6738 		if (status != QDF_STATUS_SUCCESS) {
6739 			dp_err("Failed to send htt FISA config message to target");
6740 			return status;
6741 		}
6742 	}
6743 
6744 	DP_STATS_INIT(soc);
6745 
6746 	dp_runtime_init(soc);
6747 
6748 	/* Enable HW vdev offload stats if feature is supported */
6749 	dp_vdev_stats_hw_offload_target_config(soc, INVALID_PDEV_ID, true);
6750 
6751 	/* initialize work queue for stats processing */
6752 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
6753 
6754 	wlan_cfg_soc_update_tgt_params(soc->wlan_cfg_ctx,
6755 				       soc->ctrl_psoc);
6756 	/* Setup HW REO */
6757 	qdf_mem_zero(&reo_params, sizeof(reo_params));
6758 
6759 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
6760 		/*
6761 		 * Reo ring remap is not required if both radios
6762 		 * are offloaded to NSS
6763 		 */
6764 
6765 		if (soc->arch_ops.reo_remap_config(soc, &reo_params.remap0,
6766 						   &reo_params.remap1,
6767 						   &reo_params.remap2))
6768 			reo_params.rx_hash_enabled = true;
6769 		else
6770 			reo_params.rx_hash_enabled = false;
6771 	}
6772 
6773 	/*
6774 	 * set the fragment destination ring
6775 	 */
6776 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
6777 
6778 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
6779 		reo_params.alt_dst_ind_0 = REO_REMAP_RELEASE;
6780 
6781 	hal_reo_setup(soc->hal_soc, &reo_params, 1);
6782 
6783 	hal_reo_set_err_dst_remap(soc->hal_soc);
6784 
6785 	soc->features.pn_in_reo_dest = hal_reo_enable_pn_in_dest(soc->hal_soc);
6786 
6787 	return QDF_STATUS_SUCCESS;
6788 }
6789 
6790 /*
6791  * dp_vdev_id_map_tbl_add() - Add vdev into vdev_id table
6792  * @soc: SoC handle
6793  * @vdev: vdev handle
6794  * @vdev_id: vdev_id
6795  *
6796  * Return: None
6797  */
6798 static void dp_vdev_id_map_tbl_add(struct dp_soc *soc,
6799 				   struct dp_vdev *vdev,
6800 				   uint8_t vdev_id)
6801 {
6802 	QDF_ASSERT(vdev_id <= MAX_VDEV_CNT);
6803 
6804 	qdf_spin_lock_bh(&soc->vdev_map_lock);
6805 
6806 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
6807 			QDF_STATUS_SUCCESS) {
6808 		dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK vdev_id %u",
6809 			     soc, vdev, vdev_id);
6810 		qdf_spin_unlock_bh(&soc->vdev_map_lock);
6811 		return;
6812 	}
6813 
6814 	if (!soc->vdev_id_map[vdev_id])
6815 		soc->vdev_id_map[vdev_id] = vdev;
6816 	else
6817 		QDF_ASSERT(0);
6818 
6819 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
6820 }
6821 
6822 /*
6823  * dp_vdev_id_map_tbl_remove() - remove vdev from vdev_id table
6824  * @soc: SoC handle
6825  * @vdev: vdev handle
6826  *
6827  * Return: None
6828  */
6829 static void dp_vdev_id_map_tbl_remove(struct dp_soc *soc,
6830 				      struct dp_vdev *vdev)
6831 {
6832 	qdf_spin_lock_bh(&soc->vdev_map_lock);
6833 	QDF_ASSERT(soc->vdev_id_map[vdev->vdev_id] == vdev);
6834 
6835 	soc->vdev_id_map[vdev->vdev_id] = NULL;
6836 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
6837 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
6838 }
6839 
6840 /*
6841  * dp_vdev_pdev_list_add() - add vdev into pdev's list
6842  * @soc: soc handle
6843  * @pdev: pdev handle
6844  * @vdev: vdev handle
6845  *
6846  * return: none
6847  */
6848 static void dp_vdev_pdev_list_add(struct dp_soc *soc,
6849 				  struct dp_pdev *pdev,
6850 				  struct dp_vdev *vdev)
6851 {
6852 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
6853 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
6854 			QDF_STATUS_SUCCESS) {
6855 		dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK",
6856 			     soc, vdev);
6857 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6858 		return;
6859 	}
6860 	/* add this vdev into the pdev's list */
6861 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
6862 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6863 }
6864 
6865 /*
6866  * dp_vdev_pdev_list_remove() - remove vdev from pdev's list
6867  * @soc: SoC handle
6868  * @pdev: pdev handle
6869  * @vdev: VDEV handle
6870  *
6871  * Return: none
6872  */
6873 static void dp_vdev_pdev_list_remove(struct dp_soc *soc,
6874 				     struct dp_pdev *pdev,
6875 				     struct dp_vdev *vdev)
6876 {
6877 	uint8_t found = 0;
6878 	struct dp_vdev *tmpvdev = NULL;
6879 
6880 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
6881 	TAILQ_FOREACH(tmpvdev, &pdev->vdev_list, vdev_list_elem) {
6882 		if (tmpvdev == vdev) {
6883 			found = 1;
6884 			break;
6885 		}
6886 	}
6887 
6888 	if (found) {
6889 		TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
6890 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
6891 	} else {
6892 		dp_vdev_debug("%pK: vdev:%pK not found in pdev:%pK vdevlist:%pK",
6893 			      soc, vdev, pdev, &pdev->vdev_list);
6894 		QDF_ASSERT(0);
6895 	}
6896 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6897 }
6898 
#ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
/*
 * dp_vdev_init_rx_eapol() - initializing osif_rx_eapol
 * @vdev: Datapath VDEV handle
 *
 * Return: None
 */
static inline void dp_vdev_init_rx_eapol(struct dp_vdev *vdev)
{
	/* Cleared until the OS interface registers its EAPOL RX handler */
	vdev->osif_rx_eapol = NULL;
}

/*
 * dp_vdev_register_rx_eapol() - Register VDEV operations for rx_eapol
 * @vdev: DP vdev handle
 * @txrx_ops: Tx and Rx operations
 *
 * Return: None
 */
static inline void dp_vdev_register_rx_eapol(struct dp_vdev *vdev,
					     struct ol_txrx_ops *txrx_ops)
{
	vdev->osif_rx_eapol = txrx_ops->rx.rx_eapol;
}
#else
/* EAPOL-over-control-port disabled: both helpers are no-ops */
static inline void dp_vdev_init_rx_eapol(struct dp_vdev *vdev)
{
}

static inline void dp_vdev_register_rx_eapol(struct dp_vdev *vdev,
					     struct ol_txrx_ops *txrx_ops)
{
}
#endif
6933 
6934 #ifdef WLAN_FEATURE_11BE_MLO
6935 #if defined(WLAN_MLO_MULTI_CHIP) && defined(WLAN_MCAST_MLO)
6936 static inline void dp_vdev_save_mld_info(struct dp_vdev *vdev,
6937 					 struct cdp_vdev_info *vdev_info)
6938 {
6939 	if (qdf_is_macaddr_zero((struct qdf_mac_addr *)vdev_info->mld_mac_addr))
6940 		vdev->mlo_vdev = false;
6941 	else
6942 		vdev->mlo_vdev = true;
6943 }
#else
/* Per-chip MLO marking is only needed for multi-chip MLO mcast builds */
static inline void dp_vdev_save_mld_info(struct dp_vdev *vdev,
					 struct cdp_vdev_info *vdev_info)
{
}
#endif
/* Cache the MLD MAC address (when provided) in the vdev and derive the
 * MLO-related vdev state from it.
 */
static inline void dp_vdev_save_mld_addr(struct dp_vdev *vdev,
					 struct cdp_vdev_info *vdev_info)
{
	if (vdev_info->mld_mac_addr)
		qdf_mem_copy(&vdev->mld_mac_addr.raw[0],
			     vdev_info->mld_mac_addr, QDF_MAC_ADDR_SIZE);

	dp_vdev_save_mld_info(vdev, vdev_info);

}
#else
/* Non-11be build: no MLD address to record */
static inline void dp_vdev_save_mld_addr(struct dp_vdev *vdev,
					 struct cdp_vdev_info *vdev_info)
{

}
#endif
6967 
#ifdef DP_TRAFFIC_END_INDICATION
/*
 * dp_tx_traffic_end_indication_attach() - Initialize data end indication
 *                                         related members in VDEV
 * @vdev: DP vdev handle
 *
 * Return: None
 */
static inline void
dp_tx_vdev_traffic_end_indication_attach(struct dp_vdev *vdev)
{
	qdf_nbuf_queue_init(&vdev->end_ind_pkt_q);
}

/*
 * dp_tx_vdev_traffic_end_indication_detach() - De-init data end indication
 *                                              related members in VDEV
 * @vdev: DP vdev handle
 *
 * Return: None
 */
static inline void
dp_tx_vdev_traffic_end_indication_detach(struct dp_vdev *vdev)
{
	qdf_nbuf_t nbuf;

	/* Free any end-indication packets still queued on this vdev */
	while ((nbuf = qdf_nbuf_queue_remove(&vdev->end_ind_pkt_q)) != NULL)
		qdf_nbuf_free(nbuf);
}
#else
static inline void
dp_tx_vdev_traffic_end_indication_attach(struct dp_vdev *vdev)
{}

static inline void
dp_tx_vdev_traffic_end_indication_detach(struct dp_vdev *vdev)
{}
#endif
7006 
7007 /*
7008 * dp_vdev_attach_wifi3() - attach txrx vdev
7009 * @txrx_pdev: Datapath PDEV handle
7010 * @pdev_id: PDEV ID for vdev creation
7011 * @vdev_info: parameters used for vdev creation
7012 *
7013 * Return: status
7014 */
7015 static QDF_STATUS dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc,
7016 				       uint8_t pdev_id,
7017 				       struct cdp_vdev_info *vdev_info)
7018 {
7019 	int i = 0;
7020 	qdf_size_t vdev_context_size;
7021 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
7022 	struct dp_pdev *pdev =
7023 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7024 						   pdev_id);
7025 	struct dp_vdev *vdev;
7026 	uint8_t *vdev_mac_addr = vdev_info->vdev_mac_addr;
7027 	uint8_t vdev_id = vdev_info->vdev_id;
7028 	enum wlan_op_mode op_mode = vdev_info->op_mode;
7029 	enum wlan_op_subtype subtype = vdev_info->subtype;
7030 	uint8_t vdev_stats_id = vdev_info->vdev_stats_id;
7031 
7032 	vdev_context_size =
7033 		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_VDEV);
7034 	vdev = qdf_mem_malloc(vdev_context_size);
7035 
7036 	if (!pdev) {
7037 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
7038 			    cdp_soc, pdev_id);
7039 		qdf_mem_free(vdev);
7040 		goto fail0;
7041 	}
7042 
7043 	if (!vdev) {
7044 		dp_init_err("%pK: DP VDEV memory allocation failed",
7045 			    cdp_soc);
7046 		goto fail0;
7047 	}
7048 
7049 	wlan_minidump_log(vdev, sizeof(*vdev), soc->ctrl_psoc,
7050 			  WLAN_MD_DP_VDEV, "dp_vdev");
7051 
7052 	vdev->pdev = pdev;
7053 	vdev->vdev_id = vdev_id;
7054 	vdev->vdev_stats_id = vdev_stats_id;
7055 	vdev->opmode = op_mode;
7056 	vdev->subtype = subtype;
7057 	vdev->osdev = soc->osdev;
7058 
7059 	vdev->osif_rx = NULL;
7060 	vdev->osif_rsim_rx_decap = NULL;
7061 	vdev->osif_get_key = NULL;
7062 	vdev->osif_tx_free_ext = NULL;
7063 	vdev->osif_vdev = NULL;
7064 
7065 	vdev->delete.pending = 0;
7066 	vdev->safemode = 0;
7067 	vdev->drop_unenc = 1;
7068 	vdev->sec_type = cdp_sec_type_none;
7069 	vdev->multipass_en = false;
7070 	vdev->wrap_vdev = false;
7071 	dp_vdev_init_rx_eapol(vdev);
7072 	qdf_atomic_init(&vdev->ref_cnt);
7073 	for (i = 0; i < DP_MOD_ID_MAX; i++)
7074 		qdf_atomic_init(&vdev->mod_refs[i]);
7075 
7076 	/* Take one reference for create*/
7077 	qdf_atomic_inc(&vdev->ref_cnt);
7078 	qdf_atomic_inc(&vdev->mod_refs[DP_MOD_ID_CONFIG]);
7079 	vdev->num_peers = 0;
7080 #ifdef notyet
7081 	vdev->filters_num = 0;
7082 #endif
7083 	vdev->lmac_id = pdev->lmac_id;
7084 
7085 	qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE);
7086 
7087 	dp_vdev_save_mld_addr(vdev, vdev_info);
7088 
7089 	/* TODO: Initialize default HTT meta data that will be used in
7090 	 * TCL descriptors for packets transmitted from this VDEV
7091 	 */
7092 
7093 	qdf_spinlock_create(&vdev->peer_list_lock);
7094 	TAILQ_INIT(&vdev->peer_list);
7095 	dp_peer_multipass_list_init(vdev);
7096 	if ((soc->intr_mode == DP_INTR_POLL) &&
7097 	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
7098 		if ((pdev->vdev_count == 0) ||
7099 		    (wlan_op_mode_monitor == vdev->opmode))
7100 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7101 	} else if (dp_soc_get_con_mode(soc) == QDF_GLOBAL_MISSION_MODE &&
7102 		   soc->intr_mode == DP_INTR_MSI &&
7103 		   wlan_op_mode_monitor == vdev->opmode) {
7104 		/* Timer to reap status ring in mission mode */
7105 		dp_monitor_vdev_timer_start(soc);
7106 	}
7107 
7108 	dp_vdev_id_map_tbl_add(soc, vdev, vdev_id);
7109 
7110 	if (wlan_op_mode_monitor == vdev->opmode) {
7111 		if (dp_monitor_vdev_attach(vdev) == QDF_STATUS_SUCCESS) {
7112 			dp_monitor_pdev_set_mon_vdev(vdev);
7113 			return dp_monitor_vdev_set_monitor_mode_buf_rings(pdev);
7114 		}
7115 		return QDF_STATUS_E_FAILURE;
7116 	}
7117 
7118 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
7119 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
7120 	vdev->dscp_tid_map_id = 0;
7121 	vdev->mcast_enhancement_en = 0;
7122 	vdev->igmp_mcast_enhanc_en = 0;
7123 	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
7124 	vdev->prev_tx_enq_tstamp = 0;
7125 	vdev->prev_rx_deliver_tstamp = 0;
7126 	vdev->skip_sw_tid_classification = DP_TX_HW_DSCP_TID_MAP_VALID;
7127 	dp_tx_vdev_traffic_end_indication_attach(vdev);
7128 
7129 	dp_vdev_pdev_list_add(soc, pdev, vdev);
7130 	pdev->vdev_count++;
7131 
7132 	if (wlan_op_mode_sta != vdev->opmode &&
7133 	    wlan_op_mode_ndi != vdev->opmode)
7134 		vdev->ap_bridge_enabled = true;
7135 	else
7136 		vdev->ap_bridge_enabled = false;
7137 	dp_init_info("%pK: wlan_cfg_ap_bridge_enabled %d",
7138 		     cdp_soc, vdev->ap_bridge_enabled);
7139 
7140 	dp_tx_vdev_attach(vdev);
7141 
7142 	dp_monitor_vdev_attach(vdev);
7143 	if (!pdev->is_lro_hash_configured) {
7144 		if (QDF_IS_STATUS_SUCCESS(dp_lro_hash_setup(soc, pdev)))
7145 			pdev->is_lro_hash_configured = true;
7146 		else
7147 			dp_err("LRO hash setup failure!");
7148 	}
7149 
7150 	dp_info("Created vdev %pK ("QDF_MAC_ADDR_FMT")", vdev,
7151 		QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
7152 	DP_STATS_INIT(vdev);
7153 
7154 	if (QDF_IS_STATUS_ERROR(soc->arch_ops.txrx_vdev_attach(soc, vdev)))
7155 		goto fail0;
7156 
7157 	if (wlan_op_mode_sta == vdev->opmode)
7158 		dp_peer_create_wifi3((struct cdp_soc_t *)soc, vdev_id,
7159 				     vdev->mac_addr.raw, CDP_LINK_PEER_TYPE);
7160 
7161 	dp_pdev_update_fast_rx_flag(soc, pdev);
7162 
7163 	return QDF_STATUS_SUCCESS;
7164 
7165 fail0:
7166 	return QDF_STATUS_E_FAILURE;
7167 }
7168 
7169 #ifndef QCA_HOST_MODE_WIFI_DISABLED
7170 /**
7171  * dp_vdev_fetch_tx_handlers() - Fetch Tx handlers
7172  * @vdev: struct dp_vdev *
7173  * @soc: struct dp_soc *
7174  * @ctx: struct ol_txrx_hardtart_ctxt *
7175  */
7176 static inline void dp_vdev_fetch_tx_handler(struct dp_vdev *vdev,
7177 					    struct dp_soc *soc,
7178 					    struct ol_txrx_hardtart_ctxt *ctx)
7179 {
7180 	/* Enable vdev_id check only for ap, if flag is enabled */
7181 	if (vdev->mesh_vdev)
7182 		ctx->tx = dp_tx_send_mesh;
7183 	else if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
7184 		 (vdev->opmode == wlan_op_mode_ap)) {
7185 		ctx->tx = dp_tx_send_vdev_id_check;
7186 		ctx->tx_fast = dp_tx_send_vdev_id_check;
7187 	} else {
7188 		ctx->tx = dp_tx_send;
7189 		ctx->tx_fast = soc->arch_ops.dp_tx_send_fast;
7190 	}
7191 
7192 	/* Avoid check in regular exception Path */
7193 	if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
7194 	    (vdev->opmode == wlan_op_mode_ap))
7195 		ctx->tx_exception = dp_tx_send_exception_vdev_id_check;
7196 	else
7197 		ctx->tx_exception = dp_tx_send_exception;
7198 }
7199 
7200 /**
7201  * dp_vdev_register_tx_handler() - Register Tx handler
7202  * @vdev: struct dp_vdev *
7203  * @soc: struct dp_soc *
7204  * @txrx_ops: struct ol_txrx_ops *
7205  */
7206 static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
7207 					       struct dp_soc *soc,
7208 					       struct ol_txrx_ops *txrx_ops)
7209 {
7210 	struct ol_txrx_hardtart_ctxt ctx = {0};
7211 
7212 	dp_vdev_fetch_tx_handler(vdev, soc, &ctx);
7213 
7214 	txrx_ops->tx.tx = ctx.tx;
7215 	txrx_ops->tx.tx_fast = ctx.tx_fast;
7216 	txrx_ops->tx.tx_exception = ctx.tx_exception;
7217 
7218 	dp_info("Configure tx_vdev_id_chk_handler Feature Flag: %d and mode:%d for vdev_id:%d",
7219 		wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx),
7220 		vdev->opmode, vdev->vdev_id);
7221 }
7222 #else /* QCA_HOST_MODE_WIFI_DISABLED */
/* Stub: Tx handler registration is a no-op when host mode is disabled */
static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
					       struct dp_soc *soc,
					       struct ol_txrx_ops *txrx_ops)
{
}
7228 
/* Stub: no host Tx path when host mode is disabled; ctx is left untouched */
static inline void dp_vdev_fetch_tx_handler(struct dp_vdev *vdev,
					    struct dp_soc *soc,
					    struct ol_txrx_hardtart_ctxt *ctx)
{
}
7234 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
7235 
7236 /**
7237  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
7238  * @soc: Datapath soc handle
7239  * @vdev_id: id of Datapath VDEV handle
7240  * @osif_vdev: OSIF vdev handle
7241  * @txrx_ops: Tx and Rx operations
7242  *
7243  * Return: DP VDEV handle on success, NULL on failure
7244  */
7245 static QDF_STATUS dp_vdev_register_wifi3(struct cdp_soc_t *soc_hdl,
7246 					 uint8_t vdev_id,
7247 					 ol_osif_vdev_handle osif_vdev,
7248 					 struct ol_txrx_ops *txrx_ops)
7249 {
7250 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7251 	struct dp_vdev *vdev =	dp_vdev_get_ref_by_id(soc, vdev_id,
7252 						      DP_MOD_ID_CDP);
7253 
7254 	if (!vdev)
7255 		return QDF_STATUS_E_FAILURE;
7256 
7257 	vdev->osif_vdev = osif_vdev;
7258 	vdev->osif_rx = txrx_ops->rx.rx;
7259 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
7260 	vdev->osif_rx_flush = txrx_ops->rx.rx_flush;
7261 	vdev->osif_gro_flush = txrx_ops->rx.rx_gro_flush;
7262 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
7263 	vdev->osif_fisa_rx = txrx_ops->rx.osif_fisa_rx;
7264 	vdev->osif_fisa_flush = txrx_ops->rx.osif_fisa_flush;
7265 	vdev->osif_get_key = txrx_ops->get_key;
7266 	dp_monitor_vdev_register_osif(vdev, txrx_ops);
7267 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
7268 	vdev->tx_comp = txrx_ops->tx.tx_comp;
7269 	vdev->stats_cb = txrx_ops->rx.stats_rx;
7270 	vdev->tx_classify_critical_pkt_cb =
7271 		txrx_ops->tx.tx_classify_critical_pkt_cb;
7272 #ifdef notyet
7273 #if ATH_SUPPORT_WAPI
7274 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
7275 #endif
7276 #endif
7277 #ifdef UMAC_SUPPORT_PROXY_ARP
7278 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
7279 #endif
7280 	vdev->me_convert = txrx_ops->me_convert;
7281 	vdev->get_tsf_time = txrx_ops->get_tsf_time;
7282 
7283 	dp_vdev_register_rx_eapol(vdev, txrx_ops);
7284 
7285 	dp_vdev_register_tx_handler(vdev, soc, txrx_ops);
7286 
7287 	dp_init_info("%pK: DP Vdev Register success", soc);
7288 
7289 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7290 	return QDF_STATUS_SUCCESS;
7291 }
7292 
7293 #ifdef WLAN_FEATURE_11BE_MLO
7294 void dp_peer_delete(struct dp_soc *soc,
7295 		    struct dp_peer *peer,
7296 		    void *arg)
7297 {
7298 	if (!peer->valid)
7299 		return;
7300 
7301 	dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
7302 			     peer->vdev->vdev_id,
7303 			     peer->mac_addr.raw, 0,
7304 			     peer->peer_type);
7305 }
7306 #else
7307 void dp_peer_delete(struct dp_soc *soc,
7308 		    struct dp_peer *peer,
7309 		    void *arg)
7310 {
7311 	if (!peer->valid)
7312 		return;
7313 
7314 	dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
7315 			     peer->vdev->vdev_id,
7316 			     peer->mac_addr.raw, 0,
7317 			     CDP_LINK_PEER_TYPE);
7318 }
7319 #endif
7320 
7321 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
7322 void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg)
7323 {
7324 	if (!peer->valid)
7325 		return;
7326 
7327 	if (IS_MLO_DP_LINK_PEER(peer))
7328 		dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
7329 				     peer->vdev->vdev_id,
7330 				     peer->mac_addr.raw, 0,
7331 				     CDP_LINK_PEER_TYPE);
7332 }
7333 #else
/* Stub: without MLO multi-chip support there are no MLO peers to delete */
void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
}
7337 #endif
7338 /**
7339  * dp_vdev_flush_peers() - Forcibily Flush peers of vdev
7340  * @vdev: Datapath VDEV handle
7341  * @unmap_only: Flag to indicate "only unmap"
7342  *
7343  * Return: void
7344  */
7345 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
7346 				bool unmap_only,
7347 				bool mlo_peers_only)
7348 {
7349 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7350 	struct dp_pdev *pdev = vdev->pdev;
7351 	struct dp_soc *soc = pdev->soc;
7352 	struct dp_peer *peer;
7353 	uint32_t i = 0;
7354 
7355 
7356 	if (!unmap_only) {
7357 		if (!mlo_peers_only)
7358 			dp_vdev_iterate_peer_lock_safe(vdev,
7359 						       dp_peer_delete,
7360 						       NULL,
7361 						       DP_MOD_ID_CDP);
7362 		else
7363 			dp_vdev_iterate_peer_lock_safe(vdev,
7364 						       dp_mlo_peer_delete,
7365 						       NULL,
7366 						       DP_MOD_ID_CDP);
7367 	}
7368 
7369 	for (i = 0; i < soc->max_peer_id ; i++) {
7370 		peer = __dp_peer_get_ref_by_id(soc, i, DP_MOD_ID_CDP);
7371 
7372 		if (!peer)
7373 			continue;
7374 
7375 		if (peer->vdev != vdev) {
7376 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7377 			continue;
7378 		}
7379 
7380 		if (!mlo_peers_only) {
7381 			dp_info("peer: " QDF_MAC_ADDR_FMT " is getting unmap",
7382 				QDF_MAC_ADDR_REF(peer->mac_addr.raw));
7383 			dp_rx_peer_unmap_handler(soc, i,
7384 						 vdev->vdev_id,
7385 						 peer->mac_addr.raw, 0,
7386 						 DP_PEER_WDS_COUNT_INVALID);
7387 			SET_PEER_REF_CNT_ONE(peer);
7388 		} else if (IS_MLO_DP_LINK_PEER(peer) ||
7389 			   IS_MLO_DP_MLD_PEER(peer)) {
7390 			dp_info("peer: " QDF_MAC_ADDR_FMT " is getting unmap",
7391 				QDF_MAC_ADDR_REF(peer->mac_addr.raw));
7392 			dp_rx_peer_unmap_handler(soc, i,
7393 						 vdev->vdev_id,
7394 						 peer->mac_addr.raw, 0,
7395 						 DP_PEER_WDS_COUNT_INVALID);
7396 			SET_PEER_REF_CNT_ONE(peer);
7397 		}
7398 
7399 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7400 	}
7401 }
7402 
7403 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
7404 /*
7405  * dp_txrx_alloc_vdev_stats_id()- Allocate vdev_stats_id
7406  * @soc_hdl: Datapath soc handle
7407  * @vdev_stats_id: Address of vdev_stats_id
7408  *
7409  * Return: QDF_STATUS
7410  */
7411 static QDF_STATUS dp_txrx_alloc_vdev_stats_id(struct cdp_soc_t *soc_hdl,
7412 					      uint8_t *vdev_stats_id)
7413 {
7414 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7415 	uint8_t id = 0;
7416 
7417 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
7418 		*vdev_stats_id = CDP_INVALID_VDEV_STATS_ID;
7419 		return QDF_STATUS_E_FAILURE;
7420 	}
7421 
7422 	while (id < CDP_MAX_VDEV_STATS_ID) {
7423 		if (!qdf_atomic_test_and_set_bit(id, &soc->vdev_stats_id_map)) {
7424 			*vdev_stats_id = id;
7425 			return QDF_STATUS_SUCCESS;
7426 		}
7427 		id++;
7428 	}
7429 
7430 	*vdev_stats_id = CDP_INVALID_VDEV_STATS_ID;
7431 	return QDF_STATUS_E_FAILURE;
7432 }
7433 
7434 /*
7435  * dp_txrx_reset_vdev_stats_id() - Reset vdev_stats_id in dp_soc
7436  * @soc_hdl: Datapath soc handle
7437  * @vdev_stats_id: vdev_stats_id to reset in dp_soc
7438  *
7439  * Return: none
7440  */
7441 static void dp_txrx_reset_vdev_stats_id(struct cdp_soc_t *soc_hdl,
7442 					uint8_t vdev_stats_id)
7443 {
7444 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7445 
7446 	if ((!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) ||
7447 	    (vdev_stats_id >= CDP_MAX_VDEV_STATS_ID))
7448 		return;
7449 
7450 	qdf_atomic_clear_bit(vdev_stats_id, &soc->vdev_stats_id_map);
7451 }
7452 #else
/* Stub: no vdev stats-id bookkeeping without HW offload support */
static void dp_txrx_reset_vdev_stats_id(struct cdp_soc_t *soc,
					uint8_t vdev_stats_id)
{}
7456 #endif
7457 /*
7458  * dp_vdev_detach_wifi3() - Detach txrx vdev
7459  * @cdp_soc: Datapath soc handle
7460  * @vdev_id: VDEV Id
7461  * @callback: Callback OL_IF on completion of detach
7462  * @cb_context:	Callback context
7463  *
7464  */
7465 static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc,
7466 				       uint8_t vdev_id,
7467 				       ol_txrx_vdev_delete_cb callback,
7468 				       void *cb_context)
7469 {
7470 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
7471 	struct dp_pdev *pdev;
7472 	struct dp_neighbour_peer *peer = NULL;
7473 	struct dp_peer *vap_self_peer = NULL;
7474 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
7475 						     DP_MOD_ID_CDP);
7476 
7477 	if (!vdev)
7478 		return QDF_STATUS_E_FAILURE;
7479 
7480 	soc->arch_ops.txrx_vdev_detach(soc, vdev);
7481 
7482 	pdev = vdev->pdev;
7483 
7484 	vap_self_peer = dp_sta_vdev_self_peer_ref_n_get(soc, vdev,
7485 							DP_MOD_ID_CONFIG);
7486 	if (vap_self_peer) {
7487 		qdf_spin_lock_bh(&soc->ast_lock);
7488 		if (vap_self_peer->self_ast_entry) {
7489 			dp_peer_del_ast(soc, vap_self_peer->self_ast_entry);
7490 			vap_self_peer->self_ast_entry = NULL;
7491 		}
7492 		qdf_spin_unlock_bh(&soc->ast_lock);
7493 
7494 		dp_peer_delete_wifi3((struct cdp_soc_t *)soc, vdev->vdev_id,
7495 				     vap_self_peer->mac_addr.raw, 0,
7496 				     CDP_LINK_PEER_TYPE);
7497 		dp_peer_unref_delete(vap_self_peer, DP_MOD_ID_CONFIG);
7498 	}
7499 
7500 	/*
7501 	 * If Target is hung, flush all peers before detaching vdev
7502 	 * this will free all references held due to missing
7503 	 * unmap commands from Target
7504 	 */
7505 	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
7506 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, false, false);
7507 	else if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
7508 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, true, false);
7509 
7510 	/* indicate that the vdev needs to be deleted */
7511 	vdev->delete.pending = 1;
7512 	dp_rx_vdev_detach(vdev);
7513 	/*
7514 	 * move it after dp_rx_vdev_detach(),
7515 	 * as the call back done in dp_rx_vdev_detach()
7516 	 * still need to get vdev pointer by vdev_id.
7517 	 */
7518 	dp_vdev_id_map_tbl_remove(soc, vdev);
7519 
7520 	dp_monitor_neighbour_peer_list_remove(pdev, vdev, peer);
7521 
7522 	dp_txrx_reset_vdev_stats_id(cdp_soc, vdev->vdev_stats_id);
7523 
7524 	dp_tx_vdev_multipass_deinit(vdev);
7525 	dp_tx_vdev_traffic_end_indication_detach(vdev);
7526 
7527 	if (vdev->vdev_dp_ext_handle) {
7528 		qdf_mem_free(vdev->vdev_dp_ext_handle);
7529 		vdev->vdev_dp_ext_handle = NULL;
7530 	}
7531 	vdev->delete.callback = callback;
7532 	vdev->delete.context = cb_context;
7533 
7534 	if (vdev->opmode != wlan_op_mode_monitor)
7535 		dp_vdev_pdev_list_remove(soc, pdev, vdev);
7536 
7537 	pdev->vdev_count--;
7538 	/* release reference taken above for find */
7539 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7540 
7541 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
7542 	TAILQ_INSERT_TAIL(&soc->inactive_vdev_list, vdev, inactive_list_elem);
7543 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
7544 
7545 	/* release reference taken at dp_vdev_create */
7546 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
7547 
7548 	return QDF_STATUS_SUCCESS;
7549 }
7550 
7551 #ifdef WLAN_FEATURE_11BE_MLO
7552 /**
7553  * is_dp_peer_can_reuse() - check if the dp_peer match condition to be reused
7554  * @vdev: Target DP vdev handle
7555  * @peer: DP peer handle to be checked
7556  * @peer_mac_addr: Target peer mac address
7557  * @peer_type: Target peer type
7558  *
7559  * Return: true - if match, false - not match
7560  */
7561 static inline
7562 bool is_dp_peer_can_reuse(struct dp_vdev *vdev,
7563 			  struct dp_peer *peer,
7564 			  uint8_t *peer_mac_addr,
7565 			  enum cdp_peer_type peer_type)
7566 {
7567 	if (peer->bss_peer && (peer->vdev == vdev) &&
7568 	    (peer->peer_type == peer_type) &&
7569 	    (qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw,
7570 			 QDF_MAC_ADDR_SIZE) == 0))
7571 		return true;
7572 
7573 	return false;
7574 }
7575 #else
7576 static inline
7577 bool is_dp_peer_can_reuse(struct dp_vdev *vdev,
7578 			  struct dp_peer *peer,
7579 			  uint8_t *peer_mac_addr,
7580 			  enum cdp_peer_type peer_type)
7581 {
7582 	if (peer->bss_peer && (peer->vdev == vdev) &&
7583 	    (qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw,
7584 			 QDF_MAC_ADDR_SIZE) == 0))
7585 		return true;
7586 
7587 	return false;
7588 }
7589 #endif
7590 
7591 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
7592 						uint8_t *peer_mac_addr,
7593 						enum cdp_peer_type peer_type)
7594 {
7595 	struct dp_peer *peer;
7596 	struct dp_soc *soc = vdev->pdev->soc;
7597 
7598 	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
7599 	TAILQ_FOREACH(peer, &soc->inactive_peer_list,
7600 		      inactive_list_elem) {
7601 
7602 		/* reuse bss peer only when vdev matches*/
7603 		if (is_dp_peer_can_reuse(vdev, peer,
7604 					 peer_mac_addr, peer_type)) {
7605 			/* increment ref count for cdp_peer_create*/
7606 			if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG) ==
7607 						QDF_STATUS_SUCCESS) {
7608 				TAILQ_REMOVE(&soc->inactive_peer_list, peer,
7609 					     inactive_list_elem);
7610 				qdf_spin_unlock_bh
7611 					(&soc->inactive_peer_list_lock);
7612 				return peer;
7613 			}
7614 		}
7615 	}
7616 
7617 	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
7618 	return NULL;
7619 }
7620 
7621 #ifdef FEATURE_AST
7622 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
7623 					       struct dp_pdev *pdev,
7624 					       uint8_t *peer_mac_addr)
7625 {
7626 	struct dp_ast_entry *ast_entry;
7627 
7628 	if (soc->ast_offload_support)
7629 		return;
7630 
7631 	qdf_spin_lock_bh(&soc->ast_lock);
7632 	if (soc->ast_override_support)
7633 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
7634 							    pdev->pdev_id);
7635 	else
7636 		ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
7637 
7638 	if (ast_entry && ast_entry->next_hop && !ast_entry->delete_in_progress)
7639 		dp_peer_del_ast(soc, ast_entry);
7640 
7641 	qdf_spin_unlock_bh(&soc->ast_lock);
7642 }
7643 #else
/* Stub: no host AST table without FEATURE_AST; roam cleanup not needed */
static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
					       struct dp_pdev *pdev,
					       uint8_t *peer_mac_addr)
{
}
7649 #endif
7650 
7651 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
7652 /*
7653  * dp_peer_hw_txrx_stats_init() - Initialize hw_txrx_stats_en in dp_peer
7654  * @soc: Datapath soc handle
7655  * @peer: Datapath peer handle
7656  *
7657  * Return: none
7658  */
7659 static inline
7660 void dp_peer_hw_txrx_stats_init(struct dp_soc *soc,
7661 				struct dp_txrx_peer *txrx_peer)
7662 {
7663 	txrx_peer->hw_txrx_stats_en =
7664 		wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx);
7665 }
7666 #else
/* HW vdev-stats offload not compiled in: force SW stats accounting */
static inline
void dp_peer_hw_txrx_stats_init(struct dp_soc *soc,
				struct dp_txrx_peer *txrx_peer)
{
	txrx_peer->hw_txrx_stats_en = 0;
}
7673 #endif
7674 
7675 static QDF_STATUS dp_txrx_peer_detach(struct dp_soc *soc, struct dp_peer *peer)
7676 {
7677 	struct dp_txrx_peer *txrx_peer;
7678 	struct dp_pdev *pdev;
7679 
7680 	/* dp_txrx_peer exists for mld peer and legacy peer */
7681 	if (peer->txrx_peer) {
7682 		txrx_peer = peer->txrx_peer;
7683 		peer->txrx_peer = NULL;
7684 		pdev = txrx_peer->vdev->pdev;
7685 
7686 		dp_peer_defrag_rx_tids_deinit(txrx_peer);
7687 		/*
7688 		 * Deallocate the extended stats contenxt
7689 		 */
7690 		dp_peer_delay_stats_ctx_dealloc(soc, txrx_peer);
7691 		dp_peer_rx_bufq_resources_deinit(txrx_peer);
7692 		dp_peer_jitter_stats_ctx_dealloc(pdev, txrx_peer);
7693 		dp_peer_sawf_stats_ctx_free(soc, txrx_peer);
7694 
7695 		qdf_mem_free(txrx_peer);
7696 	}
7697 
7698 	return QDF_STATUS_SUCCESS;
7699 }
7700 
7701 static QDF_STATUS dp_txrx_peer_attach(struct dp_soc *soc, struct dp_peer *peer)
7702 {
7703 	struct dp_txrx_peer *txrx_peer;
7704 	struct dp_pdev *pdev;
7705 
7706 	txrx_peer = (struct dp_txrx_peer *)qdf_mem_malloc(sizeof(*txrx_peer));
7707 
7708 	if (!txrx_peer)
7709 		return QDF_STATUS_E_NOMEM; /* failure */
7710 
7711 	txrx_peer->peer_id = HTT_INVALID_PEER;
7712 	/* initialize the peer_id */
7713 	txrx_peer->vdev = peer->vdev;
7714 	pdev = peer->vdev->pdev;
7715 
7716 	DP_STATS_INIT(txrx_peer);
7717 
7718 	dp_wds_ext_peer_init(txrx_peer);
7719 	dp_peer_rx_bufq_resources_init(txrx_peer);
7720 	dp_peer_hw_txrx_stats_init(soc, txrx_peer);
7721 	/*
7722 	 * Allocate peer extended stats context. Fall through in
7723 	 * case of failure as its not an implicit requirement to have
7724 	 * this object for regular statistics updates.
7725 	 */
7726 	if (dp_peer_delay_stats_ctx_alloc(soc, txrx_peer) !=
7727 					  QDF_STATUS_SUCCESS)
7728 		dp_warn("peer delay_stats ctx alloc failed");
7729 
7730 	/*
7731 	 * Alloctate memory for jitter stats. Fall through in
7732 	 * case of failure as its not an implicit requirement to have
7733 	 * this object for regular statistics updates.
7734 	 */
7735 	if (dp_peer_jitter_stats_ctx_alloc(pdev, txrx_peer) !=
7736 					   QDF_STATUS_SUCCESS)
7737 		dp_warn("peer jitter_stats ctx alloc failed");
7738 
7739 	dp_set_peer_isolation(txrx_peer, false);
7740 
7741 	dp_peer_defrag_rx_tids_init(txrx_peer);
7742 
7743 	if (dp_peer_sawf_stats_ctx_alloc(soc, txrx_peer) != QDF_STATUS_SUCCESS)
7744 		dp_warn("peer sawf stats alloc failed");
7745 
7746 	dp_txrx_peer_attach_add(soc, peer, txrx_peer);
7747 
7748 	return QDF_STATUS_SUCCESS;
7749 }
7750 
7751 static inline
7752 void dp_txrx_peer_stats_clr(struct dp_txrx_peer *txrx_peer)
7753 {
7754 	if (!txrx_peer)
7755 		return;
7756 
7757 	txrx_peer->tx_failed = 0;
7758 	txrx_peer->comp_pkt.num = 0;
7759 	txrx_peer->comp_pkt.bytes = 0;
7760 	txrx_peer->to_stack.num = 0;
7761 	txrx_peer->to_stack.bytes = 0;
7762 
7763 	DP_STATS_CLR(txrx_peer);
7764 	dp_peer_delay_stats_ctx_clr(txrx_peer);
7765 	dp_peer_jitter_stats_ctx_clr(txrx_peer);
7766 }
7767 
7768 /*
7769  * dp_peer_create_wifi3() - attach txrx peer
7770  * @soc_hdl: Datapath soc handle
7771  * @vdev_id: id of vdev
7772  * @peer_mac_addr: Peer MAC address
7773  * @peer_type: link or MLD peer type
7774  *
7775  * Return: 0 on success, -1 on failure
7776  */
7777 static QDF_STATUS
7778 dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
7779 		     uint8_t *peer_mac_addr, enum cdp_peer_type peer_type)
7780 {
7781 	struct dp_peer *peer;
7782 	int i;
7783 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7784 	struct dp_pdev *pdev;
7785 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
7786 	struct dp_vdev *vdev = NULL;
7787 
7788 	if (!peer_mac_addr)
7789 		return QDF_STATUS_E_FAILURE;
7790 
7791 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
7792 
7793 	if (!vdev)
7794 		return QDF_STATUS_E_FAILURE;
7795 
7796 	pdev = vdev->pdev;
7797 	soc = pdev->soc;
7798 
7799 	/*
7800 	 * If a peer entry with given MAC address already exists,
7801 	 * reuse the peer and reset the state of peer.
7802 	 */
7803 	peer = dp_peer_can_reuse(vdev, peer_mac_addr, peer_type);
7804 
7805 	if (peer) {
7806 		qdf_atomic_init(&peer->is_default_route_set);
7807 		dp_peer_cleanup(vdev, peer);
7808 
7809 		dp_peer_vdev_list_add(soc, vdev, peer);
7810 		dp_peer_find_hash_add(soc, peer);
7811 
7812 		dp_peer_rx_tids_create(peer);
7813 		if (IS_MLO_DP_MLD_PEER(peer))
7814 			dp_mld_peer_init_link_peers_info(peer);
7815 
7816 		qdf_spin_lock_bh(&soc->ast_lock);
7817 		dp_peer_delete_ast_entries(soc, peer);
7818 		qdf_spin_unlock_bh(&soc->ast_lock);
7819 
7820 		if ((vdev->opmode == wlan_op_mode_sta) &&
7821 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
7822 		     QDF_MAC_ADDR_SIZE)) {
7823 			ast_type = CDP_TXRX_AST_TYPE_SELF;
7824 		}
7825 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
7826 
7827 		peer->valid = 1;
7828 		peer->is_tdls_peer = false;
7829 		dp_local_peer_id_alloc(pdev, peer);
7830 
7831 		qdf_spinlock_create(&peer->peer_info_lock);
7832 
7833 		DP_STATS_INIT(peer);
7834 
7835 		/*
7836 		 * In tx_monitor mode, filter may be set for unassociated peer
7837 		 * when unassociated peer get associated peer need to
7838 		 * update tx_cap_enabled flag to support peer filter.
7839 		 */
7840 		if (!IS_MLO_DP_MLD_PEER(peer)) {
7841 			dp_monitor_peer_tx_capture_filter_check(pdev, peer);
7842 			dp_monitor_peer_reset_stats(soc, peer);
7843 		}
7844 
7845 		if (peer->txrx_peer) {
7846 			dp_peer_rx_bufq_resources_init(peer->txrx_peer);
7847 			dp_txrx_peer_stats_clr(peer->txrx_peer);
7848 			dp_set_peer_isolation(peer->txrx_peer, false);
7849 			dp_wds_ext_peer_init(peer->txrx_peer);
7850 			dp_peer_hw_txrx_stats_init(soc, peer->txrx_peer);
7851 		}
7852 
7853 		dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
7854 
7855 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7856 		return QDF_STATUS_SUCCESS;
7857 	} else {
7858 		/*
7859 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
7860 		 * need to remove the AST entry which was earlier added as a WDS
7861 		 * entry.
7862 		 * If an AST entry exists, but no peer entry exists with a given
7863 		 * MAC addresses, we could deduce it as a WDS entry
7864 		 */
7865 		dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
7866 	}
7867 
7868 #ifdef notyet
7869 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
7870 		soc->mempool_ol_ath_peer);
7871 #else
7872 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
7873 #endif
7874 	wlan_minidump_log(peer,
7875 			  sizeof(*peer),
7876 			  soc->ctrl_psoc,
7877 			  WLAN_MD_DP_PEER, "dp_peer");
7878 	if (!peer) {
7879 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7880 		return QDF_STATUS_E_FAILURE; /* failure */
7881 	}
7882 
7883 	qdf_mem_zero(peer, sizeof(struct dp_peer));
7884 
7885 	/* store provided params */
7886 	peer->vdev = vdev;
7887 
7888 	/* initialize the peer_id */
7889 	peer->peer_id = HTT_INVALID_PEER;
7890 
7891 	qdf_mem_copy(
7892 		&peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE);
7893 
7894 	DP_PEER_SET_TYPE(peer, peer_type);
7895 	if (IS_MLO_DP_MLD_PEER(peer)) {
7896 		if (dp_txrx_peer_attach(soc, peer) !=
7897 				QDF_STATUS_SUCCESS)
7898 			goto fail; /* failure */
7899 
7900 		dp_mld_peer_init_link_peers_info(peer);
7901 	} else if (dp_monitor_peer_attach(soc, peer) !=
7902 				QDF_STATUS_SUCCESS)
7903 		dp_warn("peer monitor ctx alloc failed");
7904 
7905 	TAILQ_INIT(&peer->ast_entry_list);
7906 
7907 	/* get the vdev reference for new peer */
7908 	dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CHILD);
7909 
7910 	if ((vdev->opmode == wlan_op_mode_sta) &&
7911 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
7912 			 QDF_MAC_ADDR_SIZE)) {
7913 		ast_type = CDP_TXRX_AST_TYPE_SELF;
7914 	}
7915 	qdf_spinlock_create(&peer->peer_state_lock);
7916 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
7917 	qdf_spinlock_create(&peer->peer_info_lock);
7918 
7919 	/* reset the ast index to flowid table */
7920 	dp_peer_reset_flowq_map(peer);
7921 
7922 	qdf_atomic_init(&peer->ref_cnt);
7923 
7924 	for (i = 0; i < DP_MOD_ID_MAX; i++)
7925 		qdf_atomic_init(&peer->mod_refs[i]);
7926 
7927 	/* keep one reference for attach */
7928 	qdf_atomic_inc(&peer->ref_cnt);
7929 	qdf_atomic_inc(&peer->mod_refs[DP_MOD_ID_CONFIG]);
7930 
7931 	dp_peer_vdev_list_add(soc, vdev, peer);
7932 
7933 	/* TODO: See if hash based search is required */
7934 	dp_peer_find_hash_add(soc, peer);
7935 
7936 	/* Initialize the peer state */
7937 	peer->state = OL_TXRX_PEER_STATE_DISC;
7938 
7939 	dp_info("vdev %pK created peer %pK ("QDF_MAC_ADDR_FMT") ref_cnt: %d",
7940 		vdev, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
7941 		qdf_atomic_read(&peer->ref_cnt));
7942 	/*
7943 	 * For every peer MAp message search and set if bss_peer
7944 	 */
7945 	if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
7946 			QDF_MAC_ADDR_SIZE) == 0 &&
7947 			(wlan_op_mode_sta != vdev->opmode)) {
7948 		dp_info("vdev bss_peer!!");
7949 		peer->bss_peer = 1;
7950 		if (peer->txrx_peer)
7951 			peer->txrx_peer->bss_peer = 1;
7952 	}
7953 
7954 	if (wlan_op_mode_sta == vdev->opmode &&
7955 	    qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
7956 			QDF_MAC_ADDR_SIZE) == 0) {
7957 		peer->sta_self_peer = 1;
7958 	}
7959 
7960 	dp_peer_rx_tids_create(peer);
7961 
7962 	peer->valid = 1;
7963 	dp_local_peer_id_alloc(pdev, peer);
7964 	DP_STATS_INIT(peer);
7965 
7966 	if (dp_peer_sawf_ctx_alloc(soc, peer) != QDF_STATUS_SUCCESS)
7967 		dp_warn("peer sawf context alloc failed");
7968 
7969 	dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
7970 
7971 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7972 
7973 	return QDF_STATUS_SUCCESS;
7974 fail:
7975 	qdf_mem_free(peer);
7976 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7977 
7978 	return QDF_STATUS_E_FAILURE;
7979 }
7980 
7981 static QDF_STATUS dp_peer_legacy_setup(struct dp_soc *soc, struct dp_peer *peer)
7982 {
7983 	/* txrx_peer might exist already in peer reuse case */
7984 	if (peer->txrx_peer)
7985 		return QDF_STATUS_SUCCESS;
7986 
7987 	if (dp_txrx_peer_attach(soc, peer) !=
7988 				QDF_STATUS_SUCCESS) {
7989 		dp_err("peer txrx ctx alloc failed");
7990 		return QDF_STATUS_E_FAILURE;
7991 	}
7992 
7993 	return QDF_STATUS_SUCCESS;
7994 }
7995 
7996 #ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_peer_mlo_setup() - MLO bookkeeping for a newly set-up link peer
 * @soc: Datapath soc handle
 * @peer: link peer being set up
 * @vdev_id: id of the vdev the link peer belongs to
 * @setup_info: MLO setup parameters; NULL for a non-MLO connection
 *
 * Return: QDF_STATUS_SUCCESS on success (or non-MLO no-op),
 *	   QDF_STATUS_E_FAILURE when the MLD peer cannot be found
 */
QDF_STATUS dp_peer_mlo_setup(
			struct dp_soc *soc,
			struct dp_peer *peer,
			uint8_t vdev_id,
			struct cdp_peer_setup_info *setup_info)
{
	struct dp_peer *mld_peer = NULL;

	/* Non-MLO connection, do nothing */
	if (!setup_info || !setup_info->mld_peer_mac)
		return QDF_STATUS_SUCCESS;

	dp_info("link peer:" QDF_MAC_ADDR_FMT "mld peer:" QDF_MAC_ADDR_FMT
		"assoc_link %d, primary_link %d",
		QDF_MAC_ADDR_REF(peer->mac_addr.raw),
		QDF_MAC_ADDR_REF(setup_info->mld_peer_mac),
		setup_info->is_first_link,
		setup_info->is_primary_link);

	/* if this is the first link peer */
	if (setup_info->is_first_link)
		/* create MLD peer */
		dp_peer_create_wifi3((struct cdp_soc_t *)soc,
				     vdev_id,
				     setup_info->mld_peer_mac,
				     CDP_MLD_PEER_TYPE);

	peer->first_link = setup_info->is_first_link;
	peer->primary_link = setup_info->is_primary_link;
	mld_peer = dp_mld_peer_find_hash_find(soc,
					      setup_info->mld_peer_mac,
					      0, vdev_id, DP_MOD_ID_CDP);
	if (mld_peer) {
		if (setup_info->is_first_link) {
			/* assign rx_tid to mld peer */
			mld_peer->rx_tid = peer->rx_tid;
			/* no cdp_peer_setup for MLD peer,
			 * set it for addba processing
			 */
			qdf_atomic_set(&mld_peer->is_default_route_set, 1);
		} else {
			/* free link peer original rx_tids mem */
			dp_peer_rx_tids_destroy(peer);
			/* assign mld peer rx_tid to link peer */
			peer->rx_tid = mld_peer->rx_tid;
		}

		if (setup_info->is_primary_link &&
		    !setup_info->is_first_link) {
			/*
			 * if first link is not the primary link,
			 * then need to change mld_peer->vdev as
			 * primary link dp_vdev is not same one
			 * during mld peer creation.
			 */

			/* release the ref to original dp_vdev */
			dp_vdev_unref_delete(soc, mld_peer->vdev,
					     DP_MOD_ID_CHILD);
			/*
			 * get the ref to new dp_vdev,
			 * increase dp_vdev ref_cnt
			 */
			mld_peer->vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
							       DP_MOD_ID_CHILD);
			/* NOTE(review): assumes mld_peer->txrx_peer is
			 * non-NULL (MLD peers attach one in
			 * dp_peer_create_wifi3) - confirm.
			 */
			mld_peer->txrx_peer->vdev = mld_peer->vdev;
		}

		/* associate mld and link peer */
		dp_link_peer_add_mld_peer(peer, mld_peer);
		dp_mld_peer_add_link_peer(mld_peer, peer);

		mld_peer->txrx_peer->mld_peer = 1;
		/* drop the hash-find reference taken above */
		dp_peer_unref_delete(mld_peer, DP_MOD_ID_CDP);
	} else {
		peer->mld_peer = NULL;
		dp_err("mld peer" QDF_MAC_ADDR_FMT "not found!",
		       QDF_MAC_ADDR_REF(setup_info->mld_peer_mac));
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
8080 
8081 /*
8082  * dp_mlo_peer_authorize() - authorize MLO peer
8083  * @soc: soc handle
8084  * @peer: pointer to link peer
8085  *
8086  * return void
8087  */
8088 static void dp_mlo_peer_authorize(struct dp_soc *soc,
8089 				  struct dp_peer *peer)
8090 {
8091 	int i;
8092 	struct dp_peer *link_peer = NULL;
8093 	struct dp_peer *mld_peer = peer->mld_peer;
8094 	struct dp_mld_link_peers link_peers_info;
8095 
8096 	if (!mld_peer)
8097 		return;
8098 
8099 	/* get link peers with reference */
8100 	dp_get_link_peers_ref_from_mld_peer(soc, mld_peer,
8101 					    &link_peers_info,
8102 					    DP_MOD_ID_CDP);
8103 
8104 	for (i = 0; i < link_peers_info.num_links; i++) {
8105 		link_peer = link_peers_info.link_peers[i];
8106 
8107 		if (!link_peer->authorize) {
8108 			dp_release_link_peers_ref(&link_peers_info,
8109 						  DP_MOD_ID_CDP);
8110 			mld_peer->authorize = false;
8111 			return;
8112 		}
8113 	}
8114 
8115 	/* if we are here all link peers are authorized,
8116 	 * authorize ml_peer also
8117 	 */
8118 	mld_peer->authorize = true;
8119 
8120 	/* release link peers reference */
8121 	dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
8122 }
8123 #endif
8124 
8125 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
8126 				  enum cdp_host_reo_dest_ring *reo_dest,
8127 				  bool *hash_based)
8128 {
8129 	struct dp_soc *soc;
8130 	struct dp_pdev *pdev;
8131 
8132 	pdev = vdev->pdev;
8133 	soc = pdev->soc;
8134 	/*
8135 	 * hash based steering is disabled for Radios which are offloaded
8136 	 * to NSS
8137 	 */
8138 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
8139 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
8140 
8141 	/*
8142 	 * Below line of code will ensure the proper reo_dest ring is chosen
8143 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
8144 	 */
8145 	*reo_dest = pdev->reo_dest;
8146 }
8147 
8148 #ifdef IPA_OFFLOAD
8149 /**
8150  * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
8151  * @vdev: Virtual device
8152  *
8153  * Return: true if the vdev is of subtype P2P
8154  *	   false if the vdev is of any other subtype
8155  */
8156 static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
8157 {
8158 	if (vdev->subtype == wlan_op_subtype_p2p_device ||
8159 	    vdev->subtype == wlan_op_subtype_p2p_cli ||
8160 	    vdev->subtype == wlan_op_subtype_p2p_go)
8161 		return true;
8162 
8163 	return false;
8164 }
8165 
8166 /*
8167  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
8168  * @vdev: Datapath VDEV handle
8169  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
8170  * @hash_based: pointer to hash value (enabled/disabled) to be populated
8171  *
8172  * If IPA is enabled in ini, for SAP mode, disable hash based
8173  * steering, use default reo_dst ring for RX. Use config values for other modes.
8174  * Return: None
8175  */
8176 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
8177 				       struct cdp_peer_setup_info *setup_info,
8178 				       enum cdp_host_reo_dest_ring *reo_dest,
8179 				       bool *hash_based,
8180 				       uint8_t *lmac_peer_id_msb)
8181 {
8182 	struct dp_soc *soc;
8183 	struct dp_pdev *pdev;
8184 
8185 	pdev = vdev->pdev;
8186 	soc = pdev->soc;
8187 
8188 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
8189 
8190 	/* For P2P-GO interfaces we do not need to change the REO
8191 	 * configuration even if IPA config is enabled
8192 	 */
8193 	if (dp_is_vdev_subtype_p2p(vdev))
8194 		return;
8195 
8196 	/*
8197 	 * If IPA is enabled, disable hash-based flow steering and set
8198 	 * reo_dest_ring_4 as the REO ring to receive packets on.
8199 	 * IPA is configured to reap reo_dest_ring_4.
8200 	 *
8201 	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
8202 	 * value enum value is from 1 - 4.
8203 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
8204 	 */
8205 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
8206 		if (vdev->opmode == wlan_op_mode_ap) {
8207 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
8208 			*hash_based = 0;
8209 		} else if (vdev->opmode == wlan_op_mode_sta &&
8210 			   dp_ipa_is_mdm_platform()) {
8211 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
8212 		}
8213 	}
8214 }
8215 
8216 #else
8217 
8218 /*
8219  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
8220  * @vdev: Datapath VDEV handle
8221  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
8222  * @hash_based: pointer to hash value (enabled/disabled) to be populated
8223  *
8224  * Use system config values for hash based steering.
8225  * Return: None
8226  */
8227 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
8228 				       struct cdp_peer_setup_info *setup_info,
8229 				       enum cdp_host_reo_dest_ring *reo_dest,
8230 				       bool *hash_based,
8231 				       uint8_t *lmac_peer_id_msb)
8232 {
8233 	struct dp_soc *soc = vdev->pdev->soc;
8234 
8235 	soc->arch_ops.peer_get_reo_hash(vdev, setup_info, reo_dest, hash_based,
8236 					lmac_peer_id_msb);
8237 }
8238 #endif /* IPA_OFFLOAD */
8239 
8240 /*
8241  * dp_peer_setup_wifi3() - initialize the peer
8242  * @soc_hdl: soc handle object
8243  * @vdev_id : vdev_id of vdev object
8244  * @peer_mac: Peer's mac address
8245  * @peer_setup_info: peer setup info for MLO
8246  *
8247  * Return: QDF_STATUS
8248  */
8249 static QDF_STATUS
8250 dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8251 		    uint8_t *peer_mac,
8252 		    struct cdp_peer_setup_info *setup_info)
8253 {
8254 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8255 	struct dp_pdev *pdev;
8256 	bool hash_based = 0;
8257 	enum cdp_host_reo_dest_ring reo_dest;
8258 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8259 	struct dp_vdev *vdev = NULL;
8260 	struct dp_peer *peer =
8261 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
8262 					       DP_MOD_ID_CDP);
8263 	struct dp_peer *mld_peer = NULL;
8264 	enum wlan_op_mode vdev_opmode;
8265 	uint8_t lmac_peer_id_msb = 0;
8266 
8267 	if (!peer)
8268 		return QDF_STATUS_E_FAILURE;
8269 
8270 	vdev = peer->vdev;
8271 	if (!vdev) {
8272 		status = QDF_STATUS_E_FAILURE;
8273 		goto fail;
8274 	}
8275 
8276 	/* save vdev related member in case vdev freed */
8277 	vdev_opmode = vdev->opmode;
8278 	pdev = vdev->pdev;
8279 	dp_peer_setup_get_reo_hash(vdev, setup_info,
8280 				   &reo_dest, &hash_based,
8281 				   &lmac_peer_id_msb);
8282 
8283 	dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u",
8284 		pdev->pdev_id, vdev->vdev_id,
8285 		vdev->opmode, hash_based, reo_dest);
8286 
8287 	/*
8288 	 * There are corner cases where the AD1 = AD2 = "VAPs address"
8289 	 * i.e both the devices have same MAC address. In these
8290 	 * cases we want such pkts to be processed in NULL Q handler
8291 	 * which is REO2TCL ring. for this reason we should
8292 	 * not setup reo_queues and default route for bss_peer.
8293 	 */
8294 	if (!IS_MLO_DP_MLD_PEER(peer))
8295 		dp_monitor_peer_tx_init(pdev, peer);
8296 
8297 	if (!setup_info)
8298 		if (dp_peer_legacy_setup(soc, peer) !=
8299 				QDF_STATUS_SUCCESS) {
8300 			status = QDF_STATUS_E_RESOURCES;
8301 			goto fail;
8302 		}
8303 
8304 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
8305 		status = QDF_STATUS_E_FAILURE;
8306 		goto fail;
8307 	}
8308 
8309 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
8310 		/* TODO: Check the destination ring number to be passed to FW */
8311 		soc->cdp_soc.ol_ops->peer_set_default_routing(
8312 				soc->ctrl_psoc,
8313 				peer->vdev->pdev->pdev_id,
8314 				peer->mac_addr.raw,
8315 				peer->vdev->vdev_id, hash_based, reo_dest,
8316 				lmac_peer_id_msb);
8317 	}
8318 
8319 	qdf_atomic_set(&peer->is_default_route_set, 1);
8320 
8321 	status = dp_peer_mlo_setup(soc, peer, vdev->vdev_id, setup_info);
8322 	if (QDF_IS_STATUS_ERROR(status)) {
8323 		dp_peer_err("peer mlo setup failed");
8324 		qdf_assert_always(0);
8325 	}
8326 
8327 	if (vdev_opmode != wlan_op_mode_monitor) {
8328 		/* In case of MLD peer, switch peer to mld peer and
8329 		 * do peer_rx_init.
8330 		 */
8331 		if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
8332 		    IS_MLO_DP_LINK_PEER(peer)) {
8333 			if (setup_info && setup_info->is_first_link) {
8334 				mld_peer = DP_GET_MLD_PEER_FROM_PEER(peer);
8335 				if (mld_peer)
8336 					dp_peer_rx_init(pdev, mld_peer);
8337 				else
8338 					dp_peer_err("MLD peer null. Primary link peer:%pK", peer);
8339 			}
8340 		} else {
8341 			dp_peer_rx_init(pdev, peer);
8342 		}
8343 	}
8344 
8345 	if (!IS_MLO_DP_MLD_PEER(peer))
8346 		dp_peer_ppdu_delayed_ba_init(peer);
8347 
8348 fail:
8349 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8350 	return status;
8351 }
8352 
8353 /*
8354  * dp_cp_peer_del_resp_handler - Handle the peer delete response
8355  * @soc_hdl: Datapath SOC handle
8356  * @vdev_id: id of virtual device object
8357  * @mac_addr: Mac address of the peer
8358  *
8359  * Return: QDF_STATUS
8360  */
8361 static QDF_STATUS dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl,
8362 					      uint8_t vdev_id,
8363 					      uint8_t *mac_addr)
8364 {
8365 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8366 	struct dp_ast_entry  *ast_entry = NULL;
8367 	txrx_ast_free_cb cb = NULL;
8368 	void *cookie;
8369 
8370 	if (soc->ast_offload_support)
8371 		return QDF_STATUS_E_INVAL;
8372 
8373 	qdf_spin_lock_bh(&soc->ast_lock);
8374 
8375 	ast_entry =
8376 		dp_peer_ast_hash_find_by_vdevid(soc, mac_addr,
8377 						vdev_id);
8378 
8379 	/* in case of qwrap we have multiple BSS peers
8380 	 * with same mac address
8381 	 *
8382 	 * AST entry for this mac address will be created
8383 	 * only for one peer hence it will be NULL here
8384 	 */
8385 	if ((!ast_entry || !ast_entry->delete_in_progress) ||
8386 	    (ast_entry->peer_id != HTT_INVALID_PEER)) {
8387 		qdf_spin_unlock_bh(&soc->ast_lock);
8388 		return QDF_STATUS_E_FAILURE;
8389 	}
8390 
8391 	if (ast_entry->is_mapped)
8392 		soc->ast_table[ast_entry->ast_idx] = NULL;
8393 
8394 	DP_STATS_INC(soc, ast.deleted, 1);
8395 	dp_peer_ast_hash_remove(soc, ast_entry);
8396 
8397 	cb = ast_entry->callback;
8398 	cookie = ast_entry->cookie;
8399 	ast_entry->callback = NULL;
8400 	ast_entry->cookie = NULL;
8401 
8402 	soc->num_ast_entries--;
8403 	qdf_spin_unlock_bh(&soc->ast_lock);
8404 
8405 	if (cb) {
8406 		cb(soc->ctrl_psoc,
8407 		   dp_soc_to_cdp_soc(soc),
8408 		   cookie,
8409 		   CDP_TXRX_AST_DELETED);
8410 	}
8411 	qdf_mem_free(ast_entry);
8412 
8413 	return QDF_STATUS_SUCCESS;
8414 }
8415 
8416 /*
8417  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
8418  * @txrx_soc: cdp soc handle
8419  * @ac: Access category
8420  * @value: timeout value in millisec
8421  *
8422  * Return: void
8423  */
8424 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
8425 				    uint8_t ac, uint32_t value)
8426 {
8427 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
8428 
8429 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
8430 }
8431 
8432 /*
8433  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
8434  * @txrx_soc: cdp soc handle
8435  * @ac: access category
8436  * @value: timeout value in millisec
8437  *
8438  * Return: void
8439  */
8440 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
8441 				    uint8_t ac, uint32_t *value)
8442 {
8443 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
8444 
8445 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
8446 }
8447 
8448 /*
8449  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
8450  * @txrx_soc: cdp soc handle
8451  * @pdev_id: id of physical device object
8452  * @val: reo destination ring index (1 - 4)
8453  *
8454  * Return: QDF_STATUS
8455  */
8456 static QDF_STATUS
8457 dp_set_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id,
8458 		     enum cdp_host_reo_dest_ring val)
8459 {
8460 	struct dp_pdev *pdev =
8461 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
8462 						   pdev_id);
8463 
8464 	if (pdev) {
8465 		pdev->reo_dest = val;
8466 		return QDF_STATUS_SUCCESS;
8467 	}
8468 
8469 	return QDF_STATUS_E_FAILURE;
8470 }
8471 
8472 /*
8473  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
8474  * @txrx_soc: cdp soc handle
8475  * @pdev_id: id of physical device object
8476  *
8477  * Return: reo destination ring index
8478  */
8479 static enum cdp_host_reo_dest_ring
8480 dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id)
8481 {
8482 	struct dp_pdev *pdev =
8483 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
8484 						   pdev_id);
8485 
8486 	if (pdev)
8487 		return pdev->reo_dest;
8488 	else
8489 		return cdp_host_reo_dest_ring_unknown;
8490 }
8491 
8492 #ifdef WLAN_SUPPORT_MSCS
8493 /*
8494  * dp_record_mscs_params - MSCS parameters sent by the STA in
8495  * the MSCS Request to the AP. The AP makes a note of these
8496  * parameters while comparing the MSDUs sent by the STA, to
8497  * send the downlink traffic with correct User priority.
8498  * @soc - Datapath soc handle
8499  * @peer_mac - STA Mac address
8500  * @vdev_id - ID of the vdev handle
8501  * @mscs_params - Structure having MSCS parameters obtained
8502  * from handshake
8503  * @active - Flag to set MSCS active/inactive
8504  * return type - QDF_STATUS - Success/Invalid
8505  */
8506 static QDF_STATUS
8507 dp_record_mscs_params(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
8508 		      uint8_t vdev_id, struct cdp_mscs_params *mscs_params,
8509 		      bool active)
8510 {
8511 	struct dp_peer *peer;
8512 	QDF_STATUS status = QDF_STATUS_E_INVAL;
8513 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8514 
8515 	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
8516 				      DP_MOD_ID_CDP);
8517 
8518 	if (!peer) {
8519 		dp_err("Peer is NULL!");
8520 		goto fail;
8521 	}
8522 	if (!active) {
8523 		dp_info("MSCS Procedure is terminated");
8524 		peer->mscs_active = active;
8525 		goto fail;
8526 	}
8527 
8528 	if (mscs_params->classifier_type == IEEE80211_TCLAS_MASK_CLA_TYPE_4) {
8529 		/* Populate entries inside IPV4 database first */
8530 		peer->mscs_ipv4_parameter.user_priority_bitmap =
8531 			mscs_params->user_pri_bitmap;
8532 		peer->mscs_ipv4_parameter.user_priority_limit =
8533 			mscs_params->user_pri_limit;
8534 		peer->mscs_ipv4_parameter.classifier_mask =
8535 			mscs_params->classifier_mask;
8536 
8537 		/* Populate entries inside IPV6 database */
8538 		peer->mscs_ipv6_parameter.user_priority_bitmap =
8539 			mscs_params->user_pri_bitmap;
8540 		peer->mscs_ipv6_parameter.user_priority_limit =
8541 			mscs_params->user_pri_limit;
8542 		peer->mscs_ipv6_parameter.classifier_mask =
8543 			mscs_params->classifier_mask;
8544 		peer->mscs_active = 1;
8545 		dp_info("\n\tMSCS Procedure request based parameters for "QDF_MAC_ADDR_FMT"\n"
8546 			"\tClassifier_type = %d\tUser priority bitmap = %x\n"
8547 			"\tUser priority limit = %x\tClassifier mask = %x",
8548 			QDF_MAC_ADDR_REF(peer_mac),
8549 			mscs_params->classifier_type,
8550 			peer->mscs_ipv4_parameter.user_priority_bitmap,
8551 			peer->mscs_ipv4_parameter.user_priority_limit,
8552 			peer->mscs_ipv4_parameter.classifier_mask);
8553 	}
8554 
8555 	status = QDF_STATUS_SUCCESS;
8556 fail:
8557 	if (peer)
8558 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8559 	return status;
8560 }
8561 #endif
8562 
8563 /*
8564  * dp_get_sec_type() - Get the security type
8565  * @soc: soc handle
8566  * @vdev_id: id of dp handle
8567  * @peer_mac: mac of datapath PEER handle
8568  * @sec_idx:    Security id (mcast, ucast)
8569  *
8570  * return sec_type: Security type
8571  */
8572 static int dp_get_sec_type(struct cdp_soc_t *soc, uint8_t vdev_id,
8573 			   uint8_t *peer_mac, uint8_t sec_idx)
8574 {
8575 	int sec_type = 0;
8576 	struct dp_peer *peer =
8577 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
8578 						       peer_mac, 0, vdev_id,
8579 						       DP_MOD_ID_CDP);
8580 
8581 	if (!peer) {
8582 		dp_cdp_err("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
8583 		return sec_type;
8584 	}
8585 
8586 	if (!peer->txrx_peer) {
8587 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8588 		dp_peer_debug("%pK: txrx peer is NULL!\n", soc);
8589 		return sec_type;
8590 	}
8591 	sec_type = peer->txrx_peer->security[sec_idx].sec_type;
8592 
8593 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8594 	return sec_type;
8595 }
8596 
8597 /*
8598  * dp_peer_authorize() - authorize txrx peer
8599  * @soc: soc handle
8600  * @vdev_id: id of dp handle
8601  * @peer_mac: mac of datapath PEER handle
8602  * @authorize
8603  *
8604  */
8605 static QDF_STATUS
8606 dp_peer_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8607 		  uint8_t *peer_mac, uint32_t authorize)
8608 {
8609 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8610 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8611 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac,
8612 							      0, vdev_id,
8613 							      DP_MOD_ID_CDP);
8614 
8615 	if (!peer) {
8616 		dp_cdp_debug("%pK: Peer is NULL!\n", soc);
8617 		status = QDF_STATUS_E_FAILURE;
8618 	} else {
8619 		peer->authorize = authorize ? 1 : 0;
8620 		if (peer->txrx_peer)
8621 			peer->txrx_peer->authorize = peer->authorize;
8622 
8623 		if (!peer->authorize)
8624 			dp_peer_flush_frags(soc_hdl, vdev_id, peer_mac);
8625 
8626 		dp_mlo_peer_authorize(soc, peer);
8627 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8628 	}
8629 
8630 	return status;
8631 }
8632 
8633 /*
8634  * dp_peer_get_authorize() - get peer authorize status
8635  * @soc: soc handle
8636  * @vdev_id: id of dp handle
8637  * @peer_mac: mac of datapath PEER handle
8638  *
8639  * Retusn: true is peer is authorized, false otherwise
8640  */
8641 static bool
8642 dp_peer_get_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8643 		      uint8_t *peer_mac)
8644 {
8645 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8646 	bool authorize = false;
8647 	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
8648 						      0, vdev_id,
8649 						      DP_MOD_ID_CDP);
8650 
8651 	if (!peer) {
8652 		dp_cdp_debug("%pK: Peer is NULL!\n", soc);
8653 		return authorize;
8654 	}
8655 
8656 	authorize = peer->authorize;
8657 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8658 
8659 	return authorize;
8660 }
8661 
8662 /**
8663  * dp_vdev_unref_delete() - check and process vdev delete
8664  * @soc : DP specific soc pointer
8665  * @vdev: DP specific vdev pointer
8666  * @mod_id: module id
8667  *
8668  */
8669 void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev,
8670 			  enum dp_mod_id mod_id)
8671 {
8672 	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
8673 	void *vdev_delete_context = NULL;
8674 	uint8_t vdev_id = vdev->vdev_id;
8675 	struct dp_pdev *pdev = vdev->pdev;
8676 	struct dp_vdev *tmp_vdev = NULL;
8677 	uint8_t found = 0;
8678 
8679 	QDF_ASSERT(qdf_atomic_dec_return(&vdev->mod_refs[mod_id]) >= 0);
8680 
8681 	/* Return if this is not the last reference*/
8682 	if (!qdf_atomic_dec_and_test(&vdev->ref_cnt))
8683 		return;
8684 
8685 	/*
8686 	 * This should be set as last reference need to released
8687 	 * after cdp_vdev_detach() is called
8688 	 *
8689 	 * if this assert is hit there is a ref count issue
8690 	 */
8691 	QDF_ASSERT(vdev->delete.pending);
8692 
8693 	vdev_delete_cb = vdev->delete.callback;
8694 	vdev_delete_context = vdev->delete.context;
8695 
8696 	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")- its last peer is done",
8697 		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
8698 
8699 	if (wlan_op_mode_monitor == vdev->opmode) {
8700 		dp_monitor_vdev_delete(soc, vdev);
8701 		goto free_vdev;
8702 	}
8703 
8704 	/* all peers are gone, go ahead and delete it */
8705 	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
8706 			FLOW_TYPE_VDEV, vdev_id);
8707 	dp_tx_vdev_detach(vdev);
8708 	dp_monitor_vdev_detach(vdev);
8709 
8710 free_vdev:
8711 	qdf_spinlock_destroy(&vdev->peer_list_lock);
8712 
8713 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
8714 	TAILQ_FOREACH(tmp_vdev, &soc->inactive_vdev_list,
8715 		      inactive_list_elem) {
8716 		if (tmp_vdev == vdev) {
8717 			found = 1;
8718 			break;
8719 		}
8720 	}
8721 	if (found)
8722 		TAILQ_REMOVE(&soc->inactive_vdev_list, vdev,
8723 			     inactive_list_elem);
8724 	/* delete this peer from the list */
8725 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
8726 
8727 	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")",
8728 		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
8729 	wlan_minidump_remove(vdev, sizeof(*vdev), soc->ctrl_psoc,
8730 			     WLAN_MD_DP_VDEV, "dp_vdev");
8731 	qdf_mem_free(vdev);
8732 	vdev = NULL;
8733 
8734 	if (vdev_delete_cb)
8735 		vdev_delete_cb(vdev_delete_context);
8736 }
8737 
8738 qdf_export_symbol(dp_vdev_unref_delete);
8739 
8740 /*
8741  * dp_peer_unref_delete() - unref and delete peer
8742  * @peer_handle:    Datapath peer handle
8743  * @mod_id:         ID of module releasing reference
8744  *
8745  */
8746 void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id mod_id)
8747 {
8748 	struct dp_vdev *vdev = peer->vdev;
8749 	struct dp_pdev *pdev = vdev->pdev;
8750 	struct dp_soc *soc = pdev->soc;
8751 	uint16_t peer_id;
8752 	struct dp_peer *tmp_peer;
8753 	bool found = false;
8754 
8755 	if (mod_id > DP_MOD_ID_RX)
8756 		QDF_ASSERT(qdf_atomic_dec_return(&peer->mod_refs[mod_id]) >= 0);
8757 
8758 	/*
8759 	 * Hold the lock all the way from checking if the peer ref count
8760 	 * is zero until the peer references are removed from the hash
8761 	 * table and vdev list (if the peer ref count is zero).
8762 	 * This protects against a new HL tx operation starting to use the
8763 	 * peer object just after this function concludes it's done being used.
8764 	 * Furthermore, the lock needs to be held while checking whether the
8765 	 * vdev's list of peers is empty, to make sure that list is not modified
8766 	 * concurrently with the empty check.
8767 	 */
8768 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
8769 		peer_id = peer->peer_id;
8770 
8771 		/*
8772 		 * Make sure that the reference to the peer in
8773 		 * peer object map is removed
8774 		 */
8775 		QDF_ASSERT(peer_id == HTT_INVALID_PEER);
8776 
8777 		dp_peer_debug("Deleting peer %pK ("QDF_MAC_ADDR_FMT")", peer,
8778 			      QDF_MAC_ADDR_REF(peer->mac_addr.raw));
8779 
8780 		dp_peer_sawf_ctx_free(soc, peer);
8781 
8782 		wlan_minidump_remove(peer, sizeof(*peer), soc->ctrl_psoc,
8783 				     WLAN_MD_DP_PEER, "dp_peer");
8784 
8785 		qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
8786 		TAILQ_FOREACH(tmp_peer, &soc->inactive_peer_list,
8787 			      inactive_list_elem) {
8788 			if (tmp_peer == peer) {
8789 				found = 1;
8790 				break;
8791 			}
8792 		}
8793 		if (found)
8794 			TAILQ_REMOVE(&soc->inactive_peer_list, peer,
8795 				     inactive_list_elem);
8796 		/* delete this peer from the list */
8797 		qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
8798 		DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
8799 		dp_peer_update_state(soc, peer, DP_PEER_STATE_FREED);
8800 
8801 		/* cleanup the peer data */
8802 		dp_peer_cleanup(vdev, peer);
8803 
8804 		if (!IS_MLO_DP_MLD_PEER(peer))
8805 			dp_monitor_peer_detach(soc, peer);
8806 
8807 		qdf_spinlock_destroy(&peer->peer_state_lock);
8808 
8809 		dp_txrx_peer_detach(soc, peer);
8810 		qdf_mem_free(peer);
8811 
8812 		/*
8813 		 * Decrement ref count taken at peer create
8814 		 */
8815 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CHILD);
8816 	}
8817 }
8818 
8819 qdf_export_symbol(dp_peer_unref_delete);
8820 
8821 /*
8822  * dp_txrx_peer_unref_delete() - unref and delete peer
8823  * @handle: Datapath txrx ref handle
8824  * @mod_id: Module ID of the caller
8825  *
8826  */
8827 void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle,
8828 			       enum dp_mod_id mod_id)
8829 {
8830 	dp_peer_unref_delete((struct dp_peer *)handle, mod_id);
8831 }
8832 
8833 qdf_export_symbol(dp_txrx_peer_unref_delete);
8834 
8835 /*
8836  * dp_peer_delete_wifi3() – Delete txrx peer
8837  * @soc_hdl: soc handle
8838  * @vdev_id: id of dp handle
8839  * @peer_mac: mac of datapath PEER handle
8840  * @bitmap: bitmap indicating special handling of request.
8841  * @peer_type: peer type (link or MLD)
8842  *
8843  */
8844 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
8845 				       uint8_t vdev_id,
8846 				       uint8_t *peer_mac, uint32_t bitmap,
8847 				       enum cdp_peer_type peer_type)
8848 {
8849 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8850 	struct dp_peer *peer;
8851 	struct cdp_peer_info peer_info = { 0 };
8852 	struct dp_vdev *vdev = NULL;
8853 
8854 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac,
8855 				 false, peer_type);
8856 	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
8857 
8858 	/* Peer can be null for monitor vap mac address */
8859 	if (!peer) {
8860 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
8861 			  "%s: Invalid peer\n", __func__);
8862 		return QDF_STATUS_E_FAILURE;
8863 	}
8864 
8865 	if (!peer->valid) {
8866 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8867 		dp_err("Invalid peer: "QDF_MAC_ADDR_FMT,
8868 			QDF_MAC_ADDR_REF(peer_mac));
8869 		return QDF_STATUS_E_ALREADY;
8870 	}
8871 
8872 	vdev = peer->vdev;
8873 
8874 	if (!vdev) {
8875 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8876 		return QDF_STATUS_E_FAILURE;
8877 	}
8878 
8879 	peer->valid = 0;
8880 
8881 	dp_init_info("%pK: peer %pK (" QDF_MAC_ADDR_FMT ")",
8882 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
8883 
8884 	dp_local_peer_id_free(peer->vdev->pdev, peer);
8885 
8886 	/* Drop all rx packets before deleting peer */
8887 	dp_clear_peer_internal(soc, peer);
8888 
8889 	qdf_spinlock_destroy(&peer->peer_info_lock);
8890 	dp_peer_multipass_list_remove(peer);
8891 
8892 	/* remove the reference to the peer from the hash table */
8893 	dp_peer_find_hash_remove(soc, peer);
8894 
8895 	dp_peer_vdev_list_remove(soc, vdev, peer);
8896 
8897 	dp_peer_mlo_delete(peer);
8898 
8899 	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
8900 	TAILQ_INSERT_TAIL(&soc->inactive_peer_list, peer,
8901 			  inactive_list_elem);
8902 	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
8903 
8904 	/*
8905 	 * Remove the reference added during peer_attach.
8906 	 * The peer will still be left allocated until the
8907 	 * PEER_UNMAP message arrives to remove the other
8908 	 * reference, added by the PEER_MAP message.
8909 	 */
8910 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
8911 	/*
8912 	 * Remove the reference taken above
8913 	 */
8914 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8915 
8916 	return QDF_STATUS_SUCCESS;
8917 }
8918 
8919 #ifdef DP_RX_UDP_OVER_PEER_ROAM
/*
 * dp_update_roaming_peer_wifi3() - record roaming peer auth status on vdev
 * @soc_hdl: Datapath soc handle
 * @vdev_id: id of the vdev object
 * @peer_mac: MAC address of the roaming peer
 * @auth_status: authentication status reported for the roaming peer
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE if the vdev
 *	   reference cannot be taken
 */
static QDF_STATUS dp_update_roaming_peer_wifi3(struct cdp_soc_t *soc_hdl,
					       uint8_t vdev_id,
					       uint8_t *peer_mac,
					       uint32_t auth_status)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	vdev->roaming_peer_status = auth_status;
	qdf_mem_copy(vdev->roaming_peer_mac.raw, peer_mac,
		     QDF_MAC_ADDR_SIZE);
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}
8938 #endif
8939 /*
8940  * dp_get_vdev_mac_addr_wifi3() – Detach txrx peer
8941  * @soc_hdl: Datapath soc handle
8942  * @vdev_id: virtual interface id
8943  *
8944  * Return: MAC address on success, NULL on failure.
8945  *
8946  */
8947 static uint8_t *dp_get_vdev_mac_addr_wifi3(struct cdp_soc_t *soc_hdl,
8948 					   uint8_t vdev_id)
8949 {
8950 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8951 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8952 						     DP_MOD_ID_CDP);
8953 	uint8_t *mac = NULL;
8954 
8955 	if (!vdev)
8956 		return NULL;
8957 
8958 	mac = vdev->mac_addr.raw;
8959 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8960 
8961 	return mac;
8962 }
8963 
8964 /*
8965  * dp_vdev_set_wds() - Enable per packet stats
8966  * @soc: DP soc handle
8967  * @vdev_id: id of DP VDEV handle
8968  * @val: value
8969  *
8970  * Return: none
8971  */
8972 static int dp_vdev_set_wds(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8973 			   uint32_t val)
8974 {
8975 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8976 	struct dp_vdev *vdev =
8977 		dp_vdev_get_ref_by_id((struct dp_soc *)soc, vdev_id,
8978 				      DP_MOD_ID_CDP);
8979 
8980 	if (!vdev)
8981 		return QDF_STATUS_E_FAILURE;
8982 
8983 	vdev->wds_enabled = val;
8984 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8985 
8986 	return QDF_STATUS_SUCCESS;
8987 }
8988 
8989 static int dp_get_opmode(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
8990 {
8991 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8992 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8993 						     DP_MOD_ID_CDP);
8994 	int opmode;
8995 
8996 	if (!vdev) {
8997 		dp_err("vdev for id %d is NULL", vdev_id);
8998 		return -EINVAL;
8999 	}
9000 	opmode = vdev->opmode;
9001 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9002 
9003 	return opmode;
9004 }
9005 
9006 /**
9007  * dp_get_os_rx_handles_from_vdev_wifi3() - Get os rx handles for a vdev
9008  * @soc_hdl: ol_txrx_soc_handle handle
9009  * @vdev_id: vdev id for which os rx handles are needed
9010  * @stack_fn_p: pointer to stack function pointer
9011  * @osif_handle_p: pointer to ol_osif_vdev_handle
9012  *
9013  * Return: void
9014  */
9015 static
9016 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_soc_t *soc_hdl,
9017 					  uint8_t vdev_id,
9018 					  ol_txrx_rx_fp *stack_fn_p,
9019 					  ol_osif_vdev_handle *osif_vdev_p)
9020 {
9021 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9022 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9023 						     DP_MOD_ID_CDP);
9024 
9025 	if (qdf_unlikely(!vdev)) {
9026 		*stack_fn_p = NULL;
9027 		*osif_vdev_p = NULL;
9028 		return;
9029 	}
9030 	*stack_fn_p = vdev->osif_rx_stack;
9031 	*osif_vdev_p = vdev->osif_vdev;
9032 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9033 }
9034 
9035 /**
9036  * dp_get_ctrl_pdev_from_vdev() - Get control pdev of vdev
9037  * @soc_hdl: datapath soc handle
9038  * @vdev_id: virtual device/interface id
9039  *
9040  * Return: Handle to control pdev
9041  */
9042 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(
9043 						struct cdp_soc_t *soc_hdl,
9044 						uint8_t vdev_id)
9045 {
9046 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9047 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9048 						     DP_MOD_ID_CDP);
9049 	struct dp_pdev *pdev;
9050 
9051 	if (!vdev)
9052 		return NULL;
9053 
9054 	pdev = vdev->pdev;
9055 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9056 	return pdev ? (struct cdp_cfg *)pdev->wlan_cfg_ctx : NULL;
9057 }
9058 
9059 /**
9060  * dp_get_tx_pending() - read pending tx
9061  * @pdev_handle: Datapath PDEV handle
9062  *
9063  * Return: outstanding tx
9064  */
9065 static int32_t dp_get_tx_pending(struct cdp_pdev *pdev_handle)
9066 {
9067 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
9068 
9069 	return qdf_atomic_read(&pdev->num_tx_outstanding);
9070 }
9071 
9072 /**
9073  * dp_get_peer_mac_from_peer_id() - get peer mac
9074  * @pdev_handle: Datapath PDEV handle
9075  * @peer_id: Peer ID
9076  * @peer_mac: MAC addr of PEER
9077  *
9078  * Return: QDF_STATUS
9079  */
9080 static QDF_STATUS dp_get_peer_mac_from_peer_id(struct cdp_soc_t *soc,
9081 					       uint32_t peer_id,
9082 					       uint8_t *peer_mac)
9083 {
9084 	struct dp_peer *peer;
9085 
9086 	if (soc && peer_mac) {
9087 		peer = dp_peer_get_ref_by_id((struct dp_soc *)soc,
9088 					     (uint16_t)peer_id,
9089 					     DP_MOD_ID_CDP);
9090 		if (peer) {
9091 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
9092 				     QDF_MAC_ADDR_SIZE);
9093 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9094 			return QDF_STATUS_SUCCESS;
9095 		}
9096 	}
9097 
9098 	return QDF_STATUS_E_FAILURE;
9099 }
9100 
9101 #ifdef MESH_MODE_SUPPORT
9102 static
9103 void dp_vdev_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
9104 {
9105 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
9106 
9107 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
9108 	vdev->mesh_vdev = val;
9109 	if (val)
9110 		vdev->skip_sw_tid_classification |=
9111 			DP_TX_MESH_ENABLED;
9112 	else
9113 		vdev->skip_sw_tid_classification &=
9114 			~DP_TX_MESH_ENABLED;
9115 }
9116 
9117 /*
9118  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
9119  * @vdev_hdl: virtual device object
9120  * @val: value to be set
9121  *
9122  * Return: void
9123  */
9124 static
9125 void dp_vdev_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
9126 {
9127 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
9128 
9129 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
9130 	vdev->mesh_rx_filter = val;
9131 }
9132 #endif
9133 
9134 /*
9135  * dp_vdev_set_hlos_tid_override() - to set hlos tid override
9136  * @vdev_hdl: virtual device object
9137  * @val: value to be set
9138  *
9139  * Return: void
9140  */
9141 static
9142 void dp_vdev_set_hlos_tid_override(struct dp_vdev *vdev, uint32_t val)
9143 {
9144 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
9145 	if (val)
9146 		vdev->skip_sw_tid_classification |=
9147 			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
9148 	else
9149 		vdev->skip_sw_tid_classification &=
9150 			~DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
9151 }
9152 
9153 /*
9154  * dp_vdev_get_hlos_tid_override() - to get hlos tid override flag
9155  * @vdev_hdl: virtual device object
9156  * @val: value to be set
9157  *
9158  * Return: 1 if this flag is set
9159  */
9160 static
9161 uint8_t dp_vdev_get_hlos_tid_override(struct cdp_vdev *vdev_hdl)
9162 {
9163 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
9164 
9165 	return !!(vdev->skip_sw_tid_classification &
9166 			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED);
9167 }
9168 
9169 #ifdef VDEV_PEER_PROTOCOL_COUNT
9170 static void dp_enable_vdev_peer_protocol_count(struct cdp_soc_t *soc_hdl,
9171 					       int8_t vdev_id,
9172 					       bool enable)
9173 {
9174 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9175 	struct dp_vdev *vdev;
9176 
9177 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
9178 	if (!vdev)
9179 		return;
9180 
9181 	dp_info("enable %d vdev_id %d", enable, vdev_id);
9182 	vdev->peer_protocol_count_track = enable;
9183 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9184 }
9185 
9186 static void dp_enable_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
9187 						   int8_t vdev_id,
9188 						   int drop_mask)
9189 {
9190 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9191 	struct dp_vdev *vdev;
9192 
9193 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
9194 	if (!vdev)
9195 		return;
9196 
9197 	dp_info("drop_mask %d vdev_id %d", drop_mask, vdev_id);
9198 	vdev->peer_protocol_count_dropmask = drop_mask;
9199 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9200 }
9201 
9202 static int dp_is_vdev_peer_protocol_count_enabled(struct cdp_soc_t *soc_hdl,
9203 						  int8_t vdev_id)
9204 {
9205 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9206 	struct dp_vdev *vdev;
9207 	int peer_protocol_count_track;
9208 
9209 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
9210 	if (!vdev)
9211 		return 0;
9212 
9213 	dp_info("enable %d vdev_id %d", vdev->peer_protocol_count_track,
9214 		vdev_id);
9215 	peer_protocol_count_track =
9216 		vdev->peer_protocol_count_track;
9217 
9218 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9219 	return peer_protocol_count_track;
9220 }
9221 
9222 static int dp_get_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
9223 					       int8_t vdev_id)
9224 {
9225 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9226 	struct dp_vdev *vdev;
9227 	int peer_protocol_count_dropmask;
9228 
9229 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
9230 	if (!vdev)
9231 		return 0;
9232 
9233 	dp_info("drop_mask %d vdev_id %d", vdev->peer_protocol_count_dropmask,
9234 		vdev_id);
9235 	peer_protocol_count_dropmask =
9236 		vdev->peer_protocol_count_dropmask;
9237 
9238 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9239 	return peer_protocol_count_dropmask;
9240 }
9241 
9242 #endif
9243 
9244 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data)
9245 {
9246 	uint8_t pdev_count;
9247 
9248 	for (pdev_count = 0; pdev_count < MAX_PDEV_CNT; pdev_count++) {
9249 		if (soc->pdev_list[pdev_count] &&
9250 		    soc->pdev_list[pdev_count] == data)
9251 			return true;
9252 	}
9253 	return false;
9254 }
9255 
9256 /**
9257  * dp_rx_bar_stats_cb(): BAR received stats callback
9258  * @soc: SOC handle
9259  * @cb_ctxt: Call back context
9260  * @reo_status: Reo status
9261  *
9262  * return: void
9263  */
9264 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
9265 	union hal_reo_status *reo_status)
9266 {
9267 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
9268 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
9269 
9270 	if (!dp_check_pdev_exists(soc, pdev)) {
9271 		dp_err_rl("pdev doesn't exist");
9272 		return;
9273 	}
9274 
9275 	if (!qdf_atomic_read(&soc->cmn_init_done))
9276 		return;
9277 
9278 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
9279 		DP_PRINT_STATS("REO stats failure %d",
9280 			       queue_status->header.status);
9281 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
9282 		return;
9283 	}
9284 
9285 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
9286 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
9287 
9288 }
9289 
9290 /**
9291  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
9292  * @vdev: DP VDEV handle
9293  *
9294  * return: void
9295  */
9296 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
9297 			     struct cdp_vdev_stats *vdev_stats)
9298 {
9299 	struct dp_soc *soc = NULL;
9300 
9301 	if (!vdev || !vdev->pdev)
9302 		return;
9303 
9304 	soc = vdev->pdev->soc;
9305 
9306 	dp_update_vdev_ingress_stats(vdev);
9307 
9308 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
9309 
9310 	dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, vdev_stats,
9311 			     DP_MOD_ID_GENERIC_STATS);
9312 
9313 	dp_update_vdev_rate_stats(vdev_stats, &vdev->stats);
9314 
9315 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9316 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
9317 			     vdev_stats, vdev->vdev_id,
9318 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
9319 #endif
9320 }
9321 
/**
 * dp_aggregate_pdev_stats() - Consolidate stats at PDEV level
 * @pdev: DP PDEV handle
 *
 * Zeroes the pdev tx/rx/ingress buckets, then re-derives them by
 * aggregating every vdev on the pdev (each vdev itself aggregates its
 * peers). Uses a heap scratch buffer for the per-vdev result so the
 * large cdp_vdev_stats does not live on the stack.
 *
 * Return: void
 */
void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
{
	struct dp_vdev *vdev = NULL;
	struct dp_soc *soc;
	/* Atomic alloc: NOTE(review) presumably callable from
	 * non-sleepable context - confirm against callers.
	 */
	struct cdp_vdev_stats *vdev_stats =
			qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));

	if (!vdev_stats) {
		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
			   pdev->soc);
		return;
	}

	soc = pdev->soc;

	/* Reset the derived buckets before re-accumulating from vdevs */
	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
	qdf_mem_zero(&pdev->stats.rx_i, sizeof(pdev->stats.rx_i));

	if (dp_monitor_is_enable_mcopy_mode(pdev))
		dp_monitor_invalid_peer_update_pdev_stats(soc, pdev);

	/* Hold the vdev list lock while walking so vdevs cannot detach */
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {

		dp_aggregate_vdev_stats(vdev, vdev_stats);
		dp_update_pdev_stats(pdev, vdev_stats);
		dp_update_pdev_ingress_stats(pdev, vdev);
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	qdf_mem_free(vdev_stats);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id)
#endif
}
9360 
9361 /**
9362  * dp_vdev_getstats() - get vdev packet level stats
9363  * @vdev_handle: Datapath VDEV handle
9364  * @stats: cdp network device stats structure
9365  *
9366  * Return: QDF_STATUS
9367  */
9368 static QDF_STATUS dp_vdev_getstats(struct cdp_vdev *vdev_handle,
9369 				   struct cdp_dev_stats *stats)
9370 {
9371 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
9372 	struct dp_pdev *pdev;
9373 	struct dp_soc *soc;
9374 	struct cdp_vdev_stats *vdev_stats;
9375 
9376 	if (!vdev)
9377 		return QDF_STATUS_E_FAILURE;
9378 
9379 	pdev = vdev->pdev;
9380 	if (!pdev)
9381 		return QDF_STATUS_E_FAILURE;
9382 
9383 	soc = pdev->soc;
9384 
9385 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
9386 
9387 	if (!vdev_stats) {
9388 		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
9389 			   soc);
9390 		return QDF_STATUS_E_FAILURE;
9391 	}
9392 
9393 	dp_aggregate_vdev_stats(vdev, vdev_stats);
9394 
9395 	stats->tx_packets = vdev_stats->tx.comp_pkt.num;
9396 	stats->tx_bytes = vdev_stats->tx.comp_pkt.bytes;
9397 
9398 	stats->tx_errors = vdev_stats->tx.tx_failed;
9399 	stats->tx_dropped = vdev_stats->tx_i.dropped.dropped_pkt.num +
9400 			    vdev_stats->tx_i.sg.dropped_host.num +
9401 			    vdev_stats->tx_i.mcast_en.dropped_map_error +
9402 			    vdev_stats->tx_i.mcast_en.dropped_self_mac +
9403 			    vdev_stats->tx_i.mcast_en.dropped_send_fail +
9404 			    vdev_stats->tx.nawds_mcast_drop;
9405 
9406 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
9407 		stats->rx_packets = vdev_stats->rx.to_stack.num;
9408 		stats->rx_bytes = vdev_stats->rx.to_stack.bytes;
9409 	} else {
9410 		stats->rx_packets = vdev_stats->rx_i.reo_rcvd_pkt.num +
9411 				    vdev_stats->rx_i.null_q_desc_pkt.num +
9412 				    vdev_stats->rx_i.routed_eapol_pkt.num;
9413 		stats->rx_bytes = vdev_stats->rx_i.reo_rcvd_pkt.bytes +
9414 				  vdev_stats->rx_i.null_q_desc_pkt.bytes +
9415 				  vdev_stats->rx_i.routed_eapol_pkt.bytes;
9416 	}
9417 
9418 	stats->rx_errors = vdev_stats->rx.err.mic_err +
9419 			   vdev_stats->rx.err.decrypt_err +
9420 			   vdev_stats->rx.err.fcserr +
9421 			   vdev_stats->rx.err.pn_err +
9422 			   vdev_stats->rx.err.oor_err +
9423 			   vdev_stats->rx.err.jump_2k_err +
9424 			   vdev_stats->rx.err.rxdma_wifi_parse_err;
9425 
9426 	stats->rx_dropped = vdev_stats->rx.mec_drop.num +
9427 			    vdev_stats->rx.multipass_rx_pkt_drop +
9428 			    vdev_stats->rx.peer_unauth_rx_pkt_drop +
9429 			    vdev_stats->rx.policy_check_drop +
9430 			    vdev_stats->rx.nawds_mcast_drop +
9431 			    vdev_stats->rx.mcast_3addr_drop;
9432 
9433 	qdf_mem_free(vdev_stats);
9434 
9435 	return QDF_STATUS_SUCCESS;
9436 }
9437 
9438 /**
9439  * dp_pdev_getstats() - get pdev packet level stats
9440  * @pdev_handle: Datapath PDEV handle
9441  * @stats: cdp network device stats structure
9442  *
9443  * Return: QDF_STATUS
9444  */
9445 static void dp_pdev_getstats(struct cdp_pdev *pdev_handle,
9446 			     struct cdp_dev_stats *stats)
9447 {
9448 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
9449 
9450 	dp_aggregate_pdev_stats(pdev);
9451 
9452 	stats->tx_packets = pdev->stats.tx.comp_pkt.num;
9453 	stats->tx_bytes = pdev->stats.tx.comp_pkt.bytes;
9454 
9455 	stats->tx_errors = pdev->stats.tx.tx_failed;
9456 	stats->tx_dropped = pdev->stats.tx_i.dropped.dropped_pkt.num +
9457 			    pdev->stats.tx_i.sg.dropped_host.num +
9458 			    pdev->stats.tx_i.mcast_en.dropped_map_error +
9459 			    pdev->stats.tx_i.mcast_en.dropped_self_mac +
9460 			    pdev->stats.tx_i.mcast_en.dropped_send_fail +
9461 			    pdev->stats.tx.nawds_mcast_drop +
9462 			    pdev->stats.tso_stats.dropped_host.num;
9463 
9464 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(pdev->soc->wlan_cfg_ctx)) {
9465 		stats->rx_packets = pdev->stats.rx.to_stack.num;
9466 		stats->rx_bytes = pdev->stats.rx.to_stack.bytes;
9467 	} else {
9468 		stats->rx_packets = pdev->stats.rx_i.reo_rcvd_pkt.num +
9469 				    pdev->stats.rx_i.null_q_desc_pkt.num +
9470 				    pdev->stats.rx_i.routed_eapol_pkt.num;
9471 		stats->rx_bytes = pdev->stats.rx_i.reo_rcvd_pkt.bytes +
9472 				  pdev->stats.rx_i.null_q_desc_pkt.bytes +
9473 				  pdev->stats.rx_i.routed_eapol_pkt.bytes;
9474 	}
9475 
9476 	stats->rx_errors = pdev->stats.err.ip_csum_err +
9477 		pdev->stats.err.tcp_udp_csum_err +
9478 		pdev->stats.rx.err.mic_err +
9479 		pdev->stats.rx.err.decrypt_err +
9480 		pdev->stats.rx.err.fcserr +
9481 		pdev->stats.rx.err.pn_err +
9482 		pdev->stats.rx.err.oor_err +
9483 		pdev->stats.rx.err.jump_2k_err +
9484 		pdev->stats.rx.err.rxdma_wifi_parse_err;
9485 	stats->rx_dropped = pdev->stats.dropped.msdu_not_done +
9486 		pdev->stats.dropped.mec +
9487 		pdev->stats.dropped.mesh_filter +
9488 		pdev->stats.dropped.wifi_parse +
9489 		pdev->stats.dropped.mon_rx_drop +
9490 		pdev->stats.dropped.mon_radiotap_update_err +
9491 		pdev->stats.rx.mec_drop.num +
9492 		pdev->stats.rx.multipass_rx_pkt_drop +
9493 		pdev->stats.rx.peer_unauth_rx_pkt_drop +
9494 		pdev->stats.rx.policy_check_drop +
9495 		pdev->stats.rx.nawds_mcast_drop +
9496 		pdev->stats.rx.mcast_3addr_drop;
9497 }
9498 
9499 /**
9500  * dp_get_device_stats() - get interface level packet stats
9501  * @soc: soc handle
9502  * @id : vdev_id or pdev_id based on type
9503  * @stats: cdp network device stats structure
9504  * @type: device type pdev/vdev
9505  *
9506  * Return: QDF_STATUS
9507  */
9508 static QDF_STATUS dp_get_device_stats(struct cdp_soc_t *soc_hdl, uint8_t id,
9509 				      struct cdp_dev_stats *stats,
9510 				      uint8_t type)
9511 {
9512 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9513 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
9514 	struct dp_vdev *vdev;
9515 
9516 	switch (type) {
9517 	case UPDATE_VDEV_STATS:
9518 		vdev = dp_vdev_get_ref_by_id(soc, id, DP_MOD_ID_CDP);
9519 
9520 		if (vdev) {
9521 			status = dp_vdev_getstats((struct cdp_vdev *)vdev,
9522 						  stats);
9523 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9524 		}
9525 		return status;
9526 	case UPDATE_PDEV_STATS:
9527 		{
9528 			struct dp_pdev *pdev =
9529 				dp_get_pdev_from_soc_pdev_id_wifi3(
9530 						(struct dp_soc *)soc,
9531 						 id);
9532 			if (pdev) {
9533 				dp_pdev_getstats((struct cdp_pdev *)pdev,
9534 						 stats);
9535 				return QDF_STATUS_SUCCESS;
9536 			}
9537 		}
9538 		break;
9539 	default:
9540 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9541 			"apstats cannot be updated for this input "
9542 			"type %d", type);
9543 		break;
9544 	}
9545 
9546 	return QDF_STATUS_E_FAILURE;
9547 }
9548 
9549 const
9550 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
9551 {
9552 	switch (ring_type) {
9553 	case REO_DST:
9554 		return "Reo_dst";
9555 	case REO_EXCEPTION:
9556 		return "Reo_exception";
9557 	case REO_CMD:
9558 		return "Reo_cmd";
9559 	case REO_REINJECT:
9560 		return "Reo_reinject";
9561 	case REO_STATUS:
9562 		return "Reo_status";
9563 	case WBM2SW_RELEASE:
9564 		return "wbm2sw_release";
9565 	case TCL_DATA:
9566 		return "tcl_data";
9567 	case TCL_CMD_CREDIT:
9568 		return "tcl_cmd_credit";
9569 	case TCL_STATUS:
9570 		return "tcl_status";
9571 	case SW2WBM_RELEASE:
9572 		return "sw2wbm_release";
9573 	case RXDMA_BUF:
9574 		return "Rxdma_buf";
9575 	case RXDMA_DST:
9576 		return "Rxdma_dst";
9577 	case RXDMA_MONITOR_BUF:
9578 		return "Rxdma_monitor_buf";
9579 	case RXDMA_MONITOR_DESC:
9580 		return "Rxdma_monitor_desc";
9581 	case RXDMA_MONITOR_STATUS:
9582 		return "Rxdma_monitor_status";
9583 	case RXDMA_MONITOR_DST:
9584 		return "Rxdma_monitor_destination";
9585 	case WBM_IDLE_LINK:
9586 		return "WBM_hw_idle_link";
9587 	case PPE2TCL:
9588 		return "PPE2TCL";
9589 	case REO2PPE:
9590 		return "REO2PPE";
9591 	default:
9592 		dp_err("Invalid ring type");
9593 		break;
9594 	}
9595 	return "Invalid";
9596 }
9597 
9598 /*
9599  * dp_print_napi_stats(): NAPI stats
9600  * @soc - soc handle
9601  */
9602 void dp_print_napi_stats(struct dp_soc *soc)
9603 {
9604 	hif_print_napi_stats(soc->hif_handle);
9605 }
9606 
9607 /**
9608  * dp_txrx_host_peer_stats_clr): Reinitialize the txrx peer stats
9609  * @soc: Datapath soc
9610  * @peer: Datatpath peer
9611  * @arg: argument to iter function
9612  *
9613  * Return: QDF_STATUS
9614  */
9615 static inline void
9616 dp_txrx_host_peer_stats_clr(struct dp_soc *soc,
9617 			    struct dp_peer *peer,
9618 			    void *arg)
9619 {
9620 	struct dp_txrx_peer *txrx_peer = NULL;
9621 	struct dp_peer *tgt_peer = NULL;
9622 	struct cdp_interface_peer_stats peer_stats_intf;
9623 
9624 	qdf_mem_zero(&peer_stats_intf, sizeof(struct cdp_interface_peer_stats));
9625 
9626 	DP_STATS_CLR(peer);
9627 	/* Clear monitor peer stats */
9628 	dp_monitor_peer_reset_stats(soc, peer);
9629 
9630 	/* Clear MLD peer stats only when link peer is primary */
9631 	if (dp_peer_is_primary_link_peer(peer)) {
9632 		tgt_peer = dp_get_tgt_peer_from_peer(peer);
9633 		if (tgt_peer) {
9634 			DP_STATS_CLR(tgt_peer);
9635 			txrx_peer = tgt_peer->txrx_peer;
9636 			dp_txrx_peer_stats_clr(txrx_peer);
9637 		}
9638 	}
9639 
9640 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9641 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, peer->vdev->pdev->soc,
9642 			     &peer_stats_intf,  peer->peer_id,
9643 			     UPDATE_PEER_STATS, peer->vdev->pdev->pdev_id);
9644 #endif
9645 }
9646 
#ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING
/*
 * dp_srng_clear_ring_usage_wm_stats() - reset SRNG usage watermark stats
 * @soc: DP soc handle
 *
 * Clears the usage watermark tracking on every REO destination ring.
 */
static inline void dp_srng_clear_ring_usage_wm_stats(struct dp_soc *soc)
{
	int i;

	for (i = 0; i < soc->num_reo_dest_rings; i++)
		hal_srng_clear_ring_usage_wm_locked(soc->hal_soc,
						    soc->reo_dest_ring[i].hal_srng);
}
#else
/* Watermark tracking compiled out: nothing to clear */
static inline void dp_srng_clear_ring_usage_wm_stats(struct dp_soc *soc)
{
}
#endif
9661 
9662 /**
9663  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
9664  * @vdev: DP_VDEV handle
9665  * @dp_soc: DP_SOC handle
9666  *
9667  * Return: QDF_STATUS
9668  */
9669 static inline QDF_STATUS
9670 dp_txrx_host_stats_clr(struct dp_vdev *vdev, struct dp_soc *soc)
9671 {
9672 	if (!vdev || !vdev->pdev)
9673 		return QDF_STATUS_E_FAILURE;
9674 
9675 	/*
9676 	 * if NSS offload is enabled, then send message
9677 	 * to NSS FW to clear the stats. Once NSS FW clears the statistics
9678 	 * then clear host statistics.
9679 	 */
9680 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
9681 		if (soc->cdp_soc.ol_ops->nss_stats_clr)
9682 			soc->cdp_soc.ol_ops->nss_stats_clr(soc->ctrl_psoc,
9683 							   vdev->vdev_id);
9684 	}
9685 
9686 	dp_vdev_stats_hw_offload_target_clear(soc, vdev->pdev->pdev_id,
9687 					      (1 << vdev->vdev_id));
9688 
9689 	DP_STATS_CLR(vdev->pdev);
9690 	DP_STATS_CLR(vdev->pdev->soc);
9691 	DP_STATS_CLR(vdev);
9692 
9693 	hif_clear_napi_stats(vdev->pdev->soc->hif_handle);
9694 
9695 	dp_vdev_iterate_peer(vdev, dp_txrx_host_peer_stats_clr, NULL,
9696 			     DP_MOD_ID_GENERIC_STATS);
9697 
9698 	dp_srng_clear_ring_usage_wm_stats(soc);
9699 
9700 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9701 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
9702 			     &vdev->stats,  vdev->vdev_id,
9703 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
9704 #endif
9705 	return QDF_STATUS_SUCCESS;
9706 }
9707 
9708 /**
9709  * dp_get_peer_calibr_stats()- Get peer calibrated stats
9710  * @peer: Datapath peer
9711  * @peer_stats: buffer for peer stats
9712  *
9713  * Return: none
9714  */
9715 static inline
9716 void dp_get_peer_calibr_stats(struct dp_peer *peer,
9717 			      struct cdp_peer_stats *peer_stats)
9718 {
9719 	struct dp_peer *tgt_peer;
9720 
9721 	tgt_peer = dp_get_tgt_peer_from_peer(peer);
9722 	if (!tgt_peer)
9723 		return;
9724 
9725 	peer_stats->tx.last_per = tgt_peer->stats.tx.last_per;
9726 	peer_stats->tx.tx_bytes_success_last =
9727 				tgt_peer->stats.tx.tx_bytes_success_last;
9728 	peer_stats->tx.tx_data_success_last =
9729 					tgt_peer->stats.tx.tx_data_success_last;
9730 	peer_stats->tx.tx_byte_rate = tgt_peer->stats.tx.tx_byte_rate;
9731 	peer_stats->tx.tx_data_rate = tgt_peer->stats.tx.tx_data_rate;
9732 	peer_stats->tx.tx_data_ucast_last =
9733 					tgt_peer->stats.tx.tx_data_ucast_last;
9734 	peer_stats->tx.tx_data_ucast_rate =
9735 					tgt_peer->stats.tx.tx_data_ucast_rate;
9736 	peer_stats->tx.inactive_time = tgt_peer->stats.tx.inactive_time;
9737 	peer_stats->rx.rx_bytes_success_last =
9738 				tgt_peer->stats.rx.rx_bytes_success_last;
9739 	peer_stats->rx.rx_data_success_last =
9740 				tgt_peer->stats.rx.rx_data_success_last;
9741 	peer_stats->rx.rx_byte_rate = tgt_peer->stats.rx.rx_byte_rate;
9742 	peer_stats->rx.rx_data_rate = tgt_peer->stats.rx.rx_data_rate;
9743 }
9744 
9745 /**
9746  * dp_get_peer_basic_stats()- Get peer basic stats
9747  * @peer: Datapath peer
9748  * @peer_stats: buffer for peer stats
9749  *
9750  * Return: none
9751  */
9752 #ifdef QCA_ENHANCED_STATS_SUPPORT
9753 static inline
9754 void dp_get_peer_basic_stats(struct dp_peer *peer,
9755 			     struct cdp_peer_stats *peer_stats)
9756 {
9757 	struct dp_txrx_peer *txrx_peer;
9758 
9759 	txrx_peer = dp_get_txrx_peer(peer);
9760 	if (!txrx_peer)
9761 		return;
9762 
9763 	peer_stats->tx.comp_pkt.num += txrx_peer->comp_pkt.num;
9764 	peer_stats->tx.comp_pkt.bytes += txrx_peer->comp_pkt.bytes;
9765 	peer_stats->tx.tx_failed += txrx_peer->tx_failed;
9766 	peer_stats->rx.to_stack.num += txrx_peer->to_stack.num;
9767 	peer_stats->rx.to_stack.bytes += txrx_peer->to_stack.bytes;
9768 }
9769 #else
9770 static inline
9771 void dp_get_peer_basic_stats(struct dp_peer *peer,
9772 			     struct cdp_peer_stats *peer_stats)
9773 {
9774 	struct dp_txrx_peer *txrx_peer;
9775 
9776 	txrx_peer = peer->txrx_peer;
9777 	if (!txrx_peer)
9778 		return;
9779 
9780 	peer_stats->tx.comp_pkt.num += txrx_peer->comp_pkt.num;
9781 	peer_stats->tx.comp_pkt.bytes += txrx_peer->comp_pkt.bytes;
9782 	peer_stats->tx.tx_failed += txrx_peer->tx_failed;
9783 	peer_stats->rx.to_stack.num += txrx_peer->to_stack.num;
9784 	peer_stats->rx.to_stack.bytes += txrx_peer->to_stack.bytes;
9785 }
9786 #endif
9787 
9788 /**
9789  * dp_get_peer_per_pkt_stats()- Get peer per pkt stats
9790  * @peer: Datapath peer
9791  * @peer_stats: buffer for peer stats
9792  *
9793  * Return: none
9794  */
9795 #ifdef QCA_ENHANCED_STATS_SUPPORT
9796 static inline
9797 void dp_get_peer_per_pkt_stats(struct dp_peer *peer,
9798 			       struct cdp_peer_stats *peer_stats)
9799 {
9800 	struct dp_txrx_peer *txrx_peer;
9801 	struct dp_peer_per_pkt_stats *per_pkt_stats;
9802 
9803 	txrx_peer = dp_get_txrx_peer(peer);
9804 	if (!txrx_peer)
9805 		return;
9806 
9807 	per_pkt_stats = &txrx_peer->stats.per_pkt_stats;
9808 	DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats);
9809 }
9810 #else
9811 static inline
9812 void dp_get_peer_per_pkt_stats(struct dp_peer *peer,
9813 			       struct cdp_peer_stats *peer_stats)
9814 {
9815 	struct dp_txrx_peer *txrx_peer;
9816 	struct dp_peer_per_pkt_stats *per_pkt_stats;
9817 
9818 	txrx_peer = peer->txrx_peer;
9819 	if (!txrx_peer)
9820 		return;
9821 
9822 	per_pkt_stats = &txrx_peer->stats.per_pkt_stats;
9823 	DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats);
9824 }
9825 #endif
9826 
9827 /**
9828  * dp_get_peer_extd_stats()- Get peer extd stats
9829  * @peer: Datapath peer
9830  * @peer_stats: buffer for peer stats
9831  *
9832  * Return: none
9833  */
9834 #ifdef QCA_ENHANCED_STATS_SUPPORT
9835 #ifdef WLAN_FEATURE_11BE_MLO
9836 static inline
9837 void dp_get_peer_extd_stats(struct dp_peer *peer,
9838 			    struct cdp_peer_stats *peer_stats)
9839 {
9840 	struct dp_soc *soc = peer->vdev->pdev->soc;
9841 
9842 	if (IS_MLO_DP_MLD_PEER(peer)) {
9843 		uint8_t i;
9844 		struct dp_peer *link_peer;
9845 		struct dp_soc *link_peer_soc;
9846 		struct dp_mld_link_peers link_peers_info;
9847 
9848 		dp_get_link_peers_ref_from_mld_peer(soc, peer,
9849 						    &link_peers_info,
9850 						    DP_MOD_ID_CDP);
9851 		for (i = 0; i < link_peers_info.num_links; i++) {
9852 			link_peer = link_peers_info.link_peers[i];
9853 			link_peer_soc = link_peer->vdev->pdev->soc;
9854 			dp_monitor_peer_get_stats(link_peer_soc, link_peer,
9855 						  peer_stats,
9856 						  UPDATE_PEER_STATS);
9857 		}
9858 		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
9859 	} else {
9860 		dp_monitor_peer_get_stats(soc, peer, peer_stats,
9861 					  UPDATE_PEER_STATS);
9862 	}
9863 }
9864 #else
9865 static inline
9866 void dp_get_peer_extd_stats(struct dp_peer *peer,
9867 			    struct cdp_peer_stats *peer_stats)
9868 {
9869 	struct dp_soc *soc = peer->vdev->pdev->soc;
9870 
9871 	dp_monitor_peer_get_stats(soc, peer, peer_stats, UPDATE_PEER_STATS);
9872 }
9873 #endif
9874 #else
9875 static inline
9876 void dp_get_peer_extd_stats(struct dp_peer *peer,
9877 			    struct cdp_peer_stats *peer_stats)
9878 {
9879 	struct dp_txrx_peer *txrx_peer;
9880 	struct dp_peer_extd_stats *extd_stats;
9881 
9882 	txrx_peer = dp_get_txrx_peer(peer);
9883 	if (qdf_unlikely(!txrx_peer)) {
9884 		dp_err_rl("txrx_peer NULL");
9885 		return;
9886 	}
9887 
9888 	extd_stats = &txrx_peer->stats.extd_stats;
9889 	DP_UPDATE_EXTD_STATS(peer_stats, extd_stats);
9890 }
9891 #endif
9892 
9893 /**
9894  * dp_get_peer_tx_per()- Get peer packet error ratio
9895  * @peer_stats: buffer for peer stats
9896  *
9897  * Return: none
9898  */
9899 static inline
9900 void dp_get_peer_tx_per(struct cdp_peer_stats *peer_stats)
9901 {
9902 	if (peer_stats->tx.tx_success.num + peer_stats->tx.retries > 0)
9903 		peer_stats->tx.per = (peer_stats->tx.retries * 100) /
9904 				  (peer_stats->tx.tx_success.num +
9905 				   peer_stats->tx.retries);
9906 	else
9907 		peer_stats->tx.per = 0;
9908 }
9909 
9910 /**
9911  * dp_get_peer_stats()- Get peer stats
9912  * @peer: Datapath peer
9913  * @peer_stats: buffer for peer stats
9914  *
9915  * Return: none
9916  */
9917 static inline
9918 void dp_get_peer_stats(struct dp_peer *peer, struct cdp_peer_stats *peer_stats)
9919 {
9920 	dp_get_peer_calibr_stats(peer, peer_stats);
9921 
9922 	dp_get_peer_basic_stats(peer, peer_stats);
9923 
9924 	dp_get_peer_per_pkt_stats(peer, peer_stats);
9925 
9926 	dp_get_peer_extd_stats(peer, peer_stats);
9927 
9928 	dp_get_peer_tx_per(peer_stats);
9929 }
9930 
9931 /*
9932  * dp_get_host_peer_stats()- function to print peer stats
9933  * @soc: dp_soc handle
9934  * @mac_addr: mac address of the peer
9935  *
9936  * Return: QDF_STATUS
9937  */
9938 static QDF_STATUS
9939 dp_get_host_peer_stats(struct cdp_soc_t *soc, uint8_t *mac_addr)
9940 {
9941 	struct dp_peer *peer = NULL;
9942 	struct cdp_peer_stats *peer_stats = NULL;
9943 
9944 	if (!mac_addr) {
9945 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9946 			  "%s: NULL peer mac addr\n", __func__);
9947 		return QDF_STATUS_E_FAILURE;
9948 	}
9949 
9950 	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
9951 				      mac_addr, 0,
9952 				      DP_VDEV_ALL,
9953 				      DP_MOD_ID_CDP);
9954 	if (!peer) {
9955 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9956 			  "%s: Invalid peer\n", __func__);
9957 		return QDF_STATUS_E_FAILURE;
9958 	}
9959 
9960 	peer_stats = qdf_mem_malloc(sizeof(struct cdp_peer_stats));
9961 	if (!peer_stats) {
9962 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9963 			  "%s: Memory allocation failed for cdp_peer_stats\n",
9964 			  __func__);
9965 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9966 		return QDF_STATUS_E_NOMEM;
9967 	}
9968 
9969 	qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats));
9970 
9971 	dp_get_peer_stats(peer, peer_stats);
9972 	dp_print_peer_stats(peer, peer_stats);
9973 
9974 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
9975 
9976 	qdf_mem_free(peer_stats);
9977 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9978 
9979 	return QDF_STATUS_SUCCESS;
9980 }
9981 
9982 /* *
9983  * dp_dump_wbm_idle_hptp() -dump wbm idle ring, hw hp tp info.
9984  * @soc: dp soc.
9985  * @pdev: dp pdev.
9986  *
9987  * Return: None.
9988  */
9989 static void
9990 dp_dump_wbm_idle_hptp(struct dp_soc *soc, struct dp_pdev *pdev)
9991 {
9992 	uint32_t hw_head;
9993 	uint32_t hw_tail;
9994 	struct dp_srng *srng;
9995 
9996 	if (!soc) {
9997 		dp_err("soc is NULL");
9998 		return;
9999 	}
10000 
10001 	if (!pdev) {
10002 		dp_err("pdev is NULL");
10003 		return;
10004 	}
10005 
10006 	srng = &pdev->soc->wbm_idle_link_ring;
10007 	if (!srng) {
10008 		dp_err("wbm_idle_link_ring srng is NULL");
10009 		return;
10010 	}
10011 
10012 	hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &hw_head,
10013 			&hw_tail, WBM_IDLE_LINK);
10014 
10015 	dp_debug("WBM_IDLE_LINK: HW hp: %d, HW tp: %d",
10016 			hw_head, hw_tail);
10017 }
10018 
10019 
10020 /**
10021  * dp_txrx_stats_help() - Helper function for Txrx_Stats
10022  *
10023  * Return: None
10024  */
10025 static void dp_txrx_stats_help(void)
10026 {
10027 	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
10028 	dp_info("stats_option:");
10029 	dp_info("  1 -- HTT Tx Statistics");
10030 	dp_info("  2 -- HTT Rx Statistics");
10031 	dp_info("  3 -- HTT Tx HW Queue Statistics");
10032 	dp_info("  4 -- HTT Tx HW Sched Statistics");
10033 	dp_info("  5 -- HTT Error Statistics");
10034 	dp_info("  6 -- HTT TQM Statistics");
10035 	dp_info("  7 -- HTT TQM CMDQ Statistics");
10036 	dp_info("  8 -- HTT TX_DE_CMN Statistics");
10037 	dp_info("  9 -- HTT Tx Rate Statistics");
10038 	dp_info(" 10 -- HTT Rx Rate Statistics");
10039 	dp_info(" 11 -- HTT Peer Statistics");
10040 	dp_info(" 12 -- HTT Tx SelfGen Statistics");
10041 	dp_info(" 13 -- HTT Tx MU HWQ Statistics");
10042 	dp_info(" 14 -- HTT RING_IF_INFO Statistics");
10043 	dp_info(" 15 -- HTT SRNG Statistics");
10044 	dp_info(" 16 -- HTT SFM Info Statistics");
10045 	dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
10046 	dp_info(" 18 -- HTT Peer List Details");
10047 	dp_info(" 20 -- Clear Host Statistics");
10048 	dp_info(" 21 -- Host Rx Rate Statistics");
10049 	dp_info(" 22 -- Host Tx Rate Statistics");
10050 	dp_info(" 23 -- Host Tx Statistics");
10051 	dp_info(" 24 -- Host Rx Statistics");
10052 	dp_info(" 25 -- Host AST Statistics");
10053 	dp_info(" 26 -- Host SRNG PTR Statistics");
10054 	dp_info(" 27 -- Host Mon Statistics");
10055 	dp_info(" 28 -- Host REO Queue Statistics");
10056 	dp_info(" 29 -- Host Soc cfg param Statistics");
10057 	dp_info(" 30 -- Host pdev cfg param Statistics");
10058 	dp_info(" 31 -- Host NAPI stats");
10059 	dp_info(" 32 -- Host Interrupt stats");
10060 	dp_info(" 33 -- Host FISA stats");
10061 	dp_info(" 34 -- Host Register Work stats");
10062 	dp_info(" 35 -- HW REO Queue stats");
10063 	dp_info(" 36 -- Host WBM IDLE link desc ring HP/TP");
10064 	dp_info(" 37 -- Host SRNG usage watermark stats");
10065 }
10066 
#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_umac_rst_skel_enable_update(): Update skel dbg flag for umac reset
 * @soc: dp soc handle
 * @en: enable/disable
 *
 * Return: void
 */
static void dp_umac_rst_skel_enable_update(struct dp_soc *soc, bool en)
{
	soc->umac_reset_ctx.skel_enable = en;
	dp_cdp_debug("UMAC HW reset debug skelton code enabled :%u",
		     soc->umac_reset_ctx.skel_enable);
}

/**
 * dp_umac_rst_skel_enable_get(): Get skel dbg flag for umac reset
 * @soc: dp soc handle
 *
 * Return: enable/disable flag
 */
static bool dp_umac_rst_skel_enable_get(struct dp_soc *soc)
{
	return soc->umac_reset_ctx.skel_enable;
}
#else
/* UMAC HW reset support compiled out: update is a no-op */
static void dp_umac_rst_skel_enable_update(struct dp_soc *soc, bool en)
{
}

/* UMAC HW reset support compiled out: always reports disabled */
static bool dp_umac_rst_skel_enable_get(struct dp_soc *soc)
{
	return false;
}
#endif
10102 
/**
 * dp_print_host_stats()- Function to print the stats aggregated at host
 * @vdev: DP vdev handle (also used to reach the owning pdev/soc)
 * @req: host stats request; req->stats indexes dp_stats_mapping_table and
 *       req->peer_addr supplies the peer MAC for REO queue stats
 * @soc: dp soc handler
 *
 * Return: 0 on success, print error message in case of failure
 */
static int
dp_print_host_stats(struct dp_vdev *vdev,
		    struct cdp_txrx_stats_req *req,
		    struct dp_soc *soc)
{
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
	/* Translate the cdp stats id into the host-side stats type */
	enum cdp_host_txrx_stats type =
			dp_stats_mapping_table[req->stats][STATS_HOST];

	/* Fold vdev/peer counters into the pdev before printing */
	dp_aggregate_pdev_stats(pdev);

	switch (type) {
	case TXRX_CLEAR_STATS:
		dp_txrx_host_stats_clr(vdev, soc);
		break;
	case TXRX_RX_RATE_STATS:
		dp_print_rx_rates(vdev);
		break;
	case TXRX_TX_RATE_STATS:
		dp_print_tx_rates(vdev);
		break;
	case TXRX_TX_HOST_STATS:
		dp_print_pdev_tx_stats(pdev);
		dp_print_soc_tx_stats(pdev->soc);
		break;
	case TXRX_RX_HOST_STATS:
		dp_print_pdev_rx_stats(pdev);
		dp_print_soc_rx_stats(pdev->soc);
		break;
	case TXRX_AST_STATS:
		dp_print_ast_stats(pdev->soc);
		dp_print_mec_stats(pdev->soc);
		dp_print_peer_table(vdev);
		break;
	case TXRX_SRNG_PTR_STATS:
		dp_print_ring_stats(pdev);
		break;
	case TXRX_RX_MON_STATS:
		dp_monitor_print_pdev_rx_mon_stats(pdev);
		break;
	case TXRX_REO_QUEUE_STATS:
		dp_get_host_peer_stats((struct cdp_soc_t *)pdev->soc,
				       req->peer_addr);
		break;
	case TXRX_SOC_CFG_PARAMS:
		dp_print_soc_cfg_params(pdev->soc);
		break;
	case TXRX_PDEV_CFG_PARAMS:
		dp_print_pdev_cfg_params(pdev);
		break;
	case TXRX_NAPI_STATS:
		dp_print_napi_stats(pdev->soc);
		break;
	case TXRX_SOC_INTERRUPT_STATS:
		dp_print_soc_interrupt_stats(pdev->soc);
		break;
	case TXRX_SOC_FSE_STATS:
		dp_rx_dump_fisa_table(pdev->soc);
		break;
	case TXRX_HAL_REG_WRITE_STATS:
		hal_dump_reg_write_stats(pdev->soc->hal_soc);
		hal_dump_reg_write_srng_stats(pdev->soc->hal_soc);
		break;
	case TXRX_SOC_REO_HW_DESC_DUMP:
		dp_get_rx_reo_queue_info((struct cdp_soc_t *)pdev->soc,
					 vdev->vdev_id);
		break;
	case TXRX_SOC_WBM_IDLE_HPTP_DUMP:
		dp_dump_wbm_idle_hptp(pdev->soc, pdev);
		break;
	case TXRX_SRNG_USAGE_WM_STATS:
		/* Dump usage watermark stats for all SRNGs */
		dp_dump_srng_high_wm_stats(soc, 0xFF);
		break;
	default:
		dp_info("Wrong Input For TxRx Host Stats");
		dp_txrx_stats_help();
		break;
	}
	return 0;
}
10192 
/*
 * dp_pdev_tid_stats_ingress_inc
 * @pdev: pdev handle
 * @val: increase in value
 *
 * Adds @val to the pdev's count of frames received from the network stack.
 *
 * Return: void
 */
static void
dp_pdev_tid_stats_ingress_inc(struct dp_pdev *pdev, uint32_t val)
{
	pdev->stats.tid_stats.ingress_stack += val;
}
10205 
/*
 * dp_pdev_tid_stats_osif_drop
 * @pdev: pdev handle
 * @val: increase in value
 *
 * Adds @val to the pdev's count of frames dropped at the OS interface layer.
 *
 * Return: void
 */
static void
dp_pdev_tid_stats_osif_drop(struct dp_pdev *pdev, uint32_t val)
{
	pdev->stats.tid_stats.osif_drop += val;
}
10218 
10219 /*
10220  * dp_get_fw_peer_stats()- function to print peer stats
10221  * @soc: soc handle
10222  * @pdev_id : id of the pdev handle
10223  * @mac_addr: mac address of the peer
10224  * @cap: Type of htt stats requested
10225  * @is_wait: if set, wait on completion from firmware response
10226  *
10227  * Currently Supporting only MAC ID based requests Only
10228  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
10229  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
10230  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
10231  *
10232  * Return: QDF_STATUS
10233  */
10234 static QDF_STATUS
10235 dp_get_fw_peer_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
10236 		     uint8_t *mac_addr,
10237 		     uint32_t cap, uint32_t is_wait)
10238 {
10239 	int i;
10240 	uint32_t config_param0 = 0;
10241 	uint32_t config_param1 = 0;
10242 	uint32_t config_param2 = 0;
10243 	uint32_t config_param3 = 0;
10244 	struct dp_pdev *pdev =
10245 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10246 						   pdev_id);
10247 
10248 	if (!pdev)
10249 		return QDF_STATUS_E_FAILURE;
10250 
10251 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
10252 	config_param0 |= (1 << (cap + 1));
10253 
10254 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
10255 		config_param1 |= (1 << i);
10256 	}
10257 
10258 	config_param2 |= (mac_addr[0] & 0x000000ff);
10259 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
10260 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
10261 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
10262 
10263 	config_param3 |= (mac_addr[4] & 0x000000ff);
10264 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
10265 
10266 	if (is_wait) {
10267 		qdf_event_reset(&pdev->fw_peer_stats_event);
10268 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
10269 					  config_param0, config_param1,
10270 					  config_param2, config_param3,
10271 					  0, DBG_STATS_COOKIE_DP_STATS, 0);
10272 		qdf_wait_single_event(&pdev->fw_peer_stats_event,
10273 				      DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
10274 	} else {
10275 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
10276 					  config_param0, config_param1,
10277 					  config_param2, config_param3,
10278 					  0, DBG_STATS_COOKIE_DEFAULT, 0);
10279 	}
10280 
10281 	return QDF_STATUS_SUCCESS;
10282 
10283 }
10284 
/* This struct definition will be removed from here
 * once it gets added in FW headers */
struct httstats_cmd_req {
    uint32_t    config_param0;	/* opaque HTT config word 0 */
    uint32_t    config_param1;	/* opaque HTT config word 1 */
    uint32_t    config_param2;	/* opaque HTT config word 2 */
    uint32_t    config_param3;	/* opaque HTT config word 3 */
    int cookie;			/* caller cookie echoed back by firmware */
    u_int8_t    stats_id;	/* HTT stats type being requested */
};
10295 
10296 /*
10297  * dp_get_htt_stats: function to process the httstas request
10298  * @soc: DP soc handle
10299  * @pdev_id: id of pdev handle
10300  * @data: pointer to request data
10301  * @data_len: length for request data
10302  *
10303  * return: QDF_STATUS
10304  */
10305 static QDF_STATUS
10306 dp_get_htt_stats(struct cdp_soc_t *soc, uint8_t pdev_id, void *data,
10307 		 uint32_t data_len)
10308 {
10309 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
10310 	struct dp_pdev *pdev =
10311 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10312 						   pdev_id);
10313 
10314 	if (!pdev)
10315 		return QDF_STATUS_E_FAILURE;
10316 
10317 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
10318 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
10319 				req->config_param0, req->config_param1,
10320 				req->config_param2, req->config_param3,
10321 				req->cookie, DBG_STATS_COOKIE_DEFAULT, 0);
10322 
10323 	return QDF_STATUS_SUCCESS;
10324 }
10325 
10326 /**
10327  * dp_set_pdev_tidmap_prty_wifi3(): update tidmap priority in pdev
10328  * @pdev: DP_PDEV handle
10329  * @prio: tidmap priority value passed by the user
10330  *
10331  * Return: QDF_STATUS_SUCCESS on success
10332  */
10333 static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct dp_pdev *pdev,
10334 						uint8_t prio)
10335 {
10336 	struct dp_soc *soc = pdev->soc;
10337 
10338 	soc->tidmap_prty = prio;
10339 
10340 	hal_tx_set_tidmap_prty(soc->hal_soc, prio);
10341 	return QDF_STATUS_SUCCESS;
10342 }
10343 
/*
 * dp_get_peer_param: function to get parameters in peer
 * @cdp_soc: DP soc handle
 * @vdev_id: id of vdev handle
 * @peer_mac: peer mac address
 * @param: parameter type to be set
 * @val : address of buffer
 *
 * Note: currently a stub — no parameter types are handled, *val is left
 * untouched, and the call always reports success.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS dp_get_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
				    uint8_t *peer_mac,
				    enum cdp_peer_param_type param,
				    cdp_config_param_type *val)
{
	return QDF_STATUS_SUCCESS;
}
10361 
10362 /*
10363  * dp_set_peer_param: function to set parameters in peer
10364  * @cdp_soc: DP soc handle
10365  * @vdev_id: id of vdev handle
10366  * @peer_mac: peer mac address
10367  * @param: parameter type to be set
10368  * @val: value of parameter to be set
10369  *
10370  * Return: 0 for success. nonzero for failure.
10371  */
10372 static QDF_STATUS dp_set_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
10373 				    uint8_t *peer_mac,
10374 				    enum cdp_peer_param_type param,
10375 				    cdp_config_param_type val)
10376 {
10377 	struct dp_peer *peer =
10378 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
10379 						       peer_mac, 0, vdev_id,
10380 						       DP_MOD_ID_CDP);
10381 	struct dp_txrx_peer *txrx_peer;
10382 
10383 	if (!peer)
10384 		return QDF_STATUS_E_FAILURE;
10385 
10386 	txrx_peer = peer->txrx_peer;
10387 	if (!txrx_peer) {
10388 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10389 		return QDF_STATUS_E_FAILURE;
10390 	}
10391 
10392 	switch (param) {
10393 	case CDP_CONFIG_NAWDS:
10394 		txrx_peer->nawds_enabled = val.cdp_peer_param_nawds;
10395 		break;
10396 	case CDP_CONFIG_ISOLATION:
10397 		dp_set_peer_isolation(txrx_peer, val.cdp_peer_param_isolation);
10398 		break;
10399 	case CDP_CONFIG_IN_TWT:
10400 		txrx_peer->in_twt = !!(val.cdp_peer_param_in_twt);
10401 		break;
10402 	default:
10403 		break;
10404 	}
10405 
10406 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10407 
10408 	return QDF_STATUS_SUCCESS;
10409 }
10410 
/*
 * dp_get_pdev_param: function to get parameters from pdev
 * @cdp_soc: DP soc handle
 * @pdev_id: id of pdev handle
 * @param: parameter type to be get
 * @value : buffer for value
 *
 * Note: pdev is intentionally held as the opaque cdp_pdev handle and cast
 * to struct dp_pdev only for the callees that take the concrete type.
 *
 * Return: status
 */
static QDF_STATUS dp_get_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
				    enum cdp_pdev_param_type param,
				    cdp_config_param_type *val)
{
	struct cdp_pdev *pdev = (struct cdp_pdev *)
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
						   pdev_id);
	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	switch (param) {
	case CDP_CONFIG_VOW:
		val->cdp_pdev_param_cfg_vow =
				((struct dp_pdev *)pdev)->delay_stats_flag;
		break;
	case CDP_TX_PENDING:
		val->cdp_pdev_param_tx_pending = dp_get_tx_pending(pdev);
		break;
	case CDP_FILTER_MCAST_DATA:
		val->cdp_pdev_param_fltr_mcast =
				dp_monitor_pdev_get_filter_mcast_data(pdev);
		break;
	case CDP_FILTER_NO_DATA:
		val->cdp_pdev_param_fltr_none =
				dp_monitor_pdev_get_filter_non_data(pdev);
		break;
	case CDP_FILTER_UCAST_DATA:
		val->cdp_pdev_param_fltr_ucast =
				dp_monitor_pdev_get_filter_ucast_data(pdev);
		break;
	case CDP_MONITOR_CHANNEL:
		val->cdp_pdev_param_monitor_chan =
			dp_monitor_get_chan_num((struct dp_pdev *)pdev);
		break;
	case CDP_MONITOR_FREQUENCY:
		val->cdp_pdev_param_mon_freq =
			dp_monitor_get_chan_freq((struct dp_pdev *)pdev);
		break;
	default:
		/* Unknown/unsupported getter */
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
10464 
10465 /*
10466  * dp_set_pdev_param: function to set parameters in pdev
10467  * @cdp_soc: DP soc handle
10468  * @pdev_id: id of pdev handle
10469  * @param: parameter type to be set
10470  * @val: value of parameter to be set
10471  *
10472  * Return: 0 for success. nonzero for failure.
10473  */
10474 static QDF_STATUS dp_set_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
10475 				    enum cdp_pdev_param_type param,
10476 				    cdp_config_param_type val)
10477 {
10478 	int target_type;
10479 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
10480 	struct dp_pdev *pdev =
10481 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
10482 						   pdev_id);
10483 	enum reg_wifi_band chan_band;
10484 
10485 	if (!pdev)
10486 		return QDF_STATUS_E_FAILURE;
10487 
10488 	target_type = hal_get_target_type(soc->hal_soc);
10489 	switch (target_type) {
10490 	case TARGET_TYPE_QCA6750:
10491 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC0_LMAC_ID;
10492 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
10493 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
10494 		break;
10495 	case TARGET_TYPE_KIWI:
10496 	case TARGET_TYPE_MANGO:
10497 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC0_LMAC_ID;
10498 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
10499 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
10500 		break;
10501 	default:
10502 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC1_LMAC_ID;
10503 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
10504 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
10505 		break;
10506 	}
10507 
10508 	switch (param) {
10509 	case CDP_CONFIG_TX_CAPTURE:
10510 		return dp_monitor_config_debug_sniffer(pdev,
10511 						val.cdp_pdev_param_tx_capture);
10512 	case CDP_CONFIG_DEBUG_SNIFFER:
10513 		return dp_monitor_config_debug_sniffer(pdev,
10514 						val.cdp_pdev_param_dbg_snf);
10515 	case CDP_CONFIG_BPR_ENABLE:
10516 		return dp_monitor_set_bpr_enable(pdev,
10517 						 val.cdp_pdev_param_bpr_enable);
10518 	case CDP_CONFIG_PRIMARY_RADIO:
10519 		pdev->is_primary = val.cdp_pdev_param_primary_radio;
10520 		break;
10521 	case CDP_CONFIG_CAPTURE_LATENCY:
10522 		pdev->latency_capture_enable = val.cdp_pdev_param_cptr_latcy;
10523 		break;
10524 	case CDP_INGRESS_STATS:
10525 		dp_pdev_tid_stats_ingress_inc(pdev,
10526 					      val.cdp_pdev_param_ingrs_stats);
10527 		break;
10528 	case CDP_OSIF_DROP:
10529 		dp_pdev_tid_stats_osif_drop(pdev,
10530 					    val.cdp_pdev_param_osif_drop);
10531 		break;
10532 	case CDP_CONFIG_ENH_RX_CAPTURE:
10533 		return dp_monitor_config_enh_rx_capture(pdev,
10534 						val.cdp_pdev_param_en_rx_cap);
10535 	case CDP_CONFIG_ENH_TX_CAPTURE:
10536 		return dp_monitor_config_enh_tx_capture(pdev,
10537 						val.cdp_pdev_param_en_tx_cap);
10538 	case CDP_CONFIG_HMMC_TID_OVERRIDE:
10539 		pdev->hmmc_tid_override_en = val.cdp_pdev_param_hmmc_tid_ovrd;
10540 		break;
10541 	case CDP_CONFIG_HMMC_TID_VALUE:
10542 		pdev->hmmc_tid = val.cdp_pdev_param_hmmc_tid;
10543 		break;
10544 	case CDP_CHAN_NOISE_FLOOR:
10545 		pdev->chan_noise_floor = val.cdp_pdev_param_chn_noise_flr;
10546 		break;
10547 	case CDP_TIDMAP_PRTY:
10548 		dp_set_pdev_tidmap_prty_wifi3(pdev,
10549 					      val.cdp_pdev_param_tidmap_prty);
10550 		break;
10551 	case CDP_FILTER_NEIGH_PEERS:
10552 		dp_monitor_set_filter_neigh_peers(pdev,
10553 					val.cdp_pdev_param_fltr_neigh_peers);
10554 		break;
10555 	case CDP_MONITOR_CHANNEL:
10556 		dp_monitor_set_chan_num(pdev, val.cdp_pdev_param_monitor_chan);
10557 		break;
10558 	case CDP_MONITOR_FREQUENCY:
10559 		chan_band = wlan_reg_freq_to_band(val.cdp_pdev_param_mon_freq);
10560 		dp_monitor_set_chan_freq(pdev, val.cdp_pdev_param_mon_freq);
10561 		dp_monitor_set_chan_band(pdev, chan_band);
10562 		break;
10563 	case CDP_CONFIG_BSS_COLOR:
10564 		dp_monitor_set_bsscolor(pdev, val.cdp_pdev_param_bss_color);
10565 		break;
10566 	case CDP_SET_ATF_STATS_ENABLE:
10567 		dp_monitor_set_atf_stats_enable(pdev,
10568 					val.cdp_pdev_param_atf_stats_enable);
10569 		break;
10570 	case CDP_CONFIG_SPECIAL_VAP:
10571 		dp_monitor_pdev_config_scan_spcl_vap(pdev,
10572 					val.cdp_pdev_param_config_special_vap);
10573 		dp_monitor_vdev_set_monitor_mode_buf_rings(pdev);
10574 		break;
10575 	case CDP_RESET_SCAN_SPCL_VAP_STATS_ENABLE:
10576 		dp_monitor_pdev_reset_scan_spcl_vap_stats_enable(pdev,
10577 				val.cdp_pdev_param_reset_scan_spcl_vap_stats_enable);
10578 		break;
10579 	case CDP_CONFIG_ENHANCED_STATS_ENABLE:
10580 		pdev->enhanced_stats_en = val.cdp_pdev_param_enhanced_stats_enable;
10581 		break;
10582 	case CDP_ISOLATION:
10583 		pdev->isolation = val.cdp_pdev_param_isolation;
10584 		break;
10585 	case CDP_CONFIG_UNDECODED_METADATA_CAPTURE_ENABLE:
10586 		return dp_monitor_config_undecoded_metadata_capture(pdev,
10587 				val.cdp_pdev_param_undecoded_metadata_enable);
10588 		break;
10589 	default:
10590 		return QDF_STATUS_E_INVAL;
10591 	}
10592 	return QDF_STATUS_SUCCESS;
10593 }
10594 
#ifdef QCA_UNDECODED_METADATA_SUPPORT
/**
 * dp_set_pdev_phyrx_error_mask() - set phyrx error mask on a pdev
 * @cdp_soc: DP soc handle
 * @pdev_id: id of pdev handle
 * @mask: phyrx error mask
 * @mask_cont: continuation of the phyrx error mask
 *
 * Return: QDF_STATUS from the monitor layer, or QDF_STATUS_E_FAILURE when
 * the pdev id does not resolve
 */
static
QDF_STATUS dp_set_pdev_phyrx_error_mask(struct cdp_soc_t *cdp_soc,
					uint8_t pdev_id, uint32_t mask,
					uint32_t mask_cont)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
						   pdev_id);

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	return dp_monitor_config_undecoded_metadata_phyrx_error_mask(pdev,
				mask, mask_cont);
}

/**
 * dp_get_pdev_phyrx_error_mask() - read back the phyrx error mask of a pdev
 * @cdp_soc: DP soc handle
 * @pdev_id: id of pdev handle
 * @mask: buffer for the phyrx error mask
 * @mask_cont: buffer for the continuation of the phyrx error mask
 *
 * Return: QDF_STATUS from the monitor layer, or QDF_STATUS_E_FAILURE when
 * the pdev id does not resolve
 */
static
QDF_STATUS dp_get_pdev_phyrx_error_mask(struct cdp_soc_t *cdp_soc,
					uint8_t pdev_id, uint32_t *mask,
					uint32_t *mask_cont)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
						   pdev_id);

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	return dp_monitor_get_undecoded_metadata_phyrx_error_mask(pdev,
				mask, mask_cont);
}
#endif
10628 
#ifdef QCA_PEER_EXT_STATS
/**
 * dp_rx_update_peer_delay_stats() - fold an rx frame into per-peer delay stats
 * @soc: dp soc handle
 * @nbuf: received buffer; peer id, tid and ring id are read from its cb
 *
 * Looks up the peer by the id stored in the nbuf control block and, when
 * the peer has delay tracking allocated, updates its
 * delay_tid_stats[tid][ring_id] bucket. Silently returns on any lookup
 * failure.
 *
 * Return: void
 */
static void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
					  qdf_nbuf_t nbuf)
{
	struct dp_peer *peer = NULL;
	uint16_t peer_id, ring_id;
	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
	struct dp_peer_delay_stats *delay_stats = NULL;

	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
	if (peer_id > soc->max_peer_id)
		return;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
	if (qdf_unlikely(!peer))
		return;

	/* Bail out (dropping the ref) if the backing txrx_peer is absent */
	if (qdf_unlikely(!peer->txrx_peer)) {
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
		return;
	}

	if (qdf_likely(peer->txrx_peer->delay_stats)) {
		delay_stats = peer->txrx_peer->delay_stats;
		ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
		/* NOTE(review): tid comes straight from the nbuf and is not
		 * range-checked here before indexing delay_tid_stats[tid] —
		 * confirm callers guarantee tid is within array bounds. */
		dp_rx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
					nbuf);
	}
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
}
#else
/* Stub when peer extended stats are not compiled in */
static inline void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
						 qdf_nbuf_t nbuf)
{
}
#endif
10665 
10666 /*
10667  * dp_calculate_delay_stats: function to get rx delay stats
10668  * @cdp_soc: DP soc handle
10669  * @vdev_id: id of DP vdev handle
10670  * @nbuf: skb
10671  *
10672  * Return: QDF_STATUS
10673  */
10674 static QDF_STATUS
10675 dp_calculate_delay_stats(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
10676 			 qdf_nbuf_t nbuf)
10677 {
10678 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
10679 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10680 						     DP_MOD_ID_CDP);
10681 
10682 	if (!vdev)
10683 		return QDF_STATUS_SUCCESS;
10684 
10685 	if (vdev->pdev->delay_stats_flag)
10686 		dp_rx_compute_delay(vdev, nbuf);
10687 	else
10688 		dp_rx_update_peer_delay_stats(soc, nbuf);
10689 
10690 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10691 	return QDF_STATUS_SUCCESS;
10692 }
10693 
/*
 * dp_get_vdev_param: function to get parameters from vdev
 * @cdp_soc : DP soc handle
 * @vdev_id: id of DP vdev handle
 * @param: parameter type to get value
 * @val: buffer address
 *
 * Takes a vdev reference for the duration of the read and releases it on
 * every return path.
 *
 * return: status
 */
static QDF_STATUS dp_get_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
				    enum cdp_vdev_param_type param,
				    cdp_config_param_type *val)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	switch (param) {
	case CDP_ENABLE_WDS:
		val->cdp_vdev_param_wds = vdev->wds_enabled;
		break;
	case CDP_ENABLE_MEC:
		val->cdp_vdev_param_mec = vdev->mec_enabled;
		break;
	case CDP_ENABLE_DA_WAR:
		val->cdp_vdev_param_da_war = vdev->pdev->soc->da_war_enabled;
		break;
	case CDP_ENABLE_IGMP_MCAST_EN:
		val->cdp_vdev_param_igmp_mcast_en = vdev->igmp_mcast_enhanc_en;
		break;
	case CDP_ENABLE_MCAST_EN:
		val->cdp_vdev_param_mcast_en = vdev->mcast_enhancement_en;
		break;
	case CDP_ENABLE_HLOS_TID_OVERRIDE:
		val->cdp_vdev_param_hlos_tid_override =
			    dp_vdev_get_hlos_tid_override((struct cdp_vdev *)vdev);
		break;
	case CDP_ENABLE_PEER_AUTHORIZE:
		val->cdp_vdev_param_peer_authorize =
			    vdev->peer_authorize;
		break;
	case CDP_TX_ENCAP_TYPE:
		val->cdp_vdev_param_tx_encap = vdev->tx_encap_type;
		break;
	case CDP_ENABLE_CIPHER:
		val->cdp_vdev_param_cipher_en = vdev->sec_type;
		break;
#ifdef WLAN_SUPPORT_MESH_LATENCY
	case CDP_ENABLE_PEER_TID_LATENCY:
		val->cdp_vdev_param_peer_tid_latency_enable =
			vdev->peer_tid_latency_enabled;
		break;
	case CDP_SET_VAP_MESH_TID:
		val->cdp_vdev_param_mesh_tid =
				vdev->mesh_tid_latency_config.latency_tid;
		break;
#endif
	case CDP_DROP_3ADDR_MCAST:
		val->cdp_drop_3addr_mcast = vdev->drop_3addr_mcast;
		break;
	default:
		dp_cdp_err("%pK: param value %d is wrong",
			   soc, param);
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_E_FAILURE;
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}
10767 
/*
 * dp_set_vdev_param: function to set parameters in vdev
 * @cdp_soc : DP soc handle
 * @vdev_id: id of DP vdev handle
 * @param: parameter type to get value
 * @val: value
 *
 * Applies the requested setting, then unconditionally refreshes the vdev
 * tx search flags, forwards the setting to the arch-specific handler and
 * re-evaluates the pdev fast-rx flag before dropping the vdev reference.
 *
 * return: QDF_STATUS
 */
static QDF_STATUS
dp_set_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
		  enum cdp_vdev_param_type param, cdp_config_param_type val)
{
	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
	struct dp_vdev *vdev =
		dp_vdev_get_ref_by_id(dsoc, vdev_id, DP_MOD_ID_CDP);
	uint32_t var = 0;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	switch (param) {
	case CDP_ENABLE_WDS:
		dp_cdp_err("%pK: wds_enable %d for vdev(%pK) id(%d)\n",
			   dsoc, val.cdp_vdev_param_wds, vdev, vdev->vdev_id);
		vdev->wds_enabled = val.cdp_vdev_param_wds;
		break;
	case CDP_ENABLE_MEC:
		dp_cdp_err("%pK: mec_enable %d for vdev(%pK) id(%d)\n",
			   dsoc, val.cdp_vdev_param_mec, vdev, vdev->vdev_id);
		vdev->mec_enabled = val.cdp_vdev_param_mec;
		break;
	case CDP_ENABLE_DA_WAR:
		dp_cdp_err("%pK: da_war_enable %d for vdev(%pK) id(%d)\n",
			   dsoc, val.cdp_vdev_param_da_war, vdev, vdev->vdev_id);
		vdev->pdev->soc->da_war_enabled = val.cdp_vdev_param_da_war;
		/* Flush stale AST entries when toggling the DA war */
		dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
					     vdev->pdev->soc));
		break;
	case CDP_ENABLE_NAWDS:
		vdev->nawds_enabled = val.cdp_vdev_param_nawds;
		break;
	case CDP_ENABLE_MCAST_EN:
		vdev->mcast_enhancement_en = val.cdp_vdev_param_mcast_en;
		break;
	case CDP_ENABLE_IGMP_MCAST_EN:
		vdev->igmp_mcast_enhanc_en = val.cdp_vdev_param_igmp_mcast_en;
		break;
	case CDP_ENABLE_PROXYSTA:
		vdev->proxysta_vdev = val.cdp_vdev_param_proxysta;
		break;
	case CDP_UPDATE_TDLS_FLAGS:
		vdev->tdls_link_connected = val.cdp_vdev_param_tdls_flags;
		break;
	case CDP_CFG_WDS_AGING_TIMER:
		/* 0 stops AST aging; a new non-zero value re-arms the timer */
		var = val.cdp_vdev_param_aging_tmr;
		if (!var)
			qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
		else if (var != vdev->wds_aging_timer_val)
			qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, var);

		vdev->wds_aging_timer_val = var;
		break;
	case CDP_ENABLE_AP_BRIDGE:
		/* AP bridging is never enabled on STA vdevs */
		if (wlan_op_mode_sta != vdev->opmode)
			vdev->ap_bridge_enabled = val.cdp_vdev_param_ap_brdg_en;
		else
			vdev->ap_bridge_enabled = false;
		break;
	case CDP_ENABLE_CIPHER:
		vdev->sec_type = val.cdp_vdev_param_cipher_en;
		break;
	case CDP_ENABLE_QWRAP_ISOLATION:
		vdev->isolation_vdev = val.cdp_vdev_param_qwrap_isolation;
		break;
	case CDP_UPDATE_MULTIPASS:
		vdev->multipass_en = val.cdp_vdev_param_update_multipass;
		break;
	case CDP_TX_ENCAP_TYPE:
		vdev->tx_encap_type = val.cdp_vdev_param_tx_encap;
		break;
	case CDP_RX_DECAP_TYPE:
		vdev->rx_decap_type = val.cdp_vdev_param_rx_decap;
		break;
	case CDP_TID_VDEV_PRTY:
		vdev->tidmap_prty = val.cdp_vdev_param_tidmap_prty;
		break;
	case CDP_TIDMAP_TBL_ID:
		vdev->tidmap_tbl_id = val.cdp_vdev_param_tidmap_tbl_id;
		break;
#ifdef MESH_MODE_SUPPORT
	case CDP_MESH_RX_FILTER:
		dp_vdev_set_mesh_rx_filter((struct cdp_vdev *)vdev,
					   val.cdp_vdev_param_mesh_rx_filter);
		break;
	case CDP_MESH_MODE:
		dp_vdev_set_mesh_mode((struct cdp_vdev *)vdev,
				      val.cdp_vdev_param_mesh_mode);
		break;
#endif
	case CDP_ENABLE_HLOS_TID_OVERRIDE:
		dp_info("vdev_id %d enable hlod tid override %d", vdev_id,
			val.cdp_vdev_param_hlos_tid_override);
		dp_vdev_set_hlos_tid_override(vdev,
				val.cdp_vdev_param_hlos_tid_override);
		break;
#ifdef QCA_SUPPORT_WDS_EXTENDED
	case CDP_CFG_WDS_EXT:
		/* WDS extension applies to AP vdevs only */
		if (vdev->opmode == wlan_op_mode_ap)
			vdev->wds_ext_enabled = val.cdp_vdev_param_wds_ext;
		break;
#endif
	case CDP_ENABLE_PEER_AUTHORIZE:
		vdev->peer_authorize = val.cdp_vdev_param_peer_authorize;
		break;
#ifdef WLAN_SUPPORT_MESH_LATENCY
	case CDP_ENABLE_PEER_TID_LATENCY:
		dp_info("vdev_id %d enable peer tid latency %d", vdev_id,
			val.cdp_vdev_param_peer_tid_latency_enable);
		vdev->peer_tid_latency_enabled =
			val.cdp_vdev_param_peer_tid_latency_enable;
		break;
	case CDP_SET_VAP_MESH_TID:
		dp_info("vdev_id %d enable peer tid latency %d", vdev_id,
			val.cdp_vdev_param_mesh_tid);
		vdev->mesh_tid_latency_config.latency_tid
				= val.cdp_vdev_param_mesh_tid;
		break;
#endif
#ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE
	case CDP_SKIP_BAR_UPDATE_AP:
		dp_info("vdev_id %d skip BAR update: %u", vdev_id,
			val.cdp_skip_bar_update);
		vdev->skip_bar_update = val.cdp_skip_bar_update;
		vdev->skip_bar_update_last_ts = 0;
		break;
#endif
	case CDP_DROP_3ADDR_MCAST:
		dp_info("vdev_id %d drop 3 addr mcast :%d", vdev_id,
			val.cdp_drop_3addr_mcast);
		vdev->drop_3addr_mcast = val.cdp_drop_3addr_mcast;
		break;
	case CDP_ENABLE_WRAP:
		vdev->wrap_vdev = val.cdp_vdev_param_wrap;
		break;
#ifdef DP_TRAFFIC_END_INDICATION
	case CDP_ENABLE_TRAFFIC_END_INDICATION:
		vdev->traffic_end_ind_en = val.cdp_vdev_param_traffic_end_ind;
		break;
#endif
	default:
		break;
	}

	dp_tx_vdev_update_search_flags((struct dp_vdev *)vdev);
	dsoc->arch_ops.txrx_set_vdev_param(dsoc, vdev, param, val);

	/* Update PDEV flags as VDEV flags are updated */
	dp_pdev_update_fast_rx_flag(dsoc, vdev->pdev);
	dp_vdev_unref_delete(dsoc, vdev, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}
10931 
10932 /*
10933  * dp_set_psoc_param: function to set parameters in psoc
10934  * @cdp_soc : DP soc handle
10935  * @param: parameter type to be set
10936  * @val: value of parameter to be set
10937  *
10938  * return: QDF_STATUS
10939  */
10940 static QDF_STATUS
10941 dp_set_psoc_param(struct cdp_soc_t *cdp_soc,
10942 		  enum cdp_psoc_param_type param, cdp_config_param_type val)
10943 {
10944 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
10945 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->wlan_cfg_ctx;
10946 
10947 	switch (param) {
10948 	case CDP_ENABLE_RATE_STATS:
10949 		soc->peerstats_enabled = val.cdp_psoc_param_en_rate_stats;
10950 		break;
10951 	case CDP_SET_NSS_CFG:
10952 		wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx,
10953 					    val.cdp_psoc_param_en_nss_cfg);
10954 		/*
10955 		 * TODO: masked out based on the per offloaded radio
10956 		 */
10957 		switch (val.cdp_psoc_param_en_nss_cfg) {
10958 		case dp_nss_cfg_default:
10959 			break;
10960 		case dp_nss_cfg_first_radio:
10961 		/*
10962 		 * This configuration is valid for single band radio which
10963 		 * is also NSS offload.
10964 		 */
10965 		case dp_nss_cfg_dbdc:
10966 		case dp_nss_cfg_dbtc:
10967 			wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
10968 			wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
10969 			wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
10970 			wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
10971 			break;
10972 		default:
10973 			dp_cdp_err("%pK: Invalid offload config %d",
10974 				   soc, val.cdp_psoc_param_en_nss_cfg);
10975 		}
10976 
10977 			dp_cdp_err("%pK: nss-wifi<0> nss config is enabled"
10978 				   , soc);
10979 		break;
10980 	case CDP_SET_PREFERRED_HW_MODE:
10981 		soc->preferred_hw_mode = val.cdp_psoc_param_preferred_hw_mode;
10982 		break;
10983 	case CDP_IPA_ENABLE:
10984 		soc->wlan_cfg_ctx->ipa_enabled = val.cdp_ipa_enabled;
10985 		break;
10986 	case CDP_CFG_VDEV_STATS_HW_OFFLOAD:
10987 		wlan_cfg_set_vdev_stats_hw_offload_config(wlan_cfg_ctx,
10988 				val.cdp_psoc_param_vdev_stats_hw_offload);
10989 		break;
10990 	case CDP_SAWF_ENABLE:
10991 		wlan_cfg_set_sawf_config(wlan_cfg_ctx, val.cdp_sawf_enabled);
10992 		break;
10993 	case CDP_UMAC_RST_SKEL_ENABLE:
10994 		dp_umac_rst_skel_enable_update(soc, val.cdp_umac_rst_skel);
10995 		break;
10996 	default:
10997 		break;
10998 	}
10999 
11000 	return QDF_STATUS_SUCCESS;
11001 }
11002 
11003 /*
11004  * dp_get_psoc_param: function to get parameters in soc
11005  * @cdp_soc : DP soc handle
11006  * @param: parameter type to be set
11007  * @val: address of buffer
11008  *
11009  * return: status
11010  */
11011 static QDF_STATUS dp_get_psoc_param(struct cdp_soc_t *cdp_soc,
11012 				    enum cdp_psoc_param_type param,
11013 				    cdp_config_param_type *val)
11014 {
11015 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
11016 
11017 	if (!soc)
11018 		return QDF_STATUS_E_FAILURE;
11019 
11020 	switch (param) {
11021 	case CDP_CFG_PEER_EXT_STATS:
11022 		val->cdp_psoc_param_pext_stats =
11023 			wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
11024 		break;
11025 	case CDP_CFG_VDEV_STATS_HW_OFFLOAD:
11026 		val->cdp_psoc_param_vdev_stats_hw_offload =
11027 			wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx);
11028 		break;
11029 	case CDP_UMAC_RST_SKEL_ENABLE:
11030 		val->cdp_umac_rst_skel = dp_umac_rst_skel_enable_get(soc);
11031 		break;
11032 	default:
11033 		dp_warn("Invalid param");
11034 		break;
11035 	}
11036 
11037 	return QDF_STATUS_SUCCESS;
11038 }
11039 
11040 /*
11041  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
11042  * @soc: DP_SOC handle
11043  * @vdev_id: id of DP_VDEV handle
11044  * @map_id:ID of map that needs to be updated
11045  *
11046  * Return: QDF_STATUS
11047  */
11048 static QDF_STATUS dp_set_vdev_dscp_tid_map_wifi3(ol_txrx_soc_handle cdp_soc,
11049 						 uint8_t vdev_id,
11050 						 uint8_t map_id)
11051 {
11052 	cdp_config_param_type val;
11053 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
11054 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11055 						     DP_MOD_ID_CDP);
11056 	if (vdev) {
11057 		vdev->dscp_tid_map_id = map_id;
11058 		val.cdp_vdev_param_dscp_tid_map_id = map_id;
11059 		soc->arch_ops.txrx_set_vdev_param(soc,
11060 						  vdev,
11061 						  CDP_UPDATE_DSCP_TO_TID_MAP,
11062 						  val);
11063 		/* Updatr flag for transmit tid classification */
11064 		if (vdev->dscp_tid_map_id < soc->num_hw_dscp_tid_map)
11065 			vdev->skip_sw_tid_classification |=
11066 				DP_TX_HW_DSCP_TID_MAP_VALID;
11067 		else
11068 			vdev->skip_sw_tid_classification &=
11069 				~DP_TX_HW_DSCP_TID_MAP_VALID;
11070 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11071 		return QDF_STATUS_SUCCESS;
11072 	}
11073 
11074 	return QDF_STATUS_E_FAILURE;
11075 }
11076 
#ifdef DP_RATETABLE_SUPPORT
/**
 * dp_txrx_get_ratekbps() - look up a rate value from rate parameters
 * @preamb: preamble type
 * @mcs: MCS index
 * @htflag: HT flags (unused by this implementation)
 * @gintval: guard interval value
 *
 * NOTE(review): several dp_getrateindex() arguments are hard-coded to 1
 * here and the meaning of those positions (nss/bw presumably) is not
 * visible in this file — confirm against dp_ratetable.h.
 *
 * Return: result of dp_getrateindex() for the given parameters
 */
static int dp_txrx_get_ratekbps(int preamb, int mcs,
				int htflag, int gintval)
{
	uint32_t rix;       /* out param: rate index (discarded) */
	uint16_t ratecode;  /* out param: rate code (discarded) */
	enum cdp_punctured_modes punc_mode = NO_PUNCTURE;

	return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1,
			       (uint8_t)preamb, 1, punc_mode,
			       &rix, &ratecode);
}
#else
/* Stub: rate table support compiled out, always report 0. */
static int dp_txrx_get_ratekbps(int preamb, int mcs,
				int htflag, int gintval)
{
	return 0;
}
#endif
11096 
11097 /* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
11098  * @soc: DP soc handle
11099  * @pdev_id: id of DP pdev handle
11100  * @pdev_stats: buffer to copy to
11101  *
11102  * return : status success/failure
11103  */
11104 static QDF_STATUS
11105 dp_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
11106 		       struct cdp_pdev_stats *pdev_stats)
11107 {
11108 	struct dp_pdev *pdev =
11109 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11110 						   pdev_id);
11111 	if (!pdev)
11112 		return QDF_STATUS_E_FAILURE;
11113 
11114 	dp_aggregate_pdev_stats(pdev);
11115 
11116 	qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats));
11117 	return QDF_STATUS_SUCCESS;
11118 }
11119 
11120 /* dp_txrx_update_vdev_me_stats(): Update vdev ME stats sent from CDP
11121  * @vdev: DP vdev handle
11122  * @buf: buffer containing specific stats structure
11123  *
11124  * Returns: void
11125  */
11126 static void dp_txrx_update_vdev_me_stats(struct dp_vdev *vdev,
11127 					 void *buf)
11128 {
11129 	struct cdp_tx_ingress_stats *host_stats = NULL;
11130 
11131 	if (!buf) {
11132 		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
11133 		return;
11134 	}
11135 	host_stats = (struct cdp_tx_ingress_stats *)buf;
11136 
11137 	DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt,
11138 			 host_stats->mcast_en.mcast_pkt.num,
11139 			 host_stats->mcast_en.mcast_pkt.bytes);
11140 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error,
11141 		     host_stats->mcast_en.dropped_map_error);
11142 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_self_mac,
11143 		     host_stats->mcast_en.dropped_self_mac);
11144 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_send_fail,
11145 		     host_stats->mcast_en.dropped_send_fail);
11146 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast,
11147 		     host_stats->mcast_en.ucast);
11148 	DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc,
11149 		     host_stats->mcast_en.fail_seg_alloc);
11150 	DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail,
11151 		     host_stats->mcast_en.clone_fail);
11152 }
11153 
11154 /* dp_txrx_update_vdev_igmp_me_stats(): Update vdev IGMP ME stats sent from CDP
11155  * @vdev: DP vdev handle
11156  * @buf: buffer containing specific stats structure
11157  *
11158  * Returns: void
11159  */
11160 static void dp_txrx_update_vdev_igmp_me_stats(struct dp_vdev *vdev,
11161 					      void *buf)
11162 {
11163 	struct cdp_tx_ingress_stats *host_stats = NULL;
11164 
11165 	if (!buf) {
11166 		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
11167 		return;
11168 	}
11169 	host_stats = (struct cdp_tx_ingress_stats *)buf;
11170 
11171 	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_rcvd,
11172 		     host_stats->igmp_mcast_en.igmp_rcvd);
11173 	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_ucast_converted,
11174 		     host_stats->igmp_mcast_en.igmp_ucast_converted);
11175 }
11176 
11177 /* dp_txrx_update_vdev_host_stats(): Update stats sent through CDP
11178  * @soc: DP soc handle
11179  * @vdev_id: id of DP vdev handle
11180  * @buf: buffer containing specific stats structure
11181  * @stats_id: stats type
11182  *
11183  * Returns: QDF_STATUS
11184  */
11185 static QDF_STATUS dp_txrx_update_vdev_host_stats(struct cdp_soc_t *soc_hdl,
11186 						 uint8_t vdev_id,
11187 						 void *buf,
11188 						 uint16_t stats_id)
11189 {
11190 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11191 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11192 						     DP_MOD_ID_CDP);
11193 
11194 	if (!vdev) {
11195 		dp_cdp_err("%pK: Invalid vdev handle", soc);
11196 		return QDF_STATUS_E_FAILURE;
11197 	}
11198 
11199 	switch (stats_id) {
11200 	case DP_VDEV_STATS_PKT_CNT_ONLY:
11201 		break;
11202 	case DP_VDEV_STATS_TX_ME:
11203 		dp_txrx_update_vdev_me_stats(vdev, buf);
11204 		dp_txrx_update_vdev_igmp_me_stats(vdev, buf);
11205 		break;
11206 	default:
11207 		qdf_info("Invalid stats_id %d", stats_id);
11208 		break;
11209 	}
11210 
11211 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11212 	return QDF_STATUS_SUCCESS;
11213 }
11214 
11215 /* dp_txrx_get_peer_stats - will return cdp_peer_stats
11216  * @soc: soc handle
11217  * @vdev_id: id of vdev handle
11218  * @peer_mac: mac of DP_PEER handle
11219  * @peer_stats: buffer to copy to
11220  * return : status success/failure
11221  */
11222 static QDF_STATUS
11223 dp_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
11224 		       uint8_t *peer_mac, struct cdp_peer_stats *peer_stats)
11225 {
11226 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11227 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
11228 						       peer_mac, 0, vdev_id,
11229 						       DP_MOD_ID_CDP);
11230 
11231 	qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats));
11232 
11233 	if (!peer)
11234 		return QDF_STATUS_E_FAILURE;
11235 
11236 	dp_get_peer_stats(peer, peer_stats);
11237 
11238 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11239 
11240 	return status;
11241 }
11242 
11243 /* dp_txrx_get_peer_stats_param - will return specified cdp_peer_stats
11244  * @param soc - soc handle
11245  * @param vdev_id - vdev_id of vdev object
11246  * @param peer_mac - mac address of the peer
11247  * @param type - enum of required stats
11248  * @param buf - buffer to hold the value
11249  * return : status success/failure
11250  */
11251 static QDF_STATUS
11252 dp_txrx_get_peer_stats_param(struct cdp_soc_t *soc, uint8_t vdev_id,
11253 			     uint8_t *peer_mac, enum cdp_peer_stats_type type,
11254 			     cdp_peer_stats_param_t *buf)
11255 {
11256 	QDF_STATUS ret;
11257 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
11258 						      peer_mac, 0, vdev_id,
11259 						      DP_MOD_ID_CDP);
11260 
11261 	if (!peer) {
11262 		dp_peer_err("%pK: Invalid Peer for Mac " QDF_MAC_ADDR_FMT,
11263 			    soc, QDF_MAC_ADDR_REF(peer_mac));
11264 		return QDF_STATUS_E_FAILURE;
11265 	}
11266 
11267 	if (type >= cdp_peer_per_pkt_stats_min &&
11268 	    type < cdp_peer_per_pkt_stats_max) {
11269 		ret = dp_txrx_get_peer_per_pkt_stats_param(peer, type, buf);
11270 	} else if (type >= cdp_peer_extd_stats_min &&
11271 		   type < cdp_peer_extd_stats_max) {
11272 		ret = dp_txrx_get_peer_extd_stats_param(peer, type, buf);
11273 	} else {
11274 		dp_err("%pK: Invalid stat type requested", soc);
11275 		ret = QDF_STATUS_E_FAILURE;
11276 	}
11277 
11278 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11279 
11280 	return ret;
11281 }
11282 
/* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
 * @soc: soc handle
 * @vdev_id: id of vdev handle
 * @peer_mac: mac of DP_PEER handle
 *
 * For the 11BE MLO build, when the resolved peer is an MLD peer the stats
 * of every associated link peer are cleared as well.
 *
 * return : QDF_STATUS
 */
#ifdef WLAN_FEATURE_11BE_MLO
static QDF_STATUS
dp_txrx_reset_peer_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			 uint8_t *peer_mac)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	/* tgt variant resolves to the MLD peer for an MLO link address */
	struct dp_peer *peer =
			dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
						       vdev_id, DP_MOD_ID_CDP);

	if (!peer)
		return QDF_STATUS_E_FAILURE;

	DP_STATS_CLR(peer);
	dp_txrx_peer_stats_clr(peer->txrx_peer);

	if (IS_MLO_DP_MLD_PEER(peer)) {
		uint8_t i;
		struct dp_peer *link_peer;
		struct dp_soc *link_peer_soc;
		struct dp_mld_link_peers link_peers_info;

		/* take refs on all link peers, clear each, then release */
		dp_get_link_peers_ref_from_mld_peer(soc, peer,
						    &link_peers_info,
						    DP_MOD_ID_CDP);
		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			/* each link peer may live on a different soc */
			link_peer_soc = link_peer->vdev->pdev->soc;

			DP_STATS_CLR(link_peer);
			dp_monitor_peer_reset_stats(link_peer_soc, link_peer);
		}

		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
	} else {
		dp_monitor_peer_reset_stats(soc, peer);
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return status;
}
#else
static QDF_STATUS
dp_txrx_reset_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
			 uint8_t *peer_mac)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
						      peer_mac, 0, vdev_id,
						      DP_MOD_ID_CDP);

	if (!peer)
		return QDF_STATUS_E_FAILURE;

	DP_STATS_CLR(peer);
	dp_txrx_peer_stats_clr(peer->txrx_peer);
	dp_monitor_peer_reset_stats((struct dp_soc *)soc, peer);

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return status;
}
#endif
11355 
11356 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
11357  * @vdev_handle: DP_VDEV handle
11358  * @buf: buffer for vdev stats
11359  *
11360  * return : int
11361  */
11362 static int dp_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
11363 				  void *buf, bool is_aggregate)
11364 {
11365 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11366 	struct cdp_vdev_stats *vdev_stats;
11367 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11368 						     DP_MOD_ID_CDP);
11369 
11370 	if (!vdev)
11371 		return 1;
11372 
11373 	vdev_stats = (struct cdp_vdev_stats *)buf;
11374 
11375 	if (is_aggregate) {
11376 		dp_aggregate_vdev_stats(vdev, buf);
11377 	} else {
11378 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
11379 	}
11380 
11381 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11382 	return 0;
11383 }
11384 
11385 /*
11386  * dp_get_total_per(): get total per
11387  * @soc: DP soc handle
11388  * @pdev_id: id of DP_PDEV handle
11389  *
11390  * Return: % error rate using retries per packet and success packets
11391  */
11392 static int dp_get_total_per(struct cdp_soc_t *soc, uint8_t pdev_id)
11393 {
11394 	struct dp_pdev *pdev =
11395 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11396 						   pdev_id);
11397 
11398 	if (!pdev)
11399 		return 0;
11400 
11401 	dp_aggregate_pdev_stats(pdev);
11402 	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
11403 		return 0;
11404 	return ((pdev->stats.tx.retries * 100) /
11405 		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
11406 }
11407 
11408 /*
11409  * dp_txrx_stats_publish(): publish pdev stats into a buffer
11410  * @soc: DP soc handle
11411  * @pdev_id: id of DP_PDEV handle
11412  * @buf: to hold pdev_stats
11413  *
11414  * Return: int
11415  */
11416 static int
11417 dp_txrx_stats_publish(struct cdp_soc_t *soc, uint8_t pdev_id,
11418 		      struct cdp_stats_extd *buf)
11419 {
11420 	struct cdp_txrx_stats_req req = {0,};
11421 	QDF_STATUS status;
11422 	struct dp_pdev *pdev =
11423 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11424 						   pdev_id);
11425 
11426 	if (!pdev)
11427 		return TXRX_STATS_LEVEL_OFF;
11428 
11429 	if (pdev->pending_fw_stats_response)
11430 		return TXRX_STATS_LEVEL_OFF;
11431 
11432 	dp_aggregate_pdev_stats(pdev);
11433 
11434 	pdev->pending_fw_stats_response = true;
11435 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
11436 	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
11437 	pdev->fw_stats_tlv_bitmap_rcvd = 0;
11438 	qdf_event_reset(&pdev->fw_stats_event);
11439 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
11440 				req.param1, req.param2, req.param3, 0,
11441 				req.cookie_val, 0);
11442 
11443 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
11444 	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
11445 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
11446 				req.param1, req.param2, req.param3, 0,
11447 				req.cookie_val, 0);
11448 
11449 	status =
11450 		qdf_wait_single_event(&pdev->fw_stats_event, DP_MAX_SLEEP_TIME);
11451 
11452 	if (status != QDF_STATUS_SUCCESS) {
11453 		if (status == QDF_STATUS_E_TIMEOUT)
11454 			qdf_debug("TIMEOUT_OCCURS");
11455 		pdev->pending_fw_stats_response = false;
11456 		return TXRX_STATS_LEVEL_OFF;
11457 	}
11458 	qdf_mem_copy(buf, &pdev->stats, sizeof(struct cdp_pdev_stats));
11459 	pdev->pending_fw_stats_response = false;
11460 
11461 	return TXRX_STATS_LEVEL;
11462 }
11463 
11464 /*
11465  * dp_get_obss_stats(): Get Pdev OBSS stats from Fw
11466  * @soc: DP soc handle
11467  * @pdev_id: id of DP_PDEV handle
11468  * @buf: to hold pdev obss stats
11469  *
11470  * Return: status
11471  */
11472 static QDF_STATUS
11473 dp_get_obss_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
11474 		  struct cdp_pdev_obss_pd_stats_tlv *buf)
11475 {
11476 	struct cdp_txrx_stats_req req = {0};
11477 	QDF_STATUS status;
11478 	struct dp_pdev *pdev =
11479 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11480 						   pdev_id);
11481 
11482 	if (!pdev)
11483 		return QDF_STATUS_E_INVAL;
11484 
11485 	if (pdev->pending_fw_obss_stats_response)
11486 		return QDF_STATUS_E_AGAIN;
11487 
11488 	pdev->pending_fw_obss_stats_response = true;
11489 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_OBSS_PD_STATS;
11490 	req.cookie_val = DBG_STATS_COOKIE_HTT_OBSS;
11491 	qdf_event_reset(&pdev->fw_obss_stats_event);
11492 	status = dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
11493 					   req.param1, req.param2, req.param3,
11494 					   0, req.cookie_val, 0);
11495 	if (QDF_IS_STATUS_ERROR(status)) {
11496 		pdev->pending_fw_obss_stats_response = false;
11497 		return status;
11498 	}
11499 	status =
11500 		qdf_wait_single_event(&pdev->fw_obss_stats_event,
11501 				      DP_MAX_SLEEP_TIME);
11502 
11503 	if (status != QDF_STATUS_SUCCESS) {
11504 		if (status == QDF_STATUS_E_TIMEOUT)
11505 			qdf_debug("TIMEOUT_OCCURS");
11506 		pdev->pending_fw_obss_stats_response = false;
11507 		return QDF_STATUS_E_TIMEOUT;
11508 	}
11509 	qdf_mem_copy(buf, &pdev->stats.htt_tx_pdev_stats.obss_pd_stats_tlv,
11510 		     sizeof(struct cdp_pdev_obss_pd_stats_tlv));
11511 	pdev->pending_fw_obss_stats_response = false;
11512 	return status;
11513 }
11514 
11515 /*
11516  * dp_clear_pdev_obss_pd_stats(): Clear pdev obss stats
11517  * @soc: DP soc handle
11518  * @pdev_id: id of DP_PDEV handle
11519  *
11520  * Return: status
11521  */
11522 static QDF_STATUS
11523 dp_clear_pdev_obss_pd_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
11524 {
11525 	struct cdp_txrx_stats_req req = {0};
11526 	struct dp_pdev *pdev =
11527 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11528 						   pdev_id);
11529 	uint32_t cookie_val = DBG_STATS_COOKIE_DEFAULT;
11530 
11531 	if (!pdev)
11532 		return QDF_STATUS_E_INVAL;
11533 
11534 	/*
11535 	 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
11536 	 * from param0 to param3 according to below rule:
11537 	 *
11538 	 * PARAM:
11539 	 *   - config_param0 : start_offset (stats type)
11540 	 *   - config_param1 : stats bmask from start offset
11541 	 *   - config_param2 : stats bmask from start offset + 32
11542 	 *   - config_param3 : stats bmask from start offset + 64
11543 	 */
11544 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_RESET;
11545 	req.param0 = HTT_DBG_EXT_STATS_PDEV_OBSS_PD_STATS;
11546 	req.param1 = 0x00000001;
11547 
11548 	return dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
11549 				  req.param1, req.param2, req.param3, 0,
11550 				cookie_val, 0);
11551 }
11552 
11553 /**
11554  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
11555  * @soc: soc handle
11556  * @pdev_id: id of DP_PDEV handle
11557  * @map_id: ID of map that needs to be updated
11558  * @tos: index value in map
11559  * @tid: tid value passed by the user
11560  *
11561  * Return: QDF_STATUS
11562  */
11563 static QDF_STATUS
11564 dp_set_pdev_dscp_tid_map_wifi3(struct cdp_soc_t *soc_handle,
11565 			       uint8_t pdev_id,
11566 			       uint8_t map_id,
11567 			       uint8_t tos, uint8_t tid)
11568 {
11569 	uint8_t dscp;
11570 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
11571 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11572 
11573 	if (!pdev)
11574 		return QDF_STATUS_E_FAILURE;
11575 
11576 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
11577 	pdev->dscp_tid_map[map_id][dscp] = tid;
11578 
11579 	if (map_id < soc->num_hw_dscp_tid_map)
11580 		hal_tx_update_dscp_tid(soc->hal_soc, tid,
11581 				       map_id, dscp);
11582 	else
11583 		return QDF_STATUS_E_FAILURE;
11584 
11585 	return QDF_STATUS_SUCCESS;
11586 }
11587 
11588 #ifdef WLAN_SYSFS_DP_STATS
11589 /*
11590  * dp_sysfs_event_trigger(): Trigger event to wait for firmware
11591  * stats request response.
11592  * @soc: soc handle
11593  * @cookie_val: cookie value
11594  *
11595  * @Return: QDF_STATUS
11596  */
11597 static QDF_STATUS
11598 dp_sysfs_event_trigger(struct dp_soc *soc, uint32_t cookie_val)
11599 {
11600 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11601 	/* wait for firmware response for sysfs stats request */
11602 	if (cookie_val == DBG_SYSFS_STATS_COOKIE) {
11603 		if (!soc) {
11604 			dp_cdp_err("soc is NULL");
11605 			return QDF_STATUS_E_FAILURE;
11606 		}
11607 		/* wait for event completion */
11608 		status = qdf_wait_single_event(&soc->sysfs_config->sysfs_txrx_fw_request_done,
11609 					       WLAN_SYSFS_STAT_REQ_WAIT_MS);
11610 		if (status == QDF_STATUS_SUCCESS)
11611 			dp_cdp_info("sysfs_txrx_fw_request_done event completed");
11612 		else if (status == QDF_STATUS_E_TIMEOUT)
11613 			dp_cdp_warn("sysfs_txrx_fw_request_done event expired");
11614 		else
11615 			dp_cdp_warn("sysfs_txrx_fw_request_done event erro code %d", status);
11616 	}
11617 
11618 	return status;
11619 }
11620 #else /* WLAN_SYSFS_DP_STATS */
11621 /*
11622  * dp_sysfs_event_trigger(): Trigger event to wait for firmware
11623  * stats request response.
11624  * @soc: soc handle
11625  * @cookie_val: cookie value
11626  *
11627  * @Return: QDF_STATUS
11628  */
11629 static QDF_STATUS
11630 dp_sysfs_event_trigger(struct dp_soc *soc, uint32_t cookie_val)
11631 {
11632 	return QDF_STATUS_SUCCESS;
11633 }
11634 #endif /* WLAN_SYSFS_DP_STATS */
11635 
11636 /**
11637  * dp_fw_stats_process(): Process TXRX FW stats request.
11638  * @vdev_handle: DP VDEV handle
11639  * @req: stats request
11640  *
11641  * return: QDF_STATUS
11642  */
11643 static QDF_STATUS
11644 dp_fw_stats_process(struct dp_vdev *vdev,
11645 		    struct cdp_txrx_stats_req *req)
11646 {
11647 	struct dp_pdev *pdev = NULL;
11648 	struct dp_soc *soc = NULL;
11649 	uint32_t stats = req->stats;
11650 	uint8_t mac_id = req->mac_id;
11651 	uint32_t cookie_val = DBG_STATS_COOKIE_DEFAULT;
11652 
11653 	if (!vdev) {
11654 		DP_TRACE(NONE, "VDEV not found");
11655 		return QDF_STATUS_E_FAILURE;
11656 	}
11657 
11658 	pdev = vdev->pdev;
11659 	if (!pdev) {
11660 		DP_TRACE(NONE, "PDEV not found");
11661 		return QDF_STATUS_E_FAILURE;
11662 	}
11663 
11664 	soc = pdev->soc;
11665 	if (!soc) {
11666 		DP_TRACE(NONE, "soc not found");
11667 		return QDF_STATUS_E_FAILURE;
11668 	}
11669 
11670 	/* In case request is from host sysfs for displaying stats on console */
11671 	if (req->cookie_val == DBG_SYSFS_STATS_COOKIE)
11672 		cookie_val = DBG_SYSFS_STATS_COOKIE;
11673 
11674 	/*
11675 	 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
11676 	 * from param0 to param3 according to below rule:
11677 	 *
11678 	 * PARAM:
11679 	 *   - config_param0 : start_offset (stats type)
11680 	 *   - config_param1 : stats bmask from start offset
11681 	 *   - config_param2 : stats bmask from start offset + 32
11682 	 *   - config_param3 : stats bmask from start offset + 64
11683 	 */
11684 	if (req->stats == CDP_TXRX_STATS_0) {
11685 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
11686 		req->param1 = 0xFFFFFFFF;
11687 		req->param2 = 0xFFFFFFFF;
11688 		req->param3 = 0xFFFFFFFF;
11689 	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
11690 		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
11691 	}
11692 
11693 	if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT) {
11694 		dp_h2t_ext_stats_msg_send(pdev,
11695 					  HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT,
11696 					  req->param0, req->param1, req->param2,
11697 					  req->param3, 0, cookie_val,
11698 					  mac_id);
11699 	} else {
11700 		dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
11701 					  req->param1, req->param2, req->param3,
11702 					  0, cookie_val, mac_id);
11703 	}
11704 
11705 	dp_sysfs_event_trigger(soc, cookie_val);
11706 
11707 	return QDF_STATUS_SUCCESS;
11708 }
11709 
11710 /**
11711  * dp_txrx_stats_request - function to map to firmware and host stats
11712  * @soc: soc handle
11713  * @vdev_id: virtual device ID
11714  * @req: stats request
11715  *
11716  * Return: QDF_STATUS
11717  */
11718 static
11719 QDF_STATUS dp_txrx_stats_request(struct cdp_soc_t *soc_handle,
11720 				 uint8_t vdev_id,
11721 				 struct cdp_txrx_stats_req *req)
11722 {
11723 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_handle);
11724 	int host_stats;
11725 	int fw_stats;
11726 	enum cdp_stats stats;
11727 	int num_stats;
11728 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11729 						     DP_MOD_ID_CDP);
11730 	QDF_STATUS status = QDF_STATUS_E_INVAL;
11731 
11732 	if (!vdev || !req) {
11733 		dp_cdp_err("%pK: Invalid vdev/req instance", soc);
11734 		status = QDF_STATUS_E_INVAL;
11735 		goto fail0;
11736 	}
11737 
11738 	if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) {
11739 		dp_err("Invalid mac id request");
11740 		status = QDF_STATUS_E_INVAL;
11741 		goto fail0;
11742 	}
11743 
11744 	stats = req->stats;
11745 	if (stats >= CDP_TXRX_MAX_STATS) {
11746 		status = QDF_STATUS_E_INVAL;
11747 		goto fail0;
11748 	}
11749 
11750 	/*
11751 	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
11752 	 *			has to be updated if new FW HTT stats added
11753 	 */
11754 	if (stats > CDP_TXRX_STATS_HTT_MAX)
11755 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
11756 
11757 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
11758 
11759 	if (stats >= num_stats) {
11760 		dp_cdp_err("%pK : Invalid stats option: %d", soc, stats);
11761 		status = QDF_STATUS_E_INVAL;
11762 		goto fail0;
11763 	}
11764 
11765 	req->stats = stats;
11766 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
11767 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
11768 
11769 	dp_info("stats: %u fw_stats_type: %d host_stats: %d",
11770 		stats, fw_stats, host_stats);
11771 
11772 	if (fw_stats != TXRX_FW_STATS_INVALID) {
11773 		/* update request with FW stats type */
11774 		req->stats = fw_stats;
11775 		status = dp_fw_stats_process(vdev, req);
11776 	} else if ((host_stats != TXRX_HOST_STATS_INVALID) &&
11777 			(host_stats <= TXRX_HOST_STATS_MAX))
11778 		status = dp_print_host_stats(vdev, req, soc);
11779 	else
11780 		dp_cdp_info("%pK: Wrong Input for TxRx Stats", soc);
11781 fail0:
11782 	if (vdev)
11783 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11784 	return status;
11785 }
11786 
11787 /*
11788  * dp_txrx_dump_stats() -  Dump statistics
11789  * @value - Statistics option
11790  */
11791 static QDF_STATUS dp_txrx_dump_stats(struct cdp_soc_t *psoc, uint16_t value,
11792 				     enum qdf_stats_verbosity_level level)
11793 {
11794 	struct dp_soc *soc =
11795 		(struct dp_soc *)psoc;
11796 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11797 
11798 	if (!soc) {
11799 		dp_cdp_err("%pK: soc is NULL", soc);
11800 		return QDF_STATUS_E_INVAL;
11801 	}
11802 
11803 	switch (value) {
11804 	case CDP_TXRX_PATH_STATS:
11805 		dp_txrx_path_stats(soc);
11806 		dp_print_soc_interrupt_stats(soc);
11807 		hal_dump_reg_write_stats(soc->hal_soc);
11808 		dp_pdev_print_tx_delay_stats(soc);
11809 		/* Dump usage watermark stats for core TX/RX SRNGs */
11810 		dp_dump_srng_high_wm_stats(soc, (1 << REO_DST));
11811 		dp_print_fisa_stats(soc);
11812 		break;
11813 
11814 	case CDP_RX_RING_STATS:
11815 		dp_print_per_ring_stats(soc);
11816 		break;
11817 
11818 	case CDP_TXRX_TSO_STATS:
11819 		dp_print_tso_stats(soc, level);
11820 		break;
11821 
11822 	case CDP_DUMP_TX_FLOW_POOL_INFO:
11823 		if (level == QDF_STATS_VERBOSITY_LEVEL_HIGH)
11824 			cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
11825 		else
11826 			dp_tx_dump_flow_pool_info_compact(soc);
11827 		break;
11828 
11829 	case CDP_DP_NAPI_STATS:
11830 		dp_print_napi_stats(soc);
11831 		break;
11832 
11833 	case CDP_TXRX_DESC_STATS:
11834 		/* TODO: NOT IMPLEMENTED */
11835 		break;
11836 
11837 	case CDP_DP_RX_FISA_STATS:
11838 		dp_rx_dump_fisa_stats(soc);
11839 		break;
11840 
11841 	case CDP_DP_SWLM_STATS:
11842 		dp_print_swlm_stats(soc);
11843 		break;
11844 
11845 	case CDP_DP_TX_HW_LATENCY_STATS:
11846 		dp_pdev_print_tx_delay_stats(soc);
11847 		break;
11848 
11849 	default:
11850 		status = QDF_STATUS_E_INVAL;
11851 		break;
11852 	}
11853 
11854 	return status;
11855 
11856 }
11857 
11858 #ifdef WLAN_SYSFS_DP_STATS
11859 static
11860 void dp_sysfs_get_stat_type(struct dp_soc *soc, uint32_t *mac_id,
11861 			    uint32_t *stat_type)
11862 {
11863 	qdf_spinlock_acquire(&soc->sysfs_config->rw_stats_lock);
11864 	*stat_type = soc->sysfs_config->stat_type_requested;
11865 	*mac_id   = soc->sysfs_config->mac_id;
11866 
11867 	qdf_spinlock_release(&soc->sysfs_config->rw_stats_lock);
11868 }
11869 
11870 static
11871 void dp_sysfs_update_config_buf_params(struct dp_soc *soc,
11872 				       uint32_t curr_len,
11873 				       uint32_t max_buf_len,
11874 				       char *buf)
11875 {
11876 	qdf_spinlock_acquire(&soc->sysfs_config->sysfs_write_user_buffer);
11877 	/* set sysfs_config parameters */
11878 	soc->sysfs_config->buf = buf;
11879 	soc->sysfs_config->curr_buffer_length = curr_len;
11880 	soc->sysfs_config->max_buffer_length = max_buf_len;
11881 	qdf_spinlock_release(&soc->sysfs_config->sysfs_write_user_buffer);
11882 }
11883 
11884 static
11885 QDF_STATUS dp_sysfs_fill_stats(ol_txrx_soc_handle soc_hdl,
11886 			       char *buf, uint32_t buf_size)
11887 {
11888 	uint32_t mac_id = 0;
11889 	uint32_t stat_type = 0;
11890 	uint32_t fw_stats = 0;
11891 	uint32_t host_stats = 0;
11892 	enum cdp_stats stats;
11893 	struct cdp_txrx_stats_req req;
11894 	uint32_t num_stats;
11895 	struct dp_soc *soc = NULL;
11896 
11897 	if (!soc_hdl) {
11898 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
11899 		return QDF_STATUS_E_INVAL;
11900 	}
11901 
11902 	soc = cdp_soc_t_to_dp_soc(soc_hdl);
11903 
11904 	if (!soc) {
11905 		dp_cdp_err("%pK: soc is NULL", soc);
11906 		return QDF_STATUS_E_INVAL;
11907 	}
11908 
11909 	dp_sysfs_get_stat_type(soc, &mac_id, &stat_type);
11910 
11911 	stats = stat_type;
11912 	if (stats >= CDP_TXRX_MAX_STATS) {
11913 		dp_cdp_info("sysfs stat type requested is invalid");
11914 		return QDF_STATUS_E_INVAL;
11915 	}
11916 	/*
11917 	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
11918 	 *			has to be updated if new FW HTT stats added
11919 	 */
11920 	if (stats > CDP_TXRX_MAX_STATS)
11921 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
11922 
11923 	num_stats = QDF_ARRAY_SIZE(dp_stats_mapping_table);
11924 
11925 	if (stats >= num_stats) {
11926 		dp_cdp_err("%pK : Invalid stats option: %d, max num stats: %d",
11927 				soc, stats, num_stats);
11928 		return QDF_STATUS_E_INVAL;
11929 	}
11930 
11931 	/* build request */
11932 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
11933 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
11934 
11935 	req.stats = stat_type;
11936 	req.mac_id = mac_id;
11937 	/* request stats to be printed */
11938 	qdf_mutex_acquire(&soc->sysfs_config->sysfs_read_lock);
11939 
11940 	if (fw_stats != TXRX_FW_STATS_INVALID) {
11941 		/* update request with FW stats type */
11942 		req.cookie_val = DBG_SYSFS_STATS_COOKIE;
11943 	} else if ((host_stats != TXRX_HOST_STATS_INVALID) &&
11944 			(host_stats <= TXRX_HOST_STATS_MAX)) {
11945 		req.cookie_val = DBG_STATS_COOKIE_DEFAULT;
11946 		soc->sysfs_config->process_id = qdf_get_current_pid();
11947 		soc->sysfs_config->printing_mode = PRINTING_MODE_ENABLED;
11948 	}
11949 
11950 	dp_sysfs_update_config_buf_params(soc, 0, buf_size, buf);
11951 
11952 	dp_txrx_stats_request(soc_hdl, mac_id, &req);
11953 	soc->sysfs_config->process_id = 0;
11954 	soc->sysfs_config->printing_mode = PRINTING_MODE_DISABLED;
11955 
11956 	dp_sysfs_update_config_buf_params(soc, 0, 0, NULL);
11957 
11958 	qdf_mutex_release(&soc->sysfs_config->sysfs_read_lock);
11959 	return QDF_STATUS_SUCCESS;
11960 }
11961 
11962 static
11963 QDF_STATUS dp_sysfs_set_stat_type(ol_txrx_soc_handle soc_hdl,
11964 				  uint32_t stat_type, uint32_t mac_id)
11965 {
11966 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11967 
11968 	if (!soc_hdl) {
11969 		dp_cdp_err("%pK: soc is NULL", soc);
11970 		return QDF_STATUS_E_INVAL;
11971 	}
11972 
11973 	qdf_spinlock_acquire(&soc->sysfs_config->rw_stats_lock);
11974 
11975 	soc->sysfs_config->stat_type_requested = stat_type;
11976 	soc->sysfs_config->mac_id = mac_id;
11977 
11978 	qdf_spinlock_release(&soc->sysfs_config->rw_stats_lock);
11979 
11980 	return QDF_STATUS_SUCCESS;
11981 }
11982 
11983 static
11984 QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl)
11985 {
11986 	struct dp_soc *soc;
11987 	QDF_STATUS status;
11988 
11989 	if (!soc_hdl) {
11990 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
11991 		return QDF_STATUS_E_INVAL;
11992 	}
11993 
11994 	soc = soc_hdl;
11995 
11996 	soc->sysfs_config = qdf_mem_malloc(sizeof(struct sysfs_stats_config));
11997 	if (!soc->sysfs_config) {
11998 		dp_cdp_err("failed to allocate memory for sysfs_config no memory");
11999 		return QDF_STATUS_E_NOMEM;
12000 	}
12001 
12002 	status = qdf_event_create(&soc->sysfs_config->sysfs_txrx_fw_request_done);
12003 	/* create event for fw stats request from sysfs */
12004 	if (status != QDF_STATUS_SUCCESS) {
12005 		dp_cdp_err("failed to create event sysfs_txrx_fw_request_done");
12006 		qdf_mem_free(soc->sysfs_config);
12007 		soc->sysfs_config = NULL;
12008 		return QDF_STATUS_E_FAILURE;
12009 	}
12010 
12011 	qdf_spinlock_create(&soc->sysfs_config->rw_stats_lock);
12012 	qdf_mutex_create(&soc->sysfs_config->sysfs_read_lock);
12013 	qdf_spinlock_create(&soc->sysfs_config->sysfs_write_user_buffer);
12014 
12015 	return QDF_STATUS_SUCCESS;
12016 }
12017 
12018 static
12019 QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl)
12020 {
12021 	struct dp_soc *soc;
12022 	QDF_STATUS status;
12023 
12024 	if (!soc_hdl) {
12025 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
12026 		return QDF_STATUS_E_INVAL;
12027 	}
12028 
12029 	soc = soc_hdl;
12030 	if (!soc->sysfs_config) {
12031 		dp_cdp_err("soc->sysfs_config is NULL");
12032 		return QDF_STATUS_E_FAILURE;
12033 	}
12034 
12035 	status = qdf_event_destroy(&soc->sysfs_config->sysfs_txrx_fw_request_done);
12036 	if (status != QDF_STATUS_SUCCESS)
12037 		dp_cdp_err("Failed to detroy event sysfs_txrx_fw_request_done ");
12038 
12039 	qdf_mutex_destroy(&soc->sysfs_config->sysfs_read_lock);
12040 	qdf_spinlock_destroy(&soc->sysfs_config->rw_stats_lock);
12041 	qdf_spinlock_destroy(&soc->sysfs_config->sysfs_write_user_buffer);
12042 
12043 	qdf_mem_free(soc->sysfs_config);
12044 
12045 	return QDF_STATUS_SUCCESS;
12046 }
12047 
12048 #else /* WLAN_SYSFS_DP_STATS */
12049 
/* Stub: WLAN_SYSFS_DP_STATS disabled, nothing to tear down */
static
QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl)
{
	return QDF_STATUS_SUCCESS;
}
12055 
/* Stub: WLAN_SYSFS_DP_STATS disabled, nothing to initialize */
static
QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl)
{
	return QDF_STATUS_SUCCESS;
}
12061 #endif /* WLAN_SYSFS_DP_STATS */
12062 
12063 /**
12064  * dp_txrx_clear_dump_stats() - clear dumpStats
12065  * @soc- soc handle
12066  * @value - stats option
12067  *
12068  * Return: 0 - Success, non-zero - failure
12069  */
12070 static
12071 QDF_STATUS dp_txrx_clear_dump_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12072 				    uint8_t value)
12073 {
12074 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12075 	QDF_STATUS status = QDF_STATUS_SUCCESS;
12076 
12077 	if (!soc) {
12078 		dp_err("soc is NULL");
12079 		return QDF_STATUS_E_INVAL;
12080 	}
12081 
12082 	switch (value) {
12083 	case CDP_TXRX_TSO_STATS:
12084 		dp_txrx_clear_tso_stats(soc);
12085 		break;
12086 
12087 	case CDP_DP_TX_HW_LATENCY_STATS:
12088 		dp_pdev_clear_tx_delay_stats(soc);
12089 		break;
12090 
12091 	default:
12092 		status = QDF_STATUS_E_INVAL;
12093 		break;
12094 	}
12095 
12096 	return status;
12097 }
12098 
12099 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
12100 /**
12101  * dp_update_flow_control_parameters() - API to store datapath
12102  *                            config parameters
12103  * @soc: soc handle
12104  * @cfg: ini parameter handle
12105  *
12106  * Return: void
12107  */
12108 static inline
12109 void dp_update_flow_control_parameters(struct dp_soc *soc,
12110 				struct cdp_config_params *params)
12111 {
12112 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
12113 					params->tx_flow_stop_queue_threshold;
12114 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
12115 					params->tx_flow_start_queue_offset;
12116 }
12117 #else
/* Stub: tx flow control V2 disabled, nothing to store */
static inline
void dp_update_flow_control_parameters(struct dp_soc *soc,
				struct cdp_config_params *params)
{
}
12123 #endif
12124 
12125 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
12126 /* Max packet limit for TX Comp packet loop (dp_tx_comp_handler) */
12127 #define DP_TX_COMP_LOOP_PKT_LIMIT_MAX 1024
12128 
12129 /* Max packet limit for RX REAP Loop (dp_rx_process) */
12130 #define DP_RX_REAP_LOOP_PKT_LIMIT_MAX 1024
12131 
12132 static
12133 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
12134 					struct cdp_config_params *params)
12135 {
12136 	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit =
12137 				params->tx_comp_loop_pkt_limit;
12138 
12139 	if (params->tx_comp_loop_pkt_limit < DP_TX_COMP_LOOP_PKT_LIMIT_MAX)
12140 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = true;
12141 	else
12142 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = false;
12143 
12144 	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit =
12145 				params->rx_reap_loop_pkt_limit;
12146 
12147 	if (params->rx_reap_loop_pkt_limit < DP_RX_REAP_LOOP_PKT_LIMIT_MAX)
12148 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = true;
12149 	else
12150 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = false;
12151 
12152 	soc->wlan_cfg_ctx->rx_hp_oos_update_limit =
12153 				params->rx_hp_oos_update_limit;
12154 
12155 	dp_info("tx_comp_loop_pkt_limit %u tx_comp_enable_eol_data_check %u rx_reap_loop_pkt_limit %u rx_enable_eol_data_check %u rx_hp_oos_update_limit %u",
12156 		soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit,
12157 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check,
12158 		soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit,
12159 		soc->wlan_cfg_ctx->rx_enable_eol_data_check,
12160 		soc->wlan_cfg_ctx->rx_hp_oos_update_limit);
12161 }
12162 
/**
 * dp_update_soft_irq_limits() - override tx-comp/rx softirq loop limits
 * @soc: soc handle
 * @tx_limit: max packets per tx completion reap loop
 * @rx_limit: max packets per rx reap loop
 *
 * Return: void
 */
static void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
				      uint32_t rx_limit)
{
	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit = tx_limit;
	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit = rx_limit;
}
12169 
12170 #else
/* Stub: WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT disabled, limits unused */
static inline
void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
					struct cdp_config_params *params)
{ }
12175 
/* Stub: WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT disabled, limits unused */
static inline
void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
			       uint32_t rx_limit)
{
}
12181 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
12182 
12183 /**
12184  * dp_update_config_parameters() - API to store datapath
12185  *                            config parameters
12186  * @soc: soc handle
12187  * @cfg: ini parameter handle
12188  *
12189  * Return: status
12190  */
12191 static
12192 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
12193 				struct cdp_config_params *params)
12194 {
12195 	struct dp_soc *soc = (struct dp_soc *)psoc;
12196 
12197 	if (!(soc)) {
12198 		dp_cdp_err("%pK: Invalid handle", soc);
12199 		return QDF_STATUS_E_INVAL;
12200 	}
12201 
12202 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
12203 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
12204 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
12205 	soc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload =
12206 				params->p2p_tcp_udp_checksumoffload;
12207 	soc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload =
12208 				params->nan_tcp_udp_checksumoffload;
12209 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
12210 				params->tcp_udp_checksumoffload;
12211 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
12212 	soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
12213 	soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
12214 
12215 	dp_update_rx_soft_irq_limit_params(soc, params);
12216 	dp_update_flow_control_parameters(soc, params);
12217 
12218 	return QDF_STATUS_SUCCESS;
12219 }
12220 
/* WDS ops table registered with the cdp layer */
static struct cdp_wds_ops dp_ops_wds = {
	.vdev_set_wds = dp_vdev_set_wds,
#ifdef WDS_VENDOR_EXTENSION
	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
#endif
};
12228 
12229 /*
12230  * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
12231  * @soc_hdl - datapath soc handle
12232  * @vdev_id - virtual interface id
12233  * @callback - callback function
12234  * @ctxt: callback context
12235  *
12236  */
12237 static void
12238 dp_txrx_data_tx_cb_set(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
12239 		       ol_txrx_data_tx_cb callback, void *ctxt)
12240 {
12241 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12242 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12243 						     DP_MOD_ID_CDP);
12244 
12245 	if (!vdev)
12246 		return;
12247 
12248 	vdev->tx_non_std_data_callback.func = callback;
12249 	vdev->tx_non_std_data_callback.ctxt = ctxt;
12250 
12251 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12252 }
12253 
12254 /**
12255  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
12256  * @soc: datapath soc handle
12257  * @pdev_id: id of datapath pdev handle
12258  *
12259  * Return: opaque pointer to dp txrx handle
12260  */
12261 static void *dp_pdev_get_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id)
12262 {
12263 	struct dp_pdev *pdev =
12264 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
12265 						   pdev_id);
12266 	if (qdf_unlikely(!pdev))
12267 		return NULL;
12268 
12269 	return pdev->dp_txrx_handle;
12270 }
12271 
12272 /**
12273  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
12274  * @soc: datapath soc handle
12275  * @pdev_id: id of datapath pdev handle
12276  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
12277  *
12278  * Return: void
12279  */
12280 static void
12281 dp_pdev_set_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id,
12282 			   void *dp_txrx_hdl)
12283 {
12284 	struct dp_pdev *pdev =
12285 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
12286 						   pdev_id);
12287 
12288 	if (!pdev)
12289 		return;
12290 
12291 	pdev->dp_txrx_handle = dp_txrx_hdl;
12292 }
12293 
12294 /**
12295  * dp_vdev_get_dp_ext_handle() - get dp handle from vdev
12296  * @soc: datapath soc handle
12297  * @vdev_id: vdev id
12298  *
12299  * Return: opaque pointer to dp txrx handle
12300  */
12301 static void *dp_vdev_get_dp_ext_handle(ol_txrx_soc_handle soc_hdl,
12302 				       uint8_t vdev_id)
12303 {
12304 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12305 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12306 						     DP_MOD_ID_CDP);
12307 	void *dp_ext_handle;
12308 
12309 	if (!vdev)
12310 		return NULL;
12311 	dp_ext_handle = vdev->vdev_dp_ext_handle;
12312 
12313 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12314 	return dp_ext_handle;
12315 }
12316 
12317 /**
12318  * dp_vdev_set_dp_ext_handle() - set dp handle in vdev
12319  * @soc: datapath soc handle
12320  * @vdev_id: vdev id
12321  * @size: size of advance dp handle
12322  *
12323  * Return: QDF_STATUS
12324  */
12325 static QDF_STATUS
12326 dp_vdev_set_dp_ext_handle(ol_txrx_soc_handle soc_hdl, uint8_t vdev_id,
12327 			  uint16_t size)
12328 {
12329 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12330 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12331 						     DP_MOD_ID_CDP);
12332 	void *dp_ext_handle;
12333 
12334 	if (!vdev)
12335 		return QDF_STATUS_E_FAILURE;
12336 
12337 	dp_ext_handle = qdf_mem_malloc(size);
12338 
12339 	if (!dp_ext_handle) {
12340 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12341 		return QDF_STATUS_E_FAILURE;
12342 	}
12343 
12344 	vdev->vdev_dp_ext_handle = dp_ext_handle;
12345 
12346 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12347 	return QDF_STATUS_SUCCESS;
12348 }
12349 
12350 /**
12351  * dp_vdev_inform_ll_conn() - Inform vdev to add/delete a latency critical
12352  *			      connection for this vdev
12353  * @soc_hdl: CDP soc handle
12354  * @vdev_id: vdev ID
12355  * @action: Add/Delete action
12356  *
12357  * Returns: QDF_STATUS.
12358  */
12359 static QDF_STATUS
12360 dp_vdev_inform_ll_conn(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
12361 		       enum vdev_ll_conn_actions action)
12362 {
12363 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12364 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12365 						     DP_MOD_ID_CDP);
12366 
12367 	if (!vdev) {
12368 		dp_err("LL connection action for invalid vdev %d", vdev_id);
12369 		return QDF_STATUS_E_FAILURE;
12370 	}
12371 
12372 	switch (action) {
12373 	case CDP_VDEV_LL_CONN_ADD:
12374 		vdev->num_latency_critical_conn++;
12375 		break;
12376 
12377 	case CDP_VDEV_LL_CONN_DEL:
12378 		vdev->num_latency_critical_conn--;
12379 		break;
12380 
12381 	default:
12382 		dp_err("LL connection action invalid %d", action);
12383 		break;
12384 	}
12385 
12386 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12387 	return QDF_STATUS_SUCCESS;
12388 }
12389 
12390 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
12391 /**
12392  * dp_soc_set_swlm_enable() - Enable/Disable SWLM if initialized.
12393  * @soc_hdl: CDP Soc handle
12394  * @value: Enable/Disable value
12395  *
12396  * Returns: QDF_STATUS
12397  */
12398 static QDF_STATUS dp_soc_set_swlm_enable(struct cdp_soc_t *soc_hdl,
12399 					 uint8_t value)
12400 {
12401 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12402 
12403 	if (!soc->swlm.is_init) {
12404 		dp_err("SWLM is not initialized");
12405 		return QDF_STATUS_E_FAILURE;
12406 	}
12407 
12408 	soc->swlm.is_enabled = !!value;
12409 
12410 	return QDF_STATUS_SUCCESS;
12411 }
12412 
12413 /**
12414  * dp_soc_is_swlm_enabled() - Check if SWLM is enabled.
12415  * @soc_hdl: CDP Soc handle
12416  *
12417  * Returns: QDF_STATUS
12418  */
12419 static uint8_t dp_soc_is_swlm_enabled(struct cdp_soc_t *soc_hdl)
12420 {
12421 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12422 
12423 	return soc->swlm.is_enabled;
12424 }
12425 #endif
12426 
12427 /**
12428  * dp_display_srng_info() - Dump the srng HP TP info
12429  * @soc_hdl: CDP Soc handle
12430  *
12431  * This function dumps the SW hp/tp values for the important rings.
12432  * HW hp/tp values are not being dumped, since it can lead to
12433  * READ NOC error when UMAC is in low power state. MCC does not have
12434  * device force wake working yet.
12435  *
12436  * Return: none
12437  */
12438 static void dp_display_srng_info(struct cdp_soc_t *soc_hdl)
12439 {
12440 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12441 	hal_soc_handle_t hal_soc = soc->hal_soc;
12442 	uint32_t hp, tp, i;
12443 
12444 	dp_info("SRNG HP-TP data:");
12445 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
12446 		hal_get_sw_hptp(hal_soc, soc->tcl_data_ring[i].hal_srng,
12447 				&tp, &hp);
12448 		dp_info("TCL DATA ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
12449 
12450 		if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, i) ==
12451 		    INVALID_WBM_RING_NUM)
12452 			continue;
12453 
12454 		hal_get_sw_hptp(hal_soc, soc->tx_comp_ring[i].hal_srng,
12455 				&tp, &hp);
12456 		dp_info("TX comp ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
12457 	}
12458 
12459 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
12460 		hal_get_sw_hptp(hal_soc, soc->reo_dest_ring[i].hal_srng,
12461 				&tp, &hp);
12462 		dp_info("REO DST ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
12463 	}
12464 
12465 	hal_get_sw_hptp(hal_soc, soc->reo_exception_ring.hal_srng, &tp, &hp);
12466 	dp_info("REO exception ring: hp=0x%x, tp=0x%x", hp, tp);
12467 
12468 	hal_get_sw_hptp(hal_soc, soc->rx_rel_ring.hal_srng, &tp, &hp);
12469 	dp_info("WBM RX release ring: hp=0x%x, tp=0x%x", hp, tp);
12470 
12471 	hal_get_sw_hptp(hal_soc, soc->wbm_desc_rel_ring.hal_srng, &tp, &hp);
12472 	dp_info("WBM desc release ring: hp=0x%x, tp=0x%x", hp, tp);
12473 }
12474 
12475 /**
12476  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
12477  * @soc_handle: datapath soc handle
12478  *
12479  * Return: opaque pointer to external dp (non-core DP)
12480  */
12481 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
12482 {
12483 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12484 
12485 	return soc->external_txrx_handle;
12486 }
12487 
12488 /**
12489  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
12490  * @soc_handle: datapath soc handle
12491  * @txrx_handle: opaque pointer to external dp (non-core DP)
12492  *
12493  * Return: void
12494  */
12495 static void
12496 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
12497 {
12498 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12499 
12500 	soc->external_txrx_handle = txrx_handle;
12501 }
12502 
12503 /**
12504  * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping
12505  * @soc_hdl: datapath soc handle
12506  * @pdev_id: id of the datapath pdev handle
12507  * @lmac_id: lmac id
12508  *
12509  * Return: QDF_STATUS
12510  */
12511 static QDF_STATUS
12512 dp_soc_map_pdev_to_lmac
12513 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12514 	 uint32_t lmac_id)
12515 {
12516 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12517 
12518 	wlan_cfg_set_hw_mac_idx(soc->wlan_cfg_ctx,
12519 				pdev_id,
12520 				lmac_id);
12521 
12522 	/*Set host PDEV ID for lmac_id*/
12523 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
12524 			      pdev_id,
12525 			      lmac_id);
12526 
12527 	return QDF_STATUS_SUCCESS;
12528 }
12529 
12530 /**
12531  * dp_soc_handle_pdev_mode_change() - Update pdev to lmac mapping
12532  * @soc_hdl: datapath soc handle
12533  * @pdev_id: id of the datapath pdev handle
12534  * @lmac_id: lmac id
12535  *
12536  * In the event of a dynamic mode change, update the pdev to lmac mapping
12537  *
12538  * Return: QDF_STATUS
12539  */
12540 static QDF_STATUS
12541 dp_soc_handle_pdev_mode_change
12542 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12543 	 uint32_t lmac_id)
12544 {
12545 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12546 	struct dp_vdev *vdev = NULL;
12547 	uint8_t hw_pdev_id, mac_id;
12548 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc,
12549 								  pdev_id);
12550 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
12551 
12552 	if (qdf_unlikely(!pdev))
12553 		return QDF_STATUS_E_FAILURE;
12554 
12555 	pdev->lmac_id = lmac_id;
12556 	pdev->target_pdev_id =
12557 		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
12558 	dp_info(" mode change %d %d\n", pdev->pdev_id, pdev->lmac_id);
12559 
12560 	/*Set host PDEV ID for lmac_id*/
12561 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
12562 			      pdev->pdev_id,
12563 			      lmac_id);
12564 
12565 	hw_pdev_id =
12566 		dp_get_target_pdev_id_for_host_pdev_id(soc,
12567 						       pdev->pdev_id);
12568 
12569 	/*
12570 	 * When NSS offload is enabled, send pdev_id->lmac_id
12571 	 * and pdev_id to hw_pdev_id to NSS FW
12572 	 */
12573 	if (nss_config) {
12574 		mac_id = pdev->lmac_id;
12575 		if (soc->cdp_soc.ol_ops->pdev_update_lmac_n_target_pdev_id)
12576 			soc->cdp_soc.ol_ops->
12577 				pdev_update_lmac_n_target_pdev_id(
12578 				soc->ctrl_psoc,
12579 				&pdev_id, &mac_id, &hw_pdev_id);
12580 	}
12581 
12582 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
12583 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
12584 		DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
12585 					       hw_pdev_id);
12586 		vdev->lmac_id = pdev->lmac_id;
12587 	}
12588 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
12589 
12590 	return QDF_STATUS_SUCCESS;
12591 }
12592 
12593 /**
12594  * dp_soc_set_pdev_status_down() - set pdev down/up status
12595  * @soc: datapath soc handle
12596  * @pdev_id: id of datapath pdev handle
12597  * @is_pdev_down: pdev down/up status
12598  *
12599  * Return: QDF_STATUS
12600  */
12601 static QDF_STATUS
12602 dp_soc_set_pdev_status_down(struct cdp_soc_t *soc, uint8_t pdev_id,
12603 			    bool is_pdev_down)
12604 {
12605 	struct dp_pdev *pdev =
12606 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
12607 						   pdev_id);
12608 	if (!pdev)
12609 		return QDF_STATUS_E_FAILURE;
12610 
12611 	pdev->is_pdev_down = is_pdev_down;
12612 	return QDF_STATUS_SUCCESS;
12613 }
12614 
12615 /**
12616  * dp_get_cfg_capabilities() - get dp capabilities
12617  * @soc_handle: datapath soc handle
12618  * @dp_caps: enum for dp capabilities
12619  *
12620  * Return: bool to determine if dp caps is enabled
12621  */
12622 static bool
12623 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
12624 			enum cdp_capabilities dp_caps)
12625 {
12626 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12627 
12628 	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
12629 }
12630 
12631 #ifdef FEATURE_AST
/**
 * dp_peer_teardown_wifi3() - flush peer AST state on peer teardown
 * @soc_hdl: soc handle
 * @vdev_id: id of the vdev the peer belongs to
 * @peer_mac: peer mac address
 *
 * Moves the peer to the logical-delete state and removes all of its AST
 * entries under the soc ast_lock.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_peer_teardown_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		       uint8_t *peer_mac)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_peer *peer =
			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
					       DP_MOD_ID_CDP);

	/* Peer can be null for monitor vap mac address */
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid peer\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	dp_peer_update_state(soc, peer, DP_PEER_STATE_LOGICAL_DELETE);

	qdf_spin_lock_bh(&soc->ast_lock);
	dp_peer_send_wds_disconnect(soc, peer);
	dp_peer_delete_ast_entries(soc, peer);
	qdf_spin_unlock_bh(&soc->ast_lock);

	/* drop the reference taken by dp_peer_find_hash_find() */
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return status;
}
12659 #endif
12660 
12661 #ifndef WLAN_SUPPORT_RX_TAG_STATISTICS
12662 /**
12663  * dp_dump_pdev_rx_protocol_tag_stats - dump the number of packets tagged for
12664  * given protocol type (RX_PROTOCOL_TAG_ALL indicates for all protocol)
12665  * @soc: cdp_soc handle
12666  * @pdev_id: id of cdp_pdev handle
12667  * @protocol_type: protocol type for which stats should be displayed
12668  *
12669  * Return: none
12670  */
12671 static inline void
12672 dp_dump_pdev_rx_protocol_tag_stats(struct cdp_soc_t  *soc, uint8_t pdev_id,
12673 				   uint16_t protocol_type)
12674 {
12675 }
12676 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
12677 
12678 #ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
12679 /**
12680  * dp_update_pdev_rx_protocol_tag - Add/remove a protocol tag that should be
12681  * applied to the desired protocol type packets
12682  * @soc: soc handle
12683  * @pdev_id: id of cdp_pdev handle
12684  * @enable_rx_protocol_tag - bitmask that indicates what protocol types
12685  * are enabled for tagging. zero indicates disable feature, non-zero indicates
12686  * enable feature
12687  * @protocol_type: new protocol type for which the tag is being added
12688  * @tag: user configured tag for the new protocol
12689  *
12690  * Return: Success
12691  */
12692 static inline QDF_STATUS
12693 dp_update_pdev_rx_protocol_tag(struct cdp_soc_t  *soc, uint8_t pdev_id,
12694 			       uint32_t enable_rx_protocol_tag,
12695 			       uint16_t protocol_type,
12696 			       uint16_t tag)
12697 {
12698 	return QDF_STATUS_SUCCESS;
12699 }
12700 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
12701 
12702 #ifndef WLAN_SUPPORT_RX_FLOW_TAG
12703 /**
12704  * dp_set_rx_flow_tag - add/delete a flow
12705  * @soc: soc handle
12706  * @pdev_id: id of cdp_pdev handle
12707  * @flow_info: flow tuple that is to be added to/deleted from flow search table
12708  *
12709  * Return: Success
12710  */
12711 static inline QDF_STATUS
12712 dp_set_rx_flow_tag(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
12713 		   struct cdp_rx_flow_info *flow_info)
12714 {
12715 	return QDF_STATUS_SUCCESS;
12716 }
12717 /**
12718  * dp_dump_rx_flow_tag_stats - dump the number of packets tagged for
12719  * given flow 5-tuple
12720  * @cdp_soc: soc handle
12721  * @pdev_id: id of cdp_pdev handle
12722  * @flow_info: flow 5-tuple for which stats should be displayed
12723  *
12724  * Return: Success
12725  */
12726 static inline QDF_STATUS
12727 dp_dump_rx_flow_tag_stats(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
12728 			  struct cdp_rx_flow_info *flow_info)
12729 {
12730 	return QDF_STATUS_SUCCESS;
12731 }
12732 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
12733 
/**
 * dp_peer_map_attach_wifi3() - set peer/AST limits and attach peer tables
 * @soc_hdl: soc handle
 * @max_peers: maximum number of peers
 * @max_ast_index: maximum number of AST indexes
 * @peer_map_unmap_versions: peer map/unmap event format version
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
					   uint32_t max_peers,
					   uint32_t max_ast_index,
					   uint8_t peer_map_unmap_versions)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	QDF_STATUS status;

	soc->max_peers = max_peers;

	wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);

	status = soc->arch_ops.txrx_peer_map_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status)) {
		dp_err("failure in allocating peer tables");
		return QDF_STATUS_E_FAILURE;
	}

	dp_info("max_peers %u, calculated max_peers %u max_ast_index: %u\n",
		max_peers, soc->max_peer_id, max_ast_index);

	status = dp_peer_find_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status)) {
		dp_err("Peer find attach failure");
		goto fail;
	}

	soc->peer_map_unmap_versions = peer_map_unmap_versions;
	soc->peer_map_attach_success = TRUE;

	return QDF_STATUS_SUCCESS;
fail:
	/* unwind the arch-specific peer map attach on failure */
	soc->arch_ops.txrx_peer_map_detach(soc);

	return status;
}
12770 
12771 static QDF_STATUS dp_soc_set_param(struct cdp_soc_t  *soc_hdl,
12772 				   enum cdp_soc_param_t param,
12773 				   uint32_t value)
12774 {
12775 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12776 
12777 	switch (param) {
12778 	case DP_SOC_PARAM_MSDU_EXCEPTION_DESC:
12779 		soc->num_msdu_exception_desc = value;
12780 		dp_info("num_msdu exception_desc %u",
12781 			value);
12782 		break;
12783 	case DP_SOC_PARAM_CMEM_FSE_SUPPORT:
12784 		if (wlan_cfg_is_fst_in_cmem_enabled(soc->wlan_cfg_ctx))
12785 			soc->fst_in_cmem = !!value;
12786 		dp_info("FW supports CMEM FSE %u", value);
12787 		break;
12788 	case DP_SOC_PARAM_MAX_AST_AGEOUT:
12789 		soc->max_ast_ageout_count = value;
12790 		dp_info("Max ast ageout count %u", soc->max_ast_ageout_count);
12791 		break;
12792 	case DP_SOC_PARAM_EAPOL_OVER_CONTROL_PORT:
12793 		soc->eapol_over_control_port = value;
12794 		dp_info("Eapol over control_port:%d",
12795 			soc->eapol_over_control_port);
12796 		break;
12797 	case DP_SOC_PARAM_MULTI_PEER_GRP_CMD_SUPPORT:
12798 		soc->multi_peer_grp_cmd_supported = value;
12799 		dp_info("Multi Peer group command support:%d",
12800 			soc->multi_peer_grp_cmd_supported);
12801 		break;
12802 	case DP_SOC_PARAM_RSSI_DBM_CONV_SUPPORT:
12803 		soc->features.rssi_dbm_conv_support = value;
12804 		dp_info("Rssi dbm converstion support:%u",
12805 			soc->features.rssi_dbm_conv_support);
12806 		break;
12807 	case DP_SOC_PARAM_UMAC_HW_RESET_SUPPORT:
12808 		soc->features.umac_hw_reset_support = value;
12809 		dp_info("UMAC HW reset support :%u",
12810 			soc->features.umac_hw_reset_support);
12811 		break;
12812 	default:
12813 		dp_info("not handled param %d ", param);
12814 		break;
12815 	}
12816 
12817 	return QDF_STATUS_SUCCESS;
12818 }
12819 
/**
 * dp_soc_set_rate_stats_ctx() - store the rate stats context in soc
 * @soc_handle: datapath soc handle
 * @stats_ctx: opaque rate stats context provided by the control layer
 *
 * Return: void
 */
static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
				      void *stats_ctx)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	soc->rate_stats_ctx = (struct cdp_soc_rate_stats_ctx *)stats_ctx;
}
12827 
12828 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
12829 /**
12830  * dp_peer_flush_rate_stats_req(): Flush peer rate stats
12831  * @soc: Datapath SOC handle
12832  * @peer: Datapath peer
12833  * @arg: argument to iter function
12834  *
12835  * Return: QDF_STATUS
12836  */
12837 static void
12838 dp_peer_flush_rate_stats_req(struct dp_soc *soc, struct dp_peer *peer,
12839 			     void *arg)
12840 {
12841 	if (peer->bss_peer)
12842 		return;
12843 
12844 	dp_wdi_event_handler(
12845 		WDI_EVENT_FLUSH_RATE_STATS_REQ,
12846 		soc, dp_monitor_peer_get_peerstats_ctx(soc, peer),
12847 		peer->peer_id,
12848 		WDI_NO_VAL, peer->vdev->pdev->pdev_id);
12849 }
12850 
12851 /**
12852  * dp_flush_rate_stats_req(): Flush peer rate stats in pdev
12853  * @soc_hdl: Datapath SOC handle
12854  * @pdev_id: pdev_id
12855  *
12856  * Return: QDF_STATUS
12857  */
12858 static QDF_STATUS dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
12859 					  uint8_t pdev_id)
12860 {
12861 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12862 	struct dp_pdev *pdev =
12863 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
12864 						   pdev_id);
12865 	if (!pdev)
12866 		return QDF_STATUS_E_FAILURE;
12867 
12868 	dp_pdev_iterate_peer(pdev, dp_peer_flush_rate_stats_req, NULL,
12869 			     DP_MOD_ID_CDP);
12870 
12871 	return QDF_STATUS_SUCCESS;
12872 }
12873 #else
/* Stub: per-packet info / WDI events disabled, nothing to flush */
static inline QDF_STATUS
dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
			uint8_t pdev_id)
{
	return QDF_STATUS_SUCCESS;
}
12880 #endif
12881 
12882 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
12883 #ifdef WLAN_FEATURE_11BE_MLO
12884 /**
12885  * dp_get_peer_extd_rate_link_stats(): function to get peer
12886  *				extended rate and link stats
12887  * @soc_hdl: dp soc handler
12888  * @mac_addr: mac address of peer
12889  *
12890  * Return: QDF_STATUS
12891  */
12892 static QDF_STATUS
12893 dp_get_peer_extd_rate_link_stats(struct cdp_soc_t *soc_hdl, uint8_t *mac_addr)
12894 {
12895 	uint8_t i;
12896 	struct dp_peer *link_peer;
12897 	struct dp_soc *link_peer_soc;
12898 	struct dp_mld_link_peers link_peers_info;
12899 	struct dp_peer *peer = NULL;
12900 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12901 
12902 	if (!mac_addr) {
12903 		dp_err("NULL peer mac addr\n");
12904 		return QDF_STATUS_E_FAILURE;
12905 	}
12906 
12907 	peer = dp_peer_find_hash_find(soc, mac_addr, 0,
12908 				      DP_VDEV_ALL, DP_MOD_ID_CDP);
12909 	if (!peer) {
12910 		dp_err("Invalid peer\n");
12911 		return QDF_STATUS_E_FAILURE;
12912 	}
12913 
12914 	if (IS_MLO_DP_MLD_PEER(peer)) {
12915 		dp_get_link_peers_ref_from_mld_peer(soc, peer,
12916 						    &link_peers_info,
12917 						    DP_MOD_ID_CDP);
12918 		for (i = 0; i < link_peers_info.num_links; i++) {
12919 			link_peer = link_peers_info.link_peers[i];
12920 			link_peer_soc = link_peer->vdev->pdev->soc;
12921 			dp_wdi_event_handler(WDI_EVENT_FLUSH_RATE_STATS_REQ,
12922 					     link_peer_soc,
12923 					     dp_monitor_peer_get_peerstats_ctx
12924 					     (link_peer_soc, link_peer),
12925 					     link_peer->peer_id,
12926 					     WDI_NO_VAL,
12927 					     link_peer->vdev->pdev->pdev_id);
12928 		}
12929 		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
12930 	} else {
12931 		dp_wdi_event_handler(
12932 				WDI_EVENT_FLUSH_RATE_STATS_REQ, soc,
12933 				dp_monitor_peer_get_peerstats_ctx(soc, peer),
12934 				peer->peer_id,
12935 				WDI_NO_VAL, peer->vdev->pdev->pdev_id);
12936 	}
12937 
12938 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
12939 	return QDF_STATUS_SUCCESS;
12940 }
12941 #else
/**
 * dp_get_peer_extd_rate_link_stats() - post a WDI flush-rate-stats
 *					request for a peer
 * @soc_hdl: dp soc handler
 * @mac_addr: mac address of peer
 *
 * Non-MLO variant.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_get_peer_extd_rate_link_stats(struct cdp_soc_t *soc_hdl, uint8_t *mac_addr)
{
	struct dp_peer *peer = NULL;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	if (!mac_addr) {
		dp_err("NULL peer mac addr\n");
		return QDF_STATUS_E_FAILURE;
	}

	peer = dp_peer_find_hash_find(soc, mac_addr, 0,
				      DP_VDEV_ALL, DP_MOD_ID_CDP);
	if (!peer) {
		dp_err("Invalid peer\n");
		return QDF_STATUS_E_FAILURE;
	}

	dp_wdi_event_handler(
			WDI_EVENT_FLUSH_RATE_STATS_REQ, soc,
			dp_monitor_peer_get_peerstats_ctx(soc, peer),
			peer->peer_id,
			WDI_NO_VAL, peer->vdev->pdev->pdev_id);

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}
12969 #endif
12970 #else
/* Stub: per-packet info / WDI events disabled */
static inline QDF_STATUS
dp_get_peer_extd_rate_link_stats(struct cdp_soc_t *soc_hdl, uint8_t *mac_addr)
{
	return QDF_STATUS_SUCCESS;
}
12976 #endif
12977 
12978 static void *dp_peer_get_peerstats_ctx(struct cdp_soc_t *soc_hdl,
12979 				       uint8_t vdev_id,
12980 				       uint8_t *mac_addr)
12981 {
12982 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12983 	struct dp_peer *peer;
12984 	void *peerstats_ctx = NULL;
12985 
12986 	if (mac_addr) {
12987 		peer = dp_peer_find_hash_find(soc, mac_addr,
12988 					      0, vdev_id,
12989 					      DP_MOD_ID_CDP);
12990 		if (!peer)
12991 			return NULL;
12992 
12993 		if (!IS_MLO_DP_MLD_PEER(peer))
12994 			peerstats_ctx = dp_monitor_peer_get_peerstats_ctx(soc,
12995 									  peer);
12996 
12997 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
12998 	}
12999 
13000 	return peerstats_ctx;
13001 }
13002 
13003 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
13004 static QDF_STATUS dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
13005 					   uint8_t pdev_id,
13006 					   void *buf)
13007 {
13008 	 dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS,
13009 			      (struct dp_soc *)soc, buf, HTT_INVALID_PEER,
13010 			      WDI_NO_VAL, pdev_id);
13011 	return QDF_STATUS_SUCCESS;
13012 }
13013 #else
/* Stub: per-packet info / WDI events disabled, nothing to flush */
static inline QDF_STATUS
dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
			 uint8_t pdev_id,
			 void *buf)
{
	return QDF_STATUS_SUCCESS;
}
13021 #endif
13022 
13023 static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle)
13024 {
13025 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
13026 
13027 	return soc->rate_stats_ctx;
13028 }
13029 
13030 /*
13031  * dp_get_cfg() - get dp cfg
13032  * @soc: cdp soc handle
13033  * @cfg: cfg enum
13034  *
13035  * Return: cfg value
13036  */
13037 static uint32_t dp_get_cfg(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg)
13038 {
13039 	struct dp_soc *dpsoc = (struct dp_soc *)soc;
13040 	uint32_t value = 0;
13041 
13042 	switch (cfg) {
13043 	case cfg_dp_enable_data_stall:
13044 		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
13045 		break;
13046 	case cfg_dp_enable_p2p_ip_tcp_udp_checksum_offload:
13047 		value = dpsoc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload;
13048 		break;
13049 	case cfg_dp_enable_nan_ip_tcp_udp_checksum_offload:
13050 		value = dpsoc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload;
13051 		break;
13052 	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
13053 		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
13054 		break;
13055 	case cfg_dp_disable_legacy_mode_csum_offload:
13056 		value = dpsoc->wlan_cfg_ctx->
13057 					legacy_mode_checksumoffload_disable;
13058 		break;
13059 	case cfg_dp_tso_enable:
13060 		value = dpsoc->wlan_cfg_ctx->tso_enabled;
13061 		break;
13062 	case cfg_dp_lro_enable:
13063 		value = dpsoc->wlan_cfg_ctx->lro_enabled;
13064 		break;
13065 	case cfg_dp_gro_enable:
13066 		value = dpsoc->wlan_cfg_ctx->gro_enabled;
13067 		break;
13068 	case cfg_dp_tc_based_dyn_gro_enable:
13069 		value = dpsoc->wlan_cfg_ctx->tc_based_dynamic_gro;
13070 		break;
13071 	case cfg_dp_tc_ingress_prio:
13072 		value = dpsoc->wlan_cfg_ctx->tc_ingress_prio;
13073 		break;
13074 	case cfg_dp_sg_enable:
13075 		value = dpsoc->wlan_cfg_ctx->sg_enabled;
13076 		break;
13077 	case cfg_dp_tx_flow_start_queue_offset:
13078 		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
13079 		break;
13080 	case cfg_dp_tx_flow_stop_queue_threshold:
13081 		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
13082 		break;
13083 	case cfg_dp_disable_intra_bss_fwd:
13084 		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
13085 		break;
13086 	case cfg_dp_pktlog_buffer_size:
13087 		value = dpsoc->wlan_cfg_ctx->pktlog_buffer_size;
13088 		break;
13089 	case cfg_dp_wow_check_rx_pending:
13090 		value = dpsoc->wlan_cfg_ctx->wow_check_rx_pending_enable;
13091 		break;
13092 	default:
13093 		value =  0;
13094 	}
13095 
13096 	return value;
13097 }
13098 
13099 #ifdef PEER_FLOW_CONTROL
13100 /**
13101  * dp_tx_flow_ctrl_configure_pdev() - Configure flow control params
13102  * @soc_handle: datapath soc handle
13103  * @pdev_id: id of datapath pdev handle
13104  * @param: ol ath params
13105  * @value: value of the flag
13106  * @buff: Buffer to be passed
13107  *
13108  * Implemented this function same as legacy function. In legacy code, single
13109  * function is used to display stats and update pdev params.
13110  *
13111  * Return: 0 for success. nonzero for failure.
13112  */
13113 static uint32_t dp_tx_flow_ctrl_configure_pdev(struct cdp_soc_t *soc_handle,
13114 					       uint8_t pdev_id,
13115 					       enum _dp_param_t param,
13116 					       uint32_t value, void *buff)
13117 {
13118 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
13119 	struct dp_pdev *pdev =
13120 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
13121 						   pdev_id);
13122 
13123 	if (qdf_unlikely(!pdev))
13124 		return 1;
13125 
13126 	soc = pdev->soc;
13127 	if (!soc)
13128 		return 1;
13129 
13130 	switch (param) {
13131 #ifdef QCA_ENH_V3_STATS_SUPPORT
13132 	case DP_PARAM_VIDEO_DELAY_STATS_FC:
13133 		if (value)
13134 			pdev->delay_stats_flag = true;
13135 		else
13136 			pdev->delay_stats_flag = false;
13137 		break;
13138 	case DP_PARAM_VIDEO_STATS_FC:
13139 		qdf_print("------- TID Stats ------\n");
13140 		dp_pdev_print_tid_stats(pdev);
13141 		qdf_print("------ Delay Stats ------\n");
13142 		dp_pdev_print_delay_stats(pdev);
13143 		qdf_print("------ Rx Error Stats ------\n");
13144 		dp_pdev_print_rx_error_stats(pdev);
13145 		break;
13146 #endif
13147 	case DP_PARAM_TOTAL_Q_SIZE:
13148 		{
13149 			uint32_t tx_min, tx_max;
13150 
13151 			tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx);
13152 			tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
13153 
13154 			if (!buff) {
13155 				if ((value >= tx_min) && (value <= tx_max)) {
13156 					pdev->num_tx_allowed = value;
13157 				} else {
13158 					dp_tx_info("%pK: Failed to update num_tx_allowed, Q_min = %d Q_max = %d",
13159 						   soc, tx_min, tx_max);
13160 					break;
13161 				}
13162 			} else {
13163 				*(int *)buff = pdev->num_tx_allowed;
13164 			}
13165 		}
13166 		break;
13167 	default:
13168 		dp_tx_info("%pK: not handled param %d ", soc, param);
13169 		break;
13170 	}
13171 
13172 	return 0;
13173 }
13174 #endif
13175 
13176 /**
13177  * dp_set_pdev_pcp_tid_map_wifi3(): update pcp tid map in pdev
13178  * @psoc: dp soc handle
13179  * @pdev_id: id of DP_PDEV handle
13180  * @pcp: pcp value
13181  * @tid: tid value passed by the user
13182  *
13183  * Return: QDF_STATUS_SUCCESS on success
13184  */
13185 static QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
13186 						uint8_t pdev_id,
13187 						uint8_t pcp, uint8_t tid)
13188 {
13189 	struct dp_soc *soc = (struct dp_soc *)psoc;
13190 
13191 	soc->pcp_tid_map[pcp] = tid;
13192 
13193 	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
13194 	return QDF_STATUS_SUCCESS;
13195 }
13196 
13197 /**
13198  * dp_set_vdev_pcp_tid_map_wifi3(): update pcp tid map in vdev
13199  * @soc: DP soc handle
13200  * @vdev_id: id of DP_VDEV handle
13201  * @pcp: pcp value
13202  * @tid: tid value passed by the user
13203  *
13204  * Return: QDF_STATUS_SUCCESS on success
13205  */
13206 static QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc_hdl,
13207 						uint8_t vdev_id,
13208 						uint8_t pcp, uint8_t tid)
13209 {
13210 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13211 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
13212 						     DP_MOD_ID_CDP);
13213 
13214 	if (!vdev)
13215 		return QDF_STATUS_E_FAILURE;
13216 
13217 	vdev->pcp_tid_map[pcp] = tid;
13218 
13219 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
13220 	return QDF_STATUS_SUCCESS;
13221 }
13222 
13223 #if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
/**
 * dp_drain_txrx() - drain all pending tx completions and rx packets on
 *		     the host-serviced SRNGs
 * @soc_handle: cdp soc handle
 *
 * Called on runtime-suspend / power-save paths; assumes tx/rx traffic is
 * already quiesced so the service loops terminate quickly despite the
 * large budget.
 *
 * Return: none
 */
static void dp_drain_txrx(struct cdp_soc_t *soc_handle)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	uint32_t cur_tx_limit, cur_rx_limit;
	uint32_t budget = 0xffff;
	uint32_t val;
	int i;
	int cpu = dp_srng_get_cpu();

	/* save the configured soft-irq reap limits so they can be restored */
	cur_tx_limit = soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit;
	cur_rx_limit = soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit;

	/* Temporarily increase soft irq limits when going to drain
	 * the UMAC/LMAC SRNGs and restore them after polling.
	 * Though the budget is on higher side, the TX/RX reaping loops
	 * will not execute longer as both TX and RX would be suspended
	 * by the time this API is called.
	 */
	dp_update_soft_irq_limits(soc, budget, budget);

	/* service every interrupt context once with the raised budget */
	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
		dp_service_srngs(&soc->intr_ctx[i], budget, cpu);

	dp_update_soft_irq_limits(soc, cur_tx_limit, cur_rx_limit);

	/* Do a dummy read at offset 0; this will ensure all
	 * pendings writes(HP/TP) are flushed before read returns.
	 */
	val = HAL_REG_READ((struct hal_soc *)soc->hal_soc, 0);
	dp_debug("Register value at offset 0: %u\n", val);
}
13255 #endif
13256 
13257 #ifdef DP_UMAC_HW_RESET_SUPPORT
13258 /**
13259  * dp_reset_interrupt_ring_masks(): Reset rx interrupt masks
13260  * @soc: dp soc handle
13261  *
13262  * Return: void
13263  */
13264 static void dp_reset_interrupt_ring_masks(struct dp_soc *soc)
13265 {
13266 	struct dp_intr_bkp *intr_bkp;
13267 	struct dp_intr *intr_ctx;
13268 	int num_ctxt = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
13269 	int i;
13270 
13271 	intr_bkp =
13272 	(struct dp_intr_bkp *)qdf_mem_malloc_atomic(sizeof(struct dp_intr_bkp) *
13273 			num_ctxt);
13274 
13275 	qdf_assert_always(intr_bkp);
13276 
13277 	soc->umac_reset_ctx.intr_ctx_bkp = intr_bkp;
13278 	for (i = 0; i < num_ctxt; i++) {
13279 		intr_ctx = &soc->intr_ctx[i];
13280 
13281 		intr_bkp->tx_ring_mask = intr_ctx->tx_ring_mask;
13282 		intr_bkp->rx_ring_mask = intr_ctx->rx_ring_mask;
13283 		intr_bkp->rx_mon_ring_mask = intr_ctx->rx_mon_ring_mask;
13284 		intr_bkp->rx_err_ring_mask = intr_ctx->rx_err_ring_mask;
13285 		intr_bkp->rx_wbm_rel_ring_mask = intr_ctx->rx_wbm_rel_ring_mask;
13286 		intr_bkp->reo_status_ring_mask = intr_ctx->reo_status_ring_mask;
13287 		intr_bkp->rxdma2host_ring_mask = intr_ctx->rxdma2host_ring_mask;
13288 		intr_bkp->host2rxdma_ring_mask = intr_ctx->host2rxdma_ring_mask;
13289 		intr_bkp->host2rxdma_mon_ring_mask =
13290 					intr_ctx->host2rxdma_mon_ring_mask;
13291 		intr_bkp->tx_mon_ring_mask = intr_ctx->tx_mon_ring_mask;
13292 
13293 		intr_ctx->tx_ring_mask = 0;
13294 		intr_ctx->rx_ring_mask = 0;
13295 		intr_ctx->rx_mon_ring_mask = 0;
13296 		intr_ctx->rx_err_ring_mask = 0;
13297 		intr_ctx->rx_wbm_rel_ring_mask = 0;
13298 		intr_ctx->reo_status_ring_mask = 0;
13299 		intr_ctx->rxdma2host_ring_mask = 0;
13300 		intr_ctx->host2rxdma_ring_mask = 0;
13301 		intr_ctx->host2rxdma_mon_ring_mask = 0;
13302 		intr_ctx->tx_mon_ring_mask = 0;
13303 
13304 		intr_bkp++;
13305 	}
13306 }
13307 
13308 /**
13309  * dp_restore_interrupt_ring_masks(): Restore rx interrupt masks
13310  * @soc: dp soc handle
13311  *
13312  * Return: void
13313  */
13314 static void dp_restore_interrupt_ring_masks(struct dp_soc *soc)
13315 {
13316 	struct dp_intr_bkp *intr_bkp = soc->umac_reset_ctx.intr_ctx_bkp;
13317 	struct dp_intr_bkp *intr_bkp_base = intr_bkp;
13318 	struct dp_intr *intr_ctx;
13319 	int num_ctxt = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
13320 	int i;
13321 
13322 	qdf_assert_always(intr_bkp);
13323 
13324 	for (i = 0; i < num_ctxt; i++) {
13325 		intr_ctx = &soc->intr_ctx[i];
13326 
13327 		intr_ctx->tx_ring_mask = intr_bkp->tx_ring_mask;
13328 		intr_ctx->rx_ring_mask = intr_bkp->rx_ring_mask;
13329 		intr_ctx->rx_mon_ring_mask = intr_bkp->rx_mon_ring_mask;
13330 		intr_ctx->rx_err_ring_mask = intr_bkp->rx_err_ring_mask;
13331 		intr_ctx->rx_wbm_rel_ring_mask = intr_bkp->rx_wbm_rel_ring_mask;
13332 		intr_ctx->reo_status_ring_mask = intr_bkp->reo_status_ring_mask;
13333 		intr_ctx->rxdma2host_ring_mask = intr_bkp->rxdma2host_ring_mask;
13334 		intr_ctx->host2rxdma_ring_mask = intr_bkp->host2rxdma_ring_mask;
13335 		intr_ctx->host2rxdma_mon_ring_mask =
13336 			intr_bkp->host2rxdma_mon_ring_mask;
13337 		intr_ctx->tx_mon_ring_mask = intr_bkp->tx_mon_ring_mask;
13338 
13339 		intr_bkp++;
13340 	}
13341 
13342 	qdf_mem_free(intr_bkp_base);
13343 	soc->umac_reset_ctx.intr_ctx_bkp = NULL;
13344 }
13345 
13346 /**
13347  * dp_resume_tx_hardstart(): Restore the old Tx hardstart functions
13348  * @soc: dp soc handle
13349  *
13350  * Return: void
13351  */
13352 static void dp_resume_tx_hardstart(struct dp_soc *soc)
13353 {
13354 	struct dp_vdev *vdev;
13355 	struct ol_txrx_hardtart_ctxt ctxt = {0};
13356 	struct cdp_ctrl_objmgr_psoc *psoc = soc->ctrl_psoc;
13357 	int i;
13358 
13359 	for (i = 0; i < MAX_PDEV_CNT; i++) {
13360 		struct dp_pdev *pdev = soc->pdev_list[i];
13361 
13362 		if (!pdev)
13363 			continue;
13364 
13365 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
13366 			uint8_t vdev_id = vdev->vdev_id;
13367 
13368 			dp_vdev_fetch_tx_handler(vdev, soc, &ctxt);
13369 			soc->cdp_soc.ol_ops->dp_update_tx_hardstart(psoc,
13370 								    vdev_id,
13371 								    &ctxt);
13372 		}
13373 	}
13374 }
13375 
13376 /**
13377  * dp_pause_tx_hardstart(): Register Tx hardstart functions to drop packets
13378  * @soc: dp soc handle
13379  *
13380  * Return: void
13381  */
13382 static void dp_pause_tx_hardstart(struct dp_soc *soc)
13383 {
13384 	struct dp_vdev *vdev;
13385 	struct ol_txrx_hardtart_ctxt ctxt;
13386 	struct cdp_ctrl_objmgr_psoc *psoc = soc->ctrl_psoc;
13387 	int i;
13388 
13389 	ctxt.tx = &dp_tx_drop;
13390 	ctxt.tx_fast = &dp_tx_drop;
13391 	ctxt.tx_exception = &dp_tx_exc_drop;
13392 
13393 	for (i = 0; i < MAX_PDEV_CNT; i++) {
13394 		struct dp_pdev *pdev = soc->pdev_list[i];
13395 
13396 		if (!pdev)
13397 			continue;
13398 
13399 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
13400 			uint8_t vdev_id = vdev->vdev_id;
13401 
13402 			soc->cdp_soc.ol_ops->dp_update_tx_hardstart(psoc,
13403 								    vdev_id,
13404 								    &ctxt);
13405 		}
13406 	}
13407 }
13408 
13409 /**
13410  * dp_unregister_notify_umac_pre_reset_fw_callback(): unregister notify_fw_cb
13411  * @soc: dp soc handle
13412  *
13413  * Return: void
13414  */
13415 static inline
13416 void dp_unregister_notify_umac_pre_reset_fw_callback(struct dp_soc *soc)
13417 {
13418 	soc->notify_fw_callback = NULL;
13419 }
13420 
13421 /**
13422  * dp_check_n_notify_umac_prereset_done(): Send pre reset done to firmware
13423  * @soc: dp soc handle
13424  *
13425  * Return: void
13426  */
13427 static inline
13428 void dp_check_n_notify_umac_prereset_done(struct dp_soc *soc)
13429 {
13430 	/* Some Cpu(s) is processing the umac rings*/
13431 	if (soc->service_rings_running)
13432 		return;
13433 
13434 	/* Notify the firmware that Umac pre reset is complete */
13435 	dp_umac_reset_notify_action_completion(soc,
13436 					       UMAC_RESET_ACTION_DO_PRE_RESET);
13437 
13438 	/* Unregister the callback */
13439 	dp_unregister_notify_umac_pre_reset_fw_callback(soc);
13440 }
13441 
13442 /**
13443  * dp_register_notify_umac_pre_reset_fw_callback(): register notify_fw_cb
13444  * @soc: dp soc handle
13445  *
13446  * Return: void
13447  */
13448 static inline
13449 void dp_register_notify_umac_pre_reset_fw_callback(struct dp_soc *soc)
13450 {
13451 	soc->notify_fw_callback = dp_check_n_notify_umac_prereset_done;
13452 }
13453 
13454 #ifdef DP_UMAC_HW_HARD_RESET
13455 /**
13456  * dp_set_umac_regs(): Reinitialize host umac registers
13457  * @soc: dp soc handle
13458  *
13459  * Return: void
13460  */
13461 static void dp_set_umac_regs(struct dp_soc *soc)
13462 {
13463 	int i;
13464 	struct hal_reo_params reo_params;
13465 
13466 	qdf_mem_zero(&reo_params, sizeof(reo_params));
13467 
13468 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
13469 		if (soc->arch_ops.reo_remap_config(soc, &reo_params.remap0,
13470 						   &reo_params.remap1,
13471 						   &reo_params.remap2))
13472 			reo_params.rx_hash_enabled = true;
13473 		else
13474 			reo_params.rx_hash_enabled = false;
13475 	}
13476 
13477 	hal_reo_setup(soc->hal_soc, &reo_params, 0);
13478 
13479 	soc->arch_ops.dp_cc_reg_cfg_init(soc, true);
13480 
13481 	for (i = 0; i < PCP_TID_MAP_MAX; i++)
13482 		hal_tx_update_pcp_tid_map(soc->hal_soc, soc->pcp_tid_map[i], i);
13483 
13484 	for (i = 0; i < MAX_PDEV_CNT; i++) {
13485 		struct dp_vdev *vdev = NULL;
13486 		struct dp_pdev *pdev = soc->pdev_list[i];
13487 
13488 		if (!pdev)
13489 			continue;
13490 
13491 		for (i = 0; i < soc->num_hw_dscp_tid_map; i++)
13492 			hal_tx_set_dscp_tid_map(soc->hal_soc,
13493 						pdev->dscp_tid_map[i], i);
13494 
13495 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
13496 			soc->arch_ops.dp_bank_reconfig(soc, vdev);
13497 			soc->arch_ops.dp_reconfig_tx_vdev_mcast_ctrl(soc,
13498 								      vdev);
13499 		}
13500 	}
13501 }
13502 #else
/* nop when DP_UMAC_HW_HARD_RESET is not enabled */
static void dp_set_umac_regs(struct dp_soc *soc)
{
}
13506 #endif
13507 
13508 /**
13509  * dp_reinit_rings(): Reinitialize host managed rings
13510  * @soc: dp soc handle
13511  *
13512  * Return: QDF_STATUS
13513  */
13514 static void dp_reinit_rings(struct dp_soc *soc)
13515 {
13516 	unsigned long end;
13517 
13518 	dp_soc_srng_deinit(soc);
13519 	dp_hw_link_desc_ring_deinit(soc);
13520 
13521 	/* Busy wait for 2 ms to make sure the rings are in idle state
13522 	 * before we enable them again
13523 	 */
13524 	end = jiffies + msecs_to_jiffies(2);
13525 	while (time_before(jiffies, end))
13526 		;
13527 
13528 	dp_hw_link_desc_ring_init(soc);
13529 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
13530 	dp_soc_srng_init(soc);
13531 }
13532 
13533 /**
13534  * dp_umac_reset_handle_pre_reset(): Handle Umac prereset interrupt from FW
13535  * @soc: dp soc handle
13536  *
13537  * Return: QDF_STATUS
13538  */
13539 static QDF_STATUS dp_umac_reset_handle_pre_reset(struct dp_soc *soc)
13540 {
13541 	dp_reset_interrupt_ring_masks(soc);
13542 
13543 	dp_pause_tx_hardstart(soc);
13544 	dp_pause_reo_send_cmd(soc);
13545 
13546 	dp_check_n_notify_umac_prereset_done(soc);
13547 
13548 	soc->umac_reset_ctx.nbuf_list = NULL;
13549 
13550 	return QDF_STATUS_SUCCESS;
13551 }
13552 
13553 /**
13554  * dp_umac_reset_handle_post_reset(): Handle Umac postreset interrupt from FW
13555  * @soc: dp soc handle
13556  *
13557  * Return: QDF_STATUS
13558  */
13559 static QDF_STATUS dp_umac_reset_handle_post_reset(struct dp_soc *soc)
13560 {
13561 	if (!soc->umac_reset_ctx.skel_enable) {
13562 		qdf_nbuf_t *nbuf_list = &soc->umac_reset_ctx.nbuf_list;
13563 
13564 		dp_set_umac_regs(soc);
13565 
13566 		dp_reinit_rings(soc);
13567 
13568 		dp_rx_desc_reuse(soc, nbuf_list);
13569 
13570 		dp_cleanup_reo_cmd_module(soc);
13571 
13572 		dp_tx_desc_pool_cleanup(soc, nbuf_list);
13573 
13574 		dp_reset_tid_q_setup(soc);
13575 	}
13576 
13577 	return dp_umac_reset_notify_action_completion(soc,
13578 					UMAC_RESET_ACTION_DO_POST_RESET_START);
13579 }
13580 
13581 /**
13582  * dp_umac_reset_handle_post_reset_complete(): Handle Umac postreset_complete
13583  *						interrupt from FW
13584  * @soc: dp soc handle
13585  *
13586  * Return: QDF_STATUS
13587  */
13588 static QDF_STATUS dp_umac_reset_handle_post_reset_complete(struct dp_soc *soc)
13589 {
13590 	QDF_STATUS status;
13591 	qdf_nbuf_t nbuf_list = soc->umac_reset_ctx.nbuf_list;
13592 
13593 	soc->umac_reset_ctx.nbuf_list = NULL;
13594 
13595 	dp_resume_reo_send_cmd(soc);
13596 
13597 	dp_restore_interrupt_ring_masks(soc);
13598 
13599 	dp_resume_tx_hardstart(soc);
13600 
13601 	status = dp_umac_reset_notify_action_completion(soc,
13602 				UMAC_RESET_ACTION_DO_POST_RESET_COMPLETE);
13603 
13604 	while (nbuf_list) {
13605 		qdf_nbuf_t nbuf = nbuf_list->next;
13606 
13607 		qdf_nbuf_free(nbuf_list);
13608 		nbuf_list = nbuf;
13609 	}
13610 
13611 	dp_umac_reset_info("Umac reset done on soc %pK\n prereset : %u us\n"
13612 			   "postreset : %u us \n postreset complete: %u us \n",
13613 			   soc,
13614 			   soc->umac_reset_ctx.ts.pre_reset_done -
13615 			   soc->umac_reset_ctx.ts.pre_reset_start,
13616 			   soc->umac_reset_ctx.ts.post_reset_done -
13617 			   soc->umac_reset_ctx.ts.post_reset_start,
13618 			   soc->umac_reset_ctx.ts.post_reset_complete_done -
13619 			   soc->umac_reset_ctx.ts.post_reset_complete_start);
13620 
13621 	return status;
13622 }
13623 #endif
13624 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
/**
 * dp_set_pkt_capture_mode() - enable/disable packet capture mode in cfg
 * @soc_handle: cdp soc handle
 * @val: new packet capture mode value
 *
 * Return: none
 */
static void
dp_set_pkt_capture_mode(struct cdp_soc_t *soc_handle, bool val)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	soc->wlan_cfg_ctx->pkt_capture_mode = val;
}
13632 #endif
13633 
13634 #ifdef HW_TX_DELAY_STATS_ENABLE
13635 /**
13636  * dp_enable_disable_vdev_tx_delay_stats(): Start/Stop tx delay stats capture
13637  * @soc: DP soc handle
13638  * @vdev_id: vdev id
13639  * @value: value
13640  *
13641  * Return: None
13642  */
13643 static void
13644 dp_enable_disable_vdev_tx_delay_stats(struct cdp_soc_t *soc_hdl,
13645 				      uint8_t vdev_id,
13646 				      uint8_t value)
13647 {
13648 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13649 	struct dp_vdev *vdev = NULL;
13650 
13651 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
13652 	if (!vdev)
13653 		return;
13654 
13655 	vdev->hw_tx_delay_stats_enabled = value;
13656 
13657 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
13658 }
13659 
13660 /**
13661  * dp_check_vdev_tx_delay_stats_enabled() - check the feature is enabled or not
13662  * @soc: DP soc handle
13663  * @vdev_id: vdev id
13664  *
13665  * Returns: 1 if enabled, 0 if disabled
13666  */
13667 static uint8_t
13668 dp_check_vdev_tx_delay_stats_enabled(struct cdp_soc_t *soc_hdl,
13669 				     uint8_t vdev_id)
13670 {
13671 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13672 	struct dp_vdev *vdev;
13673 	uint8_t ret_val = 0;
13674 
13675 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
13676 	if (!vdev)
13677 		return ret_val;
13678 
13679 	ret_val = vdev->hw_tx_delay_stats_enabled;
13680 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
13681 
13682 	return ret_val;
13683 }
13684 #endif
13685 
13686 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
/**
 * dp_recovery_vdev_flush_peers() - flush peers of a vdev on recovery
 * @cdp_soc: cdp soc handle
 * @vdev_id: id of the vdev whose peers are flushed
 * @mlo_peers_only: when true, only MLO peers are flushed
 *
 * Return: none
 */
static void
dp_recovery_vdev_flush_peers(struct cdp_soc_t *cdp_soc,
			     uint8_t vdev_id,
			     bool mlo_peers_only)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_vdev *vdev;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);

	if (!vdev)
		return;

	dp_vdev_flush_peers((struct cdp_vdev *)vdev, false, mlo_peers_only);
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
}
13703 #endif
13704 #ifdef QCA_GET_TSF_VIA_REG
13705 /**
13706  * dp_get_tsf_time() - get tsf time
13707  * @soc: Datapath soc handle
13708  * @mac_id: mac_id
13709  * @tsf: pointer to update tsf value
13710  * @tsf_sync_soc_time: pointer to update tsf sync time
13711  *
13712  * Return: None.
13713  */
13714 static inline void
13715 dp_get_tsf_time(struct cdp_soc_t *soc_hdl, uint32_t tsf_id, uint32_t mac_id,
13716 		uint64_t *tsf, uint64_t *tsf_sync_soc_time)
13717 {
13718 	hal_get_tsf_time(((struct dp_soc *)soc_hdl)->hal_soc, tsf_id, mac_id,
13719 			 tsf, tsf_sync_soc_time);
13720 }
13721 #else
/* stub when QCA_GET_TSF_VIA_REG is not enabled: outputs left untouched */
static inline void
dp_get_tsf_time(struct cdp_soc_t *soc_hdl, uint32_t tsf_id, uint32_t mac_id,
		uint64_t *tsf, uint64_t *tsf_sync_soc_time)
{
}
13727 #endif
13728 
13729 /**
13730  * dp_set_tx_pause() - Pause or resume tx path
13731  * @soc_hdl: Datapath soc handle
13732  * @flag: set or clear is_tx_pause
13733  *
13734  * Return: None.
13735  */
13736 static inline
13737 void dp_set_tx_pause(struct cdp_soc_t *soc_hdl, bool flag)
13738 {
13739 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13740 
13741 	soc->is_tx_pause = flag;
13742 }
13743 
/*
 * dp_ops_cmn: common datapath ops table registered with the CDP layer —
 * soc/pdev/vdev/peer lifecycle, AST management, BA session handling,
 * stats and configuration entry points.
 */
static struct cdp_cmn_ops dp_ops_cmn = {
	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
	.txrx_vdev_attach = dp_vdev_attach_wifi3,
	.txrx_vdev_detach = dp_vdev_detach_wifi3,
	.txrx_pdev_attach = dp_pdev_attach_wifi3,
	.txrx_pdev_post_attach = dp_pdev_post_attach_wifi3,
	.txrx_pdev_detach = dp_pdev_detach_wifi3,
	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
	.txrx_peer_create = dp_peer_create_wifi3,
	.txrx_peer_setup = dp_peer_setup_wifi3,
#ifdef FEATURE_AST
	.txrx_peer_teardown = dp_peer_teardown_wifi3,
#else
	.txrx_peer_teardown = NULL,
#endif
	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
	.txrx_peer_get_ast_info_by_pdev =
		dp_peer_get_ast_info_by_pdevid_wifi3,
	.txrx_peer_ast_delete_by_soc =
		dp_peer_ast_entry_del_by_soc,
	.txrx_peer_ast_delete_by_pdev =
		dp_peer_ast_entry_del_by_pdev,
	.txrx_peer_delete = dp_peer_delete_wifi3,
#ifdef DP_RX_UDP_OVER_PEER_ROAM
	.txrx_update_roaming_peer = dp_update_roaming_peer_wifi3,
#endif
	.txrx_vdev_register = dp_vdev_register_wifi3,
	.txrx_soc_detach = dp_soc_detach_wifi3,
	.txrx_soc_deinit = dp_soc_deinit_wifi3,
	.txrx_soc_init = dp_soc_init_wifi3,
#ifndef QCA_HOST_MODE_WIFI_DISABLED
	.txrx_tso_soc_attach = dp_tso_soc_attach,
	.txrx_tso_soc_detach = dp_tso_soc_detach,
	.tx_send = dp_tx_send,
	.tx_send_exc = dp_tx_send_exception,
#endif
	.set_tx_pause = dp_set_tx_pause,
	.txrx_pdev_init = dp_pdev_init_wifi3,
	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
	.txrx_ath_getstats = dp_get_device_stats,
	.addba_requestprocess = dp_addba_requestprocess_wifi3,
	.addba_responsesetup = dp_addba_responsesetup_wifi3,
	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
	.delba_process = dp_delba_process_wifi3,
	.set_addba_response = dp_set_addba_response,
	.flush_cache_rx_queue = NULL,
	.tid_update_ba_win_size = dp_rx_tid_update_ba_win_size,
	/* TODO: get API's for dscp-tid need to be added*/
	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
	.txrx_get_total_per = dp_get_total_per,
	.txrx_stats_request = dp_txrx_stats_request,
	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
	.display_stats = dp_txrx_dump_stats,
	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
	.txrx_intr_detach = dp_soc_interrupt_detach,
	.set_pn_check = dp_set_pn_check_wifi3,
	.set_key_sec_type = dp_set_key_sec_type_wifi3,
	.update_config_parameters = dp_update_config_parameters,
	/* TODO: Add other functions */
	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
	.get_vdev_dp_ext_txrx_handle = dp_vdev_get_dp_ext_handle,
	.set_vdev_dp_ext_txrx_handle = dp_vdev_set_dp_ext_handle,
	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
	.map_pdev_to_lmac = dp_soc_map_pdev_to_lmac,
	.handle_mode_change = dp_soc_handle_pdev_mode_change,
	.set_pdev_status_down = dp_soc_set_pdev_status_down,
	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
	.set_soc_param = dp_soc_set_param,
	.txrx_get_os_rx_handles_from_vdev =
					dp_get_os_rx_handles_from_vdev_wifi3,
	.delba_tx_completion = dp_delba_tx_completion_wifi3,
	.get_dp_capabilities = dp_get_cfg_capabilities,
	.txrx_get_cfg = dp_get_cfg,
	.set_rate_stats_ctx = dp_soc_set_rate_stats_ctx,
	.get_rate_stats_ctx = dp_soc_get_rate_stats_ctx,
	.txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats,
	.txrx_flush_rate_stats_request = dp_flush_rate_stats_req,
	.txrx_peer_get_peerstats_ctx = dp_peer_get_peerstats_ctx,

	.set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3,
	.set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3,

	.txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler,
#ifdef QCA_MULTIPASS_SUPPORT
	.set_vlan_groupkey = dp_set_vlan_groupkey,
#endif
	.get_peer_mac_list = dp_get_peer_mac_list,
	.get_peer_id = dp_get_peer_id,
#ifdef QCA_SUPPORT_WDS_EXTENDED
	.set_wds_ext_peer_rx = dp_wds_ext_set_peer_rx,
#endif /* QCA_SUPPORT_WDS_EXTENDED */

#if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
	.txrx_drain = dp_drain_txrx,
#endif
#if defined(FEATURE_RUNTIME_PM)
	.set_rtpm_tput_policy = dp_set_rtpm_tput_policy_requirement,
#endif
#ifdef WLAN_SYSFS_DP_STATS
	.txrx_sysfs_fill_stats = dp_sysfs_fill_stats,
	.txrx_sysfs_set_stat_type = dp_sysfs_set_stat_type,
#endif /* WLAN_SYSFS_DP_STATS */
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
	.set_pkt_capture_mode = dp_set_pkt_capture_mode,
#endif
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
	.txrx_recovery_vdev_flush_peers = dp_recovery_vdev_flush_peers,
#endif
	.txrx_umac_reset_deinit = dp_soc_umac_reset_deinit,
	.txrx_get_tsf_time = dp_get_tsf_time,
};
13867 
/*
 * dp_ops_ctrl: control-path ops table — peer authorization, parameter
 * get/set at psoc/pdev/vdev/peer scope, WDI event (un)subscription and
 * protocol tagging hooks.
 */
static struct cdp_ctrl_ops dp_ops_ctrl = {
	.txrx_peer_authorize = dp_peer_authorize,
	.txrx_peer_get_authorize = dp_peer_get_authorize,
#ifdef VDEV_PEER_PROTOCOL_COUNT
	.txrx_enable_peer_protocol_count = dp_enable_vdev_peer_protocol_count,
	.txrx_set_peer_protocol_drop_mask =
		dp_enable_vdev_peer_protocol_drop_mask,
	.txrx_is_peer_protocol_count_enabled =
		dp_is_vdev_peer_protocol_count_enabled,
	.txrx_get_peer_protocol_drop_mask = dp_get_vdev_peer_protocol_drop_mask,
#endif
	.txrx_set_vdev_param = dp_set_vdev_param,
	.txrx_set_psoc_param = dp_set_psoc_param,
	.txrx_get_psoc_param = dp_get_psoc_param,
	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
	.txrx_get_sec_type = dp_get_sec_type,
	.txrx_wdi_event_sub = dp_wdi_event_sub,
	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
	.txrx_set_pdev_param = dp_set_pdev_param,
	.txrx_get_pdev_param = dp_get_pdev_param,
	.txrx_set_peer_param = dp_set_peer_param,
	.txrx_get_peer_param = dp_get_peer_param,
#ifdef VDEV_PEER_PROTOCOL_COUNT
	.txrx_peer_protocol_cnt = dp_peer_stats_update_protocol_cnt,
#endif
#ifdef WLAN_SUPPORT_MSCS
	.txrx_record_mscs_params = dp_record_mscs_params,
#endif
	.set_key = dp_set_michael_key,
	.txrx_get_vdev_param = dp_get_vdev_param,
	.calculate_delay_stats = dp_calculate_delay_stats,
#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
	.txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag,
#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
	.txrx_dump_pdev_rx_protocol_tag_stats =
				dp_dump_pdev_rx_protocol_tag_stats,
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
	.txrx_set_rx_flow_tag = dp_set_rx_flow_tag,
	.txrx_dump_rx_flow_tag_stats = dp_dump_rx_flow_tag_stats,
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */
#ifdef QCA_MULTIPASS_SUPPORT
	.txrx_peer_set_vlan_id = dp_peer_set_vlan_id,
#endif /*QCA_MULTIPASS_SUPPORT*/
#if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
	.txrx_set_delta_tsf = dp_set_delta_tsf,
#endif
#ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
	.txrx_set_tsf_ul_delay_report = dp_set_tsf_ul_delay_report,
	.txrx_get_uplink_delay = dp_get_uplink_delay,
#endif
#ifdef QCA_UNDECODED_METADATA_SUPPORT
	.txrx_set_pdev_phyrx_error_mask = dp_set_pdev_phyrx_error_mask,
	.txrx_get_pdev_phyrx_error_mask = dp_get_pdev_phyrx_error_mask,
#endif
	.txrx_peer_flush_frags = dp_peer_flush_frags,
};
13927 
/* dp_ops_me: mcast-enhancement ops, populated only for host-mode + IQUE */
static struct cdp_me_ops dp_ops_me = {
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef ATH_SUPPORT_IQUE
	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
#endif
#endif
};
13937 
/*
 * dp_ops_host_stats: host statistics ops table — peer/pdev/vdev/soc stats
 * retrieval, publish/reset hooks and feature-gated capture/delay stats.
 */
static struct cdp_host_stats_ops dp_ops_host_stats = {
	.txrx_per_peer_stats = dp_get_host_peer_stats,
	.get_fw_peer_stats = dp_get_fw_peer_stats,
	.get_htt_stats = dp_get_htt_stats,
	.txrx_stats_publish = dp_txrx_stats_publish,
	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
	.txrx_get_soc_stats = dp_txrx_get_soc_stats,
	.txrx_get_peer_stats_param = dp_txrx_get_peer_stats_param,
	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
	.txrx_update_vdev_stats = dp_txrx_update_vdev_host_stats,
	.txrx_get_peer_delay_stats = dp_txrx_get_peer_delay_stats,
	.txrx_get_peer_jitter_stats = dp_txrx_get_peer_jitter_stats,
#ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
	.txrx_alloc_vdev_stats_id = dp_txrx_alloc_vdev_stats_id,
	.txrx_reset_vdev_stats_id = dp_txrx_reset_vdev_stats_id,
#endif
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	.get_peer_tx_capture_stats = dp_peer_get_tx_capture_stats,
	.get_pdev_tx_capture_stats = dp_pdev_get_tx_capture_stats,
#endif /* WLAN_TX_PKT_CAPTURE_ENH */
#ifdef HW_TX_DELAY_STATS_ENABLE
	.enable_disable_vdev_tx_delay_stats =
				dp_enable_disable_vdev_tx_delay_stats,
	.is_tx_delay_stats_enabled = dp_check_vdev_tx_delay_stats_enabled,
#endif
	.txrx_get_pdev_tid_stats = dp_pdev_get_tid_stats,
#ifdef WLAN_TELEMETRY_STATS_SUPPORT
	.txrx_pdev_telemetry_stats = dp_get_pdev_telemetry_stats,
	.txrx_peer_telemetry_stats = dp_get_peer_telemetry_stats,
#endif
	.txrx_get_peer_extd_rate_link_stats =
					dp_get_peer_extd_rate_link_stats,
	.get_pdev_obss_stats = dp_get_obss_stats,
	.clear_pdev_obss_pd_stats = dp_clear_pdev_obss_pd_stats,
	/* TODO */
};
13977 
/* Raw-mode (802.11 frame format) ops: none implemented yet for WIFI 3.0 */
static struct cdp_raw_ops dp_ops_raw = {
	/* TODO */
};
13981 
#ifdef PEER_FLOW_CONTROL
/* Peer flow-control ops; the single positional initializer fills the
 * first member of cdp_pflow_ops (pdev flow-control configuration).
 */
static struct cdp_pflow_ops dp_ops_pflow = {
	dp_tx_flow_ctrl_configure_pdev,
};
#endif /* PEER_FLOW_CONTROL */
13987 
13988 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
/* Channel Frequency Response (CFR) capture ops for enhanced CFR.
 * txrx_cfr_filter is intentionally NULL: filtering is not implemented
 * for this target.
 */
static struct cdp_cfr_ops dp_ops_cfr = {
	.txrx_cfr_filter = NULL,
	.txrx_get_cfr_rcc = dp_get_cfr_rcc,
	.txrx_set_cfr_rcc = dp_set_cfr_rcc,
	.txrx_get_cfr_dbg_stats = dp_get_cfr_dbg_stats,
	.txrx_clear_cfr_dbg_stats = dp_clear_cfr_dbg_stats,
};
13996 #endif
13997 
13998 #ifdef WLAN_SUPPORT_MSCS
/* MSCS (Mirrored Stream Classification Service) ops */
static struct cdp_mscs_ops dp_ops_mscs = {
	.mscs_peer_lookup_n_get_priority = dp_mscs_peer_lookup_n_get_priority,
};
14002 #endif
14003 
14004 #ifdef WLAN_SUPPORT_MESH_LATENCY
/* Mesh latency ops: per-peer latency parameter updates */
static struct cdp_mesh_latency_ops dp_ops_mesh_latency = {
	.mesh_latency_update_peer_parameter =
		dp_mesh_latency_update_peer_parameter,
};
14009 #endif
14010 
14011 #ifdef WLAN_SUPPORT_SCS
/* SCS (Stream Classification Service) ops */
static struct cdp_scs_ops dp_ops_scs = {
	.scs_peer_lookup_n_rule_match = dp_scs_peer_lookup_n_rule_match,
};
14015 #endif
14016 
14017 #ifdef CONFIG_SAWF_DEF_QUEUES
/* SAWF (Service Aware WiFi) ops: default-queue mapping plus, when
 * CONFIG_SAWF is set, per-peer SAWF stats and telemetry hooks.
 */
static struct cdp_sawf_ops dp_ops_sawf = {
	.sawf_def_queues_map_req = dp_sawf_def_queues_map_req,
	.sawf_def_queues_unmap_req = dp_sawf_def_queues_unmap_req,
	.sawf_def_queues_get_map_report =
		dp_sawf_def_queues_get_map_report,
#ifdef CONFIG_SAWF
	.txrx_get_peer_sawf_delay_stats = dp_sawf_get_peer_delay_stats,
	.txrx_get_peer_sawf_tx_stats = dp_sawf_get_peer_tx_stats,
	.sawf_mpdu_stats_req = dp_sawf_mpdu_stats_req,
	.sawf_mpdu_details_stats_req = dp_sawf_mpdu_details_stats_req,
	.txrx_sawf_set_mov_avg_params = dp_sawf_set_mov_avg_params,
	.txrx_sawf_set_sla_params = dp_sawf_set_sla_params,
	/* field name carries an upstream "telemtery" spelling; it must
	 * match the cdp_sawf_ops declaration, so it stays as-is
	 */
	.txrx_sawf_init_telemtery_params = dp_sawf_init_telemetry_params,
	.telemetry_get_throughput_stats = dp_sawf_get_tx_stats,
	.telemetry_get_mpdu_stats = dp_sawf_get_mpdu_sched_stats,
	.telemetry_get_drop_stats = dp_sawf_get_drop_stats,
#endif
};
14036 #endif
14037 
14038 #if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
14039 /**
14040  * dp_flush_ring_hptp() - Update ring shadow
14041  *			  register HP/TP address when runtime
14042  *                        resume
14043  * @opaque_soc: DP soc context
14044  *
14045  * Return: None
14046  */
14047 static
14048 void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
14049 {
14050 	if (hal_srng && hal_srng_get_clear_event(hal_srng,
14051 						 HAL_SRNG_FLUSH_EVENT)) {
14052 		/* Acquire the lock */
14053 		hal_srng_access_start(soc->hal_soc, hal_srng);
14054 
14055 		hal_srng_access_end(soc->hal_soc, hal_srng);
14056 
14057 		hal_srng_set_flush_last_ts(hal_srng);
14058 
14059 		dp_debug("flushed");
14060 	}
14061 }
14062 #endif
14063 
14064 #ifdef DP_TX_TRACKING
14065 
14066 #define DP_TX_COMP_MAX_LATENCY_MS 30000
14067 /**
14068  * dp_tx_comp_delay_check() - calculate time latency for tx completion per pkt
14069  * @tx_desc: tx descriptor
14070  *
14071  * Calculate time latency for tx completion per pkt and trigger self recovery
14072  * when the delay is more than threshold value.
14073  *
14074  * Return: True if delay is more than threshold
14075  */
14076 static bool dp_tx_comp_delay_check(struct dp_tx_desc_s *tx_desc)
14077 {
14078 	uint64_t time_latency, timestamp_tick = tx_desc->timestamp_tick;
14079 	qdf_ktime_t current_time = qdf_ktime_real_get();
14080 	qdf_ktime_t timestamp = tx_desc->timestamp;
14081 
14082 	if (!timestamp)
14083 		return false;
14084 
14085 	if (dp_tx_pkt_tracepoints_enabled()) {
14086 		time_latency = qdf_ktime_to_ms(current_time) -
14087 				qdf_ktime_to_ms(timestamp);
14088 		if (time_latency >= DP_TX_COMP_MAX_LATENCY_MS) {
14089 			dp_err_rl("enqueued: %llu ms, current : %llu ms",
14090 				  timestamp, current_time);
14091 			return true;
14092 		}
14093 	} else {
14094 		current_time = qdf_system_ticks();
14095 		time_latency = qdf_system_ticks_to_msecs(current_time -
14096 							 timestamp_tick);
14097 		if (time_latency >= DP_TX_COMP_MAX_LATENCY_MS) {
14098 			dp_err_rl("enqueued: %u ms, current : %u ms",
14099 				  qdf_system_ticks_to_msecs(timestamp),
14100 				  qdf_system_ticks_to_msecs(current_time));
14101 			return true;
14102 		}
14103 	}
14104 
14105 	return false;
14106 }
14107 
14108 #if defined(CONFIG_SLUB_DEBUG_ON)
14109 /**
14110  * dp_find_missing_tx_comp() - check for leaked descriptor in tx path
14111  * @soc - DP SOC context
14112  *
14113  * Parse through descriptors in all pools and validate magic number and
14114  * completion time. Trigger self recovery if magic value is corrupted.
14115  *
14116  * Return: None.
14117  */
14118 static void dp_find_missing_tx_comp(struct dp_soc *soc)
14119 {
14120 	uint8_t i;
14121 	uint32_t j;
14122 	uint32_t num_desc, page_id, offset;
14123 	uint16_t num_desc_per_page;
14124 	struct dp_tx_desc_s *tx_desc = NULL;
14125 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
14126 	bool send_fw_stats_cmd = false;
14127 	uint8_t vdev_id;
14128 
14129 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
14130 		tx_desc_pool = &soc->tx_desc[i];
14131 		if (!(tx_desc_pool->pool_size) ||
14132 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
14133 		    !(tx_desc_pool->desc_pages.cacheable_pages))
14134 			continue;
14135 
14136 		num_desc = tx_desc_pool->pool_size;
14137 		num_desc_per_page =
14138 			tx_desc_pool->desc_pages.num_element_per_page;
14139 		for (j = 0; j < num_desc; j++) {
14140 			page_id = j / num_desc_per_page;
14141 			offset = j % num_desc_per_page;
14142 
14143 			if (qdf_unlikely(!(tx_desc_pool->
14144 					 desc_pages.cacheable_pages)))
14145 				break;
14146 
14147 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
14148 			if (tx_desc->magic == DP_TX_MAGIC_PATTERN_FREE) {
14149 				continue;
14150 			} else if (tx_desc->magic ==
14151 				   DP_TX_MAGIC_PATTERN_INUSE) {
14152 				if (dp_tx_comp_delay_check(tx_desc)) {
14153 					dp_err_rl("Tx completion not rcvd for id: %u",
14154 						  tx_desc->id);
14155 
14156 					if (!send_fw_stats_cmd) {
14157 						send_fw_stats_cmd = true;
14158 						vdev_id = i;
14159 					}
14160 				}
14161 			} else {
14162 				dp_err_rl("tx desc %u corrupted, flags: 0x%x",
14163 				       tx_desc->id, tx_desc->flags);
14164 			}
14165 		}
14166 	}
14167 
14168 	/*
14169 	 * The unit test command to dump FW stats is required only once as the
14170 	 * stats are dumped at pdev level and not vdev level.
14171 	 */
14172 	if (send_fw_stats_cmd && soc->cdp_soc.ol_ops->dp_send_unit_test_cmd) {
14173 		uint32_t fw_stats_args[2] = {533, 1};
14174 
14175 		soc->cdp_soc.ol_ops->dp_send_unit_test_cmd(vdev_id,
14176 							   WLAN_MODULE_TX, 2,
14177 							   fw_stats_args);
14178 	}
14179 }
14180 #else
/**
 * dp_find_missing_tx_comp() - check for leaked descriptor in tx path
 * @soc: DP SOC context
 *
 * Non-SLUB-debug variant: walks all tx descriptor pools and, for stuck
 * in-use descriptors that no longer belong to a valid vdev, force-frees
 * the buffer and releases the descriptor instead of requesting FW stats.
 *
 * Return: None.
 */
static void dp_find_missing_tx_comp(struct dp_soc *soc)
{
	uint8_t i;
	uint32_t j;
	uint32_t num_desc, page_id, offset;
	uint16_t num_desc_per_page;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		tx_desc_pool = &soc->tx_desc[i];
		/* Skip pools that are empty, inactive or already freed */
		if (!(tx_desc_pool->pool_size) ||
		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
		    !(tx_desc_pool->desc_pages.cacheable_pages))
			continue;

		num_desc = tx_desc_pool->pool_size;
		num_desc_per_page =
			tx_desc_pool->desc_pages.num_element_per_page;
		for (j = 0; j < num_desc; j++) {
			page_id = j / num_desc_per_page;
			offset = j % num_desc_per_page;

			/* Pages may be freed while we walk; re-check before
			 * each descriptor lookup
			 */
			if (qdf_unlikely(!(tx_desc_pool->
					 desc_pages.cacheable_pages)))
				break;

			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
			if (tx_desc->magic == DP_TX_MAGIC_PATTERN_FREE) {
				continue;
			} else if (tx_desc->magic ==
				   DP_TX_MAGIC_PATTERN_INUSE) {
				if (dp_tx_comp_delay_check(tx_desc)) {
					dp_err_rl("Tx completion not rcvd for id: %u",
						  tx_desc->id);
					/* Only reclaim descriptors whose vdev
					 * is gone; others may still complete
					 */
					if (tx_desc->vdev_id == DP_INVALID_VDEV_ID) {
						tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
						dp_tx_comp_free_buf(soc,
								    tx_desc,
								    false);
						dp_tx_desc_release(tx_desc, i);
						DP_STATS_INC(soc,
							     tx.tx_comp_force_freed, 1);
						dp_err_rl("Tx completion force freed");
					}
				}
			} else {
				/* Neither FREE nor INUSE: magic corrupted */
				dp_err_rl("tx desc %u corrupted, flags: 0x%x",
					  tx_desc->id, tx_desc->flags);
			}
		}
	}
}
14234 #endif /* CONFIG_SLUB_DEBUG_ON */
14235 #else
/* DP_TX_TRACKING disabled: no-op stub so callers need no ifdef guards */
static inline void dp_find_missing_tx_comp(struct dp_soc *soc)
{
}
14239 #endif
14240 
14241 #ifdef FEATURE_RUNTIME_PM
14242 /**
14243  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
14244  * @soc_hdl: Datapath soc handle
14245  * @pdev_id: id of data path pdev handle
14246  *
14247  * DP is ready to runtime suspend if there are no pending TX packets.
14248  *
14249  * Return: QDF_STATUS
14250  */
14251 static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
14252 {
14253 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14254 	struct dp_pdev *pdev;
14255 	uint8_t i;
14256 	int32_t tx_pending;
14257 
14258 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14259 	if (!pdev) {
14260 		dp_err("pdev is NULL");
14261 		return QDF_STATUS_E_INVAL;
14262 	}
14263 
14264 	/* Abort if there are any pending TX packets */
14265 	tx_pending = dp_get_tx_pending(dp_pdev_to_cdp_pdev(pdev));
14266 	if (tx_pending) {
14267 		dp_info_rl("%pK: Abort suspend due to pending TX packets %d",
14268 			   soc, tx_pending);
14269 		dp_find_missing_tx_comp(soc);
14270 		/* perform a force flush if tx is pending */
14271 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
14272 			hal_srng_set_event(soc->tcl_data_ring[i].hal_srng,
14273 					   HAL_SRNG_FLUSH_EVENT);
14274 			dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
14275 		}
14276 		qdf_atomic_set(&soc->tx_pending_rtpm, 0);
14277 
14278 		return QDF_STATUS_E_AGAIN;
14279 	}
14280 
14281 	if (dp_runtime_get_refcount(soc)) {
14282 		dp_init_info("refcount: %d", dp_runtime_get_refcount(soc));
14283 
14284 		return QDF_STATUS_E_AGAIN;
14285 	}
14286 
14287 	if (soc->intr_mode == DP_INTR_POLL)
14288 		qdf_timer_stop(&soc->int_timer);
14289 
14290 	dp_rx_fst_update_pm_suspend_status(soc, true);
14291 
14292 	return QDF_STATUS_SUCCESS;
14293 }
14294 
/* Max iterations to wait for the DP runtime refcount to drain */
#define DP_FLUSH_WAIT_CNT 10
/* Sleep per iteration while waiting, in milliseconds */
#define DP_RUNTIME_SUSPEND_WAIT_MS 10
/**
 * dp_runtime_resume() - ensure DP is ready to runtime resume
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * Resume DP for runtime PM: restart the poll timer if needed, wait
 * (bounded) for the runtime refcount to drain, then flush shadow HP/TP
 * for the TCL data rings and the REO command ring.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	int i, suspend_wait = 0;

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

	/*
	 * Wait until dp runtime refcount becomes zero or time out, then flush
	 * pending tx for runtime suspend.
	 */
	while (dp_runtime_get_refcount(soc) &&
	       suspend_wait < DP_FLUSH_WAIT_CNT) {
		qdf_sleep(DP_RUNTIME_SUSPEND_WAIT_MS);
		suspend_wait++;
	}

	/* NOTE(review): iterates MAX_TCL_DATA_RINGS, while suspend uses
	 * soc->num_tcl_data_rings; safe because dp_flush_ring_hptp ignores
	 * NULL/eventless rings, but confirm the asymmetry is intended.
	 */
	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
	}
	qdf_atomic_set(&soc->tx_pending_rtpm, 0);

	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
	dp_rx_fst_update_pm_suspend_status(soc, false);

	return QDF_STATUS_SUCCESS;
}
14334 #endif /* FEATURE_RUNTIME_PM */
14335 
14336 /**
14337  * dp_tx_get_success_ack_stats() - get tx success completion count
14338  * @soc_hdl: Datapath soc handle
14339  * @vdevid: vdev identifier
14340  *
14341  * Return: tx success ack count
14342  */
14343 static uint32_t dp_tx_get_success_ack_stats(struct cdp_soc_t *soc_hdl,
14344 					    uint8_t vdev_id)
14345 {
14346 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14347 	struct cdp_vdev_stats *vdev_stats = NULL;
14348 	uint32_t tx_success;
14349 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
14350 						     DP_MOD_ID_CDP);
14351 
14352 	if (!vdev) {
14353 		dp_cdp_err("%pK: Invalid vdev id %d", soc, vdev_id);
14354 		return 0;
14355 	}
14356 
14357 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
14358 	if (!vdev_stats) {
14359 		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats", soc);
14360 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
14361 		return 0;
14362 	}
14363 
14364 	dp_aggregate_vdev_stats(vdev, vdev_stats);
14365 
14366 	tx_success = vdev_stats->tx.tx_success.num;
14367 	qdf_mem_free(vdev_stats);
14368 
14369 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
14370 	return tx_success;
14371 }
14372 
14373 #ifdef WLAN_SUPPORT_DATA_STALL
14374 /**
14375  * dp_register_data_stall_detect_cb() - register data stall callback
14376  * @soc_hdl: Datapath soc handle
14377  * @pdev_id: id of data path pdev handle
14378  * @data_stall_detect_callback: data stall callback function
14379  *
14380  * Return: QDF_STATUS Enumeration
14381  */
14382 static
14383 QDF_STATUS dp_register_data_stall_detect_cb(
14384 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
14385 			data_stall_detect_cb data_stall_detect_callback)
14386 {
14387 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14388 	struct dp_pdev *pdev;
14389 
14390 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14391 	if (!pdev) {
14392 		dp_err("pdev NULL!");
14393 		return QDF_STATUS_E_INVAL;
14394 	}
14395 
14396 	pdev->data_stall_detect_callback = data_stall_detect_callback;
14397 	return QDF_STATUS_SUCCESS;
14398 }
14399 
14400 /**
14401  * dp_deregister_data_stall_detect_cb() - de-register data stall callback
14402  * @soc_hdl: Datapath soc handle
14403  * @pdev_id: id of data path pdev handle
14404  * @data_stall_detect_callback: data stall callback function
14405  *
14406  * Return: QDF_STATUS Enumeration
14407  */
14408 static
14409 QDF_STATUS dp_deregister_data_stall_detect_cb(
14410 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
14411 			data_stall_detect_cb data_stall_detect_callback)
14412 {
14413 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14414 	struct dp_pdev *pdev;
14415 
14416 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14417 	if (!pdev) {
14418 		dp_err("pdev NULL!");
14419 		return QDF_STATUS_E_INVAL;
14420 	}
14421 
14422 	pdev->data_stall_detect_callback = NULL;
14423 	return QDF_STATUS_SUCCESS;
14424 }
14425 
14426 /**
14427  * dp_txrx_post_data_stall_event() - post data stall event
14428  * @soc_hdl: Datapath soc handle
14429  * @indicator: Module triggering data stall
14430  * @data_stall_type: data stall event type
14431  * @pdev_id: pdev id
14432  * @vdev_id_bitmap: vdev id bitmap
14433  * @recovery_type: data stall recovery type
14434  *
14435  * Return: None
14436  */
14437 static void
14438 dp_txrx_post_data_stall_event(struct cdp_soc_t *soc_hdl,
14439 			      enum data_stall_log_event_indicator indicator,
14440 			      enum data_stall_log_event_type data_stall_type,
14441 			      uint32_t pdev_id, uint32_t vdev_id_bitmap,
14442 			      enum data_stall_log_recovery_type recovery_type)
14443 {
14444 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14445 	struct data_stall_event_info data_stall_info;
14446 	struct dp_pdev *pdev;
14447 
14448 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14449 	if (!pdev) {
14450 		dp_err("pdev NULL!");
14451 		return;
14452 	}
14453 
14454 	if (!pdev->data_stall_detect_callback) {
14455 		dp_err("data stall cb not registered!");
14456 		return;
14457 	}
14458 
14459 	dp_info("data_stall_type: %x pdev_id: %d",
14460 		data_stall_type, pdev_id);
14461 
14462 	data_stall_info.indicator = indicator;
14463 	data_stall_info.data_stall_type = data_stall_type;
14464 	data_stall_info.vdev_id_bitmap = vdev_id_bitmap;
14465 	data_stall_info.pdev_id = pdev_id;
14466 	data_stall_info.recovery_type = recovery_type;
14467 
14468 	pdev->data_stall_detect_callback(&data_stall_info);
14469 }
14470 #endif /* WLAN_SUPPORT_DATA_STALL */
14471 
14472 #ifdef WLAN_FEATURE_STATS_EXT
14473 /* rx hw stats event wait timeout in ms */
14474 #define DP_REO_STATUS_STATS_TIMEOUT 1500
14475 /**
14476  * dp_txrx_ext_stats_request - request dp txrx extended stats request
14477  * @soc_hdl: soc handle
14478  * @pdev_id: pdev id
14479  * @req: stats request
14480  *
14481  * Return: QDF_STATUS
14482  */
14483 static QDF_STATUS
14484 dp_txrx_ext_stats_request(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
14485 			  struct cdp_txrx_ext_stats *req)
14486 {
14487 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
14488 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14489 	int i = 0;
14490 	int tcl_ring_full = 0;
14491 
14492 	if (!pdev) {
14493 		dp_err("pdev is null");
14494 		return QDF_STATUS_E_INVAL;
14495 	}
14496 
14497 	dp_aggregate_pdev_stats(pdev);
14498 
14499 	for(i = 0 ; i < MAX_TCL_DATA_RINGS; i++)
14500 		tcl_ring_full += soc->stats.tx.tcl_ring_full[i];
14501 
14502 	req->tx_msdu_enqueue = pdev->stats.tx_i.processed.num;
14503 	req->tx_msdu_overflow = tcl_ring_full;
14504 	req->rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
14505 	req->rx_mpdu_delivered = soc->ext_stats.rx_mpdu_received;
14506 	req->rx_mpdu_missed = pdev->stats.err.reo_error;
14507 	/* only count error source from RXDMA */
14508 	req->rx_mpdu_error = pdev->stats.err.rxdma_error;
14509 
14510 	dp_info("ext stats: tx_msdu_enq = %u, tx_msdu_overflow = %u, "
14511 		"tx_mpdu_recieve = %u, rx_mpdu_delivered = %u, "
14512 		"rx_mpdu_missed = %u, rx_mpdu_error = %u",
14513 		req->tx_msdu_enqueue,
14514 		req->tx_msdu_overflow,
14515 		req->rx_mpdu_received,
14516 		req->rx_mpdu_delivered,
14517 		req->rx_mpdu_missed,
14518 		req->rx_mpdu_error);
14519 
14520 	return QDF_STATUS_SUCCESS;
14521 }
14522 
14523 /**
14524  * dp_rx_hw_stats_cb - request rx hw stats response callback
14525  * @soc: soc handle
14526  * @cb_ctxt: callback context
14527  * @reo_status: reo command response status
14528  *
14529  * Return: None
14530  */
14531 static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
14532 			      union hal_reo_status *reo_status)
14533 {
14534 	struct dp_req_rx_hw_stats_t *rx_hw_stats = cb_ctxt;
14535 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
14536 	bool is_query_timeout;
14537 
14538 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
14539 	is_query_timeout = rx_hw_stats->is_query_timeout;
14540 	/* free the cb_ctxt if all pending tid stats query is received */
14541 	if (qdf_atomic_dec_and_test(&rx_hw_stats->pending_tid_stats_cnt)) {
14542 		if (!is_query_timeout) {
14543 			qdf_event_set(&soc->rx_hw_stats_event);
14544 			soc->is_last_stats_ctx_init = false;
14545 		}
14546 
14547 		qdf_mem_free(rx_hw_stats);
14548 	}
14549 
14550 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
14551 		dp_info("REO stats failure %d",
14552 			queue_status->header.status);
14553 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14554 		return;
14555 	}
14556 
14557 	if (!is_query_timeout) {
14558 		soc->ext_stats.rx_mpdu_received +=
14559 					queue_status->mpdu_frms_cnt;
14560 		soc->ext_stats.rx_mpdu_missed +=
14561 					queue_status->hole_cnt;
14562 	}
14563 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14564 }
14565 
14566 /**
14567  * dp_request_rx_hw_stats - request rx hardware stats
14568  * @soc_hdl: soc handle
14569  * @vdev_id: vdev id
14570  *
14571  * Return: None
14572  */
14573 static QDF_STATUS
14574 dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
14575 {
14576 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
14577 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
14578 						     DP_MOD_ID_CDP);
14579 	struct dp_peer *peer = NULL;
14580 	QDF_STATUS status;
14581 	struct dp_req_rx_hw_stats_t *rx_hw_stats;
14582 	int rx_stats_sent_cnt = 0;
14583 	uint32_t last_rx_mpdu_received;
14584 	uint32_t last_rx_mpdu_missed;
14585 
14586 	if (!vdev) {
14587 		dp_err("vdev is null for vdev_id: %u", vdev_id);
14588 		status = QDF_STATUS_E_INVAL;
14589 		goto out;
14590 	}
14591 
14592 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_CDP);
14593 
14594 	if (!peer) {
14595 		dp_err("Peer is NULL");
14596 		status = QDF_STATUS_E_INVAL;
14597 		goto out;
14598 	}
14599 
14600 	rx_hw_stats = qdf_mem_malloc(sizeof(*rx_hw_stats));
14601 
14602 	if (!rx_hw_stats) {
14603 		dp_err("malloc failed for hw stats structure");
14604 		status = QDF_STATUS_E_INVAL;
14605 		goto out;
14606 	}
14607 
14608 	qdf_event_reset(&soc->rx_hw_stats_event);
14609 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
14610 	/* save the last soc cumulative stats and reset it to 0 */
14611 	last_rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
14612 	last_rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
14613 	soc->ext_stats.rx_mpdu_received = 0;
14614 
14615 	rx_stats_sent_cnt =
14616 		dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, rx_hw_stats);
14617 	if (!rx_stats_sent_cnt) {
14618 		dp_err("no tid stats sent successfully");
14619 		qdf_mem_free(rx_hw_stats);
14620 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14621 		status = QDF_STATUS_E_INVAL;
14622 		goto out;
14623 	}
14624 	qdf_atomic_set(&rx_hw_stats->pending_tid_stats_cnt,
14625 		       rx_stats_sent_cnt);
14626 	rx_hw_stats->is_query_timeout = false;
14627 	soc->is_last_stats_ctx_init = true;
14628 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14629 
14630 	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
14631 				       DP_REO_STATUS_STATS_TIMEOUT);
14632 
14633 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
14634 	if (status != QDF_STATUS_SUCCESS) {
14635 		dp_info("rx hw stats event timeout");
14636 		if (soc->is_last_stats_ctx_init)
14637 			rx_hw_stats->is_query_timeout = true;
14638 		/**
14639 		 * If query timeout happened, use the last saved stats
14640 		 * for this time query.
14641 		 */
14642 		soc->ext_stats.rx_mpdu_received = last_rx_mpdu_received;
14643 		soc->ext_stats.rx_mpdu_missed = last_rx_mpdu_missed;
14644 	}
14645 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14646 
14647 out:
14648 	if (peer)
14649 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
14650 	if (vdev)
14651 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
14652 
14653 	return status;
14654 }
14655 
14656 /**
14657  * dp_reset_rx_hw_ext_stats - Reset rx hardware ext stats
14658  * @soc_hdl: soc handle
14659  *
14660  * Return: None
14661  */
14662 static
14663 void dp_reset_rx_hw_ext_stats(struct cdp_soc_t *soc_hdl)
14664 {
14665 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
14666 
14667 	soc->ext_stats.rx_mpdu_received = 0;
14668 	soc->ext_stats.rx_mpdu_missed = 0;
14669 }
14670 #endif /* WLAN_FEATURE_STATS_EXT */
14671 
14672 static
14673 uint32_t dp_get_tx_rings_grp_bitmap(struct cdp_soc_t *soc_hdl)
14674 {
14675 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
14676 
14677 	return soc->wlan_cfg_ctx->tx_rings_grp_bitmap;
14678 }
14679 
14680 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
14681 /**
14682  * dp_mark_first_wakeup_packet() - set flag to indicate that
14683  *    fw is compatible for marking first packet after wow wakeup
14684  * @soc_hdl: Datapath soc handle
14685  * @pdev_id: id of data path pdev handle
14686  * @value: 1 for enabled/ 0 for disabled
14687  *
14688  * Return: None
14689  */
14690 static void dp_mark_first_wakeup_packet(struct cdp_soc_t *soc_hdl,
14691 					uint8_t pdev_id, uint8_t value)
14692 {
14693 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14694 	struct dp_pdev *pdev;
14695 
14696 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14697 	if (!pdev) {
14698 		dp_err("pdev is NULL");
14699 		return;
14700 	}
14701 
14702 	pdev->is_first_wakeup_packet = value;
14703 }
14704 #endif
14705 
14706 #ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
14707 /**
14708  * dp_set_peer_txq_flush_config() - Set the peer txq flush configuration
14709  * @soc_hdl: Opaque handle to the DP soc object
14710  * @vdev_id: VDEV identifier
14711  * @mac: MAC address of the peer
14712  * @ac: access category mask
14713  * @tid: TID mask
14714  * @policy: Flush policy
14715  *
14716  * Return: 0 on success, errno on failure
14717  */
14718 static int dp_set_peer_txq_flush_config(struct cdp_soc_t *soc_hdl,
14719 					uint8_t vdev_id, uint8_t *mac,
14720 					uint8_t ac, uint32_t tid,
14721 					enum cdp_peer_txq_flush_policy policy)
14722 {
14723 	struct dp_soc *soc;
14724 
14725 	if (!soc_hdl) {
14726 		dp_err("soc is null");
14727 		return -EINVAL;
14728 	}
14729 	soc = cdp_soc_t_to_dp_soc(soc_hdl);
14730 	return target_if_peer_txq_flush_config(soc->ctrl_psoc, vdev_id,
14731 					       mac, ac, tid, policy);
14732 }
14733 #endif
14734 
14735 #ifdef CONNECTIVITY_PKTLOG
14736 /**
14737  * dp_register_packetdump_callback() - registers
14738  *  tx data packet, tx mgmt. packet and rx data packet
14739  *  dump callback handler.
14740  *
14741  * @soc_hdl: Datapath soc handle
14742  * @pdev_id: id of data path pdev handle
14743  * @dp_tx_packetdump_cb: tx packetdump cb
14744  * @dp_rx_packetdump_cb: rx packetdump cb
14745  *
14746  * This function is used to register tx data pkt, tx mgmt.
14747  * pkt and rx data pkt dump callback
14748  *
14749  * Return: None
14750  *
14751  */
14752 static inline
14753 void dp_register_packetdump_callback(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
14754 				     ol_txrx_pktdump_cb dp_tx_packetdump_cb,
14755 				     ol_txrx_pktdump_cb dp_rx_packetdump_cb)
14756 {
14757 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14758 	struct dp_pdev *pdev;
14759 
14760 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14761 	if (!pdev) {
14762 		dp_err("pdev is NULL!");
14763 		return;
14764 	}
14765 
14766 	pdev->dp_tx_packetdump_cb = dp_tx_packetdump_cb;
14767 	pdev->dp_rx_packetdump_cb = dp_rx_packetdump_cb;
14768 }
14769 
14770 /**
14771  * dp_deregister_packetdump_callback() - deregidters
14772  *  tx data packet, tx mgmt. packet and rx data packet
14773  *  dump callback handler
14774  * @soc_hdl: Datapath soc handle
14775  * @pdev_id: id of data path pdev handle
14776  *
14777  * This function is used to deregidter tx data pkt.,
14778  * tx mgmt. pkt and rx data pkt. dump callback
14779  *
14780  * Return: None
14781  *
14782  */
14783 static inline
14784 void dp_deregister_packetdump_callback(struct cdp_soc_t *soc_hdl,
14785 				       uint8_t pdev_id)
14786 {
14787 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14788 	struct dp_pdev *pdev;
14789 
14790 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14791 	if (!pdev) {
14792 		dp_err("pdev is NULL!");
14793 		return;
14794 	}
14795 
14796 	pdev->dp_tx_packetdump_cb = NULL;
14797 	pdev->dp_rx_packetdump_cb = NULL;
14798 }
14799 #endif
14800 
14801 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
14802 /**
14803  * dp_set_bus_vote_lvl_high() - Take a vote on bus bandwidth from dp
14804  * @soc_hdl: Datapath soc handle
14805  * @high: whether the bus bw is high or not
14806  *
14807  * Return: void
14808  */
14809 static void
14810 dp_set_bus_vote_lvl_high(ol_txrx_soc_handle soc_hdl, bool high)
14811 {
14812 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14813 
14814 	soc->high_throughput = high;
14815 }
14816 
14817 /**
14818  * dp_get_bus_vote_lvl_high() - get bus bandwidth vote to dp
14819  * @soc_hdl: Datapath soc handle
14820  *
14821  * Return: bool
14822  */
14823 static bool
14824 dp_get_bus_vote_lvl_high(ol_txrx_soc_handle soc_hdl)
14825 {
14826 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14827 
14828 	return soc->high_throughput;
14829 }
14830 #endif
14831 
14832 #ifdef DP_PEER_EXTENDED_API
/* Miscellaneous CDP ops: runtime PM, data-stall reporting, ext stats,
 * packet-dump callbacks and bus bandwidth voting; each group gated by
 * its feature flag.
 */
static struct cdp_misc_ops dp_ops_misc = {
#ifdef FEATURE_WLAN_TDLS
	.tx_non_std = dp_tx_non_std,
#endif /* FEATURE_WLAN_TDLS */
	.get_opmode = dp_get_opmode,
#ifdef FEATURE_RUNTIME_PM
	.runtime_suspend = dp_runtime_suspend,
	.runtime_resume = dp_runtime_resume,
#endif /* FEATURE_RUNTIME_PM */
	.get_num_rx_contexts = dp_get_num_rx_contexts,
	.get_tx_ack_stats = dp_tx_get_success_ack_stats,
#ifdef WLAN_SUPPORT_DATA_STALL
	.txrx_data_stall_cb_register = dp_register_data_stall_detect_cb,
	.txrx_data_stall_cb_deregister = dp_deregister_data_stall_detect_cb,
	.txrx_post_data_stall_event = dp_txrx_post_data_stall_event,
#endif

#ifdef WLAN_FEATURE_STATS_EXT
	.txrx_ext_stats_request = dp_txrx_ext_stats_request,
	.request_rx_hw_stats = dp_request_rx_hw_stats,
	.reset_rx_hw_ext_stats = dp_reset_rx_hw_ext_stats,
#endif /* WLAN_FEATURE_STATS_EXT */
	.vdev_inform_ll_conn = dp_vdev_inform_ll_conn,
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
	.set_swlm_enable = dp_soc_set_swlm_enable,
	.is_swlm_enabled = dp_soc_is_swlm_enabled,
#endif
	.display_txrx_hw_info = dp_display_srng_info,
	.get_tx_rings_grp_bitmap = dp_get_tx_rings_grp_bitmap,
#ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
	.mark_first_wakeup_packet = dp_mark_first_wakeup_packet,
#endif
#ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
	.set_peer_txq_flush_config = dp_set_peer_txq_flush_config,
#endif
#ifdef CONNECTIVITY_PKTLOG
	.register_pktdump_cb = dp_register_packetdump_callback,
	.unregister_pktdump_cb = dp_deregister_packetdump_callback,
#endif
#ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
	.set_bus_vote_lvl_high = dp_set_bus_vote_lvl_high,
	.get_bus_vote_lvl_high = dp_get_bus_vote_lvl_high,
#endif
};
14877 #endif
14878 
14879 #ifdef DP_FLOW_CTL
/* Tx flow-control ops (flow-pool based, TX_FLOW_CONTROL_V2 only) */
static struct cdp_flowctl_ops dp_ops_flowctl = {
	/* WIFI 3.0 DP implement as required. */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	.flow_pool_map_handler = dp_tx_flow_pool_map,
	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
	.register_pause_cb = dp_txrx_register_pause_cb,
	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
};
14890 
/* Legacy (LL) flow-control ops table: intentionally empty for WIFI 3.0 DP */
static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
14894 #endif
14895 
14896 #ifdef IPA_OFFLOAD
/* IPA offload ops table: resource/doorbell setup, pipe enable/disable,
 * autonomy mode, stats, intra-BSS forwarding and SMMU buffer mapping.
 */
static struct cdp_ipa_ops dp_ops_ipa = {
	.ipa_get_resource = dp_ipa_get_resource,
	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
	.ipa_iounmap_doorbell_vaddr = dp_ipa_iounmap_doorbell_vaddr,
	.ipa_op_response = dp_ipa_op_response,
	.ipa_register_op_cb = dp_ipa_register_op_cb,
	.ipa_deregister_op_cb = dp_ipa_deregister_op_cb,
	.ipa_get_stat = dp_ipa_get_stat,
	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
	.ipa_setup = dp_ipa_setup,
	.ipa_cleanup = dp_ipa_cleanup,
	.ipa_setup_iface = dp_ipa_setup_iface,
	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
	.ipa_enable_pipes = dp_ipa_enable_pipes,
	.ipa_disable_pipes = dp_ipa_disable_pipes,
	.ipa_set_perf_level = dp_ipa_set_perf_level,
	.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd,
	.ipa_tx_buf_smmu_mapping = dp_ipa_tx_buf_smmu_mapping,
	.ipa_tx_buf_smmu_unmapping = dp_ipa_tx_buf_smmu_unmapping,
#ifdef IPA_WDS_EASYMESH_FEATURE
	.ipa_ast_create = dp_ipa_ast_create,
#endif
};
14922 #endif
14923 
14924 #ifdef DP_POWER_SAVE
14925 static QDF_STATUS dp_bus_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
14926 {
14927 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14928 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14929 	int timeout = SUSPEND_DRAIN_WAIT;
14930 	int drain_wait_delay = 50; /* 50 ms */
14931 	int32_t tx_pending;
14932 
14933 	if (qdf_unlikely(!pdev)) {
14934 		dp_err("pdev is NULL");
14935 		return QDF_STATUS_E_INVAL;
14936 	}
14937 
14938 	/* Abort if there are any pending TX packets */
14939 	while ((tx_pending = dp_get_tx_pending((struct cdp_pdev *)pdev))) {
14940 		qdf_sleep(drain_wait_delay);
14941 		if (timeout <= 0) {
14942 			dp_info("TX frames are pending %d, abort suspend",
14943 				tx_pending);
14944 			dp_find_missing_tx_comp(soc);
14945 			return QDF_STATUS_E_TIMEOUT;
14946 		}
14947 		timeout = timeout - drain_wait_delay;
14948 	}
14949 
14950 	if (soc->intr_mode == DP_INTR_POLL)
14951 		qdf_timer_stop(&soc->int_timer);
14952 
14953 	/* Stop monitor reap timer and reap any pending frames in ring */
14954 	dp_monitor_reap_timer_suspend(soc);
14955 
14956 	dp_suspend_fse_cache_flush(soc);
14957 
14958 	return QDF_STATUS_SUCCESS;
14959 }
14960 
14961 static QDF_STATUS dp_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
14962 {
14963 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14964 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14965 	uint8_t i;
14966 
14967 	if (qdf_unlikely(!pdev)) {
14968 		dp_err("pdev is NULL");
14969 		return QDF_STATUS_E_INVAL;
14970 	}
14971 
14972 	if (soc->intr_mode == DP_INTR_POLL)
14973 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
14974 
14975 	/* Start monitor reap timer */
14976 	dp_monitor_reap_timer_start(soc, CDP_MON_REAP_SOURCE_ANY);
14977 
14978 	dp_resume_fse_cache_flush(soc);
14979 
14980 	for (i = 0; i < soc->num_tcl_data_rings; i++)
14981 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
14982 
14983 	return QDF_STATUS_SUCCESS;
14984 }
14985 
14986 /**
14987  * dp_process_wow_ack_rsp() - process wow ack response
14988  * @soc_hdl: datapath soc handle
14989  * @pdev_id: data path pdev handle id
14990  *
14991  * Return: none
14992  */
14993 static void dp_process_wow_ack_rsp(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
14994 {
14995 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14996 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14997 
14998 	if (qdf_unlikely(!pdev)) {
14999 		dp_err("pdev is NULL");
15000 		return;
15001 	}
15002 
15003 	/*
15004 	 * As part of wow enable FW disables the mon status ring and in wow ack
15005 	 * response from FW reap mon status ring to make sure no packets pending
15006 	 * in the ring.
15007 	 */
15008 	dp_monitor_reap_timer_suspend(soc);
15009 }
15010 
15011 /**
15012  * dp_process_target_suspend_req() - process target suspend request
15013  * @soc_hdl: datapath soc handle
15014  * @pdev_id: data path pdev handle id
15015  *
15016  * Return: none
15017  */
15018 static void dp_process_target_suspend_req(struct cdp_soc_t *soc_hdl,
15019 					  uint8_t pdev_id)
15020 {
15021 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15022 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15023 
15024 	if (qdf_unlikely(!pdev)) {
15025 		dp_err("pdev is NULL");
15026 		return;
15027 	}
15028 
15029 	/* Stop monitor reap timer and reap any pending frames in ring */
15030 	dp_monitor_reap_timer_suspend(soc);
15031 }
15032 
/* Bus suspend/resume ops table, registered when DP_POWER_SAVE is enabled */
static struct cdp_bus_ops dp_ops_bus = {
	.bus_suspend = dp_bus_suspend,
	.bus_resume = dp_bus_resume,
	.process_wow_ack_rsp = dp_process_wow_ack_rsp,
	.process_target_suspend_req = dp_process_target_suspend_req
};
15039 #endif
15040 
15041 #ifdef DP_FLOW_CTL
/* Throttle ops table: intentionally empty for WIFI 3.0 DP */
static struct cdp_throttle_ops dp_ops_throttle = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
15045 
/* Config ops table: intentionally empty for WIFI 3.0 DP */
static struct cdp_cfg_ops dp_ops_cfg = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
15049 #endif
15050 
15051 #ifdef DP_PEER_EXTENDED_API
/* OCB ops table: intentionally empty for WIFI 3.0 DP */
static struct cdp_ocb_ops dp_ops_ocb = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
15055 
/* Mobile stats ops table: only stats clearing is implemented */
static struct cdp_mob_stats_ops dp_ops_mob_stats = {
	.clear_stats = dp_txrx_clear_dump_stats,
};
15059 
/* Peer ops table: peer registration, lookup and state management hooks */
static struct cdp_peer_ops dp_ops_peer = {
	.register_peer = dp_register_peer,
	.clear_peer = dp_clear_peer,
	.find_peer_exist = dp_find_peer_exist,
	.find_peer_exist_on_vdev = dp_find_peer_exist_on_vdev,
	.find_peer_exist_on_other_vdev = dp_find_peer_exist_on_other_vdev,
	.peer_state_update = dp_peer_state_update,
	.get_vdevid = dp_get_vdevid,
	.get_vdev_by_peer_addr = dp_get_vdev_by_peer_addr,
	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
	.get_peer_state = dp_get_peer_state,
	.peer_flush_frags = dp_peer_flush_frags,
	.set_peer_as_tdls_peer = dp_set_peer_as_tdls_peer,
};
15074 #endif
15075 
15076 static void dp_soc_txrx_ops_attach(struct dp_soc *soc)
15077 {
15078 	soc->cdp_soc.ops->cmn_drv_ops = &dp_ops_cmn;
15079 	soc->cdp_soc.ops->ctrl_ops = &dp_ops_ctrl;
15080 	soc->cdp_soc.ops->me_ops = &dp_ops_me;
15081 	soc->cdp_soc.ops->host_stats_ops = &dp_ops_host_stats;
15082 	soc->cdp_soc.ops->wds_ops = &dp_ops_wds;
15083 	soc->cdp_soc.ops->raw_ops = &dp_ops_raw;
15084 #ifdef PEER_FLOW_CONTROL
15085 	soc->cdp_soc.ops->pflow_ops = &dp_ops_pflow;
15086 #endif /* PEER_FLOW_CONTROL */
15087 #ifdef DP_PEER_EXTENDED_API
15088 	soc->cdp_soc.ops->misc_ops = &dp_ops_misc;
15089 	soc->cdp_soc.ops->ocb_ops = &dp_ops_ocb;
15090 	soc->cdp_soc.ops->peer_ops = &dp_ops_peer;
15091 	soc->cdp_soc.ops->mob_stats_ops = &dp_ops_mob_stats;
15092 #endif
15093 #ifdef DP_FLOW_CTL
15094 	soc->cdp_soc.ops->cfg_ops = &dp_ops_cfg;
15095 	soc->cdp_soc.ops->flowctl_ops = &dp_ops_flowctl;
15096 	soc->cdp_soc.ops->l_flowctl_ops = &dp_ops_l_flowctl;
15097 	soc->cdp_soc.ops->throttle_ops = &dp_ops_throttle;
15098 #endif
15099 #ifdef IPA_OFFLOAD
15100 	soc->cdp_soc.ops->ipa_ops = &dp_ops_ipa;
15101 #endif
15102 #ifdef DP_POWER_SAVE
15103 	soc->cdp_soc.ops->bus_ops = &dp_ops_bus;
15104 #endif
15105 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
15106 	soc->cdp_soc.ops->cfr_ops = &dp_ops_cfr;
15107 #endif
15108 #ifdef WLAN_SUPPORT_MSCS
15109 	soc->cdp_soc.ops->mscs_ops = &dp_ops_mscs;
15110 #endif
15111 #ifdef WLAN_SUPPORT_MESH_LATENCY
15112 	soc->cdp_soc.ops->mesh_latency_ops = &dp_ops_mesh_latency;
15113 #endif
15114 #ifdef CONFIG_SAWF_DEF_QUEUES
15115 	soc->cdp_soc.ops->sawf_ops = &dp_ops_sawf;
15116 #endif
15117 #ifdef WLAN_SUPPORT_SCS
15118 	soc->cdp_soc.ops->scs_ops = &dp_ops_scs;
15119 #endif
15120 };
15121 
15122 /*
15123  * dp_soc_set_txrx_ring_map()
15124  * @dp_soc: DP handler for soc
15125  *
15126  * Return: Void
15127  */
15128 void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
15129 {
15130 	uint32_t i;
15131 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
15132 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
15133 	}
15134 }
15135 
15136 qdf_export_symbol(dp_soc_set_txrx_ring_map);
15137 
15138 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
15139 	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574) || \
15140 	defined(QCA_WIFI_QCA5332)
15141 /**
15142  * dp_soc_attach_wifi3() - Attach txrx SOC
15143  * @ctrl_psoc: Opaque SOC handle from control plane
15144  * @params: SOC attach params
15145  *
15146  * Return: DP SOC handle on success, NULL on failure
15147  */
15148 struct cdp_soc_t *
15149 dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
15150 		    struct cdp_soc_attach_params *params)
15151 {
15152 	struct dp_soc *dp_soc = NULL;
15153 
15154 	dp_soc = dp_soc_attach(ctrl_psoc, params);
15155 
15156 	return dp_soc_to_cdp_soc_t(dp_soc);
15157 }
15158 
15159 static inline void dp_soc_set_def_pdev(struct dp_soc *soc)
15160 {
15161 	int lmac_id;
15162 
15163 	for (lmac_id = 0; lmac_id < MAX_NUM_LMAC_HW; lmac_id++) {
15164 		/*Set default host PDEV ID for lmac_id*/
15165 		wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
15166 				      INVALID_PDEV_ID, lmac_id);
15167 	}
15168 }
15169 
15170 static uint32_t
15171 dp_get_link_desc_id_start(uint16_t arch_id)
15172 {
15173 	switch (arch_id) {
15174 	case CDP_ARCH_TYPE_LI:
15175 		return LINK_DESC_ID_START_21_BITS_COOKIE;
15176 	case CDP_ARCH_TYPE_BE:
15177 		return LINK_DESC_ID_START_20_BITS_COOKIE;
15178 	default:
15179 		dp_err("unkonwn arch_id 0x%x", arch_id);
15180 		QDF_BUG(0);
15181 		return LINK_DESC_ID_START_21_BITS_COOKIE;
15182 	}
15183 }
15184 
15185 /**
15186  * dp_soc_attach() - Attach txrx SOC
15187  * @ctrl_psoc: Opaque SOC handle from control plane
15188  * @params: SOC attach params
15189  *
15190  * Return: DP SOC handle on success, NULL on failure
15191  */
15192 static struct dp_soc *
15193 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
15194 	      struct cdp_soc_attach_params *params)
15195 {
15196 	int int_ctx;
15197 	struct dp_soc *soc =  NULL;
15198 	uint16_t arch_id;
15199 	struct hif_opaque_softc *hif_handle = params->hif_handle;
15200 	qdf_device_t qdf_osdev = params->qdf_osdev;
15201 	struct ol_if_ops *ol_ops = params->ol_ops;
15202 	uint16_t device_id = params->device_id;
15203 
15204 	if (!hif_handle) {
15205 		dp_err("HIF handle is NULL");
15206 		goto fail0;
15207 	}
15208 	arch_id = cdp_get_arch_type_from_devid(device_id);
15209 	soc = qdf_mem_malloc(dp_get_soc_context_size(device_id));
15210 	if (!soc) {
15211 		dp_err("DP SOC memory allocation failed");
15212 		goto fail0;
15213 	}
15214 
15215 	dp_info("soc memory allocated %pK", soc);
15216 	soc->hif_handle = hif_handle;
15217 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
15218 	if (!soc->hal_soc)
15219 		goto fail1;
15220 
15221 	hif_get_cmem_info(soc->hif_handle,
15222 			  &soc->cmem_base,
15223 			  &soc->cmem_total_size);
15224 	soc->cmem_avail_size = soc->cmem_total_size;
15225 	int_ctx = 0;
15226 	soc->device_id = device_id;
15227 	soc->cdp_soc.ops =
15228 		(struct cdp_ops *)qdf_mem_malloc(sizeof(struct cdp_ops));
15229 	if (!soc->cdp_soc.ops)
15230 		goto fail1;
15231 
15232 	dp_soc_txrx_ops_attach(soc);
15233 	soc->cdp_soc.ol_ops = ol_ops;
15234 	soc->ctrl_psoc = ctrl_psoc;
15235 	soc->osdev = qdf_osdev;
15236 	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
15237 	hal_rx_get_tlv_size(soc->hal_soc, &soc->rx_pkt_tlv_size,
15238 			    &soc->rx_mon_pkt_tlv_size);
15239 	soc->idle_link_bm_id = hal_get_idle_link_bm_id(soc->hal_soc,
15240 						       params->mlo_chip_id);
15241 	soc->features.dmac_cmn_src_rxbuf_ring_enabled =
15242 		hal_dmac_cmn_src_rxbuf_ring_get(soc->hal_soc);
15243 	soc->arch_id = arch_id;
15244 	soc->link_desc_id_start =
15245 			dp_get_link_desc_id_start(soc->arch_id);
15246 	dp_configure_arch_ops(soc);
15247 
15248 	/* Reset wbm sg list and flags */
15249 	dp_rx_wbm_sg_list_reset(soc);
15250 
15251 	dp_soc_tx_hw_desc_history_attach(soc);
15252 	dp_soc_rx_history_attach(soc);
15253 	dp_soc_mon_status_ring_history_attach(soc);
15254 	dp_soc_tx_history_attach(soc);
15255 	wlan_set_srng_cfg(&soc->wlan_srng_cfg);
15256 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
15257 	if (!soc->wlan_cfg_ctx) {
15258 		dp_err("wlan_cfg_ctx failed\n");
15259 		goto fail2;
15260 	}
15261 	dp_soc_cfg_attach(soc);
15262 
15263 	if (dp_hw_link_desc_pool_banks_alloc(soc, WLAN_INVALID_PDEV_ID)) {
15264 		dp_err("failed to allocate link desc pool banks");
15265 		goto fail3;
15266 	}
15267 
15268 	if (dp_hw_link_desc_ring_alloc(soc)) {
15269 		dp_err("failed to allocate link_desc_ring");
15270 		goto fail4;
15271 	}
15272 
15273 	if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_attach(soc,
15274 								 params))) {
15275 		dp_err("unable to do target specific attach");
15276 		goto fail5;
15277 	}
15278 
15279 	if (dp_soc_srng_alloc(soc)) {
15280 		dp_err("failed to allocate soc srng rings");
15281 		goto fail6;
15282 	}
15283 
15284 	if (dp_soc_tx_desc_sw_pools_alloc(soc)) {
15285 		dp_err("dp_soc_tx_desc_sw_pools_alloc failed");
15286 		goto fail7;
15287 	}
15288 
15289 	if (!dp_monitor_modularized_enable()) {
15290 		if (dp_mon_soc_attach_wrapper(soc)) {
15291 			dp_err("failed to attach monitor");
15292 			goto fail8;
15293 		}
15294 	}
15295 
15296 	if (dp_sysfs_initialize_stats(soc) != QDF_STATUS_SUCCESS) {
15297 		dp_err("failed to initialize dp stats sysfs file");
15298 		dp_sysfs_deinitialize_stats(soc);
15299 	}
15300 
15301 	dp_soc_swlm_attach(soc);
15302 	dp_soc_set_interrupt_mode(soc);
15303 	dp_soc_set_def_pdev(soc);
15304 
15305 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
15306 		qdf_dma_mem_stats_read(),
15307 		qdf_heap_mem_stats_read(),
15308 		qdf_skb_total_mem_stats_read());
15309 
15310 	return soc;
15311 fail8:
15312 	dp_soc_tx_desc_sw_pools_free(soc);
15313 fail7:
15314 	dp_soc_srng_free(soc);
15315 fail6:
15316 	soc->arch_ops.txrx_soc_detach(soc);
15317 fail5:
15318 	dp_hw_link_desc_ring_free(soc);
15319 fail4:
15320 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
15321 fail3:
15322 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
15323 fail2:
15324 	qdf_mem_free(soc->cdp_soc.ops);
15325 fail1:
15326 	qdf_mem_free(soc);
15327 fail0:
15328 	return NULL;
15329 }
15330 
15331 /**
15332  * dp_soc_init() - Initialize txrx SOC
15333  * @dp_soc: Opaque DP SOC handle
15334  * @htc_handle: Opaque HTC handle
15335  * @hif_handle: Opaque HIF handle
15336  *
15337  * Return: DP SOC handle on success, NULL on failure
15338  */
15339 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
15340 		  struct hif_opaque_softc *hif_handle)
15341 {
15342 	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
15343 	bool is_monitor_mode = false;
15344 	uint8_t i;
15345 	int num_dp_msi;
15346 
15347 	wlan_minidump_log(soc, sizeof(*soc), soc->ctrl_psoc,
15348 			  WLAN_MD_DP_SOC, "dp_soc");
15349 
15350 	soc->hif_handle = hif_handle;
15351 
15352 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
15353 	if (!soc->hal_soc)
15354 		goto fail0;
15355 
15356 	if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_init(soc))) {
15357 		dp_err("unable to do target specific init");
15358 		goto fail0;
15359 	}
15360 
15361 	htt_soc = htt_soc_attach(soc, htc_handle);
15362 	if (!htt_soc)
15363 		goto fail1;
15364 
15365 	soc->htt_handle = htt_soc;
15366 
15367 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
15368 		goto fail2;
15369 
15370 	htt_set_htc_handle(htt_soc, htc_handle);
15371 
15372 	dp_soc_cfg_init(soc);
15373 
15374 	dp_monitor_soc_cfg_init(soc);
15375 	/* Reset/Initialize wbm sg list and flags */
15376 	dp_rx_wbm_sg_list_reset(soc);
15377 
15378 	/* Note: Any SRNG ring initialization should happen only after
15379 	 * Interrupt mode is set and followed by filling up the
15380 	 * interrupt mask. IT SHOULD ALWAYS BE IN THIS ORDER.
15381 	 */
15382 	dp_soc_set_interrupt_mode(soc);
15383 	if (soc->cdp_soc.ol_ops->get_con_mode &&
15384 	    soc->cdp_soc.ol_ops->get_con_mode() ==
15385 	    QDF_GLOBAL_MONITOR_MODE)
15386 		is_monitor_mode = true;
15387 
15388 	num_dp_msi = dp_get_num_msi_available(soc, soc->intr_mode);
15389 	if (num_dp_msi < 0) {
15390 		dp_init_err("%pK: dp_interrupt assignment failed", soc);
15391 		goto fail3;
15392 	}
15393 
15394 	wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, num_dp_msi,
15395 				     soc->intr_mode, is_monitor_mode);
15396 
15397 	/* initialize WBM_IDLE_LINK ring */
15398 	if (dp_hw_link_desc_ring_init(soc)) {
15399 		dp_init_err("%pK: dp_hw_link_desc_ring_init failed", soc);
15400 		goto fail3;
15401 	}
15402 
15403 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
15404 
15405 	if (dp_soc_srng_init(soc)) {
15406 		dp_init_err("%pK: dp_soc_srng_init failed", soc);
15407 		goto fail4;
15408 	}
15409 
15410 	if (htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
15411 			       htt_get_htc_handle(htt_soc),
15412 			       soc->hal_soc, soc->osdev) == NULL)
15413 		goto fail5;
15414 
15415 	/* Initialize descriptors in TCL Rings */
15416 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
15417 		hal_tx_init_data_ring(soc->hal_soc,
15418 				      soc->tcl_data_ring[i].hal_srng);
15419 	}
15420 
15421 	if (dp_soc_tx_desc_sw_pools_init(soc)) {
15422 		dp_init_err("%pK: dp_tx_soc_attach failed", soc);
15423 		goto fail6;
15424 	}
15425 
15426 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
15427 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
15428 	soc->cce_disable = false;
15429 	soc->max_ast_ageout_count = MAX_AST_AGEOUT_COUNT;
15430 
15431 	soc->sta_mode_search_policy = DP_TX_ADDR_SEARCH_ADDR_POLICY;
15432 	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
15433 	qdf_spinlock_create(&soc->vdev_map_lock);
15434 	qdf_atomic_init(&soc->num_tx_outstanding);
15435 	qdf_atomic_init(&soc->num_tx_exception);
15436 	soc->num_tx_allowed =
15437 		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);
15438 
15439 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
15440 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
15441 				CDP_CFG_MAX_PEER_ID);
15442 
15443 		if (ret != -EINVAL)
15444 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
15445 
15446 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
15447 				CDP_CFG_CCE_DISABLE);
15448 		if (ret == 1)
15449 			soc->cce_disable = true;
15450 	}
15451 
15452 	/*
15453 	 * Skip registering hw ring interrupts for WMAC2 on IPQ6018
15454 	 * and IPQ5018 WMAC2 is not there in these platforms.
15455 	 */
15456 	if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018 ||
15457 	    soc->disable_mac2_intr)
15458 		dp_soc_disable_unused_mac_intr_mask(soc, 0x2);
15459 
15460 	/*
15461 	 * Skip registering hw ring interrupts for WMAC1 on IPQ5018
15462 	 * WMAC1 is not there in this platform.
15463 	 */
15464 	if (soc->disable_mac1_intr)
15465 		dp_soc_disable_unused_mac_intr_mask(soc, 0x1);
15466 
15467 	/* setup the global rx defrag waitlist */
15468 	TAILQ_INIT(&soc->rx.defrag.waitlist);
15469 	soc->rx.defrag.timeout_ms =
15470 		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
15471 	soc->rx.defrag.next_flush_ms = 0;
15472 	soc->rx.flags.defrag_timeout_check =
15473 		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
15474 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
15475 
15476 	dp_monitor_soc_init(soc);
15477 
15478 	qdf_atomic_set(&soc->cmn_init_done, 1);
15479 
15480 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
15481 
15482 	qdf_spinlock_create(&soc->ast_lock);
15483 	dp_peer_mec_spinlock_create(soc);
15484 
15485 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
15486 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
15487 	INIT_RX_HW_STATS_LOCK(soc);
15488 
15489 	qdf_nbuf_queue_init(&soc->invalid_buf_queue);
15490 	/* fill the tx/rx cpu ring map*/
15491 	dp_soc_set_txrx_ring_map(soc);
15492 
15493 	TAILQ_INIT(&soc->inactive_peer_list);
15494 	qdf_spinlock_create(&soc->inactive_peer_list_lock);
15495 	TAILQ_INIT(&soc->inactive_vdev_list);
15496 	qdf_spinlock_create(&soc->inactive_vdev_list_lock);
15497 	qdf_spinlock_create(&soc->htt_stats.lock);
15498 	/* initialize work queue for stats processing */
15499 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
15500 
15501 	dp_reo_desc_deferred_freelist_create(soc);
15502 
15503 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
15504 		qdf_dma_mem_stats_read(),
15505 		qdf_heap_mem_stats_read(),
15506 		qdf_skb_total_mem_stats_read());
15507 
15508 	soc->vdev_stats_id_map = 0;
15509 
15510 	return soc;
15511 fail6:
15512 	htt_soc_htc_dealloc(soc->htt_handle);
15513 fail5:
15514 	dp_soc_srng_deinit(soc);
15515 fail4:
15516 	dp_hw_link_desc_ring_deinit(soc);
15517 fail3:
15518 	htt_htc_pkt_pool_free(htt_soc);
15519 fail2:
15520 	htt_soc_detach(htt_soc);
15521 fail1:
15522 	soc->arch_ops.txrx_soc_deinit(soc);
15523 fail0:
15524 	return NULL;
15525 }
15526 
15527 /**
15528  * dp_soc_init_wifi3() - Initialize txrx SOC
15529  * @soc: Opaque DP SOC handle
15530  * @ctrl_psoc: Opaque SOC handle from control plane(Unused)
15531  * @hif_handle: Opaque HIF handle
15532  * @htc_handle: Opaque HTC handle
15533  * @qdf_osdev: QDF device (Unused)
15534  * @ol_ops: Offload Operations (Unused)
15535  * @device_id: Device ID (Unused)
15536  *
15537  * Return: DP SOC handle on success, NULL on failure
15538  */
15539 void *dp_soc_init_wifi3(struct cdp_soc_t *soc,
15540 			struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
15541 			struct hif_opaque_softc *hif_handle,
15542 			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
15543 			struct ol_if_ops *ol_ops, uint16_t device_id)
15544 {
15545 	return dp_soc_init((struct dp_soc *)soc, htc_handle, hif_handle);
15546 }
15547 
15548 #endif
15549 
15550 /*
15551  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
15552  *
15553  * @soc: handle to DP soc
15554  * @mac_id: MAC id
15555  *
15556  * Return: Return pdev corresponding to MAC
15557  */
15558 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
15559 {
15560 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
15561 		return (mac_id < MAX_PDEV_CNT) ? soc->pdev_list[mac_id] : NULL;
15562 
15563 	/* Typically for MCL as there only 1 PDEV*/
15564 	return soc->pdev_list[0];
15565 }
15566 
15567 void dp_update_num_mac_rings_for_dbs(struct dp_soc *soc,
15568 				     int *max_mac_rings)
15569 {
15570 	bool dbs_enable = false;
15571 
15572 	if (soc->cdp_soc.ol_ops->is_hw_dbs_capable)
15573 		dbs_enable = soc->cdp_soc.ol_ops->
15574 				is_hw_dbs_capable((void *)soc->ctrl_psoc);
15575 
15576 	*max_mac_rings = dbs_enable ? (*max_mac_rings) : 1;
15577 	dp_info("dbs_enable %d, max_mac_rings %d",
15578 		dbs_enable, *max_mac_rings);
15579 }
15580 
15581 qdf_export_symbol(dp_update_num_mac_rings_for_dbs);
15582 
15583 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
15584 /**
15585  * dp_get_cfr_rcc() - get cfr rcc config
15586  * @soc_hdl: Datapath soc handle
15587  * @pdev_id: id of objmgr pdev
15588  *
15589  * Return: true/false based on cfr mode setting
15590  */
15591 static
15592 bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
15593 {
15594 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15595 	struct dp_pdev *pdev = NULL;
15596 
15597 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15598 	if (!pdev) {
15599 		dp_err("pdev is NULL");
15600 		return false;
15601 	}
15602 
15603 	return pdev->cfr_rcc_mode;
15604 }
15605 
15606 /**
15607  * dp_set_cfr_rcc() - enable/disable cfr rcc config
15608  * @soc_hdl: Datapath soc handle
15609  * @pdev_id: id of objmgr pdev
15610  * @enable: Enable/Disable cfr rcc mode
15611  *
15612  * Return: none
15613  */
15614 static
15615 void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, bool enable)
15616 {
15617 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15618 	struct dp_pdev *pdev = NULL;
15619 
15620 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15621 	if (!pdev) {
15622 		dp_err("pdev is NULL");
15623 		return;
15624 	}
15625 
15626 	pdev->cfr_rcc_mode = enable;
15627 }
15628 
15629 /*
15630  * dp_get_cfr_dbg_stats - Get the debug statistics for CFR
15631  * @soc_hdl: Datapath soc handle
15632  * @pdev_id: id of data path pdev handle
15633  * @cfr_rcc_stats: CFR RCC debug statistics buffer
15634  *
15635  * Return: none
15636  */
15637 static inline void
15638 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
15639 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats)
15640 {
15641 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15642 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15643 
15644 	if (!pdev) {
15645 		dp_err("Invalid pdev");
15646 		return;
15647 	}
15648 
15649 	qdf_mem_copy(cfr_rcc_stats, &pdev->stats.rcc,
15650 		     sizeof(struct cdp_cfr_rcc_stats));
15651 }
15652 
15653 /*
15654  * dp_clear_cfr_dbg_stats - Clear debug statistics for CFR
15655  * @soc_hdl: Datapath soc handle
15656  * @pdev_id: id of data path pdev handle
15657  *
15658  * Return: none
15659  */
15660 static void dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl,
15661 				   uint8_t pdev_id)
15662 {
15663 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15664 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15665 
15666 	if (!pdev) {
15667 		dp_err("dp pdev is NULL");
15668 		return;
15669 	}
15670 
15671 	qdf_mem_zero(&pdev->stats.rcc, sizeof(pdev->stats.rcc));
15672 }
15673 #endif
15674 
15675 /**
15676  * dp_bucket_index() - Return index from array
15677  *
15678  * @delay: delay measured
15679  * @array: array used to index corresponding delay
15680  * @delay_in_us: flag to indicate whether the delay in ms or us
15681  *
15682  * Return: index
15683  */
15684 static uint8_t
15685 dp_bucket_index(uint32_t delay, uint16_t *array, bool delay_in_us)
15686 {
15687 	uint8_t i = CDP_DELAY_BUCKET_0;
15688 	uint32_t thr_low, thr_high;
15689 
15690 	for (; i < CDP_DELAY_BUCKET_MAX - 1; i++) {
15691 		thr_low = array[i];
15692 		thr_high = array[i + 1];
15693 
15694 		if (delay_in_us) {
15695 			thr_low = thr_low * USEC_PER_MSEC;
15696 			thr_high = thr_high * USEC_PER_MSEC;
15697 		}
15698 		if (delay >= thr_low && delay <= thr_high)
15699 			return i;
15700 	}
15701 	return (CDP_DELAY_BUCKET_MAX - 1);
15702 }
15703 
#ifdef HW_TX_DELAY_STATS_ENABLE
/*
 * cdp_fw_to_hw_delay_range
 * Fw to hw delay ranges in milliseconds
 */
static uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
	0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
#else
/* Finer-grained low-end buckets when HW tx delay stats are disabled */
static uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
	0, 2, 4, 6, 8, 10, 20, 30, 40, 50, 100, 250, 500};
#endif

/*
 * cdp_sw_enq_delay_range
 * Software enqueue delay ranges in milliseconds
 */
static uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};

/*
 * cdp_intfrm_delay_range
 * Interframe delay ranges in milliseconds
 */
static uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
	0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};
15729 
15730 /**
15731  * dp_fill_delay_buckets() - Fill delay statistics bucket for each
15732  *				type of delay
15733  * @tstats: tid tx stats
15734  * @rstats: tid rx stats
15735  * @delay: delay in ms
15736  * @tid: tid value
15737  * @mode: type of tx delay mode
15738  * @ring_id: ring number
15739  * @delay_in_us: flag to indicate whether the delay in ms or us
15740  *
15741  * Return: pointer to cdp_delay_stats structure
15742  */
15743 static struct cdp_delay_stats *
15744 dp_fill_delay_buckets(struct cdp_tid_tx_stats *tstats,
15745 		      struct cdp_tid_rx_stats *rstats, uint32_t delay,
15746 		      uint8_t tid, uint8_t mode, uint8_t ring_id,
15747 		      bool delay_in_us)
15748 {
15749 	uint8_t delay_index = 0;
15750 	struct cdp_delay_stats *stats = NULL;
15751 
15752 	/*
15753 	 * Update delay stats in proper bucket
15754 	 */
15755 	switch (mode) {
15756 	/* Software Enqueue delay ranges */
15757 	case CDP_DELAY_STATS_SW_ENQ:
15758 		if (!tstats)
15759 			break;
15760 
15761 		delay_index = dp_bucket_index(delay, cdp_sw_enq_delay,
15762 					      delay_in_us);
15763 		tstats->swq_delay.delay_bucket[delay_index]++;
15764 		stats = &tstats->swq_delay;
15765 		break;
15766 
15767 	/* Tx Completion delay ranges */
15768 	case CDP_DELAY_STATS_FW_HW_TRANSMIT:
15769 		if (!tstats)
15770 			break;
15771 
15772 		delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay,
15773 					      delay_in_us);
15774 		tstats->hwtx_delay.delay_bucket[delay_index]++;
15775 		stats = &tstats->hwtx_delay;
15776 		break;
15777 
15778 	/* Interframe tx delay ranges */
15779 	case CDP_DELAY_STATS_TX_INTERFRAME:
15780 		if (!tstats)
15781 			break;
15782 
15783 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
15784 					      delay_in_us);
15785 		tstats->intfrm_delay.delay_bucket[delay_index]++;
15786 		stats = &tstats->intfrm_delay;
15787 		break;
15788 
15789 	/* Interframe rx delay ranges */
15790 	case CDP_DELAY_STATS_RX_INTERFRAME:
15791 		if (!rstats)
15792 			break;
15793 
15794 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
15795 					      delay_in_us);
15796 		rstats->intfrm_delay.delay_bucket[delay_index]++;
15797 		stats = &rstats->intfrm_delay;
15798 		break;
15799 
15800 	/* Ring reap to indication to network stack */
15801 	case CDP_DELAY_STATS_REAP_STACK:
15802 		if (!rstats)
15803 			break;
15804 
15805 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
15806 					      delay_in_us);
15807 		rstats->to_stack_delay.delay_bucket[delay_index]++;
15808 		stats = &rstats->to_stack_delay;
15809 		break;
15810 	default:
15811 		dp_debug("Incorrect delay mode: %d", mode);
15812 	}
15813 
15814 	return stats;
15815 }
15816 
15817 void dp_update_delay_stats(struct cdp_tid_tx_stats *tstats,
15818 			   struct cdp_tid_rx_stats *rstats, uint32_t delay,
15819 			   uint8_t tid, uint8_t mode, uint8_t ring_id,
15820 			   bool delay_in_us)
15821 {
15822 	struct cdp_delay_stats *dstats = NULL;
15823 
15824 	/*
15825 	 * Delay ranges are different for different delay modes
15826 	 * Get the correct index to update delay bucket
15827 	 */
15828 	dstats = dp_fill_delay_buckets(tstats, rstats, delay, tid, mode,
15829 				       ring_id, delay_in_us);
15830 	if (qdf_unlikely(!dstats))
15831 		return;
15832 
15833 	if (delay != 0) {
15834 		/*
15835 		 * Compute minimum,average and maximum
15836 		 * delay
15837 		 */
15838 		if (delay < dstats->min_delay)
15839 			dstats->min_delay = delay;
15840 
15841 		if (delay > dstats->max_delay)
15842 			dstats->max_delay = delay;
15843 
15844 		/*
15845 		 * Average over delay measured till now
15846 		 */
15847 		if (!dstats->avg_delay)
15848 			dstats->avg_delay = delay;
15849 		else
15850 			dstats->avg_delay = ((delay + dstats->avg_delay) >> 1);
15851 	}
15852 }
15853 
15854 /**
15855  * dp_get_peer_mac_list(): function to get peer mac list of vdev
15856  * @soc: Datapath soc handle
15857  * @vdev_id: vdev id
15858  * @newmac: Table of the clients mac
15859  * @mac_cnt: No. of MACs required
15860  * @limit: Limit the number of clients
15861  *
15862  * return: no of clients
15863  */
15864 uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
15865 			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
15866 			      u_int16_t mac_cnt, bool limit)
15867 {
15868 	struct dp_soc *dp_soc = (struct dp_soc *)soc;
15869 	struct dp_vdev *vdev =
15870 		dp_vdev_get_ref_by_id(dp_soc, vdev_id, DP_MOD_ID_CDP);
15871 	struct dp_peer *peer;
15872 	uint16_t new_mac_cnt = 0;
15873 
15874 	if (!vdev)
15875 		return new_mac_cnt;
15876 
15877 	if (limit && (vdev->num_peers > mac_cnt))
15878 		return 0;
15879 
15880 	qdf_spin_lock_bh(&vdev->peer_list_lock);
15881 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
15882 		if (peer->bss_peer)
15883 			continue;
15884 		if (new_mac_cnt < mac_cnt) {
15885 			WLAN_ADDR_COPY(newmac[new_mac_cnt], peer->mac_addr.raw);
15886 			new_mac_cnt++;
15887 		}
15888 	}
15889 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
15890 	dp_vdev_unref_delete(dp_soc, vdev, DP_MOD_ID_CDP);
15891 	return new_mac_cnt;
15892 }
15893 
15894 uint16_t dp_get_peer_id(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *mac)
15895 {
15896 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
15897 						       mac, 0, vdev_id,
15898 						       DP_MOD_ID_CDP);
15899 	uint16_t peer_id = HTT_INVALID_PEER;
15900 
15901 	if (!peer) {
15902 		dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
15903 		return peer_id;
15904 	}
15905 
15906 	peer_id = peer->peer_id;
15907 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
15908 	return peer_id;
15909 }
15910 
15911 #ifdef QCA_SUPPORT_WDS_EXTENDED
15912 QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc,
15913 				  uint8_t vdev_id,
15914 				  uint8_t *mac,
15915 				  ol_txrx_rx_fp rx,
15916 				  ol_osif_peer_handle osif_peer)
15917 {
15918 	struct dp_txrx_peer *txrx_peer = NULL;
15919 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
15920 						       mac, 0, vdev_id,
15921 						       DP_MOD_ID_CDP);
15922 	QDF_STATUS status = QDF_STATUS_E_INVAL;
15923 
15924 	if (!peer) {
15925 		dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
15926 		return status;
15927 	}
15928 
15929 	txrx_peer = dp_get_txrx_peer(peer);
15930 	if (!txrx_peer) {
15931 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
15932 		return status;
15933 	}
15934 
15935 	if (rx) {
15936 		if (txrx_peer->osif_rx) {
15937 			status = QDF_STATUS_E_ALREADY;
15938 		} else {
15939 			txrx_peer->osif_rx = rx;
15940 			status = QDF_STATUS_SUCCESS;
15941 		}
15942 	} else {
15943 		if (txrx_peer->osif_rx) {
15944 			txrx_peer->osif_rx = NULL;
15945 			status = QDF_STATUS_SUCCESS;
15946 		} else {
15947 			status = QDF_STATUS_E_ALREADY;
15948 		}
15949 	}
15950 
15951 	txrx_peer->wds_ext.osif_peer = osif_peer;
15952 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
15953 
15954 	return status;
15955 }
15956 #endif /* QCA_SUPPORT_WDS_EXTENDED */
15957 
15958 /**
15959  * dp_pdev_srng_deinit() - de-initialize all pdev srng ring including
15960  *			   monitor rings
15961  * @pdev: Datapath pdev handle
15962  *
15963  */
15964 static void dp_pdev_srng_deinit(struct dp_pdev *pdev)
15965 {
15966 	struct dp_soc *soc = pdev->soc;
15967 	uint8_t i;
15968 
15969 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled)
15970 		dp_srng_deinit(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
15971 			       RXDMA_BUF,
15972 			       pdev->lmac_id);
15973 
15974 	if (!soc->rxdma2sw_rings_not_supported) {
15975 		for (i = 0;
15976 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
15977 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
15978 								 pdev->pdev_id);
15979 
15980 			wlan_minidump_remove(soc->rxdma_err_dst_ring[lmac_id].
15981 							base_vaddr_unaligned,
15982 					     soc->rxdma_err_dst_ring[lmac_id].
15983 								alloc_size,
15984 					     soc->ctrl_psoc,
15985 					     WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
15986 					     "rxdma_err_dst");
15987 			dp_srng_deinit(soc, &soc->rxdma_err_dst_ring[lmac_id],
15988 				       RXDMA_DST, lmac_id);
15989 		}
15990 	}
15991 
15992 
15993 }
15994 
15995 /**
15996  * dp_pdev_srng_init() - initialize all pdev srng rings including
15997  *			   monitor rings
15998  * @pdev: Datapath pdev handle
15999  *
16000  * return: QDF_STATUS_SUCCESS on success
16001  *	   QDF_STATUS_E_NOMEM on failure
16002  */
16003 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev)
16004 {
16005 	struct dp_soc *soc = pdev->soc;
16006 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
16007 	uint32_t i;
16008 
16009 	soc_cfg_ctx = soc->wlan_cfg_ctx;
16010 
16011 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
16012 		if (dp_srng_init(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
16013 				 RXDMA_BUF, 0, pdev->lmac_id)) {
16014 			dp_init_err("%pK: dp_srng_init failed rx refill ring",
16015 				    soc);
16016 			goto fail1;
16017 		}
16018 	}
16019 
16020 	/* LMAC RxDMA to SW Rings configuration */
16021 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
16022 		/* Only valid for MCL */
16023 		pdev = soc->pdev_list[0];
16024 
16025 	if (!soc->rxdma2sw_rings_not_supported) {
16026 		for (i = 0;
16027 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
16028 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
16029 								 pdev->pdev_id);
16030 			struct dp_srng *srng =
16031 				&soc->rxdma_err_dst_ring[lmac_id];
16032 
16033 			if (srng->hal_srng)
16034 				continue;
16035 
16036 			if (dp_srng_init(soc, srng, RXDMA_DST, 0, lmac_id)) {
16037 				dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring",
16038 					    soc);
16039 				goto fail1;
16040 			}
16041 			wlan_minidump_log(soc->rxdma_err_dst_ring[lmac_id].
16042 						base_vaddr_unaligned,
16043 					  soc->rxdma_err_dst_ring[lmac_id].
16044 						alloc_size,
16045 					  soc->ctrl_psoc,
16046 					  WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
16047 					  "rxdma_err_dst");
16048 		}
16049 	}
16050 	return QDF_STATUS_SUCCESS;
16051 
16052 fail1:
16053 	dp_pdev_srng_deinit(pdev);
16054 	return QDF_STATUS_E_NOMEM;
16055 }
16056 
16057 /**
16058  * dp_pdev_srng_free() - free all pdev srng rings including monitor rings
16059  * pdev: Datapath pdev handle
16060  *
16061  */
16062 static void dp_pdev_srng_free(struct dp_pdev *pdev)
16063 {
16064 	struct dp_soc *soc = pdev->soc;
16065 	uint8_t i;
16066 
16067 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled)
16068 		dp_srng_free(soc, &soc->rx_refill_buf_ring[pdev->lmac_id]);
16069 
16070 	if (!soc->rxdma2sw_rings_not_supported) {
16071 		for (i = 0;
16072 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
16073 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
16074 								 pdev->pdev_id);
16075 
16076 			dp_srng_free(soc, &soc->rxdma_err_dst_ring[lmac_id]);
16077 		}
16078 	}
16079 }
16080 
16081 /**
16082  * dp_pdev_srng_alloc() - allocate memory for all pdev srng rings including
16083  *			  monitor rings
16084  * pdev: Datapath pdev handle
16085  *
16086  * return: QDF_STATUS_SUCCESS on success
16087  *	   QDF_STATUS_E_NOMEM on failure
16088  */
16089 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev)
16090 {
16091 	struct dp_soc *soc = pdev->soc;
16092 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
16093 	uint32_t ring_size;
16094 	uint32_t i;
16095 
16096 	soc_cfg_ctx = soc->wlan_cfg_ctx;
16097 
16098 	ring_size = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
16099 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
16100 		if (dp_srng_alloc(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
16101 				  RXDMA_BUF, ring_size, 0)) {
16102 			dp_init_err("%pK: dp_srng_alloc failed rx refill ring",
16103 				    soc);
16104 			goto fail1;
16105 		}
16106 	}
16107 
16108 	ring_size = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
16109 	/* LMAC RxDMA to SW Rings configuration */
16110 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
16111 		/* Only valid for MCL */
16112 		pdev = soc->pdev_list[0];
16113 
16114 	if (!soc->rxdma2sw_rings_not_supported) {
16115 		for (i = 0;
16116 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
16117 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
16118 								 pdev->pdev_id);
16119 			struct dp_srng *srng =
16120 				&soc->rxdma_err_dst_ring[lmac_id];
16121 
16122 			if (srng->base_vaddr_unaligned)
16123 				continue;
16124 
16125 			if (dp_srng_alloc(soc, srng, RXDMA_DST, ring_size, 0)) {
16126 				dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring",
16127 					    soc);
16128 				goto fail1;
16129 			}
16130 		}
16131 	}
16132 
16133 	return QDF_STATUS_SUCCESS;
16134 fail1:
16135 	dp_pdev_srng_free(pdev);
16136 	return QDF_STATUS_E_NOMEM;
16137 }
16138 
16139 #ifndef WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG
16140 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
16141 {
16142 	QDF_STATUS status;
16143 
16144 	if (soc->init_tcl_cmd_cred_ring) {
16145 		status =  dp_srng_init(soc, &soc->tcl_cmd_credit_ring,
16146 				       TCL_CMD_CREDIT, 0, 0);
16147 		if (QDF_IS_STATUS_ERROR(status))
16148 			return status;
16149 
16150 		wlan_minidump_log(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
16151 				  soc->tcl_cmd_credit_ring.alloc_size,
16152 				  soc->ctrl_psoc,
16153 				  WLAN_MD_DP_SRNG_TCL_CMD,
16154 				  "wbm_desc_rel_ring");
16155 	}
16156 
16157 	return QDF_STATUS_SUCCESS;
16158 }
16159 
16160 static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
16161 {
16162 	if (soc->init_tcl_cmd_cred_ring) {
16163 		wlan_minidump_remove(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
16164 				     soc->tcl_cmd_credit_ring.alloc_size,
16165 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_CMD,
16166 				     "wbm_desc_rel_ring");
16167 		dp_srng_deinit(soc, &soc->tcl_cmd_credit_ring,
16168 			       TCL_CMD_CREDIT, 0);
16169 	}
16170 }
16171 
16172 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
16173 {
16174 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
16175 	uint32_t entries;
16176 	QDF_STATUS status;
16177 
16178 	entries = wlan_cfg_get_dp_soc_tcl_cmd_credit_ring_size(soc_cfg_ctx);
16179 	if (soc->init_tcl_cmd_cred_ring) {
16180 		status = dp_srng_alloc(soc, &soc->tcl_cmd_credit_ring,
16181 				       TCL_CMD_CREDIT, entries, 0);
16182 		if (QDF_IS_STATUS_ERROR(status))
16183 			return status;
16184 	}
16185 
16186 	return QDF_STATUS_SUCCESS;
16187 }
16188 
/* Free memory backing the TCL command/credit SRNG, if it was in use */
static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
{
	if (soc->init_tcl_cmd_cred_ring)
		dp_srng_free(soc, &soc->tcl_cmd_credit_ring);
}
16194 
/* Program HAL with the TCL command/credit ring, if it is in use */
static inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
{
	if (soc->init_tcl_cmd_cred_ring)
		hal_tx_init_cmd_credit_ring(soc->hal_soc,
					    soc->tcl_cmd_credit_ring.hal_srng);
}
16201 #else
/* Stubs used when the TCL command/credit SRNG is compiled out
 * (WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG is defined).
 */
static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
{
}

static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
{
}

static inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
{
}
16223 #endif
16224 
16225 #ifndef WLAN_DP_DISABLE_TCL_STATUS_SRNG
16226 static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
16227 {
16228 	QDF_STATUS status;
16229 
16230 	status =  dp_srng_init(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0);
16231 	if (QDF_IS_STATUS_ERROR(status))
16232 		return status;
16233 
16234 	wlan_minidump_log(soc->tcl_status_ring.base_vaddr_unaligned,
16235 			  soc->tcl_status_ring.alloc_size,
16236 			  soc->ctrl_psoc,
16237 			  WLAN_MD_DP_SRNG_TCL_STATUS,
16238 			  "wbm_desc_rel_ring");
16239 
16240 	return QDF_STATUS_SUCCESS;
16241 }
16242 
16243 static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
16244 {
16245 	wlan_minidump_remove(soc->tcl_status_ring.base_vaddr_unaligned,
16246 			     soc->tcl_status_ring.alloc_size,
16247 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_STATUS,
16248 			     "wbm_desc_rel_ring");
16249 	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
16250 }
16251 
16252 static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
16253 {
16254 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
16255 	uint32_t entries;
16256 	QDF_STATUS status = QDF_STATUS_SUCCESS;
16257 
16258 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
16259 	status = dp_srng_alloc(soc, &soc->tcl_status_ring,
16260 			       TCL_STATUS, entries, 0);
16261 
16262 	return status;
16263 }
16264 
/* Free memory backing the TCL status SRNG */
static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
{
	dp_srng_free(soc, &soc->tcl_status_ring);
}
16269 #else
/* Stubs used when the TCL status SRNG is compiled out
 * (WLAN_DP_DISABLE_TCL_STATUS_SRNG is defined).
 */
static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
{
}

static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
{
}
16287 #endif
16288 
16289 /**
16290  * dp_soc_srng_deinit() - de-initialize soc srng rings
16291  * @soc: Datapath soc handle
16292  *
16293  */
16294 static void dp_soc_srng_deinit(struct dp_soc *soc)
16295 {
16296 	uint32_t i;
16297 
16298 	if (soc->arch_ops.txrx_soc_srng_deinit)
16299 		soc->arch_ops.txrx_soc_srng_deinit(soc);
16300 
16301 	/* Free the ring memories */
16302 	/* Common rings */
16303 	wlan_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
16304 			     soc->wbm_desc_rel_ring.alloc_size,
16305 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_WBM_DESC_REL,
16306 			     "wbm_desc_rel_ring");
16307 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
16308 
16309 	/* Tx data rings */
16310 	for (i = 0; i < soc->num_tcl_data_rings; i++)
16311 		dp_deinit_tx_pair_by_index(soc, i);
16312 
16313 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
16314 		dp_deinit_tx_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
16315 		dp_ipa_deinit_alt_tx_ring(soc);
16316 	}
16317 
16318 	/* TCL command and status rings */
16319 	dp_soc_tcl_cmd_cred_srng_deinit(soc);
16320 	dp_soc_tcl_status_srng_deinit(soc);
16321 
16322 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
16323 		/* TODO: Get number of rings and ring sizes
16324 		 * from wlan_cfg
16325 		 */
16326 		wlan_minidump_remove(soc->reo_dest_ring[i].base_vaddr_unaligned,
16327 				     soc->reo_dest_ring[i].alloc_size,
16328 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_DEST,
16329 				     "reo_dest_ring");
16330 		dp_srng_deinit(soc, &soc->reo_dest_ring[i], REO_DST, i);
16331 	}
16332 
16333 	/* REO reinjection ring */
16334 	wlan_minidump_remove(soc->reo_reinject_ring.base_vaddr_unaligned,
16335 			     soc->reo_reinject_ring.alloc_size,
16336 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_REINJECT,
16337 			     "reo_reinject_ring");
16338 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
16339 
16340 	/* Rx release ring */
16341 	wlan_minidump_remove(soc->rx_rel_ring.base_vaddr_unaligned,
16342 			     soc->rx_rel_ring.alloc_size,
16343 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_RX_REL,
16344 			     "reo_release_ring");
16345 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
16346 
16347 	/* Rx exception ring */
16348 	/* TODO: Better to store ring_type and ring_num in
16349 	 * dp_srng during setup
16350 	 */
16351 	wlan_minidump_remove(soc->reo_exception_ring.base_vaddr_unaligned,
16352 			     soc->reo_exception_ring.alloc_size,
16353 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_EXCEPTION,
16354 			     "reo_exception_ring");
16355 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
16356 
16357 	/* REO command and status rings */
16358 	wlan_minidump_remove(soc->reo_cmd_ring.base_vaddr_unaligned,
16359 			     soc->reo_cmd_ring.alloc_size,
16360 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_CMD,
16361 			     "reo_cmd_ring");
16362 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
16363 	wlan_minidump_remove(soc->reo_status_ring.base_vaddr_unaligned,
16364 			     soc->reo_status_ring.alloc_size,
16365 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_STATUS,
16366 			     "reo_status_ring");
16367 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
16368 }
16369 
16370 /**
16371  * dp_soc_srng_init() - Initialize soc level srng rings
16372  * @soc: Datapath soc handle
16373  *
16374  * return: QDF_STATUS_SUCCESS on success
16375  *	   QDF_STATUS_E_FAILURE on failure
16376  */
16377 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc)
16378 {
16379 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
16380 	uint8_t i;
16381 	uint8_t wbm2_sw_rx_rel_ring_id;
16382 
16383 	soc_cfg_ctx = soc->wlan_cfg_ctx;
16384 
16385 	dp_enable_verbose_debug(soc);
16386 
16387 	/* WBM descriptor release ring */
16388 	if (dp_srng_init(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0)) {
16389 		dp_init_err("%pK: dp_srng_init failed for wbm_desc_rel_ring", soc);
16390 		goto fail1;
16391 	}
16392 
16393 	wlan_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
16394 			  soc->wbm_desc_rel_ring.alloc_size,
16395 			  soc->ctrl_psoc,
16396 			  WLAN_MD_DP_SRNG_WBM_DESC_REL,
16397 			  "wbm_desc_rel_ring");
16398 
16399 	/* TCL command and status rings */
16400 	if (dp_soc_tcl_cmd_cred_srng_init(soc)) {
16401 		dp_init_err("%pK: dp_srng_init failed for tcl_cmd_ring", soc);
16402 		goto fail1;
16403 	}
16404 
16405 	if (dp_soc_tcl_status_srng_init(soc)) {
16406 		dp_init_err("%pK: dp_srng_init failed for tcl_status_ring", soc);
16407 		goto fail1;
16408 	}
16409 
16410 	/* REO reinjection ring */
16411 	if (dp_srng_init(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0)) {
16412 		dp_init_err("%pK: dp_srng_init failed for reo_reinject_ring", soc);
16413 		goto fail1;
16414 	}
16415 
16416 	wlan_minidump_log(soc->reo_reinject_ring.base_vaddr_unaligned,
16417 			  soc->reo_reinject_ring.alloc_size,
16418 			  soc->ctrl_psoc,
16419 			  WLAN_MD_DP_SRNG_REO_REINJECT,
16420 			  "reo_reinject_ring");
16421 
16422 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc_cfg_ctx);
16423 	/* Rx release ring */
16424 	if (dp_srng_init(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
16425 			 wbm2_sw_rx_rel_ring_id, 0)) {
16426 		dp_init_err("%pK: dp_srng_init failed for rx_rel_ring", soc);
16427 		goto fail1;
16428 	}
16429 
16430 	wlan_minidump_log(soc->rx_rel_ring.base_vaddr_unaligned,
16431 			  soc->rx_rel_ring.alloc_size,
16432 			  soc->ctrl_psoc,
16433 			  WLAN_MD_DP_SRNG_RX_REL,
16434 			  "reo_release_ring");
16435 
16436 	/* Rx exception ring */
16437 	if (dp_srng_init(soc, &soc->reo_exception_ring,
16438 			 REO_EXCEPTION, 0, MAX_REO_DEST_RINGS)) {
16439 		dp_init_err("%pK: dp_srng_init failed - reo_exception", soc);
16440 		goto fail1;
16441 	}
16442 
16443 	wlan_minidump_log(soc->reo_exception_ring.base_vaddr_unaligned,
16444 			  soc->reo_exception_ring.alloc_size,
16445 			  soc->ctrl_psoc,
16446 			  WLAN_MD_DP_SRNG_REO_EXCEPTION,
16447 			  "reo_exception_ring");
16448 
16449 	/* REO command and status rings */
16450 	if (dp_srng_init(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0)) {
16451 		dp_init_err("%pK: dp_srng_init failed for reo_cmd_ring", soc);
16452 		goto fail1;
16453 	}
16454 
16455 	wlan_minidump_log(soc->reo_cmd_ring.base_vaddr_unaligned,
16456 			  soc->reo_cmd_ring.alloc_size,
16457 			  soc->ctrl_psoc,
16458 			  WLAN_MD_DP_SRNG_REO_CMD,
16459 			  "reo_cmd_ring");
16460 
16461 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
16462 	TAILQ_INIT(&soc->rx.reo_cmd_list);
16463 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
16464 
16465 	if (dp_srng_init(soc, &soc->reo_status_ring, REO_STATUS, 0, 0)) {
16466 		dp_init_err("%pK: dp_srng_init failed for reo_status_ring", soc);
16467 		goto fail1;
16468 	}
16469 
16470 	wlan_minidump_log(soc->reo_status_ring.base_vaddr_unaligned,
16471 			  soc->reo_status_ring.alloc_size,
16472 			  soc->ctrl_psoc,
16473 			  WLAN_MD_DP_SRNG_REO_STATUS,
16474 			  "reo_status_ring");
16475 
16476 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
16477 		if (dp_init_tx_ring_pair_by_index(soc, i))
16478 			goto fail1;
16479 	}
16480 
16481 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
16482 		if (dp_init_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
16483 			goto fail1;
16484 
16485 		if (dp_ipa_init_alt_tx_ring(soc))
16486 			goto fail1;
16487 	}
16488 
16489 	dp_create_ext_stats_event(soc);
16490 
16491 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
16492 		/* Initialize REO destination ring */
16493 		if (dp_srng_init(soc, &soc->reo_dest_ring[i], REO_DST, i, 0)) {
16494 			dp_init_err("%pK: dp_srng_init failed for reo_dest_ringn", soc);
16495 			goto fail1;
16496 		}
16497 
16498 		wlan_minidump_log(soc->reo_dest_ring[i].base_vaddr_unaligned,
16499 				  soc->reo_dest_ring[i].alloc_size,
16500 				  soc->ctrl_psoc,
16501 				  WLAN_MD_DP_SRNG_REO_DEST,
16502 				  "reo_dest_ring");
16503 	}
16504 
16505 	if (soc->arch_ops.txrx_soc_srng_init) {
16506 		if (soc->arch_ops.txrx_soc_srng_init(soc)) {
16507 			dp_init_err("%pK: dp_srng_init failed for arch rings",
16508 				    soc);
16509 			goto fail1;
16510 		}
16511 	}
16512 
16513 	return QDF_STATUS_SUCCESS;
16514 fail1:
16515 	/*
16516 	 * Cleanup will be done as part of soc_detach, which will
16517 	 * be called on pdev attach failure
16518 	 */
16519 	dp_soc_srng_deinit(soc);
16520 	return QDF_STATUS_E_FAILURE;
16521 }
16522 
16523 /**
16524  * dp_soc_srng_free() - free soc level srng rings
16525  * @soc: Datapath soc handle
16526  *
16527  */
16528 static void dp_soc_srng_free(struct dp_soc *soc)
16529 {
16530 	uint32_t i;
16531 
16532 	if (soc->arch_ops.txrx_soc_srng_free)
16533 		soc->arch_ops.txrx_soc_srng_free(soc);
16534 
16535 	dp_srng_free(soc, &soc->wbm_desc_rel_ring);
16536 
16537 	for (i = 0; i < soc->num_tcl_data_rings; i++)
16538 		dp_free_tx_ring_pair_by_index(soc, i);
16539 
16540 	/* Free IPA rings for TCL_TX and TCL_COMPL ring */
16541 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
16542 		dp_free_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
16543 		dp_ipa_free_alt_tx_ring(soc);
16544 	}
16545 
16546 	dp_soc_tcl_cmd_cred_srng_free(soc);
16547 	dp_soc_tcl_status_srng_free(soc);
16548 
16549 	for (i = 0; i < soc->num_reo_dest_rings; i++)
16550 		dp_srng_free(soc, &soc->reo_dest_ring[i]);
16551 
16552 	dp_srng_free(soc, &soc->reo_reinject_ring);
16553 	dp_srng_free(soc, &soc->rx_rel_ring);
16554 
16555 	dp_srng_free(soc, &soc->reo_exception_ring);
16556 
16557 	dp_srng_free(soc, &soc->reo_cmd_ring);
16558 	dp_srng_free(soc, &soc->reo_status_ring);
16559 }
16560 
16561 /**
16562  * dp_soc_srng_alloc() - Allocate memory for soc level srng rings
16563  * @soc: Datapath soc handle
16564  *
16565  * return: QDF_STATUS_SUCCESS on success
16566  *	   QDF_STATUS_E_NOMEM on failure
16567  */
16568 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc)
16569 {
16570 	uint32_t entries;
16571 	uint32_t i;
16572 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
16573 	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
16574 	uint32_t tx_comp_ring_size, tx_ring_size, reo_dst_ring_size;
16575 
16576 	soc_cfg_ctx = soc->wlan_cfg_ctx;
16577 
16578 	/* sw2wbm link descriptor release ring */
16579 	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
16580 	if (dp_srng_alloc(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE,
16581 			  entries, 0)) {
16582 		dp_init_err("%pK: dp_srng_alloc failed for wbm_desc_rel_ring", soc);
16583 		goto fail1;
16584 	}
16585 
16586 	/* TCL command and status rings */
16587 	if (dp_soc_tcl_cmd_cred_srng_alloc(soc)) {
16588 		dp_init_err("%pK: dp_srng_alloc failed for tcl_cmd_ring", soc);
16589 		goto fail1;
16590 	}
16591 
16592 	if (dp_soc_tcl_status_srng_alloc(soc)) {
16593 		dp_init_err("%pK: dp_srng_alloc failed for tcl_status_ring", soc);
16594 		goto fail1;
16595 	}
16596 
16597 	/* REO reinjection ring */
16598 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
16599 	if (dp_srng_alloc(soc, &soc->reo_reinject_ring, REO_REINJECT,
16600 			  entries, 0)) {
16601 		dp_init_err("%pK: dp_srng_alloc failed for reo_reinject_ring", soc);
16602 		goto fail1;
16603 	}
16604 
16605 	/* Rx release ring */
16606 	entries = wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx);
16607 	if (dp_srng_alloc(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
16608 			  entries, 0)) {
16609 		dp_init_err("%pK: dp_srng_alloc failed for rx_rel_ring", soc);
16610 		goto fail1;
16611 	}
16612 
16613 	/* Rx exception ring */
16614 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
16615 	if (dp_srng_alloc(soc, &soc->reo_exception_ring, REO_EXCEPTION,
16616 			  entries, 0)) {
16617 		dp_init_err("%pK: dp_srng_alloc failed - reo_exception", soc);
16618 		goto fail1;
16619 	}
16620 
16621 	/* REO command and status rings */
16622 	entries = wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx);
16623 	if (dp_srng_alloc(soc, &soc->reo_cmd_ring, REO_CMD, entries, 0)) {
16624 		dp_init_err("%pK: dp_srng_alloc failed for reo_cmd_ring", soc);
16625 		goto fail1;
16626 	}
16627 
16628 	entries = wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx);
16629 	if (dp_srng_alloc(soc, &soc->reo_status_ring, REO_STATUS,
16630 			  entries, 0)) {
16631 		dp_init_err("%pK: dp_srng_alloc failed for reo_status_ring", soc);
16632 		goto fail1;
16633 	}
16634 
16635 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
16636 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
16637 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc_cfg_ctx);
16638 
16639 	/* Disable cached desc if NSS offload is enabled */
16640 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
16641 		cached = 0;
16642 
16643 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
16644 		if (dp_alloc_tx_ring_pair_by_index(soc, i))
16645 			goto fail1;
16646 	}
16647 
16648 	/* IPA rings for TCL_TX and TX_COMP will be allocated here */
16649 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
16650 		if (dp_alloc_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
16651 			goto fail1;
16652 
16653 		if (dp_ipa_alloc_alt_tx_ring(soc))
16654 			goto fail1;
16655 	}
16656 
16657 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
16658 		/* Setup REO destination ring */
16659 		if (dp_srng_alloc(soc, &soc->reo_dest_ring[i], REO_DST,
16660 				  reo_dst_ring_size, cached)) {
16661 			dp_init_err("%pK: dp_srng_alloc failed for reo_dest_ring", soc);
16662 			goto fail1;
16663 		}
16664 	}
16665 
16666 	if (soc->arch_ops.txrx_soc_srng_alloc) {
16667 		if (soc->arch_ops.txrx_soc_srng_alloc(soc)) {
16668 			dp_init_err("%pK: dp_srng_alloc failed for arch rings",
16669 				    soc);
16670 			goto fail1;
16671 		}
16672 	}
16673 
16674 	return QDF_STATUS_SUCCESS;
16675 
16676 fail1:
16677 	dp_soc_srng_free(soc);
16678 	return QDF_STATUS_E_NOMEM;
16679 }
16680 
/* Log the resolved soc-level configuration for the given target type */
static void dp_soc_cfg_dump(struct dp_soc *soc, uint32_t target_type)
{
	dp_init_info("DP soc Dump for Target = %d", target_type);
	dp_init_info("ast_override_support = %d, da_war_enabled = %d,",
		     soc->ast_override_support, soc->da_war_enabled);

	wlan_cfg_dp_soc_ctx_dump(soc->wlan_cfg_ctx);
}
16689 
16690 /**
16691  * dp_soc_cfg_init() - initialize target specific configuration
16692  *		       during dp_soc_init
16693  * @soc: dp soc handle
16694  */
16695 static void dp_soc_cfg_init(struct dp_soc *soc)
16696 {
16697 	uint32_t target_type;
16698 
16699 	target_type = hal_get_target_type(soc->hal_soc);
16700 	switch (target_type) {
16701 	case TARGET_TYPE_QCA6290:
16702 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
16703 					       REO_DST_RING_SIZE_QCA6290);
16704 		soc->ast_override_support = 1;
16705 		soc->da_war_enabled = false;
16706 		break;
16707 	case TARGET_TYPE_QCA6390:
16708 	case TARGET_TYPE_QCA6490:
16709 	case TARGET_TYPE_QCA6750:
16710 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
16711 					       REO_DST_RING_SIZE_QCA6290);
16712 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
16713 		soc->ast_override_support = 1;
16714 		if (soc->cdp_soc.ol_ops->get_con_mode &&
16715 		    soc->cdp_soc.ol_ops->get_con_mode() ==
16716 		    QDF_GLOBAL_MONITOR_MODE) {
16717 			int int_ctx;
16718 
16719 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
16720 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
16721 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
16722 			}
16723 		}
16724 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
16725 		break;
16726 	case TARGET_TYPE_KIWI:
16727 	case TARGET_TYPE_MANGO:
16728 		soc->ast_override_support = 1;
16729 		soc->per_tid_basize_max_tid = 8;
16730 
16731 		if (soc->cdp_soc.ol_ops->get_con_mode &&
16732 		    soc->cdp_soc.ol_ops->get_con_mode() ==
16733 		    QDF_GLOBAL_MONITOR_MODE) {
16734 			int int_ctx;
16735 
16736 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS;
16737 			     int_ctx++) {
16738 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
16739 				if (dp_is_monitor_mode_using_poll(soc))
16740 					soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
16741 			}
16742 		}
16743 
16744 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
16745 		soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev = 1;
16746 		break;
16747 	case TARGET_TYPE_QCA8074:
16748 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
16749 		soc->da_war_enabled = true;
16750 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
16751 		break;
16752 	case TARGET_TYPE_QCA8074V2:
16753 	case TARGET_TYPE_QCA6018:
16754 	case TARGET_TYPE_QCA9574:
16755 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
16756 		soc->ast_override_support = 1;
16757 		soc->per_tid_basize_max_tid = 8;
16758 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
16759 		soc->da_war_enabled = false;
16760 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
16761 		break;
16762 	case TARGET_TYPE_QCN9000:
16763 		soc->ast_override_support = 1;
16764 		soc->da_war_enabled = false;
16765 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
16766 		soc->per_tid_basize_max_tid = 8;
16767 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
16768 		soc->lmac_polled_mode = 0;
16769 		soc->wbm_release_desc_rx_sg_support = 1;
16770 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
16771 		break;
16772 	case TARGET_TYPE_QCA5018:
16773 	case TARGET_TYPE_QCN6122:
16774 		soc->ast_override_support = 1;
16775 		soc->da_war_enabled = false;
16776 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
16777 		soc->per_tid_basize_max_tid = 8;
16778 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS_11AX;
16779 		soc->disable_mac1_intr = 1;
16780 		soc->disable_mac2_intr = 1;
16781 		soc->wbm_release_desc_rx_sg_support = 1;
16782 		break;
16783 	case TARGET_TYPE_QCN9224:
16784 		soc->ast_override_support = 1;
16785 		soc->da_war_enabled = false;
16786 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
16787 		soc->per_tid_basize_max_tid = 8;
16788 		soc->wbm_release_desc_rx_sg_support = 1;
16789 		soc->rxdma2sw_rings_not_supported = 1;
16790 		soc->wbm_sg_last_msdu_war = 1;
16791 		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
16792 		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
16793 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
16794 		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
16795 		soc->host_ast_db_enable = cfg_get(soc->ctrl_psoc,
16796 						  CFG_DP_HOST_AST_DB_ENABLE);
16797 		break;
16798 	case TARGET_TYPE_QCA5332:
16799 		soc->ast_override_support = 1;
16800 		soc->da_war_enabled = false;
16801 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
16802 		soc->per_tid_basize_max_tid = 8;
16803 		soc->wbm_release_desc_rx_sg_support = 1;
16804 		soc->rxdma2sw_rings_not_supported = 1;
16805 		soc->wbm_sg_last_msdu_war = 1;
16806 		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
16807 		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
16808 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS_5332;
16809 		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
16810 		soc->host_ast_db_enable = cfg_get(soc->ctrl_psoc,
16811 						  CFG_DP_HOST_AST_DB_ENABLE);
16812 		break;
16813 	default:
16814 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
16815 		qdf_assert_always(0);
16816 		break;
16817 	}
16818 	dp_soc_cfg_dump(soc, target_type);
16819 }
16820 
16821 /**
16822  * dp_soc_cfg_attach() - set target specific configuration in
16823  *			 dp soc cfg.
16824  * @soc: dp soc handle
16825  */
16826 static void dp_soc_cfg_attach(struct dp_soc *soc)
16827 {
16828 	int target_type;
16829 	int nss_cfg = 0;
16830 
16831 	target_type = hal_get_target_type(soc->hal_soc);
16832 	switch (target_type) {
16833 	case TARGET_TYPE_QCA6290:
16834 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
16835 					       REO_DST_RING_SIZE_QCA6290);
16836 		break;
16837 	case TARGET_TYPE_QCA6390:
16838 	case TARGET_TYPE_QCA6490:
16839 	case TARGET_TYPE_QCA6750:
16840 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
16841 					       REO_DST_RING_SIZE_QCA6290);
16842 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
16843 		break;
16844 	case TARGET_TYPE_KIWI:
16845 	case TARGET_TYPE_MANGO:
16846 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
16847 		break;
16848 	case TARGET_TYPE_QCA8074:
16849 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
16850 		break;
16851 	case TARGET_TYPE_QCA8074V2:
16852 	case TARGET_TYPE_QCA6018:
16853 	case TARGET_TYPE_QCA9574:
16854 	case TARGET_TYPE_QCN6122:
16855 	case TARGET_TYPE_QCA5018:
16856 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
16857 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
16858 		break;
16859 	case TARGET_TYPE_QCN9000:
16860 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
16861 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
16862 		break;
16863 	case TARGET_TYPE_QCN9224:
16864 	case TARGET_TYPE_QCA5332:
16865 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
16866 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
16867 		break;
16868 	default:
16869 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
16870 		qdf_assert_always(0);
16871 		break;
16872 	}
16873 
16874 	if (soc->cdp_soc.ol_ops->get_soc_nss_cfg)
16875 		nss_cfg = soc->cdp_soc.ol_ops->get_soc_nss_cfg(soc->ctrl_psoc);
16876 
16877 	wlan_cfg_set_dp_soc_nss_cfg(soc->wlan_cfg_ctx, nss_cfg);
16878 
16879 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
16880 		wlan_cfg_set_num_tx_desc_pool(soc->wlan_cfg_ctx, 0);
16881 		wlan_cfg_set_num_tx_ext_desc_pool(soc->wlan_cfg_ctx, 0);
16882 		wlan_cfg_set_num_tx_desc(soc->wlan_cfg_ctx, 0);
16883 		wlan_cfg_set_num_tx_ext_desc(soc->wlan_cfg_ctx, 0);
16884 		soc->init_tcl_cmd_cred_ring = false;
16885 		soc->num_tcl_data_rings =
16886 			wlan_cfg_num_nss_tcl_data_rings(soc->wlan_cfg_ctx);
16887 		soc->num_reo_dest_rings =
16888 			wlan_cfg_num_nss_reo_dest_rings(soc->wlan_cfg_ctx);
16889 
16890 	} else {
16891 		soc->init_tcl_cmd_cred_ring = true;
16892 		soc->num_tx_comp_rings =
16893 			wlan_cfg_num_tx_comp_rings(soc->wlan_cfg_ctx);
16894 		soc->num_tcl_data_rings =
16895 			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
16896 		soc->num_reo_dest_rings =
16897 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
16898 	}
16899 
16900 	soc->arch_ops.soc_cfg_attach(soc);
16901 }
16902 
16903 static inline  void dp_pdev_set_default_reo(struct dp_pdev *pdev)
16904 {
16905 	struct dp_soc *soc = pdev->soc;
16906 
16907 	switch (pdev->pdev_id) {
16908 	case 0:
16909 		pdev->reo_dest =
16910 			wlan_cfg_radio0_default_reo_get(soc->wlan_cfg_ctx);
16911 		break;
16912 
16913 	case 1:
16914 		pdev->reo_dest =
16915 			wlan_cfg_radio1_default_reo_get(soc->wlan_cfg_ctx);
16916 		break;
16917 
16918 	case 2:
16919 		pdev->reo_dest =
16920 			wlan_cfg_radio2_default_reo_get(soc->wlan_cfg_ctx);
16921 		break;
16922 
16923 	default:
16924 		dp_init_err("%pK: Invalid pdev_id %d for reo selection",
16925 			    soc, pdev->pdev_id);
16926 		break;
16927 	}
16928 }
16929 
16930 static QDF_STATUS dp_pdev_init(struct cdp_soc_t *txrx_soc,
16931 				      HTC_HANDLE htc_handle,
16932 				      qdf_device_t qdf_osdev,
16933 				      uint8_t pdev_id)
16934 {
16935 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
16936 	int nss_cfg;
16937 	void *sojourn_buf;
16938 	QDF_STATUS ret;
16939 
16940 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
16941 	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
16942 
16943 	soc_cfg_ctx = soc->wlan_cfg_ctx;
16944 	pdev->soc = soc;
16945 	pdev->pdev_id = pdev_id;
16946 
16947 	/*
16948 	 * Variable to prevent double pdev deinitialization during
16949 	 * radio detach execution .i.e. in the absence of any vdev.
16950 	 */
16951 	pdev->pdev_deinit = 0;
16952 
16953 	if (dp_wdi_event_attach(pdev)) {
16954 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
16955 			  "dp_wdi_evet_attach failed");
16956 		goto fail0;
16957 	}
16958 
16959 	if (dp_pdev_srng_init(pdev)) {
16960 		dp_init_err("%pK: Failed to initialize pdev srng rings", soc);
16961 		goto fail1;
16962 	}
16963 
16964 	/* Initialize descriptors in TCL Rings used by IPA */
16965 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
16966 		hal_tx_init_data_ring(soc->hal_soc,
16967 				      soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng);
16968 		dp_ipa_hal_tx_init_alt_data_ring(soc);
16969 	}
16970 
16971 	/*
16972 	 * Initialize command/credit ring descriptor
16973 	 * Command/CREDIT ring also used for sending DATA cmds
16974 	 */
16975 	dp_tx_init_cmd_credit_ring(soc);
16976 
16977 	dp_tx_pdev_init(pdev);
16978 
16979 	/*
16980 	 * set nss pdev config based on soc config
16981 	 */
16982 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
16983 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
16984 					 (nss_cfg & (1 << pdev_id)));
16985 	pdev->target_pdev_id =
16986 		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
16987 
16988 	if (soc->preferred_hw_mode == WMI_HOST_HW_MODE_2G_PHYB &&
16989 	    pdev->lmac_id == PHYB_2G_LMAC_ID) {
16990 		pdev->target_pdev_id = PHYB_2G_TARGET_PDEV_ID;
16991 	}
16992 
16993 	/* Reset the cpu ring map if radio is NSS offloaded */
16994 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
16995 		dp_soc_reset_cpu_ring_map(soc);
16996 		dp_soc_reset_intr_mask(soc);
16997 	}
16998 
16999 	/* Reset the cpu ring map if radio is NSS offloaded */
17000 	dp_soc_reset_ipa_vlan_intr_mask(soc);
17001 
17002 	TAILQ_INIT(&pdev->vdev_list);
17003 	qdf_spinlock_create(&pdev->vdev_list_lock);
17004 	pdev->vdev_count = 0;
17005 	pdev->is_lro_hash_configured = 0;
17006 
17007 	qdf_spinlock_create(&pdev->tx_mutex);
17008 	pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_INVALID_LMAC_ID;
17009 	pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_INVALID_LMAC_ID;
17010 	pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_INVALID_LMAC_ID;
17011 
17012 	DP_STATS_INIT(pdev);
17013 
17014 	dp_local_peer_id_pool_init(pdev);
17015 
17016 	dp_dscp_tid_map_setup(pdev);
17017 	dp_pcp_tid_map_setup(pdev);
17018 
17019 	/* set the reo destination during initialization */
17020 	dp_pdev_set_default_reo(pdev);
17021 
17022 	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
17023 
17024 	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
17025 			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
17026 			      TRUE);
17027 
17028 	if (!pdev->sojourn_buf) {
17029 		dp_init_err("%pK: Failed to allocate sojourn buf", soc);
17030 		goto fail2;
17031 	}
17032 	sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
17033 	qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));
17034 
17035 	qdf_event_create(&pdev->fw_peer_stats_event);
17036 	qdf_event_create(&pdev->fw_stats_event);
17037 	qdf_event_create(&pdev->fw_obss_stats_event);
17038 
17039 	pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
17040 
17041 	if (dp_rxdma_ring_setup(soc, pdev)) {
17042 		dp_init_err("%pK: RXDMA ring config failed", soc);
17043 		goto fail3;
17044 	}
17045 
17046 	if (dp_init_ipa_rx_refill_buf_ring(soc, pdev))
17047 		goto fail3;
17048 
17049 	if (dp_ipa_ring_resource_setup(soc, pdev))
17050 		goto fail4;
17051 
17052 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
17053 		dp_init_err("%pK: dp_ipa_uc_attach failed", soc);
17054 		goto fail4;
17055 	}
17056 
17057 	ret = dp_rx_fst_attach(soc, pdev);
17058 	if ((ret != QDF_STATUS_SUCCESS) &&
17059 	    (ret != QDF_STATUS_E_NOSUPPORT)) {
17060 		dp_init_err("%pK: RX Flow Search Table attach failed: pdev %d err %d",
17061 			    soc, pdev_id, ret);
17062 		goto fail5;
17063 	}
17064 
17065 	if (dp_pdev_bkp_stats_attach(pdev) != QDF_STATUS_SUCCESS) {
17066 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
17067 			  FL("dp_pdev_bkp_stats_attach failed"));
17068 		goto fail6;
17069 	}
17070 
17071 	if (dp_monitor_pdev_init(pdev)) {
17072 		dp_init_err("%pK: dp_monitor_pdev_init failed\n", soc);
17073 		goto fail7;
17074 	}
17075 
17076 	/* initialize sw rx descriptors */
17077 	dp_rx_pdev_desc_pool_init(pdev);
17078 	/* allocate buffers and replenish the RxDMA ring */
17079 	dp_rx_pdev_buffers_alloc(pdev);
17080 
17081 	dp_init_tso_stats(pdev);
17082 
17083 	pdev->rx_fast_flag = false;
17084 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
17085 		qdf_dma_mem_stats_read(),
17086 		qdf_heap_mem_stats_read(),
17087 		qdf_skb_total_mem_stats_read());
17088 
17089 	return QDF_STATUS_SUCCESS;
17090 fail7:
17091 	dp_pdev_bkp_stats_detach(pdev);
17092 fail6:
17093 	dp_rx_fst_detach(soc, pdev);
17094 fail5:
17095 	dp_ipa_uc_detach(soc, pdev);
17096 fail4:
17097 	dp_deinit_ipa_rx_refill_buf_ring(soc, pdev);
17098 fail3:
17099 	dp_rxdma_ring_cleanup(soc, pdev);
17100 	qdf_nbuf_free(pdev->sojourn_buf);
17101 fail2:
17102 	qdf_spinlock_destroy(&pdev->tx_mutex);
17103 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
17104 	dp_pdev_srng_deinit(pdev);
17105 fail1:
17106 	dp_wdi_event_detach(pdev);
17107 fail0:
17108 	return QDF_STATUS_E_FAILURE;
17109 }
17110 
17111 /*
17112  * dp_pdev_init_wifi3() - Init txrx pdev
17113  * @htc_handle: HTC handle for host-target interface
17114  * @qdf_osdev: QDF OS device
17115  * @force: Force deinit
17116  *
17117  * Return: QDF_STATUS
17118  */
17119 static QDF_STATUS dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
17120 				     HTC_HANDLE htc_handle,
17121 				     qdf_device_t qdf_osdev,
17122 				     uint8_t pdev_id)
17123 {
17124 	return dp_pdev_init(txrx_soc, htc_handle, qdf_osdev, pdev_id);
17125 }
17126 
17127