xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision 436c73ee609b16309acf59b55716e25add074049)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <wlan_ipa_obj_mgmt_api.h>
21 #include <qdf_types.h>
22 #include <qdf_lock.h>
23 #include <qdf_net_types.h>
24 #include <qdf_lro.h>
25 #include <qdf_module.h>
26 #include <hal_hw_headers.h>
27 #include <hal_api.h>
28 #include <hif.h>
29 #include <htt.h>
30 #include <wdi_event.h>
31 #include <queue.h>
32 #include "dp_types.h"
33 #include "dp_internal.h"
34 #include "dp_tx.h"
35 #include "dp_tx_desc.h"
36 #include "dp_rx.h"
37 #ifdef DP_RATETABLE_SUPPORT
38 #include "dp_ratetable.h"
39 #endif
40 #include <cdp_txrx_handle.h>
41 #include <wlan_cfg.h>
42 #include <wlan_utility.h>
43 #include "cdp_txrx_cmn_struct.h"
44 #include "cdp_txrx_stats_struct.h"
45 #include "cdp_txrx_cmn_reg.h"
46 #include <qdf_util.h>
47 #include "dp_peer.h"
48 #include "htt_stats.h"
49 #include "dp_htt.h"
50 #ifdef WLAN_SUPPORT_RX_FISA
51 #include <wlan_dp_fisa_rx.h>
52 #endif
53 #include "htt_ppdu_stats.h"
54 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
55 #include "cfg_ucfg_api.h"
56 #include <wlan_module_ids.h>
57 
58 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
59 #include "cdp_txrx_flow_ctrl_v2.h"
60 #else
61 
62 static inline void
63 cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
64 {
65 	return;
66 }
67 #endif
68 #ifdef WIFI_MONITOR_SUPPORT
69 #include <dp_mon.h>
70 #endif
71 #include "dp_ipa.h"
72 #ifdef FEATURE_WDS
73 #include "dp_txrx_wds.h"
74 #endif
75 #ifdef WLAN_SUPPORT_MSCS
76 #include "dp_mscs.h"
77 #endif
78 #ifdef WLAN_SUPPORT_MESH_LATENCY
79 #include "dp_mesh_latency.h"
80 #endif
81 #ifdef WLAN_SUPPORT_SCS
82 #include "dp_scs.h"
83 #endif
84 #ifdef ATH_SUPPORT_IQUE
85 #include "dp_txrx_me.h"
86 #endif
87 #if defined(DP_CON_MON)
88 #ifndef REMOVE_PKT_LOG
89 #include <pktlog_ac_api.h>
90 #include <pktlog_ac.h>
91 #endif
92 #endif
93 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
94 #include <wlan_dp_swlm.h>
95 #endif
96 #ifdef CONFIG_SAWF_DEF_QUEUES
97 #include "dp_sawf.h"
98 #endif
99 #ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
100 #include <target_if_dp.h>
101 #endif
102 
103 #ifdef WLAN_FEATURE_STATS_EXT
104 #define INIT_RX_HW_STATS_LOCK(_soc) \
105 	qdf_spinlock_create(&(_soc)->rx_hw_stats_lock)
106 #define DEINIT_RX_HW_STATS_LOCK(_soc) \
107 	qdf_spinlock_destroy(&(_soc)->rx_hw_stats_lock)
108 #else
109 #define INIT_RX_HW_STATS_LOCK(_soc)  /* no op */
110 #define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */
111 #endif
112 
113 #if defined(DP_PEER_EXTENDED_API) || defined(WLAN_DP_PENDING_MEM_FLUSH)
114 #define SET_PEER_REF_CNT_ONE(_peer) \
115 	qdf_atomic_set(&(_peer)->ref_cnt, 1)
116 #else
117 #define SET_PEER_REF_CNT_ONE(_peer)
118 #endif
119 
120 #ifdef WLAN_SYSFS_DP_STATS
121 /* sysfs event wait time for firmware stat request unit milliseconds */
122 #define WLAN_SYSFS_STAT_REQ_WAIT_MS 3000
123 #endif
124 
125 #ifdef QCA_DP_ENABLE_TX_COMP_RING4
126 #define TXCOMP_RING4_NUM 3
127 #else
128 #define TXCOMP_RING4_NUM WBM2SW_TXCOMP_RING4_NUM
129 #endif
130 
131 #ifdef QCA_DP_TX_FW_METADATA_V2
132 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
133 		HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
134 #else
135 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
136 		HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
137 #endif
138 
139 QDF_COMPILE_TIME_ASSERT(max_rx_rings_check,
140 			MAX_REO_DEST_RINGS == CDP_MAX_RX_RINGS);
141 
142 QDF_COMPILE_TIME_ASSERT(max_tx_rings_check,
143 			MAX_TCL_DATA_RINGS == CDP_MAX_TX_COMP_RINGS);
144 
145 #define dp_init_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_INIT, params)
146 #define dp_init_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_INIT, params)
147 #define dp_init_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_INIT, params)
148 #define dp_init_info(params...) \
149 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_INIT, ## params)
150 #define dp_init_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_INIT, params)
151 
152 #define dp_vdev_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_VDEV, params)
153 #define dp_vdev_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_VDEV, params)
154 #define dp_vdev_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_VDEV, params)
155 #define dp_vdev_info(params...) \
156 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_VDEV, ## params)
157 #define dp_vdev_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_VDEV, params)
158 
159 void dp_configure_arch_ops(struct dp_soc *soc);
160 qdf_size_t dp_get_soc_context_size(uint16_t device_id);
161 
162 /*
163  * The max size of cdp_peer_stats_param_t is limited to 16 bytes.
164  * If the buffer size is exceeding this size limit,
165  * dp_txrx_get_peer_stats is to be used instead.
166  */
167 QDF_COMPILE_TIME_ASSERT(cdp_peer_stats_param_t_max_size,
168 			(sizeof(cdp_peer_stats_param_t) <= 16));
169 
170 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
171 /*
172  * If WLAN_CFG_INT_NUM_CONTEXTS is changed, HIF_NUM_INT_CONTEXTS
173  * also should be updated accordingly
174  */
175 QDF_COMPILE_TIME_ASSERT(num_intr_grps,
176 			HIF_NUM_INT_CONTEXTS == WLAN_CFG_INT_NUM_CONTEXTS);
177 
178 /*
179  * HIF_EVENT_HIST_MAX should always be power of 2
180  */
181 QDF_COMPILE_TIME_ASSERT(hif_event_history_size,
182 			(HIF_EVENT_HIST_MAX & (HIF_EVENT_HIST_MAX - 1)) == 0);
183 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
184 
185 /*
186  * If WLAN_CFG_INT_NUM_CONTEXTS is changed,
187  * WLAN_CFG_INT_NUM_CONTEXTS_MAX should also be updated
188  */
189 QDF_COMPILE_TIME_ASSERT(wlan_cfg_num_int_ctxs,
190 			WLAN_CFG_INT_NUM_CONTEXTS_MAX >=
191 			WLAN_CFG_INT_NUM_CONTEXTS);
192 
193 static QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl);
194 static QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl);
195 
196 static void dp_pdev_srng_deinit(struct dp_pdev *pdev);
197 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev);
198 static void dp_pdev_srng_free(struct dp_pdev *pdev);
199 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev);
200 
201 static void dp_soc_srng_deinit(struct dp_soc *soc);
202 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc);
203 static void dp_soc_srng_free(struct dp_soc *soc);
204 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc);
205 
206 static void dp_soc_cfg_init(struct dp_soc *soc);
207 static void dp_soc_cfg_attach(struct dp_soc *soc);
208 
209 static inline
210 QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
211 				struct cdp_pdev_attach_params *params);
212 
213 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id);
214 
215 static QDF_STATUS
216 dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
217 		   HTC_HANDLE htc_handle,
218 		   qdf_device_t qdf_osdev,
219 		   uint8_t pdev_id);
220 
221 static QDF_STATUS
222 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id, int force);
223 
224 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc);
225 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc);
226 
227 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
228 		  struct hif_opaque_softc *hif_handle);
229 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
230 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc,
231 				       uint8_t pdev_id,
232 				       int force);
233 static struct dp_soc *
234 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
235 	      struct cdp_soc_attach_params *params);
236 static inline QDF_STATUS dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl,
237 					      uint8_t vdev_id,
238 					      uint8_t *peer_mac_addr,
239 					      enum cdp_peer_type peer_type);
240 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
241 				       uint8_t vdev_id,
242 				       uint8_t *peer_mac, uint32_t bitmap,
243 				       enum cdp_peer_type peer_type);
244 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
245 				bool unmap_only,
246 				bool mlo_peers_only);
247 #ifdef ENABLE_VERBOSE_DEBUG
248 bool is_dp_verbose_debug_enabled;
249 #endif
250 
251 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
252 static bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
253 static void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
254 			   bool enable);
255 static inline void
256 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
257 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats);
258 static inline void
259 dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
260 #endif
261 
262 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
263 						uint8_t index);
264 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index);
265 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index);
266 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
267 						 uint8_t index);
268 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
269 					    enum hal_ring_type ring_type,
270 					    int ring_num);
271 #ifdef FEATURE_AST
272 void dp_print_mlo_ast_stats(struct dp_soc *soc);
273 #endif
274 
275 #ifdef DP_UMAC_HW_RESET_SUPPORT
276 static QDF_STATUS dp_umac_reset_handle_pre_reset(struct dp_soc *soc);
277 static QDF_STATUS dp_umac_reset_handle_post_reset(struct dp_soc *soc);
278 static QDF_STATUS dp_umac_reset_handle_post_reset_complete(struct dp_soc *soc);
279 #endif
280 
281 #define DP_INTR_POLL_TIMER_MS	5
282 
283 #define MON_VDEV_TIMER_INIT 0x1
284 #define MON_VDEV_TIMER_RUNNING 0x2
285 
286 #define DP_MCS_LENGTH (6*MAX_MCS)
287 
288 #define DP_CURR_FW_STATS_AVAIL 19
289 #define DP_HTT_DBG_EXT_STATS_MAX 256
290 #define DP_MAX_SLEEP_TIME 100
291 #ifndef QCA_WIFI_3_0_EMU
292 #define SUSPEND_DRAIN_WAIT 500
293 #else
294 #define SUSPEND_DRAIN_WAIT 3000
295 #endif
296 
297 #ifdef IPA_OFFLOAD
298 /* Exclude IPA rings from the interrupt context */
299 #define TX_RING_MASK_VAL	0xb
300 #define RX_RING_MASK_VAL	0x7
301 #else
302 #define TX_RING_MASK_VAL	0xF
303 #define RX_RING_MASK_VAL	0xF
304 #endif
305 
306 #define STR_MAXLEN	64
307 
308 #define RNG_ERR		"SRNG setup failed for"
309 
310 /**
311  * default_dscp_tid_map - Default DSCP-TID mapping
312  *
313  * DSCP        TID
314  * 000000      0
315  * 001000      1
316  * 010000      2
317  * 011000      3
318  * 100000      4
319  * 101000      5
320  * 110000      6
321  * 111000      7
322  */
323 static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
324 	0, 0, 0, 0, 0, 0, 0, 0,
325 	1, 1, 1, 1, 1, 1, 1, 1,
326 	2, 2, 2, 2, 2, 2, 2, 2,
327 	3, 3, 3, 3, 3, 3, 3, 3,
328 	4, 4, 4, 4, 4, 4, 4, 4,
329 	5, 5, 5, 5, 5, 5, 5, 5,
330 	6, 6, 6, 6, 6, 6, 6, 6,
331 	7, 7, 7, 7, 7, 7, 7, 7,
332 };
333 
334 /**
335  * default_pcp_tid_map - Default PCP-TID mapping
336  *
337  * PCP     TID
338  * 000      0
339  * 001      1
340  * 010      2
341  * 011      3
342  * 100      4
343  * 101      5
344  * 110      6
345  * 111      7
346  */
347 static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
348 	0, 1, 2, 3, 4, 5, 6, 7,
349 };
350 
351 /**
352  * @brief Cpu to tx ring map
353  */
354 uint8_t
355 dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
356 	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
357 	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
358 	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
359 	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
360 	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
361 #ifdef WLAN_TX_PKT_CAPTURE_ENH
362 	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
363 #endif
364 };
365 
366 qdf_export_symbol(dp_cpu_ring_map);
367 
368 /**
369  * @brief Select the type of statistics
370  */
371 enum dp_stats_type {
372 	STATS_FW = 0,
373 	STATS_HOST = 1,
374 	STATS_TYPE_MAX = 2,
375 };
376 
377 /**
378  * @brief General Firmware statistics options
379  *
380  */
381 enum dp_fw_stats {
382 	TXRX_FW_STATS_INVALID	= -1,
383 };
384 
385 /**
386  * dp_stats_mapping_table - Firmware and Host statistics
387  * currently supported
388  */
389 const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
390 	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
391 	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
392 	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
393 	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
394 	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
395 	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
396 	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
397 	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
398 	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
399 	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
400 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
401 	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
402 	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
403 	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
404 	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
405 	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
406 	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
407 	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
408 	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
409 	/* Last ENUM for HTT FW STATS */
410 	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
411 	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
412 	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
413 	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
414 	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
415 	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
416 	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
417 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
418 	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
419 	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
420 	{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
421 	{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
422 	{TXRX_FW_STATS_INVALID, TXRX_NAPI_STATS},
423 	{TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
424 	{TXRX_FW_STATS_INVALID, TXRX_SOC_FSE_STATS},
425 	{TXRX_FW_STATS_INVALID, TXRX_HAL_REG_WRITE_STATS},
426 	{TXRX_FW_STATS_INVALID, TXRX_SOC_REO_HW_DESC_DUMP},
427 	{TXRX_FW_STATS_INVALID, TXRX_SOC_WBM_IDLE_HPTP_DUMP},
428 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_USAGE_WM_STATS},
429 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT, TXRX_HOST_STATS_INVALID},
430 	{HTT_DBG_EXT_STATS_TX_SOUNDING_INFO, TXRX_HOST_STATS_INVALID}
431 };
432 
433 /* MCL specific functions */
434 #if defined(DP_CON_MON)
435 
436 #ifdef DP_CON_MON_MSI_ENABLED
437 /**
438  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
439  * @soc: pointer to dp_soc handle
440  * @intr_ctx_num: interrupt context number for which mon mask is needed
441  *
442  * For MCL, monitor mode rings are being processed in timer contexts (polled).
443  * This function is returning 0, since in interrupt mode(softirq based RX),
444  * we donot want to process monitor mode rings in a softirq.
445  *
446  * So, in case packet log is enabled for SAP/STA/P2P modes,
447  * regular interrupt processing will not process monitor mode rings. It would be
448  * done in a separate timer context.
449  *
450  * Return: 0
451  */
452 static inline uint32_t
453 dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
454 {
455 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
456 }
457 #else
458 /**
459  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
460  * @soc: pointer to dp_soc handle
461  * @intr_ctx_num: interrupt context number for which mon mask is needed
462  *
463  * For MCL, monitor mode rings are being processed in timer contexts (polled).
464  * This function is returning 0, since in interrupt mode(softirq based RX),
465  * we donot want to process monitor mode rings in a softirq.
466  *
467  * So, in case packet log is enabled for SAP/STA/P2P modes,
468  * regular interrupt processing will not process monitor mode rings. It would be
469  * done in a separate timer context.
470  *
471  * Return: 0
472  */
473 static inline uint32_t
474 dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
475 {
476 	return 0;
477 }
478 #endif
479 
480 #ifdef IPA_OFFLOAD
481 /**
482  * dp_get_num_rx_contexts() - get number of RX contexts
483  * @soc_hdl: cdp opaque soc handle
484  *
485  * Return: number of RX contexts
486  */
487 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
488 {
489 	int num_rx_contexts;
490 	uint32_t reo_ring_map;
491 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
492 
493 	reo_ring_map = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
494 
495 	switch (soc->arch_id) {
496 	case CDP_ARCH_TYPE_BE:
497 		/* 2 REO rings are used for IPA */
498 		reo_ring_map &=  ~(BIT(3) | BIT(7));
499 
500 		break;
501 	case CDP_ARCH_TYPE_LI:
502 		/* 1 REO ring is used for IPA */
503 		reo_ring_map &=  ~BIT(3);
504 		break;
505 	default:
506 		dp_err("unknown arch_id 0x%x", soc->arch_id);
507 		QDF_BUG(0);
508 	}
509 	/*
510 	 * qdf_get_hweight32 prefer over qdf_get_hweight8 in case map is scaled
511 	 * in future
512 	 */
513 	num_rx_contexts = qdf_get_hweight32(reo_ring_map);
514 
515 	return num_rx_contexts;
516 }
517 #else
518 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
519 {
520 	int num_rx_contexts;
521 	uint32_t reo_config;
522 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
523 
524 	reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
525 	/*
526 	 * qdf_get_hweight32 prefer over qdf_get_hweight8 in case map is scaled
527 	 * in future
528 	 */
529 	num_rx_contexts = qdf_get_hweight32(reo_config);
530 
531 	return num_rx_contexts;
532 }
533 #endif
534 
535 #else
536 
537 /**
538  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
539  * @soc: pointer to dp_soc handle
540  * @intr_ctx_num: interrupt context number for which mon mask is needed
541  *
542  * Return: mon mask value
543  */
544 static inline
545 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
546 {
547 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
548 }
549 
550 /**
551  * dp_soc_reset_mon_intr_mask() - reset mon intr mask
552  * @soc: pointer to dp_soc handle
553  *
554  * Return:
555  */
556 void dp_soc_reset_mon_intr_mask(struct dp_soc *soc)
557 {
558 	int i;
559 
560 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
561 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
562 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
563 	}
564 }
565 
566 qdf_export_symbol(dp_soc_reset_mon_intr_mask);
567 
568 /*
569  * dp_service_lmac_rings()- timer to reap lmac rings
570  * @arg: SoC Handle
571  *
572  * Return:
573  *
574  */
575 static void dp_service_lmac_rings(void *arg)
576 {
577 	struct dp_soc *soc = (struct dp_soc *)arg;
578 	int ring = 0, i;
579 	struct dp_pdev *pdev = NULL;
580 	union dp_rx_desc_list_elem_t *desc_list = NULL;
581 	union dp_rx_desc_list_elem_t *tail = NULL;
582 
583 	/* Process LMAC interrupts */
584 	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
585 		int mac_for_pdev = ring;
586 		struct dp_srng *rx_refill_buf_ring;
587 
588 		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
589 		if (!pdev)
590 			continue;
591 
592 		rx_refill_buf_ring = &soc->rx_refill_buf_ring[mac_for_pdev];
593 
594 		dp_monitor_process(soc, NULL, mac_for_pdev,
595 				   QCA_NAPI_BUDGET);
596 
597 		for (i = 0;
598 		     i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
599 			dp_rxdma_err_process(&soc->intr_ctx[i], soc,
600 					     mac_for_pdev,
601 					     QCA_NAPI_BUDGET);
602 
603 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF,
604 						  mac_for_pdev))
605 			dp_rx_buffers_replenish(soc, mac_for_pdev,
606 						rx_refill_buf_ring,
607 						&soc->rx_desc_buf[mac_for_pdev],
608 						0, &desc_list, &tail, false);
609 	}
610 
611 	qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
612 }
613 
614 #endif
615 
616 #ifdef FEATURE_MEC
617 void dp_peer_mec_flush_entries(struct dp_soc *soc)
618 {
619 	unsigned int index;
620 	struct dp_mec_entry *mecentry, *mecentry_next;
621 
622 	TAILQ_HEAD(, dp_mec_entry) free_list;
623 	TAILQ_INIT(&free_list);
624 
625 	if (!soc->mec_hash.mask)
626 		return;
627 
628 	if (!soc->mec_hash.bins)
629 		return;
630 
631 	if (!qdf_atomic_read(&soc->mec_cnt))
632 		return;
633 
634 	qdf_spin_lock_bh(&soc->mec_lock);
635 	for (index = 0; index <= soc->mec_hash.mask; index++) {
636 		if (!TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
637 			TAILQ_FOREACH_SAFE(mecentry, &soc->mec_hash.bins[index],
638 					   hash_list_elem, mecentry_next) {
639 			    dp_peer_mec_detach_entry(soc, mecentry, &free_list);
640 			}
641 		}
642 	}
643 	qdf_spin_unlock_bh(&soc->mec_lock);
644 
645 	dp_peer_mec_free_list(soc, &free_list);
646 }
647 
648 /**
649  * dp_print_mec_entries() - Dump MEC entries in table
650  * @soc: Datapath soc handle
651  *
652  * Return: none
653  */
654 static void dp_print_mec_stats(struct dp_soc *soc)
655 {
656 	int i;
657 	uint32_t index;
658 	struct dp_mec_entry *mecentry = NULL, *mec_list;
659 	uint32_t num_entries = 0;
660 
661 	DP_PRINT_STATS("MEC Stats:");
662 	DP_PRINT_STATS("   Entries Added   = %d", soc->stats.mec.added);
663 	DP_PRINT_STATS("   Entries Deleted = %d", soc->stats.mec.deleted);
664 
665 	if (!qdf_atomic_read(&soc->mec_cnt))
666 		return;
667 
668 	mec_list = qdf_mem_malloc(sizeof(*mecentry) * DP_PEER_MAX_MEC_ENTRY);
669 	if (!mec_list) {
670 		dp_peer_warn("%pK: failed to allocate mec_list", soc);
671 		return;
672 	}
673 
674 	DP_PRINT_STATS("MEC Table:");
675 	for (index = 0; index <= soc->mec_hash.mask; index++) {
676 		qdf_spin_lock_bh(&soc->mec_lock);
677 		if (TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
678 			qdf_spin_unlock_bh(&soc->mec_lock);
679 			continue;
680 		}
681 
682 		TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index],
683 			      hash_list_elem) {
684 			qdf_mem_copy(&mec_list[num_entries], mecentry,
685 				     sizeof(*mecentry));
686 			num_entries++;
687 		}
688 		qdf_spin_unlock_bh(&soc->mec_lock);
689 	}
690 
691 	if (!num_entries) {
692 		qdf_mem_free(mec_list);
693 		return;
694 	}
695 
696 	for (i = 0; i < num_entries; i++) {
697 		DP_PRINT_STATS("%6d mac_addr = " QDF_MAC_ADDR_FMT
698 			       " is_active = %d pdev_id = %d vdev_id = %d",
699 			       i,
700 			       QDF_MAC_ADDR_REF(mec_list[i].mac_addr.raw),
701 			       mec_list[i].is_active,
702 			       mec_list[i].pdev_id,
703 			       mec_list[i].vdev_id);
704 	}
705 	qdf_mem_free(mec_list);
706 }
707 #else
708 static void dp_print_mec_stats(struct dp_soc *soc)
709 {
710 }
711 #endif
712 
713 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
714 				 uint8_t vdev_id,
715 				 uint8_t *peer_mac,
716 				 uint8_t *mac_addr,
717 				 enum cdp_txrx_ast_entry_type type,
718 				 uint32_t flags)
719 {
720 	int ret = -1;
721 	QDF_STATUS status = QDF_STATUS_SUCCESS;
722 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
723 						       peer_mac, 0, vdev_id,
724 						       DP_MOD_ID_CDP);
725 
726 	if (!peer) {
727 		dp_peer_debug("Peer is NULL!");
728 		return ret;
729 	}
730 
731 	status = dp_peer_add_ast((struct dp_soc *)soc_hdl,
732 				 peer,
733 				 mac_addr,
734 				 type,
735 				 flags);
736 	if ((status == QDF_STATUS_SUCCESS) ||
737 	    (status == QDF_STATUS_E_ALREADY) ||
738 	    (status == QDF_STATUS_E_AGAIN))
739 		ret = 0;
740 
741 	dp_hmwds_ast_add_notify(peer, mac_addr,
742 				type, status, false);
743 
744 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
745 
746 	return ret;
747 }
748 
749 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
750 						uint8_t vdev_id,
751 						uint8_t *peer_mac,
752 						uint8_t *wds_macaddr,
753 						uint32_t flags)
754 {
755 	int status = -1;
756 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
757 	struct dp_ast_entry  *ast_entry = NULL;
758 	struct dp_peer *peer;
759 
760 	if (soc->ast_offload_support)
761 		return status;
762 
763 	peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
764 				      peer_mac, 0, vdev_id,
765 				      DP_MOD_ID_CDP);
766 
767 	if (!peer) {
768 		dp_peer_debug("Peer is NULL!");
769 		return status;
770 	}
771 
772 	qdf_spin_lock_bh(&soc->ast_lock);
773 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
774 						    peer->vdev->pdev->pdev_id);
775 
776 	if (ast_entry) {
777 		status = dp_peer_update_ast(soc,
778 					    peer,
779 					    ast_entry, flags);
780 	}
781 	qdf_spin_unlock_bh(&soc->ast_lock);
782 
783 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
784 
785 	return status;
786 }
787 
788 /*
789  * dp_peer_reset_ast_entries() - Deletes all HMWDS entries for a peer
790  * @soc_handle:		Datapath SOC handle
791  * @peer:		DP peer
792  * @arg:		callback argument
793  *
794  * Return: None
795  */
796 static void
797 dp_peer_reset_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
798 {
799 	struct dp_ast_entry *ast_entry = NULL;
800 	struct dp_ast_entry *tmp_ast_entry;
801 
802 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) {
803 		if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
804 		    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
805 			dp_peer_del_ast(soc, ast_entry);
806 	}
807 }
808 
809 /*
810  * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry
811  * @soc_handle:		Datapath SOC handle
812  * @wds_macaddr:	WDS entry MAC Address
813  * @peer_macaddr:	WDS entry MAC Address
814  * @vdev_id:		id of vdev handle
815  * Return: QDF_STATUS
816  */
817 static QDF_STATUS dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
818 					 uint8_t *wds_macaddr,
819 					 uint8_t *peer_mac_addr,
820 					 uint8_t vdev_id)
821 {
822 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
823 	struct dp_ast_entry *ast_entry = NULL;
824 	struct dp_peer *peer;
825 	struct dp_pdev *pdev;
826 	struct dp_vdev *vdev;
827 
828 	if (soc->ast_offload_support)
829 		return QDF_STATUS_E_FAILURE;
830 
831 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
832 
833 	if (!vdev)
834 		return QDF_STATUS_E_FAILURE;
835 
836 	pdev = vdev->pdev;
837 
838 	if (peer_mac_addr) {
839 		peer = dp_peer_find_hash_find(soc, peer_mac_addr,
840 					      0, vdev->vdev_id,
841 					      DP_MOD_ID_CDP);
842 		if (!peer) {
843 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
844 			return QDF_STATUS_E_FAILURE;
845 		}
846 
847 		qdf_spin_lock_bh(&soc->ast_lock);
848 		dp_peer_reset_ast_entries(soc, peer, NULL);
849 		qdf_spin_unlock_bh(&soc->ast_lock);
850 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
851 	} else if (wds_macaddr) {
852 		qdf_spin_lock_bh(&soc->ast_lock);
853 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
854 							    pdev->pdev_id);
855 
856 		if (ast_entry) {
857 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
858 			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
859 				dp_peer_del_ast(soc, ast_entry);
860 		}
861 		qdf_spin_unlock_bh(&soc->ast_lock);
862 	}
863 
864 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
865 	return QDF_STATUS_SUCCESS;
866 }
867 
868 /*
869  * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entry
870  * @soc:		Datapath SOC handle
871  * @vdev_id:		id of vdev object
872  *
873  * Return: QDF_STATUS
874  */
875 static QDF_STATUS
876 dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
877 			     uint8_t vdev_id)
878 {
879 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
880 
881 	if (soc->ast_offload_support)
882 		return QDF_STATUS_SUCCESS;
883 
884 	qdf_spin_lock_bh(&soc->ast_lock);
885 
886 	dp_soc_iterate_peer(soc, dp_peer_reset_ast_entries, NULL,
887 			    DP_MOD_ID_CDP);
888 	qdf_spin_unlock_bh(&soc->ast_lock);
889 
890 	return QDF_STATUS_SUCCESS;
891 }
892 
893 /*
894  * dp_peer_flush_ast_entries() - Delete all wds and hmwds ast entries of a peer
895  * @soc:		Datapath SOC
896  * @peer:		Datapath peer
897  * @arg:		arg to callback
898  *
899  * Return: None
900  */
901 static void
902 dp_peer_flush_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
903 {
904 	struct dp_ast_entry *ase = NULL;
905 	struct dp_ast_entry *temp_ase;
906 
907 	DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
908 		if ((ase->type ==
909 			CDP_TXRX_AST_TYPE_STATIC) ||
910 			(ase->type ==
911 			 CDP_TXRX_AST_TYPE_SELF) ||
912 			(ase->type ==
913 			 CDP_TXRX_AST_TYPE_STA_BSS))
914 			continue;
915 		dp_peer_del_ast(soc, ase);
916 	}
917 }
918 
919 /*
920  * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entry
921  * @soc:		Datapath SOC handle
922  *
923  * Return: None
924  */
925 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
926 {
927 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
928 
929 	qdf_spin_lock_bh(&soc->ast_lock);
930 
931 	dp_soc_iterate_peer(soc, dp_peer_flush_ast_entries, NULL,
932 			    DP_MOD_ID_CDP);
933 
934 	qdf_spin_unlock_bh(&soc->ast_lock);
935 	dp_peer_mec_flush_entries(soc);
936 }
937 
938 #if defined(IPA_WDS_EASYMESH_FEATURE) && defined(FEATURE_AST)
939 /*
940  * dp_peer_send_wds_disconnect() - Send Disconnect event to IPA for each peer
941  * @soc: Datapath SOC
942  * @peer: Datapath peer
943  *
944  * Return: None
945  */
946 static void
947 dp_peer_send_wds_disconnect(struct dp_soc *soc, struct dp_peer *peer)
948 {
949 	struct dp_ast_entry *ase = NULL;
950 	struct dp_ast_entry *temp_ase;
951 
952 	DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
953 		if (ase->type == CDP_TXRX_AST_TYPE_WDS) {
954 			soc->cdp_soc.ol_ops->peer_send_wds_disconnect(soc->ctrl_psoc,
955 								      ase->mac_addr.raw,
956 								      ase->vdev_id);
957 		}
958 	}
959 }
960 #elif defined(FEATURE_AST)
961 static void
962 dp_peer_send_wds_disconnect(struct dp_soc *soc, struct dp_peer *peer)
963 {
964 }
965 #endif
966 
967 /**
968  * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
969  *                                       and return ast entry information
970  *                                       of first ast entry found in the
971  *                                       table with given mac address
972  *
973  * @soc : data path soc handle
974  * @ast_mac_addr : AST entry mac address
975  * @ast_entry_info : ast entry information
976  *
977  * return : true if ast entry found with ast_mac_addr
978  *          false if ast entry not found
979  */
980 static bool dp_peer_get_ast_info_by_soc_wifi3
981 	(struct cdp_soc_t *soc_hdl,
982 	 uint8_t *ast_mac_addr,
983 	 struct cdp_ast_entry_info *ast_entry_info)
984 {
985 	struct dp_ast_entry *ast_entry = NULL;
986 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
987 	struct dp_peer *peer = NULL;
988 
989 	if (soc->ast_offload_support)
990 		return false;
991 
992 	qdf_spin_lock_bh(&soc->ast_lock);
993 
994 	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
995 	if ((!ast_entry) ||
996 	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
997 		qdf_spin_unlock_bh(&soc->ast_lock);
998 		return false;
999 	}
1000 
1001 	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
1002 				     DP_MOD_ID_AST);
1003 	if (!peer) {
1004 		qdf_spin_unlock_bh(&soc->ast_lock);
1005 		return false;
1006 	}
1007 
1008 	ast_entry_info->type = ast_entry->type;
1009 	ast_entry_info->pdev_id = ast_entry->pdev_id;
1010 	ast_entry_info->vdev_id = ast_entry->vdev_id;
1011 	ast_entry_info->peer_id = ast_entry->peer_id;
1012 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
1013 		     &peer->mac_addr.raw[0],
1014 		     QDF_MAC_ADDR_SIZE);
1015 	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
1016 	qdf_spin_unlock_bh(&soc->ast_lock);
1017 	return true;
1018 }
1019 
1020 /**
1021  * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
1022  *                                          and return ast entry information
1023  *                                          if mac address and pdev_id matches
1024  *
1025  * @soc : data path soc handle
1026  * @ast_mac_addr : AST entry mac address
1027  * @pdev_id : pdev_id
1028  * @ast_entry_info : ast entry information
1029  *
1030  * return : true if ast entry found with ast_mac_addr
1031  *          false if ast entry not found
1032  */
1033 static bool dp_peer_get_ast_info_by_pdevid_wifi3
1034 		(struct cdp_soc_t *soc_hdl,
1035 		 uint8_t *ast_mac_addr,
1036 		 uint8_t pdev_id,
1037 		 struct cdp_ast_entry_info *ast_entry_info)
1038 {
1039 	struct dp_ast_entry *ast_entry;
1040 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
1041 	struct dp_peer *peer = NULL;
1042 
1043 	if (soc->ast_offload_support)
1044 		return false;
1045 
1046 	qdf_spin_lock_bh(&soc->ast_lock);
1047 
1048 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr,
1049 						    pdev_id);
1050 
1051 	if ((!ast_entry) ||
1052 	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
1053 		qdf_spin_unlock_bh(&soc->ast_lock);
1054 		return false;
1055 	}
1056 
1057 	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
1058 				     DP_MOD_ID_AST);
1059 	if (!peer) {
1060 		qdf_spin_unlock_bh(&soc->ast_lock);
1061 		return false;
1062 	}
1063 
1064 	ast_entry_info->type = ast_entry->type;
1065 	ast_entry_info->pdev_id = ast_entry->pdev_id;
1066 	ast_entry_info->vdev_id = ast_entry->vdev_id;
1067 	ast_entry_info->peer_id = ast_entry->peer_id;
1068 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
1069 		     &peer->mac_addr.raw[0],
1070 		     QDF_MAC_ADDR_SIZE);
1071 	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
1072 	qdf_spin_unlock_bh(&soc->ast_lock);
1073 	return true;
1074 }
1075 
1076 /**
1077  * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table
1078  *                            with given mac address
1079  *
1080  * @soc : data path soc handle
1081  * @ast_mac_addr : AST entry mac address
1082  * @callback : callback function to called on ast delete response from FW
1083  * @cookie : argument to be passed to callback
1084  *
1085  * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
1086  *          is sent
1087  *          QDF_STATUS_E_INVAL false if ast entry not found
1088  */
1089 static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
1090 					       uint8_t *mac_addr,
1091 					       txrx_ast_free_cb callback,
1092 					       void *cookie)
1093 
1094 {
1095 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1096 	struct dp_ast_entry *ast_entry = NULL;
1097 	txrx_ast_free_cb cb = NULL;
1098 	void *arg = NULL;
1099 
1100 	if (soc->ast_offload_support)
1101 		return -QDF_STATUS_E_INVAL;
1102 
1103 	qdf_spin_lock_bh(&soc->ast_lock);
1104 	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
1105 	if (!ast_entry) {
1106 		qdf_spin_unlock_bh(&soc->ast_lock);
1107 		return -QDF_STATUS_E_INVAL;
1108 	}
1109 
1110 	if (ast_entry->callback) {
1111 		cb = ast_entry->callback;
1112 		arg = ast_entry->cookie;
1113 	}
1114 
1115 	ast_entry->callback = callback;
1116 	ast_entry->cookie = cookie;
1117 
1118 	/*
1119 	 * if delete_in_progress is set AST delete is sent to target
1120 	 * and host is waiting for response should not send delete
1121 	 * again
1122 	 */
1123 	if (!ast_entry->delete_in_progress)
1124 		dp_peer_del_ast(soc, ast_entry);
1125 
1126 	qdf_spin_unlock_bh(&soc->ast_lock);
1127 	if (cb) {
1128 		cb(soc->ctrl_psoc,
1129 		   dp_soc_to_cdp_soc(soc),
1130 		   arg,
1131 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1132 	}
1133 	return QDF_STATUS_SUCCESS;
1134 }
1135 
1136 /**
1137  * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
1138  *                                   table if mac address and pdev_id matches
1139  *
1140  * @soc : data path soc handle
1141  * @ast_mac_addr : AST entry mac address
1142  * @pdev_id : pdev id
1143  * @callback : callback function to called on ast delete response from FW
1144  * @cookie : argument to be passed to callback
1145  *
1146  * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
1147  *          is sent
1148  *          QDF_STATUS_E_INVAL false if ast entry not found
1149  */
1150 
1151 static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
1152 						uint8_t *mac_addr,
1153 						uint8_t pdev_id,
1154 						txrx_ast_free_cb callback,
1155 						void *cookie)
1156 
1157 {
1158 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1159 	struct dp_ast_entry *ast_entry;
1160 	txrx_ast_free_cb cb = NULL;
1161 	void *arg = NULL;
1162 
1163 	if (soc->ast_offload_support)
1164 		return -QDF_STATUS_E_INVAL;
1165 
1166 	qdf_spin_lock_bh(&soc->ast_lock);
1167 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);
1168 
1169 	if (!ast_entry) {
1170 		qdf_spin_unlock_bh(&soc->ast_lock);
1171 		return -QDF_STATUS_E_INVAL;
1172 	}
1173 
1174 	if (ast_entry->callback) {
1175 		cb = ast_entry->callback;
1176 		arg = ast_entry->cookie;
1177 	}
1178 
1179 	ast_entry->callback = callback;
1180 	ast_entry->cookie = cookie;
1181 
1182 	/*
1183 	 * if delete_in_progress is set AST delete is sent to target
1184 	 * and host is waiting for response should not sent delete
1185 	 * again
1186 	 */
1187 	if (!ast_entry->delete_in_progress)
1188 		dp_peer_del_ast(soc, ast_entry);
1189 
1190 	qdf_spin_unlock_bh(&soc->ast_lock);
1191 
1192 	if (cb) {
1193 		cb(soc->ctrl_psoc,
1194 		   dp_soc_to_cdp_soc(soc),
1195 		   arg,
1196 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1197 	}
1198 	return QDF_STATUS_SUCCESS;
1199 }
1200 
1201 /**
1202  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs
1203  * @ring_num: ring num of the ring being queried
1204  * @grp_mask: the grp_mask array for the ring type in question.
1205  *
1206  * The grp_mask array is indexed by group number and the bit fields correspond
1207  * to ring numbers.  We are finding which interrupt group a ring belongs to.
1208  *
1209  * Return: the index in the grp_mask array with the ring number.
1210  * -QDF_STATUS_E_NOENT if no entry is found
1211  */
1212 static int dp_srng_find_ring_in_mask(int ring_num, uint8_t *grp_mask)
1213 {
1214 	int ext_group_num;
1215 	uint8_t mask = 1 << ring_num;
1216 
1217 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
1218 	     ext_group_num++) {
1219 		if (mask & grp_mask[ext_group_num])
1220 			return ext_group_num;
1221 	}
1222 
1223 	return -QDF_STATUS_E_NOENT;
1224 }
1225 
1226 /**
1227  * dp_is_msi_group_number_invalid() - check msi_group_number valid or not
1228  * @msi_group_number: MSI group number.
1229  * @msi_data_count: MSI data count.
1230  *
1231  * Return: true if msi_group_number is invalid.
1232  */
1233 #ifdef WLAN_ONE_MSI_VECTOR
1234 static bool dp_is_msi_group_number_invalid(int msi_group_number,
1235 					   int msi_data_count)
1236 {
1237 	return false;
1238 }
1239 #else
1240 static bool dp_is_msi_group_number_invalid(int msi_group_number,
1241 					   int msi_data_count)
1242 {
1243 	return msi_group_number > msi_data_count;
1244 }
1245 #endif
1246 
1247 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1248 /**
1249  * dp_is_reo_ring_num_in_nf_grp1() - Check if the current reo ring is part of
1250  *				rx_near_full_grp1 mask
1251  * @soc: Datapath SoC Handle
1252  * @ring_num: REO ring number
1253  *
1254  * Return: 1 if the ring_num belongs to reo_nf_grp1,
1255  *	   0, otherwise.
1256  */
1257 static inline int
1258 dp_is_reo_ring_num_in_nf_grp1(struct dp_soc *soc, int ring_num)
1259 {
1260 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_1 & (1 << ring_num));
1261 }
1262 
1263 /**
1264  * dp_is_reo_ring_num_in_nf_grp2() - Check if the current reo ring is part of
1265  *				rx_near_full_grp2 mask
1266  * @soc: Datapath SoC Handle
1267  * @ring_num: REO ring number
1268  *
1269  * Return: 1 if the ring_num belongs to reo_nf_grp2,
1270  *	   0, otherwise.
1271  */
1272 static inline int
1273 dp_is_reo_ring_num_in_nf_grp2(struct dp_soc *soc, int ring_num)
1274 {
1275 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_2 & (1 << ring_num));
1276 }
1277 
1278 /**
1279  * dp_srng_get_near_full_irq_mask() - Get near-full irq mask for a particular
1280  *				ring type and number
1281  * @soc: Datapath SoC handle
1282  * @ring_type: SRNG type
1283  * @ring_num: ring num
1284  *
1285  * Return: near ful irq mask pointer
1286  */
1287 static inline
1288 uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
1289 					enum hal_ring_type ring_type,
1290 					int ring_num)
1291 {
1292 	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
1293 	uint8_t wbm2_sw_rx_rel_ring_id;
1294 	uint8_t *nf_irq_mask = NULL;
1295 
1296 	switch (ring_type) {
1297 	case WBM2SW_RELEASE:
1298 		wbm2_sw_rx_rel_ring_id =
1299 			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
1300 		if (ring_num != wbm2_sw_rx_rel_ring_id) {
1301 			nf_irq_mask = &soc->wlan_cfg_ctx->
1302 					int_tx_ring_near_full_irq_mask[0];
1303 		}
1304 		break;
1305 	case REO_DST:
1306 		if (dp_is_reo_ring_num_in_nf_grp1(soc, ring_num))
1307 			nf_irq_mask =
1308 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_1_mask[0];
1309 		else if (dp_is_reo_ring_num_in_nf_grp2(soc, ring_num))
1310 			nf_irq_mask =
1311 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_2_mask[0];
1312 		else
1313 			qdf_assert(0);
1314 		break;
1315 	default:
1316 		break;
1317 	}
1318 
1319 	return nf_irq_mask;
1320 }
1321 
1322 /**
1323  * dp_srng_set_msi2_ring_params() - Set the msi2 addr/data in the ring params
1324  * @soc: Datapath SoC handle
1325  * @ring_params: srng params handle
1326  * @msi2_addr: MSI2 addr to be set for the SRNG
1327  * @msi2_data: MSI2 data to be set for the SRNG
1328  *
1329  * Return: None
1330  */
1331 static inline
1332 void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
1333 				  struct hal_srng_params *ring_params,
1334 				  qdf_dma_addr_t msi2_addr,
1335 				  uint32_t msi2_data)
1336 {
1337 	ring_params->msi2_addr = msi2_addr;
1338 	ring_params->msi2_data = msi2_data;
1339 }
1340 
1341 /**
1342  * dp_srng_msi2_setup() - Setup MSI2 details for near full IRQ of an SRNG
1343  * @soc: Datapath SoC handle
1344  * @ring_params: ring_params for SRNG
1345  * @ring_type: SENG type
1346  * @ring_num: ring number for the SRNG
1347  * @nf_msi_grp_num: near full msi group number
1348  *
1349  * Return: None
1350  */
1351 static inline void
1352 dp_srng_msi2_setup(struct dp_soc *soc,
1353 		   struct hal_srng_params *ring_params,
1354 		   int ring_type, int ring_num, int nf_msi_grp_num)
1355 {
1356 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
1357 	int msi_data_count, ret;
1358 
1359 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1360 					  &msi_data_count, &msi_data_start,
1361 					  &msi_irq_start);
1362 	if (ret)
1363 		return;
1364 
1365 	if (nf_msi_grp_num < 0) {
1366 		dp_init_info("%pK: ring near full IRQ not part of an ext_group; ring_type: %d,ring_num %d",
1367 			     soc, ring_type, ring_num);
1368 		ring_params->msi2_addr = 0;
1369 		ring_params->msi2_data = 0;
1370 		return;
1371 	}
1372 
1373 	if (dp_is_msi_group_number_invalid(nf_msi_grp_num, msi_data_count)) {
1374 		dp_init_warn("%pK: 2 msi_groups will share an msi for near full IRQ; msi_group_num %d",
1375 			     soc, nf_msi_grp_num);
1376 		QDF_ASSERT(0);
1377 	}
1378 
1379 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
1380 
1381 	ring_params->nf_irq_support = 1;
1382 	ring_params->msi2_addr = addr_low;
1383 	ring_params->msi2_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
1384 	ring_params->msi2_data = (nf_msi_grp_num % msi_data_count)
1385 		+ msi_data_start;
1386 	ring_params->flags |= HAL_SRNG_MSI_INTR;
1387 }
1388 
1389 /* Percentage of ring entries considered as nearly full */
1390 #define DP_NF_HIGH_THRESH_PERCENTAGE	75
1391 /* Percentage of ring entries considered as critically full */
1392 #define DP_NF_CRIT_THRESH_PERCENTAGE	90
1393 /* Percentage of ring entries considered as safe threshold */
1394 #define DP_NF_SAFE_THRESH_PERCENTAGE	50
1395 
1396 /**
1397  * dp_srng_configure_nf_interrupt_thresholds() - Configure the thresholds for
1398  *			near full irq
1399  * @soc: Datapath SoC handle
1400  * @ring_params: ring params for SRNG
1401  * @ring_type: ring type
1402  */
1403 static inline void
1404 dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
1405 					  struct hal_srng_params *ring_params,
1406 					  int ring_type)
1407 {
1408 	if (ring_params->nf_irq_support) {
1409 		ring_params->high_thresh = (ring_params->num_entries *
1410 					    DP_NF_HIGH_THRESH_PERCENTAGE) / 100;
1411 		ring_params->crit_thresh = (ring_params->num_entries *
1412 					    DP_NF_CRIT_THRESH_PERCENTAGE) / 100;
1413 		ring_params->safe_thresh = (ring_params->num_entries *
1414 					    DP_NF_SAFE_THRESH_PERCENTAGE) /100;
1415 	}
1416 }
1417 
1418 /**
1419  * dp_srng_set_nf_thresholds() - Set the near full thresholds to srng data
1420  *			structure from the ring params
1421  * @soc: Datapath SoC handle
1422  * @srng: SRNG handle
1423  * @ring_params: ring params for a SRNG
1424  *
1425  * Return: None
1426  */
1427 static inline void
1428 dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
1429 			  struct hal_srng_params *ring_params)
1430 {
1431 	srng->crit_thresh = ring_params->crit_thresh;
1432 	srng->safe_thresh = ring_params->safe_thresh;
1433 }
1434 
1435 #else
1436 static inline
1437 uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
1438 					enum hal_ring_type ring_type,
1439 					int ring_num)
1440 {
1441 	return NULL;
1442 }
1443 
1444 static inline
1445 void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
1446 				  struct hal_srng_params *ring_params,
1447 				  qdf_dma_addr_t msi2_addr,
1448 				  uint32_t msi2_data)
1449 {
1450 }
1451 
1452 static inline void
1453 dp_srng_msi2_setup(struct dp_soc *soc,
1454 		   struct hal_srng_params *ring_params,
1455 		   int ring_type, int ring_num, int nf_msi_grp_num)
1456 {
1457 }
1458 
1459 static inline void
1460 dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
1461 					  struct hal_srng_params *ring_params,
1462 					  int ring_type)
1463 {
1464 }
1465 
1466 static inline void
1467 dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
1468 			  struct hal_srng_params *ring_params)
1469 {
1470 }
1471 #endif
1472 
1473 static int dp_srng_calculate_msi_group(struct dp_soc *soc,
1474 				       enum hal_ring_type ring_type,
1475 				       int ring_num,
1476 				       int *reg_msi_grp_num,
1477 				       bool nf_irq_support,
1478 				       int *nf_msi_grp_num)
1479 {
1480 	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
1481 	uint8_t *grp_mask, *nf_irq_mask = NULL;
1482 	bool nf_irq_enabled = false;
1483 	uint8_t wbm2_sw_rx_rel_ring_id;
1484 
1485 	switch (ring_type) {
1486 	case WBM2SW_RELEASE:
1487 		wbm2_sw_rx_rel_ring_id =
1488 			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
1489 		if (ring_num == wbm2_sw_rx_rel_ring_id) {
1490 			/* dp_rx_wbm_err_process - soc->rx_rel_ring */
1491 			grp_mask = &cfg_ctx->int_rx_wbm_rel_ring_mask[0];
1492 			ring_num = 0;
1493 		} else { /* dp_tx_comp_handler - soc->tx_comp_ring */
1494 			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
1495 			nf_irq_mask = dp_srng_get_near_full_irq_mask(soc,
1496 								     ring_type,
1497 								     ring_num);
1498 			if (nf_irq_mask)
1499 				nf_irq_enabled = true;
1500 
1501 			/*
1502 			 * Using ring 4 as 4th tx completion ring since ring 3
1503 			 * is Rx error ring
1504 			 */
1505 			if (ring_num == WBM2SW_TXCOMP_RING4_NUM)
1506 				ring_num = TXCOMP_RING4_NUM;
1507 		}
1508 	break;
1509 
1510 	case REO_EXCEPTION:
1511 		/* dp_rx_err_process - &soc->reo_exception_ring */
1512 		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
1513 	break;
1514 
1515 	case REO_DST:
1516 		/* dp_rx_process - soc->reo_dest_ring */
1517 		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
1518 		nf_irq_mask = dp_srng_get_near_full_irq_mask(soc, ring_type,
1519 							     ring_num);
1520 		if (nf_irq_mask)
1521 			nf_irq_enabled = true;
1522 	break;
1523 
1524 	case REO_STATUS:
1525 		/* dp_reo_status_ring_handler - soc->reo_status_ring */
1526 		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
1527 	break;
1528 
1529 	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
1530 	case RXDMA_MONITOR_STATUS:
1531 	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
1532 	case RXDMA_MONITOR_DST:
1533 		/* dp_mon_process */
1534 		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
1535 	break;
1536 	case TX_MONITOR_DST:
1537 		/* dp_tx_mon_process */
1538 		grp_mask = &soc->wlan_cfg_ctx->int_tx_mon_ring_mask[0];
1539 	break;
1540 	case RXDMA_DST:
1541 		/* dp_rxdma_err_process */
1542 		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
1543 	break;
1544 
1545 	case RXDMA_BUF:
1546 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
1547 	break;
1548 
1549 	case RXDMA_MONITOR_BUF:
1550 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
1551 	break;
1552 
1553 	case TX_MONITOR_BUF:
1554 		grp_mask = &soc->wlan_cfg_ctx->int_host2txmon_ring_mask[0];
1555 	break;
1556 
1557 	case TCL_DATA:
1558 	/* CMD_CREDIT_RING is used as command in 8074 and credit in 9000 */
1559 	case TCL_CMD_CREDIT:
1560 	case REO_CMD:
1561 	case SW2WBM_RELEASE:
1562 	case WBM_IDLE_LINK:
1563 		/* normally empty SW_TO_HW rings */
1564 		return -QDF_STATUS_E_NOENT;
1565 	break;
1566 
1567 	case TCL_STATUS:
1568 	case REO_REINJECT:
1569 		/* misc unused rings */
1570 		return -QDF_STATUS_E_NOENT;
1571 	break;
1572 
1573 	case CE_SRC:
1574 	case CE_DST:
1575 	case CE_DST_STATUS:
1576 		/* CE_rings - currently handled by hif */
1577 	default:
1578 		return -QDF_STATUS_E_NOENT;
1579 	break;
1580 	}
1581 
1582 	*reg_msi_grp_num = dp_srng_find_ring_in_mask(ring_num, grp_mask);
1583 
1584 	if (nf_irq_support && nf_irq_enabled) {
1585 		*nf_msi_grp_num = dp_srng_find_ring_in_mask(ring_num,
1586 							    nf_irq_mask);
1587 	}
1588 
1589 	return QDF_STATUS_SUCCESS;
1590 }
1591 
1592 /*
1593  * dp_get_num_msi_available()- API to get number of MSIs available
1594  * @dp_soc: DP soc Handle
1595  * @interrupt_mode: Mode of interrupts
1596  *
1597  * Return: Number of MSIs available or 0 in case of integrated
1598  */
1599 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
1600 static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
1601 {
1602 	return 0;
1603 }
1604 #else
1605 /*
1606  * dp_get_num_msi_available()- API to get number of MSIs available
1607  * @dp_soc: DP soc Handle
1608  * @interrupt_mode: Mode of interrupts
1609  *
1610  * Return: Number of MSIs available or 0 in case of integrated
1611  */
1612 static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
1613 {
1614 	int msi_data_count;
1615 	int msi_data_start;
1616 	int msi_irq_start;
1617 	int ret;
1618 
1619 	if (interrupt_mode == DP_INTR_INTEGRATED) {
1620 		return 0;
1621 	} else if (interrupt_mode == DP_INTR_MSI || interrupt_mode ==
1622 		   DP_INTR_POLL) {
1623 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1624 						  &msi_data_count,
1625 						  &msi_data_start,
1626 						  &msi_irq_start);
1627 		if (ret) {
1628 			qdf_err("Unable to get DP MSI assignment %d",
1629 				interrupt_mode);
1630 			return -EINVAL;
1631 		}
1632 		return msi_data_count;
1633 	}
1634 	qdf_err("Interrupt mode invalid %d", interrupt_mode);
1635 	return -EINVAL;
1636 }
1637 #endif
1638 
1639 static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
1640 			      *ring_params, int ring_type, int ring_num)
1641 {
1642 	int reg_msi_grp_num;
1643 	/*
1644 	 * nf_msi_grp_num needs to be initialized with negative value,
1645 	 * to avoid configuring near-full msi for WBM2SW3 ring
1646 	 */
1647 	int nf_msi_grp_num = -1;
1648 	int msi_data_count;
1649 	int ret;
1650 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
1651 	bool nf_irq_support;
1652 
1653 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1654 					    &msi_data_count, &msi_data_start,
1655 					    &msi_irq_start);
1656 
1657 	if (ret)
1658 		return;
1659 
1660 	nf_irq_support = hal_srng_is_near_full_irq_supported(soc->hal_soc,
1661 							     ring_type,
1662 							     ring_num);
1663 	ret = dp_srng_calculate_msi_group(soc, ring_type, ring_num,
1664 					  &reg_msi_grp_num,
1665 					  nf_irq_support,
1666 					  &nf_msi_grp_num);
1667 	if (ret < 0) {
1668 		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
1669 			     soc, ring_type, ring_num);
1670 		ring_params->msi_addr = 0;
1671 		ring_params->msi_data = 0;
1672 		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
1673 		return;
1674 	}
1675 
1676 	if (reg_msi_grp_num < 0) {
1677 		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
1678 			     soc, ring_type, ring_num);
1679 		ring_params->msi_addr = 0;
1680 		ring_params->msi_data = 0;
1681 		goto configure_msi2;
1682 	}
1683 
1684 	if (dp_is_msi_group_number_invalid(reg_msi_grp_num, msi_data_count)) {
1685 		dp_init_warn("%pK: 2 msi_groups will share an msi; msi_group_num %d",
1686 			     soc, reg_msi_grp_num);
1687 		QDF_ASSERT(0);
1688 	}
1689 
1690 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
1691 
1692 	ring_params->msi_addr = addr_low;
1693 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
1694 	ring_params->msi_data = (reg_msi_grp_num % msi_data_count)
1695 		+ msi_data_start;
1696 	ring_params->flags |= HAL_SRNG_MSI_INTR;
1697 
1698 	dp_debug("ring type %u ring_num %u msi->data %u msi_addr %llx",
1699 		 ring_type, ring_num, ring_params->msi_data,
1700 		 (uint64_t)ring_params->msi_addr);
1701 
1702 configure_msi2:
1703 	if (!nf_irq_support) {
1704 		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
1705 		return;
1706 	}
1707 
1708 	dp_srng_msi2_setup(soc, ring_params, ring_type, ring_num,
1709 			   nf_msi_grp_num);
1710 }
1711 
1712 #ifdef FEATURE_AST
1713 /**
1714  * dp_print_mlo_ast_stats() - Print AST stats for MLO peers
1715  *
1716  * @soc : core DP soc context
1717  *
1718  * Return: void
1719  */
1720 void dp_print_mlo_ast_stats(struct dp_soc *soc)
1721 {
1722 	if (soc->arch_ops.print_mlo_ast_stats)
1723 		soc->arch_ops.print_mlo_ast_stats(soc);
1724 }
1725 
1726 /**
1727  * dp_print_peer_ast_entries() - Dump AST entries of peer
1728  * @soc: Datapath soc handle
1729  * @peer: Datapath peer
1730  * @arg: argument to iterate function
1731  *
1732  * return void
1733  */
1734 void
1735 dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
1736 {
1737 	struct dp_ast_entry *ase, *tmp_ase;
1738 	uint32_t num_entries = 0;
1739 	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
1740 			"NONE", "STATIC", "SELF", "WDS", "HMWDS", "BSS",
1741 			"DA", "HMWDS_SEC", "MLD"};
1742 
1743 	DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
1744 	    DP_PRINT_STATS("%6d mac_addr = "QDF_MAC_ADDR_FMT
1745 		    " peer_mac_addr = "QDF_MAC_ADDR_FMT
1746 		    " peer_id = %u"
1747 		    " type = %s"
1748 		    " next_hop = %d"
1749 		    " is_active = %d"
1750 		    " ast_idx = %d"
1751 		    " ast_hash = %d"
1752 		    " delete_in_progress = %d"
1753 		    " pdev_id = %d"
1754 		    " vdev_id = %d",
1755 		    ++num_entries,
1756 		    QDF_MAC_ADDR_REF(ase->mac_addr.raw),
1757 		    QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1758 		    ase->peer_id,
1759 		    type[ase->type],
1760 		    ase->next_hop,
1761 		    ase->is_active,
1762 		    ase->ast_idx,
1763 		    ase->ast_hash_value,
1764 		    ase->delete_in_progress,
1765 		    ase->pdev_id,
1766 		    ase->vdev_id);
1767 	}
1768 }
1769 
1770 /**
1771  * dp_print_ast_stats() - Dump AST table contents
1772  * @soc: Datapath soc handle
1773  *
1774  * return void
1775  */
1776 void dp_print_ast_stats(struct dp_soc *soc)
1777 {
1778 	DP_PRINT_STATS("AST Stats:");
1779 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
1780 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
1781 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
1782 	DP_PRINT_STATS("	Entries MAP ERR  = %d", soc->stats.ast.map_err);
1783 	DP_PRINT_STATS("	Entries Mismatch ERR  = %d",
1784 		       soc->stats.ast.ast_mismatch);
1785 
1786 	DP_PRINT_STATS("AST Table:");
1787 
1788 	qdf_spin_lock_bh(&soc->ast_lock);
1789 
1790 	dp_soc_iterate_peer(soc, dp_print_peer_ast_entries, NULL,
1791 			    DP_MOD_ID_GENERIC_STATS);
1792 
1793 	qdf_spin_unlock_bh(&soc->ast_lock);
1794 
1795 	dp_print_mlo_ast_stats(soc);
1796 }
1797 #else
1798 void dp_print_ast_stats(struct dp_soc *soc)
1799 {
1800 	DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST");
1801 	return;
1802 }
1803 #endif
1804 
1805 /**
1806  * dp_print_peer_info() - Dump peer info
1807  * @soc: Datapath soc handle
1808  * @peer: Datapath peer handle
1809  * @arg: argument to iter function
1810  *
1811  * return void
1812  */
1813 static void
1814 dp_print_peer_info(struct dp_soc *soc, struct dp_peer *peer, void *arg)
1815 {
1816 	struct dp_txrx_peer *txrx_peer = NULL;
1817 
1818 	txrx_peer = dp_get_txrx_peer(peer);
1819 	if (!txrx_peer)
1820 		return;
1821 
1822 	DP_PRINT_STATS(" peer id = %d"
1823 		       " peer_mac_addr = "QDF_MAC_ADDR_FMT
1824 		       " nawds_enabled = %d"
1825 		       " bss_peer = %d"
1826 		       " wds_enabled = %d"
1827 		       " tx_cap_enabled = %d"
1828 		       " rx_cap_enabled = %d",
1829 		       peer->peer_id,
1830 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1831 		       txrx_peer->nawds_enabled,
1832 		       txrx_peer->bss_peer,
1833 		       txrx_peer->wds_enabled,
1834 		       dp_monitor_is_tx_cap_enabled(peer),
1835 		       dp_monitor_is_rx_cap_enabled(peer));
1836 }
1837 
1838 /**
1839  * dp_print_peer_table() - Dump all Peer stats
1840  * @vdev: Datapath Vdev handle
1841  *
1842  * return void
1843  */
1844 static void dp_print_peer_table(struct dp_vdev *vdev)
1845 {
1846 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
1847 	dp_vdev_iterate_peer(vdev, dp_print_peer_info, NULL,
1848 			     DP_MOD_ID_GENERIC_STATS);
1849 }
1850 
1851 #ifdef WLAN_DP_PER_RING_TYPE_CONFIG
1852 /**
1853  * dp_srng_configure_interrupt_thresholds() - Retrieve interrupt
1854  * threshold values from the wlan_srng_cfg table for each ring type
1855  * @soc: device handle
1856  * @ring_params: per ring specific parameters
1857  * @ring_type: Ring type
1858  * @ring_num: Ring number for a given ring type
1859  *
1860  * Fill the ring params with the interrupt threshold
1861  * configuration parameters available in the per ring type wlan_srng_cfg
1862  * table.
1863  *
1864  * Return: None
1865  */
1866 static void
1867 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
1868 				       struct hal_srng_params *ring_params,
1869 				       int ring_type, int ring_num,
1870 				       int num_entries)
1871 {
1872 	uint8_t wbm2_sw_rx_rel_ring_id;
1873 
1874 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);
1875 
1876 	if (ring_type == REO_DST) {
1877 		ring_params->intr_timer_thres_us =
1878 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1879 		ring_params->intr_batch_cntr_thres_entries =
1880 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1881 	} else if (ring_type == WBM2SW_RELEASE &&
1882 		   (ring_num == wbm2_sw_rx_rel_ring_id)) {
1883 		ring_params->intr_timer_thres_us =
1884 				wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1885 		ring_params->intr_batch_cntr_thres_entries =
1886 				wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1887 	} else {
1888 		ring_params->intr_timer_thres_us =
1889 				soc->wlan_srng_cfg[ring_type].timer_threshold;
1890 		ring_params->intr_batch_cntr_thres_entries =
1891 				soc->wlan_srng_cfg[ring_type].batch_count_threshold;
1892 	}
1893 	ring_params->low_threshold =
1894 			soc->wlan_srng_cfg[ring_type].low_threshold;
1895 	if (ring_params->low_threshold)
1896 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1897 
1898 	dp_srng_configure_nf_interrupt_thresholds(soc, ring_params, ring_type);
1899 }
1900 #else
1901 static void
1902 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
1903 				       struct hal_srng_params *ring_params,
1904 				       int ring_type, int ring_num,
1905 				       int num_entries)
1906 {
1907 	uint8_t wbm2_sw_rx_rel_ring_id;
1908 	bool rx_refill_lt_disable;
1909 
1910 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);
1911 
1912 	if (ring_type == REO_DST) {
1913 		ring_params->intr_timer_thres_us =
1914 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1915 		ring_params->intr_batch_cntr_thres_entries =
1916 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1917 	} else if (ring_type == WBM2SW_RELEASE &&
1918 		   (ring_num < wbm2_sw_rx_rel_ring_id ||
1919 		   ring_num == WBM2SW_TXCOMP_RING4_NUM)) {
1920 		ring_params->intr_timer_thres_us =
1921 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
1922 		ring_params->intr_batch_cntr_thres_entries =
1923 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
1924 	} else if (ring_type == RXDMA_BUF) {
1925 		rx_refill_lt_disable =
1926 			wlan_cfg_get_dp_soc_rxdma_refill_lt_disable
1927 							(soc->wlan_cfg_ctx);
1928 		ring_params->intr_timer_thres_us =
1929 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1930 
1931 		if (!rx_refill_lt_disable) {
1932 			ring_params->low_threshold = num_entries >> 3;
1933 			ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1934 			ring_params->intr_batch_cntr_thres_entries = 0;
1935 		}
1936 	} else {
1937 		ring_params->intr_timer_thres_us =
1938 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1939 		ring_params->intr_batch_cntr_thres_entries =
1940 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1941 	}
1942 
1943 	/* These rings donot require interrupt to host. Make them zero */
1944 	switch (ring_type) {
1945 	case REO_REINJECT:
1946 	case REO_CMD:
1947 	case TCL_DATA:
1948 	case TCL_CMD_CREDIT:
1949 	case TCL_STATUS:
1950 	case WBM_IDLE_LINK:
1951 	case SW2WBM_RELEASE:
1952 	case PPE2TCL:
1953 	case SW2RXDMA_NEW:
1954 		ring_params->intr_timer_thres_us = 0;
1955 		ring_params->intr_batch_cntr_thres_entries = 0;
1956 		break;
1957 	}
1958 
1959 	/* Enable low threshold interrupts for rx buffer rings (regular and
1960 	 * monitor buffer rings.
1961 	 * TODO: See if this is required for any other ring
1962 	 */
1963 	if ((ring_type == RXDMA_MONITOR_BUF) ||
1964 	    (ring_type == RXDMA_MONITOR_STATUS ||
1965 	    (ring_type == TX_MONITOR_BUF))) {
1966 		/* TODO: Setting low threshold to 1/8th of ring size
1967 		 * see if this needs to be configurable
1968 		 */
1969 		ring_params->low_threshold = num_entries >> 3;
1970 		ring_params->intr_timer_thres_us =
1971 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1972 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1973 		ring_params->intr_batch_cntr_thres_entries = 0;
1974 	}
1975 
1976 	/* During initialisation monitor rings are only filled with
1977 	 * MON_BUF_MIN_ENTRIES entries. So low threshold needs to be set to
1978 	 * a value less than that. Low threshold value is reconfigured again
1979 	 * to 1/8th of the ring size when monitor vap is created.
1980 	 */
1981 	if (ring_type == RXDMA_MONITOR_BUF)
1982 		ring_params->low_threshold = MON_BUF_MIN_ENTRIES >> 1;
1983 
1984 	/* In case of PCI chipsets, we dont have PPDU end interrupts,
1985 	 * so MONITOR STATUS ring is reaped by receiving MSI from srng.
1986 	 * Keep batch threshold as 8 so that interrupt is received for
1987 	 * every 4 packets in MONITOR_STATUS ring
1988 	 */
1989 	if ((ring_type == RXDMA_MONITOR_STATUS) &&
1990 	    (soc->intr_mode == DP_INTR_MSI))
1991 		ring_params->intr_batch_cntr_thres_entries = 4;
1992 }
1993 #endif
1994 
1995 #ifdef DP_MEM_PRE_ALLOC
1996 
1997 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
1998 			   size_t ctxt_size)
1999 {
2000 	void *ctxt_mem;
2001 
2002 	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_context) {
2003 		dp_warn("dp_prealloc_get_context null!");
2004 		goto dynamic_alloc;
2005 	}
2006 
2007 	ctxt_mem = soc->cdp_soc.ol_ops->dp_prealloc_get_context(ctxt_type,
2008 								ctxt_size);
2009 
2010 	if (ctxt_mem)
2011 		goto end;
2012 
2013 dynamic_alloc:
2014 	dp_info("switch to dynamic-alloc for type %d, size %zu",
2015 		ctxt_type, ctxt_size);
2016 	ctxt_mem = qdf_mem_malloc(ctxt_size);
2017 end:
2018 	return ctxt_mem;
2019 }
2020 
2021 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
2022 			 void *vaddr)
2023 {
2024 	QDF_STATUS status;
2025 
2026 	if (soc->cdp_soc.ol_ops->dp_prealloc_put_context) {
2027 		status = soc->cdp_soc.ol_ops->dp_prealloc_put_context(
2028 								ctxt_type,
2029 								vaddr);
2030 	} else {
2031 		dp_warn("dp_prealloc_put_context null!");
2032 		status = QDF_STATUS_E_NOSUPPORT;
2033 	}
2034 
2035 	if (QDF_IS_STATUS_ERROR(status)) {
2036 		dp_info("Context type %d not pre-allocated", ctxt_type);
2037 		qdf_mem_free(vaddr);
2038 	}
2039 }
2040 
2041 static inline
2042 void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
2043 					   struct dp_srng *srng,
2044 					   uint32_t ring_type)
2045 {
2046 	void *mem;
2047 
2048 	qdf_assert(!srng->is_mem_prealloc);
2049 
2050 	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_consistent) {
2051 		dp_warn("dp_prealloc_get_consistent is null!");
2052 		goto qdf;
2053 	}
2054 
2055 	mem =
2056 		soc->cdp_soc.ol_ops->dp_prealloc_get_consistent
2057 						(&srng->alloc_size,
2058 						 &srng->base_vaddr_unaligned,
2059 						 &srng->base_paddr_unaligned,
2060 						 &srng->base_paddr_aligned,
2061 						 DP_RING_BASE_ALIGN, ring_type);
2062 
2063 	if (mem) {
2064 		srng->is_mem_prealloc = true;
2065 		goto end;
2066 	}
2067 qdf:
2068 	mem =  qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
2069 						&srng->base_vaddr_unaligned,
2070 						&srng->base_paddr_unaligned,
2071 						&srng->base_paddr_aligned,
2072 						DP_RING_BASE_ALIGN);
2073 end:
2074 	dp_info("%s memory %pK dp_srng %pK ring_type %d alloc_size %d num_entries %d",
2075 		srng->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", mem,
2076 		srng, ring_type, srng->alloc_size, srng->num_entries);
2077 	return mem;
2078 }
2079 
2080 static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
2081 					       struct dp_srng *srng)
2082 {
2083 	if (srng->is_mem_prealloc) {
2084 		if (!soc->cdp_soc.ol_ops->dp_prealloc_put_consistent) {
2085 			dp_warn("dp_prealloc_put_consistent is null!");
2086 			QDF_BUG(0);
2087 			return;
2088 		}
2089 		soc->cdp_soc.ol_ops->dp_prealloc_put_consistent
2090 						(srng->alloc_size,
2091 						 srng->base_vaddr_unaligned,
2092 						 srng->base_paddr_unaligned);
2093 
2094 	} else {
2095 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2096 					srng->alloc_size,
2097 					srng->base_vaddr_unaligned,
2098 					srng->base_paddr_unaligned, 0);
2099 	}
2100 }
2101 
2102 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
2103 				   enum dp_desc_type desc_type,
2104 				   struct qdf_mem_multi_page_t *pages,
2105 				   size_t element_size,
2106 				   uint32_t element_num,
2107 				   qdf_dma_context_t memctxt,
2108 				   bool cacheable)
2109 {
2110 	if (!soc->cdp_soc.ol_ops->dp_get_multi_pages) {
2111 		dp_warn("dp_get_multi_pages is null!");
2112 		goto qdf;
2113 	}
2114 
2115 	pages->num_pages = 0;
2116 	pages->is_mem_prealloc = 0;
2117 	soc->cdp_soc.ol_ops->dp_get_multi_pages(desc_type,
2118 						element_size,
2119 						element_num,
2120 						pages,
2121 						cacheable);
2122 	if (pages->num_pages)
2123 		goto end;
2124 
2125 qdf:
2126 	qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size,
2127 				  element_num, memctxt, cacheable);
2128 end:
2129 	dp_info("%s desc_type %d element_size %d element_num %d cacheable %d",
2130 		pages->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc",
2131 		desc_type, (int)element_size, element_num, cacheable);
2132 }
2133 
2134 void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
2135 				  enum dp_desc_type desc_type,
2136 				  struct qdf_mem_multi_page_t *pages,
2137 				  qdf_dma_context_t memctxt,
2138 				  bool cacheable)
2139 {
2140 	if (pages->is_mem_prealloc) {
2141 		if (!soc->cdp_soc.ol_ops->dp_put_multi_pages) {
2142 			dp_warn("dp_put_multi_pages is null!");
2143 			QDF_BUG(0);
2144 			return;
2145 		}
2146 
2147 		soc->cdp_soc.ol_ops->dp_put_multi_pages(desc_type, pages);
2148 		qdf_mem_zero(pages, sizeof(*pages));
2149 	} else {
2150 		qdf_mem_multi_pages_free(soc->osdev, pages,
2151 					 memctxt, cacheable);
2152 	}
2153 }
2154 
2155 #else
2156 
2157 static inline
2158 void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
2159 					   struct dp_srng *srng,
2160 					   uint32_t ring_type)
2161 
2162 {
2163 	void *mem;
2164 
2165 	mem = qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
2166 					       &srng->base_vaddr_unaligned,
2167 					       &srng->base_paddr_unaligned,
2168 					       &srng->base_paddr_aligned,
2169 					       DP_RING_BASE_ALIGN);
2170 	if (mem)
2171 		qdf_mem_set(srng->base_vaddr_unaligned, 0, srng->alloc_size);
2172 
2173 	return mem;
2174 }
2175 
2176 static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
2177 					       struct dp_srng *srng)
2178 {
2179 	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2180 				srng->alloc_size,
2181 				srng->base_vaddr_unaligned,
2182 				srng->base_paddr_unaligned, 0);
2183 }
2184 
2185 #endif /* DP_MEM_PRE_ALLOC */
2186 
2187 #ifdef QCA_SUPPORT_WDS_EXTENDED
2188 static bool dp_vdev_is_wds_ext_enabled(struct dp_vdev *vdev)
2189 {
2190 	return vdev->wds_ext_enabled;
2191 }
2192 #else
2193 static bool dp_vdev_is_wds_ext_enabled(struct dp_vdev *vdev)
2194 {
2195 	return false;
2196 }
2197 #endif
2198 
2199 void dp_pdev_update_fast_rx_flag(struct dp_soc *soc, struct dp_pdev *pdev)
2200 {
2201 	struct dp_vdev *vdev = NULL;
2202 	uint8_t rx_fast_flag = true;
2203 
2204 	if (wlan_cfg_is_rx_flow_tag_enabled(soc->wlan_cfg_ctx)) {
2205 		rx_fast_flag = false;
2206 		goto update_flag;
2207 	}
2208 
2209 	/* Check if protocol tagging enable */
2210 	if (pdev->is_rx_protocol_tagging_enabled) {
2211 		rx_fast_flag = false;
2212 		goto update_flag;
2213 	}
2214 
2215 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
2216 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
2217 		/* Check if any VDEV has NAWDS enabled */
2218 		if (vdev->nawds_enabled) {
2219 			rx_fast_flag = false;
2220 			break;
2221 		}
2222 
2223 		/* Check if any VDEV has multipass enabled */
2224 		if (vdev->multipass_en) {
2225 			rx_fast_flag = false;
2226 			break;
2227 		}
2228 
2229 		/* Check if any VDEV has mesh enabled */
2230 		if (vdev->mesh_vdev) {
2231 			rx_fast_flag = false;
2232 			break;
2233 		}
2234 
2235 		/* Check if any VDEV has WDS ext enabled */
2236 		if (dp_vdev_is_wds_ext_enabled(vdev)) {
2237 			rx_fast_flag = false;
2238 			break;
2239 		}
2240 	}
2241 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
2242 
2243 update_flag:
2244 	dp_init_info("Updated Rx fast flag to %u", rx_fast_flag);
2245 	pdev->rx_fast_flag = rx_fast_flag;
2246 }
2247 
2248 /*
2249  * dp_srng_free() - Free SRNG memory
2250  * @soc  : Data path soc handle
2251  * @srng : SRNG pointer
2252  *
2253  * return: None
2254  */
2255 void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng)
2256 {
2257 	if (srng->alloc_size && srng->base_vaddr_unaligned) {
2258 		if (!srng->cached) {
2259 			dp_srng_mem_free_consistent(soc, srng);
2260 		} else {
2261 			qdf_mem_free(srng->base_vaddr_unaligned);
2262 		}
2263 		srng->alloc_size = 0;
2264 		srng->base_vaddr_unaligned = NULL;
2265 	}
2266 	srng->hal_srng = NULL;
2267 }
2268 
2269 qdf_export_symbol(dp_srng_free);
2270 
2271 #ifdef DISABLE_MON_RING_MSI_CFG
2272 /*
2273  * dp_skip_msi_cfg() - Check if msi cfg has to be skipped for ring_type
2274  * @ring_type: sring type
2275  *
2276  * Return: True if msi cfg should be skipped for srng type else false
2277  */
2278 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
2279 {
2280 	if (ring_type == RXDMA_MONITOR_STATUS)
2281 		return true;
2282 
2283 	return false;
2284 }
2285 #else
2286 #ifdef DP_CON_MON_MSI_ENABLED
2287 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
2288 {
2289 	if (soc->cdp_soc.ol_ops->get_con_mode &&
2290 	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE) {
2291 		if (ring_type == REO_DST || ring_type == RXDMA_DST)
2292 			return true;
2293 	} else if (ring_type == RXDMA_MONITOR_STATUS) {
2294 		return true;
2295 	}
2296 
2297 	return false;
2298 }
2299 #else
2300 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
2301 {
2302 	return false;
2303 }
2304 #endif /* DP_CON_MON_MSI_ENABLED */
2305 #endif /* DISABLE_MON_RING_MSI_CFG */
2306 
2307 #ifdef DP_UMAC_HW_RESET_SUPPORT
2308 static bool dp_check_umac_reset_in_progress(struct dp_soc *soc)
2309 {
2310 	return !!soc->umac_reset_ctx.intr_ctx_bkp;
2311 }
2312 #else
2313 static bool dp_check_umac_reset_in_progress(struct dp_soc *soc)
2314 {
2315 	return false;
2316 }
2317 #endif
2318 
2319 /*
2320  * dp_srng_init() - Initialize SRNG
2321  * @soc  : Data path soc handle
2322  * @srng : SRNG pointer
2323  * @ring_type : Ring Type
2324  * @ring_num: Ring number
2325  * @mac_id: mac_id
2326  *
2327  * return: QDF_STATUS
2328  */
2329 QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng,
2330 			int ring_type, int ring_num, int mac_id)
2331 {
2332 	bool idle_check;
2333 
2334 	hal_soc_handle_t hal_soc = soc->hal_soc;
2335 	struct hal_srng_params ring_params;
2336 
2337 	if (srng->hal_srng) {
2338 		dp_init_err("%pK: Ring type: %d, num:%d is already initialized",
2339 			    soc, ring_type, ring_num);
2340 		return QDF_STATUS_SUCCESS;
2341 	}
2342 
2343 	/* memset the srng ring to zero */
2344 	qdf_mem_zero(srng->base_vaddr_unaligned, srng->alloc_size);
2345 
2346 	qdf_mem_zero(&ring_params, sizeof(struct hal_srng_params));
2347 	ring_params.ring_base_paddr = srng->base_paddr_aligned;
2348 	ring_params.ring_base_vaddr = srng->base_vaddr_aligned;
2349 
2350 	ring_params.num_entries = srng->num_entries;
2351 
2352 	dp_info("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
2353 		ring_type, ring_num,
2354 		(void *)ring_params.ring_base_vaddr,
2355 		(void *)ring_params.ring_base_paddr,
2356 		ring_params.num_entries);
2357 
2358 	if (soc->intr_mode == DP_INTR_MSI && !dp_skip_msi_cfg(soc, ring_type)) {
2359 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
2360 		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
2361 				 ring_type, ring_num);
2362 	} else {
2363 		ring_params.msi_data = 0;
2364 		ring_params.msi_addr = 0;
2365 		dp_srng_set_msi2_ring_params(soc, &ring_params, 0, 0);
2366 		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
2367 				 ring_type, ring_num);
2368 	}
2369 
2370 	dp_srng_configure_interrupt_thresholds(soc, &ring_params,
2371 					       ring_type, ring_num,
2372 					       srng->num_entries);
2373 
2374 	dp_srng_set_nf_thresholds(soc, srng, &ring_params);
2375 
2376 	if (srng->cached)
2377 		ring_params.flags |= HAL_SRNG_CACHED_DESC;
2378 
2379 	idle_check = dp_check_umac_reset_in_progress(soc);
2380 
2381 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
2382 					mac_id, &ring_params, idle_check);
2383 
2384 	if (!srng->hal_srng) {
2385 		dp_srng_free(soc, srng);
2386 		return QDF_STATUS_E_FAILURE;
2387 	}
2388 
2389 	return QDF_STATUS_SUCCESS;
2390 }
2391 
2392 qdf_export_symbol(dp_srng_init);
2393 
2394 /*
2395  * dp_srng_alloc() - Allocate memory for SRNG
2396  * @soc  : Data path soc handle
2397  * @srng : SRNG pointer
2398  * @ring_type : Ring Type
2399  * @num_entries: Number of entries
2400  * @cached: cached flag variable
2401  *
2402  * return: QDF_STATUS
2403  */
2404 QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
2405 			 int ring_type, uint32_t num_entries,
2406 			 bool cached)
2407 {
2408 	hal_soc_handle_t hal_soc = soc->hal_soc;
2409 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
2410 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
2411 
2412 	if (srng->base_vaddr_unaligned) {
2413 		dp_init_err("%pK: Ring type: %d, is already allocated",
2414 			    soc, ring_type);
2415 		return QDF_STATUS_SUCCESS;
2416 	}
2417 
2418 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
2419 	srng->hal_srng = NULL;
2420 	srng->alloc_size = num_entries * entry_size;
2421 	srng->num_entries = num_entries;
2422 	srng->cached = cached;
2423 
2424 	if (!cached) {
2425 		srng->base_vaddr_aligned =
2426 		    dp_srng_aligned_mem_alloc_consistent(soc,
2427 							 srng,
2428 							 ring_type);
2429 	} else {
2430 		srng->base_vaddr_aligned = qdf_aligned_malloc(
2431 					&srng->alloc_size,
2432 					&srng->base_vaddr_unaligned,
2433 					&srng->base_paddr_unaligned,
2434 					&srng->base_paddr_aligned,
2435 					DP_RING_BASE_ALIGN);
2436 	}
2437 
2438 	if (!srng->base_vaddr_aligned)
2439 		return QDF_STATUS_E_NOMEM;
2440 
2441 	return QDF_STATUS_SUCCESS;
2442 }
2443 
2444 qdf_export_symbol(dp_srng_alloc);
2445 
2446 /*
2447  * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
2448  * @soc: DP SOC handle
2449  * @srng: source ring structure
2450  * @ring_type: type of ring
2451  * @ring_num: ring number
2452  *
2453  * Return: None
2454  */
2455 void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
2456 		    int ring_type, int ring_num)
2457 {
2458 	if (!srng->hal_srng) {
2459 		dp_init_err("%pK: Ring type: %d, num:%d not setup",
2460 			    soc, ring_type, ring_num);
2461 		return;
2462 	}
2463 
2464 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
2465 	srng->hal_srng = NULL;
2466 }
2467 
2468 qdf_export_symbol(dp_srng_deinit);
2469 
2470 /* TODO: Need this interface from HIF */
2471 void *hif_get_hal_handle(struct hif_opaque_softc *hif_handle);
2472 
2473 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
2474 int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
2475 			 hal_ring_handle_t hal_ring_hdl)
2476 {
2477 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2478 	uint32_t hp, tp;
2479 	uint8_t ring_id;
2480 
2481 	if (!int_ctx)
2482 		return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
2483 
2484 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
2485 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2486 
2487 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
2488 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_START);
2489 
2490 	return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
2491 }
2492 
2493 void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
2494 			hal_ring_handle_t hal_ring_hdl)
2495 {
2496 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2497 	uint32_t hp, tp;
2498 	uint8_t ring_id;
2499 
2500 	if (!int_ctx)
2501 		return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
2502 
2503 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
2504 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2505 
2506 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
2507 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_END);
2508 
2509 	return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
2510 }
2511 
2512 static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
2513 					      uint8_t hist_group_id)
2514 {
2515 	hif_record_event(dp_soc->hif_handle, hist_group_id,
2516 			 0, 0, 0, HIF_EVENT_TIMER_ENTRY);
2517 }
2518 
2519 static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
2520 					     uint8_t hist_group_id)
2521 {
2522 	hif_record_event(dp_soc->hif_handle, hist_group_id,
2523 			 0, 0, 0, HIF_EVENT_TIMER_EXIT);
2524 }
2525 #else
2526 
2527 static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
2528 					      uint8_t hist_group_id)
2529 {
2530 }
2531 
2532 static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
2533 					     uint8_t hist_group_id)
2534 {
2535 }
2536 
2537 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
2538 
2539 /*
2540  * dp_should_timer_irq_yield() - Decide if the bottom half should yield
2541  * @soc: DP soc handle
2542  * @work_done: work done in softirq context
2543  * @start_time: start time for the softirq
2544  *
2545  * Return: enum with yield code
2546  */
2547 enum timer_yield_status
2548 dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
2549 			  uint64_t start_time)
2550 {
2551 	uint64_t cur_time = qdf_get_log_timestamp();
2552 
2553 	if (!work_done)
2554 		return DP_TIMER_WORK_DONE;
2555 
2556 	if (cur_time - start_time > DP_MAX_TIMER_EXEC_TIME_TICKS)
2557 		return DP_TIMER_TIME_EXHAUST;
2558 
2559 	return DP_TIMER_NO_YIELD;
2560 }
2561 
2562 qdf_export_symbol(dp_should_timer_irq_yield);
2563 
2564 static int dp_process_rxdma_dst_ring(struct dp_soc *soc,
2565 				     struct dp_intr *int_ctx,
2566 				     int mac_for_pdev,
2567 				     int total_budget)
2568 {
2569 	return dp_rxdma_err_process(int_ctx, soc, mac_for_pdev,
2570 				    total_budget);
2571 }
2572 
2573 /**
2574  * dp_process_lmac_rings() - Process LMAC rings
2575  * @int_ctx: interrupt context
2576  * @total_budget: budget of work which can be done
2577  *
2578  * Return: work done
2579  */
2580 static int dp_process_lmac_rings(struct dp_intr *int_ctx, int total_budget)
2581 {
2582 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2583 	struct dp_soc *soc = int_ctx->soc;
2584 	uint32_t remaining_quota = total_budget;
2585 	struct dp_pdev *pdev = NULL;
2586 	uint32_t work_done  = 0;
2587 	int budget = total_budget;
2588 	int ring = 0;
2589 
2590 	/* Process LMAC interrupts */
2591 	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
2592 		int mac_for_pdev = ring;
2593 
2594 		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
2595 		if (!pdev)
2596 			continue;
2597 		if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
2598 			work_done = dp_monitor_process(soc, int_ctx,
2599 						       mac_for_pdev,
2600 						       remaining_quota);
2601 			if (work_done)
2602 				intr_stats->num_rx_mon_ring_masks++;
2603 			budget -= work_done;
2604 			if (budget <= 0)
2605 				goto budget_done;
2606 			remaining_quota = budget;
2607 		}
2608 
2609 		if (int_ctx->tx_mon_ring_mask & (1 << mac_for_pdev)) {
2610 			work_done = dp_tx_mon_process(soc, int_ctx,
2611 						      mac_for_pdev,
2612 						      remaining_quota);
2613 			if (work_done)
2614 				intr_stats->num_tx_mon_ring_masks++;
2615 			budget -= work_done;
2616 			if (budget <= 0)
2617 				goto budget_done;
2618 			remaining_quota = budget;
2619 		}
2620 
2621 		if (int_ctx->rxdma2host_ring_mask &
2622 				(1 << mac_for_pdev)) {
2623 			work_done = dp_process_rxdma_dst_ring(soc, int_ctx,
2624 							      mac_for_pdev,
2625 							      remaining_quota);
2626 			if (work_done)
2627 				intr_stats->num_rxdma2host_ring_masks++;
2628 			budget -=  work_done;
2629 			if (budget <= 0)
2630 				goto budget_done;
2631 			remaining_quota = budget;
2632 		}
2633 
2634 		if (int_ctx->host2rxdma_ring_mask & (1 << mac_for_pdev)) {
2635 			union dp_rx_desc_list_elem_t *desc_list = NULL;
2636 			union dp_rx_desc_list_elem_t *tail = NULL;
2637 			struct dp_srng *rx_refill_buf_ring;
2638 			struct rx_desc_pool *rx_desc_pool;
2639 
2640 			rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
2641 			if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
2642 				rx_refill_buf_ring =
2643 					&soc->rx_refill_buf_ring[mac_for_pdev];
2644 			else
2645 				rx_refill_buf_ring =
2646 					&soc->rx_refill_buf_ring[pdev->lmac_id];
2647 
2648 			intr_stats->num_host2rxdma_ring_masks++;
2649 			dp_rx_buffers_lt_replenish_simple(soc, mac_for_pdev,
2650 							  rx_refill_buf_ring,
2651 							  rx_desc_pool,
2652 							  0,
2653 							  &desc_list,
2654 							  &tail);
2655 		}
2656 
2657 	}
2658 
2659 	if (int_ctx->host2rxdma_mon_ring_mask)
2660 		dp_rx_mon_buf_refill(int_ctx);
2661 
2662 	if (int_ctx->host2txmon_ring_mask)
2663 		dp_tx_mon_buf_refill(int_ctx);
2664 
2665 budget_done:
2666 	return total_budget - budget;
2667 }
2668 
2669 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
2670 /**
2671  * dp_service_near_full_srngs() - Bottom half handler to process the near
2672  *				full IRQ on a SRNG
2673  * @dp_ctx: Datapath SoC handle
2674  * @dp_budget: Number of SRNGs which can be processed in a single attempt
2675  *		without rescheduling
2676  * @cpu: cpu id
2677  *
2678  * Return: remaining budget/quota for the soc device
2679  */
2680 static
2681 uint32_t dp_service_near_full_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
2682 {
2683 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2684 	struct dp_soc *soc = int_ctx->soc;
2685 
2686 	/*
2687 	 * dp_service_near_full_srngs arch ops should be initialized always
2688 	 * if the NEAR FULL IRQ feature is enabled.
2689 	 */
2690 	return soc->arch_ops.dp_service_near_full_srngs(soc, int_ctx,
2691 							dp_budget);
2692 }
2693 #endif
2694 
2695 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2696 
2697 /*
2698  * dp_srng_get_cpu() - Get the smp processor id for srng processing
2699  *
2700  * Return: smp processor id
2701  */
2702 static inline int dp_srng_get_cpu(void)
2703 {
2704 	return smp_processor_id();
2705 }
2706 
2707 /*
2708  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
2709  * @dp_ctx: DP SOC handle
2710  * @budget: Number of frames/descriptors that can be processed in one shot
2711  * @cpu: CPU on which this instance is running
2712  *
2713  * Return: remaining budget/quota for the soc device
2714  */
2715 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
2716 {
2717 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2718 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2719 	struct dp_soc *soc = int_ctx->soc;
2720 	int ring = 0;
2721 	int index;
2722 	uint32_t work_done  = 0;
2723 	int budget = dp_budget;
2724 	uint8_t tx_mask = int_ctx->tx_ring_mask;
2725 	uint8_t rx_mask = int_ctx->rx_ring_mask;
2726 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
2727 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
2728 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
2729 	uint32_t remaining_quota = dp_budget;
2730 
2731 	qdf_atomic_set_bit(cpu, &soc->service_rings_running);
2732 
2733 	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x\n",
2734 			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
2735 			 reo_status_mask,
2736 			 int_ctx->rx_mon_ring_mask,
2737 			 int_ctx->host2rxdma_ring_mask,
2738 			 int_ctx->rxdma2host_ring_mask);
2739 
2740 	/* Process Tx completion interrupts first to return back buffers */
2741 	for (index = 0; index < soc->num_tx_comp_rings; index++) {
2742 		if (!(1 << wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) & tx_mask))
2743 			continue;
2744 		work_done = dp_tx_comp_handler(int_ctx,
2745 					       soc,
2746 					       soc->tx_comp_ring[index].hal_srng,
2747 					       index, remaining_quota);
2748 		if (work_done) {
2749 			intr_stats->num_tx_ring_masks[index]++;
2750 			dp_verbose_debug("tx mask 0x%x index %d, budget %d, work_done %d",
2751 					 tx_mask, index, budget,
2752 					 work_done);
2753 		}
2754 		budget -= work_done;
2755 		if (budget <= 0)
2756 			goto budget_done;
2757 
2758 		remaining_quota = budget;
2759 	}
2760 
2761 	/* Process REO Exception ring interrupt */
2762 	if (rx_err_mask) {
2763 		work_done = dp_rx_err_process(int_ctx, soc,
2764 					      soc->reo_exception_ring.hal_srng,
2765 					      remaining_quota);
2766 
2767 		if (work_done) {
2768 			intr_stats->num_rx_err_ring_masks++;
2769 			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
2770 					 work_done, budget);
2771 		}
2772 
2773 		budget -=  work_done;
2774 		if (budget <= 0) {
2775 			goto budget_done;
2776 		}
2777 		remaining_quota = budget;
2778 	}
2779 
2780 	/* Process Rx WBM release ring interrupt */
2781 	if (rx_wbm_rel_mask) {
2782 		work_done = dp_rx_wbm_err_process(int_ctx, soc,
2783 						  soc->rx_rel_ring.hal_srng,
2784 						  remaining_quota);
2785 
2786 		if (work_done) {
2787 			intr_stats->num_rx_wbm_rel_ring_masks++;
2788 			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
2789 					 work_done, budget);
2790 		}
2791 
2792 		budget -=  work_done;
2793 		if (budget <= 0) {
2794 			goto budget_done;
2795 		}
2796 		remaining_quota = budget;
2797 	}
2798 
2799 	/* Process Rx interrupts */
2800 	if (rx_mask) {
2801 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
2802 			if (!(rx_mask & (1 << ring)))
2803 				continue;
2804 			work_done = soc->arch_ops.dp_rx_process(int_ctx,
2805 						  soc->reo_dest_ring[ring].hal_srng,
2806 						  ring,
2807 						  remaining_quota);
2808 			if (work_done) {
2809 				intr_stats->num_rx_ring_masks[ring]++;
2810 				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
2811 						 rx_mask, ring,
2812 						 work_done, budget);
2813 				budget -=  work_done;
2814 				if (budget <= 0)
2815 					goto budget_done;
2816 				remaining_quota = budget;
2817 			}
2818 		}
2819 	}
2820 
2821 	if (reo_status_mask) {
2822 		if (dp_reo_status_ring_handler(int_ctx, soc))
2823 			int_ctx->intr_stats.num_reo_status_ring_masks++;
2824 	}
2825 
2826 	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
2827 		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
2828 		if (work_done) {
2829 			budget -=  work_done;
2830 			if (budget <= 0)
2831 				goto budget_done;
2832 			remaining_quota = budget;
2833 		}
2834 	}
2835 
2836 	qdf_lro_flush(int_ctx->lro_ctx);
2837 	intr_stats->num_masks++;
2838 
2839 budget_done:
2840 	qdf_atomic_clear_bit(cpu, &soc->service_rings_running);
2841 
2842 	if (soc->notify_fw_callback)
2843 		soc->notify_fw_callback(soc);
2844 
2845 	return dp_budget - budget;
2846 }
2847 
2848 #else /* QCA_HOST_MODE_WIFI_DISABLED */
2849 
2850 /*
2851  * dp_srng_get_cpu() - Get the smp processor id for srng processing
2852  *
2853  * Return: smp processor id
2854  */
2855 static inline int dp_srng_get_cpu(void)
2856 {
2857 	return 0;
2858 }
2859 
2860 /*
2861  * dp_service_srngs() - Top level handler for DP Monitor Ring interrupts
2862  * @dp_ctx: DP SOC handle
2863  * @budget: Number of frames/descriptors that can be processed in one shot
2864  *
2865  * Return: remaining budget/quota for the soc device
2866  */
2867 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
2868 {
2869 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2870 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2871 	struct dp_soc *soc = int_ctx->soc;
2872 	uint32_t remaining_quota = dp_budget;
2873 	uint32_t work_done  = 0;
2874 	int budget = dp_budget;
2875 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
2876 
2877 	if (reo_status_mask) {
2878 		if (dp_reo_status_ring_handler(int_ctx, soc))
2879 			int_ctx->intr_stats.num_reo_status_ring_masks++;
2880 	}
2881 
2882 	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
2883 		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
2884 		if (work_done) {
2885 			budget -=  work_done;
2886 			if (budget <= 0)
2887 				goto budget_done;
2888 			remaining_quota = budget;
2889 		}
2890 	}
2891 
2892 	qdf_lro_flush(int_ctx->lro_ctx);
2893 	intr_stats->num_masks++;
2894 
2895 budget_done:
2896 	return dp_budget - budget;
2897 }
2898 
2899 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2900 
2901 /* dp_interrupt_timer()- timer poll for interrupts
2902  *
2903  * @arg: SoC Handle
2904  *
2905  * Return:
2906  *
2907  */
2908 static void dp_interrupt_timer(void *arg)
2909 {
2910 	struct dp_soc *soc = (struct dp_soc *) arg;
2911 	struct dp_pdev *pdev = soc->pdev_list[0];
2912 	enum timer_yield_status yield = DP_TIMER_NO_YIELD;
2913 	uint32_t work_done  = 0, total_work_done = 0;
2914 	int budget = 0xffff, i;
2915 	uint32_t remaining_quota = budget;
2916 	uint64_t start_time;
2917 	uint32_t lmac_id = DP_MON_INVALID_LMAC_ID;
2918 	uint8_t dp_intr_id = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
2919 	uint32_t lmac_iter;
2920 	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
2921 	enum reg_wifi_band mon_band;
2922 	int cpu = dp_srng_get_cpu();
2923 
2924 	/*
2925 	 * this logic makes all data path interfacing rings (UMAC/LMAC)
2926 	 * and Monitor rings polling mode when NSS offload is disabled
2927 	 */
2928 	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx) &&
2929 	    !wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
2930 		if (qdf_atomic_read(&soc->cmn_init_done)) {
2931 			for (i = 0; i < wlan_cfg_get_num_contexts(
2932 						soc->wlan_cfg_ctx); i++)
2933 				dp_service_srngs(&soc->intr_ctx[i], 0xffff,
2934 						 cpu);
2935 
2936 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
2937 		}
2938 		return;
2939 	}
2940 
2941 	if (!qdf_atomic_read(&soc->cmn_init_done))
2942 		return;
2943 
2944 	if (dp_monitor_is_chan_band_known(pdev)) {
2945 		mon_band = dp_monitor_get_chan_band(pdev);
2946 		lmac_id = pdev->ch_band_lmac_id_mapping[mon_band];
2947 		if (qdf_likely(lmac_id != DP_MON_INVALID_LMAC_ID)) {
2948 			dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id];
2949 			dp_srng_record_timer_entry(soc, dp_intr_id);
2950 		}
2951 	}
2952 
2953 	start_time = qdf_get_log_timestamp();
2954 	dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);
2955 
2956 	while (yield == DP_TIMER_NO_YIELD) {
2957 		for (lmac_iter = 0; lmac_iter < max_mac_rings; lmac_iter++) {
2958 			if (lmac_iter == lmac_id)
2959 				work_done = dp_monitor_process(soc,
2960 						&soc->intr_ctx[dp_intr_id],
2961 						lmac_iter, remaining_quota);
2962 			else
2963 				work_done =
2964 					dp_monitor_drop_packets_for_mac(pdev,
2965 							     lmac_iter,
2966 							     remaining_quota);
2967 			if (work_done) {
2968 				budget -=  work_done;
2969 				if (budget <= 0) {
2970 					yield = DP_TIMER_WORK_EXHAUST;
2971 					goto budget_done;
2972 				}
2973 				remaining_quota = budget;
2974 				total_work_done += work_done;
2975 			}
2976 		}
2977 
2978 		yield = dp_should_timer_irq_yield(soc, total_work_done,
2979 						  start_time);
2980 		total_work_done = 0;
2981 	}
2982 
2983 budget_done:
2984 	if (yield == DP_TIMER_WORK_EXHAUST ||
2985 	    yield == DP_TIMER_TIME_EXHAUST)
2986 		qdf_timer_mod(&soc->int_timer, 1);
2987 	else
2988 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
2989 
2990 	if (lmac_id != DP_MON_INVALID_LMAC_ID)
2991 		dp_srng_record_timer_exit(soc, dp_intr_id);
2992 }
2993 
2994 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
2995 static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
2996 					struct dp_intr *intr_ctx)
2997 {
2998 	if (intr_ctx->rx_mon_ring_mask)
2999 		return true;
3000 
3001 	return false;
3002 }
3003 #else
3004 static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
3005 					struct dp_intr *intr_ctx)
3006 {
3007 	return false;
3008 }
3009 #endif
3010 
3011 /*
3012  * dp_soc_attach_poll() - Register handlers for DP interrupts
3013  * @txrx_soc: DP SOC handle
3014  *
3015  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
3016  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
3017  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
3018  *
3019  * Return: 0 for success, nonzero for failure.
3020  */
3021 static QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc)
3022 {
3023 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3024 	int i;
3025 	int lmac_id = 0;
3026 
3027 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
3028 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
3029 	soc->intr_mode = DP_INTR_POLL;
3030 
3031 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
3032 		soc->intr_ctx[i].dp_intr_id = i;
3033 		soc->intr_ctx[i].tx_ring_mask =
3034 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
3035 		soc->intr_ctx[i].rx_ring_mask =
3036 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
3037 		soc->intr_ctx[i].rx_mon_ring_mask =
3038 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
3039 		soc->intr_ctx[i].rx_err_ring_mask =
3040 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
3041 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
3042 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
3043 		soc->intr_ctx[i].reo_status_ring_mask =
3044 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
3045 		soc->intr_ctx[i].rxdma2host_ring_mask =
3046 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
3047 		soc->intr_ctx[i].soc = soc;
3048 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
3049 
3050 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
3051 			hif_event_history_init(soc->hif_handle, i);
3052 			soc->mon_intr_id_lmac_map[lmac_id] = i;
3053 			lmac_id++;
3054 		}
3055 	}
3056 
3057 	qdf_timer_init(soc->osdev, &soc->int_timer,
3058 			dp_interrupt_timer, (void *)soc,
3059 			QDF_TIMER_TYPE_WAKE_APPS);
3060 
3061 	return QDF_STATUS_SUCCESS;
3062 }
3063 
3064 /**
3065  * dp_soc_set_interrupt_mode() - Set the interrupt mode in soc
3066  * soc: DP soc handle
3067  *
3068  * Set the appropriate interrupt mode flag in the soc
3069  */
3070 static void dp_soc_set_interrupt_mode(struct dp_soc *soc)
3071 {
3072 	uint32_t msi_base_data, msi_vector_start;
3073 	int msi_vector_count, ret;
3074 
3075 	soc->intr_mode = DP_INTR_INTEGRATED;
3076 
3077 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
3078 	    (dp_is_monitor_mode_using_poll(soc) &&
3079 	     soc->cdp_soc.ol_ops->get_con_mode &&
3080 	     soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)) {
3081 		soc->intr_mode = DP_INTR_POLL;
3082 	} else {
3083 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
3084 						  &msi_vector_count,
3085 						  &msi_base_data,
3086 						  &msi_vector_start);
3087 		if (ret)
3088 			return;
3089 
3090 		soc->intr_mode = DP_INTR_MSI;
3091 	}
3092 }
3093 
3094 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc);
3095 #if defined(DP_INTR_POLL_BOTH)
3096 /*
3097  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
3098  * @txrx_soc: DP SOC handle
3099  *
3100  * Call the appropriate attach function based on the mode of operation.
3101  * This is a WAR for enabling monitor mode.
3102  *
3103  * Return: 0 for success. nonzero for failure.
3104  */
3105 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
3106 {
3107 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3108 
3109 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
3110 	    (dp_is_monitor_mode_using_poll(soc) &&
3111 	     soc->cdp_soc.ol_ops->get_con_mode &&
3112 	     soc->cdp_soc.ol_ops->get_con_mode() ==
3113 	     QDF_GLOBAL_MONITOR_MODE)) {
3114 		dp_info("Poll mode");
3115 		return dp_soc_attach_poll(txrx_soc);
3116 	} else {
3117 		dp_info("Interrupt  mode");
3118 		return dp_soc_interrupt_attach(txrx_soc);
3119 	}
3120 }
3121 #else
3122 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
3123 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
3124 {
3125 	return dp_soc_attach_poll(txrx_soc);
3126 }
3127 #else
3128 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
3129 {
3130 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3131 
3132 	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx))
3133 		return dp_soc_attach_poll(txrx_soc);
3134 	else
3135 		return dp_soc_interrupt_attach(txrx_soc);
3136 }
3137 #endif
3138 #endif
3139 
3140 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
3141 /**
3142  * dp_soc_interrupt_map_calculate_wifi3_pci_legacy()
3143  * Calculate interrupt map for legacy interrupts
3144  * @soc: DP soc handle
3145  * @intr_ctx_num: Interrupt context number
3146  * @irq_id_map: IRQ map
3147  * num_irq_r: Number of interrupts assigned for this context
3148  *
3149  * Return: void
3150  */
3151 static void dp_soc_interrupt_map_calculate_wifi3_pci_legacy(struct dp_soc *soc,
3152 							    int intr_ctx_num,
3153 							    int *irq_id_map,
3154 							    int *num_irq_r)
3155 {
3156 	int j;
3157 	int num_irq = 0;
3158 	int tx_mask = wlan_cfg_get_tx_ring_mask(
3159 					soc->wlan_cfg_ctx, intr_ctx_num);
3160 	int rx_mask = wlan_cfg_get_rx_ring_mask(
3161 					soc->wlan_cfg_ctx, intr_ctx_num);
3162 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
3163 					soc->wlan_cfg_ctx, intr_ctx_num);
3164 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
3165 					soc->wlan_cfg_ctx, intr_ctx_num);
3166 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
3167 					soc->wlan_cfg_ctx, intr_ctx_num);
3168 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
3169 					soc->wlan_cfg_ctx, intr_ctx_num);
3170 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
3171 					soc->wlan_cfg_ctx, intr_ctx_num);
3172 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
3173 					soc->wlan_cfg_ctx, intr_ctx_num);
3174 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
3175 					soc->wlan_cfg_ctx, intr_ctx_num);
3176 	soc->intr_mode = DP_INTR_LEGACY_VIRTUAL_IRQ;
3177 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
3178 		if (tx_mask & (1 << j))
3179 			irq_id_map[num_irq++] = (wbm2sw0_release - j);
3180 		if (rx_mask & (1 << j))
3181 			irq_id_map[num_irq++] = (reo2sw1_intr - j);
3182 		if (rx_mon_mask & (1 << j))
3183 			irq_id_map[num_irq++] = (rxmon2sw_p0_dest0 - j);
3184 		if (rx_err_ring_mask & (1 << j))
3185 			irq_id_map[num_irq++] = (reo2sw0_intr - j);
3186 		if (rx_wbm_rel_ring_mask & (1 << j))
3187 			irq_id_map[num_irq++] = (wbm2sw5_release - j);
3188 		if (reo_status_ring_mask & (1 << j))
3189 			irq_id_map[num_irq++] = (reo_status - j);
3190 		if (rxdma2host_ring_mask & (1 << j))
3191 			irq_id_map[num_irq++] = (rxdma2sw_dst_ring0 - j);
3192 		if (host2rxdma_ring_mask & (1 << j))
3193 			irq_id_map[num_irq++] = (sw2rxdma_0 - j);
3194 		if (host2rxdma_mon_ring_mask & (1 << j))
3195 			irq_id_map[num_irq++] = (sw2rxmon_src_ring - j);
3196 	}
3197 	*num_irq_r = num_irq;
3198 }
3199 #else
3200 /**
3201  * dp_soc_interrupt_map_calculate_wifi3_pci_legacy()
3202  * Calculate interrupt map for legacy interrupts
3203  * @soc: DP soc handle
3204  * @intr_ctx_num: Interrupt context number
3205  * @irq_id_map: IRQ map
3206  * num_irq_r: Number of interrupts assigned for this context
3207  *
3208  * Return: void
3209  */
3210 static void dp_soc_interrupt_map_calculate_wifi3_pci_legacy(struct dp_soc *soc,
3211 							    int intr_ctx_num,
3212 							    int *irq_id_map,
3213 							    int *num_irq_r)
3214 {
3215 }
3216 #endif
3217 
3218 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
3219 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
3220 {
3221 	int j;
3222 	int num_irq = 0;
3223 
3224 	int tx_mask =
3225 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
3226 	int rx_mask =
3227 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
3228 	int rx_mon_mask =
3229 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
3230 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
3231 					soc->wlan_cfg_ctx, intr_ctx_num);
3232 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
3233 					soc->wlan_cfg_ctx, intr_ctx_num);
3234 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
3235 					soc->wlan_cfg_ctx, intr_ctx_num);
3236 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
3237 					soc->wlan_cfg_ctx, intr_ctx_num);
3238 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
3239 					soc->wlan_cfg_ctx, intr_ctx_num);
3240 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
3241 					soc->wlan_cfg_ctx, intr_ctx_num);
3242 
3243 	soc->intr_mode = DP_INTR_INTEGRATED;
3244 
3245 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
3246 
3247 		if (tx_mask & (1 << j)) {
3248 			irq_id_map[num_irq++] =
3249 				(wbm2host_tx_completions_ring1 - j);
3250 		}
3251 
3252 		if (rx_mask & (1 << j)) {
3253 			irq_id_map[num_irq++] =
3254 				(reo2host_destination_ring1 - j);
3255 		}
3256 
3257 		if (rxdma2host_ring_mask & (1 << j)) {
3258 			irq_id_map[num_irq++] =
3259 				rxdma2host_destination_ring_mac1 - j;
3260 		}
3261 
3262 		if (host2rxdma_ring_mask & (1 << j)) {
3263 			irq_id_map[num_irq++] =
3264 				host2rxdma_host_buf_ring_mac1 -	j;
3265 		}
3266 
3267 		if (host2rxdma_mon_ring_mask & (1 << j)) {
3268 			irq_id_map[num_irq++] =
3269 				host2rxdma_monitor_ring1 - j;
3270 		}
3271 
3272 		if (rx_mon_mask & (1 << j)) {
3273 			irq_id_map[num_irq++] =
3274 				ppdu_end_interrupts_mac1 - j;
3275 			irq_id_map[num_irq++] =
3276 				rxdma2host_monitor_status_ring_mac1 - j;
3277 			irq_id_map[num_irq++] =
3278 				rxdma2host_monitor_destination_mac1 - j;
3279 		}
3280 
3281 		if (rx_wbm_rel_ring_mask & (1 << j))
3282 			irq_id_map[num_irq++] = wbm2host_rx_release;
3283 
3284 		if (rx_err_ring_mask & (1 << j))
3285 			irq_id_map[num_irq++] = reo2host_exception;
3286 
3287 		if (reo_status_ring_mask & (1 << j))
3288 			irq_id_map[num_irq++] = reo2host_status;
3289 
3290 	}
3291 	*num_irq_r = num_irq;
3292 }
3293 
3294 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
3295 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
3296 		int msi_vector_count, int msi_vector_start)
3297 {
3298 	int tx_mask = wlan_cfg_get_tx_ring_mask(
3299 					soc->wlan_cfg_ctx, intr_ctx_num);
3300 	int rx_mask = wlan_cfg_get_rx_ring_mask(
3301 					soc->wlan_cfg_ctx, intr_ctx_num);
3302 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
3303 					soc->wlan_cfg_ctx, intr_ctx_num);
3304 	int tx_mon_mask = wlan_cfg_get_tx_mon_ring_mask(
3305 					soc->wlan_cfg_ctx, intr_ctx_num);
3306 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
3307 					soc->wlan_cfg_ctx, intr_ctx_num);
3308 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
3309 					soc->wlan_cfg_ctx, intr_ctx_num);
3310 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
3311 					soc->wlan_cfg_ctx, intr_ctx_num);
3312 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
3313 					soc->wlan_cfg_ctx, intr_ctx_num);
3314 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
3315 					soc->wlan_cfg_ctx, intr_ctx_num);
3316 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
3317 					soc->wlan_cfg_ctx, intr_ctx_num);
3318 	int rx_near_full_grp_1_mask =
3319 		wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
3320 						     intr_ctx_num);
3321 	int rx_near_full_grp_2_mask =
3322 		wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
3323 						     intr_ctx_num);
3324 	int tx_ring_near_full_mask =
3325 		wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
3326 						    intr_ctx_num);
3327 
3328 	int host2txmon_ring_mask =
3329 		wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx,
3330 						  intr_ctx_num);
3331 	unsigned int vector =
3332 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
3333 	int num_irq = 0;
3334 
3335 	soc->intr_mode = DP_INTR_MSI;
3336 
3337 	if (tx_mask | rx_mask | rx_mon_mask | tx_mon_mask | rx_err_ring_mask |
3338 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask |
3339 	    host2rxdma_ring_mask | host2rxdma_mon_ring_mask |
3340 	    rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
3341 	    tx_ring_near_full_mask | host2txmon_ring_mask)
3342 		irq_id_map[num_irq++] =
3343 			pld_get_msi_irq(soc->osdev->dev, vector);
3344 
3345 	*num_irq_r = num_irq;
3346 }
3347 
3348 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
3349 				    int *irq_id_map, int *num_irq)
3350 {
3351 	int msi_vector_count, ret;
3352 	uint32_t msi_base_data, msi_vector_start;
3353 
3354 	if (pld_get_enable_intx(soc->osdev->dev)) {
3355 		return dp_soc_interrupt_map_calculate_wifi3_pci_legacy(soc,
3356 				intr_ctx_num, irq_id_map, num_irq);
3357 	}
3358 
3359 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
3360 					    &msi_vector_count,
3361 					    &msi_base_data,
3362 					    &msi_vector_start);
3363 	if (ret)
3364 		return dp_soc_interrupt_map_calculate_integrated(soc,
3365 				intr_ctx_num, irq_id_map, num_irq);
3366 
3367 	else
3368 		dp_soc_interrupt_map_calculate_msi(soc,
3369 				intr_ctx_num, irq_id_map, num_irq,
3370 				msi_vector_count, msi_vector_start);
3371 }
3372 
3373 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
3374 /**
3375  * dp_soc_near_full_interrupt_attach() - Register handler for DP near fill irq
3376  * @soc: DP soc handle
3377  * @num_irq: IRQ number
3378  * @irq_id_map: IRQ map
3379  * intr_id: interrupt context ID
3380  *
3381  * Return: 0 for success. nonzero for failure.
3382  */
3383 static inline int
3384 dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
3385 				  int irq_id_map[], int intr_id)
3386 {
3387 	return hif_register_ext_group(soc->hif_handle,
3388 				      num_irq, irq_id_map,
3389 				      dp_service_near_full_srngs,
3390 				      &soc->intr_ctx[intr_id], "dp_nf_intr",
3391 				      HIF_EXEC_NAPI_TYPE,
3392 				      QCA_NAPI_DEF_SCALE_BIN_SHIFT);
3393 }
3394 #else
3395 static inline int
3396 dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
3397 				  int *irq_id_map, int intr_id)
3398 {
3399 	return 0;
3400 }
3401 #endif
3402 
3403 #ifdef DP_CON_MON_MSI_SKIP_SET
3404 static inline bool dp_skip_rx_mon_ring_mask_set(struct dp_soc *soc)
3405 {
3406 	return !!(soc->cdp_soc.ol_ops->get_con_mode() !=
3407 			QDF_GLOBAL_MONITOR_MODE);
3408 }
3409 #else
3410 static inline bool dp_skip_rx_mon_ring_mask_set(struct dp_soc *soc)
3411 {
3412 	return false;
3413 }
3414 #endif
3415 
3416 /*
3417  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
3418  * @txrx_soc: DP SOC handle
3419  *
3420  * Return: none
3421  */
3422 static void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
3423 {
3424 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3425 	int i;
3426 
3427 	if (soc->intr_mode == DP_INTR_POLL) {
3428 		qdf_timer_free(&soc->int_timer);
3429 	} else {
3430 		hif_deconfigure_ext_group_interrupts(soc->hif_handle);
3431 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
3432 		hif_deregister_exec_group(soc->hif_handle, "dp_nf_intr");
3433 	}
3434 
3435 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
3436 		soc->intr_ctx[i].tx_ring_mask = 0;
3437 		soc->intr_ctx[i].rx_ring_mask = 0;
3438 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
3439 		soc->intr_ctx[i].rx_err_ring_mask = 0;
3440 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
3441 		soc->intr_ctx[i].reo_status_ring_mask = 0;
3442 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
3443 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
3444 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
3445 		soc->intr_ctx[i].rx_near_full_grp_1_mask = 0;
3446 		soc->intr_ctx[i].rx_near_full_grp_2_mask = 0;
3447 		soc->intr_ctx[i].tx_ring_near_full_mask = 0;
3448 		soc->intr_ctx[i].tx_mon_ring_mask = 0;
3449 		soc->intr_ctx[i].host2txmon_ring_mask = 0;
3450 		soc->intr_ctx[i].umac_reset_intr_mask = 0;
3451 
3452 		hif_event_history_deinit(soc->hif_handle, i);
3453 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
3454 	}
3455 
3456 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
3457 		    sizeof(soc->mon_intr_id_lmac_map),
3458 		    DP_MON_INVALID_LMAC_ID);
3459 }
3460 
3461 /*
3462  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
3463  * @txrx_soc: DP SOC handle
3464  *
3465  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
3466  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
3467  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
3468  *
3469  * Return: 0 for success. nonzero for failure.
3470  */
3471 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
3472 {
3473 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3474 
3475 	int i = 0;
3476 	int num_irq = 0;
3477 	int rx_err_ring_intr_ctxt_id = HIF_MAX_GROUP;
3478 	int lmac_id = 0;
3479 	int napi_scale;
3480 
3481 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
3482 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
3483 
3484 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
3485 		int ret = 0;
3486 
3487 		/* Map of IRQ ids registered with one interrupt context */
3488 		int irq_id_map[HIF_MAX_GRP_IRQ];
3489 
3490 		int tx_mask =
3491 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
3492 		int rx_mask =
3493 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
3494 		int rx_mon_mask =
3495 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
3496 		int tx_mon_ring_mask =
3497 			wlan_cfg_get_tx_mon_ring_mask(soc->wlan_cfg_ctx, i);
3498 		int rx_err_ring_mask =
3499 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
3500 		int rx_wbm_rel_ring_mask =
3501 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
3502 		int reo_status_ring_mask =
3503 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
3504 		int rxdma2host_ring_mask =
3505 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
3506 		int host2rxdma_ring_mask =
3507 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
3508 		int host2rxdma_mon_ring_mask =
3509 			wlan_cfg_get_host2rxdma_mon_ring_mask(
3510 				soc->wlan_cfg_ctx, i);
3511 		int rx_near_full_grp_1_mask =
3512 			wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
3513 							     i);
3514 		int rx_near_full_grp_2_mask =
3515 			wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
3516 							     i);
3517 		int tx_ring_near_full_mask =
3518 			wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
3519 							    i);
3520 		int host2txmon_ring_mask =
3521 			wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx, i);
3522 		int umac_reset_intr_mask =
3523 			wlan_cfg_get_umac_reset_intr_mask(soc->wlan_cfg_ctx, i);
3524 
3525 		if (dp_skip_rx_mon_ring_mask_set(soc))
3526 			rx_mon_mask = 0;
3527 
3528 		soc->intr_ctx[i].dp_intr_id = i;
3529 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
3530 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
3531 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
3532 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
3533 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
3534 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
3535 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
3536 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
3537 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
3538 			 host2rxdma_mon_ring_mask;
3539 		soc->intr_ctx[i].rx_near_full_grp_1_mask =
3540 						rx_near_full_grp_1_mask;
3541 		soc->intr_ctx[i].rx_near_full_grp_2_mask =
3542 						rx_near_full_grp_2_mask;
3543 		soc->intr_ctx[i].tx_ring_near_full_mask =
3544 						tx_ring_near_full_mask;
3545 		soc->intr_ctx[i].tx_mon_ring_mask = tx_mon_ring_mask;
3546 		soc->intr_ctx[i].host2txmon_ring_mask = host2txmon_ring_mask;
3547 		soc->intr_ctx[i].umac_reset_intr_mask = umac_reset_intr_mask;
3548 
3549 		soc->intr_ctx[i].soc = soc;
3550 
3551 		num_irq = 0;
3552 
3553 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
3554 					       &num_irq);
3555 
3556 		if (rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
3557 		    tx_ring_near_full_mask) {
3558 			dp_soc_near_full_interrupt_attach(soc, num_irq,
3559 							  irq_id_map, i);
3560 		} else {
3561 			napi_scale = wlan_cfg_get_napi_scale_factor(
3562 							    soc->wlan_cfg_ctx);
3563 			if (!napi_scale)
3564 				napi_scale = QCA_NAPI_DEF_SCALE_BIN_SHIFT;
3565 
3566 			ret = hif_register_ext_group(soc->hif_handle,
3567 				num_irq, irq_id_map, dp_service_srngs,
3568 				&soc->intr_ctx[i], "dp_intr",
3569 				HIF_EXEC_NAPI_TYPE, napi_scale);
3570 		}
3571 
3572 		dp_debug(" int ctx %u num_irq %u irq_id_map %u %u",
3573 			 i, num_irq, irq_id_map[0], irq_id_map[1]);
3574 
3575 		if (ret) {
3576 			dp_init_err("%pK: failed, ret = %d", soc, ret);
3577 			dp_soc_interrupt_detach(txrx_soc);
3578 			return QDF_STATUS_E_FAILURE;
3579 		}
3580 
3581 		hif_event_history_init(soc->hif_handle, i);
3582 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
3583 
3584 		if (rx_err_ring_mask)
3585 			rx_err_ring_intr_ctxt_id = i;
3586 
3587 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
3588 			soc->mon_intr_id_lmac_map[lmac_id] = i;
3589 			lmac_id++;
3590 		}
3591 	}
3592 
3593 	hif_configure_ext_group_interrupts(soc->hif_handle);
3594 	if (rx_err_ring_intr_ctxt_id != HIF_MAX_GROUP)
3595 		hif_config_irq_clear_cpu_affinity(soc->hif_handle,
3596 						  rx_err_ring_intr_ctxt_id, 0);
3597 
3598 	return QDF_STATUS_SUCCESS;
3599 }
3600 
3601 #define AVG_MAX_MPDUS_PER_TID 128
3602 #define AVG_TIDS_PER_CLIENT 2
3603 #define AVG_FLOWS_PER_TID 2
3604 #define AVG_MSDUS_PER_FLOW 128
3605 #define AVG_MSDUS_PER_MPDU 4
3606 
3607 /*
3608  * dp_hw_link_desc_pool_banks_free() - Free h/w link desc pool banks
3609  * @soc: DP SOC handle
3610  * @mac_id: mac id
3611  *
3612  * Return: none
3613  */
3614 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id)
3615 {
3616 	struct qdf_mem_multi_page_t *pages;
3617 
3618 	if (mac_id != WLAN_INVALID_PDEV_ID) {
3619 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3620 	} else {
3621 		pages = &soc->link_desc_pages;
3622 	}
3623 
3624 	if (!pages) {
3625 		dp_err("can not get link desc pages");
3626 		QDF_ASSERT(0);
3627 		return;
3628 	}
3629 
3630 	if (pages->dma_pages) {
3631 		wlan_minidump_remove((void *)
3632 				     pages->dma_pages->page_v_addr_start,
3633 				     pages->num_pages * pages->page_size,
3634 				     soc->ctrl_psoc,
3635 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3636 				     "hw_link_desc_bank");
3637 		dp_desc_multi_pages_mem_free(soc, DP_HW_LINK_DESC_TYPE,
3638 					     pages, 0, false);
3639 	}
3640 }
3641 
3642 qdf_export_symbol(dp_hw_link_desc_pool_banks_free);
3643 
3644 /*
3645  * dp_hw_link_desc_pool_banks_alloc() - Allocate h/w link desc pool banks
3646  * @soc: DP SOC handle
3647  * @mac_id: mac id
3648  *
3649  * Allocates memory pages for link descriptors, the page size is 4K for
3650  * MCL and 2MB for WIN. if the mac_id is invalid link descriptor pages are
3651  * allocated for regular RX/TX and if the there is a proper mac_id link
3652  * descriptors are allocated for RX monitor mode.
3653  *
3654  * Return: QDF_STATUS_SUCCESS: Success
3655  *	   QDF_STATUS_E_FAILURE: Failure
3656  */
3657 QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc, uint32_t mac_id)
3658 {
3659 	hal_soc_handle_t hal_soc = soc->hal_soc;
3660 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
3661 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
3662 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
3663 	uint32_t num_mpdus_per_link_desc = hal_num_mpdus_per_link_desc(hal_soc);
3664 	uint32_t num_msdus_per_link_desc = hal_num_msdus_per_link_desc(hal_soc);
3665 	uint32_t num_mpdu_links_per_queue_desc =
3666 		hal_num_mpdu_links_per_queue_desc(hal_soc);
3667 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
3668 	uint32_t *total_link_descs, total_mem_size;
3669 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
3670 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
3671 	uint32_t num_entries;
3672 	struct qdf_mem_multi_page_t *pages;
3673 	struct dp_srng *dp_srng;
3674 	uint8_t minidump_str[MINIDUMP_STR_SIZE];
3675 
3676 	/* Only Tx queue descriptors are allocated from common link descriptor
3677 	 * pool Rx queue descriptors are not included in this because (REO queue
3678 	 * extension descriptors) they are expected to be allocated contiguously
3679 	 * with REO queue descriptors
3680 	 */
3681 	if (mac_id != WLAN_INVALID_PDEV_ID) {
3682 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3683 		/* dp_monitor_get_link_desc_pages returns NULL only
3684 		 * if monitor SOC is  NULL
3685 		 */
3686 		if (!pages) {
3687 			dp_err("can not get link desc pages");
3688 			QDF_ASSERT(0);
3689 			return QDF_STATUS_E_FAULT;
3690 		}
3691 		dp_srng = &soc->rxdma_mon_desc_ring[mac_id];
3692 		num_entries = dp_srng->alloc_size /
3693 			hal_srng_get_entrysize(soc->hal_soc,
3694 					       RXDMA_MONITOR_DESC);
3695 		total_link_descs = dp_monitor_get_total_link_descs(soc, mac_id);
3696 		qdf_str_lcopy(minidump_str, "mon_link_desc_bank",
3697 			      MINIDUMP_STR_SIZE);
3698 	} else {
3699 		num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3700 			AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
3701 
3702 		num_mpdu_queue_descs = num_mpdu_link_descs /
3703 			num_mpdu_links_per_queue_desc;
3704 
3705 		num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3706 			AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
3707 			num_msdus_per_link_desc;
3708 
3709 		num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3710 			AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
3711 
3712 		num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
3713 			num_tx_msdu_link_descs + num_rx_msdu_link_descs;
3714 
3715 		pages = &soc->link_desc_pages;
3716 		total_link_descs = &soc->total_link_descs;
3717 		qdf_str_lcopy(minidump_str, "link_desc_bank",
3718 			      MINIDUMP_STR_SIZE);
3719 	}
3720 
3721 	/* If link descriptor banks are allocated, return from here */
3722 	if (pages->num_pages)
3723 		return QDF_STATUS_SUCCESS;
3724 
3725 	/* Round up to power of 2 */
3726 	*total_link_descs = 1;
3727 	while (*total_link_descs < num_entries)
3728 		*total_link_descs <<= 1;
3729 
3730 	dp_init_info("%pK: total_link_descs: %u, link_desc_size: %d",
3731 		     soc, *total_link_descs, link_desc_size);
3732 	total_mem_size =  *total_link_descs * link_desc_size;
3733 	total_mem_size += link_desc_align;
3734 
3735 	dp_init_info("%pK: total_mem_size: %d",
3736 		     soc, total_mem_size);
3737 
3738 	dp_set_max_page_size(pages, max_alloc_size);
3739 	dp_desc_multi_pages_mem_alloc(soc, DP_HW_LINK_DESC_TYPE,
3740 				      pages,
3741 				      link_desc_size,
3742 				      *total_link_descs,
3743 				      0, false);
3744 	if (!pages->num_pages) {
3745 		dp_err("Multi page alloc fail for hw link desc pool");
3746 		return QDF_STATUS_E_FAULT;
3747 	}
3748 
3749 	wlan_minidump_log(pages->dma_pages->page_v_addr_start,
3750 			  pages->num_pages * pages->page_size,
3751 			  soc->ctrl_psoc,
3752 			  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3753 			  "hw_link_desc_bank");
3754 
3755 	return QDF_STATUS_SUCCESS;
3756 }
3757 
3758 /*
3759  * dp_hw_link_desc_ring_free() - Free h/w link desc rings
3760  * @soc: DP SOC handle
3761  *
3762  * Return: none
3763  */
3764 static void dp_hw_link_desc_ring_free(struct dp_soc *soc)
3765 {
3766 	uint32_t i;
3767 	uint32_t size = soc->wbm_idle_scatter_buf_size;
3768 	void *vaddr = soc->wbm_idle_link_ring.base_vaddr_unaligned;
3769 	qdf_dma_addr_t paddr;
3770 
3771 	if (soc->wbm_idle_scatter_buf_base_vaddr[0]) {
3772 		for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
3773 			vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
3774 			paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
3775 			if (vaddr) {
3776 				qdf_mem_free_consistent(soc->osdev,
3777 							soc->osdev->dev,
3778 							size,
3779 							vaddr,
3780 							paddr,
3781 							0);
3782 				vaddr = NULL;
3783 			}
3784 		}
3785 	} else {
3786 		wlan_minidump_remove(soc->wbm_idle_link_ring.base_vaddr_unaligned,
3787 				     soc->wbm_idle_link_ring.alloc_size,
3788 				     soc->ctrl_psoc,
3789 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3790 				     "wbm_idle_link_ring");
3791 		dp_srng_free(soc, &soc->wbm_idle_link_ring);
3792 	}
3793 }
3794 
3795 /*
3796  * dp_hw_link_desc_ring_alloc() - Allocate hw link desc rings
3797  * @soc: DP SOC handle
3798  *
3799  * Allocate memory for WBM_IDLE_LINK srng ring if the number of
3800  * link descriptors is less then the max_allocated size. else
3801  * allocate memory for wbm_idle_scatter_buffer.
3802  *
3803  * Return: QDF_STATUS_SUCCESS: success
3804  *         QDF_STATUS_E_NO_MEM: No memory (Failure)
3805  */
3806 static QDF_STATUS dp_hw_link_desc_ring_alloc(struct dp_soc *soc)
3807 {
3808 	uint32_t entry_size, i;
3809 	uint32_t total_mem_size;
3810 	qdf_dma_addr_t *baseaddr = NULL;
3811 	struct dp_srng *dp_srng;
3812 	uint32_t ring_type;
3813 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
3814 	uint32_t tlds;
3815 
3816 	ring_type = WBM_IDLE_LINK;
3817 	dp_srng = &soc->wbm_idle_link_ring;
3818 	tlds = soc->total_link_descs;
3819 
3820 	entry_size = hal_srng_get_entrysize(soc->hal_soc, ring_type);
3821 	total_mem_size = entry_size * tlds;
3822 
3823 	if (total_mem_size <= max_alloc_size) {
3824 		if (dp_srng_alloc(soc, dp_srng, ring_type, tlds, 0)) {
3825 			dp_init_err("%pK: Link desc idle ring setup failed",
3826 				    soc);
3827 			goto fail;
3828 		}
3829 
3830 		wlan_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
3831 				  soc->wbm_idle_link_ring.alloc_size,
3832 				  soc->ctrl_psoc,
3833 				  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3834 				  "wbm_idle_link_ring");
3835 	} else {
3836 		uint32_t num_scatter_bufs;
3837 		uint32_t num_entries_per_buf;
3838 		uint32_t buf_size = 0;
3839 
3840 		soc->wbm_idle_scatter_buf_size =
3841 			hal_idle_list_scatter_buf_size(soc->hal_soc);
3842 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
3843 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
3844 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
3845 					soc->hal_soc, total_mem_size,
3846 					soc->wbm_idle_scatter_buf_size);
3847 
3848 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
3849 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3850 				  FL("scatter bufs size out of bounds"));
3851 			goto fail;
3852 		}
3853 
3854 		for (i = 0; i < num_scatter_bufs; i++) {
3855 			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
3856 			buf_size = soc->wbm_idle_scatter_buf_size;
3857 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
3858 				qdf_mem_alloc_consistent(soc->osdev,
3859 							 soc->osdev->dev,
3860 							 buf_size,
3861 							 baseaddr);
3862 
3863 			if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
3864 				QDF_TRACE(QDF_MODULE_ID_DP,
3865 					  QDF_TRACE_LEVEL_ERROR,
3866 					  FL("Scatter lst memory alloc fail"));
3867 				goto fail;
3868 			}
3869 		}
3870 		soc->num_scatter_bufs = num_scatter_bufs;
3871 	}
3872 	return QDF_STATUS_SUCCESS;
3873 
3874 fail:
3875 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
3876 		void *vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
3877 		qdf_dma_addr_t paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
3878 
3879 		if (vaddr) {
3880 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
3881 						soc->wbm_idle_scatter_buf_size,
3882 						vaddr,
3883 						paddr, 0);
3884 			vaddr = NULL;
3885 		}
3886 	}
3887 	return QDF_STATUS_E_NOMEM;
3888 }
3889 
3890 qdf_export_symbol(dp_hw_link_desc_pool_banks_alloc);
3891 
3892 /*
3893  * dp_hw_link_desc_ring_init() - Initialize hw link desc rings
3894  * @soc: DP SOC handle
3895  *
3896  * Return: QDF_STATUS_SUCCESS: success
3897  *         QDF_STATUS_E_FAILURE: failure
3898  */
3899 static QDF_STATUS dp_hw_link_desc_ring_init(struct dp_soc *soc)
3900 {
3901 	struct dp_srng *dp_srng = &soc->wbm_idle_link_ring;
3902 
3903 	if (dp_srng->base_vaddr_unaligned) {
3904 		if (dp_srng_init(soc, dp_srng, WBM_IDLE_LINK, 0, 0))
3905 			return QDF_STATUS_E_FAILURE;
3906 	}
3907 	return QDF_STATUS_SUCCESS;
3908 }
3909 
3910 /*
3911  * dp_hw_link_desc_ring_deinit() - Reset hw link desc rings
3912  * @soc: DP SOC handle
3913  *
3914  * Return: None
3915  */
3916 static void dp_hw_link_desc_ring_deinit(struct dp_soc *soc)
3917 {
3918 	dp_srng_deinit(soc, &soc->wbm_idle_link_ring, WBM_IDLE_LINK, 0);
3919 }
3920 
3921 /*
3922  * dp_hw_link_desc_ring_replenish() - Replenish hw link desc rings
3923  * @soc: DP SOC handle
3924  * @mac_id: mac id
3925  *
3926  * Return: None
3927  */
3928 void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id)
3929 {
3930 	uint32_t cookie = 0;
3931 	uint32_t page_idx = 0;
3932 	struct qdf_mem_multi_page_t *pages;
3933 	struct qdf_mem_dma_page_t *dma_pages;
3934 	uint32_t offset = 0;
3935 	uint32_t count = 0;
3936 	uint32_t desc_id = 0;
3937 	void *desc_srng;
3938 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
3939 	uint32_t *total_link_descs_addr;
3940 	uint32_t total_link_descs;
3941 	uint32_t scatter_buf_num;
3942 	uint32_t num_entries_per_buf = 0;
3943 	uint32_t rem_entries;
3944 	uint32_t num_descs_per_page;
3945 	uint32_t num_scatter_bufs = 0;
3946 	uint8_t *scatter_buf_ptr;
3947 	void *desc;
3948 
3949 	num_scatter_bufs = soc->num_scatter_bufs;
3950 
3951 	if (mac_id == WLAN_INVALID_PDEV_ID) {
3952 		pages = &soc->link_desc_pages;
3953 		total_link_descs = soc->total_link_descs;
3954 		desc_srng = soc->wbm_idle_link_ring.hal_srng;
3955 	} else {
3956 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3957 		/* dp_monitor_get_link_desc_pages returns NULL only
3958 		 * if monitor SOC is  NULL
3959 		 */
3960 		if (!pages) {
3961 			dp_err("can not get link desc pages");
3962 			QDF_ASSERT(0);
3963 			return;
3964 		}
3965 		total_link_descs_addr =
3966 				dp_monitor_get_total_link_descs(soc, mac_id);
3967 		total_link_descs = *total_link_descs_addr;
3968 		desc_srng = soc->rxdma_mon_desc_ring[mac_id].hal_srng;
3969 	}
3970 
3971 	dma_pages = pages->dma_pages;
3972 	do {
3973 		qdf_mem_zero(dma_pages[page_idx].page_v_addr_start,
3974 			     pages->page_size);
3975 		page_idx++;
3976 	} while (page_idx < pages->num_pages);
3977 
3978 	if (desc_srng) {
3979 		hal_srng_access_start_unlocked(soc->hal_soc, desc_srng);
3980 		page_idx = 0;
3981 		count = 0;
3982 		offset = 0;
3983 		pages = &soc->link_desc_pages;
3984 		while ((desc = hal_srng_src_get_next(soc->hal_soc,
3985 						     desc_srng)) &&
3986 			(count < total_link_descs)) {
3987 			page_idx = count / pages->num_element_per_page;
3988 			if (desc_id == pages->num_element_per_page)
3989 				desc_id = 0;
3990 
3991 			offset = count % pages->num_element_per_page;
3992 			cookie = LINK_DESC_COOKIE(desc_id, page_idx,
3993 						  soc->link_desc_id_start);
3994 
3995 			hal_set_link_desc_addr(soc->hal_soc, desc, cookie,
3996 					       dma_pages[page_idx].page_p_addr
3997 					       + (offset * link_desc_size),
3998 					       soc->idle_link_bm_id);
3999 			count++;
4000 			desc_id++;
4001 		}
4002 		hal_srng_access_end_unlocked(soc->hal_soc, desc_srng);
4003 	} else {
4004 		/* Populate idle list scatter buffers with link descriptor
4005 		 * pointers
4006 		 */
4007 		scatter_buf_num = 0;
4008 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
4009 					soc->hal_soc,
4010 					soc->wbm_idle_scatter_buf_size);
4011 
4012 		scatter_buf_ptr = (uint8_t *)(
4013 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
4014 		rem_entries = num_entries_per_buf;
4015 		pages = &soc->link_desc_pages;
4016 		page_idx = 0; count = 0;
4017 		offset = 0;
4018 		num_descs_per_page = pages->num_element_per_page;
4019 
4020 		while (count < total_link_descs) {
4021 			page_idx = count / num_descs_per_page;
4022 			offset = count % num_descs_per_page;
4023 			if (desc_id == pages->num_element_per_page)
4024 				desc_id = 0;
4025 
4026 			cookie = LINK_DESC_COOKIE(desc_id, page_idx,
4027 						  soc->link_desc_id_start);
4028 			hal_set_link_desc_addr(soc->hal_soc,
4029 					       (void *)scatter_buf_ptr,
4030 					       cookie,
4031 					       dma_pages[page_idx].page_p_addr +
4032 					       (offset * link_desc_size),
4033 					       soc->idle_link_bm_id);
4034 			rem_entries--;
4035 			if (rem_entries) {
4036 				scatter_buf_ptr += link_desc_size;
4037 			} else {
4038 				rem_entries = num_entries_per_buf;
4039 				scatter_buf_num++;
4040 				if (scatter_buf_num >= num_scatter_bufs)
4041 					break;
4042 				scatter_buf_ptr = (uint8_t *)
4043 					(soc->wbm_idle_scatter_buf_base_vaddr[
4044 					 scatter_buf_num]);
4045 			}
4046 			count++;
4047 			desc_id++;
4048 		}
4049 		/* Setup link descriptor idle list in HW */
4050 		hal_setup_link_idle_list(soc->hal_soc,
4051 			soc->wbm_idle_scatter_buf_base_paddr,
4052 			soc->wbm_idle_scatter_buf_base_vaddr,
4053 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
4054 			(uint32_t)(scatter_buf_ptr -
4055 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
4056 			scatter_buf_num-1])), total_link_descs);
4057 	}
4058 }
4059 
4060 qdf_export_symbol(dp_link_desc_ring_replenish);
4061 
4062 #ifdef IPA_OFFLOAD
4063 #define USE_1_IPA_RX_REO_RING 1
4064 #define USE_2_IPA_RX_REO_RINGS 2
4065 #define REO_DST_RING_SIZE_QCA6290 1023
4066 #ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
4067 #define REO_DST_RING_SIZE_QCA8074 1023
4068 #define REO_DST_RING_SIZE_QCN9000 2048
4069 #else
4070 #define REO_DST_RING_SIZE_QCA8074 8
4071 #define REO_DST_RING_SIZE_QCN9000 8
4072 #endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
4073 
4074 #ifdef IPA_WDI3_TX_TWO_PIPES
4075 #ifdef DP_MEMORY_OPT
4076 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
4077 {
4078 	return dp_init_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
4079 }
4080 
4081 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
4082 {
4083 	dp_deinit_tx_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
4084 }
4085 
4086 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
4087 {
4088 	return dp_alloc_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
4089 }
4090 
4091 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
4092 {
4093 	dp_free_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
4094 }
4095 
4096 #else /* !DP_MEMORY_OPT */
4097 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
4098 {
4099 	return 0;
4100 }
4101 
4102 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
4103 {
4104 }
4105 
4106 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
4107 {
4108 	return 0
4109 }
4110 
4111 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
4112 {
4113 }
4114 #endif /* DP_MEMORY_OPT */
4115 
4116 static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
4117 {
4118 	hal_tx_init_data_ring(soc->hal_soc,
4119 			      soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng);
4120 }
4121 
4122 #else /* !IPA_WDI3_TX_TWO_PIPES */
4123 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
4124 {
4125 	return 0;
4126 }
4127 
4128 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
4129 {
4130 }
4131 
4132 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
4133 {
4134 	return 0;
4135 }
4136 
4137 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
4138 {
4139 }
4140 
4141 static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
4142 {
4143 }
4144 
4145 #endif /* IPA_WDI3_TX_TWO_PIPES */
4146 
4147 #else
4148 
4149 #define REO_DST_RING_SIZE_QCA6290 1024
4150 
4151 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
4152 {
4153 	return 0;
4154 }
4155 
4156 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
4157 {
4158 }
4159 
4160 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
4161 {
4162 	return 0;
4163 }
4164 
4165 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
4166 {
4167 }
4168 
4169 static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
4170 {
4171 }
4172 
4173 #endif /* IPA_OFFLOAD */
4174 
4175 /*
4176  * dp_soc_reset_ring_map() - Reset cpu ring map
4177  * @soc: Datapath soc handler
4178  *
4179  * This api resets the default cpu ring map
4180  */
4181 
4182 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
4183 {
4184 	uint8_t i;
4185 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4186 
4187 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
4188 		switch (nss_config) {
4189 		case dp_nss_cfg_first_radio:
4190 			/*
4191 			 * Setting Tx ring map for one nss offloaded radio
4192 			 */
4193 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
4194 			break;
4195 
4196 		case dp_nss_cfg_second_radio:
4197 			/*
4198 			 * Setting Tx ring for two nss offloaded radios
4199 			 */
4200 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
4201 			break;
4202 
4203 		case dp_nss_cfg_dbdc:
4204 			/*
4205 			 * Setting Tx ring map for 2 nss offloaded radios
4206 			 */
4207 			soc->tx_ring_map[i] =
4208 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
4209 			break;
4210 
4211 		case dp_nss_cfg_dbtc:
4212 			/*
4213 			 * Setting Tx ring map for 3 nss offloaded radios
4214 			 */
4215 			soc->tx_ring_map[i] =
4216 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
4217 			break;
4218 
4219 		default:
4220 			dp_err("tx_ring_map failed due to invalid nss cfg");
4221 			break;
4222 		}
4223 	}
4224 }
4225 
4226 /*
4227  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
4228  * @dp_soc - DP soc handle
4229  * @ring_type - ring type
4230  * @ring_num - ring_num
4231  *
4232  * return 0 or 1
4233  */
4234 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
4235 {
4236 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4237 	uint8_t status = 0;
4238 
4239 	switch (ring_type) {
4240 	case WBM2SW_RELEASE:
4241 	case REO_DST:
4242 	case RXDMA_BUF:
4243 	case REO_EXCEPTION:
4244 		status = ((nss_config) & (1 << ring_num));
4245 		break;
4246 	default:
4247 		break;
4248 	}
4249 
4250 	return status;
4251 }
4252 
4253 /*
4254  * dp_soc_disable_unused_mac_intr_mask() - reset interrupt mask for
4255  *					  unused WMAC hw rings
4256  * @dp_soc - DP Soc handle
4257  * @mac_num - wmac num
4258  *
4259  * Return: Return void
4260  */
4261 static void dp_soc_disable_unused_mac_intr_mask(struct dp_soc *soc,
4262 						int mac_num)
4263 {
4264 	uint8_t *grp_mask = NULL;
4265 	int group_number;
4266 
4267 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
4268 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4269 	wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
4270 					  group_number, 0x0);
4271 
4272 	grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
4273 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4274 	wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
4275 				      group_number, 0x0);
4276 
4277 	grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
4278 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4279 	wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
4280 					  group_number, 0x0);
4281 
4282 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
4283 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4284 	wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
4285 					      group_number, 0x0);
4286 }
4287 
4288 #ifdef IPA_OFFLOAD
4289 #ifdef IPA_WDI3_VLAN_SUPPORT
4290 /*
4291  * dp_soc_reset_ipa_vlan_intr_mask() - reset interrupt mask for IPA offloaded
4292  * ring for vlan tagged traffic
4293  * @dp_soc - DP Soc handle
4294  *
4295  * Return: Return void
4296  */
4297 static void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
4298 {
4299 	uint8_t *grp_mask = NULL;
4300 	int group_number, mask;
4301 
4302 	if (!wlan_ipa_is_vlan_enabled())
4303 		return;
4304 
4305 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
4306 
4307 	group_number = dp_srng_find_ring_in_mask(IPA_ALT_REO_DEST_RING_IDX, grp_mask);
4308 	if (group_number < 0) {
4309 		dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4310 			      soc, REO_DST, IPA_ALT_REO_DEST_RING_IDX);
4311 		return;
4312 	}
4313 
4314 	mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
4315 
4316 	/* reset the interrupt mask for offloaded ring */
4317 	mask &= (~(1 << IPA_ALT_REO_DEST_RING_IDX));
4318 
4319 	/*
4320 	 * set the interrupt mask to zero for rx offloaded radio.
4321 	 */
4322 	wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
4323 }
4324 #else
4325 static inline
4326 void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
4327 { }
4328 #endif /* IPA_WDI3_VLAN_SUPPORT */
4329 #else
4330 static inline
4331 void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
4332 { }
4333 #endif /* IPA_OFFLOAD */
4334 
4335 /*
4336  * dp_soc_reset_intr_mask() - reset interrupt mask
4337  * @dp_soc - DP Soc handle
4338  *
4339  * Return: Return void
4340  */
4341 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
4342 {
4343 	uint8_t j;
4344 	uint8_t *grp_mask = NULL;
4345 	int group_number, mask, num_ring;
4346 
4347 	/* number of tx ring */
4348 	num_ring = soc->num_tcl_data_rings;
4349 
4350 	/*
4351 	 * group mask for tx completion  ring.
4352 	 */
4353 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
4354 
4355 	/* loop and reset the mask for only offloaded ring */
4356 	for (j = 0; j < WLAN_CFG_NUM_TCL_DATA_RINGS; j++) {
4357 		/*
4358 		 * Group number corresponding to tx offloaded ring.
4359 		 */
4360 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
4361 		if (group_number < 0) {
4362 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4363 				      soc, WBM2SW_RELEASE, j);
4364 			continue;
4365 		}
4366 
4367 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
4368 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j) &&
4369 		    (!mask)) {
4370 			continue;
4371 		}
4372 
4373 		/* reset the tx mask for offloaded ring */
4374 		mask &= (~(1 << j));
4375 
4376 		/*
4377 		 * reset the interrupt mask for offloaded ring.
4378 		 */
4379 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
4380 	}
4381 
4382 	/* number of rx rings */
4383 	num_ring = soc->num_reo_dest_rings;
4384 
4385 	/*
4386 	 * group mask for reo destination ring.
4387 	 */
4388 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
4389 
4390 	/* loop and reset the mask for only offloaded ring */
4391 	for (j = 0; j < WLAN_CFG_NUM_REO_DEST_RING; j++) {
4392 		/*
4393 		 * Group number corresponding to rx offloaded ring.
4394 		 */
4395 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
4396 		if (group_number < 0) {
4397 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4398 				      soc, REO_DST, j);
4399 			continue;
4400 		}
4401 
4402 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
4403 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j) &&
4404 		    (!mask)) {
4405 			continue;
4406 		}
4407 
4408 		/* reset the interrupt mask for offloaded ring */
4409 		mask &= (~(1 << j));
4410 
4411 		/*
4412 		 * set the interrupt mask to zero for rx offloaded radio.
4413 		 */
4414 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
4415 	}
4416 
4417 	/*
4418 	 * group mask for Rx buffer refill ring
4419 	 */
4420 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
4421 
4422 	/* loop and reset the mask for only offloaded ring */
4423 	for (j = 0; j < MAX_PDEV_CNT; j++) {
4424 		int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
4425 
4426 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
4427 			continue;
4428 		}
4429 
4430 		/*
4431 		 * Group number corresponding to rx offloaded ring.
4432 		 */
4433 		group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask);
4434 		if (group_number < 0) {
4435 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4436 				      soc, REO_DST, lmac_id);
4437 			continue;
4438 		}
4439 
4440 		/* set the interrupt mask for offloaded ring */
4441 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
4442 				group_number);
4443 		mask &= (~(1 << lmac_id));
4444 
4445 		/*
4446 		 * set the interrupt mask to zero for rx offloaded radio.
4447 		 */
4448 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
4449 			group_number, mask);
4450 	}
4451 
4452 	grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
4453 
4454 	for (j = 0; j < num_ring; j++) {
4455 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_EXCEPTION, j)) {
4456 			continue;
4457 		}
4458 
4459 		/*
4460 		 * Group number corresponding to rx err ring.
4461 		 */
4462 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
4463 		if (group_number < 0) {
4464 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4465 				      soc, REO_EXCEPTION, j);
4466 			continue;
4467 		}
4468 
4469 		wlan_cfg_set_rx_err_ring_mask(soc->wlan_cfg_ctx,
4470 					      group_number, 0);
4471 	}
4472 }
4473 
4474 #ifdef IPA_OFFLOAD
4475 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap0,
4476 			 uint32_t *remap1, uint32_t *remap2)
4477 {
4478 	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX] = {
4479 				REO_REMAP_SW1, REO_REMAP_SW2, REO_REMAP_SW3,
4480 				REO_REMAP_SW5, REO_REMAP_SW6, REO_REMAP_SW7};
4481 
4482 	switch (soc->arch_id) {
4483 	case CDP_ARCH_TYPE_BE:
4484 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4485 					      soc->num_reo_dest_rings -
4486 					      USE_2_IPA_RX_REO_RINGS, remap1,
4487 					      remap2);
4488 		break;
4489 
4490 	case CDP_ARCH_TYPE_LI:
4491 		if (wlan_ipa_is_vlan_enabled()) {
4492 			hal_compute_reo_remap_ix2_ix3(
4493 					soc->hal_soc, ring,
4494 					soc->num_reo_dest_rings -
4495 					USE_2_IPA_RX_REO_RINGS, remap1,
4496 					remap2);
4497 
4498 		} else {
4499 			hal_compute_reo_remap_ix2_ix3(
4500 					soc->hal_soc, ring,
4501 					soc->num_reo_dest_rings -
4502 					USE_1_IPA_RX_REO_RING, remap1,
4503 					remap2);
4504 		}
4505 
4506 		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);
4507 		break;
4508 	default:
4509 		dp_err("unknown arch_id 0x%x", soc->arch_id);
4510 		QDF_BUG(0);
4511 
4512 	}
4513 
4514 	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
4515 
4516 	return true;
4517 }
4518 
4519 #ifdef IPA_WDI3_TX_TWO_PIPES
4520 static bool dp_ipa_is_alt_tx_ring(int index)
4521 {
4522 	return index == IPA_TX_ALT_RING_IDX;
4523 }
4524 
4525 static bool dp_ipa_is_alt_tx_comp_ring(int index)
4526 {
4527 	return index == IPA_TX_ALT_COMP_RING_IDX;
4528 }
4529 #else /* !IPA_WDI3_TX_TWO_PIPES */
4530 static bool dp_ipa_is_alt_tx_ring(int index)
4531 {
4532 	return false;
4533 }
4534 
4535 static bool dp_ipa_is_alt_tx_comp_ring(int index)
4536 {
4537 	return false;
4538 }
4539 #endif /* IPA_WDI3_TX_TWO_PIPES */
4540 
4541 /**
4542  * dp_ipa_get_tx_ring_size() - Get Tx ring size for IPA
4543  *
4544  * @tx_ring_num: Tx ring number
4545  * @tx_ipa_ring_sz: Return param only updated for IPA.
4546  * @soc_cfg_ctx: dp soc cfg context
4547  *
4548  * Return: None
4549  */
4550 static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz,
4551 				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4552 {
4553 	if (!soc_cfg_ctx->ipa_enabled)
4554 		return;
4555 
4556 	if (tx_ring_num == IPA_TCL_DATA_RING_IDX)
4557 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_ring_size(soc_cfg_ctx);
4558 	else if (dp_ipa_is_alt_tx_ring(tx_ring_num))
4559 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_alt_ring_size(soc_cfg_ctx);
4560 }
4561 
4562 /**
4563  * dp_ipa_get_tx_comp_ring_size() - Get Tx comp ring size for IPA
4564  *
4565  * @tx_comp_ring_num: Tx comp ring number
4566  * @tx_comp_ipa_ring_sz: Return param only updated for IPA.
4567  * @soc_cfg_ctx: dp soc cfg context
4568  *
4569  * Return: None
4570  */
4571 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
4572 					 int *tx_comp_ipa_ring_sz,
4573 				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4574 {
4575 	if (!soc_cfg_ctx->ipa_enabled)
4576 		return;
4577 
4578 	if (tx_comp_ring_num == IPA_TCL_DATA_RING_IDX)
4579 		*tx_comp_ipa_ring_sz =
4580 				wlan_cfg_ipa_tx_comp_ring_size(soc_cfg_ctx);
4581 	else if (dp_ipa_is_alt_tx_comp_ring(tx_comp_ring_num))
4582 		*tx_comp_ipa_ring_sz =
4583 				wlan_cfg_ipa_tx_alt_comp_ring_size(soc_cfg_ctx);
4584 }
4585 #else
4586 static uint8_t dp_reo_ring_selection(uint32_t value, uint32_t *ring)
4587 {
4588 	uint8_t num = 0;
4589 
4590 	switch (value) {
4591 	/* should we have all the different possible ring configs */
4592 	case 0xFF:
4593 		num = 8;
4594 		ring[0] = REO_REMAP_SW1;
4595 		ring[1] = REO_REMAP_SW2;
4596 		ring[2] = REO_REMAP_SW3;
4597 		ring[3] = REO_REMAP_SW4;
4598 		ring[4] = REO_REMAP_SW5;
4599 		ring[5] = REO_REMAP_SW6;
4600 		ring[6] = REO_REMAP_SW7;
4601 		ring[7] = REO_REMAP_SW8;
4602 		break;
4603 
4604 	case 0x3F:
4605 		num = 6;
4606 		ring[0] = REO_REMAP_SW1;
4607 		ring[1] = REO_REMAP_SW2;
4608 		ring[2] = REO_REMAP_SW3;
4609 		ring[3] = REO_REMAP_SW4;
4610 		ring[4] = REO_REMAP_SW5;
4611 		ring[5] = REO_REMAP_SW6;
4612 		break;
4613 
4614 	case 0xF:
4615 		num = 4;
4616 		ring[0] = REO_REMAP_SW1;
4617 		ring[1] = REO_REMAP_SW2;
4618 		ring[2] = REO_REMAP_SW3;
4619 		ring[3] = REO_REMAP_SW4;
4620 		break;
4621 	case 0xE:
4622 		num = 3;
4623 		ring[0] = REO_REMAP_SW2;
4624 		ring[1] = REO_REMAP_SW3;
4625 		ring[2] = REO_REMAP_SW4;
4626 		break;
4627 	case 0xD:
4628 		num = 3;
4629 		ring[0] = REO_REMAP_SW1;
4630 		ring[1] = REO_REMAP_SW3;
4631 		ring[2] = REO_REMAP_SW4;
4632 		break;
4633 	case 0xC:
4634 		num = 2;
4635 		ring[0] = REO_REMAP_SW3;
4636 		ring[1] = REO_REMAP_SW4;
4637 		break;
4638 	case 0xB:
4639 		num = 3;
4640 		ring[0] = REO_REMAP_SW1;
4641 		ring[1] = REO_REMAP_SW2;
4642 		ring[2] = REO_REMAP_SW4;
4643 		break;
4644 	case 0xA:
4645 		num = 2;
4646 		ring[0] = REO_REMAP_SW2;
4647 		ring[1] = REO_REMAP_SW4;
4648 		break;
4649 	case 0x9:
4650 		num = 2;
4651 		ring[0] = REO_REMAP_SW1;
4652 		ring[1] = REO_REMAP_SW4;
4653 		break;
4654 	case 0x8:
4655 		num = 1;
4656 		ring[0] = REO_REMAP_SW4;
4657 		break;
4658 	case 0x7:
4659 		num = 3;
4660 		ring[0] = REO_REMAP_SW1;
4661 		ring[1] = REO_REMAP_SW2;
4662 		ring[2] = REO_REMAP_SW3;
4663 		break;
4664 	case 0x6:
4665 		num = 2;
4666 		ring[0] = REO_REMAP_SW2;
4667 		ring[1] = REO_REMAP_SW3;
4668 		break;
4669 	case 0x5:
4670 		num = 2;
4671 		ring[0] = REO_REMAP_SW1;
4672 		ring[1] = REO_REMAP_SW3;
4673 		break;
4674 	case 0x4:
4675 		num = 1;
4676 		ring[0] = REO_REMAP_SW3;
4677 		break;
4678 	case 0x3:
4679 		num = 2;
4680 		ring[0] = REO_REMAP_SW1;
4681 		ring[1] = REO_REMAP_SW2;
4682 		break;
4683 	case 0x2:
4684 		num = 1;
4685 		ring[0] = REO_REMAP_SW2;
4686 		break;
4687 	case 0x1:
4688 		num = 1;
4689 		ring[0] = REO_REMAP_SW1;
4690 		break;
4691 	default:
4692 		dp_err("unknown reo ring map 0x%x", value);
4693 		QDF_BUG(0);
4694 	}
4695 	return num;
4696 }
4697 
4698 bool dp_reo_remap_config(struct dp_soc *soc,
4699 			 uint32_t *remap0,
4700 			 uint32_t *remap1,
4701 			 uint32_t *remap2)
4702 {
4703 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4704 	uint32_t reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
4705 	uint8_t num;
4706 	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX];
4707 	uint32_t value;
4708 
4709 	switch (offload_radio) {
4710 	case dp_nss_cfg_default:
4711 		value = reo_config & WLAN_CFG_NUM_REO_RINGS_MAP_MAX;
4712 		num = dp_reo_ring_selection(value, ring);
4713 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4714 					      num, remap1, remap2);
4715 		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);
4716 
4717 		break;
4718 	case dp_nss_cfg_first_radio:
4719 		value = reo_config & 0xE;
4720 		num = dp_reo_ring_selection(value, ring);
4721 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4722 					      num, remap1, remap2);
4723 
4724 		break;
4725 	case dp_nss_cfg_second_radio:
4726 		value = reo_config & 0xD;
4727 		num = dp_reo_ring_selection(value, ring);
4728 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4729 					      num, remap1, remap2);
4730 
4731 		break;
4732 	case dp_nss_cfg_dbdc:
4733 	case dp_nss_cfg_dbtc:
4734 		/* return false if both or all are offloaded to NSS */
4735 		return false;
4736 
4737 	}
4738 
4739 	dp_debug("remap1 %x remap2 %x offload_radio %u",
4740 		 *remap1, *remap2, offload_radio);
4741 	return true;
4742 }
4743 
4744 static void dp_ipa_get_tx_ring_size(int ring_num, int *tx_ipa_ring_sz,
4745 				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4746 {
4747 }
4748 
4749 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
4750 					 int *tx_comp_ipa_ring_sz,
4751 				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4752 {
4753 }
4754 #endif /* IPA_OFFLOAD */
4755 
4756 /*
4757  * dp_reo_frag_dst_set() - configure reo register to set the
4758  *                        fragment destination ring
4759  * @soc : Datapath soc
4760  * @frag_dst_ring : output parameter to set fragment destination ring
4761  *
4762  * Based on offload_radio below fragment destination rings is selected
4763  * 0 - TCL
4764  * 1 - SW1
4765  * 2 - SW2
4766  * 3 - SW3
4767  * 4 - SW4
4768  * 5 - Release
4769  * 6 - FW
4770  * 7 - alternate select
4771  *
4772  * return: void
4773  */
4774 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
4775 {
4776 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4777 
4778 	switch (offload_radio) {
4779 	case dp_nss_cfg_default:
4780 		*frag_dst_ring = REO_REMAP_TCL;
4781 		break;
4782 	case dp_nss_cfg_first_radio:
4783 		/*
4784 		 * This configuration is valid for single band radio which
4785 		 * is also NSS offload.
4786 		 */
4787 	case dp_nss_cfg_dbdc:
4788 	case dp_nss_cfg_dbtc:
4789 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
4790 		break;
4791 	default:
4792 		dp_init_err("%pK: dp_reo_frag_dst_set invalid offload radio config", soc);
4793 		break;
4794 	}
4795 }
4796 
4797 #ifdef ENABLE_VERBOSE_DEBUG
4798 static void dp_enable_verbose_debug(struct dp_soc *soc)
4799 {
4800 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
4801 
4802 	soc_cfg_ctx = soc->wlan_cfg_ctx;
4803 
4804 	if (soc_cfg_ctx->per_pkt_trace & dp_verbose_debug_mask)
4805 		is_dp_verbose_debug_enabled = true;
4806 
4807 	if (soc_cfg_ctx->per_pkt_trace & hal_verbose_debug_mask)
4808 		hal_set_verbose_debug(true);
4809 	else
4810 		hal_set_verbose_debug(false);
4811 }
4812 #else
4813 static void dp_enable_verbose_debug(struct dp_soc *soc)
4814 {
4815 }
4816 #endif
4817 
4818 #ifdef WLAN_FEATURE_STATS_EXT
4819 static inline void dp_create_ext_stats_event(struct dp_soc *soc)
4820 {
4821 	qdf_event_create(&soc->rx_hw_stats_event);
4822 }
4823 #else
4824 static inline void dp_create_ext_stats_event(struct dp_soc *soc)
4825 {
4826 }
4827 #endif
4828 
4829 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index)
4830 {
4831 	int tcl_ring_num, wbm_ring_num;
4832 
4833 	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
4834 						index,
4835 						&tcl_ring_num,
4836 						&wbm_ring_num);
4837 
4838 	if (tcl_ring_num == -1) {
4839 		dp_err("incorrect tcl ring num for index %u", index);
4840 		return;
4841 	}
4842 
4843 	wlan_minidump_remove(soc->tcl_data_ring[index].base_vaddr_unaligned,
4844 			     soc->tcl_data_ring[index].alloc_size,
4845 			     soc->ctrl_psoc,
4846 			     WLAN_MD_DP_SRNG_TCL_DATA,
4847 			     "tcl_data_ring");
4848 	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
4849 	dp_srng_deinit(soc, &soc->tcl_data_ring[index], TCL_DATA,
4850 		       tcl_ring_num);
4851 
4852 	if (wbm_ring_num == INVALID_WBM_RING_NUM)
4853 		return;
4854 
4855 	wlan_minidump_remove(soc->tx_comp_ring[index].base_vaddr_unaligned,
4856 			     soc->tx_comp_ring[index].alloc_size,
4857 			     soc->ctrl_psoc,
4858 			     WLAN_MD_DP_SRNG_TX_COMP,
4859 			     "tcl_comp_ring");
4860 	dp_srng_deinit(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4861 		       wbm_ring_num);
4862 }
4863 
4864 /**
4865  * dp_init_tx_ring_pair_by_index() - The function inits tcl data/wbm completion
4866  * ring pair
4867  * @soc: DP soc pointer
4868  * @index: index of soc->tcl_data or soc->tx_comp to initialize
4869  *
4870  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
4871  */
4872 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
4873 						uint8_t index)
4874 {
4875 	int tcl_ring_num, wbm_ring_num;
4876 	uint8_t bm_id;
4877 
4878 	if (index >= MAX_TCL_DATA_RINGS) {
4879 		dp_err("unexpected index!");
4880 		QDF_BUG(0);
4881 		goto fail1;
4882 	}
4883 
4884 	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
4885 						index,
4886 						&tcl_ring_num,
4887 						&wbm_ring_num);
4888 
4889 	if (tcl_ring_num == -1) {
4890 		dp_err("incorrect tcl ring num for index %u", index);
4891 		goto fail1;
4892 	}
4893 
4894 	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
4895 	if (dp_srng_init(soc, &soc->tcl_data_ring[index], TCL_DATA,
4896 			 tcl_ring_num, 0)) {
4897 		dp_err("dp_srng_init failed for tcl_data_ring");
4898 		goto fail1;
4899 	}
4900 	wlan_minidump_log(soc->tcl_data_ring[index].base_vaddr_unaligned,
4901 			  soc->tcl_data_ring[index].alloc_size,
4902 			  soc->ctrl_psoc,
4903 			  WLAN_MD_DP_SRNG_TCL_DATA,
4904 			  "tcl_data_ring");
4905 
4906 	if (wbm_ring_num == INVALID_WBM_RING_NUM)
4907 		goto set_rbm;
4908 
4909 	if (dp_srng_init(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4910 			 wbm_ring_num, 0)) {
4911 		dp_err("dp_srng_init failed for tx_comp_ring");
4912 		goto fail1;
4913 	}
4914 
4915 	wlan_minidump_log(soc->tx_comp_ring[index].base_vaddr_unaligned,
4916 			  soc->tx_comp_ring[index].alloc_size,
4917 			  soc->ctrl_psoc,
4918 			  WLAN_MD_DP_SRNG_TX_COMP,
4919 			  "tcl_comp_ring");
4920 set_rbm:
4921 	bm_id = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_ring_num);
4922 
4923 	soc->arch_ops.tx_implicit_rbm_set(soc, tcl_ring_num, bm_id);
4924 
4925 	return QDF_STATUS_SUCCESS;
4926 
4927 fail1:
4928 	return QDF_STATUS_E_FAILURE;
4929 }
4930 
4931 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index)
4932 {
4933 	dp_debug("index %u", index);
4934 	dp_srng_free(soc, &soc->tcl_data_ring[index]);
4935 	dp_srng_free(soc, &soc->tx_comp_ring[index]);
4936 }
4937 
4938 /**
4939  * dp_alloc_tx_ring_pair_by_index() - The function allocs tcl data/wbm2sw
4940  * ring pair for the given "index"
4941  * @soc: DP soc pointer
4942  * @index: index of soc->tcl_data or soc->tx_comp to initialize
4943  *
4944  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
4945  */
4946 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
4947 						 uint8_t index)
4948 {
4949 	int tx_ring_size;
4950 	int tx_comp_ring_size;
4951 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
4952 	int cached = 0;
4953 
4954 	if (index >= MAX_TCL_DATA_RINGS) {
4955 		dp_err("unexpected index!");
4956 		QDF_BUG(0);
4957 		goto fail1;
4958 	}
4959 
4960 	dp_debug("index %u", index);
4961 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
4962 	dp_ipa_get_tx_ring_size(index, &tx_ring_size, soc_cfg_ctx);
4963 
4964 	if (dp_srng_alloc(soc, &soc->tcl_data_ring[index], TCL_DATA,
4965 			  tx_ring_size, cached)) {
4966 		dp_err("dp_srng_alloc failed for tcl_data_ring");
4967 		goto fail1;
4968 	}
4969 
4970 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
4971 	dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size, soc_cfg_ctx);
4972 	/* Enable cached TCL desc if NSS offload is disabled */
4973 	if (!wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
4974 		cached = WLAN_CFG_DST_RING_CACHED_DESC;
4975 
4976 	if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) ==
4977 	    INVALID_WBM_RING_NUM)
4978 		return QDF_STATUS_SUCCESS;
4979 
4980 	if (dp_srng_alloc(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4981 			  tx_comp_ring_size, cached)) {
4982 		dp_err("dp_srng_alloc failed for tx_comp_ring");
4983 		goto fail1;
4984 	}
4985 
4986 	return QDF_STATUS_SUCCESS;
4987 
4988 fail1:
4989 	return QDF_STATUS_E_FAILURE;
4990 }
4991 
4992 static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
4993 {
4994 	struct cdp_lro_hash_config lro_hash;
4995 	QDF_STATUS status;
4996 
4997 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
4998 	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
4999 	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
5000 		dp_err("LRO, GRO and RX hash disabled");
5001 		return QDF_STATUS_E_FAILURE;
5002 	}
5003 
5004 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
5005 
5006 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
5007 	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
5008 		lro_hash.lro_enable = 1;
5009 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
5010 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
5011 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
5012 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
5013 	}
5014 
5015 	soc->arch_ops.get_rx_hash_key(soc, &lro_hash);
5016 
5017 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
5018 
5019 	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
5020 		QDF_BUG(0);
5021 		dp_err("lro_hash_config not configured");
5022 		return QDF_STATUS_E_FAILURE;
5023 	}
5024 
5025 	status = soc->cdp_soc.ol_ops->lro_hash_config(soc->ctrl_psoc,
5026 						      pdev->pdev_id,
5027 						      &lro_hash);
5028 	if (!QDF_IS_STATUS_SUCCESS(status)) {
5029 		dp_err("failed to send lro_hash_config to FW %u", status);
5030 		return status;
5031 	}
5032 
5033 	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
5034 		lro_hash.lro_enable, lro_hash.tcp_flag,
5035 		lro_hash.tcp_flag_mask);
5036 
5037 	dp_info("toeplitz_hash_ipv4:");
5038 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5039 			   lro_hash.toeplitz_hash_ipv4,
5040 			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
5041 			   LRO_IPV4_SEED_ARR_SZ));
5042 
5043 	dp_info("toeplitz_hash_ipv6:");
5044 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5045 			   lro_hash.toeplitz_hash_ipv6,
5046 			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
5047 			   LRO_IPV6_SEED_ARR_SZ));
5048 
5049 	return status;
5050 }
5051 
5052 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
5053 /*
5054  * dp_reap_timer_init() - initialize the reap timer
5055  * @soc: data path SoC handle
5056  *
5057  * Return: void
5058  */
5059 static void dp_reap_timer_init(struct dp_soc *soc)
5060 {
5061 	/*
5062 	 * Timer to reap rxdma status rings.
5063 	 * Needed until we enable ppdu end interrupts
5064 	 */
5065 	dp_monitor_reap_timer_init(soc);
5066 	dp_monitor_vdev_timer_init(soc);
5067 }
5068 
5069 /*
5070  * dp_reap_timer_deinit() - de-initialize the reap timer
5071  * @soc: data path SoC handle
5072  *
5073  * Return: void
5074  */
5075 static void dp_reap_timer_deinit(struct dp_soc *soc)
5076 {
5077 	dp_monitor_reap_timer_deinit(soc);
5078 }
5079 #else
5080 /* WIN use case */
5081 static void dp_reap_timer_init(struct dp_soc *soc)
5082 {
5083 	/* Configure LMAC rings in Polled mode */
5084 	if (soc->lmac_polled_mode) {
5085 		/*
5086 		 * Timer to reap lmac rings.
5087 		 */
5088 		qdf_timer_init(soc->osdev, &soc->lmac_reap_timer,
5089 			       dp_service_lmac_rings, (void *)soc,
5090 			       QDF_TIMER_TYPE_WAKE_APPS);
5091 		soc->lmac_timer_init = 1;
5092 		qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
5093 	}
5094 }
5095 
5096 static void dp_reap_timer_deinit(struct dp_soc *soc)
5097 {
5098 	if (soc->lmac_timer_init) {
5099 		qdf_timer_stop(&soc->lmac_reap_timer);
5100 		qdf_timer_free(&soc->lmac_reap_timer);
5101 		soc->lmac_timer_init = 0;
5102 	}
5103 }
5104 #endif
5105 
5106 #ifdef QCA_HOST2FW_RXBUF_RING
5107 /*
5108  * dp_rxdma_ring_alloc() - allocate the RXDMA rings
5109  * @soc: data path SoC handle
5110  * @pdev: Physical device handle
5111  *
5112  * Return: 0 - success, > 0 - failure
5113  */
5114 static int dp_rxdma_ring_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
5115 {
5116 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
5117 	int max_mac_rings;
5118 	int i;
5119 	int ring_size;
5120 
5121 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
5122 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
5123 	ring_size =  wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx);
5124 
5125 	for (i = 0; i < max_mac_rings; i++) {
5126 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
5127 		if (dp_srng_alloc(soc, &pdev->rx_mac_buf_ring[i],
5128 				  RXDMA_BUF, ring_size, 0)) {
5129 			dp_init_err("%pK: failed rx mac ring setup", soc);
5130 			return QDF_STATUS_E_FAILURE;
5131 		}
5132 	}
5133 	return QDF_STATUS_SUCCESS;
5134 }
5135 
5136 /*
5137  * dp_rxdma_ring_setup() - configure the RXDMA rings
5138  * @soc: data path SoC handle
5139  * @pdev: Physical device handle
5140  *
5141  * Return: 0 - success, > 0 - failure
5142  */
5143 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
5144 {
5145 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
5146 	int max_mac_rings;
5147 	int i;
5148 
5149 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
5150 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
5151 
5152 	for (i = 0; i < max_mac_rings; i++) {
5153 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
5154 		if (dp_srng_init(soc, &pdev->rx_mac_buf_ring[i],
5155 				 RXDMA_BUF, 1, i)) {
5156 			dp_init_err("%pK: failed rx mac ring setup", soc);
5157 			return QDF_STATUS_E_FAILURE;
5158 		}
5159 	}
5160 	return QDF_STATUS_SUCCESS;
5161 }
5162 
5163 /*
5164  * dp_rxdma_ring_cleanup() - Deinit the RXDMA rings and reap timer
5165  * @soc: data path SoC handle
5166  * @pdev: Physical device handle
5167  *
5168  * Return: void
5169  */
5170 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
5171 {
5172 	int i;
5173 
5174 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
5175 		dp_srng_deinit(soc, &pdev->rx_mac_buf_ring[i], RXDMA_BUF, 1);
5176 
5177 	dp_reap_timer_deinit(soc);
5178 }
5179 
5180 /*
5181  * dp_rxdma_ring_free() - Free the RXDMA rings
5182  * @pdev: Physical device handle
5183  *
5184  * Return: void
5185  */
5186 static void dp_rxdma_ring_free(struct dp_pdev *pdev)
5187 {
5188 	int i;
5189 
5190 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
5191 		dp_srng_free(pdev->soc, &pdev->rx_mac_buf_ring[i]);
5192 }
5193 
5194 #else
5195 static int dp_rxdma_ring_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
5196 {
5197 	return QDF_STATUS_SUCCESS;
5198 }
5199 
5200 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
5201 {
5202 	return QDF_STATUS_SUCCESS;
5203 }
5204 
5205 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
5206 {
5207 	dp_reap_timer_deinit(soc);
5208 }
5209 
5210 static void dp_rxdma_ring_free(struct dp_pdev *pdev)
5211 {
5212 }
5213 #endif
5214 
5215 /**
5216  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
5217  * @pdev - DP_PDEV handle
5218  *
5219  * Return: void
5220  */
5221 static inline void
5222 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
5223 {
5224 	uint8_t map_id;
5225 	struct dp_soc *soc = pdev->soc;
5226 
5227 	if (!soc)
5228 		return;
5229 
5230 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
5231 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
5232 			     default_dscp_tid_map,
5233 			     sizeof(default_dscp_tid_map));
5234 	}
5235 
5236 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
5237 		hal_tx_set_dscp_tid_map(soc->hal_soc,
5238 					default_dscp_tid_map,
5239 					map_id);
5240 	}
5241 }
5242 
5243 /**
5244  * dp_pcp_tid_map_setup(): Initialize the pcp-tid maps
5245  * @pdev - DP_PDEV handle
5246  *
5247  * Return: void
5248  */
5249 static inline void
5250 dp_pcp_tid_map_setup(struct dp_pdev *pdev)
5251 {
5252 	struct dp_soc *soc = pdev->soc;
5253 
5254 	if (!soc)
5255 		return;
5256 
5257 	qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
5258 		     sizeof(default_pcp_tid_map));
5259 	hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
5260 }
5261 
5262 #ifdef IPA_OFFLOAD
5263 /**
5264  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
5265  * @soc: data path instance
5266  * @pdev: core txrx pdev context
5267  *
5268  * Return: QDF_STATUS_SUCCESS: success
5269  *         QDF_STATUS_E_RESOURCES: Error return
5270  */
5271 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5272 					   struct dp_pdev *pdev)
5273 {
5274 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5275 	int entries;
5276 
5277 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5278 		soc_cfg_ctx = soc->wlan_cfg_ctx;
5279 		entries =
5280 			wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
5281 
5282 		/* Setup second Rx refill buffer ring */
5283 		if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
5284 				  entries, 0)) {
5285 			dp_init_err("%pK: dp_srng_alloc failed second"
5286 				    "rx refill ring", soc);
5287 			return QDF_STATUS_E_FAILURE;
5288 		}
5289 	}
5290 
5291 	return QDF_STATUS_SUCCESS;
5292 }
5293 
5294 #ifdef IPA_WDI3_VLAN_SUPPORT
5295 static int dp_setup_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5296 					       struct dp_pdev *pdev)
5297 {
5298 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5299 	int entries;
5300 
5301 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5302 	    wlan_ipa_is_vlan_enabled()) {
5303 		soc_cfg_ctx = soc->wlan_cfg_ctx;
5304 		entries =
5305 			wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
5306 
5307 		/* Setup second Rx refill buffer ring */
5308 		if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF,
5309 				  entries, 0)) {
5310 			dp_init_err("%pK: alloc failed for 3rd rx refill ring",
5311 				    soc);
5312 			return QDF_STATUS_E_FAILURE;
5313 		}
5314 	}
5315 
5316 	return QDF_STATUS_SUCCESS;
5317 }
5318 
5319 static int dp_init_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5320 					      struct dp_pdev *pdev)
5321 {
5322 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5323 	    wlan_ipa_is_vlan_enabled()) {
5324 		if (dp_srng_init(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF,
5325 				 IPA_RX_ALT_REFILL_BUF_RING_IDX,
5326 				 pdev->pdev_id)) {
5327 			dp_init_err("%pK: init failed for 3rd rx refill ring",
5328 				    soc);
5329 			return QDF_STATUS_E_FAILURE;
5330 		}
5331 	}
5332 
5333 	return QDF_STATUS_SUCCESS;
5334 }
5335 
5336 static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5337 						 struct dp_pdev *pdev)
5338 {
5339 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5340 	    wlan_ipa_is_vlan_enabled())
5341 		dp_srng_deinit(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF, 0);
5342 }
5343 
5344 static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5345 					       struct dp_pdev *pdev)
5346 {
5347 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5348 	    wlan_ipa_is_vlan_enabled())
5349 		dp_srng_free(soc, &pdev->rx_refill_buf_ring3);
5350 }
5351 #else
5352 static int dp_setup_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5353 					       struct dp_pdev *pdev)
5354 {
5355 	return QDF_STATUS_SUCCESS;
5356 }
5357 
5358 static int dp_init_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5359 					      struct dp_pdev *pdev)
5360 {
5361 	return QDF_STATUS_SUCCESS;
5362 }
5363 
5364 static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5365 						 struct dp_pdev *pdev)
5366 {
5367 }
5368 
5369 static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5370 					       struct dp_pdev *pdev)
5371 {
5372 }
5373 #endif
5374 
5375 /**
5376  * dp_deinit_ipa_rx_refill_buf_ring - deinit second Rx refill buffer ring
5377  * @soc: data path instance
5378  * @pdev: core txrx pdev context
5379  *
5380  * Return: void
5381  */
5382 static void dp_deinit_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5383 					     struct dp_pdev *pdev)
5384 {
5385 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
5386 		dp_srng_deinit(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 0);
5387 }
5388 
5389 /**
5390  * dp_init_ipa_rx_refill_buf_ring - Init second Rx refill buffer ring
5391  * @soc: data path instance
5392  * @pdev: core txrx pdev context
5393  *
5394  * Return: QDF_STATUS_SUCCESS: success
5395  *         QDF_STATUS_E_RESOURCES: Error return
5396  */
5397 static int dp_init_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5398 					  struct dp_pdev *pdev)
5399 {
5400 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5401 		if (dp_srng_init(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
5402 				 IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id)) {
5403 			dp_init_err("%pK: dp_srng_init failed second"
5404 				    "rx refill ring", soc);
5405 			return QDF_STATUS_E_FAILURE;
5406 		}
5407 	}
5408 
5409 	if (dp_init_ipa_rx_alt_refill_buf_ring(soc, pdev)) {
5410 		dp_deinit_ipa_rx_refill_buf_ring(soc, pdev);
5411 		return QDF_STATUS_E_FAILURE;
5412 	}
5413 
5414 	return QDF_STATUS_SUCCESS;
5415 }
5416 
5417 /**
5418  * dp_free_ipa_rx_refill_buf_ring - free second Rx refill buffer ring
5419  * @soc: data path instance
5420  * @pdev: core txrx pdev context
5421  *
5422  * Return: void
5423  */
5424 static void dp_free_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5425 					   struct dp_pdev *pdev)
5426 {
5427 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
5428 		dp_srng_free(soc, &pdev->rx_refill_buf_ring2);
5429 }
5430 #else
5431 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5432 					   struct dp_pdev *pdev)
5433 {
5434 	return QDF_STATUS_SUCCESS;
5435 }
5436 
5437 static int dp_init_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5438 					  struct dp_pdev *pdev)
5439 {
5440 	return QDF_STATUS_SUCCESS;
5441 }
5442 
5443 static void dp_deinit_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5444 					     struct dp_pdev *pdev)
5445 {
5446 }
5447 
5448 static void dp_free_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5449 					   struct dp_pdev *pdev)
5450 {
5451 }
5452 
5453 static int dp_setup_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5454 					       struct dp_pdev *pdev)
5455 {
5456 	return QDF_STATUS_SUCCESS;
5457 }
5458 
5459 static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5460 						 struct dp_pdev *pdev)
5461 {
5462 }
5463 
5464 static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5465 					       struct dp_pdev *pdev)
5466 {
5467 }
5468 #endif
5469 
5470 #ifdef DP_TX_HW_DESC_HISTORY
5471 /**
5472  * dp_soc_tx_hw_desc_history_attach - Attach TX HW descriptor history
5473  *
5474  * @soc: DP soc handle
5475  *
5476  * Return: None
5477  */
5478 static void dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc)
5479 {
5480 	dp_soc_frag_history_attach(soc, &soc->tx_hw_desc_history,
5481 				   DP_TX_HW_DESC_HIST_MAX_SLOTS,
5482 				   DP_TX_HW_DESC_HIST_PER_SLOT_MAX,
5483 				   sizeof(struct dp_tx_hw_desc_evt),
5484 				   true, DP_TX_HW_DESC_HIST_TYPE);
5485 }
5486 
5487 static void dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc)
5488 {
5489 	dp_soc_frag_history_detach(soc, &soc->tx_hw_desc_history,
5490 				   DP_TX_HW_DESC_HIST_MAX_SLOTS,
5491 				   true, DP_TX_HW_DESC_HIST_TYPE);
5492 }
5493 
5494 #else /* DP_TX_HW_DESC_HISTORY */
5495 static inline void
5496 dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc)
5497 {
5498 }
5499 
5500 static inline void
5501 dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc)
5502 {
5503 }
5504 #endif /* DP_TX_HW_DESC_HISTORY */
5505 
5506 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
5507 #ifndef RX_DEFRAG_DO_NOT_REINJECT
5508 /**
5509  * dp_soc_rx_reinject_ring_history_attach - Attach the reo reinject ring
5510  *					    history.
5511  * @soc: DP soc handle
5512  *
5513  * Return: None
5514  */
5515 static void dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
5516 {
5517 	soc->rx_reinject_ring_history =
5518 		dp_context_alloc_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
5519 				     sizeof(struct dp_rx_reinject_history));
5520 	if (soc->rx_reinject_ring_history)
5521 		qdf_atomic_init(&soc->rx_reinject_ring_history->index);
5522 }
5523 #else /* RX_DEFRAG_DO_NOT_REINJECT */
5524 static inline void
5525 dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
5526 {
5527 }
5528 #endif /* RX_DEFRAG_DO_NOT_REINJECT */
5529 
5530 /**
5531  * dp_soc_rx_history_attach() - Attach the ring history record buffers
5532  * @soc: DP soc structure
5533  *
5534  * This function allocates the memory for recording the rx ring, rx error
5535  * ring and the reinject ring entries. There is no error returned in case
5536  * of allocation failure since the record function checks if the history is
5537  * initialized or not. We do not want to fail the driver load in case of
5538  * failure to allocate memory for debug history.
5539  *
5540  * Returns: None
5541  */
5542 static void dp_soc_rx_history_attach(struct dp_soc *soc)
5543 {
5544 	int i;
5545 	uint32_t rx_ring_hist_size;
5546 	uint32_t rx_refill_ring_hist_size;
5547 
5548 	rx_ring_hist_size = sizeof(*soc->rx_ring_history[0]);
5549 	rx_refill_ring_hist_size = sizeof(*soc->rx_refill_ring_history[0]);
5550 
5551 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
5552 		soc->rx_ring_history[i] = dp_context_alloc_mem(
5553 				soc, DP_RX_RING_HIST_TYPE, rx_ring_hist_size);
5554 		if (soc->rx_ring_history[i])
5555 			qdf_atomic_init(&soc->rx_ring_history[i]->index);
5556 	}
5557 
5558 	soc->rx_err_ring_history = dp_context_alloc_mem(
5559 			soc, DP_RX_ERR_RING_HIST_TYPE, rx_ring_hist_size);
5560 	if (soc->rx_err_ring_history)
5561 		qdf_atomic_init(&soc->rx_err_ring_history->index);
5562 
5563 	dp_soc_rx_reinject_ring_history_attach(soc);
5564 
5565 	for (i = 0; i < MAX_PDEV_CNT; i++) {
5566 		soc->rx_refill_ring_history[i] = dp_context_alloc_mem(
5567 						soc,
5568 						DP_RX_REFILL_RING_HIST_TYPE,
5569 						rx_refill_ring_hist_size);
5570 
5571 		if (soc->rx_refill_ring_history[i])
5572 			qdf_atomic_init(&soc->rx_refill_ring_history[i]->index);
5573 	}
5574 }
5575 
5576 static void dp_soc_rx_history_detach(struct dp_soc *soc)
5577 {
5578 	int i;
5579 
5580 	for (i = 0; i < MAX_REO_DEST_RINGS; i++)
5581 		dp_context_free_mem(soc, DP_RX_RING_HIST_TYPE,
5582 				    soc->rx_ring_history[i]);
5583 
5584 	dp_context_free_mem(soc, DP_RX_ERR_RING_HIST_TYPE,
5585 			    soc->rx_err_ring_history);
5586 
5587 	/*
5588 	 * No need for a featurized detach since qdf_mem_free takes
5589 	 * care of NULL pointer.
5590 	 */
5591 	dp_context_free_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
5592 			    soc->rx_reinject_ring_history);
5593 
5594 	for (i = 0; i < MAX_PDEV_CNT; i++)
5595 		dp_context_free_mem(soc, DP_RX_REFILL_RING_HIST_TYPE,
5596 				    soc->rx_refill_ring_history[i]);
5597 }
5598 
5599 #else
5600 static inline void dp_soc_rx_history_attach(struct dp_soc *soc)
5601 {
5602 }
5603 
5604 static inline void dp_soc_rx_history_detach(struct dp_soc *soc)
5605 {
5606 }
5607 #endif
5608 
5609 #ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
5610 /**
5611  * dp_soc_mon_status_ring_history_attach() - Attach the monitor status
5612  *					     buffer record history.
5613  * @soc: DP soc handle
5614  *
5615  * This function allocates memory to track the event for a monitor
5616  * status buffer, before its parsed and freed.
5617  *
5618  * Return: None
5619  */
5620 static void dp_soc_mon_status_ring_history_attach(struct dp_soc *soc)
5621 {
5622 	soc->mon_status_ring_history = dp_context_alloc_mem(soc,
5623 				DP_MON_STATUS_BUF_HIST_TYPE,
5624 				sizeof(struct dp_mon_status_ring_history));
5625 	if (!soc->mon_status_ring_history) {
5626 		dp_err("Failed to alloc memory for mon status ring history");
5627 		return;
5628 	}
5629 }
5630 
5631 /**
5632  * dp_soc_mon_status_ring_history_detach() - Detach the monitor status buffer
5633  *					     record history.
5634  * @soc: DP soc handle
5635  *
5636  * Return: None
5637  */
5638 static void dp_soc_mon_status_ring_history_detach(struct dp_soc *soc)
5639 {
5640 	dp_context_free_mem(soc, DP_MON_STATUS_BUF_HIST_TYPE,
5641 			    soc->mon_status_ring_history);
5642 }
5643 #else
5644 static void dp_soc_mon_status_ring_history_attach(struct dp_soc *soc)
5645 {
5646 }
5647 
5648 static void dp_soc_mon_status_ring_history_detach(struct dp_soc *soc)
5649 {
5650 }
5651 #endif
5652 
5653 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
5654 /**
5655  * dp_soc_tx_history_attach() - Attach the ring history record buffers
5656  * @soc: DP soc structure
5657  *
5658  * This function allocates the memory for recording the tx tcl ring and
5659  * the tx comp ring entries. There is no error returned in case
5660  * of allocation failure since the record function checks if the history is
5661  * initialized or not. We do not want to fail the driver load in case of
5662  * failure to allocate memory for debug history.
5663  *
5664  * Returns: None
5665  */
5666 static void dp_soc_tx_history_attach(struct dp_soc *soc)
5667 {
5668 	dp_soc_frag_history_attach(soc, &soc->tx_tcl_history,
5669 				   DP_TX_TCL_HIST_MAX_SLOTS,
5670 				   DP_TX_TCL_HIST_PER_SLOT_MAX,
5671 				   sizeof(struct dp_tx_desc_event),
5672 				   true, DP_TX_TCL_HIST_TYPE);
5673 	dp_soc_frag_history_attach(soc, &soc->tx_comp_history,
5674 				   DP_TX_COMP_HIST_MAX_SLOTS,
5675 				   DP_TX_COMP_HIST_PER_SLOT_MAX,
5676 				   sizeof(struct dp_tx_desc_event),
5677 				   true, DP_TX_COMP_HIST_TYPE);
5678 }
5679 
5680 /**
5681  * dp_soc_tx_history_detach() - Detach the ring history record buffers
5682  * @soc: DP soc structure
5683  *
5684  * This function frees the memory for recording the tx tcl ring and
5685  * the tx comp ring entries.
5686  *
5687  * Returns: None
5688  */
5689 static void dp_soc_tx_history_detach(struct dp_soc *soc)
5690 {
5691 	dp_soc_frag_history_detach(soc, &soc->tx_tcl_history,
5692 				   DP_TX_TCL_HIST_MAX_SLOTS,
5693 				   true, DP_TX_TCL_HIST_TYPE);
5694 	dp_soc_frag_history_detach(soc, &soc->tx_comp_history,
5695 				   DP_TX_COMP_HIST_MAX_SLOTS,
5696 				   true, DP_TX_COMP_HIST_TYPE);
5697 }
5698 
5699 #else
5700 static inline void dp_soc_tx_history_attach(struct dp_soc *soc)
5701 {
5702 }
5703 
5704 static inline void dp_soc_tx_history_detach(struct dp_soc *soc)
5705 {
5706 }
5707 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
5708 
5709 /*
5710 * dp_pdev_attach_wifi3() - attach txrx pdev
5711 * @txrx_soc: Datapath SOC handle
5712 * @params: Params for PDEV attach
5713 *
5714 * Return: QDF_STATUS
5715 */
5716 static inline
5717 QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
5718 				struct cdp_pdev_attach_params *params)
5719 {
5720 	qdf_size_t pdev_context_size;
5721 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5722 	struct dp_pdev *pdev = NULL;
5723 	uint8_t pdev_id = params->pdev_id;
5724 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5725 	int nss_cfg;
5726 
5727 	pdev_context_size =
5728 		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_PDEV);
5729 	if (pdev_context_size)
5730 		pdev = dp_context_alloc_mem(soc, DP_PDEV_TYPE, pdev_context_size);
5731 
5732 	if (!pdev) {
5733 		dp_init_err("%pK: DP PDEV memory allocation failed",
5734 			    soc);
5735 		goto fail0;
5736 	}
5737 	wlan_minidump_log(pdev, sizeof(*pdev), soc->ctrl_psoc,
5738 			  WLAN_MD_DP_PDEV, "dp_pdev");
5739 
5740 	soc_cfg_ctx = soc->wlan_cfg_ctx;
5741 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
5742 
5743 	if (!pdev->wlan_cfg_ctx) {
5744 		dp_init_err("%pK: pdev cfg_attach failed", soc);
5745 		goto fail1;
5746 	}
5747 
5748 	/*
5749 	 * set nss pdev config based on soc config
5750 	 */
5751 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
5752 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
5753 					 (nss_cfg & (1 << pdev_id)));
5754 
5755 	pdev->soc = soc;
5756 	pdev->pdev_id = pdev_id;
5757 	soc->pdev_list[pdev_id] = pdev;
5758 
5759 	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
5760 	soc->pdev_count++;
5761 
5762 	/* Allocate memory for pdev srng rings */
5763 	if (dp_pdev_srng_alloc(pdev)) {
5764 		dp_init_err("%pK: dp_pdev_srng_alloc failed", soc);
5765 		goto fail2;
5766 	}
5767 
5768 	/* Setup second Rx refill buffer ring */
5769 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev)) {
5770 		dp_init_err("%pK: dp_srng_alloc failed rxrefill2 ring",
5771 			    soc);
5772 		goto fail3;
5773 	}
5774 
5775 	/* Allocate memory for pdev rxdma rings */
5776 	if (dp_rxdma_ring_alloc(soc, pdev)) {
5777 		dp_init_err("%pK: dp_rxdma_ring_alloc failed", soc);
5778 		goto fail4;
5779 	}
5780 
5781 	/* Rx specific init */
5782 	if (dp_rx_pdev_desc_pool_alloc(pdev)) {
5783 		dp_init_err("%pK: dp_rx_pdev_attach failed", soc);
5784 		goto fail4;
5785 	}
5786 
5787 	if (dp_monitor_pdev_attach(pdev)) {
5788 		dp_init_err("%pK: dp_monitor_pdev_attach failed", soc);
5789 		goto fail5;
5790 	}
5791 
5792 	soc->arch_ops.txrx_pdev_attach(pdev, params);
5793 
5794 	/* Setup third Rx refill buffer ring */
5795 	if (dp_setup_ipa_rx_alt_refill_buf_ring(soc, pdev)) {
5796 		dp_init_err("%pK: dp_srng_alloc failed rxrefill3 ring",
5797 			    soc);
5798 		goto fail6;
5799 	}
5800 
5801 	return QDF_STATUS_SUCCESS;
5802 
5803 fail6:
5804 	dp_monitor_pdev_detach(pdev);
5805 fail5:
5806 	dp_rx_pdev_desc_pool_free(pdev);
5807 fail4:
5808 	dp_rxdma_ring_free(pdev);
5809 	dp_free_ipa_rx_refill_buf_ring(soc, pdev);
5810 fail3:
5811 	dp_pdev_srng_free(pdev);
5812 fail2:
5813 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
5814 fail1:
5815 	soc->pdev_list[pdev_id] = NULL;
5816 	qdf_mem_free(pdev);
5817 fail0:
5818 	return QDF_STATUS_E_FAILURE;
5819 }
5820 
5821 /**
5822  * dp_pdev_flush_pending_vdevs() - Flush all delete pending vdevs in pdev
5823  * @pdev: Datapath PDEV handle
5824  *
5825  * This is the last chance to flush all pending dp vdevs/peers,
5826  * some peer/vdev leak case like Non-SSR + peer unmap missing
5827  * will be covered here.
5828  *
5829  * Return: None
5830  */
5831 static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
5832 {
5833 	struct dp_soc *soc = pdev->soc;
5834 	struct dp_vdev *vdev_arr[MAX_VDEV_CNT] = {0};
5835 	uint32_t i = 0;
5836 	uint32_t num_vdevs = 0;
5837 	struct dp_vdev *vdev = NULL;
5838 
5839 	if (TAILQ_EMPTY(&soc->inactive_vdev_list))
5840 		return;
5841 
5842 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
5843 	TAILQ_FOREACH(vdev, &soc->inactive_vdev_list,
5844 		      inactive_list_elem) {
5845 		if (vdev->pdev != pdev)
5846 			continue;
5847 
5848 		vdev_arr[num_vdevs] = vdev;
5849 		num_vdevs++;
5850 		/* take reference to free */
5851 		dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CDP);
5852 	}
5853 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
5854 
5855 	for (i = 0; i < num_vdevs; i++) {
5856 		dp_vdev_flush_peers((struct cdp_vdev *)vdev_arr[i], 0, 0);
5857 		dp_vdev_unref_delete(soc, vdev_arr[i], DP_MOD_ID_CDP);
5858 	}
5859 }
5860 
5861 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
5862 /**
5863  * dp_vdev_stats_hw_offload_target_config() - Send HTT command to FW
5864  *                                          for enable/disable of HW vdev stats
5865  * @soc: Datapath soc handle
5866  * @pdev_id: INVALID_PDEV_ID for all pdevs or 0,1,2 for individual pdev
5867  * @enable: flag to represent enable/disable of hw vdev stats
5868  *
5869  * Return: none
5870  */
5871 static void dp_vdev_stats_hw_offload_target_config(struct dp_soc *soc,
5872 						   uint8_t pdev_id,
5873 						   bool enable)
5874 {
5875 	/* Check SOC level config for HW offload vdev stats support */
5876 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
5877 		dp_debug("%pK: HW vdev offload stats is disabled", soc);
5878 		return;
5879 	}
5880 
5881 	/* Send HTT command to FW for enable of stats */
5882 	dp_h2t_hw_vdev_stats_config_send(soc, pdev_id, enable, false, 0);
5883 }
5884 
5885 /**
5886  * dp_vdev_stats_hw_offload_target_clear() - Clear HW vdev stats on target
5887  * @soc: Datapath soc handle
5888  * @pdev_id: pdev_id (0,1,2)
5889  * @bitmask: bitmask with vdev_id(s) for which stats are to be cleared on HW
5890  *
5891  * Return: none
5892  */
5893 static
5894 void dp_vdev_stats_hw_offload_target_clear(struct dp_soc *soc, uint8_t pdev_id,
5895 					   uint64_t vdev_id_bitmask)
5896 {
5897 	/* Check SOC level config for HW offload vdev stats support */
5898 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
5899 		dp_debug("%pK: HW vdev offload stats is disabled", soc);
5900 		return;
5901 	}
5902 
5903 	/* Send HTT command to FW for reset of stats */
5904 	dp_h2t_hw_vdev_stats_config_send(soc, pdev_id, true, true,
5905 					 vdev_id_bitmask);
5906 }
5907 #else
5908 static void
5909 dp_vdev_stats_hw_offload_target_config(struct dp_soc *soc, uint8_t pdev_id,
5910 				       bool enable)
5911 {
5912 }
5913 
5914 static
5915 void dp_vdev_stats_hw_offload_target_clear(struct dp_soc *soc, uint8_t pdev_id,
5916 					   uint64_t vdev_id_bitmask)
5917 {
5918 }
5919 #endif /*QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT */
5920 
5921 /**
5922  * dp_pdev_deinit() - Deinit txrx pdev
5923  * @txrx_pdev: Datapath PDEV handle
5924  * @force: Force deinit
5925  *
5926  * Return: None
5927  */
5928 static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
5929 {
5930 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
5931 	qdf_nbuf_t curr_nbuf, next_nbuf;
5932 
5933 	if (pdev->pdev_deinit)
5934 		return;
5935 
5936 	dp_tx_me_exit(pdev);
5937 	dp_rx_fst_detach(pdev->soc, pdev);
5938 	dp_rx_pdev_buffers_free(pdev);
5939 	dp_rx_pdev_desc_pool_deinit(pdev);
5940 	dp_pdev_bkp_stats_detach(pdev);
5941 	qdf_event_destroy(&pdev->fw_peer_stats_event);
5942 	qdf_event_destroy(&pdev->fw_stats_event);
5943 	qdf_event_destroy(&pdev->fw_obss_stats_event);
5944 	if (pdev->sojourn_buf)
5945 		qdf_nbuf_free(pdev->sojourn_buf);
5946 
5947 	dp_pdev_flush_pending_vdevs(pdev);
5948 	dp_tx_desc_flush(pdev, NULL, true);
5949 
5950 	qdf_spinlock_destroy(&pdev->tx_mutex);
5951 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
5952 
5953 	dp_monitor_pdev_deinit(pdev);
5954 
5955 	dp_pdev_srng_deinit(pdev);
5956 
5957 	dp_ipa_uc_detach(pdev->soc, pdev);
5958 	dp_deinit_ipa_rx_alt_refill_buf_ring(pdev->soc, pdev);
5959 	dp_deinit_ipa_rx_refill_buf_ring(pdev->soc, pdev);
5960 	dp_rxdma_ring_cleanup(pdev->soc, pdev);
5961 
5962 	curr_nbuf = pdev->invalid_peer_head_msdu;
5963 	while (curr_nbuf) {
5964 		next_nbuf = qdf_nbuf_next(curr_nbuf);
5965 		dp_rx_nbuf_free(curr_nbuf);
5966 		curr_nbuf = next_nbuf;
5967 	}
5968 	pdev->invalid_peer_head_msdu = NULL;
5969 	pdev->invalid_peer_tail_msdu = NULL;
5970 
5971 	dp_wdi_event_detach(pdev);
5972 	pdev->pdev_deinit = 1;
5973 }
5974 
5975 /**
5976  * dp_pdev_deinit_wifi3() - Deinit txrx pdev
5977  * @psoc: Datapath psoc handle
5978  * @pdev_id: Id of datapath PDEV handle
5979  * @force: Force deinit
5980  *
5981  * Return: QDF_STATUS
5982  */
5983 static QDF_STATUS
5984 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
5985 		     int force)
5986 {
5987 	struct dp_pdev *txrx_pdev;
5988 
5989 	txrx_pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
5990 						       pdev_id);
5991 
5992 	if (!txrx_pdev)
5993 		return QDF_STATUS_E_FAILURE;
5994 
5995 	dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force);
5996 
5997 	return QDF_STATUS_SUCCESS;
5998 }
5999 
6000 /*
6001  * dp_pdev_post_attach() - Do post pdev attach after dev_alloc_name
6002  * @txrx_pdev: Datapath PDEV handle
6003  *
6004  * Return: None
6005  */
6006 static void dp_pdev_post_attach(struct cdp_pdev *txrx_pdev)
6007 {
6008 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
6009 
6010 	dp_monitor_tx_capture_debugfs_init(pdev);
6011 
6012 	if (dp_pdev_htt_stats_dbgfs_init(pdev)) {
6013 		dp_init_err("%pK: Failed to initialize pdev HTT stats debugfs", pdev->soc);
6014 	}
6015 }
6016 
6017 /*
6018  * dp_pdev_post_attach_wifi3() - attach txrx pdev post
6019  * @psoc: Datapath soc handle
6020  * @pdev_id: pdev id of pdev
6021  *
6022  * Return: QDF_STATUS
6023  */
6024 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *soc,
6025 				     uint8_t pdev_id)
6026 {
6027 	struct dp_pdev *pdev;
6028 
6029 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
6030 						  pdev_id);
6031 
6032 	if (!pdev) {
6033 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
6034 			    (struct dp_soc *)soc, pdev_id);
6035 		return QDF_STATUS_E_FAILURE;
6036 	}
6037 
6038 	dp_pdev_post_attach((struct cdp_pdev *)pdev);
6039 	return QDF_STATUS_SUCCESS;
6040 }
6041 
6042 /*
6043  * dp_pdev_detach() - Complete rest of pdev detach
6044  * @txrx_pdev: Datapath PDEV handle
6045  * @force: Force deinit
6046  *
6047  * Return: None
6048  */
6049 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
6050 {
6051 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
6052 	struct dp_soc *soc = pdev->soc;
6053 
6054 	dp_pdev_htt_stats_dbgfs_deinit(pdev);
6055 	dp_rx_pdev_desc_pool_free(pdev);
6056 	dp_monitor_pdev_detach(pdev);
6057 	dp_rxdma_ring_free(pdev);
6058 	dp_free_ipa_rx_refill_buf_ring(soc, pdev);
6059 	dp_free_ipa_rx_alt_refill_buf_ring(soc, pdev);
6060 	dp_pdev_srng_free(pdev);
6061 
6062 	soc->pdev_count--;
6063 	soc->pdev_list[pdev->pdev_id] = NULL;
6064 
6065 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
6066 	wlan_minidump_remove(pdev, sizeof(*pdev), soc->ctrl_psoc,
6067 			     WLAN_MD_DP_PDEV, "dp_pdev");
6068 	dp_context_free_mem(soc, DP_PDEV_TYPE, pdev);
6069 }
6070 
6071 /*
6072  * dp_pdev_detach_wifi3() - detach txrx pdev
6073  * @psoc: Datapath soc handle
6074  * @pdev_id: pdev id of pdev
6075  * @force: Force detach
6076  *
6077  * Return: QDF_STATUS
6078  */
6079 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
6080 				       int force)
6081 {
6082 	struct dp_pdev *pdev;
6083 	struct dp_soc *soc = (struct dp_soc *)psoc;
6084 
6085 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
6086 						  pdev_id);
6087 
6088 	if (!pdev) {
6089 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
6090 			    (struct dp_soc *)psoc, pdev_id);
6091 		return QDF_STATUS_E_FAILURE;
6092 	}
6093 
6094 	soc->arch_ops.txrx_pdev_detach(pdev);
6095 
6096 	dp_pdev_detach((struct cdp_pdev *)pdev, force);
6097 	return QDF_STATUS_SUCCESS;
6098 }
6099 
6100 /*
6101  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
6102  * @soc: DP SOC handle
6103  */
6104 #ifndef DP_UMAC_HW_RESET_SUPPORT
6105 static inline
6106 #endif
6107 void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
6108 {
6109 	struct reo_desc_list_node *desc;
6110 	struct dp_rx_tid *rx_tid;
6111 
6112 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
6113 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
6114 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
6115 		rx_tid = &desc->rx_tid;
6116 		qdf_mem_unmap_nbytes_single(soc->osdev,
6117 			rx_tid->hw_qdesc_paddr,
6118 			QDF_DMA_BIDIRECTIONAL,
6119 			rx_tid->hw_qdesc_alloc_size);
6120 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
6121 		qdf_mem_free(desc);
6122 	}
6123 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
6124 	qdf_list_destroy(&soc->reo_desc_freelist);
6125 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
6126 }
6127 
6128 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
6129 /*
6130  * dp_reo_desc_deferred_freelist_create() - Initialize the resources used
6131  *                                          for deferred reo desc list
6132  * @psoc: Datapath soc handle
6133  *
6134  * Return: void
6135  */
6136 static void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
6137 {
6138 	qdf_spinlock_create(&soc->reo_desc_deferred_freelist_lock);
6139 	qdf_list_create(&soc->reo_desc_deferred_freelist,
6140 			REO_DESC_DEFERRED_FREELIST_SIZE);
6141 	soc->reo_desc_deferred_freelist_init = true;
6142 }
6143 
6144 /*
6145  * dp_reo_desc_deferred_freelist_destroy() - loop the deferred free list &
6146  *                                           free the leftover REO QDESCs
6147  * @psoc: Datapath soc handle
6148  *
6149  * Return: void
6150  */
6151 static void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
6152 {
6153 	struct reo_desc_deferred_freelist_node *desc;
6154 
6155 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
6156 	soc->reo_desc_deferred_freelist_init = false;
6157 	while (qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
6158 	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
6159 		qdf_mem_unmap_nbytes_single(soc->osdev,
6160 					    desc->hw_qdesc_paddr,
6161 					    QDF_DMA_BIDIRECTIONAL,
6162 					    desc->hw_qdesc_alloc_size);
6163 		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
6164 		qdf_mem_free(desc);
6165 	}
6166 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
6167 
6168 	qdf_list_destroy(&soc->reo_desc_deferred_freelist);
6169 	qdf_spinlock_destroy(&soc->reo_desc_deferred_freelist_lock);
6170 }
6171 #else
6172 static inline void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
6173 {
6174 }
6175 
6176 static inline void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
6177 {
6178 }
6179 #endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
6180 
6181 /*
6182  * dp_soc_reset_txrx_ring_map() - reset tx ring map
6183  * @soc: DP SOC handle
6184  *
6185  */
6186 static void dp_soc_reset_txrx_ring_map(struct dp_soc *soc)
6187 {
6188 	uint32_t i;
6189 
6190 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
6191 		soc->tx_ring_map[i] = 0;
6192 }
6193 
6194 /*
6195  * dp_soc_print_inactive_objects() - prints inactive peer and vdev list
6196  * @soc: DP SOC handle
6197  *
6198  */
6199 static void dp_soc_print_inactive_objects(struct dp_soc *soc)
6200 {
6201 	struct dp_peer *peer = NULL;
6202 	struct dp_peer *tmp_peer = NULL;
6203 	struct dp_vdev *vdev = NULL;
6204 	struct dp_vdev *tmp_vdev = NULL;
6205 	int i = 0;
6206 	uint32_t count;
6207 
6208 	if (TAILQ_EMPTY(&soc->inactive_peer_list) &&
6209 	    TAILQ_EMPTY(&soc->inactive_vdev_list))
6210 		return;
6211 
6212 	TAILQ_FOREACH_SAFE(peer, &soc->inactive_peer_list,
6213 			   inactive_list_elem, tmp_peer) {
6214 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
6215 			count = qdf_atomic_read(&peer->mod_refs[i]);
6216 			if (count)
6217 				DP_PRINT_STATS("peer %pK Module id %u ==> %u",
6218 					       peer, i, count);
6219 		}
6220 	}
6221 
6222 	TAILQ_FOREACH_SAFE(vdev, &soc->inactive_vdev_list,
6223 			   inactive_list_elem, tmp_vdev) {
6224 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
6225 			count = qdf_atomic_read(&vdev->mod_refs[i]);
6226 			if (count)
6227 				DP_PRINT_STATS("vdev %pK Module id %u ==> %u",
6228 					       vdev, i, count);
6229 		}
6230 	}
6231 	QDF_BUG(0);
6232 }
6233 
6234 /**
6235  * dp_soc_deinit() - Deinitialize txrx SOC
6236  * @txrx_soc: Opaque DP SOC handle
6237  *
6238  * Return: None
6239  */
6240 static void dp_soc_deinit(void *txrx_soc)
6241 {
6242 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6243 	struct htt_soc *htt_soc = soc->htt_handle;
6244 
6245 	qdf_atomic_set(&soc->cmn_init_done, 0);
6246 
6247 	if (soc->arch_ops.txrx_soc_ppeds_stop)
6248 		soc->arch_ops.txrx_soc_ppeds_stop(soc);
6249 
6250 	soc->arch_ops.txrx_soc_deinit(soc);
6251 
6252 	dp_monitor_soc_deinit(soc);
6253 
6254 	/* free peer tables & AST tables allocated during peer_map_attach */
6255 	if (soc->peer_map_attach_success) {
6256 		dp_peer_find_detach(soc);
6257 		soc->arch_ops.txrx_peer_map_detach(soc);
6258 		soc->peer_map_attach_success = FALSE;
6259 	}
6260 
6261 	qdf_flush_work(&soc->htt_stats.work);
6262 	qdf_disable_work(&soc->htt_stats.work);
6263 
6264 	qdf_spinlock_destroy(&soc->htt_stats.lock);
6265 
6266 	dp_soc_reset_txrx_ring_map(soc);
6267 
6268 	dp_reo_desc_freelist_destroy(soc);
6269 	dp_reo_desc_deferred_freelist_destroy(soc);
6270 
6271 	DEINIT_RX_HW_STATS_LOCK(soc);
6272 
6273 	qdf_spinlock_destroy(&soc->ast_lock);
6274 
6275 	dp_peer_mec_spinlock_destroy(soc);
6276 
6277 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
6278 
6279 	qdf_nbuf_queue_free(&soc->invalid_buf_queue);
6280 
6281 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
6282 
6283 	qdf_spinlock_destroy(&soc->vdev_map_lock);
6284 
6285 	dp_reo_cmdlist_destroy(soc);
6286 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
6287 
6288 	dp_soc_tx_desc_sw_pools_deinit(soc);
6289 
6290 	dp_soc_srng_deinit(soc);
6291 
6292 	dp_hw_link_desc_ring_deinit(soc);
6293 
6294 	dp_soc_print_inactive_objects(soc);
6295 	qdf_spinlock_destroy(&soc->inactive_peer_list_lock);
6296 	qdf_spinlock_destroy(&soc->inactive_vdev_list_lock);
6297 
6298 	htt_soc_htc_dealloc(soc->htt_handle);
6299 
6300 	htt_soc_detach(htt_soc);
6301 
6302 	/* Free wbm sg list and reset flags in down path */
6303 	dp_rx_wbm_sg_list_deinit(soc);
6304 
6305 	wlan_minidump_remove(soc, sizeof(*soc), soc->ctrl_psoc,
6306 			     WLAN_MD_DP_SOC, "dp_soc");
6307 }
6308 
6309 /**
6310  * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
6311  * @txrx_soc: Opaque DP SOC handle
6312  *
6313  * Return: None
6314  */
6315 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc)
6316 {
6317 	dp_soc_deinit(txrx_soc);
6318 }
6319 
6320 /*
6321  * dp_soc_detach() - Detach rest of txrx SOC
6322  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
6323  *
6324  * Return: None
6325  */
6326 static void dp_soc_detach(struct cdp_soc_t *txrx_soc)
6327 {
6328 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6329 
6330 	soc->arch_ops.txrx_soc_detach(soc);
6331 
6332 	dp_runtime_deinit();
6333 
6334 	dp_sysfs_deinitialize_stats(soc);
6335 	dp_soc_swlm_detach(soc);
6336 	dp_soc_tx_desc_sw_pools_free(soc);
6337 	dp_soc_srng_free(soc);
6338 	dp_hw_link_desc_ring_free(soc);
6339 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
6340 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
6341 	dp_soc_tx_hw_desc_history_detach(soc);
6342 	dp_soc_tx_history_detach(soc);
6343 	dp_soc_mon_status_ring_history_detach(soc);
6344 	dp_soc_rx_history_detach(soc);
6345 
6346 	if (!dp_monitor_modularized_enable()) {
6347 		dp_mon_soc_detach_wrapper(soc);
6348 	}
6349 
6350 	qdf_mem_free(soc->cdp_soc.ops);
6351 	qdf_mem_free(soc);
6352 }
6353 
6354 /*
6355  * dp_soc_detach_wifi3() - Detach txrx SOC
6356  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
6357  *
6358  * Return: None
6359  */
6360 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc)
6361 {
6362 	dp_soc_detach(txrx_soc);
6363 }
6364 
6365 /*
6366  * dp_rxdma_ring_config() - configure the RX DMA rings
6367  *
6368  * This function is used to configure the MAC rings.
6369  * On MCL host provides buffers in Host2FW ring
6370  * FW refills (copies) buffers to the ring and updates
6371  * ring_idx in register
6372  *
6373  * @soc: data path SoC handle
6374  *
6375  * Return: zero on success, non-zero on failure
6376  */
6377 #ifdef QCA_HOST2FW_RXBUF_RING
6378 static inline void
6379 dp_htt_setup_rxdma_err_dst_ring(struct dp_soc *soc, int mac_id,
6380 				int lmac_id)
6381 {
6382 	if (soc->rxdma_err_dst_ring[lmac_id].hal_srng)
6383 		htt_srng_setup(soc->htt_handle, mac_id,
6384 			       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
6385 			       RXDMA_DST);
6386 }
6387 
6388 #ifdef IPA_WDI3_VLAN_SUPPORT
6389 static inline
6390 void dp_rxdma_setup_refill_ring3(struct dp_soc *soc,
6391 				 struct dp_pdev *pdev,
6392 				 uint8_t idx)
6393 {
6394 	if (pdev->rx_refill_buf_ring3.hal_srng)
6395 		htt_srng_setup(soc->htt_handle, idx,
6396 			       pdev->rx_refill_buf_ring3.hal_srng,
6397 			       RXDMA_BUF);
6398 }
6399 #else
6400 static inline
6401 void dp_rxdma_setup_refill_ring3(struct dp_soc *soc,
6402 				 struct dp_pdev *pdev,
6403 				 uint8_t idx)
6404 { }
6405 #endif
6406 
6407 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
6408 {
6409 	int i;
6410 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6411 
6412 	for (i = 0; i < MAX_PDEV_CNT; i++) {
6413 		struct dp_pdev *pdev = soc->pdev_list[i];
6414 
6415 		if (pdev) {
6416 			int mac_id;
6417 			int max_mac_rings =
6418 				 wlan_cfg_get_num_mac_rings
6419 				(pdev->wlan_cfg_ctx);
6420 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);
6421 
6422 			htt_srng_setup(soc->htt_handle, i,
6423 				       soc->rx_refill_buf_ring[lmac_id]
6424 				       .hal_srng,
6425 				       RXDMA_BUF);
6426 
6427 			if (pdev->rx_refill_buf_ring2.hal_srng)
6428 				htt_srng_setup(soc->htt_handle, i,
6429 					       pdev->rx_refill_buf_ring2
6430 					       .hal_srng,
6431 					       RXDMA_BUF);
6432 
6433 			dp_rxdma_setup_refill_ring3(soc, pdev, i);
6434 
6435 			dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);
6436 			dp_err("pdev_id %d max_mac_rings %d",
6437 			       pdev->pdev_id, max_mac_rings);
6438 
6439 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
6440 				int mac_for_pdev =
6441 					dp_get_mac_id_for_pdev(mac_id,
6442 							       pdev->pdev_id);
6443 				/*
6444 				 * Obtain lmac id from pdev to access the LMAC
6445 				 * ring in soc context
6446 				 */
6447 				lmac_id =
6448 				dp_get_lmac_id_for_pdev_id(soc,
6449 							   mac_id,
6450 							   pdev->pdev_id);
6451 				QDF_TRACE(QDF_MODULE_ID_TXRX,
6452 					 QDF_TRACE_LEVEL_ERROR,
6453 					 FL("mac_id %d"), mac_for_pdev);
6454 
6455 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
6456 					 pdev->rx_mac_buf_ring[mac_id]
6457 						.hal_srng,
6458 					 RXDMA_BUF);
6459 
6460 				if (!soc->rxdma2sw_rings_not_supported)
6461 					dp_htt_setup_rxdma_err_dst_ring(soc,
6462 						mac_for_pdev, lmac_id);
6463 
6464 				/* Configure monitor mode rings */
6465 				status = dp_monitor_htt_srng_setup(soc, pdev,
6466 								   lmac_id,
6467 								   mac_for_pdev);
6468 				if (status != QDF_STATUS_SUCCESS) {
6469 					dp_err("Failed to send htt monitor messages to target");
6470 					return status;
6471 				}
6472 
6473 			}
6474 		}
6475 	}
6476 
6477 	dp_reap_timer_init(soc);
6478 	return status;
6479 }
6480 #else
6481 /* This is only for WIN */
6482 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
6483 {
6484 	int i;
6485 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6486 	int mac_for_pdev;
6487 	int lmac_id;
6488 
6489 	/* Configure monitor mode rings */
6490 	dp_monitor_soc_htt_srng_setup(soc);
6491 
6492 	for (i = 0; i < MAX_PDEV_CNT; i++) {
6493 		struct dp_pdev *pdev =  soc->pdev_list[i];
6494 
6495 		if (!pdev)
6496 			continue;
6497 
6498 		mac_for_pdev = i;
6499 		lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);
6500 
6501 		if (soc->rx_refill_buf_ring[lmac_id].hal_srng)
6502 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
6503 				       soc->rx_refill_buf_ring[lmac_id].
6504 				       hal_srng, RXDMA_BUF);
6505 
6506 		/* Configure monitor mode rings */
6507 		dp_monitor_htt_srng_setup(soc, pdev,
6508 					  lmac_id,
6509 					  mac_for_pdev);
6510 		if (!soc->rxdma2sw_rings_not_supported)
6511 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
6512 				       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
6513 				       RXDMA_DST);
6514 	}
6515 
6516 	dp_reap_timer_init(soc);
6517 	return status;
6518 }
6519 #endif
6520 
6521 /*
6522  * dp_rx_target_fst_config() - configure the RXOLE Flow Search Engine
6523  *
6524  * This function is used to configure the FSE HW block in RX OLE on a
6525  * per pdev basis. Here, we will be programming parameters related to
6526  * the Flow Search Table.
6527  *
6528  * @soc: data path SoC handle
6529  *
6530  * Return: zero on success, non-zero on failure
6531  */
6532 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
6533 static QDF_STATUS
6534 dp_rx_target_fst_config(struct dp_soc *soc)
6535 {
6536 	int i;
6537 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6538 
6539 	for (i = 0; i < MAX_PDEV_CNT; i++) {
6540 		struct dp_pdev *pdev = soc->pdev_list[i];
6541 
6542 		/* Flow search is not enabled if NSS offload is enabled */
6543 		if (pdev &&
6544 		    !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
6545 			status = dp_rx_flow_send_fst_fw_setup(pdev->soc, pdev);
6546 			if (status != QDF_STATUS_SUCCESS)
6547 				break;
6548 		}
6549 	}
6550 	return status;
6551 }
6552 #elif defined(WLAN_SUPPORT_RX_FISA)
6553 /**
6554  * dp_rx_target_fst_config() - Configure RX OLE FSE engine in HW
6555  * @soc: SoC handle
6556  *
6557  * Return: Success
6558  */
6559 static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
6560 {
6561 	QDF_STATUS status;
6562 	struct dp_rx_fst *fst = soc->rx_fst;
6563 
6564 	/* Check if it is enabled in the INI */
6565 	if (!soc->fisa_enable) {
6566 		dp_err("RX FISA feature is disabled");
6567 		return QDF_STATUS_E_NOSUPPORT;
6568 	}
6569 
6570 	status = dp_rx_flow_send_fst_fw_setup(soc, soc->pdev_list[0]);
6571 	if (QDF_IS_STATUS_ERROR(status)) {
6572 		dp_err("dp_rx_flow_send_fst_fw_setup failed %d",
6573 		       status);
6574 		return status;
6575 	}
6576 
6577 	if (soc->fst_cmem_base) {
6578 		soc->fst_in_cmem = true;
6579 		dp_rx_fst_update_cmem_params(soc, fst->max_entries,
6580 					     soc->fst_cmem_base & 0xffffffff,
6581 					     soc->fst_cmem_base >> 32);
6582 	}
6583 	return status;
6584 }
6585 
6586 #define FISA_MAX_TIMEOUT 0xffffffff
6587 #define FISA_DISABLE_TIMEOUT 0
6588 static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
6589 {
6590 	struct dp_htt_rx_fisa_cfg fisa_config;
6591 
6592 	fisa_config.pdev_id = 0;
6593 	fisa_config.fisa_timeout = FISA_MAX_TIMEOUT;
6594 
6595 	return dp_htt_rx_fisa_config(soc->pdev_list[0], &fisa_config);
6596 }
6597 
6598 #else /* !WLAN_SUPPORT_RX_FISA */
6599 static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
6600 {
6601 	return QDF_STATUS_SUCCESS;
6602 }
6603 #endif /* !WLAN_SUPPORT_RX_FISA */
6604 
6605 #ifndef WLAN_SUPPORT_RX_FISA
6606 static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
6607 {
6608 	return QDF_STATUS_SUCCESS;
6609 }
6610 
6611 static QDF_STATUS dp_rx_dump_fisa_stats(struct dp_soc *soc)
6612 {
6613 	return QDF_STATUS_SUCCESS;
6614 }
6615 
6616 static void dp_rx_dump_fisa_table(struct dp_soc *soc)
6617 {
6618 }
6619 
6620 static void dp_suspend_fse_cache_flush(struct dp_soc *soc)
6621 {
6622 }
6623 
6624 static void dp_resume_fse_cache_flush(struct dp_soc *soc)
6625 {
6626 }
6627 #endif /* !WLAN_SUPPORT_RX_FISA */
6628 
6629 #ifndef WLAN_DP_FEATURE_SW_LATENCY_MGR
6630 static inline QDF_STATUS dp_print_swlm_stats(struct dp_soc *soc)
6631 {
6632 	return QDF_STATUS_SUCCESS;
6633 }
6634 #endif /* !WLAN_DP_FEATURE_SW_LATENCY_MGR */
6635 
6636 #ifdef WLAN_SUPPORT_PPEDS
6637 /*
6638  * dp_soc_target_ppe_rxole_rxdma_cfg() - Configure the RxOLe and RxDMA for PPE
6639  * @soc: DP Tx/Rx handle
6640  *
6641  * Return: QDF_STATUS
6642  */
6643 static
6644 QDF_STATUS dp_soc_target_ppe_rxole_rxdma_cfg(struct dp_soc *soc)
6645 {
6646 	struct dp_htt_rxdma_rxole_ppe_config htt_cfg = {0};
6647 	QDF_STATUS status;
6648 
6649 	/*
6650 	 * Program RxDMA to override the reo destination indication
6651 	 * with REO2PPE_DST_IND, when use_ppe is set to 1 in RX_MSDU_END,
6652 	 * thereby driving the packet to REO2PPE ring.
6653 	 * If the MSDU is spanning more than 1 buffer, then this
6654 	 * override is not done.
6655 	 */
6656 	htt_cfg.override = 1;
6657 	htt_cfg.reo_destination_indication = REO2PPE_DST_IND;
6658 	htt_cfg.multi_buffer_msdu_override_en = 0;
6659 
6660 	/*
6661 	 * Override use_ppe to 0 in RxOLE for the following
6662 	 * cases.
6663 	 */
6664 	htt_cfg.intra_bss_override = 1;
6665 	htt_cfg.decap_raw_override = 1;
6666 	htt_cfg.decap_nwifi_override = 1;
6667 	htt_cfg.ip_frag_override = 1;
6668 
6669 	status = dp_htt_rxdma_rxole_ppe_cfg_set(soc, &htt_cfg);
6670 	if (status != QDF_STATUS_SUCCESS)
6671 		dp_err("RxOLE and RxDMA PPE config failed %d", status);
6672 
6673 	return status;
6674 }
6675 #else
6676 static inline
6677 QDF_STATUS dp_soc_target_ppe_rxole_rxdma_cfg(struct dp_soc *soc)
6678 {
6679 	return QDF_STATUS_SUCCESS;
6680 }
6681 #endif /* WLAN_SUPPORT_PPEDS */
6682 
6683 #ifdef DP_UMAC_HW_RESET_SUPPORT
6684 static void dp_register_umac_reset_handlers(struct dp_soc *soc)
6685 {
6686 	dp_umac_reset_register_rx_action_callback(soc,
6687 		dp_umac_reset_handle_pre_reset, UMAC_RESET_ACTION_DO_PRE_RESET);
6688 
6689 	dp_umac_reset_register_rx_action_callback(soc,
6690 					dp_umac_reset_handle_post_reset,
6691 					UMAC_RESET_ACTION_DO_POST_RESET_START);
6692 
6693 	dp_umac_reset_register_rx_action_callback(soc,
6694 				dp_umac_reset_handle_post_reset_complete,
6695 				UMAC_RESET_ACTION_DO_POST_RESET_COMPLETE);
6696 
6697 }
6698 #else
6699 static void dp_register_umac_reset_handlers(struct dp_soc *soc)
6700 {
6701 }
6702 #endif
6703 /*
6704  * dp_soc_attach_target_wifi3() - SOC initialization in the target
6705  * @cdp_soc: Opaque Datapath SOC handle
6706  *
6707  * Return: zero on success, non-zero on failure
6708  */
6709 static QDF_STATUS
6710 dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
6711 {
6712 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
6713 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6714 	struct hal_reo_params reo_params;
6715 
6716 	htt_soc_attach_target(soc->htt_handle);
6717 
6718 	status = dp_soc_target_ppe_rxole_rxdma_cfg(soc);
6719 	if (status != QDF_STATUS_SUCCESS) {
6720 		dp_err("Failed to send htt RxOLE and RxDMA messages to target");
6721 		return status;
6722 	}
6723 
6724 	status = dp_rxdma_ring_config(soc);
6725 	if (status != QDF_STATUS_SUCCESS) {
6726 		dp_err("Failed to send htt srng setup messages to target");
6727 		return status;
6728 	}
6729 
6730 	status = soc->arch_ops.dp_rxdma_ring_sel_cfg(soc);
6731 	if (status != QDF_STATUS_SUCCESS) {
6732 		dp_err("Failed to send htt ring config message to target");
6733 		return status;
6734 	}
6735 
6736 	status = dp_soc_umac_reset_init(soc);
6737 	if (status != QDF_STATUS_SUCCESS &&
6738 	    status != QDF_STATUS_E_NOSUPPORT) {
6739 		dp_err("Failed to initialize UMAC reset");
6740 		return status;
6741 	}
6742 
6743 	dp_register_umac_reset_handlers(soc);
6744 
6745 	status = dp_rx_target_fst_config(soc);
6746 	if (status != QDF_STATUS_SUCCESS &&
6747 	    status != QDF_STATUS_E_NOSUPPORT) {
6748 		dp_err("Failed to send htt fst setup config message to target");
6749 		return status;
6750 	}
6751 
6752 	if (status == QDF_STATUS_SUCCESS) {
6753 		status = dp_rx_fisa_config(soc);
6754 		if (status != QDF_STATUS_SUCCESS) {
6755 			dp_err("Failed to send htt FISA config message to target");
6756 			return status;
6757 		}
6758 	}
6759 
6760 	DP_STATS_INIT(soc);
6761 
6762 	dp_runtime_init(soc);
6763 
6764 	/* Enable HW vdev offload stats if feature is supported */
6765 	dp_vdev_stats_hw_offload_target_config(soc, INVALID_PDEV_ID, true);
6766 
6767 	/* initialize work queue for stats processing */
6768 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
6769 
6770 	wlan_cfg_soc_update_tgt_params(soc->wlan_cfg_ctx,
6771 				       soc->ctrl_psoc);
6772 	/* Setup HW REO */
6773 	qdf_mem_zero(&reo_params, sizeof(reo_params));
6774 
6775 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
6776 		/*
6777 		 * Reo ring remap is not required if both radios
6778 		 * are offloaded to NSS
6779 		 */
6780 
6781 		if (soc->arch_ops.reo_remap_config(soc, &reo_params.remap0,
6782 						   &reo_params.remap1,
6783 						   &reo_params.remap2))
6784 			reo_params.rx_hash_enabled = true;
6785 		else
6786 			reo_params.rx_hash_enabled = false;
6787 	}
6788 
6789 	/*
6790 	 * set the fragment destination ring
6791 	 */
6792 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
6793 
6794 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
6795 		reo_params.alt_dst_ind_0 = REO_REMAP_RELEASE;
6796 
6797 	hal_reo_setup(soc->hal_soc, &reo_params, 1);
6798 
6799 	hal_reo_set_err_dst_remap(soc->hal_soc);
6800 
6801 	soc->features.pn_in_reo_dest = hal_reo_enable_pn_in_dest(soc->hal_soc);
6802 
6803 	return QDF_STATUS_SUCCESS;
6804 }
6805 
6806 /*
6807  * dp_vdev_id_map_tbl_add() - Add vdev into vdev_id table
6808  * @soc: SoC handle
6809  * @vdev: vdev handle
6810  * @vdev_id: vdev_id
6811  *
6812  * Return: None
6813  */
6814 static void dp_vdev_id_map_tbl_add(struct dp_soc *soc,
6815 				   struct dp_vdev *vdev,
6816 				   uint8_t vdev_id)
6817 {
6818 	QDF_ASSERT(vdev_id <= MAX_VDEV_CNT);
6819 
6820 	qdf_spin_lock_bh(&soc->vdev_map_lock);
6821 
6822 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
6823 			QDF_STATUS_SUCCESS) {
6824 		dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK vdev_id %u",
6825 			     soc, vdev, vdev_id);
6826 		qdf_spin_unlock_bh(&soc->vdev_map_lock);
6827 		return;
6828 	}
6829 
6830 	if (!soc->vdev_id_map[vdev_id])
6831 		soc->vdev_id_map[vdev_id] = vdev;
6832 	else
6833 		QDF_ASSERT(0);
6834 
6835 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
6836 }
6837 
6838 /*
6839  * dp_vdev_id_map_tbl_remove() - remove vdev from vdev_id table
6840  * @soc: SoC handle
6841  * @vdev: vdev handle
6842  *
6843  * Return: None
6844  */
6845 static void dp_vdev_id_map_tbl_remove(struct dp_soc *soc,
6846 				      struct dp_vdev *vdev)
6847 {
6848 	qdf_spin_lock_bh(&soc->vdev_map_lock);
6849 	QDF_ASSERT(soc->vdev_id_map[vdev->vdev_id] == vdev);
6850 
6851 	soc->vdev_id_map[vdev->vdev_id] = NULL;
6852 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
6853 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
6854 }
6855 
6856 /*
6857  * dp_vdev_pdev_list_add() - add vdev into pdev's list
6858  * @soc: soc handle
6859  * @pdev: pdev handle
6860  * @vdev: vdev handle
6861  *
6862  * return: none
6863  */
6864 static void dp_vdev_pdev_list_add(struct dp_soc *soc,
6865 				  struct dp_pdev *pdev,
6866 				  struct dp_vdev *vdev)
6867 {
6868 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
6869 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
6870 			QDF_STATUS_SUCCESS) {
6871 		dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK",
6872 			     soc, vdev);
6873 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6874 		return;
6875 	}
6876 	/* add this vdev into the pdev's list */
6877 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
6878 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6879 }
6880 
6881 /*
6882  * dp_vdev_pdev_list_remove() - remove vdev from pdev's list
6883  * @soc: SoC handle
6884  * @pdev: pdev handle
6885  * @vdev: VDEV handle
6886  *
6887  * Return: none
6888  */
6889 static void dp_vdev_pdev_list_remove(struct dp_soc *soc,
6890 				     struct dp_pdev *pdev,
6891 				     struct dp_vdev *vdev)
6892 {
6893 	uint8_t found = 0;
6894 	struct dp_vdev *tmpvdev = NULL;
6895 
6896 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
6897 	TAILQ_FOREACH(tmpvdev, &pdev->vdev_list, vdev_list_elem) {
6898 		if (tmpvdev == vdev) {
6899 			found = 1;
6900 			break;
6901 		}
6902 	}
6903 
6904 	if (found) {
6905 		TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
6906 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
6907 	} else {
6908 		dp_vdev_debug("%pK: vdev:%pK not found in pdev:%pK vdevlist:%pK",
6909 			      soc, vdev, pdev, &pdev->vdev_list);
6910 		QDF_ASSERT(0);
6911 	}
6912 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6913 }
6914 
6915 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
6916 /*
6917  * dp_vdev_init_rx_eapol() - initializing osif_rx_eapol
6918  * @vdev: Datapath VDEV handle
6919  *
6920  * Return: None
6921  */
6922 static inline void dp_vdev_init_rx_eapol(struct dp_vdev *vdev)
6923 {
6924 	vdev->osif_rx_eapol = NULL;
6925 }
6926 
6927 /*
6928  * dp_vdev_register_rx_eapol() - Register VDEV operations for rx_eapol
6929  * @vdev: DP vdev handle
6930  * @txrx_ops: Tx and Rx operations
6931  *
6932  * Return: None
6933  */
6934 static inline void dp_vdev_register_rx_eapol(struct dp_vdev *vdev,
6935 					     struct ol_txrx_ops *txrx_ops)
6936 {
6937 	vdev->osif_rx_eapol = txrx_ops->rx.rx_eapol;
6938 }
6939 #else
6940 static inline void dp_vdev_init_rx_eapol(struct dp_vdev *vdev)
6941 {
6942 }
6943 
6944 static inline void dp_vdev_register_rx_eapol(struct dp_vdev *vdev,
6945 					     struct ol_txrx_ops *txrx_ops)
6946 {
6947 }
6948 #endif
6949 
6950 #ifdef WLAN_FEATURE_11BE_MLO
6951 #if defined(WLAN_MLO_MULTI_CHIP) && defined(WLAN_MCAST_MLO)
6952 static inline void dp_vdev_save_mld_info(struct dp_vdev *vdev,
6953 					 struct cdp_vdev_info *vdev_info)
6954 {
6955 	if (qdf_is_macaddr_zero((struct qdf_mac_addr *)vdev_info->mld_mac_addr))
6956 		vdev->mlo_vdev = false;
6957 	else
6958 		vdev->mlo_vdev = true;
6959 }
6960 #else
6961 static inline void dp_vdev_save_mld_info(struct dp_vdev *vdev,
6962 					 struct cdp_vdev_info *vdev_info)
6963 {
6964 }
6965 #endif
6966 static inline void dp_vdev_save_mld_addr(struct dp_vdev *vdev,
6967 					 struct cdp_vdev_info *vdev_info)
6968 {
6969 	if (vdev_info->mld_mac_addr)
6970 		qdf_mem_copy(&vdev->mld_mac_addr.raw[0],
6971 			     vdev_info->mld_mac_addr, QDF_MAC_ADDR_SIZE);
6972 
6973 	dp_vdev_save_mld_info(vdev, vdev_info);
6974 
6975 }
6976 #else
6977 static inline void dp_vdev_save_mld_addr(struct dp_vdev *vdev,
6978 					 struct cdp_vdev_info *vdev_info)
6979 {
6980 
6981 }
6982 #endif
6983 
6984 #ifdef DP_TRAFFIC_END_INDICATION
6985 /*
6986  * dp_tx_traffic_end_indication_attach() - Initialize data end indication
6987  *                                         related members in VDEV
6988  * @vdev: DP vdev handle
6989  *
6990  * Return: None
6991  */
6992 static inline void
6993 dp_tx_vdev_traffic_end_indication_attach(struct dp_vdev *vdev)
6994 {
6995 	qdf_nbuf_queue_init(&vdev->end_ind_pkt_q);
6996 }
6997 
6998 /*
6999  * dp_tx_vdev_traffic_end_indication_detach() - De-init data end indication
7000  *                                              related members in VDEV
7001  * @vdev: DP vdev handle
7002  *
7003  * Return: None
7004  */
7005 static inline void
7006 dp_tx_vdev_traffic_end_indication_detach(struct dp_vdev *vdev)
7007 {
7008 	qdf_nbuf_t nbuf;
7009 
7010 	while ((nbuf = qdf_nbuf_queue_remove(&vdev->end_ind_pkt_q)) != NULL)
7011 		qdf_nbuf_free(nbuf);
7012 }
7013 #else
7014 static inline void
7015 dp_tx_vdev_traffic_end_indication_attach(struct dp_vdev *vdev)
7016 {}
7017 
7018 static inline void
7019 dp_tx_vdev_traffic_end_indication_detach(struct dp_vdev *vdev)
7020 {}
7021 #endif
7022 
7023 /*
7024 * dp_vdev_attach_wifi3() - attach txrx vdev
7025 * @txrx_pdev: Datapath PDEV handle
7026 * @pdev_id: PDEV ID for vdev creation
7027 * @vdev_info: parameters used for vdev creation
7028 *
7029 * Return: status
7030 */
7031 static QDF_STATUS dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc,
7032 				       uint8_t pdev_id,
7033 				       struct cdp_vdev_info *vdev_info)
7034 {
7035 	int i = 0;
7036 	qdf_size_t vdev_context_size;
7037 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
7038 	struct dp_pdev *pdev =
7039 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7040 						   pdev_id);
7041 	struct dp_vdev *vdev;
7042 	uint8_t *vdev_mac_addr = vdev_info->vdev_mac_addr;
7043 	uint8_t vdev_id = vdev_info->vdev_id;
7044 	enum wlan_op_mode op_mode = vdev_info->op_mode;
7045 	enum wlan_op_subtype subtype = vdev_info->subtype;
7046 	uint8_t vdev_stats_id = vdev_info->vdev_stats_id;
7047 
7048 	vdev_context_size =
7049 		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_VDEV);
7050 	vdev = qdf_mem_malloc(vdev_context_size);
7051 
7052 	if (!pdev) {
7053 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
7054 			    cdp_soc, pdev_id);
7055 		qdf_mem_free(vdev);
7056 		goto fail0;
7057 	}
7058 
7059 	if (!vdev) {
7060 		dp_init_err("%pK: DP VDEV memory allocation failed",
7061 			    cdp_soc);
7062 		goto fail0;
7063 	}
7064 
7065 	wlan_minidump_log(vdev, sizeof(*vdev), soc->ctrl_psoc,
7066 			  WLAN_MD_DP_VDEV, "dp_vdev");
7067 
7068 	vdev->pdev = pdev;
7069 	vdev->vdev_id = vdev_id;
7070 	vdev->vdev_stats_id = vdev_stats_id;
7071 	vdev->opmode = op_mode;
7072 	vdev->subtype = subtype;
7073 	vdev->osdev = soc->osdev;
7074 
7075 	vdev->osif_rx = NULL;
7076 	vdev->osif_rsim_rx_decap = NULL;
7077 	vdev->osif_get_key = NULL;
7078 	vdev->osif_tx_free_ext = NULL;
7079 	vdev->osif_vdev = NULL;
7080 
7081 	vdev->delete.pending = 0;
7082 	vdev->safemode = 0;
7083 	vdev->drop_unenc = 1;
7084 	vdev->sec_type = cdp_sec_type_none;
7085 	vdev->multipass_en = false;
7086 	vdev->wrap_vdev = false;
7087 	dp_vdev_init_rx_eapol(vdev);
7088 	qdf_atomic_init(&vdev->ref_cnt);
7089 	for (i = 0; i < DP_MOD_ID_MAX; i++)
7090 		qdf_atomic_init(&vdev->mod_refs[i]);
7091 
7092 	/* Take one reference for create*/
7093 	qdf_atomic_inc(&vdev->ref_cnt);
7094 	qdf_atomic_inc(&vdev->mod_refs[DP_MOD_ID_CONFIG]);
7095 	vdev->num_peers = 0;
7096 #ifdef notyet
7097 	vdev->filters_num = 0;
7098 #endif
7099 	vdev->lmac_id = pdev->lmac_id;
7100 
7101 	qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE);
7102 
7103 	dp_vdev_save_mld_addr(vdev, vdev_info);
7104 
7105 	/* TODO: Initialize default HTT meta data that will be used in
7106 	 * TCL descriptors for packets transmitted from this VDEV
7107 	 */
7108 
7109 	qdf_spinlock_create(&vdev->peer_list_lock);
7110 	TAILQ_INIT(&vdev->peer_list);
7111 	dp_peer_multipass_list_init(vdev);
7112 	if ((soc->intr_mode == DP_INTR_POLL) &&
7113 	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
7114 		if ((pdev->vdev_count == 0) ||
7115 		    (wlan_op_mode_monitor == vdev->opmode))
7116 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7117 	} else if (dp_soc_get_con_mode(soc) == QDF_GLOBAL_MISSION_MODE &&
7118 		   soc->intr_mode == DP_INTR_MSI &&
7119 		   wlan_op_mode_monitor == vdev->opmode) {
7120 		/* Timer to reap status ring in mission mode */
7121 		dp_monitor_vdev_timer_start(soc);
7122 	}
7123 
7124 	dp_vdev_id_map_tbl_add(soc, vdev, vdev_id);
7125 
7126 	if (wlan_op_mode_monitor == vdev->opmode) {
7127 		if (dp_monitor_vdev_attach(vdev) == QDF_STATUS_SUCCESS) {
7128 			dp_monitor_pdev_set_mon_vdev(vdev);
7129 			return dp_monitor_vdev_set_monitor_mode_buf_rings(pdev);
7130 		}
7131 		return QDF_STATUS_E_FAILURE;
7132 	}
7133 
7134 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
7135 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
7136 	vdev->dscp_tid_map_id = 0;
7137 	vdev->mcast_enhancement_en = 0;
7138 	vdev->igmp_mcast_enhanc_en = 0;
7139 	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
7140 	vdev->prev_tx_enq_tstamp = 0;
7141 	vdev->prev_rx_deliver_tstamp = 0;
7142 	vdev->skip_sw_tid_classification = DP_TX_HW_DSCP_TID_MAP_VALID;
7143 	dp_tx_vdev_traffic_end_indication_attach(vdev);
7144 
7145 	dp_vdev_pdev_list_add(soc, pdev, vdev);
7146 	pdev->vdev_count++;
7147 
7148 	if (wlan_op_mode_sta != vdev->opmode &&
7149 	    wlan_op_mode_ndi != vdev->opmode)
7150 		vdev->ap_bridge_enabled = true;
7151 	else
7152 		vdev->ap_bridge_enabled = false;
7153 	dp_init_info("%pK: wlan_cfg_ap_bridge_enabled %d",
7154 		     cdp_soc, vdev->ap_bridge_enabled);
7155 
7156 	dp_tx_vdev_attach(vdev);
7157 
7158 	dp_monitor_vdev_attach(vdev);
7159 	if (!pdev->is_lro_hash_configured) {
7160 		if (QDF_IS_STATUS_SUCCESS(dp_lro_hash_setup(soc, pdev)))
7161 			pdev->is_lro_hash_configured = true;
7162 		else
7163 			dp_err("LRO hash setup failure!");
7164 	}
7165 
7166 	dp_info("Created vdev %pK ("QDF_MAC_ADDR_FMT")", vdev,
7167 		QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
7168 	DP_STATS_INIT(vdev);
7169 
7170 	if (QDF_IS_STATUS_ERROR(soc->arch_ops.txrx_vdev_attach(soc, vdev)))
7171 		goto fail0;
7172 
7173 	if (wlan_op_mode_sta == vdev->opmode)
7174 		dp_peer_create_wifi3((struct cdp_soc_t *)soc, vdev_id,
7175 				     vdev->mac_addr.raw, CDP_LINK_PEER_TYPE);
7176 
7177 	dp_pdev_update_fast_rx_flag(soc, pdev);
7178 
7179 	return QDF_STATUS_SUCCESS;
7180 
7181 fail0:
7182 	return QDF_STATUS_E_FAILURE;
7183 }
7184 
7185 #ifndef QCA_HOST_MODE_WIFI_DISABLED
7186 /**
7187  * dp_vdev_fetch_tx_handlers() - Fetch Tx handlers
7188  * @vdev: struct dp_vdev *
7189  * @soc: struct dp_soc *
7190  * @ctx: struct ol_txrx_hardtart_ctxt *
7191  */
7192 static inline void dp_vdev_fetch_tx_handler(struct dp_vdev *vdev,
7193 					    struct dp_soc *soc,
7194 					    struct ol_txrx_hardtart_ctxt *ctx)
7195 {
7196 	/* Enable vdev_id check only for ap, if flag is enabled */
7197 	if (vdev->mesh_vdev)
7198 		ctx->tx = dp_tx_send_mesh;
7199 	else if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
7200 		 (vdev->opmode == wlan_op_mode_ap)) {
7201 		ctx->tx = dp_tx_send_vdev_id_check;
7202 		ctx->tx_fast = dp_tx_send_vdev_id_check;
7203 	} else {
7204 		ctx->tx = dp_tx_send;
7205 		ctx->tx_fast = soc->arch_ops.dp_tx_send_fast;
7206 	}
7207 
7208 	/* Avoid check in regular exception Path */
7209 	if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
7210 	    (vdev->opmode == wlan_op_mode_ap))
7211 		ctx->tx_exception = dp_tx_send_exception_vdev_id_check;
7212 	else
7213 		ctx->tx_exception = dp_tx_send_exception;
7214 }
7215 
7216 /**
7217  * dp_vdev_register_tx_handler() - Register Tx handler
7218  * @vdev: struct dp_vdev *
7219  * @soc: struct dp_soc *
7220  * @txrx_ops: struct ol_txrx_ops *
7221  */
7222 static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
7223 					       struct dp_soc *soc,
7224 					       struct ol_txrx_ops *txrx_ops)
7225 {
7226 	struct ol_txrx_hardtart_ctxt ctx = {0};
7227 
7228 	dp_vdev_fetch_tx_handler(vdev, soc, &ctx);
7229 
7230 	txrx_ops->tx.tx = ctx.tx;
7231 	txrx_ops->tx.tx_fast = ctx.tx_fast;
7232 	txrx_ops->tx.tx_exception = ctx.tx_exception;
7233 
7234 	dp_info("Configure tx_vdev_id_chk_handler Feature Flag: %d and mode:%d for vdev_id:%d",
7235 		wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx),
7236 		vdev->opmode, vdev->vdev_id);
7237 }
7238 #else /* QCA_HOST_MODE_WIFI_DISABLED */
7239 static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
7240 					       struct dp_soc *soc,
7241 					       struct ol_txrx_ops *txrx_ops)
7242 {
7243 }
7244 
7245 static inline void dp_vdev_fetch_tx_handler(struct dp_vdev *vdev,
7246 					    struct dp_soc *soc,
7247 					    struct ol_txrx_hardtart_ctxt *ctx)
7248 {
7249 }
7250 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
7251 
7252 /**
7253  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
7254  * @soc: Datapath soc handle
7255  * @vdev_id: id of Datapath VDEV handle
7256  * @osif_vdev: OSIF vdev handle
7257  * @txrx_ops: Tx and Rx operations
7258  *
7259  * Return: DP VDEV handle on success, NULL on failure
7260  */
7261 static QDF_STATUS dp_vdev_register_wifi3(struct cdp_soc_t *soc_hdl,
7262 					 uint8_t vdev_id,
7263 					 ol_osif_vdev_handle osif_vdev,
7264 					 struct ol_txrx_ops *txrx_ops)
7265 {
7266 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7267 	struct dp_vdev *vdev =	dp_vdev_get_ref_by_id(soc, vdev_id,
7268 						      DP_MOD_ID_CDP);
7269 
7270 	if (!vdev)
7271 		return QDF_STATUS_E_FAILURE;
7272 
7273 	vdev->osif_vdev = osif_vdev;
7274 	vdev->osif_rx = txrx_ops->rx.rx;
7275 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
7276 	vdev->osif_rx_flush = txrx_ops->rx.rx_flush;
7277 	vdev->osif_gro_flush = txrx_ops->rx.rx_gro_flush;
7278 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
7279 	vdev->osif_fisa_rx = txrx_ops->rx.osif_fisa_rx;
7280 	vdev->osif_fisa_flush = txrx_ops->rx.osif_fisa_flush;
7281 	vdev->osif_get_key = txrx_ops->get_key;
7282 	dp_monitor_vdev_register_osif(vdev, txrx_ops);
7283 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
7284 	vdev->tx_comp = txrx_ops->tx.tx_comp;
7285 	vdev->stats_cb = txrx_ops->rx.stats_rx;
7286 	vdev->tx_classify_critical_pkt_cb =
7287 		txrx_ops->tx.tx_classify_critical_pkt_cb;
7288 #ifdef notyet
7289 #if ATH_SUPPORT_WAPI
7290 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
7291 #endif
7292 #endif
7293 #ifdef UMAC_SUPPORT_PROXY_ARP
7294 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
7295 #endif
7296 	vdev->me_convert = txrx_ops->me_convert;
7297 	vdev->get_tsf_time = txrx_ops->get_tsf_time;
7298 
7299 	dp_vdev_register_rx_eapol(vdev, txrx_ops);
7300 
7301 	dp_vdev_register_tx_handler(vdev, soc, txrx_ops);
7302 
7303 	dp_init_info("%pK: DP Vdev Register success", soc);
7304 
7305 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7306 	return QDF_STATUS_SUCCESS;
7307 }
7308 
7309 #ifdef WLAN_FEATURE_11BE_MLO
7310 void dp_peer_delete(struct dp_soc *soc,
7311 		    struct dp_peer *peer,
7312 		    void *arg)
7313 {
7314 	if (!peer->valid)
7315 		return;
7316 
7317 	dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
7318 			     peer->vdev->vdev_id,
7319 			     peer->mac_addr.raw, 0,
7320 			     peer->peer_type);
7321 }
7322 #else
7323 void dp_peer_delete(struct dp_soc *soc,
7324 		    struct dp_peer *peer,
7325 		    void *arg)
7326 {
7327 	if (!peer->valid)
7328 		return;
7329 
7330 	dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
7331 			     peer->vdev->vdev_id,
7332 			     peer->mac_addr.raw, 0,
7333 			     CDP_LINK_PEER_TYPE);
7334 }
7335 #endif
7336 
7337 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
7338 void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg)
7339 {
7340 	if (!peer->valid)
7341 		return;
7342 
7343 	if (IS_MLO_DP_LINK_PEER(peer))
7344 		dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
7345 				     peer->vdev->vdev_id,
7346 				     peer->mac_addr.raw, 0,
7347 				     CDP_LINK_PEER_TYPE);
7348 }
7349 #else
7350 void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg)
7351 {
7352 }
7353 #endif
7354 /**
7355  * dp_vdev_flush_peers() - Forcibily Flush peers of vdev
7356  * @vdev: Datapath VDEV handle
7357  * @unmap_only: Flag to indicate "only unmap"
7358  *
7359  * Return: void
7360  */
7361 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
7362 				bool unmap_only,
7363 				bool mlo_peers_only)
7364 {
7365 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7366 	struct dp_pdev *pdev = vdev->pdev;
7367 	struct dp_soc *soc = pdev->soc;
7368 	struct dp_peer *peer;
7369 	uint32_t i = 0;
7370 
7371 
7372 	if (!unmap_only) {
7373 		if (!mlo_peers_only)
7374 			dp_vdev_iterate_peer_lock_safe(vdev,
7375 						       dp_peer_delete,
7376 						       NULL,
7377 						       DP_MOD_ID_CDP);
7378 		else
7379 			dp_vdev_iterate_peer_lock_safe(vdev,
7380 						       dp_mlo_peer_delete,
7381 						       NULL,
7382 						       DP_MOD_ID_CDP);
7383 	}
7384 
7385 	for (i = 0; i < soc->max_peer_id ; i++) {
7386 		peer = __dp_peer_get_ref_by_id(soc, i, DP_MOD_ID_CDP);
7387 
7388 		if (!peer)
7389 			continue;
7390 
7391 		if (peer->vdev != vdev) {
7392 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7393 			continue;
7394 		}
7395 
7396 		if (!mlo_peers_only) {
7397 			dp_info("peer: " QDF_MAC_ADDR_FMT " is getting unmap",
7398 				QDF_MAC_ADDR_REF(peer->mac_addr.raw));
7399 			dp_rx_peer_unmap_handler(soc, i,
7400 						 vdev->vdev_id,
7401 						 peer->mac_addr.raw, 0,
7402 						 DP_PEER_WDS_COUNT_INVALID);
7403 			SET_PEER_REF_CNT_ONE(peer);
7404 		} else if (IS_MLO_DP_LINK_PEER(peer) ||
7405 			   IS_MLO_DP_MLD_PEER(peer)) {
7406 			dp_info("peer: " QDF_MAC_ADDR_FMT " is getting unmap",
7407 				QDF_MAC_ADDR_REF(peer->mac_addr.raw));
7408 			dp_rx_peer_unmap_handler(soc, i,
7409 						 vdev->vdev_id,
7410 						 peer->mac_addr.raw, 0,
7411 						 DP_PEER_WDS_COUNT_INVALID);
7412 			SET_PEER_REF_CNT_ONE(peer);
7413 		}
7414 
7415 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7416 	}
7417 }
7418 
7419 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
7420 /*
7421  * dp_txrx_alloc_vdev_stats_id()- Allocate vdev_stats_id
7422  * @soc_hdl: Datapath soc handle
7423  * @vdev_stats_id: Address of vdev_stats_id
7424  *
7425  * Return: QDF_STATUS
7426  */
7427 static QDF_STATUS dp_txrx_alloc_vdev_stats_id(struct cdp_soc_t *soc_hdl,
7428 					      uint8_t *vdev_stats_id)
7429 {
7430 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7431 	uint8_t id = 0;
7432 
7433 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
7434 		*vdev_stats_id = CDP_INVALID_VDEV_STATS_ID;
7435 		return QDF_STATUS_E_FAILURE;
7436 	}
7437 
7438 	while (id < CDP_MAX_VDEV_STATS_ID) {
7439 		if (!qdf_atomic_test_and_set_bit(id, &soc->vdev_stats_id_map)) {
7440 			*vdev_stats_id = id;
7441 			return QDF_STATUS_SUCCESS;
7442 		}
7443 		id++;
7444 	}
7445 
7446 	*vdev_stats_id = CDP_INVALID_VDEV_STATS_ID;
7447 	return QDF_STATUS_E_FAILURE;
7448 }
7449 
7450 /*
7451  * dp_txrx_reset_vdev_stats_id() - Reset vdev_stats_id in dp_soc
7452  * @soc_hdl: Datapath soc handle
7453  * @vdev_stats_id: vdev_stats_id to reset in dp_soc
7454  *
7455  * Return: none
7456  */
7457 static void dp_txrx_reset_vdev_stats_id(struct cdp_soc_t *soc_hdl,
7458 					uint8_t vdev_stats_id)
7459 {
7460 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7461 
7462 	if ((!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) ||
7463 	    (vdev_stats_id >= CDP_MAX_VDEV_STATS_ID))
7464 		return;
7465 
7466 	qdf_atomic_clear_bit(vdev_stats_id, &soc->vdev_stats_id_map);
7467 }
7468 #else
7469 static void dp_txrx_reset_vdev_stats_id(struct cdp_soc_t *soc,
7470 					uint8_t vdev_stats_id)
7471 {}
7472 #endif
7473 /*
7474  * dp_vdev_detach_wifi3() - Detach txrx vdev
7475  * @cdp_soc: Datapath soc handle
7476  * @vdev_id: VDEV Id
7477  * @callback: Callback OL_IF on completion of detach
7478  * @cb_context:	Callback context
7479  *
7480  */
7481 static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc,
7482 				       uint8_t vdev_id,
7483 				       ol_txrx_vdev_delete_cb callback,
7484 				       void *cb_context)
7485 {
7486 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
7487 	struct dp_pdev *pdev;
7488 	struct dp_neighbour_peer *peer = NULL;
7489 	struct dp_peer *vap_self_peer = NULL;
7490 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
7491 						     DP_MOD_ID_CDP);
7492 
7493 	if (!vdev)
7494 		return QDF_STATUS_E_FAILURE;
7495 
7496 	soc->arch_ops.txrx_vdev_detach(soc, vdev);
7497 
7498 	pdev = vdev->pdev;
7499 
7500 	vap_self_peer = dp_sta_vdev_self_peer_ref_n_get(soc, vdev,
7501 							DP_MOD_ID_CONFIG);
7502 	if (vap_self_peer) {
7503 		qdf_spin_lock_bh(&soc->ast_lock);
7504 		if (vap_self_peer->self_ast_entry) {
7505 			dp_peer_del_ast(soc, vap_self_peer->self_ast_entry);
7506 			vap_self_peer->self_ast_entry = NULL;
7507 		}
7508 		qdf_spin_unlock_bh(&soc->ast_lock);
7509 
7510 		dp_peer_delete_wifi3((struct cdp_soc_t *)soc, vdev->vdev_id,
7511 				     vap_self_peer->mac_addr.raw, 0,
7512 				     CDP_LINK_PEER_TYPE);
7513 		dp_peer_unref_delete(vap_self_peer, DP_MOD_ID_CONFIG);
7514 	}
7515 
7516 	/*
7517 	 * If Target is hung, flush all peers before detaching vdev
7518 	 * this will free all references held due to missing
7519 	 * unmap commands from Target
7520 	 */
7521 	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
7522 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, false, false);
7523 	else if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
7524 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, true, false);
7525 
7526 	/* indicate that the vdev needs to be deleted */
7527 	vdev->delete.pending = 1;
7528 	dp_rx_vdev_detach(vdev);
7529 	/*
7530 	 * move it after dp_rx_vdev_detach(),
7531 	 * as the call back done in dp_rx_vdev_detach()
7532 	 * still need to get vdev pointer by vdev_id.
7533 	 */
7534 	dp_vdev_id_map_tbl_remove(soc, vdev);
7535 
7536 	dp_monitor_neighbour_peer_list_remove(pdev, vdev, peer);
7537 
7538 	dp_txrx_reset_vdev_stats_id(cdp_soc, vdev->vdev_stats_id);
7539 
7540 	dp_tx_vdev_multipass_deinit(vdev);
7541 	dp_tx_vdev_traffic_end_indication_detach(vdev);
7542 
7543 	if (vdev->vdev_dp_ext_handle) {
7544 		qdf_mem_free(vdev->vdev_dp_ext_handle);
7545 		vdev->vdev_dp_ext_handle = NULL;
7546 	}
7547 	vdev->delete.callback = callback;
7548 	vdev->delete.context = cb_context;
7549 
7550 	if (vdev->opmode != wlan_op_mode_monitor)
7551 		dp_vdev_pdev_list_remove(soc, pdev, vdev);
7552 
7553 	pdev->vdev_count--;
7554 	/* release reference taken above for find */
7555 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7556 
7557 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
7558 	TAILQ_INSERT_TAIL(&soc->inactive_vdev_list, vdev, inactive_list_elem);
7559 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
7560 
7561 	/* release reference taken at dp_vdev_create */
7562 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
7563 
7564 	return QDF_STATUS_SUCCESS;
7565 }
7566 
7567 #ifdef WLAN_FEATURE_11BE_MLO
7568 /**
7569  * is_dp_peer_can_reuse() - check if the dp_peer match condition to be reused
7570  * @vdev: Target DP vdev handle
7571  * @peer: DP peer handle to be checked
7572  * @peer_mac_addr: Target peer mac address
7573  * @peer_type: Target peer type
7574  *
7575  * Return: true - if match, false - not match
7576  */
7577 static inline
7578 bool is_dp_peer_can_reuse(struct dp_vdev *vdev,
7579 			  struct dp_peer *peer,
7580 			  uint8_t *peer_mac_addr,
7581 			  enum cdp_peer_type peer_type)
7582 {
7583 	if (peer->bss_peer && (peer->vdev == vdev) &&
7584 	    (peer->peer_type == peer_type) &&
7585 	    (qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw,
7586 			 QDF_MAC_ADDR_SIZE) == 0))
7587 		return true;
7588 
7589 	return false;
7590 }
7591 #else
7592 static inline
7593 bool is_dp_peer_can_reuse(struct dp_vdev *vdev,
7594 			  struct dp_peer *peer,
7595 			  uint8_t *peer_mac_addr,
7596 			  enum cdp_peer_type peer_type)
7597 {
7598 	if (peer->bss_peer && (peer->vdev == vdev) &&
7599 	    (qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw,
7600 			 QDF_MAC_ADDR_SIZE) == 0))
7601 		return true;
7602 
7603 	return false;
7604 }
7605 #endif
7606 
7607 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
7608 						uint8_t *peer_mac_addr,
7609 						enum cdp_peer_type peer_type)
7610 {
7611 	struct dp_peer *peer;
7612 	struct dp_soc *soc = vdev->pdev->soc;
7613 
7614 	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
7615 	TAILQ_FOREACH(peer, &soc->inactive_peer_list,
7616 		      inactive_list_elem) {
7617 
7618 		/* reuse bss peer only when vdev matches*/
7619 		if (is_dp_peer_can_reuse(vdev, peer,
7620 					 peer_mac_addr, peer_type)) {
7621 			/* increment ref count for cdp_peer_create*/
7622 			if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG) ==
7623 						QDF_STATUS_SUCCESS) {
7624 				TAILQ_REMOVE(&soc->inactive_peer_list, peer,
7625 					     inactive_list_elem);
7626 				qdf_spin_unlock_bh
7627 					(&soc->inactive_peer_list_lock);
7628 				return peer;
7629 			}
7630 		}
7631 	}
7632 
7633 	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
7634 	return NULL;
7635 }
7636 
7637 #ifdef FEATURE_AST
7638 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
7639 					       struct dp_pdev *pdev,
7640 					       uint8_t *peer_mac_addr)
7641 {
7642 	struct dp_ast_entry *ast_entry;
7643 
7644 	if (soc->ast_offload_support)
7645 		return;
7646 
7647 	qdf_spin_lock_bh(&soc->ast_lock);
7648 	if (soc->ast_override_support)
7649 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
7650 							    pdev->pdev_id);
7651 	else
7652 		ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
7653 
7654 	if (ast_entry && ast_entry->next_hop && !ast_entry->delete_in_progress)
7655 		dp_peer_del_ast(soc, ast_entry);
7656 
7657 	qdf_spin_unlock_bh(&soc->ast_lock);
7658 }
7659 #else
7660 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
7661 					       struct dp_pdev *pdev,
7662 					       uint8_t *peer_mac_addr)
7663 {
7664 }
7665 #endif
7666 
7667 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
7668 /*
7669  * dp_peer_hw_txrx_stats_init() - Initialize hw_txrx_stats_en in dp_peer
7670  * @soc: Datapath soc handle
7671  * @peer: Datapath peer handle
7672  *
7673  * Return: none
7674  */
7675 static inline
7676 void dp_peer_hw_txrx_stats_init(struct dp_soc *soc,
7677 				struct dp_txrx_peer *txrx_peer)
7678 {
7679 	txrx_peer->hw_txrx_stats_en =
7680 		wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx);
7681 }
7682 #else
7683 static inline
7684 void dp_peer_hw_txrx_stats_init(struct dp_soc *soc,
7685 				struct dp_txrx_peer *txrx_peer)
7686 {
7687 	txrx_peer->hw_txrx_stats_en = 0;
7688 }
7689 #endif
7690 
7691 static QDF_STATUS dp_txrx_peer_detach(struct dp_soc *soc, struct dp_peer *peer)
7692 {
7693 	struct dp_txrx_peer *txrx_peer;
7694 	struct dp_pdev *pdev;
7695 
7696 	/* dp_txrx_peer exists for mld peer and legacy peer */
7697 	if (peer->txrx_peer) {
7698 		txrx_peer = peer->txrx_peer;
7699 		peer->txrx_peer = NULL;
7700 		pdev = txrx_peer->vdev->pdev;
7701 
7702 		dp_peer_defrag_rx_tids_deinit(txrx_peer);
7703 		/*
7704 		 * Deallocate the extended stats contenxt
7705 		 */
7706 		dp_peer_delay_stats_ctx_dealloc(soc, txrx_peer);
7707 		dp_peer_rx_bufq_resources_deinit(txrx_peer);
7708 		dp_peer_jitter_stats_ctx_dealloc(pdev, txrx_peer);
7709 		dp_peer_sawf_stats_ctx_free(soc, txrx_peer);
7710 
7711 		qdf_mem_free(txrx_peer);
7712 	}
7713 
7714 	return QDF_STATUS_SUCCESS;
7715 }
7716 
7717 static QDF_STATUS dp_txrx_peer_attach(struct dp_soc *soc, struct dp_peer *peer)
7718 {
7719 	struct dp_txrx_peer *txrx_peer;
7720 	struct dp_pdev *pdev;
7721 
7722 	txrx_peer = (struct dp_txrx_peer *)qdf_mem_malloc(sizeof(*txrx_peer));
7723 
7724 	if (!txrx_peer)
7725 		return QDF_STATUS_E_NOMEM; /* failure */
7726 
7727 	txrx_peer->peer_id = HTT_INVALID_PEER;
7728 	/* initialize the peer_id */
7729 	txrx_peer->vdev = peer->vdev;
7730 	pdev = peer->vdev->pdev;
7731 
7732 	DP_STATS_INIT(txrx_peer);
7733 
7734 	dp_wds_ext_peer_init(txrx_peer);
7735 	dp_peer_rx_bufq_resources_init(txrx_peer);
7736 	dp_peer_hw_txrx_stats_init(soc, txrx_peer);
7737 	/*
7738 	 * Allocate peer extended stats context. Fall through in
7739 	 * case of failure as its not an implicit requirement to have
7740 	 * this object for regular statistics updates.
7741 	 */
7742 	if (dp_peer_delay_stats_ctx_alloc(soc, txrx_peer) !=
7743 					  QDF_STATUS_SUCCESS)
7744 		dp_warn("peer delay_stats ctx alloc failed");
7745 
7746 	/*
7747 	 * Alloctate memory for jitter stats. Fall through in
7748 	 * case of failure as its not an implicit requirement to have
7749 	 * this object for regular statistics updates.
7750 	 */
7751 	if (dp_peer_jitter_stats_ctx_alloc(pdev, txrx_peer) !=
7752 					   QDF_STATUS_SUCCESS)
7753 		dp_warn("peer jitter_stats ctx alloc failed");
7754 
7755 	dp_set_peer_isolation(txrx_peer, false);
7756 
7757 	dp_peer_defrag_rx_tids_init(txrx_peer);
7758 
7759 	if (dp_peer_sawf_stats_ctx_alloc(soc, txrx_peer) != QDF_STATUS_SUCCESS)
7760 		dp_warn("peer sawf stats alloc failed");
7761 
7762 	dp_txrx_peer_attach_add(soc, peer, txrx_peer);
7763 
7764 	return QDF_STATUS_SUCCESS;
7765 }
7766 
7767 static inline
7768 void dp_txrx_peer_stats_clr(struct dp_txrx_peer *txrx_peer)
7769 {
7770 	if (!txrx_peer)
7771 		return;
7772 
7773 	txrx_peer->tx_failed = 0;
7774 	txrx_peer->comp_pkt.num = 0;
7775 	txrx_peer->comp_pkt.bytes = 0;
7776 	txrx_peer->to_stack.num = 0;
7777 	txrx_peer->to_stack.bytes = 0;
7778 
7779 	DP_STATS_CLR(txrx_peer);
7780 	dp_peer_delay_stats_ctx_clr(txrx_peer);
7781 	dp_peer_jitter_stats_ctx_clr(txrx_peer);
7782 }
7783 
7784 /*
7785  * dp_peer_create_wifi3() - attach txrx peer
7786  * @soc_hdl: Datapath soc handle
7787  * @vdev_id: id of vdev
7788  * @peer_mac_addr: Peer MAC address
7789  * @peer_type: link or MLD peer type
7790  *
7791  * Return: 0 on success, -1 on failure
7792  */
7793 static QDF_STATUS
7794 dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
7795 		     uint8_t *peer_mac_addr, enum cdp_peer_type peer_type)
7796 {
7797 	struct dp_peer *peer;
7798 	int i;
7799 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7800 	struct dp_pdev *pdev;
7801 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
7802 	struct dp_vdev *vdev = NULL;
7803 
7804 	if (!peer_mac_addr)
7805 		return QDF_STATUS_E_FAILURE;
7806 
7807 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
7808 
7809 	if (!vdev)
7810 		return QDF_STATUS_E_FAILURE;
7811 
7812 	pdev = vdev->pdev;
7813 	soc = pdev->soc;
7814 
7815 	/*
7816 	 * If a peer entry with given MAC address already exists,
7817 	 * reuse the peer and reset the state of peer.
7818 	 */
7819 	peer = dp_peer_can_reuse(vdev, peer_mac_addr, peer_type);
7820 
7821 	if (peer) {
7822 		qdf_atomic_init(&peer->is_default_route_set);
7823 		dp_peer_cleanup(vdev, peer);
7824 
7825 		dp_peer_vdev_list_add(soc, vdev, peer);
7826 		dp_peer_find_hash_add(soc, peer);
7827 
7828 		dp_peer_rx_tids_create(peer);
7829 		if (IS_MLO_DP_MLD_PEER(peer))
7830 			dp_mld_peer_init_link_peers_info(peer);
7831 
7832 		qdf_spin_lock_bh(&soc->ast_lock);
7833 		dp_peer_delete_ast_entries(soc, peer);
7834 		qdf_spin_unlock_bh(&soc->ast_lock);
7835 
7836 		if ((vdev->opmode == wlan_op_mode_sta) &&
7837 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
7838 		     QDF_MAC_ADDR_SIZE)) {
7839 			ast_type = CDP_TXRX_AST_TYPE_SELF;
7840 		}
7841 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
7842 
7843 		peer->valid = 1;
7844 		peer->is_tdls_peer = false;
7845 		dp_local_peer_id_alloc(pdev, peer);
7846 
7847 		qdf_spinlock_create(&peer->peer_info_lock);
7848 
7849 		DP_STATS_INIT(peer);
7850 
7851 		/*
7852 		 * In tx_monitor mode, filter may be set for unassociated peer
7853 		 * when unassociated peer get associated peer need to
7854 		 * update tx_cap_enabled flag to support peer filter.
7855 		 */
7856 		if (!IS_MLO_DP_MLD_PEER(peer)) {
7857 			dp_monitor_peer_tx_capture_filter_check(pdev, peer);
7858 			dp_monitor_peer_reset_stats(soc, peer);
7859 		}
7860 
7861 		if (peer->txrx_peer) {
7862 			dp_peer_rx_bufq_resources_init(peer->txrx_peer);
7863 			dp_txrx_peer_stats_clr(peer->txrx_peer);
7864 			dp_set_peer_isolation(peer->txrx_peer, false);
7865 			dp_wds_ext_peer_init(peer->txrx_peer);
7866 			dp_peer_hw_txrx_stats_init(soc, peer->txrx_peer);
7867 		}
7868 
7869 		dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
7870 
7871 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7872 		return QDF_STATUS_SUCCESS;
7873 	} else {
7874 		/*
7875 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
7876 		 * need to remove the AST entry which was earlier added as a WDS
7877 		 * entry.
7878 		 * If an AST entry exists, but no peer entry exists with a given
7879 		 * MAC addresses, we could deduce it as a WDS entry
7880 		 */
7881 		dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
7882 	}
7883 
7884 #ifdef notyet
7885 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
7886 		soc->mempool_ol_ath_peer);
7887 #else
7888 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
7889 #endif
7890 	wlan_minidump_log(peer,
7891 			  sizeof(*peer),
7892 			  soc->ctrl_psoc,
7893 			  WLAN_MD_DP_PEER, "dp_peer");
7894 	if (!peer) {
7895 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7896 		return QDF_STATUS_E_FAILURE; /* failure */
7897 	}
7898 
7899 	qdf_mem_zero(peer, sizeof(struct dp_peer));
7900 
7901 	/* store provided params */
7902 	peer->vdev = vdev;
7903 
7904 	/* initialize the peer_id */
7905 	peer->peer_id = HTT_INVALID_PEER;
7906 
7907 	qdf_mem_copy(
7908 		&peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE);
7909 
7910 	DP_PEER_SET_TYPE(peer, peer_type);
7911 	if (IS_MLO_DP_MLD_PEER(peer)) {
7912 		if (dp_txrx_peer_attach(soc, peer) !=
7913 				QDF_STATUS_SUCCESS)
7914 			goto fail; /* failure */
7915 
7916 		dp_mld_peer_init_link_peers_info(peer);
7917 	} else if (dp_monitor_peer_attach(soc, peer) !=
7918 				QDF_STATUS_SUCCESS)
7919 		dp_warn("peer monitor ctx alloc failed");
7920 
7921 	TAILQ_INIT(&peer->ast_entry_list);
7922 
7923 	/* get the vdev reference for new peer */
7924 	dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CHILD);
7925 
7926 	if ((vdev->opmode == wlan_op_mode_sta) &&
7927 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
7928 			 QDF_MAC_ADDR_SIZE)) {
7929 		ast_type = CDP_TXRX_AST_TYPE_SELF;
7930 	}
7931 	qdf_spinlock_create(&peer->peer_state_lock);
7932 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
7933 	qdf_spinlock_create(&peer->peer_info_lock);
7934 
7935 	/* reset the ast index to flowid table */
7936 	dp_peer_reset_flowq_map(peer);
7937 
7938 	qdf_atomic_init(&peer->ref_cnt);
7939 
7940 	for (i = 0; i < DP_MOD_ID_MAX; i++)
7941 		qdf_atomic_init(&peer->mod_refs[i]);
7942 
7943 	/* keep one reference for attach */
7944 	qdf_atomic_inc(&peer->ref_cnt);
7945 	qdf_atomic_inc(&peer->mod_refs[DP_MOD_ID_CONFIG]);
7946 
7947 	dp_peer_vdev_list_add(soc, vdev, peer);
7948 
7949 	/* TODO: See if hash based search is required */
7950 	dp_peer_find_hash_add(soc, peer);
7951 
7952 	/* Initialize the peer state */
7953 	peer->state = OL_TXRX_PEER_STATE_DISC;
7954 
7955 	dp_info("vdev %pK created peer %pK ("QDF_MAC_ADDR_FMT") vdev_ref_cnt "
7956 		"%d peer_ref_cnt: %d",
7957 		vdev, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
7958 		qdf_atomic_read(&vdev->ref_cnt),
7959 		qdf_atomic_read(&peer->ref_cnt));
7960 	/*
7961 	 * For every peer MAp message search and set if bss_peer
7962 	 */
7963 	if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
7964 			QDF_MAC_ADDR_SIZE) == 0 &&
7965 			(wlan_op_mode_sta != vdev->opmode)) {
7966 		dp_info("vdev bss_peer!!");
7967 		peer->bss_peer = 1;
7968 		if (peer->txrx_peer)
7969 			peer->txrx_peer->bss_peer = 1;
7970 	}
7971 
7972 	if (wlan_op_mode_sta == vdev->opmode &&
7973 	    qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
7974 			QDF_MAC_ADDR_SIZE) == 0) {
7975 		peer->sta_self_peer = 1;
7976 	}
7977 
7978 	dp_peer_rx_tids_create(peer);
7979 
7980 	peer->valid = 1;
7981 	dp_local_peer_id_alloc(pdev, peer);
7982 	DP_STATS_INIT(peer);
7983 
7984 	if (dp_peer_sawf_ctx_alloc(soc, peer) != QDF_STATUS_SUCCESS)
7985 		dp_warn("peer sawf context alloc failed");
7986 
7987 	dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
7988 
7989 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7990 
7991 	return QDF_STATUS_SUCCESS;
7992 fail:
7993 	qdf_mem_free(peer);
7994 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7995 
7996 	return QDF_STATUS_E_FAILURE;
7997 }
7998 
7999 static QDF_STATUS dp_peer_legacy_setup(struct dp_soc *soc, struct dp_peer *peer)
8000 {
8001 	/* txrx_peer might exist already in peer reuse case */
8002 	if (peer->txrx_peer)
8003 		return QDF_STATUS_SUCCESS;
8004 
8005 	if (dp_txrx_peer_attach(soc, peer) !=
8006 				QDF_STATUS_SUCCESS) {
8007 		dp_err("peer txrx ctx alloc failed");
8008 		return QDF_STATUS_E_FAILURE;
8009 	}
8010 
8011 	return QDF_STATUS_SUCCESS;
8012 }
8013 
8014 #ifdef WLAN_FEATURE_11BE_MLO
8015 QDF_STATUS dp_peer_mlo_setup(
8016 			struct dp_soc *soc,
8017 			struct dp_peer *peer,
8018 			uint8_t vdev_id,
8019 			struct cdp_peer_setup_info *setup_info)
8020 {
8021 	struct dp_peer *mld_peer = NULL;
8022 
8023 	/* Non-MLO connection, do nothing */
8024 	if (!setup_info || !setup_info->mld_peer_mac)
8025 		return QDF_STATUS_SUCCESS;
8026 
8027 	dp_info("link peer:" QDF_MAC_ADDR_FMT "mld peer:" QDF_MAC_ADDR_FMT
8028 		"assoc_link %d, primary_link %d",
8029 		QDF_MAC_ADDR_REF(peer->mac_addr.raw),
8030 		QDF_MAC_ADDR_REF(setup_info->mld_peer_mac),
8031 		setup_info->is_first_link,
8032 		setup_info->is_primary_link);
8033 
8034 	/* if this is the first link peer */
8035 	if (setup_info->is_first_link)
8036 		/* create MLD peer */
8037 		dp_peer_create_wifi3((struct cdp_soc_t *)soc,
8038 				     vdev_id,
8039 				     setup_info->mld_peer_mac,
8040 				     CDP_MLD_PEER_TYPE);
8041 
8042 	peer->first_link = setup_info->is_first_link;
8043 	peer->primary_link = setup_info->is_primary_link;
8044 	mld_peer = dp_mld_peer_find_hash_find(soc,
8045 					      setup_info->mld_peer_mac,
8046 					      0, vdev_id, DP_MOD_ID_CDP);
8047 	if (mld_peer) {
8048 		if (setup_info->is_first_link) {
8049 			/* assign rx_tid to mld peer */
8050 			mld_peer->rx_tid = peer->rx_tid;
8051 			/* no cdp_peer_setup for MLD peer,
8052 			 * set it for addba processing
8053 			 */
8054 			qdf_atomic_set(&mld_peer->is_default_route_set, 1);
8055 		} else {
8056 			/* free link peer original rx_tids mem */
8057 			dp_peer_rx_tids_destroy(peer);
8058 			/* assign mld peer rx_tid to link peer */
8059 			peer->rx_tid = mld_peer->rx_tid;
8060 		}
8061 
8062 		if (setup_info->is_primary_link &&
8063 		    !setup_info->is_first_link) {
8064 			/*
8065 			 * if first link is not the primary link,
8066 			 * then need to change mld_peer->vdev as
8067 			 * primary link dp_vdev is not same one
8068 			 * during mld peer creation.
8069 			 */
8070 			dp_info("Primary link is not the first link. vdev: %pK,"
8071 				"vdev_id %d vdev_ref_cnt %d",
8072 				mld_peer->vdev, vdev_id,
8073 				qdf_atomic_read(&mld_peer->vdev->ref_cnt));
8074 			/* release the ref to original dp_vdev */
8075 			dp_vdev_unref_delete(soc, mld_peer->vdev,
8076 					     DP_MOD_ID_CHILD);
8077 			/*
8078 			 * get the ref to new dp_vdev,
8079 			 * increase dp_vdev ref_cnt
8080 			 */
8081 			mld_peer->vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8082 							       DP_MOD_ID_CHILD);
8083 			mld_peer->txrx_peer->vdev = mld_peer->vdev;
8084 		}
8085 
8086 		/* associate mld and link peer */
8087 		dp_link_peer_add_mld_peer(peer, mld_peer);
8088 		dp_mld_peer_add_link_peer(mld_peer, peer);
8089 
8090 		mld_peer->txrx_peer->mld_peer = 1;
8091 		dp_peer_unref_delete(mld_peer, DP_MOD_ID_CDP);
8092 	} else {
8093 		peer->mld_peer = NULL;
8094 		dp_err("mld peer" QDF_MAC_ADDR_FMT "not found!",
8095 		       QDF_MAC_ADDR_REF(setup_info->mld_peer_mac));
8096 		return QDF_STATUS_E_FAILURE;
8097 	}
8098 
8099 	return QDF_STATUS_SUCCESS;
8100 }
8101 
8102 /*
8103  * dp_mlo_peer_authorize() - authorize MLO peer
8104  * @soc: soc handle
8105  * @peer: pointer to link peer
8106  *
8107  * return void
8108  */
8109 static void dp_mlo_peer_authorize(struct dp_soc *soc,
8110 				  struct dp_peer *peer)
8111 {
8112 	int i;
8113 	struct dp_peer *link_peer = NULL;
8114 	struct dp_peer *mld_peer = peer->mld_peer;
8115 	struct dp_mld_link_peers link_peers_info;
8116 
8117 	if (!mld_peer)
8118 		return;
8119 
8120 	/* get link peers with reference */
8121 	dp_get_link_peers_ref_from_mld_peer(soc, mld_peer,
8122 					    &link_peers_info,
8123 					    DP_MOD_ID_CDP);
8124 
8125 	for (i = 0; i < link_peers_info.num_links; i++) {
8126 		link_peer = link_peers_info.link_peers[i];
8127 
8128 		if (!link_peer->authorize) {
8129 			dp_release_link_peers_ref(&link_peers_info,
8130 						  DP_MOD_ID_CDP);
8131 			mld_peer->authorize = false;
8132 			return;
8133 		}
8134 	}
8135 
8136 	/* if we are here all link peers are authorized,
8137 	 * authorize ml_peer also
8138 	 */
8139 	mld_peer->authorize = true;
8140 
8141 	/* release link peers reference */
8142 	dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
8143 }
8144 #endif
8145 
8146 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
8147 				  enum cdp_host_reo_dest_ring *reo_dest,
8148 				  bool *hash_based)
8149 {
8150 	struct dp_soc *soc;
8151 	struct dp_pdev *pdev;
8152 
8153 	pdev = vdev->pdev;
8154 	soc = pdev->soc;
8155 	/*
8156 	 * hash based steering is disabled for Radios which are offloaded
8157 	 * to NSS
8158 	 */
8159 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
8160 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
8161 
8162 	/*
8163 	 * Below line of code will ensure the proper reo_dest ring is chosen
8164 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
8165 	 */
8166 	*reo_dest = pdev->reo_dest;
8167 }
8168 
8169 #ifdef IPA_OFFLOAD
8170 /**
8171  * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
8172  * @vdev: Virtual device
8173  *
8174  * Return: true if the vdev is of subtype P2P
8175  *	   false if the vdev is of any other subtype
8176  */
8177 static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
8178 {
8179 	if (vdev->subtype == wlan_op_subtype_p2p_device ||
8180 	    vdev->subtype == wlan_op_subtype_p2p_cli ||
8181 	    vdev->subtype == wlan_op_subtype_p2p_go)
8182 		return true;
8183 
8184 	return false;
8185 }
8186 
8187 /*
8188  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
8189  * @vdev: Datapath VDEV handle
8190  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
8191  * @hash_based: pointer to hash value (enabled/disabled) to be populated
8192  *
8193  * If IPA is enabled in ini, for SAP mode, disable hash based
8194  * steering, use default reo_dst ring for RX. Use config values for other modes.
8195  * Return: None
8196  */
8197 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
8198 				       struct cdp_peer_setup_info *setup_info,
8199 				       enum cdp_host_reo_dest_ring *reo_dest,
8200 				       bool *hash_based,
8201 				       uint8_t *lmac_peer_id_msb)
8202 {
8203 	struct dp_soc *soc;
8204 	struct dp_pdev *pdev;
8205 
8206 	pdev = vdev->pdev;
8207 	soc = pdev->soc;
8208 
8209 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
8210 
8211 	/* For P2P-GO interfaces we do not need to change the REO
8212 	 * configuration even if IPA config is enabled
8213 	 */
8214 	if (dp_is_vdev_subtype_p2p(vdev))
8215 		return;
8216 
8217 	/*
8218 	 * If IPA is enabled, disable hash-based flow steering and set
8219 	 * reo_dest_ring_4 as the REO ring to receive packets on.
8220 	 * IPA is configured to reap reo_dest_ring_4.
8221 	 *
8222 	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
8223 	 * value enum value is from 1 - 4.
8224 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
8225 	 */
8226 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
8227 		if (vdev->opmode == wlan_op_mode_ap) {
8228 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
8229 			*hash_based = 0;
8230 		} else if (vdev->opmode == wlan_op_mode_sta &&
8231 			   dp_ipa_is_mdm_platform()) {
8232 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
8233 		}
8234 	}
8235 }
8236 
8237 #else
8238 
8239 /*
8240  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
8241  * @vdev: Datapath VDEV handle
8242  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
8243  * @hash_based: pointer to hash value (enabled/disabled) to be populated
8244  *
8245  * Use system config values for hash based steering.
8246  * Return: None
8247  */
8248 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
8249 				       struct cdp_peer_setup_info *setup_info,
8250 				       enum cdp_host_reo_dest_ring *reo_dest,
8251 				       bool *hash_based,
8252 				       uint8_t *lmac_peer_id_msb)
8253 {
8254 	struct dp_soc *soc = vdev->pdev->soc;
8255 
8256 	soc->arch_ops.peer_get_reo_hash(vdev, setup_info, reo_dest, hash_based,
8257 					lmac_peer_id_msb);
8258 }
8259 #endif /* IPA_OFFLOAD */
8260 
8261 /*
8262  * dp_peer_setup_wifi3() - initialize the peer
8263  * @soc_hdl: soc handle object
8264  * @vdev_id : vdev_id of vdev object
8265  * @peer_mac: Peer's mac address
8266  * @peer_setup_info: peer setup info for MLO
8267  *
8268  * Return: QDF_STATUS
8269  */
8270 static QDF_STATUS
8271 dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8272 		    uint8_t *peer_mac,
8273 		    struct cdp_peer_setup_info *setup_info)
8274 {
8275 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8276 	struct dp_pdev *pdev;
8277 	bool hash_based = 0;
8278 	enum cdp_host_reo_dest_ring reo_dest;
8279 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8280 	struct dp_vdev *vdev = NULL;
8281 	struct dp_peer *peer =
8282 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
8283 					       DP_MOD_ID_CDP);
8284 	struct dp_peer *mld_peer = NULL;
8285 	enum wlan_op_mode vdev_opmode;
8286 	uint8_t lmac_peer_id_msb = 0;
8287 
8288 	if (!peer)
8289 		return QDF_STATUS_E_FAILURE;
8290 
8291 	vdev = peer->vdev;
8292 	if (!vdev) {
8293 		status = QDF_STATUS_E_FAILURE;
8294 		goto fail;
8295 	}
8296 
8297 	/* save vdev related member in case vdev freed */
8298 	vdev_opmode = vdev->opmode;
8299 	pdev = vdev->pdev;
8300 	dp_peer_setup_get_reo_hash(vdev, setup_info,
8301 				   &reo_dest, &hash_based,
8302 				   &lmac_peer_id_msb);
8303 
8304 	dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u",
8305 		pdev->pdev_id, vdev->vdev_id,
8306 		vdev->opmode, hash_based, reo_dest);
8307 
8308 	/*
8309 	 * There are corner cases where the AD1 = AD2 = "VAPs address"
8310 	 * i.e both the devices have same MAC address. In these
8311 	 * cases we want such pkts to be processed in NULL Q handler
8312 	 * which is REO2TCL ring. for this reason we should
8313 	 * not setup reo_queues and default route for bss_peer.
8314 	 */
8315 	if (!IS_MLO_DP_MLD_PEER(peer))
8316 		dp_monitor_peer_tx_init(pdev, peer);
8317 
8318 	if (!setup_info)
8319 		if (dp_peer_legacy_setup(soc, peer) !=
8320 				QDF_STATUS_SUCCESS) {
8321 			status = QDF_STATUS_E_RESOURCES;
8322 			goto fail;
8323 		}
8324 
8325 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
8326 		status = QDF_STATUS_E_FAILURE;
8327 		goto fail;
8328 	}
8329 
8330 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
8331 		/* TODO: Check the destination ring number to be passed to FW */
8332 		soc->cdp_soc.ol_ops->peer_set_default_routing(
8333 				soc->ctrl_psoc,
8334 				peer->vdev->pdev->pdev_id,
8335 				peer->mac_addr.raw,
8336 				peer->vdev->vdev_id, hash_based, reo_dest,
8337 				lmac_peer_id_msb);
8338 	}
8339 
8340 	qdf_atomic_set(&peer->is_default_route_set, 1);
8341 
8342 	status = dp_peer_mlo_setup(soc, peer, vdev->vdev_id, setup_info);
8343 	if (QDF_IS_STATUS_ERROR(status)) {
8344 		dp_peer_err("peer mlo setup failed");
8345 		qdf_assert_always(0);
8346 	}
8347 
8348 	if (vdev_opmode != wlan_op_mode_monitor) {
8349 		/* In case of MLD peer, switch peer to mld peer and
8350 		 * do peer_rx_init.
8351 		 */
8352 		if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
8353 		    IS_MLO_DP_LINK_PEER(peer)) {
8354 			if (setup_info && setup_info->is_first_link) {
8355 				mld_peer = DP_GET_MLD_PEER_FROM_PEER(peer);
8356 				if (mld_peer)
8357 					dp_peer_rx_init(pdev, mld_peer);
8358 				else
8359 					dp_peer_err("MLD peer null. Primary link peer:%pK", peer);
8360 			}
8361 		} else {
8362 			dp_peer_rx_init(pdev, peer);
8363 		}
8364 	}
8365 
8366 	if (!IS_MLO_DP_MLD_PEER(peer))
8367 		dp_peer_ppdu_delayed_ba_init(peer);
8368 
8369 fail:
8370 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8371 	return status;
8372 }
8373 
8374 /*
8375  * dp_cp_peer_del_resp_handler - Handle the peer delete response
8376  * @soc_hdl: Datapath SOC handle
8377  * @vdev_id: id of virtual device object
8378  * @mac_addr: Mac address of the peer
8379  *
8380  * Return: QDF_STATUS
8381  */
8382 static QDF_STATUS dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl,
8383 					      uint8_t vdev_id,
8384 					      uint8_t *mac_addr)
8385 {
8386 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8387 	struct dp_ast_entry  *ast_entry = NULL;
8388 	txrx_ast_free_cb cb = NULL;
8389 	void *cookie;
8390 
8391 	if (soc->ast_offload_support)
8392 		return QDF_STATUS_E_INVAL;
8393 
8394 	qdf_spin_lock_bh(&soc->ast_lock);
8395 
8396 	ast_entry =
8397 		dp_peer_ast_hash_find_by_vdevid(soc, mac_addr,
8398 						vdev_id);
8399 
8400 	/* in case of qwrap we have multiple BSS peers
8401 	 * with same mac address
8402 	 *
8403 	 * AST entry for this mac address will be created
8404 	 * only for one peer hence it will be NULL here
8405 	 */
8406 	if ((!ast_entry || !ast_entry->delete_in_progress) ||
8407 	    (ast_entry->peer_id != HTT_INVALID_PEER)) {
8408 		qdf_spin_unlock_bh(&soc->ast_lock);
8409 		return QDF_STATUS_E_FAILURE;
8410 	}
8411 
8412 	if (ast_entry->is_mapped)
8413 		soc->ast_table[ast_entry->ast_idx] = NULL;
8414 
8415 	DP_STATS_INC(soc, ast.deleted, 1);
8416 	dp_peer_ast_hash_remove(soc, ast_entry);
8417 
8418 	cb = ast_entry->callback;
8419 	cookie = ast_entry->cookie;
8420 	ast_entry->callback = NULL;
8421 	ast_entry->cookie = NULL;
8422 
8423 	soc->num_ast_entries--;
8424 	qdf_spin_unlock_bh(&soc->ast_lock);
8425 
8426 	if (cb) {
8427 		cb(soc->ctrl_psoc,
8428 		   dp_soc_to_cdp_soc(soc),
8429 		   cookie,
8430 		   CDP_TXRX_AST_DELETED);
8431 	}
8432 	qdf_mem_free(ast_entry);
8433 
8434 	return QDF_STATUS_SUCCESS;
8435 }
8436 
8437 /*
8438  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
8439  * @txrx_soc: cdp soc handle
8440  * @ac: Access category
8441  * @value: timeout value in millisec
8442  *
8443  * Return: void
8444  */
8445 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
8446 				    uint8_t ac, uint32_t value)
8447 {
8448 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
8449 
8450 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
8451 }
8452 
8453 /*
8454  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
8455  * @txrx_soc: cdp soc handle
8456  * @ac: access category
8457  * @value: timeout value in millisec
8458  *
8459  * Return: void
8460  */
8461 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
8462 				    uint8_t ac, uint32_t *value)
8463 {
8464 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
8465 
8466 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
8467 }
8468 
8469 /*
8470  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
8471  * @txrx_soc: cdp soc handle
8472  * @pdev_id: id of physical device object
8473  * @val: reo destination ring index (1 - 4)
8474  *
8475  * Return: QDF_STATUS
8476  */
8477 static QDF_STATUS
8478 dp_set_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id,
8479 		     enum cdp_host_reo_dest_ring val)
8480 {
8481 	struct dp_pdev *pdev =
8482 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
8483 						   pdev_id);
8484 
8485 	if (pdev) {
8486 		pdev->reo_dest = val;
8487 		return QDF_STATUS_SUCCESS;
8488 	}
8489 
8490 	return QDF_STATUS_E_FAILURE;
8491 }
8492 
8493 /*
8494  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
8495  * @txrx_soc: cdp soc handle
8496  * @pdev_id: id of physical device object
8497  *
8498  * Return: reo destination ring index
8499  */
8500 static enum cdp_host_reo_dest_ring
8501 dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id)
8502 {
8503 	struct dp_pdev *pdev =
8504 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
8505 						   pdev_id);
8506 
8507 	if (pdev)
8508 		return pdev->reo_dest;
8509 	else
8510 		return cdp_host_reo_dest_ring_unknown;
8511 }
8512 
8513 #ifdef WLAN_SUPPORT_MSCS
8514 /*
8515  * dp_record_mscs_params - MSCS parameters sent by the STA in
8516  * the MSCS Request to the AP. The AP makes a note of these
8517  * parameters while comparing the MSDUs sent by the STA, to
8518  * send the downlink traffic with correct User priority.
8519  * @soc - Datapath soc handle
8520  * @peer_mac - STA Mac address
8521  * @vdev_id - ID of the vdev handle
8522  * @mscs_params - Structure having MSCS parameters obtained
8523  * from handshake
8524  * @active - Flag to set MSCS active/inactive
8525  * return type - QDF_STATUS - Success/Invalid
8526  */
8527 static QDF_STATUS
8528 dp_record_mscs_params(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
8529 		      uint8_t vdev_id, struct cdp_mscs_params *mscs_params,
8530 		      bool active)
8531 {
8532 	struct dp_peer *peer;
8533 	QDF_STATUS status = QDF_STATUS_E_INVAL;
8534 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8535 
8536 	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
8537 				      DP_MOD_ID_CDP);
8538 
8539 	if (!peer) {
8540 		dp_err("Peer is NULL!");
8541 		goto fail;
8542 	}
8543 	if (!active) {
8544 		dp_info("MSCS Procedure is terminated");
8545 		peer->mscs_active = active;
8546 		goto fail;
8547 	}
8548 
8549 	if (mscs_params->classifier_type == IEEE80211_TCLAS_MASK_CLA_TYPE_4) {
8550 		/* Populate entries inside IPV4 database first */
8551 		peer->mscs_ipv4_parameter.user_priority_bitmap =
8552 			mscs_params->user_pri_bitmap;
8553 		peer->mscs_ipv4_parameter.user_priority_limit =
8554 			mscs_params->user_pri_limit;
8555 		peer->mscs_ipv4_parameter.classifier_mask =
8556 			mscs_params->classifier_mask;
8557 
8558 		/* Populate entries inside IPV6 database */
8559 		peer->mscs_ipv6_parameter.user_priority_bitmap =
8560 			mscs_params->user_pri_bitmap;
8561 		peer->mscs_ipv6_parameter.user_priority_limit =
8562 			mscs_params->user_pri_limit;
8563 		peer->mscs_ipv6_parameter.classifier_mask =
8564 			mscs_params->classifier_mask;
8565 		peer->mscs_active = 1;
8566 		dp_info("\n\tMSCS Procedure request based parameters for "QDF_MAC_ADDR_FMT"\n"
8567 			"\tClassifier_type = %d\tUser priority bitmap = %x\n"
8568 			"\tUser priority limit = %x\tClassifier mask = %x",
8569 			QDF_MAC_ADDR_REF(peer_mac),
8570 			mscs_params->classifier_type,
8571 			peer->mscs_ipv4_parameter.user_priority_bitmap,
8572 			peer->mscs_ipv4_parameter.user_priority_limit,
8573 			peer->mscs_ipv4_parameter.classifier_mask);
8574 	}
8575 
8576 	status = QDF_STATUS_SUCCESS;
8577 fail:
8578 	if (peer)
8579 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8580 	return status;
8581 }
8582 #endif
8583 
8584 /*
8585  * dp_get_sec_type() - Get the security type
8586  * @soc: soc handle
8587  * @vdev_id: id of dp handle
8588  * @peer_mac: mac of datapath PEER handle
8589  * @sec_idx:    Security id (mcast, ucast)
8590  *
8591  * return sec_type: Security type
8592  */
8593 static int dp_get_sec_type(struct cdp_soc_t *soc, uint8_t vdev_id,
8594 			   uint8_t *peer_mac, uint8_t sec_idx)
8595 {
8596 	int sec_type = 0;
8597 	struct dp_peer *peer =
8598 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
8599 						       peer_mac, 0, vdev_id,
8600 						       DP_MOD_ID_CDP);
8601 
8602 	if (!peer) {
8603 		dp_cdp_err("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
8604 		return sec_type;
8605 	}
8606 
8607 	if (!peer->txrx_peer) {
8608 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8609 		dp_peer_debug("%pK: txrx peer is NULL!\n", soc);
8610 		return sec_type;
8611 	}
8612 	sec_type = peer->txrx_peer->security[sec_idx].sec_type;
8613 
8614 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8615 	return sec_type;
8616 }
8617 
8618 /*
8619  * dp_peer_authorize() - authorize txrx peer
8620  * @soc: soc handle
8621  * @vdev_id: id of dp handle
8622  * @peer_mac: mac of datapath PEER handle
8623  * @authorize
8624  *
8625  */
8626 static QDF_STATUS
8627 dp_peer_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8628 		  uint8_t *peer_mac, uint32_t authorize)
8629 {
8630 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8631 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8632 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac,
8633 							      0, vdev_id,
8634 							      DP_MOD_ID_CDP);
8635 
8636 	if (!peer) {
8637 		dp_cdp_debug("%pK: Peer is NULL!\n", soc);
8638 		status = QDF_STATUS_E_FAILURE;
8639 	} else {
8640 		peer->authorize = authorize ? 1 : 0;
8641 		if (peer->txrx_peer)
8642 			peer->txrx_peer->authorize = peer->authorize;
8643 
8644 		if (!peer->authorize)
8645 			dp_peer_flush_frags(soc_hdl, vdev_id, peer_mac);
8646 
8647 		dp_mlo_peer_authorize(soc, peer);
8648 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8649 	}
8650 
8651 	return status;
8652 }
8653 
8654 /*
8655  * dp_peer_get_authorize() - get peer authorize status
8656  * @soc: soc handle
8657  * @vdev_id: id of dp handle
8658  * @peer_mac: mac of datapath PEER handle
8659  *
8660  * Retusn: true is peer is authorized, false otherwise
8661  */
8662 static bool
8663 dp_peer_get_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8664 		      uint8_t *peer_mac)
8665 {
8666 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8667 	bool authorize = false;
8668 	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
8669 						      0, vdev_id,
8670 						      DP_MOD_ID_CDP);
8671 
8672 	if (!peer) {
8673 		dp_cdp_debug("%pK: Peer is NULL!\n", soc);
8674 		return authorize;
8675 	}
8676 
8677 	authorize = peer->authorize;
8678 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8679 
8680 	return authorize;
8681 }
8682 
8683 /**
8684  * dp_vdev_unref_delete() - check and process vdev delete
8685  * @soc : DP specific soc pointer
8686  * @vdev: DP specific vdev pointer
8687  * @mod_id: module id
8688  *
8689  */
8690 void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev,
8691 			  enum dp_mod_id mod_id)
8692 {
8693 	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
8694 	void *vdev_delete_context = NULL;
8695 	uint8_t vdev_id = vdev->vdev_id;
8696 	struct dp_pdev *pdev = vdev->pdev;
8697 	struct dp_vdev *tmp_vdev = NULL;
8698 	uint8_t found = 0;
8699 
8700 	QDF_ASSERT(qdf_atomic_dec_return(&vdev->mod_refs[mod_id]) >= 0);
8701 
8702 	/* Return if this is not the last reference*/
8703 	if (!qdf_atomic_dec_and_test(&vdev->ref_cnt))
8704 		return;
8705 
8706 	/*
8707 	 * This should be set as last reference need to released
8708 	 * after cdp_vdev_detach() is called
8709 	 *
8710 	 * if this assert is hit there is a ref count issue
8711 	 */
8712 	QDF_ASSERT(vdev->delete.pending);
8713 
8714 	vdev_delete_cb = vdev->delete.callback;
8715 	vdev_delete_context = vdev->delete.context;
8716 
8717 	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")- its last peer is done",
8718 		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
8719 
8720 	if (wlan_op_mode_monitor == vdev->opmode) {
8721 		dp_monitor_vdev_delete(soc, vdev);
8722 		goto free_vdev;
8723 	}
8724 
8725 	/* all peers are gone, go ahead and delete it */
8726 	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
8727 			FLOW_TYPE_VDEV, vdev_id);
8728 	dp_tx_vdev_detach(vdev);
8729 	dp_monitor_vdev_detach(vdev);
8730 
8731 free_vdev:
8732 	qdf_spinlock_destroy(&vdev->peer_list_lock);
8733 
8734 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
8735 	TAILQ_FOREACH(tmp_vdev, &soc->inactive_vdev_list,
8736 		      inactive_list_elem) {
8737 		if (tmp_vdev == vdev) {
8738 			found = 1;
8739 			break;
8740 		}
8741 	}
8742 	if (found)
8743 		TAILQ_REMOVE(&soc->inactive_vdev_list, vdev,
8744 			     inactive_list_elem);
8745 	/* delete this peer from the list */
8746 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
8747 
8748 	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")",
8749 		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
8750 	wlan_minidump_remove(vdev, sizeof(*vdev), soc->ctrl_psoc,
8751 			     WLAN_MD_DP_VDEV, "dp_vdev");
8752 	qdf_mem_free(vdev);
8753 	vdev = NULL;
8754 
8755 	if (vdev_delete_cb)
8756 		vdev_delete_cb(vdev_delete_context);
8757 }
8758 
8759 qdf_export_symbol(dp_vdev_unref_delete);
8760 
8761 /*
8762  * dp_peer_unref_delete() - unref and delete peer
8763  * @peer_handle:    Datapath peer handle
8764  * @mod_id:         ID of module releasing reference
8765  *
8766  */
8767 void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id mod_id)
8768 {
8769 	struct dp_vdev *vdev = peer->vdev;
8770 	struct dp_pdev *pdev = vdev->pdev;
8771 	struct dp_soc *soc = pdev->soc;
8772 	uint16_t peer_id;
8773 	struct dp_peer *tmp_peer;
8774 	bool found = false;
8775 
8776 	if (mod_id > DP_MOD_ID_RX)
8777 		QDF_ASSERT(qdf_atomic_dec_return(&peer->mod_refs[mod_id]) >= 0);
8778 
8779 	/*
8780 	 * Hold the lock all the way from checking if the peer ref count
8781 	 * is zero until the peer references are removed from the hash
8782 	 * table and vdev list (if the peer ref count is zero).
8783 	 * This protects against a new HL tx operation starting to use the
8784 	 * peer object just after this function concludes it's done being used.
8785 	 * Furthermore, the lock needs to be held while checking whether the
8786 	 * vdev's list of peers is empty, to make sure that list is not modified
8787 	 * concurrently with the empty check.
8788 	 */
8789 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
8790 		peer_id = peer->peer_id;
8791 
8792 		/*
8793 		 * Make sure that the reference to the peer in
8794 		 * peer object map is removed
8795 		 */
8796 		QDF_ASSERT(peer_id == HTT_INVALID_PEER);
8797 
8798 		dp_peer_info("Deleting peer %pK ("QDF_MAC_ADDR_FMT")", peer,
8799 			     QDF_MAC_ADDR_REF(peer->mac_addr.raw));
8800 
8801 		dp_peer_sawf_ctx_free(soc, peer);
8802 
8803 		wlan_minidump_remove(peer, sizeof(*peer), soc->ctrl_psoc,
8804 				     WLAN_MD_DP_PEER, "dp_peer");
8805 
8806 		qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
8807 		TAILQ_FOREACH(tmp_peer, &soc->inactive_peer_list,
8808 			      inactive_list_elem) {
8809 			if (tmp_peer == peer) {
8810 				found = 1;
8811 				break;
8812 			}
8813 		}
8814 		if (found)
8815 			TAILQ_REMOVE(&soc->inactive_peer_list, peer,
8816 				     inactive_list_elem);
8817 		/* delete this peer from the list */
8818 		qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
8819 		DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
8820 		dp_peer_update_state(soc, peer, DP_PEER_STATE_FREED);
8821 
8822 		/* cleanup the peer data */
8823 		dp_peer_cleanup(vdev, peer);
8824 
8825 		if (!IS_MLO_DP_MLD_PEER(peer))
8826 			dp_monitor_peer_detach(soc, peer);
8827 
8828 		qdf_spinlock_destroy(&peer->peer_state_lock);
8829 
8830 		dp_txrx_peer_detach(soc, peer);
8831 		qdf_mem_free(peer);
8832 
8833 		/*
8834 		 * Decrement ref count taken at peer create
8835 		 */
8836 		dp_peer_info("Deleted peer. Unref vdev %pK, vdev_ref_cnt %d",
8837 			     vdev, qdf_atomic_read(&vdev->ref_cnt));
8838 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CHILD);
8839 	}
8840 }
8841 
8842 qdf_export_symbol(dp_peer_unref_delete);
8843 
8844 /*
8845  * dp_txrx_peer_unref_delete() - unref and delete peer
8846  * @handle: Datapath txrx ref handle
8847  * @mod_id: Module ID of the caller
8848  *
8849  */
8850 void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle,
8851 			       enum dp_mod_id mod_id)
8852 {
8853 	dp_peer_unref_delete((struct dp_peer *)handle, mod_id);
8854 }
8855 
8856 qdf_export_symbol(dp_txrx_peer_unref_delete);
8857 
8858 /*
8859  * dp_peer_delete_wifi3() – Delete txrx peer
8860  * @soc_hdl: soc handle
8861  * @vdev_id: id of dp handle
8862  * @peer_mac: mac of datapath PEER handle
8863  * @bitmap: bitmap indicating special handling of request.
8864  * @peer_type: peer type (link or MLD)
8865  *
8866  */
8867 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
8868 				       uint8_t vdev_id,
8869 				       uint8_t *peer_mac, uint32_t bitmap,
8870 				       enum cdp_peer_type peer_type)
8871 {
8872 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8873 	struct dp_peer *peer;
8874 	struct cdp_peer_info peer_info = { 0 };
8875 	struct dp_vdev *vdev = NULL;
8876 
8877 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac,
8878 				 false, peer_type);
8879 	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
8880 
8881 	/* Peer can be null for monitor vap mac address */
8882 	if (!peer) {
8883 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
8884 			  "%s: Invalid peer\n", __func__);
8885 		return QDF_STATUS_E_FAILURE;
8886 	}
8887 
8888 	if (!peer->valid) {
8889 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8890 		dp_err("Invalid peer: "QDF_MAC_ADDR_FMT,
8891 			QDF_MAC_ADDR_REF(peer_mac));
8892 		return QDF_STATUS_E_ALREADY;
8893 	}
8894 
8895 	vdev = peer->vdev;
8896 
8897 	if (!vdev) {
8898 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8899 		return QDF_STATUS_E_FAILURE;
8900 	}
8901 
8902 	peer->valid = 0;
8903 
8904 	dp_init_info("%pK: peer %pK (" QDF_MAC_ADDR_FMT ")",
8905 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
8906 
8907 	dp_local_peer_id_free(peer->vdev->pdev, peer);
8908 
8909 	/* Drop all rx packets before deleting peer */
8910 	dp_clear_peer_internal(soc, peer);
8911 
8912 	qdf_spinlock_destroy(&peer->peer_info_lock);
8913 	dp_peer_multipass_list_remove(peer);
8914 
8915 	/* remove the reference to the peer from the hash table */
8916 	dp_peer_find_hash_remove(soc, peer);
8917 
8918 	dp_peer_vdev_list_remove(soc, vdev, peer);
8919 
8920 	dp_peer_mlo_delete(peer);
8921 
8922 	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
8923 	TAILQ_INSERT_TAIL(&soc->inactive_peer_list, peer,
8924 			  inactive_list_elem);
8925 	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
8926 
8927 	/*
8928 	 * Remove the reference added during peer_attach.
8929 	 * The peer will still be left allocated until the
8930 	 * PEER_UNMAP message arrives to remove the other
8931 	 * reference, added by the PEER_MAP message.
8932 	 */
8933 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
8934 	/*
8935 	 * Remove the reference taken above
8936 	 */
8937 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8938 
8939 	return QDF_STATUS_SUCCESS;
8940 }
8941 
8942 #ifdef DP_RX_UDP_OVER_PEER_ROAM
8943 static QDF_STATUS dp_update_roaming_peer_wifi3(struct cdp_soc_t *soc_hdl,
8944 					       uint8_t vdev_id,
8945 					       uint8_t *peer_mac,
8946 					       uint32_t auth_status)
8947 {
8948 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8949 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8950 						     DP_MOD_ID_CDP);
8951 	if (!vdev)
8952 		return QDF_STATUS_E_FAILURE;
8953 
8954 	vdev->roaming_peer_status = auth_status;
8955 	qdf_mem_copy(vdev->roaming_peer_mac.raw, peer_mac,
8956 		     QDF_MAC_ADDR_SIZE);
8957 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8958 
8959 	return QDF_STATUS_SUCCESS;
8960 }
8961 #endif
8962 /*
8963  * dp_get_vdev_mac_addr_wifi3() – Detach txrx peer
8964  * @soc_hdl: Datapath soc handle
8965  * @vdev_id: virtual interface id
8966  *
8967  * Return: MAC address on success, NULL on failure.
8968  *
8969  */
8970 static uint8_t *dp_get_vdev_mac_addr_wifi3(struct cdp_soc_t *soc_hdl,
8971 					   uint8_t vdev_id)
8972 {
8973 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8974 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8975 						     DP_MOD_ID_CDP);
8976 	uint8_t *mac = NULL;
8977 
8978 	if (!vdev)
8979 		return NULL;
8980 
8981 	mac = vdev->mac_addr.raw;
8982 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8983 
8984 	return mac;
8985 }
8986 
8987 /*
8988  * dp_vdev_set_wds() - Enable per packet stats
8989  * @soc: DP soc handle
8990  * @vdev_id: id of DP VDEV handle
8991  * @val: value
8992  *
8993  * Return: none
8994  */
8995 static int dp_vdev_set_wds(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8996 			   uint32_t val)
8997 {
8998 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8999 	struct dp_vdev *vdev =
9000 		dp_vdev_get_ref_by_id((struct dp_soc *)soc, vdev_id,
9001 				      DP_MOD_ID_CDP);
9002 
9003 	if (!vdev)
9004 		return QDF_STATUS_E_FAILURE;
9005 
9006 	vdev->wds_enabled = val;
9007 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9008 
9009 	return QDF_STATUS_SUCCESS;
9010 }
9011 
9012 static int dp_get_opmode(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
9013 {
9014 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9015 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9016 						     DP_MOD_ID_CDP);
9017 	int opmode;
9018 
9019 	if (!vdev) {
9020 		dp_err_rl("vdev for id %d is NULL", vdev_id);
9021 		return -EINVAL;
9022 	}
9023 	opmode = vdev->opmode;
9024 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9025 
9026 	return opmode;
9027 }
9028 
9029 /**
9030  * dp_get_os_rx_handles_from_vdev_wifi3() - Get os rx handles for a vdev
9031  * @soc_hdl: ol_txrx_soc_handle handle
9032  * @vdev_id: vdev id for which os rx handles are needed
9033  * @stack_fn_p: pointer to stack function pointer
9034  * @osif_handle_p: pointer to ol_osif_vdev_handle
9035  *
9036  * Return: void
9037  */
9038 static
9039 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_soc_t *soc_hdl,
9040 					  uint8_t vdev_id,
9041 					  ol_txrx_rx_fp *stack_fn_p,
9042 					  ol_osif_vdev_handle *osif_vdev_p)
9043 {
9044 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9045 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9046 						     DP_MOD_ID_CDP);
9047 
9048 	if (qdf_unlikely(!vdev)) {
9049 		*stack_fn_p = NULL;
9050 		*osif_vdev_p = NULL;
9051 		return;
9052 	}
9053 	*stack_fn_p = vdev->osif_rx_stack;
9054 	*osif_vdev_p = vdev->osif_vdev;
9055 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9056 }
9057 
9058 /**
9059  * dp_get_ctrl_pdev_from_vdev() - Get control pdev of vdev
9060  * @soc_hdl: datapath soc handle
9061  * @vdev_id: virtual device/interface id
9062  *
9063  * Return: Handle to control pdev
9064  */
9065 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(
9066 						struct cdp_soc_t *soc_hdl,
9067 						uint8_t vdev_id)
9068 {
9069 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9070 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9071 						     DP_MOD_ID_CDP);
9072 	struct dp_pdev *pdev;
9073 
9074 	if (!vdev)
9075 		return NULL;
9076 
9077 	pdev = vdev->pdev;
9078 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9079 	return pdev ? (struct cdp_cfg *)pdev->wlan_cfg_ctx : NULL;
9080 }
9081 
9082 /**
9083  * dp_get_tx_pending() - read pending tx
9084  * @pdev_handle: Datapath PDEV handle
9085  *
9086  * Return: outstanding tx
9087  */
9088 static int32_t dp_get_tx_pending(struct cdp_pdev *pdev_handle)
9089 {
9090 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
9091 
9092 	return qdf_atomic_read(&pdev->num_tx_outstanding);
9093 }
9094 
9095 /**
9096  * dp_get_peer_mac_from_peer_id() - get peer mac
9097  * @pdev_handle: Datapath PDEV handle
9098  * @peer_id: Peer ID
9099  * @peer_mac: MAC addr of PEER
9100  *
9101  * Return: QDF_STATUS
9102  */
9103 static QDF_STATUS dp_get_peer_mac_from_peer_id(struct cdp_soc_t *soc,
9104 					       uint32_t peer_id,
9105 					       uint8_t *peer_mac)
9106 {
9107 	struct dp_peer *peer;
9108 
9109 	if (soc && peer_mac) {
9110 		peer = dp_peer_get_ref_by_id((struct dp_soc *)soc,
9111 					     (uint16_t)peer_id,
9112 					     DP_MOD_ID_CDP);
9113 		if (peer) {
9114 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
9115 				     QDF_MAC_ADDR_SIZE);
9116 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9117 			return QDF_STATUS_SUCCESS;
9118 		}
9119 	}
9120 
9121 	return QDF_STATUS_E_FAILURE;
9122 }
9123 
9124 #ifdef MESH_MODE_SUPPORT
9125 static
9126 void dp_vdev_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
9127 {
9128 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
9129 
9130 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
9131 	vdev->mesh_vdev = val;
9132 	if (val)
9133 		vdev->skip_sw_tid_classification |=
9134 			DP_TX_MESH_ENABLED;
9135 	else
9136 		vdev->skip_sw_tid_classification &=
9137 			~DP_TX_MESH_ENABLED;
9138 }
9139 
9140 /*
9141  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
9142  * @vdev_hdl: virtual device object
9143  * @val: value to be set
9144  *
9145  * Return: void
9146  */
9147 static
9148 void dp_vdev_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
9149 {
9150 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
9151 
9152 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
9153 	vdev->mesh_rx_filter = val;
9154 }
9155 #endif
9156 
9157 /*
9158  * dp_vdev_set_hlos_tid_override() - to set hlos tid override
9159  * @vdev_hdl: virtual device object
9160  * @val: value to be set
9161  *
9162  * Return: void
9163  */
9164 static
9165 void dp_vdev_set_hlos_tid_override(struct dp_vdev *vdev, uint32_t val)
9166 {
9167 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
9168 	if (val)
9169 		vdev->skip_sw_tid_classification |=
9170 			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
9171 	else
9172 		vdev->skip_sw_tid_classification &=
9173 			~DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
9174 }
9175 
9176 /*
9177  * dp_vdev_get_hlos_tid_override() - to get hlos tid override flag
9178  * @vdev_hdl: virtual device object
9179  * @val: value to be set
9180  *
9181  * Return: 1 if this flag is set
9182  */
9183 static
9184 uint8_t dp_vdev_get_hlos_tid_override(struct cdp_vdev *vdev_hdl)
9185 {
9186 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
9187 
9188 	return !!(vdev->skip_sw_tid_classification &
9189 			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED);
9190 }
9191 
9192 #ifdef VDEV_PEER_PROTOCOL_COUNT
9193 static void dp_enable_vdev_peer_protocol_count(struct cdp_soc_t *soc_hdl,
9194 					       int8_t vdev_id,
9195 					       bool enable)
9196 {
9197 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9198 	struct dp_vdev *vdev;
9199 
9200 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
9201 	if (!vdev)
9202 		return;
9203 
9204 	dp_info("enable %d vdev_id %d", enable, vdev_id);
9205 	vdev->peer_protocol_count_track = enable;
9206 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9207 }
9208 
9209 static void dp_enable_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
9210 						   int8_t vdev_id,
9211 						   int drop_mask)
9212 {
9213 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9214 	struct dp_vdev *vdev;
9215 
9216 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
9217 	if (!vdev)
9218 		return;
9219 
9220 	dp_info("drop_mask %d vdev_id %d", drop_mask, vdev_id);
9221 	vdev->peer_protocol_count_dropmask = drop_mask;
9222 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9223 }
9224 
9225 static int dp_is_vdev_peer_protocol_count_enabled(struct cdp_soc_t *soc_hdl,
9226 						  int8_t vdev_id)
9227 {
9228 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9229 	struct dp_vdev *vdev;
9230 	int peer_protocol_count_track;
9231 
9232 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
9233 	if (!vdev)
9234 		return 0;
9235 
9236 	dp_info("enable %d vdev_id %d", vdev->peer_protocol_count_track,
9237 		vdev_id);
9238 	peer_protocol_count_track =
9239 		vdev->peer_protocol_count_track;
9240 
9241 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9242 	return peer_protocol_count_track;
9243 }
9244 
9245 static int dp_get_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
9246 					       int8_t vdev_id)
9247 {
9248 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9249 	struct dp_vdev *vdev;
9250 	int peer_protocol_count_dropmask;
9251 
9252 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
9253 	if (!vdev)
9254 		return 0;
9255 
9256 	dp_info("drop_mask %d vdev_id %d", vdev->peer_protocol_count_dropmask,
9257 		vdev_id);
9258 	peer_protocol_count_dropmask =
9259 		vdev->peer_protocol_count_dropmask;
9260 
9261 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9262 	return peer_protocol_count_dropmask;
9263 }
9264 
9265 #endif
9266 
9267 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data)
9268 {
9269 	uint8_t pdev_count;
9270 
9271 	for (pdev_count = 0; pdev_count < MAX_PDEV_CNT; pdev_count++) {
9272 		if (soc->pdev_list[pdev_count] &&
9273 		    soc->pdev_list[pdev_count] == data)
9274 			return true;
9275 	}
9276 	return false;
9277 }
9278 
9279 /**
9280  * dp_rx_bar_stats_cb(): BAR received stats callback
9281  * @soc: SOC handle
9282  * @cb_ctxt: Call back context
9283  * @reo_status: Reo status
9284  *
9285  * return: void
9286  */
9287 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
9288 	union hal_reo_status *reo_status)
9289 {
9290 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
9291 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
9292 
9293 	if (!dp_check_pdev_exists(soc, pdev)) {
9294 		dp_err_rl("pdev doesn't exist");
9295 		return;
9296 	}
9297 
9298 	if (!qdf_atomic_read(&soc->cmn_init_done))
9299 		return;
9300 
9301 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
9302 		DP_PRINT_STATS("REO stats failure %d",
9303 			       queue_status->header.status);
9304 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
9305 		return;
9306 	}
9307 
9308 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
9309 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
9310 
9311 }
9312 
9313 /**
9314  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
9315  * @vdev: DP VDEV handle
9316  *
9317  * return: void
9318  */
9319 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
9320 			     struct cdp_vdev_stats *vdev_stats)
9321 {
9322 	struct dp_soc *soc = NULL;
9323 
9324 	if (!vdev || !vdev->pdev)
9325 		return;
9326 
9327 	soc = vdev->pdev->soc;
9328 
9329 	dp_update_vdev_ingress_stats(vdev);
9330 
9331 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
9332 
9333 	dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, vdev_stats,
9334 			     DP_MOD_ID_GENERIC_STATS);
9335 
9336 	dp_update_vdev_rate_stats(vdev_stats, &vdev->stats);
9337 
9338 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9339 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
9340 			     vdev_stats, vdev->vdev_id,
9341 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
9342 #endif
9343 }
9344 
9345 void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
9346 {
9347 	struct dp_vdev *vdev = NULL;
9348 	struct dp_soc *soc;
9349 	struct cdp_vdev_stats *vdev_stats =
9350 			qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
9351 
9352 	if (!vdev_stats) {
9353 		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
9354 			   pdev->soc);
9355 		return;
9356 	}
9357 
9358 	soc = pdev->soc;
9359 
9360 	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
9361 	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
9362 	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
9363 	qdf_mem_zero(&pdev->stats.rx_i, sizeof(pdev->stats.rx_i));
9364 
9365 	if (dp_monitor_is_enable_mcopy_mode(pdev))
9366 		dp_monitor_invalid_peer_update_pdev_stats(soc, pdev);
9367 
9368 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
9369 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
9370 
9371 		dp_aggregate_vdev_stats(vdev, vdev_stats);
9372 		dp_update_pdev_stats(pdev, vdev_stats);
9373 		dp_update_pdev_ingress_stats(pdev, vdev);
9374 	}
9375 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
9376 	qdf_mem_free(vdev_stats);
9377 
9378 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9379 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
9380 			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
9381 #endif
9382 }
9383 
9384 /**
9385  * dp_vdev_getstats() - get vdev packet level stats
9386  * @vdev_handle: Datapath VDEV handle
9387  * @stats: cdp network device stats structure
9388  *
9389  * Return: QDF_STATUS
9390  */
9391 static QDF_STATUS dp_vdev_getstats(struct cdp_vdev *vdev_handle,
9392 				   struct cdp_dev_stats *stats)
9393 {
9394 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
9395 	struct dp_pdev *pdev;
9396 	struct dp_soc *soc;
9397 	struct cdp_vdev_stats *vdev_stats;
9398 
9399 	if (!vdev)
9400 		return QDF_STATUS_E_FAILURE;
9401 
9402 	pdev = vdev->pdev;
9403 	if (!pdev)
9404 		return QDF_STATUS_E_FAILURE;
9405 
9406 	soc = pdev->soc;
9407 
9408 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
9409 
9410 	if (!vdev_stats) {
9411 		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
9412 			   soc);
9413 		return QDF_STATUS_E_FAILURE;
9414 	}
9415 
9416 	dp_aggregate_vdev_stats(vdev, vdev_stats);
9417 
9418 	stats->tx_packets = vdev_stats->tx.comp_pkt.num;
9419 	stats->tx_bytes = vdev_stats->tx.comp_pkt.bytes;
9420 
9421 	stats->tx_errors = vdev_stats->tx.tx_failed;
9422 	stats->tx_dropped = vdev_stats->tx_i.dropped.dropped_pkt.num +
9423 			    vdev_stats->tx_i.sg.dropped_host.num +
9424 			    vdev_stats->tx_i.mcast_en.dropped_map_error +
9425 			    vdev_stats->tx_i.mcast_en.dropped_self_mac +
9426 			    vdev_stats->tx_i.mcast_en.dropped_send_fail +
9427 			    vdev_stats->tx.nawds_mcast_drop;
9428 
9429 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
9430 		stats->rx_packets = vdev_stats->rx.to_stack.num;
9431 		stats->rx_bytes = vdev_stats->rx.to_stack.bytes;
9432 	} else {
9433 		stats->rx_packets = vdev_stats->rx_i.reo_rcvd_pkt.num +
9434 				    vdev_stats->rx_i.null_q_desc_pkt.num +
9435 				    vdev_stats->rx_i.routed_eapol_pkt.num;
9436 		stats->rx_bytes = vdev_stats->rx_i.reo_rcvd_pkt.bytes +
9437 				  vdev_stats->rx_i.null_q_desc_pkt.bytes +
9438 				  vdev_stats->rx_i.routed_eapol_pkt.bytes;
9439 	}
9440 
9441 	stats->rx_errors = vdev_stats->rx.err.mic_err +
9442 			   vdev_stats->rx.err.decrypt_err +
9443 			   vdev_stats->rx.err.fcserr +
9444 			   vdev_stats->rx.err.pn_err +
9445 			   vdev_stats->rx.err.oor_err +
9446 			   vdev_stats->rx.err.jump_2k_err +
9447 			   vdev_stats->rx.err.rxdma_wifi_parse_err;
9448 
9449 	stats->rx_dropped = vdev_stats->rx.mec_drop.num +
9450 			    vdev_stats->rx.multipass_rx_pkt_drop +
9451 			    vdev_stats->rx.peer_unauth_rx_pkt_drop +
9452 			    vdev_stats->rx.policy_check_drop +
9453 			    vdev_stats->rx.nawds_mcast_drop +
9454 			    vdev_stats->rx.mcast_3addr_drop;
9455 
9456 	qdf_mem_free(vdev_stats);
9457 
9458 	return QDF_STATUS_SUCCESS;
9459 }
9460 
9461 /**
9462  * dp_pdev_getstats() - get pdev packet level stats
9463  * @pdev_handle: Datapath PDEV handle
9464  * @stats: cdp network device stats structure
9465  *
9466  * Return: QDF_STATUS
9467  */
9468 static void dp_pdev_getstats(struct cdp_pdev *pdev_handle,
9469 			     struct cdp_dev_stats *stats)
9470 {
9471 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
9472 
9473 	dp_aggregate_pdev_stats(pdev);
9474 
9475 	stats->tx_packets = pdev->stats.tx.comp_pkt.num;
9476 	stats->tx_bytes = pdev->stats.tx.comp_pkt.bytes;
9477 
9478 	stats->tx_errors = pdev->stats.tx.tx_failed;
9479 	stats->tx_dropped = pdev->stats.tx_i.dropped.dropped_pkt.num +
9480 			    pdev->stats.tx_i.sg.dropped_host.num +
9481 			    pdev->stats.tx_i.mcast_en.dropped_map_error +
9482 			    pdev->stats.tx_i.mcast_en.dropped_self_mac +
9483 			    pdev->stats.tx_i.mcast_en.dropped_send_fail +
9484 			    pdev->stats.tx.nawds_mcast_drop +
9485 			    pdev->stats.tso_stats.dropped_host.num;
9486 
9487 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(pdev->soc->wlan_cfg_ctx)) {
9488 		stats->rx_packets = pdev->stats.rx.to_stack.num;
9489 		stats->rx_bytes = pdev->stats.rx.to_stack.bytes;
9490 	} else {
9491 		stats->rx_packets = pdev->stats.rx_i.reo_rcvd_pkt.num +
9492 				    pdev->stats.rx_i.null_q_desc_pkt.num +
9493 				    pdev->stats.rx_i.routed_eapol_pkt.num;
9494 		stats->rx_bytes = pdev->stats.rx_i.reo_rcvd_pkt.bytes +
9495 				  pdev->stats.rx_i.null_q_desc_pkt.bytes +
9496 				  pdev->stats.rx_i.routed_eapol_pkt.bytes;
9497 	}
9498 
9499 	stats->rx_errors = pdev->stats.err.ip_csum_err +
9500 		pdev->stats.err.tcp_udp_csum_err +
9501 		pdev->stats.rx.err.mic_err +
9502 		pdev->stats.rx.err.decrypt_err +
9503 		pdev->stats.rx.err.fcserr +
9504 		pdev->stats.rx.err.pn_err +
9505 		pdev->stats.rx.err.oor_err +
9506 		pdev->stats.rx.err.jump_2k_err +
9507 		pdev->stats.rx.err.rxdma_wifi_parse_err;
9508 	stats->rx_dropped = pdev->stats.dropped.msdu_not_done +
9509 		pdev->stats.dropped.mec +
9510 		pdev->stats.dropped.mesh_filter +
9511 		pdev->stats.dropped.wifi_parse +
9512 		pdev->stats.dropped.mon_rx_drop +
9513 		pdev->stats.dropped.mon_radiotap_update_err +
9514 		pdev->stats.rx.mec_drop.num +
9515 		pdev->stats.rx.multipass_rx_pkt_drop +
9516 		pdev->stats.rx.peer_unauth_rx_pkt_drop +
9517 		pdev->stats.rx.policy_check_drop +
9518 		pdev->stats.rx.nawds_mcast_drop +
9519 		pdev->stats.rx.mcast_3addr_drop;
9520 }
9521 
9522 /**
9523  * dp_get_device_stats() - get interface level packet stats
9524  * @soc: soc handle
9525  * @id : vdev_id or pdev_id based on type
9526  * @stats: cdp network device stats structure
9527  * @type: device type pdev/vdev
9528  *
9529  * Return: QDF_STATUS
9530  */
9531 static QDF_STATUS dp_get_device_stats(struct cdp_soc_t *soc_hdl, uint8_t id,
9532 				      struct cdp_dev_stats *stats,
9533 				      uint8_t type)
9534 {
9535 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9536 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
9537 	struct dp_vdev *vdev;
9538 
9539 	switch (type) {
9540 	case UPDATE_VDEV_STATS:
9541 		vdev = dp_vdev_get_ref_by_id(soc, id, DP_MOD_ID_CDP);
9542 
9543 		if (vdev) {
9544 			status = dp_vdev_getstats((struct cdp_vdev *)vdev,
9545 						  stats);
9546 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9547 		}
9548 		return status;
9549 	case UPDATE_PDEV_STATS:
9550 		{
9551 			struct dp_pdev *pdev =
9552 				dp_get_pdev_from_soc_pdev_id_wifi3(
9553 						(struct dp_soc *)soc,
9554 						 id);
9555 			if (pdev) {
9556 				dp_pdev_getstats((struct cdp_pdev *)pdev,
9557 						 stats);
9558 				return QDF_STATUS_SUCCESS;
9559 			}
9560 		}
9561 		break;
9562 	default:
9563 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9564 			"apstats cannot be updated for this input "
9565 			"type %d", type);
9566 		break;
9567 	}
9568 
9569 	return QDF_STATUS_E_FAILURE;
9570 }
9571 
9572 const
9573 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
9574 {
9575 	switch (ring_type) {
9576 	case REO_DST:
9577 		return "Reo_dst";
9578 	case REO_EXCEPTION:
9579 		return "Reo_exception";
9580 	case REO_CMD:
9581 		return "Reo_cmd";
9582 	case REO_REINJECT:
9583 		return "Reo_reinject";
9584 	case REO_STATUS:
9585 		return "Reo_status";
9586 	case WBM2SW_RELEASE:
9587 		return "wbm2sw_release";
9588 	case TCL_DATA:
9589 		return "tcl_data";
9590 	case TCL_CMD_CREDIT:
9591 		return "tcl_cmd_credit";
9592 	case TCL_STATUS:
9593 		return "tcl_status";
9594 	case SW2WBM_RELEASE:
9595 		return "sw2wbm_release";
9596 	case RXDMA_BUF:
9597 		return "Rxdma_buf";
9598 	case RXDMA_DST:
9599 		return "Rxdma_dst";
9600 	case RXDMA_MONITOR_BUF:
9601 		return "Rxdma_monitor_buf";
9602 	case RXDMA_MONITOR_DESC:
9603 		return "Rxdma_monitor_desc";
9604 	case RXDMA_MONITOR_STATUS:
9605 		return "Rxdma_monitor_status";
9606 	case RXDMA_MONITOR_DST:
9607 		return "Rxdma_monitor_destination";
9608 	case WBM_IDLE_LINK:
9609 		return "WBM_hw_idle_link";
9610 	case PPE2TCL:
9611 		return "PPE2TCL";
9612 	case REO2PPE:
9613 		return "REO2PPE";
9614 	default:
9615 		dp_err("Invalid ring type");
9616 		break;
9617 	}
9618 	return "Invalid";
9619 }
9620 
9621 /*
9622  * dp_print_napi_stats(): NAPI stats
9623  * @soc - soc handle
9624  */
9625 void dp_print_napi_stats(struct dp_soc *soc)
9626 {
9627 	hif_print_napi_stats(soc->hif_handle);
9628 }
9629 
9630 /**
9631  * dp_txrx_host_peer_stats_clr): Reinitialize the txrx peer stats
9632  * @soc: Datapath soc
9633  * @peer: Datatpath peer
9634  * @arg: argument to iter function
9635  *
9636  * Return: QDF_STATUS
9637  */
9638 static inline void
9639 dp_txrx_host_peer_stats_clr(struct dp_soc *soc,
9640 			    struct dp_peer *peer,
9641 			    void *arg)
9642 {
9643 	struct dp_txrx_peer *txrx_peer = NULL;
9644 	struct dp_peer *tgt_peer = NULL;
9645 	struct cdp_interface_peer_stats peer_stats_intf;
9646 
9647 	qdf_mem_zero(&peer_stats_intf, sizeof(struct cdp_interface_peer_stats));
9648 
9649 	DP_STATS_CLR(peer);
9650 	/* Clear monitor peer stats */
9651 	dp_monitor_peer_reset_stats(soc, peer);
9652 
9653 	/* Clear MLD peer stats only when link peer is primary */
9654 	if (dp_peer_is_primary_link_peer(peer)) {
9655 		tgt_peer = dp_get_tgt_peer_from_peer(peer);
9656 		if (tgt_peer) {
9657 			DP_STATS_CLR(tgt_peer);
9658 			txrx_peer = tgt_peer->txrx_peer;
9659 			dp_txrx_peer_stats_clr(txrx_peer);
9660 		}
9661 	}
9662 
9663 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9664 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, peer->vdev->pdev->soc,
9665 			     &peer_stats_intf,  peer->peer_id,
9666 			     UPDATE_PEER_STATS, peer->vdev->pdev->pdev_id);
9667 #endif
9668 }
9669 
9670 #ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING
9671 static inline void dp_srng_clear_ring_usage_wm_stats(struct dp_soc *soc)
9672 {
9673 	int ring;
9674 
9675 	for (ring = 0; ring < soc->num_reo_dest_rings; ring++)
9676 		hal_srng_clear_ring_usage_wm_locked(soc->hal_soc,
9677 					    soc->reo_dest_ring[ring].hal_srng);
9678 }
9679 #else
9680 static inline void dp_srng_clear_ring_usage_wm_stats(struct dp_soc *soc)
9681 {
9682 }
9683 #endif
9684 
9685 /**
9686  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
9687  * @vdev: DP_VDEV handle
9688  * @dp_soc: DP_SOC handle
9689  *
9690  * Return: QDF_STATUS
9691  */
9692 static inline QDF_STATUS
9693 dp_txrx_host_stats_clr(struct dp_vdev *vdev, struct dp_soc *soc)
9694 {
9695 	if (!vdev || !vdev->pdev)
9696 		return QDF_STATUS_E_FAILURE;
9697 
9698 	/*
9699 	 * if NSS offload is enabled, then send message
9700 	 * to NSS FW to clear the stats. Once NSS FW clears the statistics
9701 	 * then clear host statistics.
9702 	 */
9703 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
9704 		if (soc->cdp_soc.ol_ops->nss_stats_clr)
9705 			soc->cdp_soc.ol_ops->nss_stats_clr(soc->ctrl_psoc,
9706 							   vdev->vdev_id);
9707 	}
9708 
9709 	dp_vdev_stats_hw_offload_target_clear(soc, vdev->pdev->pdev_id,
9710 					      (1 << vdev->vdev_id));
9711 
9712 	DP_STATS_CLR(vdev->pdev);
9713 	DP_STATS_CLR(vdev->pdev->soc);
9714 	DP_STATS_CLR(vdev);
9715 
9716 	hif_clear_napi_stats(vdev->pdev->soc->hif_handle);
9717 
9718 	dp_vdev_iterate_peer(vdev, dp_txrx_host_peer_stats_clr, NULL,
9719 			     DP_MOD_ID_GENERIC_STATS);
9720 
9721 	dp_srng_clear_ring_usage_wm_stats(soc);
9722 
9723 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9724 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
9725 			     &vdev->stats,  vdev->vdev_id,
9726 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
9727 #endif
9728 	return QDF_STATUS_SUCCESS;
9729 }
9730 
9731 /**
9732  * dp_get_peer_calibr_stats()- Get peer calibrated stats
9733  * @peer: Datapath peer
9734  * @peer_stats: buffer for peer stats
9735  *
9736  * Return: none
9737  */
9738 static inline
9739 void dp_get_peer_calibr_stats(struct dp_peer *peer,
9740 			      struct cdp_peer_stats *peer_stats)
9741 {
9742 	struct dp_peer *tgt_peer;
9743 
9744 	tgt_peer = dp_get_tgt_peer_from_peer(peer);
9745 	if (!tgt_peer)
9746 		return;
9747 
9748 	peer_stats->tx.last_per = tgt_peer->stats.tx.last_per;
9749 	peer_stats->tx.tx_bytes_success_last =
9750 				tgt_peer->stats.tx.tx_bytes_success_last;
9751 	peer_stats->tx.tx_data_success_last =
9752 					tgt_peer->stats.tx.tx_data_success_last;
9753 	peer_stats->tx.tx_byte_rate = tgt_peer->stats.tx.tx_byte_rate;
9754 	peer_stats->tx.tx_data_rate = tgt_peer->stats.tx.tx_data_rate;
9755 	peer_stats->tx.tx_data_ucast_last =
9756 					tgt_peer->stats.tx.tx_data_ucast_last;
9757 	peer_stats->tx.tx_data_ucast_rate =
9758 					tgt_peer->stats.tx.tx_data_ucast_rate;
9759 	peer_stats->tx.inactive_time = tgt_peer->stats.tx.inactive_time;
9760 	peer_stats->rx.rx_bytes_success_last =
9761 				tgt_peer->stats.rx.rx_bytes_success_last;
9762 	peer_stats->rx.rx_data_success_last =
9763 				tgt_peer->stats.rx.rx_data_success_last;
9764 	peer_stats->rx.rx_byte_rate = tgt_peer->stats.rx.rx_byte_rate;
9765 	peer_stats->rx.rx_data_rate = tgt_peer->stats.rx.rx_data_rate;
9766 }
9767 
9768 /**
9769  * dp_get_peer_basic_stats()- Get peer basic stats
9770  * @peer: Datapath peer
9771  * @peer_stats: buffer for peer stats
9772  *
9773  * Return: none
9774  */
9775 #ifdef QCA_ENHANCED_STATS_SUPPORT
9776 static inline
9777 void dp_get_peer_basic_stats(struct dp_peer *peer,
9778 			     struct cdp_peer_stats *peer_stats)
9779 {
9780 	struct dp_txrx_peer *txrx_peer;
9781 
9782 	txrx_peer = dp_get_txrx_peer(peer);
9783 	if (!txrx_peer)
9784 		return;
9785 
9786 	peer_stats->tx.comp_pkt.num += txrx_peer->comp_pkt.num;
9787 	peer_stats->tx.comp_pkt.bytes += txrx_peer->comp_pkt.bytes;
9788 	peer_stats->tx.tx_failed += txrx_peer->tx_failed;
9789 	peer_stats->rx.to_stack.num += txrx_peer->to_stack.num;
9790 	peer_stats->rx.to_stack.bytes += txrx_peer->to_stack.bytes;
9791 }
9792 #else
9793 static inline
9794 void dp_get_peer_basic_stats(struct dp_peer *peer,
9795 			     struct cdp_peer_stats *peer_stats)
9796 {
9797 	struct dp_txrx_peer *txrx_peer;
9798 
9799 	txrx_peer = dp_get_txrx_peer(peer);
9800 	if (!txrx_peer)
9801 		return;
9802 
9803 	peer_stats->tx.comp_pkt.num += txrx_peer->comp_pkt.num;
9804 	peer_stats->tx.comp_pkt.bytes += txrx_peer->comp_pkt.bytes;
9805 	peer_stats->tx.tx_failed += txrx_peer->tx_failed;
9806 	peer_stats->rx.to_stack.num += txrx_peer->to_stack.num;
9807 	peer_stats->rx.to_stack.bytes += txrx_peer->to_stack.bytes;
9808 }
9809 #endif
9810 
9811 /**
9812  * dp_get_peer_per_pkt_stats()- Get peer per pkt stats
9813  * @peer: Datapath peer
9814  * @peer_stats: buffer for peer stats
9815  *
9816  * Return: none
9817  */
9818 #ifdef QCA_ENHANCED_STATS_SUPPORT
9819 static inline
9820 void dp_get_peer_per_pkt_stats(struct dp_peer *peer,
9821 			       struct cdp_peer_stats *peer_stats)
9822 {
9823 	struct dp_txrx_peer *txrx_peer;
9824 	struct dp_peer_per_pkt_stats *per_pkt_stats;
9825 
9826 	txrx_peer = dp_get_txrx_peer(peer);
9827 	if (!txrx_peer)
9828 		return;
9829 
9830 	per_pkt_stats = &txrx_peer->stats.per_pkt_stats;
9831 	DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats);
9832 }
9833 #else
9834 static inline
9835 void dp_get_peer_per_pkt_stats(struct dp_peer *peer,
9836 			       struct cdp_peer_stats *peer_stats)
9837 {
9838 	struct dp_txrx_peer *txrx_peer;
9839 	struct dp_peer_per_pkt_stats *per_pkt_stats;
9840 
9841 	txrx_peer = dp_get_txrx_peer(peer);
9842 	if (!txrx_peer)
9843 		return;
9844 
9845 	per_pkt_stats = &txrx_peer->stats.per_pkt_stats;
9846 	DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats);
9847 }
9848 #endif
9849 
9850 /**
9851  * dp_get_peer_extd_stats()- Get peer extd stats
9852  * @peer: Datapath peer
9853  * @peer_stats: buffer for peer stats
9854  *
9855  * Return: none
9856  */
9857 #ifdef QCA_ENHANCED_STATS_SUPPORT
9858 #ifdef WLAN_FEATURE_11BE_MLO
9859 static inline
9860 void dp_get_peer_extd_stats(struct dp_peer *peer,
9861 			    struct cdp_peer_stats *peer_stats)
9862 {
9863 	struct dp_soc *soc = peer->vdev->pdev->soc;
9864 
9865 	if (IS_MLO_DP_MLD_PEER(peer)) {
9866 		uint8_t i;
9867 		struct dp_peer *link_peer;
9868 		struct dp_soc *link_peer_soc;
9869 		struct dp_mld_link_peers link_peers_info;
9870 
9871 		dp_get_link_peers_ref_from_mld_peer(soc, peer,
9872 						    &link_peers_info,
9873 						    DP_MOD_ID_CDP);
9874 		for (i = 0; i < link_peers_info.num_links; i++) {
9875 			link_peer = link_peers_info.link_peers[i];
9876 			link_peer_soc = link_peer->vdev->pdev->soc;
9877 			dp_monitor_peer_get_stats(link_peer_soc, link_peer,
9878 						  peer_stats,
9879 						  UPDATE_PEER_STATS);
9880 		}
9881 		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
9882 	} else {
9883 		dp_monitor_peer_get_stats(soc, peer, peer_stats,
9884 					  UPDATE_PEER_STATS);
9885 	}
9886 }
9887 #else
9888 static inline
9889 void dp_get_peer_extd_stats(struct dp_peer *peer,
9890 			    struct cdp_peer_stats *peer_stats)
9891 {
9892 	struct dp_soc *soc = peer->vdev->pdev->soc;
9893 
9894 	dp_monitor_peer_get_stats(soc, peer, peer_stats, UPDATE_PEER_STATS);
9895 }
9896 #endif
9897 #else
9898 static inline
9899 void dp_get_peer_extd_stats(struct dp_peer *peer,
9900 			    struct cdp_peer_stats *peer_stats)
9901 {
9902 	struct dp_txrx_peer *txrx_peer;
9903 	struct dp_peer_extd_stats *extd_stats;
9904 
9905 	txrx_peer = dp_get_txrx_peer(peer);
9906 	if (qdf_unlikely(!txrx_peer)) {
9907 		dp_err_rl("txrx_peer NULL");
9908 		return;
9909 	}
9910 
9911 	extd_stats = &txrx_peer->stats.extd_stats;
9912 	DP_UPDATE_EXTD_STATS(peer_stats, extd_stats);
9913 }
9914 #endif
9915 
9916 /**
9917  * dp_get_peer_tx_per()- Get peer packet error ratio
9918  * @peer_stats: buffer for peer stats
9919  *
9920  * Return: none
9921  */
9922 static inline
9923 void dp_get_peer_tx_per(struct cdp_peer_stats *peer_stats)
9924 {
9925 	if (peer_stats->tx.tx_success.num + peer_stats->tx.retries > 0)
9926 		peer_stats->tx.per = (peer_stats->tx.retries * 100) /
9927 				  (peer_stats->tx.tx_success.num +
9928 				   peer_stats->tx.retries);
9929 	else
9930 		peer_stats->tx.per = 0;
9931 }
9932 
9933 /**
9934  * dp_get_peer_stats()- Get peer stats
9935  * @peer: Datapath peer
9936  * @peer_stats: buffer for peer stats
9937  *
9938  * Return: none
9939  */
9940 static inline
9941 void dp_get_peer_stats(struct dp_peer *peer, struct cdp_peer_stats *peer_stats)
9942 {
9943 	dp_get_peer_calibr_stats(peer, peer_stats);
9944 
9945 	dp_get_peer_basic_stats(peer, peer_stats);
9946 
9947 	dp_get_peer_per_pkt_stats(peer, peer_stats);
9948 
9949 	dp_get_peer_extd_stats(peer, peer_stats);
9950 
9951 	dp_get_peer_tx_per(peer_stats);
9952 }
9953 
9954 /*
9955  * dp_get_host_peer_stats()- function to print peer stats
9956  * @soc: dp_soc handle
9957  * @mac_addr: mac address of the peer
9958  *
9959  * Return: QDF_STATUS
9960  */
9961 static QDF_STATUS
9962 dp_get_host_peer_stats(struct cdp_soc_t *soc, uint8_t *mac_addr)
9963 {
9964 	struct dp_peer *peer = NULL;
9965 	struct cdp_peer_stats *peer_stats = NULL;
9966 	struct cdp_peer_info peer_info = { 0 };
9967 
9968 	if (!mac_addr) {
9969 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9970 			  "%s: NULL peer mac addr\n", __func__);
9971 		return QDF_STATUS_E_FAILURE;
9972 	}
9973 
9974 	DP_PEER_INFO_PARAMS_INIT(&peer_info, DP_VDEV_ALL, mac_addr, false,
9975 				 CDP_WILD_PEER_TYPE);
9976 
9977 	peer = dp_peer_hash_find_wrapper((struct dp_soc *)soc, &peer_info,
9978 					 DP_MOD_ID_CDP);
9979 	if (!peer) {
9980 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9981 			  "%s: Invalid peer\n", __func__);
9982 		return QDF_STATUS_E_FAILURE;
9983 	}
9984 
9985 	peer_stats = qdf_mem_malloc(sizeof(struct cdp_peer_stats));
9986 	if (!peer_stats) {
9987 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9988 			  "%s: Memory allocation failed for cdp_peer_stats\n",
9989 			  __func__);
9990 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9991 		return QDF_STATUS_E_NOMEM;
9992 	}
9993 
9994 	qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats));
9995 
9996 	dp_get_peer_stats(peer, peer_stats);
9997 	dp_print_peer_stats(peer, peer_stats);
9998 
9999 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
10000 
10001 	qdf_mem_free(peer_stats);
10002 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10003 
10004 	return QDF_STATUS_SUCCESS;
10005 }
10006 
10007 /* *
10008  * dp_dump_wbm_idle_hptp() -dump wbm idle ring, hw hp tp info.
10009  * @soc: dp soc.
10010  * @pdev: dp pdev.
10011  *
10012  * Return: None.
10013  */
10014 static void
10015 dp_dump_wbm_idle_hptp(struct dp_soc *soc, struct dp_pdev *pdev)
10016 {
10017 	uint32_t hw_head;
10018 	uint32_t hw_tail;
10019 	struct dp_srng *srng;
10020 
10021 	if (!soc) {
10022 		dp_err("soc is NULL");
10023 		return;
10024 	}
10025 
10026 	if (!pdev) {
10027 		dp_err("pdev is NULL");
10028 		return;
10029 	}
10030 
10031 	srng = &pdev->soc->wbm_idle_link_ring;
10032 	if (!srng) {
10033 		dp_err("wbm_idle_link_ring srng is NULL");
10034 		return;
10035 	}
10036 
10037 	hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &hw_head,
10038 			&hw_tail, WBM_IDLE_LINK);
10039 
10040 	dp_debug("WBM_IDLE_LINK: HW hp: %d, HW tp: %d",
10041 			hw_head, hw_tail);
10042 }
10043 
10044 
10045 /**
10046  * dp_txrx_stats_help() - Helper function for Txrx_Stats
10047  *
10048  * Return: None
10049  */
10050 static void dp_txrx_stats_help(void)
10051 {
10052 	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
10053 	dp_info("stats_option:");
10054 	dp_info("  1 -- HTT Tx Statistics");
10055 	dp_info("  2 -- HTT Rx Statistics");
10056 	dp_info("  3 -- HTT Tx HW Queue Statistics");
10057 	dp_info("  4 -- HTT Tx HW Sched Statistics");
10058 	dp_info("  5 -- HTT Error Statistics");
10059 	dp_info("  6 -- HTT TQM Statistics");
10060 	dp_info("  7 -- HTT TQM CMDQ Statistics");
10061 	dp_info("  8 -- HTT TX_DE_CMN Statistics");
10062 	dp_info("  9 -- HTT Tx Rate Statistics");
10063 	dp_info(" 10 -- HTT Rx Rate Statistics");
10064 	dp_info(" 11 -- HTT Peer Statistics");
10065 	dp_info(" 12 -- HTT Tx SelfGen Statistics");
10066 	dp_info(" 13 -- HTT Tx MU HWQ Statistics");
10067 	dp_info(" 14 -- HTT RING_IF_INFO Statistics");
10068 	dp_info(" 15 -- HTT SRNG Statistics");
10069 	dp_info(" 16 -- HTT SFM Info Statistics");
10070 	dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
10071 	dp_info(" 18 -- HTT Peer List Details");
10072 	dp_info(" 20 -- Clear Host Statistics");
10073 	dp_info(" 21 -- Host Rx Rate Statistics");
10074 	dp_info(" 22 -- Host Tx Rate Statistics");
10075 	dp_info(" 23 -- Host Tx Statistics");
10076 	dp_info(" 24 -- Host Rx Statistics");
10077 	dp_info(" 25 -- Host AST Statistics");
10078 	dp_info(" 26 -- Host SRNG PTR Statistics");
10079 	dp_info(" 27 -- Host Mon Statistics");
10080 	dp_info(" 28 -- Host REO Queue Statistics");
10081 	dp_info(" 29 -- Host Soc cfg param Statistics");
10082 	dp_info(" 30 -- Host pdev cfg param Statistics");
10083 	dp_info(" 31 -- Host NAPI stats");
10084 	dp_info(" 32 -- Host Interrupt stats");
10085 	dp_info(" 33 -- Host FISA stats");
10086 	dp_info(" 34 -- Host Register Work stats");
10087 	dp_info(" 35 -- HW REO Queue stats");
10088 	dp_info(" 36 -- Host WBM IDLE link desc ring HP/TP");
10089 	dp_info(" 37 -- Host SRNG usage watermark stats");
10090 }
10091 
10092 #ifdef DP_UMAC_HW_RESET_SUPPORT
10093 /**
10094  * dp_umac_rst_skel_enable_update(): Update skel dbg flag for umac reset
10095  * @soc: dp soc handle
10096  * @en: ebable/disable
10097  *
10098  * Return: void
10099  */
10100 static void dp_umac_rst_skel_enable_update(struct dp_soc *soc, bool en)
10101 {
10102 	soc->umac_reset_ctx.skel_enable = en;
10103 	dp_cdp_debug("UMAC HW reset debug skeleton code enabled :%u",
10104 		     soc->umac_reset_ctx.skel_enable);
10105 }
10106 
10107 /**
10108  * dp_umac_rst_skel_enable_get(): Get skel dbg flag for umac reset
10109  * @soc: dp soc handle
10110  *
10111  * Return: enable/disable flag
10112  */
10113 static bool dp_umac_rst_skel_enable_get(struct dp_soc *soc)
10114 {
10115 	return soc->umac_reset_ctx.skel_enable;
10116 }
10117 #else
10118 static void dp_umac_rst_skel_enable_update(struct dp_soc *soc, bool en)
10119 {
10120 }
10121 
10122 static bool dp_umac_rst_skel_enable_get(struct dp_soc *soc)
10123 {
10124 	return false;
10125 }
10126 #endif
10127 
10128 /**
10129  * dp_print_host_stats()- Function to print the stats aggregated at host
10130  * @vdev_handle: DP_VDEV handle
10131  * @req: host stats type
10132  * @soc: dp soc handler
10133  *
10134  * Return: 0 on success, print error message in case of failure
10135  */
10136 static int
10137 dp_print_host_stats(struct dp_vdev *vdev,
10138 		    struct cdp_txrx_stats_req *req,
10139 		    struct dp_soc *soc)
10140 {
10141 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
10142 	enum cdp_host_txrx_stats type =
10143 			dp_stats_mapping_table[req->stats][STATS_HOST];
10144 
10145 	dp_aggregate_pdev_stats(pdev);
10146 
10147 	switch (type) {
10148 	case TXRX_CLEAR_STATS:
10149 		dp_txrx_host_stats_clr(vdev, soc);
10150 		break;
10151 	case TXRX_RX_RATE_STATS:
10152 		dp_print_rx_rates(vdev);
10153 		break;
10154 	case TXRX_TX_RATE_STATS:
10155 		dp_print_tx_rates(vdev);
10156 		break;
10157 	case TXRX_TX_HOST_STATS:
10158 		dp_print_pdev_tx_stats(pdev);
10159 		dp_print_soc_tx_stats(pdev->soc);
10160 		break;
10161 	case TXRX_RX_HOST_STATS:
10162 		dp_print_pdev_rx_stats(pdev);
10163 		dp_print_soc_rx_stats(pdev->soc);
10164 		break;
10165 	case TXRX_AST_STATS:
10166 		dp_print_ast_stats(pdev->soc);
10167 		dp_print_mec_stats(pdev->soc);
10168 		dp_print_peer_table(vdev);
10169 		break;
10170 	case TXRX_SRNG_PTR_STATS:
10171 		dp_print_ring_stats(pdev);
10172 		break;
10173 	case TXRX_RX_MON_STATS:
10174 		dp_monitor_print_pdev_rx_mon_stats(pdev);
10175 		break;
10176 	case TXRX_REO_QUEUE_STATS:
10177 		dp_get_host_peer_stats((struct cdp_soc_t *)pdev->soc,
10178 				       req->peer_addr);
10179 		break;
10180 	case TXRX_SOC_CFG_PARAMS:
10181 		dp_print_soc_cfg_params(pdev->soc);
10182 		break;
10183 	case TXRX_PDEV_CFG_PARAMS:
10184 		dp_print_pdev_cfg_params(pdev);
10185 		break;
10186 	case TXRX_NAPI_STATS:
10187 		dp_print_napi_stats(pdev->soc);
10188 		break;
10189 	case TXRX_SOC_INTERRUPT_STATS:
10190 		dp_print_soc_interrupt_stats(pdev->soc);
10191 		break;
10192 	case TXRX_SOC_FSE_STATS:
10193 		dp_rx_dump_fisa_table(pdev->soc);
10194 		break;
10195 	case TXRX_HAL_REG_WRITE_STATS:
10196 		hal_dump_reg_write_stats(pdev->soc->hal_soc);
10197 		hal_dump_reg_write_srng_stats(pdev->soc->hal_soc);
10198 		break;
10199 	case TXRX_SOC_REO_HW_DESC_DUMP:
10200 		dp_get_rx_reo_queue_info((struct cdp_soc_t *)pdev->soc,
10201 					 vdev->vdev_id);
10202 		break;
10203 	case TXRX_SOC_WBM_IDLE_HPTP_DUMP:
10204 		dp_dump_wbm_idle_hptp(pdev->soc, pdev);
10205 		break;
10206 	case TXRX_SRNG_USAGE_WM_STATS:
10207 		/* Dump usage watermark stats for all SRNGs */
10208 		dp_dump_srng_high_wm_stats(soc, 0xFF);
10209 		break;
10210 	default:
10211 		dp_info("Wrong Input For TxRx Host Stats");
10212 		dp_txrx_stats_help();
10213 		break;
10214 	}
10215 	return 0;
10216 }
10217 
10218 /*
10219  * dp_pdev_tid_stats_ingress_inc
10220  * @pdev: pdev handle
10221  * @val: increase in value
10222  *
10223  * Return: void
10224  */
10225 static void
10226 dp_pdev_tid_stats_ingress_inc(struct dp_pdev *pdev, uint32_t val)
10227 {
10228 	pdev->stats.tid_stats.ingress_stack += val;
10229 }
10230 
10231 /*
10232  * dp_pdev_tid_stats_osif_drop
10233  * @pdev: pdev handle
10234  * @val: increase in value
10235  *
10236  * Return: void
10237  */
10238 static void
10239 dp_pdev_tid_stats_osif_drop(struct dp_pdev *pdev, uint32_t val)
10240 {
10241 	pdev->stats.tid_stats.osif_drop += val;
10242 }
10243 
10244 /*
10245  * dp_get_fw_peer_stats()- function to print peer stats
10246  * @soc: soc handle
10247  * @pdev_id : id of the pdev handle
10248  * @mac_addr: mac address of the peer
10249  * @cap: Type of htt stats requested
10250  * @is_wait: if set, wait on completion from firmware response
10251  *
10252  * Currently Supporting only MAC ID based requests Only
10253  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
10254  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
10255  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
10256  *
10257  * Return: QDF_STATUS
10258  */
10259 static QDF_STATUS
10260 dp_get_fw_peer_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
10261 		     uint8_t *mac_addr,
10262 		     uint32_t cap, uint32_t is_wait)
10263 {
10264 	int i;
10265 	uint32_t config_param0 = 0;
10266 	uint32_t config_param1 = 0;
10267 	uint32_t config_param2 = 0;
10268 	uint32_t config_param3 = 0;
10269 	struct dp_pdev *pdev =
10270 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10271 						   pdev_id);
10272 
10273 	if (!pdev)
10274 		return QDF_STATUS_E_FAILURE;
10275 
10276 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
10277 	config_param0 |= (1 << (cap + 1));
10278 
10279 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
10280 		config_param1 |= (1 << i);
10281 	}
10282 
10283 	config_param2 |= (mac_addr[0] & 0x000000ff);
10284 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
10285 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
10286 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
10287 
10288 	config_param3 |= (mac_addr[4] & 0x000000ff);
10289 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
10290 
10291 	if (is_wait) {
10292 		qdf_event_reset(&pdev->fw_peer_stats_event);
10293 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
10294 					  config_param0, config_param1,
10295 					  config_param2, config_param3,
10296 					  0, DBG_STATS_COOKIE_DP_STATS, 0);
10297 		qdf_wait_single_event(&pdev->fw_peer_stats_event,
10298 				      DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
10299 	} else {
10300 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
10301 					  config_param0, config_param1,
10302 					  config_param2, config_param3,
10303 					  0, DBG_STATS_COOKIE_DEFAULT, 0);
10304 	}
10305 
10306 	return QDF_STATUS_SUCCESS;
10307 
10308 }
10309 
10310 /* This struct definition will be removed from here
10311  * once it get added in FW headers*/
10312 struct httstats_cmd_req {
10313     uint32_t    config_param0;
10314     uint32_t    config_param1;
10315     uint32_t    config_param2;
10316     uint32_t    config_param3;
10317     int cookie;
10318     u_int8_t    stats_id;
10319 };
10320 
10321 /*
10322  * dp_get_htt_stats: function to process the httstas request
10323  * @soc: DP soc handle
10324  * @pdev_id: id of pdev handle
10325  * @data: pointer to request data
10326  * @data_len: length for request data
10327  *
10328  * return: QDF_STATUS
10329  */
10330 static QDF_STATUS
10331 dp_get_htt_stats(struct cdp_soc_t *soc, uint8_t pdev_id, void *data,
10332 		 uint32_t data_len)
10333 {
10334 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
10335 	struct dp_pdev *pdev =
10336 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10337 						   pdev_id);
10338 
10339 	if (!pdev)
10340 		return QDF_STATUS_E_FAILURE;
10341 
10342 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
10343 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
10344 				req->config_param0, req->config_param1,
10345 				req->config_param2, req->config_param3,
10346 				req->cookie, DBG_STATS_COOKIE_DEFAULT, 0);
10347 
10348 	return QDF_STATUS_SUCCESS;
10349 }
10350 
10351 /**
10352  * dp_set_pdev_tidmap_prty_wifi3(): update tidmap priority in pdev
10353  * @pdev: DP_PDEV handle
10354  * @prio: tidmap priority value passed by the user
10355  *
10356  * Return: QDF_STATUS_SUCCESS on success
10357  */
10358 static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct dp_pdev *pdev,
10359 						uint8_t prio)
10360 {
10361 	struct dp_soc *soc = pdev->soc;
10362 
10363 	soc->tidmap_prty = prio;
10364 
10365 	hal_tx_set_tidmap_prty(soc->hal_soc, prio);
10366 	return QDF_STATUS_SUCCESS;
10367 }
10368 
10369 /*
10370  * dp_get_peer_param: function to get parameters in peer
10371  * @cdp_soc: DP soc handle
10372  * @vdev_id: id of vdev handle
10373  * @peer_mac: peer mac address
10374  * @param: parameter type to be set
10375  * @val : address of buffer
10376  *
10377  * Return: val
10378  */
10379 static QDF_STATUS dp_get_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
10380 				    uint8_t *peer_mac,
10381 				    enum cdp_peer_param_type param,
10382 				    cdp_config_param_type *val)
10383 {
10384 	return QDF_STATUS_SUCCESS;
10385 }
10386 
10387 /*
10388  * dp_set_peer_param: function to set parameters in peer
10389  * @cdp_soc: DP soc handle
10390  * @vdev_id: id of vdev handle
10391  * @peer_mac: peer mac address
10392  * @param: parameter type to be set
10393  * @val: value of parameter to be set
10394  *
10395  * Return: 0 for success. nonzero for failure.
10396  */
10397 static QDF_STATUS dp_set_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
10398 				    uint8_t *peer_mac,
10399 				    enum cdp_peer_param_type param,
10400 				    cdp_config_param_type val)
10401 {
10402 	struct dp_peer *peer =
10403 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
10404 						       peer_mac, 0, vdev_id,
10405 						       DP_MOD_ID_CDP);
10406 	struct dp_txrx_peer *txrx_peer;
10407 
10408 	if (!peer)
10409 		return QDF_STATUS_E_FAILURE;
10410 
10411 	txrx_peer = peer->txrx_peer;
10412 	if (!txrx_peer) {
10413 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10414 		return QDF_STATUS_E_FAILURE;
10415 	}
10416 
10417 	switch (param) {
10418 	case CDP_CONFIG_NAWDS:
10419 		txrx_peer->nawds_enabled = val.cdp_peer_param_nawds;
10420 		break;
10421 	case CDP_CONFIG_ISOLATION:
10422 		dp_set_peer_isolation(txrx_peer, val.cdp_peer_param_isolation);
10423 		break;
10424 	case CDP_CONFIG_IN_TWT:
10425 		txrx_peer->in_twt = !!(val.cdp_peer_param_in_twt);
10426 		break;
10427 	default:
10428 		break;
10429 	}
10430 
10431 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10432 
10433 	return QDF_STATUS_SUCCESS;
10434 }
10435 
10436 /*
10437  * dp_get_pdev_param: function to get parameters from pdev
10438  * @cdp_soc: DP soc handle
10439  * @pdev_id: id of pdev handle
10440  * @param: parameter type to be get
10441  * @value : buffer for value
10442  *
10443  * Return: status
10444  */
10445 static QDF_STATUS dp_get_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
10446 				    enum cdp_pdev_param_type param,
10447 				    cdp_config_param_type *val)
10448 {
10449 	struct cdp_pdev *pdev = (struct cdp_pdev *)
10450 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
10451 						   pdev_id);
10452 	if (!pdev)
10453 		return QDF_STATUS_E_FAILURE;
10454 
10455 	switch (param) {
10456 	case CDP_CONFIG_VOW:
10457 		val->cdp_pdev_param_cfg_vow =
10458 				((struct dp_pdev *)pdev)->delay_stats_flag;
10459 		break;
10460 	case CDP_TX_PENDING:
10461 		val->cdp_pdev_param_tx_pending = dp_get_tx_pending(pdev);
10462 		break;
10463 	case CDP_FILTER_MCAST_DATA:
10464 		val->cdp_pdev_param_fltr_mcast =
10465 				dp_monitor_pdev_get_filter_mcast_data(pdev);
10466 		break;
10467 	case CDP_FILTER_NO_DATA:
10468 		val->cdp_pdev_param_fltr_none =
10469 				dp_monitor_pdev_get_filter_non_data(pdev);
10470 		break;
10471 	case CDP_FILTER_UCAST_DATA:
10472 		val->cdp_pdev_param_fltr_ucast =
10473 				dp_monitor_pdev_get_filter_ucast_data(pdev);
10474 		break;
10475 	case CDP_MONITOR_CHANNEL:
10476 		val->cdp_pdev_param_monitor_chan =
10477 			dp_monitor_get_chan_num((struct dp_pdev *)pdev);
10478 		break;
10479 	case CDP_MONITOR_FREQUENCY:
10480 		val->cdp_pdev_param_mon_freq =
10481 			dp_monitor_get_chan_freq((struct dp_pdev *)pdev);
10482 		break;
10483 	default:
10484 		return QDF_STATUS_E_FAILURE;
10485 	}
10486 
10487 	return QDF_STATUS_SUCCESS;
10488 }
10489 
10490 /*
10491  * dp_set_pdev_param: function to set parameters in pdev
10492  * @cdp_soc: DP soc handle
10493  * @pdev_id: id of pdev handle
10494  * @param: parameter type to be set
10495  * @val: value of parameter to be set
10496  *
10497  * Return: 0 for success. nonzero for failure.
10498  */
10499 static QDF_STATUS dp_set_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
10500 				    enum cdp_pdev_param_type param,
10501 				    cdp_config_param_type val)
10502 {
10503 	int target_type;
10504 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
10505 	struct dp_pdev *pdev =
10506 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
10507 						   pdev_id);
10508 	enum reg_wifi_band chan_band;
10509 
10510 	if (!pdev)
10511 		return QDF_STATUS_E_FAILURE;
10512 
10513 	target_type = hal_get_target_type(soc->hal_soc);
10514 	switch (target_type) {
10515 	case TARGET_TYPE_QCA6750:
10516 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC0_LMAC_ID;
10517 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
10518 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
10519 		break;
10520 	case TARGET_TYPE_KIWI:
10521 	case TARGET_TYPE_MANGO:
10522 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC0_LMAC_ID;
10523 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
10524 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
10525 		break;
10526 	default:
10527 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC1_LMAC_ID;
10528 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
10529 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
10530 		break;
10531 	}
10532 
10533 	switch (param) {
10534 	case CDP_CONFIG_TX_CAPTURE:
10535 		return dp_monitor_config_debug_sniffer(pdev,
10536 						val.cdp_pdev_param_tx_capture);
10537 	case CDP_CONFIG_DEBUG_SNIFFER:
10538 		return dp_monitor_config_debug_sniffer(pdev,
10539 						val.cdp_pdev_param_dbg_snf);
10540 	case CDP_CONFIG_BPR_ENABLE:
10541 		return dp_monitor_set_bpr_enable(pdev,
10542 						 val.cdp_pdev_param_bpr_enable);
10543 	case CDP_CONFIG_PRIMARY_RADIO:
10544 		pdev->is_primary = val.cdp_pdev_param_primary_radio;
10545 		break;
10546 	case CDP_CONFIG_CAPTURE_LATENCY:
10547 		pdev->latency_capture_enable = val.cdp_pdev_param_cptr_latcy;
10548 		break;
10549 	case CDP_INGRESS_STATS:
10550 		dp_pdev_tid_stats_ingress_inc(pdev,
10551 					      val.cdp_pdev_param_ingrs_stats);
10552 		break;
10553 	case CDP_OSIF_DROP:
10554 		dp_pdev_tid_stats_osif_drop(pdev,
10555 					    val.cdp_pdev_param_osif_drop);
10556 		break;
10557 	case CDP_CONFIG_ENH_RX_CAPTURE:
10558 		return dp_monitor_config_enh_rx_capture(pdev,
10559 						val.cdp_pdev_param_en_rx_cap);
10560 	case CDP_CONFIG_ENH_TX_CAPTURE:
10561 		return dp_monitor_config_enh_tx_capture(pdev,
10562 						val.cdp_pdev_param_en_tx_cap);
10563 	case CDP_CONFIG_HMMC_TID_OVERRIDE:
10564 		pdev->hmmc_tid_override_en = val.cdp_pdev_param_hmmc_tid_ovrd;
10565 		break;
10566 	case CDP_CONFIG_HMMC_TID_VALUE:
10567 		pdev->hmmc_tid = val.cdp_pdev_param_hmmc_tid;
10568 		break;
10569 	case CDP_CHAN_NOISE_FLOOR:
10570 		pdev->chan_noise_floor = val.cdp_pdev_param_chn_noise_flr;
10571 		break;
10572 	case CDP_TIDMAP_PRTY:
10573 		dp_set_pdev_tidmap_prty_wifi3(pdev,
10574 					      val.cdp_pdev_param_tidmap_prty);
10575 		break;
10576 	case CDP_FILTER_NEIGH_PEERS:
10577 		dp_monitor_set_filter_neigh_peers(pdev,
10578 					val.cdp_pdev_param_fltr_neigh_peers);
10579 		break;
10580 	case CDP_MONITOR_CHANNEL:
10581 		dp_monitor_set_chan_num(pdev, val.cdp_pdev_param_monitor_chan);
10582 		break;
10583 	case CDP_MONITOR_FREQUENCY:
10584 		chan_band = wlan_reg_freq_to_band(val.cdp_pdev_param_mon_freq);
10585 		dp_monitor_set_chan_freq(pdev, val.cdp_pdev_param_mon_freq);
10586 		dp_monitor_set_chan_band(pdev, chan_band);
10587 		break;
10588 	case CDP_CONFIG_BSS_COLOR:
10589 		dp_monitor_set_bsscolor(pdev, val.cdp_pdev_param_bss_color);
10590 		break;
10591 	case CDP_SET_ATF_STATS_ENABLE:
10592 		dp_monitor_set_atf_stats_enable(pdev,
10593 					val.cdp_pdev_param_atf_stats_enable);
10594 		break;
10595 	case CDP_CONFIG_SPECIAL_VAP:
10596 		dp_monitor_pdev_config_scan_spcl_vap(pdev,
10597 					val.cdp_pdev_param_config_special_vap);
10598 		dp_monitor_vdev_set_monitor_mode_buf_rings(pdev);
10599 		break;
10600 	case CDP_RESET_SCAN_SPCL_VAP_STATS_ENABLE:
10601 		dp_monitor_pdev_reset_scan_spcl_vap_stats_enable(pdev,
10602 				val.cdp_pdev_param_reset_scan_spcl_vap_stats_enable);
10603 		break;
10604 	case CDP_CONFIG_ENHANCED_STATS_ENABLE:
10605 		pdev->enhanced_stats_en = val.cdp_pdev_param_enhanced_stats_enable;
10606 		break;
10607 	case CDP_ISOLATION:
10608 		pdev->isolation = val.cdp_pdev_param_isolation;
10609 		break;
10610 	case CDP_CONFIG_UNDECODED_METADATA_CAPTURE_ENABLE:
10611 		return dp_monitor_config_undecoded_metadata_capture(pdev,
10612 				val.cdp_pdev_param_undecoded_metadata_enable);
10613 		break;
10614 	default:
10615 		return QDF_STATUS_E_INVAL;
10616 	}
10617 	return QDF_STATUS_SUCCESS;
10618 }
10619 
10620 #ifdef QCA_UNDECODED_METADATA_SUPPORT
10621 static
10622 QDF_STATUS dp_set_pdev_phyrx_error_mask(struct cdp_soc_t *cdp_soc,
10623 					uint8_t pdev_id, uint32_t mask,
10624 					uint32_t mask_cont)
10625 {
10626 	struct dp_pdev *pdev =
10627 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
10628 						   pdev_id);
10629 
10630 	if (!pdev)
10631 		return QDF_STATUS_E_FAILURE;
10632 
10633 	return dp_monitor_config_undecoded_metadata_phyrx_error_mask(pdev,
10634 				mask, mask_cont);
10635 }
10636 
10637 static
10638 QDF_STATUS dp_get_pdev_phyrx_error_mask(struct cdp_soc_t *cdp_soc,
10639 					uint8_t pdev_id, uint32_t *mask,
10640 					uint32_t *mask_cont)
10641 {
10642 	struct dp_pdev *pdev =
10643 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
10644 						   pdev_id);
10645 
10646 	if (!pdev)
10647 		return QDF_STATUS_E_FAILURE;
10648 
10649 	return dp_monitor_get_undecoded_metadata_phyrx_error_mask(pdev,
10650 				mask, mask_cont);
10651 }
10652 #endif
10653 
10654 #ifdef QCA_PEER_EXT_STATS
10655 static void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
10656 					  qdf_nbuf_t nbuf)
10657 {
10658 	struct dp_peer *peer = NULL;
10659 	uint16_t peer_id, ring_id;
10660 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
10661 	struct dp_peer_delay_stats *delay_stats = NULL;
10662 
10663 	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
10664 	if (peer_id > soc->max_peer_id)
10665 		return;
10666 
10667 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
10668 	if (qdf_unlikely(!peer))
10669 		return;
10670 
10671 	if (qdf_unlikely(!peer->txrx_peer)) {
10672 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10673 		return;
10674 	}
10675 
10676 	if (qdf_likely(peer->txrx_peer->delay_stats)) {
10677 		delay_stats = peer->txrx_peer->delay_stats;
10678 		ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
10679 		dp_rx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
10680 					nbuf);
10681 	}
10682 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10683 }
10684 #else
10685 static inline void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
10686 						 qdf_nbuf_t nbuf)
10687 {
10688 }
10689 #endif
10690 
10691 /*
10692  * dp_calculate_delay_stats: function to get rx delay stats
10693  * @cdp_soc: DP soc handle
10694  * @vdev_id: id of DP vdev handle
10695  * @nbuf: skb
10696  *
10697  * Return: QDF_STATUS
10698  */
10699 static QDF_STATUS
10700 dp_calculate_delay_stats(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
10701 			 qdf_nbuf_t nbuf)
10702 {
10703 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
10704 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10705 						     DP_MOD_ID_CDP);
10706 
10707 	if (!vdev)
10708 		return QDF_STATUS_SUCCESS;
10709 
10710 	if (vdev->pdev->delay_stats_flag)
10711 		dp_rx_compute_delay(vdev, nbuf);
10712 	else
10713 		dp_rx_update_peer_delay_stats(soc, nbuf);
10714 
10715 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10716 	return QDF_STATUS_SUCCESS;
10717 }
10718 
10719 /*
10720  * dp_get_vdev_param: function to get parameters from vdev
10721  * @cdp_soc : DP soc handle
10722  * @vdev_id: id of DP vdev handle
10723  * @param: parameter type to get value
10724  * @val: buffer address
10725  *
10726  * return: status
10727  */
10728 static QDF_STATUS dp_get_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
10729 				    enum cdp_vdev_param_type param,
10730 				    cdp_config_param_type *val)
10731 {
10732 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
10733 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10734 						     DP_MOD_ID_CDP);
10735 
10736 	if (!vdev)
10737 		return QDF_STATUS_E_FAILURE;
10738 
10739 	switch (param) {
10740 	case CDP_ENABLE_WDS:
10741 		val->cdp_vdev_param_wds = vdev->wds_enabled;
10742 		break;
10743 	case CDP_ENABLE_MEC:
10744 		val->cdp_vdev_param_mec = vdev->mec_enabled;
10745 		break;
10746 	case CDP_ENABLE_DA_WAR:
10747 		val->cdp_vdev_param_da_war = vdev->pdev->soc->da_war_enabled;
10748 		break;
10749 	case CDP_ENABLE_IGMP_MCAST_EN:
10750 		val->cdp_vdev_param_igmp_mcast_en = vdev->igmp_mcast_enhanc_en;
10751 		break;
10752 	case CDP_ENABLE_MCAST_EN:
10753 		val->cdp_vdev_param_mcast_en = vdev->mcast_enhancement_en;
10754 		break;
10755 	case CDP_ENABLE_HLOS_TID_OVERRIDE:
10756 		val->cdp_vdev_param_hlos_tid_override =
10757 			    dp_vdev_get_hlos_tid_override((struct cdp_vdev *)vdev);
10758 		break;
10759 	case CDP_ENABLE_PEER_AUTHORIZE:
10760 		val->cdp_vdev_param_peer_authorize =
10761 			    vdev->peer_authorize;
10762 		break;
10763 	case CDP_TX_ENCAP_TYPE:
10764 		val->cdp_vdev_param_tx_encap = vdev->tx_encap_type;
10765 		break;
10766 	case CDP_ENABLE_CIPHER:
10767 		val->cdp_vdev_param_cipher_en = vdev->sec_type;
10768 		break;
10769 #ifdef WLAN_SUPPORT_MESH_LATENCY
10770 	case CDP_ENABLE_PEER_TID_LATENCY:
10771 		val->cdp_vdev_param_peer_tid_latency_enable =
10772 			vdev->peer_tid_latency_enabled;
10773 		break;
10774 	case CDP_SET_VAP_MESH_TID:
10775 		val->cdp_vdev_param_mesh_tid =
10776 				vdev->mesh_tid_latency_config.latency_tid;
10777 		break;
10778 #endif
10779 	case CDP_DROP_3ADDR_MCAST:
10780 		val->cdp_drop_3addr_mcast = vdev->drop_3addr_mcast;
10781 		break;
10782 	default:
10783 		dp_cdp_err("%pK: param value %d is wrong",
10784 			   soc, param);
10785 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10786 		return QDF_STATUS_E_FAILURE;
10787 	}
10788 
10789 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10790 	return QDF_STATUS_SUCCESS;
10791 }
10792 
10793 /*
10794  * dp_set_vdev_param: function to set parameters in vdev
10795  * @cdp_soc : DP soc handle
10796  * @vdev_id: id of DP vdev handle
10797  * @param: parameter type to get value
10798  * @val: value
10799  *
10800  * return: QDF_STATUS
10801  */
10802 static QDF_STATUS
10803 dp_set_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
10804 		  enum cdp_vdev_param_type param, cdp_config_param_type val)
10805 {
10806 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
10807 	struct dp_vdev *vdev =
10808 		dp_vdev_get_ref_by_id(dsoc, vdev_id, DP_MOD_ID_CDP);
10809 	uint32_t var = 0;
10810 
10811 	if (!vdev)
10812 		return QDF_STATUS_E_FAILURE;
10813 
10814 	switch (param) {
10815 	case CDP_ENABLE_WDS:
10816 		dp_cdp_err("%pK: wds_enable %d for vdev(%pK) id(%d)\n",
10817 			   dsoc, val.cdp_vdev_param_wds, vdev, vdev->vdev_id);
10818 		vdev->wds_enabled = val.cdp_vdev_param_wds;
10819 		break;
10820 	case CDP_ENABLE_MEC:
10821 		dp_cdp_err("%pK: mec_enable %d for vdev(%pK) id(%d)\n",
10822 			   dsoc, val.cdp_vdev_param_mec, vdev, vdev->vdev_id);
10823 		vdev->mec_enabled = val.cdp_vdev_param_mec;
10824 		break;
10825 	case CDP_ENABLE_DA_WAR:
10826 		dp_cdp_err("%pK: da_war_enable %d for vdev(%pK) id(%d)\n",
10827 			   dsoc, val.cdp_vdev_param_da_war, vdev, vdev->vdev_id);
10828 		vdev->pdev->soc->da_war_enabled = val.cdp_vdev_param_da_war;
10829 		dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
10830 					     vdev->pdev->soc));
10831 		break;
10832 	case CDP_ENABLE_NAWDS:
10833 		vdev->nawds_enabled = val.cdp_vdev_param_nawds;
10834 		break;
10835 	case CDP_ENABLE_MCAST_EN:
10836 		vdev->mcast_enhancement_en = val.cdp_vdev_param_mcast_en;
10837 		break;
10838 	case CDP_ENABLE_IGMP_MCAST_EN:
10839 		vdev->igmp_mcast_enhanc_en = val.cdp_vdev_param_igmp_mcast_en;
10840 		break;
10841 	case CDP_ENABLE_PROXYSTA:
10842 		vdev->proxysta_vdev = val.cdp_vdev_param_proxysta;
10843 		break;
10844 	case CDP_UPDATE_TDLS_FLAGS:
10845 		vdev->tdls_link_connected = val.cdp_vdev_param_tdls_flags;
10846 		break;
10847 	case CDP_CFG_WDS_AGING_TIMER:
10848 		var = val.cdp_vdev_param_aging_tmr;
10849 		if (!var)
10850 			qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
10851 		else if (var != vdev->wds_aging_timer_val)
10852 			qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, var);
10853 
10854 		vdev->wds_aging_timer_val = var;
10855 		break;
10856 	case CDP_ENABLE_AP_BRIDGE:
10857 		if (wlan_op_mode_sta != vdev->opmode)
10858 			vdev->ap_bridge_enabled = val.cdp_vdev_param_ap_brdg_en;
10859 		else
10860 			vdev->ap_bridge_enabled = false;
10861 		break;
10862 	case CDP_ENABLE_CIPHER:
10863 		vdev->sec_type = val.cdp_vdev_param_cipher_en;
10864 		break;
10865 	case CDP_ENABLE_QWRAP_ISOLATION:
10866 		vdev->isolation_vdev = val.cdp_vdev_param_qwrap_isolation;
10867 		break;
10868 	case CDP_UPDATE_MULTIPASS:
10869 		vdev->multipass_en = val.cdp_vdev_param_update_multipass;
10870 		break;
10871 	case CDP_TX_ENCAP_TYPE:
10872 		vdev->tx_encap_type = val.cdp_vdev_param_tx_encap;
10873 		break;
10874 	case CDP_RX_DECAP_TYPE:
10875 		vdev->rx_decap_type = val.cdp_vdev_param_rx_decap;
10876 		break;
10877 	case CDP_TID_VDEV_PRTY:
10878 		vdev->tidmap_prty = val.cdp_vdev_param_tidmap_prty;
10879 		break;
10880 	case CDP_TIDMAP_TBL_ID:
10881 		vdev->tidmap_tbl_id = val.cdp_vdev_param_tidmap_tbl_id;
10882 		break;
10883 #ifdef MESH_MODE_SUPPORT
10884 	case CDP_MESH_RX_FILTER:
10885 		dp_vdev_set_mesh_rx_filter((struct cdp_vdev *)vdev,
10886 					   val.cdp_vdev_param_mesh_rx_filter);
10887 		break;
10888 	case CDP_MESH_MODE:
10889 		dp_vdev_set_mesh_mode((struct cdp_vdev *)vdev,
10890 				      val.cdp_vdev_param_mesh_mode);
10891 		break;
10892 #endif
10893 	case CDP_ENABLE_HLOS_TID_OVERRIDE:
10894 		dp_info("vdev_id %d enable hlod tid override %d", vdev_id,
10895 			val.cdp_vdev_param_hlos_tid_override);
10896 		dp_vdev_set_hlos_tid_override(vdev,
10897 				val.cdp_vdev_param_hlos_tid_override);
10898 		break;
10899 #ifdef QCA_SUPPORT_WDS_EXTENDED
10900 	case CDP_CFG_WDS_EXT:
10901 		if (vdev->opmode == wlan_op_mode_ap)
10902 			vdev->wds_ext_enabled = val.cdp_vdev_param_wds_ext;
10903 		break;
10904 #endif
10905 	case CDP_ENABLE_PEER_AUTHORIZE:
10906 		vdev->peer_authorize = val.cdp_vdev_param_peer_authorize;
10907 		break;
10908 #ifdef WLAN_SUPPORT_MESH_LATENCY
10909 	case CDP_ENABLE_PEER_TID_LATENCY:
10910 		dp_info("vdev_id %d enable peer tid latency %d", vdev_id,
10911 			val.cdp_vdev_param_peer_tid_latency_enable);
10912 		vdev->peer_tid_latency_enabled =
10913 			val.cdp_vdev_param_peer_tid_latency_enable;
10914 		break;
10915 	case CDP_SET_VAP_MESH_TID:
10916 		dp_info("vdev_id %d enable peer tid latency %d", vdev_id,
10917 			val.cdp_vdev_param_mesh_tid);
10918 		vdev->mesh_tid_latency_config.latency_tid
10919 				= val.cdp_vdev_param_mesh_tid;
10920 		break;
10921 #endif
10922 #ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE
10923 	case CDP_SKIP_BAR_UPDATE_AP:
10924 		dp_info("vdev_id %d skip BAR update: %u", vdev_id,
10925 			val.cdp_skip_bar_update);
10926 		vdev->skip_bar_update = val.cdp_skip_bar_update;
10927 		vdev->skip_bar_update_last_ts = 0;
10928 		break;
10929 #endif
10930 	case CDP_DROP_3ADDR_MCAST:
10931 		dp_info("vdev_id %d drop 3 addr mcast :%d", vdev_id,
10932 			val.cdp_drop_3addr_mcast);
10933 		vdev->drop_3addr_mcast = val.cdp_drop_3addr_mcast;
10934 		break;
10935 	case CDP_ENABLE_WRAP:
10936 		vdev->wrap_vdev = val.cdp_vdev_param_wrap;
10937 		break;
10938 #ifdef DP_TRAFFIC_END_INDICATION
10939 	case CDP_ENABLE_TRAFFIC_END_INDICATION:
10940 		vdev->traffic_end_ind_en = val.cdp_vdev_param_traffic_end_ind;
10941 		break;
10942 #endif
10943 	default:
10944 		break;
10945 	}
10946 
10947 	dp_tx_vdev_update_search_flags((struct dp_vdev *)vdev);
10948 	dsoc->arch_ops.txrx_set_vdev_param(dsoc, vdev, param, val);
10949 
10950 	/* Update PDEV flags as VDEV flags are updated */
10951 	dp_pdev_update_fast_rx_flag(dsoc, vdev->pdev);
10952 	dp_vdev_unref_delete(dsoc, vdev, DP_MOD_ID_CDP);
10953 
10954 	return QDF_STATUS_SUCCESS;
10955 }
10956 
10957 /*
10958  * dp_set_psoc_param: function to set parameters in psoc
10959  * @cdp_soc : DP soc handle
10960  * @param: parameter type to be set
10961  * @val: value of parameter to be set
10962  *
10963  * return: QDF_STATUS
10964  */
10965 static QDF_STATUS
10966 dp_set_psoc_param(struct cdp_soc_t *cdp_soc,
10967 		  enum cdp_psoc_param_type param, cdp_config_param_type val)
10968 {
10969 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
10970 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->wlan_cfg_ctx;
10971 
10972 	switch (param) {
10973 	case CDP_ENABLE_RATE_STATS:
10974 		soc->peerstats_enabled = val.cdp_psoc_param_en_rate_stats;
10975 		break;
10976 	case CDP_SET_NSS_CFG:
10977 		wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx,
10978 					    val.cdp_psoc_param_en_nss_cfg);
10979 		/*
10980 		 * TODO: masked out based on the per offloaded radio
10981 		 */
10982 		switch (val.cdp_psoc_param_en_nss_cfg) {
10983 		case dp_nss_cfg_default:
10984 			break;
10985 		case dp_nss_cfg_first_radio:
10986 		/*
10987 		 * This configuration is valid for single band radio which
10988 		 * is also NSS offload.
10989 		 */
10990 		case dp_nss_cfg_dbdc:
10991 		case dp_nss_cfg_dbtc:
10992 			wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
10993 			wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
10994 			wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
10995 			wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
10996 			break;
10997 		default:
10998 			dp_cdp_err("%pK: Invalid offload config %d",
10999 				   soc, val.cdp_psoc_param_en_nss_cfg);
11000 		}
11001 
11002 			dp_cdp_err("%pK: nss-wifi<0> nss config is enabled"
11003 				   , soc);
11004 		break;
11005 	case CDP_SET_PREFERRED_HW_MODE:
11006 		soc->preferred_hw_mode = val.cdp_psoc_param_preferred_hw_mode;
11007 		break;
11008 	case CDP_IPA_ENABLE:
11009 		soc->wlan_cfg_ctx->ipa_enabled = val.cdp_ipa_enabled;
11010 		break;
11011 	case CDP_CFG_VDEV_STATS_HW_OFFLOAD:
11012 		wlan_cfg_set_vdev_stats_hw_offload_config(wlan_cfg_ctx,
11013 				val.cdp_psoc_param_vdev_stats_hw_offload);
11014 		break;
11015 	case CDP_SAWF_ENABLE:
11016 		wlan_cfg_set_sawf_config(wlan_cfg_ctx, val.cdp_sawf_enabled);
11017 		break;
11018 	case CDP_UMAC_RST_SKEL_ENABLE:
11019 		dp_umac_rst_skel_enable_update(soc, val.cdp_umac_rst_skel);
11020 		break;
11021 	case CDP_SAWF_STATS:
11022 		wlan_cfg_set_sawf_stats_config(wlan_cfg_ctx,
11023 					       val.cdp_sawf_stats);
11024 		break;
11025 	default:
11026 		break;
11027 	}
11028 
11029 	return QDF_STATUS_SUCCESS;
11030 }
11031 
11032 /*
11033  * dp_get_psoc_param: function to get parameters in soc
11034  * @cdp_soc : DP soc handle
11035  * @param: parameter type to be set
11036  * @val: address of buffer
11037  *
11038  * return: status
11039  */
11040 static QDF_STATUS dp_get_psoc_param(struct cdp_soc_t *cdp_soc,
11041 				    enum cdp_psoc_param_type param,
11042 				    cdp_config_param_type *val)
11043 {
11044 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
11045 
11046 	if (!soc)
11047 		return QDF_STATUS_E_FAILURE;
11048 
11049 	switch (param) {
11050 	case CDP_CFG_PEER_EXT_STATS:
11051 		val->cdp_psoc_param_pext_stats =
11052 			wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
11053 		break;
11054 	case CDP_CFG_VDEV_STATS_HW_OFFLOAD:
11055 		val->cdp_psoc_param_vdev_stats_hw_offload =
11056 			wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx);
11057 		break;
11058 	case CDP_UMAC_RST_SKEL_ENABLE:
11059 		val->cdp_umac_rst_skel = dp_umac_rst_skel_enable_get(soc);
11060 		break;
11061 	case CDP_PPEDS_ENABLE:
11062 		val->cdp_psoc_param_ppeds_enabled =
11063 			wlan_cfg_get_dp_soc_is_ppe_enabled(soc->wlan_cfg_ctx);
11064 		break;
11065 	default:
11066 		dp_warn("Invalid param");
11067 		break;
11068 	}
11069 
11070 	return QDF_STATUS_SUCCESS;
11071 }
11072 
11073 /*
11074  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
11075  * @soc: DP_SOC handle
11076  * @vdev_id: id of DP_VDEV handle
11077  * @map_id:ID of map that needs to be updated
11078  *
11079  * Return: QDF_STATUS
11080  */
11081 static QDF_STATUS dp_set_vdev_dscp_tid_map_wifi3(ol_txrx_soc_handle cdp_soc,
11082 						 uint8_t vdev_id,
11083 						 uint8_t map_id)
11084 {
11085 	cdp_config_param_type val;
11086 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
11087 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11088 						     DP_MOD_ID_CDP);
11089 	if (vdev) {
11090 		vdev->dscp_tid_map_id = map_id;
11091 		val.cdp_vdev_param_dscp_tid_map_id = map_id;
11092 		soc->arch_ops.txrx_set_vdev_param(soc,
11093 						  vdev,
11094 						  CDP_UPDATE_DSCP_TO_TID_MAP,
11095 						  val);
11096 		/* Updatr flag for transmit tid classification */
11097 		if (vdev->dscp_tid_map_id < soc->num_hw_dscp_tid_map)
11098 			vdev->skip_sw_tid_classification |=
11099 				DP_TX_HW_DSCP_TID_MAP_VALID;
11100 		else
11101 			vdev->skip_sw_tid_classification &=
11102 				~DP_TX_HW_DSCP_TID_MAP_VALID;
11103 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11104 		return QDF_STATUS_SUCCESS;
11105 	}
11106 
11107 	return QDF_STATUS_E_FAILURE;
11108 }
11109 
11110 #ifdef DP_RATETABLE_SUPPORT
11111 static int dp_txrx_get_ratekbps(int preamb, int mcs,
11112 				int htflag, int gintval)
11113 {
11114 	uint32_t rix;
11115 	uint16_t ratecode;
11116 	enum cdp_punctured_modes punc_mode = NO_PUNCTURE;
11117 
11118 	return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1,
11119 			       (uint8_t)preamb, 1, punc_mode,
11120 			       &rix, &ratecode);
11121 }
11122 #else
11123 static int dp_txrx_get_ratekbps(int preamb, int mcs,
11124 				int htflag, int gintval)
11125 {
11126 	return 0;
11127 }
11128 #endif
11129 
11130 /* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
11131  * @soc: DP soc handle
11132  * @pdev_id: id of DP pdev handle
11133  * @pdev_stats: buffer to copy to
11134  *
11135  * return : status success/failure
11136  */
11137 static QDF_STATUS
11138 dp_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
11139 		       struct cdp_pdev_stats *pdev_stats)
11140 {
11141 	struct dp_pdev *pdev =
11142 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11143 						   pdev_id);
11144 	if (!pdev)
11145 		return QDF_STATUS_E_FAILURE;
11146 
11147 	dp_aggregate_pdev_stats(pdev);
11148 
11149 	qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats));
11150 	return QDF_STATUS_SUCCESS;
11151 }
11152 
11153 /* dp_txrx_update_vdev_me_stats(): Update vdev ME stats sent from CDP
11154  * @vdev: DP vdev handle
11155  * @buf: buffer containing specific stats structure
11156  *
11157  * Returns: void
11158  */
11159 static void dp_txrx_update_vdev_me_stats(struct dp_vdev *vdev,
11160 					 void *buf)
11161 {
11162 	struct cdp_tx_ingress_stats *host_stats = NULL;
11163 
11164 	if (!buf) {
11165 		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
11166 		return;
11167 	}
11168 	host_stats = (struct cdp_tx_ingress_stats *)buf;
11169 
11170 	DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt,
11171 			 host_stats->mcast_en.mcast_pkt.num,
11172 			 host_stats->mcast_en.mcast_pkt.bytes);
11173 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error,
11174 		     host_stats->mcast_en.dropped_map_error);
11175 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_self_mac,
11176 		     host_stats->mcast_en.dropped_self_mac);
11177 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_send_fail,
11178 		     host_stats->mcast_en.dropped_send_fail);
11179 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast,
11180 		     host_stats->mcast_en.ucast);
11181 	DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc,
11182 		     host_stats->mcast_en.fail_seg_alloc);
11183 	DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail,
11184 		     host_stats->mcast_en.clone_fail);
11185 }
11186 
11187 /* dp_txrx_update_vdev_igmp_me_stats(): Update vdev IGMP ME stats sent from CDP
11188  * @vdev: DP vdev handle
11189  * @buf: buffer containing specific stats structure
11190  *
11191  * Returns: void
11192  */
11193 static void dp_txrx_update_vdev_igmp_me_stats(struct dp_vdev *vdev,
11194 					      void *buf)
11195 {
11196 	struct cdp_tx_ingress_stats *host_stats = NULL;
11197 
11198 	if (!buf) {
11199 		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
11200 		return;
11201 	}
11202 	host_stats = (struct cdp_tx_ingress_stats *)buf;
11203 
11204 	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_rcvd,
11205 		     host_stats->igmp_mcast_en.igmp_rcvd);
11206 	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_ucast_converted,
11207 		     host_stats->igmp_mcast_en.igmp_ucast_converted);
11208 }
11209 
11210 /* dp_txrx_update_vdev_host_stats(): Update stats sent through CDP
11211  * @soc: DP soc handle
11212  * @vdev_id: id of DP vdev handle
11213  * @buf: buffer containing specific stats structure
11214  * @stats_id: stats type
11215  *
11216  * Returns: QDF_STATUS
11217  */
11218 static QDF_STATUS dp_txrx_update_vdev_host_stats(struct cdp_soc_t *soc_hdl,
11219 						 uint8_t vdev_id,
11220 						 void *buf,
11221 						 uint16_t stats_id)
11222 {
11223 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11224 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11225 						     DP_MOD_ID_CDP);
11226 
11227 	if (!vdev) {
11228 		dp_cdp_err("%pK: Invalid vdev handle", soc);
11229 		return QDF_STATUS_E_FAILURE;
11230 	}
11231 
11232 	switch (stats_id) {
11233 	case DP_VDEV_STATS_PKT_CNT_ONLY:
11234 		break;
11235 	case DP_VDEV_STATS_TX_ME:
11236 		dp_txrx_update_vdev_me_stats(vdev, buf);
11237 		dp_txrx_update_vdev_igmp_me_stats(vdev, buf);
11238 		break;
11239 	default:
11240 		qdf_info("Invalid stats_id %d", stats_id);
11241 		break;
11242 	}
11243 
11244 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11245 	return QDF_STATUS_SUCCESS;
11246 }
11247 
11248 /* dp_txrx_get_peer_stats - will return cdp_peer_stats
11249  * @soc: soc handle
11250  * @vdev_id: id of vdev handle
11251  * @peer_mac: mac of DP_PEER handle
11252  * @peer_stats: buffer to copy to
11253  * return : status success/failure
11254  */
11255 static QDF_STATUS
11256 dp_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
11257 		       uint8_t *peer_mac, struct cdp_peer_stats *peer_stats)
11258 {
11259 	struct dp_peer *peer = NULL;
11260 	struct cdp_peer_info peer_info = { 0 };
11261 
11262 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac, false,
11263 				 CDP_WILD_PEER_TYPE);
11264 
11265 	peer = dp_peer_hash_find_wrapper((struct dp_soc *)soc, &peer_info,
11266 					 DP_MOD_ID_CDP);
11267 
11268 	qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats));
11269 
11270 	if (!peer)
11271 		return QDF_STATUS_E_FAILURE;
11272 
11273 	dp_get_peer_stats(peer, peer_stats);
11274 
11275 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11276 
11277 	return QDF_STATUS_SUCCESS;
11278 }
11279 
11280 /* dp_txrx_get_peer_stats_param - will return specified cdp_peer_stats
11281  * @param soc - soc handle
11282  * @param vdev_id - vdev_id of vdev object
11283  * @param peer_mac - mac address of the peer
11284  * @param type - enum of required stats
11285  * @param buf - buffer to hold the value
11286  * return : status success/failure
11287  */
11288 static QDF_STATUS
11289 dp_txrx_get_peer_stats_param(struct cdp_soc_t *soc, uint8_t vdev_id,
11290 			     uint8_t *peer_mac, enum cdp_peer_stats_type type,
11291 			     cdp_peer_stats_param_t *buf)
11292 {
11293 	QDF_STATUS ret;
11294 	struct dp_peer *peer = NULL;
11295 	struct cdp_peer_info peer_info = { 0 };
11296 
11297 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac, false,
11298 				 CDP_WILD_PEER_TYPE);
11299 
11300 	peer = dp_peer_hash_find_wrapper((struct dp_soc *)soc, &peer_info,
11301 				         DP_MOD_ID_CDP);
11302 
11303 	if (!peer) {
11304 		dp_peer_err("%pK: Invalid Peer for Mac " QDF_MAC_ADDR_FMT,
11305 			    soc, QDF_MAC_ADDR_REF(peer_mac));
11306 		return QDF_STATUS_E_FAILURE;
11307 	}
11308 
11309 	if (type >= cdp_peer_per_pkt_stats_min &&
11310 	    type < cdp_peer_per_pkt_stats_max) {
11311 		ret = dp_txrx_get_peer_per_pkt_stats_param(peer, type, buf);
11312 	} else if (type >= cdp_peer_extd_stats_min &&
11313 		   type < cdp_peer_extd_stats_max) {
11314 		ret = dp_txrx_get_peer_extd_stats_param(peer, type, buf);
11315 	} else {
11316 		dp_err("%pK: Invalid stat type requested", soc);
11317 		ret = QDF_STATUS_E_FAILURE;
11318 	}
11319 
11320 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11321 
11322 	return ret;
11323 }
11324 
11325 /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
11326  * @soc: soc handle
11327  * @vdev_id: id of vdev handle
11328  * @peer_mac: mac of DP_PEER handle
11329  *
11330  * return : QDF_STATUS
11331  */
11332 #ifdef WLAN_FEATURE_11BE_MLO
11333 static QDF_STATUS
11334 dp_txrx_reset_peer_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
11335 			 uint8_t *peer_mac)
11336 {
11337 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11338 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11339 	struct dp_peer *peer =
11340 			dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
11341 						       vdev_id, DP_MOD_ID_CDP);
11342 
11343 	if (!peer)
11344 		return QDF_STATUS_E_FAILURE;
11345 
11346 	DP_STATS_CLR(peer);
11347 	dp_txrx_peer_stats_clr(peer->txrx_peer);
11348 
11349 	if (IS_MLO_DP_MLD_PEER(peer)) {
11350 		uint8_t i;
11351 		struct dp_peer *link_peer;
11352 		struct dp_soc *link_peer_soc;
11353 		struct dp_mld_link_peers link_peers_info;
11354 
11355 		dp_get_link_peers_ref_from_mld_peer(soc, peer,
11356 						    &link_peers_info,
11357 						    DP_MOD_ID_CDP);
11358 		for (i = 0; i < link_peers_info.num_links; i++) {
11359 			link_peer = link_peers_info.link_peers[i];
11360 			link_peer_soc = link_peer->vdev->pdev->soc;
11361 
11362 			DP_STATS_CLR(link_peer);
11363 			dp_monitor_peer_reset_stats(link_peer_soc, link_peer);
11364 		}
11365 
11366 		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
11367 	} else {
11368 		dp_monitor_peer_reset_stats(soc, peer);
11369 	}
11370 
11371 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11372 
11373 	return status;
11374 }
11375 #else
11376 static QDF_STATUS
11377 dp_txrx_reset_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
11378 			 uint8_t *peer_mac)
11379 {
11380 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11381 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
11382 						      peer_mac, 0, vdev_id,
11383 						      DP_MOD_ID_CDP);
11384 
11385 	if (!peer)
11386 		return QDF_STATUS_E_FAILURE;
11387 
11388 	DP_STATS_CLR(peer);
11389 	dp_txrx_peer_stats_clr(peer->txrx_peer);
11390 	dp_monitor_peer_reset_stats((struct dp_soc *)soc, peer);
11391 
11392 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11393 
11394 	return status;
11395 }
11396 #endif
11397 
11398 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
11399  * @vdev_handle: DP_VDEV handle
11400  * @buf: buffer for vdev stats
11401  *
11402  * return : int
11403  */
11404 static int dp_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
11405 				  void *buf, bool is_aggregate)
11406 {
11407 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11408 	struct cdp_vdev_stats *vdev_stats;
11409 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11410 						     DP_MOD_ID_CDP);
11411 
11412 	if (!vdev)
11413 		return 1;
11414 
11415 	vdev_stats = (struct cdp_vdev_stats *)buf;
11416 
11417 	if (is_aggregate) {
11418 		dp_aggregate_vdev_stats(vdev, buf);
11419 	} else {
11420 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
11421 	}
11422 
11423 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11424 	return 0;
11425 }
11426 
11427 /*
11428  * dp_get_total_per(): get total per
11429  * @soc: DP soc handle
11430  * @pdev_id: id of DP_PDEV handle
11431  *
11432  * Return: % error rate using retries per packet and success packets
11433  */
11434 static int dp_get_total_per(struct cdp_soc_t *soc, uint8_t pdev_id)
11435 {
11436 	struct dp_pdev *pdev =
11437 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11438 						   pdev_id);
11439 
11440 	if (!pdev)
11441 		return 0;
11442 
11443 	dp_aggregate_pdev_stats(pdev);
11444 	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
11445 		return 0;
11446 	return ((pdev->stats.tx.retries * 100) /
11447 		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
11448 }
11449 
11450 /*
11451  * dp_txrx_stats_publish(): publish pdev stats into a buffer
11452  * @soc: DP soc handle
11453  * @pdev_id: id of DP_PDEV handle
11454  * @buf: to hold pdev_stats
11455  *
11456  * Return: int
11457  */
11458 static int
11459 dp_txrx_stats_publish(struct cdp_soc_t *soc, uint8_t pdev_id,
11460 		      struct cdp_stats_extd *buf)
11461 {
11462 	struct cdp_txrx_stats_req req = {0,};
11463 	QDF_STATUS status;
11464 	struct dp_pdev *pdev =
11465 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11466 						   pdev_id);
11467 
11468 	if (!pdev)
11469 		return TXRX_STATS_LEVEL_OFF;
11470 
11471 	if (pdev->pending_fw_stats_response)
11472 		return TXRX_STATS_LEVEL_OFF;
11473 
11474 	dp_aggregate_pdev_stats(pdev);
11475 
11476 	pdev->pending_fw_stats_response = true;
11477 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
11478 	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
11479 	pdev->fw_stats_tlv_bitmap_rcvd = 0;
11480 	qdf_event_reset(&pdev->fw_stats_event);
11481 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
11482 				req.param1, req.param2, req.param3, 0,
11483 				req.cookie_val, 0);
11484 
11485 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
11486 	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
11487 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
11488 				req.param1, req.param2, req.param3, 0,
11489 				req.cookie_val, 0);
11490 
11491 	status =
11492 		qdf_wait_single_event(&pdev->fw_stats_event, DP_MAX_SLEEP_TIME);
11493 
11494 	if (status != QDF_STATUS_SUCCESS) {
11495 		if (status == QDF_STATUS_E_TIMEOUT)
11496 			qdf_debug("TIMEOUT_OCCURS");
11497 		pdev->pending_fw_stats_response = false;
11498 		return TXRX_STATS_LEVEL_OFF;
11499 	}
11500 	qdf_mem_copy(buf, &pdev->stats, sizeof(struct cdp_pdev_stats));
11501 	pdev->pending_fw_stats_response = false;
11502 
11503 	return TXRX_STATS_LEVEL;
11504 }
11505 
11506 /*
11507  * dp_get_obss_stats(): Get Pdev OBSS stats from Fw
11508  * @soc: DP soc handle
11509  * @pdev_id: id of DP_PDEV handle
11510  * @buf: to hold pdev obss stats
11511  * @req: Pointer to CDP TxRx stats
11512  *
11513  * Return: status
11514  */
11515 static QDF_STATUS
11516 dp_get_obss_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
11517 		  struct cdp_pdev_obss_pd_stats_tlv *buf,
11518 		  struct cdp_txrx_stats_req *req)
11519 {
11520 	QDF_STATUS status;
11521 	struct dp_pdev *pdev =
11522 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11523 						   pdev_id);
11524 
11525 	if (!pdev)
11526 		return QDF_STATUS_E_INVAL;
11527 
11528 	if (pdev->pending_fw_obss_stats_response)
11529 		return QDF_STATUS_E_AGAIN;
11530 
11531 	pdev->pending_fw_obss_stats_response = true;
11532 	req->stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_OBSS_PD_STATS;
11533 	req->cookie_val = DBG_STATS_COOKIE_HTT_OBSS;
11534 	qdf_event_reset(&pdev->fw_obss_stats_event);
11535 	status = dp_h2t_ext_stats_msg_send(pdev, req->stats, req->param0,
11536 					   req->param1, req->param2,
11537 					   req->param3, 0, req->cookie_val,
11538 					   req->mac_id);
11539 	if (QDF_IS_STATUS_ERROR(status)) {
11540 		pdev->pending_fw_obss_stats_response = false;
11541 		return status;
11542 	}
11543 	status =
11544 		qdf_wait_single_event(&pdev->fw_obss_stats_event,
11545 				      DP_MAX_SLEEP_TIME);
11546 
11547 	if (status != QDF_STATUS_SUCCESS) {
11548 		if (status == QDF_STATUS_E_TIMEOUT)
11549 			qdf_debug("TIMEOUT_OCCURS");
11550 		pdev->pending_fw_obss_stats_response = false;
11551 		return QDF_STATUS_E_TIMEOUT;
11552 	}
11553 	qdf_mem_copy(buf, &pdev->stats.htt_tx_pdev_stats.obss_pd_stats_tlv,
11554 		     sizeof(struct cdp_pdev_obss_pd_stats_tlv));
11555 	pdev->pending_fw_obss_stats_response = false;
11556 	return status;
11557 }
11558 
11559 /*
11560  * dp_clear_pdev_obss_pd_stats(): Clear pdev obss stats
11561  * @soc: DP soc handle
11562  * @pdev_id: id of DP_PDEV handle
11563  *
11564  * Return: status
11565  */
11566 static QDF_STATUS
11567 dp_clear_pdev_obss_pd_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
11568 {
11569 	struct cdp_txrx_stats_req req = {0};
11570 	struct dp_pdev *pdev =
11571 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11572 						   pdev_id);
11573 	uint32_t cookie_val = DBG_STATS_COOKIE_DEFAULT;
11574 
11575 	if (!pdev)
11576 		return QDF_STATUS_E_INVAL;
11577 
11578 	/*
11579 	 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
11580 	 * from param0 to param3 according to below rule:
11581 	 *
11582 	 * PARAM:
11583 	 *   - config_param0 : start_offset (stats type)
11584 	 *   - config_param1 : stats bmask from start offset
11585 	 *   - config_param2 : stats bmask from start offset + 32
11586 	 *   - config_param3 : stats bmask from start offset + 64
11587 	 */
11588 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_RESET;
11589 	req.param0 = HTT_DBG_EXT_STATS_PDEV_OBSS_PD_STATS;
11590 	req.param1 = 0x00000001;
11591 
11592 	return dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
11593 				  req.param1, req.param2, req.param3, 0,
11594 				cookie_val, 0);
11595 }
11596 
11597 /**
11598  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
11599  * @soc: soc handle
11600  * @pdev_id: id of DP_PDEV handle
11601  * @map_id: ID of map that needs to be updated
11602  * @tos: index value in map
11603  * @tid: tid value passed by the user
11604  *
11605  * Return: QDF_STATUS
11606  */
11607 static QDF_STATUS
11608 dp_set_pdev_dscp_tid_map_wifi3(struct cdp_soc_t *soc_handle,
11609 			       uint8_t pdev_id,
11610 			       uint8_t map_id,
11611 			       uint8_t tos, uint8_t tid)
11612 {
11613 	uint8_t dscp;
11614 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
11615 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11616 
11617 	if (!pdev)
11618 		return QDF_STATUS_E_FAILURE;
11619 
11620 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
11621 	pdev->dscp_tid_map[map_id][dscp] = tid;
11622 
11623 	if (map_id < soc->num_hw_dscp_tid_map)
11624 		hal_tx_update_dscp_tid(soc->hal_soc, tid,
11625 				       map_id, dscp);
11626 	else
11627 		return QDF_STATUS_E_FAILURE;
11628 
11629 	return QDF_STATUS_SUCCESS;
11630 }
11631 
11632 #ifdef WLAN_SYSFS_DP_STATS
11633 /*
11634  * dp_sysfs_event_trigger(): Trigger event to wait for firmware
11635  * stats request response.
11636  * @soc: soc handle
11637  * @cookie_val: cookie value
11638  *
11639  * @Return: QDF_STATUS
11640  */
11641 static QDF_STATUS
11642 dp_sysfs_event_trigger(struct dp_soc *soc, uint32_t cookie_val)
11643 {
11644 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11645 	/* wait for firmware response for sysfs stats request */
11646 	if (cookie_val == DBG_SYSFS_STATS_COOKIE) {
11647 		if (!soc) {
11648 			dp_cdp_err("soc is NULL");
11649 			return QDF_STATUS_E_FAILURE;
11650 		}
11651 		/* wait for event completion */
11652 		status = qdf_wait_single_event(&soc->sysfs_config->sysfs_txrx_fw_request_done,
11653 					       WLAN_SYSFS_STAT_REQ_WAIT_MS);
11654 		if (status == QDF_STATUS_SUCCESS)
11655 			dp_cdp_info("sysfs_txrx_fw_request_done event completed");
11656 		else if (status == QDF_STATUS_E_TIMEOUT)
11657 			dp_cdp_warn("sysfs_txrx_fw_request_done event expired");
11658 		else
11659 			dp_cdp_warn("sysfs_txrx_fw_request_done event error code %d", status);
11660 	}
11661 
11662 	return status;
11663 }
11664 #else /* WLAN_SYSFS_DP_STATS */
11665 /*
11666  * dp_sysfs_event_trigger(): Trigger event to wait for firmware
11667  * stats request response.
11668  * @soc: soc handle
11669  * @cookie_val: cookie value
11670  *
11671  * @Return: QDF_STATUS
11672  */
11673 static QDF_STATUS
11674 dp_sysfs_event_trigger(struct dp_soc *soc, uint32_t cookie_val)
11675 {
11676 	return QDF_STATUS_SUCCESS;
11677 }
11678 #endif /* WLAN_SYSFS_DP_STATS */
11679 
11680 /**
11681  * dp_fw_stats_process(): Process TXRX FW stats request.
11682  * @vdev_handle: DP VDEV handle
11683  * @req: stats request
11684  *
11685  * return: QDF_STATUS
11686  */
11687 static QDF_STATUS
11688 dp_fw_stats_process(struct dp_vdev *vdev,
11689 		    struct cdp_txrx_stats_req *req)
11690 {
11691 	struct dp_pdev *pdev = NULL;
11692 	struct dp_soc *soc = NULL;
11693 	uint32_t stats = req->stats;
11694 	uint8_t mac_id = req->mac_id;
11695 	uint32_t cookie_val = DBG_STATS_COOKIE_DEFAULT;
11696 
11697 	if (!vdev) {
11698 		DP_TRACE(NONE, "VDEV not found");
11699 		return QDF_STATUS_E_FAILURE;
11700 	}
11701 
11702 	pdev = vdev->pdev;
11703 	if (!pdev) {
11704 		DP_TRACE(NONE, "PDEV not found");
11705 		return QDF_STATUS_E_FAILURE;
11706 	}
11707 
11708 	soc = pdev->soc;
11709 	if (!soc) {
11710 		DP_TRACE(NONE, "soc not found");
11711 		return QDF_STATUS_E_FAILURE;
11712 	}
11713 
11714 	/* In case request is from host sysfs for displaying stats on console */
11715 	if (req->cookie_val == DBG_SYSFS_STATS_COOKIE)
11716 		cookie_val = DBG_SYSFS_STATS_COOKIE;
11717 
11718 	/*
11719 	 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
11720 	 * from param0 to param3 according to below rule:
11721 	 *
11722 	 * PARAM:
11723 	 *   - config_param0 : start_offset (stats type)
11724 	 *   - config_param1 : stats bmask from start offset
11725 	 *   - config_param2 : stats bmask from start offset + 32
11726 	 *   - config_param3 : stats bmask from start offset + 64
11727 	 */
11728 	if (req->stats == CDP_TXRX_STATS_0) {
11729 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
11730 		req->param1 = 0xFFFFFFFF;
11731 		req->param2 = 0xFFFFFFFF;
11732 		req->param3 = 0xFFFFFFFF;
11733 	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
11734 		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
11735 	}
11736 
11737 	if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT) {
11738 		dp_h2t_ext_stats_msg_send(pdev,
11739 					  HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT,
11740 					  req->param0, req->param1, req->param2,
11741 					  req->param3, 0, cookie_val,
11742 					  mac_id);
11743 	} else {
11744 		dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
11745 					  req->param1, req->param2, req->param3,
11746 					  0, cookie_val, mac_id);
11747 	}
11748 
11749 	dp_sysfs_event_trigger(soc, cookie_val);
11750 
11751 	return QDF_STATUS_SUCCESS;
11752 }
11753 
11754 /**
11755  * dp_txrx_stats_request - function to map to firmware and host stats
11756  * @soc: soc handle
11757  * @vdev_id: virtual device ID
11758  * @req: stats request
11759  *
11760  * Return: QDF_STATUS
11761  */
11762 static
11763 QDF_STATUS dp_txrx_stats_request(struct cdp_soc_t *soc_handle,
11764 				 uint8_t vdev_id,
11765 				 struct cdp_txrx_stats_req *req)
11766 {
11767 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_handle);
11768 	int host_stats;
11769 	int fw_stats;
11770 	enum cdp_stats stats;
11771 	int num_stats;
11772 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11773 						     DP_MOD_ID_CDP);
11774 	QDF_STATUS status = QDF_STATUS_E_INVAL;
11775 
11776 	if (!vdev || !req) {
11777 		dp_cdp_err("%pK: Invalid vdev/req instance", soc);
11778 		status = QDF_STATUS_E_INVAL;
11779 		goto fail0;
11780 	}
11781 
11782 	if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) {
11783 		dp_err("Invalid mac id request");
11784 		status = QDF_STATUS_E_INVAL;
11785 		goto fail0;
11786 	}
11787 
11788 	stats = req->stats;
11789 	if (stats >= CDP_TXRX_MAX_STATS) {
11790 		status = QDF_STATUS_E_INVAL;
11791 		goto fail0;
11792 	}
11793 
11794 	/*
11795 	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
11796 	 *			has to be updated if new FW HTT stats added
11797 	 */
11798 	if (stats > CDP_TXRX_STATS_HTT_MAX)
11799 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
11800 
11801 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
11802 
11803 	if (stats >= num_stats) {
11804 		dp_cdp_err("%pK : Invalid stats option: %d", soc, stats);
11805 		status = QDF_STATUS_E_INVAL;
11806 		goto fail0;
11807 	}
11808 
11809 	req->stats = stats;
11810 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
11811 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
11812 
11813 	dp_info("stats: %u fw_stats_type: %d host_stats: %d",
11814 		stats, fw_stats, host_stats);
11815 
11816 	if (fw_stats != TXRX_FW_STATS_INVALID) {
11817 		/* update request with FW stats type */
11818 		req->stats = fw_stats;
11819 		status = dp_fw_stats_process(vdev, req);
11820 	} else if ((host_stats != TXRX_HOST_STATS_INVALID) &&
11821 			(host_stats <= TXRX_HOST_STATS_MAX))
11822 		status = dp_print_host_stats(vdev, req, soc);
11823 	else
11824 		dp_cdp_info("%pK: Wrong Input for TxRx Stats", soc);
11825 fail0:
11826 	if (vdev)
11827 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11828 	return status;
11829 }
11830 
11831 /*
11832  * dp_txrx_dump_stats() -  Dump statistics
11833  * @value - Statistics option
11834  */
11835 static QDF_STATUS dp_txrx_dump_stats(struct cdp_soc_t *psoc, uint16_t value,
11836 				     enum qdf_stats_verbosity_level level)
11837 {
11838 	struct dp_soc *soc =
11839 		(struct dp_soc *)psoc;
11840 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11841 
11842 	if (!soc) {
11843 		dp_cdp_err("%pK: soc is NULL", soc);
11844 		return QDF_STATUS_E_INVAL;
11845 	}
11846 
11847 	switch (value) {
11848 	case CDP_TXRX_PATH_STATS:
11849 		dp_txrx_path_stats(soc);
11850 		dp_print_soc_interrupt_stats(soc);
11851 		hal_dump_reg_write_stats(soc->hal_soc);
11852 		dp_pdev_print_tx_delay_stats(soc);
11853 		/* Dump usage watermark stats for core TX/RX SRNGs */
11854 		dp_dump_srng_high_wm_stats(soc, (1 << REO_DST));
11855 		dp_print_fisa_stats(soc);
11856 		break;
11857 
11858 	case CDP_RX_RING_STATS:
11859 		dp_print_per_ring_stats(soc);
11860 		break;
11861 
11862 	case CDP_TXRX_TSO_STATS:
11863 		dp_print_tso_stats(soc, level);
11864 		break;
11865 
11866 	case CDP_DUMP_TX_FLOW_POOL_INFO:
11867 		if (level == QDF_STATS_VERBOSITY_LEVEL_HIGH)
11868 			cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
11869 		else
11870 			dp_tx_dump_flow_pool_info_compact(soc);
11871 		break;
11872 
11873 	case CDP_DP_NAPI_STATS:
11874 		dp_print_napi_stats(soc);
11875 		break;
11876 
11877 	case CDP_TXRX_DESC_STATS:
11878 		/* TODO: NOT IMPLEMENTED */
11879 		break;
11880 
11881 	case CDP_DP_RX_FISA_STATS:
11882 		dp_rx_dump_fisa_stats(soc);
11883 		break;
11884 
11885 	case CDP_DP_SWLM_STATS:
11886 		dp_print_swlm_stats(soc);
11887 		break;
11888 
11889 	case CDP_DP_TX_HW_LATENCY_STATS:
11890 		dp_pdev_print_tx_delay_stats(soc);
11891 		break;
11892 
11893 	default:
11894 		status = QDF_STATUS_E_INVAL;
11895 		break;
11896 	}
11897 
11898 	return status;
11899 
11900 }
11901 
11902 #ifdef WLAN_SYSFS_DP_STATS
11903 static
11904 void dp_sysfs_get_stat_type(struct dp_soc *soc, uint32_t *mac_id,
11905 			    uint32_t *stat_type)
11906 {
11907 	qdf_spinlock_acquire(&soc->sysfs_config->rw_stats_lock);
11908 	*stat_type = soc->sysfs_config->stat_type_requested;
11909 	*mac_id   = soc->sysfs_config->mac_id;
11910 
11911 	qdf_spinlock_release(&soc->sysfs_config->rw_stats_lock);
11912 }
11913 
11914 static
11915 void dp_sysfs_update_config_buf_params(struct dp_soc *soc,
11916 				       uint32_t curr_len,
11917 				       uint32_t max_buf_len,
11918 				       char *buf)
11919 {
11920 	qdf_spinlock_acquire(&soc->sysfs_config->sysfs_write_user_buffer);
11921 	/* set sysfs_config parameters */
11922 	soc->sysfs_config->buf = buf;
11923 	soc->sysfs_config->curr_buffer_length = curr_len;
11924 	soc->sysfs_config->max_buffer_length = max_buf_len;
11925 	qdf_spinlock_release(&soc->sysfs_config->sysfs_write_user_buffer);
11926 }
11927 
11928 static
11929 QDF_STATUS dp_sysfs_fill_stats(ol_txrx_soc_handle soc_hdl,
11930 			       char *buf, uint32_t buf_size)
11931 {
11932 	uint32_t mac_id = 0;
11933 	uint32_t stat_type = 0;
11934 	uint32_t fw_stats = 0;
11935 	uint32_t host_stats = 0;
11936 	enum cdp_stats stats;
11937 	struct cdp_txrx_stats_req req;
11938 	uint32_t num_stats;
11939 	struct dp_soc *soc = NULL;
11940 
11941 	if (!soc_hdl) {
11942 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
11943 		return QDF_STATUS_E_INVAL;
11944 	}
11945 
11946 	soc = cdp_soc_t_to_dp_soc(soc_hdl);
11947 
11948 	if (!soc) {
11949 		dp_cdp_err("%pK: soc is NULL", soc);
11950 		return QDF_STATUS_E_INVAL;
11951 	}
11952 
11953 	dp_sysfs_get_stat_type(soc, &mac_id, &stat_type);
11954 
11955 	stats = stat_type;
11956 	if (stats >= CDP_TXRX_MAX_STATS) {
11957 		dp_cdp_info("sysfs stat type requested is invalid");
11958 		return QDF_STATUS_E_INVAL;
11959 	}
11960 	/*
11961 	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
11962 	 *			has to be updated if new FW HTT stats added
11963 	 */
11964 	if (stats > CDP_TXRX_MAX_STATS)
11965 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
11966 
11967 	num_stats = QDF_ARRAY_SIZE(dp_stats_mapping_table);
11968 
11969 	if (stats >= num_stats) {
11970 		dp_cdp_err("%pK : Invalid stats option: %d, max num stats: %d",
11971 				soc, stats, num_stats);
11972 		return QDF_STATUS_E_INVAL;
11973 	}
11974 
11975 	/* build request */
11976 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
11977 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
11978 
11979 	req.stats = stat_type;
11980 	req.mac_id = mac_id;
11981 	/* request stats to be printed */
11982 	qdf_mutex_acquire(&soc->sysfs_config->sysfs_read_lock);
11983 
11984 	if (fw_stats != TXRX_FW_STATS_INVALID) {
11985 		/* update request with FW stats type */
11986 		req.cookie_val = DBG_SYSFS_STATS_COOKIE;
11987 	} else if ((host_stats != TXRX_HOST_STATS_INVALID) &&
11988 			(host_stats <= TXRX_HOST_STATS_MAX)) {
11989 		req.cookie_val = DBG_STATS_COOKIE_DEFAULT;
11990 		soc->sysfs_config->process_id = qdf_get_current_pid();
11991 		soc->sysfs_config->printing_mode = PRINTING_MODE_ENABLED;
11992 	}
11993 
11994 	dp_sysfs_update_config_buf_params(soc, 0, buf_size, buf);
11995 
11996 	dp_txrx_stats_request(soc_hdl, mac_id, &req);
11997 	soc->sysfs_config->process_id = 0;
11998 	soc->sysfs_config->printing_mode = PRINTING_MODE_DISABLED;
11999 
12000 	dp_sysfs_update_config_buf_params(soc, 0, 0, NULL);
12001 
12002 	qdf_mutex_release(&soc->sysfs_config->sysfs_read_lock);
12003 	return QDF_STATUS_SUCCESS;
12004 }
12005 
12006 static
12007 QDF_STATUS dp_sysfs_set_stat_type(ol_txrx_soc_handle soc_hdl,
12008 				  uint32_t stat_type, uint32_t mac_id)
12009 {
12010 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12011 
12012 	if (!soc_hdl) {
12013 		dp_cdp_err("%pK: soc is NULL", soc);
12014 		return QDF_STATUS_E_INVAL;
12015 	}
12016 
12017 	qdf_spinlock_acquire(&soc->sysfs_config->rw_stats_lock);
12018 
12019 	soc->sysfs_config->stat_type_requested = stat_type;
12020 	soc->sysfs_config->mac_id = mac_id;
12021 
12022 	qdf_spinlock_release(&soc->sysfs_config->rw_stats_lock);
12023 
12024 	return QDF_STATUS_SUCCESS;
12025 }
12026 
12027 static
12028 QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl)
12029 {
12030 	struct dp_soc *soc;
12031 	QDF_STATUS status;
12032 
12033 	if (!soc_hdl) {
12034 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
12035 		return QDF_STATUS_E_INVAL;
12036 	}
12037 
12038 	soc = soc_hdl;
12039 
12040 	soc->sysfs_config = qdf_mem_malloc(sizeof(struct sysfs_stats_config));
12041 	if (!soc->sysfs_config) {
12042 		dp_cdp_err("failed to allocate memory for sysfs_config no memory");
12043 		return QDF_STATUS_E_NOMEM;
12044 	}
12045 
12046 	status = qdf_event_create(&soc->sysfs_config->sysfs_txrx_fw_request_done);
12047 	/* create event for fw stats request from sysfs */
12048 	if (status != QDF_STATUS_SUCCESS) {
12049 		dp_cdp_err("failed to create event sysfs_txrx_fw_request_done");
12050 		qdf_mem_free(soc->sysfs_config);
12051 		soc->sysfs_config = NULL;
12052 		return QDF_STATUS_E_FAILURE;
12053 	}
12054 
12055 	qdf_spinlock_create(&soc->sysfs_config->rw_stats_lock);
12056 	qdf_mutex_create(&soc->sysfs_config->sysfs_read_lock);
12057 	qdf_spinlock_create(&soc->sysfs_config->sysfs_write_user_buffer);
12058 
12059 	return QDF_STATUS_SUCCESS;
12060 }
12061 
12062 static
12063 QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl)
12064 {
12065 	struct dp_soc *soc;
12066 	QDF_STATUS status;
12067 
12068 	if (!soc_hdl) {
12069 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
12070 		return QDF_STATUS_E_INVAL;
12071 	}
12072 
12073 	soc = soc_hdl;
12074 	if (!soc->sysfs_config) {
12075 		dp_cdp_err("soc->sysfs_config is NULL");
12076 		return QDF_STATUS_E_FAILURE;
12077 	}
12078 
12079 	status = qdf_event_destroy(&soc->sysfs_config->sysfs_txrx_fw_request_done);
12080 	if (status != QDF_STATUS_SUCCESS)
12081 		dp_cdp_err("Failed to destroy event sysfs_txrx_fw_request_done ");
12082 
12083 	qdf_mutex_destroy(&soc->sysfs_config->sysfs_read_lock);
12084 	qdf_spinlock_destroy(&soc->sysfs_config->rw_stats_lock);
12085 	qdf_spinlock_destroy(&soc->sysfs_config->sysfs_write_user_buffer);
12086 
12087 	qdf_mem_free(soc->sysfs_config);
12088 
12089 	return QDF_STATUS_SUCCESS;
12090 }
12091 
12092 #else /* WLAN_SYSFS_DP_STATS */
12093 
12094 static
12095 QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl)
12096 {
12097 	return QDF_STATUS_SUCCESS;
12098 }
12099 
12100 static
12101 QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl)
12102 {
12103 	return QDF_STATUS_SUCCESS;
12104 }
12105 #endif /* WLAN_SYSFS_DP_STATS */
12106 
12107 /**
12108  * dp_txrx_clear_dump_stats() - clear dumpStats
12109  * @soc- soc handle
12110  * @value - stats option
12111  *
12112  * Return: 0 - Success, non-zero - failure
12113  */
12114 static
12115 QDF_STATUS dp_txrx_clear_dump_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12116 				    uint8_t value)
12117 {
12118 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12119 	QDF_STATUS status = QDF_STATUS_SUCCESS;
12120 
12121 	if (!soc) {
12122 		dp_err("soc is NULL");
12123 		return QDF_STATUS_E_INVAL;
12124 	}
12125 
12126 	switch (value) {
12127 	case CDP_TXRX_TSO_STATS:
12128 		dp_txrx_clear_tso_stats(soc);
12129 		break;
12130 
12131 	case CDP_DP_TX_HW_LATENCY_STATS:
12132 		dp_pdev_clear_tx_delay_stats(soc);
12133 		break;
12134 
12135 	default:
12136 		status = QDF_STATUS_E_INVAL;
12137 		break;
12138 	}
12139 
12140 	return status;
12141 }
12142 
12143 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
12144 /**
12145  * dp_update_flow_control_parameters() - API to store datapath
12146  *                            config parameters
12147  * @soc: soc handle
12148  * @cfg: ini parameter handle
12149  *
12150  * Return: void
12151  */
12152 static inline
12153 void dp_update_flow_control_parameters(struct dp_soc *soc,
12154 				struct cdp_config_params *params)
12155 {
12156 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
12157 					params->tx_flow_stop_queue_threshold;
12158 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
12159 					params->tx_flow_start_queue_offset;
12160 }
12161 #else
12162 static inline
12163 void dp_update_flow_control_parameters(struct dp_soc *soc,
12164 				struct cdp_config_params *params)
12165 {
12166 }
12167 #endif
12168 
12169 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
12170 /* Max packet limit for TX Comp packet loop (dp_tx_comp_handler) */
12171 #define DP_TX_COMP_LOOP_PKT_LIMIT_MAX 1024
12172 
12173 /* Max packet limit for RX REAP Loop (dp_rx_process) */
12174 #define DP_RX_REAP_LOOP_PKT_LIMIT_MAX 1024
12175 
12176 static
12177 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
12178 					struct cdp_config_params *params)
12179 {
12180 	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit =
12181 				params->tx_comp_loop_pkt_limit;
12182 
12183 	if (params->tx_comp_loop_pkt_limit < DP_TX_COMP_LOOP_PKT_LIMIT_MAX)
12184 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = true;
12185 	else
12186 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = false;
12187 
12188 	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit =
12189 				params->rx_reap_loop_pkt_limit;
12190 
12191 	if (params->rx_reap_loop_pkt_limit < DP_RX_REAP_LOOP_PKT_LIMIT_MAX)
12192 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = true;
12193 	else
12194 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = false;
12195 
12196 	soc->wlan_cfg_ctx->rx_hp_oos_update_limit =
12197 				params->rx_hp_oos_update_limit;
12198 
12199 	dp_info("tx_comp_loop_pkt_limit %u tx_comp_enable_eol_data_check %u rx_reap_loop_pkt_limit %u rx_enable_eol_data_check %u rx_hp_oos_update_limit %u",
12200 		soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit,
12201 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check,
12202 		soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit,
12203 		soc->wlan_cfg_ctx->rx_enable_eol_data_check,
12204 		soc->wlan_cfg_ctx->rx_hp_oos_update_limit);
12205 }
12206 
12207 static void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
12208 				      uint32_t rx_limit)
12209 {
12210 	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit = tx_limit;
12211 	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit = rx_limit;
12212 }
12213 
12214 #else
12215 static inline
12216 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
12217 					struct cdp_config_params *params)
12218 { }
12219 
12220 static inline
12221 void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
12222 			       uint32_t rx_limit)
12223 {
12224 }
12225 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
12226 
12227 /**
12228  * dp_update_config_parameters() - API to store datapath
12229  *                            config parameters
12230  * @soc: soc handle
12231  * @cfg: ini parameter handle
12232  *
12233  * Return: status
12234  */
12235 static
12236 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
12237 				struct cdp_config_params *params)
12238 {
12239 	struct dp_soc *soc = (struct dp_soc *)psoc;
12240 
12241 	if (!(soc)) {
12242 		dp_cdp_err("%pK: Invalid handle", soc);
12243 		return QDF_STATUS_E_INVAL;
12244 	}
12245 
12246 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
12247 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
12248 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
12249 	soc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload =
12250 				params->p2p_tcp_udp_checksumoffload;
12251 	soc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload =
12252 				params->nan_tcp_udp_checksumoffload;
12253 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
12254 				params->tcp_udp_checksumoffload;
12255 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
12256 	soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
12257 	soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
12258 
12259 	dp_update_rx_soft_irq_limit_params(soc, params);
12260 	dp_update_flow_control_parameters(soc, params);
12261 
12262 	return QDF_STATUS_SUCCESS;
12263 }
12264 
12265 static struct cdp_wds_ops dp_ops_wds = {
12266 	.vdev_set_wds = dp_vdev_set_wds,
12267 #ifdef WDS_VENDOR_EXTENSION
12268 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
12269 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
12270 #endif
12271 };
12272 
12273 /*
12274  * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
12275  * @soc_hdl - datapath soc handle
12276  * @vdev_id - virtual interface id
12277  * @callback - callback function
12278  * @ctxt: callback context
12279  *
12280  */
12281 static void
12282 dp_txrx_data_tx_cb_set(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
12283 		       ol_txrx_data_tx_cb callback, void *ctxt)
12284 {
12285 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12286 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12287 						     DP_MOD_ID_CDP);
12288 
12289 	if (!vdev)
12290 		return;
12291 
12292 	vdev->tx_non_std_data_callback.func = callback;
12293 	vdev->tx_non_std_data_callback.ctxt = ctxt;
12294 
12295 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12296 }
12297 
12298 /**
12299  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
12300  * @soc: datapath soc handle
12301  * @pdev_id: id of datapath pdev handle
12302  *
12303  * Return: opaque pointer to dp txrx handle
12304  */
12305 static void *dp_pdev_get_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id)
12306 {
12307 	struct dp_pdev *pdev =
12308 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
12309 						   pdev_id);
12310 	if (qdf_unlikely(!pdev))
12311 		return NULL;
12312 
12313 	return pdev->dp_txrx_handle;
12314 }
12315 
12316 /**
12317  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
12318  * @soc: datapath soc handle
12319  * @pdev_id: id of datapath pdev handle
12320  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
12321  *
12322  * Return: void
12323  */
12324 static void
12325 dp_pdev_set_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id,
12326 			   void *dp_txrx_hdl)
12327 {
12328 	struct dp_pdev *pdev =
12329 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
12330 						   pdev_id);
12331 
12332 	if (!pdev)
12333 		return;
12334 
12335 	pdev->dp_txrx_handle = dp_txrx_hdl;
12336 }
12337 
12338 /**
12339  * dp_vdev_get_dp_ext_handle() - get dp handle from vdev
12340  * @soc: datapath soc handle
12341  * @vdev_id: vdev id
12342  *
12343  * Return: opaque pointer to dp txrx handle
12344  */
12345 static void *dp_vdev_get_dp_ext_handle(ol_txrx_soc_handle soc_hdl,
12346 				       uint8_t vdev_id)
12347 {
12348 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12349 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12350 						     DP_MOD_ID_CDP);
12351 	void *dp_ext_handle;
12352 
12353 	if (!vdev)
12354 		return NULL;
12355 	dp_ext_handle = vdev->vdev_dp_ext_handle;
12356 
12357 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12358 	return dp_ext_handle;
12359 }
12360 
12361 /**
12362  * dp_vdev_set_dp_ext_handle() - set dp handle in vdev
12363  * @soc: datapath soc handle
12364  * @vdev_id: vdev id
12365  * @size: size of advance dp handle
12366  *
12367  * Return: QDF_STATUS
12368  */
12369 static QDF_STATUS
12370 dp_vdev_set_dp_ext_handle(ol_txrx_soc_handle soc_hdl, uint8_t vdev_id,
12371 			  uint16_t size)
12372 {
12373 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12374 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12375 						     DP_MOD_ID_CDP);
12376 	void *dp_ext_handle;
12377 
12378 	if (!vdev)
12379 		return QDF_STATUS_E_FAILURE;
12380 
12381 	dp_ext_handle = qdf_mem_malloc(size);
12382 
12383 	if (!dp_ext_handle) {
12384 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12385 		return QDF_STATUS_E_FAILURE;
12386 	}
12387 
12388 	vdev->vdev_dp_ext_handle = dp_ext_handle;
12389 
12390 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12391 	return QDF_STATUS_SUCCESS;
12392 }
12393 
12394 /**
12395  * dp_vdev_inform_ll_conn() - Inform vdev to add/delete a latency critical
12396  *			      connection for this vdev
12397  * @soc_hdl: CDP soc handle
12398  * @vdev_id: vdev ID
12399  * @action: Add/Delete action
12400  *
12401  * Returns: QDF_STATUS.
12402  */
12403 static QDF_STATUS
12404 dp_vdev_inform_ll_conn(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
12405 		       enum vdev_ll_conn_actions action)
12406 {
12407 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12408 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12409 						     DP_MOD_ID_CDP);
12410 
12411 	if (!vdev) {
12412 		dp_err("LL connection action for invalid vdev %d", vdev_id);
12413 		return QDF_STATUS_E_FAILURE;
12414 	}
12415 
12416 	switch (action) {
12417 	case CDP_VDEV_LL_CONN_ADD:
12418 		vdev->num_latency_critical_conn++;
12419 		break;
12420 
12421 	case CDP_VDEV_LL_CONN_DEL:
12422 		vdev->num_latency_critical_conn--;
12423 		break;
12424 
12425 	default:
12426 		dp_err("LL connection action invalid %d", action);
12427 		break;
12428 	}
12429 
12430 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12431 	return QDF_STATUS_SUCCESS;
12432 }
12433 
12434 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
12435 /**
12436  * dp_soc_set_swlm_enable() - Enable/Disable SWLM if initialized.
12437  * @soc_hdl: CDP Soc handle
12438  * @value: Enable/Disable value
12439  *
12440  * Returns: QDF_STATUS
12441  */
12442 static QDF_STATUS dp_soc_set_swlm_enable(struct cdp_soc_t *soc_hdl,
12443 					 uint8_t value)
12444 {
12445 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12446 
12447 	if (!soc->swlm.is_init) {
12448 		dp_err("SWLM is not initialized");
12449 		return QDF_STATUS_E_FAILURE;
12450 	}
12451 
12452 	soc->swlm.is_enabled = !!value;
12453 
12454 	return QDF_STATUS_SUCCESS;
12455 }
12456 
12457 /**
12458  * dp_soc_is_swlm_enabled() - Check if SWLM is enabled.
12459  * @soc_hdl: CDP Soc handle
12460  *
12461  * Returns: QDF_STATUS
12462  */
12463 static uint8_t dp_soc_is_swlm_enabled(struct cdp_soc_t *soc_hdl)
12464 {
12465 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12466 
12467 	return soc->swlm.is_enabled;
12468 }
12469 #endif
12470 
12471 /**
12472  * dp_display_srng_info() - Dump the srng HP TP info
12473  * @soc_hdl: CDP Soc handle
12474  *
12475  * This function dumps the SW hp/tp values for the important rings.
12476  * HW hp/tp values are not being dumped, since it can lead to
12477  * READ NOC error when UMAC is in low power state. MCC does not have
12478  * device force wake working yet.
12479  *
12480  * Return: none
12481  */
12482 static void dp_display_srng_info(struct cdp_soc_t *soc_hdl)
12483 {
12484 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12485 	hal_soc_handle_t hal_soc = soc->hal_soc;
12486 	uint32_t hp, tp, i;
12487 
12488 	dp_info("SRNG HP-TP data:");
12489 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
12490 		hal_get_sw_hptp(hal_soc, soc->tcl_data_ring[i].hal_srng,
12491 				&tp, &hp);
12492 		dp_info("TCL DATA ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
12493 
12494 		if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, i) ==
12495 		    INVALID_WBM_RING_NUM)
12496 			continue;
12497 
12498 		hal_get_sw_hptp(hal_soc, soc->tx_comp_ring[i].hal_srng,
12499 				&tp, &hp);
12500 		dp_info("TX comp ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
12501 	}
12502 
12503 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
12504 		hal_get_sw_hptp(hal_soc, soc->reo_dest_ring[i].hal_srng,
12505 				&tp, &hp);
12506 		dp_info("REO DST ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
12507 	}
12508 
12509 	hal_get_sw_hptp(hal_soc, soc->reo_exception_ring.hal_srng, &tp, &hp);
12510 	dp_info("REO exception ring: hp=0x%x, tp=0x%x", hp, tp);
12511 
12512 	hal_get_sw_hptp(hal_soc, soc->rx_rel_ring.hal_srng, &tp, &hp);
12513 	dp_info("WBM RX release ring: hp=0x%x, tp=0x%x", hp, tp);
12514 
12515 	hal_get_sw_hptp(hal_soc, soc->wbm_desc_rel_ring.hal_srng, &tp, &hp);
12516 	dp_info("WBM desc release ring: hp=0x%x, tp=0x%x", hp, tp);
12517 }
12518 
12519 /**
12520  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
12521  * @soc_handle: datapath soc handle
12522  *
12523  * Return: opaque pointer to external dp (non-core DP)
12524  */
12525 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
12526 {
12527 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12528 
12529 	return soc->external_txrx_handle;
12530 }
12531 
12532 /**
12533  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
12534  * @soc_handle: datapath soc handle
12535  * @txrx_handle: opaque pointer to external dp (non-core DP)
12536  *
12537  * Return: void
12538  */
12539 static void
12540 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
12541 {
12542 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12543 
12544 	soc->external_txrx_handle = txrx_handle;
12545 }
12546 
12547 /**
12548  * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping
12549  * @soc_hdl: datapath soc handle
12550  * @pdev_id: id of the datapath pdev handle
12551  * @lmac_id: lmac id
12552  *
12553  * Return: QDF_STATUS
12554  */
12555 static QDF_STATUS
12556 dp_soc_map_pdev_to_lmac
12557 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12558 	 uint32_t lmac_id)
12559 {
12560 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12561 
12562 	wlan_cfg_set_hw_mac_idx(soc->wlan_cfg_ctx,
12563 				pdev_id,
12564 				lmac_id);
12565 
12566 	/*Set host PDEV ID for lmac_id*/
12567 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
12568 			      pdev_id,
12569 			      lmac_id);
12570 
12571 	return QDF_STATUS_SUCCESS;
12572 }
12573 
12574 /**
12575  * dp_soc_handle_pdev_mode_change() - Update pdev to lmac mapping
12576  * @soc_hdl: datapath soc handle
12577  * @pdev_id: id of the datapath pdev handle
12578  * @lmac_id: lmac id
12579  *
12580  * In the event of a dynamic mode change, update the pdev to lmac mapping
12581  *
12582  * Return: QDF_STATUS
12583  */
12584 static QDF_STATUS
12585 dp_soc_handle_pdev_mode_change
12586 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12587 	 uint32_t lmac_id)
12588 {
12589 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12590 	struct dp_vdev *vdev = NULL;
12591 	uint8_t hw_pdev_id, mac_id;
12592 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc,
12593 								  pdev_id);
12594 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
12595 
12596 	if (qdf_unlikely(!pdev))
12597 		return QDF_STATUS_E_FAILURE;
12598 
12599 	pdev->lmac_id = lmac_id;
12600 	pdev->target_pdev_id =
12601 		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
12602 	dp_info(" mode change %d %d\n", pdev->pdev_id, pdev->lmac_id);
12603 
12604 	/*Set host PDEV ID for lmac_id*/
12605 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
12606 			      pdev->pdev_id,
12607 			      lmac_id);
12608 
12609 	hw_pdev_id =
12610 		dp_get_target_pdev_id_for_host_pdev_id(soc,
12611 						       pdev->pdev_id);
12612 
12613 	/*
12614 	 * When NSS offload is enabled, send pdev_id->lmac_id
12615 	 * and pdev_id to hw_pdev_id to NSS FW
12616 	 */
12617 	if (nss_config) {
12618 		mac_id = pdev->lmac_id;
12619 		if (soc->cdp_soc.ol_ops->pdev_update_lmac_n_target_pdev_id)
12620 			soc->cdp_soc.ol_ops->
12621 				pdev_update_lmac_n_target_pdev_id(
12622 				soc->ctrl_psoc,
12623 				&pdev_id, &mac_id, &hw_pdev_id);
12624 	}
12625 
12626 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
12627 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
12628 		DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
12629 					       hw_pdev_id);
12630 		vdev->lmac_id = pdev->lmac_id;
12631 	}
12632 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
12633 
12634 	return QDF_STATUS_SUCCESS;
12635 }
12636 
12637 /**
12638  * dp_soc_set_pdev_status_down() - set pdev down/up status
12639  * @soc: datapath soc handle
12640  * @pdev_id: id of datapath pdev handle
12641  * @is_pdev_down: pdev down/up status
12642  *
12643  * Return: QDF_STATUS
12644  */
12645 static QDF_STATUS
12646 dp_soc_set_pdev_status_down(struct cdp_soc_t *soc, uint8_t pdev_id,
12647 			    bool is_pdev_down)
12648 {
12649 	struct dp_pdev *pdev =
12650 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
12651 						   pdev_id);
12652 	if (!pdev)
12653 		return QDF_STATUS_E_FAILURE;
12654 
12655 	pdev->is_pdev_down = is_pdev_down;
12656 	return QDF_STATUS_SUCCESS;
12657 }
12658 
12659 /**
12660  * dp_get_cfg_capabilities() - get dp capabilities
12661  * @soc_handle: datapath soc handle
12662  * @dp_caps: enum for dp capabilities
12663  *
12664  * Return: bool to determine if dp caps is enabled
12665  */
12666 static bool
12667 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
12668 			enum cdp_capabilities dp_caps)
12669 {
12670 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12671 
12672 	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
12673 }
12674 
12675 #ifdef FEATURE_AST
12676 static QDF_STATUS
12677 dp_peer_teardown_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
12678 		       uint8_t *peer_mac)
12679 {
12680 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12681 	QDF_STATUS status = QDF_STATUS_SUCCESS;
12682 	struct dp_peer *peer =
12683 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
12684 					       DP_MOD_ID_CDP);
12685 
12686 	/* Peer can be null for monitor vap mac address */
12687 	if (!peer) {
12688 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
12689 			  "%s: Invalid peer\n", __func__);
12690 		return QDF_STATUS_E_FAILURE;
12691 	}
12692 
12693 	dp_peer_update_state(soc, peer, DP_PEER_STATE_LOGICAL_DELETE);
12694 
12695 	qdf_spin_lock_bh(&soc->ast_lock);
12696 	dp_peer_send_wds_disconnect(soc, peer);
12697 	dp_peer_delete_ast_entries(soc, peer);
12698 	qdf_spin_unlock_bh(&soc->ast_lock);
12699 
12700 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
12701 	return status;
12702 }
12703 #endif
12704 
12705 #ifndef WLAN_SUPPORT_RX_TAG_STATISTICS
12706 /**
12707  * dp_dump_pdev_rx_protocol_tag_stats - dump the number of packets tagged for
12708  * given protocol type (RX_PROTOCOL_TAG_ALL indicates for all protocol)
12709  * @soc: cdp_soc handle
12710  * @pdev_id: id of cdp_pdev handle
12711  * @protocol_type: protocol type for which stats should be displayed
12712  *
12713  * Return: none
12714  */
12715 static inline void
12716 dp_dump_pdev_rx_protocol_tag_stats(struct cdp_soc_t  *soc, uint8_t pdev_id,
12717 				   uint16_t protocol_type)
12718 {
12719 }
12720 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
12721 
12722 #ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
12723 /**
12724  * dp_update_pdev_rx_protocol_tag - Add/remove a protocol tag that should be
12725  * applied to the desired protocol type packets
12726  * @soc: soc handle
12727  * @pdev_id: id of cdp_pdev handle
12728  * @enable_rx_protocol_tag - bitmask that indicates what protocol types
12729  * are enabled for tagging. zero indicates disable feature, non-zero indicates
12730  * enable feature
12731  * @protocol_type: new protocol type for which the tag is being added
12732  * @tag: user configured tag for the new protocol
12733  *
12734  * Return: Success
12735  */
12736 static inline QDF_STATUS
12737 dp_update_pdev_rx_protocol_tag(struct cdp_soc_t  *soc, uint8_t pdev_id,
12738 			       uint32_t enable_rx_protocol_tag,
12739 			       uint16_t protocol_type,
12740 			       uint16_t tag)
12741 {
12742 	return QDF_STATUS_SUCCESS;
12743 }
12744 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
12745 
12746 #ifndef WLAN_SUPPORT_RX_FLOW_TAG
12747 /**
12748  * dp_set_rx_flow_tag - add/delete a flow
12749  * @soc: soc handle
12750  * @pdev_id: id of cdp_pdev handle
12751  * @flow_info: flow tuple that is to be added to/deleted from flow search table
12752  *
12753  * Return: Success
12754  */
12755 static inline QDF_STATUS
12756 dp_set_rx_flow_tag(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
12757 		   struct cdp_rx_flow_info *flow_info)
12758 {
12759 	return QDF_STATUS_SUCCESS;
12760 }
12761 /**
12762  * dp_dump_rx_flow_tag_stats - dump the number of packets tagged for
12763  * given flow 5-tuple
12764  * @cdp_soc: soc handle
12765  * @pdev_id: id of cdp_pdev handle
12766  * @flow_info: flow 5-tuple for which stats should be displayed
12767  *
12768  * Return: Success
12769  */
12770 static inline QDF_STATUS
12771 dp_dump_rx_flow_tag_stats(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
12772 			  struct cdp_rx_flow_info *flow_info)
12773 {
12774 	return QDF_STATUS_SUCCESS;
12775 }
12776 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
12777 
12778 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
12779 					   uint32_t max_peers,
12780 					   uint32_t max_ast_index,
12781 					   uint8_t peer_map_unmap_versions)
12782 {
12783 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12784 	QDF_STATUS status;
12785 
12786 	soc->max_peers = max_peers;
12787 
12788 	wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);
12789 
12790 	status = soc->arch_ops.txrx_peer_map_attach(soc);
12791 	if (!QDF_IS_STATUS_SUCCESS(status)) {
12792 		dp_err("failure in allocating peer tables");
12793 		return QDF_STATUS_E_FAILURE;
12794 	}
12795 
12796 	dp_info("max_peers %u, calculated max_peers %u max_ast_index: %u\n",
12797 		max_peers, soc->max_peer_id, max_ast_index);
12798 
12799 	status = dp_peer_find_attach(soc);
12800 	if (!QDF_IS_STATUS_SUCCESS(status)) {
12801 		dp_err("Peer find attach failure");
12802 		goto fail;
12803 	}
12804 
12805 	soc->peer_map_unmap_versions = peer_map_unmap_versions;
12806 	soc->peer_map_attach_success = TRUE;
12807 
12808 	return QDF_STATUS_SUCCESS;
12809 fail:
12810 	soc->arch_ops.txrx_peer_map_detach(soc);
12811 
12812 	return status;
12813 }
12814 
12815 static QDF_STATUS dp_soc_set_param(struct cdp_soc_t  *soc_hdl,
12816 				   enum cdp_soc_param_t param,
12817 				   uint32_t value)
12818 {
12819 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12820 
12821 	switch (param) {
12822 	case DP_SOC_PARAM_MSDU_EXCEPTION_DESC:
12823 		soc->num_msdu_exception_desc = value;
12824 		dp_info("num_msdu exception_desc %u",
12825 			value);
12826 		break;
12827 	case DP_SOC_PARAM_CMEM_FSE_SUPPORT:
12828 		if (wlan_cfg_is_fst_in_cmem_enabled(soc->wlan_cfg_ctx))
12829 			soc->fst_in_cmem = !!value;
12830 		dp_info("FW supports CMEM FSE %u", value);
12831 		break;
12832 	case DP_SOC_PARAM_MAX_AST_AGEOUT:
12833 		soc->max_ast_ageout_count = value;
12834 		dp_info("Max ast ageout count %u", soc->max_ast_ageout_count);
12835 		break;
12836 	case DP_SOC_PARAM_EAPOL_OVER_CONTROL_PORT:
12837 		soc->eapol_over_control_port = value;
12838 		dp_info("Eapol over control_port:%d",
12839 			soc->eapol_over_control_port);
12840 		break;
12841 	case DP_SOC_PARAM_MULTI_PEER_GRP_CMD_SUPPORT:
12842 		soc->multi_peer_grp_cmd_supported = value;
12843 		dp_info("Multi Peer group command support:%d",
12844 			soc->multi_peer_grp_cmd_supported);
12845 		break;
12846 	case DP_SOC_PARAM_RSSI_DBM_CONV_SUPPORT:
12847 		soc->features.rssi_dbm_conv_support = value;
12848 		dp_info("Rssi dbm conversion support:%u",
12849 			soc->features.rssi_dbm_conv_support);
12850 		break;
12851 	case DP_SOC_PARAM_UMAC_HW_RESET_SUPPORT:
12852 		soc->features.umac_hw_reset_support = value;
12853 		dp_info("UMAC HW reset support :%u",
12854 			soc->features.umac_hw_reset_support);
12855 		break;
12856 	default:
12857 		dp_info("not handled param %d ", param);
12858 		break;
12859 	}
12860 
12861 	return QDF_STATUS_SUCCESS;
12862 }
12863 
12864 static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
12865 				      void *stats_ctx)
12866 {
12867 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12868 
12869 	soc->rate_stats_ctx = (struct cdp_soc_rate_stats_ctx *)stats_ctx;
12870 }
12871 
12872 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
12873 /**
12874  * dp_peer_flush_rate_stats_req(): Flush peer rate stats
12875  * @soc: Datapath SOC handle
12876  * @peer: Datapath peer
12877  * @arg: argument to iter function
12878  *
12879  * Return: QDF_STATUS
12880  */
12881 static void
12882 dp_peer_flush_rate_stats_req(struct dp_soc *soc, struct dp_peer *peer,
12883 			     void *arg)
12884 {
12885 	if (peer->bss_peer)
12886 		return;
12887 
12888 	dp_wdi_event_handler(
12889 		WDI_EVENT_FLUSH_RATE_STATS_REQ,
12890 		soc, dp_monitor_peer_get_peerstats_ctx(soc, peer),
12891 		peer->peer_id,
12892 		WDI_NO_VAL, peer->vdev->pdev->pdev_id);
12893 }
12894 
12895 /**
12896  * dp_flush_rate_stats_req(): Flush peer rate stats in pdev
12897  * @soc_hdl: Datapath SOC handle
12898  * @pdev_id: pdev_id
12899  *
12900  * Return: QDF_STATUS
12901  */
12902 static QDF_STATUS dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
12903 					  uint8_t pdev_id)
12904 {
12905 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12906 	struct dp_pdev *pdev =
12907 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
12908 						   pdev_id);
12909 	if (!pdev)
12910 		return QDF_STATUS_E_FAILURE;
12911 
12912 	dp_pdev_iterate_peer(pdev, dp_peer_flush_rate_stats_req, NULL,
12913 			     DP_MOD_ID_CDP);
12914 
12915 	return QDF_STATUS_SUCCESS;
12916 }
12917 #else
12918 static inline QDF_STATUS
12919 dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
12920 			uint8_t pdev_id)
12921 {
12922 	return QDF_STATUS_SUCCESS;
12923 }
12924 #endif
12925 
12926 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
12927 #ifdef WLAN_FEATURE_11BE_MLO
12928 /**
12929  * dp_get_peer_extd_rate_link_stats(): function to get peer
12930  *				extended rate and link stats
12931  * @soc_hdl: dp soc handler
12932  * @mac_addr: mac address of peer
12933  *
12934  * Return: QDF_STATUS
12935  */
12936 static QDF_STATUS
12937 dp_get_peer_extd_rate_link_stats(struct cdp_soc_t *soc_hdl, uint8_t *mac_addr)
12938 {
12939 	uint8_t i;
12940 	struct dp_peer *link_peer;
12941 	struct dp_soc *link_peer_soc;
12942 	struct dp_mld_link_peers link_peers_info;
12943 	struct dp_peer *peer = NULL;
12944 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12945 	struct cdp_peer_info peer_info = { 0 };
12946 
12947 	if (!mac_addr) {
12948 		dp_err("NULL peer mac addr\n");
12949 		return QDF_STATUS_E_FAILURE;
12950 	}
12951 
12952 	DP_PEER_INFO_PARAMS_INIT(&peer_info, DP_VDEV_ALL, mac_addr, false,
12953 				 CDP_WILD_PEER_TYPE);
12954 
12955 	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
12956 	if (!peer) {
12957 		dp_err("Invalid peer\n");
12958 		return QDF_STATUS_E_FAILURE;
12959 	}
12960 
12961 	if (IS_MLO_DP_MLD_PEER(peer)) {
12962 		dp_get_link_peers_ref_from_mld_peer(soc, peer,
12963 						    &link_peers_info,
12964 						    DP_MOD_ID_CDP);
12965 		for (i = 0; i < link_peers_info.num_links; i++) {
12966 			link_peer = link_peers_info.link_peers[i];
12967 			link_peer_soc = link_peer->vdev->pdev->soc;
12968 			dp_wdi_event_handler(WDI_EVENT_FLUSH_RATE_STATS_REQ,
12969 					     link_peer_soc,
12970 					     dp_monitor_peer_get_peerstats_ctx
12971 					     (link_peer_soc, link_peer),
12972 					     link_peer->peer_id,
12973 					     WDI_NO_VAL,
12974 					     link_peer->vdev->pdev->pdev_id);
12975 		}
12976 		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
12977 	} else {
12978 		dp_wdi_event_handler(
12979 				WDI_EVENT_FLUSH_RATE_STATS_REQ, soc,
12980 				dp_monitor_peer_get_peerstats_ctx(soc, peer),
12981 				peer->peer_id,
12982 				WDI_NO_VAL, peer->vdev->pdev->pdev_id);
12983 	}
12984 
12985 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
12986 	return QDF_STATUS_SUCCESS;
12987 }
12988 #else
12989 static QDF_STATUS
12990 dp_get_peer_extd_rate_link_stats(struct cdp_soc_t *soc_hdl, uint8_t *mac_addr)
12991 {
12992 	struct dp_peer *peer = NULL;
12993 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12994 
12995 	if (!mac_addr) {
12996 		dp_err("NULL peer mac addr\n");
12997 		return QDF_STATUS_E_FAILURE;
12998 	}
12999 
13000 	peer = dp_peer_find_hash_find(soc, mac_addr, 0,
13001 				      DP_VDEV_ALL, DP_MOD_ID_CDP);
13002 	if (!peer) {
13003 		dp_err("Invalid peer\n");
13004 		return QDF_STATUS_E_FAILURE;
13005 	}
13006 
13007 	dp_wdi_event_handler(
13008 			WDI_EVENT_FLUSH_RATE_STATS_REQ, soc,
13009 			dp_monitor_peer_get_peerstats_ctx(soc, peer),
13010 			peer->peer_id,
13011 			WDI_NO_VAL, peer->vdev->pdev->pdev_id);
13012 
13013 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
13014 	return QDF_STATUS_SUCCESS;
13015 }
13016 #endif
13017 #else
13018 static inline QDF_STATUS
13019 dp_get_peer_extd_rate_link_stats(struct cdp_soc_t *soc_hdl, uint8_t *mac_addr)
13020 {
13021 	return QDF_STATUS_SUCCESS;
13022 }
13023 #endif
13024 
13025 static void *dp_peer_get_peerstats_ctx(struct cdp_soc_t *soc_hdl,
13026 				       uint8_t vdev_id,
13027 				       uint8_t *mac_addr)
13028 {
13029 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
13030 	struct dp_peer *peer;
13031 	void *peerstats_ctx = NULL;
13032 
13033 	if (mac_addr) {
13034 		peer = dp_peer_find_hash_find(soc, mac_addr,
13035 					      0, vdev_id,
13036 					      DP_MOD_ID_CDP);
13037 		if (!peer)
13038 			return NULL;
13039 
13040 		if (!IS_MLO_DP_MLD_PEER(peer))
13041 			peerstats_ctx = dp_monitor_peer_get_peerstats_ctx(soc,
13042 									  peer);
13043 
13044 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
13045 	}
13046 
13047 	return peerstats_ctx;
13048 }
13049 
13050 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
13051 static QDF_STATUS dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
13052 					   uint8_t pdev_id,
13053 					   void *buf)
13054 {
13055 	 dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS,
13056 			      (struct dp_soc *)soc, buf, HTT_INVALID_PEER,
13057 			      WDI_NO_VAL, pdev_id);
13058 	return QDF_STATUS_SUCCESS;
13059 }
13060 #else
13061 static inline QDF_STATUS
13062 dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
13063 			 uint8_t pdev_id,
13064 			 void *buf)
13065 {
13066 	return QDF_STATUS_SUCCESS;
13067 }
13068 #endif
13069 
13070 static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle)
13071 {
13072 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
13073 
13074 	return soc->rate_stats_ctx;
13075 }
13076 
13077 /*
13078  * dp_get_cfg() - get dp cfg
13079  * @soc: cdp soc handle
13080  * @cfg: cfg enum
13081  *
13082  * Return: cfg value
13083  */
13084 static uint32_t dp_get_cfg(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg)
13085 {
13086 	struct dp_soc *dpsoc = (struct dp_soc *)soc;
13087 	uint32_t value = 0;
13088 
13089 	switch (cfg) {
13090 	case cfg_dp_enable_data_stall:
13091 		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
13092 		break;
13093 	case cfg_dp_enable_p2p_ip_tcp_udp_checksum_offload:
13094 		value = dpsoc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload;
13095 		break;
13096 	case cfg_dp_enable_nan_ip_tcp_udp_checksum_offload:
13097 		value = dpsoc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload;
13098 		break;
13099 	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
13100 		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
13101 		break;
13102 	case cfg_dp_disable_legacy_mode_csum_offload:
13103 		value = dpsoc->wlan_cfg_ctx->
13104 					legacy_mode_checksumoffload_disable;
13105 		break;
13106 	case cfg_dp_tso_enable:
13107 		value = dpsoc->wlan_cfg_ctx->tso_enabled;
13108 		break;
13109 	case cfg_dp_lro_enable:
13110 		value = dpsoc->wlan_cfg_ctx->lro_enabled;
13111 		break;
13112 	case cfg_dp_gro_enable:
13113 		value = dpsoc->wlan_cfg_ctx->gro_enabled;
13114 		break;
13115 	case cfg_dp_tc_based_dyn_gro_enable:
13116 		value = dpsoc->wlan_cfg_ctx->tc_based_dynamic_gro;
13117 		break;
13118 	case cfg_dp_tc_ingress_prio:
13119 		value = dpsoc->wlan_cfg_ctx->tc_ingress_prio;
13120 		break;
13121 	case cfg_dp_sg_enable:
13122 		value = dpsoc->wlan_cfg_ctx->sg_enabled;
13123 		break;
13124 	case cfg_dp_tx_flow_start_queue_offset:
13125 		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
13126 		break;
13127 	case cfg_dp_tx_flow_stop_queue_threshold:
13128 		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
13129 		break;
13130 	case cfg_dp_disable_intra_bss_fwd:
13131 		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
13132 		break;
13133 	case cfg_dp_pktlog_buffer_size:
13134 		value = dpsoc->wlan_cfg_ctx->pktlog_buffer_size;
13135 		break;
13136 	case cfg_dp_wow_check_rx_pending:
13137 		value = dpsoc->wlan_cfg_ctx->wow_check_rx_pending_enable;
13138 		break;
13139 	default:
13140 		value =  0;
13141 	}
13142 
13143 	return value;
13144 }
13145 
13146 #ifdef PEER_FLOW_CONTROL
13147 /**
13148  * dp_tx_flow_ctrl_configure_pdev() - Configure flow control params
13149  * @soc_handle: datapath soc handle
13150  * @pdev_id: id of datapath pdev handle
13151  * @param: ol ath params
13152  * @value: value of the flag
13153  * @buff: Buffer to be passed
13154  *
13155  * Implemented this function same as legacy function. In legacy code, single
13156  * function is used to display stats and update pdev params.
13157  *
13158  * Return: 0 for success. nonzero for failure.
13159  */
13160 static uint32_t dp_tx_flow_ctrl_configure_pdev(struct cdp_soc_t *soc_handle,
13161 					       uint8_t pdev_id,
13162 					       enum _dp_param_t param,
13163 					       uint32_t value, void *buff)
13164 {
13165 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
13166 	struct dp_pdev *pdev =
13167 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
13168 						   pdev_id);
13169 
13170 	if (qdf_unlikely(!pdev))
13171 		return 1;
13172 
13173 	soc = pdev->soc;
13174 	if (!soc)
13175 		return 1;
13176 
13177 	switch (param) {
13178 #ifdef QCA_ENH_V3_STATS_SUPPORT
13179 	case DP_PARAM_VIDEO_DELAY_STATS_FC:
13180 		if (value)
13181 			pdev->delay_stats_flag = true;
13182 		else
13183 			pdev->delay_stats_flag = false;
13184 		break;
13185 	case DP_PARAM_VIDEO_STATS_FC:
13186 		qdf_print("------- TID Stats ------\n");
13187 		dp_pdev_print_tid_stats(pdev);
13188 		qdf_print("------ Delay Stats ------\n");
13189 		dp_pdev_print_delay_stats(pdev);
13190 		qdf_print("------ Rx Error Stats ------\n");
13191 		dp_pdev_print_rx_error_stats(pdev);
13192 		break;
13193 #endif
13194 	case DP_PARAM_TOTAL_Q_SIZE:
13195 		{
13196 			uint32_t tx_min, tx_max;
13197 
13198 			tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx);
13199 			tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
13200 
13201 			if (!buff) {
13202 				if ((value >= tx_min) && (value <= tx_max)) {
13203 					pdev->num_tx_allowed = value;
13204 				} else {
13205 					dp_tx_info("%pK: Failed to update num_tx_allowed, Q_min = %d Q_max = %d",
13206 						   soc, tx_min, tx_max);
13207 					break;
13208 				}
13209 			} else {
13210 				*(int *)buff = pdev->num_tx_allowed;
13211 			}
13212 		}
13213 		break;
13214 	default:
13215 		dp_tx_info("%pK: not handled param %d ", soc, param);
13216 		break;
13217 	}
13218 
13219 	return 0;
13220 }
13221 #endif
13222 
13223 /**
13224  * dp_set_pdev_pcp_tid_map_wifi3(): update pcp tid map in pdev
13225  * @psoc: dp soc handle
13226  * @pdev_id: id of DP_PDEV handle
13227  * @pcp: pcp value
13228  * @tid: tid value passed by the user
13229  *
13230  * Return: QDF_STATUS_SUCCESS on success
13231  */
13232 static QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
13233 						uint8_t pdev_id,
13234 						uint8_t pcp, uint8_t tid)
13235 {
13236 	struct dp_soc *soc = (struct dp_soc *)psoc;
13237 
13238 	soc->pcp_tid_map[pcp] = tid;
13239 
13240 	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
13241 	return QDF_STATUS_SUCCESS;
13242 }
13243 
13244 /**
13245  * dp_set_vdev_pcp_tid_map_wifi3(): update pcp tid map in vdev
13246  * @soc: DP soc handle
13247  * @vdev_id: id of DP_VDEV handle
13248  * @pcp: pcp value
13249  * @tid: tid value passed by the user
13250  *
13251  * Return: QDF_STATUS_SUCCESS on success
13252  */
13253 static QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc_hdl,
13254 						uint8_t vdev_id,
13255 						uint8_t pcp, uint8_t tid)
13256 {
13257 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13258 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
13259 						     DP_MOD_ID_CDP);
13260 
13261 	if (!vdev)
13262 		return QDF_STATUS_E_FAILURE;
13263 
13264 	vdev->pcp_tid_map[pcp] = tid;
13265 
13266 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
13267 	return QDF_STATUS_SUCCESS;
13268 }
13269 
13270 #if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
13271 static void dp_drain_txrx(struct cdp_soc_t *soc_handle)
13272 {
13273 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
13274 	uint32_t cur_tx_limit, cur_rx_limit;
13275 	uint32_t budget = 0xffff;
13276 	uint32_t val;
13277 	int i;
13278 	int cpu = dp_srng_get_cpu();
13279 
13280 	cur_tx_limit = soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit;
13281 	cur_rx_limit = soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit;
13282 
13283 	/* Temporarily increase soft irq limits when going to drain
13284 	 * the UMAC/LMAC SRNGs and restore them after polling.
13285 	 * Though the budget is on higher side, the TX/RX reaping loops
13286 	 * will not execute longer as both TX and RX would be suspended
13287 	 * by the time this API is called.
13288 	 */
13289 	dp_update_soft_irq_limits(soc, budget, budget);
13290 
13291 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
13292 		dp_service_srngs(&soc->intr_ctx[i], budget, cpu);
13293 
13294 	dp_update_soft_irq_limits(soc, cur_tx_limit, cur_rx_limit);
13295 
13296 	/* Do a dummy read at offset 0; this will ensure all
13297 	 * pendings writes(HP/TP) are flushed before read returns.
13298 	 */
13299 	val = HAL_REG_READ((struct hal_soc *)soc->hal_soc, 0);
13300 	dp_debug("Register value at offset 0: %u\n", val);
13301 }
13302 #endif
13303 
13304 #ifdef DP_UMAC_HW_RESET_SUPPORT
13305 /**
13306  * dp_reset_interrupt_ring_masks(): Reset rx interrupt masks
13307  * @soc: dp soc handle
13308  *
13309  * Return: void
13310  */
13311 static void dp_reset_interrupt_ring_masks(struct dp_soc *soc)
13312 {
13313 	struct dp_intr_bkp *intr_bkp;
13314 	struct dp_intr *intr_ctx;
13315 	int num_ctxt = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
13316 	int i;
13317 
13318 	intr_bkp =
13319 	(struct dp_intr_bkp *)qdf_mem_malloc_atomic(sizeof(struct dp_intr_bkp) *
13320 			num_ctxt);
13321 
13322 	qdf_assert_always(intr_bkp);
13323 
13324 	soc->umac_reset_ctx.intr_ctx_bkp = intr_bkp;
13325 	for (i = 0; i < num_ctxt; i++) {
13326 		intr_ctx = &soc->intr_ctx[i];
13327 
13328 		intr_bkp->tx_ring_mask = intr_ctx->tx_ring_mask;
13329 		intr_bkp->rx_ring_mask = intr_ctx->rx_ring_mask;
13330 		intr_bkp->rx_mon_ring_mask = intr_ctx->rx_mon_ring_mask;
13331 		intr_bkp->rx_err_ring_mask = intr_ctx->rx_err_ring_mask;
13332 		intr_bkp->rx_wbm_rel_ring_mask = intr_ctx->rx_wbm_rel_ring_mask;
13333 		intr_bkp->reo_status_ring_mask = intr_ctx->reo_status_ring_mask;
13334 		intr_bkp->rxdma2host_ring_mask = intr_ctx->rxdma2host_ring_mask;
13335 		intr_bkp->host2rxdma_ring_mask = intr_ctx->host2rxdma_ring_mask;
13336 		intr_bkp->host2rxdma_mon_ring_mask =
13337 					intr_ctx->host2rxdma_mon_ring_mask;
13338 		intr_bkp->tx_mon_ring_mask = intr_ctx->tx_mon_ring_mask;
13339 
13340 		intr_ctx->tx_ring_mask = 0;
13341 		intr_ctx->rx_ring_mask = 0;
13342 		intr_ctx->rx_mon_ring_mask = 0;
13343 		intr_ctx->rx_err_ring_mask = 0;
13344 		intr_ctx->rx_wbm_rel_ring_mask = 0;
13345 		intr_ctx->reo_status_ring_mask = 0;
13346 		intr_ctx->rxdma2host_ring_mask = 0;
13347 		intr_ctx->host2rxdma_ring_mask = 0;
13348 		intr_ctx->host2rxdma_mon_ring_mask = 0;
13349 		intr_ctx->tx_mon_ring_mask = 0;
13350 
13351 		intr_bkp++;
13352 	}
13353 }
13354 
13355 /**
13356  * dp_restore_interrupt_ring_masks(): Restore rx interrupt masks
13357  * @soc: dp soc handle
13358  *
13359  * Return: void
13360  */
13361 static void dp_restore_interrupt_ring_masks(struct dp_soc *soc)
13362 {
13363 	struct dp_intr_bkp *intr_bkp = soc->umac_reset_ctx.intr_ctx_bkp;
13364 	struct dp_intr_bkp *intr_bkp_base = intr_bkp;
13365 	struct dp_intr *intr_ctx;
13366 	int num_ctxt = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
13367 	int i;
13368 
13369 	qdf_assert_always(intr_bkp);
13370 
13371 	for (i = 0; i < num_ctxt; i++) {
13372 		intr_ctx = &soc->intr_ctx[i];
13373 
13374 		intr_ctx->tx_ring_mask = intr_bkp->tx_ring_mask;
13375 		intr_ctx->rx_ring_mask = intr_bkp->rx_ring_mask;
13376 		intr_ctx->rx_mon_ring_mask = intr_bkp->rx_mon_ring_mask;
13377 		intr_ctx->rx_err_ring_mask = intr_bkp->rx_err_ring_mask;
13378 		intr_ctx->rx_wbm_rel_ring_mask = intr_bkp->rx_wbm_rel_ring_mask;
13379 		intr_ctx->reo_status_ring_mask = intr_bkp->reo_status_ring_mask;
13380 		intr_ctx->rxdma2host_ring_mask = intr_bkp->rxdma2host_ring_mask;
13381 		intr_ctx->host2rxdma_ring_mask = intr_bkp->host2rxdma_ring_mask;
13382 		intr_ctx->host2rxdma_mon_ring_mask =
13383 			intr_bkp->host2rxdma_mon_ring_mask;
13384 		intr_ctx->tx_mon_ring_mask = intr_bkp->tx_mon_ring_mask;
13385 
13386 		intr_bkp++;
13387 	}
13388 
13389 	qdf_mem_free(intr_bkp_base);
13390 	soc->umac_reset_ctx.intr_ctx_bkp = NULL;
13391 }
13392 
13393 /**
13394  * dp_resume_tx_hardstart(): Restore the old Tx hardstart functions
13395  * @soc: dp soc handle
13396  *
13397  * Return: void
13398  */
13399 static void dp_resume_tx_hardstart(struct dp_soc *soc)
13400 {
13401 	struct dp_vdev *vdev;
13402 	struct ol_txrx_hardtart_ctxt ctxt = {0};
13403 	struct cdp_ctrl_objmgr_psoc *psoc = soc->ctrl_psoc;
13404 	int i;
13405 
13406 	for (i = 0; i < MAX_PDEV_CNT; i++) {
13407 		struct dp_pdev *pdev = soc->pdev_list[i];
13408 
13409 		if (!pdev)
13410 			continue;
13411 
13412 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
13413 			uint8_t vdev_id = vdev->vdev_id;
13414 
13415 			dp_vdev_fetch_tx_handler(vdev, soc, &ctxt);
13416 			soc->cdp_soc.ol_ops->dp_update_tx_hardstart(psoc,
13417 								    vdev_id,
13418 								    &ctxt);
13419 		}
13420 	}
13421 }
13422 
13423 /**
13424  * dp_pause_tx_hardstart(): Register Tx hardstart functions to drop packets
13425  * @soc: dp soc handle
13426  *
13427  * Return: void
13428  */
13429 static void dp_pause_tx_hardstart(struct dp_soc *soc)
13430 {
13431 	struct dp_vdev *vdev;
13432 	struct ol_txrx_hardtart_ctxt ctxt;
13433 	struct cdp_ctrl_objmgr_psoc *psoc = soc->ctrl_psoc;
13434 	int i;
13435 
13436 	ctxt.tx = &dp_tx_drop;
13437 	ctxt.tx_fast = &dp_tx_drop;
13438 	ctxt.tx_exception = &dp_tx_exc_drop;
13439 
13440 	for (i = 0; i < MAX_PDEV_CNT; i++) {
13441 		struct dp_pdev *pdev = soc->pdev_list[i];
13442 
13443 		if (!pdev)
13444 			continue;
13445 
13446 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
13447 			uint8_t vdev_id = vdev->vdev_id;
13448 
13449 			soc->cdp_soc.ol_ops->dp_update_tx_hardstart(psoc,
13450 								    vdev_id,
13451 								    &ctxt);
13452 		}
13453 	}
13454 }
13455 
13456 /**
13457  * dp_unregister_notify_umac_pre_reset_fw_callback(): unregister notify_fw_cb
13458  * @soc: dp soc handle
13459  *
13460  * Return: void
13461  */
13462 static inline
13463 void dp_unregister_notify_umac_pre_reset_fw_callback(struct dp_soc *soc)
13464 {
13465 	soc->notify_fw_callback = NULL;
13466 }
13467 
13468 /**
13469  * dp_check_n_notify_umac_prereset_done(): Send pre reset done to firmware
13470  * @soc: dp soc handle
13471  *
13472  * Return: void
13473  */
13474 static inline
13475 void dp_check_n_notify_umac_prereset_done(struct dp_soc *soc)
13476 {
13477 	/* Some Cpu(s) is processing the umac rings*/
13478 	if (soc->service_rings_running)
13479 		return;
13480 
13481 	/* Notify the firmware that Umac pre reset is complete */
13482 	dp_umac_reset_notify_action_completion(soc,
13483 					       UMAC_RESET_ACTION_DO_PRE_RESET);
13484 
13485 	/* Unregister the callback */
13486 	dp_unregister_notify_umac_pre_reset_fw_callback(soc);
13487 }
13488 
13489 /**
13490  * dp_register_notify_umac_pre_reset_fw_callback(): register notify_fw_cb
13491  * @soc: dp soc handle
13492  *
13493  * Return: void
13494  */
13495 static inline
13496 void dp_register_notify_umac_pre_reset_fw_callback(struct dp_soc *soc)
13497 {
13498 	soc->notify_fw_callback = dp_check_n_notify_umac_prereset_done;
13499 }
13500 
13501 #ifdef DP_UMAC_HW_HARD_RESET
13502 /**
13503  * dp_set_umac_regs(): Reinitialize host umac registers
13504  * @soc: dp soc handle
13505  *
13506  * Return: void
13507  */
13508 static void dp_set_umac_regs(struct dp_soc *soc)
13509 {
13510 	int i;
13511 	struct hal_reo_params reo_params;
13512 
13513 	qdf_mem_zero(&reo_params, sizeof(reo_params));
13514 
13515 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
13516 		if (soc->arch_ops.reo_remap_config(soc, &reo_params.remap0,
13517 						   &reo_params.remap1,
13518 						   &reo_params.remap2))
13519 			reo_params.rx_hash_enabled = true;
13520 		else
13521 			reo_params.rx_hash_enabled = false;
13522 	}
13523 
13524 	hal_reo_setup(soc->hal_soc, &reo_params, 0);
13525 
13526 	soc->arch_ops.dp_cc_reg_cfg_init(soc, true);
13527 
13528 	for (i = 0; i < PCP_TID_MAP_MAX; i++)
13529 		hal_tx_update_pcp_tid_map(soc->hal_soc, soc->pcp_tid_map[i], i);
13530 
13531 	for (i = 0; i < MAX_PDEV_CNT; i++) {
13532 		struct dp_vdev *vdev = NULL;
13533 		struct dp_pdev *pdev = soc->pdev_list[i];
13534 
13535 		if (!pdev)
13536 			continue;
13537 
13538 		for (i = 0; i < soc->num_hw_dscp_tid_map; i++)
13539 			hal_tx_set_dscp_tid_map(soc->hal_soc,
13540 						pdev->dscp_tid_map[i], i);
13541 
13542 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
13543 			soc->arch_ops.dp_bank_reconfig(soc, vdev);
13544 			soc->arch_ops.dp_reconfig_tx_vdev_mcast_ctrl(soc,
13545 								      vdev);
13546 		}
13547 	}
13548 }
13549 #else
13550 static void dp_set_umac_regs(struct dp_soc *soc)
13551 {
13552 }
13553 #endif
13554 
13555 /**
13556  * dp_reinit_rings(): Reinitialize host managed rings
13557  * @soc: dp soc handle
13558  *
13559  * Return: QDF_STATUS
13560  */
13561 static void dp_reinit_rings(struct dp_soc *soc)
13562 {
13563 	unsigned long end;
13564 
13565 	dp_soc_srng_deinit(soc);
13566 	dp_hw_link_desc_ring_deinit(soc);
13567 
13568 	/* Busy wait for 2 ms to make sure the rings are in idle state
13569 	 * before we enable them again
13570 	 */
13571 	end = jiffies + msecs_to_jiffies(2);
13572 	while (time_before(jiffies, end))
13573 		;
13574 
13575 	dp_hw_link_desc_ring_init(soc);
13576 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
13577 	dp_soc_srng_init(soc);
13578 }
13579 
13580 /**
13581  * dp_umac_reset_handle_pre_reset(): Handle Umac prereset interrupt from FW
13582  * @soc: dp soc handle
13583  *
13584  * Return: QDF_STATUS
13585  */
13586 static QDF_STATUS dp_umac_reset_handle_pre_reset(struct dp_soc *soc)
13587 {
13588 	dp_reset_interrupt_ring_masks(soc);
13589 
13590 	dp_pause_tx_hardstart(soc);
13591 	dp_pause_reo_send_cmd(soc);
13592 
13593 	dp_check_n_notify_umac_prereset_done(soc);
13594 
13595 	soc->umac_reset_ctx.nbuf_list = NULL;
13596 
13597 	return QDF_STATUS_SUCCESS;
13598 }
13599 
13600 /**
13601  * dp_umac_reset_handle_post_reset(): Handle Umac postreset interrupt from FW
13602  * @soc: dp soc handle
13603  *
13604  * Return: QDF_STATUS
13605  */
13606 static QDF_STATUS dp_umac_reset_handle_post_reset(struct dp_soc *soc)
13607 {
13608 	if (!soc->umac_reset_ctx.skel_enable) {
13609 		qdf_nbuf_t *nbuf_list = &soc->umac_reset_ctx.nbuf_list;
13610 
13611 		dp_set_umac_regs(soc);
13612 
13613 		dp_reinit_rings(soc);
13614 
13615 		dp_rx_desc_reuse(soc, nbuf_list);
13616 
13617 		dp_cleanup_reo_cmd_module(soc);
13618 
13619 		dp_tx_desc_pool_cleanup(soc, nbuf_list);
13620 
13621 		dp_reset_tid_q_setup(soc);
13622 	}
13623 
13624 	return dp_umac_reset_notify_action_completion(soc,
13625 					UMAC_RESET_ACTION_DO_POST_RESET_START);
13626 }
13627 
13628 /**
13629  * dp_umac_reset_handle_post_reset_complete(): Handle Umac postreset_complete
13630  *						interrupt from FW
13631  * @soc: dp soc handle
13632  *
13633  * Return: QDF_STATUS
13634  */
13635 static QDF_STATUS dp_umac_reset_handle_post_reset_complete(struct dp_soc *soc)
13636 {
13637 	QDF_STATUS status;
13638 	qdf_nbuf_t nbuf_list = soc->umac_reset_ctx.nbuf_list;
13639 
13640 	soc->umac_reset_ctx.nbuf_list = NULL;
13641 
13642 	dp_resume_reo_send_cmd(soc);
13643 
13644 	dp_restore_interrupt_ring_masks(soc);
13645 
13646 	dp_resume_tx_hardstart(soc);
13647 
13648 	status = dp_umac_reset_notify_action_completion(soc,
13649 				UMAC_RESET_ACTION_DO_POST_RESET_COMPLETE);
13650 
13651 	while (nbuf_list) {
13652 		qdf_nbuf_t nbuf = nbuf_list->next;
13653 
13654 		qdf_nbuf_free(nbuf_list);
13655 		nbuf_list = nbuf;
13656 	}
13657 
13658 	dp_umac_reset_info("Umac reset done on soc %pK\n prereset : %u us\n"
13659 			   "postreset : %u us \n postreset complete: %u us \n",
13660 			   soc,
13661 			   soc->umac_reset_ctx.ts.pre_reset_done -
13662 			   soc->umac_reset_ctx.ts.pre_reset_start,
13663 			   soc->umac_reset_ctx.ts.post_reset_done -
13664 			   soc->umac_reset_ctx.ts.post_reset_start,
13665 			   soc->umac_reset_ctx.ts.post_reset_complete_done -
13666 			   soc->umac_reset_ctx.ts.post_reset_complete_start);
13667 
13668 	return status;
13669 }
13670 #endif
13671 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
13672 static void
13673 dp_set_pkt_capture_mode(struct cdp_soc_t *soc_handle, bool val)
13674 {
13675 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
13676 
13677 	soc->wlan_cfg_ctx->pkt_capture_mode = val;
13678 }
13679 #endif
13680 
13681 #ifdef HW_TX_DELAY_STATS_ENABLE
13682 /**
13683  * dp_enable_disable_vdev_tx_delay_stats(): Start/Stop tx delay stats capture
13684  * @soc: DP soc handle
13685  * @vdev_id: vdev id
13686  * @value: value
13687  *
13688  * Return: None
13689  */
13690 static void
13691 dp_enable_disable_vdev_tx_delay_stats(struct cdp_soc_t *soc_hdl,
13692 				      uint8_t vdev_id,
13693 				      uint8_t value)
13694 {
13695 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13696 	struct dp_vdev *vdev = NULL;
13697 
13698 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
13699 	if (!vdev)
13700 		return;
13701 
13702 	vdev->hw_tx_delay_stats_enabled = value;
13703 
13704 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
13705 }
13706 
13707 /**
13708  * dp_check_vdev_tx_delay_stats_enabled() - check the feature is enabled or not
13709  * @soc: DP soc handle
13710  * @vdev_id: vdev id
13711  *
13712  * Returns: 1 if enabled, 0 if disabled
13713  */
13714 static uint8_t
13715 dp_check_vdev_tx_delay_stats_enabled(struct cdp_soc_t *soc_hdl,
13716 				     uint8_t vdev_id)
13717 {
13718 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13719 	struct dp_vdev *vdev;
13720 	uint8_t ret_val = 0;
13721 
13722 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
13723 	if (!vdev)
13724 		return ret_val;
13725 
13726 	ret_val = vdev->hw_tx_delay_stats_enabled;
13727 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
13728 
13729 	return ret_val;
13730 }
13731 #endif
13732 
13733 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
13734 static void
13735 dp_recovery_vdev_flush_peers(struct cdp_soc_t *cdp_soc,
13736 			     uint8_t vdev_id,
13737 			     bool mlo_peers_only)
13738 {
13739 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
13740 	struct dp_vdev *vdev;
13741 
13742 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
13743 
13744 	if (!vdev)
13745 		return;
13746 
13747 	dp_vdev_flush_peers((struct cdp_vdev *)vdev, false, mlo_peers_only);
13748 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
13749 }
13750 #endif
13751 #ifdef QCA_GET_TSF_VIA_REG
13752 /**
13753  * dp_get_tsf_time() - get tsf time
13754  * @soc: Datapath soc handle
13755  * @mac_id: mac_id
13756  * @tsf: pointer to update tsf value
13757  * @tsf_sync_soc_time: pointer to update tsf sync time
13758  *
13759  * Return: None.
13760  */
13761 static inline void
13762 dp_get_tsf_time(struct cdp_soc_t *soc_hdl, uint32_t tsf_id, uint32_t mac_id,
13763 		uint64_t *tsf, uint64_t *tsf_sync_soc_time)
13764 {
13765 	hal_get_tsf_time(((struct dp_soc *)soc_hdl)->hal_soc, tsf_id, mac_id,
13766 			 tsf, tsf_sync_soc_time);
13767 }
13768 #else
13769 static inline void
13770 dp_get_tsf_time(struct cdp_soc_t *soc_hdl, uint32_t tsf_id, uint32_t mac_id,
13771 		uint64_t *tsf, uint64_t *tsf_sync_soc_time)
13772 {
13773 }
13774 #endif
13775 
13776 /**
13777  * dp_get_tsf2_scratch_reg() - get tsf2 offset from the scratch register
13778  * @soc: Datapath soc handle
13779  * @mac_id: mac_id
13780  * @value: pointer to update tsf2 offset value
13781  *
13782  * Return: None.
13783  */
13784 static inline void
13785 dp_get_tsf2_scratch_reg(struct cdp_soc_t *soc_hdl, uint8_t mac_id,
13786 			uint64_t *value)
13787 {
13788 	hal_get_tsf2_offset(((struct dp_soc *)soc_hdl)->hal_soc, mac_id, value);
13789 }
13790 
13791 /**
13792  * dp_get_tqm_scratch_reg() - get tqm offset from the scratch register
13793  * @soc: Datapath soc handle
13794  * @value: pointer to update tqm offset value
13795  *
13796  * Return: None.
13797  */
13798 static inline void
13799 dp_get_tqm_scratch_reg(struct cdp_soc_t *soc_hdl, uint64_t *value)
13800 {
13801 	hal_get_tqm_offset(((struct dp_soc *)soc_hdl)->hal_soc, value);
13802 }
13803 
13804 /**
13805  * dp_set_tx_pause() - Pause or resume tx path
13806  * @soc_hdl: Datapath soc handle
13807  * @flag: set or clear is_tx_pause
13808  *
13809  * Return: None.
13810  */
13811 static inline
13812 void dp_set_tx_pause(struct cdp_soc_t *soc_hdl, bool flag)
13813 {
13814 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13815 
13816 	soc->is_tx_pause = flag;
13817 }
13818 
13819 static struct cdp_cmn_ops dp_ops_cmn = {
13820 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
13821 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
13822 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
13823 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
13824 	.txrx_pdev_post_attach = dp_pdev_post_attach_wifi3,
13825 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
13826 	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
13827 	.txrx_peer_create = dp_peer_create_wifi3,
13828 	.txrx_peer_setup = dp_peer_setup_wifi3,
13829 #ifdef FEATURE_AST
13830 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
13831 #else
13832 	.txrx_peer_teardown = NULL,
13833 #endif
13834 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
13835 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
13836 	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
13837 	.txrx_peer_get_ast_info_by_pdev =
13838 		dp_peer_get_ast_info_by_pdevid_wifi3,
13839 	.txrx_peer_ast_delete_by_soc =
13840 		dp_peer_ast_entry_del_by_soc,
13841 	.txrx_peer_ast_delete_by_pdev =
13842 		dp_peer_ast_entry_del_by_pdev,
13843 	.txrx_peer_delete = dp_peer_delete_wifi3,
13844 #ifdef DP_RX_UDP_OVER_PEER_ROAM
13845 	.txrx_update_roaming_peer = dp_update_roaming_peer_wifi3,
13846 #endif
13847 	.txrx_vdev_register = dp_vdev_register_wifi3,
13848 	.txrx_soc_detach = dp_soc_detach_wifi3,
13849 	.txrx_soc_deinit = dp_soc_deinit_wifi3,
13850 	.txrx_soc_init = dp_soc_init_wifi3,
13851 #ifndef QCA_HOST_MODE_WIFI_DISABLED
13852 	.txrx_tso_soc_attach = dp_tso_soc_attach,
13853 	.txrx_tso_soc_detach = dp_tso_soc_detach,
13854 	.tx_send = dp_tx_send,
13855 	.tx_send_exc = dp_tx_send_exception,
13856 #endif
13857 	.set_tx_pause = dp_set_tx_pause,
13858 	.txrx_pdev_init = dp_pdev_init_wifi3,
13859 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
13860 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
13861 	.txrx_ath_getstats = dp_get_device_stats,
13862 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
13863 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
13864 	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
13865 	.delba_process = dp_delba_process_wifi3,
13866 	.set_addba_response = dp_set_addba_response,
13867 	.flush_cache_rx_queue = NULL,
13868 	.tid_update_ba_win_size = dp_rx_tid_update_ba_win_size,
13869 	/* TODO: get API's for dscp-tid need to be added*/
13870 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
13871 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
13872 	.txrx_get_total_per = dp_get_total_per,
13873 	.txrx_stats_request = dp_txrx_stats_request,
13874 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
13875 	.display_stats = dp_txrx_dump_stats,
13876 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
13877 	.txrx_intr_detach = dp_soc_interrupt_detach,
13878 	.set_pn_check = dp_set_pn_check_wifi3,
13879 	.set_key_sec_type = dp_set_key_sec_type_wifi3,
13880 	.update_config_parameters = dp_update_config_parameters,
13881 	/* TODO: Add other functions */
13882 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
13883 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
13884 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
13885 	.get_vdev_dp_ext_txrx_handle = dp_vdev_get_dp_ext_handle,
13886 	.set_vdev_dp_ext_txrx_handle = dp_vdev_set_dp_ext_handle,
13887 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
13888 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
13889 	.map_pdev_to_lmac = dp_soc_map_pdev_to_lmac,
13890 	.handle_mode_change = dp_soc_handle_pdev_mode_change,
13891 	.set_pdev_status_down = dp_soc_set_pdev_status_down,
13892 	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
13893 	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
13894 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
13895 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
13896 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
13897 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
13898 	.set_soc_param = dp_soc_set_param,
13899 	.txrx_get_os_rx_handles_from_vdev =
13900 					dp_get_os_rx_handles_from_vdev_wifi3,
13901 	.delba_tx_completion = dp_delba_tx_completion_wifi3,
13902 	.get_dp_capabilities = dp_get_cfg_capabilities,
13903 	.txrx_get_cfg = dp_get_cfg,
13904 	.set_rate_stats_ctx = dp_soc_set_rate_stats_ctx,
13905 	.get_rate_stats_ctx = dp_soc_get_rate_stats_ctx,
13906 	.txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats,
13907 	.txrx_flush_rate_stats_request = dp_flush_rate_stats_req,
13908 	.txrx_peer_get_peerstats_ctx = dp_peer_get_peerstats_ctx,
13909 
13910 	.set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3,
13911 	.set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3,
13912 
13913 	.txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler,
13914 #ifdef QCA_MULTIPASS_SUPPORT
13915 	.set_vlan_groupkey = dp_set_vlan_groupkey,
13916 #endif
13917 	.get_peer_mac_list = dp_get_peer_mac_list,
13918 	.get_peer_id = dp_get_peer_id,
13919 #ifdef QCA_SUPPORT_WDS_EXTENDED
13920 	.set_wds_ext_peer_rx = dp_wds_ext_set_peer_rx,
13921 #endif /* QCA_SUPPORT_WDS_EXTENDED */
13922 
13923 #if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
13924 	.txrx_drain = dp_drain_txrx,
13925 #endif
13926 #if defined(FEATURE_RUNTIME_PM)
13927 	.set_rtpm_tput_policy = dp_set_rtpm_tput_policy_requirement,
13928 #endif
13929 #ifdef WLAN_SYSFS_DP_STATS
13930 	.txrx_sysfs_fill_stats = dp_sysfs_fill_stats,
13931 	.txrx_sysfs_set_stat_type = dp_sysfs_set_stat_type,
13932 #endif /* WLAN_SYSFS_DP_STATS */
13933 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
13934 	.set_pkt_capture_mode = dp_set_pkt_capture_mode,
13935 #endif
13936 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
13937 	.txrx_recovery_vdev_flush_peers = dp_recovery_vdev_flush_peers,
13938 #endif
13939 	.txrx_umac_reset_deinit = dp_soc_umac_reset_deinit,
13940 	.txrx_get_tsf_time = dp_get_tsf_time,
13941 	.txrx_get_tsf2_offset = dp_get_tsf2_scratch_reg,
13942 	.txrx_get_tqm_offset = dp_get_tqm_scratch_reg,
13943 };
13944 
13945 static struct cdp_ctrl_ops dp_ops_ctrl = {
13946 	.txrx_peer_authorize = dp_peer_authorize,
13947 	.txrx_peer_get_authorize = dp_peer_get_authorize,
13948 #ifdef VDEV_PEER_PROTOCOL_COUNT
13949 	.txrx_enable_peer_protocol_count = dp_enable_vdev_peer_protocol_count,
13950 	.txrx_set_peer_protocol_drop_mask =
13951 		dp_enable_vdev_peer_protocol_drop_mask,
13952 	.txrx_is_peer_protocol_count_enabled =
13953 		dp_is_vdev_peer_protocol_count_enabled,
13954 	.txrx_get_peer_protocol_drop_mask = dp_get_vdev_peer_protocol_drop_mask,
13955 #endif
13956 	.txrx_set_vdev_param = dp_set_vdev_param,
13957 	.txrx_set_psoc_param = dp_set_psoc_param,
13958 	.txrx_get_psoc_param = dp_get_psoc_param,
13959 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
13960 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
13961 	.txrx_get_sec_type = dp_get_sec_type,
13962 	.txrx_wdi_event_sub = dp_wdi_event_sub,
13963 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
13964 	.txrx_set_pdev_param = dp_set_pdev_param,
13965 	.txrx_get_pdev_param = dp_get_pdev_param,
13966 	.txrx_set_peer_param = dp_set_peer_param,
13967 	.txrx_get_peer_param = dp_get_peer_param,
13968 #ifdef VDEV_PEER_PROTOCOL_COUNT
13969 	.txrx_peer_protocol_cnt = dp_peer_stats_update_protocol_cnt,
13970 #endif
13971 #ifdef WLAN_SUPPORT_MSCS
13972 	.txrx_record_mscs_params = dp_record_mscs_params,
13973 #endif
13974 	.set_key = dp_set_michael_key,
13975 	.txrx_get_vdev_param = dp_get_vdev_param,
13976 	.calculate_delay_stats = dp_calculate_delay_stats,
13977 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
13978 	.txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag,
13979 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
13980 	.txrx_dump_pdev_rx_protocol_tag_stats =
13981 				dp_dump_pdev_rx_protocol_tag_stats,
13982 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
13983 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
13984 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
13985 	.txrx_set_rx_flow_tag = dp_set_rx_flow_tag,
13986 	.txrx_dump_rx_flow_tag_stats = dp_dump_rx_flow_tag_stats,
13987 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
13988 #ifdef QCA_MULTIPASS_SUPPORT
13989 	.txrx_peer_set_vlan_id = dp_peer_set_vlan_id,
13990 #endif /*QCA_MULTIPASS_SUPPORT*/
13991 #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
13992 	.txrx_set_delta_tsf = dp_set_delta_tsf,
13993 #endif
13994 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
13995 	.txrx_set_tsf_ul_delay_report = dp_set_tsf_ul_delay_report,
13996 	.txrx_get_uplink_delay = dp_get_uplink_delay,
13997 #endif
13998 #ifdef QCA_UNDECODED_METADATA_SUPPORT
13999 	.txrx_set_pdev_phyrx_error_mask = dp_set_pdev_phyrx_error_mask,
14000 	.txrx_get_pdev_phyrx_error_mask = dp_get_pdev_phyrx_error_mask,
14001 #endif
14002 	.txrx_peer_flush_frags = dp_peer_flush_frags,
14003 };
14004 
14005 static struct cdp_me_ops dp_ops_me = {
14006 #ifndef QCA_HOST_MODE_WIFI_DISABLED
14007 #ifdef ATH_SUPPORT_IQUE
14008 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
14009 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
14010 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
14011 #endif
14012 #endif
14013 };
14014 
14015 static struct cdp_host_stats_ops dp_ops_host_stats = {
14016 	.txrx_per_peer_stats = dp_get_host_peer_stats,
14017 	.get_fw_peer_stats = dp_get_fw_peer_stats,
14018 	.get_htt_stats = dp_get_htt_stats,
14019 	.txrx_stats_publish = dp_txrx_stats_publish,
14020 	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
14021 	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
14022 	.txrx_get_soc_stats = dp_txrx_get_soc_stats,
14023 	.txrx_get_peer_stats_param = dp_txrx_get_peer_stats_param,
14024 	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
14025 	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
14026 	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
14027 	.txrx_update_vdev_stats = dp_txrx_update_vdev_host_stats,
14028 	.txrx_get_peer_delay_stats = dp_txrx_get_peer_delay_stats,
14029 	.txrx_get_peer_jitter_stats = dp_txrx_get_peer_jitter_stats,
14030 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
14031 	.txrx_alloc_vdev_stats_id = dp_txrx_alloc_vdev_stats_id,
14032 	.txrx_reset_vdev_stats_id = dp_txrx_reset_vdev_stats_id,
14033 #endif
14034 #ifdef WLAN_TX_PKT_CAPTURE_ENH
14035 	.get_peer_tx_capture_stats = dp_peer_get_tx_capture_stats,
14036 	.get_pdev_tx_capture_stats = dp_pdev_get_tx_capture_stats,
14037 #endif /* WLAN_TX_PKT_CAPTURE_ENH */
14038 #ifdef HW_TX_DELAY_STATS_ENABLE
14039 	.enable_disable_vdev_tx_delay_stats =
14040 				dp_enable_disable_vdev_tx_delay_stats,
14041 	.is_tx_delay_stats_enabled = dp_check_vdev_tx_delay_stats_enabled,
14042 #endif
14043 	.txrx_get_pdev_tid_stats = dp_pdev_get_tid_stats,
14044 #ifdef WLAN_TELEMETRY_STATS_SUPPORT
14045 	.txrx_pdev_telemetry_stats = dp_get_pdev_telemetry_stats,
14046 	.txrx_peer_telemetry_stats = dp_get_peer_telemetry_stats,
14047 #endif
14048 	.txrx_get_peer_extd_rate_link_stats =
14049 					dp_get_peer_extd_rate_link_stats,
14050 	.get_pdev_obss_stats = dp_get_obss_stats,
14051 	.clear_pdev_obss_pd_stats = dp_clear_pdev_obss_pd_stats,
14052 	/* TODO */
14053 };
14054 
14055 static struct cdp_raw_ops dp_ops_raw = {
14056 	/* TODO */
14057 };
14058 
14059 #ifdef PEER_FLOW_CONTROL
14060 static struct cdp_pflow_ops dp_ops_pflow = {
14061 	dp_tx_flow_ctrl_configure_pdev,
14062 };
14063 #endif /* CONFIG_WIN */
14064 
14065 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
14066 static struct cdp_cfr_ops dp_ops_cfr = {
14067 	.txrx_cfr_filter = NULL,
14068 	.txrx_get_cfr_rcc = dp_get_cfr_rcc,
14069 	.txrx_set_cfr_rcc = dp_set_cfr_rcc,
14070 	.txrx_get_cfr_dbg_stats = dp_get_cfr_dbg_stats,
14071 	.txrx_clear_cfr_dbg_stats = dp_clear_cfr_dbg_stats,
14072 };
14073 #endif
14074 
14075 #ifdef WLAN_SUPPORT_MSCS
14076 static struct cdp_mscs_ops dp_ops_mscs = {
14077 	.mscs_peer_lookup_n_get_priority = dp_mscs_peer_lookup_n_get_priority,
14078 };
14079 #endif
14080 
14081 #ifdef WLAN_SUPPORT_MESH_LATENCY
14082 static struct cdp_mesh_latency_ops dp_ops_mesh_latency = {
14083 	.mesh_latency_update_peer_parameter =
14084 		dp_mesh_latency_update_peer_parameter,
14085 };
14086 #endif
14087 
14088 #ifdef WLAN_SUPPORT_SCS
14089 static struct cdp_scs_ops dp_ops_scs = {
14090 	.scs_peer_lookup_n_rule_match = dp_scs_peer_lookup_n_rule_match,
14091 };
14092 #endif
14093 
14094 #ifdef CONFIG_SAWF_DEF_QUEUES
14095 static struct cdp_sawf_ops dp_ops_sawf = {
14096 	.sawf_def_queues_map_req = dp_sawf_def_queues_map_req,
14097 	.sawf_def_queues_unmap_req = dp_sawf_def_queues_unmap_req,
14098 	.sawf_def_queues_get_map_report =
14099 		dp_sawf_def_queues_get_map_report,
14100 #ifdef CONFIG_SAWF_STATS
14101 	.txrx_get_peer_sawf_delay_stats = dp_sawf_get_peer_delay_stats,
14102 	.txrx_get_peer_sawf_tx_stats = dp_sawf_get_peer_tx_stats,
14103 	.sawf_mpdu_stats_req = dp_sawf_mpdu_stats_req,
14104 	.sawf_mpdu_details_stats_req = dp_sawf_mpdu_details_stats_req,
14105 	.txrx_sawf_set_mov_avg_params = dp_sawf_set_mov_avg_params,
14106 	.txrx_sawf_set_sla_params = dp_sawf_set_sla_params,
14107 	.txrx_sawf_init_telemtery_params = dp_sawf_init_telemetry_params,
14108 	.telemetry_get_throughput_stats = dp_sawf_get_tx_stats,
14109 	.telemetry_get_mpdu_stats = dp_sawf_get_mpdu_sched_stats,
14110 	.telemetry_get_drop_stats = dp_sawf_get_drop_stats,
14111 #endif
14112 };
14113 #endif
14114 
14115 #if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
14116 /**
14117  * dp_flush_ring_hptp() - Update ring shadow
14118  *			  register HP/TP address when runtime
14119  *                        resume
14120  * @opaque_soc: DP soc context
14121  *
14122  * Return: None
14123  */
14124 static
14125 void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
14126 {
14127 	if (hal_srng && hal_srng_get_clear_event(hal_srng,
14128 						 HAL_SRNG_FLUSH_EVENT)) {
14129 		/* Acquire the lock */
14130 		hal_srng_access_start(soc->hal_soc, hal_srng);
14131 
14132 		hal_srng_access_end(soc->hal_soc, hal_srng);
14133 
14134 		hal_srng_set_flush_last_ts(hal_srng);
14135 
14136 		dp_debug("flushed");
14137 	}
14138 }
14139 #endif
14140 
14141 #ifdef DP_TX_TRACKING
14142 
14143 #define DP_TX_COMP_MAX_LATENCY_MS 60000
14144 /**
14145  * dp_tx_comp_delay_check() - calculate time latency for tx completion per pkt
14146  * @tx_desc: tx descriptor
14147  *
14148  * Calculate time latency for tx completion per pkt and trigger self recovery
14149  * when the delay is more than threshold value.
14150  *
14151  * Return: True if delay is more than threshold
14152  */
14153 static bool dp_tx_comp_delay_check(struct dp_tx_desc_s *tx_desc)
14154 {
14155 	uint64_t time_latency, timestamp_tick = tx_desc->timestamp_tick;
14156 	qdf_ktime_t current_time = qdf_ktime_real_get();
14157 	qdf_ktime_t timestamp = tx_desc->timestamp;
14158 
14159 	if (!timestamp)
14160 		return false;
14161 
14162 	if (dp_tx_pkt_tracepoints_enabled()) {
14163 		time_latency = qdf_ktime_to_ms(current_time) -
14164 				qdf_ktime_to_ms(timestamp);
14165 		if (time_latency >= DP_TX_COMP_MAX_LATENCY_MS) {
14166 			dp_err_rl("enqueued: %llu ms, current : %llu ms",
14167 				  timestamp, current_time);
14168 			return true;
14169 		}
14170 	} else {
14171 		current_time = qdf_system_ticks();
14172 		time_latency = qdf_system_ticks_to_msecs(current_time -
14173 							 timestamp_tick);
14174 		if (time_latency >= DP_TX_COMP_MAX_LATENCY_MS) {
14175 			dp_err_rl("enqueued: %u ms, current : %u ms",
14176 				  qdf_system_ticks_to_msecs(timestamp),
14177 				  qdf_system_ticks_to_msecs(current_time));
14178 			return true;
14179 		}
14180 	}
14181 
14182 	return false;
14183 }
14184 
14185 /**
14186  * dp_find_missing_tx_comp() - check for leaked descriptor in tx path
14187  * @soc - DP SOC context
14188  *
14189  * Parse through descriptors in all pools and validate magic number and
14190  * completion time. Trigger self recovery if magic value is corrupted.
14191  *
14192  * Return: None.
14193  */
14194 static void dp_find_missing_tx_comp(struct dp_soc *soc)
14195 {
14196 	uint8_t i;
14197 	uint32_t j;
14198 	uint32_t num_desc, page_id, offset;
14199 	uint16_t num_desc_per_page;
14200 	struct dp_tx_desc_s *tx_desc = NULL;
14201 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
14202 
14203 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
14204 		tx_desc_pool = &soc->tx_desc[i];
14205 		if (!(tx_desc_pool->pool_size) ||
14206 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
14207 		    !(tx_desc_pool->desc_pages.cacheable_pages))
14208 			continue;
14209 
14210 		num_desc = tx_desc_pool->pool_size;
14211 		num_desc_per_page =
14212 			tx_desc_pool->desc_pages.num_element_per_page;
14213 		for (j = 0; j < num_desc; j++) {
14214 			page_id = j / num_desc_per_page;
14215 			offset = j % num_desc_per_page;
14216 
14217 			if (qdf_unlikely(!(tx_desc_pool->
14218 					 desc_pages.cacheable_pages)))
14219 				break;
14220 
14221 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
14222 			if (tx_desc->magic == DP_TX_MAGIC_PATTERN_FREE) {
14223 				continue;
14224 			} else if (tx_desc->magic ==
14225 				   DP_TX_MAGIC_PATTERN_INUSE) {
14226 				if (dp_tx_comp_delay_check(tx_desc)) {
14227 					dp_err_rl("Tx completion not rcvd for id: %u",
14228 						  tx_desc->id);
14229 					if (tx_desc->vdev_id == DP_INVALID_VDEV_ID) {
14230 						tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
14231 						dp_err_rl("Freed tx_desc %u",
14232 							  tx_desc->id);
14233 						dp_tx_comp_free_buf(soc,
14234 								    tx_desc,
14235 								    false);
14236 						dp_tx_desc_release(tx_desc, i);
14237 						DP_STATS_INC(soc,
14238 							     tx.tx_comp_force_freed, 1);
14239 					}
14240 				}
14241 			} else {
14242 				dp_err_rl("tx desc %u corrupted, flags: 0x%x",
14243 					  tx_desc->id, tx_desc->flags);
14244 			}
14245 		}
14246 	}
14247 }
14248 #else
14249 static inline void dp_find_missing_tx_comp(struct dp_soc *soc)
14250 {
14251 }
14252 #endif
14253 
14254 #ifdef FEATURE_RUNTIME_PM
14255 /**
14256  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
14257  * @soc_hdl: Datapath soc handle
14258  * @pdev_id: id of data path pdev handle
14259  *
14260  * DP is ready to runtime suspend if there are no pending TX packets.
14261  *
14262  * Return: QDF_STATUS
14263  */
14264 static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
14265 {
14266 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14267 	struct dp_pdev *pdev;
14268 	uint8_t i;
14269 	int32_t tx_pending;
14270 
14271 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14272 	if (!pdev) {
14273 		dp_err("pdev is NULL");
14274 		return QDF_STATUS_E_INVAL;
14275 	}
14276 
14277 	/* Abort if there are any pending TX packets */
14278 	tx_pending = dp_get_tx_pending(dp_pdev_to_cdp_pdev(pdev));
14279 	if (tx_pending) {
14280 		dp_info_rl("%pK: Abort suspend due to pending TX packets %d",
14281 			   soc, tx_pending);
14282 		dp_find_missing_tx_comp(soc);
14283 		/* perform a force flush if tx is pending */
14284 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
14285 			hal_srng_set_event(soc->tcl_data_ring[i].hal_srng,
14286 					   HAL_SRNG_FLUSH_EVENT);
14287 			dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
14288 		}
14289 		qdf_atomic_set(&soc->tx_pending_rtpm, 0);
14290 
14291 		return QDF_STATUS_E_AGAIN;
14292 	}
14293 
14294 	if (dp_runtime_get_refcount(soc)) {
14295 		dp_init_info("refcount: %d", dp_runtime_get_refcount(soc));
14296 
14297 		return QDF_STATUS_E_AGAIN;
14298 	}
14299 
14300 	if (soc->intr_mode == DP_INTR_POLL)
14301 		qdf_timer_stop(&soc->int_timer);
14302 
14303 	dp_rx_fst_update_pm_suspend_status(soc, true);
14304 
14305 	return QDF_STATUS_SUCCESS;
14306 }
14307 
14308 #define DP_FLUSH_WAIT_CNT 10
14309 #define DP_RUNTIME_SUSPEND_WAIT_MS 10
14310 /**
14311  * dp_runtime_resume() - ensure DP is ready to runtime resume
14312  * @soc_hdl: Datapath soc handle
14313  * @pdev_id: id of data path pdev handle
14314  *
14315  * Resume DP for runtime PM.
14316  *
14317  * Return: QDF_STATUS
14318  */
14319 static QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
14320 {
14321 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14322 	int i, suspend_wait = 0;
14323 
14324 	if (soc->intr_mode == DP_INTR_POLL)
14325 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
14326 
14327 	/*
14328 	 * Wait until dp runtime refcount becomes zero or time out, then flush
14329 	 * pending tx for runtime suspend.
14330 	 */
14331 	while (dp_runtime_get_refcount(soc) &&
14332 	       suspend_wait < DP_FLUSH_WAIT_CNT) {
14333 		qdf_sleep(DP_RUNTIME_SUSPEND_WAIT_MS);
14334 		suspend_wait++;
14335 	}
14336 
14337 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
14338 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
14339 	}
14340 	qdf_atomic_set(&soc->tx_pending_rtpm, 0);
14341 
14342 	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
14343 	dp_rx_fst_update_pm_suspend_status(soc, false);
14344 
14345 	return QDF_STATUS_SUCCESS;
14346 }
14347 #endif /* FEATURE_RUNTIME_PM */
14348 
14349 /**
14350  * dp_tx_get_success_ack_stats() - get tx success completion count
14351  * @soc_hdl: Datapath soc handle
14352  * @vdevid: vdev identifier
14353  *
14354  * Return: tx success ack count
14355  */
14356 static uint32_t dp_tx_get_success_ack_stats(struct cdp_soc_t *soc_hdl,
14357 					    uint8_t vdev_id)
14358 {
14359 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14360 	struct cdp_vdev_stats *vdev_stats = NULL;
14361 	uint32_t tx_success;
14362 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
14363 						     DP_MOD_ID_CDP);
14364 
14365 	if (!vdev) {
14366 		dp_cdp_err("%pK: Invalid vdev id %d", soc, vdev_id);
14367 		return 0;
14368 	}
14369 
14370 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
14371 	if (!vdev_stats) {
14372 		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats", soc);
14373 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
14374 		return 0;
14375 	}
14376 
14377 	dp_aggregate_vdev_stats(vdev, vdev_stats);
14378 
14379 	tx_success = vdev_stats->tx.tx_success.num;
14380 	qdf_mem_free(vdev_stats);
14381 
14382 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
14383 	return tx_success;
14384 }
14385 
14386 #ifdef WLAN_SUPPORT_DATA_STALL
14387 /**
14388  * dp_register_data_stall_detect_cb() - register data stall callback
14389  * @soc_hdl: Datapath soc handle
14390  * @pdev_id: id of data path pdev handle
14391  * @data_stall_detect_callback: data stall callback function
14392  *
14393  * Return: QDF_STATUS Enumeration
14394  */
14395 static
14396 QDF_STATUS dp_register_data_stall_detect_cb(
14397 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
14398 			data_stall_detect_cb data_stall_detect_callback)
14399 {
14400 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14401 	struct dp_pdev *pdev;
14402 
14403 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14404 	if (!pdev) {
14405 		dp_err("pdev NULL!");
14406 		return QDF_STATUS_E_INVAL;
14407 	}
14408 
14409 	pdev->data_stall_detect_callback = data_stall_detect_callback;
14410 	return QDF_STATUS_SUCCESS;
14411 }
14412 
14413 /**
14414  * dp_deregister_data_stall_detect_cb() - de-register data stall callback
14415  * @soc_hdl: Datapath soc handle
14416  * @pdev_id: id of data path pdev handle
14417  * @data_stall_detect_callback: data stall callback function
14418  *
14419  * Return: QDF_STATUS Enumeration
14420  */
14421 static
14422 QDF_STATUS dp_deregister_data_stall_detect_cb(
14423 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
14424 			data_stall_detect_cb data_stall_detect_callback)
14425 {
14426 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14427 	struct dp_pdev *pdev;
14428 
14429 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14430 	if (!pdev) {
14431 		dp_err("pdev NULL!");
14432 		return QDF_STATUS_E_INVAL;
14433 	}
14434 
14435 	pdev->data_stall_detect_callback = NULL;
14436 	return QDF_STATUS_SUCCESS;
14437 }
14438 
14439 /**
14440  * dp_txrx_post_data_stall_event() - post data stall event
14441  * @soc_hdl: Datapath soc handle
14442  * @indicator: Module triggering data stall
14443  * @data_stall_type: data stall event type
14444  * @pdev_id: pdev id
14445  * @vdev_id_bitmap: vdev id bitmap
14446  * @recovery_type: data stall recovery type
14447  *
14448  * Return: None
14449  */
14450 static void
14451 dp_txrx_post_data_stall_event(struct cdp_soc_t *soc_hdl,
14452 			      enum data_stall_log_event_indicator indicator,
14453 			      enum data_stall_log_event_type data_stall_type,
14454 			      uint32_t pdev_id, uint32_t vdev_id_bitmap,
14455 			      enum data_stall_log_recovery_type recovery_type)
14456 {
14457 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14458 	struct data_stall_event_info data_stall_info;
14459 	struct dp_pdev *pdev;
14460 
14461 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14462 	if (!pdev) {
14463 		dp_err("pdev NULL!");
14464 		return;
14465 	}
14466 
14467 	if (!pdev->data_stall_detect_callback) {
14468 		dp_err("data stall cb not registered!");
14469 		return;
14470 	}
14471 
14472 	dp_info("data_stall_type: %x pdev_id: %d",
14473 		data_stall_type, pdev_id);
14474 
14475 	data_stall_info.indicator = indicator;
14476 	data_stall_info.data_stall_type = data_stall_type;
14477 	data_stall_info.vdev_id_bitmap = vdev_id_bitmap;
14478 	data_stall_info.pdev_id = pdev_id;
14479 	data_stall_info.recovery_type = recovery_type;
14480 
14481 	pdev->data_stall_detect_callback(&data_stall_info);
14482 }
14483 #endif /* WLAN_SUPPORT_DATA_STALL */
14484 
14485 #ifdef WLAN_FEATURE_STATS_EXT
14486 /* rx hw stats event wait timeout in ms */
14487 #define DP_REO_STATUS_STATS_TIMEOUT 1500
14488 /**
14489  * dp_txrx_ext_stats_request - request dp txrx extended stats request
14490  * @soc_hdl: soc handle
14491  * @pdev_id: pdev id
14492  * @req: stats request
14493  *
14494  * Return: QDF_STATUS
14495  */
14496 static QDF_STATUS
14497 dp_txrx_ext_stats_request(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
14498 			  struct cdp_txrx_ext_stats *req)
14499 {
14500 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
14501 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14502 	int i = 0;
14503 	int tcl_ring_full = 0;
14504 
14505 	if (!pdev) {
14506 		dp_err("pdev is null");
14507 		return QDF_STATUS_E_INVAL;
14508 	}
14509 
14510 	dp_aggregate_pdev_stats(pdev);
14511 
14512 	for(i = 0 ; i < MAX_TCL_DATA_RINGS; i++)
14513 		tcl_ring_full += soc->stats.tx.tcl_ring_full[i];
14514 
14515 	req->tx_msdu_enqueue = pdev->stats.tx_i.processed.num;
14516 	req->tx_msdu_overflow = tcl_ring_full;
14517 	req->rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
14518 	req->rx_mpdu_delivered = soc->ext_stats.rx_mpdu_received;
14519 	req->rx_mpdu_missed = pdev->stats.err.reo_error;
14520 	/* only count error source from RXDMA */
14521 	req->rx_mpdu_error = pdev->stats.err.rxdma_error;
14522 
14523 	dp_info("ext stats: tx_msdu_enq = %u, tx_msdu_overflow = %u, "
14524 		"tx_mpdu_recieve = %u, rx_mpdu_delivered = %u, "
14525 		"rx_mpdu_missed = %u, rx_mpdu_error = %u",
14526 		req->tx_msdu_enqueue,
14527 		req->tx_msdu_overflow,
14528 		req->rx_mpdu_received,
14529 		req->rx_mpdu_delivered,
14530 		req->rx_mpdu_missed,
14531 		req->rx_mpdu_error);
14532 
14533 	return QDF_STATUS_SUCCESS;
14534 }
14535 
14536 /**
14537  * dp_rx_hw_stats_cb - request rx hw stats response callback
14538  * @soc: soc handle
14539  * @cb_ctxt: callback context
14540  * @reo_status: reo command response status
14541  *
14542  * Return: None
14543  */
14544 static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
14545 			      union hal_reo_status *reo_status)
14546 {
14547 	struct dp_req_rx_hw_stats_t *rx_hw_stats = cb_ctxt;
14548 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
14549 	bool is_query_timeout;
14550 
14551 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
14552 	is_query_timeout = rx_hw_stats->is_query_timeout;
14553 	/* free the cb_ctxt if all pending tid stats query is received */
14554 	if (qdf_atomic_dec_and_test(&rx_hw_stats->pending_tid_stats_cnt)) {
14555 		if (!is_query_timeout) {
14556 			qdf_event_set(&soc->rx_hw_stats_event);
14557 			soc->is_last_stats_ctx_init = false;
14558 		}
14559 
14560 		qdf_mem_free(rx_hw_stats);
14561 	}
14562 
14563 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
14564 		dp_info("REO stats failure %d",
14565 			queue_status->header.status);
14566 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14567 		return;
14568 	}
14569 
14570 	if (!is_query_timeout) {
14571 		soc->ext_stats.rx_mpdu_received +=
14572 					queue_status->mpdu_frms_cnt;
14573 		soc->ext_stats.rx_mpdu_missed +=
14574 					queue_status->hole_cnt;
14575 	}
14576 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14577 }
14578 
14579 /**
14580  * dp_request_rx_hw_stats - request rx hardware stats
14581  * @soc_hdl: soc handle
14582  * @vdev_id: vdev id
14583  *
14584  * Return: None
14585  */
14586 static QDF_STATUS
14587 dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
14588 {
14589 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
14590 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
14591 						     DP_MOD_ID_CDP);
14592 	struct dp_peer *peer = NULL;
14593 	QDF_STATUS status;
14594 	struct dp_req_rx_hw_stats_t *rx_hw_stats;
14595 	int rx_stats_sent_cnt = 0;
14596 	uint32_t last_rx_mpdu_received;
14597 	uint32_t last_rx_mpdu_missed;
14598 
14599 	if (!vdev) {
14600 		dp_err("vdev is null for vdev_id: %u", vdev_id);
14601 		status = QDF_STATUS_E_INVAL;
14602 		goto out;
14603 	}
14604 
14605 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_CDP);
14606 
14607 	if (!peer) {
14608 		dp_err("Peer is NULL");
14609 		status = QDF_STATUS_E_INVAL;
14610 		goto out;
14611 	}
14612 
14613 	rx_hw_stats = qdf_mem_malloc(sizeof(*rx_hw_stats));
14614 
14615 	if (!rx_hw_stats) {
14616 		dp_err("malloc failed for hw stats structure");
14617 		status = QDF_STATUS_E_INVAL;
14618 		goto out;
14619 	}
14620 
14621 	qdf_event_reset(&soc->rx_hw_stats_event);
14622 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
14623 	/* save the last soc cumulative stats and reset it to 0 */
14624 	last_rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
14625 	last_rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
14626 	soc->ext_stats.rx_mpdu_received = 0;
14627 
14628 	rx_stats_sent_cnt =
14629 		dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, rx_hw_stats);
14630 	if (!rx_stats_sent_cnt) {
14631 		dp_err("no tid stats sent successfully");
14632 		qdf_mem_free(rx_hw_stats);
14633 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14634 		status = QDF_STATUS_E_INVAL;
14635 		goto out;
14636 	}
14637 	qdf_atomic_set(&rx_hw_stats->pending_tid_stats_cnt,
14638 		       rx_stats_sent_cnt);
14639 	rx_hw_stats->is_query_timeout = false;
14640 	soc->is_last_stats_ctx_init = true;
14641 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14642 
14643 	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
14644 				       DP_REO_STATUS_STATS_TIMEOUT);
14645 
14646 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
14647 	if (status != QDF_STATUS_SUCCESS) {
14648 		dp_info("rx hw stats event timeout");
14649 		if (soc->is_last_stats_ctx_init)
14650 			rx_hw_stats->is_query_timeout = true;
14651 		/**
14652 		 * If query timeout happened, use the last saved stats
14653 		 * for this time query.
14654 		 */
14655 		soc->ext_stats.rx_mpdu_received = last_rx_mpdu_received;
14656 		soc->ext_stats.rx_mpdu_missed = last_rx_mpdu_missed;
14657 	}
14658 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14659 
14660 out:
14661 	if (peer)
14662 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
14663 	if (vdev)
14664 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
14665 
14666 	return status;
14667 }
14668 
14669 /**
14670  * dp_reset_rx_hw_ext_stats - Reset rx hardware ext stats
14671  * @soc_hdl: soc handle
14672  *
14673  * Return: None
14674  */
14675 static
14676 void dp_reset_rx_hw_ext_stats(struct cdp_soc_t *soc_hdl)
14677 {
14678 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
14679 
14680 	soc->ext_stats.rx_mpdu_received = 0;
14681 	soc->ext_stats.rx_mpdu_missed = 0;
14682 }
14683 #endif /* WLAN_FEATURE_STATS_EXT */
14684 
14685 static
14686 uint32_t dp_get_tx_rings_grp_bitmap(struct cdp_soc_t *soc_hdl)
14687 {
14688 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
14689 
14690 	return soc->wlan_cfg_ctx->tx_rings_grp_bitmap;
14691 }
14692 
14693 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
14694 /**
14695  * dp_mark_first_wakeup_packet() - set flag to indicate that
14696  *    fw is compatible for marking first packet after wow wakeup
14697  * @soc_hdl: Datapath soc handle
14698  * @pdev_id: id of data path pdev handle
14699  * @value: 1 for enabled/ 0 for disabled
14700  *
14701  * Return: None
14702  */
14703 static void dp_mark_first_wakeup_packet(struct cdp_soc_t *soc_hdl,
14704 					uint8_t pdev_id, uint8_t value)
14705 {
14706 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14707 	struct dp_pdev *pdev;
14708 
14709 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14710 	if (!pdev) {
14711 		dp_err("pdev is NULL");
14712 		return;
14713 	}
14714 
14715 	pdev->is_first_wakeup_packet = value;
14716 }
14717 #endif
14718 
14719 #ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
14720 /**
14721  * dp_set_peer_txq_flush_config() - Set the peer txq flush configuration
14722  * @soc_hdl: Opaque handle to the DP soc object
14723  * @vdev_id: VDEV identifier
14724  * @mac: MAC address of the peer
14725  * @ac: access category mask
14726  * @tid: TID mask
14727  * @policy: Flush policy
14728  *
14729  * Return: 0 on success, errno on failure
14730  */
14731 static int dp_set_peer_txq_flush_config(struct cdp_soc_t *soc_hdl,
14732 					uint8_t vdev_id, uint8_t *mac,
14733 					uint8_t ac, uint32_t tid,
14734 					enum cdp_peer_txq_flush_policy policy)
14735 {
14736 	struct dp_soc *soc;
14737 
14738 	if (!soc_hdl) {
14739 		dp_err("soc is null");
14740 		return -EINVAL;
14741 	}
14742 	soc = cdp_soc_t_to_dp_soc(soc_hdl);
14743 	return target_if_peer_txq_flush_config(soc->ctrl_psoc, vdev_id,
14744 					       mac, ac, tid, policy);
14745 }
14746 #endif
14747 
14748 #ifdef CONNECTIVITY_PKTLOG
14749 /**
14750  * dp_register_packetdump_callback() - registers
14751  *  tx data packet, tx mgmt. packet and rx data packet
14752  *  dump callback handler.
14753  *
14754  * @soc_hdl: Datapath soc handle
14755  * @pdev_id: id of data path pdev handle
14756  * @dp_tx_packetdump_cb: tx packetdump cb
14757  * @dp_rx_packetdump_cb: rx packetdump cb
14758  *
14759  * This function is used to register tx data pkt, tx mgmt.
14760  * pkt and rx data pkt dump callback
14761  *
14762  * Return: None
14763  *
14764  */
14765 static inline
14766 void dp_register_packetdump_callback(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
14767 				     ol_txrx_pktdump_cb dp_tx_packetdump_cb,
14768 				     ol_txrx_pktdump_cb dp_rx_packetdump_cb)
14769 {
14770 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14771 	struct dp_pdev *pdev;
14772 
14773 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14774 	if (!pdev) {
14775 		dp_err("pdev is NULL!");
14776 		return;
14777 	}
14778 
14779 	pdev->dp_tx_packetdump_cb = dp_tx_packetdump_cb;
14780 	pdev->dp_rx_packetdump_cb = dp_rx_packetdump_cb;
14781 }
14782 
14783 /**
14784  * dp_deregister_packetdump_callback() - deregidters
14785  *  tx data packet, tx mgmt. packet and rx data packet
14786  *  dump callback handler
14787  * @soc_hdl: Datapath soc handle
14788  * @pdev_id: id of data path pdev handle
14789  *
14790  * This function is used to deregidter tx data pkt.,
14791  * tx mgmt. pkt and rx data pkt. dump callback
14792  *
14793  * Return: None
14794  *
14795  */
14796 static inline
14797 void dp_deregister_packetdump_callback(struct cdp_soc_t *soc_hdl,
14798 				       uint8_t pdev_id)
14799 {
14800 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14801 	struct dp_pdev *pdev;
14802 
14803 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14804 	if (!pdev) {
14805 		dp_err("pdev is NULL!");
14806 		return;
14807 	}
14808 
14809 	pdev->dp_tx_packetdump_cb = NULL;
14810 	pdev->dp_rx_packetdump_cb = NULL;
14811 }
14812 #endif
14813 
14814 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
14815 /**
14816  * dp_set_bus_vote_lvl_high() - Take a vote on bus bandwidth from dp
14817  * @soc_hdl: Datapath soc handle
14818  * @high: whether the bus bw is high or not
14819  *
14820  * Return: void
14821  */
14822 static void
14823 dp_set_bus_vote_lvl_high(ol_txrx_soc_handle soc_hdl, bool high)
14824 {
14825 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14826 
14827 	soc->high_throughput = high;
14828 }
14829 
14830 /**
14831  * dp_get_bus_vote_lvl_high() - get bus bandwidth vote to dp
14832  * @soc_hdl: Datapath soc handle
14833  *
14834  * Return: bool
14835  */
14836 static bool
14837 dp_get_bus_vote_lvl_high(ol_txrx_soc_handle soc_hdl)
14838 {
14839 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14840 
14841 	return soc->high_throughput;
14842 }
14843 #endif
14844 
14845 #ifdef DP_PEER_EXTENDED_API
14846 static struct cdp_misc_ops dp_ops_misc = {
14847 #ifdef FEATURE_WLAN_TDLS
14848 	.tx_non_std = dp_tx_non_std,
14849 #endif /* FEATURE_WLAN_TDLS */
14850 	.get_opmode = dp_get_opmode,
14851 #ifdef FEATURE_RUNTIME_PM
14852 	.runtime_suspend = dp_runtime_suspend,
14853 	.runtime_resume = dp_runtime_resume,
14854 #endif /* FEATURE_RUNTIME_PM */
14855 	.get_num_rx_contexts = dp_get_num_rx_contexts,
14856 	.get_tx_ack_stats = dp_tx_get_success_ack_stats,
14857 #ifdef WLAN_SUPPORT_DATA_STALL
14858 	.txrx_data_stall_cb_register = dp_register_data_stall_detect_cb,
14859 	.txrx_data_stall_cb_deregister = dp_deregister_data_stall_detect_cb,
14860 	.txrx_post_data_stall_event = dp_txrx_post_data_stall_event,
14861 #endif
14862 
14863 #ifdef WLAN_FEATURE_STATS_EXT
14864 	.txrx_ext_stats_request = dp_txrx_ext_stats_request,
14865 	.request_rx_hw_stats = dp_request_rx_hw_stats,
14866 	.reset_rx_hw_ext_stats = dp_reset_rx_hw_ext_stats,
14867 #endif /* WLAN_FEATURE_STATS_EXT */
14868 	.vdev_inform_ll_conn = dp_vdev_inform_ll_conn,
14869 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
14870 	.set_swlm_enable = dp_soc_set_swlm_enable,
14871 	.is_swlm_enabled = dp_soc_is_swlm_enabled,
14872 #endif
14873 	.display_txrx_hw_info = dp_display_srng_info,
14874 	.get_tx_rings_grp_bitmap = dp_get_tx_rings_grp_bitmap,
14875 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
14876 	.mark_first_wakeup_packet = dp_mark_first_wakeup_packet,
14877 #endif
14878 #ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
14879 	.set_peer_txq_flush_config = dp_set_peer_txq_flush_config,
14880 #endif
14881 #ifdef CONNECTIVITY_PKTLOG
14882 	.register_pktdump_cb = dp_register_packetdump_callback,
14883 	.unregister_pktdump_cb = dp_deregister_packetdump_callback,
14884 #endif
14885 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
14886 	.set_bus_vote_lvl_high = dp_set_bus_vote_lvl_high,
14887 	.get_bus_vote_lvl_high = dp_get_bus_vote_lvl_high,
14888 #endif
14889 };
14890 #endif
14891 
14892 #ifdef DP_FLOW_CTL
14893 static struct cdp_flowctl_ops dp_ops_flowctl = {
14894 	/* WIFI 3.0 DP implement as required. */
14895 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
14896 	.flow_pool_map_handler = dp_tx_flow_pool_map,
14897 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
14898 	.register_pause_cb = dp_txrx_register_pause_cb,
14899 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
14900 	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
14901 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
14902 };
14903 
14904 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
14905 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
14906 };
14907 #endif
14908 
14909 #ifdef IPA_OFFLOAD
14910 static struct cdp_ipa_ops dp_ops_ipa = {
14911 	.ipa_get_resource = dp_ipa_get_resource,
14912 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
14913 	.ipa_iounmap_doorbell_vaddr = dp_ipa_iounmap_doorbell_vaddr,
14914 	.ipa_op_response = dp_ipa_op_response,
14915 	.ipa_register_op_cb = dp_ipa_register_op_cb,
14916 	.ipa_deregister_op_cb = dp_ipa_deregister_op_cb,
14917 	.ipa_get_stat = dp_ipa_get_stat,
14918 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
14919 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
14920 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
14921 	.ipa_setup = dp_ipa_setup,
14922 	.ipa_cleanup = dp_ipa_cleanup,
14923 	.ipa_setup_iface = dp_ipa_setup_iface,
14924 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
14925 	.ipa_enable_pipes = dp_ipa_enable_pipes,
14926 	.ipa_disable_pipes = dp_ipa_disable_pipes,
14927 	.ipa_set_perf_level = dp_ipa_set_perf_level,
14928 	.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd,
14929 	.ipa_tx_buf_smmu_mapping = dp_ipa_tx_buf_smmu_mapping,
14930 	.ipa_tx_buf_smmu_unmapping = dp_ipa_tx_buf_smmu_unmapping,
14931 #ifdef IPA_WDS_EASYMESH_FEATURE
14932 	.ipa_ast_create = dp_ipa_ast_create,
14933 #endif
14934 };
14935 #endif
14936 
14937 #ifdef DP_POWER_SAVE
14938 static QDF_STATUS dp_bus_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
14939 {
14940 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14941 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14942 	int timeout = SUSPEND_DRAIN_WAIT;
14943 	int drain_wait_delay = 50; /* 50 ms */
14944 	int32_t tx_pending;
14945 
14946 	if (qdf_unlikely(!pdev)) {
14947 		dp_err("pdev is NULL");
14948 		return QDF_STATUS_E_INVAL;
14949 	}
14950 
14951 	/* Abort if there are any pending TX packets */
14952 	while ((tx_pending = dp_get_tx_pending((struct cdp_pdev *)pdev))) {
14953 		qdf_sleep(drain_wait_delay);
14954 		if (timeout <= 0) {
14955 			dp_info("TX frames are pending %d, abort suspend",
14956 				tx_pending);
14957 			dp_find_missing_tx_comp(soc);
14958 			return QDF_STATUS_E_TIMEOUT;
14959 		}
14960 		timeout = timeout - drain_wait_delay;
14961 	}
14962 
14963 	if (soc->intr_mode == DP_INTR_POLL)
14964 		qdf_timer_stop(&soc->int_timer);
14965 
14966 	/* Stop monitor reap timer and reap any pending frames in ring */
14967 	dp_monitor_reap_timer_suspend(soc);
14968 
14969 	dp_suspend_fse_cache_flush(soc);
14970 	dp_rx_fst_update_pm_suspend_status(soc, true);
14971 
14972 	return QDF_STATUS_SUCCESS;
14973 }
14974 
14975 static QDF_STATUS dp_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
14976 {
14977 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14978 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14979 	uint8_t i;
14980 
14981 	if (qdf_unlikely(!pdev)) {
14982 		dp_err("pdev is NULL");
14983 		return QDF_STATUS_E_INVAL;
14984 	}
14985 
14986 	if (soc->intr_mode == DP_INTR_POLL)
14987 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
14988 
14989 	/* Start monitor reap timer */
14990 	dp_monitor_reap_timer_start(soc, CDP_MON_REAP_SOURCE_ANY);
14991 
14992 	dp_resume_fse_cache_flush(soc);
14993 
14994 	for (i = 0; i < soc->num_tcl_data_rings; i++)
14995 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
14996 
14997 	dp_rx_fst_update_pm_suspend_status(soc, false);
14998 
14999 	dp_rx_fst_requeue_wq(soc);
15000 
15001 	return QDF_STATUS_SUCCESS;
15002 }
15003 
15004 /**
15005  * dp_process_wow_ack_rsp() - process wow ack response
15006  * @soc_hdl: datapath soc handle
15007  * @pdev_id: data path pdev handle id
15008  *
15009  * Return: none
15010  */
15011 static void dp_process_wow_ack_rsp(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
15012 {
15013 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15014 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15015 
15016 	if (qdf_unlikely(!pdev)) {
15017 		dp_err("pdev is NULL");
15018 		return;
15019 	}
15020 
15021 	/*
15022 	 * As part of wow enable FW disables the mon status ring and in wow ack
15023 	 * response from FW reap mon status ring to make sure no packets pending
15024 	 * in the ring.
15025 	 */
15026 	dp_monitor_reap_timer_suspend(soc);
15027 }
15028 
15029 /**
15030  * dp_process_target_suspend_req() - process target suspend request
15031  * @soc_hdl: datapath soc handle
15032  * @pdev_id: data path pdev handle id
15033  *
15034  * Return: none
15035  */
15036 static void dp_process_target_suspend_req(struct cdp_soc_t *soc_hdl,
15037 					  uint8_t pdev_id)
15038 {
15039 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15040 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15041 
15042 	if (qdf_unlikely(!pdev)) {
15043 		dp_err("pdev is NULL");
15044 		return;
15045 	}
15046 
15047 	/* Stop monitor reap timer and reap any pending frames in ring */
15048 	dp_monitor_reap_timer_suspend(soc);
15049 }
15050 
15051 static struct cdp_bus_ops dp_ops_bus = {
15052 	.bus_suspend = dp_bus_suspend,
15053 	.bus_resume = dp_bus_resume,
15054 	.process_wow_ack_rsp = dp_process_wow_ack_rsp,
15055 	.process_target_suspend_req = dp_process_target_suspend_req
15056 };
15057 #endif
15058 
15059 #ifdef DP_FLOW_CTL
15060 static struct cdp_throttle_ops dp_ops_throttle = {
15061 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
15062 };
15063 
15064 static struct cdp_cfg_ops dp_ops_cfg = {
15065 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
15066 };
15067 #endif
15068 
15069 #ifdef DP_PEER_EXTENDED_API
15070 static struct cdp_ocb_ops dp_ops_ocb = {
15071 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
15072 };
15073 
15074 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
15075 	.clear_stats = dp_txrx_clear_dump_stats,
15076 };
15077 
15078 static struct cdp_peer_ops dp_ops_peer = {
15079 	.register_peer = dp_register_peer,
15080 	.clear_peer = dp_clear_peer,
15081 	.find_peer_exist = dp_find_peer_exist,
15082 	.find_peer_exist_on_vdev = dp_find_peer_exist_on_vdev,
15083 	.find_peer_exist_on_other_vdev = dp_find_peer_exist_on_other_vdev,
15084 	.peer_state_update = dp_peer_state_update,
15085 	.get_vdevid = dp_get_vdevid,
15086 	.get_vdev_by_peer_addr = dp_get_vdev_by_peer_addr,
15087 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
15088 	.get_peer_state = dp_get_peer_state,
15089 	.peer_flush_frags = dp_peer_flush_frags,
15090 	.set_peer_as_tdls_peer = dp_set_peer_as_tdls_peer,
15091 };
15092 #endif
15093 
15094 static void dp_soc_txrx_ops_attach(struct dp_soc *soc)
15095 {
15096 	soc->cdp_soc.ops->cmn_drv_ops = &dp_ops_cmn;
15097 	soc->cdp_soc.ops->ctrl_ops = &dp_ops_ctrl;
15098 	soc->cdp_soc.ops->me_ops = &dp_ops_me;
15099 	soc->cdp_soc.ops->host_stats_ops = &dp_ops_host_stats;
15100 	soc->cdp_soc.ops->wds_ops = &dp_ops_wds;
15101 	soc->cdp_soc.ops->raw_ops = &dp_ops_raw;
15102 #ifdef PEER_FLOW_CONTROL
15103 	soc->cdp_soc.ops->pflow_ops = &dp_ops_pflow;
15104 #endif /* PEER_FLOW_CONTROL */
15105 #ifdef DP_PEER_EXTENDED_API
15106 	soc->cdp_soc.ops->misc_ops = &dp_ops_misc;
15107 	soc->cdp_soc.ops->ocb_ops = &dp_ops_ocb;
15108 	soc->cdp_soc.ops->peer_ops = &dp_ops_peer;
15109 	soc->cdp_soc.ops->mob_stats_ops = &dp_ops_mob_stats;
15110 #endif
15111 #ifdef DP_FLOW_CTL
15112 	soc->cdp_soc.ops->cfg_ops = &dp_ops_cfg;
15113 	soc->cdp_soc.ops->flowctl_ops = &dp_ops_flowctl;
15114 	soc->cdp_soc.ops->l_flowctl_ops = &dp_ops_l_flowctl;
15115 	soc->cdp_soc.ops->throttle_ops = &dp_ops_throttle;
15116 #endif
15117 #ifdef IPA_OFFLOAD
15118 	soc->cdp_soc.ops->ipa_ops = &dp_ops_ipa;
15119 #endif
15120 #ifdef DP_POWER_SAVE
15121 	soc->cdp_soc.ops->bus_ops = &dp_ops_bus;
15122 #endif
15123 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
15124 	soc->cdp_soc.ops->cfr_ops = &dp_ops_cfr;
15125 #endif
15126 #ifdef WLAN_SUPPORT_MSCS
15127 	soc->cdp_soc.ops->mscs_ops = &dp_ops_mscs;
15128 #endif
15129 #ifdef WLAN_SUPPORT_MESH_LATENCY
15130 	soc->cdp_soc.ops->mesh_latency_ops = &dp_ops_mesh_latency;
15131 #endif
15132 #ifdef CONFIG_SAWF_DEF_QUEUES
15133 	soc->cdp_soc.ops->sawf_ops = &dp_ops_sawf;
15134 #endif
15135 #ifdef WLAN_SUPPORT_SCS
15136 	soc->cdp_soc.ops->scs_ops = &dp_ops_scs;
15137 #endif
15138 };
15139 
15140 /*
15141  * dp_soc_set_txrx_ring_map()
15142  * @dp_soc: DP handler for soc
15143  *
15144  * Return: Void
15145  */
15146 void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
15147 {
15148 	uint32_t i;
15149 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
15150 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
15151 	}
15152 }
15153 
15154 qdf_export_symbol(dp_soc_set_txrx_ring_map);
15155 
15156 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
15157 	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574) || \
15158 	defined(QCA_WIFI_QCA5332)
15159 /**
15160  * dp_soc_attach_wifi3() - Attach txrx SOC
15161  * @ctrl_psoc: Opaque SOC handle from control plane
15162  * @params: SOC attach params
15163  *
15164  * Return: DP SOC handle on success, NULL on failure
15165  */
15166 struct cdp_soc_t *
15167 dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
15168 		    struct cdp_soc_attach_params *params)
15169 {
15170 	struct dp_soc *dp_soc = NULL;
15171 
15172 	dp_soc = dp_soc_attach(ctrl_psoc, params);
15173 
15174 	return dp_soc_to_cdp_soc_t(dp_soc);
15175 }
15176 
15177 static inline void dp_soc_set_def_pdev(struct dp_soc *soc)
15178 {
15179 	int lmac_id;
15180 
15181 	for (lmac_id = 0; lmac_id < MAX_NUM_LMAC_HW; lmac_id++) {
15182 		/*Set default host PDEV ID for lmac_id*/
15183 		wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
15184 				      INVALID_PDEV_ID, lmac_id);
15185 	}
15186 }
15187 
15188 static uint32_t
15189 dp_get_link_desc_id_start(uint16_t arch_id)
15190 {
15191 	switch (arch_id) {
15192 	case CDP_ARCH_TYPE_LI:
15193 		return LINK_DESC_ID_START_21_BITS_COOKIE;
15194 	case CDP_ARCH_TYPE_BE:
15195 		return LINK_DESC_ID_START_20_BITS_COOKIE;
15196 	default:
15197 		dp_err("unknown arch_id 0x%x", arch_id);
15198 		QDF_BUG(0);
15199 		return LINK_DESC_ID_START_21_BITS_COOKIE;
15200 	}
15201 }
15202 
15203 /**
15204  * dp_soc_attach() - Attach txrx SOC
15205  * @ctrl_psoc: Opaque SOC handle from control plane
15206  * @params: SOC attach params
15207  *
15208  * Return: DP SOC handle on success, NULL on failure
15209  */
15210 static struct dp_soc *
15211 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
15212 	      struct cdp_soc_attach_params *params)
15213 {
15214 	int int_ctx;
15215 	struct dp_soc *soc =  NULL;
15216 	uint16_t arch_id;
15217 	struct hif_opaque_softc *hif_handle = params->hif_handle;
15218 	qdf_device_t qdf_osdev = params->qdf_osdev;
15219 	struct ol_if_ops *ol_ops = params->ol_ops;
15220 	uint16_t device_id = params->device_id;
15221 
15222 	if (!hif_handle) {
15223 		dp_err("HIF handle is NULL");
15224 		goto fail0;
15225 	}
15226 	arch_id = cdp_get_arch_type_from_devid(device_id);
15227 	soc = qdf_mem_malloc(dp_get_soc_context_size(device_id));
15228 	if (!soc) {
15229 		dp_err("DP SOC memory allocation failed");
15230 		goto fail0;
15231 	}
15232 
15233 	dp_info("soc memory allocated %pK", soc);
15234 	soc->hif_handle = hif_handle;
15235 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
15236 	if (!soc->hal_soc)
15237 		goto fail1;
15238 
15239 	hif_get_cmem_info(soc->hif_handle,
15240 			  &soc->cmem_base,
15241 			  &soc->cmem_total_size);
15242 	soc->cmem_avail_size = soc->cmem_total_size;
15243 	int_ctx = 0;
15244 	soc->device_id = device_id;
15245 	soc->cdp_soc.ops =
15246 		(struct cdp_ops *)qdf_mem_malloc(sizeof(struct cdp_ops));
15247 	if (!soc->cdp_soc.ops)
15248 		goto fail1;
15249 
15250 	dp_soc_txrx_ops_attach(soc);
15251 	soc->cdp_soc.ol_ops = ol_ops;
15252 	soc->ctrl_psoc = ctrl_psoc;
15253 	soc->osdev = qdf_osdev;
15254 	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
15255 	hal_rx_get_tlv_size(soc->hal_soc, &soc->rx_pkt_tlv_size,
15256 			    &soc->rx_mon_pkt_tlv_size);
15257 	soc->idle_link_bm_id = hal_get_idle_link_bm_id(soc->hal_soc,
15258 						       params->mlo_chip_id);
15259 	soc->features.dmac_cmn_src_rxbuf_ring_enabled =
15260 		hal_dmac_cmn_src_rxbuf_ring_get(soc->hal_soc);
15261 	soc->arch_id = arch_id;
15262 	soc->link_desc_id_start =
15263 			dp_get_link_desc_id_start(soc->arch_id);
15264 	dp_configure_arch_ops(soc);
15265 
15266 	/* Reset wbm sg list and flags */
15267 	dp_rx_wbm_sg_list_reset(soc);
15268 
15269 	dp_soc_tx_hw_desc_history_attach(soc);
15270 	dp_soc_rx_history_attach(soc);
15271 	dp_soc_mon_status_ring_history_attach(soc);
15272 	dp_soc_tx_history_attach(soc);
15273 	wlan_set_srng_cfg(&soc->wlan_srng_cfg);
15274 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
15275 	if (!soc->wlan_cfg_ctx) {
15276 		dp_err("wlan_cfg_ctx failed\n");
15277 		goto fail2;
15278 	}
15279 	dp_soc_cfg_attach(soc);
15280 
15281 	if (dp_hw_link_desc_pool_banks_alloc(soc, WLAN_INVALID_PDEV_ID)) {
15282 		dp_err("failed to allocate link desc pool banks");
15283 		goto fail3;
15284 	}
15285 
15286 	if (dp_hw_link_desc_ring_alloc(soc)) {
15287 		dp_err("failed to allocate link_desc_ring");
15288 		goto fail4;
15289 	}
15290 
15291 	if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_attach(soc,
15292 								 params))) {
15293 		dp_err("unable to do target specific attach");
15294 		goto fail5;
15295 	}
15296 
15297 	if (dp_soc_srng_alloc(soc)) {
15298 		dp_err("failed to allocate soc srng rings");
15299 		goto fail6;
15300 	}
15301 
15302 	if (dp_soc_tx_desc_sw_pools_alloc(soc)) {
15303 		dp_err("dp_soc_tx_desc_sw_pools_alloc failed");
15304 		goto fail7;
15305 	}
15306 
15307 	if (!dp_monitor_modularized_enable()) {
15308 		if (dp_mon_soc_attach_wrapper(soc)) {
15309 			dp_err("failed to attach monitor");
15310 			goto fail8;
15311 		}
15312 	}
15313 
15314 	if (dp_sysfs_initialize_stats(soc) != QDF_STATUS_SUCCESS) {
15315 		dp_err("failed to initialize dp stats sysfs file");
15316 		dp_sysfs_deinitialize_stats(soc);
15317 	}
15318 
15319 	dp_soc_swlm_attach(soc);
15320 	dp_soc_set_interrupt_mode(soc);
15321 	dp_soc_set_def_pdev(soc);
15322 
15323 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
15324 		qdf_dma_mem_stats_read(),
15325 		qdf_heap_mem_stats_read(),
15326 		qdf_skb_total_mem_stats_read());
15327 
15328 	return soc;
15329 fail8:
15330 	dp_soc_tx_desc_sw_pools_free(soc);
15331 fail7:
15332 	dp_soc_srng_free(soc);
15333 fail6:
15334 	soc->arch_ops.txrx_soc_detach(soc);
15335 fail5:
15336 	dp_hw_link_desc_ring_free(soc);
15337 fail4:
15338 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
15339 fail3:
15340 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
15341 fail2:
15342 	qdf_mem_free(soc->cdp_soc.ops);
15343 fail1:
15344 	qdf_mem_free(soc);
15345 fail0:
15346 	return NULL;
15347 }
15348 
15349 /**
15350  * dp_soc_init() - Initialize txrx SOC
15351  * @dp_soc: Opaque DP SOC handle
15352  * @htc_handle: Opaque HTC handle
15353  * @hif_handle: Opaque HIF handle
15354  *
15355  * Return: DP SOC handle on success, NULL on failure
15356  */
15357 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
15358 		  struct hif_opaque_softc *hif_handle)
15359 {
15360 	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
15361 	bool is_monitor_mode = false;
15362 	uint8_t i;
15363 	int num_dp_msi;
15364 
15365 	wlan_minidump_log(soc, sizeof(*soc), soc->ctrl_psoc,
15366 			  WLAN_MD_DP_SOC, "dp_soc");
15367 
15368 	soc->hif_handle = hif_handle;
15369 
15370 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
15371 	if (!soc->hal_soc)
15372 		goto fail0;
15373 
15374 	if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_init(soc))) {
15375 		dp_err("unable to do target specific init");
15376 		goto fail0;
15377 	}
15378 
15379 	htt_soc = htt_soc_attach(soc, htc_handle);
15380 	if (!htt_soc)
15381 		goto fail1;
15382 
15383 	soc->htt_handle = htt_soc;
15384 
15385 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
15386 		goto fail2;
15387 
15388 	htt_set_htc_handle(htt_soc, htc_handle);
15389 
15390 	dp_soc_cfg_init(soc);
15391 
15392 	dp_monitor_soc_cfg_init(soc);
15393 	/* Reset/Initialize wbm sg list and flags */
15394 	dp_rx_wbm_sg_list_reset(soc);
15395 
15396 	/* Note: Any SRNG ring initialization should happen only after
15397 	 * Interrupt mode is set and followed by filling up the
15398 	 * interrupt mask. IT SHOULD ALWAYS BE IN THIS ORDER.
15399 	 */
15400 	dp_soc_set_interrupt_mode(soc);
15401 	if (soc->cdp_soc.ol_ops->get_con_mode &&
15402 	    soc->cdp_soc.ol_ops->get_con_mode() ==
15403 	    QDF_GLOBAL_MONITOR_MODE) {
15404 		is_monitor_mode = true;
15405 		soc->curr_rx_pkt_tlv_size = soc->rx_mon_pkt_tlv_size;
15406 	} else {
15407 		soc->curr_rx_pkt_tlv_size = soc->rx_pkt_tlv_size;
15408 	}
15409 
15410 	num_dp_msi = dp_get_num_msi_available(soc, soc->intr_mode);
15411 	if (num_dp_msi < 0) {
15412 		dp_init_err("%pK: dp_interrupt assignment failed", soc);
15413 		goto fail3;
15414 	}
15415 
15416 	wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, num_dp_msi,
15417 				     soc->intr_mode, is_monitor_mode);
15418 
15419 	/* initialize WBM_IDLE_LINK ring */
15420 	if (dp_hw_link_desc_ring_init(soc)) {
15421 		dp_init_err("%pK: dp_hw_link_desc_ring_init failed", soc);
15422 		goto fail3;
15423 	}
15424 
15425 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
15426 
15427 	if (dp_soc_srng_init(soc)) {
15428 		dp_init_err("%pK: dp_soc_srng_init failed", soc);
15429 		goto fail4;
15430 	}
15431 
15432 	if (htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
15433 			       htt_get_htc_handle(htt_soc),
15434 			       soc->hal_soc, soc->osdev) == NULL)
15435 		goto fail5;
15436 
15437 	/* Initialize descriptors in TCL Rings */
15438 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
15439 		hal_tx_init_data_ring(soc->hal_soc,
15440 				      soc->tcl_data_ring[i].hal_srng);
15441 	}
15442 
15443 	if (dp_soc_tx_desc_sw_pools_init(soc)) {
15444 		dp_init_err("%pK: dp_tx_soc_attach failed", soc);
15445 		goto fail6;
15446 	}
15447 
15448 	if (soc->arch_ops.txrx_soc_ppeds_start) {
15449 		if (soc->arch_ops.txrx_soc_ppeds_start(soc)) {
15450 			dp_init_err("%pK: ppeds start failed", soc);
15451 			goto fail7;
15452 		}
15453 	}
15454 
15455 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
15456 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
15457 	soc->cce_disable = false;
15458 	soc->max_ast_ageout_count = MAX_AST_AGEOUT_COUNT;
15459 
15460 	soc->sta_mode_search_policy = DP_TX_ADDR_SEARCH_ADDR_POLICY;
15461 	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
15462 	qdf_spinlock_create(&soc->vdev_map_lock);
15463 	qdf_atomic_init(&soc->num_tx_outstanding);
15464 	qdf_atomic_init(&soc->num_tx_exception);
15465 	soc->num_tx_allowed =
15466 		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);
15467 
15468 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
15469 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
15470 				CDP_CFG_MAX_PEER_ID);
15471 
15472 		if (ret != -EINVAL)
15473 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
15474 
15475 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
15476 				CDP_CFG_CCE_DISABLE);
15477 		if (ret == 1)
15478 			soc->cce_disable = true;
15479 	}
15480 
15481 	/*
15482 	 * Skip registering hw ring interrupts for WMAC2 on IPQ6018
15483 	 * and IPQ5018 WMAC2 is not there in these platforms.
15484 	 */
15485 	if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018 ||
15486 	    soc->disable_mac2_intr)
15487 		dp_soc_disable_unused_mac_intr_mask(soc, 0x2);
15488 
15489 	/*
15490 	 * Skip registering hw ring interrupts for WMAC1 on IPQ5018
15491 	 * WMAC1 is not there in this platform.
15492 	 */
15493 	if (soc->disable_mac1_intr)
15494 		dp_soc_disable_unused_mac_intr_mask(soc, 0x1);
15495 
15496 	/* setup the global rx defrag waitlist */
15497 	TAILQ_INIT(&soc->rx.defrag.waitlist);
15498 	soc->rx.defrag.timeout_ms =
15499 		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
15500 	soc->rx.defrag.next_flush_ms = 0;
15501 	soc->rx.flags.defrag_timeout_check =
15502 		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
15503 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
15504 
15505 	dp_monitor_soc_init(soc);
15506 
15507 	qdf_atomic_set(&soc->cmn_init_done, 1);
15508 
15509 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
15510 
15511 	qdf_spinlock_create(&soc->ast_lock);
15512 	dp_peer_mec_spinlock_create(soc);
15513 
15514 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
15515 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
15516 	INIT_RX_HW_STATS_LOCK(soc);
15517 
15518 	qdf_nbuf_queue_init(&soc->invalid_buf_queue);
15519 	/* fill the tx/rx cpu ring map*/
15520 	dp_soc_set_txrx_ring_map(soc);
15521 
15522 	TAILQ_INIT(&soc->inactive_peer_list);
15523 	qdf_spinlock_create(&soc->inactive_peer_list_lock);
15524 	TAILQ_INIT(&soc->inactive_vdev_list);
15525 	qdf_spinlock_create(&soc->inactive_vdev_list_lock);
15526 	qdf_spinlock_create(&soc->htt_stats.lock);
15527 	/* initialize work queue for stats processing */
15528 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
15529 
15530 	dp_reo_desc_deferred_freelist_create(soc);
15531 
15532 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
15533 		qdf_dma_mem_stats_read(),
15534 		qdf_heap_mem_stats_read(),
15535 		qdf_skb_total_mem_stats_read());
15536 
15537 	soc->vdev_stats_id_map = 0;
15538 
15539 	return soc;
15540 fail7:
15541 	dp_soc_tx_desc_sw_pools_deinit(soc);
15542 fail6:
15543 	htt_soc_htc_dealloc(soc->htt_handle);
15544 fail5:
15545 	dp_soc_srng_deinit(soc);
15546 fail4:
15547 	dp_hw_link_desc_ring_deinit(soc);
15548 fail3:
15549 	htt_htc_pkt_pool_free(htt_soc);
15550 fail2:
15551 	htt_soc_detach(htt_soc);
15552 fail1:
15553 	soc->arch_ops.txrx_soc_deinit(soc);
15554 fail0:
15555 	return NULL;
15556 }
15557 
15558 /**
15559  * dp_soc_init_wifi3() - Initialize txrx SOC
15560  * @soc: Opaque DP SOC handle
15561  * @ctrl_psoc: Opaque SOC handle from control plane(Unused)
15562  * @hif_handle: Opaque HIF handle
15563  * @htc_handle: Opaque HTC handle
15564  * @qdf_osdev: QDF device (Unused)
15565  * @ol_ops: Offload Operations (Unused)
15566  * @device_id: Device ID (Unused)
15567  *
15568  * Return: DP SOC handle on success, NULL on failure
15569  */
15570 void *dp_soc_init_wifi3(struct cdp_soc_t *soc,
15571 			struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
15572 			struct hif_opaque_softc *hif_handle,
15573 			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
15574 			struct ol_if_ops *ol_ops, uint16_t device_id)
15575 {
15576 	return dp_soc_init((struct dp_soc *)soc, htc_handle, hif_handle);
15577 }
15578 
15579 #endif
15580 
15581 /*
15582  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
15583  *
15584  * @soc: handle to DP soc
15585  * @mac_id: MAC id
15586  *
15587  * Return: Return pdev corresponding to MAC
15588  */
15589 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
15590 {
15591 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
15592 		return (mac_id < MAX_PDEV_CNT) ? soc->pdev_list[mac_id] : NULL;
15593 
15594 	/* Typically for MCL as there only 1 PDEV*/
15595 	return soc->pdev_list[0];
15596 }
15597 
15598 void dp_update_num_mac_rings_for_dbs(struct dp_soc *soc,
15599 				     int *max_mac_rings)
15600 {
15601 	bool dbs_enable = false;
15602 
15603 	if (soc->cdp_soc.ol_ops->is_hw_dbs_capable)
15604 		dbs_enable = soc->cdp_soc.ol_ops->
15605 				is_hw_dbs_capable((void *)soc->ctrl_psoc);
15606 
15607 	*max_mac_rings = dbs_enable ? (*max_mac_rings) : 1;
15608 	dp_info("dbs_enable %d, max_mac_rings %d",
15609 		dbs_enable, *max_mac_rings);
15610 }
15611 
15612 qdf_export_symbol(dp_update_num_mac_rings_for_dbs);
15613 
15614 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
15615 /**
15616  * dp_get_cfr_rcc() - get cfr rcc config
15617  * @soc_hdl: Datapath soc handle
15618  * @pdev_id: id of objmgr pdev
15619  *
15620  * Return: true/false based on cfr mode setting
15621  */
15622 static
15623 bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
15624 {
15625 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15626 	struct dp_pdev *pdev = NULL;
15627 
15628 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15629 	if (!pdev) {
15630 		dp_err("pdev is NULL");
15631 		return false;
15632 	}
15633 
15634 	return pdev->cfr_rcc_mode;
15635 }
15636 
15637 /**
15638  * dp_set_cfr_rcc() - enable/disable cfr rcc config
15639  * @soc_hdl: Datapath soc handle
15640  * @pdev_id: id of objmgr pdev
15641  * @enable: Enable/Disable cfr rcc mode
15642  *
15643  * Return: none
15644  */
15645 static
15646 void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, bool enable)
15647 {
15648 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15649 	struct dp_pdev *pdev = NULL;
15650 
15651 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15652 	if (!pdev) {
15653 		dp_err("pdev is NULL");
15654 		return;
15655 	}
15656 
15657 	pdev->cfr_rcc_mode = enable;
15658 }
15659 
15660 /*
15661  * dp_get_cfr_dbg_stats - Get the debug statistics for CFR
15662  * @soc_hdl: Datapath soc handle
15663  * @pdev_id: id of data path pdev handle
15664  * @cfr_rcc_stats: CFR RCC debug statistics buffer
15665  *
15666  * Return: none
15667  */
15668 static inline void
15669 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
15670 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats)
15671 {
15672 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15673 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15674 
15675 	if (!pdev) {
15676 		dp_err("Invalid pdev");
15677 		return;
15678 	}
15679 
15680 	qdf_mem_copy(cfr_rcc_stats, &pdev->stats.rcc,
15681 		     sizeof(struct cdp_cfr_rcc_stats));
15682 }
15683 
15684 /*
15685  * dp_clear_cfr_dbg_stats - Clear debug statistics for CFR
15686  * @soc_hdl: Datapath soc handle
15687  * @pdev_id: id of data path pdev handle
15688  *
15689  * Return: none
15690  */
15691 static void dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl,
15692 				   uint8_t pdev_id)
15693 {
15694 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15695 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15696 
15697 	if (!pdev) {
15698 		dp_err("dp pdev is NULL");
15699 		return;
15700 	}
15701 
15702 	qdf_mem_zero(&pdev->stats.rcc, sizeof(pdev->stats.rcc));
15703 }
15704 #endif
15705 
15706 /**
15707  * dp_bucket_index() - Return index from array
15708  *
15709  * @delay: delay measured
15710  * @array: array used to index corresponding delay
15711  * @delay_in_us: flag to indicate whether the delay in ms or us
15712  *
15713  * Return: index
15714  */
15715 static uint8_t
15716 dp_bucket_index(uint32_t delay, uint16_t *array, bool delay_in_us)
15717 {
15718 	uint8_t i = CDP_DELAY_BUCKET_0;
15719 	uint32_t thr_low, thr_high;
15720 
15721 	for (; i < CDP_DELAY_BUCKET_MAX - 1; i++) {
15722 		thr_low = array[i];
15723 		thr_high = array[i + 1];
15724 
15725 		if (delay_in_us) {
15726 			thr_low = thr_low * USEC_PER_MSEC;
15727 			thr_high = thr_high * USEC_PER_MSEC;
15728 		}
15729 		if (delay >= thr_low && delay <= thr_high)
15730 			return i;
15731 	}
15732 	return (CDP_DELAY_BUCKET_MAX - 1);
15733 }
15734 
15735 #ifdef HW_TX_DELAY_STATS_ENABLE
15736 /*
15737  * cdp_fw_to_hw_delay_range
15738  * Fw to hw delay ranges in milliseconds
15739  */
15740 static uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
15741 	0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
15742 #else
15743 static uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
15744 	0, 2, 4, 6, 8, 10, 20, 30, 40, 50, 100, 250, 500};
15745 #endif
15746 
15747 /*
15748  * cdp_sw_enq_delay_range
15749  * Software enqueue delay ranges in milliseconds
15750  */
15751 static uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
15752 	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
15753 
15754 /*
15755  * cdp_intfrm_delay_range
15756  * Interframe delay ranges in milliseconds
15757  */
15758 static uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
15759 	0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};
15760 
15761 /**
15762  * dp_fill_delay_buckets() - Fill delay statistics bucket for each
15763  *				type of delay
15764  * @tstats: tid tx stats
15765  * @rstats: tid rx stats
15766  * @delay: delay in ms
15767  * @tid: tid value
15768  * @mode: type of tx delay mode
15769  * @ring_id: ring number
15770  * @delay_in_us: flag to indicate whether the delay in ms or us
15771  *
15772  * Return: pointer to cdp_delay_stats structure
15773  */
15774 static struct cdp_delay_stats *
15775 dp_fill_delay_buckets(struct cdp_tid_tx_stats *tstats,
15776 		      struct cdp_tid_rx_stats *rstats, uint32_t delay,
15777 		      uint8_t tid, uint8_t mode, uint8_t ring_id,
15778 		      bool delay_in_us)
15779 {
15780 	uint8_t delay_index = 0;
15781 	struct cdp_delay_stats *stats = NULL;
15782 
15783 	/*
15784 	 * Update delay stats in proper bucket
15785 	 */
15786 	switch (mode) {
15787 	/* Software Enqueue delay ranges */
15788 	case CDP_DELAY_STATS_SW_ENQ:
15789 		if (!tstats)
15790 			break;
15791 
15792 		delay_index = dp_bucket_index(delay, cdp_sw_enq_delay,
15793 					      delay_in_us);
15794 		tstats->swq_delay.delay_bucket[delay_index]++;
15795 		stats = &tstats->swq_delay;
15796 		break;
15797 
15798 	/* Tx Completion delay ranges */
15799 	case CDP_DELAY_STATS_FW_HW_TRANSMIT:
15800 		if (!tstats)
15801 			break;
15802 
15803 		delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay,
15804 					      delay_in_us);
15805 		tstats->hwtx_delay.delay_bucket[delay_index]++;
15806 		stats = &tstats->hwtx_delay;
15807 		break;
15808 
15809 	/* Interframe tx delay ranges */
15810 	case CDP_DELAY_STATS_TX_INTERFRAME:
15811 		if (!tstats)
15812 			break;
15813 
15814 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
15815 					      delay_in_us);
15816 		tstats->intfrm_delay.delay_bucket[delay_index]++;
15817 		stats = &tstats->intfrm_delay;
15818 		break;
15819 
15820 	/* Interframe rx delay ranges */
15821 	case CDP_DELAY_STATS_RX_INTERFRAME:
15822 		if (!rstats)
15823 			break;
15824 
15825 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
15826 					      delay_in_us);
15827 		rstats->intfrm_delay.delay_bucket[delay_index]++;
15828 		stats = &rstats->intfrm_delay;
15829 		break;
15830 
15831 	/* Ring reap to indication to network stack */
15832 	case CDP_DELAY_STATS_REAP_STACK:
15833 		if (!rstats)
15834 			break;
15835 
15836 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
15837 					      delay_in_us);
15838 		rstats->to_stack_delay.delay_bucket[delay_index]++;
15839 		stats = &rstats->to_stack_delay;
15840 		break;
15841 	default:
15842 		dp_debug("Incorrect delay mode: %d", mode);
15843 	}
15844 
15845 	return stats;
15846 }
15847 
15848 void dp_update_delay_stats(struct cdp_tid_tx_stats *tstats,
15849 			   struct cdp_tid_rx_stats *rstats, uint32_t delay,
15850 			   uint8_t tid, uint8_t mode, uint8_t ring_id,
15851 			   bool delay_in_us)
15852 {
15853 	struct cdp_delay_stats *dstats = NULL;
15854 
15855 	/*
15856 	 * Delay ranges are different for different delay modes
15857 	 * Get the correct index to update delay bucket
15858 	 */
15859 	dstats = dp_fill_delay_buckets(tstats, rstats, delay, tid, mode,
15860 				       ring_id, delay_in_us);
15861 	if (qdf_unlikely(!dstats))
15862 		return;
15863 
15864 	if (delay != 0) {
15865 		/*
15866 		 * Compute minimum,average and maximum
15867 		 * delay
15868 		 */
15869 		if (delay < dstats->min_delay)
15870 			dstats->min_delay = delay;
15871 
15872 		if (delay > dstats->max_delay)
15873 			dstats->max_delay = delay;
15874 
15875 		/*
15876 		 * Average over delay measured till now
15877 		 */
15878 		if (!dstats->avg_delay)
15879 			dstats->avg_delay = delay;
15880 		else
15881 			dstats->avg_delay = ((delay + dstats->avg_delay) >> 1);
15882 	}
15883 }
15884 
15885 /**
15886  * dp_get_peer_mac_list(): function to get peer mac list of vdev
15887  * @soc: Datapath soc handle
15888  * @vdev_id: vdev id
15889  * @newmac: Table of the clients mac
15890  * @mac_cnt: No. of MACs required
15891  * @limit: Limit the number of clients
15892  *
15893  * return: no of clients
15894  */
15895 uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
15896 			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
15897 			      u_int16_t mac_cnt, bool limit)
15898 {
15899 	struct dp_soc *dp_soc = (struct dp_soc *)soc;
15900 	struct dp_vdev *vdev =
15901 		dp_vdev_get_ref_by_id(dp_soc, vdev_id, DP_MOD_ID_CDP);
15902 	struct dp_peer *peer;
15903 	uint16_t new_mac_cnt = 0;
15904 
15905 	if (!vdev)
15906 		return new_mac_cnt;
15907 
15908 	if (limit && (vdev->num_peers > mac_cnt))
15909 		return 0;
15910 
15911 	qdf_spin_lock_bh(&vdev->peer_list_lock);
15912 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
15913 		if (peer->bss_peer)
15914 			continue;
15915 		if (new_mac_cnt < mac_cnt) {
15916 			WLAN_ADDR_COPY(newmac[new_mac_cnt], peer->mac_addr.raw);
15917 			new_mac_cnt++;
15918 		}
15919 	}
15920 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
15921 	dp_vdev_unref_delete(dp_soc, vdev, DP_MOD_ID_CDP);
15922 	return new_mac_cnt;
15923 }
15924 
15925 uint16_t dp_get_peer_id(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *mac)
15926 {
15927 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
15928 						       mac, 0, vdev_id,
15929 						       DP_MOD_ID_CDP);
15930 	uint16_t peer_id = HTT_INVALID_PEER;
15931 
15932 	if (!peer) {
15933 		dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
15934 		return peer_id;
15935 	}
15936 
15937 	peer_id = peer->peer_id;
15938 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
15939 	return peer_id;
15940 }
15941 
15942 #ifdef QCA_SUPPORT_WDS_EXTENDED
15943 QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc,
15944 				  uint8_t vdev_id,
15945 				  uint8_t *mac,
15946 				  ol_txrx_rx_fp rx,
15947 				  ol_osif_peer_handle osif_peer)
15948 {
15949 	struct dp_txrx_peer *txrx_peer = NULL;
15950 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
15951 						       mac, 0, vdev_id,
15952 						       DP_MOD_ID_CDP);
15953 	QDF_STATUS status = QDF_STATUS_E_INVAL;
15954 
15955 	if (!peer) {
15956 		dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
15957 		return status;
15958 	}
15959 
15960 	txrx_peer = dp_get_txrx_peer(peer);
15961 	if (!txrx_peer) {
15962 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
15963 		return status;
15964 	}
15965 
15966 	if (rx) {
15967 		if (txrx_peer->osif_rx) {
15968 			status = QDF_STATUS_E_ALREADY;
15969 		} else {
15970 			txrx_peer->osif_rx = rx;
15971 			status = QDF_STATUS_SUCCESS;
15972 		}
15973 	} else {
15974 		if (txrx_peer->osif_rx) {
15975 			txrx_peer->osif_rx = NULL;
15976 			status = QDF_STATUS_SUCCESS;
15977 		} else {
15978 			status = QDF_STATUS_E_ALREADY;
15979 		}
15980 	}
15981 
15982 	txrx_peer->wds_ext.osif_peer = osif_peer;
15983 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
15984 
15985 	return status;
15986 }
15987 #endif /* QCA_SUPPORT_WDS_EXTENDED */
15988 
15989 /**
15990  * dp_pdev_srng_deinit() - de-initialize all pdev srng ring including
15991  *			   monitor rings
15992  * @pdev: Datapath pdev handle
15993  *
15994  */
15995 static void dp_pdev_srng_deinit(struct dp_pdev *pdev)
15996 {
15997 	struct dp_soc *soc = pdev->soc;
15998 	uint8_t i;
15999 
16000 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled)
16001 		dp_srng_deinit(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
16002 			       RXDMA_BUF,
16003 			       pdev->lmac_id);
16004 
16005 	if (!soc->rxdma2sw_rings_not_supported) {
16006 		for (i = 0;
16007 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
16008 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
16009 								 pdev->pdev_id);
16010 
16011 			wlan_minidump_remove(soc->rxdma_err_dst_ring[lmac_id].
16012 							base_vaddr_unaligned,
16013 					     soc->rxdma_err_dst_ring[lmac_id].
16014 								alloc_size,
16015 					     soc->ctrl_psoc,
16016 					     WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
16017 					     "rxdma_err_dst");
16018 			dp_srng_deinit(soc, &soc->rxdma_err_dst_ring[lmac_id],
16019 				       RXDMA_DST, lmac_id);
16020 		}
16021 	}
16022 
16023 
16024 }
16025 
16026 /**
16027  * dp_pdev_srng_init() - initialize all pdev srng rings including
16028  *			   monitor rings
16029  * @pdev: Datapath pdev handle
16030  *
16031  * return: QDF_STATUS_SUCCESS on success
16032  *	   QDF_STATUS_E_NOMEM on failure
16033  */
16034 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev)
16035 {
16036 	struct dp_soc *soc = pdev->soc;
16037 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
16038 	uint32_t i;
16039 
16040 	soc_cfg_ctx = soc->wlan_cfg_ctx;
16041 
16042 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
16043 		if (dp_srng_init(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
16044 				 RXDMA_BUF, 0, pdev->lmac_id)) {
16045 			dp_init_err("%pK: dp_srng_init failed rx refill ring",
16046 				    soc);
16047 			goto fail1;
16048 		}
16049 	}
16050 
16051 	/* LMAC RxDMA to SW Rings configuration */
16052 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
16053 		/* Only valid for MCL */
16054 		pdev = soc->pdev_list[0];
16055 
16056 	if (!soc->rxdma2sw_rings_not_supported) {
16057 		for (i = 0;
16058 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
16059 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
16060 								 pdev->pdev_id);
16061 			struct dp_srng *srng =
16062 				&soc->rxdma_err_dst_ring[lmac_id];
16063 
16064 			if (srng->hal_srng)
16065 				continue;
16066 
16067 			if (dp_srng_init(soc, srng, RXDMA_DST, 0, lmac_id)) {
16068 				dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring",
16069 					    soc);
16070 				goto fail1;
16071 			}
16072 			wlan_minidump_log(soc->rxdma_err_dst_ring[lmac_id].
16073 						base_vaddr_unaligned,
16074 					  soc->rxdma_err_dst_ring[lmac_id].
16075 						alloc_size,
16076 					  soc->ctrl_psoc,
16077 					  WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
16078 					  "rxdma_err_dst");
16079 		}
16080 	}
16081 	return QDF_STATUS_SUCCESS;
16082 
16083 fail1:
16084 	dp_pdev_srng_deinit(pdev);
16085 	return QDF_STATUS_E_NOMEM;
16086 }
16087 
16088 /**
16089  * dp_pdev_srng_free() - free all pdev srng rings including monitor rings
16090  * pdev: Datapath pdev handle
16091  *
16092  */
16093 static void dp_pdev_srng_free(struct dp_pdev *pdev)
16094 {
16095 	struct dp_soc *soc = pdev->soc;
16096 	uint8_t i;
16097 
16098 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled)
16099 		dp_srng_free(soc, &soc->rx_refill_buf_ring[pdev->lmac_id]);
16100 
16101 	if (!soc->rxdma2sw_rings_not_supported) {
16102 		for (i = 0;
16103 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
16104 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
16105 								 pdev->pdev_id);
16106 
16107 			dp_srng_free(soc, &soc->rxdma_err_dst_ring[lmac_id]);
16108 		}
16109 	}
16110 }
16111 
16112 /**
16113  * dp_pdev_srng_alloc() - allocate memory for all pdev srng rings including
16114  *			  monitor rings
16115  * pdev: Datapath pdev handle
16116  *
16117  * return: QDF_STATUS_SUCCESS on success
16118  *	   QDF_STATUS_E_NOMEM on failure
16119  */
16120 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev)
16121 {
16122 	struct dp_soc *soc = pdev->soc;
16123 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
16124 	uint32_t ring_size;
16125 	uint32_t i;
16126 
16127 	soc_cfg_ctx = soc->wlan_cfg_ctx;
16128 
16129 	ring_size = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
16130 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
16131 		if (dp_srng_alloc(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
16132 				  RXDMA_BUF, ring_size, 0)) {
16133 			dp_init_err("%pK: dp_srng_alloc failed rx refill ring",
16134 				    soc);
16135 			goto fail1;
16136 		}
16137 	}
16138 
16139 	ring_size = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
16140 	/* LMAC RxDMA to SW Rings configuration */
16141 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
16142 		/* Only valid for MCL */
16143 		pdev = soc->pdev_list[0];
16144 
16145 	if (!soc->rxdma2sw_rings_not_supported) {
16146 		for (i = 0;
16147 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
16148 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
16149 								 pdev->pdev_id);
16150 			struct dp_srng *srng =
16151 				&soc->rxdma_err_dst_ring[lmac_id];
16152 
16153 			if (srng->base_vaddr_unaligned)
16154 				continue;
16155 
16156 			if (dp_srng_alloc(soc, srng, RXDMA_DST, ring_size, 0)) {
16157 				dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring",
16158 					    soc);
16159 				goto fail1;
16160 			}
16161 		}
16162 	}
16163 
16164 	return QDF_STATUS_SUCCESS;
16165 fail1:
16166 	dp_pdev_srng_free(pdev);
16167 	return QDF_STATUS_E_NOMEM;
16168 }
16169 
16170 #ifndef WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG
16171 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
16172 {
16173 	QDF_STATUS status;
16174 
16175 	if (soc->init_tcl_cmd_cred_ring) {
16176 		status =  dp_srng_init(soc, &soc->tcl_cmd_credit_ring,
16177 				       TCL_CMD_CREDIT, 0, 0);
16178 		if (QDF_IS_STATUS_ERROR(status))
16179 			return status;
16180 
16181 		wlan_minidump_log(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
16182 				  soc->tcl_cmd_credit_ring.alloc_size,
16183 				  soc->ctrl_psoc,
16184 				  WLAN_MD_DP_SRNG_TCL_CMD,
16185 				  "wbm_desc_rel_ring");
16186 	}
16187 
16188 	return QDF_STATUS_SUCCESS;
16189 }
16190 
16191 static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
16192 {
16193 	if (soc->init_tcl_cmd_cred_ring) {
16194 		wlan_minidump_remove(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
16195 				     soc->tcl_cmd_credit_ring.alloc_size,
16196 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_CMD,
16197 				     "wbm_desc_rel_ring");
16198 		dp_srng_deinit(soc, &soc->tcl_cmd_credit_ring,
16199 			       TCL_CMD_CREDIT, 0);
16200 	}
16201 }
16202 
16203 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
16204 {
16205 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
16206 	uint32_t entries;
16207 	QDF_STATUS status;
16208 
16209 	entries = wlan_cfg_get_dp_soc_tcl_cmd_credit_ring_size(soc_cfg_ctx);
16210 	if (soc->init_tcl_cmd_cred_ring) {
16211 		status = dp_srng_alloc(soc, &soc->tcl_cmd_credit_ring,
16212 				       TCL_CMD_CREDIT, entries, 0);
16213 		if (QDF_IS_STATUS_ERROR(status))
16214 			return status;
16215 	}
16216 
16217 	return QDF_STATUS_SUCCESS;
16218 }
16219 
16220 static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
16221 {
16222 	if (soc->init_tcl_cmd_cred_ring)
16223 		dp_srng_free(soc, &soc->tcl_cmd_credit_ring);
16224 }
16225 
16226 static inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
16227 {
16228 	if (soc->init_tcl_cmd_cred_ring)
16229 		hal_tx_init_cmd_credit_ring(soc->hal_soc,
16230 					    soc->tcl_cmd_credit_ring.hal_srng);
16231 }
16232 #else
16233 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
16234 {
16235 	return QDF_STATUS_SUCCESS;
16236 }
16237 
16238 static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
16239 {
16240 }
16241 
16242 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
16243 {
16244 	return QDF_STATUS_SUCCESS;
16245 }
16246 
16247 static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
16248 {
16249 }
16250 
16251 static inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
16252 {
16253 }
16254 #endif
16255 
16256 #ifndef WLAN_DP_DISABLE_TCL_STATUS_SRNG
16257 static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
16258 {
16259 	QDF_STATUS status;
16260 
16261 	status =  dp_srng_init(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0);
16262 	if (QDF_IS_STATUS_ERROR(status))
16263 		return status;
16264 
16265 	wlan_minidump_log(soc->tcl_status_ring.base_vaddr_unaligned,
16266 			  soc->tcl_status_ring.alloc_size,
16267 			  soc->ctrl_psoc,
16268 			  WLAN_MD_DP_SRNG_TCL_STATUS,
16269 			  "wbm_desc_rel_ring");
16270 
16271 	return QDF_STATUS_SUCCESS;
16272 }
16273 
16274 static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
16275 {
16276 	wlan_minidump_remove(soc->tcl_status_ring.base_vaddr_unaligned,
16277 			     soc->tcl_status_ring.alloc_size,
16278 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_STATUS,
16279 			     "wbm_desc_rel_ring");
16280 	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
16281 }
16282 
16283 static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
16284 {
16285 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
16286 	uint32_t entries;
16287 	QDF_STATUS status = QDF_STATUS_SUCCESS;
16288 
16289 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
16290 	status = dp_srng_alloc(soc, &soc->tcl_status_ring,
16291 			       TCL_STATUS, entries, 0);
16292 
16293 	return status;
16294 }
16295 
16296 static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
16297 {
16298 	dp_srng_free(soc, &soc->tcl_status_ring);
16299 }
16300 #else
16301 static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
16302 {
16303 	return QDF_STATUS_SUCCESS;
16304 }
16305 
16306 static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
16307 {
16308 }
16309 
16310 static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
16311 {
16312 	return QDF_STATUS_SUCCESS;
16313 }
16314 
16315 static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
16316 {
16317 }
16318 #endif
16319 
16320 /**
16321  * dp_soc_srng_deinit() - de-initialize soc srng rings
16322  * @soc: Datapath soc handle
16323  *
16324  */
16325 static void dp_soc_srng_deinit(struct dp_soc *soc)
16326 {
16327 	uint32_t i;
16328 
16329 	if (soc->arch_ops.txrx_soc_srng_deinit)
16330 		soc->arch_ops.txrx_soc_srng_deinit(soc);
16331 
16332 	/* Free the ring memories */
16333 	/* Common rings */
16334 	wlan_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
16335 			     soc->wbm_desc_rel_ring.alloc_size,
16336 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_WBM_DESC_REL,
16337 			     "wbm_desc_rel_ring");
16338 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
16339 
16340 	/* Tx data rings */
16341 	for (i = 0; i < soc->num_tcl_data_rings; i++)
16342 		dp_deinit_tx_pair_by_index(soc, i);
16343 
16344 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
16345 		dp_deinit_tx_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
16346 		dp_ipa_deinit_alt_tx_ring(soc);
16347 	}
16348 
16349 	/* TCL command and status rings */
16350 	dp_soc_tcl_cmd_cred_srng_deinit(soc);
16351 	dp_soc_tcl_status_srng_deinit(soc);
16352 
16353 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
16354 		/* TODO: Get number of rings and ring sizes
16355 		 * from wlan_cfg
16356 		 */
16357 		wlan_minidump_remove(soc->reo_dest_ring[i].base_vaddr_unaligned,
16358 				     soc->reo_dest_ring[i].alloc_size,
16359 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_DEST,
16360 				     "reo_dest_ring");
16361 		dp_srng_deinit(soc, &soc->reo_dest_ring[i], REO_DST, i);
16362 	}
16363 
16364 	/* REO reinjection ring */
16365 	wlan_minidump_remove(soc->reo_reinject_ring.base_vaddr_unaligned,
16366 			     soc->reo_reinject_ring.alloc_size,
16367 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_REINJECT,
16368 			     "reo_reinject_ring");
16369 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
16370 
16371 	/* Rx release ring */
16372 	wlan_minidump_remove(soc->rx_rel_ring.base_vaddr_unaligned,
16373 			     soc->rx_rel_ring.alloc_size,
16374 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_RX_REL,
16375 			     "reo_release_ring");
16376 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
16377 
16378 	/* Rx exception ring */
16379 	/* TODO: Better to store ring_type and ring_num in
16380 	 * dp_srng during setup
16381 	 */
16382 	wlan_minidump_remove(soc->reo_exception_ring.base_vaddr_unaligned,
16383 			     soc->reo_exception_ring.alloc_size,
16384 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_EXCEPTION,
16385 			     "reo_exception_ring");
16386 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
16387 
16388 	/* REO command and status rings */
16389 	wlan_minidump_remove(soc->reo_cmd_ring.base_vaddr_unaligned,
16390 			     soc->reo_cmd_ring.alloc_size,
16391 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_CMD,
16392 			     "reo_cmd_ring");
16393 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
16394 	wlan_minidump_remove(soc->reo_status_ring.base_vaddr_unaligned,
16395 			     soc->reo_status_ring.alloc_size,
16396 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_STATUS,
16397 			     "reo_status_ring");
16398 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
16399 }
16400 
16401 /**
16402  * dp_soc_srng_init() - Initialize soc level srng rings
16403  * @soc: Datapath soc handle
16404  *
16405  * return: QDF_STATUS_SUCCESS on success
16406  *	   QDF_STATUS_E_FAILURE on failure
16407  */
16408 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc)
16409 {
16410 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
16411 	uint8_t i;
16412 	uint8_t wbm2_sw_rx_rel_ring_id;
16413 
16414 	soc_cfg_ctx = soc->wlan_cfg_ctx;
16415 
16416 	dp_enable_verbose_debug(soc);
16417 
16418 	/* WBM descriptor release ring */
16419 	if (dp_srng_init(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0)) {
16420 		dp_init_err("%pK: dp_srng_init failed for wbm_desc_rel_ring", soc);
16421 		goto fail1;
16422 	}
16423 
16424 	wlan_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
16425 			  soc->wbm_desc_rel_ring.alloc_size,
16426 			  soc->ctrl_psoc,
16427 			  WLAN_MD_DP_SRNG_WBM_DESC_REL,
16428 			  "wbm_desc_rel_ring");
16429 
16430 	/* TCL command and status rings */
16431 	if (dp_soc_tcl_cmd_cred_srng_init(soc)) {
16432 		dp_init_err("%pK: dp_srng_init failed for tcl_cmd_ring", soc);
16433 		goto fail1;
16434 	}
16435 
16436 	if (dp_soc_tcl_status_srng_init(soc)) {
16437 		dp_init_err("%pK: dp_srng_init failed for tcl_status_ring", soc);
16438 		goto fail1;
16439 	}
16440 
16441 	/* REO reinjection ring */
16442 	if (dp_srng_init(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0)) {
16443 		dp_init_err("%pK: dp_srng_init failed for reo_reinject_ring", soc);
16444 		goto fail1;
16445 	}
16446 
16447 	wlan_minidump_log(soc->reo_reinject_ring.base_vaddr_unaligned,
16448 			  soc->reo_reinject_ring.alloc_size,
16449 			  soc->ctrl_psoc,
16450 			  WLAN_MD_DP_SRNG_REO_REINJECT,
16451 			  "reo_reinject_ring");
16452 
16453 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc_cfg_ctx);
16454 	/* Rx release ring */
16455 	if (dp_srng_init(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
16456 			 wbm2_sw_rx_rel_ring_id, 0)) {
16457 		dp_init_err("%pK: dp_srng_init failed for rx_rel_ring", soc);
16458 		goto fail1;
16459 	}
16460 
16461 	wlan_minidump_log(soc->rx_rel_ring.base_vaddr_unaligned,
16462 			  soc->rx_rel_ring.alloc_size,
16463 			  soc->ctrl_psoc,
16464 			  WLAN_MD_DP_SRNG_RX_REL,
16465 			  "reo_release_ring");
16466 
16467 	/* Rx exception ring */
16468 	if (dp_srng_init(soc, &soc->reo_exception_ring,
16469 			 REO_EXCEPTION, 0, MAX_REO_DEST_RINGS)) {
16470 		dp_init_err("%pK: dp_srng_init failed - reo_exception", soc);
16471 		goto fail1;
16472 	}
16473 
16474 	wlan_minidump_log(soc->reo_exception_ring.base_vaddr_unaligned,
16475 			  soc->reo_exception_ring.alloc_size,
16476 			  soc->ctrl_psoc,
16477 			  WLAN_MD_DP_SRNG_REO_EXCEPTION,
16478 			  "reo_exception_ring");
16479 
16480 	/* REO command and status rings */
16481 	if (dp_srng_init(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0)) {
16482 		dp_init_err("%pK: dp_srng_init failed for reo_cmd_ring", soc);
16483 		goto fail1;
16484 	}
16485 
16486 	wlan_minidump_log(soc->reo_cmd_ring.base_vaddr_unaligned,
16487 			  soc->reo_cmd_ring.alloc_size,
16488 			  soc->ctrl_psoc,
16489 			  WLAN_MD_DP_SRNG_REO_CMD,
16490 			  "reo_cmd_ring");
16491 
16492 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
16493 	TAILQ_INIT(&soc->rx.reo_cmd_list);
16494 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
16495 
16496 	if (dp_srng_init(soc, &soc->reo_status_ring, REO_STATUS, 0, 0)) {
16497 		dp_init_err("%pK: dp_srng_init failed for reo_status_ring", soc);
16498 		goto fail1;
16499 	}
16500 
16501 	wlan_minidump_log(soc->reo_status_ring.base_vaddr_unaligned,
16502 			  soc->reo_status_ring.alloc_size,
16503 			  soc->ctrl_psoc,
16504 			  WLAN_MD_DP_SRNG_REO_STATUS,
16505 			  "reo_status_ring");
16506 
16507 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
16508 		if (dp_init_tx_ring_pair_by_index(soc, i))
16509 			goto fail1;
16510 	}
16511 
16512 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
16513 		if (dp_init_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
16514 			goto fail1;
16515 
16516 		if (dp_ipa_init_alt_tx_ring(soc))
16517 			goto fail1;
16518 	}
16519 
16520 	dp_create_ext_stats_event(soc);
16521 
16522 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
16523 		/* Initialize REO destination ring */
16524 		if (dp_srng_init(soc, &soc->reo_dest_ring[i], REO_DST, i, 0)) {
16525 			dp_init_err("%pK: dp_srng_init failed for reo_dest_ringn", soc);
16526 			goto fail1;
16527 		}
16528 
16529 		wlan_minidump_log(soc->reo_dest_ring[i].base_vaddr_unaligned,
16530 				  soc->reo_dest_ring[i].alloc_size,
16531 				  soc->ctrl_psoc,
16532 				  WLAN_MD_DP_SRNG_REO_DEST,
16533 				  "reo_dest_ring");
16534 	}
16535 
16536 	if (soc->arch_ops.txrx_soc_srng_init) {
16537 		if (soc->arch_ops.txrx_soc_srng_init(soc)) {
16538 			dp_init_err("%pK: dp_srng_init failed for arch rings",
16539 				    soc);
16540 			goto fail1;
16541 		}
16542 	}
16543 
16544 	return QDF_STATUS_SUCCESS;
16545 fail1:
16546 	/*
16547 	 * Cleanup will be done as part of soc_detach, which will
16548 	 * be called on pdev attach failure
16549 	 */
16550 	dp_soc_srng_deinit(soc);
16551 	return QDF_STATUS_E_FAILURE;
16552 }
16553 
16554 /**
16555  * dp_soc_srng_free() - free soc level srng rings
16556  * @soc: Datapath soc handle
16557  *
16558  */
16559 static void dp_soc_srng_free(struct dp_soc *soc)
16560 {
16561 	uint32_t i;
16562 
16563 	if (soc->arch_ops.txrx_soc_srng_free)
16564 		soc->arch_ops.txrx_soc_srng_free(soc);
16565 
16566 	dp_srng_free(soc, &soc->wbm_desc_rel_ring);
16567 
16568 	for (i = 0; i < soc->num_tcl_data_rings; i++)
16569 		dp_free_tx_ring_pair_by_index(soc, i);
16570 
16571 	/* Free IPA rings for TCL_TX and TCL_COMPL ring */
16572 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
16573 		dp_free_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
16574 		dp_ipa_free_alt_tx_ring(soc);
16575 	}
16576 
16577 	dp_soc_tcl_cmd_cred_srng_free(soc);
16578 	dp_soc_tcl_status_srng_free(soc);
16579 
16580 	for (i = 0; i < soc->num_reo_dest_rings; i++)
16581 		dp_srng_free(soc, &soc->reo_dest_ring[i]);
16582 
16583 	dp_srng_free(soc, &soc->reo_reinject_ring);
16584 	dp_srng_free(soc, &soc->rx_rel_ring);
16585 
16586 	dp_srng_free(soc, &soc->reo_exception_ring);
16587 
16588 	dp_srng_free(soc, &soc->reo_cmd_ring);
16589 	dp_srng_free(soc, &soc->reo_status_ring);
16590 }
16591 
16592 /**
16593  * dp_soc_srng_alloc() - Allocate memory for soc level srng rings
16594  * @soc: Datapath soc handle
16595  *
16596  * return: QDF_STATUS_SUCCESS on success
16597  *	   QDF_STATUS_E_NOMEM on failure
16598  */
16599 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc)
16600 {
16601 	uint32_t entries;
16602 	uint32_t i;
16603 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
16604 	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
16605 	uint32_t tx_comp_ring_size, tx_ring_size, reo_dst_ring_size;
16606 
16607 	soc_cfg_ctx = soc->wlan_cfg_ctx;
16608 
16609 	/* sw2wbm link descriptor release ring */
16610 	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
16611 	if (dp_srng_alloc(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE,
16612 			  entries, 0)) {
16613 		dp_init_err("%pK: dp_srng_alloc failed for wbm_desc_rel_ring", soc);
16614 		goto fail1;
16615 	}
16616 
16617 	/* TCL command and status rings */
16618 	if (dp_soc_tcl_cmd_cred_srng_alloc(soc)) {
16619 		dp_init_err("%pK: dp_srng_alloc failed for tcl_cmd_ring", soc);
16620 		goto fail1;
16621 	}
16622 
16623 	if (dp_soc_tcl_status_srng_alloc(soc)) {
16624 		dp_init_err("%pK: dp_srng_alloc failed for tcl_status_ring", soc);
16625 		goto fail1;
16626 	}
16627 
16628 	/* REO reinjection ring */
16629 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
16630 	if (dp_srng_alloc(soc, &soc->reo_reinject_ring, REO_REINJECT,
16631 			  entries, 0)) {
16632 		dp_init_err("%pK: dp_srng_alloc failed for reo_reinject_ring", soc);
16633 		goto fail1;
16634 	}
16635 
16636 	/* Rx release ring */
16637 	entries = wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx);
16638 	if (dp_srng_alloc(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
16639 			  entries, 0)) {
16640 		dp_init_err("%pK: dp_srng_alloc failed for rx_rel_ring", soc);
16641 		goto fail1;
16642 	}
16643 
16644 	/* Rx exception ring */
16645 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
16646 	if (dp_srng_alloc(soc, &soc->reo_exception_ring, REO_EXCEPTION,
16647 			  entries, 0)) {
16648 		dp_init_err("%pK: dp_srng_alloc failed - reo_exception", soc);
16649 		goto fail1;
16650 	}
16651 
16652 	/* REO command and status rings */
16653 	entries = wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx);
16654 	if (dp_srng_alloc(soc, &soc->reo_cmd_ring, REO_CMD, entries, 0)) {
16655 		dp_init_err("%pK: dp_srng_alloc failed for reo_cmd_ring", soc);
16656 		goto fail1;
16657 	}
16658 
16659 	entries = wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx);
16660 	if (dp_srng_alloc(soc, &soc->reo_status_ring, REO_STATUS,
16661 			  entries, 0)) {
16662 		dp_init_err("%pK: dp_srng_alloc failed for reo_status_ring", soc);
16663 		goto fail1;
16664 	}
16665 
16666 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
16667 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
16668 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc_cfg_ctx);
16669 
16670 	/* Disable cached desc if NSS offload is enabled */
16671 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
16672 		cached = 0;
16673 
16674 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
16675 		if (dp_alloc_tx_ring_pair_by_index(soc, i))
16676 			goto fail1;
16677 	}
16678 
16679 	/* IPA rings for TCL_TX and TX_COMP will be allocated here */
16680 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
16681 		if (dp_alloc_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
16682 			goto fail1;
16683 
16684 		if (dp_ipa_alloc_alt_tx_ring(soc))
16685 			goto fail1;
16686 	}
16687 
16688 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
16689 		/* Setup REO destination ring */
16690 		if (dp_srng_alloc(soc, &soc->reo_dest_ring[i], REO_DST,
16691 				  reo_dst_ring_size, cached)) {
16692 			dp_init_err("%pK: dp_srng_alloc failed for reo_dest_ring", soc);
16693 			goto fail1;
16694 		}
16695 	}
16696 
16697 	if (soc->arch_ops.txrx_soc_srng_alloc) {
16698 		if (soc->arch_ops.txrx_soc_srng_alloc(soc)) {
16699 			dp_init_err("%pK: dp_srng_alloc failed for arch rings",
16700 				    soc);
16701 			goto fail1;
16702 		}
16703 	}
16704 
16705 	return QDF_STATUS_SUCCESS;
16706 
16707 fail1:
16708 	dp_soc_srng_free(soc);
16709 	return QDF_STATUS_E_NOMEM;
16710 }
16711 
16712 static void dp_soc_cfg_dump(struct dp_soc *soc, uint32_t target_type)
16713 {
16714 	dp_init_info("DP soc Dump for Target = %d", target_type);
16715 	dp_init_info("ast_override_support = %d, da_war_enabled = %d,",
16716 		     soc->ast_override_support, soc->da_war_enabled);
16717 
16718 	wlan_cfg_dp_soc_ctx_dump(soc->wlan_cfg_ctx);
16719 }
16720 
16721 /**
16722  * dp_soc_cfg_init() - initialize target specific configuration
16723  *		       during dp_soc_init
16724  * @soc: dp soc handle
16725  */
16726 static void dp_soc_cfg_init(struct dp_soc *soc)
16727 {
16728 	uint32_t target_type;
16729 
16730 	target_type = hal_get_target_type(soc->hal_soc);
16731 	switch (target_type) {
16732 	case TARGET_TYPE_QCA6290:
16733 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
16734 					       REO_DST_RING_SIZE_QCA6290);
16735 		soc->ast_override_support = 1;
16736 		soc->da_war_enabled = false;
16737 		break;
16738 	case TARGET_TYPE_QCA6390:
16739 	case TARGET_TYPE_QCA6490:
16740 	case TARGET_TYPE_QCA6750:
16741 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
16742 					       REO_DST_RING_SIZE_QCA6290);
16743 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
16744 		soc->ast_override_support = 1;
16745 		if (soc->cdp_soc.ol_ops->get_con_mode &&
16746 		    soc->cdp_soc.ol_ops->get_con_mode() ==
16747 		    QDF_GLOBAL_MONITOR_MODE) {
16748 			int int_ctx;
16749 
16750 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
16751 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
16752 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
16753 			}
16754 		}
16755 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
16756 		break;
16757 	case TARGET_TYPE_KIWI:
16758 	case TARGET_TYPE_MANGO:
16759 		soc->ast_override_support = 1;
16760 		soc->per_tid_basize_max_tid = 8;
16761 
16762 		if (soc->cdp_soc.ol_ops->get_con_mode &&
16763 		    soc->cdp_soc.ol_ops->get_con_mode() ==
16764 		    QDF_GLOBAL_MONITOR_MODE) {
16765 			int int_ctx;
16766 
16767 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS;
16768 			     int_ctx++) {
16769 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
16770 				if (dp_is_monitor_mode_using_poll(soc))
16771 					soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
16772 			}
16773 		}
16774 
16775 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
16776 		soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev = 1;
16777 		break;
16778 	case TARGET_TYPE_QCA8074:
16779 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
16780 		soc->da_war_enabled = true;
16781 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
16782 		break;
16783 	case TARGET_TYPE_QCA8074V2:
16784 	case TARGET_TYPE_QCA6018:
16785 	case TARGET_TYPE_QCA9574:
16786 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
16787 		soc->ast_override_support = 1;
16788 		soc->per_tid_basize_max_tid = 8;
16789 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
16790 		soc->da_war_enabled = false;
16791 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
16792 		break;
16793 	case TARGET_TYPE_QCN9000:
16794 		soc->ast_override_support = 1;
16795 		soc->da_war_enabled = false;
16796 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
16797 		soc->per_tid_basize_max_tid = 8;
16798 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
16799 		soc->lmac_polled_mode = 0;
16800 		soc->wbm_release_desc_rx_sg_support = 1;
16801 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
16802 		break;
16803 	case TARGET_TYPE_QCA5018:
16804 	case TARGET_TYPE_QCN6122:
16805 	case TARGET_TYPE_QCN9160:
16806 		soc->ast_override_support = 1;
16807 		soc->da_war_enabled = false;
16808 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
16809 		soc->per_tid_basize_max_tid = 8;
16810 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS_11AX;
16811 		soc->disable_mac1_intr = 1;
16812 		soc->disable_mac2_intr = 1;
16813 		soc->wbm_release_desc_rx_sg_support = 1;
16814 		break;
16815 	case TARGET_TYPE_QCN9224:
16816 		soc->ast_override_support = 1;
16817 		soc->da_war_enabled = false;
16818 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
16819 		soc->per_tid_basize_max_tid = 8;
16820 		soc->wbm_release_desc_rx_sg_support = 1;
16821 		soc->rxdma2sw_rings_not_supported = 1;
16822 		soc->wbm_sg_last_msdu_war = 1;
16823 		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
16824 		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
16825 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
16826 		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
16827 		soc->host_ast_db_enable = cfg_get(soc->ctrl_psoc,
16828 						  CFG_DP_HOST_AST_DB_ENABLE);
16829 		break;
16830 	case TARGET_TYPE_QCA5332:
16831 		soc->ast_override_support = 1;
16832 		soc->da_war_enabled = false;
16833 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
16834 		soc->per_tid_basize_max_tid = 8;
16835 		soc->wbm_release_desc_rx_sg_support = 1;
16836 		soc->rxdma2sw_rings_not_supported = 1;
16837 		soc->wbm_sg_last_msdu_war = 1;
16838 		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
16839 		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
16840 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS_5332;
16841 		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
16842 		soc->host_ast_db_enable = cfg_get(soc->ctrl_psoc,
16843 						  CFG_DP_HOST_AST_DB_ENABLE);
16844 		break;
16845 	default:
16846 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
16847 		qdf_assert_always(0);
16848 		break;
16849 	}
16850 	dp_soc_cfg_dump(soc, target_type);
16851 }
16852 
16853 /**
16854  * dp_soc_cfg_attach() - set target specific configuration in
16855  *			 dp soc cfg.
16856  * @soc: dp soc handle
16857  */
16858 static void dp_soc_cfg_attach(struct dp_soc *soc)
16859 {
16860 	int target_type;
16861 	int nss_cfg = 0;
16862 
16863 	target_type = hal_get_target_type(soc->hal_soc);
16864 	switch (target_type) {
16865 	case TARGET_TYPE_QCA6290:
16866 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
16867 					       REO_DST_RING_SIZE_QCA6290);
16868 		break;
16869 	case TARGET_TYPE_QCA6390:
16870 	case TARGET_TYPE_QCA6490:
16871 	case TARGET_TYPE_QCA6750:
16872 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
16873 					       REO_DST_RING_SIZE_QCA6290);
16874 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
16875 		break;
16876 	case TARGET_TYPE_KIWI:
16877 	case TARGET_TYPE_MANGO:
16878 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
16879 		break;
16880 	case TARGET_TYPE_QCA8074:
16881 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
16882 		break;
16883 	case TARGET_TYPE_QCA8074V2:
16884 	case TARGET_TYPE_QCA6018:
16885 	case TARGET_TYPE_QCA9574:
16886 	case TARGET_TYPE_QCN6122:
16887 	case TARGET_TYPE_QCN9160:
16888 	case TARGET_TYPE_QCA5018:
16889 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
16890 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
16891 		break;
16892 	case TARGET_TYPE_QCN9000:
16893 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
16894 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
16895 		break;
16896 	case TARGET_TYPE_QCN9224:
16897 	case TARGET_TYPE_QCA5332:
16898 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
16899 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
16900 		break;
16901 	default:
16902 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
16903 		qdf_assert_always(0);
16904 		break;
16905 	}
16906 
16907 	if (soc->cdp_soc.ol_ops->get_soc_nss_cfg)
16908 		nss_cfg = soc->cdp_soc.ol_ops->get_soc_nss_cfg(soc->ctrl_psoc);
16909 
16910 	wlan_cfg_set_dp_soc_nss_cfg(soc->wlan_cfg_ctx, nss_cfg);
16911 
16912 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
16913 		wlan_cfg_set_num_tx_desc_pool(soc->wlan_cfg_ctx, 0);
16914 		wlan_cfg_set_num_tx_ext_desc_pool(soc->wlan_cfg_ctx, 0);
16915 		wlan_cfg_set_num_tx_desc(soc->wlan_cfg_ctx, 0);
16916 		wlan_cfg_set_num_tx_ext_desc(soc->wlan_cfg_ctx, 0);
16917 		soc->init_tcl_cmd_cred_ring = false;
16918 		soc->num_tcl_data_rings =
16919 			wlan_cfg_num_nss_tcl_data_rings(soc->wlan_cfg_ctx);
16920 		soc->num_reo_dest_rings =
16921 			wlan_cfg_num_nss_reo_dest_rings(soc->wlan_cfg_ctx);
16922 
16923 	} else {
16924 		soc->init_tcl_cmd_cred_ring = true;
16925 		soc->num_tx_comp_rings =
16926 			wlan_cfg_num_tx_comp_rings(soc->wlan_cfg_ctx);
16927 		soc->num_tcl_data_rings =
16928 			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
16929 		soc->num_reo_dest_rings =
16930 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
16931 	}
16932 
16933 	soc->arch_ops.soc_cfg_attach(soc);
16934 }
16935 
16936 static inline  void dp_pdev_set_default_reo(struct dp_pdev *pdev)
16937 {
16938 	struct dp_soc *soc = pdev->soc;
16939 
16940 	switch (pdev->pdev_id) {
16941 	case 0:
16942 		pdev->reo_dest =
16943 			wlan_cfg_radio0_default_reo_get(soc->wlan_cfg_ctx);
16944 		break;
16945 
16946 	case 1:
16947 		pdev->reo_dest =
16948 			wlan_cfg_radio1_default_reo_get(soc->wlan_cfg_ctx);
16949 		break;
16950 
16951 	case 2:
16952 		pdev->reo_dest =
16953 			wlan_cfg_radio2_default_reo_get(soc->wlan_cfg_ctx);
16954 		break;
16955 
16956 	default:
16957 		dp_init_err("%pK: Invalid pdev_id %d for reo selection",
16958 			    soc, pdev->pdev_id);
16959 		break;
16960 	}
16961 }
16962 
16963 static QDF_STATUS dp_pdev_init(struct cdp_soc_t *txrx_soc,
16964 				      HTC_HANDLE htc_handle,
16965 				      qdf_device_t qdf_osdev,
16966 				      uint8_t pdev_id)
16967 {
16968 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
16969 	int nss_cfg;
16970 	void *sojourn_buf;
16971 	QDF_STATUS ret;
16972 
16973 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
16974 	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
16975 
16976 	soc_cfg_ctx = soc->wlan_cfg_ctx;
16977 	pdev->soc = soc;
16978 	pdev->pdev_id = pdev_id;
16979 
16980 	/*
16981 	 * Variable to prevent double pdev deinitialization during
16982 	 * radio detach execution .i.e. in the absence of any vdev.
16983 	 */
16984 	pdev->pdev_deinit = 0;
16985 
16986 	if (dp_wdi_event_attach(pdev)) {
16987 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
16988 			  "dp_wdi_evet_attach failed");
16989 		goto fail0;
16990 	}
16991 
16992 	if (dp_pdev_srng_init(pdev)) {
16993 		dp_init_err("%pK: Failed to initialize pdev srng rings", soc);
16994 		goto fail1;
16995 	}
16996 
16997 	/* Initialize descriptors in TCL Rings used by IPA */
16998 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
16999 		hal_tx_init_data_ring(soc->hal_soc,
17000 				      soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng);
17001 		dp_ipa_hal_tx_init_alt_data_ring(soc);
17002 	}
17003 
17004 	/*
17005 	 * Initialize command/credit ring descriptor
17006 	 * Command/CREDIT ring also used for sending DATA cmds
17007 	 */
17008 	dp_tx_init_cmd_credit_ring(soc);
17009 
17010 	dp_tx_pdev_init(pdev);
17011 
17012 	/*
17013 	 * set nss pdev config based on soc config
17014 	 */
17015 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
17016 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
17017 					 (nss_cfg & (1 << pdev_id)));
17018 	pdev->target_pdev_id =
17019 		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
17020 
17021 	if (soc->preferred_hw_mode == WMI_HOST_HW_MODE_2G_PHYB &&
17022 	    pdev->lmac_id == PHYB_2G_LMAC_ID) {
17023 		pdev->target_pdev_id = PHYB_2G_TARGET_PDEV_ID;
17024 	}
17025 
17026 	/* Reset the cpu ring map if radio is NSS offloaded */
17027 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
17028 		dp_soc_reset_cpu_ring_map(soc);
17029 		dp_soc_reset_intr_mask(soc);
17030 	}
17031 
17032 	/* Reset the cpu ring map if radio is NSS offloaded */
17033 	dp_soc_reset_ipa_vlan_intr_mask(soc);
17034 
17035 	TAILQ_INIT(&pdev->vdev_list);
17036 	qdf_spinlock_create(&pdev->vdev_list_lock);
17037 	pdev->vdev_count = 0;
17038 	pdev->is_lro_hash_configured = 0;
17039 
17040 	qdf_spinlock_create(&pdev->tx_mutex);
17041 	pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_INVALID_LMAC_ID;
17042 	pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_INVALID_LMAC_ID;
17043 	pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_INVALID_LMAC_ID;
17044 
17045 	DP_STATS_INIT(pdev);
17046 
17047 	dp_local_peer_id_pool_init(pdev);
17048 
17049 	dp_dscp_tid_map_setup(pdev);
17050 	dp_pcp_tid_map_setup(pdev);
17051 
17052 	/* set the reo destination during initialization */
17053 	dp_pdev_set_default_reo(pdev);
17054 
17055 	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
17056 
17057 	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
17058 			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
17059 			      TRUE);
17060 
17061 	if (!pdev->sojourn_buf) {
17062 		dp_init_err("%pK: Failed to allocate sojourn buf", soc);
17063 		goto fail2;
17064 	}
17065 	sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
17066 	qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));
17067 
17068 	qdf_event_create(&pdev->fw_peer_stats_event);
17069 	qdf_event_create(&pdev->fw_stats_event);
17070 	qdf_event_create(&pdev->fw_obss_stats_event);
17071 
17072 	pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
17073 
17074 	if (dp_rxdma_ring_setup(soc, pdev)) {
17075 		dp_init_err("%pK: RXDMA ring config failed", soc);
17076 		goto fail3;
17077 	}
17078 
17079 	if (dp_init_ipa_rx_refill_buf_ring(soc, pdev))
17080 		goto fail3;
17081 
17082 	if (dp_ipa_ring_resource_setup(soc, pdev))
17083 		goto fail4;
17084 
17085 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
17086 		dp_init_err("%pK: dp_ipa_uc_attach failed", soc);
17087 		goto fail4;
17088 	}
17089 
17090 	ret = dp_rx_fst_attach(soc, pdev);
17091 	if ((ret != QDF_STATUS_SUCCESS) &&
17092 	    (ret != QDF_STATUS_E_NOSUPPORT)) {
17093 		dp_init_err("%pK: RX Flow Search Table attach failed: pdev %d err %d",
17094 			    soc, pdev_id, ret);
17095 		goto fail5;
17096 	}
17097 
17098 	if (dp_pdev_bkp_stats_attach(pdev) != QDF_STATUS_SUCCESS) {
17099 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
17100 			  FL("dp_pdev_bkp_stats_attach failed"));
17101 		goto fail6;
17102 	}
17103 
17104 	if (dp_monitor_pdev_init(pdev)) {
17105 		dp_init_err("%pK: dp_monitor_pdev_init failed\n", soc);
17106 		goto fail7;
17107 	}
17108 
17109 	/* initialize sw rx descriptors */
17110 	dp_rx_pdev_desc_pool_init(pdev);
17111 	/* allocate buffers and replenish the RxDMA ring */
17112 	dp_rx_pdev_buffers_alloc(pdev);
17113 
17114 	dp_init_tso_stats(pdev);
17115 
17116 	pdev->rx_fast_flag = false;
17117 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
17118 		qdf_dma_mem_stats_read(),
17119 		qdf_heap_mem_stats_read(),
17120 		qdf_skb_total_mem_stats_read());
17121 
17122 	return QDF_STATUS_SUCCESS;
17123 fail7:
17124 	dp_pdev_bkp_stats_detach(pdev);
17125 fail6:
17126 	dp_rx_fst_detach(soc, pdev);
17127 fail5:
17128 	dp_ipa_uc_detach(soc, pdev);
17129 fail4:
17130 	dp_deinit_ipa_rx_refill_buf_ring(soc, pdev);
17131 fail3:
17132 	dp_rxdma_ring_cleanup(soc, pdev);
17133 	qdf_nbuf_free(pdev->sojourn_buf);
17134 fail2:
17135 	qdf_spinlock_destroy(&pdev->tx_mutex);
17136 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
17137 	dp_pdev_srng_deinit(pdev);
17138 fail1:
17139 	dp_wdi_event_detach(pdev);
17140 fail0:
17141 	return QDF_STATUS_E_FAILURE;
17142 }
17143 
17144 /*
17145  * dp_pdev_init_wifi3() - Init txrx pdev
17146  * @htc_handle: HTC handle for host-target interface
17147  * @qdf_osdev: QDF OS device
17148  * @force: Force deinit
17149  *
17150  * Return: QDF_STATUS
17151  */
17152 static QDF_STATUS dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
17153 				     HTC_HANDLE htc_handle,
17154 				     qdf_device_t qdf_osdev,
17155 				     uint8_t pdev_id)
17156 {
17157 	return dp_pdev_init(txrx_soc, htc_handle, qdf_osdev, pdev_id);
17158 }
17159 
17160