xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision cfe8cda78633be00818878028ff51fc658a66c94)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <wlan_ipa_obj_mgmt_api.h>
21 #include <qdf_types.h>
22 #include <qdf_lock.h>
23 #include <qdf_net_types.h>
24 #include <qdf_lro.h>
25 #include <qdf_module.h>
26 #include <hal_hw_headers.h>
27 #include <hal_api.h>
28 #include <hif.h>
29 #include <htt.h>
30 #include <wdi_event.h>
31 #include <queue.h>
32 #include "dp_types.h"
33 #include "dp_internal.h"
34 #include "dp_tx.h"
35 #include "dp_tx_desc.h"
36 #include "dp_rx.h"
37 #ifdef DP_RATETABLE_SUPPORT
38 #include "dp_ratetable.h"
39 #endif
40 #include <cdp_txrx_handle.h>
41 #include <wlan_cfg.h>
42 #include <wlan_utility.h>
43 #include "cdp_txrx_cmn_struct.h"
44 #include "cdp_txrx_stats_struct.h"
45 #include "cdp_txrx_cmn_reg.h"
46 #include <qdf_util.h>
47 #include "dp_peer.h"
48 #include "htt_stats.h"
49 #include "dp_htt.h"
50 #ifdef WLAN_SUPPORT_RX_FISA
51 #include <wlan_dp_fisa_rx.h>
52 #endif
53 #include "htt_ppdu_stats.h"
54 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
55 #include "cfg_ucfg_api.h"
56 #include <wlan_module_ids.h>
57 
58 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
59 #include "cdp_txrx_flow_ctrl_v2.h"
60 #else
61 
62 static inline void
63 cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
64 {
65 	return;
66 }
67 #endif
68 #ifdef WIFI_MONITOR_SUPPORT
69 #include <dp_mon.h>
70 #endif
71 #include "dp_ipa.h"
72 #ifdef FEATURE_WDS
73 #include "dp_txrx_wds.h"
74 #endif
75 #ifdef WLAN_SUPPORT_MSCS
76 #include "dp_mscs.h"
77 #endif
78 #ifdef WLAN_SUPPORT_MESH_LATENCY
79 #include "dp_mesh_latency.h"
80 #endif
81 #ifdef WLAN_SUPPORT_SCS
82 #include "dp_scs.h"
83 #endif
84 #ifdef ATH_SUPPORT_IQUE
85 #include "dp_txrx_me.h"
86 #endif
87 #if defined(DP_CON_MON)
88 #ifndef REMOVE_PKT_LOG
89 #include <pktlog_ac_api.h>
90 #include <pktlog_ac.h>
91 #endif
92 #endif
93 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
94 #include <wlan_dp_swlm.h>
95 #endif
96 #ifdef CONFIG_SAWF_DEF_QUEUES
97 #include "dp_sawf.h"
98 #endif
99 #ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
100 #include <target_if_dp.h>
101 #endif
102 
103 #ifdef WLAN_FEATURE_STATS_EXT
104 #define INIT_RX_HW_STATS_LOCK(_soc) \
105 	qdf_spinlock_create(&(_soc)->rx_hw_stats_lock)
106 #define DEINIT_RX_HW_STATS_LOCK(_soc) \
107 	qdf_spinlock_destroy(&(_soc)->rx_hw_stats_lock)
108 #else
109 #define INIT_RX_HW_STATS_LOCK(_soc)  /* no op */
110 #define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */
111 #endif
112 
113 #if defined(DP_PEER_EXTENDED_API) || defined(WLAN_DP_PENDING_MEM_FLUSH)
114 #define SET_PEER_REF_CNT_ONE(_peer) \
115 	qdf_atomic_set(&(_peer)->ref_cnt, 1)
116 #else
117 #define SET_PEER_REF_CNT_ONE(_peer)
118 #endif
119 
120 #ifdef WLAN_SYSFS_DP_STATS
121 /* sysfs event wait time for firmware stat request, in milliseconds */
122 #define WLAN_SYSFS_STAT_REQ_WAIT_MS 3000
123 #endif
124 
125 #ifdef QCA_DP_ENABLE_TX_COMP_RING4
126 #define TXCOMP_RING4_NUM 3
127 #else
128 #define TXCOMP_RING4_NUM WBM2SW_TXCOMP_RING4_NUM
129 #endif
130 
131 #ifdef QCA_DP_TX_FW_METADATA_V2
132 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
133 		HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
134 #else
135 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
136 		HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
137 #endif
138 
139 QDF_COMPILE_TIME_ASSERT(max_rx_rings_check,
140 			MAX_REO_DEST_RINGS == CDP_MAX_RX_RINGS);
141 
142 QDF_COMPILE_TIME_ASSERT(max_tx_rings_check,
143 			MAX_TCL_DATA_RINGS == CDP_MAX_TX_COMP_RINGS);
144 
145 #define dp_init_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_INIT, params)
146 #define dp_init_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_INIT, params)
147 #define dp_init_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_INIT, params)
148 #define dp_init_info(params...) \
149 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_INIT, ## params)
150 #define dp_init_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_INIT, params)
151 
152 #define dp_vdev_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_VDEV, params)
153 #define dp_vdev_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_VDEV, params)
154 #define dp_vdev_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_VDEV, params)
155 #define dp_vdev_info(params...) \
156 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_VDEV, ## params)
157 #define dp_vdev_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_VDEV, params)
158 
159 void dp_configure_arch_ops(struct dp_soc *soc);
160 qdf_size_t dp_get_soc_context_size(uint16_t device_id);
161 
162 /*
163  * The max size of cdp_peer_stats_param_t is limited to 16 bytes.
164  * If the buffer size exceeds this limit,
165  * dp_txrx_get_peer_stats is to be used instead.
166  */
167 QDF_COMPILE_TIME_ASSERT(cdp_peer_stats_param_t_max_size,
168 			(sizeof(cdp_peer_stats_param_t) <= 16));
169 
170 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
171 /*
172  * If WLAN_CFG_INT_NUM_CONTEXTS is changed, HIF_NUM_INT_CONTEXTS
173  * should also be updated accordingly
174  */
175 QDF_COMPILE_TIME_ASSERT(num_intr_grps,
176 			HIF_NUM_INT_CONTEXTS == WLAN_CFG_INT_NUM_CONTEXTS);
177 
178 /*
179  * HIF_EVENT_HIST_MAX should always be power of 2
180  */
181 QDF_COMPILE_TIME_ASSERT(hif_event_history_size,
182 			(HIF_EVENT_HIST_MAX & (HIF_EVENT_HIST_MAX - 1)) == 0);
183 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
184 
185 /*
186  * If WLAN_CFG_INT_NUM_CONTEXTS is changed,
187  * WLAN_CFG_INT_NUM_CONTEXTS_MAX should also be updated
188  */
189 QDF_COMPILE_TIME_ASSERT(wlan_cfg_num_int_ctxs,
190 			WLAN_CFG_INT_NUM_CONTEXTS_MAX >=
191 			WLAN_CFG_INT_NUM_CONTEXTS);
192 
193 static QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl);
194 static QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl);
195 
196 static void dp_pdev_srng_deinit(struct dp_pdev *pdev);
197 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev);
198 static void dp_pdev_srng_free(struct dp_pdev *pdev);
199 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev);
200 
201 static void dp_soc_srng_deinit(struct dp_soc *soc);
202 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc);
203 static void dp_soc_srng_free(struct dp_soc *soc);
204 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc);
205 
206 static void dp_soc_cfg_init(struct dp_soc *soc);
207 static void dp_soc_cfg_attach(struct dp_soc *soc);
208 
209 static inline
210 QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
211 				struct cdp_pdev_attach_params *params);
212 
213 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id);
214 
215 static QDF_STATUS
216 dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
217 		   HTC_HANDLE htc_handle,
218 		   qdf_device_t qdf_osdev,
219 		   uint8_t pdev_id);
220 
221 static QDF_STATUS
222 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id, int force);
223 
224 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc);
225 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc);
226 
227 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
228 		  struct hif_opaque_softc *hif_handle);
229 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
230 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc,
231 				       uint8_t pdev_id,
232 				       int force);
233 static struct dp_soc *
234 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
235 	      struct cdp_soc_attach_params *params);
236 static inline QDF_STATUS dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl,
237 					      uint8_t vdev_id,
238 					      uint8_t *peer_mac_addr,
239 					      enum cdp_peer_type peer_type);
240 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
241 				       uint8_t vdev_id,
242 				       uint8_t *peer_mac, uint32_t bitmap,
243 				       enum cdp_peer_type peer_type);
244 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
245 				bool unmap_only,
246 				bool mlo_peers_only);
247 #ifdef ENABLE_VERBOSE_DEBUG
248 bool is_dp_verbose_debug_enabled;
249 #endif
250 
251 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
252 static bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
253 static void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
254 			   bool enable);
255 static inline void
256 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
257 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats);
258 static inline void
259 dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
260 #endif
261 
262 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
263 						uint8_t index);
264 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index);
265 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index);
266 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
267 						 uint8_t index);
268 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
269 					    enum hal_ring_type ring_type,
270 					    int ring_num);
271 #ifdef FEATURE_AST
272 void dp_print_mlo_ast_stats(struct dp_soc *soc);
273 #endif
274 
275 #ifdef DP_UMAC_HW_RESET_SUPPORT
276 static QDF_STATUS dp_umac_reset_handle_pre_reset(struct dp_soc *soc);
277 static QDF_STATUS dp_umac_reset_handle_post_reset(struct dp_soc *soc);
278 static QDF_STATUS dp_umac_reset_handle_post_reset_complete(struct dp_soc *soc);
279 #endif
280 
281 #define DP_INTR_POLL_TIMER_MS	5
282 
283 #define MON_VDEV_TIMER_INIT 0x1
284 #define MON_VDEV_TIMER_RUNNING 0x2
285 
286 #define DP_MCS_LENGTH (6*MAX_MCS)
287 
288 #define DP_CURR_FW_STATS_AVAIL 19
289 #define DP_HTT_DBG_EXT_STATS_MAX 256
290 #define DP_MAX_SLEEP_TIME 100
291 #ifndef QCA_WIFI_3_0_EMU
292 #define SUSPEND_DRAIN_WAIT 500
293 #else
294 #define SUSPEND_DRAIN_WAIT 3000
295 #endif
296 
297 #ifdef IPA_OFFLOAD
298 /* Exclude IPA rings from the interrupt context */
299 #define TX_RING_MASK_VAL	0xb
300 #define RX_RING_MASK_VAL	0x7
301 #else
302 #define TX_RING_MASK_VAL	0xF
303 #define RX_RING_MASK_VAL	0xF
304 #endif
305 
306 #define STR_MAXLEN	64
307 
308 #define RNG_ERR		"SRNG setup failed for"
309 
310 /**
311  * default_dscp_tid_map - Default DSCP-TID mapping
312  *
313  * DSCP        TID
314  * 000000      0
315  * 001000      1
316  * 010000      2
317  * 011000      3
318  * 100000      4
319  * 101000      5
320  * 110000      6
321  * 111000      7
322  */
323 static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
324 	0, 0, 0, 0, 0, 0, 0, 0,
325 	1, 1, 1, 1, 1, 1, 1, 1,
326 	2, 2, 2, 2, 2, 2, 2, 2,
327 	3, 3, 3, 3, 3, 3, 3, 3,
328 	4, 4, 4, 4, 4, 4, 4, 4,
329 	5, 5, 5, 5, 5, 5, 5, 5,
330 	6, 6, 6, 6, 6, 6, 6, 6,
331 	7, 7, 7, 7, 7, 7, 7, 7,
332 };
333 
334 /**
335  * default_pcp_tid_map - Default PCP-TID mapping
336  *
337  * PCP     TID
338  * 000      0
339  * 001      1
340  * 010      2
341  * 011      3
342  * 100      4
343  * 101      5
344  * 110      6
345  * 111      7
346  */
347 static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
348 	0, 1, 2, 3, 4, 5, 6, 7,
349 };
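/*
 * Illustrative sketch (not part of the driver): how these default maps
 * would be indexed. The helper below is hypothetical; it assumes the
 * DSCP sits in the upper six bits of the IP TOS byte, while an 802.1p
 * PCP value indexes default_pcp_tid_map directly.
 *
 *	static inline uint8_t example_tos_to_tid(uint8_t tos)
 *	{
 *		uint8_t dscp = tos >> 2;	// DSCP = TOS bits [7:2]
 *
 *		// e.g. TOS 0xB8 -> DSCP 46 -> TID 5
 *		return default_dscp_tid_map[dscp];
 *	}
 */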
350 
351 /**
352  * dp_cpu_ring_map - CPU to Tx ring map
353  */
354 uint8_t
355 dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
356 	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
357 	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
358 	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
359 	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
360 	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
361 #ifdef WLAN_TX_PKT_CAPTURE_ENH
362 	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
363 #endif
364 };
365 
366 qdf_export_symbol(dp_cpu_ring_map);
367 
368 /**
369  * dp_stats_type - Select the type of statistics
370  */
371 enum dp_stats_type {
372 	STATS_FW = 0,
373 	STATS_HOST = 1,
374 	STATS_TYPE_MAX = 2,
375 };
376 
377 /**
378  * dp_fw_stats - General firmware statistics options
379  *
380  */
381 enum dp_fw_stats {
382 	TXRX_FW_STATS_INVALID	= -1,
383 };
384 
385 /**
386  * dp_stats_mapping_table - Firmware and Host statistics
387  * currently supported
388  */
389 const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
390 	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
391 	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
392 	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
393 	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
394 	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
395 	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
396 	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
397 	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
398 	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
399 	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
400 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
401 	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
402 	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
403 	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
404 	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
405 	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
406 	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
407 	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
408 	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
409 	/* Last ENUM for HTT FW STATS */
410 	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
411 	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
412 	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
413 	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
414 	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
415 	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
416 	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
417 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
418 	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
419 	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
420 	{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
421 	{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
422 	{TXRX_FW_STATS_INVALID, TXRX_NAPI_STATS},
423 	{TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
424 	{TXRX_FW_STATS_INVALID, TXRX_SOC_FSE_STATS},
425 	{TXRX_FW_STATS_INVALID, TXRX_HAL_REG_WRITE_STATS},
426 	{TXRX_FW_STATS_INVALID, TXRX_SOC_REO_HW_DESC_DUMP},
427 	{TXRX_FW_STATS_INVALID, TXRX_SOC_WBM_IDLE_HPTP_DUMP},
428 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_USAGE_WM_STATS},
429 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT, TXRX_HOST_STATS_INVALID},
430 	{HTT_DBG_EXT_STATS_TX_SOUNDING_INFO, TXRX_HOST_STATS_INVALID}
431 };
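/*
 * Illustrative sketch (not part of the driver): a caller holding a
 * stats request id would consult this table to decide whether the
 * request is serviced by firmware or by the host (names below are
 * hypothetical):
 *
 *	int fw_stat = dp_stats_mapping_table[stats_id][STATS_FW];
 *	int host_stat = dp_stats_mapping_table[stats_id][STATS_HOST];
 *
 *	if (fw_stat != TXRX_FW_STATS_INVALID)
 *		; // issue the HTT_DBG_EXT_STATS request to firmware
 *	else if (host_stat != TXRX_HOST_STATS_INVALID)
 *		; // service the request from host-maintained counters
 */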
432 
433 /* MCL specific functions */
434 #if defined(DP_CON_MON)
435 
436 #ifdef DP_CON_MON_MSI_ENABLED
437 /**
438  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
439  * @soc: pointer to dp_soc handle
440  * @intr_ctx_num: interrupt context number for which mon mask is needed
441  *
442  * In the DP_CON_MON_MSI_ENABLED case, monitor mode rings are serviced
443  * in MSI interrupt context rather than in a polled timer context, so
444  * the monitor ring mask must be part of the regular interrupt masks.
445  *
446  * This function therefore returns the Rx monitor ring mask configured
447  * for the given interrupt context, so that interrupt processing also
448  * covers the monitor mode rings.
449  *
450  * Return: mon ring mask for the given interrupt context
451  */
452 static inline uint32_t
453 dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
454 {
455 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
456 }
457 #else
458 /**
459  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
460  * @soc: pointer to dp_soc handle
461  * @intr_ctx_num: interrupt context number for which mon mask is needed
462  *
463  * For MCL, monitor mode rings are processed in timer contexts (polled).
464  * This function returns 0 because, in interrupt mode (softirq based RX),
465  * we do not want to process monitor mode rings in a softirq.
466  *
467  * So, in case packet log is enabled for SAP/STA/P2P modes,
468  * regular interrupt processing will not process monitor mode rings. It would be
469  * done in a separate timer context.
470  *
471  * Return: 0
472  */
473 static inline uint32_t
474 dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
475 {
476 	return 0;
477 }
478 #endif
479 
480 #ifdef IPA_OFFLOAD
481 /**
482  * dp_get_num_rx_contexts() - get number of RX contexts
483  * @soc_hdl: cdp opaque soc handle
484  *
485  * Return: number of RX contexts
486  */
487 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
488 {
489 	int num_rx_contexts;
490 	uint32_t reo_ring_map;
491 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
492 
493 	reo_ring_map = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
494 
495 	switch (soc->arch_id) {
496 	case CDP_ARCH_TYPE_BE:
497 		/* 2 REO rings are used for IPA */
498 		reo_ring_map &=  ~(BIT(3) | BIT(7));
499 
500 		break;
501 	case CDP_ARCH_TYPE_LI:
502 		/* 1 REO ring is used for IPA */
503 		reo_ring_map &=  ~BIT(3);
504 		break;
505 	default:
506 		dp_err("unknown arch_id 0x%x", soc->arch_id);
507 		QDF_BUG(0);
508 	}
509 	/*
510 	 * qdf_get_hweight32 is preferred over qdf_get_hweight8 in case the
511 	 * map is scaled up in the future
512 	 */
513 	num_rx_contexts = qdf_get_hweight32(reo_ring_map);
514 
515 	return num_rx_contexts;
516 }
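/*
 * Worked example (illustrative): with a REO ring map of 0xFF on a BE
 * target, masking out the two IPA rings (BIT(3) and BIT(7)) leaves
 * 0x77, so qdf_get_hweight32(0x77) yields 6 RX contexts.
 */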
517 #else
518 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
519 {
520 	int num_rx_contexts;
521 	uint32_t reo_config;
522 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
523 
524 	reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
525 	/*
526 	 * qdf_get_hweight32 is preferred over qdf_get_hweight8 in case the
527 	 * map is scaled up in the future
528 	 */
529 	num_rx_contexts = qdf_get_hweight32(reo_config);
530 
531 	return num_rx_contexts;
532 }
533 #endif
534 
535 #else
536 
537 /**
538  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
539  * @soc: pointer to dp_soc handle
540  * @intr_ctx_num: interrupt context number for which mon mask is needed
541  *
542  * Return: mon mask value
543  */
544 static inline
545 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
546 {
547 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
548 }
549 
550 /**
551  * dp_soc_reset_mon_intr_mask() - reset mon intr mask
552  * @soc: pointer to dp_soc handle
553  *
554  * Return: None
555  */
556 void dp_soc_reset_mon_intr_mask(struct dp_soc *soc)
557 {
558 	int i;
559 
560 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
561 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
562 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
563 	}
564 }
565 
566 qdf_export_symbol(dp_soc_reset_mon_intr_mask);
567 
568 /*
569  * dp_service_lmac_rings() - timer to reap lmac rings
570  * @arg: SoC Handle
571  *
572  * Return: None
573  */
575 static void dp_service_lmac_rings(void *arg)
576 {
577 	struct dp_soc *soc = (struct dp_soc *)arg;
578 	int ring = 0, i;
579 	struct dp_pdev *pdev = NULL;
580 	union dp_rx_desc_list_elem_t *desc_list = NULL;
581 	union dp_rx_desc_list_elem_t *tail = NULL;
582 
583 	/* Process LMAC interrupts */
584 	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
585 		int mac_for_pdev = ring;
586 		struct dp_srng *rx_refill_buf_ring;
587 
588 		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
589 		if (!pdev)
590 			continue;
591 
592 		rx_refill_buf_ring = &soc->rx_refill_buf_ring[mac_for_pdev];
593 
594 		dp_monitor_process(soc, NULL, mac_for_pdev,
595 				   QCA_NAPI_BUDGET);
596 
597 		for (i = 0;
598 		     i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
599 			dp_rxdma_err_process(&soc->intr_ctx[i], soc,
600 					     mac_for_pdev,
601 					     QCA_NAPI_BUDGET);
602 
603 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF,
604 						  mac_for_pdev))
605 			dp_rx_buffers_replenish(soc, mac_for_pdev,
606 						rx_refill_buf_ring,
607 						&soc->rx_desc_buf[mac_for_pdev],
608 						0, &desc_list, &tail, false);
609 	}
610 
611 	qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
612 }
613 
614 #endif
615 
616 #ifdef FEATURE_MEC
617 void dp_peer_mec_flush_entries(struct dp_soc *soc)
618 {
619 	unsigned int index;
620 	struct dp_mec_entry *mecentry, *mecentry_next;
621 
622 	TAILQ_HEAD(, dp_mec_entry) free_list;
623 	TAILQ_INIT(&free_list);
624 
625 	if (!soc->mec_hash.mask)
626 		return;
627 
628 	if (!soc->mec_hash.bins)
629 		return;
630 
631 	if (!qdf_atomic_read(&soc->mec_cnt))
632 		return;
633 
634 	qdf_spin_lock_bh(&soc->mec_lock);
635 	for (index = 0; index <= soc->mec_hash.mask; index++) {
636 		if (!TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
637 			TAILQ_FOREACH_SAFE(mecentry, &soc->mec_hash.bins[index],
638 					   hash_list_elem, mecentry_next) {
639 			    dp_peer_mec_detach_entry(soc, mecentry, &free_list);
640 			}
641 		}
642 	}
643 	qdf_spin_unlock_bh(&soc->mec_lock);
644 
645 	dp_peer_mec_free_list(soc, &free_list);
646 }
647 
648 /**
649  * dp_print_mec_stats() - Dump MEC stats and entries in table
650  * @soc: Datapath soc handle
651  *
652  * Return: none
653  */
654 static void dp_print_mec_stats(struct dp_soc *soc)
655 {
656 	int i;
657 	uint32_t index;
658 	struct dp_mec_entry *mecentry = NULL, *mec_list;
659 	uint32_t num_entries = 0;
660 
661 	DP_PRINT_STATS("MEC Stats:");
662 	DP_PRINT_STATS("   Entries Added   = %d", soc->stats.mec.added);
663 	DP_PRINT_STATS("   Entries Deleted = %d", soc->stats.mec.deleted);
664 
665 	if (!qdf_atomic_read(&soc->mec_cnt))
666 		return;
667 
668 	mec_list = qdf_mem_malloc(sizeof(*mecentry) * DP_PEER_MAX_MEC_ENTRY);
669 	if (!mec_list) {
670 		dp_peer_warn("%pK: failed to allocate mec_list", soc);
671 		return;
672 	}
673 
674 	DP_PRINT_STATS("MEC Table:");
675 	for (index = 0; index <= soc->mec_hash.mask; index++) {
676 		qdf_spin_lock_bh(&soc->mec_lock);
677 		if (TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
678 			qdf_spin_unlock_bh(&soc->mec_lock);
679 			continue;
680 		}
681 
682 		TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index],
683 			      hash_list_elem) {
684 			qdf_mem_copy(&mec_list[num_entries], mecentry,
685 				     sizeof(*mecentry));
686 			num_entries++;
687 		}
688 		qdf_spin_unlock_bh(&soc->mec_lock);
689 	}
690 
691 	if (!num_entries) {
692 		qdf_mem_free(mec_list);
693 		return;
694 	}
695 
696 	for (i = 0; i < num_entries; i++) {
697 		DP_PRINT_STATS("%6d mac_addr = " QDF_MAC_ADDR_FMT
698 			       " is_active = %d pdev_id = %d vdev_id = %d",
699 			       i,
700 			       QDF_MAC_ADDR_REF(mec_list[i].mac_addr.raw),
701 			       mec_list[i].is_active,
702 			       mec_list[i].pdev_id,
703 			       mec_list[i].vdev_id);
704 	}
705 	qdf_mem_free(mec_list);
706 }
707 #else
708 static void dp_print_mec_stats(struct dp_soc *soc)
709 {
710 }
711 #endif
712 
713 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
714 				 uint8_t vdev_id,
715 				 uint8_t *peer_mac,
716 				 uint8_t *mac_addr,
717 				 enum cdp_txrx_ast_entry_type type,
718 				 uint32_t flags)
719 {
720 	int ret = -1;
721 	QDF_STATUS status = QDF_STATUS_SUCCESS;
722 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
723 						       peer_mac, 0, vdev_id,
724 						       DP_MOD_ID_CDP);
725 
726 	if (!peer) {
727 		dp_peer_debug("Peer is NULL!");
728 		return ret;
729 	}
730 
731 	status = dp_peer_add_ast((struct dp_soc *)soc_hdl,
732 				 peer,
733 				 mac_addr,
734 				 type,
735 				 flags);
736 	if ((status == QDF_STATUS_SUCCESS) ||
737 	    (status == QDF_STATUS_E_ALREADY) ||
738 	    (status == QDF_STATUS_E_AGAIN))
739 		ret = 0;
740 
741 	dp_hmwds_ast_add_notify(peer, mac_addr,
742 				type, status, false);
743 
744 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
745 
746 	return ret;
747 }
748 
749 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
750 						uint8_t vdev_id,
751 						uint8_t *peer_mac,
752 						uint8_t *wds_macaddr,
753 						uint32_t flags)
754 {
755 	int status = -1;
756 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
757 	struct dp_ast_entry  *ast_entry = NULL;
758 	struct dp_peer *peer;
759 
760 	if (soc->ast_offload_support)
761 		return status;
762 
763 	peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
764 				      peer_mac, 0, vdev_id,
765 				      DP_MOD_ID_CDP);
766 
767 	if (!peer) {
768 		dp_peer_debug("Peer is NULL!");
769 		return status;
770 	}
771 
772 	qdf_spin_lock_bh(&soc->ast_lock);
773 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
774 						    peer->vdev->pdev->pdev_id);
775 
776 	if (ast_entry) {
777 		status = dp_peer_update_ast(soc,
778 					    peer,
779 					    ast_entry, flags);
780 	}
781 	qdf_spin_unlock_bh(&soc->ast_lock);
782 
783 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
784 
785 	return status;
786 }
787 
788 /*
789  * dp_peer_reset_ast_entries() - Deletes all HMWDS entries for a peer
790  * @soc_handle:		Datapath SOC handle
791  * @peer:		DP peer
792  * @arg:		callback argument
793  *
794  * Return: None
795  */
796 static void
797 dp_peer_reset_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
798 {
799 	struct dp_ast_entry *ast_entry = NULL;
800 	struct dp_ast_entry *tmp_ast_entry;
801 
802 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) {
803 		if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
804 		    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
805 			dp_peer_del_ast(soc, ast_entry);
806 	}
807 }
808 
809 /*
810  * dp_wds_reset_ast_wifi3() - Delete HMWDS AST entries for a peer/WDS MAC
811  * @soc_hdl:		Datapath SOC handle
812  * @wds_macaddr:	WDS entry MAC Address
813  * @peer_mac_addr:	peer MAC Address
814  * @vdev_id:		id of vdev handle
815  * Return: QDF_STATUS
816  */
817 static QDF_STATUS dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
818 					 uint8_t *wds_macaddr,
819 					 uint8_t *peer_mac_addr,
820 					 uint8_t vdev_id)
821 {
822 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
823 	struct dp_ast_entry *ast_entry = NULL;
824 	struct dp_peer *peer;
825 	struct dp_pdev *pdev;
826 	struct dp_vdev *vdev;
827 
828 	if (soc->ast_offload_support)
829 		return QDF_STATUS_E_FAILURE;
830 
831 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
832 
833 	if (!vdev)
834 		return QDF_STATUS_E_FAILURE;
835 
836 	pdev = vdev->pdev;
837 
838 	if (peer_mac_addr) {
839 		peer = dp_peer_find_hash_find(soc, peer_mac_addr,
840 					      0, vdev->vdev_id,
841 					      DP_MOD_ID_CDP);
842 		if (!peer) {
843 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
844 			return QDF_STATUS_E_FAILURE;
845 		}
846 
847 		qdf_spin_lock_bh(&soc->ast_lock);
848 		dp_peer_reset_ast_entries(soc, peer, NULL);
849 		qdf_spin_unlock_bh(&soc->ast_lock);
850 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
851 	} else if (wds_macaddr) {
852 		qdf_spin_lock_bh(&soc->ast_lock);
853 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
854 							    pdev->pdev_id);
855 
856 		if (ast_entry) {
857 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
858 			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
859 				dp_peer_del_ast(soc, ast_entry);
860 		}
861 		qdf_spin_unlock_bh(&soc->ast_lock);
862 	}
863 
864 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
865 	return QDF_STATUS_SUCCESS;
866 }
867 
868 /*
869  * dp_wds_reset_ast_table_wifi3() - Delete HMWDS AST entries for all peers
870  * @soc_hdl:		Datapath SOC handle
871  * @vdev_id:		id of vdev object
872  *
873  * Return: QDF_STATUS
874  */
875 static QDF_STATUS
876 dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
877 			     uint8_t vdev_id)
878 {
879 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
880 
881 	if (soc->ast_offload_support)
882 		return QDF_STATUS_SUCCESS;
883 
884 	qdf_spin_lock_bh(&soc->ast_lock);
885 
886 	dp_soc_iterate_peer(soc, dp_peer_reset_ast_entries, NULL,
887 			    DP_MOD_ID_CDP);
888 	qdf_spin_unlock_bh(&soc->ast_lock);
889 
890 	return QDF_STATUS_SUCCESS;
891 }
892 
893 /*
894  * dp_peer_flush_ast_entries() - Delete all wds and hmwds ast entries of a peer
895  * @soc:		Datapath SOC
896  * @peer:		Datapath peer
897  * @arg:		arg to callback
898  *
899  * Return: None
900  */
901 static void
902 dp_peer_flush_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
903 {
904 	struct dp_ast_entry *ase = NULL;
905 	struct dp_ast_entry *temp_ase;
906 
907 	DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
908 		if ((ase->type ==
909 			CDP_TXRX_AST_TYPE_STATIC) ||
910 			(ase->type ==
911 			 CDP_TXRX_AST_TYPE_SELF) ||
912 			(ase->type ==
913 			 CDP_TXRX_AST_TYPE_STA_BSS))
914 			continue;
915 		dp_peer_del_ast(soc, ase);
916 	}
917 }
918 
919 /*
920  * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entries
921  * @soc_hdl:		Datapath SOC handle
922  *
923  * Return: None
924  */
925 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
926 {
927 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
928 
929 	qdf_spin_lock_bh(&soc->ast_lock);
930 
931 	dp_soc_iterate_peer(soc, dp_peer_flush_ast_entries, NULL,
932 			    DP_MOD_ID_CDP);
933 
934 	qdf_spin_unlock_bh(&soc->ast_lock);
935 	dp_peer_mec_flush_entries(soc);
936 }
937 
938 #if defined(IPA_WDS_EASYMESH_FEATURE) && defined(FEATURE_AST)
939 /*
940  * dp_peer_send_wds_disconnect() - Send disconnect event to IPA for each WDS AST entry of the peer
941  * @soc: Datapath SOC
942  * @peer: Datapath peer
943  *
944  * Return: None
945  */
946 static void
947 dp_peer_send_wds_disconnect(struct dp_soc *soc, struct dp_peer *peer)
948 {
949 	struct dp_ast_entry *ase = NULL;
950 	struct dp_ast_entry *temp_ase;
951 
952 	DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
953 		if (ase->type == CDP_TXRX_AST_TYPE_WDS) {
954 			soc->cdp_soc.ol_ops->peer_send_wds_disconnect(soc->ctrl_psoc,
955 								      ase->mac_addr.raw,
956 								      ase->vdev_id);
957 		}
958 	}
959 }
960 #elif defined(FEATURE_AST)
961 static void
962 dp_peer_send_wds_disconnect(struct dp_soc *soc, struct dp_peer *peer)
963 {
964 }
965 #endif
966 
967 /**
968  * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
969  *                                       and return ast entry information
970  *                                       of first ast entry found in the
971  *                                       table with given mac address
972  *
973  * @soc : data path soc handle
974  * @ast_mac_addr : AST entry mac address
975  * @ast_entry_info : ast entry information
976  *
977  * return : true if ast entry found with ast_mac_addr
978  *          false if ast entry not found
979  */
980 static bool dp_peer_get_ast_info_by_soc_wifi3
981 	(struct cdp_soc_t *soc_hdl,
982 	 uint8_t *ast_mac_addr,
983 	 struct cdp_ast_entry_info *ast_entry_info)
984 {
985 	struct dp_ast_entry *ast_entry = NULL;
986 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
987 	struct dp_peer *peer = NULL;
988 
989 	if (soc->ast_offload_support)
990 		return false;
991 
992 	qdf_spin_lock_bh(&soc->ast_lock);
993 
994 	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
995 	if ((!ast_entry) ||
996 	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
997 		qdf_spin_unlock_bh(&soc->ast_lock);
998 		return false;
999 	}
1000 
1001 	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
1002 				     DP_MOD_ID_AST);
1003 	if (!peer) {
1004 		qdf_spin_unlock_bh(&soc->ast_lock);
1005 		return false;
1006 	}
1007 
1008 	ast_entry_info->type = ast_entry->type;
1009 	ast_entry_info->pdev_id = ast_entry->pdev_id;
1010 	ast_entry_info->vdev_id = ast_entry->vdev_id;
1011 	ast_entry_info->peer_id = ast_entry->peer_id;
1012 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
1013 		     &peer->mac_addr.raw[0],
1014 		     QDF_MAC_ADDR_SIZE);
1015 	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
1016 	qdf_spin_unlock_bh(&soc->ast_lock);
1017 	return true;
1018 }
1019 
1020 /**
1021  * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
1022  *                                          and return ast entry information
1023  *                                          if mac address and pdev_id matches
1024  *
1025  * @soc : data path soc handle
1026  * @ast_mac_addr : AST entry mac address
1027  * @pdev_id : pdev_id
1028  * @ast_entry_info : ast entry information
1029  *
1030  * return : true if ast entry found with ast_mac_addr
1031  *          false if ast entry not found
1032  */
1033 static bool dp_peer_get_ast_info_by_pdevid_wifi3
1034 		(struct cdp_soc_t *soc_hdl,
1035 		 uint8_t *ast_mac_addr,
1036 		 uint8_t pdev_id,
1037 		 struct cdp_ast_entry_info *ast_entry_info)
1038 {
1039 	struct dp_ast_entry *ast_entry;
1040 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
1041 	struct dp_peer *peer = NULL;
1042 
1043 	if (soc->ast_offload_support)
1044 		return false;
1045 
1046 	qdf_spin_lock_bh(&soc->ast_lock);
1047 
1048 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr,
1049 						    pdev_id);
1050 
1051 	if ((!ast_entry) ||
1052 	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
1053 		qdf_spin_unlock_bh(&soc->ast_lock);
1054 		return false;
1055 	}
1056 
1057 	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
1058 				     DP_MOD_ID_AST);
1059 	if (!peer) {
1060 		qdf_spin_unlock_bh(&soc->ast_lock);
1061 		return false;
1062 	}
1063 
1064 	ast_entry_info->type = ast_entry->type;
1065 	ast_entry_info->pdev_id = ast_entry->pdev_id;
1066 	ast_entry_info->vdev_id = ast_entry->vdev_id;
1067 	ast_entry_info->peer_id = ast_entry->peer_id;
1068 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
1069 		     &peer->mac_addr.raw[0],
1070 		     QDF_MAC_ADDR_SIZE);
1071 	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
1072 	qdf_spin_unlock_bh(&soc->ast_lock);
1073 	return true;
1074 }
1075 
1076 /**
1077  * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table
1078  *                            with given mac address
1079  *
1080  * @soc : data path soc handle
1081  * @ast_mac_addr : AST entry mac address
1082  * @callback : callback function to called on ast delete response from FW
1083  * @cookie : argument to be passed to callback
1084  *
1085  * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
1086  *          is sent
1087  *          QDF_STATUS_E_INVAL if ast entry not found
1088  */
1089 static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
1090 					       uint8_t *mac_addr,
1091 					       txrx_ast_free_cb callback,
1092 					       void *cookie)
1093 
1094 {
1095 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1096 	struct dp_ast_entry *ast_entry = NULL;
1097 	txrx_ast_free_cb cb = NULL;
1098 	void *arg = NULL;
1099 
1100 	if (soc->ast_offload_support)
1101 		return -QDF_STATUS_E_INVAL;
1102 
1103 	qdf_spin_lock_bh(&soc->ast_lock);
1104 	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
1105 	if (!ast_entry) {
1106 		qdf_spin_unlock_bh(&soc->ast_lock);
1107 		return -QDF_STATUS_E_INVAL;
1108 	}
1109 
1110 	if (ast_entry->callback) {
1111 		cb = ast_entry->callback;
1112 		arg = ast_entry->cookie;
1113 	}
1114 
1115 	ast_entry->callback = callback;
1116 	ast_entry->cookie = cookie;
1117 
1118 	/*
1119 	 * If delete_in_progress is set, an AST delete was already sent to
1120 	 * the target and the host is waiting for the response; do not send
1121 	 * the delete again.
1122 	 */
1123 	if (!ast_entry->delete_in_progress)
1124 		dp_peer_del_ast(soc, ast_entry);
1125 
1126 	qdf_spin_unlock_bh(&soc->ast_lock);
1127 	if (cb) {
1128 		cb(soc->ctrl_psoc,
1129 		   dp_soc_to_cdp_soc(soc),
1130 		   arg,
1131 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1132 	}
1133 	return QDF_STATUS_SUCCESS;
1134 }
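/*
 * Illustrative sketch (not part of the driver): a hypothetical caller
 * passing a free callback. The callback parameters mirror the cb(...)
 * invocation above: ctrl psoc, cdp soc, the caller's cookie and the
 * AST free status.
 *
 *	static void example_ast_free_cb(struct cdp_ctrl_objmgr_psoc *psoc,
 *					struct cdp_soc *soc, void *cookie,
 *					enum cdp_ast_free_status status)
 *	{
 *		if (status == CDP_TXRX_AST_DELETE_IN_PROGRESS)
 *			;	// earlier delete superseded; release cookie
 *	}
 *
 *	dp_peer_ast_entry_del_by_soc(soc_handle, mac_addr,
 *				     example_ast_free_cb, my_cookie);
 */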
1135 
1136 /**
1137  * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
1138  *                                   table if mac address and pdev_id matches
1139  *
1140  * @soc : data path soc handle
1141  * @ast_mac_addr : AST entry mac address
1142  * @pdev_id : pdev id
1143  * @callback : callback function to called on ast delete response from FW
1144  * @cookie : argument to be passed to callback
1145  *
1146  * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
1147  *          is sent
1148  *          QDF_STATUS_E_INVAL if ast entry not found
1149  */
1150 
1151 static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
1152 						uint8_t *mac_addr,
1153 						uint8_t pdev_id,
1154 						txrx_ast_free_cb callback,
1155 						void *cookie)
1156 
1157 {
1158 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1159 	struct dp_ast_entry *ast_entry;
1160 	txrx_ast_free_cb cb = NULL;
1161 	void *arg = NULL;
1162 
1163 	if (soc->ast_offload_support)
1164 		return -QDF_STATUS_E_INVAL;
1165 
1166 	qdf_spin_lock_bh(&soc->ast_lock);
1167 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);
1168 
1169 	if (!ast_entry) {
1170 		qdf_spin_unlock_bh(&soc->ast_lock);
1171 		return -QDF_STATUS_E_INVAL;
1172 	}
1173 
1174 	if (ast_entry->callback) {
1175 		cb = ast_entry->callback;
1176 		arg = ast_entry->cookie;
1177 	}
1178 
1179 	ast_entry->callback = callback;
1180 	ast_entry->cookie = cookie;
1181 
1182 	/*
1183 	 * If delete_in_progress is set, an AST delete was already sent to
1184 	 * the target and the host is waiting for the response; do not send
1185 	 * the delete again.
1186 	 */
1187 	if (!ast_entry->delete_in_progress)
1188 		dp_peer_del_ast(soc, ast_entry);
1189 
1190 	qdf_spin_unlock_bh(&soc->ast_lock);
1191 
1192 	if (cb) {
1193 		cb(soc->ctrl_psoc,
1194 		   dp_soc_to_cdp_soc(soc),
1195 		   arg,
1196 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1197 	}
1198 	return QDF_STATUS_SUCCESS;
1199 }
1200 
1201 /**
1202  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs
1203  * @ring_num: ring num of the ring being queried
1204  * @grp_mask: the grp_mask array for the ring type in question.
1205  *
1206  * The grp_mask array is indexed by group number and the bit fields correspond
1207  * to ring numbers.  We are finding which interrupt group a ring belongs to.
1208  *
1209  * Return: the index in the grp_mask array with the ring number.
1210  * -QDF_STATUS_E_NOENT if no entry is found
1211  */
1212 static int dp_srng_find_ring_in_mask(int ring_num, uint8_t *grp_mask)
1213 {
1214 	int ext_group_num;
1215 	uint8_t mask = 1 << ring_num;
1216 
1217 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
1218 	     ext_group_num++) {
1219 		if (mask & grp_mask[ext_group_num])
1220 			return ext_group_num;
1221 	}
1222 
1223 	return -QDF_STATUS_E_NOENT;
1224 }
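/*
 * Worked example (illustrative): with grp_mask[] = { 0x1, 0x2, 0x4, ... },
 * REO ring 2 (1 << 2 = 0x4) matches grp_mask[2], so
 * dp_srng_find_ring_in_mask(2, grp_mask) returns ext_group 2.
 */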
1225 
1226 /**
1227  * dp_is_msi_group_number_invalid() - check msi_group_number valid or not
1228  * @msi_group_number: MSI group number.
1229  * @msi_data_count: MSI data count.
1230  *
1231  * Return: true if msi_group_number is invalid.
1232  */
1233 #ifdef WLAN_ONE_MSI_VECTOR
1234 static bool dp_is_msi_group_number_invalid(int msi_group_number,
1235 					   int msi_data_count)
1236 {
1237 	return false;
1238 }
1239 #else
1240 static bool dp_is_msi_group_number_invalid(int msi_group_number,
1241 					   int msi_data_count)
1242 {
1243 	return msi_group_number > msi_data_count;
1244 }
1245 #endif
1246 
1247 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1248 /**
1249  * dp_is_reo_ring_num_in_nf_grp1() - Check if the current reo ring is part of
1250  *				rx_near_full_grp1 mask
1251  * @soc: Datapath SoC Handle
1252  * @ring_num: REO ring number
1253  *
1254  * Return: 1 if the ring_num belongs to reo_nf_grp1,
1255  *	   0, otherwise.
1256  */
1257 static inline int
1258 dp_is_reo_ring_num_in_nf_grp1(struct dp_soc *soc, int ring_num)
1259 {
1260 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_1 & (1 << ring_num));
1261 }
1262 
1263 /**
1264  * dp_is_reo_ring_num_in_nf_grp2() - Check if the current reo ring is part of
1265  *				rx_near_full_grp2 mask
1266  * @soc: Datapath SoC Handle
1267  * @ring_num: REO ring number
1268  *
1269  * Return: 1 if the ring_num belongs to reo_nf_grp2,
1270  *	   0, otherwise.
1271  */
1272 static inline int
1273 dp_is_reo_ring_num_in_nf_grp2(struct dp_soc *soc, int ring_num)
1274 {
1275 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_2 & (1 << ring_num));
1276 }
1277 
1278 /**
1279  * dp_srng_get_near_full_irq_mask() - Get near-full irq mask for a particular
1280  *				ring type and number
1281  * @soc: Datapath SoC handle
1282  * @ring_type: SRNG type
1283  * @ring_num: ring num
1284  *
1285  * Return: near full irq mask pointer
1286  */
1287 static inline
1288 uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
1289 					enum hal_ring_type ring_type,
1290 					int ring_num)
1291 {
1292 	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
1293 	uint8_t wbm2_sw_rx_rel_ring_id;
1294 	uint8_t *nf_irq_mask = NULL;
1295 
1296 	switch (ring_type) {
1297 	case WBM2SW_RELEASE:
1298 		wbm2_sw_rx_rel_ring_id =
1299 			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
1300 		if (ring_num != wbm2_sw_rx_rel_ring_id) {
1301 			nf_irq_mask = &soc->wlan_cfg_ctx->
1302 					int_tx_ring_near_full_irq_mask[0];
1303 		}
1304 		break;
1305 	case REO_DST:
1306 		if (dp_is_reo_ring_num_in_nf_grp1(soc, ring_num))
1307 			nf_irq_mask =
1308 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_1_mask[0];
1309 		else if (dp_is_reo_ring_num_in_nf_grp2(soc, ring_num))
1310 			nf_irq_mask =
1311 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_2_mask[0];
1312 		else
1313 			qdf_assert(0);
1314 		break;
1315 	default:
1316 		break;
1317 	}
1318 
1319 	return nf_irq_mask;
1320 }
1321 
1322 /**
1323  * dp_srng_set_msi2_ring_params() - Set the msi2 addr/data in the ring params
1324  * @soc: Datapath SoC handle
1325  * @ring_params: srng params handle
1326  * @msi2_addr: MSI2 addr to be set for the SRNG
1327  * @msi2_data: MSI2 data to be set for the SRNG
1328  *
1329  * Return: None
1330  */
1331 static inline
1332 void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
1333 				  struct hal_srng_params *ring_params,
1334 				  qdf_dma_addr_t msi2_addr,
1335 				  uint32_t msi2_data)
1336 {
1337 	ring_params->msi2_addr = msi2_addr;
1338 	ring_params->msi2_data = msi2_data;
1339 }
1340 
1341 /**
1342  * dp_srng_msi2_setup() - Setup MSI2 details for near full IRQ of an SRNG
1343  * @soc: Datapath SoC handle
1344  * @ring_params: ring_params for SRNG
1345  * @ring_type: SRNG type
1346  * @ring_num: ring number for the SRNG
1347  * @nf_msi_grp_num: near full msi group number
1348  *
1349  * Return: None
1350  */
1351 static inline void
1352 dp_srng_msi2_setup(struct dp_soc *soc,
1353 		   struct hal_srng_params *ring_params,
1354 		   int ring_type, int ring_num, int nf_msi_grp_num)
1355 {
1356 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
1357 	int msi_data_count, ret;
1358 
1359 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1360 					  &msi_data_count, &msi_data_start,
1361 					  &msi_irq_start);
1362 	if (ret)
1363 		return;
1364 
1365 	if (nf_msi_grp_num < 0) {
1366 		dp_init_info("%pK: ring near full IRQ not part of an ext_group; ring_type: %d,ring_num %d",
1367 			     soc, ring_type, ring_num);
1368 		ring_params->msi2_addr = 0;
1369 		ring_params->msi2_data = 0;
1370 		return;
1371 	}
1372 
1373 	if (dp_is_msi_group_number_invalid(nf_msi_grp_num, msi_data_count)) {
1374 		dp_init_warn("%pK: 2 msi_groups will share an msi for near full IRQ; msi_group_num %d",
1375 			     soc, nf_msi_grp_num);
1376 		QDF_ASSERT(0);
1377 	}
1378 
1379 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
1380 
1381 	ring_params->nf_irq_support = 1;
1382 	ring_params->msi2_addr = addr_low;
1383 	ring_params->msi2_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
1384 	ring_params->msi2_data = (nf_msi_grp_num % msi_data_count)
1385 		+ msi_data_start;
1386 	ring_params->flags |= HAL_SRNG_MSI_INTR;
1387 }
1388 
1389 /* Percentage of ring entries considered as nearly full */
1390 #define DP_NF_HIGH_THRESH_PERCENTAGE	75
1391 /* Percentage of ring entries considered as critically full */
1392 #define DP_NF_CRIT_THRESH_PERCENTAGE	90
1393 /* Percentage of ring entries considered as safe threshold */
1394 #define DP_NF_SAFE_THRESH_PERCENTAGE	50
1395 
1396 /**
1397  * dp_srng_configure_nf_interrupt_thresholds() - Configure the thresholds for
1398  *			near full irq
1399  * @soc: Datapath SoC handle
1400  * @ring_params: ring params for SRNG
1401  * @ring_type: ring type
1402  */
1403 static inline void
1404 dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
1405 					  struct hal_srng_params *ring_params,
1406 					  int ring_type)
1407 {
1408 	if (ring_params->nf_irq_support) {
1409 		ring_params->high_thresh = (ring_params->num_entries *
1410 					    DP_NF_HIGH_THRESH_PERCENTAGE) / 100;
1411 		ring_params->crit_thresh = (ring_params->num_entries *
1412 					    DP_NF_CRIT_THRESH_PERCENTAGE) / 100;
1413 		ring_params->safe_thresh = (ring_params->num_entries *
1414 					    DP_NF_SAFE_THRESH_PERCENTAGE) / 100;
1415 	}
1416 }
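/*
 * Worked example (illustrative): for a ring with num_entries = 1024 and
 * nf_irq_support set, the thresholds come out as high_thresh = 768
 * (75%), crit_thresh = 921 (90%) and safe_thresh = 512 (50%).
 */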
1417 
1418 /**
1419  * dp_srng_set_nf_thresholds() - Set the near full thresholds to srng data
1420  *			structure from the ring params
1421  * @soc: Datapath SoC handle
1422  * @srng: SRNG handle
1423  * @ring_params: ring params for a SRNG
1424  *
1425  * Return: None
1426  */
1427 static inline void
1428 dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
1429 			  struct hal_srng_params *ring_params)
1430 {
1431 	srng->crit_thresh = ring_params->crit_thresh;
1432 	srng->safe_thresh = ring_params->safe_thresh;
1433 }
1434 
1435 #else
1436 static inline
1437 uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
1438 					enum hal_ring_type ring_type,
1439 					int ring_num)
1440 {
1441 	return NULL;
1442 }
1443 
1444 static inline
1445 void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
1446 				  struct hal_srng_params *ring_params,
1447 				  qdf_dma_addr_t msi2_addr,
1448 				  uint32_t msi2_data)
1449 {
1450 }
1451 
1452 static inline void
1453 dp_srng_msi2_setup(struct dp_soc *soc,
1454 		   struct hal_srng_params *ring_params,
1455 		   int ring_type, int ring_num, int nf_msi_grp_num)
1456 {
1457 }
1458 
1459 static inline void
1460 dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
1461 					  struct hal_srng_params *ring_params,
1462 					  int ring_type)
1463 {
1464 }
1465 
1466 static inline void
1467 dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
1468 			  struct hal_srng_params *ring_params)
1469 {
1470 }
1471 #endif
1472 
1473 static int dp_srng_calculate_msi_group(struct dp_soc *soc,
1474 				       enum hal_ring_type ring_type,
1475 				       int ring_num,
1476 				       int *reg_msi_grp_num,
1477 				       bool nf_irq_support,
1478 				       int *nf_msi_grp_num)
1479 {
1480 	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
1481 	uint8_t *grp_mask, *nf_irq_mask = NULL;
1482 	bool nf_irq_enabled = false;
1483 	uint8_t wbm2_sw_rx_rel_ring_id;
1484 
1485 	switch (ring_type) {
1486 	case WBM2SW_RELEASE:
1487 		wbm2_sw_rx_rel_ring_id =
1488 			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
1489 		if (ring_num == wbm2_sw_rx_rel_ring_id) {
1490 			/* dp_rx_wbm_err_process - soc->rx_rel_ring */
1491 			grp_mask = &cfg_ctx->int_rx_wbm_rel_ring_mask[0];
1492 			ring_num = 0;
1493 		} else { /* dp_tx_comp_handler - soc->tx_comp_ring */
1494 			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
1495 			nf_irq_mask = dp_srng_get_near_full_irq_mask(soc,
1496 								     ring_type,
1497 								     ring_num);
1498 			if (nf_irq_mask)
1499 				nf_irq_enabled = true;
1500 
1501 			/*
1502 			 * Using ring 4 as 4th tx completion ring since ring 3
1503 			 * is Rx error ring
1504 			 */
1505 			if (ring_num == WBM2SW_TXCOMP_RING4_NUM)
1506 				ring_num = TXCOMP_RING4_NUM;
1507 		}
1508 	break;
1509 
1510 	case REO_EXCEPTION:
1511 		/* dp_rx_err_process - &soc->reo_exception_ring */
1512 		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
1513 	break;
1514 
1515 	case REO_DST:
1516 		/* dp_rx_process - soc->reo_dest_ring */
1517 		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
1518 		nf_irq_mask = dp_srng_get_near_full_irq_mask(soc, ring_type,
1519 							     ring_num);
1520 		if (nf_irq_mask)
1521 			nf_irq_enabled = true;
1522 	break;
1523 
1524 	case REO_STATUS:
1525 		/* dp_reo_status_ring_handler - soc->reo_status_ring */
1526 		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
1527 	break;
1528 
1529 	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring */
1530 	case RXDMA_MONITOR_STATUS:
1531 	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
1532 	case RXDMA_MONITOR_DST:
1533 		/* dp_mon_process */
1534 		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
1535 	break;
1536 	case TX_MONITOR_DST:
1537 		/* dp_tx_mon_process */
1538 		grp_mask = &soc->wlan_cfg_ctx->int_tx_mon_ring_mask[0];
1539 	break;
1540 	case RXDMA_DST:
1541 		/* dp_rxdma_err_process */
1542 		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
1543 	break;
1544 
1545 	case RXDMA_BUF:
1546 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
1547 	break;
1548 
1549 	case RXDMA_MONITOR_BUF:
1550 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
1551 	break;
1552 
1553 	case TX_MONITOR_BUF:
1554 		grp_mask = &soc->wlan_cfg_ctx->int_host2txmon_ring_mask[0];
1555 	break;
1556 
1557 	case TCL_DATA:
1558 	/* CMD_CREDIT_RING is used as command in 8074 and credit in 9000 */
1559 	case TCL_CMD_CREDIT:
1560 	case REO_CMD:
1561 	case SW2WBM_RELEASE:
1562 	case WBM_IDLE_LINK:
1563 		/* normally empty SW_TO_HW rings */
1564 		return -QDF_STATUS_E_NOENT;
1565 	break;
1566 
1567 	case TCL_STATUS:
1568 	case REO_REINJECT:
1569 		/* misc unused rings */
1570 		return -QDF_STATUS_E_NOENT;
1571 	break;
1572 
1573 	case CE_SRC:
1574 	case CE_DST:
1575 	case CE_DST_STATUS:
1576 		/* CE_rings - currently handled by hif */
1577 	default:
1578 		return -QDF_STATUS_E_NOENT;
1579 	break;
1580 	}
1581 
1582 	*reg_msi_grp_num = dp_srng_find_ring_in_mask(ring_num, grp_mask);
1583 
1584 	if (nf_irq_support && nf_irq_enabled) {
1585 		*nf_msi_grp_num = dp_srng_find_ring_in_mask(ring_num,
1586 							    nf_irq_mask);
1587 	}
1588 
1589 	return QDF_STATUS_SUCCESS;
1590 }
1591 
1592 /*
1593  * dp_get_num_msi_available() - API to get number of MSIs available
1594  * @dp_soc: DP soc Handle
1595  * @interrupt_mode: Mode of interrupts
1596  *
1597  * Return: Number of MSIs available or 0 in case of integrated
1598  */
1599 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
1600 static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
1601 {
1602 	return 0;
1603 }
1604 #else
1605 /*
1606  * dp_get_num_msi_available() - API to get number of MSIs available
1607  * @dp_soc: DP soc Handle
1608  * @interrupt_mode: Mode of interrupts
1609  *
1610  * Return: Number of MSIs available or 0 in case of integrated
1611  */
1612 static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
1613 {
1614 	int msi_data_count;
1615 	int msi_data_start;
1616 	int msi_irq_start;
1617 	int ret;
1618 
1619 	if (interrupt_mode == DP_INTR_INTEGRATED) {
1620 		return 0;
1621 	} else if (interrupt_mode == DP_INTR_MSI || interrupt_mode ==
1622 		   DP_INTR_POLL) {
1623 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1624 						  &msi_data_count,
1625 						  &msi_data_start,
1626 						  &msi_irq_start);
1627 		if (ret) {
1628 			qdf_err("Unable to get DP MSI assignment %d",
1629 				interrupt_mode);
1630 			return -EINVAL;
1631 		}
1632 		return msi_data_count;
1633 	}
1634 	qdf_err("Interrupt mode invalid %d", interrupt_mode);
1635 	return -EINVAL;
1636 }
1637 #endif
1638 
1639 static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
1640 			      *ring_params, int ring_type, int ring_num)
1641 {
1642 	int reg_msi_grp_num;
1643 	/*
1644 	 * nf_msi_grp_num needs to be initialized with negative value,
1645 	 * to avoid configuring near-full msi for WBM2SW3 ring
1646 	 */
1647 	int nf_msi_grp_num = -1;
1648 	int msi_data_count;
1649 	int ret;
1650 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
1651 	bool nf_irq_support;
1652 
1653 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1654 					    &msi_data_count, &msi_data_start,
1655 					    &msi_irq_start);
1656 
1657 	if (ret)
1658 		return;
1659 
1660 	nf_irq_support = hal_srng_is_near_full_irq_supported(soc->hal_soc,
1661 							     ring_type,
1662 							     ring_num);
1663 	ret = dp_srng_calculate_msi_group(soc, ring_type, ring_num,
1664 					  &reg_msi_grp_num,
1665 					  nf_irq_support,
1666 					  &nf_msi_grp_num);
1667 	if (ret < 0) {
1668 		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
1669 			     soc, ring_type, ring_num);
1670 		ring_params->msi_addr = 0;
1671 		ring_params->msi_data = 0;
1672 		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
1673 		return;
1674 	}
1675 
1676 	if (reg_msi_grp_num < 0) {
1677 		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
1678 			     soc, ring_type, ring_num);
1679 		ring_params->msi_addr = 0;
1680 		ring_params->msi_data = 0;
1681 		goto configure_msi2;
1682 	}
1683 
1684 	if (dp_is_msi_group_number_invalid(reg_msi_grp_num, msi_data_count)) {
1685 		dp_init_warn("%pK: 2 msi_groups will share an msi; msi_group_num %d",
1686 			     soc, reg_msi_grp_num);
1687 		QDF_ASSERT(0);
1688 	}
1689 
1690 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
1691 
1692 	ring_params->msi_addr = addr_low;
1693 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
1694 	ring_params->msi_data = (reg_msi_grp_num % msi_data_count)
1695 		+ msi_data_start;
1696 	ring_params->flags |= HAL_SRNG_MSI_INTR;
1697 
1698 	dp_debug("ring type %u ring_num %u msi->data %u msi_addr %llx",
1699 		 ring_type, ring_num, ring_params->msi_data,
1700 		 (uint64_t)ring_params->msi_addr);
1701 
1702 configure_msi2:
1703 	if (!nf_irq_support) {
1704 		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
1705 		return;
1706 	}
1707 
1708 	dp_srng_msi2_setup(soc, ring_params, ring_type, ring_num,
1709 			   nf_msi_grp_num);
1710 }
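/*
 * Worked example (illustrative): with msi_data_count = 8 and
 * msi_data_start = 1, a ring whose reg_msi_grp_num is 10 gets
 * msi_data = (10 % 8) + 1 = 3; interrupt groups beyond the available
 * MSI vectors wrap around and share vectors, which is what
 * dp_is_msi_group_number_invalid() warns about above.
 */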
1711 
1712 #ifdef FEATURE_AST
1713 /**
1714  * dp_print_mlo_ast_stats() - Print AST stats for MLO peers
1715  *
1716  * @soc : core DP soc context
1717  *
1718  * Return: void
1719  */
1720 void dp_print_mlo_ast_stats(struct dp_soc *soc)
1721 {
1722 	if (soc->arch_ops.print_mlo_ast_stats)
1723 		soc->arch_ops.print_mlo_ast_stats(soc);
1724 }
1725 
1726 /**
1727  * dp_print_peer_ast_entries() - Dump AST entries of peer
1728  * @soc: Datapath soc handle
1729  * @peer: Datapath peer
1730  * @arg: argument to iterate function
1731  *
1732  * return void
1733  */
1734 void
1735 dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
1736 {
1737 	struct dp_ast_entry *ase, *tmp_ase;
1738 	uint32_t num_entries = 0;
1739 	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
1740 			"NONE", "STATIC", "SELF", "WDS", "HMWDS", "BSS",
1741 			"DA", "HMWDS_SEC", "MLD"};
1742 
1743 	DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
1744 	    DP_PRINT_STATS("%6d mac_addr = "QDF_MAC_ADDR_FMT
1745 		    " peer_mac_addr = "QDF_MAC_ADDR_FMT
1746 		    " peer_id = %u"
1747 		    " type = %s"
1748 		    " next_hop = %d"
1749 		    " is_active = %d"
1750 		    " ast_idx = %d"
1751 		    " ast_hash = %d"
1752 		    " delete_in_progress = %d"
1753 		    " pdev_id = %d"
1754 		    " vdev_id = %d",
1755 		    ++num_entries,
1756 		    QDF_MAC_ADDR_REF(ase->mac_addr.raw),
1757 		    QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1758 		    ase->peer_id,
1759 		    type[ase->type],
1760 		    ase->next_hop,
1761 		    ase->is_active,
1762 		    ase->ast_idx,
1763 		    ase->ast_hash_value,
1764 		    ase->delete_in_progress,
1765 		    ase->pdev_id,
1766 		    ase->vdev_id);
1767 	}
1768 }
1769 
1770 /**
1771  * dp_print_ast_stats() - Dump AST table contents
1772  * @soc: Datapath soc handle
1773  *
1774  * return void
1775  */
1776 void dp_print_ast_stats(struct dp_soc *soc)
1777 {
1778 	DP_PRINT_STATS("AST Stats:");
1779 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
1780 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
1781 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
1782 	DP_PRINT_STATS("	Entries MAP ERR  = %d", soc->stats.ast.map_err);
1783 	DP_PRINT_STATS("	Entries Mismatch ERR  = %d",
1784 		       soc->stats.ast.ast_mismatch);
1785 
1786 	DP_PRINT_STATS("AST Table:");
1787 
1788 	qdf_spin_lock_bh(&soc->ast_lock);
1789 
1790 	dp_soc_iterate_peer(soc, dp_print_peer_ast_entries, NULL,
1791 			    DP_MOD_ID_GENERIC_STATS);
1792 
1793 	qdf_spin_unlock_bh(&soc->ast_lock);
1794 
1795 	dp_print_mlo_ast_stats(soc);
1796 }
1797 #else
1798 void dp_print_ast_stats(struct dp_soc *soc)
1799 {
1800 	DP_PRINT_STATS("AST Stats not available. Enable FEATURE_AST");
1801 	return;
1802 }
1803 #endif
1804 
1805 /**
1806  * dp_print_peer_info() - Dump peer info
1807  * @soc: Datapath soc handle
1808  * @peer: Datapath peer handle
1809  * @arg: argument to iter function
1810  *
1811  * Return: void
1812  */
1813 static void
1814 dp_print_peer_info(struct dp_soc *soc, struct dp_peer *peer, void *arg)
1815 {
1816 	struct dp_txrx_peer *txrx_peer = NULL;
1817 
1818 	txrx_peer = dp_get_txrx_peer(peer);
1819 	if (!txrx_peer)
1820 		return;
1821 
1822 	DP_PRINT_STATS(" peer id = %d"
1823 		       " peer_mac_addr = "QDF_MAC_ADDR_FMT
1824 		       " nawds_enabled = %d"
1825 		       " bss_peer = %d"
1826 		       " wds_enabled = %d"
1827 		       " tx_cap_enabled = %d"
1828 		       " rx_cap_enabled = %d",
1829 		       peer->peer_id,
1830 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1831 		       txrx_peer->nawds_enabled,
1832 		       txrx_peer->bss_peer,
1833 		       txrx_peer->wds_enabled,
1834 		       dp_monitor_is_tx_cap_enabled(peer),
1835 		       dp_monitor_is_rx_cap_enabled(peer));
1836 }
1837 
1838 /**
1839  * dp_print_peer_table() - Dump all Peer stats
1840  * @vdev: Datapath Vdev handle
1841  *
1842  * Return: void
1843  */
1844 static void dp_print_peer_table(struct dp_vdev *vdev)
1845 {
1846 	DP_PRINT_STATS("Dumping Peer Table Stats:");
1847 	dp_vdev_iterate_peer(vdev, dp_print_peer_info, NULL,
1848 			     DP_MOD_ID_GENERIC_STATS);
1849 }
1850 
1851 #ifdef WLAN_DP_PER_RING_TYPE_CONFIG
1852 /**
1853  * dp_srng_configure_interrupt_thresholds() - Retrieve interrupt
1854  * threshold values from the wlan_srng_cfg table for each ring type
1855  * @soc: device handle
1856  * @ring_params: per ring specific parameters
1857  * @ring_type: Ring type
1858  * @ring_num: Ring number for a given ring type
 * @num_entries: number of entries in the ring
1859  *
1860  * Fill the ring params with the interrupt threshold
1861  * configuration parameters available in the per ring type wlan_srng_cfg
1862  * table.
1863  *
1864  * Return: None
1865  */
1866 static void
1867 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
1868 				       struct hal_srng_params *ring_params,
1869 				       int ring_type, int ring_num,
1870 				       int num_entries)
1871 {
1872 	uint8_t wbm2_sw_rx_rel_ring_id;
1873 
1874 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);
1875 
1876 	if (ring_type == REO_DST) {
1877 		ring_params->intr_timer_thres_us =
1878 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1879 		ring_params->intr_batch_cntr_thres_entries =
1880 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1881 	} else if (ring_type == WBM2SW_RELEASE &&
1882 		   (ring_num == wbm2_sw_rx_rel_ring_id)) {
1883 		ring_params->intr_timer_thres_us =
1884 				wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1885 		ring_params->intr_batch_cntr_thres_entries =
1886 				wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1887 	} else {
1888 		ring_params->intr_timer_thres_us =
1889 				soc->wlan_srng_cfg[ring_type].timer_threshold;
1890 		ring_params->intr_batch_cntr_thres_entries =
1891 				soc->wlan_srng_cfg[ring_type].batch_count_threshold;
1892 	}
1893 	ring_params->low_threshold =
1894 			soc->wlan_srng_cfg[ring_type].low_threshold;
1895 	if (ring_params->low_threshold)
1896 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1897 
1898 	dp_srng_configure_nf_interrupt_thresholds(soc, ring_params, ring_type);
1899 }
1900 #else
1901 static void
1902 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
1903 				       struct hal_srng_params *ring_params,
1904 				       int ring_type, int ring_num,
1905 				       int num_entries)
1906 {
1907 	uint8_t wbm2_sw_rx_rel_ring_id;
1908 	bool rx_refill_lt_disable;
1909 
1910 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);
1911 
1912 	if (ring_type == REO_DST) {
1913 		ring_params->intr_timer_thres_us =
1914 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1915 		ring_params->intr_batch_cntr_thres_entries =
1916 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1917 	} else if (ring_type == WBM2SW_RELEASE &&
1918 		   (ring_num < wbm2_sw_rx_rel_ring_id ||
1919 		   ring_num == WBM2SW_TXCOMP_RING4_NUM)) {
1920 		ring_params->intr_timer_thres_us =
1921 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
1922 		ring_params->intr_batch_cntr_thres_entries =
1923 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
1924 	} else if (ring_type == RXDMA_BUF) {
1925 		rx_refill_lt_disable =
1926 			wlan_cfg_get_dp_soc_rxdma_refill_lt_disable
1927 							(soc->wlan_cfg_ctx);
1928 		ring_params->intr_timer_thres_us =
1929 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1930 
1931 		if (!rx_refill_lt_disable) {
1932 			ring_params->low_threshold = num_entries >> 3;
1933 			ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1934 			ring_params->intr_batch_cntr_thres_entries = 0;
1935 		}
1936 	} else {
1937 		ring_params->intr_timer_thres_us =
1938 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1939 		ring_params->intr_batch_cntr_thres_entries =
1940 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1941 	}
1942 
1943 	/* These rings do not require an interrupt to the host. Make them zero */
1944 	switch (ring_type) {
1945 	case REO_REINJECT:
1946 	case REO_CMD:
1947 	case TCL_DATA:
1948 	case TCL_CMD_CREDIT:
1949 	case TCL_STATUS:
1950 	case WBM_IDLE_LINK:
1951 	case SW2WBM_RELEASE:
1952 	case PPE2TCL:
1953 	case SW2RXDMA_NEW:
1954 		ring_params->intr_timer_thres_us = 0;
1955 		ring_params->intr_batch_cntr_thres_entries = 0;
1956 		break;
1957 	}
1958 
1959 	/* Enable low threshold interrupts for rx buffer rings (regular and
1960 	 * monitor buffer rings).
1961 	 * TODO: See if this is required for any other ring
1962 	 */
1963 	if ((ring_type == RXDMA_MONITOR_BUF) ||
1964 	    (ring_type == RXDMA_MONITOR_STATUS) ||
1965 	    (ring_type == TX_MONITOR_BUF)) {
1966 		/* TODO: Setting low threshold to 1/8th of ring size
1967 		 * see if this needs to be configurable
1968 		 */
1969 		ring_params->low_threshold = num_entries >> 3;
1970 		ring_params->intr_timer_thres_us =
1971 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1972 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1973 		ring_params->intr_batch_cntr_thres_entries = 0;
1974 	}
1975 
1976 	/* During initialisation monitor rings are only filled with
1977 	 * MON_BUF_MIN_ENTRIES entries. So low threshold needs to be set to
1978 	 * a value less than that. Low threshold value is reconfigured again
1979 	 * to 1/8th of the ring size when monitor vap is created.
1980 	 */
1981 	if (ring_type == RXDMA_MONITOR_BUF)
1982 		ring_params->low_threshold = MON_BUF_MIN_ENTRIES >> 1;
1983 
1984 	/* In case of PCI chipsets, we don't have PPDU end interrupts,
1985 	 * so the MONITOR STATUS ring is reaped by receiving MSI from srng.
1986 	 * Keep the batch threshold as 4 so that an interrupt is received
1987 	 * for every 4 entries in the MONITOR_STATUS ring
1988 	 */
1989 	if ((ring_type == RXDMA_MONITOR_STATUS) &&
1990 	    (soc->intr_mode == DP_INTR_MSI))
1991 		ring_params->intr_batch_cntr_thres_entries = 4;
1992 }
1993 #endif
1994 
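/*
 * Illustrative numbers for the low-threshold math above (not driver
 * code): a 4096-entry buffer ring configured here gets low_threshold =
 * 4096 >> 3 = 512, i.e. a low-threshold interrupt fires once roughly
 * 1/8th of the ring is left for the HW to consume, prompting the host
 * to replenish before the ring runs dry.
 */
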
1995 #ifdef DP_MEM_PRE_ALLOC
1996 
1997 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
1998 			   size_t ctxt_size)
1999 {
2000 	void *ctxt_mem;
2001 
2002 	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_context) {
2003 		dp_warn("dp_prealloc_get_context null!");
2004 		goto dynamic_alloc;
2005 	}
2006 
2007 	ctxt_mem = soc->cdp_soc.ol_ops->dp_prealloc_get_context(ctxt_type,
2008 								ctxt_size);
2009 
2010 	if (ctxt_mem)
2011 		goto end;
2012 
2013 dynamic_alloc:
2014 	dp_info("switch to dynamic-alloc for type %d, size %zu",
2015 		ctxt_type, ctxt_size);
2016 	ctxt_mem = qdf_mem_malloc(ctxt_size);
2017 end:
2018 	return ctxt_mem;
2019 }
2020 
2021 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
2022 			 void *vaddr)
2023 {
2024 	QDF_STATUS status;
2025 
2026 	if (soc->cdp_soc.ol_ops->dp_prealloc_put_context) {
2027 		status = soc->cdp_soc.ol_ops->dp_prealloc_put_context(
2028 								ctxt_type,
2029 								vaddr);
2030 	} else {
2031 		dp_warn("dp_prealloc_put_context null!");
2032 		status = QDF_STATUS_E_NOSUPPORT;
2033 	}
2034 
2035 	if (QDF_IS_STATUS_ERROR(status)) {
2036 		dp_info("Context type %d not pre-allocated", ctxt_type);
2037 		qdf_mem_free(vaddr);
2038 	}
2039 }
2040 
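/*
 * Usage sketch for the pre-alloc aware context helpers above
 * (illustrative only; "my_ctxt" and its ctxt_type value are
 * placeholders):
 *
 *	struct my_ctxt *ctx;
 *
 *	ctx = dp_context_alloc_mem(soc, ctxt_type, sizeof(*ctx));
 *	if (ctx) {
 *		... use the context ...
 *		dp_context_free_mem(soc, ctxt_type, ctx);
 *	}
 *
 * Passing the same ctxt_type to both calls lets a buffer served from
 * the pre-allocated pool be returned to it; otherwise the helpers fall
 * back to qdf_mem_malloc()/qdf_mem_free() as coded above.
 */
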
2041 static inline
2042 void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
2043 					   struct dp_srng *srng,
2044 					   uint32_t ring_type)
2045 {
2046 	void *mem;
2047 
2048 	qdf_assert(!srng->is_mem_prealloc);
2049 
2050 	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_consistent) {
2051 		dp_warn("dp_prealloc_get_consistent is null!");
2052 		goto qdf;
2053 	}
2054 
2055 	mem =
2056 		soc->cdp_soc.ol_ops->dp_prealloc_get_consistent
2057 						(&srng->alloc_size,
2058 						 &srng->base_vaddr_unaligned,
2059 						 &srng->base_paddr_unaligned,
2060 						 &srng->base_paddr_aligned,
2061 						 DP_RING_BASE_ALIGN, ring_type);
2062 
2063 	if (mem) {
2064 		srng->is_mem_prealloc = true;
2065 		goto end;
2066 	}
2067 qdf:
2068 	mem =  qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
2069 						&srng->base_vaddr_unaligned,
2070 						&srng->base_paddr_unaligned,
2071 						&srng->base_paddr_aligned,
2072 						DP_RING_BASE_ALIGN);
2073 end:
2074 	dp_info("%s memory %pK dp_srng %pK ring_type %d alloc_size %d num_entries %d",
2075 		srng->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", mem,
2076 		srng, ring_type, srng->alloc_size, srng->num_entries);
2077 	return mem;
2078 }
2079 
2080 static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
2081 					       struct dp_srng *srng)
2082 {
2083 	if (srng->is_mem_prealloc) {
2084 		if (!soc->cdp_soc.ol_ops->dp_prealloc_put_consistent) {
2085 			dp_warn("dp_prealloc_put_consistent is null!");
2086 			QDF_BUG(0);
2087 			return;
2088 		}
2089 		soc->cdp_soc.ol_ops->dp_prealloc_put_consistent
2090 						(srng->alloc_size,
2091 						 srng->base_vaddr_unaligned,
2092 						 srng->base_paddr_unaligned);
2093 
2094 	} else {
2095 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2096 					srng->alloc_size,
2097 					srng->base_vaddr_unaligned,
2098 					srng->base_paddr_unaligned, 0);
2099 	}
2100 }
2101 
2102 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
2103 				   enum dp_desc_type desc_type,
2104 				   struct qdf_mem_multi_page_t *pages,
2105 				   size_t element_size,
2106 				   uint32_t element_num,
2107 				   qdf_dma_context_t memctxt,
2108 				   bool cacheable)
2109 {
2110 	if (!soc->cdp_soc.ol_ops->dp_get_multi_pages) {
2111 		dp_warn("dp_get_multi_pages is null!");
2112 		goto qdf;
2113 	}
2114 
2115 	pages->num_pages = 0;
2116 	pages->is_mem_prealloc = 0;
2117 	soc->cdp_soc.ol_ops->dp_get_multi_pages(desc_type,
2118 						element_size,
2119 						element_num,
2120 						pages,
2121 						cacheable);
2122 	if (pages->num_pages)
2123 		goto end;
2124 
2125 qdf:
2126 	qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size,
2127 				  element_num, memctxt, cacheable);
2128 end:
2129 	dp_info("%s desc_type %d element_size %d element_num %d cacheable %d",
2130 		pages->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc",
2131 		desc_type, (int)element_size, element_num, cacheable);
2132 }
2133 
2134 void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
2135 				  enum dp_desc_type desc_type,
2136 				  struct qdf_mem_multi_page_t *pages,
2137 				  qdf_dma_context_t memctxt,
2138 				  bool cacheable)
2139 {
2140 	if (pages->is_mem_prealloc) {
2141 		if (!soc->cdp_soc.ol_ops->dp_put_multi_pages) {
2142 			dp_warn("dp_put_multi_pages is null!");
2143 			QDF_BUG(0);
2144 			return;
2145 		}
2146 
2147 		soc->cdp_soc.ol_ops->dp_put_multi_pages(desc_type, pages);
2148 		qdf_mem_zero(pages, sizeof(*pages));
2149 	} else {
2150 		qdf_mem_multi_pages_free(soc->osdev, pages,
2151 					 memctxt, cacheable);
2152 	}
2153 }
2154 
2155 #else
2156 
2157 static inline
2158 void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
2159 					   struct dp_srng *srng,
2160 					   uint32_t ring_type)
2161 
2162 {
2163 	void *mem;
2164 
2165 	mem = qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
2166 					       &srng->base_vaddr_unaligned,
2167 					       &srng->base_paddr_unaligned,
2168 					       &srng->base_paddr_aligned,
2169 					       DP_RING_BASE_ALIGN);
2170 	if (mem)
2171 		qdf_mem_set(srng->base_vaddr_unaligned, 0, srng->alloc_size);
2172 
2173 	return mem;
2174 }
2175 
2176 static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
2177 					       struct dp_srng *srng)
2178 {
2179 	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2180 				srng->alloc_size,
2181 				srng->base_vaddr_unaligned,
2182 				srng->base_paddr_unaligned, 0);
2183 }
2184 
2185 #endif /* DP_MEM_PRE_ALLOC */
2186 
2187 #ifdef QCA_SUPPORT_WDS_EXTENDED
2188 static bool dp_vdev_is_wds_ext_enabled(struct dp_vdev *vdev)
2189 {
2190 	return vdev->wds_ext_enabled;
2191 }
2192 #else
2193 static bool dp_vdev_is_wds_ext_enabled(struct dp_vdev *vdev)
2194 {
2195 	return false;
2196 }
2197 #endif
2198 
2199 void dp_pdev_update_fast_rx_flag(struct dp_soc *soc, struct dp_pdev *pdev)
2200 {
2201 	struct dp_vdev *vdev = NULL;
2202 	uint8_t rx_fast_flag = true;
2203 
2204 	if (wlan_cfg_is_rx_flow_tag_enabled(soc->wlan_cfg_ctx)) {
2205 		rx_fast_flag = false;
2206 		goto update_flag;
2207 	}
2208 
2209 	/* Check if protocol tagging is enabled */
2210 	if (pdev->is_rx_protocol_tagging_enabled) {
2211 		rx_fast_flag = false;
2212 		goto update_flag;
2213 	}
2214 
2215 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
2216 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
2217 		/* Check if any VDEV has NAWDS enabled */
2218 		if (vdev->nawds_enabled) {
2219 			rx_fast_flag = false;
2220 			break;
2221 		}
2222 
2223 		/* Check if any VDEV has multipass enabled */
2224 		if (vdev->multipass_en) {
2225 			rx_fast_flag = false;
2226 			break;
2227 		}
2228 
2229 		/* Check if any VDEV has mesh enabled */
2230 		if (vdev->mesh_vdev) {
2231 			rx_fast_flag = false;
2232 			break;
2233 		}
2234 
2235 		/* Check if any VDEV has WDS ext enabled */
2236 		if (dp_vdev_is_wds_ext_enabled(vdev)) {
2237 			rx_fast_flag = false;
2238 			break;
2239 		}
2240 	}
2241 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
2242 
2243 update_flag:
2244 	dp_init_info("Updated Rx fast flag to %u", rx_fast_flag);
2245 	pdev->rx_fast_flag = rx_fast_flag;
2246 }
2247 
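/*
 * In short: rx_fast_flag stays true only when none of the slow-path
 * features checked above (RX flow tagging, protocol tagging, NAWDS,
 * multipass, mesh, WDS ext) is enabled on the pdev or any of its
 * vdevs.
 */
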
2248 /*
2249  * dp_srng_free() - Free SRNG memory
2250  * @soc: Data path soc handle
2251  * @srng: SRNG pointer
2252  *
2253  * Return: None
2254  */
2255 void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng)
2256 {
2257 	if (srng->alloc_size && srng->base_vaddr_unaligned) {
2258 		if (!srng->cached) {
2259 			dp_srng_mem_free_consistent(soc, srng);
2260 		} else {
2261 			qdf_mem_free(srng->base_vaddr_unaligned);
2262 		}
2263 		srng->alloc_size = 0;
2264 		srng->base_vaddr_unaligned = NULL;
2265 	}
2266 	srng->hal_srng = NULL;
2267 }
2268 
2269 qdf_export_symbol(dp_srng_free);
2270 
2271 #ifdef DISABLE_MON_RING_MSI_CFG
2272 /*
2273  * dp_skip_msi_cfg() - Check if msi cfg has to be skipped for ring_type
2274  * @ring_type: srng type
2275  *
2276  * Return: True if msi cfg should be skipped for srng type else false
2277  */
2278 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
2279 {
2280 	if (ring_type == RXDMA_MONITOR_STATUS)
2281 		return true;
2282 
2283 	return false;
2284 }
2285 #else
2286 #ifdef DP_CON_MON_MSI_ENABLED
2287 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
2288 {
2289 	if (soc->cdp_soc.ol_ops->get_con_mode &&
2290 	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE) {
2291 		if (ring_type == REO_DST || ring_type == RXDMA_DST)
2292 			return true;
2293 	} else if (ring_type == RXDMA_MONITOR_STATUS) {
2294 		return true;
2295 	}
2296 
2297 	return false;
2298 }
2299 #else
2300 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
2301 {
2302 	return false;
2303 }
2304 #endif /* DP_CON_MON_MSI_ENABLED */
2305 #endif /* DISABLE_MON_RING_MSI_CFG */
2306 
2307 #ifdef DP_UMAC_HW_RESET_SUPPORT
2308 static bool dp_check_umac_reset_in_progress(struct dp_soc *soc)
2309 {
2310 	return !!soc->umac_reset_ctx.intr_ctx_bkp;
2311 }
2312 #else
2313 static bool dp_check_umac_reset_in_progress(struct dp_soc *soc)
2314 {
2315 	return false;
2316 }
2317 #endif
2318 
2319 /*
2320  * dp_srng_init() - Initialize SRNG
2321  * @soc: Data path soc handle
2322  * @srng: SRNG pointer
2323  * @ring_type: Ring Type
2324  * @ring_num: Ring number
2325  * @mac_id: mac_id
2326  *
2327  * Return: QDF_STATUS
2328  */
2329 QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng,
2330 			int ring_type, int ring_num, int mac_id)
2331 {
2332 	bool idle_check;
2333 
2334 	hal_soc_handle_t hal_soc = soc->hal_soc;
2335 	struct hal_srng_params ring_params;
2336 
2337 	if (srng->hal_srng) {
2338 		dp_init_err("%pK: Ring type: %d, num:%d is already initialized",
2339 			    soc, ring_type, ring_num);
2340 		return QDF_STATUS_SUCCESS;
2341 	}
2342 
2343 	/* memset the srng ring to zero */
2344 	qdf_mem_zero(srng->base_vaddr_unaligned, srng->alloc_size);
2345 
2346 	qdf_mem_zero(&ring_params, sizeof(struct hal_srng_params));
2347 	ring_params.ring_base_paddr = srng->base_paddr_aligned;
2348 	ring_params.ring_base_vaddr = srng->base_vaddr_aligned;
2349 
2350 	ring_params.num_entries = srng->num_entries;
2351 
2352 	dp_info("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
2353 		ring_type, ring_num,
2354 		(void *)ring_params.ring_base_vaddr,
2355 		(void *)ring_params.ring_base_paddr,
2356 		ring_params.num_entries);
2357 
2358 	if (soc->intr_mode == DP_INTR_MSI && !dp_skip_msi_cfg(soc, ring_type)) {
2359 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
2360 		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
2361 				 ring_type, ring_num);
2362 	} else {
2363 		ring_params.msi_data = 0;
2364 		ring_params.msi_addr = 0;
2365 		dp_srng_set_msi2_ring_params(soc, &ring_params, 0, 0);
2366 		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
2367 				 ring_type, ring_num);
2368 	}
2369 
2370 	dp_srng_configure_interrupt_thresholds(soc, &ring_params,
2371 					       ring_type, ring_num,
2372 					       srng->num_entries);
2373 
2374 	dp_srng_set_nf_thresholds(soc, srng, &ring_params);
2375 
2376 	if (srng->cached)
2377 		ring_params.flags |= HAL_SRNG_CACHED_DESC;
2378 
2379 	idle_check = dp_check_umac_reset_in_progress(soc);
2380 
2381 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
2382 					mac_id, &ring_params, idle_check);
2383 
2384 	if (!srng->hal_srng) {
2385 		dp_srng_free(soc, srng);
2386 		return QDF_STATUS_E_FAILURE;
2387 	}
2388 
2389 	return QDF_STATUS_SUCCESS;
2390 }
2391 
2392 qdf_export_symbol(dp_srng_init);
2393 
2394 /*
2395  * dp_srng_alloc() - Allocate memory for SRNG
2396  * @soc: Data path soc handle
2397  * @srng: SRNG pointer
2398  * @ring_type: Ring Type
2399  * @num_entries: Number of entries
2400  * @cached: cached flag variable
2401  *
2402  * Return: QDF_STATUS
2403  */
2404 QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
2405 			 int ring_type, uint32_t num_entries,
2406 			 bool cached)
2407 {
2408 	hal_soc_handle_t hal_soc = soc->hal_soc;
2409 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
2410 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
2411 
2412 	if (srng->base_vaddr_unaligned) {
2413 		dp_init_err("%pK: Ring type: %d is already allocated",
2414 			    soc, ring_type);
2415 		return QDF_STATUS_SUCCESS;
2416 	}
2417 
2418 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
2419 	srng->hal_srng = NULL;
2420 	srng->alloc_size = num_entries * entry_size;
2421 	srng->num_entries = num_entries;
2422 	srng->cached = cached;
2423 
2424 	if (!cached) {
2425 		srng->base_vaddr_aligned =
2426 		    dp_srng_aligned_mem_alloc_consistent(soc,
2427 							 srng,
2428 							 ring_type);
2429 	} else {
2430 		srng->base_vaddr_aligned = qdf_aligned_malloc(
2431 					&srng->alloc_size,
2432 					&srng->base_vaddr_unaligned,
2433 					&srng->base_paddr_unaligned,
2434 					&srng->base_paddr_aligned,
2435 					DP_RING_BASE_ALIGN);
2436 	}
2437 
2438 	if (!srng->base_vaddr_aligned)
2439 		return QDF_STATUS_E_NOMEM;
2440 
2441 	return QDF_STATUS_SUCCESS;
2442 }
2443 
2444 qdf_export_symbol(dp_srng_alloc);
2445 
2446 /*
2447  * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
2448  * @soc: DP SOC handle
2449  * @srng: source ring structure
2450  * @ring_type: type of ring
2451  * @ring_num: ring number
2452  *
2453  * Return: None
2454  */
2455 void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
2456 		    int ring_type, int ring_num)
2457 {
2458 	if (!srng->hal_srng) {
2459 		dp_init_err("%pK: Ring type: %d, num:%d not setup",
2460 			    soc, ring_type, ring_num);
2461 		return;
2462 	}
2463 
2464 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
2465 	srng->hal_srng = NULL;
2466 }
2467 
2468 qdf_export_symbol(dp_srng_deinit);
2469 
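/*
 * Typical SRNG lifecycle implied by the four APIs above (illustrative
 * sketch; ring type, size and numbers are placeholders and error
 * handling is elided):
 *
 *	struct dp_srng srng = {0};
 *
 *	dp_srng_alloc(soc, &srng, REO_DST, 1024, false);
 *	dp_srng_init(soc, &srng, REO_DST, 0, 0);
 *	...
 *	dp_srng_deinit(soc, &srng, REO_DST, 0);
 *	dp_srng_free(soc, &srng);
 *
 * alloc/free manage the (possibly pre-allocated) ring memory, while
 * init/deinit attach and detach the ring from HAL via hal_srng_setup()
 * and hal_srng_cleanup().
 */
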
2470 /* TODO: Need this interface from HIF */
2471 void *hif_get_hal_handle(struct hif_opaque_softc *hif_handle);
2472 
2473 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
2474 int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
2475 			 hal_ring_handle_t hal_ring_hdl)
2476 {
2477 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2478 	uint32_t hp, tp;
2479 	uint8_t ring_id;
2480 
2481 	if (!int_ctx)
2482 		return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
2483 
2484 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
2485 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2486 
2487 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
2488 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_START);
2489 
2490 	return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
2491 }
2492 
2493 void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
2494 			hal_ring_handle_t hal_ring_hdl)
2495 {
2496 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2497 	uint32_t hp, tp;
2498 	uint8_t ring_id;
2499 
2500 	if (!int_ctx)
2501 		return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
2502 
2503 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
2504 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2505 
2506 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
2507 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_END);
2508 
2509 	return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
2510 }
2511 
2512 static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
2513 					      uint8_t hist_group_id)
2514 {
2515 	hif_record_event(dp_soc->hif_handle, hist_group_id,
2516 			 0, 0, 0, HIF_EVENT_TIMER_ENTRY);
2517 }
2518 
2519 static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
2520 					     uint8_t hist_group_id)
2521 {
2522 	hif_record_event(dp_soc->hif_handle, hist_group_id,
2523 			 0, 0, 0, HIF_EVENT_TIMER_EXIT);
2524 }
2525 #else
2526 
2527 static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
2528 					      uint8_t hist_group_id)
2529 {
2530 }
2531 
2532 static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
2533 					     uint8_t hist_group_id)
2534 {
2535 }
2536 
2537 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
2538 
2539 /*
2540  * dp_should_timer_irq_yield() - Decide if the bottom half should yield
2541  * @soc: DP soc handle
2542  * @work_done: work done in softirq context
2543  * @start_time: start time for the softirq
2544  *
2545  * Return: enum with yield code
2546  */
2547 enum timer_yield_status
2548 dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
2549 			  uint64_t start_time)
2550 {
2551 	uint64_t cur_time = qdf_get_log_timestamp();
2552 
2553 	if (!work_done)
2554 		return DP_TIMER_WORK_DONE;
2555 
2556 	if (cur_time - start_time > DP_MAX_TIMER_EXEC_TIME_TICKS)
2557 		return DP_TIMER_TIME_EXHAUST;
2558 
2559 	return DP_TIMER_NO_YIELD;
2560 }
2561 
2562 qdf_export_symbol(dp_should_timer_irq_yield);
2563 
2564 static int dp_process_rxdma_dst_ring(struct dp_soc *soc,
2565 				     struct dp_intr *int_ctx,
2566 				     int mac_for_pdev,
2567 				     int total_budget)
2568 {
2569 	return dp_rxdma_err_process(int_ctx, soc, mac_for_pdev,
2570 				    total_budget);
2571 }
2572 
2573 /**
2574  * dp_process_lmac_rings() - Process LMAC rings
2575  * @int_ctx: interrupt context
2576  * @total_budget: budget of work which can be done
2577  *
2578  * Return: work done
2579  */
2580 static int dp_process_lmac_rings(struct dp_intr *int_ctx, int total_budget)
2581 {
2582 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2583 	struct dp_soc *soc = int_ctx->soc;
2584 	uint32_t remaining_quota = total_budget;
2585 	struct dp_pdev *pdev = NULL;
2586 	uint32_t work_done  = 0;
2587 	int budget = total_budget;
2588 	int ring = 0;
2589 
2590 	/* Process LMAC interrupts */
2591 	for (ring = 0; ring < MAX_NUM_LMAC_HW; ring++) {
2592 		int mac_for_pdev = ring;
2593 
2594 		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
2595 		if (!pdev)
2596 			continue;
2597 		if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
2598 			work_done = dp_monitor_process(soc, int_ctx,
2599 						       mac_for_pdev,
2600 						       remaining_quota);
2601 			if (work_done)
2602 				intr_stats->num_rx_mon_ring_masks++;
2603 			budget -= work_done;
2604 			if (budget <= 0)
2605 				goto budget_done;
2606 			remaining_quota = budget;
2607 		}
2608 
2609 		if (int_ctx->tx_mon_ring_mask & (1 << mac_for_pdev)) {
2610 			work_done = dp_tx_mon_process(soc, int_ctx,
2611 						      mac_for_pdev,
2612 						      remaining_quota);
2613 			if (work_done)
2614 				intr_stats->num_tx_mon_ring_masks++;
2615 			budget -= work_done;
2616 			if (budget <= 0)
2617 				goto budget_done;
2618 			remaining_quota = budget;
2619 		}
2620 
2621 		if (int_ctx->rxdma2host_ring_mask &
2622 				(1 << mac_for_pdev)) {
2623 			work_done = dp_process_rxdma_dst_ring(soc, int_ctx,
2624 							      mac_for_pdev,
2625 							      remaining_quota);
2626 			if (work_done)
2627 				intr_stats->num_rxdma2host_ring_masks++;
2628 			budget -=  work_done;
2629 			if (budget <= 0)
2630 				goto budget_done;
2631 			remaining_quota = budget;
2632 		}
2633 
2634 		if (int_ctx->host2rxdma_ring_mask & (1 << mac_for_pdev)) {
2635 			union dp_rx_desc_list_elem_t *desc_list = NULL;
2636 			union dp_rx_desc_list_elem_t *tail = NULL;
2637 			struct dp_srng *rx_refill_buf_ring;
2638 			struct rx_desc_pool *rx_desc_pool;
2639 
2640 			rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
2641 			if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
2642 				rx_refill_buf_ring =
2643 					&soc->rx_refill_buf_ring[mac_for_pdev];
2644 			else
2645 				rx_refill_buf_ring =
2646 					&soc->rx_refill_buf_ring[pdev->lmac_id];
2647 
2648 			intr_stats->num_host2rxdma_ring_masks++;
2649 			dp_rx_buffers_lt_replenish_simple(soc, mac_for_pdev,
2650 							  rx_refill_buf_ring,
2651 							  rx_desc_pool,
2652 							  0,
2653 							  &desc_list,
2654 							  &tail);
2655 		}
2656 
2657 	}
2658 
2659 	if (int_ctx->host2rxdma_mon_ring_mask)
2660 		dp_rx_mon_buf_refill(int_ctx);
2661 
2662 	if (int_ctx->host2txmon_ring_mask)
2663 		dp_tx_mon_buf_refill(int_ctx);
2664 
2665 budget_done:
2666 	return total_budget - budget;
2667 }
2668 
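/*
 * Budget bookkeeping example for the loop above (illustrative
 * numbers): with total_budget = 64, a monitor ring reporting
 * work_done = 20 leaves budget = 44 for the remaining LMAC rings;
 * the function returns total_budget - budget, i.e. the work actually
 * done, which the caller subtracts from its own quota.
 */
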
2669 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
2670 /**
2671  * dp_service_near_full_srngs() - Bottom half handler to process the near
2672  *				full IRQ on a SRNG
2673  * @dp_ctx: DP interrupt context handle
2674  * @dp_budget: Number of SRNGs which can be processed in a single attempt
2675  *		without rescheduling
2676  * @cpu: cpu id
2677  *
2678  * Return: remaining budget/quota for the soc device
2679  */
2680 static
2681 uint32_t dp_service_near_full_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
2682 {
2683 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2684 	struct dp_soc *soc = int_ctx->soc;
2685 
2686 	/*
2687 	 * dp_service_near_full_srngs arch ops should be initialized always
2688 	 * if the NEAR FULL IRQ feature is enabled.
2689 	 */
2690 	return soc->arch_ops.dp_service_near_full_srngs(soc, int_ctx,
2691 							dp_budget);
2692 }
2693 #endif
2694 
2695 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2696 
2697 /*
2698  * dp_srng_get_cpu() - Get the smp processor id for srng processing
2699  *
2700  * Return: smp processor id
2701  */
2702 static inline int dp_srng_get_cpu(void)
2703 {
2704 	return smp_processor_id();
2705 }
2706 
2707 /*
2708  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
2709  * @dp_ctx: DP interrupt context handle
2710  * @budget: Number of frames/descriptors that can be processed in one shot
2711  * @cpu: CPU on which this instance is running
2712  *
2713  * Return: work done, i.e. the consumed portion of dp_budget
2714  */
2715 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
2716 {
2717 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2718 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2719 	struct dp_soc *soc = int_ctx->soc;
2720 	int ring = 0;
2721 	int index;
2722 	uint32_t work_done  = 0;
2723 	int budget = dp_budget;
2724 	uint8_t tx_mask = int_ctx->tx_ring_mask;
2725 	uint8_t rx_mask = int_ctx->rx_ring_mask;
2726 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
2727 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
2728 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
2729 	uint32_t remaining_quota = dp_budget;
2730 
2731 	qdf_atomic_set_bit(cpu, &soc->service_rings_running);
2732 
2733 	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x\n",
2734 			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
2735 			 reo_status_mask,
2736 			 int_ctx->rx_mon_ring_mask,
2737 			 int_ctx->host2rxdma_ring_mask,
2738 			 int_ctx->rxdma2host_ring_mask);
2739 
2740 	/* Process Tx completion interrupts first to return back buffers */
2741 	for (index = 0; index < soc->num_tx_comp_rings; index++) {
2742 		if (!(1 << wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) & tx_mask))
2743 			continue;
2744 		work_done = dp_tx_comp_handler(int_ctx,
2745 					       soc,
2746 					       soc->tx_comp_ring[index].hal_srng,
2747 					       index, remaining_quota);
2748 		if (work_done) {
2749 			intr_stats->num_tx_ring_masks[index]++;
2750 			dp_verbose_debug("tx mask 0x%x index %d, budget %d, work_done %d",
2751 					 tx_mask, index, budget,
2752 					 work_done);
2753 		}
2754 		budget -= work_done;
2755 		if (budget <= 0)
2756 			goto budget_done;
2757 
2758 		remaining_quota = budget;
2759 	}
2760 
2761 	/* Process REO Exception ring interrupt */
2762 	if (rx_err_mask) {
2763 		work_done = dp_rx_err_process(int_ctx, soc,
2764 					      soc->reo_exception_ring.hal_srng,
2765 					      remaining_quota);
2766 
2767 		if (work_done) {
2768 			intr_stats->num_rx_err_ring_masks++;
2769 			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
2770 					 work_done, budget);
2771 		}
2772 
2773 		budget -=  work_done;
2774 		if (budget <= 0) {
2775 			goto budget_done;
2776 		}
2777 		remaining_quota = budget;
2778 	}
2779 
2780 	/* Process Rx WBM release ring interrupt */
2781 	if (rx_wbm_rel_mask) {
2782 		work_done = dp_rx_wbm_err_process(int_ctx, soc,
2783 						  soc->rx_rel_ring.hal_srng,
2784 						  remaining_quota);
2785 
2786 		if (work_done) {
2787 			intr_stats->num_rx_wbm_rel_ring_masks++;
2788 			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
2789 					 work_done, budget);
2790 		}
2791 
2792 		budget -=  work_done;
2793 		if (budget <= 0) {
2794 			goto budget_done;
2795 		}
2796 		remaining_quota = budget;
2797 	}
2798 
2799 	/* Process Rx interrupts */
2800 	if (rx_mask) {
2801 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
2802 			if (!(rx_mask & (1 << ring)))
2803 				continue;
2804 			work_done = soc->arch_ops.dp_rx_process(int_ctx,
2805 						  soc->reo_dest_ring[ring].hal_srng,
2806 						  ring,
2807 						  remaining_quota);
2808 			if (work_done) {
2809 				intr_stats->num_rx_ring_masks[ring]++;
2810 				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
2811 						 rx_mask, ring,
2812 						 work_done, budget);
2813 				budget -=  work_done;
2814 				if (budget <= 0)
2815 					goto budget_done;
2816 				remaining_quota = budget;
2817 			}
2818 		}
2819 	}
2820 
2821 	if (reo_status_mask) {
2822 		if (dp_reo_status_ring_handler(int_ctx, soc))
2823 			int_ctx->intr_stats.num_reo_status_ring_masks++;
2824 	}
2825 
2826 	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
2827 		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
2828 		if (work_done) {
2829 			budget -=  work_done;
2830 			if (budget <= 0)
2831 				goto budget_done;
2832 			remaining_quota = budget;
2833 		}
2834 	}
2835 
2836 	qdf_lro_flush(int_ctx->lro_ctx);
2837 	intr_stats->num_masks++;
2838 
2839 budget_done:
2840 	qdf_atomic_clear_bit(cpu, &soc->service_rings_running);
2841 
2842 	if (soc->notify_fw_callback)
2843 		soc->notify_fw_callback(soc);
2844 
2845 	return dp_budget - budget;
2846 }
2847 
2848 #else /* QCA_HOST_MODE_WIFI_DISABLED */
2849 
2850 /*
2851  * dp_srng_get_cpu() - Get the smp processor id for srng processing
2852  *
2853  * Return: smp processor id
2854  */
2855 static inline int dp_srng_get_cpu(void)
2856 {
2857 	return 0;
2858 }
2859 
2860 /*
2861  * dp_service_srngs() - Top level handler for DP Monitor Ring interrupts
2862  * @dp_ctx: DP interrupt context handle
2863  * @budget: Number of frames/descriptors that can be processed in one shot
 * @cpu: CPU on which this instance is running
2864  *
2865  * Return: work done, i.e. the consumed portion of dp_budget
2866  */
2867 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
2868 {
2869 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2870 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2871 	struct dp_soc *soc = int_ctx->soc;
2872 	uint32_t remaining_quota = dp_budget;
2873 	uint32_t work_done  = 0;
2874 	int budget = dp_budget;
2875 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
2876 
2877 	if (reo_status_mask) {
2878 		if (dp_reo_status_ring_handler(int_ctx, soc))
2879 			int_ctx->intr_stats.num_reo_status_ring_masks++;
2880 	}
2881 
2882 	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
2883 		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
2884 		if (work_done) {
2885 			budget -=  work_done;
2886 			if (budget <= 0)
2887 				goto budget_done;
2888 			remaining_quota = budget;
2889 		}
2890 	}
2891 
2892 	qdf_lro_flush(int_ctx->lro_ctx);
2893 	intr_stats->num_masks++;
2894 
2895 budget_done:
2896 	return dp_budget - budget;
2897 }
2898 
2899 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2900 
2901 /* dp_interrupt_timer() - timer poll for interrupts
2902  *
2903  * @arg: SoC Handle
2904  *
2905  * Return: None
2906  *
2907  */
2908 static void dp_interrupt_timer(void *arg)
2909 {
2910 	struct dp_soc *soc = (struct dp_soc *) arg;
2911 	struct dp_pdev *pdev = soc->pdev_list[0];
2912 	enum timer_yield_status yield = DP_TIMER_NO_YIELD;
2913 	uint32_t work_done  = 0, total_work_done = 0;
2914 	int budget = 0xffff, i;
2915 	uint32_t remaining_quota = budget;
2916 	uint64_t start_time;
2917 	uint32_t lmac_id = DP_MON_INVALID_LMAC_ID;
2918 	uint8_t dp_intr_id = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
2919 	uint32_t lmac_iter;
2920 	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
2921 	enum reg_wifi_band mon_band;
2922 	int cpu = dp_srng_get_cpu();
2923 
2924 	/*
2925 	 * this logic puts all data path interfacing rings (UMAC/LMAC)
2926 	 * and monitor rings into polling mode when NSS offload is disabled
2927 	 */
2928 	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx) &&
2929 	    !wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
2930 		if (qdf_atomic_read(&soc->cmn_init_done)) {
2931 			for (i = 0; i < wlan_cfg_get_num_contexts(
2932 						soc->wlan_cfg_ctx); i++)
2933 				dp_service_srngs(&soc->intr_ctx[i], 0xffff,
2934 						 cpu);
2935 
2936 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
2937 		}
2938 		return;
2939 	}
2940 
2941 	if (!qdf_atomic_read(&soc->cmn_init_done))
2942 		return;
2943 
2944 	if (dp_monitor_is_chan_band_known(pdev)) {
2945 		mon_band = dp_monitor_get_chan_band(pdev);
2946 		lmac_id = pdev->ch_band_lmac_id_mapping[mon_band];
2947 		if (qdf_likely(lmac_id != DP_MON_INVALID_LMAC_ID)) {
2948 			dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id];
2949 			dp_srng_record_timer_entry(soc, dp_intr_id);
2950 		}
2951 	}
2952 
2953 	start_time = qdf_get_log_timestamp();
2954 	dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);
2955 
2956 	while (yield == DP_TIMER_NO_YIELD) {
2957 		for (lmac_iter = 0; lmac_iter < max_mac_rings; lmac_iter++) {
2958 			if (lmac_iter == lmac_id)
2959 				work_done = dp_monitor_process(soc,
2960 						&soc->intr_ctx[dp_intr_id],
2961 						lmac_iter, remaining_quota);
2962 			else
2963 				work_done =
2964 					dp_monitor_drop_packets_for_mac(pdev,
2965 							     lmac_iter,
2966 							     remaining_quota);
2967 			if (work_done) {
2968 				budget -=  work_done;
2969 				if (budget <= 0) {
2970 					yield = DP_TIMER_WORK_EXHAUST;
2971 					goto budget_done;
2972 				}
2973 				remaining_quota = budget;
2974 				total_work_done += work_done;
2975 			}
2976 		}
2977 
2978 		yield = dp_should_timer_irq_yield(soc, total_work_done,
2979 						  start_time);
2980 		total_work_done = 0;
2981 	}
2982 
2983 budget_done:
2984 	if (yield == DP_TIMER_WORK_EXHAUST ||
2985 	    yield == DP_TIMER_TIME_EXHAUST)
2986 		qdf_timer_mod(&soc->int_timer, 1);
2987 	else
2988 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
2989 
2990 	if (lmac_id != DP_MON_INVALID_LMAC_ID)
2991 		dp_srng_record_timer_exit(soc, dp_intr_id);
2992 }
2993 
2994 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
2995 static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
2996 					struct dp_intr *intr_ctx)
2997 {
2998 	if (intr_ctx->rx_mon_ring_mask)
2999 		return true;
3000 
3001 	return false;
3002 }
3003 #else
3004 static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
3005 					struct dp_intr *intr_ctx)
3006 {
3007 	return false;
3008 }
3009 #endif
3010 
3011 /*
3012  * dp_soc_attach_poll() - Register handlers for DP interrupts
3013  * @txrx_soc: DP SOC handle
3014  *
3015  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
3016  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
3017  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
3018  *
3019  * Return: 0 for success, nonzero for failure.
3020  */
3021 static QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc)
3022 {
3023 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3024 	int i;
3025 	int lmac_id = 0;
3026 
3027 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
3028 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
3029 	soc->intr_mode = DP_INTR_POLL;
3030 
3031 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
3032 		soc->intr_ctx[i].dp_intr_id = i;
3033 		soc->intr_ctx[i].tx_ring_mask =
3034 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
3035 		soc->intr_ctx[i].rx_ring_mask =
3036 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
3037 		soc->intr_ctx[i].rx_mon_ring_mask =
3038 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
3039 		soc->intr_ctx[i].rx_err_ring_mask =
3040 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
3041 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
3042 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
3043 		soc->intr_ctx[i].reo_status_ring_mask =
3044 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
3045 		soc->intr_ctx[i].rxdma2host_ring_mask =
3046 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
3047 		soc->intr_ctx[i].soc = soc;
3048 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
3049 
3050 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
3051 			hif_event_history_init(soc->hif_handle, i);
3052 			soc->mon_intr_id_lmac_map[lmac_id] = i;
3053 			lmac_id++;
3054 		}
3055 	}
3056 
3057 	qdf_timer_init(soc->osdev, &soc->int_timer,
3058 			dp_interrupt_timer, (void *)soc,
3059 			QDF_TIMER_TYPE_WAKE_APPS);
3060 
3061 	return QDF_STATUS_SUCCESS;
3062 }
3063 
3064 /**
3065  * dp_soc_set_interrupt_mode() - Set the interrupt mode in soc
3066  * @soc: DP soc handle
3067  *
3068  * Set the appropriate interrupt mode flag in the soc
3069  */
3070 static void dp_soc_set_interrupt_mode(struct dp_soc *soc)
3071 {
3072 	uint32_t msi_base_data, msi_vector_start;
3073 	int msi_vector_count, ret;
3074 
3075 	soc->intr_mode = DP_INTR_INTEGRATED;
3076 
3077 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
3078 	    (dp_is_monitor_mode_using_poll(soc) &&
3079 	     soc->cdp_soc.ol_ops->get_con_mode &&
3080 	     soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)) {
3081 		soc->intr_mode = DP_INTR_POLL;
3082 	} else {
3083 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
3084 						  &msi_vector_count,
3085 						  &msi_base_data,
3086 						  &msi_vector_start);
3087 		if (ret)
3088 			return;
3089 
3090 		soc->intr_mode = DP_INTR_MSI;
3091 	}
3092 }
3093 
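/*
 * Net precedence of the mode selection above: DP_INTR_POLL when NAPI
 * is disabled or monitor mode runs in poll mode, else DP_INTR_MSI when
 * the platform reports a user MSI assignment for "DP", else the
 * DP_INTR_INTEGRATED default is retained.
 */
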
3094 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc);
3095 #if defined(DP_INTR_POLL_BOTH)
3096 /*
3097  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
3098  * @txrx_soc: DP SOC handle
3099  *
3100  * Call the appropriate attach function based on the mode of operation.
3101  * This is a WAR for enabling monitor mode.
3102  *
3103  * Return: 0 for success. nonzero for failure.
3104  */
3105 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
3106 {
3107 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3108 
3109 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
3110 	    (dp_is_monitor_mode_using_poll(soc) &&
3111 	     soc->cdp_soc.ol_ops->get_con_mode &&
3112 	     soc->cdp_soc.ol_ops->get_con_mode() ==
3113 	     QDF_GLOBAL_MONITOR_MODE)) {
3114 		dp_info("Poll mode");
3115 		return dp_soc_attach_poll(txrx_soc);
3116 	} else {
3117 		dp_info("Interrupt mode");
3118 		return dp_soc_interrupt_attach(txrx_soc);
3119 	}
3120 }
3121 #else
3122 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
3123 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
3124 {
3125 	return dp_soc_attach_poll(txrx_soc);
3126 }
3127 #else
3128 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
3129 {
3130 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3131 
3132 	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx))
3133 		return dp_soc_attach_poll(txrx_soc);
3134 	else
3135 		return dp_soc_interrupt_attach(txrx_soc);
3136 }
3137 #endif
3138 #endif
3139 
3140 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
3141 /**
3142  * dp_soc_interrupt_map_calculate_wifi3_pci_legacy() - Calculate
3143  * interrupt map for legacy interrupts
3144  * @soc: DP soc handle
3145  * @intr_ctx_num: Interrupt context number
3146  * @irq_id_map: IRQ map
3147  * @num_irq_r: Number of interrupts assigned for this context
3148  *
3149  * Return: void
3150  */
3151 static void dp_soc_interrupt_map_calculate_wifi3_pci_legacy(struct dp_soc *soc,
3152 							    int intr_ctx_num,
3153 							    int *irq_id_map,
3154 							    int *num_irq_r)
3155 {
3156 	int j;
3157 	int num_irq = 0;
3158 	int tx_mask = wlan_cfg_get_tx_ring_mask(
3159 					soc->wlan_cfg_ctx, intr_ctx_num);
3160 	int rx_mask = wlan_cfg_get_rx_ring_mask(
3161 					soc->wlan_cfg_ctx, intr_ctx_num);
3162 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
3163 					soc->wlan_cfg_ctx, intr_ctx_num);
3164 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
3165 					soc->wlan_cfg_ctx, intr_ctx_num);
3166 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
3167 					soc->wlan_cfg_ctx, intr_ctx_num);
3168 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
3169 					soc->wlan_cfg_ctx, intr_ctx_num);
3170 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
3171 					soc->wlan_cfg_ctx, intr_ctx_num);
3172 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
3173 					soc->wlan_cfg_ctx, intr_ctx_num);
3174 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
3175 					soc->wlan_cfg_ctx, intr_ctx_num);
3176 	soc->intr_mode = DP_INTR_LEGACY_VIRTUAL_IRQ;
3177 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
3178 		if (tx_mask & (1 << j))
3179 			irq_id_map[num_irq++] = (wbm2sw0_release - j);
3180 		if (rx_mask & (1 << j))
3181 			irq_id_map[num_irq++] = (reo2sw1_intr - j);
3182 		if (rx_mon_mask & (1 << j))
3183 			irq_id_map[num_irq++] = (rxmon2sw_p0_dest0 - j);
3184 		if (rx_err_ring_mask & (1 << j))
3185 			irq_id_map[num_irq++] = (reo2sw0_intr - j);
3186 		if (rx_wbm_rel_ring_mask & (1 << j))
3187 			irq_id_map[num_irq++] = (wbm2sw5_release - j);
3188 		if (reo_status_ring_mask & (1 << j))
3189 			irq_id_map[num_irq++] = (reo_status - j);
3190 		if (rxdma2host_ring_mask & (1 << j))
3191 			irq_id_map[num_irq++] = (rxdma2sw_dst_ring0 - j);
3192 		if (host2rxdma_ring_mask & (1 << j))
3193 			irq_id_map[num_irq++] = (sw2rxdma_0 - j);
3194 		if (host2rxdma_mon_ring_mask & (1 << j))
3195 			irq_id_map[num_irq++] = (sw2rxmon_src_ring - j);
3196 	}
3197 	*num_irq_r = num_irq;
3198 }
3199 #else
3200 /**
3201  * dp_soc_interrupt_map_calculate_wifi3_pci_legacy() - Calculate
3202  * interrupt map for legacy interrupts
3203  * @soc: DP soc handle
3204  * @intr_ctx_num: Interrupt context number
3205  * @irq_id_map: IRQ map
3206  * @num_irq_r: Number of interrupts assigned for this context
3207  *
3208  * Return: void
3209  */
3210 static void dp_soc_interrupt_map_calculate_wifi3_pci_legacy(struct dp_soc *soc,
3211 							    int intr_ctx_num,
3212 							    int *irq_id_map,
3213 							    int *num_irq_r)
3214 {
3215 }
3216 #endif
3217 
3218 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
3219 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
3220 {
3221 	int j;
3222 	int num_irq = 0;
3223 
3224 	int tx_mask =
3225 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
3226 	int rx_mask =
3227 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
3228 	int rx_mon_mask =
3229 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
3230 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
3231 					soc->wlan_cfg_ctx, intr_ctx_num);
3232 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
3233 					soc->wlan_cfg_ctx, intr_ctx_num);
3234 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
3235 					soc->wlan_cfg_ctx, intr_ctx_num);
3236 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
3237 					soc->wlan_cfg_ctx, intr_ctx_num);
3238 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
3239 					soc->wlan_cfg_ctx, intr_ctx_num);
3240 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
3241 					soc->wlan_cfg_ctx, intr_ctx_num);
3242 
3243 	soc->intr_mode = DP_INTR_INTEGRATED;
3244 
3245 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
3246 
3247 		if (tx_mask & (1 << j)) {
3248 			irq_id_map[num_irq++] =
3249 				(wbm2host_tx_completions_ring1 - j);
3250 		}
3251 
3252 		if (rx_mask & (1 << j)) {
3253 			irq_id_map[num_irq++] =
3254 				(reo2host_destination_ring1 - j);
3255 		}
3256 
3257 		if (rxdma2host_ring_mask & (1 << j)) {
3258 			irq_id_map[num_irq++] =
3259 				rxdma2host_destination_ring_mac1 - j;
3260 		}
3261 
3262 		if (host2rxdma_ring_mask & (1 << j)) {
3263 			irq_id_map[num_irq++] =
3264 				host2rxdma_host_buf_ring_mac1 -	j;
3265 		}
3266 
3267 		if (host2rxdma_mon_ring_mask & (1 << j)) {
3268 			irq_id_map[num_irq++] =
3269 				host2rxdma_monitor_ring1 - j;
3270 		}
3271 
3272 		if (rx_mon_mask & (1 << j)) {
3273 			irq_id_map[num_irq++] =
3274 				ppdu_end_interrupts_mac1 - j;
3275 			irq_id_map[num_irq++] =
3276 				rxdma2host_monitor_status_ring_mac1 - j;
3277 			irq_id_map[num_irq++] =
3278 				rxdma2host_monitor_destination_mac1 - j;
3279 		}
3280 
3281 		if (rx_wbm_rel_ring_mask & (1 << j))
3282 			irq_id_map[num_irq++] = wbm2host_rx_release;
3283 
3284 		if (rx_err_ring_mask & (1 << j))
3285 			irq_id_map[num_irq++] = reo2host_exception;
3286 
3287 		if (reo_status_ring_mask & (1 << j))
3288 			irq_id_map[num_irq++] = reo2host_status;
3289 
3290 	}
3291 	*num_irq_r = num_irq;
3292 }
3293 
3294 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
3295 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
3296 		int msi_vector_count, int msi_vector_start)
3297 {
3298 	int tx_mask = wlan_cfg_get_tx_ring_mask(
3299 					soc->wlan_cfg_ctx, intr_ctx_num);
3300 	int rx_mask = wlan_cfg_get_rx_ring_mask(
3301 					soc->wlan_cfg_ctx, intr_ctx_num);
3302 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
3303 					soc->wlan_cfg_ctx, intr_ctx_num);
3304 	int tx_mon_mask = wlan_cfg_get_tx_mon_ring_mask(
3305 					soc->wlan_cfg_ctx, intr_ctx_num);
3306 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
3307 					soc->wlan_cfg_ctx, intr_ctx_num);
3308 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
3309 					soc->wlan_cfg_ctx, intr_ctx_num);
3310 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
3311 					soc->wlan_cfg_ctx, intr_ctx_num);
3312 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
3313 					soc->wlan_cfg_ctx, intr_ctx_num);
3314 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
3315 					soc->wlan_cfg_ctx, intr_ctx_num);
3316 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
3317 					soc->wlan_cfg_ctx, intr_ctx_num);
3318 	int rx_near_full_grp_1_mask =
3319 		wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
3320 						     intr_ctx_num);
3321 	int rx_near_full_grp_2_mask =
3322 		wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
3323 						     intr_ctx_num);
3324 	int tx_ring_near_full_mask =
3325 		wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
3326 						    intr_ctx_num);
3327 
3328 	int host2txmon_ring_mask =
3329 		wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx,
3330 						  intr_ctx_num);
3331 	unsigned int vector =
3332 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
3333 	int num_irq = 0;
3334 
3335 	soc->intr_mode = DP_INTR_MSI;
3336 
3337 	if (tx_mask | rx_mask | rx_mon_mask | tx_mon_mask | rx_err_ring_mask |
3338 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask |
3339 	    host2rxdma_ring_mask | host2rxdma_mon_ring_mask |
3340 	    rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
3341 	    tx_ring_near_full_mask | host2txmon_ring_mask)
3342 		irq_id_map[num_irq++] =
3343 			pld_get_msi_irq(soc->osdev->dev, vector);
3344 
3345 	*num_irq_r = num_irq;
3346 }
3347 
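/*
 * Worked example of the vector assignment above (illustrative
 * numbers): with msi_vector_count = 4 and msi_vector_start = 2,
 * interrupt context 5 maps to vector (5 % 4) + 2 = 3; all ring masks
 * of one context share a single MSI vector and contexts wrap around
 * the vectors granted to the "DP" user by the platform driver.
 */
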
3348 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
3349 				    int *irq_id_map, int *num_irq)
3350 {
3351 	int msi_vector_count, ret;
3352 	uint32_t msi_base_data, msi_vector_start;
3353 
3354 	if (pld_get_enable_intx(soc->osdev->dev)) {
3355 		return dp_soc_interrupt_map_calculate_wifi3_pci_legacy(soc,
3356 				intr_ctx_num, irq_id_map, num_irq);
3357 	}
3358 
3359 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
3360 					    &msi_vector_count,
3361 					    &msi_base_data,
3362 					    &msi_vector_start);
3363 	if (ret)
3364 		return dp_soc_interrupt_map_calculate_integrated(soc,
3365 				intr_ctx_num, irq_id_map, num_irq);
3366 
3367 	else
3368 		dp_soc_interrupt_map_calculate_msi(soc,
3369 				intr_ctx_num, irq_id_map, num_irq,
3370 				msi_vector_count, msi_vector_start);
3371 }
3372 
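/*
 * Dispatch order of the map calculation above: legacy PCI INTx when
 * the platform enables it, else the integrated (AHB) map when no user
 * MSI assignment exists for "DP", else the MSI map.
 */
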
3373 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
3374 /**
3375  * dp_soc_near_full_interrupt_attach() - Register handler for DP near full IRQ
3376  * @soc: DP soc handle
3377  * @num_irq: IRQ number
3378  * @irq_id_map: IRQ map
3379  * @intr_id: interrupt context ID
3380  *
3381  * Return: 0 for success. nonzero for failure.
3382  */
3383 static inline int
3384 dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
3385 				  int irq_id_map[], int intr_id)
3386 {
3387 	return hif_register_ext_group(soc->hif_handle,
3388 				      num_irq, irq_id_map,
3389 				      dp_service_near_full_srngs,
3390 				      &soc->intr_ctx[intr_id], "dp_nf_intr",
3391 				      HIF_EXEC_NAPI_TYPE,
3392 				      QCA_NAPI_DEF_SCALE_BIN_SHIFT);
3393 }
3394 #else
3395 static inline int
3396 dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
3397 				  int *irq_id_map, int intr_id)
3398 {
3399 	return 0;
3400 }
3401 #endif
3402 
3403 #ifdef DP_CON_MON_MSI_SKIP_SET
3404 static inline bool dp_skip_rx_mon_ring_mask_set(struct dp_soc *soc)
3405 {
3406 	return !!(soc->cdp_soc.ol_ops->get_con_mode() !=
3407 			QDF_GLOBAL_MONITOR_MODE);
3408 }
3409 #else
3410 static inline bool dp_skip_rx_mon_ring_mask_set(struct dp_soc *soc)
3411 {
3412 	return false;
3413 }
3414 #endif
3415 
3416 /*
3417  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
3418  * @txrx_soc: DP SOC handle
3419  *
3420  * Return: none
3421  */
3422 static void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
3423 {
3424 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3425 	int i;
3426 
3427 	if (soc->intr_mode == DP_INTR_POLL) {
3428 		qdf_timer_free(&soc->int_timer);
3429 	} else {
3430 		hif_deconfigure_ext_group_interrupts(soc->hif_handle);
3431 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
3432 		hif_deregister_exec_group(soc->hif_handle, "dp_nf_intr");
3433 	}
3434 
3435 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
3436 		soc->intr_ctx[i].tx_ring_mask = 0;
3437 		soc->intr_ctx[i].rx_ring_mask = 0;
3438 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
3439 		soc->intr_ctx[i].rx_err_ring_mask = 0;
3440 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
3441 		soc->intr_ctx[i].reo_status_ring_mask = 0;
3442 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
3443 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
3444 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
3445 		soc->intr_ctx[i].rx_near_full_grp_1_mask = 0;
3446 		soc->intr_ctx[i].rx_near_full_grp_2_mask = 0;
3447 		soc->intr_ctx[i].tx_ring_near_full_mask = 0;
3448 		soc->intr_ctx[i].tx_mon_ring_mask = 0;
3449 		soc->intr_ctx[i].host2txmon_ring_mask = 0;
3450 		soc->intr_ctx[i].umac_reset_intr_mask = 0;
3451 
3452 		hif_event_history_deinit(soc->hif_handle, i);
3453 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
3454 	}
3455 
3456 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
3457 		    sizeof(soc->mon_intr_id_lmac_map),
3458 		    DP_MON_INVALID_LMAC_ID);
3459 }
3460 
3461 /*
3462  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
3463  * @txrx_soc: DP SOC handle
3464  *
3465  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
3466  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
3467  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
3468  *
3469  * Return: 0 for success. nonzero for failure.
3470  */
3471 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
3472 {
3473 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3474 
3475 	int i = 0;
3476 	int num_irq = 0;
3477 	int rx_err_ring_intr_ctxt_id = HIF_MAX_GROUP;
3478 	int lmac_id = 0;
3479 	int napi_scale;
3480 
3481 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
3482 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
3483 
3484 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
3485 		int ret = 0;
3486 
3487 		/* Map of IRQ ids registered with one interrupt context */
3488 		int irq_id_map[HIF_MAX_GRP_IRQ];
3489 
3490 		int tx_mask =
3491 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
3492 		int rx_mask =
3493 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
3494 		int rx_mon_mask =
3495 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
3496 		int tx_mon_ring_mask =
3497 			wlan_cfg_get_tx_mon_ring_mask(soc->wlan_cfg_ctx, i);
3498 		int rx_err_ring_mask =
3499 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
3500 		int rx_wbm_rel_ring_mask =
3501 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
3502 		int reo_status_ring_mask =
3503 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
3504 		int rxdma2host_ring_mask =
3505 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
3506 		int host2rxdma_ring_mask =
3507 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
3508 		int host2rxdma_mon_ring_mask =
3509 			wlan_cfg_get_host2rxdma_mon_ring_mask(
3510 				soc->wlan_cfg_ctx, i);
3511 		int rx_near_full_grp_1_mask =
3512 			wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
3513 							     i);
3514 		int rx_near_full_grp_2_mask =
3515 			wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
3516 							     i);
3517 		int tx_ring_near_full_mask =
3518 			wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
3519 							    i);
3520 		int host2txmon_ring_mask =
3521 			wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx, i);
3522 		int umac_reset_intr_mask =
3523 			wlan_cfg_get_umac_reset_intr_mask(soc->wlan_cfg_ctx, i);
3524 
3525 		if (dp_skip_rx_mon_ring_mask_set(soc))
3526 			rx_mon_mask = 0;
3527 
3528 		soc->intr_ctx[i].dp_intr_id = i;
3529 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
3530 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
3531 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
3532 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
3533 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
3534 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
3535 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
3536 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
3537 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
3538 			 host2rxdma_mon_ring_mask;
3539 		soc->intr_ctx[i].rx_near_full_grp_1_mask =
3540 						rx_near_full_grp_1_mask;
3541 		soc->intr_ctx[i].rx_near_full_grp_2_mask =
3542 						rx_near_full_grp_2_mask;
3543 		soc->intr_ctx[i].tx_ring_near_full_mask =
3544 						tx_ring_near_full_mask;
3545 		soc->intr_ctx[i].tx_mon_ring_mask = tx_mon_ring_mask;
3546 		soc->intr_ctx[i].host2txmon_ring_mask = host2txmon_ring_mask;
3547 		soc->intr_ctx[i].umac_reset_intr_mask = umac_reset_intr_mask;
3548 
3549 		soc->intr_ctx[i].soc = soc;
3550 
3551 		num_irq = 0;
3552 
3553 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
3554 					       &num_irq);
3555 
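		/* Contexts servicing near-full rings get a dedicated
		 * high-priority handler; all others register dp_service_srngs
		 * as a NAPI-type ext group with the configured scale factor.
		 */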
3556 		if (rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
3557 		    tx_ring_near_full_mask) {
3558 			dp_soc_near_full_interrupt_attach(soc, num_irq,
3559 							  irq_id_map, i);
3560 		} else {
3561 			napi_scale = wlan_cfg_get_napi_scale_factor(
3562 							    soc->wlan_cfg_ctx);
3563 			if (!napi_scale)
3564 				napi_scale = QCA_NAPI_DEF_SCALE_BIN_SHIFT;
3565 
3566 			ret = hif_register_ext_group(soc->hif_handle,
3567 				num_irq, irq_id_map, dp_service_srngs,
3568 				&soc->intr_ctx[i], "dp_intr",
3569 				HIF_EXEC_NAPI_TYPE, napi_scale);
3570 		}
3571 
3572 		dp_debug(" int ctx %u num_irq %u irq_id_map %u %u",
3573 			 i, num_irq, irq_id_map[0], irq_id_map[1]);
3574 
3575 		if (ret) {
3576 			dp_init_err("%pK: failed, ret = %d", soc, ret);
3577 			dp_soc_interrupt_detach(txrx_soc);
3578 			return QDF_STATUS_E_FAILURE;
3579 		}
3580 
3581 		hif_event_history_init(soc->hif_handle, i);
3582 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
3583 
3584 		if (rx_err_ring_mask)
3585 			rx_err_ring_intr_ctxt_id = i;
3586 
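		/* Record which interrupt context services each LMAC's monitor
		 * rings; monitor code looks this up via mon_intr_id_lmac_map.
		 */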
3587 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
3588 			soc->mon_intr_id_lmac_map[lmac_id] = i;
3589 			lmac_id++;
3590 		}
3591 	}
3592 
3593 	hif_configure_ext_group_interrupts(soc->hif_handle);
3594 	if (rx_err_ring_intr_ctxt_id != HIF_MAX_GROUP)
3595 		hif_config_irq_clear_cpu_affinity(soc->hif_handle,
3596 						  rx_err_ring_intr_ctxt_id, 0);
3597 
3598 	return QDF_STATUS_SUCCESS;
3599 }
3600 
3601 #define AVG_MAX_MPDUS_PER_TID 128
3602 #define AVG_TIDS_PER_CLIENT 2
3603 #define AVG_FLOWS_PER_TID 2
3604 #define AVG_MSDUS_PER_FLOW 128
3605 #define AVG_MSDUS_PER_MPDU 4
3606 
3607 /*
3608  * dp_hw_link_desc_pool_banks_free() - Free h/w link desc pool banks
3609  * @soc: DP SOC handle
3610  * @mac_id: mac id
3611  *
3612  * Return: none
3613  */
3614 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id)
3615 {
3616 	struct qdf_mem_multi_page_t *pages;
3617 
3618 	if (mac_id != WLAN_INVALID_PDEV_ID) {
3619 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3620 	} else {
3621 		pages = &soc->link_desc_pages;
3622 	}
3623 
3624 	if (!pages) {
3625 		dp_err("can not get link desc pages");
3626 		QDF_ASSERT(0);
3627 		return;
3628 	}
3629 
3630 	if (pages->dma_pages) {
3631 		wlan_minidump_remove((void *)
3632 				     pages->dma_pages->page_v_addr_start,
3633 				     pages->num_pages * pages->page_size,
3634 				     soc->ctrl_psoc,
3635 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3636 				     "hw_link_desc_bank");
3637 		dp_desc_multi_pages_mem_free(soc, DP_HW_LINK_DESC_TYPE,
3638 					     pages, 0, false);
3639 	}
3640 }
3641 
3642 qdf_export_symbol(dp_hw_link_desc_pool_banks_free);
3643 
3644 /*
3645  * dp_hw_link_desc_pool_banks_alloc() - Allocate h/w link desc pool banks
3646  * @soc: DP SOC handle
3647  * @mac_id: mac id
3648  *
3649  * Allocates memory pages for link descriptors; the page size is 4K for
3650  * MCL and 2MB for WIN. If the mac_id is invalid, link descriptor pages
3651  * are allocated for regular RX/TX, and if a valid mac_id is given, link
3652  * descriptors are allocated for RX monitor mode.
3653  *
3654  * Return: QDF_STATUS_SUCCESS: Success
3655  *	   QDF_STATUS_E_FAILURE: Failure
3656  */
3657 QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc, uint32_t mac_id)
3658 {
3659 	hal_soc_handle_t hal_soc = soc->hal_soc;
3660 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
3661 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
3662 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
3663 	uint32_t num_mpdus_per_link_desc = hal_num_mpdus_per_link_desc(hal_soc);
3664 	uint32_t num_msdus_per_link_desc = hal_num_msdus_per_link_desc(hal_soc);
3665 	uint32_t num_mpdu_links_per_queue_desc =
3666 		hal_num_mpdu_links_per_queue_desc(hal_soc);
3667 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
3668 	uint32_t *total_link_descs, total_mem_size;
3669 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
3670 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
3671 	uint32_t num_entries;
3672 	struct qdf_mem_multi_page_t *pages;
3673 	struct dp_srng *dp_srng;
3674 	uint8_t minidump_str[MINIDUMP_STR_SIZE];
3675 
3676 	/* Only Tx queue descriptors are allocated from the common link
3677 	 * descriptor pool. Rx queue descriptors (REO queue extension
3678 	 * descriptors) are not included here because they are expected to be
3679 	 * allocated contiguously with the REO queue descriptors.
3680 	 */
3681 	if (mac_id != WLAN_INVALID_PDEV_ID) {
3682 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3683 		/* dp_monitor_get_link_desc_pages returns NULL only
3684 		 * if monitor SOC is NULL
3685 		 */
3686 		if (!pages) {
3687 			dp_err("can not get link desc pages");
3688 			QDF_ASSERT(0);
3689 			return QDF_STATUS_E_FAULT;
3690 		}
3691 		dp_srng = &soc->rxdma_mon_desc_ring[mac_id];
3692 		num_entries = dp_srng->alloc_size /
3693 			hal_srng_get_entrysize(soc->hal_soc,
3694 					       RXDMA_MONITOR_DESC);
3695 		total_link_descs = dp_monitor_get_total_link_descs(soc, mac_id);
3696 		qdf_str_lcopy(minidump_str, "mon_link_desc_bank",
3697 			      MINIDUMP_STR_SIZE);
3698 	} else {
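		/* Host pool: estimate the number of link descriptor entries
		 * needed for regular RX/TX from max_clients and the AVG_*
		 * heuristics above; the total is rounded up to a power of two
		 * further below.
		 */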
3699 		num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3700 			AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
3701 
3702 		num_mpdu_queue_descs = num_mpdu_link_descs /
3703 			num_mpdu_links_per_queue_desc;
3704 
3705 		num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3706 			AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
3707 			num_msdus_per_link_desc;
3708 
3709 		num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3710 			AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
3711 
3712 		num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
3713 			num_tx_msdu_link_descs + num_rx_msdu_link_descs;
3714 
3715 		pages = &soc->link_desc_pages;
3716 		total_link_descs = &soc->total_link_descs;
3717 		qdf_str_lcopy(minidump_str, "link_desc_bank",
3718 			      MINIDUMP_STR_SIZE);
3719 	}
3720 
3721 	/* If link descriptor banks are already allocated, return from here */
3722 	if (pages->num_pages)
3723 		return QDF_STATUS_SUCCESS;
3724 
3725 	/* Round up to power of 2 */
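	/* e.g. num_entries = 3000 rounds up to *total_link_descs = 4096 */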
3726 	*total_link_descs = 1;
3727 	while (*total_link_descs < num_entries)
3728 		*total_link_descs <<= 1;
3729 
3730 	dp_init_info("%pK: total_link_descs: %u, link_desc_size: %d",
3731 		     soc, *total_link_descs, link_desc_size);
3732 	total_mem_size =  *total_link_descs * link_desc_size;
3733 	total_mem_size += link_desc_align;
3734 
3735 	dp_init_info("%pK: total_mem_size: %d",
3736 		     soc, total_mem_size);
3737 
3738 	dp_set_max_page_size(pages, max_alloc_size);
3739 	dp_desc_multi_pages_mem_alloc(soc, DP_HW_LINK_DESC_TYPE,
3740 				      pages,
3741 				      link_desc_size,
3742 				      *total_link_descs,
3743 				      0, false);
3744 	if (!pages->num_pages) {
3745 		dp_err("Multi page alloc fail for hw link desc pool");
3746 		return QDF_STATUS_E_FAULT;
3747 	}
3748 
3749 	wlan_minidump_log(pages->dma_pages->page_v_addr_start,
3750 			  pages->num_pages * pages->page_size,
3751 			  soc->ctrl_psoc,
3752 			  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3753 			  "hw_link_desc_bank");
3754 
3755 	return QDF_STATUS_SUCCESS;
3756 }
3757 
3758 /*
3759  * dp_hw_link_desc_ring_free() - Free h/w link desc rings
3760  * @soc: DP SOC handle
3761  *
3762  * Return: none
3763  */
3764 static void dp_hw_link_desc_ring_free(struct dp_soc *soc)
3765 {
3766 	uint32_t i;
3767 	uint32_t size = soc->wbm_idle_scatter_buf_size;
3768 	void *vaddr = soc->wbm_idle_link_ring.base_vaddr_unaligned;
3769 	qdf_dma_addr_t paddr;
3770 
3771 	if (soc->wbm_idle_scatter_buf_base_vaddr[0]) {
3772 		for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
3773 			vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
3774 			paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
3775 			if (vaddr) {
3776 				qdf_mem_free_consistent(soc->osdev,
3777 							soc->osdev->dev,
3778 							size,
3779 							vaddr,
3780 							paddr,
3781 							0);
3782 				vaddr = NULL;
3783 			}
3784 		}
3785 	} else {
3786 		wlan_minidump_remove(soc->wbm_idle_link_ring.base_vaddr_unaligned,
3787 				     soc->wbm_idle_link_ring.alloc_size,
3788 				     soc->ctrl_psoc,
3789 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3790 				     "wbm_idle_link_ring");
3791 		dp_srng_free(soc, &soc->wbm_idle_link_ring);
3792 	}
3793 }
3794 
3795 /*
3796  * dp_hw_link_desc_ring_alloc() - Allocate hw link desc rings
3797  * @soc: DP SOC handle
3798  *
3799  * Allocate memory for the WBM_IDLE_LINK SRNG if the total size of the
3800  * link descriptors fits within the max allocation size; otherwise,
3801  * allocate memory for the wbm_idle_scatter_buffers.
3802  *
3803  * Return: QDF_STATUS_SUCCESS: success
3804  *         QDF_STATUS_E_NO_MEM: No memory (Failure)
3805  */
3806 static QDF_STATUS dp_hw_link_desc_ring_alloc(struct dp_soc *soc)
3807 {
3808 	uint32_t entry_size, i;
3809 	uint32_t total_mem_size;
3810 	qdf_dma_addr_t *baseaddr = NULL;
3811 	struct dp_srng *dp_srng;
3812 	uint32_t ring_type;
3813 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
3814 	uint32_t tlds;
3815 
3816 	ring_type = WBM_IDLE_LINK;
3817 	dp_srng = &soc->wbm_idle_link_ring;
3818 	tlds = soc->total_link_descs;
3819 
3820 	entry_size = hal_srng_get_entrysize(soc->hal_soc, ring_type);
3821 	total_mem_size = entry_size * tlds;
3822 
3823 	if (total_mem_size <= max_alloc_size) {
3824 		if (dp_srng_alloc(soc, dp_srng, ring_type, tlds, 0)) {
3825 			dp_init_err("%pK: Link desc idle ring setup failed",
3826 				    soc);
3827 			goto fail;
3828 		}
3829 
3830 		wlan_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
3831 				  soc->wbm_idle_link_ring.alloc_size,
3832 				  soc->ctrl_psoc,
3833 				  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3834 				  "wbm_idle_link_ring");
3835 	} else {
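		/* The idle link list does not fit in a single SRNG
		 * allocation, so back it with multiple DMA-coherent scatter
		 * buffers instead.
		 */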
3836 		uint32_t num_scatter_bufs;
3837 		uint32_t num_entries_per_buf;
3838 		uint32_t buf_size = 0;
3839 
3840 		soc->wbm_idle_scatter_buf_size =
3841 			hal_idle_list_scatter_buf_size(soc->hal_soc);
3842 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
3843 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
3844 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
3845 					soc->hal_soc, total_mem_size,
3846 					soc->wbm_idle_scatter_buf_size);
3847 
3848 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
3849 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3850 				  FL("scatter bufs size out of bounds"));
3851 			goto fail;
3852 		}
3853 
3854 		for (i = 0; i < num_scatter_bufs; i++) {
3855 			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
3856 			buf_size = soc->wbm_idle_scatter_buf_size;
3857 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
3858 				qdf_mem_alloc_consistent(soc->osdev,
3859 							 soc->osdev->dev,
3860 							 buf_size,
3861 							 baseaddr);
3862 
3863 			if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
3864 				QDF_TRACE(QDF_MODULE_ID_DP,
3865 					  QDF_TRACE_LEVEL_ERROR,
3866 					  FL("Scatter lst memory alloc fail"));
3867 				goto fail;
3868 			}
3869 		}
3870 		soc->num_scatter_bufs = num_scatter_bufs;
3871 	}
3872 	return QDF_STATUS_SUCCESS;
3873 
3874 fail:
3875 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
3876 		void *vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
3877 		qdf_dma_addr_t paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
3878 
3879 		if (vaddr) {
3880 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
3881 						soc->wbm_idle_scatter_buf_size,
3882 						vaddr,
3883 						paddr, 0);
3884 			vaddr = NULL;
3885 		}
3886 	}
3887 	return QDF_STATUS_E_NOMEM;
3888 }
3889 
3890 qdf_export_symbol(dp_hw_link_desc_pool_banks_alloc);
3891 
3892 /*
3893  * dp_hw_link_desc_ring_init() - Initialize hw link desc rings
3894  * @soc: DP SOC handle
3895  *
3896  * Return: QDF_STATUS_SUCCESS: success
3897  *         QDF_STATUS_E_FAILURE: failure
3898  */
3899 static QDF_STATUS dp_hw_link_desc_ring_init(struct dp_soc *soc)
3900 {
3901 	struct dp_srng *dp_srng = &soc->wbm_idle_link_ring;
3902 
3903 	if (dp_srng->base_vaddr_unaligned) {
3904 		if (dp_srng_init(soc, dp_srng, WBM_IDLE_LINK, 0, 0))
3905 			return QDF_STATUS_E_FAILURE;
3906 	}
3907 	return QDF_STATUS_SUCCESS;
3908 }
3909 
3910 /*
3911  * dp_hw_link_desc_ring_deinit() - Reset hw link desc rings
3912  * @soc: DP SOC handle
3913  *
3914  * Return: None
3915  */
3916 static void dp_hw_link_desc_ring_deinit(struct dp_soc *soc)
3917 {
3918 	dp_srng_deinit(soc, &soc->wbm_idle_link_ring, WBM_IDLE_LINK, 0);
3919 }
3920 
3921 /*
3922  * dp_link_desc_ring_replenish() - Replenish hw link desc rings
3923  * @soc: DP SOC handle
3924  * @mac_id: mac id
3925  *
3926  * Return: None
3927  */
3928 void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id)
3929 {
3930 	uint32_t cookie = 0;
3931 	uint32_t page_idx = 0;
3932 	struct qdf_mem_multi_page_t *pages;
3933 	struct qdf_mem_dma_page_t *dma_pages;
3934 	uint32_t offset = 0;
3935 	uint32_t count = 0;
3936 	uint32_t desc_id = 0;
3937 	void *desc_srng;
3938 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
3939 	uint32_t *total_link_descs_addr;
3940 	uint32_t total_link_descs;
3941 	uint32_t scatter_buf_num;
3942 	uint32_t num_entries_per_buf = 0;
3943 	uint32_t rem_entries;
3944 	uint32_t num_descs_per_page;
3945 	uint32_t num_scatter_bufs = 0;
3946 	uint8_t *scatter_buf_ptr;
3947 	void *desc;
3948 
3949 	num_scatter_bufs = soc->num_scatter_bufs;
3950 
3951 	if (mac_id == WLAN_INVALID_PDEV_ID) {
3952 		pages = &soc->link_desc_pages;
3953 		total_link_descs = soc->total_link_descs;
3954 		desc_srng = soc->wbm_idle_link_ring.hal_srng;
3955 	} else {
3956 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3957 		/* dp_monitor_get_link_desc_pages returns NULL only
3958 		 * if monitor SOC is NULL
3959 		 */
3960 		if (!pages) {
3961 			dp_err("can not get link desc pages");
3962 			QDF_ASSERT(0);
3963 			return;
3964 		}
3965 		total_link_descs_addr =
3966 				dp_monitor_get_total_link_descs(soc, mac_id);
3967 		total_link_descs = *total_link_descs_addr;
3968 		desc_srng = soc->rxdma_mon_desc_ring[mac_id].hal_srng;
3969 	}
3970 
3971 	dma_pages = pages->dma_pages;
3972 	do {
3973 		qdf_mem_zero(dma_pages[page_idx].page_v_addr_start,
3974 			     pages->page_size);
3975 		page_idx++;
3976 	} while (page_idx < pages->num_pages);
3977 
3978 	if (desc_srng) {
3979 		hal_srng_access_start_unlocked(soc->hal_soc, desc_srng);
3980 		page_idx = 0;
3981 		count = 0;
3982 		offset = 0;
3983 		pages = &soc->link_desc_pages;
3984 		while ((desc = hal_srng_src_get_next(soc->hal_soc,
3985 						     desc_srng)) &&
3986 			(count < total_link_descs)) {
3987 			page_idx = count / pages->num_element_per_page;
3988 			if (desc_id == pages->num_element_per_page)
3989 				desc_id = 0;
3990 
3991 			offset = count % pages->num_element_per_page;
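			/* LINK_DESC_COOKIE packs the per-page desc_id and
			 * page_idx so the descriptor's virtual address can be
			 * recovered from a ring entry later.
			 */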
3992 			cookie = LINK_DESC_COOKIE(desc_id, page_idx,
3993 						  soc->link_desc_id_start);
3994 
3995 			hal_set_link_desc_addr(soc->hal_soc, desc, cookie,
3996 					       dma_pages[page_idx].page_p_addr
3997 					       + (offset * link_desc_size),
3998 					       soc->idle_link_bm_id);
3999 			count++;
4000 			desc_id++;
4001 		}
4002 		hal_srng_access_end_unlocked(soc->hal_soc, desc_srng);
4003 	} else {
4004 		/* Populate idle list scatter buffers with link descriptor
4005 		 * pointers
4006 		 */
4007 		scatter_buf_num = 0;
4008 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
4009 					soc->hal_soc,
4010 					soc->wbm_idle_scatter_buf_size);
4011 
4012 		scatter_buf_ptr = (uint8_t *)(
4013 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
4014 		rem_entries = num_entries_per_buf;
4015 		pages = &soc->link_desc_pages;
4016 		page_idx = 0; count = 0;
4017 		offset = 0;
4018 		num_descs_per_page = pages->num_element_per_page;
4019 
4020 		while (count < total_link_descs) {
4021 			page_idx = count / num_descs_per_page;
4022 			offset = count % num_descs_per_page;
4023 			if (desc_id == pages->num_element_per_page)
4024 				desc_id = 0;
4025 
4026 			cookie = LINK_DESC_COOKIE(desc_id, page_idx,
4027 						  soc->link_desc_id_start);
4028 			hal_set_link_desc_addr(soc->hal_soc,
4029 					       (void *)scatter_buf_ptr,
4030 					       cookie,
4031 					       dma_pages[page_idx].page_p_addr +
4032 					       (offset * link_desc_size),
4033 					       soc->idle_link_bm_id);
4034 			rem_entries--;
4035 			if (rem_entries) {
4036 				scatter_buf_ptr += link_desc_size;
4037 			} else {
4038 				rem_entries = num_entries_per_buf;
4039 				scatter_buf_num++;
4040 				if (scatter_buf_num >= num_scatter_bufs)
4041 					break;
4042 				scatter_buf_ptr = (uint8_t *)
4043 					(soc->wbm_idle_scatter_buf_base_vaddr[
4044 					 scatter_buf_num]);
4045 			}
4046 			count++;
4047 			desc_id++;
4048 		}
4049 		/* Setup link descriptor idle list in HW */
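		/* The byte-offset argument below tells HW how much of the
		 * last scatter buffer (scatter_buf_num - 1) was populated.
		 */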
4050 		hal_setup_link_idle_list(soc->hal_soc,
4051 			soc->wbm_idle_scatter_buf_base_paddr,
4052 			soc->wbm_idle_scatter_buf_base_vaddr,
4053 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
4054 			(uint32_t)(scatter_buf_ptr -
4055 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
4056 			scatter_buf_num-1])), total_link_descs);
4057 	}
4058 }
4059 
4060 qdf_export_symbol(dp_link_desc_ring_replenish);
4061 
4062 #ifdef IPA_OFFLOAD
4063 #define USE_1_IPA_RX_REO_RING 1
4064 #define USE_2_IPA_RX_REO_RINGS 2
4065 #define REO_DST_RING_SIZE_QCA6290 1023
4066 #ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
4067 #define REO_DST_RING_SIZE_QCA8074 1023
4068 #define REO_DST_RING_SIZE_QCN9000 2048
4069 #else
4070 #define REO_DST_RING_SIZE_QCA8074 8
4071 #define REO_DST_RING_SIZE_QCN9000 8
4072 #endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
4073 
4074 #ifdef IPA_WDI3_TX_TWO_PIPES
4075 #ifdef DP_MEMORY_OPT
4076 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
4077 {
4078 	return dp_init_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
4079 }
4080 
4081 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
4082 {
4083 	dp_deinit_tx_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
4084 }
4085 
4086 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
4087 {
4088 	return dp_alloc_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
4089 }
4090 
4091 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
4092 {
4093 	dp_free_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
4094 }
4095 
4096 #else /* !DP_MEMORY_OPT */
4097 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
4098 {
4099 	return 0;
4100 }
4101 
4102 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
4103 {
4104 }
4105 
4106 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
4107 {
4108 	return 0;
4109 }
4110 
4111 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
4112 {
4113 }
4114 #endif /* DP_MEMORY_OPT */
4115 
4116 static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
4117 {
4118 	hal_tx_init_data_ring(soc->hal_soc,
4119 			      soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng);
4120 }
4121 
4122 #else /* !IPA_WDI3_TX_TWO_PIPES */
4123 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
4124 {
4125 	return 0;
4126 }
4127 
4128 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
4129 {
4130 }
4131 
4132 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
4133 {
4134 	return 0;
4135 }
4136 
4137 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
4138 {
4139 }
4140 
4141 static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
4142 {
4143 }
4144 
4145 #endif /* IPA_WDI3_TX_TWO_PIPES */
4146 
4147 #else
4148 
4149 #define REO_DST_RING_SIZE_QCA6290 1024
4150 
4151 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
4152 {
4153 	return 0;
4154 }
4155 
4156 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
4157 {
4158 }
4159 
4160 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
4161 {
4162 	return 0;
4163 }
4164 
4165 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
4166 {
4167 }
4168 
4169 static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
4170 {
4171 }
4172 
4173 #endif /* IPA_OFFLOAD */
4174 
4175 /*
4176  * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
4177  * @soc: Datapath soc handler
4178  *
4179  * This api resets the default cpu ring map
4180  */
4181 
4182 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
4183 {
4184 	uint8_t i;
4185 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4186 
4187 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
4188 		switch (nss_config) {
4189 		case dp_nss_cfg_first_radio:
4190 			/*
4191 			 * Setting Tx ring map for one nss offloaded radio
4192 			 */
4193 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
4194 			break;
4195 
4196 		case dp_nss_cfg_second_radio:
4197 			/*
4198 			 * Setting Tx ring map for the second nss offloaded radio
4199 			 */
4200 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
4201 			break;
4202 
4203 		case dp_nss_cfg_dbdc:
4204 			/*
4205 			 * Setting Tx ring map for 2 nss offloaded radios
4206 			 */
4207 			soc->tx_ring_map[i] =
4208 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
4209 			break;
4210 
4211 		case dp_nss_cfg_dbtc:
4212 			/*
4213 			 * Setting Tx ring map for 3 nss offloaded radios
4214 			 */
4215 			soc->tx_ring_map[i] =
4216 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
4217 			break;
4218 
4219 		default:
4220 			dp_err("tx_ring_map failed due to invalid nss cfg");
4221 			break;
4222 		}
4223 	}
4224 }
4225 
4226 /*
4227  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
4228  * @soc: DP soc handle
4229  * @ring_type: ring type
4230  * @ring_num: ring number
4231  *
4232  * Return: 1 if the ring is offloaded to NSS, 0 otherwise
4233  */
4234 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
4235 {
4236 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4237 	uint8_t status = 0;
4238 
4239 	switch (ring_type) {
4240 	case WBM2SW_RELEASE:
4241 	case REO_DST:
4242 	case RXDMA_BUF:
4243 	case REO_EXCEPTION:
4244 		status = ((nss_config) & (1 << ring_num));
4245 		break;
4246 	default:
4247 		break;
4248 	}
4249 
4250 	return status;
4251 }
4252 
4253 /*
4254  * dp_soc_disable_unused_mac_intr_mask() - reset interrupt mask for
4255  *					  unused WMAC hw rings
4256  * @soc: DP Soc handle
4257  * @mac_num: WMAC number
4258  *
4259  * Return: void
4260  */
4261 static void dp_soc_disable_unused_mac_intr_mask(struct dp_soc *soc,
4262 						int mac_num)
4263 {
4264 	uint8_t *grp_mask = NULL;
4265 	int group_number;
4266 
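	/* Zero this WMAC's bit in every per-LMAC interrupt group mask:
	 * host2rxdma, rx_mon, rxdma2host and host2rxdma_mon.
	 */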
4267 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
4268 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4269 	wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
4270 					  group_number, 0x0);
4271 
4272 	grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
4273 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4274 	wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
4275 				      group_number, 0x0);
4276 
4277 	grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
4278 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4279 	wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
4280 					  group_number, 0x0);
4281 
4282 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
4283 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4284 	wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
4285 					      group_number, 0x0);
4286 }
4287 
4288 #ifdef IPA_OFFLOAD
4289 #ifdef IPA_WDI3_VLAN_SUPPORT
4290 /*
4291  * dp_soc_reset_ipa_vlan_intr_mask() - reset interrupt mask for IPA offloaded
4292  * ring for vlan tagged traffic
4293  * @soc: DP Soc handle
4294  *
4295  * Return: void
4296  */
4297 static void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
4298 {
4299 	uint8_t *grp_mask = NULL;
4300 	int group_number, mask;
4301 
4302 	if (!wlan_ipa_is_vlan_enabled())
4303 		return;
4304 
4305 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
4306 
4307 	group_number = dp_srng_find_ring_in_mask(IPA_ALT_REO_DEST_RING_IDX, grp_mask);
4308 	if (group_number < 0) {
4309 		dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4310 			      soc, REO_DST, IPA_ALT_REO_DEST_RING_IDX);
4311 		return;
4312 	}
4313 
4314 	mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
4315 
4316 	/* reset the interrupt mask for offloaded ring */
4317 	mask &= (~(1 << IPA_ALT_REO_DEST_RING_IDX));
4318 
4319 	/*
4320 	 * set the interrupt mask to zero for rx offloaded radio.
4321 	 */
4322 	wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
4323 }
4324 #else
4325 static inline
4326 void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
4327 { }
4328 #endif /* IPA_WDI3_VLAN_SUPPORT */
4329 #else
4330 static inline
4331 void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
4332 { }
4333 #endif /* IPA_OFFLOAD */
4334 
4335 /*
4336  * dp_soc_reset_intr_mask() - reset interrupt mask
4337  * @soc: DP Soc handle
4338  *
4339  * Return: void
4340  */
4341 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
4342 {
4343 	uint8_t j;
4344 	uint8_t *grp_mask = NULL;
4345 	int group_number, mask, num_ring;
4346 
4347 	/* number of tx rings */
4348 	num_ring = soc->num_tcl_data_rings;
4349 
4350 	/*
4351 	 * group mask for tx completion ring.
4352 	 */
4353 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
4354 
4355 	/* loop and reset the mask for only offloaded ring */
4356 	for (j = 0; j < WLAN_CFG_NUM_TCL_DATA_RINGS; j++) {
4357 		/*
4358 		 * Group number corresponding to tx offloaded ring.
4359 		 */
4360 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
4361 		if (group_number < 0) {
4362 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4363 				      soc, WBM2SW_RELEASE, j);
4364 			continue;
4365 		}
4366 
4367 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
4368 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j) &&
4369 		    (!mask)) {
4370 			continue;
4371 		}
4372 
4373 		/* reset the tx mask for offloaded ring */
4374 		mask &= (~(1 << j));
4375 
4376 		/*
4377 		 * reset the interrupt mask for offloaded ring.
4378 		 */
4379 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
4380 	}
4381 
4382 	/* number of rx rings */
4383 	num_ring = soc->num_reo_dest_rings;
4384 
4385 	/*
4386 	 * group mask for reo destination ring.
4387 	 */
4388 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
4389 
4390 	/* loop and reset the mask for only offloaded ring */
4391 	for (j = 0; j < WLAN_CFG_NUM_REO_DEST_RING; j++) {
4392 		/*
4393 		 * Group number corresponding to rx offloaded ring.
4394 		 */
4395 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
4396 		if (group_number < 0) {
4397 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4398 				      soc, REO_DST, j);
4399 			continue;
4400 		}
4401 
4402 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
4403 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j) &&
4404 		    (!mask)) {
4405 			continue;
4406 		}
4407 
4408 		/* reset the interrupt mask for offloaded ring */
4409 		mask &= (~(1 << j));
4410 
4411 		/*
4412 		 * set the interrupt mask to zero for rx offloaded radio.
4413 		 */
4414 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
4415 	}
4416 
4417 	/*
4418 	 * group mask for Rx buffer refill ring
4419 	 */
4420 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
4421 
4422 	/* loop and reset the mask for only offloaded ring */
4423 	for (j = 0; j < MAX_PDEV_CNT; j++) {
4424 		int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
4425 
4426 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
4427 			continue;
4428 		}
4429 
4430 		/*
4431 		 * Group number corresponding to rx offloaded ring.
4432 		 */
4433 		group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask);
4434 		if (group_number < 0) {
4435 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4436 				      soc, RXDMA_BUF, lmac_id);
4437 			continue;
4438 		}
4439 
4440 		/* set the interrupt mask for offloaded ring */
4441 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
4442 				group_number);
4443 		mask &= (~(1 << lmac_id));
4444 
4445 		/*
4446 		 * set the interrupt mask to zero for rx offloaded radio.
4447 		 */
4448 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
4449 			group_number, mask);
4450 	}
4451 
4452 	grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
4453 
4454 	for (j = 0; j < num_ring; j++) {
4455 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_EXCEPTION, j)) {
4456 			continue;
4457 		}
4458 
4459 		/*
4460 		 * Group number corresponding to rx err ring.
4461 		 */
4462 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
4463 		if (group_number < 0) {
4464 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4465 				      soc, REO_EXCEPTION, j);
4466 			continue;
4467 		}
4468 
4469 		wlan_cfg_set_rx_err_ring_mask(soc->wlan_cfg_ctx,
4470 					      group_number, 0);
4471 	}
4472 }
4473 
4474 #ifdef IPA_OFFLOAD
4475 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap0,
4476 			 uint32_t *remap1, uint32_t *remap2)
4477 {
4478 	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX] = {
4479 				REO_REMAP_SW1, REO_REMAP_SW2, REO_REMAP_SW3,
4480 				REO_REMAP_SW5, REO_REMAP_SW6, REO_REMAP_SW7};
4481 
4482 	switch (soc->arch_id) {
4483 	case CDP_ARCH_TYPE_BE:
4484 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4485 					      soc->num_reo_dest_rings -
4486 					      USE_2_IPA_RX_REO_RINGS, remap1,
4487 					      remap2);
4488 		break;
4489 
4490 	case CDP_ARCH_TYPE_LI:
4491 		if (wlan_ipa_is_vlan_enabled()) {
4492 			hal_compute_reo_remap_ix2_ix3(
4493 					soc->hal_soc, ring,
4494 					soc->num_reo_dest_rings -
4495 					USE_2_IPA_RX_REO_RINGS, remap1,
4496 					remap2);
4497 
4498 		} else {
4499 			hal_compute_reo_remap_ix2_ix3(
4500 					soc->hal_soc, ring,
4501 					soc->num_reo_dest_rings -
4502 					USE_1_IPA_RX_REO_RING, remap1,
4503 					remap2);
4504 		}
4505 
4506 		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);
4507 		break;
4508 	default:
4509 		dp_err("unknown arch_id 0x%x", soc->arch_id);
4510 		QDF_BUG(0);
4511 
4512 	}
4513 
4514 	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
4515 
4516 	return true;
4517 }
4518 
4519 #ifdef IPA_WDI3_TX_TWO_PIPES
4520 static bool dp_ipa_is_alt_tx_ring(int index)
4521 {
4522 	return index == IPA_TX_ALT_RING_IDX;
4523 }
4524 
4525 static bool dp_ipa_is_alt_tx_comp_ring(int index)
4526 {
4527 	return index == IPA_TX_ALT_COMP_RING_IDX;
4528 }
4529 #else /* !IPA_WDI3_TX_TWO_PIPES */
4530 static bool dp_ipa_is_alt_tx_ring(int index)
4531 {
4532 	return false;
4533 }
4534 
4535 static bool dp_ipa_is_alt_tx_comp_ring(int index)
4536 {
4537 	return false;
4538 }
4539 #endif /* IPA_WDI3_TX_TWO_PIPES */
4540 
4541 /**
4542  * dp_ipa_get_tx_ring_size() - Get Tx ring size for IPA
4543  *
4544  * @tx_ring_num: Tx ring number
4545  * @tx_ipa_ring_sz: Return param only updated for IPA.
4546  * @soc_cfg_ctx: dp soc cfg context
4547  *
4548  * Return: None
4549  */
4550 static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz,
4551 				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4552 {
4553 	if (!soc_cfg_ctx->ipa_enabled)
4554 		return;
4555 
4556 	if (tx_ring_num == IPA_TCL_DATA_RING_IDX)
4557 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_ring_size(soc_cfg_ctx);
4558 	else if (dp_ipa_is_alt_tx_ring(tx_ring_num))
4559 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_alt_ring_size(soc_cfg_ctx);
4560 }
4561 
4562 /**
4563  * dp_ipa_get_tx_comp_ring_size() - Get Tx comp ring size for IPA
4564  *
4565  * @tx_comp_ring_num: Tx comp ring number
4566  * @tx_comp_ipa_ring_sz: Return param only updated for IPA.
4567  * @soc_cfg_ctx: dp soc cfg context
4568  *
4569  * Return: None
4570  */
4571 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
4572 					 int *tx_comp_ipa_ring_sz,
4573 				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4574 {
4575 	if (!soc_cfg_ctx->ipa_enabled)
4576 		return;
4577 
4578 	if (tx_comp_ring_num == IPA_TCL_DATA_RING_IDX)
4579 		*tx_comp_ipa_ring_sz =
4580 				wlan_cfg_ipa_tx_comp_ring_size(soc_cfg_ctx);
4581 	else if (dp_ipa_is_alt_tx_comp_ring(tx_comp_ring_num))
4582 		*tx_comp_ipa_ring_sz =
4583 				wlan_cfg_ipa_tx_alt_comp_ring_size(soc_cfg_ctx);
4584 }
4585 #else
4586 static uint8_t dp_reo_ring_selection(uint32_t value, uint32_t *ring)
4587 {
4588 	uint8_t num = 0;
4589 
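	/* 'value' is a bitmap of enabled REO destination rings: bit N
	 * selects REO_REMAP_SW(N + 1), e.g. 0xE maps to SW2, SW3 and SW4.
	 */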
4590 	switch (value) {
4591 	/* should we have all the different possible ring configs */
4592 	case 0xFF:
4593 		num = 8;
4594 		ring[0] = REO_REMAP_SW1;
4595 		ring[1] = REO_REMAP_SW2;
4596 		ring[2] = REO_REMAP_SW3;
4597 		ring[3] = REO_REMAP_SW4;
4598 		ring[4] = REO_REMAP_SW5;
4599 		ring[5] = REO_REMAP_SW6;
4600 		ring[6] = REO_REMAP_SW7;
4601 		ring[7] = REO_REMAP_SW8;
4602 		break;
4603 
4604 	case 0x3F:
4605 		num = 6;
4606 		ring[0] = REO_REMAP_SW1;
4607 		ring[1] = REO_REMAP_SW2;
4608 		ring[2] = REO_REMAP_SW3;
4609 		ring[3] = REO_REMAP_SW4;
4610 		ring[4] = REO_REMAP_SW5;
4611 		ring[5] = REO_REMAP_SW6;
4612 		break;
4613 
4614 	case 0xF:
4615 		num = 4;
4616 		ring[0] = REO_REMAP_SW1;
4617 		ring[1] = REO_REMAP_SW2;
4618 		ring[2] = REO_REMAP_SW3;
4619 		ring[3] = REO_REMAP_SW4;
4620 		break;
4621 	case 0xE:
4622 		num = 3;
4623 		ring[0] = REO_REMAP_SW2;
4624 		ring[1] = REO_REMAP_SW3;
4625 		ring[2] = REO_REMAP_SW4;
4626 		break;
4627 	case 0xD:
4628 		num = 3;
4629 		ring[0] = REO_REMAP_SW1;
4630 		ring[1] = REO_REMAP_SW3;
4631 		ring[2] = REO_REMAP_SW4;
4632 		break;
4633 	case 0xC:
4634 		num = 2;
4635 		ring[0] = REO_REMAP_SW3;
4636 		ring[1] = REO_REMAP_SW4;
4637 		break;
4638 	case 0xB:
4639 		num = 3;
4640 		ring[0] = REO_REMAP_SW1;
4641 		ring[1] = REO_REMAP_SW2;
4642 		ring[2] = REO_REMAP_SW4;
4643 		break;
4644 	case 0xA:
4645 		num = 2;
4646 		ring[0] = REO_REMAP_SW2;
4647 		ring[1] = REO_REMAP_SW4;
4648 		break;
4649 	case 0x9:
4650 		num = 2;
4651 		ring[0] = REO_REMAP_SW1;
4652 		ring[1] = REO_REMAP_SW4;
4653 		break;
4654 	case 0x8:
4655 		num = 1;
4656 		ring[0] = REO_REMAP_SW4;
4657 		break;
4658 	case 0x7:
4659 		num = 3;
4660 		ring[0] = REO_REMAP_SW1;
4661 		ring[1] = REO_REMAP_SW2;
4662 		ring[2] = REO_REMAP_SW3;
4663 		break;
4664 	case 0x6:
4665 		num = 2;
4666 		ring[0] = REO_REMAP_SW2;
4667 		ring[1] = REO_REMAP_SW3;
4668 		break;
4669 	case 0x5:
4670 		num = 2;
4671 		ring[0] = REO_REMAP_SW1;
4672 		ring[1] = REO_REMAP_SW3;
4673 		break;
4674 	case 0x4:
4675 		num = 1;
4676 		ring[0] = REO_REMAP_SW3;
4677 		break;
4678 	case 0x3:
4679 		num = 2;
4680 		ring[0] = REO_REMAP_SW1;
4681 		ring[1] = REO_REMAP_SW2;
4682 		break;
4683 	case 0x2:
4684 		num = 1;
4685 		ring[0] = REO_REMAP_SW2;
4686 		break;
4687 	case 0x1:
4688 		num = 1;
4689 		ring[0] = REO_REMAP_SW1;
4690 		break;
4691 	default:
4692 		dp_err("unknown reo ring map 0x%x", value);
4693 		QDF_BUG(0);
4694 	}
4695 	return num;
4696 }
4697 
4698 bool dp_reo_remap_config(struct dp_soc *soc,
4699 			 uint32_t *remap0,
4700 			 uint32_t *remap1,
4701 			 uint32_t *remap2)
4702 {
4703 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4704 	uint32_t reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
4705 	uint8_t num;
4706 	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX];
4707 	uint32_t value;
4708 
4709 	switch (offload_radio) {
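	/* An NSS-offloaded radio owns one SW ring, so mask it out of the
	 * remap: 0xE drops SW1 for the first radio, 0xD drops SW2 for the
	 * second.
	 */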
4710 	case dp_nss_cfg_default:
4711 		value = reo_config & WLAN_CFG_NUM_REO_RINGS_MAP_MAX;
4712 		num = dp_reo_ring_selection(value, ring);
4713 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4714 					      num, remap1, remap2);
4715 		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);
4716 
4717 		break;
4718 	case dp_nss_cfg_first_radio:
4719 		value = reo_config & 0xE;
4720 		num = dp_reo_ring_selection(value, ring);
4721 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4722 					      num, remap1, remap2);
4723 
4724 		break;
4725 	case dp_nss_cfg_second_radio:
4726 		value = reo_config & 0xD;
4727 		num = dp_reo_ring_selection(value, ring);
4728 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4729 					      num, remap1, remap2);
4730 
4731 		break;
4732 	case dp_nss_cfg_dbdc:
4733 	case dp_nss_cfg_dbtc:
4734 		/* return false if both or all are offloaded to NSS */
4735 		return false;
4736 
4737 	}
4738 
4739 	dp_debug("remap1 %x remap2 %x offload_radio %u",
4740 		 *remap1, *remap2, offload_radio);
4741 	return true;
4742 }
4743 
4744 static void dp_ipa_get_tx_ring_size(int ring_num, int *tx_ipa_ring_sz,
4745 				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4746 {
4747 }
4748 
4749 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
4750 					 int *tx_comp_ipa_ring_sz,
4751 				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4752 {
4753 }
4754 #endif /* IPA_OFFLOAD */
4755 
4756 /*
4757  * dp_reo_frag_dst_set() - configure reo register to set the
4758  *                        fragment destination ring
4759  * @soc: Datapath soc
4760  * @frag_dst_ring: output parameter to set fragment destination ring
4761  *
4762  * Based on offload_radio below, the fragment destination ring is selected:
4763  * 0 - TCL
4764  * 1 - SW1
4765  * 2 - SW2
4766  * 3 - SW3
4767  * 4 - SW4
4768  * 5 - Release
4769  * 6 - FW
4770  * 7 - alternate select
4771  *
4772  * return: void
4773  */
4774 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
4775 {
4776 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4777 
4778 	switch (offload_radio) {
4779 	case dp_nss_cfg_default:
4780 		*frag_dst_ring = REO_REMAP_TCL;
4781 		break;
4782 	case dp_nss_cfg_first_radio:
4783 		/*
4784 		 * This configuration is valid for single band radio which
4785 		 * is also NSS offload.
4786 		 */
4787 	case dp_nss_cfg_dbdc:
4788 	case dp_nss_cfg_dbtc:
4789 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
4790 		break;
4791 	default:
4792 		dp_init_err("%pK: dp_reo_frag_dst_set invalid offload radio config", soc);
4793 		break;
4794 	}
4795 }
4796 
4797 #ifdef ENABLE_VERBOSE_DEBUG
4798 static void dp_enable_verbose_debug(struct dp_soc *soc)
4799 {
4800 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
4801 
4802 	soc_cfg_ctx = soc->wlan_cfg_ctx;
4803 
4804 	if (soc_cfg_ctx->per_pkt_trace & dp_verbose_debug_mask)
4805 		is_dp_verbose_debug_enabled = true;
4806 
4807 	if (soc_cfg_ctx->per_pkt_trace & hal_verbose_debug_mask)
4808 		hal_set_verbose_debug(true);
4809 	else
4810 		hal_set_verbose_debug(false);
4811 }
4812 #else
4813 static void dp_enable_verbose_debug(struct dp_soc *soc)
4814 {
4815 }
4816 #endif
4817 
4818 #ifdef WLAN_FEATURE_STATS_EXT
4819 static inline void dp_create_ext_stats_event(struct dp_soc *soc)
4820 {
4821 	qdf_event_create(&soc->rx_hw_stats_event);
4822 }
4823 #else
4824 static inline void dp_create_ext_stats_event(struct dp_soc *soc)
4825 {
4826 }
4827 #endif
4828 
4829 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index)
4830 {
4831 	int tcl_ring_num, wbm_ring_num;
4832 
4833 	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
4834 						index,
4835 						&tcl_ring_num,
4836 						&wbm_ring_num);
4837 
4838 	if (tcl_ring_num == -1) {
4839 		dp_err("incorrect tcl ring num for index %u", index);
4840 		return;
4841 	}
4842 
4843 	wlan_minidump_remove(soc->tcl_data_ring[index].base_vaddr_unaligned,
4844 			     soc->tcl_data_ring[index].alloc_size,
4845 			     soc->ctrl_psoc,
4846 			     WLAN_MD_DP_SRNG_TCL_DATA,
4847 			     "tcl_data_ring");
4848 	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
4849 	dp_srng_deinit(soc, &soc->tcl_data_ring[index], TCL_DATA,
4850 		       tcl_ring_num);
4851 
4852 	if (wbm_ring_num == INVALID_WBM_RING_NUM)
4853 		return;
4854 
4855 	wlan_minidump_remove(soc->tx_comp_ring[index].base_vaddr_unaligned,
4856 			     soc->tx_comp_ring[index].alloc_size,
4857 			     soc->ctrl_psoc,
4858 			     WLAN_MD_DP_SRNG_TX_COMP,
4859 			     "tcl_comp_ring");
4860 	dp_srng_deinit(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4861 		       wbm_ring_num);
4862 }
4863 
4864 /**
4865  * dp_init_tx_ring_pair_by_index() - The function inits tcl data/wbm completion
4866  * ring pair
4867  * @soc: DP soc pointer
4868  * @index: index of soc->tcl_data or soc->tx_comp to initialize
4869  *
4870  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
4871  */
4872 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
4873 						uint8_t index)
4874 {
4875 	int tcl_ring_num, wbm_ring_num;
4876 	uint8_t bm_id;
4877 
4878 	if (index >= MAX_TCL_DATA_RINGS) {
4879 		dp_err("unexpected index!");
4880 		QDF_BUG(0);
4881 		goto fail1;
4882 	}
4883 
4884 	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
4885 						index,
4886 						&tcl_ring_num,
4887 						&wbm_ring_num);
4888 
4889 	if (tcl_ring_num == -1) {
4890 		dp_err("incorrect tcl ring num for index %u", index);
4891 		goto fail1;
4892 	}
4893 
4894 	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
4895 	if (dp_srng_init(soc, &soc->tcl_data_ring[index], TCL_DATA,
4896 			 tcl_ring_num, 0)) {
4897 		dp_err("dp_srng_init failed for tcl_data_ring");
4898 		goto fail1;
4899 	}
4900 	wlan_minidump_log(soc->tcl_data_ring[index].base_vaddr_unaligned,
4901 			  soc->tcl_data_ring[index].alloc_size,
4902 			  soc->ctrl_psoc,
4903 			  WLAN_MD_DP_SRNG_TCL_DATA,
4904 			  "tcl_data_ring");
4905 
4906 	if (wbm_ring_num == INVALID_WBM_RING_NUM)
4907 		goto set_rbm;
4908 
4909 	if (dp_srng_init(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4910 			 wbm_ring_num, 0)) {
4911 		dp_err("dp_srng_init failed for tx_comp_ring");
4912 		goto fail1;
4913 	}
4914 
4915 	wlan_minidump_log(soc->tx_comp_ring[index].base_vaddr_unaligned,
4916 			  soc->tx_comp_ring[index].alloc_size,
4917 			  soc->ctrl_psoc,
4918 			  WLAN_MD_DP_SRNG_TX_COMP,
4919 			  "tcl_comp_ring");
4920 set_rbm:
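	/* Program the return buffer manager (RBM) id used implicitly for
	 * buffers transmitted on this TCL ring.
	 */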
4921 	bm_id = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_ring_num);
4922 
4923 	soc->arch_ops.tx_implicit_rbm_set(soc, tcl_ring_num, bm_id);
4924 
4925 	return QDF_STATUS_SUCCESS;
4926 
4927 fail1:
4928 	return QDF_STATUS_E_FAILURE;
4929 }
4930 
4931 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index)
4932 {
4933 	dp_debug("index %u", index);
4934 	dp_srng_free(soc, &soc->tcl_data_ring[index]);
4935 	dp_srng_free(soc, &soc->tx_comp_ring[index]);
4936 }
4937 
4938 /**
4939  * dp_alloc_tx_ring_pair_by_index() - The function allocs tcl data/wbm2sw
4940  * ring pair for the given "index"
4941  * @soc: DP soc pointer
4942  * @index: index of soc->tcl_data or soc->tx_comp to allocate
4943  *
4944  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
4945  */
4946 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
4947 						 uint8_t index)
4948 {
4949 	int tx_ring_size;
4950 	int tx_comp_ring_size;
4951 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
4952 	int cached = 0;
4953 
4954 	if (index >= MAX_TCL_DATA_RINGS) {
4955 		dp_err("unexpected index!");
4956 		QDF_BUG(0);
4957 		goto fail1;
4958 	}
4959 
4960 	dp_debug("index %u", index);
4961 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
4962 	dp_ipa_get_tx_ring_size(index, &tx_ring_size, soc_cfg_ctx);
4963 
4964 	if (dp_srng_alloc(soc, &soc->tcl_data_ring[index], TCL_DATA,
4965 			  tx_ring_size, cached)) {
4966 		dp_err("dp_srng_alloc failed for tcl_data_ring");
4967 		goto fail1;
4968 	}
4969 
4970 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
4971 	dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size, soc_cfg_ctx);
4972 	/* Enable cached TCL desc if NSS offload is disabled */
4973 	if (!wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
4974 		cached = WLAN_CFG_DST_RING_CACHED_DESC;
4975 
4976 	if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) ==
4977 	    INVALID_WBM_RING_NUM)
4978 		return QDF_STATUS_SUCCESS;
4979 
4980 	if (dp_srng_alloc(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4981 			  tx_comp_ring_size, cached)) {
4982 		dp_err("dp_srng_alloc failed for tx_comp_ring");
4983 		goto fail1;
4984 	}
4985 
4986 	return QDF_STATUS_SUCCESS;
4987 
4988 fail1:
4989 	return QDF_STATUS_E_FAILURE;
4990 }
4991 
4992 static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
4993 {
4994 	struct cdp_lro_hash_config lro_hash;
4995 	QDF_STATUS status;
4996 
4997 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
4998 	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
4999 	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
5000 		dp_err("LRO, GRO and RX hash disabled");
5001 		return QDF_STATUS_E_FAILURE;
5002 	}
5003 
5004 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
5005 
5006 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
5007 	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
5008 		lro_hash.lro_enable = 1;
5009 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
5010 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
5011 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
5012 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
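		/* This flag/mask pair limits aggregation to segments whose
		 * masked TCP flags equal a plain ACK (no SYN/FIN/RST/URG/
		 * ECE/CWR set).
		 */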
5013 	}
5014 
5015 	soc->arch_ops.get_rx_hash_key(soc, &lro_hash);
5016 
5017 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
5018 
5019 	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
5020 		QDF_BUG(0);
5021 		dp_err("lro_hash_config not configured");
5022 		return QDF_STATUS_E_FAILURE;
5023 	}
5024 
5025 	status = soc->cdp_soc.ol_ops->lro_hash_config(soc->ctrl_psoc,
5026 						      pdev->pdev_id,
5027 						      &lro_hash);
5028 	if (!QDF_IS_STATUS_SUCCESS(status)) {
5029 		dp_err("failed to send lro_hash_config to FW %u", status);
5030 		return status;
5031 	}
5032 
5033 	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
5034 		lro_hash.lro_enable, lro_hash.tcp_flag,
5035 		lro_hash.tcp_flag_mask);
5036 
5037 	dp_info("toeplitz_hash_ipv4:");
5038 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5039 			   lro_hash.toeplitz_hash_ipv4,
5040 			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
5041 			   LRO_IPV4_SEED_ARR_SZ));
5042 
5043 	dp_info("toeplitz_hash_ipv6:");
5044 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5045 			   lro_hash.toeplitz_hash_ipv6,
5046 			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
5047 			   LRO_IPV6_SEED_ARR_SZ));
5048 
5049 	return status;
5050 }
5051 
5052 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
5053 /*
5054  * dp_reap_timer_init() - initialize the reap timer
5055  * @soc: data path SoC handle
5056  *
5057  * Return: void
5058  */
5059 static void dp_reap_timer_init(struct dp_soc *soc)
5060 {
5061 	/*
5062 	 * Timer to reap rxdma status rings.
5063 	 * Needed until we enable ppdu end interrupts
5064 	 */
5065 	dp_monitor_reap_timer_init(soc);
5066 	dp_monitor_vdev_timer_init(soc);
5067 }
5068 
5069 /*
5070  * dp_reap_timer_deinit() - de-initialize the reap timer
5071  * @soc: data path SoC handle
5072  *
5073  * Return: void
5074  */
5075 static void dp_reap_timer_deinit(struct dp_soc *soc)
5076 {
5077 	dp_monitor_reap_timer_deinit(soc);
5078 }
5079 #else
5080 /* WIN use case */
5081 static void dp_reap_timer_init(struct dp_soc *soc)
5082 {
5083 	/* Configure LMAC rings in Polled mode */
5084 	if (soc->lmac_polled_mode) {
5085 		/*
5086 		 * Timer to reap lmac rings.
5087 		 */
5088 		qdf_timer_init(soc->osdev, &soc->lmac_reap_timer,
5089 			       dp_service_lmac_rings, (void *)soc,
5090 			       QDF_TIMER_TYPE_WAKE_APPS);
5091 		soc->lmac_timer_init = 1;
5092 		qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
5093 	}
5094 }
5095 
5096 static void dp_reap_timer_deinit(struct dp_soc *soc)
5097 {
5098 	if (soc->lmac_timer_init) {
5099 		qdf_timer_stop(&soc->lmac_reap_timer);
5100 		qdf_timer_free(&soc->lmac_reap_timer);
5101 		soc->lmac_timer_init = 0;
5102 	}
5103 }
5104 #endif
5105 
5106 #ifdef QCA_HOST2FW_RXBUF_RING
5107 /*
5108  * dp_rxdma_ring_alloc() - allocate the RXDMA rings
5109  * @soc: data path SoC handle
5110  * @pdev: Physical device handle
5111  *
5112  * Return: QDF_STATUS_SUCCESS - success, QDF_STATUS_E_FAILURE - failure
5113  */
5114 static int dp_rxdma_ring_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
5115 {
5116 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
5117 	int max_mac_rings;
5118 	int i;
5119 	int ring_size;
5120 
5121 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
5122 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
5123 	ring_size =  wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx);
5124 
5125 	for (i = 0; i < max_mac_rings; i++) {
5126 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
5127 		if (dp_srng_alloc(soc, &pdev->rx_mac_buf_ring[i],
5128 				  RXDMA_BUF, ring_size, 0)) {
5129 			dp_init_err("%pK: failed rx mac ring setup", soc);
5130 			return QDF_STATUS_E_FAILURE;
5131 		}
5132 	}
5133 	return QDF_STATUS_SUCCESS;
5134 }
5135 
5136 /*
5137  * dp_rxdma_ring_setup() - configure the RXDMA rings
5138  * @soc: data path SoC handle
5139  * @pdev: Physical device handle
5140  *
5141  * Return: QDF_STATUS_SUCCESS - success, QDF_STATUS_E_FAILURE - failure
5142  */
5143 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
5144 {
5145 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
5146 	int max_mac_rings;
5147 	int i;
5148 
5149 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
5150 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
5151 
5152 	for (i = 0; i < max_mac_rings; i++) {
5153 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
5154 		if (dp_srng_init(soc, &pdev->rx_mac_buf_ring[i],
5155 				 RXDMA_BUF, 1, i)) {
5156 			dp_init_err("%pK: failed rx mac ring setup", soc);
5157 			return QDF_STATUS_E_FAILURE;
5158 		}
5159 	}
5160 	return QDF_STATUS_SUCCESS;
5161 }
5162 
5163 /*
5164  * dp_rxdma_ring_cleanup() - Deinit the RXDMA rings and reap timer
5165  * @soc: data path SoC handle
5166  * @pdev: Physical device handle
5167  *
5168  * Return: void
5169  */
5170 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
5171 {
5172 	int i;
5173 
5174 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
5175 		dp_srng_deinit(soc, &pdev->rx_mac_buf_ring[i], RXDMA_BUF, 1);
5176 
5177 	dp_reap_timer_deinit(soc);
5178 }
5179 
5180 /*
5181  * dp_rxdma_ring_free() - Free the RXDMA rings
5182  * @pdev: Physical device handle
5183  *
5184  * Return: void
5185  */
5186 static void dp_rxdma_ring_free(struct dp_pdev *pdev)
5187 {
5188 	int i;
5189 
5190 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
5191 		dp_srng_free(pdev->soc, &pdev->rx_mac_buf_ring[i]);
5192 }
5193 
5194 #else
5195 static int dp_rxdma_ring_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
5196 {
5197 	return QDF_STATUS_SUCCESS;
5198 }
5199 
5200 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
5201 {
5202 	return QDF_STATUS_SUCCESS;
5203 }
5204 
5205 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
5206 {
5207 	dp_reap_timer_deinit(soc);
5208 }
5209 
5210 static void dp_rxdma_ring_free(struct dp_pdev *pdev)
5211 {
5212 }
5213 #endif
5214 
5215 /**
5216  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
5217  * @pdev: DP_PDEV handle
5218  *
5219  * Return: void
5220  */
5221 static inline void
5222 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
5223 {
5224 	uint8_t map_id;
5225 	struct dp_soc *soc = pdev->soc;
5226 
5227 	if (!soc)
5228 		return;
5229 
5230 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
5231 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
5232 			     default_dscp_tid_map,
5233 			     sizeof(default_dscp_tid_map));
5234 	}
5235 
5236 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
5237 		hal_tx_set_dscp_tid_map(soc->hal_soc,
5238 					default_dscp_tid_map,
5239 					map_id);
5240 	}
5241 }
5242 
5243 /**
5244  * dp_pcp_tid_map_setup(): Initialize the pcp-tid maps
5245  * @pdev: DP_PDEV handle
5246  *
5247  * Return: void
5248  */
5249 static inline void
5250 dp_pcp_tid_map_setup(struct dp_pdev *pdev)
5251 {
5252 	struct dp_soc *soc = pdev->soc;
5253 
5254 	if (!soc)
5255 		return;
5256 
5257 	qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
5258 		     sizeof(default_pcp_tid_map));
5259 	hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
5260 }
5261 
5262 #ifdef IPA_OFFLOAD
5263 /**
5264  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
5265  * @soc: data path instance
5266  * @pdev: core txrx pdev context
5267  *
5268  * Return: QDF_STATUS_SUCCESS: success
5269  *         QDF_STATUS_E_FAILURE: Error return
5270  */
5271 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5272 					   struct dp_pdev *pdev)
5273 {
5274 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5275 	int entries;
5276 
5277 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5278 		soc_cfg_ctx = soc->wlan_cfg_ctx;
5279 		entries =
5280 			wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
5281 
5282 		/* Setup second Rx refill buffer ring */
5283 		if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
5284 				  entries, 0)) {
5285 			dp_init_err("%pK: dp_srng_alloc failed for second"
5286 				    " rx refill ring", soc);
5287 			return QDF_STATUS_E_FAILURE;
5288 		}
5289 	}
5290 
5291 	return QDF_STATUS_SUCCESS;
5292 }
5293 
5294 #ifdef IPA_WDI3_VLAN_SUPPORT
5295 static int dp_setup_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5296 					       struct dp_pdev *pdev)
5297 {
5298 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5299 	int entries;
5300 
5301 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5302 	    wlan_ipa_is_vlan_enabled()) {
5303 		soc_cfg_ctx = soc->wlan_cfg_ctx;
5304 		entries =
5305 			wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
5306 
5307 		/* Setup second Rx refill buffer ring */
5308 		if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF,
5309 				  entries, 0)) {
5310 			dp_init_err("%pK: alloc failed for 3rd rx refill ring",
5311 				    soc);
5312 			return QDF_STATUS_E_FAILURE;
5313 		}
5314 	}
5315 
5316 	return QDF_STATUS_SUCCESS;
5317 }
5318 
5319 static int dp_init_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5320 					      struct dp_pdev *pdev)
5321 {
5322 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5323 	    wlan_ipa_is_vlan_enabled()) {
5324 		if (dp_srng_init(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF,
5325 				 IPA_RX_ALT_REFILL_BUF_RING_IDX,
5326 				 pdev->pdev_id)) {
5327 			dp_init_err("%pK: init failed for 3rd rx refill ring",
5328 				    soc);
5329 			return QDF_STATUS_E_FAILURE;
5330 		}
5331 	}
5332 
5333 	return QDF_STATUS_SUCCESS;
5334 }
5335 
5336 static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5337 						 struct dp_pdev *pdev)
5338 {
5339 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5340 	    wlan_ipa_is_vlan_enabled())
5341 		dp_srng_deinit(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF, 0);
5342 }
5343 
5344 static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5345 					       struct dp_pdev *pdev)
5346 {
5347 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5348 	    wlan_ipa_is_vlan_enabled())
5349 		dp_srng_free(soc, &pdev->rx_refill_buf_ring3);
5350 }
5351 #else
5352 static int dp_setup_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5353 					       struct dp_pdev *pdev)
5354 {
5355 	return QDF_STATUS_SUCCESS;
5356 }
5357 
5358 static int dp_init_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5359 					      struct dp_pdev *pdev)
5360 {
5361 	return QDF_STATUS_SUCCESS;
5362 }
5363 
5364 static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5365 						 struct dp_pdev *pdev)
5366 {
5367 }
5368 
5369 static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5370 					       struct dp_pdev *pdev)
5371 {
5372 }
5373 #endif
5374 
5375 /**
 * dp_deinit_ipa_rx_refill_buf_ring() - deinit second Rx refill buffer ring
5377  * @soc: data path instance
5378  * @pdev: core txrx pdev context
5379  *
5380  * Return: void
5381  */
5382 static void dp_deinit_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5383 					     struct dp_pdev *pdev)
5384 {
5385 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
5386 		dp_srng_deinit(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 0);
5387 }
5388 
5389 /**
 * dp_init_ipa_rx_refill_buf_ring() - Init second Rx refill buffer ring
 * @soc: data path instance
 * @pdev: core txrx pdev context
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_FAILURE: Error return
5396  */
5397 static int dp_init_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5398 					  struct dp_pdev *pdev)
5399 {
5400 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5401 		if (dp_srng_init(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
5402 				 IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id)) {
			dp_init_err("%pK: dp_srng_init failed second"
				    " rx refill ring", soc);
5405 			return QDF_STATUS_E_FAILURE;
5406 		}
5407 	}
5408 
5409 	if (dp_init_ipa_rx_alt_refill_buf_ring(soc, pdev)) {
5410 		dp_deinit_ipa_rx_refill_buf_ring(soc, pdev);
5411 		return QDF_STATUS_E_FAILURE;
5412 	}
5413 
5414 	return QDF_STATUS_SUCCESS;
5415 }
5416 
5417 /**
 * dp_free_ipa_rx_refill_buf_ring() - free second Rx refill buffer ring
5419  * @soc: data path instance
5420  * @pdev: core txrx pdev context
5421  *
5422  * Return: void
5423  */
5424 static void dp_free_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5425 					   struct dp_pdev *pdev)
5426 {
5427 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
5428 		dp_srng_free(soc, &pdev->rx_refill_buf_ring2);
5429 }
5430 #else
5431 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5432 					   struct dp_pdev *pdev)
5433 {
5434 	return QDF_STATUS_SUCCESS;
5435 }
5436 
5437 static int dp_init_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5438 					  struct dp_pdev *pdev)
5439 {
5440 	return QDF_STATUS_SUCCESS;
5441 }
5442 
5443 static void dp_deinit_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5444 					     struct dp_pdev *pdev)
5445 {
5446 }
5447 
5448 static void dp_free_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5449 					   struct dp_pdev *pdev)
5450 {
5451 }
5452 
5453 static int dp_setup_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5454 					       struct dp_pdev *pdev)
5455 {
5456 	return QDF_STATUS_SUCCESS;
5457 }
5458 
5459 static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5460 						 struct dp_pdev *pdev)
5461 {
5462 }
5463 
5464 static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5465 					       struct dp_pdev *pdev)
5466 {
5467 }
5468 #endif
5469 
5470 #ifdef DP_TX_HW_DESC_HISTORY
5471 /**
 * dp_soc_tx_hw_desc_history_attach() - Attach TX HW descriptor history
5474  * @soc: DP soc handle
5475  *
5476  * Return: None
5477  */
5478 static void dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc)
5479 {
5480 	dp_soc_frag_history_attach(soc, &soc->tx_hw_desc_history,
5481 				   DP_TX_HW_DESC_HIST_MAX_SLOTS,
5482 				   DP_TX_HW_DESC_HIST_PER_SLOT_MAX,
5483 				   sizeof(struct dp_tx_hw_desc_evt),
5484 				   true, DP_TX_HW_DESC_HIST_TYPE);
5485 }
5486 
5487 static void dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc)
5488 {
5489 	dp_soc_frag_history_detach(soc, &soc->tx_hw_desc_history,
5490 				   DP_TX_HW_DESC_HIST_MAX_SLOTS,
5491 				   true, DP_TX_HW_DESC_HIST_TYPE);
5492 }
5493 
5494 #else /* DP_TX_HW_DESC_HISTORY */
5495 static inline void
5496 dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc)
5497 {
5498 }
5499 
5500 static inline void
5501 dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc)
5502 {
5503 }
5504 #endif /* DP_TX_HW_DESC_HISTORY */
5505 
5506 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
5507 #ifndef RX_DEFRAG_DO_NOT_REINJECT
5508 /**
 * dp_soc_rx_reinject_ring_history_attach() - Attach the reo reinject ring
5510  *					    history.
5511  * @soc: DP soc handle
5512  *
5513  * Return: None
5514  */
5515 static void dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
5516 {
5517 	soc->rx_reinject_ring_history =
5518 		dp_context_alloc_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
5519 				     sizeof(struct dp_rx_reinject_history));
5520 	if (soc->rx_reinject_ring_history)
5521 		qdf_atomic_init(&soc->rx_reinject_ring_history->index);
5522 }
5523 #else /* RX_DEFRAG_DO_NOT_REINJECT */
5524 static inline void
5525 dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
5526 {
5527 }
5528 #endif /* RX_DEFRAG_DO_NOT_REINJECT */
5529 
5530 /**
5531  * dp_soc_rx_history_attach() - Attach the ring history record buffers
5532  * @soc: DP soc structure
5533  *
5534  * This function allocates the memory for recording the rx ring, rx error
5535  * ring and the reinject ring entries. There is no error returned in case
5536  * of allocation failure since the record function checks if the history is
5537  * initialized or not. We do not want to fail the driver load in case of
5538  * failure to allocate memory for debug history.
5539  *
 * Return: None
5541  */
5542 static void dp_soc_rx_history_attach(struct dp_soc *soc)
5543 {
5544 	int i;
5545 	uint32_t rx_ring_hist_size;
5546 	uint32_t rx_refill_ring_hist_size;
5547 
5548 	rx_ring_hist_size = sizeof(*soc->rx_ring_history[0]);
5549 	rx_refill_ring_hist_size = sizeof(*soc->rx_refill_ring_history[0]);
5550 
5551 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
5552 		soc->rx_ring_history[i] = dp_context_alloc_mem(
5553 				soc, DP_RX_RING_HIST_TYPE, rx_ring_hist_size);
5554 		if (soc->rx_ring_history[i])
5555 			qdf_atomic_init(&soc->rx_ring_history[i]->index);
5556 	}
5557 
5558 	soc->rx_err_ring_history = dp_context_alloc_mem(
5559 			soc, DP_RX_ERR_RING_HIST_TYPE, rx_ring_hist_size);
5560 	if (soc->rx_err_ring_history)
5561 		qdf_atomic_init(&soc->rx_err_ring_history->index);
5562 
5563 	dp_soc_rx_reinject_ring_history_attach(soc);
5564 
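	/* Allocate one Rx refill ring history per pdev */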
5565 	for (i = 0; i < MAX_PDEV_CNT; i++) {
5566 		soc->rx_refill_ring_history[i] = dp_context_alloc_mem(
5567 						soc,
5568 						DP_RX_REFILL_RING_HIST_TYPE,
5569 						rx_refill_ring_hist_size);
5570 
5571 		if (soc->rx_refill_ring_history[i])
5572 			qdf_atomic_init(&soc->rx_refill_ring_history[i]->index);
5573 	}
5574 }
5575 
5576 static void dp_soc_rx_history_detach(struct dp_soc *soc)
5577 {
5578 	int i;
5579 
5580 	for (i = 0; i < MAX_REO_DEST_RINGS; i++)
5581 		dp_context_free_mem(soc, DP_RX_RING_HIST_TYPE,
5582 				    soc->rx_ring_history[i]);
5583 
5584 	dp_context_free_mem(soc, DP_RX_ERR_RING_HIST_TYPE,
5585 			    soc->rx_err_ring_history);
5586 
5587 	/*
5588 	 * No need for a featurized detach since qdf_mem_free takes
5589 	 * care of NULL pointer.
5590 	 */
5591 	dp_context_free_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
5592 			    soc->rx_reinject_ring_history);
5593 
5594 	for (i = 0; i < MAX_PDEV_CNT; i++)
5595 		dp_context_free_mem(soc, DP_RX_REFILL_RING_HIST_TYPE,
5596 				    soc->rx_refill_ring_history[i]);
5597 }
5598 
5599 #else
5600 static inline void dp_soc_rx_history_attach(struct dp_soc *soc)
5601 {
5602 }
5603 
5604 static inline void dp_soc_rx_history_detach(struct dp_soc *soc)
5605 {
5606 }
5607 #endif
5608 
5609 #ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
5610 /**
5611  * dp_soc_mon_status_ring_history_attach() - Attach the monitor status
5612  *					     buffer record history.
5613  * @soc: DP soc handle
5614  *
5615  * This function allocates memory to track the event for a monitor
5616  * status buffer, before its parsed and freed.
5617  *
5618  * Return: None
5619  */
5620 static void dp_soc_mon_status_ring_history_attach(struct dp_soc *soc)
5621 {
5622 	soc->mon_status_ring_history = dp_context_alloc_mem(soc,
5623 				DP_MON_STATUS_BUF_HIST_TYPE,
5624 				sizeof(struct dp_mon_status_ring_history));
5625 	if (!soc->mon_status_ring_history) {
5626 		dp_err("Failed to alloc memory for mon status ring history");
5627 		return;
5628 	}
5629 }
5630 
5631 /**
5632  * dp_soc_mon_status_ring_history_detach() - Detach the monitor status buffer
5633  *					     record history.
5634  * @soc: DP soc handle
5635  *
5636  * Return: None
5637  */
5638 static void dp_soc_mon_status_ring_history_detach(struct dp_soc *soc)
5639 {
5640 	dp_context_free_mem(soc, DP_MON_STATUS_BUF_HIST_TYPE,
5641 			    soc->mon_status_ring_history);
5642 }
5643 #else
5644 static void dp_soc_mon_status_ring_history_attach(struct dp_soc *soc)
5645 {
5646 }
5647 
5648 static void dp_soc_mon_status_ring_history_detach(struct dp_soc *soc)
5649 {
5650 }
5651 #endif
5652 
5653 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
5654 /**
5655  * dp_soc_tx_history_attach() - Attach the ring history record buffers
5656  * @soc: DP soc structure
5657  *
5658  * This function allocates the memory for recording the tx tcl ring and
5659  * the tx comp ring entries. There is no error returned in case
5660  * of allocation failure since the record function checks if the history is
5661  * initialized or not. We do not want to fail the driver load in case of
5662  * failure to allocate memory for debug history.
5663  *
 * Return: None
5665  */
5666 static void dp_soc_tx_history_attach(struct dp_soc *soc)
5667 {
5668 	dp_soc_frag_history_attach(soc, &soc->tx_tcl_history,
5669 				   DP_TX_TCL_HIST_MAX_SLOTS,
5670 				   DP_TX_TCL_HIST_PER_SLOT_MAX,
5671 				   sizeof(struct dp_tx_desc_event),
5672 				   true, DP_TX_TCL_HIST_TYPE);
5673 	dp_soc_frag_history_attach(soc, &soc->tx_comp_history,
5674 				   DP_TX_COMP_HIST_MAX_SLOTS,
5675 				   DP_TX_COMP_HIST_PER_SLOT_MAX,
5676 				   sizeof(struct dp_tx_desc_event),
5677 				   true, DP_TX_COMP_HIST_TYPE);
5678 }
5679 
5680 /**
5681  * dp_soc_tx_history_detach() - Detach the ring history record buffers
5682  * @soc: DP soc structure
5683  *
5684  * This function frees the memory for recording the tx tcl ring and
5685  * the tx comp ring entries.
5686  *
 * Return: None
5688  */
5689 static void dp_soc_tx_history_detach(struct dp_soc *soc)
5690 {
5691 	dp_soc_frag_history_detach(soc, &soc->tx_tcl_history,
5692 				   DP_TX_TCL_HIST_MAX_SLOTS,
5693 				   true, DP_TX_TCL_HIST_TYPE);
5694 	dp_soc_frag_history_detach(soc, &soc->tx_comp_history,
5695 				   DP_TX_COMP_HIST_MAX_SLOTS,
5696 				   true, DP_TX_COMP_HIST_TYPE);
5697 }
5698 
5699 #else
5700 static inline void dp_soc_tx_history_attach(struct dp_soc *soc)
5701 {
5702 }
5703 
5704 static inline void dp_soc_tx_history_detach(struct dp_soc *soc)
5705 {
5706 }
5707 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
5708 
/**
 * dp_pdev_attach_wifi3() - attach txrx pdev
 * @txrx_soc: Datapath SOC handle
 * @params: Params for PDEV attach
 *
 * Return: QDF_STATUS
 */
5716 static inline
5717 QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
5718 				struct cdp_pdev_attach_params *params)
5719 {
5720 	qdf_size_t pdev_context_size;
5721 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5722 	struct dp_pdev *pdev = NULL;
5723 	uint8_t pdev_id = params->pdev_id;
5724 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5725 	int nss_cfg;
5726 
5727 	pdev_context_size =
5728 		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_PDEV);
5729 	if (pdev_context_size)
5730 		pdev = dp_context_alloc_mem(soc, DP_PDEV_TYPE, pdev_context_size);
5731 
5732 	if (!pdev) {
5733 		dp_init_err("%pK: DP PDEV memory allocation failed",
5734 			    soc);
5735 		goto fail0;
5736 	}
5737 	wlan_minidump_log(pdev, sizeof(*pdev), soc->ctrl_psoc,
5738 			  WLAN_MD_DP_PDEV, "dp_pdev");
5739 
5740 	soc_cfg_ctx = soc->wlan_cfg_ctx;
5741 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
5742 
5743 	if (!pdev->wlan_cfg_ctx) {
5744 		dp_init_err("%pK: pdev cfg_attach failed", soc);
5745 		goto fail1;
5746 	}
5747 
5748 	/*
5749 	 * set nss pdev config based on soc config
5750 	 */
5751 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
5752 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
5753 					 (nss_cfg & (1 << pdev_id)));
5754 
5755 	pdev->soc = soc;
5756 	pdev->pdev_id = pdev_id;
5757 	soc->pdev_list[pdev_id] = pdev;
5758 
5759 	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
5760 	soc->pdev_count++;
5761 
5762 	/* Allocate memory for pdev srng rings */
5763 	if (dp_pdev_srng_alloc(pdev)) {
5764 		dp_init_err("%pK: dp_pdev_srng_alloc failed", soc);
5765 		goto fail2;
5766 	}
5767 
5768 	/* Setup second Rx refill buffer ring */
5769 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev)) {
5770 		dp_init_err("%pK: dp_srng_alloc failed rxrefill2 ring",
5771 			    soc);
5772 		goto fail3;
5773 	}
5774 
5775 	/* Allocate memory for pdev rxdma rings */
5776 	if (dp_rxdma_ring_alloc(soc, pdev)) {
5777 		dp_init_err("%pK: dp_rxdma_ring_alloc failed", soc);
5778 		goto fail4;
5779 	}
5780 
5781 	/* Rx specific init */
5782 	if (dp_rx_pdev_desc_pool_alloc(pdev)) {
		dp_init_err("%pK: dp_rx_pdev_desc_pool_alloc failed", soc);
5784 		goto fail4;
5785 	}
5786 
5787 	if (dp_monitor_pdev_attach(pdev)) {
5788 		dp_init_err("%pK: dp_monitor_pdev_attach failed", soc);
5789 		goto fail5;
5790 	}
5791 
5792 	soc->arch_ops.txrx_pdev_attach(pdev, params);
5793 
5794 	/* Setup third Rx refill buffer ring */
5795 	if (dp_setup_ipa_rx_alt_refill_buf_ring(soc, pdev)) {
5796 		dp_init_err("%pK: dp_srng_alloc failed rxrefill3 ring",
5797 			    soc);
5798 		goto fail6;
5799 	}
5800 
5801 	return QDF_STATUS_SUCCESS;
5802 
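	/* Error path: unwind completed allocations in reverse order */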
5803 fail6:
5804 	dp_monitor_pdev_detach(pdev);
5805 fail5:
5806 	dp_rx_pdev_desc_pool_free(pdev);
5807 fail4:
5808 	dp_rxdma_ring_free(pdev);
5809 	dp_free_ipa_rx_refill_buf_ring(soc, pdev);
5810 fail3:
5811 	dp_pdev_srng_free(pdev);
5812 fail2:
5813 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
5814 fail1:
5815 	soc->pdev_list[pdev_id] = NULL;
5816 	qdf_mem_free(pdev);
5817 fail0:
5818 	return QDF_STATUS_E_FAILURE;
5819 }
5820 
5821 /**
5822  * dp_pdev_flush_pending_vdevs() - Flush all delete pending vdevs in pdev
5823  * @pdev: Datapath PDEV handle
5824  *
 * This is the last chance to flush all pending dp vdevs/peers;
 * it covers peer/vdev leak cases such as non-SSR with a missing
 * peer unmap event.
5828  *
5829  * Return: None
5830  */
5831 static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
5832 {
5833 	struct dp_soc *soc = pdev->soc;
5834 	struct dp_vdev *vdev_arr[MAX_VDEV_CNT] = {0};
5835 	uint32_t i = 0;
5836 	uint32_t num_vdevs = 0;
5837 	struct dp_vdev *vdev = NULL;
5838 
5839 	if (TAILQ_EMPTY(&soc->inactive_vdev_list))
5840 		return;
5841 
5842 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
5843 	TAILQ_FOREACH(vdev, &soc->inactive_vdev_list,
5844 		      inactive_list_elem) {
5845 		if (vdev->pdev != pdev)
5846 			continue;
5847 
5848 		vdev_arr[num_vdevs] = vdev;
5849 		num_vdevs++;
5850 		/* take reference to free */
5851 		dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CDP);
5852 	}
5853 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
5854 
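	/* Flush peers and drop the references taken above, outside the lock */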
5855 	for (i = 0; i < num_vdevs; i++) {
5856 		dp_vdev_flush_peers((struct cdp_vdev *)vdev_arr[i], 0, 0);
5857 		dp_vdev_unref_delete(soc, vdev_arr[i], DP_MOD_ID_CDP);
5858 	}
5859 }
5860 
5861 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
5862 /**
5863  * dp_vdev_stats_hw_offload_target_config() - Send HTT command to FW
5864  *                                          for enable/disable of HW vdev stats
5865  * @soc: Datapath soc handle
5866  * @pdev_id: INVALID_PDEV_ID for all pdevs or 0,1,2 for individual pdev
5867  * @enable: flag to represent enable/disable of hw vdev stats
5868  *
5869  * Return: none
5870  */
5871 static void dp_vdev_stats_hw_offload_target_config(struct dp_soc *soc,
5872 						   uint8_t pdev_id,
5873 						   bool enable)
5874 {
5875 	/* Check SOC level config for HW offload vdev stats support */
5876 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
		dp_debug("%pK: HW vdev stats offload is disabled", soc);
5878 		return;
5879 	}
5880 
5881 	/* Send HTT command to FW for enable of stats */
5882 	dp_h2t_hw_vdev_stats_config_send(soc, pdev_id, enable, false, 0);
5883 }
5884 
5885 /**
5886  * dp_vdev_stats_hw_offload_target_clear() - Clear HW vdev stats on target
5887  * @soc: Datapath soc handle
5888  * @pdev_id: pdev_id (0,1,2)
 * @vdev_id_bitmask: bitmask with vdev_id(s) for which stats are to be
 *                   cleared on HW
5890  *
5891  * Return: none
5892  */
5893 static
5894 void dp_vdev_stats_hw_offload_target_clear(struct dp_soc *soc, uint8_t pdev_id,
5895 					   uint64_t vdev_id_bitmask)
5896 {
5897 	/* Check SOC level config for HW offload vdev stats support */
5898 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
		dp_debug("%pK: HW vdev stats offload is disabled", soc);
5900 		return;
5901 	}
5902 
5903 	/* Send HTT command to FW for reset of stats */
5904 	dp_h2t_hw_vdev_stats_config_send(soc, pdev_id, true, true,
5905 					 vdev_id_bitmask);
5906 }
5907 #else
5908 static void
5909 dp_vdev_stats_hw_offload_target_config(struct dp_soc *soc, uint8_t pdev_id,
5910 				       bool enable)
5911 {
5912 }
5913 
5914 static
5915 void dp_vdev_stats_hw_offload_target_clear(struct dp_soc *soc, uint8_t pdev_id,
5916 					   uint64_t vdev_id_bitmask)
5917 {
5918 }
5919 #endif /*QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT */
5920 
5921 /**
5922  * dp_pdev_deinit() - Deinit txrx pdev
5923  * @txrx_pdev: Datapath PDEV handle
5924  * @force: Force deinit
5925  *
5926  * Return: None
5927  */
5928 static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
5929 {
5930 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
5931 	qdf_nbuf_t curr_nbuf, next_nbuf;
5932 
5933 	if (pdev->pdev_deinit)
5934 		return;
5935 
5936 	dp_tx_me_exit(pdev);
5937 	dp_rx_fst_detach(pdev->soc, pdev);
5938 	dp_rx_pdev_buffers_free(pdev);
5939 	dp_rx_pdev_desc_pool_deinit(pdev);
5940 	dp_pdev_bkp_stats_detach(pdev);
5941 	qdf_event_destroy(&pdev->fw_peer_stats_event);
5942 	qdf_event_destroy(&pdev->fw_stats_event);
5943 	qdf_event_destroy(&pdev->fw_obss_stats_event);
5944 	if (pdev->sojourn_buf)
5945 		qdf_nbuf_free(pdev->sojourn_buf);
5946 
5947 	dp_pdev_flush_pending_vdevs(pdev);
5948 	dp_tx_desc_flush(pdev, NULL, true);
5949 
5950 	qdf_spinlock_destroy(&pdev->tx_mutex);
5951 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
5952 
5953 	dp_monitor_pdev_deinit(pdev);
5954 
5955 	dp_pdev_srng_deinit(pdev);
5956 
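	/* Detach the IPA uC path and tear down the IPA Rx refill rings */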
5957 	dp_ipa_uc_detach(pdev->soc, pdev);
5958 	dp_deinit_ipa_rx_alt_refill_buf_ring(pdev->soc, pdev);
5959 	dp_deinit_ipa_rx_refill_buf_ring(pdev->soc, pdev);
5960 	dp_rxdma_ring_cleanup(pdev->soc, pdev);
5961 
5962 	curr_nbuf = pdev->invalid_peer_head_msdu;
5963 	while (curr_nbuf) {
5964 		next_nbuf = qdf_nbuf_next(curr_nbuf);
5965 		dp_rx_nbuf_free(curr_nbuf);
5966 		curr_nbuf = next_nbuf;
5967 	}
5968 	pdev->invalid_peer_head_msdu = NULL;
5969 	pdev->invalid_peer_tail_msdu = NULL;
5970 
5971 	dp_wdi_event_detach(pdev);
5972 	pdev->pdev_deinit = 1;
5973 }
5974 
5975 /**
5976  * dp_pdev_deinit_wifi3() - Deinit txrx pdev
5977  * @psoc: Datapath psoc handle
5978  * @pdev_id: Id of datapath PDEV handle
5979  * @force: Force deinit
5980  *
5981  * Return: QDF_STATUS
5982  */
5983 static QDF_STATUS
5984 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
5985 		     int force)
5986 {
5987 	struct dp_pdev *txrx_pdev;
5988 
5989 	txrx_pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
5990 						       pdev_id);
5991 
5992 	if (!txrx_pdev)
5993 		return QDF_STATUS_E_FAILURE;
5994 
5995 	dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force);
5996 
5997 	return QDF_STATUS_SUCCESS;
5998 }
5999 
/**
6001  * dp_pdev_post_attach() - Do post pdev attach after dev_alloc_name
6002  * @txrx_pdev: Datapath PDEV handle
6003  *
6004  * Return: None
6005  */
6006 static void dp_pdev_post_attach(struct cdp_pdev *txrx_pdev)
6007 {
6008 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
6009 
6010 	dp_monitor_tx_capture_debugfs_init(pdev);
6011 
6012 	if (dp_pdev_htt_stats_dbgfs_init(pdev)) {
6013 		dp_init_err("%pK: Failed to initialize pdev HTT stats debugfs", pdev->soc);
6014 	}
6015 }
6016 
/**
 * dp_pdev_post_attach_wifi3() - post attach for txrx pdev
 * @soc: Datapath soc handle
 * @pdev_id: pdev id of pdev
 *
 * Return: QDF_STATUS
 */
6024 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *soc,
6025 				     uint8_t pdev_id)
6026 {
6027 	struct dp_pdev *pdev;
6028 
6029 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
6030 						  pdev_id);
6031 
6032 	if (!pdev) {
6033 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
6034 			    (struct dp_soc *)soc, pdev_id);
6035 		return QDF_STATUS_E_FAILURE;
6036 	}
6037 
6038 	dp_pdev_post_attach((struct cdp_pdev *)pdev);
6039 	return QDF_STATUS_SUCCESS;
6040 }
6041 
/**
6043  * dp_pdev_detach() - Complete rest of pdev detach
6044  * @txrx_pdev: Datapath PDEV handle
6045  * @force: Force deinit
6046  *
6047  * Return: None
6048  */
6049 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
6050 {
6051 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
6052 	struct dp_soc *soc = pdev->soc;
6053 
6054 	dp_pdev_htt_stats_dbgfs_deinit(pdev);
6055 	dp_rx_pdev_desc_pool_free(pdev);
6056 	dp_monitor_pdev_detach(pdev);
6057 	dp_rxdma_ring_free(pdev);
6058 	dp_free_ipa_rx_refill_buf_ring(soc, pdev);
6059 	dp_free_ipa_rx_alt_refill_buf_ring(soc, pdev);
6060 	dp_pdev_srng_free(pdev);
6061 
6062 	soc->pdev_count--;
6063 	soc->pdev_list[pdev->pdev_id] = NULL;
6064 
6065 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
6066 	wlan_minidump_remove(pdev, sizeof(*pdev), soc->ctrl_psoc,
6067 			     WLAN_MD_DP_PDEV, "dp_pdev");
6068 	dp_context_free_mem(soc, DP_PDEV_TYPE, pdev);
6069 }
6070 
/**
6072  * dp_pdev_detach_wifi3() - detach txrx pdev
6073  * @psoc: Datapath soc handle
6074  * @pdev_id: pdev id of pdev
6075  * @force: Force detach
6076  *
6077  * Return: QDF_STATUS
6078  */
6079 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
6080 				       int force)
6081 {
6082 	struct dp_pdev *pdev;
6083 	struct dp_soc *soc = (struct dp_soc *)psoc;
6084 
6085 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
6086 						  pdev_id);
6087 
6088 	if (!pdev) {
6089 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
6090 			    (struct dp_soc *)psoc, pdev_id);
6091 		return QDF_STATUS_E_FAILURE;
6092 	}
6093 
6094 	soc->arch_ops.txrx_pdev_detach(pdev);
6095 
6096 	dp_pdev_detach((struct cdp_pdev *)pdev, force);
6097 	return QDF_STATUS_SUCCESS;
6098 }
6099 
/**
 * dp_reo_desc_freelist_destroy() - Flush REO descriptors from the freelist
 * @soc: DP SOC handle
 *
 * Return: None
 */
6104 #ifndef DP_UMAC_HW_RESET_SUPPORT
6105 static inline
6106 #endif
6107 void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
6108 {
6109 	struct reo_desc_list_node *desc;
6110 	struct dp_rx_tid *rx_tid;
6111 
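	/* Drain the freelist: unmap and free each queued REO HW queue desc */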
6112 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
6113 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
6114 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
6115 		rx_tid = &desc->rx_tid;
6116 		qdf_mem_unmap_nbytes_single(soc->osdev,
6117 			rx_tid->hw_qdesc_paddr,
6118 			QDF_DMA_BIDIRECTIONAL,
6119 			rx_tid->hw_qdesc_alloc_size);
6120 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
6121 		qdf_mem_free(desc);
6122 	}
6123 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
6124 	qdf_list_destroy(&soc->reo_desc_freelist);
6125 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
6126 }
6127 
6128 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
/**
 * dp_reo_desc_deferred_freelist_create() - Initialize the resources used
 *                                          for the deferred REO desc list
 * @soc: Datapath soc handle
 *
 * Return: void
 */
6136 static void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
6137 {
6138 	qdf_spinlock_create(&soc->reo_desc_deferred_freelist_lock);
6139 	qdf_list_create(&soc->reo_desc_deferred_freelist,
6140 			REO_DESC_DEFERRED_FREELIST_SIZE);
6141 	soc->reo_desc_deferred_freelist_init = true;
6142 }
6143 
/**
 * dp_reo_desc_deferred_freelist_destroy() - walk the deferred free list and
 *                                           free the leftover REO QDESCs
 * @soc: Datapath soc handle
 *
 * Return: void
 */
6151 static void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
6152 {
6153 	struct reo_desc_deferred_freelist_node *desc;
6154 
6155 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
6156 	soc->reo_desc_deferred_freelist_init = false;
6157 	while (qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
6158 	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
6159 		qdf_mem_unmap_nbytes_single(soc->osdev,
6160 					    desc->hw_qdesc_paddr,
6161 					    QDF_DMA_BIDIRECTIONAL,
6162 					    desc->hw_qdesc_alloc_size);
6163 		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
6164 		qdf_mem_free(desc);
6165 	}
6166 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
6167 
6168 	qdf_list_destroy(&soc->reo_desc_deferred_freelist);
6169 	qdf_spinlock_destroy(&soc->reo_desc_deferred_freelist_lock);
6170 }
6171 #else
6172 static inline void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
6173 {
6174 }
6175 
6176 static inline void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
6177 {
6178 }
6179 #endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
6180 
/**
 * dp_soc_reset_txrx_ring_map() - reset tx ring map
 * @soc: DP SOC handle
 *
 * Return: None
 */
6186 static void dp_soc_reset_txrx_ring_map(struct dp_soc *soc)
6187 {
6188 	uint32_t i;
6189 
6190 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
6191 		soc->tx_ring_map[i] = 0;
6192 }
6193 
/**
 * dp_soc_print_inactive_objects() - prints inactive peer and vdev list
 * @soc: DP SOC handle
 *
 * Return: None
 */
6199 static void dp_soc_print_inactive_objects(struct dp_soc *soc)
6200 {
6201 	struct dp_peer *peer = NULL;
6202 	struct dp_peer *tmp_peer = NULL;
6203 	struct dp_vdev *vdev = NULL;
6204 	struct dp_vdev *tmp_vdev = NULL;
6205 	int i = 0;
6206 	uint32_t count;
6207 
6208 	if (TAILQ_EMPTY(&soc->inactive_peer_list) &&
6209 	    TAILQ_EMPTY(&soc->inactive_vdev_list))
6210 		return;
6211 
6212 	TAILQ_FOREACH_SAFE(peer, &soc->inactive_peer_list,
6213 			   inactive_list_elem, tmp_peer) {
6214 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
6215 			count = qdf_atomic_read(&peer->mod_refs[i]);
6216 			if (count)
6217 				DP_PRINT_STATS("peer %pK Module id %u ==> %u",
6218 					       peer, i, count);
6219 		}
6220 	}
6221 
6222 	TAILQ_FOREACH_SAFE(vdev, &soc->inactive_vdev_list,
6223 			   inactive_list_elem, tmp_vdev) {
6224 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
6225 			count = qdf_atomic_read(&vdev->mod_refs[i]);
6226 			if (count)
6227 				DP_PRINT_STATS("vdev %pK Module id %u ==> %u",
6228 					       vdev, i, count);
6229 		}
6230 	}
6231 	QDF_BUG(0);
6232 }
6233 
6234 /**
6235  * dp_soc_deinit() - Deinitialize txrx SOC
6236  * @txrx_soc: Opaque DP SOC handle
6237  *
6238  * Return: None
6239  */
6240 static void dp_soc_deinit(void *txrx_soc)
6241 {
6242 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6243 	struct htt_soc *htt_soc = soc->htt_handle;
6244 
6245 	qdf_atomic_set(&soc->cmn_init_done, 0);
6246 
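	/* Stop PPE-DS (if present) and the arch-specific datapath first */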
6247 	if (soc->arch_ops.txrx_soc_ppeds_stop)
6248 		soc->arch_ops.txrx_soc_ppeds_stop(soc);
6249 
6250 	soc->arch_ops.txrx_soc_deinit(soc);
6251 
6252 	dp_monitor_soc_deinit(soc);
6253 
6254 	/* free peer tables & AST tables allocated during peer_map_attach */
6255 	if (soc->peer_map_attach_success) {
6256 		dp_peer_find_detach(soc);
6257 		soc->arch_ops.txrx_peer_map_detach(soc);
6258 		soc->peer_map_attach_success = FALSE;
6259 	}
6260 
6261 	qdf_flush_work(&soc->htt_stats.work);
6262 	qdf_disable_work(&soc->htt_stats.work);
6263 
6264 	qdf_spinlock_destroy(&soc->htt_stats.lock);
6265 
6266 	dp_soc_reset_txrx_ring_map(soc);
6267 
6268 	dp_reo_desc_freelist_destroy(soc);
6269 	dp_reo_desc_deferred_freelist_destroy(soc);
6270 
6271 	DEINIT_RX_HW_STATS_LOCK(soc);
6272 
6273 	qdf_spinlock_destroy(&soc->ast_lock);
6274 
6275 	dp_peer_mec_spinlock_destroy(soc);
6276 
6277 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
6278 
6279 	qdf_nbuf_queue_free(&soc->invalid_buf_queue);
6280 
6281 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
6282 
6283 	qdf_spinlock_destroy(&soc->vdev_map_lock);
6284 
6285 	dp_reo_cmdlist_destroy(soc);
6286 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
6287 
6288 	dp_soc_tx_desc_sw_pools_deinit(soc);
6289 
6290 	dp_soc_srng_deinit(soc);
6291 
6292 	dp_hw_link_desc_ring_deinit(soc);
6293 
6294 	dp_soc_print_inactive_objects(soc);
6295 	qdf_spinlock_destroy(&soc->inactive_peer_list_lock);
6296 	qdf_spinlock_destroy(&soc->inactive_vdev_list_lock);
6297 
6298 	htt_soc_htc_dealloc(soc->htt_handle);
6299 
6300 	htt_soc_detach(htt_soc);
6301 
6302 	/* Free wbm sg list and reset flags in down path */
6303 	dp_rx_wbm_sg_list_deinit(soc);
6304 
6305 	wlan_minidump_remove(soc, sizeof(*soc), soc->ctrl_psoc,
6306 			     WLAN_MD_DP_SOC, "dp_soc");
6307 }
6308 
6309 /**
6310  * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
6311  * @txrx_soc: Opaque DP SOC handle
6312  *
6313  * Return: None
6314  */
6315 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc)
6316 {
6317 	dp_soc_deinit(txrx_soc);
6318 }
6319 
/**
6321  * dp_soc_detach() - Detach rest of txrx SOC
6322  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
6323  *
6324  * Return: None
6325  */
6326 static void dp_soc_detach(struct cdp_soc_t *txrx_soc)
6327 {
6328 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6329 
6330 	soc->arch_ops.txrx_soc_detach(soc);
6331 
6332 	dp_runtime_deinit();
6333 
6334 	dp_sysfs_deinitialize_stats(soc);
6335 	dp_soc_swlm_detach(soc);
6336 	dp_soc_tx_desc_sw_pools_free(soc);
6337 	dp_soc_srng_free(soc);
6338 	dp_hw_link_desc_ring_free(soc);
6339 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
6340 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
6341 	dp_soc_tx_hw_desc_history_detach(soc);
6342 	dp_soc_tx_history_detach(soc);
6343 	dp_soc_mon_status_ring_history_detach(soc);
6344 	dp_soc_rx_history_detach(soc);
6345 
6346 	if (!dp_monitor_modularized_enable()) {
6347 		dp_mon_soc_detach_wrapper(soc);
6348 	}
6349 
6350 	qdf_mem_free(soc->cdp_soc.ops);
6351 	qdf_mem_free(soc);
6352 }
6353 
/**
6355  * dp_soc_detach_wifi3() - Detach txrx SOC
6356  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
6357  *
6358  * Return: None
6359  */
6360 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc)
6361 {
6362 	dp_soc_detach(txrx_soc);
6363 }
6364 
/**
 * dp_rxdma_ring_config() - configure the RX DMA rings
 * @soc: data path SoC handle
 *
 * This function is used to configure the MAC rings.
 * On MCL, the host provides buffers in the Host2FW ring; FW refills
 * (copies) buffers into the ring and updates ring_idx in a register.
 *
 * Return: zero on success, non-zero on failure
 */
6377 #ifdef QCA_HOST2FW_RXBUF_RING
6378 static inline void
6379 dp_htt_setup_rxdma_err_dst_ring(struct dp_soc *soc, int mac_id,
6380 				int lmac_id)
6381 {
6382 	if (soc->rxdma_err_dst_ring[lmac_id].hal_srng)
6383 		htt_srng_setup(soc->htt_handle, mac_id,
6384 			       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
6385 			       RXDMA_DST);
6386 }
6387 
6388 #ifdef IPA_WDI3_VLAN_SUPPORT
6389 static inline
6390 void dp_rxdma_setup_refill_ring3(struct dp_soc *soc,
6391 				 struct dp_pdev *pdev,
6392 				 uint8_t idx)
6393 {
6394 	if (pdev->rx_refill_buf_ring3.hal_srng)
6395 		htt_srng_setup(soc->htt_handle, idx,
6396 			       pdev->rx_refill_buf_ring3.hal_srng,
6397 			       RXDMA_BUF);
6398 }
6399 #else
6400 static inline
6401 void dp_rxdma_setup_refill_ring3(struct dp_soc *soc,
6402 				 struct dp_pdev *pdev,
6403 				 uint8_t idx)
6404 { }
6405 #endif
6406 
6407 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
6408 {
6409 	int i;
6410 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6411 
6412 	for (i = 0; i < MAX_PDEV_CNT; i++) {
6413 		struct dp_pdev *pdev = soc->pdev_list[i];
6414 
6415 		if (pdev) {
6416 			int mac_id;
6417 			int max_mac_rings =
6418 				 wlan_cfg_get_num_mac_rings
6419 				(pdev->wlan_cfg_ctx);
6420 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);
6421 
6422 			htt_srng_setup(soc->htt_handle, i,
6423 				       soc->rx_refill_buf_ring[lmac_id]
6424 				       .hal_srng,
6425 				       RXDMA_BUF);
6426 
6427 			if (pdev->rx_refill_buf_ring2.hal_srng)
6428 				htt_srng_setup(soc->htt_handle, i,
6429 					       pdev->rx_refill_buf_ring2
6430 					       .hal_srng,
6431 					       RXDMA_BUF);
6432 
6433 			dp_rxdma_setup_refill_ring3(soc, pdev, i);
6434 
6435 			dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);
6436 			dp_err("pdev_id %d max_mac_rings %d",
6437 			       pdev->pdev_id, max_mac_rings);
6438 
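			/* Set up a Host2FW Rx buffer ring per MAC on this pdev */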
6439 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
6440 				int mac_for_pdev =
6441 					dp_get_mac_id_for_pdev(mac_id,
6442 							       pdev->pdev_id);
6443 				/*
6444 				 * Obtain lmac id from pdev to access the LMAC
6445 				 * ring in soc context
6446 				 */
6447 				lmac_id =
6448 				dp_get_lmac_id_for_pdev_id(soc,
6449 							   mac_id,
6450 							   pdev->pdev_id);
6451 				QDF_TRACE(QDF_MODULE_ID_TXRX,
6452 					 QDF_TRACE_LEVEL_ERROR,
6453 					 FL("mac_id %d"), mac_for_pdev);
6454 
6455 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
6456 					 pdev->rx_mac_buf_ring[mac_id]
6457 						.hal_srng,
6458 					 RXDMA_BUF);
6459 
6460 				if (!soc->rxdma2sw_rings_not_supported)
6461 					dp_htt_setup_rxdma_err_dst_ring(soc,
6462 						mac_for_pdev, lmac_id);
6463 
6464 				/* Configure monitor mode rings */
6465 				status = dp_monitor_htt_srng_setup(soc, pdev,
6466 								   lmac_id,
6467 								   mac_for_pdev);
6468 				if (status != QDF_STATUS_SUCCESS) {
6469 					dp_err("Failed to send htt monitor messages to target");
6470 					return status;
6471 				}
6472 
6473 			}
6474 		}
6475 	}
6476 
6477 	dp_reap_timer_init(soc);
6478 	return status;
6479 }
6480 #else
6481 /* This is only for WIN */
6482 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
6483 {
6484 	int i;
6485 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6486 	int mac_for_pdev;
6487 	int lmac_id;
6488 
6489 	/* Configure monitor mode rings */
6490 	dp_monitor_soc_htt_srng_setup(soc);
6491 
6492 	for (i = 0; i < MAX_PDEV_CNT; i++) {
6493 		struct dp_pdev *pdev =  soc->pdev_list[i];
6494 
6495 		if (!pdev)
6496 			continue;
6497 
6498 		mac_for_pdev = i;
6499 		lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);
6500 
6501 		if (soc->rx_refill_buf_ring[lmac_id].hal_srng)
6502 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
6503 				       soc->rx_refill_buf_ring[lmac_id].
6504 				       hal_srng, RXDMA_BUF);
6505 
6506 		/* Configure monitor mode rings */
6507 		dp_monitor_htt_srng_setup(soc, pdev,
6508 					  lmac_id,
6509 					  mac_for_pdev);
6510 		if (!soc->rxdma2sw_rings_not_supported)
6511 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
6512 				       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
6513 				       RXDMA_DST);
6514 	}
6515 
6516 	dp_reap_timer_init(soc);
6517 	return status;
6518 }
6519 #endif
6520 
/**
 * dp_rx_target_fst_config() - configure the RXOLE Flow Search Engine
 * @soc: data path SoC handle
 *
 * This function is used to configure the FSE HW block in RX OLE on a
 * per-pdev basis. Here, we will be programming parameters related to
 * the Flow Search Table.
 *
 * Return: zero on success, non-zero on failure
 */
6532 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
6533 static QDF_STATUS
6534 dp_rx_target_fst_config(struct dp_soc *soc)
6535 {
6536 	int i;
6537 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6538 
6539 	for (i = 0; i < MAX_PDEV_CNT; i++) {
6540 		struct dp_pdev *pdev = soc->pdev_list[i];
6541 
6542 		/* Flow search is not enabled if NSS offload is enabled */
6543 		if (pdev &&
6544 		    !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
6545 			status = dp_rx_flow_send_fst_fw_setup(pdev->soc, pdev);
6546 			if (status != QDF_STATUS_SUCCESS)
6547 				break;
6548 		}
6549 	}
6550 	return status;
6551 }
6552 #elif defined(WLAN_SUPPORT_RX_FISA)
6553 /**
6554  * dp_rx_target_fst_config() - Configure RX OLE FSE engine in HW
6555  * @soc: SoC handle
6556  *
6557  * Return: Success
6558  */
6559 static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
6560 {
6561 	QDF_STATUS status;
6562 	struct dp_rx_fst *fst = soc->rx_fst;
6563 
6564 	/* Check if it is enabled in the INI */
6565 	if (!soc->fisa_enable) {
6566 		dp_err("RX FISA feature is disabled");
6567 		return QDF_STATUS_E_NOSUPPORT;
6568 	}
6569 
6570 	status = dp_rx_flow_send_fst_fw_setup(soc, soc->pdev_list[0]);
6571 	if (QDF_IS_STATUS_ERROR(status)) {
6572 		dp_err("dp_rx_flow_send_fst_fw_setup failed %d",
6573 		       status);
6574 		return status;
6575 	}
6576 
6577 	if (soc->fst_cmem_base) {
6578 		soc->fst_in_cmem = true;
6579 		dp_rx_fst_update_cmem_params(soc, fst->max_entries,
6580 					     soc->fst_cmem_base & 0xffffffff,
6581 					     soc->fst_cmem_base >> 32);
6582 	}
6583 	return status;
6584 }
6585 
6586 #define FISA_MAX_TIMEOUT 0xffffffff
6587 #define FISA_DISABLE_TIMEOUT 0
6588 static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
6589 {
6590 	struct dp_htt_rx_fisa_cfg fisa_config;
6591 
6592 	fisa_config.pdev_id = 0;
6593 	fisa_config.fisa_timeout = FISA_MAX_TIMEOUT;
6594 
6595 	return dp_htt_rx_fisa_config(soc->pdev_list[0], &fisa_config);
6596 }
6597 
6598 #else /* !WLAN_SUPPORT_RX_FISA */
6599 static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
6600 {
6601 	return QDF_STATUS_SUCCESS;
6602 }
6603 #endif /* !WLAN_SUPPORT_RX_FISA */
6604 
6605 #ifndef WLAN_SUPPORT_RX_FISA
6606 static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
6607 {
6608 	return QDF_STATUS_SUCCESS;
6609 }
6610 
6611 static QDF_STATUS dp_rx_dump_fisa_stats(struct dp_soc *soc)
6612 {
6613 	return QDF_STATUS_SUCCESS;
6614 }
6615 
6616 static void dp_rx_dump_fisa_table(struct dp_soc *soc)
6617 {
6618 }
6619 
6620 static void dp_suspend_fse_cache_flush(struct dp_soc *soc)
6621 {
6622 }
6623 
6624 static void dp_resume_fse_cache_flush(struct dp_soc *soc)
6625 {
6626 }
6627 #endif /* !WLAN_SUPPORT_RX_FISA */
6628 
6629 #ifndef WLAN_DP_FEATURE_SW_LATENCY_MGR
6630 static inline QDF_STATUS dp_print_swlm_stats(struct dp_soc *soc)
6631 {
6632 	return QDF_STATUS_SUCCESS;
6633 }
6634 #endif /* !WLAN_DP_FEATURE_SW_LATENCY_MGR */
6635 
6636 #ifdef WLAN_SUPPORT_PPEDS
/**
 * dp_soc_target_ppe_rxole_rxdma_cfg() - Configure the RxOLE and RxDMA for PPE
6639  * @soc: DP Tx/Rx handle
6640  *
6641  * Return: QDF_STATUS
6642  */
6643 static
6644 QDF_STATUS dp_soc_target_ppe_rxole_rxdma_cfg(struct dp_soc *soc)
6645 {
6646 	struct dp_htt_rxdma_rxole_ppe_config htt_cfg = {0};
6647 	QDF_STATUS status;
6648 
6649 	/*
6650 	 * Program RxDMA to override the reo destination indication
6651 	 * with REO2PPE_DST_IND, when use_ppe is set to 1 in RX_MSDU_END,
6652 	 * thereby driving the packet to REO2PPE ring.
6653 	 * If the MSDU is spanning more than 1 buffer, then this
6654 	 * override is not done.
6655 	 */
6656 	htt_cfg.override = 1;
6657 	htt_cfg.reo_destination_indication = REO2PPE_DST_IND;
6658 	htt_cfg.multi_buffer_msdu_override_en = 0;
6659 
6660 	/*
6661 	 * Override use_ppe to 0 in RxOLE for the following
6662 	 * cases.
6663 	 */
6664 	htt_cfg.intra_bss_override = 1;
6665 	htt_cfg.decap_raw_override = 1;
6666 	htt_cfg.decap_nwifi_override = 1;
6667 	htt_cfg.ip_frag_override = 1;
6668 
6669 	status = dp_htt_rxdma_rxole_ppe_cfg_set(soc, &htt_cfg);
6670 	if (status != QDF_STATUS_SUCCESS)
6671 		dp_err("RxOLE and RxDMA PPE config failed %d", status);
6672 
6673 	return status;
6674 }
6675 #else
6676 static inline
6677 QDF_STATUS dp_soc_target_ppe_rxole_rxdma_cfg(struct dp_soc *soc)
6678 {
6679 	return QDF_STATUS_SUCCESS;
6680 }
6681 #endif /* WLAN_SUPPORT_PPEDS */
6682 
6683 #ifdef DP_UMAC_HW_RESET_SUPPORT
6684 static void dp_register_umac_reset_handlers(struct dp_soc *soc)
6685 {
6686 	dp_umac_reset_register_rx_action_callback(soc,
6687 		dp_umac_reset_handle_pre_reset, UMAC_RESET_ACTION_DO_PRE_RESET);
6688 
6689 	dp_umac_reset_register_rx_action_callback(soc,
6690 					dp_umac_reset_handle_post_reset,
6691 					UMAC_RESET_ACTION_DO_POST_RESET_START);
6692 
6693 	dp_umac_reset_register_rx_action_callback(soc,
6694 				dp_umac_reset_handle_post_reset_complete,
6695 				UMAC_RESET_ACTION_DO_POST_RESET_COMPLETE);
}
6698 #else
6699 static void dp_register_umac_reset_handlers(struct dp_soc *soc)
6700 {
6701 }
6702 #endif

/**
6704  * dp_soc_attach_target_wifi3() - SOC initialization in the target
6705  * @cdp_soc: Opaque Datapath SOC handle
6706  *
6707  * Return: zero on success, non-zero on failure
6708  */
6709 static QDF_STATUS
6710 dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
6711 {
6712 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
6713 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6714 	struct hal_reo_params reo_params;
6715 
6716 	htt_soc_attach_target(soc->htt_handle);
6717 
6718 	status = dp_soc_target_ppe_rxole_rxdma_cfg(soc);
6719 	if (status != QDF_STATUS_SUCCESS) {
6720 		dp_err("Failed to send htt RxOLE and RxDMA messages to target");
6721 		return status;
6722 	}
6723 
6724 	status = dp_rxdma_ring_config(soc);
6725 	if (status != QDF_STATUS_SUCCESS) {
6726 		dp_err("Failed to send htt srng setup messages to target");
6727 		return status;
6728 	}
6729 
6730 	status = soc->arch_ops.dp_rxdma_ring_sel_cfg(soc);
6731 	if (status != QDF_STATUS_SUCCESS) {
6732 		dp_err("Failed to send htt ring config message to target");
6733 		return status;
6734 	}
6735 
6736 	status = dp_soc_umac_reset_init(soc);
6737 	if (status != QDF_STATUS_SUCCESS &&
6738 	    status != QDF_STATUS_E_NOSUPPORT) {
6739 		dp_err("Failed to initialize UMAC reset");
6740 		return status;
6741 	}
6742 
6743 	dp_register_umac_reset_handlers(soc);
6744 
6745 	status = dp_rx_target_fst_config(soc);
6746 	if (status != QDF_STATUS_SUCCESS &&
6747 	    status != QDF_STATUS_E_NOSUPPORT) {
6748 		dp_err("Failed to send htt fst setup config message to target");
6749 		return status;
6750 	}
6751 
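	/* Configure FISA only when FST setup was actually performed */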
6752 	if (status == QDF_STATUS_SUCCESS) {
6753 		status = dp_rx_fisa_config(soc);
6754 		if (status != QDF_STATUS_SUCCESS) {
6755 			dp_err("Failed to send htt FISA config message to target");
6756 			return status;
6757 		}
6758 	}
6759 
6760 	DP_STATS_INIT(soc);
6761 
6762 	dp_runtime_init(soc);
6763 
6764 	/* Enable HW vdev offload stats if feature is supported */
6765 	dp_vdev_stats_hw_offload_target_config(soc, INVALID_PDEV_ID, true);
6766 
6767 	/* initialize work queue for stats processing */
6768 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
6769 
6770 	wlan_cfg_soc_update_tgt_params(soc->wlan_cfg_ctx,
6771 				       soc->ctrl_psoc);
6772 	/* Setup HW REO */
6773 	qdf_mem_zero(&reo_params, sizeof(reo_params));
6774 
6775 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
6776 		/*
6777 		 * Reo ring remap is not required if both radios
6778 		 * are offloaded to NSS
6779 		 */
6780 
6781 		if (soc->arch_ops.reo_remap_config(soc, &reo_params.remap0,
6782 						   &reo_params.remap1,
6783 						   &reo_params.remap2))
6784 			reo_params.rx_hash_enabled = true;
6785 		else
6786 			reo_params.rx_hash_enabled = false;
6787 	}
6788 
6789 	/*
6790 	 * set the fragment destination ring
6791 	 */
6792 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
6793 
6794 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
6795 		reo_params.alt_dst_ind_0 = REO_REMAP_RELEASE;
6796 
6797 	hal_reo_setup(soc->hal_soc, &reo_params, 1);
6798 
6799 	hal_reo_set_err_dst_remap(soc->hal_soc);
6800 
6801 	soc->features.pn_in_reo_dest = hal_reo_enable_pn_in_dest(soc->hal_soc);
6802 
6803 	return QDF_STATUS_SUCCESS;
6804 }
6805 
/**
6807  * dp_vdev_id_map_tbl_add() - Add vdev into vdev_id table
6808  * @soc: SoC handle
6809  * @vdev: vdev handle
6810  * @vdev_id: vdev_id
6811  *
6812  * Return: None
6813  */
6814 static void dp_vdev_id_map_tbl_add(struct dp_soc *soc,
6815 				   struct dp_vdev *vdev,
6816 				   uint8_t vdev_id)
6817 {
	QDF_ASSERT(vdev_id < MAX_VDEV_CNT);
6819 
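	/* The map entry holds a DP_MOD_ID_CONFIG reference until removal */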
6820 	qdf_spin_lock_bh(&soc->vdev_map_lock);
6821 
6822 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
6823 			QDF_STATUS_SUCCESS) {
6824 		dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK vdev_id %u",
6825 			     soc, vdev, vdev_id);
6826 		qdf_spin_unlock_bh(&soc->vdev_map_lock);
6827 		return;
6828 	}
6829 
6830 	if (!soc->vdev_id_map[vdev_id])
6831 		soc->vdev_id_map[vdev_id] = vdev;
6832 	else
6833 		QDF_ASSERT(0);
6834 
6835 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
6836 }
6837 
/**
6839  * dp_vdev_id_map_tbl_remove() - remove vdev from vdev_id table
6840  * @soc: SoC handle
6841  * @vdev: vdev handle
6842  *
6843  * Return: None
6844  */
6845 static void dp_vdev_id_map_tbl_remove(struct dp_soc *soc,
6846 				      struct dp_vdev *vdev)
6847 {
6848 	qdf_spin_lock_bh(&soc->vdev_map_lock);
6849 	QDF_ASSERT(soc->vdev_id_map[vdev->vdev_id] == vdev);
6850 
6851 	soc->vdev_id_map[vdev->vdev_id] = NULL;
6852 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
6853 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
6854 }
6855 
/**
 * dp_vdev_pdev_list_add() - add vdev into pdev's list
 * @soc: soc handle
 * @pdev: pdev handle
 * @vdev: vdev handle
 *
 * Return: None
 */
6864 static void dp_vdev_pdev_list_add(struct dp_soc *soc,
6865 				  struct dp_pdev *pdev,
6866 				  struct dp_vdev *vdev)
6867 {
6868 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
6869 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
6870 			QDF_STATUS_SUCCESS) {
6871 		dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK",
6872 			     soc, vdev);
6873 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6874 		return;
6875 	}
6876 	/* add this vdev into the pdev's list */
6877 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
6878 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6879 }
6880 
/**
 * dp_vdev_pdev_list_remove() - remove vdev from pdev's list
 * @soc: SoC handle
 * @pdev: pdev handle
 * @vdev: VDEV handle
 *
 * Return: None
 */
6889 static void dp_vdev_pdev_list_remove(struct dp_soc *soc,
6890 				     struct dp_pdev *pdev,
6891 				     struct dp_vdev *vdev)
6892 {
6893 	uint8_t found = 0;
6894 	struct dp_vdev *tmpvdev = NULL;
6895 
6896 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
6897 	TAILQ_FOREACH(tmpvdev, &pdev->vdev_list, vdev_list_elem) {
6898 		if (tmpvdev == vdev) {
6899 			found = 1;
6900 			break;
6901 		}
6902 	}
6903 
6904 	if (found) {
6905 		TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
6906 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
6907 	} else {
6908 		dp_vdev_debug("%pK: vdev:%pK not found in pdev:%pK vdevlist:%pK",
6909 			      soc, vdev, pdev, &pdev->vdev_list);
6910 		QDF_ASSERT(0);
6911 	}
6912 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6913 }
6914 
6915 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
/**
 * dp_vdev_init_rx_eapol() - initialize osif_rx_eapol
6918  * @vdev: Datapath VDEV handle
6919  *
6920  * Return: None
6921  */
6922 static inline void dp_vdev_init_rx_eapol(struct dp_vdev *vdev)
6923 {
6924 	vdev->osif_rx_eapol = NULL;
6925 }
6926 
/**
6928  * dp_vdev_register_rx_eapol() - Register VDEV operations for rx_eapol
6929  * @vdev: DP vdev handle
6930  * @txrx_ops: Tx and Rx operations
6931  *
6932  * Return: None
6933  */
6934 static inline void dp_vdev_register_rx_eapol(struct dp_vdev *vdev,
6935 					     struct ol_txrx_ops *txrx_ops)
6936 {
6937 	vdev->osif_rx_eapol = txrx_ops->rx.rx_eapol;
6938 }
6939 #else
6940 static inline void dp_vdev_init_rx_eapol(struct dp_vdev *vdev)
6941 {
6942 }
6943 
6944 static inline void dp_vdev_register_rx_eapol(struct dp_vdev *vdev,
6945 					     struct ol_txrx_ops *txrx_ops)
6946 {
6947 }
6948 #endif
6949 
6950 #ifdef WLAN_FEATURE_11BE_MLO
6951 #if defined(WLAN_MLO_MULTI_CHIP) && defined(WLAN_MCAST_MLO)
6952 static inline void dp_vdev_save_mld_info(struct dp_vdev *vdev,
6953 					 struct cdp_vdev_info *vdev_info)
6954 {
6955 	if (qdf_is_macaddr_zero((struct qdf_mac_addr *)vdev_info->mld_mac_addr))
6956 		vdev->mlo_vdev = false;
6957 	else
6958 		vdev->mlo_vdev = true;
6959 }
6960 #else
6961 static inline void dp_vdev_save_mld_info(struct dp_vdev *vdev,
6962 					 struct cdp_vdev_info *vdev_info)
6963 {
6964 }
6965 #endif
6966 static inline void dp_vdev_save_mld_addr(struct dp_vdev *vdev,
6967 					 struct cdp_vdev_info *vdev_info)
6968 {
6969 	if (vdev_info->mld_mac_addr)
6970 		qdf_mem_copy(&vdev->mld_mac_addr.raw[0],
6971 			     vdev_info->mld_mac_addr, QDF_MAC_ADDR_SIZE);
6972 
6973 	dp_vdev_save_mld_info(vdev, vdev_info);
}
6976 #else
6977 static inline void dp_vdev_save_mld_addr(struct dp_vdev *vdev,
6978 					 struct cdp_vdev_info *vdev_info)
6979 {
}
6982 #endif
6983 
6984 #ifdef DP_TRAFFIC_END_INDICATION
/**
 * dp_tx_vdev_traffic_end_indication_attach() - Initialize data end indication
 *                                              related members in VDEV
6988  * @vdev: DP vdev handle
6989  *
6990  * Return: None
6991  */
6992 static inline void
6993 dp_tx_vdev_traffic_end_indication_attach(struct dp_vdev *vdev)
6994 {
6995 	qdf_nbuf_queue_init(&vdev->end_ind_pkt_q);
6996 }
6997 
/**
6999  * dp_tx_vdev_traffic_end_indication_detach() - De-init data end indication
7000  *                                              related members in VDEV
7001  * @vdev: DP vdev handle
7002  *
7003  * Return: None
7004  */
7005 static inline void
7006 dp_tx_vdev_traffic_end_indication_detach(struct dp_vdev *vdev)
7007 {
7008 	qdf_nbuf_t nbuf;
7009 
7010 	while ((nbuf = qdf_nbuf_queue_remove(&vdev->end_ind_pkt_q)) != NULL)
7011 		qdf_nbuf_free(nbuf);
7012 }
7013 #else
7014 static inline void
7015 dp_tx_vdev_traffic_end_indication_attach(struct dp_vdev *vdev)
7016 {}
7017 
7018 static inline void
7019 dp_tx_vdev_traffic_end_indication_detach(struct dp_vdev *vdev)
7020 {}
7021 #endif
7022 
/**
 * dp_vdev_attach_wifi3() - attach txrx vdev
 * @cdp_soc: Datapath SOC handle
 * @pdev_id: PDEV ID for vdev creation
 * @vdev_info: parameters used for vdev creation
 *
 * Return: QDF_STATUS
 */
7031 static QDF_STATUS dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc,
7032 				       uint8_t pdev_id,
7033 				       struct cdp_vdev_info *vdev_info)
7034 {
7035 	int i = 0;
7036 	qdf_size_t vdev_context_size;
7037 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
7038 	struct dp_pdev *pdev =
7039 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7040 						   pdev_id);
7041 	struct dp_vdev *vdev;
7042 	uint8_t *vdev_mac_addr = vdev_info->vdev_mac_addr;
7043 	uint8_t vdev_id = vdev_info->vdev_id;
7044 	enum wlan_op_mode op_mode = vdev_info->op_mode;
7045 	enum wlan_op_subtype subtype = vdev_info->subtype;
7046 	uint8_t vdev_stats_id = vdev_info->vdev_stats_id;
7047 
7048 	vdev_context_size =
7049 		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_VDEV);
7050 	vdev = qdf_mem_malloc(vdev_context_size);
7051 
7052 	if (!pdev) {
7053 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
7054 			    cdp_soc, pdev_id);
7055 		qdf_mem_free(vdev);
7056 		goto fail0;
7057 	}
7058 
7059 	if (!vdev) {
7060 		dp_init_err("%pK: DP VDEV memory allocation failed",
7061 			    cdp_soc);
7062 		goto fail0;
7063 	}
7064 
7065 	wlan_minidump_log(vdev, sizeof(*vdev), soc->ctrl_psoc,
7066 			  WLAN_MD_DP_VDEV, "dp_vdev");
7067 
7068 	vdev->pdev = pdev;
7069 	vdev->vdev_id = vdev_id;
7070 	vdev->vdev_stats_id = vdev_stats_id;
7071 	vdev->opmode = op_mode;
7072 	vdev->subtype = subtype;
7073 	vdev->osdev = soc->osdev;
7074 
7075 	vdev->osif_rx = NULL;
7076 	vdev->osif_rsim_rx_decap = NULL;
7077 	vdev->osif_get_key = NULL;
7078 	vdev->osif_tx_free_ext = NULL;
7079 	vdev->osif_vdev = NULL;
7080 
7081 	vdev->delete.pending = 0;
7082 	vdev->safemode = 0;
7083 	vdev->drop_unenc = 1;
7084 	vdev->sec_type = cdp_sec_type_none;
7085 	vdev->multipass_en = false;
7086 	vdev->wrap_vdev = false;
7087 	dp_vdev_init_rx_eapol(vdev);
7088 	qdf_atomic_init(&vdev->ref_cnt);
7089 	for (i = 0; i < DP_MOD_ID_MAX; i++)
7090 		qdf_atomic_init(&vdev->mod_refs[i]);
7091 
	/* Take one reference for create */
7093 	qdf_atomic_inc(&vdev->ref_cnt);
7094 	qdf_atomic_inc(&vdev->mod_refs[DP_MOD_ID_CONFIG]);
7095 	vdev->num_peers = 0;
7096 #ifdef notyet
7097 	vdev->filters_num = 0;
7098 #endif
7099 	vdev->lmac_id = pdev->lmac_id;
7100 
7101 	qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE);
7102 
7103 	dp_vdev_save_mld_addr(vdev, vdev_info);
7104 
7105 	/* TODO: Initialize default HTT meta data that will be used in
7106 	 * TCL descriptors for packets transmitted from this VDEV
7107 	 */
7108 
7109 	qdf_spinlock_create(&vdev->peer_list_lock);
7110 	TAILQ_INIT(&vdev->peer_list);
7111 	dp_peer_multipass_list_init(vdev);
7112 	if ((soc->intr_mode == DP_INTR_POLL) &&
7113 	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
7114 		if ((pdev->vdev_count == 0) ||
7115 		    (wlan_op_mode_monitor == vdev->opmode))
7116 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7117 	} else if (dp_soc_get_con_mode(soc) == QDF_GLOBAL_MISSION_MODE &&
7118 		   soc->intr_mode == DP_INTR_MSI &&
7119 		   wlan_op_mode_monitor == vdev->opmode) {
7120 		/* Timer to reap status ring in mission mode */
7121 		dp_monitor_vdev_timer_start(soc);
7122 	}
7123 
7124 	dp_vdev_id_map_tbl_add(soc, vdev, vdev_id);
7125 
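	/* Monitor vdevs take a shortened init path and return early */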
7126 	if (wlan_op_mode_monitor == vdev->opmode) {
7127 		if (dp_monitor_vdev_attach(vdev) == QDF_STATUS_SUCCESS) {
7128 			dp_monitor_pdev_set_mon_vdev(vdev);
7129 			return dp_monitor_vdev_set_monitor_mode_buf_rings(pdev);
7130 		}
7131 		return QDF_STATUS_E_FAILURE;
7132 	}
7133 
7134 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
7135 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
7136 	vdev->dscp_tid_map_id = 0;
7137 	vdev->mcast_enhancement_en = 0;
7138 	vdev->igmp_mcast_enhanc_en = 0;
7139 	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
7140 	vdev->prev_tx_enq_tstamp = 0;
7141 	vdev->prev_rx_deliver_tstamp = 0;
7142 	vdev->skip_sw_tid_classification = DP_TX_HW_DSCP_TID_MAP_VALID;
7143 	dp_tx_vdev_traffic_end_indication_attach(vdev);
7144 
7145 	dp_vdev_pdev_list_add(soc, pdev, vdev);
7146 	pdev->vdev_count++;
7147 
7148 	if (wlan_op_mode_sta != vdev->opmode &&
7149 	    wlan_op_mode_ndi != vdev->opmode)
7150 		vdev->ap_bridge_enabled = true;
7151 	else
7152 		vdev->ap_bridge_enabled = false;
	dp_init_info("%pK: ap_bridge_enabled %d",
7154 		     cdp_soc, vdev->ap_bridge_enabled);
7155 
7156 	dp_tx_vdev_attach(vdev);
7157 
7158 	dp_monitor_vdev_attach(vdev);
7159 	if (!pdev->is_lro_hash_configured) {
7160 		if (QDF_IS_STATUS_SUCCESS(dp_lro_hash_setup(soc, pdev)))
7161 			pdev->is_lro_hash_configured = true;
7162 		else
7163 			dp_err("LRO hash setup failure!");
7164 	}
7165 
7166 	dp_info("Created vdev %pK ("QDF_MAC_ADDR_FMT")", vdev,
7167 		QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
7168 	DP_STATS_INIT(vdev);
7169 
7170 	if (QDF_IS_STATUS_ERROR(soc->arch_ops.txrx_vdev_attach(soc, vdev)))
7171 		goto fail0;
7172 
7173 	if (wlan_op_mode_sta == vdev->opmode)
7174 		dp_peer_create_wifi3((struct cdp_soc_t *)soc, vdev_id,
7175 				     vdev->mac_addr.raw, CDP_LINK_PEER_TYPE);
7176 
7177 	dp_pdev_update_fast_rx_flag(soc, pdev);
7178 
7179 	return QDF_STATUS_SUCCESS;
7180 
7181 fail0:
7182 	return QDF_STATUS_E_FAILURE;
7183 }
7184 
7185 #ifndef QCA_HOST_MODE_WIFI_DISABLED
7186 /**
7187  * dp_vdev_fetch_tx_handler() - Fetch Tx handlers
7188  * @vdev: struct dp_vdev *
7189  * @soc: struct dp_soc *
7190  * @ctx: struct ol_txrx_hardtart_ctxt *
7191  */
7192 static inline void dp_vdev_fetch_tx_handler(struct dp_vdev *vdev,
7193 					    struct dp_soc *soc,
7194 					    struct ol_txrx_hardtart_ctxt *ctx)
7195 {
7196 	/* Enable the vdev_id check only for AP, if the flag is enabled */
7197 	if (vdev->mesh_vdev)
7198 		ctx->tx = dp_tx_send_mesh;
7199 	else if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
7200 		 (vdev->opmode == wlan_op_mode_ap)) {
7201 		ctx->tx = dp_tx_send_vdev_id_check;
7202 		ctx->tx_fast = dp_tx_send_vdev_id_check;
7203 	} else {
7204 		ctx->tx = dp_tx_send;
7205 		ctx->tx_fast = soc->arch_ops.dp_tx_send_fast;
7206 	}
7207 
7208 	/* Avoid the vdev_id check in the regular exception path */
7209 	if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
7210 	    (vdev->opmode == wlan_op_mode_ap))
7211 		ctx->tx_exception = dp_tx_send_exception_vdev_id_check;
7212 	else
7213 		ctx->tx_exception = dp_tx_send_exception;
7214 }
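
/*
 * Note on the three handlers fetched above: ctx->tx is the regular
 * hard-start transmit path, ctx->tx_fast is the arch-specific fast
 * transmit path, and ctx->tx_exception covers frames sent via the
 * exception path. When the per-packet vdev_id check is enabled for
 * AP mode, the tx/tx_fast and tx_exception handlers are replaced by
 * their vdev_id-check variants so that packets carrying a stale
 * vdev_id are caught on a per-packet basis.
 */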
7215 
7216 /**
7217  * dp_vdev_register_tx_handler() - Register Tx handler
7218  * @vdev: struct dp_vdev *
7219  * @soc: struct dp_soc *
7220  * @txrx_ops: struct ol_txrx_ops *
7221  */
7222 static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
7223 					       struct dp_soc *soc,
7224 					       struct ol_txrx_ops *txrx_ops)
7225 {
7226 	struct ol_txrx_hardtart_ctxt ctx = {0};
7227 
7228 	dp_vdev_fetch_tx_handler(vdev, soc, &ctx);
7229 
7230 	txrx_ops->tx.tx = ctx.tx;
7231 	txrx_ops->tx.tx_fast = ctx.tx_fast;
7232 	txrx_ops->tx.tx_exception = ctx.tx_exception;
7233 
7234 	dp_info("Configure tx_vdev_id_chk_handler Feature Flag: %d and mode:%d for vdev_id:%d",
7235 		wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx),
7236 		vdev->opmode, vdev->vdev_id);
7237 }
7238 #else /* QCA_HOST_MODE_WIFI_DISABLED */
7239 static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
7240 					       struct dp_soc *soc,
7241 					       struct ol_txrx_ops *txrx_ops)
7242 {
7243 }
7244 
7245 static inline void dp_vdev_fetch_tx_handler(struct dp_vdev *vdev,
7246 					    struct dp_soc *soc,
7247 					    struct ol_txrx_hardtart_ctxt *ctx)
7248 {
7249 }
7250 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
7251 
7252 /**
7253  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
7254  * @soc_hdl: Datapath soc handle
7255  * @vdev_id: id of Datapath VDEV handle
7256  * @osif_vdev: OSIF vdev handle
7257  * @txrx_ops: Tx and Rx operations
7258  *
7259  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
7260  */
7261 static QDF_STATUS dp_vdev_register_wifi3(struct cdp_soc_t *soc_hdl,
7262 					 uint8_t vdev_id,
7263 					 ol_osif_vdev_handle osif_vdev,
7264 					 struct ol_txrx_ops *txrx_ops)
7265 {
7266 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7267 	struct dp_vdev *vdev =	dp_vdev_get_ref_by_id(soc, vdev_id,
7268 						      DP_MOD_ID_CDP);
7269 
7270 	if (!vdev)
7271 		return QDF_STATUS_E_FAILURE;
7272 
7273 	vdev->osif_vdev = osif_vdev;
7274 	vdev->osif_rx = txrx_ops->rx.rx;
7275 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
7276 	vdev->osif_rx_flush = txrx_ops->rx.rx_flush;
7277 	vdev->osif_gro_flush = txrx_ops->rx.rx_gro_flush;
7278 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
7279 	vdev->osif_fisa_rx = txrx_ops->rx.osif_fisa_rx;
7280 	vdev->osif_fisa_flush = txrx_ops->rx.osif_fisa_flush;
7281 	vdev->osif_get_key = txrx_ops->get_key;
7282 	dp_monitor_vdev_register_osif(vdev, txrx_ops);
7283 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
7284 	vdev->tx_comp = txrx_ops->tx.tx_comp;
7285 	vdev->stats_cb = txrx_ops->rx.stats_rx;
7286 	vdev->tx_classify_critical_pkt_cb =
7287 		txrx_ops->tx.tx_classify_critical_pkt_cb;
7288 #ifdef notyet
7289 #if ATH_SUPPORT_WAPI
7290 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
7291 #endif
7292 #endif
7293 #ifdef UMAC_SUPPORT_PROXY_ARP
7294 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
7295 #endif
7296 	vdev->me_convert = txrx_ops->me_convert;
7297 	vdev->get_tsf_time = txrx_ops->get_tsf_time;
7298 
7299 	dp_vdev_register_rx_eapol(vdev, txrx_ops);
7300 
7301 	dp_vdev_register_tx_handler(vdev, soc, txrx_ops);
7302 
7303 	dp_init_info("%pK: DP Vdev Register success", soc);
7304 
7305 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7306 	return QDF_STATUS_SUCCESS;
7307 }
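
/*
 * Illustrative usage from the OSIF side (a sketch only, not code in
 * this file; the osif_* callback names are hypothetical): the OS
 * interface layer fills struct ol_txrx_ops with its rx callbacks and
 * registers them through the CDP wrapper, which lands in
 * dp_vdev_register_wifi3():
 *
 *	struct ol_txrx_ops txrx_ops = {0};
 *
 *	txrx_ops.rx.rx = osif_rx_deliver;
 *	txrx_ops.rx.rx_flush = osif_rx_flush;
 *	txrx_ops.get_key = osif_get_key;
 *	cdp_vdev_register(cdp_soc, vdev_id, osif_vdev, &txrx_ops);
 *
 * On return, txrx_ops->tx.tx has been populated by
 * dp_vdev_register_tx_handler() with the DP hard-start function the
 * caller must use for transmit.
 */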
7308 
7309 #ifdef WLAN_FEATURE_11BE_MLO
7310 void dp_peer_delete(struct dp_soc *soc,
7311 		    struct dp_peer *peer,
7312 		    void *arg)
7313 {
7314 	if (!peer->valid)
7315 		return;
7316 
7317 	dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
7318 			     peer->vdev->vdev_id,
7319 			     peer->mac_addr.raw, 0,
7320 			     peer->peer_type);
7321 }
7322 #else
7323 void dp_peer_delete(struct dp_soc *soc,
7324 		    struct dp_peer *peer,
7325 		    void *arg)
7326 {
7327 	if (!peer->valid)
7328 		return;
7329 
7330 	dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
7331 			     peer->vdev->vdev_id,
7332 			     peer->mac_addr.raw, 0,
7333 			     CDP_LINK_PEER_TYPE);
7334 }
7335 #endif
7336 
7337 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
7338 void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg)
7339 {
7340 	if (!peer->valid)
7341 		return;
7342 
7343 	if (IS_MLO_DP_LINK_PEER(peer))
7344 		dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
7345 				     peer->vdev->vdev_id,
7346 				     peer->mac_addr.raw, 0,
7347 				     CDP_LINK_PEER_TYPE);
7348 }
7349 #else
7350 void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg)
7351 {
7352 }
7353 #endif
7354 /**
7355  * dp_vdev_flush_peers() - Forcibly flush peers of vdev
7356  * @vdev_handle: Datapath VDEV handle
7357  * @unmap_only: Flag to indicate "only unmap"
7358  * @mlo_peers_only: Flag to flush only MLO peers
7359  * Return: void
7360  */
7361 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
7362 				bool unmap_only,
7363 				bool mlo_peers_only)
7364 {
7365 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7366 	struct dp_pdev *pdev = vdev->pdev;
7367 	struct dp_soc *soc = pdev->soc;
7368 	struct dp_peer *peer;
7369 	uint32_t i = 0;
7370 
7371 
7372 	if (!unmap_only) {
7373 		if (!mlo_peers_only)
7374 			dp_vdev_iterate_peer_lock_safe(vdev,
7375 						       dp_peer_delete,
7376 						       NULL,
7377 						       DP_MOD_ID_CDP);
7378 		else
7379 			dp_vdev_iterate_peer_lock_safe(vdev,
7380 						       dp_mlo_peer_delete,
7381 						       NULL,
7382 						       DP_MOD_ID_CDP);
7383 	}
7384 
7385 	for (i = 0; i < soc->max_peer_id ; i++) {
7386 		peer = __dp_peer_get_ref_by_id(soc, i, DP_MOD_ID_CDP);
7387 
7388 		if (!peer)
7389 			continue;
7390 
7391 		if (peer->vdev != vdev) {
7392 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7393 			continue;
7394 		}
7395 
7396 		if (!mlo_peers_only) {
7397 			dp_info("peer: " QDF_MAC_ADDR_FMT " is getting unmapped",
7398 				QDF_MAC_ADDR_REF(peer->mac_addr.raw));
7399 			dp_rx_peer_unmap_handler(soc, i,
7400 						 vdev->vdev_id,
7401 						 peer->mac_addr.raw, 0,
7402 						 DP_PEER_WDS_COUNT_INVALID);
7403 			SET_PEER_REF_CNT_ONE(peer);
7404 		} else if (IS_MLO_DP_LINK_PEER(peer) ||
7405 			   IS_MLO_DP_MLD_PEER(peer)) {
7406 			dp_info("peer: " QDF_MAC_ADDR_FMT " is getting unmapped",
7407 				QDF_MAC_ADDR_REF(peer->mac_addr.raw));
7408 			dp_rx_peer_unmap_handler(soc, i,
7409 						 vdev->vdev_id,
7410 						 peer->mac_addr.raw, 0,
7411 						 DP_PEER_WDS_COUNT_INVALID);
7412 			SET_PEER_REF_CNT_ONE(peer);
7413 		}
7414 
7415 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7416 	}
7417 }
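
/*
 * Note on SET_PEER_REF_CNT_ONE above: forcing ref_cnt to 1 makes the
 * dp_peer_unref_delete() that follows drop the final reference and
 * free the peer immediately. This compensates for the peer-unmap
 * events that will never arrive from a hung or resetting target.
 */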
7418 
7419 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
7420 /*
7421  * dp_txrx_alloc_vdev_stats_id() - Allocate vdev_stats_id
7422  * @soc_hdl: Datapath soc handle
7423  * @vdev_stats_id: Address of vdev_stats_id
7424  *
7425  * Return: QDF_STATUS
7426  */
7427 static QDF_STATUS dp_txrx_alloc_vdev_stats_id(struct cdp_soc_t *soc_hdl,
7428 					      uint8_t *vdev_stats_id)
7429 {
7430 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7431 	uint8_t id = 0;
7432 
7433 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
7434 		*vdev_stats_id = CDP_INVALID_VDEV_STATS_ID;
7435 		return QDF_STATUS_E_FAILURE;
7436 	}
7437 
7438 	while (id < CDP_MAX_VDEV_STATS_ID) {
7439 		if (!qdf_atomic_test_and_set_bit(id, &soc->vdev_stats_id_map)) {
7440 			*vdev_stats_id = id;
7441 			return QDF_STATUS_SUCCESS;
7442 		}
7443 		id++;
7444 	}
7445 
7446 	*vdev_stats_id = CDP_INVALID_VDEV_STATS_ID;
7447 	return QDF_STATUS_E_FAILURE;
7448 }
7449 
7450 /*
7451  * dp_txrx_reset_vdev_stats_id() - Reset vdev_stats_id in dp_soc
7452  * @soc_hdl: Datapath soc handle
7453  * @vdev_stats_id: vdev_stats_id to reset in dp_soc
7454  *
7455  * Return: none
7456  */
7457 static void dp_txrx_reset_vdev_stats_id(struct cdp_soc_t *soc_hdl,
7458 					uint8_t vdev_stats_id)
7459 {
7460 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7461 
7462 	if ((!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) ||
7463 	    (vdev_stats_id >= CDP_MAX_VDEV_STATS_ID))
7464 		return;
7465 
7466 	qdf_atomic_clear_bit(vdev_stats_id, &soc->vdev_stats_id_map);
7467 }
7468 #else
7469 static void dp_txrx_reset_vdev_stats_id(struct cdp_soc_t *soc,
7470 					uint8_t vdev_stats_id)
7471 {}
7472 #endif
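
/*
 * Illustrative pairing of the two helpers above (a sketch only of the
 * intended alloc/release symmetry; the detach path below performs the
 * release half):
 *
 *	uint8_t vdev_stats_id = CDP_INVALID_VDEV_STATS_ID;
 *
 *	dp_txrx_alloc_vdev_stats_id(cdp_soc, &vdev_stats_id);
 *	vdev->vdev_stats_id = vdev_stats_id;
 *	...
 *	dp_txrx_reset_vdev_stats_id(cdp_soc, vdev->vdev_stats_id);
 */
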
7473 /*
7474  * dp_vdev_detach_wifi3() - Detach txrx vdev
7475  * @cdp_soc: Datapath soc handle
7476  * @vdev_id: VDEV Id
7477  * @callback: Callback OL_IF on completion of detach
7478  * @cb_context:	Callback context
7479  *
7480  */
7481 static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc,
7482 				       uint8_t vdev_id,
7483 				       ol_txrx_vdev_delete_cb callback,
7484 				       void *cb_context)
7485 {
7486 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
7487 	struct dp_pdev *pdev;
7488 	struct dp_neighbour_peer *peer = NULL;
7489 	struct dp_peer *vap_self_peer = NULL;
7490 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
7491 						     DP_MOD_ID_CDP);
7492 
7493 	if (!vdev)
7494 		return QDF_STATUS_E_FAILURE;
7495 
7496 	soc->arch_ops.txrx_vdev_detach(soc, vdev);
7497 
7498 	pdev = vdev->pdev;
7499 
7500 	vap_self_peer = dp_sta_vdev_self_peer_ref_n_get(soc, vdev,
7501 							DP_MOD_ID_CONFIG);
7502 	if (vap_self_peer) {
7503 		qdf_spin_lock_bh(&soc->ast_lock);
7504 		if (vap_self_peer->self_ast_entry) {
7505 			dp_peer_del_ast(soc, vap_self_peer->self_ast_entry);
7506 			vap_self_peer->self_ast_entry = NULL;
7507 		}
7508 		qdf_spin_unlock_bh(&soc->ast_lock);
7509 
7510 		dp_peer_delete_wifi3((struct cdp_soc_t *)soc, vdev->vdev_id,
7511 				     vap_self_peer->mac_addr.raw, 0,
7512 				     CDP_LINK_PEER_TYPE);
7513 		dp_peer_unref_delete(vap_self_peer, DP_MOD_ID_CONFIG);
7514 	}
7515 
7516 	/*
7517 	 * If the target is hung, flush all peers before detaching the vdev;
7518 	 * this will free all references held due to missing
7519 	 * unmap commands from the target
7520 	 */
7521 	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
7522 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, false, false);
7523 	else if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
7524 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, true, false);
7525 
7526 	/* indicate that the vdev needs to be deleted */
7527 	vdev->delete.pending = 1;
7528 	dp_rx_vdev_detach(vdev);
7529 	/*
7530 	 * Keep this after dp_rx_vdev_detach(), as the callback
7531 	 * invoked from dp_rx_vdev_detach() still needs to look up
7532 	 * the vdev pointer by vdev_id.
7533 	 */
7534 	dp_vdev_id_map_tbl_remove(soc, vdev);
7535 
7536 	dp_monitor_neighbour_peer_list_remove(pdev, vdev, peer);
7537 
7538 	dp_txrx_reset_vdev_stats_id(cdp_soc, vdev->vdev_stats_id);
7539 
7540 	dp_tx_vdev_multipass_deinit(vdev);
7541 	dp_tx_vdev_traffic_end_indication_detach(vdev);
7542 
7543 	if (vdev->vdev_dp_ext_handle) {
7544 		qdf_mem_free(vdev->vdev_dp_ext_handle);
7545 		vdev->vdev_dp_ext_handle = NULL;
7546 	}
7547 	vdev->delete.callback = callback;
7548 	vdev->delete.context = cb_context;
7549 
7550 	if (vdev->opmode != wlan_op_mode_monitor)
7551 		dp_vdev_pdev_list_remove(soc, pdev, vdev);
7552 
7553 	pdev->vdev_count--;
7554 	/* release reference taken above for find */
7555 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7556 
7557 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
7558 	TAILQ_INSERT_TAIL(&soc->inactive_vdev_list, vdev, inactive_list_elem);
7559 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
7560 
7561 	/* release reference taken at dp_vdev_create */
7562 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
7563 
7564 	return QDF_STATUS_SUCCESS;
7565 }
7566 
7567 #ifdef WLAN_FEATURE_11BE_MLO
7568 /**
7569  * is_dp_peer_can_reuse() - check whether the dp_peer matches the conditions for reuse
7570  * @vdev: Target DP vdev handle
7571  * @peer: DP peer handle to be checked
7572  * @peer_mac_addr: Target peer mac address
7573  * @peer_type: Target peer type
7574  *
7575  * Return: true - if match, false - not match
7576  */
7577 static inline
7578 bool is_dp_peer_can_reuse(struct dp_vdev *vdev,
7579 			  struct dp_peer *peer,
7580 			  uint8_t *peer_mac_addr,
7581 			  enum cdp_peer_type peer_type)
7582 {
7583 	if (peer->bss_peer && (peer->vdev == vdev) &&
7584 	    (peer->peer_type == peer_type) &&
7585 	    (qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw,
7586 			 QDF_MAC_ADDR_SIZE) == 0))
7587 		return true;
7588 
7589 	return false;
7590 }
7591 #else
7592 static inline
7593 bool is_dp_peer_can_reuse(struct dp_vdev *vdev,
7594 			  struct dp_peer *peer,
7595 			  uint8_t *peer_mac_addr,
7596 			  enum cdp_peer_type peer_type)
7597 {
7598 	if (peer->bss_peer && (peer->vdev == vdev) &&
7599 	    (qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw,
7600 			 QDF_MAC_ADDR_SIZE) == 0))
7601 		return true;
7602 
7603 	return false;
7604 }
7605 #endif
7606 
7607 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
7608 						uint8_t *peer_mac_addr,
7609 						enum cdp_peer_type peer_type)
7610 {
7611 	struct dp_peer *peer;
7612 	struct dp_soc *soc = vdev->pdev->soc;
7613 
7614 	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
7615 	TAILQ_FOREACH(peer, &soc->inactive_peer_list,
7616 		      inactive_list_elem) {
7617 
7618 		/* reuse bss peer only when vdev matches */
7619 		if (is_dp_peer_can_reuse(vdev, peer,
7620 					 peer_mac_addr, peer_type)) {
7621 			/* increment ref count for cdp_peer_create */
7622 			if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG) ==
7623 						QDF_STATUS_SUCCESS) {
7624 				TAILQ_REMOVE(&soc->inactive_peer_list, peer,
7625 					     inactive_list_elem);
7626 				qdf_spin_unlock_bh
7627 					(&soc->inactive_peer_list_lock);
7628 				return peer;
7629 			}
7630 		}
7631 	}
7632 
7633 	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
7634 	return NULL;
7635 }
7636 
7637 #ifdef FEATURE_AST
7638 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
7639 					       struct dp_pdev *pdev,
7640 					       uint8_t *peer_mac_addr)
7641 {
7642 	struct dp_ast_entry *ast_entry;
7643 
7644 	if (soc->ast_offload_support)
7645 		return;
7646 
7647 	qdf_spin_lock_bh(&soc->ast_lock);
7648 	if (soc->ast_override_support)
7649 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
7650 							    pdev->pdev_id);
7651 	else
7652 		ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
7653 
7654 	if (ast_entry && ast_entry->next_hop && !ast_entry->delete_in_progress)
7655 		dp_peer_del_ast(soc, ast_entry);
7656 
7657 	qdf_spin_unlock_bh(&soc->ast_lock);
7658 }
7659 #else
7660 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
7661 					       struct dp_pdev *pdev,
7662 					       uint8_t *peer_mac_addr)
7663 {
7664 }
7665 #endif
7666 
7667 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
7668 /*
7669  * dp_peer_hw_txrx_stats_init() - Initialize hw_txrx_stats_en in dp_txrx_peer
7670  * @soc: Datapath soc handle
7671  * @txrx_peer: Datapath txrx_peer handle
7672  *
7673  * Return: none
7674  */
7675 static inline
7676 void dp_peer_hw_txrx_stats_init(struct dp_soc *soc,
7677 				struct dp_txrx_peer *txrx_peer)
7678 {
7679 	txrx_peer->hw_txrx_stats_en =
7680 		wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx);
7681 }
7682 #else
7683 static inline
7684 void dp_peer_hw_txrx_stats_init(struct dp_soc *soc,
7685 				struct dp_txrx_peer *txrx_peer)
7686 {
7687 	txrx_peer->hw_txrx_stats_en = 0;
7688 }
7689 #endif
7690 
7691 static QDF_STATUS dp_txrx_peer_detach(struct dp_soc *soc, struct dp_peer *peer)
7692 {
7693 	struct dp_txrx_peer *txrx_peer;
7694 	struct dp_pdev *pdev;
7695 
7696 	/* dp_txrx_peer exists for mld peer and legacy peer */
7697 	if (peer->txrx_peer) {
7698 		txrx_peer = peer->txrx_peer;
7699 		peer->txrx_peer = NULL;
7700 		pdev = txrx_peer->vdev->pdev;
7701 
7702 		dp_peer_defrag_rx_tids_deinit(txrx_peer);
7703 		/*
7704 		 * Deallocate the extended stats context
7705 		 */
7706 		dp_peer_delay_stats_ctx_dealloc(soc, txrx_peer);
7707 		dp_peer_rx_bufq_resources_deinit(txrx_peer);
7708 		dp_peer_jitter_stats_ctx_dealloc(pdev, txrx_peer);
7709 		dp_peer_sawf_stats_ctx_free(soc, txrx_peer);
7710 
7711 		qdf_mem_free(txrx_peer);
7712 	}
7713 
7714 	return QDF_STATUS_SUCCESS;
7715 }
7716 
7717 static QDF_STATUS dp_txrx_peer_attach(struct dp_soc *soc, struct dp_peer *peer)
7718 {
7719 	struct dp_txrx_peer *txrx_peer;
7720 	struct dp_pdev *pdev;
7721 
7722 	txrx_peer = (struct dp_txrx_peer *)qdf_mem_malloc(sizeof(*txrx_peer));
7723 
7724 	if (!txrx_peer)
7725 		return QDF_STATUS_E_NOMEM; /* failure */
7726 
7727 	/* initialize the peer_id */
7728 	txrx_peer->peer_id = HTT_INVALID_PEER;
7729 	txrx_peer->vdev = peer->vdev;
7730 	pdev = peer->vdev->pdev;
7731 
7732 	DP_STATS_INIT(txrx_peer);
7733 
7734 	dp_wds_ext_peer_init(txrx_peer);
7735 	dp_peer_rx_bufq_resources_init(txrx_peer);
7736 	dp_peer_hw_txrx_stats_init(soc, txrx_peer);
7737 	/*
7738 	 * Allocate peer extended stats context. Fall through in
7739 	 * case of failure as it is not a hard requirement to have
7740 	 * this object for regular statistics updates.
7741 	 */
7742 	if (dp_peer_delay_stats_ctx_alloc(soc, txrx_peer) !=
7743 					  QDF_STATUS_SUCCESS)
7744 		dp_warn("peer delay_stats ctx alloc failed");
7745 
7746 	/*
7747 	 * Allocate memory for jitter stats. Fall through in
7748 	 * case of failure as it is not a hard requirement to have
7749 	 * this object for regular statistics updates.
7750 	 */
7751 	if (dp_peer_jitter_stats_ctx_alloc(pdev, txrx_peer) !=
7752 					   QDF_STATUS_SUCCESS)
7753 		dp_warn("peer jitter_stats ctx alloc failed");
7754 
7755 	dp_set_peer_isolation(txrx_peer, false);
7756 
7757 	dp_peer_defrag_rx_tids_init(txrx_peer);
7758 
7759 	if (dp_peer_sawf_stats_ctx_alloc(soc, txrx_peer) != QDF_STATUS_SUCCESS)
7760 		dp_warn("peer sawf stats alloc failed");
7761 
7762 	dp_txrx_peer_attach_add(soc, peer, txrx_peer);
7763 
7764 	return QDF_STATUS_SUCCESS;
7765 }
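
/*
 * Note: dp_txrx_peer_attach() and dp_txrx_peer_detach() above are
 * intended to stay symmetric; every per-peer context allocated here
 * (delay stats, jitter stats, SAWF stats, rx bufq, defrag rx tids)
 * has a matching deinit/dealloc in the detach path, so any new
 * per-peer context should be added to both functions.
 */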
7766 
7767 static inline
7768 void dp_txrx_peer_stats_clr(struct dp_txrx_peer *txrx_peer)
7769 {
7770 	if (!txrx_peer)
7771 		return;
7772 
7773 	txrx_peer->tx_failed = 0;
7774 	txrx_peer->comp_pkt.num = 0;
7775 	txrx_peer->comp_pkt.bytes = 0;
7776 	txrx_peer->to_stack.num = 0;
7777 	txrx_peer->to_stack.bytes = 0;
7778 
7779 	DP_STATS_CLR(txrx_peer);
7780 	dp_peer_delay_stats_ctx_clr(txrx_peer);
7781 	dp_peer_jitter_stats_ctx_clr(txrx_peer);
7782 }
7783 
7784 /*
7785  * dp_peer_create_wifi3() - attach txrx peer
7786  * @soc_hdl: Datapath soc handle
7787  * @vdev_id: id of vdev
7788  * @peer_mac_addr: Peer MAC address
7789  * @peer_type: link or MLD peer type
7790  *
7791  * Return: QDF_STATUS_SUCCESS on success, error status on failure
7792  */
7793 static QDF_STATUS
7794 dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
7795 		     uint8_t *peer_mac_addr, enum cdp_peer_type peer_type)
7796 {
7797 	struct dp_peer *peer;
7798 	int i;
7799 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7800 	struct dp_pdev *pdev;
7801 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
7802 	struct dp_vdev *vdev = NULL;
7803 
7804 	if (!peer_mac_addr)
7805 		return QDF_STATUS_E_FAILURE;
7806 
7807 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
7808 
7809 	if (!vdev)
7810 		return QDF_STATUS_E_FAILURE;
7811 
7812 	pdev = vdev->pdev;
7813 	soc = pdev->soc;
7814 
7815 	/*
7816 	 * If a peer entry with the given MAC address already exists,
7817 	 * reuse the peer and reset its state.
7818 	 */
7819 	peer = dp_peer_can_reuse(vdev, peer_mac_addr, peer_type);
7820 
7821 	if (peer) {
7822 		qdf_atomic_init(&peer->is_default_route_set);
7823 		dp_peer_cleanup(vdev, peer);
7824 
7825 		dp_peer_vdev_list_add(soc, vdev, peer);
7826 		dp_peer_find_hash_add(soc, peer);
7827 
7828 		dp_peer_rx_tids_create(peer);
7829 		if (IS_MLO_DP_MLD_PEER(peer))
7830 			dp_mld_peer_init_link_peers_info(peer);
7831 
7832 		qdf_spin_lock_bh(&soc->ast_lock);
7833 		dp_peer_delete_ast_entries(soc, peer);
7834 		qdf_spin_unlock_bh(&soc->ast_lock);
7835 
7836 		if ((vdev->opmode == wlan_op_mode_sta) &&
7837 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
7838 		     QDF_MAC_ADDR_SIZE)) {
7839 			ast_type = CDP_TXRX_AST_TYPE_SELF;
7840 		}
7841 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
7842 
7843 		peer->valid = 1;
7844 		peer->is_tdls_peer = false;
7845 		dp_local_peer_id_alloc(pdev, peer);
7846 
7847 		qdf_spinlock_create(&peer->peer_info_lock);
7848 
7849 		DP_STATS_INIT(peer);
7850 
7851 		/*
7852 		 * In tx_monitor mode, a filter may be set for an unassociated
7853 		 * peer; when that peer gets associated, the tx_cap_enabled
7854 		 * flag needs to be updated to support peer filtering.
7855 		 */
7856 		if (!IS_MLO_DP_MLD_PEER(peer)) {
7857 			dp_monitor_peer_tx_capture_filter_check(pdev, peer);
7858 			dp_monitor_peer_reset_stats(soc, peer);
7859 		}
7860 
7861 		if (peer->txrx_peer) {
7862 			dp_peer_rx_bufq_resources_init(peer->txrx_peer);
7863 			dp_txrx_peer_stats_clr(peer->txrx_peer);
7864 			dp_set_peer_isolation(peer->txrx_peer, false);
7865 			dp_wds_ext_peer_init(peer->txrx_peer);
7866 			dp_peer_hw_txrx_stats_init(soc, peer->txrx_peer);
7867 		}
7868 
7869 		dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
7870 
7871 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7872 		return QDF_STATUS_SUCCESS;
7873 	} else {
7874 		/*
7875 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
7876 		 * need to remove the AST entry which was earlier added as a WDS
7877 		 * entry.
7878 		 * If an AST entry exists, but no peer entry exists with the
7879 		 * given MAC address, we can deduce that it is a WDS entry.
7880 		 */
7881 		dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
7882 	}
7883 
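	/*
	 * No reusable inactive peer was found: allocate and initialize
	 * a brand new dp_peer below.
	 */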
7884 #ifdef notyet
7885 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
7886 		soc->mempool_ol_ath_peer);
7887 #else
7888 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
7889 #endif
7890 	wlan_minidump_log(peer,
7891 			  sizeof(*peer),
7892 			  soc->ctrl_psoc,
7893 			  WLAN_MD_DP_PEER, "dp_peer");
7894 	if (!peer) {
7895 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7896 		return QDF_STATUS_E_FAILURE; /* failure */
7897 	}
7898 
7899 	qdf_mem_zero(peer, sizeof(struct dp_peer));
7900 
7901 	/* store provided params */
7902 	peer->vdev = vdev;
7903 
7904 	/* initialize the peer_id */
7905 	peer->peer_id = HTT_INVALID_PEER;
7906 
7907 	qdf_mem_copy(
7908 		&peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE);
7909 
7910 	DP_PEER_SET_TYPE(peer, peer_type);
7911 	if (IS_MLO_DP_MLD_PEER(peer)) {
7912 		if (dp_txrx_peer_attach(soc, peer) !=
7913 				QDF_STATUS_SUCCESS)
7914 			goto fail; /* failure */
7915 
7916 		dp_mld_peer_init_link_peers_info(peer);
7917 	} else if (dp_monitor_peer_attach(soc, peer) !=
7918 				QDF_STATUS_SUCCESS)
7919 		dp_warn("peer monitor ctx alloc failed");
7920 
7921 	TAILQ_INIT(&peer->ast_entry_list);
7922 
7923 	/* get the vdev reference for new peer */
7924 	dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CHILD);
7925 
7926 	if ((vdev->opmode == wlan_op_mode_sta) &&
7927 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
7928 			 QDF_MAC_ADDR_SIZE)) {
7929 		ast_type = CDP_TXRX_AST_TYPE_SELF;
7930 	}
7931 	qdf_spinlock_create(&peer->peer_state_lock);
7932 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
7933 	qdf_spinlock_create(&peer->peer_info_lock);
7934 
7935 	/* reset the ast index to flowid table */
7936 	dp_peer_reset_flowq_map(peer);
7937 
7938 	qdf_atomic_init(&peer->ref_cnt);
7939 
7940 	for (i = 0; i < DP_MOD_ID_MAX; i++)
7941 		qdf_atomic_init(&peer->mod_refs[i]);
7942 
7943 	/* keep one reference for attach */
7944 	qdf_atomic_inc(&peer->ref_cnt);
7945 	qdf_atomic_inc(&peer->mod_refs[DP_MOD_ID_CONFIG]);
7946 
7947 	dp_peer_vdev_list_add(soc, vdev, peer);
7948 
7949 	/* TODO: See if hash based search is required */
7950 	dp_peer_find_hash_add(soc, peer);
7951 
7952 	/* Initialize the peer state */
7953 	peer->state = OL_TXRX_PEER_STATE_DISC;
7954 
7955 	dp_info("vdev %pK created peer %pK ("QDF_MAC_ADDR_FMT") vdev_ref_cnt "
7956 		"%d peer_ref_cnt: %d",
7957 		vdev, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
7958 		qdf_atomic_read(&vdev->ref_cnt),
7959 		qdf_atomic_read(&peer->ref_cnt));
7960 	/*
7961 	 * For every peer MAP message, search and set if bss_peer
7962 	 */
7963 	if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
7964 			QDF_MAC_ADDR_SIZE) == 0 &&
7965 			(wlan_op_mode_sta != vdev->opmode)) {
7966 		dp_info("vdev bss_peer!!");
7967 		peer->bss_peer = 1;
7968 		if (peer->txrx_peer)
7969 			peer->txrx_peer->bss_peer = 1;
7970 	}
7971 
7972 	if (wlan_op_mode_sta == vdev->opmode &&
7973 	    qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
7974 			QDF_MAC_ADDR_SIZE) == 0) {
7975 		peer->sta_self_peer = 1;
7976 	}
7977 
7978 	dp_peer_rx_tids_create(peer);
7979 
7980 	peer->valid = 1;
7981 	dp_local_peer_id_alloc(pdev, peer);
7982 	DP_STATS_INIT(peer);
7983 
7984 	if (dp_peer_sawf_ctx_alloc(soc, peer) != QDF_STATUS_SUCCESS)
7985 		dp_warn("peer sawf context alloc failed");
7986 
7987 	dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
7988 
7989 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7990 
7991 	return QDF_STATUS_SUCCESS;
7992 fail:
7993 	qdf_mem_free(peer);
7994 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7995 
7996 	return QDF_STATUS_E_FAILURE;
7997 }
7998 
7999 static QDF_STATUS dp_peer_legacy_setup(struct dp_soc *soc, struct dp_peer *peer)
8000 {
8001 	/* txrx_peer might exist already in peer reuse case */
8002 	if (peer->txrx_peer)
8003 		return QDF_STATUS_SUCCESS;
8004 
8005 	if (dp_txrx_peer_attach(soc, peer) !=
8006 				QDF_STATUS_SUCCESS) {
8007 		dp_err("peer txrx ctx alloc failed");
8008 		return QDF_STATUS_E_FAILURE;
8009 	}
8010 
8011 	return QDF_STATUS_SUCCESS;
8012 }
8013 
8014 #ifdef WLAN_FEATURE_11BE_MLO
8015 QDF_STATUS dp_peer_mlo_setup(
8016 			struct dp_soc *soc,
8017 			struct dp_peer *peer,
8018 			uint8_t vdev_id,
8019 			struct cdp_peer_setup_info *setup_info)
8020 {
8021 	struct dp_peer *mld_peer = NULL;
8022 
8023 	/* Non-MLO connection, do nothing */
8024 	if (!setup_info || !setup_info->mld_peer_mac)
8025 		return QDF_STATUS_SUCCESS;
8026 
8027 	dp_info("link peer: " QDF_MAC_ADDR_FMT " mld peer: " QDF_MAC_ADDR_FMT
8028 		" first_link %d, primary_link %d",
8029 		QDF_MAC_ADDR_REF(peer->mac_addr.raw),
8030 		QDF_MAC_ADDR_REF(setup_info->mld_peer_mac),
8031 		setup_info->is_first_link,
8032 		setup_info->is_primary_link);
8033 
8034 	/* if this is the first link peer */
8035 	if (setup_info->is_first_link)
8036 		/* create MLD peer */
8037 		dp_peer_create_wifi3((struct cdp_soc_t *)soc,
8038 				     vdev_id,
8039 				     setup_info->mld_peer_mac,
8040 				     CDP_MLD_PEER_TYPE);
8041 
8042 	peer->first_link = setup_info->is_first_link;
8043 	peer->primary_link = setup_info->is_primary_link;
8044 	mld_peer = dp_mld_peer_find_hash_find(soc,
8045 					      setup_info->mld_peer_mac,
8046 					      0, vdev_id, DP_MOD_ID_CDP);
8047 	if (mld_peer) {
8048 		if (setup_info->is_first_link) {
8049 			/* assign rx_tid to mld peer */
8050 			mld_peer->rx_tid = peer->rx_tid;
8051 			/* no cdp_peer_setup for MLD peer,
8052 			 * set it for addba processing
8053 			 */
8054 			qdf_atomic_set(&mld_peer->is_default_route_set, 1);
8055 		} else {
8056 			/* free link peer original rx_tids mem */
8057 			dp_peer_rx_tids_destroy(peer);
8058 			/* assign mld peer rx_tid to link peer */
8059 			peer->rx_tid = mld_peer->rx_tid;
8060 		}
8061 
8062 		if (setup_info->is_primary_link &&
8063 		    !setup_info->is_first_link) {
8064 			/*
8065 			 * If the first link is not the primary link, then
8066 			 * mld_peer->vdev needs to be updated, since the
8067 			 * primary link's dp_vdev is not the one used
8068 			 * during MLD peer creation.
8069 			 */
8070 			dp_info("Primary link is not the first link. vdev: %pK, "
8071 				"vdev_ref_cnt %d", mld_peer->vdev,
8072 				 mld_peer->vdev->ref_cnt);
8073 			/* release the ref to original dp_vdev */
8074 			dp_vdev_unref_delete(soc, mld_peer->vdev,
8075 					     DP_MOD_ID_CHILD);
8076 			/*
8077 			 * get the ref to new dp_vdev,
8078 			 * increase dp_vdev ref_cnt
8079 			 */
8080 			mld_peer->vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8081 							       DP_MOD_ID_CHILD);
8082 			mld_peer->txrx_peer->vdev = mld_peer->vdev;
8083 		}
8084 
8085 		/* associate mld and link peer */
8086 		dp_link_peer_add_mld_peer(peer, mld_peer);
8087 		dp_mld_peer_add_link_peer(mld_peer, peer);
8088 
8089 		mld_peer->txrx_peer->mld_peer = 1;
8090 		dp_peer_unref_delete(mld_peer, DP_MOD_ID_CDP);
8091 	} else {
8092 		peer->mld_peer = NULL;
8093 		dp_err("mld peer " QDF_MAC_ADDR_FMT " not found!",
8094 		       QDF_MAC_ADDR_REF(setup_info->mld_peer_mac));
8095 		return QDF_STATUS_E_FAILURE;
8096 	}
8097 
8098 	return QDF_STATUS_SUCCESS;
8099 }
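
/*
 * Summary of the rx_tid ownership implemented above: the MLD peer
 * owns the rx_tid array. The first link peer donates its rx_tid to
 * the MLD peer; every later link peer frees its own rx_tid and
 * points at the MLD peer's copy, so reorder state is shared across
 * all links of the MLO connection.
 */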
8100 
8101 /*
8102  * dp_mlo_peer_authorize() - authorize MLO peer
8103  * @soc: soc handle
8104  * @peer: pointer to link peer
8105  *
8106  * Return: void
8107  */
8108 static void dp_mlo_peer_authorize(struct dp_soc *soc,
8109 				  struct dp_peer *peer)
8110 {
8111 	int i;
8112 	struct dp_peer *link_peer = NULL;
8113 	struct dp_peer *mld_peer = peer->mld_peer;
8114 	struct dp_mld_link_peers link_peers_info;
8115 
8116 	if (!mld_peer)
8117 		return;
8118 
8119 	/* get link peers with reference */
8120 	dp_get_link_peers_ref_from_mld_peer(soc, mld_peer,
8121 					    &link_peers_info,
8122 					    DP_MOD_ID_CDP);
8123 
8124 	for (i = 0; i < link_peers_info.num_links; i++) {
8125 		link_peer = link_peers_info.link_peers[i];
8126 
8127 		if (!link_peer->authorize) {
8128 			dp_release_link_peers_ref(&link_peers_info,
8129 						  DP_MOD_ID_CDP);
8130 			mld_peer->authorize = false;
8131 			return;
8132 		}
8133 	}
8134 
8135 	/* if we are here, all link peers are authorized;
8136 	 * authorize the mld peer also
8137 	 */
8138 	mld_peer->authorize = true;
8139 
8140 	/* release link peers reference */
8141 	dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
8142 }
8143 #endif
8144 
8145 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
8146 				  enum cdp_host_reo_dest_ring *reo_dest,
8147 				  bool *hash_based)
8148 {
8149 	struct dp_soc *soc;
8150 	struct dp_pdev *pdev;
8151 
8152 	pdev = vdev->pdev;
8153 	soc = pdev->soc;
8154 	/*
8155 	 * hash based steering is disabled for Radios which are offloaded
8156 	 * to NSS
8157 	 */
8158 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
8159 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
8160 
8161 	/*
8162 	 * The line below ensures the proper reo_dest ring is chosen
8163 	 * for cases where the Toeplitz hash cannot be generated (e.g. non-TCP/UDP)
8164 	 */
8165 	*reo_dest = pdev->reo_dest;
8166 }
8167 
8168 #ifdef IPA_OFFLOAD
8169 /**
8170  * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
8171  * @vdev: Virtual device
8172  *
8173  * Return: true if the vdev is of subtype P2P
8174  *	   false if the vdev is of any other subtype
8175  */
8176 static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
8177 {
8178 	if (vdev->subtype == wlan_op_subtype_p2p_device ||
8179 	    vdev->subtype == wlan_op_subtype_p2p_cli ||
8180 	    vdev->subtype == wlan_op_subtype_p2p_go)
8181 		return true;
8182 
8183 	return false;
8184 }
8185 
8186 /*
8187  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
8188  * @vdev: Datapath VDEV handle
 * @setup_info: peer setup info for MLO
8189  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
8190  * @hash_based: pointer to hash value (enabled/disabled) to be populated
 * @lmac_peer_id_msb: MSB of the lmac peer id
8191  *
8192  * If IPA is enabled in the ini, disable hash-based steering for SAP mode
8193  * and use the default reo_dest ring for RX. Use config values for other modes.
8194  * Return: None
8195  */
8196 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
8197 				       struct cdp_peer_setup_info *setup_info,
8198 				       enum cdp_host_reo_dest_ring *reo_dest,
8199 				       bool *hash_based,
8200 				       uint8_t *lmac_peer_id_msb)
8201 {
8202 	struct dp_soc *soc;
8203 	struct dp_pdev *pdev;
8204 
8205 	pdev = vdev->pdev;
8206 	soc = pdev->soc;
8207 
8208 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
8209 
8210 	/* For P2P-GO interfaces we do not need to change the REO
8211 	 * configuration even if IPA config is enabled
8212 	 */
8213 	if (dp_is_vdev_subtype_p2p(vdev))
8214 		return;
8215 
8216 	/*
8217 	 * If IPA is enabled, disable hash-based flow steering and set
8218 	 * reo_dest_ring_4 as the REO ring to receive packets on.
8219 	 * IPA is configured to reap reo_dest_ring_4.
8220 	 *
8221 	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
8222 	 * enum values are from 1 - 4.
8223 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
8224 	 */
8225 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
8226 		if (vdev->opmode == wlan_op_mode_ap) {
8227 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
8228 			*hash_based = 0;
8229 		} else if (vdev->opmode == wlan_op_mode_sta &&
8230 			   dp_ipa_is_mdm_platform()) {
8231 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
8232 		}
8233 	}
8234 }
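
/*
 * Worked example for the index math above (assuming
 * IPA_REO_DEST_RING_IDX is 3, matching the reo_dest_ring_4 note in
 * the comment): *reo_dest becomes 3 + 1 = 4, i.e. the one-based
 * cdp_host_reo_dest_ring value for zero-based REO ring index 3.
 */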
8235 
8236 #else
8237 
8238 /*
8239  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
8240  * @vdev: Datapath VDEV handle
 * @setup_info: peer setup info for MLO
8241  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
8242  * @hash_based: pointer to hash value (enabled/disabled) to be populated
 * @lmac_peer_id_msb: MSB of the lmac peer id
8243  *
8244  * Use system config values for hash based steering.
8245  * Return: None
8246  */
8247 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
8248 				       struct cdp_peer_setup_info *setup_info,
8249 				       enum cdp_host_reo_dest_ring *reo_dest,
8250 				       bool *hash_based,
8251 				       uint8_t *lmac_peer_id_msb)
8252 {
8253 	struct dp_soc *soc = vdev->pdev->soc;
8254 
8255 	soc->arch_ops.peer_get_reo_hash(vdev, setup_info, reo_dest, hash_based,
8256 					lmac_peer_id_msb);
8257 }
8258 #endif /* IPA_OFFLOAD */
8259 
8260 /*
8261  * dp_peer_setup_wifi3() - initialize the peer
8262  * @soc_hdl: soc handle object
8263  * @vdev_id: vdev_id of vdev object
8264  * @peer_mac: Peer's mac address
8265  * @setup_info: peer setup info for MLO
8266  *
8267  * Return: QDF_STATUS
8268  */
8269 static QDF_STATUS
8270 dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8271 		    uint8_t *peer_mac,
8272 		    struct cdp_peer_setup_info *setup_info)
8273 {
8274 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8275 	struct dp_pdev *pdev;
8276 	bool hash_based = 0;
8277 	enum cdp_host_reo_dest_ring reo_dest;
8278 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8279 	struct dp_vdev *vdev = NULL;
8280 	struct dp_peer *peer =
8281 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
8282 					       DP_MOD_ID_CDP);
8283 	struct dp_peer *mld_peer = NULL;
8284 	enum wlan_op_mode vdev_opmode;
8285 	uint8_t lmac_peer_id_msb = 0;
8286 
8287 	if (!peer)
8288 		return QDF_STATUS_E_FAILURE;
8289 
8290 	vdev = peer->vdev;
8291 	if (!vdev) {
8292 		status = QDF_STATUS_E_FAILURE;
8293 		goto fail;
8294 	}
8295 
8296 	/* save vdev related member in case the vdev is freed */
8297 	vdev_opmode = vdev->opmode;
8298 	pdev = vdev->pdev;
8299 	dp_peer_setup_get_reo_hash(vdev, setup_info,
8300 				   &reo_dest, &hash_based,
8301 				   &lmac_peer_id_msb);
8302 
8303 	dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u",
8304 		pdev->pdev_id, vdev->vdev_id,
8305 		vdev->opmode, hash_based, reo_dest);
8306 
8307 	/*
8308 	 * There are corner cases where AD1 == AD2 == the VAP's address,
8309 	 * i.e. both devices have the same MAC address. In these
8310 	 * cases we want such packets to be processed by the NULL queue
8311 	 * handler, which is the REO2TCL ring; for this reason we should
8312 	 * not set up reo_queues and the default route for a bss_peer.
8313 	 */
8314 	if (!IS_MLO_DP_MLD_PEER(peer))
8315 		dp_monitor_peer_tx_init(pdev, peer);
8316 
8317 	if (!setup_info)
8318 		if (dp_peer_legacy_setup(soc, peer) !=
8319 				QDF_STATUS_SUCCESS) {
8320 			status = QDF_STATUS_E_RESOURCES;
8321 			goto fail;
8322 		}
8323 
8324 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
8325 		status = QDF_STATUS_E_FAILURE;
8326 		goto fail;
8327 	}
8328 
8329 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
8330 		/* TODO: Check the destination ring number to be passed to FW */
8331 		soc->cdp_soc.ol_ops->peer_set_default_routing(
8332 				soc->ctrl_psoc,
8333 				peer->vdev->pdev->pdev_id,
8334 				peer->mac_addr.raw,
8335 				peer->vdev->vdev_id, hash_based, reo_dest,
8336 				lmac_peer_id_msb);
8337 	}
8338 
8339 	qdf_atomic_set(&peer->is_default_route_set, 1);
8340 
8341 	status = dp_peer_mlo_setup(soc, peer, vdev->vdev_id, setup_info);
8342 	if (QDF_IS_STATUS_ERROR(status)) {
8343 		dp_peer_err("peer mlo setup failed");
8344 		qdf_assert_always(0);
8345 	}
8346 
8347 	if (vdev_opmode != wlan_op_mode_monitor) {
8348 		/* In case of MLD peer, switch peer to mld peer and
8349 		 * do peer_rx_init.
8350 		 */
8351 		if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
8352 		    IS_MLO_DP_LINK_PEER(peer)) {
8353 			if (setup_info && setup_info->is_first_link) {
8354 				mld_peer = DP_GET_MLD_PEER_FROM_PEER(peer);
8355 				if (mld_peer)
8356 					dp_peer_rx_init(pdev, mld_peer);
8357 				else
8358 					dp_peer_err("MLD peer null. Primary link peer:%pK", peer);
8359 			}
8360 		} else {
8361 			dp_peer_rx_init(pdev, peer);
8362 		}
8363 	}
8364 
8365 	if (!IS_MLO_DP_MLD_PEER(peer))
8366 		dp_peer_ppdu_delayed_ba_init(peer);
8367 
8368 fail:
8369 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8370 	return status;
8371 }
8372 
8373 /*
8374  * dp_cp_peer_del_resp_handler() - Handle the peer delete response
8375  * @soc_hdl: Datapath SOC handle
8376  * @vdev_id: id of virtual device object
8377  * @mac_addr: Mac address of the peer
8378  *
8379  * Return: QDF_STATUS
8380  */
8381 static QDF_STATUS dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl,
8382 					      uint8_t vdev_id,
8383 					      uint8_t *mac_addr)
8384 {
8385 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8386 	struct dp_ast_entry  *ast_entry = NULL;
8387 	txrx_ast_free_cb cb = NULL;
8388 	void *cookie;
8389 
8390 	if (soc->ast_offload_support)
8391 		return QDF_STATUS_E_INVAL;
8392 
8393 	qdf_spin_lock_bh(&soc->ast_lock);
8394 
8395 	ast_entry =
8396 		dp_peer_ast_hash_find_by_vdevid(soc, mac_addr,
8397 						vdev_id);
8398 
8399 	/* in case of qwrap we have multiple BSS peers
8400 	 * with the same mac address
8401 	 *
8402 	 * an AST entry for this mac address will be created
8403 	 * only for one peer, hence it will be NULL here
8404 	 */
8405 	if ((!ast_entry || !ast_entry->delete_in_progress) ||
8406 	    (ast_entry->peer_id != HTT_INVALID_PEER)) {
8407 		qdf_spin_unlock_bh(&soc->ast_lock);
8408 		return QDF_STATUS_E_FAILURE;
8409 	}
8410 
8411 	if (ast_entry->is_mapped)
8412 		soc->ast_table[ast_entry->ast_idx] = NULL;
8413 
8414 	DP_STATS_INC(soc, ast.deleted, 1);
8415 	dp_peer_ast_hash_remove(soc, ast_entry);
8416 
8417 	cb = ast_entry->callback;
8418 	cookie = ast_entry->cookie;
8419 	ast_entry->callback = NULL;
8420 	ast_entry->cookie = NULL;
8421 
8422 	soc->num_ast_entries--;
8423 	qdf_spin_unlock_bh(&soc->ast_lock);
8424 
8425 	if (cb) {
8426 		cb(soc->ctrl_psoc,
8427 		   dp_soc_to_cdp_soc(soc),
8428 		   cookie,
8429 		   CDP_TXRX_AST_DELETED);
8430 	}
8431 	qdf_mem_free(ast_entry);
8432 
8433 	return QDF_STATUS_SUCCESS;
8434 }
8435 
8436 /*
8437  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
8438  * @txrx_soc: cdp soc handle
8439  * @ac: Access category
8440  * @value: timeout value in millisec
8441  *
8442  * Return: void
8443  */
8444 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
8445 				    uint8_t ac, uint32_t value)
8446 {
8447 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
8448 
8449 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
8450 }
8451 
8452 /*
8453  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
8454  * @txrx_soc: cdp soc handle
8455  * @ac: access category
8456  * @value: timeout value in millisec
8457  *
8458  * Return: void
8459  */
8460 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
8461 				    uint8_t ac, uint32_t *value)
8462 {
8463 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
8464 
8465 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
8466 }
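
/*
 * Illustrative set/get pairing for the two helpers above (a sketch
 * only; AC index 0 is assumed here to map to best effort):
 *
 *	uint32_t timeout_ms;
 *
 *	dp_set_ba_aging_timeout(txrx_soc, 0, 1000);
 *	dp_get_ba_aging_timeout(txrx_soc, 0, &timeout_ms);
 *
 * timeout_ms should then read back 1000 (milliseconds).
 */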
8467 
8468 /*
8469  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
8470  * @txrx_soc: cdp soc handle
8471  * @pdev_id: id of physical device object
8472  * @val: reo destination ring index (1 - 4)
8473  *
8474  * Return: QDF_STATUS
8475  */
8476 static QDF_STATUS
8477 dp_set_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id,
8478 		     enum cdp_host_reo_dest_ring val)
8479 {
8480 	struct dp_pdev *pdev =
8481 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
8482 						   pdev_id);
8483 
8484 	if (pdev) {
8485 		pdev->reo_dest = val;
8486 		return QDF_STATUS_SUCCESS;
8487 	}
8488 
8489 	return QDF_STATUS_E_FAILURE;
8490 }
8491 
8492 /*
8493  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
8494  * @txrx_soc: cdp soc handle
8495  * @pdev_id: id of physical device object
8496  *
8497  * Return: reo destination ring index
8498  */
8499 static enum cdp_host_reo_dest_ring
8500 dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id)
8501 {
8502 	struct dp_pdev *pdev =
8503 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
8504 						   pdev_id);
8505 
8506 	if (pdev)
8507 		return pdev->reo_dest;
8508 	else
8509 		return cdp_host_reo_dest_ring_unknown;
8510 }
8511 
8512 #ifdef WLAN_SUPPORT_MSCS
8513 /*
8514  * dp_record_mscs_params() - Record the MSCS parameters sent by the STA in
8515  * the MSCS Request to the AP. The AP makes a note of these
8516  * parameters while comparing the MSDUs sent by the STA, to
8517  * send the downlink traffic with the correct user priority.
8518  * @soc_hdl: Datapath soc handle
8519  * @peer_mac: STA MAC address
8520  * @vdev_id: ID of the vdev handle
8521  * @mscs_params: Structure having MSCS parameters obtained
8522  * from handshake
8523  * @active: Flag to set MSCS active/inactive
8524  * Return: QDF_STATUS - Success/Invalid
8525  */
8526 static QDF_STATUS
8527 dp_record_mscs_params(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
8528 		      uint8_t vdev_id, struct cdp_mscs_params *mscs_params,
8529 		      bool active)
8530 {
8531 	struct dp_peer *peer;
8532 	QDF_STATUS status = QDF_STATUS_E_INVAL;
8533 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8534 
8535 	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
8536 				      DP_MOD_ID_CDP);
8537 
8538 	if (!peer) {
8539 		dp_err("Peer is NULL!");
8540 		goto fail;
8541 	}
8542 	if (!active) {
8543 		dp_info("MSCS Procedure is terminated");
8544 		peer->mscs_active = active;
8545 		goto fail;
8546 	}
8547 
8548 	if (mscs_params->classifier_type == IEEE80211_TCLAS_MASK_CLA_TYPE_4) {
8549 		/* Populate entries inside IPV4 database first */
8550 		peer->mscs_ipv4_parameter.user_priority_bitmap =
8551 			mscs_params->user_pri_bitmap;
8552 		peer->mscs_ipv4_parameter.user_priority_limit =
8553 			mscs_params->user_pri_limit;
8554 		peer->mscs_ipv4_parameter.classifier_mask =
8555 			mscs_params->classifier_mask;
8556 
8557 		/* Populate entries inside IPV6 database */
8558 		peer->mscs_ipv6_parameter.user_priority_bitmap =
8559 			mscs_params->user_pri_bitmap;
8560 		peer->mscs_ipv6_parameter.user_priority_limit =
8561 			mscs_params->user_pri_limit;
8562 		peer->mscs_ipv6_parameter.classifier_mask =
8563 			mscs_params->classifier_mask;
8564 		peer->mscs_active = 1;
8565 		dp_info("\n\tMSCS Procedure request based parameters for "QDF_MAC_ADDR_FMT"\n"
8566 			"\tClassifier_type = %d\tUser priority bitmap = %x\n"
8567 			"\tUser priority limit = %x\tClassifier mask = %x",
8568 			QDF_MAC_ADDR_REF(peer_mac),
8569 			mscs_params->classifier_type,
8570 			peer->mscs_ipv4_parameter.user_priority_bitmap,
8571 			peer->mscs_ipv4_parameter.user_priority_limit,
8572 			peer->mscs_ipv4_parameter.classifier_mask);
8573 	}
8574 
8575 	status = QDF_STATUS_SUCCESS;
8576 fail:
8577 	if (peer)
8578 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8579 	return status;
8580 }
8581 #endif
8582 
8583 /*
8584  * dp_get_sec_type() - Get the security type
8585  * @soc: soc handle
8586  * @vdev_id: id of dp handle
8587  * @peer_mac: mac of datapath PEER handle
8588  * @sec_idx: Security id (mcast, ucast)
8589  *
8590  * Return: sec_type - Security type
8591  */
8592 static int dp_get_sec_type(struct cdp_soc_t *soc, uint8_t vdev_id,
8593 			   uint8_t *peer_mac, uint8_t sec_idx)
8594 {
8595 	int sec_type = 0;
8596 	struct dp_peer *peer =
8597 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
8598 						       peer_mac, 0, vdev_id,
8599 						       DP_MOD_ID_CDP);
8600 
8601 	if (!peer) {
8602 		dp_cdp_err("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
8603 		return sec_type;
8604 	}
8605 
8606 	if (!peer->txrx_peer) {
8607 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8608 		dp_peer_debug("%pK: txrx peer is NULL!\n", soc);
8609 		return sec_type;
8610 	}
8611 	sec_type = peer->txrx_peer->security[sec_idx].sec_type;
8612 
8613 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8614 	return sec_type;
8615 }
8616 
8617 /*
8618  * dp_peer_authorize() - authorize txrx peer
8619  * @soc_hdl: soc handle
8620  * @vdev_id: id of dp handle
8621  * @peer_mac: mac of datapath PEER handle
8622  * @authorize: authorize flag (1 - authorize, 0 - unauthorize)
8623  * Return: QDF_STATUS
8624  */
8625 static QDF_STATUS
8626 dp_peer_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8627 		  uint8_t *peer_mac, uint32_t authorize)
8628 {
8629 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8630 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8631 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac,
8632 							      0, vdev_id,
8633 							      DP_MOD_ID_CDP);
8634 
8635 	if (!peer) {
8636 		dp_cdp_debug("%pK: Peer is NULL!\n", soc);
8637 		status = QDF_STATUS_E_FAILURE;
8638 	} else {
8639 		peer->authorize = authorize ? 1 : 0;
8640 		if (peer->txrx_peer)
8641 			peer->txrx_peer->authorize = peer->authorize;
8642 
8643 		if (!peer->authorize)
8644 			dp_peer_flush_frags(soc_hdl, vdev_id, peer_mac);
8645 
8646 		dp_mlo_peer_authorize(soc, peer);
8647 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8648 	}
8649 
8650 	return status;
8651 }
8652 
8653 /*
8654  * dp_peer_get_authorize() - get peer authorize status
8655  * @soc_hdl: soc handle
8656  * @vdev_id: id of dp handle
8657  * @peer_mac: mac of datapath PEER handle
8658  *
8659  * Return: true if the peer is authorized, false otherwise
8660  */
8661 static bool
8662 dp_peer_get_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8663 		      uint8_t *peer_mac)
8664 {
8665 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8666 	bool authorize = false;
8667 	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
8668 						      0, vdev_id,
8669 						      DP_MOD_ID_CDP);
8670 
8671 	if (!peer) {
8672 		dp_cdp_debug("%pK: Peer is NULL!\n", soc);
8673 		return authorize;
8674 	}
8675 
8676 	authorize = peer->authorize;
8677 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8678 
8679 	return authorize;
8680 }
8681 
8682 /**
8683  * dp_vdev_unref_delete() - check and process vdev delete
8684  * @soc: DP specific soc pointer
8685  * @vdev: DP specific vdev pointer
8686  * @mod_id: module id
8687  * Return: void
8688  */
8689 void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev,
8690 			  enum dp_mod_id mod_id)
8691 {
8692 	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
8693 	void *vdev_delete_context = NULL;
8694 	uint8_t vdev_id = vdev->vdev_id;
8695 	struct dp_pdev *pdev = vdev->pdev;
8696 	struct dp_vdev *tmp_vdev = NULL;
8697 	uint8_t found = 0;
8698 
8699 	QDF_ASSERT(qdf_atomic_dec_return(&vdev->mod_refs[mod_id]) >= 0);
8700 
8701 	/* Return if this is not the last reference */
8702 	if (!qdf_atomic_dec_and_test(&vdev->ref_cnt))
8703 		return;
8704 
8705 	/*
8706 	 * delete.pending should already be set, since the last reference
8707 	 * must be released only after cdp_vdev_detach() is called.
8708 	 *
8709 	 * If this assert is hit, there is a reference count issue.
8710 	 */
8711 	QDF_ASSERT(vdev->delete.pending);
8712 
8713 	vdev_delete_cb = vdev->delete.callback;
8714 	vdev_delete_context = vdev->delete.context;
8715 
8716 	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")- its last peer is done",
8717 		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
8718 
8719 	if (wlan_op_mode_monitor == vdev->opmode) {
8720 		dp_monitor_vdev_delete(soc, vdev);
8721 		goto free_vdev;
8722 	}
8723 
8724 	/* all peers are gone, go ahead and delete it */
8725 	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
8726 			FLOW_TYPE_VDEV, vdev_id);
8727 	dp_tx_vdev_detach(vdev);
8728 	dp_monitor_vdev_detach(vdev);
8729 
8730 free_vdev:
8731 	qdf_spinlock_destroy(&vdev->peer_list_lock);
8732 
8733 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
8734 	TAILQ_FOREACH(tmp_vdev, &soc->inactive_vdev_list,
8735 		      inactive_list_elem) {
8736 		if (tmp_vdev == vdev) {
8737 			found = 1;
8738 			break;
8739 		}
8740 	}
8741 	if (found)
8742 		TAILQ_REMOVE(&soc->inactive_vdev_list, vdev,
8743 			     inactive_list_elem);
8744 	/* this vdev has now been removed from the inactive list */
8745 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
8746 
8747 	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")",
8748 		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
8749 	wlan_minidump_remove(vdev, sizeof(*vdev), soc->ctrl_psoc,
8750 			     WLAN_MD_DP_VDEV, "dp_vdev");
8751 	qdf_mem_free(vdev);
8752 	vdev = NULL;
8753 
8754 	if (vdev_delete_cb)
8755 		vdev_delete_cb(vdev_delete_context);
8756 }
8757 
8758 qdf_export_symbol(dp_vdev_unref_delete);
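
/*
 * Reference counting model (informational): the vdev create path takes
 * one DP_MOD_ID_CONFIG reference at creation, and every lookup through
 * dp_vdev_get_ref_by_id() takes a per-module reference that its caller
 * must drop with dp_vdev_unref_delete(). Only when the overall ref_cnt
 * drops to zero does the function above free the vdev and invoke the
 * OL delete callback saved in vdev->delete.
 */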
8759 
8760 /*
8761  * dp_peer_unref_delete() - unref and delete peer
8762  * @peer: Datapath peer handle
8763  * @mod_id: ID of the module releasing the reference
8764  *
8765  */
8766 void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id mod_id)
8767 {
8768 	struct dp_vdev *vdev = peer->vdev;
8769 	struct dp_pdev *pdev = vdev->pdev;
8770 	struct dp_soc *soc = pdev->soc;
8771 	uint16_t peer_id;
8772 	struct dp_peer *tmp_peer;
8773 	bool found = false;
8774 
8775 	if (mod_id > DP_MOD_ID_RX)
8776 		QDF_ASSERT(qdf_atomic_dec_return(&peer->mod_refs[mod_id]) >= 0);
8777 
8778 	/*
8779 	 * Hold the lock all the way from checking if the peer ref count
8780 	 * is zero until the peer references are removed from the hash
8781 	 * table and vdev list (if the peer ref count is zero).
8782 	 * This protects against a new HL tx operation starting to use the
8783 	 * peer object just after this function concludes it's done being used.
8784 	 * Furthermore, the lock needs to be held while checking whether the
8785 	 * vdev's list of peers is empty, to make sure that list is not modified
8786 	 * concurrently with the empty check.
8787 	 */
8788 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
8789 		peer_id = peer->peer_id;
8790 
8791 		/*
8792 		 * Make sure that the reference to the peer in
8793 		 * peer object map is removed
8794 		 */
8795 		QDF_ASSERT(peer_id == HTT_INVALID_PEER);
8796 
8797 		dp_peer_info("Deleting peer %pK ("QDF_MAC_ADDR_FMT")", peer,
8798 			     QDF_MAC_ADDR_REF(peer->mac_addr.raw));
8799 
8800 		dp_peer_sawf_ctx_free(soc, peer);
8801 
8802 		wlan_minidump_remove(peer, sizeof(*peer), soc->ctrl_psoc,
8803 				     WLAN_MD_DP_PEER, "dp_peer");
8804 
8805 		qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
8806 		TAILQ_FOREACH(tmp_peer, &soc->inactive_peer_list,
8807 			      inactive_list_elem) {
8808 			if (tmp_peer == peer) {
8809 				found = 1;
8810 				break;
8811 			}
8812 		}
8813 		if (found)
8814 			TAILQ_REMOVE(&soc->inactive_peer_list, peer,
8815 				     inactive_list_elem);
8816 		/* delete this peer from the list */
8817 		qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
8818 		DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
8819 		dp_peer_update_state(soc, peer, DP_PEER_STATE_FREED);
8820 
8821 		/* cleanup the peer data */
8822 		dp_peer_cleanup(vdev, peer);
8823 
8824 		if (!IS_MLO_DP_MLD_PEER(peer))
8825 			dp_monitor_peer_detach(soc, peer);
8826 
8827 		qdf_spinlock_destroy(&peer->peer_state_lock);
8828 
8829 		dp_txrx_peer_detach(soc, peer);
8830 		qdf_mem_free(peer);
8831 
8832 		/*
8833 		 * Decrement ref count taken at peer create
8834 		 */
8835 		dp_peer_info("Deleted peer. Unref vdev %pK, vdev_ref_cnt %d",
8836 			     vdev, qdf_atomic_read(&vdev->ref_cnt));
8837 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CHILD);
8838 	}
8839 }
8840 
8841 qdf_export_symbol(dp_peer_unref_delete);
8842 
8843 /*
8844  * dp_txrx_peer_unref_delete() - unref and delete peer
8845  * @handle: Datapath txrx ref handle
8846  * @mod_id: Module ID of the caller
8847  *
8848  */
8849 void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle,
8850 			       enum dp_mod_id mod_id)
8851 {
8852 	dp_peer_unref_delete((struct dp_peer *)handle, mod_id);
8853 }
8854 
8855 qdf_export_symbol(dp_txrx_peer_unref_delete);
8856 
8857 /*
8858  * dp_peer_delete_wifi3() - Delete txrx peer
8859  * @soc_hdl: soc handle
8860  * @vdev_id: id of dp handle
8861  * @peer_mac: mac of datapath PEER handle
8862  * @bitmap: bitmap indicating special handling of request.
8863  * @peer_type: peer type (link or MLD)
8864  *
8865  */
8866 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
8867 				       uint8_t vdev_id,
8868 				       uint8_t *peer_mac, uint32_t bitmap,
8869 				       enum cdp_peer_type peer_type)
8870 {
8871 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8872 	struct dp_peer *peer;
8873 	struct cdp_peer_info peer_info = { 0 };
8874 	struct dp_vdev *vdev = NULL;
8875 
8876 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac,
8877 				 false, peer_type);
8878 	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
8879 
8880 	/* Peer can be null for monitor vap mac address */
8881 	if (!peer) {
8882 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
8883 			  "%s: Invalid peer\n", __func__);
8884 		return QDF_STATUS_E_FAILURE;
8885 	}
8886 
8887 	if (!peer->valid) {
8888 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8889 		dp_err("Invalid peer: "QDF_MAC_ADDR_FMT,
8890 			QDF_MAC_ADDR_REF(peer_mac));
8891 		return QDF_STATUS_E_ALREADY;
8892 	}
8893 
8894 	vdev = peer->vdev;
8895 
8896 	if (!vdev) {
8897 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8898 		return QDF_STATUS_E_FAILURE;
8899 	}
8900 
8901 	peer->valid = 0;
8902 
8903 	dp_init_info("%pK: peer %pK (" QDF_MAC_ADDR_FMT ")",
8904 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
8905 
8906 	dp_local_peer_id_free(peer->vdev->pdev, peer);
8907 
8908 	/* Drop all rx packets before deleting peer */
8909 	dp_clear_peer_internal(soc, peer);
8910 
8911 	qdf_spinlock_destroy(&peer->peer_info_lock);
8912 	dp_peer_multipass_list_remove(peer);
8913 
8914 	/* remove the reference to the peer from the hash table */
8915 	dp_peer_find_hash_remove(soc, peer);
8916 
8917 	dp_peer_vdev_list_remove(soc, vdev, peer);
8918 
8919 	dp_peer_mlo_delete(peer);
8920 
8921 	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
8922 	TAILQ_INSERT_TAIL(&soc->inactive_peer_list, peer,
8923 			  inactive_list_elem);
8924 	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
8925 
8926 	/*
8927 	 * Remove the reference added during peer_attach.
8928 	 * The peer will still be left allocated until the
8929 	 * PEER_UNMAP message arrives to remove the other
8930 	 * reference, added by the PEER_MAP message.
8931 	 */
8932 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
8933 	/*
8934 	 * Remove the reference taken above
8935 	 */
8936 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8937 
8938 	return QDF_STATUS_SUCCESS;
8939 }
8940 
8941 #ifdef DP_RX_UDP_OVER_PEER_ROAM
8942 static QDF_STATUS dp_update_roaming_peer_wifi3(struct cdp_soc_t *soc_hdl,
8943 					       uint8_t vdev_id,
8944 					       uint8_t *peer_mac,
8945 					       uint32_t auth_status)
8946 {
8947 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8948 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8949 						     DP_MOD_ID_CDP);
8950 	if (!vdev)
8951 		return QDF_STATUS_E_FAILURE;
8952 
8953 	vdev->roaming_peer_status = auth_status;
8954 	qdf_mem_copy(vdev->roaming_peer_mac.raw, peer_mac,
8955 		     QDF_MAC_ADDR_SIZE);
8956 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8957 
8958 	return QDF_STATUS_SUCCESS;
8959 }
8960 #endif
8961 /*
8962  * dp_get_vdev_mac_addr_wifi3() - Get MAC address of a vdev
8963  * @soc_hdl: Datapath soc handle
8964  * @vdev_id: virtual interface id
8965  *
8966  * Return: MAC address on success, NULL on failure.
8967  *
8968  */
8969 static uint8_t *dp_get_vdev_mac_addr_wifi3(struct cdp_soc_t *soc_hdl,
8970 					   uint8_t vdev_id)
8971 {
8972 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8973 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8974 						     DP_MOD_ID_CDP);
8975 	uint8_t *mac = NULL;
8976 
8977 	if (!vdev)
8978 		return NULL;
8979 
8980 	mac = vdev->mac_addr.raw;
8981 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8982 
8983 	return mac;
8984 }
8985 
8986 /*
8987  * dp_vdev_set_wds() - Enable/disable WDS on a vdev
8988  * @soc_hdl: DP soc handle
8989  * @vdev_id: id of DP VDEV handle
8990  * @val: 1 to enable WDS, 0 to disable
8991  *
8992  * Return: QDF_STATUS
8993  */
8994 static int dp_vdev_set_wds(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8995 			   uint32_t val)
8996 {
8997 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8998 	struct dp_vdev *vdev =
8999 		dp_vdev_get_ref_by_id((struct dp_soc *)soc, vdev_id,
9000 				      DP_MOD_ID_CDP);
9001 
9002 	if (!vdev)
9003 		return QDF_STATUS_E_FAILURE;
9004 
9005 	vdev->wds_enabled = val;
9006 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9007 
9008 	return QDF_STATUS_SUCCESS;
9009 }
9010 
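/**
 * dp_get_opmode() - Get the operating mode of a vdev
 * @soc_hdl: Datapath soc handle
 * @vdev_id: virtual interface id
 *
 * Return: operating mode on success, -EINVAL if the vdev is not found
 */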
9011 static int dp_get_opmode(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
9012 {
9013 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9014 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9015 						     DP_MOD_ID_CDP);
9016 	int opmode;
9017 
9018 	if (!vdev) {
9019 		dp_err("vdev for id %d is NULL", vdev_id);
9020 		return -EINVAL;
9021 	}
9022 	opmode = vdev->opmode;
9023 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9024 
9025 	return opmode;
9026 }
9027 
9028 /**
9029  * dp_get_os_rx_handles_from_vdev_wifi3() - Get os rx handles for a vdev
9030  * @soc_hdl: ol_txrx_soc_handle
9031  * @vdev_id: vdev id for which os rx handles are needed
9032  * @stack_fn_p: pointer to stack function pointer
9033  * @osif_vdev_p: pointer to ol_osif_vdev_handle
9034  *
9035  * Return: void
9036  */
9037 static
9038 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_soc_t *soc_hdl,
9039 					  uint8_t vdev_id,
9040 					  ol_txrx_rx_fp *stack_fn_p,
9041 					  ol_osif_vdev_handle *osif_vdev_p)
9042 {
9043 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9044 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9045 						     DP_MOD_ID_CDP);
9046 
9047 	if (qdf_unlikely(!vdev)) {
9048 		*stack_fn_p = NULL;
9049 		*osif_vdev_p = NULL;
9050 		return;
9051 	}
9052 	*stack_fn_p = vdev->osif_rx_stack;
9053 	*osif_vdev_p = vdev->osif_vdev;
9054 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9055 }
9056 
9057 /**
9058  * dp_get_ctrl_pdev_from_vdev_wifi3() - Get control pdev of vdev
9059  * @soc_hdl: datapath soc handle
9060  * @vdev_id: virtual device/interface id
9061  *
9062  * Return: Handle to control pdev
9063  */
9064 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(
9065 						struct cdp_soc_t *soc_hdl,
9066 						uint8_t vdev_id)
9067 {
9068 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9069 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9070 						     DP_MOD_ID_CDP);
9071 	struct dp_pdev *pdev;
9072 
9073 	if (!vdev)
9074 		return NULL;
9075 
9076 	pdev = vdev->pdev;
9077 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9078 	return pdev ? (struct cdp_cfg *)pdev->wlan_cfg_ctx : NULL;
9079 }
9080 
9081 /**
9082  * dp_get_tx_pending() - read pending tx
9083  * @pdev_handle: Datapath PDEV handle
9084  *
9085  * Return: outstanding tx
9086  */
9087 static int32_t dp_get_tx_pending(struct cdp_pdev *pdev_handle)
9088 {
9089 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
9090 
9091 	return qdf_atomic_read(&pdev->num_tx_outstanding);
9092 }
9093 
9094 /**
9095  * dp_get_peer_mac_from_peer_id() - get peer mac
9096  * @soc: Datapath soc handle
9097  * @peer_id: Peer ID
9098  * @peer_mac: MAC addr of PEER
9099  *
9100  * Return: QDF_STATUS
9101  */
9102 static QDF_STATUS dp_get_peer_mac_from_peer_id(struct cdp_soc_t *soc,
9103 					       uint32_t peer_id,
9104 					       uint8_t *peer_mac)
9105 {
9106 	struct dp_peer *peer;
9107 
9108 	if (soc && peer_mac) {
9109 		peer = dp_peer_get_ref_by_id((struct dp_soc *)soc,
9110 					     (uint16_t)peer_id,
9111 					     DP_MOD_ID_CDP);
9112 		if (peer) {
9113 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
9114 				     QDF_MAC_ADDR_SIZE);
9115 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9116 			return QDF_STATUS_SUCCESS;
9117 		}
9118 	}
9119 
9120 	return QDF_STATUS_E_FAILURE;
9121 }
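
/*
 * Usage sketch (hypothetical caller, illustrative comment only): the id
 * based lookup above lets control-path code recover a peer MAC address
 * without holding a peer pointer:
 *
 *	uint8_t mac[QDF_MAC_ADDR_SIZE];
 *
 *	if (dp_get_peer_mac_from_peer_id(soc, peer_id, mac) ==
 *	    QDF_STATUS_SUCCESS)
 *		dp_info("peer " QDF_MAC_ADDR_FMT, QDF_MAC_ADDR_REF(mac));
 */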
9122 
9123 #ifdef MESH_MODE_SUPPORT
9124 static
9125 void dp_vdev_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
9126 {
9127 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
9128 
9129 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
9130 	vdev->mesh_vdev = val;
9131 	if (val)
9132 		vdev->skip_sw_tid_classification |=
9133 			DP_TX_MESH_ENABLED;
9134 	else
9135 		vdev->skip_sw_tid_classification &=
9136 			~DP_TX_MESH_ENABLED;
9137 }
9138 
9139 /*
9140  * dp_vdev_set_mesh_rx_filter() - to set the mesh rx filter
9141  * @vdev_hdl: virtual device object
9142  * @val: value to be set
9143  *
9144  * Return: void
9145  */
9146 static
9147 void dp_vdev_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
9148 {
9149 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
9150 
9151 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
9152 	vdev->mesh_rx_filter = val;
9153 }
9154 #endif
9155 
9156 /*
9157  * dp_vdev_set_hlos_tid_override() - to set hlos tid override
9158  * @vdev: virtual device object
9159  * @val: value to be set
9160  *
9161  * Return: void
9162  */
9163 static
9164 void dp_vdev_set_hlos_tid_override(struct dp_vdev *vdev, uint32_t val)
9165 {
9166 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
9167 	if (val)
9168 		vdev->skip_sw_tid_classification |=
9169 			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
9170 	else
9171 		vdev->skip_sw_tid_classification &=
9172 			~DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
9173 }
9174 
9175 /*
9176  * dp_vdev_get_hlos_tid_override() - to get hlos tid override flag
9177  * @vdev_hdl: virtual device object
9178  *
9179  * Return: 1 if the flag is set, 0 otherwise
9181  */
9182 static
9183 uint8_t dp_vdev_get_hlos_tid_override(struct cdp_vdev *vdev_hdl)
9184 {
9185 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
9186 
9187 	return !!(vdev->skip_sw_tid_classification &
9188 			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED);
9189 }
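
/*
 * Flag semantics sketch (illustrative comment only):
 * skip_sw_tid_classification is a bitmask, so the mesh and HLOS TID
 * override bits can be set and tested independently, e.g.:
 *
 *	dp_vdev_set_hlos_tid_override(vdev, 1);
 *	if (dp_vdev_get_hlos_tid_override((struct cdp_vdev *)vdev))
 *		...SW TID classification is skipped for this vdev...
 */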
9190 
9191 #ifdef VDEV_PEER_PROTOCOL_COUNT
9192 static void dp_enable_vdev_peer_protocol_count(struct cdp_soc_t *soc_hdl,
9193 					       int8_t vdev_id,
9194 					       bool enable)
9195 {
9196 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9197 	struct dp_vdev *vdev;
9198 
9199 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
9200 	if (!vdev)
9201 		return;
9202 
9203 	dp_info("enable %d vdev_id %d", enable, vdev_id);
9204 	vdev->peer_protocol_count_track = enable;
9205 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9206 }
9207 
9208 static void dp_enable_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
9209 						   int8_t vdev_id,
9210 						   int drop_mask)
9211 {
9212 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9213 	struct dp_vdev *vdev;
9214 
9215 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
9216 	if (!vdev)
9217 		return;
9218 
9219 	dp_info("drop_mask %d vdev_id %d", drop_mask, vdev_id);
9220 	vdev->peer_protocol_count_dropmask = drop_mask;
9221 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9222 }
9223 
9224 static int dp_is_vdev_peer_protocol_count_enabled(struct cdp_soc_t *soc_hdl,
9225 						  int8_t vdev_id)
9226 {
9227 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9228 	struct dp_vdev *vdev;
9229 	int peer_protocol_count_track;
9230 
9231 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
9232 	if (!vdev)
9233 		return 0;
9234 
9235 	dp_info("enable %d vdev_id %d", vdev->peer_protocol_count_track,
9236 		vdev_id);
9237 	peer_protocol_count_track =
9238 		vdev->peer_protocol_count_track;
9239 
9240 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9241 	return peer_protocol_count_track;
9242 }
9243 
9244 static int dp_get_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
9245 					       int8_t vdev_id)
9246 {
9247 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9248 	struct dp_vdev *vdev;
9249 	int peer_protocol_count_dropmask;
9250 
9251 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
9252 	if (!vdev)
9253 		return 0;
9254 
9255 	dp_info("drop_mask %d vdev_id %d", vdev->peer_protocol_count_dropmask,
9256 		vdev_id);
9257 	peer_protocol_count_dropmask =
9258 		vdev->peer_protocol_count_dropmask;
9259 
9260 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9261 	return peer_protocol_count_dropmask;
9262 }
9263 
9264 #endif
9265 
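/**
 * dp_check_pdev_exists() - Validate a pdev handle before use
 * @soc: DP soc handle
 * @data: pdev handle to look up in the soc pdev list
 *
 * Return: true if @data is a live pdev of @soc, else false
 */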
9266 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data)
9267 {
9268 	uint8_t pdev_count;
9269 
9270 	for (pdev_count = 0; pdev_count < MAX_PDEV_CNT; pdev_count++) {
9271 		if (soc->pdev_list[pdev_count] &&
9272 		    soc->pdev_list[pdev_count] == data)
9273 			return true;
9274 	}
9275 	return false;
9276 }
9277 
9278 /**
9279  * dp_rx_bar_stats_cb(): BAR received stats callback
9280  * @soc: SOC handle
9281  * @cb_ctxt: Callback context (the requesting pdev)
9282  * @reo_status: Reo status
9283  *
9284  * return: void
9285  */
9286 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
9287 	union hal_reo_status *reo_status)
9288 {
9289 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
9290 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
9291 
9292 	if (!dp_check_pdev_exists(soc, pdev)) {
9293 		dp_err_rl("pdev doesn't exist");
9294 		return;
9295 	}
9296 
9297 	if (!qdf_atomic_read(&soc->cmn_init_done))
9298 		return;
9299 
9300 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
9301 		DP_PRINT_STATS("REO stats failure %d",
9302 			       queue_status->header.status);
9303 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
9304 		return;
9305 	}
9306 
9307 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
9308 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
9309 
9310 }
9311 
9312 /**
9313  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
9314  * @vdev: DP VDEV handle
9315  * @vdev_stats: buffer to hold the aggregated vdev stats
9316  * return: void
9317  */
9318 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
9319 			     struct cdp_vdev_stats *vdev_stats)
9320 {
9321 	struct dp_soc *soc = NULL;
9322 
9323 	if (!vdev || !vdev->pdev)
9324 		return;
9325 
9326 	soc = vdev->pdev->soc;
9327 
9328 	dp_update_vdev_ingress_stats(vdev);
9329 
9330 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
9331 
9332 	dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, vdev_stats,
9333 			     DP_MOD_ID_GENERIC_STATS);
9334 
9335 	dp_update_vdev_rate_stats(vdev_stats, &vdev->stats);
9336 
9337 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9338 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
9339 			     vdev_stats, vdev->vdev_id,
9340 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
9341 #endif
9342 }
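
/*
 * Iteration sketch (illustrative comment only): dp_vdev_iterate_peer()
 * invokes the callback once per peer on the vdev while holding a
 * temporary peer reference, so aggregation callbacks follow this shape
 * (peer_cb is a hypothetical name):
 *
 *	static void peer_cb(struct dp_soc *soc, struct dp_peer *peer,
 *			    void *arg)
 *	{
 *		...fold this peer's stats into the buffer passed as arg...
 *	}
 *
 *	dp_vdev_iterate_peer(vdev, peer_cb, arg, DP_MOD_ID_GENERIC_STATS);
 */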
9343 
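/**
 * dp_aggregate_pdev_stats() - Consolidate stats at PDEV level
 * @pdev: DP PDEV handle
 *
 * Return: void
 */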
9344 void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
9345 {
9346 	struct dp_vdev *vdev = NULL;
9347 	struct dp_soc *soc;
9348 	struct cdp_vdev_stats *vdev_stats =
9349 			qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
9350 
9351 	if (!vdev_stats) {
9352 		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
9353 			   pdev->soc);
9354 		return;
9355 	}
9356 
9357 	soc = pdev->soc;
9358 
9359 	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
9360 	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
9361 	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
9362 	qdf_mem_zero(&pdev->stats.rx_i, sizeof(pdev->stats.rx_i));
9363 
9364 	if (dp_monitor_is_enable_mcopy_mode(pdev))
9365 		dp_monitor_invalid_peer_update_pdev_stats(soc, pdev);
9366 
9367 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
9368 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
9369 
9370 		dp_aggregate_vdev_stats(vdev, vdev_stats);
9371 		dp_update_pdev_stats(pdev, vdev_stats);
9372 		dp_update_pdev_ingress_stats(pdev, vdev);
9373 	}
9374 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
9375 	qdf_mem_free(vdev_stats);
9376 
9377 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9378 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
9379 			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
9380 #endif
9381 }
9382 
9383 /**
9384  * dp_vdev_getstats() - get vdev packet level stats
9385  * @vdev_handle: Datapath VDEV handle
9386  * @stats: cdp network device stats structure
9387  *
9388  * Return: QDF_STATUS
9389  */
9390 static QDF_STATUS dp_vdev_getstats(struct cdp_vdev *vdev_handle,
9391 				   struct cdp_dev_stats *stats)
9392 {
9393 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
9394 	struct dp_pdev *pdev;
9395 	struct dp_soc *soc;
9396 	struct cdp_vdev_stats *vdev_stats;
9397 
9398 	if (!vdev)
9399 		return QDF_STATUS_E_FAILURE;
9400 
9401 	pdev = vdev->pdev;
9402 	if (!pdev)
9403 		return QDF_STATUS_E_FAILURE;
9404 
9405 	soc = pdev->soc;
9406 
9407 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
9408 
9409 	if (!vdev_stats) {
9410 		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
9411 			   soc);
9412 		return QDF_STATUS_E_FAILURE;
9413 	}
9414 
9415 	dp_aggregate_vdev_stats(vdev, vdev_stats);
9416 
9417 	stats->tx_packets = vdev_stats->tx.comp_pkt.num;
9418 	stats->tx_bytes = vdev_stats->tx.comp_pkt.bytes;
9419 
9420 	stats->tx_errors = vdev_stats->tx.tx_failed;
9421 	stats->tx_dropped = vdev_stats->tx_i.dropped.dropped_pkt.num +
9422 			    vdev_stats->tx_i.sg.dropped_host.num +
9423 			    vdev_stats->tx_i.mcast_en.dropped_map_error +
9424 			    vdev_stats->tx_i.mcast_en.dropped_self_mac +
9425 			    vdev_stats->tx_i.mcast_en.dropped_send_fail +
9426 			    vdev_stats->tx.nawds_mcast_drop;
9427 
9428 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
9429 		stats->rx_packets = vdev_stats->rx.to_stack.num;
9430 		stats->rx_bytes = vdev_stats->rx.to_stack.bytes;
9431 	} else {
9432 		stats->rx_packets = vdev_stats->rx_i.reo_rcvd_pkt.num +
9433 				    vdev_stats->rx_i.null_q_desc_pkt.num +
9434 				    vdev_stats->rx_i.routed_eapol_pkt.num;
9435 		stats->rx_bytes = vdev_stats->rx_i.reo_rcvd_pkt.bytes +
9436 				  vdev_stats->rx_i.null_q_desc_pkt.bytes +
9437 				  vdev_stats->rx_i.routed_eapol_pkt.bytes;
9438 	}
9439 
9440 	stats->rx_errors = vdev_stats->rx.err.mic_err +
9441 			   vdev_stats->rx.err.decrypt_err +
9442 			   vdev_stats->rx.err.fcserr +
9443 			   vdev_stats->rx.err.pn_err +
9444 			   vdev_stats->rx.err.oor_err +
9445 			   vdev_stats->rx.err.jump_2k_err +
9446 			   vdev_stats->rx.err.rxdma_wifi_parse_err;
9447 
9448 	stats->rx_dropped = vdev_stats->rx.mec_drop.num +
9449 			    vdev_stats->rx.multipass_rx_pkt_drop +
9450 			    vdev_stats->rx.peer_unauth_rx_pkt_drop +
9451 			    vdev_stats->rx.policy_check_drop +
9452 			    vdev_stats->rx.nawds_mcast_drop +
9453 			    vdev_stats->rx.mcast_3addr_drop;
9454 
9455 	qdf_mem_free(vdev_stats);
9456 
9457 	return QDF_STATUS_SUCCESS;
9458 }
9459 
9460 /**
9461  * dp_pdev_getstats() - get pdev packet level stats
9462  * @pdev_handle: Datapath PDEV handle
9463  * @stats: cdp network device stats structure
9464  *
9465  * Return: none
9466  */
9467 static void dp_pdev_getstats(struct cdp_pdev *pdev_handle,
9468 			     struct cdp_dev_stats *stats)
9469 {
9470 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
9471 
9472 	dp_aggregate_pdev_stats(pdev);
9473 
9474 	stats->tx_packets = pdev->stats.tx.comp_pkt.num;
9475 	stats->tx_bytes = pdev->stats.tx.comp_pkt.bytes;
9476 
9477 	stats->tx_errors = pdev->stats.tx.tx_failed;
9478 	stats->tx_dropped = pdev->stats.tx_i.dropped.dropped_pkt.num +
9479 			    pdev->stats.tx_i.sg.dropped_host.num +
9480 			    pdev->stats.tx_i.mcast_en.dropped_map_error +
9481 			    pdev->stats.tx_i.mcast_en.dropped_self_mac +
9482 			    pdev->stats.tx_i.mcast_en.dropped_send_fail +
9483 			    pdev->stats.tx.nawds_mcast_drop +
9484 			    pdev->stats.tso_stats.dropped_host.num;
9485 
9486 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(pdev->soc->wlan_cfg_ctx)) {
9487 		stats->rx_packets = pdev->stats.rx.to_stack.num;
9488 		stats->rx_bytes = pdev->stats.rx.to_stack.bytes;
9489 	} else {
9490 		stats->rx_packets = pdev->stats.rx_i.reo_rcvd_pkt.num +
9491 				    pdev->stats.rx_i.null_q_desc_pkt.num +
9492 				    pdev->stats.rx_i.routed_eapol_pkt.num;
9493 		stats->rx_bytes = pdev->stats.rx_i.reo_rcvd_pkt.bytes +
9494 				  pdev->stats.rx_i.null_q_desc_pkt.bytes +
9495 				  pdev->stats.rx_i.routed_eapol_pkt.bytes;
9496 	}
9497 
9498 	stats->rx_errors = pdev->stats.err.ip_csum_err +
9499 		pdev->stats.err.tcp_udp_csum_err +
9500 		pdev->stats.rx.err.mic_err +
9501 		pdev->stats.rx.err.decrypt_err +
9502 		pdev->stats.rx.err.fcserr +
9503 		pdev->stats.rx.err.pn_err +
9504 		pdev->stats.rx.err.oor_err +
9505 		pdev->stats.rx.err.jump_2k_err +
9506 		pdev->stats.rx.err.rxdma_wifi_parse_err;
9507 	stats->rx_dropped = pdev->stats.dropped.msdu_not_done +
9508 		pdev->stats.dropped.mec +
9509 		pdev->stats.dropped.mesh_filter +
9510 		pdev->stats.dropped.wifi_parse +
9511 		pdev->stats.dropped.mon_rx_drop +
9512 		pdev->stats.dropped.mon_radiotap_update_err +
9513 		pdev->stats.rx.mec_drop.num +
9514 		pdev->stats.rx.multipass_rx_pkt_drop +
9515 		pdev->stats.rx.peer_unauth_rx_pkt_drop +
9516 		pdev->stats.rx.policy_check_drop +
9517 		pdev->stats.rx.nawds_mcast_drop +
9518 		pdev->stats.rx.mcast_3addr_drop;
9519 }
9520 
9521 /**
9522  * dp_get_device_stats() - get interface level packet stats
9523  * @soc_hdl: soc handle
9524  * @id: vdev_id or pdev_id based on type
9525  * @stats: cdp network device stats structure
9526  * @type: device type pdev/vdev
9527  *
9528  * Return: QDF_STATUS
9529  */
9530 static QDF_STATUS dp_get_device_stats(struct cdp_soc_t *soc_hdl, uint8_t id,
9531 				      struct cdp_dev_stats *stats,
9532 				      uint8_t type)
9533 {
9534 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9535 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
9536 	struct dp_vdev *vdev;
9537 
9538 	switch (type) {
9539 	case UPDATE_VDEV_STATS:
9540 		vdev = dp_vdev_get_ref_by_id(soc, id, DP_MOD_ID_CDP);
9541 
9542 		if (vdev) {
9543 			status = dp_vdev_getstats((struct cdp_vdev *)vdev,
9544 						  stats);
9545 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9546 		}
9547 		return status;
9548 	case UPDATE_PDEV_STATS:
9549 		{
9550 			struct dp_pdev *pdev =
9551 				dp_get_pdev_from_soc_pdev_id_wifi3(
9552 						(struct dp_soc *)soc,
9553 						 id);
9554 			if (pdev) {
9555 				dp_pdev_getstats((struct cdp_pdev *)pdev,
9556 						 stats);
9557 				return QDF_STATUS_SUCCESS;
9558 			}
9559 		}
9560 		break;
9561 	default:
9562 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9563 			"apstats cannot be updated for this input "
9564 			"type %d", type);
9565 		break;
9566 	}
9567 
9568 	return QDF_STATUS_E_FAILURE;
9569 }
9570 
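/**
 * dp_srng_get_str_from_hal_ring_type() - Return printable ring type name
 * @ring_type: hal ring type
 *
 * Return: ring name string, or "Invalid" for an unknown type
 */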
9571 const
9572 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
9573 {
9574 	switch (ring_type) {
9575 	case REO_DST:
9576 		return "Reo_dst";
9577 	case REO_EXCEPTION:
9578 		return "Reo_exception";
9579 	case REO_CMD:
9580 		return "Reo_cmd";
9581 	case REO_REINJECT:
9582 		return "Reo_reinject";
9583 	case REO_STATUS:
9584 		return "Reo_status";
9585 	case WBM2SW_RELEASE:
9586 		return "wbm2sw_release";
9587 	case TCL_DATA:
9588 		return "tcl_data";
9589 	case TCL_CMD_CREDIT:
9590 		return "tcl_cmd_credit";
9591 	case TCL_STATUS:
9592 		return "tcl_status";
9593 	case SW2WBM_RELEASE:
9594 		return "sw2wbm_release";
9595 	case RXDMA_BUF:
9596 		return "Rxdma_buf";
9597 	case RXDMA_DST:
9598 		return "Rxdma_dst";
9599 	case RXDMA_MONITOR_BUF:
9600 		return "Rxdma_monitor_buf";
9601 	case RXDMA_MONITOR_DESC:
9602 		return "Rxdma_monitor_desc";
9603 	case RXDMA_MONITOR_STATUS:
9604 		return "Rxdma_monitor_status";
9605 	case RXDMA_MONITOR_DST:
9606 		return "Rxdma_monitor_destination";
9607 	case WBM_IDLE_LINK:
9608 		return "WBM_hw_idle_link";
9609 	case PPE2TCL:
9610 		return "PPE2TCL";
9611 	case REO2PPE:
9612 		return "REO2PPE";
9613 	default:
9614 		dp_err("Invalid ring type");
9615 		break;
9616 	}
9617 	return "Invalid";
9618 }
9619 
9620 /*
9621  * dp_print_napi_stats(): NAPI stats
9622  * @soc: soc handle
9623  */
9624 void dp_print_napi_stats(struct dp_soc *soc)
9625 {
9626 	hif_print_napi_stats(soc->hif_handle);
9627 }
9628 
9629 /**
9630  * dp_txrx_host_peer_stats_clr() - Reinitialize the txrx peer stats
9631  * @soc: Datapath soc
9632  * @peer: Datapath peer
9633  * @arg: argument to iter function
9634  *
9635  * Return: void
9636  */
9637 static inline void
9638 dp_txrx_host_peer_stats_clr(struct dp_soc *soc,
9639 			    struct dp_peer *peer,
9640 			    void *arg)
9641 {
9642 	struct dp_txrx_peer *txrx_peer = NULL;
9643 	struct dp_peer *tgt_peer = NULL;
9644 	struct cdp_interface_peer_stats peer_stats_intf;
9645 
9646 	qdf_mem_zero(&peer_stats_intf, sizeof(struct cdp_interface_peer_stats));
9647 
9648 	DP_STATS_CLR(peer);
9649 	/* Clear monitor peer stats */
9650 	dp_monitor_peer_reset_stats(soc, peer);
9651 
9652 	/* Clear MLD peer stats only when link peer is primary */
9653 	if (dp_peer_is_primary_link_peer(peer)) {
9654 		tgt_peer = dp_get_tgt_peer_from_peer(peer);
9655 		if (tgt_peer) {
9656 			DP_STATS_CLR(tgt_peer);
9657 			txrx_peer = tgt_peer->txrx_peer;
9658 			dp_txrx_peer_stats_clr(txrx_peer);
9659 		}
9660 	}
9661 
9662 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9663 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, peer->vdev->pdev->soc,
9664 			     &peer_stats_intf,  peer->peer_id,
9665 			     UPDATE_PEER_STATS, peer->vdev->pdev->pdev_id);
9666 #endif
9667 }
9668 
9669 #ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING
9670 static inline void dp_srng_clear_ring_usage_wm_stats(struct dp_soc *soc)
9671 {
9672 	int ring;
9673 
9674 	for (ring = 0; ring < soc->num_reo_dest_rings; ring++)
9675 		hal_srng_clear_ring_usage_wm_locked(soc->hal_soc,
9676 					    soc->reo_dest_ring[ring].hal_srng);
9677 }
9678 #else
9679 static inline void dp_srng_clear_ring_usage_wm_stats(struct dp_soc *soc)
9680 {
9681 }
9682 #endif
9683 
9684 /**
9685  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
9686  * @vdev: DP_VDEV handle
9687  * @soc: DP_SOC handle
9688  *
9689  * Return: QDF_STATUS
9690  */
9691 static inline QDF_STATUS
9692 dp_txrx_host_stats_clr(struct dp_vdev *vdev, struct dp_soc *soc)
9693 {
9694 	if (!vdev || !vdev->pdev)
9695 		return QDF_STATUS_E_FAILURE;
9696 
9697 	/*
9698 	 * If NSS offload is enabled, send a message to NSS FW to clear
9699 	 * the stats. Once NSS FW clears the statistics, clear the host
9700 	 * statistics as well.
9701 	 */
9702 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
9703 		if (soc->cdp_soc.ol_ops->nss_stats_clr)
9704 			soc->cdp_soc.ol_ops->nss_stats_clr(soc->ctrl_psoc,
9705 							   vdev->vdev_id);
9706 	}
9707 
9708 	dp_vdev_stats_hw_offload_target_clear(soc, vdev->pdev->pdev_id,
9709 					      (1 << vdev->vdev_id));
9710 
9711 	DP_STATS_CLR(vdev->pdev);
9712 	DP_STATS_CLR(vdev->pdev->soc);
9713 	DP_STATS_CLR(vdev);
9714 
9715 	hif_clear_napi_stats(vdev->pdev->soc->hif_handle);
9716 
9717 	dp_vdev_iterate_peer(vdev, dp_txrx_host_peer_stats_clr, NULL,
9718 			     DP_MOD_ID_GENERIC_STATS);
9719 
9720 	dp_srng_clear_ring_usage_wm_stats(soc);
9721 
9722 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9723 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
9724 			     &vdev->stats,  vdev->vdev_id,
9725 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
9726 #endif
9727 	return QDF_STATUS_SUCCESS;
9728 }
9729 
9730 /**
9731  * dp_get_peer_calibr_stats() - Get peer calibrated stats
9732  * @peer: Datapath peer
9733  * @peer_stats: buffer for peer stats
9734  *
9735  * Return: none
9736  */
9737 static inline
9738 void dp_get_peer_calibr_stats(struct dp_peer *peer,
9739 			      struct cdp_peer_stats *peer_stats)
9740 {
9741 	struct dp_peer *tgt_peer;
9742 
9743 	tgt_peer = dp_get_tgt_peer_from_peer(peer);
9744 	if (!tgt_peer)
9745 		return;
9746 
9747 	peer_stats->tx.last_per = tgt_peer->stats.tx.last_per;
9748 	peer_stats->tx.tx_bytes_success_last =
9749 				tgt_peer->stats.tx.tx_bytes_success_last;
9750 	peer_stats->tx.tx_data_success_last =
9751 					tgt_peer->stats.tx.tx_data_success_last;
9752 	peer_stats->tx.tx_byte_rate = tgt_peer->stats.tx.tx_byte_rate;
9753 	peer_stats->tx.tx_data_rate = tgt_peer->stats.tx.tx_data_rate;
9754 	peer_stats->tx.tx_data_ucast_last =
9755 					tgt_peer->stats.tx.tx_data_ucast_last;
9756 	peer_stats->tx.tx_data_ucast_rate =
9757 					tgt_peer->stats.tx.tx_data_ucast_rate;
9758 	peer_stats->tx.inactive_time = tgt_peer->stats.tx.inactive_time;
9759 	peer_stats->rx.rx_bytes_success_last =
9760 				tgt_peer->stats.rx.rx_bytes_success_last;
9761 	peer_stats->rx.rx_data_success_last =
9762 				tgt_peer->stats.rx.rx_data_success_last;
9763 	peer_stats->rx.rx_byte_rate = tgt_peer->stats.rx.rx_byte_rate;
9764 	peer_stats->rx.rx_data_rate = tgt_peer->stats.rx.rx_data_rate;
9765 }
9766 
9767 /**
9768  * dp_get_peer_basic_stats() - Get peer basic stats
9769  * @peer: Datapath peer
9770  * @peer_stats: buffer for peer stats
9771  *
9772  * Return: none
9773  */
9774 #ifdef QCA_ENHANCED_STATS_SUPPORT
9775 static inline
9776 void dp_get_peer_basic_stats(struct dp_peer *peer,
9777 			     struct cdp_peer_stats *peer_stats)
9778 {
9779 	struct dp_txrx_peer *txrx_peer;
9780 
9781 	txrx_peer = dp_get_txrx_peer(peer);
9782 	if (!txrx_peer)
9783 		return;
9784 
9785 	peer_stats->tx.comp_pkt.num += txrx_peer->comp_pkt.num;
9786 	peer_stats->tx.comp_pkt.bytes += txrx_peer->comp_pkt.bytes;
9787 	peer_stats->tx.tx_failed += txrx_peer->tx_failed;
9788 	peer_stats->rx.to_stack.num += txrx_peer->to_stack.num;
9789 	peer_stats->rx.to_stack.bytes += txrx_peer->to_stack.bytes;
9790 }
9791 #else
9792 static inline
9793 void dp_get_peer_basic_stats(struct dp_peer *peer,
9794 			     struct cdp_peer_stats *peer_stats)
9795 {
9796 	struct dp_txrx_peer *txrx_peer;
9797 
9798 	txrx_peer = dp_get_txrx_peer(peer);
9799 	if (!txrx_peer)
9800 		return;
9801 
9802 	peer_stats->tx.comp_pkt.num += txrx_peer->comp_pkt.num;
9803 	peer_stats->tx.comp_pkt.bytes += txrx_peer->comp_pkt.bytes;
9804 	peer_stats->tx.tx_failed += txrx_peer->tx_failed;
9805 	peer_stats->rx.to_stack.num += txrx_peer->to_stack.num;
9806 	peer_stats->rx.to_stack.bytes += txrx_peer->to_stack.bytes;
9807 }
9808 #endif
9809 
9810 /**
9811  * dp_get_peer_per_pkt_stats() - Get peer per pkt stats
9812  * @peer: Datapath peer
9813  * @peer_stats: buffer for peer stats
9814  *
9815  * Return: none
9816  */
9817 #ifdef QCA_ENHANCED_STATS_SUPPORT
9818 static inline
9819 void dp_get_peer_per_pkt_stats(struct dp_peer *peer,
9820 			       struct cdp_peer_stats *peer_stats)
9821 {
9822 	struct dp_txrx_peer *txrx_peer;
9823 	struct dp_peer_per_pkt_stats *per_pkt_stats;
9824 
9825 	txrx_peer = dp_get_txrx_peer(peer);
9826 	if (!txrx_peer)
9827 		return;
9828 
9829 	per_pkt_stats = &txrx_peer->stats.per_pkt_stats;
9830 	DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats);
9831 }
9832 #else
9833 static inline
9834 void dp_get_peer_per_pkt_stats(struct dp_peer *peer,
9835 			       struct cdp_peer_stats *peer_stats)
9836 {
9837 	struct dp_txrx_peer *txrx_peer;
9838 	struct dp_peer_per_pkt_stats *per_pkt_stats;
9839 
9840 	txrx_peer = dp_get_txrx_peer(peer);
9841 	if (!txrx_peer)
9842 		return;
9843 
9844 	per_pkt_stats = &txrx_peer->stats.per_pkt_stats;
9845 	DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats);
9846 }
9847 #endif
9848 
9849 /**
9850  * dp_get_peer_extd_stats() - Get peer extd stats
9851  * @peer: Datapath peer
9852  * @peer_stats: buffer for peer stats
9853  *
9854  * Return: none
9855  */
9856 #ifdef QCA_ENHANCED_STATS_SUPPORT
9857 #ifdef WLAN_FEATURE_11BE_MLO
9858 static inline
9859 void dp_get_peer_extd_stats(struct dp_peer *peer,
9860 			    struct cdp_peer_stats *peer_stats)
9861 {
9862 	struct dp_soc *soc = peer->vdev->pdev->soc;
9863 
9864 	if (IS_MLO_DP_MLD_PEER(peer)) {
9865 		uint8_t i;
9866 		struct dp_peer *link_peer;
9867 		struct dp_soc *link_peer_soc;
9868 		struct dp_mld_link_peers link_peers_info;
9869 
9870 		dp_get_link_peers_ref_from_mld_peer(soc, peer,
9871 						    &link_peers_info,
9872 						    DP_MOD_ID_CDP);
9873 		for (i = 0; i < link_peers_info.num_links; i++) {
9874 			link_peer = link_peers_info.link_peers[i];
9875 			link_peer_soc = link_peer->vdev->pdev->soc;
9876 			dp_monitor_peer_get_stats(link_peer_soc, link_peer,
9877 						  peer_stats,
9878 						  UPDATE_PEER_STATS);
9879 		}
9880 		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
9881 	} else {
9882 		dp_monitor_peer_get_stats(soc, peer, peer_stats,
9883 					  UPDATE_PEER_STATS);
9884 	}
9885 }
9886 #else
9887 static inline
9888 void dp_get_peer_extd_stats(struct dp_peer *peer,
9889 			    struct cdp_peer_stats *peer_stats)
9890 {
9891 	struct dp_soc *soc = peer->vdev->pdev->soc;
9892 
9893 	dp_monitor_peer_get_stats(soc, peer, peer_stats, UPDATE_PEER_STATS);
9894 }
9895 #endif
9896 #else
9897 static inline
9898 void dp_get_peer_extd_stats(struct dp_peer *peer,
9899 			    struct cdp_peer_stats *peer_stats)
9900 {
9901 	struct dp_txrx_peer *txrx_peer;
9902 	struct dp_peer_extd_stats *extd_stats;
9903 
9904 	txrx_peer = dp_get_txrx_peer(peer);
9905 	if (qdf_unlikely(!txrx_peer)) {
9906 		dp_err_rl("txrx_peer NULL");
9907 		return;
9908 	}
9909 
9910 	extd_stats = &txrx_peer->stats.extd_stats;
9911 	DP_UPDATE_EXTD_STATS(peer_stats, extd_stats);
9912 }
9913 #endif
9914 
9915 /**
9916  * dp_get_peer_tx_per() - Get peer packet error ratio
9917  * @peer_stats: buffer for peer stats
9918  *
9919  * Return: none
9920  */
9921 static inline
9922 void dp_get_peer_tx_per(struct cdp_peer_stats *peer_stats)
9923 {
9924 	if (peer_stats->tx.tx_success.num + peer_stats->tx.retries > 0)
9925 		peer_stats->tx.per = (peer_stats->tx.retries * 100) /
9926 				  (peer_stats->tx.tx_success.num +
9927 				   peer_stats->tx.retries);
9928 	else
9929 		peer_stats->tx.per = 0;
9930 }
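
/*
 * Worked example: with tx_success.num = 900 and retries = 100, the
 * computation above gives per = (100 * 100) / (900 + 100) = 10, i.e.
 * 10% of transmit attempts were retries; integer division truncates.
 */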
9931 
9932 /**
9933  * dp_get_peer_stats() - Get peer stats
9934  * @peer: Datapath peer
9935  * @peer_stats: buffer for peer stats
9936  *
9937  * Return: none
9938  */
9939 static inline
9940 void dp_get_peer_stats(struct dp_peer *peer, struct cdp_peer_stats *peer_stats)
9941 {
9942 	dp_get_peer_calibr_stats(peer, peer_stats);
9943 
9944 	dp_get_peer_basic_stats(peer, peer_stats);
9945 
9946 	dp_get_peer_per_pkt_stats(peer, peer_stats);
9947 
9948 	dp_get_peer_extd_stats(peer, peer_stats);
9949 
9950 	dp_get_peer_tx_per(peer_stats);
9951 }
9952 
9953 /*
9954  * dp_get_host_peer_stats() - function to print peer stats
9955  * @soc: dp_soc handle
9956  * @mac_addr: mac address of the peer
9957  *
9958  * Return: QDF_STATUS
9959  */
9960 static QDF_STATUS
9961 dp_get_host_peer_stats(struct cdp_soc_t *soc, uint8_t *mac_addr)
9962 {
9963 	struct dp_peer *peer = NULL;
9964 	struct cdp_peer_stats *peer_stats = NULL;
9965 	struct cdp_peer_info peer_info = { 0 };
9966 
9967 	if (!mac_addr) {
9968 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9969 			  "%s: NULL peer mac addr\n", __func__);
9970 		return QDF_STATUS_E_FAILURE;
9971 	}
9972 
9973 	DP_PEER_INFO_PARAMS_INIT(&peer_info, DP_VDEV_ALL, mac_addr, false,
9974 				 CDP_WILD_PEER_TYPE);
9975 
9976 	peer = dp_peer_hash_find_wrapper((struct dp_soc *)soc, &peer_info,
9977 					 DP_MOD_ID_CDP);
9978 	if (!peer) {
9979 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9980 			  "%s: Invalid peer\n", __func__);
9981 		return QDF_STATUS_E_FAILURE;
9982 	}
9983 
9984 	peer_stats = qdf_mem_malloc(sizeof(struct cdp_peer_stats));
9985 	if (!peer_stats) {
9986 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9987 			  "%s: Memory allocation failed for cdp_peer_stats\n",
9988 			  __func__);
9989 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9990 		return QDF_STATUS_E_NOMEM;
9991 	}
9992 
9993 	qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats));
9994 
9995 	dp_get_peer_stats(peer, peer_stats);
9996 	dp_print_peer_stats(peer, peer_stats);
9997 
9998 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
9999 
10000 	qdf_mem_free(peer_stats);
10001 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10002 
10003 	return QDF_STATUS_SUCCESS;
10004 }
10005 
10006 /**
10007  * dp_dump_wbm_idle_hptp() - dump wbm idle link ring HW hp/tp info
10008  * @soc: dp soc.
10009  * @pdev: dp pdev.
10010  *
10011  * Return: None.
10012  */
10013 static void
10014 dp_dump_wbm_idle_hptp(struct dp_soc *soc, struct dp_pdev *pdev)
10015 {
10016 	uint32_t hw_head;
10017 	uint32_t hw_tail;
10018 	struct dp_srng *srng;
10019 
10020 	if (!soc) {
10021 		dp_err("soc is NULL");
10022 		return;
10023 	}
10024 
10025 	if (!pdev) {
10026 		dp_err("pdev is NULL");
10027 		return;
10028 	}
10029 
10030 	srng = &pdev->soc->wbm_idle_link_ring;
10031 	if (!srng) {
10032 		dp_err("wbm_idle_link_ring srng is NULL");
10033 		return;
10034 	}
10035 
10036 	hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &hw_head,
10037 			&hw_tail, WBM_IDLE_LINK);
10038 
10039 	dp_debug("WBM_IDLE_LINK: HW hp: %d, HW tp: %d",
10040 			hw_head, hw_tail);
10041 }
10042 
10043 
10044 /**
10045  * dp_txrx_stats_help() - Helper function for Txrx_Stats
10046  *
10047  * Return: None
10048  */
10049 static void dp_txrx_stats_help(void)
10050 {
10051 	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
10052 	dp_info("stats_option:");
10053 	dp_info("  1 -- HTT Tx Statistics");
10054 	dp_info("  2 -- HTT Rx Statistics");
10055 	dp_info("  3 -- HTT Tx HW Queue Statistics");
10056 	dp_info("  4 -- HTT Tx HW Sched Statistics");
10057 	dp_info("  5 -- HTT Error Statistics");
10058 	dp_info("  6 -- HTT TQM Statistics");
10059 	dp_info("  7 -- HTT TQM CMDQ Statistics");
10060 	dp_info("  8 -- HTT TX_DE_CMN Statistics");
10061 	dp_info("  9 -- HTT Tx Rate Statistics");
10062 	dp_info(" 10 -- HTT Rx Rate Statistics");
10063 	dp_info(" 11 -- HTT Peer Statistics");
10064 	dp_info(" 12 -- HTT Tx SelfGen Statistics");
10065 	dp_info(" 13 -- HTT Tx MU HWQ Statistics");
10066 	dp_info(" 14 -- HTT RING_IF_INFO Statistics");
10067 	dp_info(" 15 -- HTT SRNG Statistics");
10068 	dp_info(" 16 -- HTT SFM Info Statistics");
10069 	dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
10070 	dp_info(" 18 -- HTT Peer List Details");
10071 	dp_info(" 20 -- Clear Host Statistics");
10072 	dp_info(" 21 -- Host Rx Rate Statistics");
10073 	dp_info(" 22 -- Host Tx Rate Statistics");
10074 	dp_info(" 23 -- Host Tx Statistics");
10075 	dp_info(" 24 -- Host Rx Statistics");
10076 	dp_info(" 25 -- Host AST Statistics");
10077 	dp_info(" 26 -- Host SRNG PTR Statistics");
10078 	dp_info(" 27 -- Host Mon Statistics");
10079 	dp_info(" 28 -- Host REO Queue Statistics");
10080 	dp_info(" 29 -- Host Soc cfg param Statistics");
10081 	dp_info(" 30 -- Host pdev cfg param Statistics");
10082 	dp_info(" 31 -- Host NAPI stats");
10083 	dp_info(" 32 -- Host Interrupt stats");
10084 	dp_info(" 33 -- Host FISA stats");
10085 	dp_info(" 34 -- Host Register Work stats");
10086 	dp_info(" 35 -- HW REO Queue stats");
10087 	dp_info(" 36 -- Host WBM IDLE link desc ring HP/TP");
10088 	dp_info(" 37 -- Host SRNG usage watermark stats");
10089 }
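
/*
 * Example invocation (illustrative; the interface name and mac_id are
 * placeholders):
 *
 *	iwpriv wlan0 txrx_stats 31 0
 *
 * dumps the host NAPI stats (stats_option 31) for mac_id 0.
 */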
10090 
10091 #ifdef DP_UMAC_HW_RESET_SUPPORT
10092 /**
10093  * dp_umac_rst_skel_enable_update(): Update skel dbg flag for umac reset
10094  * @soc: dp soc handle
10095  * @en: enable/disable
10096  *
10097  * Return: void
10098  */
10099 static void dp_umac_rst_skel_enable_update(struct dp_soc *soc, bool en)
10100 {
10101 	soc->umac_reset_ctx.skel_enable = en;
10102 	dp_cdp_debug("UMAC HW reset debug skeleton code enabled :%u",
10103 		     soc->umac_reset_ctx.skel_enable);
10104 }
10105 
10106 /**
10107  * dp_umac_rst_skel_enable_get(): Get skel dbg flag for umac reset
10108  * @soc: dp soc handle
10109  *
10110  * Return: enable/disable flag
10111  */
10112 static bool dp_umac_rst_skel_enable_get(struct dp_soc *soc)
10113 {
10114 	return soc->umac_reset_ctx.skel_enable;
10115 }
10116 #else
10117 static void dp_umac_rst_skel_enable_update(struct dp_soc *soc, bool en)
10118 {
10119 }
10120 
10121 static bool dp_umac_rst_skel_enable_get(struct dp_soc *soc)
10122 {
10123 	return false;
10124 }
10125 #endif
10126 
10127 /**
10128  * dp_print_host_stats() - Function to print the stats aggregated at host
10129  * @vdev: DP_VDEV handle
10130  * @req: host stats type
10131  * @soc: dp soc handler
10132  *
10133  * Return: 0 on success, print error message in case of failure
10134  */
10135 static int
10136 dp_print_host_stats(struct dp_vdev *vdev,
10137 		    struct cdp_txrx_stats_req *req,
10138 		    struct dp_soc *soc)
10139 {
10140 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
10141 	enum cdp_host_txrx_stats type =
10142 			dp_stats_mapping_table[req->stats][STATS_HOST];
10143 
10144 	dp_aggregate_pdev_stats(pdev);
10145 
10146 	switch (type) {
10147 	case TXRX_CLEAR_STATS:
10148 		dp_txrx_host_stats_clr(vdev, soc);
10149 		break;
10150 	case TXRX_RX_RATE_STATS:
10151 		dp_print_rx_rates(vdev);
10152 		break;
10153 	case TXRX_TX_RATE_STATS:
10154 		dp_print_tx_rates(vdev);
10155 		break;
10156 	case TXRX_TX_HOST_STATS:
10157 		dp_print_pdev_tx_stats(pdev);
10158 		dp_print_soc_tx_stats(pdev->soc);
10159 		break;
10160 	case TXRX_RX_HOST_STATS:
10161 		dp_print_pdev_rx_stats(pdev);
10162 		dp_print_soc_rx_stats(pdev->soc);
10163 		break;
10164 	case TXRX_AST_STATS:
10165 		dp_print_ast_stats(pdev->soc);
10166 		dp_print_mec_stats(pdev->soc);
10167 		dp_print_peer_table(vdev);
10168 		break;
10169 	case TXRX_SRNG_PTR_STATS:
10170 		dp_print_ring_stats(pdev);
10171 		break;
10172 	case TXRX_RX_MON_STATS:
10173 		dp_monitor_print_pdev_rx_mon_stats(pdev);
10174 		break;
10175 	case TXRX_REO_QUEUE_STATS:
10176 		dp_get_host_peer_stats((struct cdp_soc_t *)pdev->soc,
10177 				       req->peer_addr);
10178 		break;
10179 	case TXRX_SOC_CFG_PARAMS:
10180 		dp_print_soc_cfg_params(pdev->soc);
10181 		break;
10182 	case TXRX_PDEV_CFG_PARAMS:
10183 		dp_print_pdev_cfg_params(pdev);
10184 		break;
10185 	case TXRX_NAPI_STATS:
10186 		dp_print_napi_stats(pdev->soc);
10187 		break;
10188 	case TXRX_SOC_INTERRUPT_STATS:
10189 		dp_print_soc_interrupt_stats(pdev->soc);
10190 		break;
10191 	case TXRX_SOC_FSE_STATS:
10192 		dp_rx_dump_fisa_table(pdev->soc);
10193 		break;
10194 	case TXRX_HAL_REG_WRITE_STATS:
10195 		hal_dump_reg_write_stats(pdev->soc->hal_soc);
10196 		hal_dump_reg_write_srng_stats(pdev->soc->hal_soc);
10197 		break;
10198 	case TXRX_SOC_REO_HW_DESC_DUMP:
10199 		dp_get_rx_reo_queue_info((struct cdp_soc_t *)pdev->soc,
10200 					 vdev->vdev_id);
10201 		break;
10202 	case TXRX_SOC_WBM_IDLE_HPTP_DUMP:
10203 		dp_dump_wbm_idle_hptp(pdev->soc, pdev);
10204 		break;
10205 	case TXRX_SRNG_USAGE_WM_STATS:
10206 		/* Dump usage watermark stats for all SRNGs */
10207 		dp_dump_srng_high_wm_stats(soc, 0xFF);
10208 		break;
10209 	default:
10210 		dp_info("Wrong Input For TxRx Host Stats");
10211 		dp_txrx_stats_help();
10212 		break;
10213 	}
10214 	return 0;
10215 }
10216 
10217 /*
10218  * dp_pdev_tid_stats_ingress_inc() - increment pdev ingress stack counter
10219  * @pdev: pdev handle
10220  * @val: increase in value
10221  *
10222  * Return: void
10223  */
10224 static void
10225 dp_pdev_tid_stats_ingress_inc(struct dp_pdev *pdev, uint32_t val)
10226 {
10227 	pdev->stats.tid_stats.ingress_stack += val;
10228 }
10229 
10230 /*
10231  * dp_pdev_tid_stats_osif_drop() - increment pdev osif drop counter
10232  * @pdev: pdev handle
10233  * @val: increase in value
10234  *
10235  * Return: void
10236  */
10237 static void
10238 dp_pdev_tid_stats_osif_drop(struct dp_pdev *pdev, uint32_t val)
10239 {
10240 	pdev->stats.tid_stats.osif_drop += val;
10241 }
10242 
10243 /*
10244  * dp_get_fw_peer_stats()- function to print peer stats
10245  * dp_get_fw_peer_stats() - function to request fw peer stats
10246  * @soc: soc handle
10247  * @pdev_id: id of the pdev handle
10248  * @cap: Type of htt stats requested
10249  * @is_wait: if set, wait on completion from firmware response
10250  *
10251  * Currently supports only MAC ID based requests:
10252  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
10253  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
10254  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
10255  *
10256  * Return: QDF_STATUS
10257  */
10258 static QDF_STATUS
10259 dp_get_fw_peer_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
10260 		     uint8_t *mac_addr,
10261 		     uint32_t cap, uint32_t is_wait)
10262 {
10263 	int i;
10264 	uint32_t config_param0 = 0;
10265 	uint32_t config_param1 = 0;
10266 	uint32_t config_param2 = 0;
10267 	uint32_t config_param3 = 0;
10268 	struct dp_pdev *pdev =
10269 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10270 						   pdev_id);
10271 
10272 	if (!pdev)
10273 		return QDF_STATUS_E_FAILURE;
10274 
10275 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
10276 	config_param0 |= (1 << (cap + 1));
10277 
10278 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
10279 		config_param1 |= (1 << i);
10280 	}
10281 
10282 	config_param2 |= (mac_addr[0] & 0x000000ff);
10283 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
10284 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
10285 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
10286 
10287 	config_param3 |= (mac_addr[4] & 0x000000ff);
10288 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
10289 
10290 	if (is_wait) {
10291 		qdf_event_reset(&pdev->fw_peer_stats_event);
10292 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
10293 					  config_param0, config_param1,
10294 					  config_param2, config_param3,
10295 					  0, DBG_STATS_COOKIE_DP_STATS, 0);
10296 		qdf_wait_single_event(&pdev->fw_peer_stats_event,
10297 				      DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
10298 	} else {
10299 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
10300 					  config_param0, config_param1,
10301 					  config_param2, config_param3,
10302 					  0, DBG_STATS_COOKIE_DEFAULT, 0);
10303 	}
10304 
10305 	return QDF_STATUS_SUCCESS;
10306 
10307 }
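
/*
 * Packing example: for peer MAC 00:11:22:33:44:55 the byte packing above
 * yields config_param2 = 0x33221100 (MAC bytes 0..3) and
 * config_param3 = 0x00005544 (MAC bytes 4..5 in the low 16 bits).
 */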
10308 
10309 /* This struct definition will be removed from here
10310  * once it gets added to FW headers */
10311 struct httstats_cmd_req {
10312 	uint32_t config_param0;
10313 	uint32_t config_param1;
10314 	uint32_t config_param2;
10315 	uint32_t config_param3;
10316 	int cookie;
10317 	u_int8_t stats_id;
10318 };
10319 
10320 /*
10321  * dp_get_htt_stats: function to process the HTT stats request
10322  * @soc: DP soc handle
10323  * @pdev_id: id of pdev handle
10324  * @data: pointer to request data
10325  * @data_len: length for request data
10326  *
10327  * return: QDF_STATUS
10328  */
10329 static QDF_STATUS
10330 dp_get_htt_stats(struct cdp_soc_t *soc, uint8_t pdev_id, void *data,
10331 		 uint32_t data_len)
10332 {
10333 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
10334 	struct dp_pdev *pdev =
10335 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10336 						   pdev_id);
10337 
10338 	if (!pdev)
10339 		return QDF_STATUS_E_FAILURE;
10340 
10341 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
10342 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
10343 				req->config_param0, req->config_param1,
10344 				req->config_param2, req->config_param3,
10345 				req->cookie, DBG_STATS_COOKIE_DEFAULT, 0);
10346 
10347 	return QDF_STATUS_SUCCESS;
10348 }
10349 
10350 /**
10351  * dp_set_pdev_tidmap_prty_wifi3(): update tidmap priority in pdev
10352  * @pdev: DP_PDEV handle
10353  * @prio: tidmap priority value passed by the user
10354  *
10355  * Return: QDF_STATUS_SUCCESS on success
10356  */
10357 static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct dp_pdev *pdev,
10358 						uint8_t prio)
10359 {
10360 	struct dp_soc *soc = pdev->soc;
10361 
10362 	soc->tidmap_prty = prio;
10363 
10364 	hal_tx_set_tidmap_prty(soc->hal_soc, prio);
10365 	return QDF_STATUS_SUCCESS;
10366 }
10367 
10368 /*
10369  * dp_get_peer_param: function to get parameters of a peer
10370  * @cdp_soc: DP soc handle
10371  * @vdev_id: id of vdev handle
10372  * @peer_mac: peer mac address
10373  * @param: parameter type to get
10374  * @val: address of buffer to hold the value
10375  *
10376  * Return: QDF_STATUS
10377  */
10378 static QDF_STATUS dp_get_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
10379 				    uint8_t *peer_mac,
10380 				    enum cdp_peer_param_type param,
10381 				    cdp_config_param_type *val)
10382 {
10383 	return QDF_STATUS_SUCCESS;
10384 }
10385 
10386 /*
10387  * dp_set_peer_param: function to set parameters in peer
10388  * @cdp_soc: DP soc handle
10389  * @vdev_id: id of vdev handle
10390  * @peer_mac: peer mac address
10391  * @param: parameter type to be set
10392  * @val: value of parameter to be set
10393  *
10394  * Return: 0 for success. nonzero for failure.
10395  */
10396 static QDF_STATUS dp_set_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
10397 				    uint8_t *peer_mac,
10398 				    enum cdp_peer_param_type param,
10399 				    cdp_config_param_type val)
10400 {
10401 	struct dp_peer *peer =
10402 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
10403 						       peer_mac, 0, vdev_id,
10404 						       DP_MOD_ID_CDP);
10405 	struct dp_txrx_peer *txrx_peer;
10406 
10407 	if (!peer)
10408 		return QDF_STATUS_E_FAILURE;
10409 
10410 	txrx_peer = peer->txrx_peer;
10411 	if (!txrx_peer) {
10412 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10413 		return QDF_STATUS_E_FAILURE;
10414 	}
10415 
10416 	switch (param) {
10417 	case CDP_CONFIG_NAWDS:
10418 		txrx_peer->nawds_enabled = val.cdp_peer_param_nawds;
10419 		break;
10420 	case CDP_CONFIG_ISOLATION:
10421 		dp_set_peer_isolation(txrx_peer, val.cdp_peer_param_isolation);
10422 		break;
10423 	case CDP_CONFIG_IN_TWT:
10424 		txrx_peer->in_twt = !!(val.cdp_peer_param_in_twt);
10425 		break;
10426 	default:
10427 		break;
10428 	}
10429 
10430 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10431 
10432 	return QDF_STATUS_SUCCESS;
10433 }
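
/*
 * Caller-side sketch (illustrative comment only): callers fill in the
 * cdp_config_param_type union member that matches the parameter id,
 * e.g. to mark a peer as being in a TWT session:
 *
 *	cdp_config_param_type val = {0};
 *
 *	val.cdp_peer_param_in_twt = 1;
 *	dp_set_peer_param(cdp_soc, vdev_id, peer_mac,
 *			  CDP_CONFIG_IN_TWT, val);
 */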
10434 
10435 /*
10436  * dp_get_pdev_param: function to get parameters from pdev
10437  * @cdp_soc: DP soc handle
10438  * @pdev_id: id of pdev handle
10439  * @param: parameter type to get
10440  * @val: buffer for value
10441  *
10442  * Return: status
10443  */
10444 static QDF_STATUS dp_get_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
10445 				    enum cdp_pdev_param_type param,
10446 				    cdp_config_param_type *val)
10447 {
10448 	struct cdp_pdev *pdev = (struct cdp_pdev *)
10449 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
10450 						   pdev_id);
10451 	if (!pdev)
10452 		return QDF_STATUS_E_FAILURE;
10453 
10454 	switch (param) {
10455 	case CDP_CONFIG_VOW:
10456 		val->cdp_pdev_param_cfg_vow =
10457 				((struct dp_pdev *)pdev)->delay_stats_flag;
10458 		break;
10459 	case CDP_TX_PENDING:
10460 		val->cdp_pdev_param_tx_pending = dp_get_tx_pending(pdev);
10461 		break;
10462 	case CDP_FILTER_MCAST_DATA:
10463 		val->cdp_pdev_param_fltr_mcast =
10464 				dp_monitor_pdev_get_filter_mcast_data(pdev);
10465 		break;
10466 	case CDP_FILTER_NO_DATA:
10467 		val->cdp_pdev_param_fltr_none =
10468 				dp_monitor_pdev_get_filter_non_data(pdev);
10469 		break;
10470 	case CDP_FILTER_UCAST_DATA:
10471 		val->cdp_pdev_param_fltr_ucast =
10472 				dp_monitor_pdev_get_filter_ucast_data(pdev);
10473 		break;
10474 	case CDP_MONITOR_CHANNEL:
10475 		val->cdp_pdev_param_monitor_chan =
10476 			dp_monitor_get_chan_num((struct dp_pdev *)pdev);
10477 		break;
10478 	case CDP_MONITOR_FREQUENCY:
10479 		val->cdp_pdev_param_mon_freq =
10480 			dp_monitor_get_chan_freq((struct dp_pdev *)pdev);
10481 		break;
10482 	default:
10483 		return QDF_STATUS_E_FAILURE;
10484 	}
10485 
10486 	return QDF_STATUS_SUCCESS;
10487 }
10488 
10489 /*
10490  * dp_set_pdev_param: function to set parameters in pdev
10491  * @cdp_soc: DP soc handle
10492  * @pdev_id: id of pdev handle
10493  * @param: parameter type to be set
10494  * @val: value of parameter to be set
10495  *
10496  * Return: 0 for success. nonzero for failure.
10497  */
10498 static QDF_STATUS dp_set_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
10499 				    enum cdp_pdev_param_type param,
10500 				    cdp_config_param_type val)
10501 {
10502 	int target_type;
10503 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
10504 	struct dp_pdev *pdev =
10505 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
10506 						   pdev_id);
10507 	enum reg_wifi_band chan_band;
10508 
10509 	if (!pdev)
10510 		return QDF_STATUS_E_FAILURE;
10511 
10512 	target_type = hal_get_target_type(soc->hal_soc);
10513 	switch (target_type) {
10514 	case TARGET_TYPE_QCA6750:
10515 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC0_LMAC_ID;
10516 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
10517 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
10518 		break;
10519 	case TARGET_TYPE_KIWI:
10520 	case TARGET_TYPE_MANGO:
10521 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC0_LMAC_ID;
10522 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
10523 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
10524 		break;
10525 	default:
10526 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC1_LMAC_ID;
10527 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
10528 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
10529 		break;
10530 	}
10531 
10532 	switch (param) {
10533 	case CDP_CONFIG_TX_CAPTURE:
10534 		return dp_monitor_config_debug_sniffer(pdev,
10535 						val.cdp_pdev_param_tx_capture);
10536 	case CDP_CONFIG_DEBUG_SNIFFER:
10537 		return dp_monitor_config_debug_sniffer(pdev,
10538 						val.cdp_pdev_param_dbg_snf);
10539 	case CDP_CONFIG_BPR_ENABLE:
10540 		return dp_monitor_set_bpr_enable(pdev,
10541 						 val.cdp_pdev_param_bpr_enable);
10542 	case CDP_CONFIG_PRIMARY_RADIO:
10543 		pdev->is_primary = val.cdp_pdev_param_primary_radio;
10544 		break;
10545 	case CDP_CONFIG_CAPTURE_LATENCY:
10546 		pdev->latency_capture_enable = val.cdp_pdev_param_cptr_latcy;
10547 		break;
10548 	case CDP_INGRESS_STATS:
10549 		dp_pdev_tid_stats_ingress_inc(pdev,
10550 					      val.cdp_pdev_param_ingrs_stats);
10551 		break;
10552 	case CDP_OSIF_DROP:
10553 		dp_pdev_tid_stats_osif_drop(pdev,
10554 					    val.cdp_pdev_param_osif_drop);
10555 		break;
10556 	case CDP_CONFIG_ENH_RX_CAPTURE:
10557 		return dp_monitor_config_enh_rx_capture(pdev,
10558 						val.cdp_pdev_param_en_rx_cap);
10559 	case CDP_CONFIG_ENH_TX_CAPTURE:
10560 		return dp_monitor_config_enh_tx_capture(pdev,
10561 						val.cdp_pdev_param_en_tx_cap);
10562 	case CDP_CONFIG_HMMC_TID_OVERRIDE:
10563 		pdev->hmmc_tid_override_en = val.cdp_pdev_param_hmmc_tid_ovrd;
10564 		break;
10565 	case CDP_CONFIG_HMMC_TID_VALUE:
10566 		pdev->hmmc_tid = val.cdp_pdev_param_hmmc_tid;
10567 		break;
10568 	case CDP_CHAN_NOISE_FLOOR:
10569 		pdev->chan_noise_floor = val.cdp_pdev_param_chn_noise_flr;
10570 		break;
10571 	case CDP_TIDMAP_PRTY:
10572 		dp_set_pdev_tidmap_prty_wifi3(pdev,
10573 					      val.cdp_pdev_param_tidmap_prty);
10574 		break;
10575 	case CDP_FILTER_NEIGH_PEERS:
10576 		dp_monitor_set_filter_neigh_peers(pdev,
10577 					val.cdp_pdev_param_fltr_neigh_peers);
10578 		break;
10579 	case CDP_MONITOR_CHANNEL:
10580 		dp_monitor_set_chan_num(pdev, val.cdp_pdev_param_monitor_chan);
10581 		break;
10582 	case CDP_MONITOR_FREQUENCY:
10583 		chan_band = wlan_reg_freq_to_band(val.cdp_pdev_param_mon_freq);
10584 		dp_monitor_set_chan_freq(pdev, val.cdp_pdev_param_mon_freq);
10585 		dp_monitor_set_chan_band(pdev, chan_band);
10586 		break;
10587 	case CDP_CONFIG_BSS_COLOR:
10588 		dp_monitor_set_bsscolor(pdev, val.cdp_pdev_param_bss_color);
10589 		break;
10590 	case CDP_SET_ATF_STATS_ENABLE:
10591 		dp_monitor_set_atf_stats_enable(pdev,
10592 					val.cdp_pdev_param_atf_stats_enable);
10593 		break;
10594 	case CDP_CONFIG_SPECIAL_VAP:
10595 		dp_monitor_pdev_config_scan_spcl_vap(pdev,
10596 					val.cdp_pdev_param_config_special_vap);
10597 		dp_monitor_vdev_set_monitor_mode_buf_rings(pdev);
10598 		break;
10599 	case CDP_RESET_SCAN_SPCL_VAP_STATS_ENABLE:
10600 		dp_monitor_pdev_reset_scan_spcl_vap_stats_enable(pdev,
10601 				val.cdp_pdev_param_reset_scan_spcl_vap_stats_enable);
10602 		break;
10603 	case CDP_CONFIG_ENHANCED_STATS_ENABLE:
10604 		pdev->enhanced_stats_en = val.cdp_pdev_param_enhanced_stats_enable;
10605 		break;
10606 	case CDP_ISOLATION:
10607 		pdev->isolation = val.cdp_pdev_param_isolation;
10608 		break;
10609 	case CDP_CONFIG_UNDECODED_METADATA_CAPTURE_ENABLE:
10610 		return dp_monitor_config_undecoded_metadata_capture(pdev,
10611 				val.cdp_pdev_param_undecoded_metadata_enable);
10613 	default:
10614 		return QDF_STATUS_E_INVAL;
10615 	}
10616 	return QDF_STATUS_SUCCESS;
10617 }
10618 
10619 #ifdef QCA_UNDECODED_METADATA_SUPPORT
10620 static
10621 QDF_STATUS dp_set_pdev_phyrx_error_mask(struct cdp_soc_t *cdp_soc,
10622 					uint8_t pdev_id, uint32_t mask,
10623 					uint32_t mask_cont)
10624 {
10625 	struct dp_pdev *pdev =
10626 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
10627 						   pdev_id);
10628 
10629 	if (!pdev)
10630 		return QDF_STATUS_E_FAILURE;
10631 
10632 	return dp_monitor_config_undecoded_metadata_phyrx_error_mask(pdev,
10633 				mask, mask_cont);
10634 }
10635 
10636 static
10637 QDF_STATUS dp_get_pdev_phyrx_error_mask(struct cdp_soc_t *cdp_soc,
10638 					uint8_t pdev_id, uint32_t *mask,
10639 					uint32_t *mask_cont)
10640 {
10641 	struct dp_pdev *pdev =
10642 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
10643 						   pdev_id);
10644 
10645 	if (!pdev)
10646 		return QDF_STATUS_E_FAILURE;
10647 
10648 	return dp_monitor_get_undecoded_metadata_phyrx_error_mask(pdev,
10649 				mask, mask_cont);
10650 }
10651 #endif
10652 
10653 #ifdef QCA_PEER_EXT_STATS
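/**
 * dp_rx_update_peer_delay_stats() - Update per-TID rx delay stats of a peer
 * @soc: DP soc handle
 * @nbuf: rx nbuf carrying the peer id, tid and ring context
 *
 * Return: void
 */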
10654 static void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
10655 					  qdf_nbuf_t nbuf)
10656 {
10657 	struct dp_peer *peer = NULL;
10658 	uint16_t peer_id, ring_id;
10659 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
10660 	struct dp_peer_delay_stats *delay_stats = NULL;
10661 
10662 	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
10663 	if (peer_id > soc->max_peer_id)
10664 		return;
10665 
10666 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
10667 	if (qdf_unlikely(!peer))
10668 		return;
10669 
10670 	if (qdf_unlikely(!peer->txrx_peer)) {
10671 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10672 		return;
10673 	}
10674 
10675 	if (qdf_likely(peer->txrx_peer->delay_stats)) {
10676 		delay_stats = peer->txrx_peer->delay_stats;
10677 		ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
10678 		dp_rx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
10679 					nbuf);
10680 	}
10681 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10682 }
10683 #else
10684 static inline void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
10685 						 qdf_nbuf_t nbuf)
10686 {
10687 }
10688 #endif
10689 
10690 /*
10691  * dp_calculate_delay_stats: function to compute rx delay stats
10692  * @cdp_soc: DP soc handle
10693  * @vdev_id: id of DP vdev handle
10694  * @nbuf: skb
10695  *
10696  * Return: QDF_STATUS
10697  */
10698 static QDF_STATUS
10699 dp_calculate_delay_stats(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
10700 			 qdf_nbuf_t nbuf)
10701 {
10702 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
10703 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10704 						     DP_MOD_ID_CDP);
10705 
10706 	if (!vdev)
10707 		return QDF_STATUS_SUCCESS;
10708 
10709 	if (vdev->pdev->delay_stats_flag)
10710 		dp_rx_compute_delay(vdev, nbuf);
10711 	else
10712 		dp_rx_update_peer_delay_stats(soc, nbuf);
10713 
10714 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10715 	return QDF_STATUS_SUCCESS;
10716 }
10717 
10718 /*
10719  * dp_get_vdev_param: function to get parameters from vdev
10720  * @cdp_soc: DP soc handle
10721  * @vdev_id: id of DP vdev handle
10722  * @param: parameter type to get value
10723  * @val: buffer address
10724  *
10725  * return: status
10726  */
10727 static QDF_STATUS dp_get_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
10728 				    enum cdp_vdev_param_type param,
10729 				    cdp_config_param_type *val)
10730 {
10731 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
10732 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10733 						     DP_MOD_ID_CDP);
10734 
10735 	if (!vdev)
10736 		return QDF_STATUS_E_FAILURE;
10737 
10738 	switch (param) {
10739 	case CDP_ENABLE_WDS:
10740 		val->cdp_vdev_param_wds = vdev->wds_enabled;
10741 		break;
10742 	case CDP_ENABLE_MEC:
10743 		val->cdp_vdev_param_mec = vdev->mec_enabled;
10744 		break;
10745 	case CDP_ENABLE_DA_WAR:
10746 		val->cdp_vdev_param_da_war = vdev->pdev->soc->da_war_enabled;
10747 		break;
10748 	case CDP_ENABLE_IGMP_MCAST_EN:
10749 		val->cdp_vdev_param_igmp_mcast_en = vdev->igmp_mcast_enhanc_en;
10750 		break;
10751 	case CDP_ENABLE_MCAST_EN:
10752 		val->cdp_vdev_param_mcast_en = vdev->mcast_enhancement_en;
10753 		break;
10754 	case CDP_ENABLE_HLOS_TID_OVERRIDE:
10755 		val->cdp_vdev_param_hlos_tid_override =
10756 			    dp_vdev_get_hlos_tid_override((struct cdp_vdev *)vdev);
10757 		break;
10758 	case CDP_ENABLE_PEER_AUTHORIZE:
10759 		val->cdp_vdev_param_peer_authorize =
10760 			    vdev->peer_authorize;
10761 		break;
10762 	case CDP_TX_ENCAP_TYPE:
10763 		val->cdp_vdev_param_tx_encap = vdev->tx_encap_type;
10764 		break;
10765 	case CDP_ENABLE_CIPHER:
10766 		val->cdp_vdev_param_cipher_en = vdev->sec_type;
10767 		break;
10768 #ifdef WLAN_SUPPORT_MESH_LATENCY
10769 	case CDP_ENABLE_PEER_TID_LATENCY:
10770 		val->cdp_vdev_param_peer_tid_latency_enable =
10771 			vdev->peer_tid_latency_enabled;
10772 		break;
10773 	case CDP_SET_VAP_MESH_TID:
10774 		val->cdp_vdev_param_mesh_tid =
10775 				vdev->mesh_tid_latency_config.latency_tid;
10776 		break;
10777 #endif
10778 	case CDP_DROP_3ADDR_MCAST:
10779 		val->cdp_drop_3addr_mcast = vdev->drop_3addr_mcast;
10780 		break;
10781 	default:
10782 		dp_cdp_err("%pK: param value %d is wrong",
10783 			   soc, param);
10784 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10785 		return QDF_STATUS_E_FAILURE;
10786 	}
10787 
10788 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10789 	return QDF_STATUS_SUCCESS;
10790 }
10791 
10792 /*
10793  * dp_set_vdev_param: function to set parameters in vdev
 * @cdp_soc: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @param: parameter type to be set
 * @val: value
 *
 * Return: QDF_STATUS
10800  */
10801 static QDF_STATUS
10802 dp_set_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
10803 		  enum cdp_vdev_param_type param, cdp_config_param_type val)
10804 {
10805 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
10806 	struct dp_vdev *vdev =
10807 		dp_vdev_get_ref_by_id(dsoc, vdev_id, DP_MOD_ID_CDP);
10808 	uint32_t var = 0;
10809 
10810 	if (!vdev)
10811 		return QDF_STATUS_E_FAILURE;
10812 
10813 	switch (param) {
10814 	case CDP_ENABLE_WDS:
10815 		dp_cdp_err("%pK: wds_enable %d for vdev(%pK) id(%d)\n",
10816 			   dsoc, val.cdp_vdev_param_wds, vdev, vdev->vdev_id);
10817 		vdev->wds_enabled = val.cdp_vdev_param_wds;
10818 		break;
10819 	case CDP_ENABLE_MEC:
10820 		dp_cdp_err("%pK: mec_enable %d for vdev(%pK) id(%d)\n",
10821 			   dsoc, val.cdp_vdev_param_mec, vdev, vdev->vdev_id);
10822 		vdev->mec_enabled = val.cdp_vdev_param_mec;
10823 		break;
10824 	case CDP_ENABLE_DA_WAR:
10825 		dp_cdp_err("%pK: da_war_enable %d for vdev(%pK) id(%d)\n",
10826 			   dsoc, val.cdp_vdev_param_da_war, vdev, vdev->vdev_id);
10827 		vdev->pdev->soc->da_war_enabled = val.cdp_vdev_param_da_war;
10828 		dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
10829 					     vdev->pdev->soc));
10830 		break;
10831 	case CDP_ENABLE_NAWDS:
10832 		vdev->nawds_enabled = val.cdp_vdev_param_nawds;
10833 		break;
10834 	case CDP_ENABLE_MCAST_EN:
10835 		vdev->mcast_enhancement_en = val.cdp_vdev_param_mcast_en;
10836 		break;
10837 	case CDP_ENABLE_IGMP_MCAST_EN:
10838 		vdev->igmp_mcast_enhanc_en = val.cdp_vdev_param_igmp_mcast_en;
10839 		break;
10840 	case CDP_ENABLE_PROXYSTA:
10841 		vdev->proxysta_vdev = val.cdp_vdev_param_proxysta;
10842 		break;
10843 	case CDP_UPDATE_TDLS_FLAGS:
10844 		vdev->tdls_link_connected = val.cdp_vdev_param_tdls_flags;
10845 		break;
10846 	case CDP_CFG_WDS_AGING_TIMER:
10847 		var = val.cdp_vdev_param_aging_tmr;
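		/* 0 stops AST aging; a new non-zero period re-arms the timer */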
10848 		if (!var)
10849 			qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
10850 		else if (var != vdev->wds_aging_timer_val)
10851 			qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, var);
10852 
10853 		vdev->wds_aging_timer_val = var;
10854 		break;
10855 	case CDP_ENABLE_AP_BRIDGE:
10856 		if (wlan_op_mode_sta != vdev->opmode)
10857 			vdev->ap_bridge_enabled = val.cdp_vdev_param_ap_brdg_en;
10858 		else
10859 			vdev->ap_bridge_enabled = false;
10860 		break;
10861 	case CDP_ENABLE_CIPHER:
10862 		vdev->sec_type = val.cdp_vdev_param_cipher_en;
10863 		break;
10864 	case CDP_ENABLE_QWRAP_ISOLATION:
10865 		vdev->isolation_vdev = val.cdp_vdev_param_qwrap_isolation;
10866 		break;
10867 	case CDP_UPDATE_MULTIPASS:
10868 		vdev->multipass_en = val.cdp_vdev_param_update_multipass;
10869 		break;
10870 	case CDP_TX_ENCAP_TYPE:
10871 		vdev->tx_encap_type = val.cdp_vdev_param_tx_encap;
10872 		break;
10873 	case CDP_RX_DECAP_TYPE:
10874 		vdev->rx_decap_type = val.cdp_vdev_param_rx_decap;
10875 		break;
10876 	case CDP_TID_VDEV_PRTY:
10877 		vdev->tidmap_prty = val.cdp_vdev_param_tidmap_prty;
10878 		break;
10879 	case CDP_TIDMAP_TBL_ID:
10880 		vdev->tidmap_tbl_id = val.cdp_vdev_param_tidmap_tbl_id;
10881 		break;
10882 #ifdef MESH_MODE_SUPPORT
10883 	case CDP_MESH_RX_FILTER:
10884 		dp_vdev_set_mesh_rx_filter((struct cdp_vdev *)vdev,
10885 					   val.cdp_vdev_param_mesh_rx_filter);
10886 		break;
10887 	case CDP_MESH_MODE:
10888 		dp_vdev_set_mesh_mode((struct cdp_vdev *)vdev,
10889 				      val.cdp_vdev_param_mesh_mode);
10890 		break;
10891 #endif
10892 	case CDP_ENABLE_HLOS_TID_OVERRIDE:
		dp_info("vdev_id %d enable hlos tid override %d", vdev_id,
10894 			val.cdp_vdev_param_hlos_tid_override);
10895 		dp_vdev_set_hlos_tid_override(vdev,
10896 				val.cdp_vdev_param_hlos_tid_override);
10897 		break;
10898 #ifdef QCA_SUPPORT_WDS_EXTENDED
10899 	case CDP_CFG_WDS_EXT:
10900 		if (vdev->opmode == wlan_op_mode_ap)
10901 			vdev->wds_ext_enabled = val.cdp_vdev_param_wds_ext;
10902 		break;
10903 #endif
10904 	case CDP_ENABLE_PEER_AUTHORIZE:
10905 		vdev->peer_authorize = val.cdp_vdev_param_peer_authorize;
10906 		break;
10907 #ifdef WLAN_SUPPORT_MESH_LATENCY
10908 	case CDP_ENABLE_PEER_TID_LATENCY:
10909 		dp_info("vdev_id %d enable peer tid latency %d", vdev_id,
10910 			val.cdp_vdev_param_peer_tid_latency_enable);
10911 		vdev->peer_tid_latency_enabled =
10912 			val.cdp_vdev_param_peer_tid_latency_enable;
10913 		break;
10914 	case CDP_SET_VAP_MESH_TID:
		dp_info("vdev_id %d set vap mesh tid %d", vdev_id,
10916 			val.cdp_vdev_param_mesh_tid);
10917 		vdev->mesh_tid_latency_config.latency_tid
10918 				= val.cdp_vdev_param_mesh_tid;
10919 		break;
10920 #endif
10921 #ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE
10922 	case CDP_SKIP_BAR_UPDATE_AP:
10923 		dp_info("vdev_id %d skip BAR update: %u", vdev_id,
10924 			val.cdp_skip_bar_update);
10925 		vdev->skip_bar_update = val.cdp_skip_bar_update;
10926 		vdev->skip_bar_update_last_ts = 0;
10927 		break;
10928 #endif
10929 	case CDP_DROP_3ADDR_MCAST:
10930 		dp_info("vdev_id %d drop 3 addr mcast :%d", vdev_id,
10931 			val.cdp_drop_3addr_mcast);
10932 		vdev->drop_3addr_mcast = val.cdp_drop_3addr_mcast;
10933 		break;
10934 	case CDP_ENABLE_WRAP:
10935 		vdev->wrap_vdev = val.cdp_vdev_param_wrap;
10936 		break;
10937 #ifdef DP_TRAFFIC_END_INDICATION
10938 	case CDP_ENABLE_TRAFFIC_END_INDICATION:
10939 		vdev->traffic_end_ind_en = val.cdp_vdev_param_traffic_end_ind;
10940 		break;
10941 #endif
10942 	default:
10943 		break;
10944 	}
10945 
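	/* re-derive TX search flags and push the updated parameter to the
	 * arch-specific layer
	 */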
10946 	dp_tx_vdev_update_search_flags((struct dp_vdev *)vdev);
10947 	dsoc->arch_ops.txrx_set_vdev_param(dsoc, vdev, param, val);
10948 
10949 	/* Update PDEV flags as VDEV flags are updated */
10950 	dp_pdev_update_fast_rx_flag(dsoc, vdev->pdev);
10951 	dp_vdev_unref_delete(dsoc, vdev, DP_MOD_ID_CDP);
10952 
10953 	return QDF_STATUS_SUCCESS;
10954 }
10955 
10956 /*
10957  * dp_set_psoc_param: function to set parameters in psoc
 * @cdp_soc: DP soc handle
 * @param: parameter type to be set
 * @val: value of parameter to be set
 *
 * Return: QDF_STATUS
10963  */
10964 static QDF_STATUS
10965 dp_set_psoc_param(struct cdp_soc_t *cdp_soc,
10966 		  enum cdp_psoc_param_type param, cdp_config_param_type val)
10967 {
10968 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
10969 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->wlan_cfg_ctx;
10970 
10971 	switch (param) {
10972 	case CDP_ENABLE_RATE_STATS:
10973 		soc->peerstats_enabled = val.cdp_psoc_param_en_rate_stats;
10974 		break;
10975 	case CDP_SET_NSS_CFG:
10976 		wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx,
10977 					    val.cdp_psoc_param_en_nss_cfg);
10978 		/*
10979 		 * TODO: masked out based on the per offloaded radio
10980 		 */
10981 		switch (val.cdp_psoc_param_en_nss_cfg) {
10982 		case dp_nss_cfg_default:
10983 			break;
10984 		case dp_nss_cfg_first_radio:
10985 		/*
10986 		 * This configuration is valid for single band radio which
10987 		 * is also NSS offload.
10988 		 */
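		/* fall through: with NSS offload owning the TX path, the
		 * host TX descriptor pools below are unused and sized to 0
		 */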
10989 		case dp_nss_cfg_dbdc:
10990 		case dp_nss_cfg_dbtc:
10991 			wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
10992 			wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
10993 			wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
10994 			wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
10995 			break;
10996 		default:
10997 			dp_cdp_err("%pK: Invalid offload config %d",
10998 				   soc, val.cdp_psoc_param_en_nss_cfg);
10999 		}
11000 
		dp_cdp_err("%pK: nss-wifi<0> nss config is enabled", soc);
11003 		break;
11004 	case CDP_SET_PREFERRED_HW_MODE:
11005 		soc->preferred_hw_mode = val.cdp_psoc_param_preferred_hw_mode;
11006 		break;
11007 	case CDP_IPA_ENABLE:
11008 		soc->wlan_cfg_ctx->ipa_enabled = val.cdp_ipa_enabled;
11009 		break;
11010 	case CDP_CFG_VDEV_STATS_HW_OFFLOAD:
11011 		wlan_cfg_set_vdev_stats_hw_offload_config(wlan_cfg_ctx,
11012 				val.cdp_psoc_param_vdev_stats_hw_offload);
11013 		break;
11014 	case CDP_SAWF_ENABLE:
11015 		wlan_cfg_set_sawf_config(wlan_cfg_ctx, val.cdp_sawf_enabled);
11016 		break;
11017 	case CDP_UMAC_RST_SKEL_ENABLE:
11018 		dp_umac_rst_skel_enable_update(soc, val.cdp_umac_rst_skel);
11019 		break;
11020 	case CDP_SAWF_STATS:
11021 		wlan_cfg_set_sawf_stats_config(wlan_cfg_ctx,
11022 					       val.cdp_sawf_stats);
11023 		break;
11024 	default:
11025 		break;
11026 	}
11027 
11028 	return QDF_STATUS_SUCCESS;
11029 }
11030 
11031 /*
 * dp_get_psoc_param: function to get parameters from psoc
 * @cdp_soc: DP soc handle
 * @param: parameter type to be retrieved
 * @val: address of buffer
 *
 * Return: status
11038  */
11039 static QDF_STATUS dp_get_psoc_param(struct cdp_soc_t *cdp_soc,
11040 				    enum cdp_psoc_param_type param,
11041 				    cdp_config_param_type *val)
11042 {
11043 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
11044 
11045 	if (!soc)
11046 		return QDF_STATUS_E_FAILURE;
11047 
11048 	switch (param) {
11049 	case CDP_CFG_PEER_EXT_STATS:
11050 		val->cdp_psoc_param_pext_stats =
11051 			wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
11052 		break;
11053 	case CDP_CFG_VDEV_STATS_HW_OFFLOAD:
11054 		val->cdp_psoc_param_vdev_stats_hw_offload =
11055 			wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx);
11056 		break;
11057 	case CDP_UMAC_RST_SKEL_ENABLE:
11058 		val->cdp_umac_rst_skel = dp_umac_rst_skel_enable_get(soc);
11059 		break;
11060 	case CDP_PPEDS_ENABLE:
11061 		val->cdp_psoc_param_ppeds_enabled =
11062 			wlan_cfg_get_dp_soc_is_ppe_enabled(soc->wlan_cfg_ctx);
11063 		break;
11064 	default:
11065 		dp_warn("Invalid param");
11066 		break;
11067 	}
11068 
11069 	return QDF_STATUS_SUCCESS;
11070 }
11071 
11072 /*
11073  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
 * @cdp_soc: DP soc handle
 * @vdev_id: id of DP_VDEV handle
 * @map_id: ID of map that needs to be updated
11077  *
11078  * Return: QDF_STATUS
11079  */
11080 static QDF_STATUS dp_set_vdev_dscp_tid_map_wifi3(ol_txrx_soc_handle cdp_soc,
11081 						 uint8_t vdev_id,
11082 						 uint8_t map_id)
11083 {
11084 	cdp_config_param_type val;
11085 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
11086 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11087 						     DP_MOD_ID_CDP);
11088 	if (vdev) {
11089 		vdev->dscp_tid_map_id = map_id;
11090 		val.cdp_vdev_param_dscp_tid_map_id = map_id;
11091 		soc->arch_ops.txrx_set_vdev_param(soc,
11092 						  vdev,
11093 						  CDP_UPDATE_DSCP_TO_TID_MAP,
11094 						  val);
		/* Update flag for transmit tid classification */
11096 		if (vdev->dscp_tid_map_id < soc->num_hw_dscp_tid_map)
11097 			vdev->skip_sw_tid_classification |=
11098 				DP_TX_HW_DSCP_TID_MAP_VALID;
11099 		else
11100 			vdev->skip_sw_tid_classification &=
11101 				~DP_TX_HW_DSCP_TID_MAP_VALID;
11102 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11103 		return QDF_STATUS_SUCCESS;
11104 	}
11105 
11106 	return QDF_STATUS_E_FAILURE;
11107 }
11108 
11109 #ifdef DP_RATETABLE_SUPPORT
11110 static int dp_txrx_get_ratekbps(int preamb, int mcs,
11111 				int htflag, int gintval)
11112 {
11113 	uint32_t rix;
11114 	uint16_t ratecode;
11115 	enum cdp_punctured_modes punc_mode = NO_PUNCTURE;
11116 
11117 	return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1,
11118 			       (uint8_t)preamb, 1, punc_mode,
11119 			       &rix, &ratecode);
11120 }
11121 #else
11122 static int dp_txrx_get_ratekbps(int preamb, int mcs,
11123 				int htflag, int gintval)
11124 {
11125 	return 0;
11126 }
11127 #endif
11128 
11129 /* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
11130  * @soc: DP soc handle
11131  * @pdev_id: id of DP pdev handle
11132  * @pdev_stats: buffer to copy to
11133  *
 * Return: status success/failure
11135  */
11136 static QDF_STATUS
11137 dp_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
11138 		       struct cdp_pdev_stats *pdev_stats)
11139 {
11140 	struct dp_pdev *pdev =
11141 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11142 						   pdev_id);
11143 	if (!pdev)
11144 		return QDF_STATUS_E_FAILURE;
11145 
11146 	dp_aggregate_pdev_stats(pdev);
11147 
11148 	qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats));
11149 	return QDF_STATUS_SUCCESS;
11150 }
11151 
11152 /* dp_txrx_update_vdev_me_stats(): Update vdev ME stats sent from CDP
11153  * @vdev: DP vdev handle
11154  * @buf: buffer containing specific stats structure
11155  *
11156  * Returns: void
11157  */
11158 static void dp_txrx_update_vdev_me_stats(struct dp_vdev *vdev,
11159 					 void *buf)
11160 {
11161 	struct cdp_tx_ingress_stats *host_stats = NULL;
11162 
11163 	if (!buf) {
11164 		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
11165 		return;
11166 	}
11167 	host_stats = (struct cdp_tx_ingress_stats *)buf;
11168 
11169 	DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt,
11170 			 host_stats->mcast_en.mcast_pkt.num,
11171 			 host_stats->mcast_en.mcast_pkt.bytes);
11172 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error,
11173 		     host_stats->mcast_en.dropped_map_error);
11174 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_self_mac,
11175 		     host_stats->mcast_en.dropped_self_mac);
11176 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_send_fail,
11177 		     host_stats->mcast_en.dropped_send_fail);
11178 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast,
11179 		     host_stats->mcast_en.ucast);
11180 	DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc,
11181 		     host_stats->mcast_en.fail_seg_alloc);
11182 	DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail,
11183 		     host_stats->mcast_en.clone_fail);
11184 }
11185 
11186 /* dp_txrx_update_vdev_igmp_me_stats(): Update vdev IGMP ME stats sent from CDP
11187  * @vdev: DP vdev handle
11188  * @buf: buffer containing specific stats structure
11189  *
11190  * Returns: void
11191  */
11192 static void dp_txrx_update_vdev_igmp_me_stats(struct dp_vdev *vdev,
11193 					      void *buf)
11194 {
11195 	struct cdp_tx_ingress_stats *host_stats = NULL;
11196 
11197 	if (!buf) {
11198 		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
11199 		return;
11200 	}
11201 	host_stats = (struct cdp_tx_ingress_stats *)buf;
11202 
11203 	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_rcvd,
11204 		     host_stats->igmp_mcast_en.igmp_rcvd);
11205 	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_ucast_converted,
11206 		     host_stats->igmp_mcast_en.igmp_ucast_converted);
11207 }
11208 
11209 /* dp_txrx_update_vdev_host_stats(): Update stats sent through CDP
 * @soc_hdl: DP soc handle
11211  * @vdev_id: id of DP vdev handle
11212  * @buf: buffer containing specific stats structure
11213  * @stats_id: stats type
11214  *
11215  * Returns: QDF_STATUS
11216  */
11217 static QDF_STATUS dp_txrx_update_vdev_host_stats(struct cdp_soc_t *soc_hdl,
11218 						 uint8_t vdev_id,
11219 						 void *buf,
11220 						 uint16_t stats_id)
11221 {
11222 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11223 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11224 						     DP_MOD_ID_CDP);
11225 
11226 	if (!vdev) {
11227 		dp_cdp_err("%pK: Invalid vdev handle", soc);
11228 		return QDF_STATUS_E_FAILURE;
11229 	}
11230 
11231 	switch (stats_id) {
11232 	case DP_VDEV_STATS_PKT_CNT_ONLY:
11233 		break;
11234 	case DP_VDEV_STATS_TX_ME:
11235 		dp_txrx_update_vdev_me_stats(vdev, buf);
11236 		dp_txrx_update_vdev_igmp_me_stats(vdev, buf);
11237 		break;
11238 	default:
11239 		qdf_info("Invalid stats_id %d", stats_id);
11240 		break;
11241 	}
11242 
11243 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11244 	return QDF_STATUS_SUCCESS;
11245 }
11246 
11247 /* dp_txrx_get_peer_stats - will return cdp_peer_stats
11248  * @soc: soc handle
11249  * @vdev_id: id of vdev handle
11250  * @peer_mac: mac of DP_PEER handle
11251  * @peer_stats: buffer to copy to
 *
 * Return: status success/failure
11253  */
11254 static QDF_STATUS
11255 dp_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
11256 		       uint8_t *peer_mac, struct cdp_peer_stats *peer_stats)
11257 {
11258 	struct dp_peer *peer = NULL;
11259 	struct cdp_peer_info peer_info = { 0 };
11260 
11261 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac, false,
11262 				 CDP_WILD_PEER_TYPE);
11263 
11264 	peer = dp_peer_hash_find_wrapper((struct dp_soc *)soc, &peer_info,
11265 					 DP_MOD_ID_CDP);
11266 
11267 	qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats));
11268 
11269 	if (!peer)
11270 		return QDF_STATUS_E_FAILURE;
11271 
11272 	dp_get_peer_stats(peer, peer_stats);
11273 
11274 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11275 
11276 	return QDF_STATUS_SUCCESS;
11277 }
11278 
11279 /* dp_txrx_get_peer_stats_param - will return specified cdp_peer_stats
 * @soc: soc handle
 * @vdev_id: vdev_id of vdev object
 * @peer_mac: mac address of the peer
 * @type: enum of required stats
 * @buf: buffer to hold the value
 *
 * Return: status success/failure
11286  */
11287 static QDF_STATUS
11288 dp_txrx_get_peer_stats_param(struct cdp_soc_t *soc, uint8_t vdev_id,
11289 			     uint8_t *peer_mac, enum cdp_peer_stats_type type,
11290 			     cdp_peer_stats_param_t *buf)
11291 {
11292 	QDF_STATUS ret;
11293 	struct dp_peer *peer = NULL;
11294 	struct cdp_peer_info peer_info = { 0 };
11295 
11296 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac, false,
11297 				 CDP_WILD_PEER_TYPE);
11298 
11299 	peer = dp_peer_hash_find_wrapper((struct dp_soc *)soc, &peer_info,
					 DP_MOD_ID_CDP);
11301 
11302 	if (!peer) {
11303 		dp_peer_err("%pK: Invalid Peer for Mac " QDF_MAC_ADDR_FMT,
11304 			    soc, QDF_MAC_ADDR_REF(peer_mac));
11305 		return QDF_STATUS_E_FAILURE;
11306 	}
11307 
11308 	if (type >= cdp_peer_per_pkt_stats_min &&
11309 	    type < cdp_peer_per_pkt_stats_max) {
11310 		ret = dp_txrx_get_peer_per_pkt_stats_param(peer, type, buf);
11311 	} else if (type >= cdp_peer_extd_stats_min &&
11312 		   type < cdp_peer_extd_stats_max) {
11313 		ret = dp_txrx_get_peer_extd_stats_param(peer, type, buf);
11314 	} else {
11315 		dp_err("%pK: Invalid stat type requested", soc);
11316 		ret = QDF_STATUS_E_FAILURE;
11317 	}
11318 
11319 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11320 
11321 	return ret;
11322 }
11323 
11324 /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
11325  * @soc: soc handle
11326  * @vdev_id: id of vdev handle
11327  * @peer_mac: mac of DP_PEER handle
11328  *
 * Return: QDF_STATUS
11330  */
11331 #ifdef WLAN_FEATURE_11BE_MLO
11332 static QDF_STATUS
11333 dp_txrx_reset_peer_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
11334 			 uint8_t *peer_mac)
11335 {
11336 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11337 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11338 	struct dp_peer *peer =
11339 			dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
11340 						       vdev_id, DP_MOD_ID_CDP);
11341 
11342 	if (!peer)
11343 		return QDF_STATUS_E_FAILURE;
11344 
11345 	DP_STATS_CLR(peer);
11346 	dp_txrx_peer_stats_clr(peer->txrx_peer);
11347 
11348 	if (IS_MLO_DP_MLD_PEER(peer)) {
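		/* for an MLD peer, also clear the stats of each link peer */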
11349 		uint8_t i;
11350 		struct dp_peer *link_peer;
11351 		struct dp_soc *link_peer_soc;
11352 		struct dp_mld_link_peers link_peers_info;
11353 
11354 		dp_get_link_peers_ref_from_mld_peer(soc, peer,
11355 						    &link_peers_info,
11356 						    DP_MOD_ID_CDP);
11357 		for (i = 0; i < link_peers_info.num_links; i++) {
11358 			link_peer = link_peers_info.link_peers[i];
11359 			link_peer_soc = link_peer->vdev->pdev->soc;
11360 
11361 			DP_STATS_CLR(link_peer);
11362 			dp_monitor_peer_reset_stats(link_peer_soc, link_peer);
11363 		}
11364 
11365 		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
11366 	} else {
11367 		dp_monitor_peer_reset_stats(soc, peer);
11368 	}
11369 
11370 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11371 
11372 	return status;
11373 }
11374 #else
11375 static QDF_STATUS
11376 dp_txrx_reset_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
11377 			 uint8_t *peer_mac)
11378 {
11379 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11380 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
11381 						      peer_mac, 0, vdev_id,
11382 						      DP_MOD_ID_CDP);
11383 
11384 	if (!peer)
11385 		return QDF_STATUS_E_FAILURE;
11386 
11387 	DP_STATS_CLR(peer);
11388 	dp_txrx_peer_stats_clr(peer->txrx_peer);
11389 	dp_monitor_peer_reset_stats((struct dp_soc *)soc, peer);
11390 
11391 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11392 
11393 	return status;
11394 }
11395 #endif
11396 
11397 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
 * @soc_hdl: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @buf: buffer for vdev stats
 * @is_aggregate: whether to aggregate stats across the vdev's peers
 *
 * Return: int (0 on success, 1 if vdev is not found)
11402  */
11403 static int dp_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
11404 				  void *buf, bool is_aggregate)
11405 {
11406 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11407 	struct cdp_vdev_stats *vdev_stats;
11408 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11409 						     DP_MOD_ID_CDP);
11410 
11411 	if (!vdev)
11412 		return 1;
11413 
11414 	vdev_stats = (struct cdp_vdev_stats *)buf;
11415 
11416 	if (is_aggregate) {
11417 		dp_aggregate_vdev_stats(vdev, buf);
11418 	} else {
11419 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
11420 	}
11421 
11422 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11423 	return 0;
11424 }
11425 
11426 /*
 * dp_get_total_per(): get total packet error rate (PER)
11428  * @soc: DP soc handle
11429  * @pdev_id: id of DP_PDEV handle
11430  *
11431  * Return: % error rate using retries per packet and success packets
11432  */
11433 static int dp_get_total_per(struct cdp_soc_t *soc, uint8_t pdev_id)
11434 {
11435 	struct dp_pdev *pdev =
11436 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11437 						   pdev_id);
11438 
11439 	if (!pdev)
11440 		return 0;
11441 
11442 	dp_aggregate_pdev_stats(pdev);
11443 	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
11444 		return 0;
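	/* PER % = (retries * 100) / (successful tx packets + retries) */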
11445 	return ((pdev->stats.tx.retries * 100) /
11446 		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
11447 }
11448 
11449 /*
11450  * dp_txrx_stats_publish(): publish pdev stats into a buffer
11451  * @soc: DP soc handle
11452  * @pdev_id: id of DP_PDEV handle
11453  * @buf: to hold pdev_stats
11454  *
11455  * Return: int
11456  */
11457 static int
11458 dp_txrx_stats_publish(struct cdp_soc_t *soc, uint8_t pdev_id,
11459 		      struct cdp_stats_extd *buf)
11460 {
11461 	struct cdp_txrx_stats_req req = {0,};
11462 	QDF_STATUS status;
11463 	struct dp_pdev *pdev =
11464 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11465 						   pdev_id);
11466 
11467 	if (!pdev)
11468 		return TXRX_STATS_LEVEL_OFF;
11469 
11470 	if (pdev->pending_fw_stats_response)
11471 		return TXRX_STATS_LEVEL_OFF;
11472 
11473 	dp_aggregate_pdev_stats(pdev);
11474 
11475 	pdev->pending_fw_stats_response = true;
11476 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
11477 	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
11478 	pdev->fw_stats_tlv_bitmap_rcvd = 0;
11479 	qdf_event_reset(&pdev->fw_stats_event);
11480 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
11481 				req.param1, req.param2, req.param3, 0,
11482 				req.cookie_val, 0);
11483 
11484 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
11485 	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
11486 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
11487 				req.param1, req.param2, req.param3, 0,
11488 				req.cookie_val, 0);
11489 
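	/* wait for FW responses to both stats requests; bail out on timeout */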
11490 	status =
11491 		qdf_wait_single_event(&pdev->fw_stats_event, DP_MAX_SLEEP_TIME);
11492 
11493 	if (status != QDF_STATUS_SUCCESS) {
11494 		if (status == QDF_STATUS_E_TIMEOUT)
11495 			qdf_debug("TIMEOUT_OCCURS");
11496 		pdev->pending_fw_stats_response = false;
11497 		return TXRX_STATS_LEVEL_OFF;
11498 	}
11499 	qdf_mem_copy(buf, &pdev->stats, sizeof(struct cdp_pdev_stats));
11500 	pdev->pending_fw_stats_response = false;
11501 
11502 	return TXRX_STATS_LEVEL;
11503 }
11504 
11505 /*
11506  * dp_get_obss_stats(): Get Pdev OBSS stats from Fw
11507  * @soc: DP soc handle
11508  * @pdev_id: id of DP_PDEV handle
11509  * @buf: to hold pdev obss stats
11510  *
11511  * Return: status
11512  */
11513 static QDF_STATUS
11514 dp_get_obss_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
11515 		  struct cdp_pdev_obss_pd_stats_tlv *buf)
11516 {
11517 	struct cdp_txrx_stats_req req = {0};
11518 	QDF_STATUS status;
11519 	struct dp_pdev *pdev =
11520 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11521 						   pdev_id);
11522 
11523 	if (!pdev)
11524 		return QDF_STATUS_E_INVAL;
11525 
11526 	if (pdev->pending_fw_obss_stats_response)
11527 		return QDF_STATUS_E_AGAIN;
11528 
11529 	pdev->pending_fw_obss_stats_response = true;
11530 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_OBSS_PD_STATS;
11531 	req.cookie_val = DBG_STATS_COOKIE_HTT_OBSS;
11532 	qdf_event_reset(&pdev->fw_obss_stats_event);
11533 	status = dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
11534 					   req.param1, req.param2, req.param3,
11535 					   0, req.cookie_val, 0);
11536 	if (QDF_IS_STATUS_ERROR(status)) {
11537 		pdev->pending_fw_obss_stats_response = false;
11538 		return status;
11539 	}
11540 	status =
11541 		qdf_wait_single_event(&pdev->fw_obss_stats_event,
11542 				      DP_MAX_SLEEP_TIME);
11543 
11544 	if (status != QDF_STATUS_SUCCESS) {
11545 		if (status == QDF_STATUS_E_TIMEOUT)
11546 			qdf_debug("TIMEOUT_OCCURS");
11547 		pdev->pending_fw_obss_stats_response = false;
11548 		return QDF_STATUS_E_TIMEOUT;
11549 	}
11550 	qdf_mem_copy(buf, &pdev->stats.htt_tx_pdev_stats.obss_pd_stats_tlv,
11551 		     sizeof(struct cdp_pdev_obss_pd_stats_tlv));
11552 	pdev->pending_fw_obss_stats_response = false;
11553 	return status;
11554 }
11555 
11556 /*
11557  * dp_clear_pdev_obss_pd_stats(): Clear pdev obss stats
11558  * @soc: DP soc handle
11559  * @pdev_id: id of DP_PDEV handle
11560  *
11561  * Return: status
11562  */
11563 static QDF_STATUS
11564 dp_clear_pdev_obss_pd_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
11565 {
11566 	struct cdp_txrx_stats_req req = {0};
11567 	struct dp_pdev *pdev =
11568 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11569 						   pdev_id);
11570 	uint32_t cookie_val = DBG_STATS_COOKIE_DEFAULT;
11571 
11572 	if (!pdev)
11573 		return QDF_STATUS_E_INVAL;
11574 
11575 	/*
	 * For the HTT_DBG_EXT_STATS_RESET command, FW expects the host to
	 * configure param0 to param3 according to the rule below:
11578 	 *
11579 	 * PARAM:
11580 	 *   - config_param0 : start_offset (stats type)
11581 	 *   - config_param1 : stats bmask from start offset
11582 	 *   - config_param2 : stats bmask from start offset + 32
11583 	 *   - config_param3 : stats bmask from start offset + 64
11584 	 */
11585 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_RESET;
11586 	req.param0 = HTT_DBG_EXT_STATS_PDEV_OBSS_PD_STATS;
11587 	req.param1 = 0x00000001;
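	/* param0 carries the stats type to reset; bit 0 of param1 selects
	 * the first stats bmask entry at that offset (see rule above)
	 */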
11588 
11589 	return dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
11590 				  req.param1, req.param2, req.param3, 0,
11591 				cookie_val, 0);
11592 }
11593 
11594 /**
11595  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
 * @soc_handle: soc handle
11597  * @pdev_id: id of DP_PDEV handle
11598  * @map_id: ID of map that needs to be updated
11599  * @tos: index value in map
11600  * @tid: tid value passed by the user
11601  *
11602  * Return: QDF_STATUS
11603  */
11604 static QDF_STATUS
11605 dp_set_pdev_dscp_tid_map_wifi3(struct cdp_soc_t *soc_handle,
11606 			       uint8_t pdev_id,
11607 			       uint8_t map_id,
11608 			       uint8_t tos, uint8_t tid)
11609 {
11610 	uint8_t dscp;
11611 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
11612 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11613 
11614 	if (!pdev)
11615 		return QDF_STATUS_E_FAILURE;
11616 
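	/* DSCP occupies the upper 6 bits of the 8-bit IP TOS field */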
11617 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
11618 	pdev->dscp_tid_map[map_id][dscp] = tid;
11619 
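	/* program the HW DSCP-TID register only for HW-backed map ids;
	 * ids beyond the HW range are rejected
	 */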
11620 	if (map_id < soc->num_hw_dscp_tid_map)
11621 		hal_tx_update_dscp_tid(soc->hal_soc, tid,
11622 				       map_id, dscp);
11623 	else
11624 		return QDF_STATUS_E_FAILURE;
11625 
11626 	return QDF_STATUS_SUCCESS;
11627 }
11628 
11629 #ifdef WLAN_SYSFS_DP_STATS
11630 /*
 * dp_sysfs_event_trigger(): wait for the firmware stats request
 * response event to complete.
 * @soc: soc handle
 * @cookie_val: cookie value
 *
 * Return: QDF_STATUS
11637  */
11638 static QDF_STATUS
11639 dp_sysfs_event_trigger(struct dp_soc *soc, uint32_t cookie_val)
11640 {
11641 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11642 	/* wait for firmware response for sysfs stats request */
11643 	if (cookie_val == DBG_SYSFS_STATS_COOKIE) {
11644 		if (!soc) {
11645 			dp_cdp_err("soc is NULL");
11646 			return QDF_STATUS_E_FAILURE;
11647 		}
11648 		/* wait for event completion */
11649 		status = qdf_wait_single_event(&soc->sysfs_config->sysfs_txrx_fw_request_done,
11650 					       WLAN_SYSFS_STAT_REQ_WAIT_MS);
11651 		if (status == QDF_STATUS_SUCCESS)
11652 			dp_cdp_info("sysfs_txrx_fw_request_done event completed");
11653 		else if (status == QDF_STATUS_E_TIMEOUT)
11654 			dp_cdp_warn("sysfs_txrx_fw_request_done event expired");
11655 		else
11656 			dp_cdp_warn("sysfs_txrx_fw_request_done event error code %d", status);
11657 	}
11658 
11659 	return status;
11660 }
11661 #else /* WLAN_SYSFS_DP_STATS */
11662 /*
 * dp_sysfs_event_trigger(): wait for the firmware stats request
 * response event to complete.
 * @soc: soc handle
 * @cookie_val: cookie value
 *
 * Return: QDF_STATUS
11669  */
11670 static QDF_STATUS
11671 dp_sysfs_event_trigger(struct dp_soc *soc, uint32_t cookie_val)
11672 {
11673 	return QDF_STATUS_SUCCESS;
11674 }
11675 #endif /* WLAN_SYSFS_DP_STATS */
11676 
11677 /**
11678  * dp_fw_stats_process(): Process TXRX FW stats request.
 * @vdev: DP VDEV handle
 * @req: stats request
 *
 * Return: QDF_STATUS
11683  */
11684 static QDF_STATUS
11685 dp_fw_stats_process(struct dp_vdev *vdev,
11686 		    struct cdp_txrx_stats_req *req)
11687 {
11688 	struct dp_pdev *pdev = NULL;
11689 	struct dp_soc *soc = NULL;
11690 	uint32_t stats = req->stats;
11691 	uint8_t mac_id = req->mac_id;
11692 	uint32_t cookie_val = DBG_STATS_COOKIE_DEFAULT;
11693 
11694 	if (!vdev) {
11695 		DP_TRACE(NONE, "VDEV not found");
11696 		return QDF_STATUS_E_FAILURE;
11697 	}
11698 
11699 	pdev = vdev->pdev;
11700 	if (!pdev) {
11701 		DP_TRACE(NONE, "PDEV not found");
11702 		return QDF_STATUS_E_FAILURE;
11703 	}
11704 
11705 	soc = pdev->soc;
11706 	if (!soc) {
11707 		DP_TRACE(NONE, "soc not found");
11708 		return QDF_STATUS_E_FAILURE;
11709 	}
11710 
11711 	/* In case request is from host sysfs for displaying stats on console */
11712 	if (req->cookie_val == DBG_SYSFS_STATS_COOKIE)
11713 		cookie_val = DBG_SYSFS_STATS_COOKIE;
11714 
11715 	/*
	 * For the HTT_DBG_EXT_STATS_RESET command, FW expects the host to
	 * configure param0 to param3 according to the rule below:
11718 	 *
11719 	 * PARAM:
11720 	 *   - config_param0 : start_offset (stats type)
11721 	 *   - config_param1 : stats bmask from start offset
11722 	 *   - config_param2 : stats bmask from start offset + 32
11723 	 *   - config_param3 : stats bmask from start offset + 64
11724 	 */
11725 	if (req->stats == CDP_TXRX_STATS_0) {
11726 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
11727 		req->param1 = 0xFFFFFFFF;
11728 		req->param2 = 0xFFFFFFFF;
11729 		req->param3 = 0xFFFFFFFF;
11730 	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
11731 		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
11732 	}
11733 
11734 	if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT) {
11735 		dp_h2t_ext_stats_msg_send(pdev,
11736 					  HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT,
11737 					  req->param0, req->param1, req->param2,
11738 					  req->param3, 0, cookie_val,
11739 					  mac_id);
11740 	} else {
11741 		dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
11742 					  req->param1, req->param2, req->param3,
11743 					  0, cookie_val, mac_id);
11744 	}
11745 
11746 	dp_sysfs_event_trigger(soc, cookie_val);
11747 
11748 	return QDF_STATUS_SUCCESS;
11749 }
11750 
11751 /**
11752  * dp_txrx_stats_request - function to map to firmware and host stats
11753  * @soc: soc handle
11754  * @vdev_id: virtual device ID
11755  * @req: stats request
11756  *
11757  * Return: QDF_STATUS
11758  */
11759 static
11760 QDF_STATUS dp_txrx_stats_request(struct cdp_soc_t *soc_handle,
11761 				 uint8_t vdev_id,
11762 				 struct cdp_txrx_stats_req *req)
11763 {
11764 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_handle);
11765 	int host_stats;
11766 	int fw_stats;
11767 	enum cdp_stats stats;
11768 	int num_stats;
11769 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11770 						     DP_MOD_ID_CDP);
11771 	QDF_STATUS status = QDF_STATUS_E_INVAL;
11772 
11773 	if (!vdev || !req) {
11774 		dp_cdp_err("%pK: Invalid vdev/req instance", soc);
11775 		status = QDF_STATUS_E_INVAL;
11776 		goto fail0;
11777 	}
11778 
11779 	if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) {
11780 		dp_err("Invalid mac id request");
11781 		status = QDF_STATUS_E_INVAL;
11782 		goto fail0;
11783 	}
11784 
11785 	stats = req->stats;
11786 	if (stats >= CDP_TXRX_MAX_STATS) {
11787 		status = QDF_STATUS_E_INVAL;
11788 		goto fail0;
11789 	}
11790 
11791 	/*
	 * DP_CURR_FW_STATS_AVAIL: number of FW stats currently available;
	 *			has to be updated if new FW HTT stats are added
11794 	 */
11795 	if (stats > CDP_TXRX_STATS_HTT_MAX)
11796 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
11797 
11798 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
11799 
11800 	if (stats >= num_stats) {
11801 		dp_cdp_err("%pK : Invalid stats option: %d", soc, stats);
11802 		status = QDF_STATUS_E_INVAL;
11803 		goto fail0;
11804 	}
11805 
11806 	req->stats = stats;
11807 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
11808 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
11809 
11810 	dp_info("stats: %u fw_stats_type: %d host_stats: %d",
11811 		stats, fw_stats, host_stats);
11812 
11813 	if (fw_stats != TXRX_FW_STATS_INVALID) {
11814 		/* update request with FW stats type */
11815 		req->stats = fw_stats;
11816 		status = dp_fw_stats_process(vdev, req);
11817 	} else if ((host_stats != TXRX_HOST_STATS_INVALID) &&
11818 			(host_stats <= TXRX_HOST_STATS_MAX))
11819 		status = dp_print_host_stats(vdev, req, soc);
11820 	else
11821 		dp_cdp_info("%pK: Wrong Input for TxRx Stats", soc);
11822 fail0:
11823 	if (vdev)
11824 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11825 	return status;
11826 }
11827 
11828 /*
 * dp_txrx_dump_stats() - Dump statistics
 * @psoc: CDP soc handle
 * @value: statistics option
 * @level: verbosity level
 *
 * Return: QDF_STATUS
11831  */
11832 static QDF_STATUS dp_txrx_dump_stats(struct cdp_soc_t *psoc, uint16_t value,
11833 				     enum qdf_stats_verbosity_level level)
11834 {
11835 	struct dp_soc *soc =
11836 		(struct dp_soc *)psoc;
11837 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11838 
11839 	if (!soc) {
11840 		dp_cdp_err("%pK: soc is NULL", soc);
11841 		return QDF_STATUS_E_INVAL;
11842 	}
11843 
11844 	switch (value) {
11845 	case CDP_TXRX_PATH_STATS:
11846 		dp_txrx_path_stats(soc);
11847 		dp_print_soc_interrupt_stats(soc);
11848 		hal_dump_reg_write_stats(soc->hal_soc);
11849 		dp_pdev_print_tx_delay_stats(soc);
11850 		/* Dump usage watermark stats for core TX/RX SRNGs */
11851 		dp_dump_srng_high_wm_stats(soc, (1 << REO_DST));
11852 		dp_print_fisa_stats(soc);
11853 		break;
11854 
11855 	case CDP_RX_RING_STATS:
11856 		dp_print_per_ring_stats(soc);
11857 		break;
11858 
11859 	case CDP_TXRX_TSO_STATS:
11860 		dp_print_tso_stats(soc, level);
11861 		break;
11862 
11863 	case CDP_DUMP_TX_FLOW_POOL_INFO:
11864 		if (level == QDF_STATS_VERBOSITY_LEVEL_HIGH)
11865 			cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
11866 		else
11867 			dp_tx_dump_flow_pool_info_compact(soc);
11868 		break;
11869 
11870 	case CDP_DP_NAPI_STATS:
11871 		dp_print_napi_stats(soc);
11872 		break;
11873 
11874 	case CDP_TXRX_DESC_STATS:
11875 		/* TODO: NOT IMPLEMENTED */
11876 		break;
11877 
11878 	case CDP_DP_RX_FISA_STATS:
11879 		dp_rx_dump_fisa_stats(soc);
11880 		break;
11881 
11882 	case CDP_DP_SWLM_STATS:
11883 		dp_print_swlm_stats(soc);
11884 		break;
11885 
11886 	case CDP_DP_TX_HW_LATENCY_STATS:
11887 		dp_pdev_print_tx_delay_stats(soc);
11888 		break;
11889 
11890 	default:
11891 		status = QDF_STATUS_E_INVAL;
11892 		break;
11893 	}
11894 
11895 	return status;
11897 }
11898 
11899 #ifdef WLAN_SYSFS_DP_STATS
11900 static
11901 void dp_sysfs_get_stat_type(struct dp_soc *soc, uint32_t *mac_id,
11902 			    uint32_t *stat_type)
11903 {
11904 	qdf_spinlock_acquire(&soc->sysfs_config->rw_stats_lock);
11905 	*stat_type = soc->sysfs_config->stat_type_requested;
11906 	*mac_id   = soc->sysfs_config->mac_id;
11907 
11908 	qdf_spinlock_release(&soc->sysfs_config->rw_stats_lock);
11909 }
11910 
11911 static
11912 void dp_sysfs_update_config_buf_params(struct dp_soc *soc,
11913 				       uint32_t curr_len,
11914 				       uint32_t max_buf_len,
11915 				       char *buf)
11916 {
11917 	qdf_spinlock_acquire(&soc->sysfs_config->sysfs_write_user_buffer);
11918 	/* set sysfs_config parameters */
11919 	soc->sysfs_config->buf = buf;
11920 	soc->sysfs_config->curr_buffer_length = curr_len;
11921 	soc->sysfs_config->max_buffer_length = max_buf_len;
11922 	qdf_spinlock_release(&soc->sysfs_config->sysfs_write_user_buffer);
11923 }
11924 
11925 static
11926 QDF_STATUS dp_sysfs_fill_stats(ol_txrx_soc_handle soc_hdl,
11927 			       char *buf, uint32_t buf_size)
11928 {
11929 	uint32_t mac_id = 0;
11930 	uint32_t stat_type = 0;
11931 	uint32_t fw_stats = 0;
11932 	uint32_t host_stats = 0;
11933 	enum cdp_stats stats;
11934 	struct cdp_txrx_stats_req req;
11935 	uint32_t num_stats;
11936 	struct dp_soc *soc = NULL;
11937 
11938 	if (!soc_hdl) {
11939 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
11940 		return QDF_STATUS_E_INVAL;
11941 	}
11942 
11943 	soc = cdp_soc_t_to_dp_soc(soc_hdl);
11944 
11945 	if (!soc) {
11946 		dp_cdp_err("%pK: soc is NULL", soc);
11947 		return QDF_STATUS_E_INVAL;
11948 	}
11949 
11950 	dp_sysfs_get_stat_type(soc, &mac_id, &stat_type);
11951 
11952 	stats = stat_type;
11953 	if (stats >= CDP_TXRX_MAX_STATS) {
11954 		dp_cdp_info("sysfs stat type requested is invalid");
11955 		return QDF_STATUS_E_INVAL;
11956 	}
11957 	/*
	 * DP_CURR_FW_STATS_AVAIL: number of FW stats currently available;
	 *			has to be updated if new FW HTT stats are added
11960 	 */
	if (stats > CDP_TXRX_STATS_HTT_MAX)
11962 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
11963 
11964 	num_stats = QDF_ARRAY_SIZE(dp_stats_mapping_table);
11965 
11966 	if (stats >= num_stats) {
11967 		dp_cdp_err("%pK : Invalid stats option: %d, max num stats: %d",
11968 				soc, stats, num_stats);
11969 		return QDF_STATUS_E_INVAL;
11970 	}
11971 
11972 	/* build request */
11973 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
11974 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
11975 
11976 	req.stats = stat_type;
11977 	req.mac_id = mac_id;
11978 	/* request stats to be printed */
11979 	qdf_mutex_acquire(&soc->sysfs_config->sysfs_read_lock);
11980 
11981 	if (fw_stats != TXRX_FW_STATS_INVALID) {
11982 		/* update request with FW stats type */
11983 		req.cookie_val = DBG_SYSFS_STATS_COOKIE;
11984 	} else if ((host_stats != TXRX_HOST_STATS_INVALID) &&
11985 			(host_stats <= TXRX_HOST_STATS_MAX)) {
11986 		req.cookie_val = DBG_STATS_COOKIE_DEFAULT;
11987 		soc->sysfs_config->process_id = qdf_get_current_pid();
11988 		soc->sysfs_config->printing_mode = PRINTING_MODE_ENABLED;
11989 	}
11990 
11991 	dp_sysfs_update_config_buf_params(soc, 0, buf_size, buf);
11992 
11993 	dp_txrx_stats_request(soc_hdl, mac_id, &req);
11994 	soc->sysfs_config->process_id = 0;
11995 	soc->sysfs_config->printing_mode = PRINTING_MODE_DISABLED;
11996 
11997 	dp_sysfs_update_config_buf_params(soc, 0, 0, NULL);
11998 
11999 	qdf_mutex_release(&soc->sysfs_config->sysfs_read_lock);
12000 	return QDF_STATUS_SUCCESS;
12001 }
12002 
12003 static
12004 QDF_STATUS dp_sysfs_set_stat_type(ol_txrx_soc_handle soc_hdl,
12005 				  uint32_t stat_type, uint32_t mac_id)
12006 {
12007 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12008 
12009 	if (!soc_hdl) {
12010 		dp_cdp_err("%pK: soc is NULL", soc);
12011 		return QDF_STATUS_E_INVAL;
12012 	}
12013 
12014 	qdf_spinlock_acquire(&soc->sysfs_config->rw_stats_lock);
12015 
12016 	soc->sysfs_config->stat_type_requested = stat_type;
12017 	soc->sysfs_config->mac_id = mac_id;
12018 
12019 	qdf_spinlock_release(&soc->sysfs_config->rw_stats_lock);
12020 
12021 	return QDF_STATUS_SUCCESS;
12022 }
12023 
12024 static
12025 QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl)
12026 {
12027 	struct dp_soc *soc;
12028 	QDF_STATUS status;
12029 
12030 	if (!soc_hdl) {
12031 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
12032 		return QDF_STATUS_E_INVAL;
12033 	}
12034 
12035 	soc = soc_hdl;
12036 
12037 	soc->sysfs_config = qdf_mem_malloc(sizeof(struct sysfs_stats_config));
12038 	if (!soc->sysfs_config) {
		dp_cdp_err("failed to allocate memory for sysfs_config");
12040 		return QDF_STATUS_E_NOMEM;
12041 	}
12042 
	/* create event for fw stats request from sysfs */
	status = qdf_event_create(&soc->sysfs_config->sysfs_txrx_fw_request_done);
12045 	if (status != QDF_STATUS_SUCCESS) {
12046 		dp_cdp_err("failed to create event sysfs_txrx_fw_request_done");
12047 		qdf_mem_free(soc->sysfs_config);
12048 		soc->sysfs_config = NULL;
12049 		return QDF_STATUS_E_FAILURE;
12050 	}
12051 
12052 	qdf_spinlock_create(&soc->sysfs_config->rw_stats_lock);
12053 	qdf_mutex_create(&soc->sysfs_config->sysfs_read_lock);
12054 	qdf_spinlock_create(&soc->sysfs_config->sysfs_write_user_buffer);
12055 
12056 	return QDF_STATUS_SUCCESS;
12057 }
12058 
12059 static
12060 QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl)
12061 {
12062 	struct dp_soc *soc;
12063 	QDF_STATUS status;
12064 
12065 	if (!soc_hdl) {
12066 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
12067 		return QDF_STATUS_E_INVAL;
12068 	}
12069 
12070 	soc = soc_hdl;
12071 	if (!soc->sysfs_config) {
12072 		dp_cdp_err("soc->sysfs_config is NULL");
12073 		return QDF_STATUS_E_FAILURE;
12074 	}
12075 
12076 	status = qdf_event_destroy(&soc->sysfs_config->sysfs_txrx_fw_request_done);
12077 	if (status != QDF_STATUS_SUCCESS)
		dp_cdp_err("Failed to destroy event sysfs_txrx_fw_request_done");
12079 
12080 	qdf_mutex_destroy(&soc->sysfs_config->sysfs_read_lock);
12081 	qdf_spinlock_destroy(&soc->sysfs_config->rw_stats_lock);
12082 	qdf_spinlock_destroy(&soc->sysfs_config->sysfs_write_user_buffer);
12083 
12084 	qdf_mem_free(soc->sysfs_config);
12085 
12086 	return QDF_STATUS_SUCCESS;
12087 }
12088 
12089 #else /* WLAN_SYSFS_DP_STATS */
12090 
12091 static
12092 QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl)
12093 {
12094 	return QDF_STATUS_SUCCESS;
12095 }
12096 
12097 static
12098 QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl)
12099 {
12100 	return QDF_STATUS_SUCCESS;
12101 }
12102 #endif /* WLAN_SYSFS_DP_STATS */
12103 
12104 /**
12105  * dp_txrx_clear_dump_stats() - clear dumpStats
 * @soc_hdl: soc handle
 * @pdev_id: id of DP pdev handle
 * @value: stats option
12108  *
12109  * Return: 0 - Success, non-zero - failure
12110  */
12111 static
12112 QDF_STATUS dp_txrx_clear_dump_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12113 				    uint8_t value)
12114 {
12115 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12116 	QDF_STATUS status = QDF_STATUS_SUCCESS;
12117 
12118 	if (!soc) {
12119 		dp_err("soc is NULL");
12120 		return QDF_STATUS_E_INVAL;
12121 	}
12122 
12123 	switch (value) {
12124 	case CDP_TXRX_TSO_STATS:
12125 		dp_txrx_clear_tso_stats(soc);
12126 		break;
12127 
12128 	case CDP_DP_TX_HW_LATENCY_STATS:
12129 		dp_pdev_clear_tx_delay_stats(soc);
12130 		break;
12131 
12132 	default:
12133 		status = QDF_STATUS_E_INVAL;
12134 		break;
12135 	}
12136 
12137 	return status;
12138 }
12139 
12140 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
12141 /**
12142  * dp_update_flow_control_parameters() - API to store datapath
12143  *                            config parameters
12144  * @soc: soc handle
 * @params: configuration parameters
12146  *
12147  * Return: void
12148  */
12149 static inline
12150 void dp_update_flow_control_parameters(struct dp_soc *soc,
12151 				struct cdp_config_params *params)
12152 {
12153 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
12154 					params->tx_flow_stop_queue_threshold;
12155 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
12156 					params->tx_flow_start_queue_offset;
12157 }
12158 #else
12159 static inline
12160 void dp_update_flow_control_parameters(struct dp_soc *soc,
12161 				struct cdp_config_params *params)
12162 {
12163 }
12164 #endif
12165 
12166 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
12167 /* Max packet limit for TX Comp packet loop (dp_tx_comp_handler) */
12168 #define DP_TX_COMP_LOOP_PKT_LIMIT_MAX 1024
12169 
12170 /* Max packet limit for RX REAP Loop (dp_rx_process) */
12171 #define DP_RX_REAP_LOOP_PKT_LIMIT_MAX 1024
12172 
12173 static
12174 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
12175 					struct cdp_config_params *params)
12176 {
12177 	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit =
12178 				params->tx_comp_loop_pkt_limit;
12179 
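	/* end-of-loop data check is useful only when the loop budget is
	 * below the max, i.e. when the reap loop may exit with work pending
	 */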
12180 	if (params->tx_comp_loop_pkt_limit < DP_TX_COMP_LOOP_PKT_LIMIT_MAX)
12181 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = true;
12182 	else
12183 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = false;
12184 
12185 	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit =
12186 				params->rx_reap_loop_pkt_limit;
12187 
12188 	if (params->rx_reap_loop_pkt_limit < DP_RX_REAP_LOOP_PKT_LIMIT_MAX)
12189 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = true;
12190 	else
12191 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = false;
12192 
12193 	soc->wlan_cfg_ctx->rx_hp_oos_update_limit =
12194 				params->rx_hp_oos_update_limit;
12195 
12196 	dp_info("tx_comp_loop_pkt_limit %u tx_comp_enable_eol_data_check %u rx_reap_loop_pkt_limit %u rx_enable_eol_data_check %u rx_hp_oos_update_limit %u",
12197 		soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit,
12198 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check,
12199 		soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit,
12200 		soc->wlan_cfg_ctx->rx_enable_eol_data_check,
12201 		soc->wlan_cfg_ctx->rx_hp_oos_update_limit);
12202 }
12203 
12204 static void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
12205 				      uint32_t rx_limit)
12206 {
12207 	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit = tx_limit;
12208 	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit = rx_limit;
12209 }
12210 
12211 #else
12212 static inline
12213 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
12214 					struct cdp_config_params *params)
12215 { }
12216 
12217 static inline
12218 void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
12219 			       uint32_t rx_limit)
12220 {
12221 }
12222 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
12223 
12224 /**
12225  * dp_update_config_parameters() - API to store datapath
12226  *                            config parameters
 * @psoc: soc handle
 * @params: configuration parameters
12229  *
12230  * Return: status
12231  */
12232 static
12233 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
12234 				struct cdp_config_params *params)
12235 {
12236 	struct dp_soc *soc = (struct dp_soc *)psoc;
12237 
12238 	if (!(soc)) {
12239 		dp_cdp_err("%pK: Invalid handle", soc);
12240 		return QDF_STATUS_E_INVAL;
12241 	}
12242 
12243 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
12244 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
12245 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
12246 	soc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload =
12247 				params->p2p_tcp_udp_checksumoffload;
12248 	soc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload =
12249 				params->nan_tcp_udp_checksumoffload;
12250 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
12251 				params->tcp_udp_checksumoffload;
12252 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
12253 	soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
12254 	soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
12255 
12256 	dp_update_rx_soft_irq_limit_params(soc, params);
12257 	dp_update_flow_control_parameters(soc, params);
12258 
12259 	return QDF_STATUS_SUCCESS;
12260 }
12261 
12262 static struct cdp_wds_ops dp_ops_wds = {
12263 	.vdev_set_wds = dp_vdev_set_wds,
12264 #ifdef WDS_VENDOR_EXTENSION
12265 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
12266 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
12267 #endif
12268 };
12269 
12270 /*
12271  * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
 * @soc_hdl: datapath soc handle
 * @vdev_id: virtual interface id
 * @callback: callback function
 * @ctxt: callback context
 *
 * Return: void
12277  */
12278 static void
12279 dp_txrx_data_tx_cb_set(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
12280 		       ol_txrx_data_tx_cb callback, void *ctxt)
12281 {
12282 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12283 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12284 						     DP_MOD_ID_CDP);
12285 
12286 	if (!vdev)
12287 		return;
12288 
12289 	vdev->tx_non_std_data_callback.func = callback;
12290 	vdev->tx_non_std_data_callback.ctxt = ctxt;
12291 
12292 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12293 }
12294 
12295 /**
12296  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
12297  * @soc: datapath soc handle
12298  * @pdev_id: id of datapath pdev handle
12299  *
12300  * Return: opaque pointer to dp txrx handle
12301  */
12302 static void *dp_pdev_get_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id)
12303 {
12304 	struct dp_pdev *pdev =
12305 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
12306 						   pdev_id);
12307 	if (qdf_unlikely(!pdev))
12308 		return NULL;
12309 
12310 	return pdev->dp_txrx_handle;
12311 }
12312 
12313 /**
12314  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
12315  * @soc: datapath soc handle
12316  * @pdev_id: id of datapath pdev handle
12317  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
12318  *
12319  * Return: void
12320  */
12321 static void
12322 dp_pdev_set_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id,
12323 			   void *dp_txrx_hdl)
12324 {
12325 	struct dp_pdev *pdev =
12326 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
12327 						   pdev_id);
12328 
12329 	if (!pdev)
12330 		return;
12331 
12332 	pdev->dp_txrx_handle = dp_txrx_hdl;
12333 }
12334 
12335 /**
12336  * dp_vdev_get_dp_ext_handle() - get dp handle from vdev
12337  * @soc: datapath soc handle
12338  * @vdev_id: vdev id
12339  *
12340  * Return: opaque pointer to dp txrx handle
12341  */
12342 static void *dp_vdev_get_dp_ext_handle(ol_txrx_soc_handle soc_hdl,
12343 				       uint8_t vdev_id)
12344 {
12345 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12346 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12347 						     DP_MOD_ID_CDP);
12348 	void *dp_ext_handle;
12349 
12350 	if (!vdev)
12351 		return NULL;
12352 	dp_ext_handle = vdev->vdev_dp_ext_handle;
12353 
12354 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12355 	return dp_ext_handle;
12356 }
12357 
12358 /**
12359  * dp_vdev_set_dp_ext_handle() - set dp handle in vdev
12360  * @soc: datapath soc handle
12361  * @vdev_id: vdev id
 * @size: size of the advanced dp handle
12363  *
12364  * Return: QDF_STATUS
12365  */
12366 static QDF_STATUS
12367 dp_vdev_set_dp_ext_handle(ol_txrx_soc_handle soc_hdl, uint8_t vdev_id,
12368 			  uint16_t size)
12369 {
12370 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12371 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12372 						     DP_MOD_ID_CDP);
12373 	void *dp_ext_handle;
12374 
12375 	if (!vdev)
12376 		return QDF_STATUS_E_FAILURE;
12377 
12378 	dp_ext_handle = qdf_mem_malloc(size);
12379 
12380 	if (!dp_ext_handle) {
12381 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12382 		return QDF_STATUS_E_FAILURE;
12383 	}
12384 
12385 	vdev->vdev_dp_ext_handle = dp_ext_handle;
12386 
12387 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12388 	return QDF_STATUS_SUCCESS;
12389 }
12390 
12391 /**
12392  * dp_vdev_inform_ll_conn() - Inform vdev to add/delete a latency critical
12393  *			      connection for this vdev
12394  * @soc_hdl: CDP soc handle
12395  * @vdev_id: vdev ID
12396  * @action: Add/Delete action
12397  *
12398  * Returns: QDF_STATUS.
12399  */
12400 static QDF_STATUS
12401 dp_vdev_inform_ll_conn(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
12402 		       enum vdev_ll_conn_actions action)
12403 {
12404 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12405 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12406 						     DP_MOD_ID_CDP);
12407 
12408 	if (!vdev) {
12409 		dp_err("LL connection action for invalid vdev %d", vdev_id);
12410 		return QDF_STATUS_E_FAILURE;
12411 	}
12412 
12413 	switch (action) {
12414 	case CDP_VDEV_LL_CONN_ADD:
12415 		vdev->num_latency_critical_conn++;
12416 		break;
12417 
12418 	case CDP_VDEV_LL_CONN_DEL:
12419 		vdev->num_latency_critical_conn--;
12420 		break;
12421 
12422 	default:
12423 		dp_err("LL connection action invalid %d", action);
12424 		break;
12425 	}
12426 
12427 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12428 	return QDF_STATUS_SUCCESS;
12429 }
12430 
12431 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
12432 /**
12433  * dp_soc_set_swlm_enable() - Enable/Disable SWLM if initialized.
12434  * @soc_hdl: CDP Soc handle
12435  * @value: Enable/Disable value
12436  *
12437  * Returns: QDF_STATUS
12438  */
12439 static QDF_STATUS dp_soc_set_swlm_enable(struct cdp_soc_t *soc_hdl,
12440 					 uint8_t value)
12441 {
12442 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12443 
12444 	if (!soc->swlm.is_init) {
12445 		dp_err("SWLM is not initialized");
12446 		return QDF_STATUS_E_FAILURE;
12447 	}
12448 
12449 	soc->swlm.is_enabled = !!value;
12450 
12451 	return QDF_STATUS_SUCCESS;
12452 }
12453 
12454 /**
12455  * dp_soc_is_swlm_enabled() - Check if SWLM is enabled.
12456  * @soc_hdl: CDP Soc handle
12457  *
 * Returns: 1 if SWLM is enabled, 0 otherwise
12459  */
12460 static uint8_t dp_soc_is_swlm_enabled(struct cdp_soc_t *soc_hdl)
12461 {
12462 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12463 
12464 	return soc->swlm.is_enabled;
12465 }
12466 #endif
12467 
12468 /**
12469  * dp_display_srng_info() - Dump the srng HP TP info
12470  * @soc_hdl: CDP Soc handle
12471  *
12472  * This function dumps the SW hp/tp values for the important rings.
12473  * HW hp/tp values are not dumped, since reading them can trigger a
12474  * READ NOC error while the UMAC is in a low-power state; MCC does not
12475  * have device force wake working yet.
12476  *
12477  * Return: none
12478  */
12479 static void dp_display_srng_info(struct cdp_soc_t *soc_hdl)
12480 {
12481 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12482 	hal_soc_handle_t hal_soc = soc->hal_soc;
12483 	uint32_t hp, tp, i;
12484 
12485 	dp_info("SRNG HP-TP data:");
12486 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
12487 		hal_get_sw_hptp(hal_soc, soc->tcl_data_ring[i].hal_srng,
12488 				&tp, &hp);
12489 		dp_info("TCL DATA ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
12490 
12491 		if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, i) ==
12492 		    INVALID_WBM_RING_NUM)
12493 			continue;
12494 
12495 		hal_get_sw_hptp(hal_soc, soc->tx_comp_ring[i].hal_srng,
12496 				&tp, &hp);
12497 		dp_info("TX comp ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
12498 	}
12499 
12500 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
12501 		hal_get_sw_hptp(hal_soc, soc->reo_dest_ring[i].hal_srng,
12502 				&tp, &hp);
12503 		dp_info("REO DST ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
12504 	}
12505 
12506 	hal_get_sw_hptp(hal_soc, soc->reo_exception_ring.hal_srng, &tp, &hp);
12507 	dp_info("REO exception ring: hp=0x%x, tp=0x%x", hp, tp);
12508 
12509 	hal_get_sw_hptp(hal_soc, soc->rx_rel_ring.hal_srng, &tp, &hp);
12510 	dp_info("WBM RX release ring: hp=0x%x, tp=0x%x", hp, tp);
12511 
12512 	hal_get_sw_hptp(hal_soc, soc->wbm_desc_rel_ring.hal_srng, &tp, &hp);
12513 	dp_info("WBM desc release ring: hp=0x%x, tp=0x%x", hp, tp);
12514 }
12515 
12516 /**
12517  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
12518  * @soc_handle: datapath soc handle
12519  *
12520  * Return: opaque pointer to external dp (non-core DP)
12521  */
12522 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
12523 {
12524 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12525 
12526 	return soc->external_txrx_handle;
12527 }
12528 
12529 /**
12530  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
12531  * @soc_handle: datapath soc handle
12532  * @txrx_handle: opaque pointer to external dp (non-core DP)
12533  *
12534  * Return: void
12535  */
12536 static void
12537 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
12538 {
12539 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12540 
12541 	soc->external_txrx_handle = txrx_handle;
12542 }
12543 
12544 /**
12545  * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping
12546  * @soc_hdl: datapath soc handle
12547  * @pdev_id: id of the datapath pdev handle
12548  * @lmac_id: lmac id
12549  *
12550  * Return: QDF_STATUS
12551  */
12552 static QDF_STATUS
12553 dp_soc_map_pdev_to_lmac
12554 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12555 	 uint32_t lmac_id)
12556 {
12557 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12558 
12559 	wlan_cfg_set_hw_mac_idx(soc->wlan_cfg_ctx,
12560 				pdev_id,
12561 				lmac_id);
12562 
12563 	/* Set host PDEV ID for lmac_id */
12564 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
12565 			      pdev_id,
12566 			      lmac_id);
12567 
12568 	return QDF_STATUS_SUCCESS;
12569 }
12570 
12571 /**
12572  * dp_soc_handle_pdev_mode_change() - Update pdev to lmac mapping
12573  * @soc_hdl: datapath soc handle
12574  * @pdev_id: id of the datapath pdev handle
12575  * @lmac_id: lmac id
12576  *
12577  * In the event of a dynamic mode change, update the pdev to lmac mapping
12578  *
12579  * Return: QDF_STATUS
12580  */
12581 static QDF_STATUS
12582 dp_soc_handle_pdev_mode_change
12583 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12584 	 uint32_t lmac_id)
12585 {
12586 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12587 	struct dp_vdev *vdev = NULL;
12588 	uint8_t hw_pdev_id, mac_id;
12589 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc,
12590 								  pdev_id);
12591 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
12592 
12593 	if (qdf_unlikely(!pdev))
12594 		return QDF_STATUS_E_FAILURE;
12595 
12596 	pdev->lmac_id = lmac_id;
12597 	pdev->target_pdev_id =
12598 		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
12599 	dp_info("mode change: pdev_id %d, lmac_id %d", pdev->pdev_id, pdev->lmac_id);
12600 
12601 	/* Set host PDEV ID for lmac_id */
12602 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
12603 			      pdev->pdev_id,
12604 			      lmac_id);
12605 
12606 	hw_pdev_id =
12607 		dp_get_target_pdev_id_for_host_pdev_id(soc,
12608 						       pdev->pdev_id);
12609 
12610 	/*
12611 	 * When NSS offload is enabled, send the pdev_id to lmac_id and
12612 	 * pdev_id to hw_pdev_id mappings to the NSS FW.
12613 	 */
12614 	if (nss_config) {
12615 		mac_id = pdev->lmac_id;
12616 		if (soc->cdp_soc.ol_ops->pdev_update_lmac_n_target_pdev_id)
12617 			soc->cdp_soc.ol_ops->
12618 				pdev_update_lmac_n_target_pdev_id(
12619 				soc->ctrl_psoc,
12620 				&pdev_id, &mac_id, &hw_pdev_id);
12621 	}
12622 
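
	/* Propagate the updated hw_pdev_id and lmac_id to all vdevs on
	 * this pdev, under the vdev list lock.
	 */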
12623 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
12624 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
12625 		DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
12626 					       hw_pdev_id);
12627 		vdev->lmac_id = pdev->lmac_id;
12628 	}
12629 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
12630 
12631 	return QDF_STATUS_SUCCESS;
12632 }
12633 
12634 /**
12635  * dp_soc_set_pdev_status_down() - set pdev down/up status
12636  * @soc: datapath soc handle
12637  * @pdev_id: id of datapath pdev handle
12638  * @is_pdev_down: pdev down/up status
12639  *
12640  * Return: QDF_STATUS
12641  */
12642 static QDF_STATUS
12643 dp_soc_set_pdev_status_down(struct cdp_soc_t *soc, uint8_t pdev_id,
12644 			    bool is_pdev_down)
12645 {
12646 	struct dp_pdev *pdev =
12647 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
12648 						   pdev_id);
12649 	if (!pdev)
12650 		return QDF_STATUS_E_FAILURE;
12651 
12652 	pdev->is_pdev_down = is_pdev_down;
12653 	return QDF_STATUS_SUCCESS;
12654 }
12655 
12656 /**
12657  * dp_get_cfg_capabilities() - get dp capabilities
12658  * @soc_handle: datapath soc handle
12659  * @dp_caps: enum for dp capabilities
12660  *
12661  * Return: true if the given dp capability is enabled
12662  */
12663 static bool
12664 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
12665 			enum cdp_capabilities dp_caps)
12666 {
12667 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12668 
12669 	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
12670 }
12671 
12672 #ifdef FEATURE_AST
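/**
 * dp_peer_teardown_wifi3() - Tear down a peer: mark it for logical delete
 *			      and remove its AST entries
 * @soc_hdl: soc handle
 * @vdev_id: id of vdev handle
 * @peer_mac: peer mac address
 *
 * Return: QDF_STATUS
 */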
12673 static QDF_STATUS
12674 dp_peer_teardown_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
12675 		       uint8_t *peer_mac)
12676 {
12677 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12678 	QDF_STATUS status = QDF_STATUS_SUCCESS;
12679 	struct dp_peer *peer =
12680 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
12681 					       DP_MOD_ID_CDP);
12682 
12683 	/* Peer can be null for monitor vap mac address */
12684 	if (!peer) {
12685 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
12686 			  "%s: Invalid peer\n", __func__);
12687 		return QDF_STATUS_E_FAILURE;
12688 	}
12689 
12690 	dp_peer_update_state(soc, peer, DP_PEER_STATE_LOGICAL_DELETE);
12691 
12692 	qdf_spin_lock_bh(&soc->ast_lock);
12693 	dp_peer_send_wds_disconnect(soc, peer);
12694 	dp_peer_delete_ast_entries(soc, peer);
12695 	qdf_spin_unlock_bh(&soc->ast_lock);
12696 
12697 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
12698 	return status;
12699 }
12700 #endif
12701 
12702 #ifndef WLAN_SUPPORT_RX_TAG_STATISTICS
12703 /**
12704  * dp_dump_pdev_rx_protocol_tag_stats() - dump the number of packets tagged
12705  * for a given protocol type (RX_PROTOCOL_TAG_ALL indicates all protocols)
12706  * @soc: cdp_soc handle
12707  * @pdev_id: id of cdp_pdev handle
12708  * @protocol_type: protocol type for which stats should be displayed
12709  *
12710  * Return: none
12711  */
12712 static inline void
12713 dp_dump_pdev_rx_protocol_tag_stats(struct cdp_soc_t  *soc, uint8_t pdev_id,
12714 				   uint16_t protocol_type)
12715 {
12716 }
12717 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
12718 
12719 #ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
12720 /**
12721  * dp_update_pdev_rx_protocol_tag() - Add/remove a protocol tag that should
12722  * be applied to packets of the desired protocol type
12723  * @soc: soc handle
12724  * @pdev_id: id of cdp_pdev handle
12725  * @enable_rx_protocol_tag: bitmask that indicates which protocol types
12726  * are enabled for tagging; zero disables the feature, non-zero enables it
12728  * @protocol_type: new protocol type for which the tag is being added
12729  * @tag: user configured tag for the new protocol
12730  *
12731  * Return: Success
12732  */
12733 static inline QDF_STATUS
12734 dp_update_pdev_rx_protocol_tag(struct cdp_soc_t  *soc, uint8_t pdev_id,
12735 			       uint32_t enable_rx_protocol_tag,
12736 			       uint16_t protocol_type,
12737 			       uint16_t tag)
12738 {
12739 	return QDF_STATUS_SUCCESS;
12740 }
12741 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
12742 
12743 #ifndef WLAN_SUPPORT_RX_FLOW_TAG
12744 /**
12745  * dp_set_rx_flow_tag() - add/delete a flow
12746  * @cdp_soc: soc handle
12747  * @pdev_id: id of cdp_pdev handle
12748  * @flow_info: flow tuple that is to be added to/deleted from flow search table
12749  *
12750  * Return: Success
12751  */
12752 static inline QDF_STATUS
12753 dp_set_rx_flow_tag(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
12754 		   struct cdp_rx_flow_info *flow_info)
12755 {
12756 	return QDF_STATUS_SUCCESS;
12757 }
12758 /**
12759  * dp_dump_rx_flow_tag_stats() - dump the number of packets tagged for a
12760  * given flow 5-tuple
12761  * @cdp_soc: soc handle
12762  * @pdev_id: id of cdp_pdev handle
12763  * @flow_info: flow 5-tuple for which stats should be displayed
12764  *
12765  * Return: Success
12766  */
12767 static inline QDF_STATUS
12768 dp_dump_rx_flow_tag_stats(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
12769 			  struct cdp_rx_flow_info *flow_info)
12770 {
12771 	return QDF_STATUS_SUCCESS;
12772 }
12773 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
12774 
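/**
 * dp_peer_map_attach_wifi3() - Allocate the peer map/find tables
 * @soc_hdl: soc handle
 * @max_peers: maximum number of peers supported
 * @max_ast_index: maximum number of AST indexes supported
 * @peer_map_unmap_versions: peer map/unmap event versions supported by FW
 *
 * Return: QDF_STATUS
 */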
12775 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
12776 					   uint32_t max_peers,
12777 					   uint32_t max_ast_index,
12778 					   uint8_t peer_map_unmap_versions)
12779 {
12780 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12781 	QDF_STATUS status;
12782 
12783 	soc->max_peers = max_peers;
12784 
12785 	wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);
12786 
12787 	status = soc->arch_ops.txrx_peer_map_attach(soc);
12788 	if (!QDF_IS_STATUS_SUCCESS(status)) {
12789 		dp_err("failure in allocating peer tables");
12790 		return QDF_STATUS_E_FAILURE;
12791 	}
12792 
12793 	dp_info("max_peers %u, calculated max_peers %u max_ast_index: %u\n",
12794 		max_peers, soc->max_peer_id, max_ast_index);
12795 
12796 	status = dp_peer_find_attach(soc);
12797 	if (!QDF_IS_STATUS_SUCCESS(status)) {
12798 		dp_err("Peer find attach failure");
12799 		goto fail;
12800 	}
12801 
12802 	soc->peer_map_unmap_versions = peer_map_unmap_versions;
12803 	soc->peer_map_attach_success = TRUE;
12804 
12805 	return QDF_STATUS_SUCCESS;
12806 fail:
12807 	soc->arch_ops.txrx_peer_map_detach(soc);
12808 
12809 	return status;
12810 }
12811 
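/**
 * dp_soc_set_param() - Set a soc-level datapath parameter
 * @soc_hdl: soc handle
 * @param: soc parameter to set
 * @value: value to set
 *
 * Return: QDF_STATUS
 */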
12812 static QDF_STATUS dp_soc_set_param(struct cdp_soc_t  *soc_hdl,
12813 				   enum cdp_soc_param_t param,
12814 				   uint32_t value)
12815 {
12816 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12817 
12818 	switch (param) {
12819 	case DP_SOC_PARAM_MSDU_EXCEPTION_DESC:
12820 		soc->num_msdu_exception_desc = value;
12821 		dp_info("num_msdu_exception_desc %u", value);
12823 		break;
12824 	case DP_SOC_PARAM_CMEM_FSE_SUPPORT:
12825 		if (wlan_cfg_is_fst_in_cmem_enabled(soc->wlan_cfg_ctx))
12826 			soc->fst_in_cmem = !!value;
12827 		dp_info("FW supports CMEM FSE %u", value);
12828 		break;
12829 	case DP_SOC_PARAM_MAX_AST_AGEOUT:
12830 		soc->max_ast_ageout_count = value;
12831 		dp_info("Max ast ageout count %u", soc->max_ast_ageout_count);
12832 		break;
12833 	case DP_SOC_PARAM_EAPOL_OVER_CONTROL_PORT:
12834 		soc->eapol_over_control_port = value;
12835 		dp_info("Eapol over control_port:%d",
12836 			soc->eapol_over_control_port);
12837 		break;
12838 	case DP_SOC_PARAM_MULTI_PEER_GRP_CMD_SUPPORT:
12839 		soc->multi_peer_grp_cmd_supported = value;
12840 		dp_info("Multi Peer group command support:%d",
12841 			soc->multi_peer_grp_cmd_supported);
12842 		break;
12843 	case DP_SOC_PARAM_RSSI_DBM_CONV_SUPPORT:
12844 		soc->features.rssi_dbm_conv_support = value;
12845 		dp_info("Rssi dbm conversion support:%u",
12846 			soc->features.rssi_dbm_conv_support);
12847 		break;
12848 	case DP_SOC_PARAM_UMAC_HW_RESET_SUPPORT:
12849 		soc->features.umac_hw_reset_support = value;
12850 		dp_info("UMAC HW reset support: %u",
12851 			soc->features.umac_hw_reset_support);
12852 		break;
12853 	default:
12854 		dp_info("not handled param %d ", param);
12855 		break;
12856 	}
12857 
12858 	return QDF_STATUS_SUCCESS;
12859 }
12860 
12861 static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
12862 				      void *stats_ctx)
12863 {
12864 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12865 
12866 	soc->rate_stats_ctx = (struct cdp_soc_rate_stats_ctx *)stats_ctx;
12867 }
12868 
12869 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
12870 /**
12871  * dp_peer_flush_rate_stats_req(): Flush peer rate stats
12872  * @soc: Datapath SOC handle
12873  * @peer: Datapath peer
12874  * @arg: argument to iter function
12875  *
12876  * Return: void
12877  */
12878 static void
12879 dp_peer_flush_rate_stats_req(struct dp_soc *soc, struct dp_peer *peer,
12880 			     void *arg)
12881 {
12882 	if (peer->bss_peer)
12883 		return;
12884 
12885 	dp_wdi_event_handler(
12886 		WDI_EVENT_FLUSH_RATE_STATS_REQ,
12887 		soc, dp_monitor_peer_get_peerstats_ctx(soc, peer),
12888 		peer->peer_id,
12889 		WDI_NO_VAL, peer->vdev->pdev->pdev_id);
12890 }
12891 
12892 /**
12893  * dp_flush_rate_stats_req(): Flush peer rate stats in pdev
12894  * @soc_hdl: Datapath SOC handle
12895  * @pdev_id: pdev_id
12896  *
12897  * Return: QDF_STATUS
12898  */
12899 static QDF_STATUS dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
12900 					  uint8_t pdev_id)
12901 {
12902 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12903 	struct dp_pdev *pdev =
12904 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
12905 						   pdev_id);
12906 	if (!pdev)
12907 		return QDF_STATUS_E_FAILURE;
12908 
12909 	dp_pdev_iterate_peer(pdev, dp_peer_flush_rate_stats_req, NULL,
12910 			     DP_MOD_ID_CDP);
12911 
12912 	return QDF_STATUS_SUCCESS;
12913 }
12914 #else
12915 static inline QDF_STATUS
12916 dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
12917 			uint8_t pdev_id)
12918 {
12919 	return QDF_STATUS_SUCCESS;
12920 }
12921 #endif
12922 
12923 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
12924 #ifdef WLAN_FEATURE_11BE_MLO
12925 /**
12926  * dp_get_peer_extd_rate_link_stats(): function to get peer
12927  *				extended rate and link stats
12928  * @soc_hdl: dp soc handle
12929  * @mac_addr: mac address of peer
12930  *
12931  * Return: QDF_STATUS
12932  */
12933 static QDF_STATUS
12934 dp_get_peer_extd_rate_link_stats(struct cdp_soc_t *soc_hdl, uint8_t *mac_addr)
12935 {
12936 	uint8_t i;
12937 	struct dp_peer *link_peer;
12938 	struct dp_soc *link_peer_soc;
12939 	struct dp_mld_link_peers link_peers_info;
12940 	struct dp_peer *peer = NULL;
12941 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12942 	struct cdp_peer_info peer_info = { 0 };
12943 
12944 	if (!mac_addr) {
12945 		dp_err("NULL peer mac addr\n");
12946 		return QDF_STATUS_E_FAILURE;
12947 	}
12948 
12949 	DP_PEER_INFO_PARAMS_INIT(&peer_info, DP_VDEV_ALL, mac_addr, false,
12950 				 CDP_WILD_PEER_TYPE);
12951 
12952 	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
12953 	if (!peer) {
12954 		dp_err("Invalid peer\n");
12955 		return QDF_STATUS_E_FAILURE;
12956 	}
12957 
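	/* For an MLD peer, raise the flush event on each link peer's soc;
	 * for a legacy peer, raise it once on this soc.
	 */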
12958 	if (IS_MLO_DP_MLD_PEER(peer)) {
12959 		dp_get_link_peers_ref_from_mld_peer(soc, peer,
12960 						    &link_peers_info,
12961 						    DP_MOD_ID_CDP);
12962 		for (i = 0; i < link_peers_info.num_links; i++) {
12963 			link_peer = link_peers_info.link_peers[i];
12964 			link_peer_soc = link_peer->vdev->pdev->soc;
12965 			dp_wdi_event_handler(WDI_EVENT_FLUSH_RATE_STATS_REQ,
12966 					     link_peer_soc,
12967 					     dp_monitor_peer_get_peerstats_ctx
12968 					     (link_peer_soc, link_peer),
12969 					     link_peer->peer_id,
12970 					     WDI_NO_VAL,
12971 					     link_peer->vdev->pdev->pdev_id);
12972 		}
12973 		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
12974 	} else {
12975 		dp_wdi_event_handler(
12976 				WDI_EVENT_FLUSH_RATE_STATS_REQ, soc,
12977 				dp_monitor_peer_get_peerstats_ctx(soc, peer),
12978 				peer->peer_id,
12979 				WDI_NO_VAL, peer->vdev->pdev->pdev_id);
12980 	}
12981 
12982 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
12983 	return QDF_STATUS_SUCCESS;
12984 }
12985 #else
12986 static QDF_STATUS
12987 dp_get_peer_extd_rate_link_stats(struct cdp_soc_t *soc_hdl, uint8_t *mac_addr)
12988 {
12989 	struct dp_peer *peer = NULL;
12990 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12991 
12992 	if (!mac_addr) {
12993 		dp_err("NULL peer mac addr\n");
12994 		return QDF_STATUS_E_FAILURE;
12995 	}
12996 
12997 	peer = dp_peer_find_hash_find(soc, mac_addr, 0,
12998 				      DP_VDEV_ALL, DP_MOD_ID_CDP);
12999 	if (!peer) {
13000 		dp_err("Invalid peer\n");
13001 		return QDF_STATUS_E_FAILURE;
13002 	}
13003 
13004 	dp_wdi_event_handler(
13005 			WDI_EVENT_FLUSH_RATE_STATS_REQ, soc,
13006 			dp_monitor_peer_get_peerstats_ctx(soc, peer),
13007 			peer->peer_id,
13008 			WDI_NO_VAL, peer->vdev->pdev->pdev_id);
13009 
13010 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
13011 	return QDF_STATUS_SUCCESS;
13012 }
13013 #endif
13014 #else
13015 static inline QDF_STATUS
13016 dp_get_peer_extd_rate_link_stats(struct cdp_soc_t *soc_hdl, uint8_t *mac_addr)
13017 {
13018 	return QDF_STATUS_SUCCESS;
13019 }
13020 #endif
13021 
13022 static void *dp_peer_get_peerstats_ctx(struct cdp_soc_t *soc_hdl,
13023 				       uint8_t vdev_id,
13024 				       uint8_t *mac_addr)
13025 {
13026 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
13027 	struct dp_peer *peer;
13028 	void *peerstats_ctx = NULL;
13029 
13030 	if (mac_addr) {
13031 		peer = dp_peer_find_hash_find(soc, mac_addr,
13032 					      0, vdev_id,
13033 					      DP_MOD_ID_CDP);
13034 		if (!peer)
13035 			return NULL;
13036 
13037 		if (!IS_MLO_DP_MLD_PEER(peer))
13038 			peerstats_ctx = dp_monitor_peer_get_peerstats_ctx(soc,
13039 									  peer);
13040 
13041 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
13042 	}
13043 
13044 	return peerstats_ctx;
13045 }
13046 
13047 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
13048 static QDF_STATUS dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
13049 					   uint8_t pdev_id,
13050 					   void *buf)
13051 {
13052 	dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS,
13053 			     (struct dp_soc *)soc, buf, HTT_INVALID_PEER,
13054 			     WDI_NO_VAL, pdev_id);
13055 	return QDF_STATUS_SUCCESS;
13056 }
13057 #else
13058 static inline QDF_STATUS
13059 dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
13060 			 uint8_t pdev_id,
13061 			 void *buf)
13062 {
13063 	return QDF_STATUS_SUCCESS;
13064 }
13065 #endif
13066 
13067 static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle)
13068 {
13069 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
13070 
13071 	return soc->rate_stats_ctx;
13072 }
13073 
13074 /**
13075  * dp_get_cfg() - get dp cfg
13076  * @soc: cdp soc handle
13077  * @cfg: cfg enum
13078  *
13079  * Return: cfg value
13080  */
13081 static uint32_t dp_get_cfg(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg)
13082 {
13083 	struct dp_soc *dpsoc = (struct dp_soc *)soc;
13084 	uint32_t value = 0;
13085 
13086 	switch (cfg) {
13087 	case cfg_dp_enable_data_stall:
13088 		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
13089 		break;
13090 	case cfg_dp_enable_p2p_ip_tcp_udp_checksum_offload:
13091 		value = dpsoc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload;
13092 		break;
13093 	case cfg_dp_enable_nan_ip_tcp_udp_checksum_offload:
13094 		value = dpsoc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload;
13095 		break;
13096 	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
13097 		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
13098 		break;
13099 	case cfg_dp_disable_legacy_mode_csum_offload:
13100 		value = dpsoc->wlan_cfg_ctx->
13101 					legacy_mode_checksumoffload_disable;
13102 		break;
13103 	case cfg_dp_tso_enable:
13104 		value = dpsoc->wlan_cfg_ctx->tso_enabled;
13105 		break;
13106 	case cfg_dp_lro_enable:
13107 		value = dpsoc->wlan_cfg_ctx->lro_enabled;
13108 		break;
13109 	case cfg_dp_gro_enable:
13110 		value = dpsoc->wlan_cfg_ctx->gro_enabled;
13111 		break;
13112 	case cfg_dp_tc_based_dyn_gro_enable:
13113 		value = dpsoc->wlan_cfg_ctx->tc_based_dynamic_gro;
13114 		break;
13115 	case cfg_dp_tc_ingress_prio:
13116 		value = dpsoc->wlan_cfg_ctx->tc_ingress_prio;
13117 		break;
13118 	case cfg_dp_sg_enable:
13119 		value = dpsoc->wlan_cfg_ctx->sg_enabled;
13120 		break;
13121 	case cfg_dp_tx_flow_start_queue_offset:
13122 		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
13123 		break;
13124 	case cfg_dp_tx_flow_stop_queue_threshold:
13125 		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
13126 		break;
13127 	case cfg_dp_disable_intra_bss_fwd:
13128 		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
13129 		break;
13130 	case cfg_dp_pktlog_buffer_size:
13131 		value = dpsoc->wlan_cfg_ctx->pktlog_buffer_size;
13132 		break;
13133 	case cfg_dp_wow_check_rx_pending:
13134 		value = dpsoc->wlan_cfg_ctx->wow_check_rx_pending_enable;
13135 		break;
13136 	default:
13137 		value =  0;
13138 	}
13139 
13140 	return value;
13141 }
13142 
13143 #ifdef PEER_FLOW_CONTROL
13144 /**
13145  * dp_tx_flow_ctrl_configure_pdev() - Configure flow control params
13146  * @soc_handle: datapath soc handle
13147  * @pdev_id: id of datapath pdev handle
13148  * @param: ol ath params
13149  * @value: value of the flag
13150  * @buff: Buffer to be passed
13151  *
13152  * This function mirrors the legacy implementation, where a single function
13153  * is used both to display stats and to update pdev params.
13154  *
13155  * Return: 0 on success, nonzero on failure
13156  */
13157 static uint32_t dp_tx_flow_ctrl_configure_pdev(struct cdp_soc_t *soc_handle,
13158 					       uint8_t pdev_id,
13159 					       enum _dp_param_t param,
13160 					       uint32_t value, void *buff)
13161 {
13162 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
13163 	struct dp_pdev *pdev =
13164 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
13165 						   pdev_id);
13166 
13167 	if (qdf_unlikely(!pdev))
13168 		return 1;
13169 
13170 	soc = pdev->soc;
13171 	if (!soc)
13172 		return 1;
13173 
13174 	switch (param) {
13175 #ifdef QCA_ENH_V3_STATS_SUPPORT
13176 	case DP_PARAM_VIDEO_DELAY_STATS_FC:
13177 		if (value)
13178 			pdev->delay_stats_flag = true;
13179 		else
13180 			pdev->delay_stats_flag = false;
13181 		break;
13182 	case DP_PARAM_VIDEO_STATS_FC:
13183 		qdf_print("------- TID Stats ------\n");
13184 		dp_pdev_print_tid_stats(pdev);
13185 		qdf_print("------ Delay Stats ------\n");
13186 		dp_pdev_print_delay_stats(pdev);
13187 		qdf_print("------ Rx Error Stats ------\n");
13188 		dp_pdev_print_rx_error_stats(pdev);
13189 		break;
13190 #endif
13191 	case DP_PARAM_TOTAL_Q_SIZE:
13192 		{
13193 			uint32_t tx_min, tx_max;
13194 
13195 			tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx);
13196 			tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
13197 
13198 			if (!buff) {
13199 				if ((value >= tx_min) && (value <= tx_max)) {
13200 					pdev->num_tx_allowed = value;
13201 				} else {
13202 					dp_tx_info("%pK: Failed to update num_tx_allowed, Q_min = %d Q_max = %d",
13203 						   soc, tx_min, tx_max);
13204 					break;
13205 				}
13206 			} else {
13207 				*(int *)buff = pdev->num_tx_allowed;
13208 			}
13209 		}
13210 		break;
13211 	default:
13212 		dp_tx_info("%pK: not handled param %d ", soc, param);
13213 		break;
13214 	}
13215 
13216 	return 0;
13217 }
13218 #endif
13219 
13220 /**
13221  * dp_set_pdev_pcp_tid_map_wifi3(): update pcp tid map in pdev
13222  * @psoc: dp soc handle
13223  * @pdev_id: id of DP_PDEV handle
13224  * @pcp: pcp value
13225  * @tid: tid value passed by the user
13226  *
13227  * Return: QDF_STATUS_SUCCESS on success
13228  */
13229 static QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
13230 						uint8_t pdev_id,
13231 						uint8_t pcp, uint8_t tid)
13232 {
13233 	struct dp_soc *soc = (struct dp_soc *)psoc;
13234 
13235 	soc->pcp_tid_map[pcp] = tid;
13236 
13237 	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
13238 	return QDF_STATUS_SUCCESS;
13239 }
13240 
13241 /**
13242  * dp_set_vdev_pcp_tid_map_wifi3(): update pcp tid map in vdev
13243  * @soc_hdl: DP soc handle
13244  * @vdev_id: id of DP_VDEV handle
13245  * @pcp: pcp value
13246  * @tid: tid value passed by the user
13247  *
13248  * Return: QDF_STATUS_SUCCESS on success
13249  */
13250 static QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc_hdl,
13251 						uint8_t vdev_id,
13252 						uint8_t pcp, uint8_t tid)
13253 {
13254 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13255 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
13256 						     DP_MOD_ID_CDP);
13257 
13258 	if (!vdev)
13259 		return QDF_STATUS_E_FAILURE;
13260 
13261 	vdev->pcp_tid_map[pcp] = tid;
13262 
13263 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
13264 	return QDF_STATUS_SUCCESS;
13265 }
13266 
13267 #if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
13268 static void dp_drain_txrx(struct cdp_soc_t *soc_handle)
13269 {
13270 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
13271 	uint32_t cur_tx_limit, cur_rx_limit;
13272 	uint32_t budget = 0xffff;
13273 	uint32_t val;
13274 	int i;
13275 	int cpu = dp_srng_get_cpu();
13276 
13277 	cur_tx_limit = soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit;
13278 	cur_rx_limit = soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit;
13279 
13280 	/* Temporarily increase soft irq limits when going to drain
13281 	 * the UMAC/LMAC SRNGs and restore them after polling.
13282 	 * Though the budget is on higher side, the TX/RX reaping loops
13283 	 * will not execute longer as both TX and RX would be suspended
13284 	 * by the time this API is called.
13285 	 */
13286 	dp_update_soft_irq_limits(soc, budget, budget);
13287 
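	/* Poll every interrupt context once so any entries still pending
	 * on the rings are reaped.
	 */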
13288 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
13289 		dp_service_srngs(&soc->intr_ctx[i], budget, cpu);
13290 
13291 	dp_update_soft_irq_limits(soc, cur_tx_limit, cur_rx_limit);
13292 
13293 	/* Do a dummy read at offset 0; this ensures all pending
13294 	 * writes (HP/TP) are flushed before the read returns.
13295 	 */
13296 	val = HAL_REG_READ((struct hal_soc *)soc->hal_soc, 0);
13297 	dp_debug("Register value at offset 0: %u\n", val);
13298 }
13299 #endif
13300 
13301 #ifdef DP_UMAC_HW_RESET_SUPPORT
13302 /**
13303  * dp_reset_interrupt_ring_masks(): Back up and clear all interrupt ring masks
13304  * @soc: dp soc handle
13305  *
13306  * Return: void
13307  */
13308 static void dp_reset_interrupt_ring_masks(struct dp_soc *soc)
13309 {
13310 	struct dp_intr_bkp *intr_bkp;
13311 	struct dp_intr *intr_ctx;
13312 	int num_ctxt = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
13313 	int i;
13314 
13315 	intr_bkp =
13316 	(struct dp_intr_bkp *)qdf_mem_malloc_atomic(sizeof(struct dp_intr_bkp) *
13317 			num_ctxt);
13318 
13319 	qdf_assert_always(intr_bkp);
13320 
13321 	soc->umac_reset_ctx.intr_ctx_bkp = intr_bkp;
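	/* Save each context's ring masks and then clear them, so no SRNG
	 * is serviced while the UMAC reset is in progress.
	 */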
13322 	for (i = 0; i < num_ctxt; i++) {
13323 		intr_ctx = &soc->intr_ctx[i];
13324 
13325 		intr_bkp->tx_ring_mask = intr_ctx->tx_ring_mask;
13326 		intr_bkp->rx_ring_mask = intr_ctx->rx_ring_mask;
13327 		intr_bkp->rx_mon_ring_mask = intr_ctx->rx_mon_ring_mask;
13328 		intr_bkp->rx_err_ring_mask = intr_ctx->rx_err_ring_mask;
13329 		intr_bkp->rx_wbm_rel_ring_mask = intr_ctx->rx_wbm_rel_ring_mask;
13330 		intr_bkp->reo_status_ring_mask = intr_ctx->reo_status_ring_mask;
13331 		intr_bkp->rxdma2host_ring_mask = intr_ctx->rxdma2host_ring_mask;
13332 		intr_bkp->host2rxdma_ring_mask = intr_ctx->host2rxdma_ring_mask;
13333 		intr_bkp->host2rxdma_mon_ring_mask =
13334 					intr_ctx->host2rxdma_mon_ring_mask;
13335 		intr_bkp->tx_mon_ring_mask = intr_ctx->tx_mon_ring_mask;
13336 
13337 		intr_ctx->tx_ring_mask = 0;
13338 		intr_ctx->rx_ring_mask = 0;
13339 		intr_ctx->rx_mon_ring_mask = 0;
13340 		intr_ctx->rx_err_ring_mask = 0;
13341 		intr_ctx->rx_wbm_rel_ring_mask = 0;
13342 		intr_ctx->reo_status_ring_mask = 0;
13343 		intr_ctx->rxdma2host_ring_mask = 0;
13344 		intr_ctx->host2rxdma_ring_mask = 0;
13345 		intr_ctx->host2rxdma_mon_ring_mask = 0;
13346 		intr_ctx->tx_mon_ring_mask = 0;
13347 
13348 		intr_bkp++;
13349 	}
13350 }
13351 
13352 /**
13353  * dp_restore_interrupt_ring_masks(): Restore the saved interrupt ring masks
13354  * @soc: dp soc handle
13355  *
13356  * Return: void
13357  */
13358 static void dp_restore_interrupt_ring_masks(struct dp_soc *soc)
13359 {
13360 	struct dp_intr_bkp *intr_bkp = soc->umac_reset_ctx.intr_ctx_bkp;
13361 	struct dp_intr_bkp *intr_bkp_base = intr_bkp;
13362 	struct dp_intr *intr_ctx;
13363 	int num_ctxt = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
13364 	int i;
13365 
13366 	qdf_assert_always(intr_bkp);
13367 
13368 	for (i = 0; i < num_ctxt; i++) {
13369 		intr_ctx = &soc->intr_ctx[i];
13370 
13371 		intr_ctx->tx_ring_mask = intr_bkp->tx_ring_mask;
13372 		intr_ctx->rx_ring_mask = intr_bkp->rx_ring_mask;
13373 		intr_ctx->rx_mon_ring_mask = intr_bkp->rx_mon_ring_mask;
13374 		intr_ctx->rx_err_ring_mask = intr_bkp->rx_err_ring_mask;
13375 		intr_ctx->rx_wbm_rel_ring_mask = intr_bkp->rx_wbm_rel_ring_mask;
13376 		intr_ctx->reo_status_ring_mask = intr_bkp->reo_status_ring_mask;
13377 		intr_ctx->rxdma2host_ring_mask = intr_bkp->rxdma2host_ring_mask;
13378 		intr_ctx->host2rxdma_ring_mask = intr_bkp->host2rxdma_ring_mask;
13379 		intr_ctx->host2rxdma_mon_ring_mask =
13380 			intr_bkp->host2rxdma_mon_ring_mask;
13381 		intr_ctx->tx_mon_ring_mask = intr_bkp->tx_mon_ring_mask;
13382 
13383 		intr_bkp++;
13384 	}
13385 
13386 	qdf_mem_free(intr_bkp_base);
13387 	soc->umac_reset_ctx.intr_ctx_bkp = NULL;
13388 }
13389 
13390 /**
13391  * dp_resume_tx_hardstart(): Restore the old Tx hardstart functions
13392  * @soc: dp soc handle
13393  *
13394  * Return: void
13395  */
13396 static void dp_resume_tx_hardstart(struct dp_soc *soc)
13397 {
13398 	struct dp_vdev *vdev;
13399 	struct ol_txrx_hardtart_ctxt ctxt = {0};
13400 	struct cdp_ctrl_objmgr_psoc *psoc = soc->ctrl_psoc;
13401 	int i;
13402 
13403 	for (i = 0; i < MAX_PDEV_CNT; i++) {
13404 		struct dp_pdev *pdev = soc->pdev_list[i];
13405 
13406 		if (!pdev)
13407 			continue;
13408 
13409 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
13410 			uint8_t vdev_id = vdev->vdev_id;
13411 
13412 			dp_vdev_fetch_tx_handler(vdev, soc, &ctxt);
13413 			soc->cdp_soc.ol_ops->dp_update_tx_hardstart(psoc,
13414 								    vdev_id,
13415 								    &ctxt);
13416 		}
13417 	}
13418 }
13419 
13420 /**
13421  * dp_pause_tx_hardstart(): Register Tx hardstart functions to drop packets
13422  * @soc: dp soc handle
13423  *
13424  * Return: void
13425  */
13426 static void dp_pause_tx_hardstart(struct dp_soc *soc)
13427 {
13428 	struct dp_vdev *vdev;
13429 	struct ol_txrx_hardtart_ctxt ctxt;
13430 	struct cdp_ctrl_objmgr_psoc *psoc = soc->ctrl_psoc;
13431 	int i;
13432 
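	/* Point all Tx entry points at drop handlers so frames submitted
	 * during the reset window are dropped instead of reaching HW.
	 */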
13433 	ctxt.tx = &dp_tx_drop;
13434 	ctxt.tx_fast = &dp_tx_drop;
13435 	ctxt.tx_exception = &dp_tx_exc_drop;
13436 
13437 	for (i = 0; i < MAX_PDEV_CNT; i++) {
13438 		struct dp_pdev *pdev = soc->pdev_list[i];
13439 
13440 		if (!pdev)
13441 			continue;
13442 
13443 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
13444 			uint8_t vdev_id = vdev->vdev_id;
13445 
13446 			soc->cdp_soc.ol_ops->dp_update_tx_hardstart(psoc,
13447 								    vdev_id,
13448 								    &ctxt);
13449 		}
13450 	}
13451 }
13452 
13453 /**
13454  * dp_unregister_notify_umac_pre_reset_fw_callback(): unregister notify_fw_cb
13455  * @soc: dp soc handle
13456  *
13457  * Return: void
13458  */
13459 static inline
13460 void dp_unregister_notify_umac_pre_reset_fw_callback(struct dp_soc *soc)
13461 {
13462 	soc->notify_fw_callback = NULL;
13463 }
13464 
13465 /**
13466  * dp_check_n_notify_umac_prereset_done(): Send pre reset done to firmware
13467  * @soc: dp soc handle
13468  *
13469  * Return: void
13470  */
13471 static inline
13472 void dp_check_n_notify_umac_prereset_done(struct dp_soc *soc)
13473 {
13474 	/* Some CPU(s) are still processing the UMAC rings */
13475 	if (soc->service_rings_running)
13476 		return;
13477 
13478 	/* Notify the firmware that Umac pre reset is complete */
13479 	dp_umac_reset_notify_action_completion(soc,
13480 					       UMAC_RESET_ACTION_DO_PRE_RESET);
13481 
13482 	/* Unregister the callback */
13483 	dp_unregister_notify_umac_pre_reset_fw_callback(soc);
13484 }
13485 
13486 /**
13487  * dp_register_notify_umac_pre_reset_fw_callback(): register notify_fw_cb
13488  * @soc: dp soc handle
13489  *
13490  * Return: void
13491  */
13492 static inline
13493 void dp_register_notify_umac_pre_reset_fw_callback(struct dp_soc *soc)
13494 {
13495 	soc->notify_fw_callback = dp_check_n_notify_umac_prereset_done;
13496 }
13497 
13498 #ifdef DP_UMAC_HW_HARD_RESET
13499 /**
13500  * dp_set_umac_regs(): Reinitialize host umac registers
13501  * @soc: dp soc handle
13502  *
13503  * Return: void
13504  */
13505 static void dp_set_umac_regs(struct dp_soc *soc)
13506 {
13507 	int i, j;
13508 	struct hal_reo_params reo_params;
13509 
13510 	qdf_mem_zero(&reo_params, sizeof(reo_params));
13511 
13512 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
13513 		if (soc->arch_ops.reo_remap_config(soc, &reo_params.remap0,
13514 						   &reo_params.remap1,
13515 						   &reo_params.remap2))
13516 			reo_params.rx_hash_enabled = true;
13517 		else
13518 			reo_params.rx_hash_enabled = false;
13519 	}
13520 
13521 	hal_reo_setup(soc->hal_soc, &reo_params, 0);
13522 
13523 	soc->arch_ops.dp_cc_reg_cfg_init(soc, true);
13524 
13525 	for (i = 0; i < PCP_TID_MAP_MAX; i++)
13526 		hal_tx_update_pcp_tid_map(soc->hal_soc, soc->pcp_tid_map[i], i);
13527 
13528 	for (i = 0; i < MAX_PDEV_CNT; i++) {
13529 		struct dp_vdev *vdev = NULL;
13530 		struct dp_pdev *pdev = soc->pdev_list[i];
13531 
13532 		if (!pdev)
13533 			continue;
13534 
13535 		for (j = 0; j < soc->num_hw_dscp_tid_map; j++)
13536 			hal_tx_set_dscp_tid_map(soc->hal_soc,
13537 						pdev->dscp_tid_map[j], j);
13538 
13539 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
13540 			soc->arch_ops.dp_bank_reconfig(soc, vdev);
13541 			soc->arch_ops.dp_reconfig_tx_vdev_mcast_ctrl(soc,
13542 								      vdev);
13543 		}
13544 	}
13545 }
13546 #else
13547 static void dp_set_umac_regs(struct dp_soc *soc)
13548 {
13549 }
13550 #endif
13551 
13552 /**
13553  * dp_reinit_rings(): Reinitialize host managed rings
13554  * @soc: dp soc handle
13555  *
13556  * Return: void
13557  */
13558 static void dp_reinit_rings(struct dp_soc *soc)
13559 {
13560 	unsigned long end;
13561 
13562 	dp_soc_srng_deinit(soc);
13563 	dp_hw_link_desc_ring_deinit(soc);
13564 
13565 	/* Busy wait for 2 ms to make sure the rings are in idle state
13566 	 * before we enable them again
13567 	 */
13568 	end = jiffies + msecs_to_jiffies(2);
13569 	while (time_before(jiffies, end))
13570 		;
13571 
13572 	dp_hw_link_desc_ring_init(soc);
13573 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
13574 	dp_soc_srng_init(soc);
13575 }
13576 
13577 /**
13578  * dp_umac_reset_handle_pre_reset(): Handle Umac prereset interrupt from FW
13579  * @soc: dp soc handle
13580  *
13581  * Return: QDF_STATUS
13582  */
13583 static QDF_STATUS dp_umac_reset_handle_pre_reset(struct dp_soc *soc)
13584 {
13585 	dp_reset_interrupt_ring_masks(soc);
13586 
13587 	dp_pause_tx_hardstart(soc);
13588 	dp_pause_reo_send_cmd(soc);
13589 
13590 	dp_check_n_notify_umac_prereset_done(soc);
13591 
13592 	soc->umac_reset_ctx.nbuf_list = NULL;
13593 
13594 	return QDF_STATUS_SUCCESS;
13595 }
13596 
13597 /**
13598  * dp_umac_reset_handle_post_reset(): Handle Umac postreset interrupt from FW
13599  * @soc: dp soc handle
13600  *
13601  * Return: QDF_STATUS
13602  */
13603 static QDF_STATUS dp_umac_reset_handle_post_reset(struct dp_soc *soc)
13604 {
13605 	if (!soc->umac_reset_ctx.skel_enable) {
13606 		qdf_nbuf_t *nbuf_list = &soc->umac_reset_ctx.nbuf_list;
13607 
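		/* Full (non-skeleton) recovery: reprogram UMAC registers,
		 * re-initialize the host rings, and collect reusable Rx/Tx
		 * descriptor buffers into nbuf_list before acking the
		 * post-reset action to FW.
		 */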
13608 		dp_set_umac_regs(soc);
13609 
13610 		dp_reinit_rings(soc);
13611 
13612 		dp_rx_desc_reuse(soc, nbuf_list);
13613 
13614 		dp_cleanup_reo_cmd_module(soc);
13615 
13616 		dp_tx_desc_pool_cleanup(soc, nbuf_list);
13617 
13618 		dp_reset_tid_q_setup(soc);
13619 	}
13620 
13621 	return dp_umac_reset_notify_action_completion(soc,
13622 					UMAC_RESET_ACTION_DO_POST_RESET_START);
13623 }
13624 
13625 /**
13626  * dp_umac_reset_handle_post_reset_complete(): Handle Umac postreset_complete
13627  *						interrupt from FW
13628  * @soc: dp soc handle
13629  *
13630  * Return: QDF_STATUS
13631  */
13632 static QDF_STATUS dp_umac_reset_handle_post_reset_complete(struct dp_soc *soc)
13633 {
13634 	QDF_STATUS status;
13635 	qdf_nbuf_t nbuf_list = soc->umac_reset_ctx.nbuf_list;
13636 
13637 	soc->umac_reset_ctx.nbuf_list = NULL;
13638 
13639 	dp_resume_reo_send_cmd(soc);
13640 
13641 	dp_restore_interrupt_ring_masks(soc);
13642 
13643 	dp_resume_tx_hardstart(soc);
13644 
13645 	status = dp_umac_reset_notify_action_completion(soc,
13646 				UMAC_RESET_ACTION_DO_POST_RESET_COMPLETE);
13647 
13648 	while (nbuf_list) {
13649 		qdf_nbuf_t nbuf = nbuf_list->next;
13650 
13651 		qdf_nbuf_free(nbuf_list);
13652 		nbuf_list = nbuf;
13653 	}
13654 
13655 	dp_umac_reset_info("Umac reset done on soc %pK\n prereset: %u us\n"
13656 			   "postreset: %u us\n postreset complete: %u us\n",
13657 			   soc,
13658 			   soc->umac_reset_ctx.ts.pre_reset_done -
13659 			   soc->umac_reset_ctx.ts.pre_reset_start,
13660 			   soc->umac_reset_ctx.ts.post_reset_done -
13661 			   soc->umac_reset_ctx.ts.post_reset_start,
13662 			   soc->umac_reset_ctx.ts.post_reset_complete_done -
13663 			   soc->umac_reset_ctx.ts.post_reset_complete_start);
13664 
13665 	return status;
13666 }
13667 #endif
13668 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
13669 static void
13670 dp_set_pkt_capture_mode(struct cdp_soc_t *soc_handle, bool val)
13671 {
13672 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
13673 
13674 	soc->wlan_cfg_ctx->pkt_capture_mode = val;
13675 }
13676 #endif
13677 
13678 #ifdef HW_TX_DELAY_STATS_ENABLE
13679 /**
13680  * dp_enable_disable_vdev_tx_delay_stats(): Start/Stop tx delay stats capture
13681  * @soc_hdl: DP soc handle
13682  * @vdev_id: vdev id
13683  * @value: non-zero to enable tx delay stats capture, 0 to disable
13684  *
13685  * Return: None
13686  */
13687 static void
13688 dp_enable_disable_vdev_tx_delay_stats(struct cdp_soc_t *soc_hdl,
13689 				      uint8_t vdev_id,
13690 				      uint8_t value)
13691 {
13692 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13693 	struct dp_vdev *vdev = NULL;
13694 
13695 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
13696 	if (!vdev)
13697 		return;
13698 
13699 	vdev->hw_tx_delay_stats_enabled = value;
13700 
13701 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
13702 }
13703 
13704 /**
13705  * dp_check_vdev_tx_delay_stats_enabled() - check whether the feature is enabled
13706  * @soc_hdl: DP soc handle
13707  * @vdev_id: vdev id
13708  *
13709  * Return: 1 if enabled, 0 if disabled
13710  */
13711 static uint8_t
13712 dp_check_vdev_tx_delay_stats_enabled(struct cdp_soc_t *soc_hdl,
13713 				     uint8_t vdev_id)
13714 {
13715 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13716 	struct dp_vdev *vdev;
13717 	uint8_t ret_val = 0;
13718 
13719 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
13720 	if (!vdev)
13721 		return ret_val;
13722 
13723 	ret_val = vdev->hw_tx_delay_stats_enabled;
13724 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
13725 
13726 	return ret_val;
13727 }
13728 #endif
13729 
13730 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
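/**
 * dp_recovery_vdev_flush_peers() - Flush peers on a vdev during recovery
 * @cdp_soc: CDP soc handle
 * @vdev_id: id of vdev handle
 * @mlo_peers_only: true if only MLO peers are to be flushed
 *
 * Return: void
 */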
13731 static void
13732 dp_recovery_vdev_flush_peers(struct cdp_soc_t *cdp_soc,
13733 			     uint8_t vdev_id,
13734 			     bool mlo_peers_only)
13735 {
13736 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
13737 	struct dp_vdev *vdev;
13738 
13739 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
13740 
13741 	if (!vdev)
13742 		return;
13743 
13744 	dp_vdev_flush_peers((struct cdp_vdev *)vdev, false, mlo_peers_only);
13745 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
13746 }
13747 #endif
13748 #ifdef QCA_GET_TSF_VIA_REG
13749 /**
13750  * dp_get_tsf_time() - get tsf time
13751  * @soc_hdl: Datapath soc handle
 * @tsf_id: TSF identifier
13752  * @mac_id: mac_id
13753  * @tsf: pointer to update tsf value
13754  * @tsf_sync_soc_time: pointer to update tsf sync time
13755  *
13756  * Return: None.
13757  */
13758 static inline void
13759 dp_get_tsf_time(struct cdp_soc_t *soc_hdl, uint32_t tsf_id, uint32_t mac_id,
13760 		uint64_t *tsf, uint64_t *tsf_sync_soc_time)
13761 {
13762 	hal_get_tsf_time(((struct dp_soc *)soc_hdl)->hal_soc, tsf_id, mac_id,
13763 			 tsf, tsf_sync_soc_time);
13764 }
13765 #else
13766 static inline void
13767 dp_get_tsf_time(struct cdp_soc_t *soc_hdl, uint32_t tsf_id, uint32_t mac_id,
13768 		uint64_t *tsf, uint64_t *tsf_sync_soc_time)
13769 {
13770 }
13771 #endif
13772 
13773 /**
13774  * dp_set_tx_pause() - Pause or resume tx path
13775  * @soc_hdl: Datapath soc handle
13776  * @flag: set or clear is_tx_pause
13777  *
13778  * Return: None.
13779  */
13780 static inline
13781 void dp_set_tx_pause(struct cdp_soc_t *soc_hdl, bool flag)
13782 {
13783 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13784 
13785 	soc->is_tx_pause = flag;
13786 }
13787 
13788 static struct cdp_cmn_ops dp_ops_cmn = {
13789 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
13790 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
13791 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
13792 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
13793 	.txrx_pdev_post_attach = dp_pdev_post_attach_wifi3,
13794 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
13795 	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
13796 	.txrx_peer_create = dp_peer_create_wifi3,
13797 	.txrx_peer_setup = dp_peer_setup_wifi3,
13798 #ifdef FEATURE_AST
13799 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
13800 #else
13801 	.txrx_peer_teardown = NULL,
13802 #endif
13803 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
13804 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
13805 	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
13806 	.txrx_peer_get_ast_info_by_pdev =
13807 		dp_peer_get_ast_info_by_pdevid_wifi3,
13808 	.txrx_peer_ast_delete_by_soc =
13809 		dp_peer_ast_entry_del_by_soc,
13810 	.txrx_peer_ast_delete_by_pdev =
13811 		dp_peer_ast_entry_del_by_pdev,
13812 	.txrx_peer_delete = dp_peer_delete_wifi3,
13813 #ifdef DP_RX_UDP_OVER_PEER_ROAM
13814 	.txrx_update_roaming_peer = dp_update_roaming_peer_wifi3,
13815 #endif
13816 	.txrx_vdev_register = dp_vdev_register_wifi3,
13817 	.txrx_soc_detach = dp_soc_detach_wifi3,
13818 	.txrx_soc_deinit = dp_soc_deinit_wifi3,
13819 	.txrx_soc_init = dp_soc_init_wifi3,
13820 #ifndef QCA_HOST_MODE_WIFI_DISABLED
13821 	.txrx_tso_soc_attach = dp_tso_soc_attach,
13822 	.txrx_tso_soc_detach = dp_tso_soc_detach,
13823 	.tx_send = dp_tx_send,
13824 	.tx_send_exc = dp_tx_send_exception,
13825 #endif
13826 	.set_tx_pause = dp_set_tx_pause,
13827 	.txrx_pdev_init = dp_pdev_init_wifi3,
13828 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
13829 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
13830 	.txrx_ath_getstats = dp_get_device_stats,
13831 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
13832 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
13833 	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
13834 	.delba_process = dp_delba_process_wifi3,
13835 	.set_addba_response = dp_set_addba_response,
13836 	.flush_cache_rx_queue = NULL,
13837 	.tid_update_ba_win_size = dp_rx_tid_update_ba_win_size,
13838 	/* TODO: get API's for dscp-tid need to be added*/
13839 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
13840 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
13841 	.txrx_get_total_per = dp_get_total_per,
13842 	.txrx_stats_request = dp_txrx_stats_request,
13843 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
13844 	.display_stats = dp_txrx_dump_stats,
13845 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
13846 	.txrx_intr_detach = dp_soc_interrupt_detach,
13847 	.set_pn_check = dp_set_pn_check_wifi3,
13848 	.set_key_sec_type = dp_set_key_sec_type_wifi3,
13849 	.update_config_parameters = dp_update_config_parameters,
13850 	/* TODO: Add other functions */
13851 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
13852 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
13853 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
13854 	.get_vdev_dp_ext_txrx_handle = dp_vdev_get_dp_ext_handle,
13855 	.set_vdev_dp_ext_txrx_handle = dp_vdev_set_dp_ext_handle,
13856 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
13857 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
13858 	.map_pdev_to_lmac = dp_soc_map_pdev_to_lmac,
13859 	.handle_mode_change = dp_soc_handle_pdev_mode_change,
13860 	.set_pdev_status_down = dp_soc_set_pdev_status_down,
13861 	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
13862 	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
13863 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
13864 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
13865 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
13866 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
13867 	.set_soc_param = dp_soc_set_param,
13868 	.txrx_get_os_rx_handles_from_vdev =
13869 					dp_get_os_rx_handles_from_vdev_wifi3,
13870 	.delba_tx_completion = dp_delba_tx_completion_wifi3,
13871 	.get_dp_capabilities = dp_get_cfg_capabilities,
13872 	.txrx_get_cfg = dp_get_cfg,
13873 	.set_rate_stats_ctx = dp_soc_set_rate_stats_ctx,
13874 	.get_rate_stats_ctx = dp_soc_get_rate_stats_ctx,
13875 	.txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats,
13876 	.txrx_flush_rate_stats_request = dp_flush_rate_stats_req,
13877 	.txrx_peer_get_peerstats_ctx = dp_peer_get_peerstats_ctx,
13878 
13879 	.set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3,
13880 	.set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3,
13881 
13882 	.txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler,
13883 #ifdef QCA_MULTIPASS_SUPPORT
13884 	.set_vlan_groupkey = dp_set_vlan_groupkey,
13885 #endif
13886 	.get_peer_mac_list = dp_get_peer_mac_list,
13887 	.get_peer_id = dp_get_peer_id,
13888 #ifdef QCA_SUPPORT_WDS_EXTENDED
13889 	.set_wds_ext_peer_rx = dp_wds_ext_set_peer_rx,
13890 #endif /* QCA_SUPPORT_WDS_EXTENDED */
13891 
13892 #if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
13893 	.txrx_drain = dp_drain_txrx,
13894 #endif
13895 #if defined(FEATURE_RUNTIME_PM)
13896 	.set_rtpm_tput_policy = dp_set_rtpm_tput_policy_requirement,
13897 #endif
13898 #ifdef WLAN_SYSFS_DP_STATS
13899 	.txrx_sysfs_fill_stats = dp_sysfs_fill_stats,
13900 	.txrx_sysfs_set_stat_type = dp_sysfs_set_stat_type,
13901 #endif /* WLAN_SYSFS_DP_STATS */
13902 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
13903 	.set_pkt_capture_mode = dp_set_pkt_capture_mode,
13904 #endif
13905 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
13906 	.txrx_recovery_vdev_flush_peers = dp_recovery_vdev_flush_peers,
13907 #endif
13908 	.txrx_umac_reset_deinit = dp_soc_umac_reset_deinit,
13909 	.txrx_get_tsf_time = dp_get_tsf_time,
13910 };
13911 
13912 static struct cdp_ctrl_ops dp_ops_ctrl = {
13913 	.txrx_peer_authorize = dp_peer_authorize,
13914 	.txrx_peer_get_authorize = dp_peer_get_authorize,
13915 #ifdef VDEV_PEER_PROTOCOL_COUNT
13916 	.txrx_enable_peer_protocol_count = dp_enable_vdev_peer_protocol_count,
13917 	.txrx_set_peer_protocol_drop_mask =
13918 		dp_enable_vdev_peer_protocol_drop_mask,
13919 	.txrx_is_peer_protocol_count_enabled =
13920 		dp_is_vdev_peer_protocol_count_enabled,
13921 	.txrx_get_peer_protocol_drop_mask = dp_get_vdev_peer_protocol_drop_mask,
13922 #endif
13923 	.txrx_set_vdev_param = dp_set_vdev_param,
13924 	.txrx_set_psoc_param = dp_set_psoc_param,
13925 	.txrx_get_psoc_param = dp_get_psoc_param,
13926 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
13927 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
13928 	.txrx_get_sec_type = dp_get_sec_type,
13929 	.txrx_wdi_event_sub = dp_wdi_event_sub,
13930 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
13931 	.txrx_set_pdev_param = dp_set_pdev_param,
13932 	.txrx_get_pdev_param = dp_get_pdev_param,
13933 	.txrx_set_peer_param = dp_set_peer_param,
13934 	.txrx_get_peer_param = dp_get_peer_param,
13935 #ifdef VDEV_PEER_PROTOCOL_COUNT
13936 	.txrx_peer_protocol_cnt = dp_peer_stats_update_protocol_cnt,
13937 #endif
13938 #ifdef WLAN_SUPPORT_MSCS
13939 	.txrx_record_mscs_params = dp_record_mscs_params,
13940 #endif
13941 	.set_key = dp_set_michael_key,
13942 	.txrx_get_vdev_param = dp_get_vdev_param,
13943 	.calculate_delay_stats = dp_calculate_delay_stats,
13944 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
13945 	.txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag,
13946 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
13947 	.txrx_dump_pdev_rx_protocol_tag_stats =
13948 				dp_dump_pdev_rx_protocol_tag_stats,
13949 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
13950 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
13951 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
13952 	.txrx_set_rx_flow_tag = dp_set_rx_flow_tag,
13953 	.txrx_dump_rx_flow_tag_stats = dp_dump_rx_flow_tag_stats,
13954 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
13955 #ifdef QCA_MULTIPASS_SUPPORT
13956 	.txrx_peer_set_vlan_id = dp_peer_set_vlan_id,
13957 #endif /*QCA_MULTIPASS_SUPPORT*/
13958 #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
13959 	.txrx_set_delta_tsf = dp_set_delta_tsf,
13960 #endif
13961 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
13962 	.txrx_set_tsf_ul_delay_report = dp_set_tsf_ul_delay_report,
13963 	.txrx_get_uplink_delay = dp_get_uplink_delay,
13964 #endif
13965 #ifdef QCA_UNDECODED_METADATA_SUPPORT
13966 	.txrx_set_pdev_phyrx_error_mask = dp_set_pdev_phyrx_error_mask,
13967 	.txrx_get_pdev_phyrx_error_mask = dp_get_pdev_phyrx_error_mask,
13968 #endif
13969 	.txrx_peer_flush_frags = dp_peer_flush_frags,
13970 };
13971 
13972 static struct cdp_me_ops dp_ops_me = {
13973 #ifndef QCA_HOST_MODE_WIFI_DISABLED
13974 #ifdef ATH_SUPPORT_IQUE
13975 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
13976 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
13977 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
13978 #endif
13979 #endif
13980 };
13981 
13982 static struct cdp_host_stats_ops dp_ops_host_stats = {
13983 	.txrx_per_peer_stats = dp_get_host_peer_stats,
13984 	.get_fw_peer_stats = dp_get_fw_peer_stats,
13985 	.get_htt_stats = dp_get_htt_stats,
13986 	.txrx_stats_publish = dp_txrx_stats_publish,
13987 	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
13988 	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
13989 	.txrx_get_soc_stats = dp_txrx_get_soc_stats,
13990 	.txrx_get_peer_stats_param = dp_txrx_get_peer_stats_param,
13991 	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
13992 	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
13993 	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
13994 	.txrx_update_vdev_stats = dp_txrx_update_vdev_host_stats,
13995 	.txrx_get_peer_delay_stats = dp_txrx_get_peer_delay_stats,
13996 	.txrx_get_peer_jitter_stats = dp_txrx_get_peer_jitter_stats,
13997 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
13998 	.txrx_alloc_vdev_stats_id = dp_txrx_alloc_vdev_stats_id,
13999 	.txrx_reset_vdev_stats_id = dp_txrx_reset_vdev_stats_id,
14000 #endif
14001 #ifdef WLAN_TX_PKT_CAPTURE_ENH
14002 	.get_peer_tx_capture_stats = dp_peer_get_tx_capture_stats,
14003 	.get_pdev_tx_capture_stats = dp_pdev_get_tx_capture_stats,
14004 #endif /* WLAN_TX_PKT_CAPTURE_ENH */
14005 #ifdef HW_TX_DELAY_STATS_ENABLE
14006 	.enable_disable_vdev_tx_delay_stats =
14007 				dp_enable_disable_vdev_tx_delay_stats,
14008 	.is_tx_delay_stats_enabled = dp_check_vdev_tx_delay_stats_enabled,
14009 #endif
14010 	.txrx_get_pdev_tid_stats = dp_pdev_get_tid_stats,
14011 #ifdef WLAN_TELEMETRY_STATS_SUPPORT
14012 	.txrx_pdev_telemetry_stats = dp_get_pdev_telemetry_stats,
14013 	.txrx_peer_telemetry_stats = dp_get_peer_telemetry_stats,
14014 #endif
14015 	.txrx_get_peer_extd_rate_link_stats =
14016 					dp_get_peer_extd_rate_link_stats,
14017 	.get_pdev_obss_stats = dp_get_obss_stats,
14018 	.clear_pdev_obss_pd_stats = dp_clear_pdev_obss_pd_stats,
14019 	/* TODO */
14020 };
14021 
14022 static struct cdp_raw_ops dp_ops_raw = {
14023 	/* TODO */
14024 };
14025 
14026 #ifdef PEER_FLOW_CONTROL
14027 static struct cdp_pflow_ops dp_ops_pflow = {
14028 	dp_tx_flow_ctrl_configure_pdev,
14029 };
14030 #endif /* PEER_FLOW_CONTROL */
14031 
14032 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
14033 static struct cdp_cfr_ops dp_ops_cfr = {
14034 	.txrx_cfr_filter = NULL,
14035 	.txrx_get_cfr_rcc = dp_get_cfr_rcc,
14036 	.txrx_set_cfr_rcc = dp_set_cfr_rcc,
14037 	.txrx_get_cfr_dbg_stats = dp_get_cfr_dbg_stats,
14038 	.txrx_clear_cfr_dbg_stats = dp_clear_cfr_dbg_stats,
14039 };
14040 #endif
14041 
14042 #ifdef WLAN_SUPPORT_MSCS
14043 static struct cdp_mscs_ops dp_ops_mscs = {
14044 	.mscs_peer_lookup_n_get_priority = dp_mscs_peer_lookup_n_get_priority,
14045 };
14046 #endif
14047 
14048 #ifdef WLAN_SUPPORT_MESH_LATENCY
14049 static struct cdp_mesh_latency_ops dp_ops_mesh_latency = {
14050 	.mesh_latency_update_peer_parameter =
14051 		dp_mesh_latency_update_peer_parameter,
14052 };
14053 #endif
14054 
14055 #ifdef WLAN_SUPPORT_SCS
14056 static struct cdp_scs_ops dp_ops_scs = {
14057 	.scs_peer_lookup_n_rule_match = dp_scs_peer_lookup_n_rule_match,
14058 };
14059 #endif
14060 
14061 #ifdef CONFIG_SAWF_DEF_QUEUES
14062 static struct cdp_sawf_ops dp_ops_sawf = {
14063 	.sawf_def_queues_map_req = dp_sawf_def_queues_map_req,
14064 	.sawf_def_queues_unmap_req = dp_sawf_def_queues_unmap_req,
14065 	.sawf_def_queues_get_map_report =
14066 		dp_sawf_def_queues_get_map_report,
14067 #ifdef CONFIG_SAWF_STATS
14068 	.txrx_get_peer_sawf_delay_stats = dp_sawf_get_peer_delay_stats,
14069 	.txrx_get_peer_sawf_tx_stats = dp_sawf_get_peer_tx_stats,
14070 	.sawf_mpdu_stats_req = dp_sawf_mpdu_stats_req,
14071 	.sawf_mpdu_details_stats_req = dp_sawf_mpdu_details_stats_req,
14072 	.txrx_sawf_set_mov_avg_params = dp_sawf_set_mov_avg_params,
14073 	.txrx_sawf_set_sla_params = dp_sawf_set_sla_params,
14074 	.txrx_sawf_init_telemtery_params = dp_sawf_init_telemetry_params,
14075 	.telemetry_get_throughput_stats = dp_sawf_get_tx_stats,
14076 	.telemetry_get_mpdu_stats = dp_sawf_get_mpdu_sched_stats,
14077 	.telemetry_get_drop_stats = dp_sawf_get_drop_stats,
14078 #endif
14079 };
14080 #endif
14081 
14082 #if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
14083 /**
14084  * dp_flush_ring_hptp() - Update ring shadow
14085  *			  register HP/TP address on runtime
14086  *                        resume
14087  * @soc: DP soc context
 * @hal_srng: SRNG handle to be flushed
14088  *
14089  * Return: None
14090  */
14091 static
14092 void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
14093 {
14094 	if (hal_srng && hal_srng_get_clear_event(hal_srng,
14095 						 HAL_SRNG_FLUSH_EVENT)) {
14096 		/* Acquire the lock */
14097 		hal_srng_access_start(soc->hal_soc, hal_srng);
14098 
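		/* Ending SRNG access writes the cached HP/TP values out to
		 * the shadow registers.
		 */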
14099 		hal_srng_access_end(soc->hal_soc, hal_srng);
14100 
14101 		hal_srng_set_flush_last_ts(hal_srng);
14102 
14103 		dp_debug("flushed");
14104 	}
14105 }
14106 #endif
14107 
14108 #ifdef DP_TX_TRACKING
14109 
14110 #define DP_TX_COMP_MAX_LATENCY_MS 30000
14111 /**
14112  * dp_tx_comp_delay_check() - calculate time latency for tx completion per pkt
14113  * @tx_desc: tx descriptor
14114  *
14115  * Calculate time latency for tx completion per pkt and trigger self recovery
14116  * when the delay is more than threshold value.
14117  *
14118  * Return: True if delay is more than threshold
14119  */
14120 static bool dp_tx_comp_delay_check(struct dp_tx_desc_s *tx_desc)
14121 {
14122 	uint64_t time_latency, timestamp_tick = tx_desc->timestamp_tick;
14123 	qdf_ktime_t current_time = qdf_ktime_real_get();
14124 	qdf_ktime_t timestamp = tx_desc->timestamp;
14125 
14126 	if (!timestamp)
14127 		return false;
14128 
14129 	if (dp_tx_pkt_tracepoints_enabled()) {
14130 		time_latency = qdf_ktime_to_ms(current_time) -
14131 				qdf_ktime_to_ms(timestamp);
14132 		if (time_latency >= DP_TX_COMP_MAX_LATENCY_MS) {
14133 			dp_err_rl("enqueued: %llu ms, current : %llu ms",
14134 				  qdf_ktime_to_ms(timestamp), qdf_ktime_to_ms(current_time));
14135 			return true;
14136 		}
14137 	} else {
14138 		current_time = qdf_system_ticks();
14139 		time_latency = qdf_system_ticks_to_msecs(current_time -
14140 							 timestamp_tick);
14141 		if (time_latency >= DP_TX_COMP_MAX_LATENCY_MS) {
14142 			dp_err_rl("enqueued: %u ms, current : %u ms",
14143 				  qdf_system_ticks_to_msecs(timestamp_tick),
14144 				  qdf_system_ticks_to_msecs(current_time));
14145 			return true;
14146 		}
14147 	}
14148 
14149 	return false;
14150 }
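/*
 * Illustrative sketch (under #if 0, not compiled): the tick-based branch of
 * dp_tx_comp_delay_check() in isolation - elapsed system ticks since enqueue
 * are converted to milliseconds and compared against the threshold.
 * example_desc_is_stale() is a hypothetical wrapper for demonstration only.
 */
#if 0
static bool example_desc_is_stale(struct dp_tx_desc_s *tx_desc)
{
	uint64_t elapsed_ms =
		qdf_system_ticks_to_msecs(qdf_system_ticks() -
					  tx_desc->timestamp_tick);

	return elapsed_ms >= DP_TX_COMP_MAX_LATENCY_MS;
}
#endif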
14151 
14152 #if defined(CONFIG_SLUB_DEBUG_ON)
14153 /**
14154  * dp_find_missing_tx_comp() - check for leaked descriptor in tx path
14155  * @soc: DP SOC context
14156  *
14157  * Parse through descriptors in all pools and validate magic number and
14158  * completion time. Trigger self recovery if magic value is corrupted.
14159  *
14160  * Return: None.
14161  */
14162 static void dp_find_missing_tx_comp(struct dp_soc *soc)
14163 {
14164 	uint8_t i;
14165 	uint32_t j;
14166 	uint32_t num_desc, page_id, offset;
14167 	uint16_t num_desc_per_page;
14168 	struct dp_tx_desc_s *tx_desc = NULL;
14169 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
14170 	bool send_fw_stats_cmd = false;
14171 	uint8_t vdev_id;
14172 
14173 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
14174 		tx_desc_pool = &soc->tx_desc[i];
14175 		if (!(tx_desc_pool->pool_size) ||
14176 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
14177 		    !(tx_desc_pool->desc_pages.cacheable_pages))
14178 			continue;
14179 
14180 		num_desc = tx_desc_pool->pool_size;
14181 		num_desc_per_page =
14182 			tx_desc_pool->desc_pages.num_element_per_page;
14183 		for (j = 0; j < num_desc; j++) {
14184 			page_id = j / num_desc_per_page;
14185 			offset = j % num_desc_per_page;
14186 
14187 			if (qdf_unlikely(!(tx_desc_pool->
14188 					 desc_pages.cacheable_pages)))
14189 				break;
14190 
14191 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
14192 			if (tx_desc->magic == DP_TX_MAGIC_PATTERN_FREE) {
14193 				continue;
14194 			} else if (tx_desc->magic ==
14195 				   DP_TX_MAGIC_PATTERN_INUSE) {
14196 				if (dp_tx_comp_delay_check(tx_desc)) {
14197 					dp_err_rl("Tx completion not rcvd for id: %u",
14198 						  tx_desc->id);
14199 
14200 					if (!send_fw_stats_cmd) {
14201 						send_fw_stats_cmd = true;
14202 						vdev_id = i;
14203 					}
14204 				}
14205 			} else {
14206 				dp_err_rl("tx desc %u corrupted, flags: 0x%x",
14207 					  tx_desc->id, tx_desc->flags);
14208 			}
14209 		}
14210 	}
14211 
14212 	/*
14213 	 * The unit test command to dump FW stats is required only once as the
14214 	 * stats are dumped at pdev level and not vdev level.
14215 	 */
14216 	if (send_fw_stats_cmd && soc->cdp_soc.ol_ops->dp_send_unit_test_cmd) {
14217 		uint32_t fw_stats_args[2] = {533, 1};
14218 
14219 		soc->cdp_soc.ol_ops->dp_send_unit_test_cmd(vdev_id,
14220 							   WLAN_MODULE_TX, 2,
14221 							   fw_stats_args);
14222 	}
14223 }
14224 #else
14225 static void dp_find_missing_tx_comp(struct dp_soc *soc)
14226 {
14227 	uint8_t i;
14228 	uint32_t j;
14229 	uint32_t num_desc, page_id, offset;
14230 	uint16_t num_desc_per_page;
14231 	struct dp_tx_desc_s *tx_desc = NULL;
14232 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
14233 
14234 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
14235 		tx_desc_pool = &soc->tx_desc[i];
14236 		if (!(tx_desc_pool->pool_size) ||
14237 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
14238 		    !(tx_desc_pool->desc_pages.cacheable_pages))
14239 			continue;
14240 
14241 		num_desc = tx_desc_pool->pool_size;
14242 		num_desc_per_page =
14243 			tx_desc_pool->desc_pages.num_element_per_page;
14244 		for (j = 0; j < num_desc; j++) {
14245 			page_id = j / num_desc_per_page;
14246 			offset = j % num_desc_per_page;
14247 
14248 			if (qdf_unlikely(!(tx_desc_pool->
14249 					 desc_pages.cacheable_pages)))
14250 				break;
14251 
14252 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
14253 			if (tx_desc->magic == DP_TX_MAGIC_PATTERN_FREE) {
14254 				continue;
14255 			} else if (tx_desc->magic ==
14256 				   DP_TX_MAGIC_PATTERN_INUSE) {
14257 				if (dp_tx_comp_delay_check(tx_desc)) {
14258 					dp_err_rl("Tx completion not rcvd for id: %u",
14259 						  tx_desc->id);
14260 					if (tx_desc->vdev_id == DP_INVALID_VDEV_ID) {
14261 						tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
14262 						dp_tx_comp_free_buf(soc,
14263 								    tx_desc,
14264 								    false);
14265 						dp_tx_desc_release(tx_desc, i);
14266 						DP_STATS_INC(soc,
14267 							     tx.tx_comp_force_freed, 1);
14268 						dp_err_rl("Tx completion force freed");
14269 					}
14270 				}
14271 			} else {
14272 				dp_err_rl("tx desc %u corrupted, flags: 0x%x",
14273 					  tx_desc->id, tx_desc->flags);
14274 			}
14275 		}
14276 	}
14277 }
14278 #endif /* CONFIG_SLUB_DEBUG_ON */
14279 #else
14280 static inline void dp_find_missing_tx_comp(struct dp_soc *soc)
14281 {
14282 }
14283 #endif
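/*
 * Illustrative sketch (under #if 0, not compiled): the page/offset arithmetic
 * both variants of dp_find_missing_tx_comp() use to walk a paged descriptor
 * pool. A flat descriptor index j maps to the page holding it and the slot
 * within that page. example_locate_desc() is a hypothetical helper.
 */
#if 0
static struct dp_tx_desc_s *
example_locate_desc(struct dp_soc *soc, uint8_t pool_id, uint32_t j,
		    uint16_t num_desc_per_page)
{
	uint32_t page_id = j / num_desc_per_page;	/* which page */
	uint32_t offset = j % num_desc_per_page;	/* slot in the page */

	return dp_tx_desc_find(soc, pool_id, page_id, offset);
}
#endif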
14284 
14285 #ifdef FEATURE_RUNTIME_PM
14286 /**
14287  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
14288  * @soc_hdl: Datapath soc handle
14289  * @pdev_id: id of data path pdev handle
14290  *
14291  * DP is ready to runtime suspend if there are no pending TX packets.
14292  *
14293  * Return: QDF_STATUS
14294  */
14295 static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
14296 {
14297 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14298 	struct dp_pdev *pdev;
14299 	uint8_t i;
14300 	int32_t tx_pending;
14301 
14302 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14303 	if (!pdev) {
14304 		dp_err("pdev is NULL");
14305 		return QDF_STATUS_E_INVAL;
14306 	}
14307 
14308 	/* Abort if there are any pending TX packets */
14309 	tx_pending = dp_get_tx_pending(dp_pdev_to_cdp_pdev(pdev));
14310 	if (tx_pending) {
14311 		dp_info_rl("%pK: Abort suspend due to pending TX packets %d",
14312 			   soc, tx_pending);
14313 		dp_find_missing_tx_comp(soc);
14314 		/* perform a force flush if tx is pending */
14315 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
14316 			hal_srng_set_event(soc->tcl_data_ring[i].hal_srng,
14317 					   HAL_SRNG_FLUSH_EVENT);
14318 			dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
14319 		}
14320 		qdf_atomic_set(&soc->tx_pending_rtpm, 0);
14321 
14322 		return QDF_STATUS_E_AGAIN;
14323 	}
14324 
14325 	if (dp_runtime_get_refcount(soc)) {
14326 		dp_init_info("refcount: %d", dp_runtime_get_refcount(soc));
14327 
14328 		return QDF_STATUS_E_AGAIN;
14329 	}
14330 
14331 	if (soc->intr_mode == DP_INTR_POLL)
14332 		qdf_timer_stop(&soc->int_timer);
14333 
14334 	dp_rx_fst_update_pm_suspend_status(soc, true);
14335 
14336 	return QDF_STATUS_SUCCESS;
14337 }
14338 
14339 #define DP_FLUSH_WAIT_CNT 10
14340 #define DP_RUNTIME_SUSPEND_WAIT_MS 10
14341 /**
14342  * dp_runtime_resume() - ensure DP is ready to runtime resume
14343  * @soc_hdl: Datapath soc handle
14344  * @pdev_id: id of data path pdev handle
14345  *
14346  * Resume DP for runtime PM.
14347  *
14348  * Return: QDF_STATUS
14349  */
14350 static QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
14351 {
14352 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14353 	int i, suspend_wait = 0;
14354 
14355 	if (soc->intr_mode == DP_INTR_POLL)
14356 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
14357 
14358 	/*
14359 	 * Wait until dp runtime refcount becomes zero or timeout occurs,
14360 	 * then flush pending tx for runtime suspend.
14361 	 */
14362 	while (dp_runtime_get_refcount(soc) &&
14363 	       suspend_wait < DP_FLUSH_WAIT_CNT) {
14364 		qdf_sleep(DP_RUNTIME_SUSPEND_WAIT_MS);
14365 		suspend_wait++;
14366 	}
14367 
14368 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
14369 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
14370 	}
14371 	qdf_atomic_set(&soc->tx_pending_rtpm, 0);
14372 
14373 	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
14374 	dp_rx_fst_update_pm_suspend_status(soc, false);
14375 
14376 	return QDF_STATUS_SUCCESS;
14377 }
14378 #endif /* FEATURE_RUNTIME_PM */
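/*
 * Illustrative sketch (under #if 0, not compiled): how a runtime-PM caller
 * might drive dp_runtime_suspend(). QDF_STATUS_E_AGAIN means "not ready yet"
 * (pending tx or a held dp runtime refcount), so the caller backs off and
 * retries. example_try_runtime_suspend() and the retry bound are hypothetical.
 */
#if 0
static QDF_STATUS example_try_runtime_suspend(struct cdp_soc_t *soc_hdl,
					      uint8_t pdev_id)
{
	QDF_STATUS status;
	int retries = 3;

	do {
		status = dp_runtime_suspend(soc_hdl, pdev_id);
		if (status != QDF_STATUS_E_AGAIN)
			break;
		/* Give in-flight tx completions time to drain */
		qdf_sleep(DP_RUNTIME_SUSPEND_WAIT_MS);
	} while (--retries);

	return status;
}
#endif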
14379 
14380 /**
14381  * dp_tx_get_success_ack_stats() - get tx success completion count
14382  * @soc_hdl: Datapath soc handle
14383  * @vdevid: vdev identifier
14384  *
14385  * Return: tx success ack count
14386  */
14387 static uint32_t dp_tx_get_success_ack_stats(struct cdp_soc_t *soc_hdl,
14388 					    uint8_t vdev_id)
14389 {
14390 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14391 	struct cdp_vdev_stats *vdev_stats = NULL;
14392 	uint32_t tx_success;
14393 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
14394 						     DP_MOD_ID_CDP);
14395 
14396 	if (!vdev) {
14397 		dp_cdp_err("%pK: Invalid vdev id %d", soc, vdev_id);
14398 		return 0;
14399 	}
14400 
14401 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
14402 	if (!vdev_stats) {
14403 		dp_cdp_err("%pK: DP alloc failure - unable to alloc vdev stats", soc);
14404 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
14405 		return 0;
14406 	}
14407 
14408 	dp_aggregate_vdev_stats(vdev, vdev_stats);
14409 
14410 	tx_success = vdev_stats->tx.tx_success.num;
14411 	qdf_mem_free(vdev_stats);
14412 
14413 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
14414 	return tx_success;
14415 }
14416 
14417 #ifdef WLAN_SUPPORT_DATA_STALL
14418 /**
14419  * dp_register_data_stall_detect_cb() - register data stall callback
14420  * @soc_hdl: Datapath soc handle
14421  * @pdev_id: id of data path pdev handle
14422  * @data_stall_detect_callback: data stall callback function
14423  *
14424  * Return: QDF_STATUS Enumeration
14425  */
14426 static
14427 QDF_STATUS dp_register_data_stall_detect_cb(
14428 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
14429 			data_stall_detect_cb data_stall_detect_callback)
14430 {
14431 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14432 	struct dp_pdev *pdev;
14433 
14434 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14435 	if (!pdev) {
14436 		dp_err("pdev NULL!");
14437 		return QDF_STATUS_E_INVAL;
14438 	}
14439 
14440 	pdev->data_stall_detect_callback = data_stall_detect_callback;
14441 	return QDF_STATUS_SUCCESS;
14442 }
14443 
14444 /**
14445  * dp_deregister_data_stall_detect_cb() - de-register data stall callback
14446  * @soc_hdl: Datapath soc handle
14447  * @pdev_id: id of data path pdev handle
14448  * @data_stall_detect_callback: data stall callback function
14449  *
14450  * Return: QDF_STATUS Enumeration
14451  */
14452 static
14453 QDF_STATUS dp_deregister_data_stall_detect_cb(
14454 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
14455 			data_stall_detect_cb data_stall_detect_callback)
14456 {
14457 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14458 	struct dp_pdev *pdev;
14459 
14460 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14461 	if (!pdev) {
14462 		dp_err("pdev NULL!");
14463 		return QDF_STATUS_E_INVAL;
14464 	}
14465 
14466 	pdev->data_stall_detect_callback = NULL;
14467 	return QDF_STATUS_SUCCESS;
14468 }
14469 
14470 /**
14471  * dp_txrx_post_data_stall_event() - post data stall event
14472  * @soc_hdl: Datapath soc handle
14473  * @indicator: Module triggering data stall
14474  * @data_stall_type: data stall event type
14475  * @pdev_id: pdev id
14476  * @vdev_id_bitmap: vdev id bitmap
14477  * @recovery_type: data stall recovery type
14478  *
14479  * Return: None
14480  */
14481 static void
14482 dp_txrx_post_data_stall_event(struct cdp_soc_t *soc_hdl,
14483 			      enum data_stall_log_event_indicator indicator,
14484 			      enum data_stall_log_event_type data_stall_type,
14485 			      uint32_t pdev_id, uint32_t vdev_id_bitmap,
14486 			      enum data_stall_log_recovery_type recovery_type)
14487 {
14488 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14489 	struct data_stall_event_info data_stall_info;
14490 	struct dp_pdev *pdev;
14491 
14492 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14493 	if (!pdev) {
14494 		dp_err("pdev NULL!");
14495 		return;
14496 	}
14497 
14498 	if (!pdev->data_stall_detect_callback) {
14499 		dp_err("data stall cb not registered!");
14500 		return;
14501 	}
14502 
14503 	dp_info("data_stall_type: %x pdev_id: %d",
14504 		data_stall_type, pdev_id);
14505 
14506 	data_stall_info.indicator = indicator;
14507 	data_stall_info.data_stall_type = data_stall_type;
14508 	data_stall_info.vdev_id_bitmap = vdev_id_bitmap;
14509 	data_stall_info.pdev_id = pdev_id;
14510 	data_stall_info.recovery_type = recovery_type;
14511 
14512 	pdev->data_stall_detect_callback(&data_stall_info);
14513 }
14514 #endif /* WLAN_SUPPORT_DATA_STALL */
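/*
 * Illustrative sketch (under #if 0, not compiled): the expected calling
 * sequence for the data stall hooks above - register a callback once, post
 * events as stalls are detected, deregister on teardown. example_stall_cb()
 * is hypothetical and the enum values are assumptions based on the data
 * stall API; only the dp_* functions come from this file.
 */
#if 0
static void example_stall_cb(struct data_stall_event_info *info)
{
	dp_info("stall type %x on pdev %d", info->data_stall_type,
		info->pdev_id);
}

static void example_data_stall_usage(struct cdp_soc_t *soc_hdl)
{
	dp_register_data_stall_detect_cb(soc_hdl, 0, example_stall_cb);
	/* detection logic posts an event for vdev 0 on pdev 0 */
	dp_txrx_post_data_stall_event(soc_hdl,
				      DATA_STALL_LOG_INDICATOR_HOST_DRIVER,
				      DATA_STALL_LOG_NONE,
				      0 /* pdev_id */, 0x1 /* vdev bitmap */,
				      DATA_STALL_LOG_RECOVERY_NONE);
	dp_deregister_data_stall_detect_cb(soc_hdl, 0, example_stall_cb);
}
#endif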
14515 
14516 #ifdef WLAN_FEATURE_STATS_EXT
14517 /* rx hw stats event wait timeout in ms */
14518 #define DP_REO_STATUS_STATS_TIMEOUT 1500
14519 /**
14520  * dp_txrx_ext_stats_request - request dp txrx extended stats request
14521  * @soc_hdl: soc handle
14522  * @pdev_id: pdev id
14523  * @req: stats request
14524  *
14525  * Return: QDF_STATUS
14526  */
14527 static QDF_STATUS
14528 dp_txrx_ext_stats_request(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
14529 			  struct cdp_txrx_ext_stats *req)
14530 {
14531 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
14532 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14533 	int i = 0;
14534 	int tcl_ring_full = 0;
14535 
14536 	if (!pdev) {
14537 		dp_err("pdev is null");
14538 		return QDF_STATUS_E_INVAL;
14539 	}
14540 
14541 	dp_aggregate_pdev_stats(pdev);
14542 
14543 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++)
14544 		tcl_ring_full += soc->stats.tx.tcl_ring_full[i];
14545 
14546 	req->tx_msdu_enqueue = pdev->stats.tx_i.processed.num;
14547 	req->tx_msdu_overflow = tcl_ring_full;
14548 	req->rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
14549 	req->rx_mpdu_delivered = soc->ext_stats.rx_mpdu_received;
14550 	req->rx_mpdu_missed = pdev->stats.err.reo_error;
14551 	/* only count error source from RXDMA */
14552 	req->rx_mpdu_error = pdev->stats.err.rxdma_error;
14553 
14554 	dp_info("ext stats: tx_msdu_enq = %u, tx_msdu_overflow = %u, "
14555 		"rx_mpdu_received = %u, rx_mpdu_delivered = %u, "
14556 		"rx_mpdu_missed = %u, rx_mpdu_error = %u",
14557 		req->tx_msdu_enqueue,
14558 		req->tx_msdu_overflow,
14559 		req->rx_mpdu_received,
14560 		req->rx_mpdu_delivered,
14561 		req->rx_mpdu_missed,
14562 		req->rx_mpdu_error);
14563 
14564 	return QDF_STATUS_SUCCESS;
14565 }
14566 
14567 /**
14568  * dp_rx_hw_stats_cb() - response callback for the rx hw stats request
14569  * @soc: soc handle
14570  * @cb_ctxt: callback context
14571  * @reo_status: reo command response status
14572  *
14573  * Return: None
14574  */
14575 static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
14576 			      union hal_reo_status *reo_status)
14577 {
14578 	struct dp_req_rx_hw_stats_t *rx_hw_stats = cb_ctxt;
14579 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
14580 	bool is_query_timeout;
14581 
14582 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
14583 	is_query_timeout = rx_hw_stats->is_query_timeout;
14584 	/* free the cb_ctxt if all pending tid stats query is received */
14585 	/* free the cb_ctxt once all pending tid stats queries are received */
14586 		if (!is_query_timeout) {
14587 			qdf_event_set(&soc->rx_hw_stats_event);
14588 			soc->is_last_stats_ctx_init = false;
14589 		}
14590 
14591 		qdf_mem_free(rx_hw_stats);
14592 	}
14593 
14594 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
14595 		dp_info("REO stats failure %d",
14596 			queue_status->header.status);
14597 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14598 		return;
14599 	}
14600 
14601 	if (!is_query_timeout) {
14602 		soc->ext_stats.rx_mpdu_received +=
14603 					queue_status->mpdu_frms_cnt;
14604 		soc->ext_stats.rx_mpdu_missed +=
14605 					queue_status->hole_cnt;
14606 	}
14607 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14608 }
14609 
14610 /**
14611  * dp_request_rx_hw_stats() - request rx hardware stats
14612  * @soc_hdl: soc handle
14613  * @vdev_id: vdev id
14614  *
14615  * Return: QDF_STATUS
14616  */
14617 static QDF_STATUS
14618 dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
14619 {
14620 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
14621 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
14622 						     DP_MOD_ID_CDP);
14623 	struct dp_peer *peer = NULL;
14624 	QDF_STATUS status;
14625 	struct dp_req_rx_hw_stats_t *rx_hw_stats;
14626 	int rx_stats_sent_cnt = 0;
14627 	uint32_t last_rx_mpdu_received;
14628 	uint32_t last_rx_mpdu_missed;
14629 
14630 	if (!vdev) {
14631 		dp_err("vdev is null for vdev_id: %u", vdev_id);
14632 		status = QDF_STATUS_E_INVAL;
14633 		goto out;
14634 	}
14635 
14636 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_CDP);
14637 
14638 	if (!peer) {
14639 		dp_err("Peer is NULL");
14640 		status = QDF_STATUS_E_INVAL;
14641 		goto out;
14642 	}
14643 
14644 	rx_hw_stats = qdf_mem_malloc(sizeof(*rx_hw_stats));
14645 
14646 	if (!rx_hw_stats) {
14647 		dp_err("malloc failed for hw stats structure");
14648 		status = QDF_STATUS_E_INVAL;
14649 		goto out;
14650 	}
14651 
14652 	qdf_event_reset(&soc->rx_hw_stats_event);
14653 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
14654 	/* save the last soc cumulative stats and reset it to 0 */
14655 	last_rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
14656 	last_rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
14657 	soc->ext_stats.rx_mpdu_received = soc->ext_stats.rx_mpdu_missed = 0;
14658 
14659 	rx_stats_sent_cnt =
14660 		dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, rx_hw_stats);
14661 	if (!rx_stats_sent_cnt) {
14662 		dp_err("no tid stats sent successfully");
14663 		qdf_mem_free(rx_hw_stats);
14664 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14665 		status = QDF_STATUS_E_INVAL;
14666 		goto out;
14667 	}
14668 	qdf_atomic_set(&rx_hw_stats->pending_tid_stats_cnt,
14669 		       rx_stats_sent_cnt);
14670 	rx_hw_stats->is_query_timeout = false;
14671 	soc->is_last_stats_ctx_init = true;
14672 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14673 
14674 	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
14675 				       DP_REO_STATUS_STATS_TIMEOUT);
14676 
14677 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
14678 	if (status != QDF_STATUS_SUCCESS) {
14679 		dp_info("rx hw stats event timeout");
14680 		if (soc->is_last_stats_ctx_init)
14681 			rx_hw_stats->is_query_timeout = true;
14682 		/*
14683 		 * If query timeout happened, use the last saved stats
14684 		 * for this query.
14685 		 */
14686 		soc->ext_stats.rx_mpdu_received = last_rx_mpdu_received;
14687 		soc->ext_stats.rx_mpdu_missed = last_rx_mpdu_missed;
14688 	}
14689 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14690 
14691 out:
14692 	if (peer)
14693 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
14694 	if (vdev)
14695 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
14696 
14697 	return status;
14698 }
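/*
 * Illustrative sketch (under #if 0, not compiled): the refcount pattern that
 * ties dp_request_rx_hw_stats() to dp_rx_hw_stats_cb(). One REO query is
 * issued per tid, pending_tid_stats_cnt is set to that count, and each
 * callback decrements it, so the last responder frees the context exactly
 * once. example_hw_stats_cb_core() is a hypothetical restatement.
 */
#if 0
static void example_hw_stats_cb_core(struct dp_soc *soc,
				     struct dp_req_rx_hw_stats_t *ctx)
{
	/* Only the final tid response observes the count hit zero */
	if (qdf_atomic_dec_and_test(&ctx->pending_tid_stats_cnt)) {
		if (!ctx->is_query_timeout)
			qdf_event_set(&soc->rx_hw_stats_event);
		qdf_mem_free(ctx);
	}
}
#endif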
14699 
14700 /**
14701  * dp_reset_rx_hw_ext_stats - Reset rx hardware ext stats
14702  * @soc_hdl: soc handle
14703  *
14704  * Return: None
14705  */
14706 static
14707 void dp_reset_rx_hw_ext_stats(struct cdp_soc_t *soc_hdl)
14708 {
14709 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
14710 
14711 	soc->ext_stats.rx_mpdu_received = 0;
14712 	soc->ext_stats.rx_mpdu_missed = 0;
14713 }
14714 #endif /* WLAN_FEATURE_STATS_EXT */
14715 
14716 static
14717 uint32_t dp_get_tx_rings_grp_bitmap(struct cdp_soc_t *soc_hdl)
14718 {
14719 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
14720 
14721 	return soc->wlan_cfg_ctx->tx_rings_grp_bitmap;
14722 }
14723 
14724 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
14725 /**
14726  * dp_mark_first_wakeup_packet() - set flag to indicate that
14727  *    FW supports marking the first packet after WoW wakeup
14728  * @soc_hdl: Datapath soc handle
14729  * @pdev_id: id of data path pdev handle
14730  * @value: 1 for enabled / 0 for disabled
14731  *
14732  * Return: None
14733  */
14734 static void dp_mark_first_wakeup_packet(struct cdp_soc_t *soc_hdl,
14735 					uint8_t pdev_id, uint8_t value)
14736 {
14737 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14738 	struct dp_pdev *pdev;
14739 
14740 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14741 	if (!pdev) {
14742 		dp_err("pdev is NULL");
14743 		return;
14744 	}
14745 
14746 	pdev->is_first_wakeup_packet = value;
14747 }
14748 #endif
14749 
14750 #ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
14751 /**
14752  * dp_set_peer_txq_flush_config() - Set the peer txq flush configuration
14753  * @soc_hdl: Opaque handle to the DP soc object
14754  * @vdev_id: VDEV identifier
14755  * @mac: MAC address of the peer
14756  * @ac: access category mask
14757  * @tid: TID mask
14758  * @policy: Flush policy
14759  *
14760  * Return: 0 on success, errno on failure
14761  */
14762 static int dp_set_peer_txq_flush_config(struct cdp_soc_t *soc_hdl,
14763 					uint8_t vdev_id, uint8_t *mac,
14764 					uint8_t ac, uint32_t tid,
14765 					enum cdp_peer_txq_flush_policy policy)
14766 {
14767 	struct dp_soc *soc;
14768 
14769 	if (!soc_hdl) {
14770 		dp_err("soc is null");
14771 		return -EINVAL;
14772 	}
14773 	soc = cdp_soc_t_to_dp_soc(soc_hdl);
14774 	return target_if_peer_txq_flush_config(soc->ctrl_psoc, vdev_id,
14775 					       mac, ac, tid, policy);
14776 }
14777 #endif
14778 
14779 #ifdef CONNECTIVITY_PKTLOG
14780 /**
14781  * dp_register_packetdump_callback() - registers
14782  *  tx data packet, tx mgmt. packet and rx data packet
14783  *  dump callback handler.
14784  *  dump callback handlers.
14785  * @soc_hdl: Datapath soc handle
14786  * @pdev_id: id of data path pdev handle
14787  * @dp_tx_packetdump_cb: tx packetdump cb
14788  * @dp_rx_packetdump_cb: rx packetdump cb
14789  *
14790  * This function is used to register tx data pkt, tx mgmt.
14791  * pkt and rx data pkt dump callback
14792  *
14793  * Return: None
14794  *
14795  */
14796 static inline
14797 void dp_register_packetdump_callback(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
14798 				     ol_txrx_pktdump_cb dp_tx_packetdump_cb,
14799 				     ol_txrx_pktdump_cb dp_rx_packetdump_cb)
14800 {
14801 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14802 	struct dp_pdev *pdev;
14803 
14804 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14805 	if (!pdev) {
14806 		dp_err("pdev is NULL!");
14807 		return;
14808 	}
14809 
14810 	pdev->dp_tx_packetdump_cb = dp_tx_packetdump_cb;
14811 	pdev->dp_rx_packetdump_cb = dp_rx_packetdump_cb;
14812 }
14813 
14814 /**
14815  * dp_deregister_packetdump_callback() - deregisters
14816  *  tx data packet, tx mgmt. packet and rx data packet
14817  *  dump callback handler
14818  * @soc_hdl: Datapath soc handle
14819  * @pdev_id: id of data path pdev handle
14820  *
14821  * This function is used to deregister tx data pkt.,
14822  * tx mgmt. pkt and rx data pkt. dump callback
14823  *
14824  * Return: None
14825  *
14826  */
14827 static inline
14828 void dp_deregister_packetdump_callback(struct cdp_soc_t *soc_hdl,
14829 				       uint8_t pdev_id)
14830 {
14831 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14832 	struct dp_pdev *pdev;
14833 
14834 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14835 	if (!pdev) {
14836 		dp_err("pdev is NULL!");
14837 		return;
14838 	}
14839 
14840 	pdev->dp_tx_packetdump_cb = NULL;
14841 	pdev->dp_rx_packetdump_cb = NULL;
14842 }
14843 #endif
14844 
14845 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
14846 /**
14847  * dp_set_bus_vote_lvl_high() - Take a vote on bus bandwidth from dp
14848  * @soc_hdl: Datapath soc handle
14849  * @high: whether the bus bw is high or not
14850  *
14851  * Return: void
14852  */
14853 static void
14854 dp_set_bus_vote_lvl_high(ol_txrx_soc_handle soc_hdl, bool high)
14855 {
14856 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14857 
14858 	soc->high_throughput = high;
14859 }
14860 
14861 /**
14862  * dp_get_bus_vote_lvl_high() - get bus bandwidth vote to dp
14863  * @soc_hdl: Datapath soc handle
14864  *
14865  * Return: bool
14866  */
14867 static bool
14868 dp_get_bus_vote_lvl_high(ol_txrx_soc_handle soc_hdl)
14869 {
14870 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14871 
14872 	return soc->high_throughput;
14873 }
14874 #endif
14875 
14876 #ifdef DP_PEER_EXTENDED_API
14877 static struct cdp_misc_ops dp_ops_misc = {
14878 #ifdef FEATURE_WLAN_TDLS
14879 	.tx_non_std = dp_tx_non_std,
14880 #endif /* FEATURE_WLAN_TDLS */
14881 	.get_opmode = dp_get_opmode,
14882 #ifdef FEATURE_RUNTIME_PM
14883 	.runtime_suspend = dp_runtime_suspend,
14884 	.runtime_resume = dp_runtime_resume,
14885 #endif /* FEATURE_RUNTIME_PM */
14886 	.get_num_rx_contexts = dp_get_num_rx_contexts,
14887 	.get_tx_ack_stats = dp_tx_get_success_ack_stats,
14888 #ifdef WLAN_SUPPORT_DATA_STALL
14889 	.txrx_data_stall_cb_register = dp_register_data_stall_detect_cb,
14890 	.txrx_data_stall_cb_deregister = dp_deregister_data_stall_detect_cb,
14891 	.txrx_post_data_stall_event = dp_txrx_post_data_stall_event,
14892 #endif
14893 
14894 #ifdef WLAN_FEATURE_STATS_EXT
14895 	.txrx_ext_stats_request = dp_txrx_ext_stats_request,
14896 	.request_rx_hw_stats = dp_request_rx_hw_stats,
14897 	.reset_rx_hw_ext_stats = dp_reset_rx_hw_ext_stats,
14898 #endif /* WLAN_FEATURE_STATS_EXT */
14899 	.vdev_inform_ll_conn = dp_vdev_inform_ll_conn,
14900 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
14901 	.set_swlm_enable = dp_soc_set_swlm_enable,
14902 	.is_swlm_enabled = dp_soc_is_swlm_enabled,
14903 #endif
14904 	.display_txrx_hw_info = dp_display_srng_info,
14905 	.get_tx_rings_grp_bitmap = dp_get_tx_rings_grp_bitmap,
14906 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
14907 	.mark_first_wakeup_packet = dp_mark_first_wakeup_packet,
14908 #endif
14909 #ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
14910 	.set_peer_txq_flush_config = dp_set_peer_txq_flush_config,
14911 #endif
14912 #ifdef CONNECTIVITY_PKTLOG
14913 	.register_pktdump_cb = dp_register_packetdump_callback,
14914 	.unregister_pktdump_cb = dp_deregister_packetdump_callback,
14915 #endif
14916 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
14917 	.set_bus_vote_lvl_high = dp_set_bus_vote_lvl_high,
14918 	.get_bus_vote_lvl_high = dp_get_bus_vote_lvl_high,
14919 #endif
14920 };
14921 #endif
14922 
14923 #ifdef DP_FLOW_CTL
14924 static struct cdp_flowctl_ops dp_ops_flowctl = {
14925 	/* WIFI 3.0 DP implement as required. */
14926 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
14927 	.flow_pool_map_handler = dp_tx_flow_pool_map,
14928 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
14929 	.register_pause_cb = dp_txrx_register_pause_cb,
14930 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
14931 	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
14932 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
14933 };
14934 
14935 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
14936 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
14937 };
14938 #endif
14939 
14940 #ifdef IPA_OFFLOAD
14941 static struct cdp_ipa_ops dp_ops_ipa = {
14942 	.ipa_get_resource = dp_ipa_get_resource,
14943 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
14944 	.ipa_iounmap_doorbell_vaddr = dp_ipa_iounmap_doorbell_vaddr,
14945 	.ipa_op_response = dp_ipa_op_response,
14946 	.ipa_register_op_cb = dp_ipa_register_op_cb,
14947 	.ipa_deregister_op_cb = dp_ipa_deregister_op_cb,
14948 	.ipa_get_stat = dp_ipa_get_stat,
14949 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
14950 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
14951 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
14952 	.ipa_setup = dp_ipa_setup,
14953 	.ipa_cleanup = dp_ipa_cleanup,
14954 	.ipa_setup_iface = dp_ipa_setup_iface,
14955 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
14956 	.ipa_enable_pipes = dp_ipa_enable_pipes,
14957 	.ipa_disable_pipes = dp_ipa_disable_pipes,
14958 	.ipa_set_perf_level = dp_ipa_set_perf_level,
14959 	.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd,
14960 	.ipa_tx_buf_smmu_mapping = dp_ipa_tx_buf_smmu_mapping,
14961 	.ipa_tx_buf_smmu_unmapping = dp_ipa_tx_buf_smmu_unmapping,
14962 #ifdef IPA_WDS_EASYMESH_FEATURE
14963 	.ipa_ast_create = dp_ipa_ast_create,
14964 #endif
14965 };
14966 #endif
14967 
14968 #ifdef DP_POWER_SAVE
14969 static QDF_STATUS dp_bus_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
14970 {
14971 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14972 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14973 	int timeout = SUSPEND_DRAIN_WAIT;
14974 	int drain_wait_delay = 50; /* 50 ms */
14975 	int32_t tx_pending;
14976 
14977 	if (qdf_unlikely(!pdev)) {
14978 		dp_err("pdev is NULL");
14979 		return QDF_STATUS_E_INVAL;
14980 	}
14981 
14982 	/* Abort if there are any pending TX packets */
14983 	while ((tx_pending = dp_get_tx_pending((struct cdp_pdev *)pdev))) {
14984 		qdf_sleep(drain_wait_delay);
14985 		if (timeout <= 0) {
14986 			dp_info("TX frames are pending %d, abort suspend",
14987 				tx_pending);
14988 			dp_find_missing_tx_comp(soc);
14989 			return QDF_STATUS_E_TIMEOUT;
14990 		}
14991 		timeout = timeout - drain_wait_delay;
14992 	}
14993 
14994 	if (soc->intr_mode == DP_INTR_POLL)
14995 		qdf_timer_stop(&soc->int_timer);
14996 
14997 	/* Stop monitor reap timer and reap any pending frames in ring */
14998 	dp_monitor_reap_timer_suspend(soc);
14999 
15000 	dp_suspend_fse_cache_flush(soc);
15001 
15002 	return QDF_STATUS_SUCCESS;
15003 }
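/*
 * Worked example (comment only, figures assumed for illustration): the drain
 * loop above sleeps 50 ms per pass and debits drain_wait_delay from timeout
 * each time, so a SUSPEND_DRAIN_WAIT budget of e.g. 500 would allow roughly
 * ten polls of dp_get_tx_pending() before the suspend gives up with
 * QDF_STATUS_E_TIMEOUT. SUSPEND_DRAIN_WAIT itself is defined elsewhere.
 */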
15004 
15005 static QDF_STATUS dp_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
15006 {
15007 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15008 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15009 	uint8_t i;
15010 
15011 	if (qdf_unlikely(!pdev)) {
15012 		dp_err("pdev is NULL");
15013 		return QDF_STATUS_E_INVAL;
15014 	}
15015 
15016 	if (soc->intr_mode == DP_INTR_POLL)
15017 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
15018 
15019 	/* Start monitor reap timer */
15020 	dp_monitor_reap_timer_start(soc, CDP_MON_REAP_SOURCE_ANY);
15021 
15022 	dp_resume_fse_cache_flush(soc);
15023 
15024 	for (i = 0; i < soc->num_tcl_data_rings; i++)
15025 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
15026 
15027 	return QDF_STATUS_SUCCESS;
15028 }
15029 
15030 /**
15031  * dp_process_wow_ack_rsp() - process wow ack response
15032  * @soc_hdl: datapath soc handle
15033  * @pdev_id: data path pdev handle id
15034  *
15035  * Return: none
15036  */
15037 static void dp_process_wow_ack_rsp(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
15038 {
15039 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15040 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15041 
15042 	if (qdf_unlikely(!pdev)) {
15043 		dp_err("pdev is NULL");
15044 		return;
15045 	}
15046 
15047 	/*
15048 	 * As part of WoW enable, FW disables the mon status ring. On the WoW
15049 	 * ack response from FW, reap the mon status ring to make sure no
15050 	 * packets are pending in the ring.
15051 	 */
15052 	dp_monitor_reap_timer_suspend(soc);
15053 }
15054 
15055 /**
15056  * dp_process_target_suspend_req() - process target suspend request
15057  * @soc_hdl: datapath soc handle
15058  * @pdev_id: data path pdev handle id
15059  *
15060  * Return: none
15061  */
15062 static void dp_process_target_suspend_req(struct cdp_soc_t *soc_hdl,
15063 					  uint8_t pdev_id)
15064 {
15065 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15066 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15067 
15068 	if (qdf_unlikely(!pdev)) {
15069 		dp_err("pdev is NULL");
15070 		return;
15071 	}
15072 
15073 	/* Stop monitor reap timer and reap any pending frames in ring */
15074 	dp_monitor_reap_timer_suspend(soc);
15075 }
15076 
15077 static struct cdp_bus_ops dp_ops_bus = {
15078 	.bus_suspend = dp_bus_suspend,
15079 	.bus_resume = dp_bus_resume,
15080 	.process_wow_ack_rsp = dp_process_wow_ack_rsp,
15081 	.process_target_suspend_req = dp_process_target_suspend_req
15082 };
15083 #endif
15084 
15085 #ifdef DP_FLOW_CTL
15086 static struct cdp_throttle_ops dp_ops_throttle = {
15087 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
15088 };
15089 
15090 static struct cdp_cfg_ops dp_ops_cfg = {
15091 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
15092 };
15093 #endif
15094 
15095 #ifdef DP_PEER_EXTENDED_API
15096 static struct cdp_ocb_ops dp_ops_ocb = {
15097 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
15098 };
15099 
15100 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
15101 	.clear_stats = dp_txrx_clear_dump_stats,
15102 };
15103 
15104 static struct cdp_peer_ops dp_ops_peer = {
15105 	.register_peer = dp_register_peer,
15106 	.clear_peer = dp_clear_peer,
15107 	.find_peer_exist = dp_find_peer_exist,
15108 	.find_peer_exist_on_vdev = dp_find_peer_exist_on_vdev,
15109 	.find_peer_exist_on_other_vdev = dp_find_peer_exist_on_other_vdev,
15110 	.peer_state_update = dp_peer_state_update,
15111 	.get_vdevid = dp_get_vdevid,
15112 	.get_vdev_by_peer_addr = dp_get_vdev_by_peer_addr,
15113 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
15114 	.get_peer_state = dp_get_peer_state,
15115 	.peer_flush_frags = dp_peer_flush_frags,
15116 	.set_peer_as_tdls_peer = dp_set_peer_as_tdls_peer,
15117 };
15118 #endif
15119 
15120 static void dp_soc_txrx_ops_attach(struct dp_soc *soc)
15121 {
15122 	soc->cdp_soc.ops->cmn_drv_ops = &dp_ops_cmn;
15123 	soc->cdp_soc.ops->ctrl_ops = &dp_ops_ctrl;
15124 	soc->cdp_soc.ops->me_ops = &dp_ops_me;
15125 	soc->cdp_soc.ops->host_stats_ops = &dp_ops_host_stats;
15126 	soc->cdp_soc.ops->wds_ops = &dp_ops_wds;
15127 	soc->cdp_soc.ops->raw_ops = &dp_ops_raw;
15128 #ifdef PEER_FLOW_CONTROL
15129 	soc->cdp_soc.ops->pflow_ops = &dp_ops_pflow;
15130 #endif /* PEER_FLOW_CONTROL */
15131 #ifdef DP_PEER_EXTENDED_API
15132 	soc->cdp_soc.ops->misc_ops = &dp_ops_misc;
15133 	soc->cdp_soc.ops->ocb_ops = &dp_ops_ocb;
15134 	soc->cdp_soc.ops->peer_ops = &dp_ops_peer;
15135 	soc->cdp_soc.ops->mob_stats_ops = &dp_ops_mob_stats;
15136 #endif
15137 #ifdef DP_FLOW_CTL
15138 	soc->cdp_soc.ops->cfg_ops = &dp_ops_cfg;
15139 	soc->cdp_soc.ops->flowctl_ops = &dp_ops_flowctl;
15140 	soc->cdp_soc.ops->l_flowctl_ops = &dp_ops_l_flowctl;
15141 	soc->cdp_soc.ops->throttle_ops = &dp_ops_throttle;
15142 #endif
15143 #ifdef IPA_OFFLOAD
15144 	soc->cdp_soc.ops->ipa_ops = &dp_ops_ipa;
15145 #endif
15146 #ifdef DP_POWER_SAVE
15147 	soc->cdp_soc.ops->bus_ops = &dp_ops_bus;
15148 #endif
15149 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
15150 	soc->cdp_soc.ops->cfr_ops = &dp_ops_cfr;
15151 #endif
15152 #ifdef WLAN_SUPPORT_MSCS
15153 	soc->cdp_soc.ops->mscs_ops = &dp_ops_mscs;
15154 #endif
15155 #ifdef WLAN_SUPPORT_MESH_LATENCY
15156 	soc->cdp_soc.ops->mesh_latency_ops = &dp_ops_mesh_latency;
15157 #endif
15158 #ifdef CONFIG_SAWF_DEF_QUEUES
15159 	soc->cdp_soc.ops->sawf_ops = &dp_ops_sawf;
15160 #endif
15161 #ifdef WLAN_SUPPORT_SCS
15162 	soc->cdp_soc.ops->scs_ops = &dp_ops_scs;
15163 #endif
15164 }
15165 
15166 /*
15167  * dp_soc_set_txrx_ring_map() - set the default tx/rx CPU ring map
15168  * @soc: DP soc handle
15169  *
15170  * Return: Void
15171  */
15172 void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
15173 {
15174 	uint32_t i;
15175 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
15176 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
15177 	}
15178 }
15179 
15180 qdf_export_symbol(dp_soc_set_txrx_ring_map);
15181 
15182 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
15183 	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574) || \
15184 	defined(QCA_WIFI_QCA5332)
15185 /**
15186  * dp_soc_attach_wifi3() - Attach txrx SOC
15187  * @ctrl_psoc: Opaque SOC handle from control plane
15188  * @params: SOC attach params
15189  *
15190  * Return: DP SOC handle on success, NULL on failure
15191  */
15192 struct cdp_soc_t *
15193 dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
15194 		    struct cdp_soc_attach_params *params)
15195 {
15196 	struct dp_soc *dp_soc = NULL;
15197 
15198 	dp_soc = dp_soc_attach(ctrl_psoc, params);
15199 
15200 	return dp_soc_to_cdp_soc_t(dp_soc);
15201 }
15202 
15203 static inline void dp_soc_set_def_pdev(struct dp_soc *soc)
15204 {
15205 	int lmac_id;
15206 
15207 	for (lmac_id = 0; lmac_id < MAX_NUM_LMAC_HW; lmac_id++) {
15208 		/* Set default host PDEV ID for lmac_id */
15209 		wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
15210 				      INVALID_PDEV_ID, lmac_id);
15211 	}
15212 }
15213 
15214 static uint32_t
15215 dp_get_link_desc_id_start(uint16_t arch_id)
15216 {
15217 	switch (arch_id) {
15218 	case CDP_ARCH_TYPE_LI:
15219 		return LINK_DESC_ID_START_21_BITS_COOKIE;
15220 	case CDP_ARCH_TYPE_BE:
15221 		return LINK_DESC_ID_START_20_BITS_COOKIE;
15222 	default:
15223 		dp_err("unknown arch_id 0x%x", arch_id);
15224 		QDF_BUG(0);
15225 		return LINK_DESC_ID_START_21_BITS_COOKIE;
15226 	}
15227 }
15228 
15229 /**
15230  * dp_soc_attach() - Attach txrx SOC
15231  * @ctrl_psoc: Opaque SOC handle from control plane
15232  * @params: SOC attach params
15233  *
15234  * Return: DP SOC handle on success, NULL on failure
15235  */
15236 static struct dp_soc *
15237 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
15238 	      struct cdp_soc_attach_params *params)
15239 {
15240 	int int_ctx;
15241 	struct dp_soc *soc =  NULL;
15242 	uint16_t arch_id;
15243 	struct hif_opaque_softc *hif_handle = params->hif_handle;
15244 	qdf_device_t qdf_osdev = params->qdf_osdev;
15245 	struct ol_if_ops *ol_ops = params->ol_ops;
15246 	uint16_t device_id = params->device_id;
15247 
15248 	if (!hif_handle) {
15249 		dp_err("HIF handle is NULL");
15250 		goto fail0;
15251 	}
15252 	arch_id = cdp_get_arch_type_from_devid(device_id);
15253 	soc = qdf_mem_malloc(dp_get_soc_context_size(device_id));
15254 	if (!soc) {
15255 		dp_err("DP SOC memory allocation failed");
15256 		goto fail0;
15257 	}
15258 
15259 	dp_info("soc memory allocated %pK", soc);
15260 	soc->hif_handle = hif_handle;
15261 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
15262 	if (!soc->hal_soc)
15263 		goto fail1;
15264 
15265 	hif_get_cmem_info(soc->hif_handle,
15266 			  &soc->cmem_base,
15267 			  &soc->cmem_total_size);
15268 	soc->cmem_avail_size = soc->cmem_total_size;
15269 	int_ctx = 0;
15270 	soc->device_id = device_id;
15271 	soc->cdp_soc.ops =
15272 		(struct cdp_ops *)qdf_mem_malloc(sizeof(struct cdp_ops));
15273 	if (!soc->cdp_soc.ops)
15274 		goto fail1;
15275 
15276 	dp_soc_txrx_ops_attach(soc);
15277 	soc->cdp_soc.ol_ops = ol_ops;
15278 	soc->ctrl_psoc = ctrl_psoc;
15279 	soc->osdev = qdf_osdev;
15280 	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
15281 	hal_rx_get_tlv_size(soc->hal_soc, &soc->rx_pkt_tlv_size,
15282 			    &soc->rx_mon_pkt_tlv_size);
15283 	soc->idle_link_bm_id = hal_get_idle_link_bm_id(soc->hal_soc,
15284 						       params->mlo_chip_id);
15285 	soc->features.dmac_cmn_src_rxbuf_ring_enabled =
15286 		hal_dmac_cmn_src_rxbuf_ring_get(soc->hal_soc);
15287 	soc->arch_id = arch_id;
15288 	soc->link_desc_id_start =
15289 			dp_get_link_desc_id_start(soc->arch_id);
15290 	dp_configure_arch_ops(soc);
15291 
15292 	/* Reset wbm sg list and flags */
15293 	dp_rx_wbm_sg_list_reset(soc);
15294 
15295 	dp_soc_tx_hw_desc_history_attach(soc);
15296 	dp_soc_rx_history_attach(soc);
15297 	dp_soc_mon_status_ring_history_attach(soc);
15298 	dp_soc_tx_history_attach(soc);
15299 	wlan_set_srng_cfg(&soc->wlan_srng_cfg);
15300 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
15301 	if (!soc->wlan_cfg_ctx) {
15302 		dp_err("wlan_cfg_soc_attach failed");
15303 		goto fail2;
15304 	}
15305 	dp_soc_cfg_attach(soc);
15306 
15307 	if (dp_hw_link_desc_pool_banks_alloc(soc, WLAN_INVALID_PDEV_ID)) {
15308 		dp_err("failed to allocate link desc pool banks");
15309 		goto fail3;
15310 	}
15311 
15312 	if (dp_hw_link_desc_ring_alloc(soc)) {
15313 		dp_err("failed to allocate link_desc_ring");
15314 		goto fail4;
15315 	}
15316 
15317 	if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_attach(soc,
15318 								 params))) {
15319 		dp_err("unable to do target specific attach");
15320 		goto fail5;
15321 	}
15322 
15323 	if (dp_soc_srng_alloc(soc)) {
15324 		dp_err("failed to allocate soc srng rings");
15325 		goto fail6;
15326 	}
15327 
15328 	if (dp_soc_tx_desc_sw_pools_alloc(soc)) {
15329 		dp_err("dp_soc_tx_desc_sw_pools_alloc failed");
15330 		goto fail7;
15331 	}
15332 
15333 	if (!dp_monitor_modularized_enable()) {
15334 		if (dp_mon_soc_attach_wrapper(soc)) {
15335 			dp_err("failed to attach monitor");
15336 			goto fail8;
15337 		}
15338 	}
15339 
15340 	if (dp_sysfs_initialize_stats(soc) != QDF_STATUS_SUCCESS) {
15341 		dp_err("failed to initialize dp stats sysfs file");
15342 		dp_sysfs_deinitialize_stats(soc);
15343 	}
15344 
15345 	dp_soc_swlm_attach(soc);
15346 	dp_soc_set_interrupt_mode(soc);
15347 	dp_soc_set_def_pdev(soc);
15348 
15349 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
15350 		qdf_dma_mem_stats_read(),
15351 		qdf_heap_mem_stats_read(),
15352 		qdf_skb_total_mem_stats_read());
15353 
15354 	return soc;
15355 fail8:
15356 	dp_soc_tx_desc_sw_pools_free(soc);
15357 fail7:
15358 	dp_soc_srng_free(soc);
15359 fail6:
15360 	soc->arch_ops.txrx_soc_detach(soc);
15361 fail5:
15362 	dp_hw_link_desc_ring_free(soc);
15363 fail4:
15364 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
15365 fail3:
15366 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
15367 fail2:
15368 	qdf_mem_free(soc->cdp_soc.ops);
15369 fail1:
15370 	qdf_mem_free(soc);
15371 fail0:
15372 	return NULL;
15373 }
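/*
 * Illustrative sketch (under #if 0, not compiled): the layered goto-unwind
 * idiom dp_soc_attach() relies on - each failN label undoes exactly the
 * steps that succeeded before the failure, in reverse order, so no partial
 * state leaks. The example_* names are hypothetical stand-ins.
 */
#if 0
static void *example_layered_attach(void)
{
	void *a, *b;

	a = example_alloc_a();		/* step 1 */
	if (!a)
		goto fail0;

	b = example_alloc_b();		/* step 2 */
	if (!b)
		goto fail1;

	return a;			/* success path keeps everything */
fail1:
	example_free_a(a);		/* undo step 1 only */
fail0:
	return NULL;
}
#endif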
15374 
15375 /**
15376  * dp_soc_init() - Initialize txrx SOC
15377  * @dp_soc: Opaque DP SOC handle
15378  * @htc_handle: Opaque HTC handle
15379  * @hif_handle: Opaque HIF handle
15380  *
15381  * Return: DP SOC handle on success, NULL on failure
15382  */
15383 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
15384 		  struct hif_opaque_softc *hif_handle)
15385 {
15386 	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
15387 	bool is_monitor_mode = false;
15388 	uint8_t i;
15389 	int num_dp_msi;
15390 
15391 	wlan_minidump_log(soc, sizeof(*soc), soc->ctrl_psoc,
15392 			  WLAN_MD_DP_SOC, "dp_soc");
15393 
15394 	soc->hif_handle = hif_handle;
15395 
15396 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
15397 	if (!soc->hal_soc)
15398 		goto fail0;
15399 
15400 	if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_init(soc))) {
15401 		dp_err("unable to do target specific init");
15402 		goto fail0;
15403 	}
15404 
15405 	htt_soc = htt_soc_attach(soc, htc_handle);
15406 	if (!htt_soc)
15407 		goto fail1;
15408 
15409 	soc->htt_handle = htt_soc;
15410 
15411 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
15412 		goto fail2;
15413 
15414 	htt_set_htc_handle(htt_soc, htc_handle);
15415 
15416 	dp_soc_cfg_init(soc);
15417 
15418 	dp_monitor_soc_cfg_init(soc);
15419 	/* Reset/Initialize wbm sg list and flags */
15420 	dp_rx_wbm_sg_list_reset(soc);
15421 
15422 	/* Note: Any SRNG ring initialization should happen only after
15423 	 * Interrupt mode is set and followed by filling up the
15424 	 * interrupt mask. IT SHOULD ALWAYS BE IN THIS ORDER.
15425 	 */
15426 	dp_soc_set_interrupt_mode(soc);
15427 	if (soc->cdp_soc.ol_ops->get_con_mode &&
15428 	    soc->cdp_soc.ol_ops->get_con_mode() ==
15429 	    QDF_GLOBAL_MONITOR_MODE)
15430 		is_monitor_mode = true;
15431 
15432 	num_dp_msi = dp_get_num_msi_available(soc, soc->intr_mode);
15433 	if (num_dp_msi < 0) {
15434 		dp_init_err("%pK: dp_interrupt assignment failed", soc);
15435 		goto fail3;
15436 	}
15437 
15438 	wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, num_dp_msi,
15439 				     soc->intr_mode, is_monitor_mode);
15440 
15441 	/* initialize WBM_IDLE_LINK ring */
15442 	if (dp_hw_link_desc_ring_init(soc)) {
15443 		dp_init_err("%pK: dp_hw_link_desc_ring_init failed", soc);
15444 		goto fail3;
15445 	}
15446 
15447 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
15448 
15449 	if (dp_soc_srng_init(soc)) {
15450 		dp_init_err("%pK: dp_soc_srng_init failed", soc);
15451 		goto fail4;
15452 	}
15453 
15454 	if (htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
15455 			       htt_get_htc_handle(htt_soc),
15456 			       soc->hal_soc, soc->osdev) == NULL)
15457 		goto fail5;
15458 
15459 	/* Initialize descriptors in TCL Rings */
15460 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
15461 		hal_tx_init_data_ring(soc->hal_soc,
15462 				      soc->tcl_data_ring[i].hal_srng);
15463 	}
15464 
15465 	if (dp_soc_tx_desc_sw_pools_init(soc)) {
15466 		dp_init_err("%pK: dp_tx_soc_attach failed", soc);
15467 		goto fail6;
15468 	}
15469 
15470 	if (soc->arch_ops.txrx_soc_ppeds_start) {
15471 		if (soc->arch_ops.txrx_soc_ppeds_start(soc)) {
15472 			dp_init_err("%pK: ppeds start failed", soc);
15473 			goto fail7;
15474 		}
15475 	}
15476 
15477 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
15478 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
15479 	soc->cce_disable = false;
15480 	soc->max_ast_ageout_count = MAX_AST_AGEOUT_COUNT;
15481 
15482 	soc->sta_mode_search_policy = DP_TX_ADDR_SEARCH_ADDR_POLICY;
15483 	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
15484 	qdf_spinlock_create(&soc->vdev_map_lock);
15485 	qdf_atomic_init(&soc->num_tx_outstanding);
15486 	qdf_atomic_init(&soc->num_tx_exception);
15487 	soc->num_tx_allowed =
15488 		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);
15489 
15490 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
15491 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
15492 				CDP_CFG_MAX_PEER_ID);
15493 
15494 		if (ret != -EINVAL)
15495 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
15496 
15497 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
15498 				CDP_CFG_CCE_DISABLE);
15499 		if (ret == 1)
15500 			soc->cce_disable = true;
15501 	}
15502 
15503 	/*
15504 	 * Skip registering hw ring interrupts for WMAC2 on IPQ6018 and
15505 	 * IPQ5018; WMAC2 is not present on these platforms.
15506 	 */
15507 	if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018 ||
15508 	    soc->disable_mac2_intr)
15509 		dp_soc_disable_unused_mac_intr_mask(soc, 0x2);
15510 
15511 	/*
15512 	 * Skip registering hw ring interrupts for WMAC1 on IPQ5018;
15513 	 * WMAC1 is not present on this platform.
15514 	 */
15515 	if (soc->disable_mac1_intr)
15516 		dp_soc_disable_unused_mac_intr_mask(soc, 0x1);
15517 
15518 	/* setup the global rx defrag waitlist */
15519 	TAILQ_INIT(&soc->rx.defrag.waitlist);
15520 	soc->rx.defrag.timeout_ms =
15521 		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
15522 	soc->rx.defrag.next_flush_ms = 0;
15523 	soc->rx.flags.defrag_timeout_check =
15524 		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
15525 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
15526 
15527 	dp_monitor_soc_init(soc);
15528 
15529 	qdf_atomic_set(&soc->cmn_init_done, 1);
15530 
15531 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
15532 
15533 	qdf_spinlock_create(&soc->ast_lock);
15534 	dp_peer_mec_spinlock_create(soc);
15535 
15536 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
15537 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
15538 	INIT_RX_HW_STATS_LOCK(soc);
15539 
15540 	qdf_nbuf_queue_init(&soc->invalid_buf_queue);
15541 	/* fill the tx/rx cpu ring map */
15542 	dp_soc_set_txrx_ring_map(soc);
15543 
15544 	TAILQ_INIT(&soc->inactive_peer_list);
15545 	qdf_spinlock_create(&soc->inactive_peer_list_lock);
15546 	TAILQ_INIT(&soc->inactive_vdev_list);
15547 	qdf_spinlock_create(&soc->inactive_vdev_list_lock);
15548 	qdf_spinlock_create(&soc->htt_stats.lock);
15549 	/* initialize work queue for stats processing */
15550 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
15551 
15552 	dp_reo_desc_deferred_freelist_create(soc);
15553 
15554 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
15555 		qdf_dma_mem_stats_read(),
15556 		qdf_heap_mem_stats_read(),
15557 		qdf_skb_total_mem_stats_read());
15558 
15559 	soc->vdev_stats_id_map = 0;
15560 
15561 	return soc;
15562 fail7:
15563 	dp_soc_tx_desc_sw_pools_deinit(soc);
15564 fail6:
15565 	htt_soc_htc_dealloc(soc->htt_handle);
15566 fail5:
15567 	dp_soc_srng_deinit(soc);
15568 fail4:
15569 	dp_hw_link_desc_ring_deinit(soc);
15570 fail3:
15571 	htt_htc_pkt_pool_free(htt_soc);
15572 fail2:
15573 	htt_soc_detach(htt_soc);
15574 fail1:
15575 	soc->arch_ops.txrx_soc_deinit(soc);
15576 fail0:
15577 	return NULL;
15578 }
15579 
15580 /**
15581  * dp_soc_init_wifi3() - Initialize txrx SOC
15582  * @soc: Opaque DP SOC handle
15583  * @ctrl_psoc: Opaque SOC handle from control plane(Unused)
15584  * @hif_handle: Opaque HIF handle
15585  * @htc_handle: Opaque HTC handle
15586  * @qdf_osdev: QDF device (Unused)
15587  * @ol_ops: Offload Operations (Unused)
15588  * @device_id: Device ID (Unused)
15589  *
15590  * Return: DP SOC handle on success, NULL on failure
15591  */
15592 void *dp_soc_init_wifi3(struct cdp_soc_t *soc,
15593 			struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
15594 			struct hif_opaque_softc *hif_handle,
15595 			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
15596 			struct ol_if_ops *ol_ops, uint16_t device_id)
15597 {
15598 	return dp_soc_init((struct dp_soc *)soc, htc_handle, hif_handle);
15599 }
15600 
15601 #endif
15602 
15603 /*
15604  * dp_get_pdev_for_mac_id() - Return pdev for mac_id
15605  *
15606  * @soc: handle to DP soc
15607  * @mac_id: MAC id
15608  *
15609  * Return: pdev corresponding to MAC
15610  */
15611 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
15612 {
15613 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
15614 		return (mac_id < MAX_PDEV_CNT) ? soc->pdev_list[mac_id] : NULL;
15615 
15616 	/* Typically for MCL, as there is only 1 PDEV */
15617 	return soc->pdev_list[0];
15618 }
15619 
15620 void dp_update_num_mac_rings_for_dbs(struct dp_soc *soc,
15621 				     int *max_mac_rings)
15622 {
15623 	bool dbs_enable = false;
15624 
15625 	if (soc->cdp_soc.ol_ops->is_hw_dbs_capable)
15626 		dbs_enable = soc->cdp_soc.ol_ops->
15627 				is_hw_dbs_capable((void *)soc->ctrl_psoc);
15628 
15629 	*max_mac_rings = dbs_enable ? (*max_mac_rings) : 1;
15630 	dp_info("dbs_enable %d, max_mac_rings %d",
15631 		dbs_enable, *max_mac_rings);
15632 }
15633 
15634 qdf_export_symbol(dp_update_num_mac_rings_for_dbs);
15635 
15636 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
15637 /**
15638  * dp_get_cfr_rcc() - get cfr rcc config
15639  * @soc_hdl: Datapath soc handle
15640  * @pdev_id: id of objmgr pdev
15641  *
15642  * Return: true/false based on cfr mode setting
15643  */
15644 static
15645 bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
15646 {
15647 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15648 	struct dp_pdev *pdev = NULL;
15649 
15650 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15651 	if (!pdev) {
15652 		dp_err("pdev is NULL");
15653 		return false;
15654 	}
15655 
15656 	return pdev->cfr_rcc_mode;
15657 }
15658 
15659 /**
15660  * dp_set_cfr_rcc() - enable/disable cfr rcc config
15661  * @soc_hdl: Datapath soc handle
15662  * @pdev_id: id of objmgr pdev
15663  * @enable: Enable/Disable cfr rcc mode
15664  *
15665  * Return: none
15666  */
15667 static
15668 void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, bool enable)
15669 {
15670 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15671 	struct dp_pdev *pdev = NULL;
15672 
15673 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15674 	if (!pdev) {
15675 		dp_err("pdev is NULL");
15676 		return;
15677 	}
15678 
15679 	pdev->cfr_rcc_mode = enable;
15680 }
15681 
15682 /*
15683  * dp_get_cfr_dbg_stats - Get the debug statistics for CFR
15684  * @soc_hdl: Datapath soc handle
15685  * @pdev_id: id of data path pdev handle
15686  * @cfr_rcc_stats: CFR RCC debug statistics buffer
15687  *
15688  * Return: none
15689  */
15690 static inline void
15691 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
15692 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats)
15693 {
15694 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15695 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15696 
15697 	if (!pdev) {
15698 		dp_err("Invalid pdev");
15699 		return;
15700 	}
15701 
15702 	qdf_mem_copy(cfr_rcc_stats, &pdev->stats.rcc,
15703 		     sizeof(struct cdp_cfr_rcc_stats));
15704 }
15705 
15706 /*
15707  * dp_clear_cfr_dbg_stats - Clear debug statistics for CFR
15708  * @soc_hdl: Datapath soc handle
15709  * @pdev_id: id of data path pdev handle
15710  *
15711  * Return: none
15712  */
15713 static void dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl,
15714 				   uint8_t pdev_id)
15715 {
15716 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15717 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15718 
15719 	if (!pdev) {
15720 		dp_err("dp pdev is NULL");
15721 		return;
15722 	}
15723 
15724 	qdf_mem_zero(&pdev->stats.rcc, sizeof(pdev->stats.rcc));
15725 }
15726 #endif
15727 
15728 /**
15729  * dp_bucket_index() - Return index from array
15730  *
15731  * @delay: delay measured
15732  * @array: array used to index corresponding delay
15733  * @delay_in_us: flag to indicate whether the delay in ms or us
15734  *
15735  * Return: index
15736  */
15737 static uint8_t
15738 dp_bucket_index(uint32_t delay, uint16_t *array, bool delay_in_us)
15739 {
15740 	uint8_t i = CDP_DELAY_BUCKET_0;
15741 	uint32_t thr_low, thr_high;
15742 
15743 	for (; i < CDP_DELAY_BUCKET_MAX - 1; i++) {
15744 		thr_low = array[i];
15745 		thr_high = array[i + 1];
15746 
15747 		if (delay_in_us) {
15748 			thr_low = thr_low * USEC_PER_MSEC;
15749 			thr_high = thr_high * USEC_PER_MSEC;
15750 		}
15751 		if (delay >= thr_low && delay <= thr_high)
15752 			return i;
15753 	}
15754 	return (CDP_DELAY_BUCKET_MAX - 1);
15755 }
15756 
15757 #ifdef HW_TX_DELAY_STATS_ENABLE
15758 /*
15759  * cdp_fw_to_hw_delay_range
15760  * Fw to hw delay ranges in milliseconds
15761  */
15762 static uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
15763 	0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
15764 #else
15765 static uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
15766 	0, 2, 4, 6, 8, 10, 20, 30, 40, 50, 100, 250, 500};
15767 #endif
15768 
15769 /*
15770  * cdp_sw_enq_delay_range
15771  * Software enqueue delay ranges in milliseconds
15772  */
15773 static uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
15774 	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
15775 
15776 /*
15777  * cdp_intfrm_delay_range
15778  * Interframe delay ranges in milliseconds
15779  */
15780 static uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
15781 	0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};
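/*
 * Worked example (comment only): with delay_in_us == false,
 * dp_bucket_index(7, cdp_sw_enq_delay, false) stops at i = 6 because
 * thr_low = 6 and thr_high = 7 bracket the value, so a 7 ms software
 * enqueue delay lands in bucket 6. A delay beyond the last threshold,
 * e.g. 600 ms against cdp_fw_to_hw_delay, falls through the loop and is
 * counted in bucket CDP_DELAY_BUCKET_MAX - 1.
 */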
15782 
15783 /**
15784  * dp_fill_delay_buckets() - Fill delay statistics bucket for each
15785  *				type of delay
15786  * @tstats: tid tx stats
15787  * @rstats: tid rx stats
15788  * @delay: delay in ms
15789  * @tid: tid value
15790  * @mode: type of tx delay mode
15791  * @ring_id: ring number
15792  * @delay_in_us: flag to indicate whether the delay is in ms or us
15793  *
15794  * Return: pointer to cdp_delay_stats structure
15795  */
15796 static struct cdp_delay_stats *
15797 dp_fill_delay_buckets(struct cdp_tid_tx_stats *tstats,
15798 		      struct cdp_tid_rx_stats *rstats, uint32_t delay,
15799 		      uint8_t tid, uint8_t mode, uint8_t ring_id,
15800 		      bool delay_in_us)
15801 {
15802 	uint8_t delay_index = 0;
15803 	struct cdp_delay_stats *stats = NULL;
15804 
15805 	/*
15806 	 * Update delay stats in proper bucket
15807 	 */
15808 	switch (mode) {
15809 	/* Software Enqueue delay ranges */
15810 	case CDP_DELAY_STATS_SW_ENQ:
15811 		if (!tstats)
15812 			break;
15813 
15814 		delay_index = dp_bucket_index(delay, cdp_sw_enq_delay,
15815 					      delay_in_us);
15816 		tstats->swq_delay.delay_bucket[delay_index]++;
15817 		stats = &tstats->swq_delay;
15818 		break;
15819 
15820 	/* Tx Completion delay ranges */
15821 	case CDP_DELAY_STATS_FW_HW_TRANSMIT:
15822 		if (!tstats)
15823 			break;
15824 
15825 		delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay,
15826 					      delay_in_us);
15827 		tstats->hwtx_delay.delay_bucket[delay_index]++;
15828 		stats = &tstats->hwtx_delay;
15829 		break;
15830 
15831 	/* Interframe tx delay ranges */
15832 	case CDP_DELAY_STATS_TX_INTERFRAME:
15833 		if (!tstats)
15834 			break;
15835 
15836 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
15837 					      delay_in_us);
15838 		tstats->intfrm_delay.delay_bucket[delay_index]++;
15839 		stats = &tstats->intfrm_delay;
15840 		break;
15841 
15842 	/* Interframe rx delay ranges */
15843 	case CDP_DELAY_STATS_RX_INTERFRAME:
15844 		if (!rstats)
15845 			break;
15846 
15847 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
15848 					      delay_in_us);
15849 		rstats->intfrm_delay.delay_bucket[delay_index]++;
15850 		stats = &rstats->intfrm_delay;
15851 		break;
15852 
15853 	/* Ring reap to indication to network stack */
15854 	case CDP_DELAY_STATS_REAP_STACK:
15855 		if (!rstats)
15856 			break;
15857 
15858 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
15859 					      delay_in_us);
15860 		rstats->to_stack_delay.delay_bucket[delay_index]++;
15861 		stats = &rstats->to_stack_delay;
15862 		break;
15863 	default:
15864 		dp_debug("Incorrect delay mode: %d", mode);
15865 	}
15866 
15867 	return stats;
15868 }
15869 
15870 void dp_update_delay_stats(struct cdp_tid_tx_stats *tstats,
15871 			   struct cdp_tid_rx_stats *rstats, uint32_t delay,
15872 			   uint8_t tid, uint8_t mode, uint8_t ring_id,
15873 			   bool delay_in_us)
15874 {
15875 	struct cdp_delay_stats *dstats = NULL;
15876 
15877 	/*
15878 	 * Delay ranges are different for different delay modes
15879 	 * Get the correct index to update delay bucket
15880 	 */
15881 	dstats = dp_fill_delay_buckets(tstats, rstats, delay, tid, mode,
15882 				       ring_id, delay_in_us);
15883 	if (qdf_unlikely(!dstats))
15884 		return;
15885 
15886 	if (delay != 0) {
		/*
		 * Compute minimum, average and maximum delay
		 */
15891 		if (delay < dstats->min_delay)
15892 			dstats->min_delay = delay;
15893 
15894 		if (delay > dstats->max_delay)
15895 			dstats->max_delay = delay;
15896 
		/*
		 * Running average over the delay measured till now;
		 * avg = (avg + delay) / 2 weights recent samples more
		 */
15900 		if (!dstats->avg_delay)
15901 			dstats->avg_delay = delay;
15902 		else
15903 			dstats->avg_delay = ((delay + dstats->avg_delay) >> 1);
15904 	}
15905 }
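
/*
 * Illustrative sketch (dp_example_record_swq_delay is a hypothetical
 * helper, not part of the driver): recording a single 5 ms software
 * enqueue delay sample for TID 0 on ring 0. rstats is NULL because
 * CDP_DELAY_STATS_SW_ENQ only updates the tx-side histogram.
 */
static inline void
dp_example_record_swq_delay(struct cdp_tid_tx_stats *tstats)
{
	dp_update_delay_stats(tstats, NULL, 5, 0,
			      CDP_DELAY_STATS_SW_ENQ, 0, false);
}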
15906 
15907 /**
 * dp_get_peer_mac_list() - get the peer MAC list of a vdev
 * @soc: Datapath soc handle
 * @vdev_id: vdev id
 * @newmac: Table to fill the client MAC addresses into
 * @mac_cnt: Maximum number of MAC entries in @newmac
 * @limit: if true, return 0 when the peer count exceeds @mac_cnt
 *
 * Return: number of client MAC addresses copied
15916  */
15917 uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
15918 			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
15919 			      u_int16_t mac_cnt, bool limit)
15920 {
15921 	struct dp_soc *dp_soc = (struct dp_soc *)soc;
15922 	struct dp_vdev *vdev =
15923 		dp_vdev_get_ref_by_id(dp_soc, vdev_id, DP_MOD_ID_CDP);
15924 	struct dp_peer *peer;
15925 	uint16_t new_mac_cnt = 0;
15926 
15927 	if (!vdev)
15928 		return new_mac_cnt;
15929 
	if (limit && (vdev->num_peers > mac_cnt)) {
		/* drop the vdev reference taken above before bailing out */
		dp_vdev_unref_delete(dp_soc, vdev, DP_MOD_ID_CDP);
		return 0;
	}
15932 
15933 	qdf_spin_lock_bh(&vdev->peer_list_lock);
15934 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
15935 		if (peer->bss_peer)
15936 			continue;
15937 		if (new_mac_cnt < mac_cnt) {
15938 			WLAN_ADDR_COPY(newmac[new_mac_cnt], peer->mac_addr.raw);
15939 			new_mac_cnt++;
15940 		}
15941 	}
15942 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
15943 	dp_vdev_unref_delete(dp_soc, vdev, DP_MOD_ID_CDP);
15944 	return new_mac_cnt;
15945 }
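
/*
 * Illustrative sketch (DP_EXAMPLE_MAX_CLIENTS and the helper are
 * hypothetical, not part of the driver): fetching up to 8 client MACs
 * of a vdev into a stack table. With limit == false, a longer peer
 * list is truncated instead of rejected.
 */
#define DP_EXAMPLE_MAX_CLIENTS 8
static inline uint16_t
dp_example_fetch_client_macs(ol_txrx_soc_handle soc, uint8_t vdev_id)
{
	u_int8_t macs[DP_EXAMPLE_MAX_CLIENTS][QDF_MAC_ADDR_SIZE];

	return dp_get_peer_mac_list(soc, vdev_id, macs,
				    DP_EXAMPLE_MAX_CLIENTS, false);
}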
15946 
15947 uint16_t dp_get_peer_id(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *mac)
15948 {
15949 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
15950 						       mac, 0, vdev_id,
15951 						       DP_MOD_ID_CDP);
15952 	uint16_t peer_id = HTT_INVALID_PEER;
15953 
15954 	if (!peer) {
15955 		dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
15956 		return peer_id;
15957 	}
15958 
15959 	peer_id = peer->peer_id;
15960 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
15961 	return peer_id;
15962 }
15963 
15964 #ifdef QCA_SUPPORT_WDS_EXTENDED
15965 QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc,
15966 				  uint8_t vdev_id,
15967 				  uint8_t *mac,
15968 				  ol_txrx_rx_fp rx,
15969 				  ol_osif_peer_handle osif_peer)
15970 {
15971 	struct dp_txrx_peer *txrx_peer = NULL;
15972 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
15973 						       mac, 0, vdev_id,
15974 						       DP_MOD_ID_CDP);
15975 	QDF_STATUS status = QDF_STATUS_E_INVAL;
15976 
15977 	if (!peer) {
15978 		dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
15979 		return status;
15980 	}
15981 
15982 	txrx_peer = dp_get_txrx_peer(peer);
15983 	if (!txrx_peer) {
15984 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
15985 		return status;
15986 	}
15987 
15988 	if (rx) {
15989 		if (txrx_peer->osif_rx) {
15990 			status = QDF_STATUS_E_ALREADY;
15991 		} else {
15992 			txrx_peer->osif_rx = rx;
15993 			status = QDF_STATUS_SUCCESS;
15994 		}
15995 	} else {
15996 		if (txrx_peer->osif_rx) {
15997 			txrx_peer->osif_rx = NULL;
15998 			status = QDF_STATUS_SUCCESS;
15999 		} else {
16000 			status = QDF_STATUS_E_ALREADY;
16001 		}
16002 	}
16003 
16004 	txrx_peer->wds_ext.osif_peer = osif_peer;
16005 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
16006 
16007 	return status;
16008 }
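
/*
 * Illustrative sketch (dp_example_wds_ext_swap_rx is a hypothetical
 * helper, not part of the driver): replacing the OSIF rx callback of a
 * WDS-ext peer. Passing rx == NULL unregisters the current callback,
 * so a swap is an unregister followed by a register.
 */
static inline QDF_STATUS
dp_example_wds_ext_swap_rx(ol_txrx_soc_handle soc, uint8_t vdev_id,
			   uint8_t *mac, ol_txrx_rx_fp new_rx,
			   ol_osif_peer_handle osif_peer)
{
	/* Clear any existing callback; E_ALREADY means none was set */
	(void)dp_wds_ext_set_peer_rx(soc, vdev_id, mac, NULL, osif_peer);

	/* Install the new callback */
	return dp_wds_ext_set_peer_rx(soc, vdev_id, mac, new_rx, osif_peer);
}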
16009 #endif /* QCA_SUPPORT_WDS_EXTENDED */
16010 
16011 /**
 * dp_pdev_srng_deinit() - de-initialize all pdev srng rings including
16013  *			   monitor rings
16014  * @pdev: Datapath pdev handle
16015  *
16016  */
16017 static void dp_pdev_srng_deinit(struct dp_pdev *pdev)
16018 {
16019 	struct dp_soc *soc = pdev->soc;
16020 	uint8_t i;
16021 
16022 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled)
16023 		dp_srng_deinit(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
16024 			       RXDMA_BUF,
16025 			       pdev->lmac_id);
16026 
16027 	if (!soc->rxdma2sw_rings_not_supported) {
16028 		for (i = 0;
16029 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
16030 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
16031 								 pdev->pdev_id);
16032 
16033 			wlan_minidump_remove(soc->rxdma_err_dst_ring[lmac_id].
16034 							base_vaddr_unaligned,
16035 					     soc->rxdma_err_dst_ring[lmac_id].
16036 								alloc_size,
16037 					     soc->ctrl_psoc,
16038 					     WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
16039 					     "rxdma_err_dst");
16040 			dp_srng_deinit(soc, &soc->rxdma_err_dst_ring[lmac_id],
16041 				       RXDMA_DST, lmac_id);
16042 		}
16043 	}
16046 }
16047 
16048 /**
16049  * dp_pdev_srng_init() - initialize all pdev srng rings including
16050  *			   monitor rings
16051  * @pdev: Datapath pdev handle
16052  *
 * Return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_NOMEM on failure
16055  */
16056 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev)
16057 {
16058 	struct dp_soc *soc = pdev->soc;
16059 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
16060 	uint32_t i;
16061 
16062 	soc_cfg_ctx = soc->wlan_cfg_ctx;
16063 
16064 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
16065 		if (dp_srng_init(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
16066 				 RXDMA_BUF, 0, pdev->lmac_id)) {
16067 			dp_init_err("%pK: dp_srng_init failed rx refill ring",
16068 				    soc);
16069 			goto fail1;
16070 		}
16071 	}
16072 
16073 	/* LMAC RxDMA to SW Rings configuration */
16074 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
16075 		/* Only valid for MCL */
16076 		pdev = soc->pdev_list[0];
16077 
16078 	if (!soc->rxdma2sw_rings_not_supported) {
16079 		for (i = 0;
16080 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
16081 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
16082 								 pdev->pdev_id);
16083 			struct dp_srng *srng =
16084 				&soc->rxdma_err_dst_ring[lmac_id];
16085 
16086 			if (srng->hal_srng)
16087 				continue;
16088 
16089 			if (dp_srng_init(soc, srng, RXDMA_DST, 0, lmac_id)) {
16090 				dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring",
16091 					    soc);
16092 				goto fail1;
16093 			}
16094 			wlan_minidump_log(soc->rxdma_err_dst_ring[lmac_id].
16095 						base_vaddr_unaligned,
16096 					  soc->rxdma_err_dst_ring[lmac_id].
16097 						alloc_size,
16098 					  soc->ctrl_psoc,
16099 					  WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
16100 					  "rxdma_err_dst");
16101 		}
16102 	}
16103 	return QDF_STATUS_SUCCESS;
16104 
16105 fail1:
16106 	dp_pdev_srng_deinit(pdev);
16107 	return QDF_STATUS_E_NOMEM;
16108 }
16109 
16110 /**
16111  * dp_pdev_srng_free() - free all pdev srng rings including monitor rings
 * @pdev: Datapath pdev handle
16113  *
16114  */
16115 static void dp_pdev_srng_free(struct dp_pdev *pdev)
16116 {
16117 	struct dp_soc *soc = pdev->soc;
16118 	uint8_t i;
16119 
16120 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled)
16121 		dp_srng_free(soc, &soc->rx_refill_buf_ring[pdev->lmac_id]);
16122 
16123 	if (!soc->rxdma2sw_rings_not_supported) {
16124 		for (i = 0;
16125 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
16126 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
16127 								 pdev->pdev_id);
16128 
16129 			dp_srng_free(soc, &soc->rxdma_err_dst_ring[lmac_id]);
16130 		}
16131 	}
16132 }
16133 
16134 /**
16135  * dp_pdev_srng_alloc() - allocate memory for all pdev srng rings including
16136  *			  monitor rings
 * @pdev: Datapath pdev handle
 *
 * Return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_NOMEM on failure
16141  */
16142 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev)
16143 {
16144 	struct dp_soc *soc = pdev->soc;
16145 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
16146 	uint32_t ring_size;
16147 	uint32_t i;
16148 
16149 	soc_cfg_ctx = soc->wlan_cfg_ctx;
16150 
16151 	ring_size = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
16152 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
16153 		if (dp_srng_alloc(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
16154 				  RXDMA_BUF, ring_size, 0)) {
16155 			dp_init_err("%pK: dp_srng_alloc failed rx refill ring",
16156 				    soc);
16157 			goto fail1;
16158 		}
16159 	}
16160 
16161 	ring_size = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
16162 	/* LMAC RxDMA to SW Rings configuration */
16163 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
16164 		/* Only valid for MCL */
16165 		pdev = soc->pdev_list[0];
16166 
16167 	if (!soc->rxdma2sw_rings_not_supported) {
16168 		for (i = 0;
16169 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
16170 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
16171 								 pdev->pdev_id);
16172 			struct dp_srng *srng =
16173 				&soc->rxdma_err_dst_ring[lmac_id];
16174 
16175 			if (srng->base_vaddr_unaligned)
16176 				continue;
16177 
16178 			if (dp_srng_alloc(soc, srng, RXDMA_DST, ring_size, 0)) {
16179 				dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring",
16180 					    soc);
16181 				goto fail1;
16182 			}
16183 		}
16184 	}
16185 
16186 	return QDF_STATUS_SUCCESS;
16187 fail1:
16188 	dp_pdev_srng_free(pdev);
16189 	return QDF_STATUS_E_NOMEM;
16190 }
16191 
16192 #ifndef WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG
16193 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
16194 {
16195 	QDF_STATUS status;
16196 
16197 	if (soc->init_tcl_cmd_cred_ring) {
16198 		status =  dp_srng_init(soc, &soc->tcl_cmd_credit_ring,
16199 				       TCL_CMD_CREDIT, 0, 0);
16200 		if (QDF_IS_STATUS_ERROR(status))
16201 			return status;
16202 
16203 		wlan_minidump_log(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
16204 				  soc->tcl_cmd_credit_ring.alloc_size,
16205 				  soc->ctrl_psoc,
16206 				  WLAN_MD_DP_SRNG_TCL_CMD,
				  "tcl_cmd_cred_ring");
16208 	}
16209 
16210 	return QDF_STATUS_SUCCESS;
16211 }
16212 
16213 static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
16214 {
16215 	if (soc->init_tcl_cmd_cred_ring) {
16216 		wlan_minidump_remove(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
16217 				     soc->tcl_cmd_credit_ring.alloc_size,
16218 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_CMD,
				     "tcl_cmd_cred_ring");
16220 		dp_srng_deinit(soc, &soc->tcl_cmd_credit_ring,
16221 			       TCL_CMD_CREDIT, 0);
16222 	}
16223 }
16224 
16225 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
16226 {
16227 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
16228 	uint32_t entries;
16229 	QDF_STATUS status;
16230 
16231 	entries = wlan_cfg_get_dp_soc_tcl_cmd_credit_ring_size(soc_cfg_ctx);
16232 	if (soc->init_tcl_cmd_cred_ring) {
16233 		status = dp_srng_alloc(soc, &soc->tcl_cmd_credit_ring,
16234 				       TCL_CMD_CREDIT, entries, 0);
16235 		if (QDF_IS_STATUS_ERROR(status))
16236 			return status;
16237 	}
16238 
16239 	return QDF_STATUS_SUCCESS;
16240 }
16241 
16242 static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
16243 {
16244 	if (soc->init_tcl_cmd_cred_ring)
16245 		dp_srng_free(soc, &soc->tcl_cmd_credit_ring);
16246 }
16247 
16248 static inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
16249 {
16250 	if (soc->init_tcl_cmd_cred_ring)
16251 		hal_tx_init_cmd_credit_ring(soc->hal_soc,
16252 					    soc->tcl_cmd_credit_ring.hal_srng);
16253 }
16254 #else
16255 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
16256 {
16257 	return QDF_STATUS_SUCCESS;
16258 }
16259 
16260 static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
16261 {
16262 }
16263 
16264 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
16265 {
16266 	return QDF_STATUS_SUCCESS;
16267 }
16268 
16269 static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
16270 {
16271 }
16272 
16273 static inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
16274 {
16275 }
16276 #endif
16277 
16278 #ifndef WLAN_DP_DISABLE_TCL_STATUS_SRNG
16279 static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
16280 {
16281 	QDF_STATUS status;
16282 
16283 	status =  dp_srng_init(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0);
16284 	if (QDF_IS_STATUS_ERROR(status))
16285 		return status;
16286 
16287 	wlan_minidump_log(soc->tcl_status_ring.base_vaddr_unaligned,
16288 			  soc->tcl_status_ring.alloc_size,
16289 			  soc->ctrl_psoc,
16290 			  WLAN_MD_DP_SRNG_TCL_STATUS,
			  "tcl_status_ring");
16292 
16293 	return QDF_STATUS_SUCCESS;
16294 }
16295 
16296 static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
16297 {
16298 	wlan_minidump_remove(soc->tcl_status_ring.base_vaddr_unaligned,
16299 			     soc->tcl_status_ring.alloc_size,
16300 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_STATUS,
			     "tcl_status_ring");
16302 	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
16303 }
16304 
16305 static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
16306 {
16307 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
16308 	uint32_t entries;
16309 	QDF_STATUS status = QDF_STATUS_SUCCESS;
16310 
16311 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
16312 	status = dp_srng_alloc(soc, &soc->tcl_status_ring,
16313 			       TCL_STATUS, entries, 0);
16314 
16315 	return status;
16316 }
16317 
16318 static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
16319 {
16320 	dp_srng_free(soc, &soc->tcl_status_ring);
16321 }
16322 #else
16323 static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
16324 {
16325 	return QDF_STATUS_SUCCESS;
16326 }
16327 
16328 static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
16329 {
16330 }
16331 
16332 static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
16333 {
16334 	return QDF_STATUS_SUCCESS;
16335 }
16336 
16337 static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
16338 {
16339 }
16340 #endif
16341 
16342 /**
16343  * dp_soc_srng_deinit() - de-initialize soc srng rings
16344  * @soc: Datapath soc handle
16345  *
16346  */
16347 static void dp_soc_srng_deinit(struct dp_soc *soc)
16348 {
16349 	uint32_t i;
16350 
16351 	if (soc->arch_ops.txrx_soc_srng_deinit)
16352 		soc->arch_ops.txrx_soc_srng_deinit(soc);
16353 
16354 	/* Free the ring memories */
16355 	/* Common rings */
16356 	wlan_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
16357 			     soc->wbm_desc_rel_ring.alloc_size,
16358 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_WBM_DESC_REL,
16359 			     "wbm_desc_rel_ring");
16360 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
16361 
16362 	/* Tx data rings */
16363 	for (i = 0; i < soc->num_tcl_data_rings; i++)
16364 		dp_deinit_tx_pair_by_index(soc, i);
16365 
16366 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
16367 		dp_deinit_tx_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
16368 		dp_ipa_deinit_alt_tx_ring(soc);
16369 	}
16370 
16371 	/* TCL command and status rings */
16372 	dp_soc_tcl_cmd_cred_srng_deinit(soc);
16373 	dp_soc_tcl_status_srng_deinit(soc);
16374 
16375 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
16376 		/* TODO: Get number of rings and ring sizes
16377 		 * from wlan_cfg
16378 		 */
16379 		wlan_minidump_remove(soc->reo_dest_ring[i].base_vaddr_unaligned,
16380 				     soc->reo_dest_ring[i].alloc_size,
16381 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_DEST,
16382 				     "reo_dest_ring");
16383 		dp_srng_deinit(soc, &soc->reo_dest_ring[i], REO_DST, i);
16384 	}
16385 
16386 	/* REO reinjection ring */
16387 	wlan_minidump_remove(soc->reo_reinject_ring.base_vaddr_unaligned,
16388 			     soc->reo_reinject_ring.alloc_size,
16389 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_REINJECT,
16390 			     "reo_reinject_ring");
16391 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
16392 
16393 	/* Rx release ring */
16394 	wlan_minidump_remove(soc->rx_rel_ring.base_vaddr_unaligned,
16395 			     soc->rx_rel_ring.alloc_size,
16396 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_RX_REL,
16397 			     "reo_release_ring");
16398 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
16399 
16400 	/* Rx exception ring */
16401 	/* TODO: Better to store ring_type and ring_num in
16402 	 * dp_srng during setup
16403 	 */
16404 	wlan_minidump_remove(soc->reo_exception_ring.base_vaddr_unaligned,
16405 			     soc->reo_exception_ring.alloc_size,
16406 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_EXCEPTION,
16407 			     "reo_exception_ring");
16408 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
16409 
16410 	/* REO command and status rings */
16411 	wlan_minidump_remove(soc->reo_cmd_ring.base_vaddr_unaligned,
16412 			     soc->reo_cmd_ring.alloc_size,
16413 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_CMD,
16414 			     "reo_cmd_ring");
16415 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
16416 	wlan_minidump_remove(soc->reo_status_ring.base_vaddr_unaligned,
16417 			     soc->reo_status_ring.alloc_size,
16418 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_STATUS,
16419 			     "reo_status_ring");
16420 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
16421 }
16422 
16423 /**
16424  * dp_soc_srng_init() - Initialize soc level srng rings
16425  * @soc: Datapath soc handle
16426  *
 * Return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_FAILURE on failure
16429  */
16430 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc)
16431 {
16432 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
16433 	uint8_t i;
16434 	uint8_t wbm2_sw_rx_rel_ring_id;
16435 
16436 	soc_cfg_ctx = soc->wlan_cfg_ctx;
16437 
16438 	dp_enable_verbose_debug(soc);
16439 
16440 	/* WBM descriptor release ring */
16441 	if (dp_srng_init(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0)) {
16442 		dp_init_err("%pK: dp_srng_init failed for wbm_desc_rel_ring", soc);
16443 		goto fail1;
16444 	}
16445 
16446 	wlan_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
16447 			  soc->wbm_desc_rel_ring.alloc_size,
16448 			  soc->ctrl_psoc,
16449 			  WLAN_MD_DP_SRNG_WBM_DESC_REL,
16450 			  "wbm_desc_rel_ring");
16451 
16452 	/* TCL command and status rings */
16453 	if (dp_soc_tcl_cmd_cred_srng_init(soc)) {
16454 		dp_init_err("%pK: dp_srng_init failed for tcl_cmd_ring", soc);
16455 		goto fail1;
16456 	}
16457 
16458 	if (dp_soc_tcl_status_srng_init(soc)) {
16459 		dp_init_err("%pK: dp_srng_init failed for tcl_status_ring", soc);
16460 		goto fail1;
16461 	}
16462 
16463 	/* REO reinjection ring */
16464 	if (dp_srng_init(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0)) {
16465 		dp_init_err("%pK: dp_srng_init failed for reo_reinject_ring", soc);
16466 		goto fail1;
16467 	}
16468 
16469 	wlan_minidump_log(soc->reo_reinject_ring.base_vaddr_unaligned,
16470 			  soc->reo_reinject_ring.alloc_size,
16471 			  soc->ctrl_psoc,
16472 			  WLAN_MD_DP_SRNG_REO_REINJECT,
16473 			  "reo_reinject_ring");
16474 
16475 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc_cfg_ctx);
16476 	/* Rx release ring */
16477 	if (dp_srng_init(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
16478 			 wbm2_sw_rx_rel_ring_id, 0)) {
16479 		dp_init_err("%pK: dp_srng_init failed for rx_rel_ring", soc);
16480 		goto fail1;
16481 	}
16482 
16483 	wlan_minidump_log(soc->rx_rel_ring.base_vaddr_unaligned,
16484 			  soc->rx_rel_ring.alloc_size,
16485 			  soc->ctrl_psoc,
16486 			  WLAN_MD_DP_SRNG_RX_REL,
16487 			  "reo_release_ring");
16488 
16489 	/* Rx exception ring */
16490 	if (dp_srng_init(soc, &soc->reo_exception_ring,
16491 			 REO_EXCEPTION, 0, MAX_REO_DEST_RINGS)) {
16492 		dp_init_err("%pK: dp_srng_init failed - reo_exception", soc);
16493 		goto fail1;
16494 	}
16495 
16496 	wlan_minidump_log(soc->reo_exception_ring.base_vaddr_unaligned,
16497 			  soc->reo_exception_ring.alloc_size,
16498 			  soc->ctrl_psoc,
16499 			  WLAN_MD_DP_SRNG_REO_EXCEPTION,
16500 			  "reo_exception_ring");
16501 
16502 	/* REO command and status rings */
16503 	if (dp_srng_init(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0)) {
16504 		dp_init_err("%pK: dp_srng_init failed for reo_cmd_ring", soc);
16505 		goto fail1;
16506 	}
16507 
16508 	wlan_minidump_log(soc->reo_cmd_ring.base_vaddr_unaligned,
16509 			  soc->reo_cmd_ring.alloc_size,
16510 			  soc->ctrl_psoc,
16511 			  WLAN_MD_DP_SRNG_REO_CMD,
16512 			  "reo_cmd_ring");
16513 
16514 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
16515 	TAILQ_INIT(&soc->rx.reo_cmd_list);
16516 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
16517 
16518 	if (dp_srng_init(soc, &soc->reo_status_ring, REO_STATUS, 0, 0)) {
16519 		dp_init_err("%pK: dp_srng_init failed for reo_status_ring", soc);
16520 		goto fail1;
16521 	}
16522 
16523 	wlan_minidump_log(soc->reo_status_ring.base_vaddr_unaligned,
16524 			  soc->reo_status_ring.alloc_size,
16525 			  soc->ctrl_psoc,
16526 			  WLAN_MD_DP_SRNG_REO_STATUS,
16527 			  "reo_status_ring");
16528 
16529 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
16530 		if (dp_init_tx_ring_pair_by_index(soc, i))
16531 			goto fail1;
16532 	}
16533 
16534 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
16535 		if (dp_init_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
16536 			goto fail1;
16537 
16538 		if (dp_ipa_init_alt_tx_ring(soc))
16539 			goto fail1;
16540 	}
16541 
16542 	dp_create_ext_stats_event(soc);
16543 
16544 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
16545 		/* Initialize REO destination ring */
16546 		if (dp_srng_init(soc, &soc->reo_dest_ring[i], REO_DST, i, 0)) {
			dp_init_err("%pK: dp_srng_init failed for reo_dest_ring", soc);
16548 			goto fail1;
16549 		}
16550 
16551 		wlan_minidump_log(soc->reo_dest_ring[i].base_vaddr_unaligned,
16552 				  soc->reo_dest_ring[i].alloc_size,
16553 				  soc->ctrl_psoc,
16554 				  WLAN_MD_DP_SRNG_REO_DEST,
16555 				  "reo_dest_ring");
16556 	}
16557 
16558 	if (soc->arch_ops.txrx_soc_srng_init) {
16559 		if (soc->arch_ops.txrx_soc_srng_init(soc)) {
16560 			dp_init_err("%pK: dp_srng_init failed for arch rings",
16561 				    soc);
16562 			goto fail1;
16563 		}
16564 	}
16565 
16566 	return QDF_STATUS_SUCCESS;
16567 fail1:
16568 	/*
16569 	 * Cleanup will be done as part of soc_detach, which will
16570 	 * be called on pdev attach failure
16571 	 */
16572 	dp_soc_srng_deinit(soc);
16573 	return QDF_STATUS_E_FAILURE;
16574 }
16575 
16576 /**
16577  * dp_soc_srng_free() - free soc level srng rings
16578  * @soc: Datapath soc handle
16579  *
16580  */
16581 static void dp_soc_srng_free(struct dp_soc *soc)
16582 {
16583 	uint32_t i;
16584 
16585 	if (soc->arch_ops.txrx_soc_srng_free)
16586 		soc->arch_ops.txrx_soc_srng_free(soc);
16587 
16588 	dp_srng_free(soc, &soc->wbm_desc_rel_ring);
16589 
16590 	for (i = 0; i < soc->num_tcl_data_rings; i++)
16591 		dp_free_tx_ring_pair_by_index(soc, i);
16592 
16593 	/* Free IPA rings for TCL_TX and TCL_COMPL ring */
16594 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
16595 		dp_free_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
16596 		dp_ipa_free_alt_tx_ring(soc);
16597 	}
16598 
16599 	dp_soc_tcl_cmd_cred_srng_free(soc);
16600 	dp_soc_tcl_status_srng_free(soc);
16601 
16602 	for (i = 0; i < soc->num_reo_dest_rings; i++)
16603 		dp_srng_free(soc, &soc->reo_dest_ring[i]);
16604 
16605 	dp_srng_free(soc, &soc->reo_reinject_ring);
16606 	dp_srng_free(soc, &soc->rx_rel_ring);
16607 
16608 	dp_srng_free(soc, &soc->reo_exception_ring);
16609 
16610 	dp_srng_free(soc, &soc->reo_cmd_ring);
16611 	dp_srng_free(soc, &soc->reo_status_ring);
16612 }
16613 
16614 /**
16615  * dp_soc_srng_alloc() - Allocate memory for soc level srng rings
16616  * @soc: Datapath soc handle
16617  *
 * Return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_NOMEM on failure
16620  */
16621 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc)
16622 {
16623 	uint32_t entries;
16624 	uint32_t i;
16625 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
16626 	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
16627 	uint32_t tx_comp_ring_size, tx_ring_size, reo_dst_ring_size;
16628 
16629 	soc_cfg_ctx = soc->wlan_cfg_ctx;
16630 
16631 	/* sw2wbm link descriptor release ring */
16632 	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
16633 	if (dp_srng_alloc(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE,
16634 			  entries, 0)) {
16635 		dp_init_err("%pK: dp_srng_alloc failed for wbm_desc_rel_ring", soc);
16636 		goto fail1;
16637 	}
16638 
16639 	/* TCL command and status rings */
16640 	if (dp_soc_tcl_cmd_cred_srng_alloc(soc)) {
16641 		dp_init_err("%pK: dp_srng_alloc failed for tcl_cmd_ring", soc);
16642 		goto fail1;
16643 	}
16644 
16645 	if (dp_soc_tcl_status_srng_alloc(soc)) {
16646 		dp_init_err("%pK: dp_srng_alloc failed for tcl_status_ring", soc);
16647 		goto fail1;
16648 	}
16649 
16650 	/* REO reinjection ring */
16651 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
16652 	if (dp_srng_alloc(soc, &soc->reo_reinject_ring, REO_REINJECT,
16653 			  entries, 0)) {
16654 		dp_init_err("%pK: dp_srng_alloc failed for reo_reinject_ring", soc);
16655 		goto fail1;
16656 	}
16657 
16658 	/* Rx release ring */
16659 	entries = wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx);
16660 	if (dp_srng_alloc(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
16661 			  entries, 0)) {
16662 		dp_init_err("%pK: dp_srng_alloc failed for rx_rel_ring", soc);
16663 		goto fail1;
16664 	}
16665 
16666 	/* Rx exception ring */
16667 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
16668 	if (dp_srng_alloc(soc, &soc->reo_exception_ring, REO_EXCEPTION,
16669 			  entries, 0)) {
16670 		dp_init_err("%pK: dp_srng_alloc failed - reo_exception", soc);
16671 		goto fail1;
16672 	}
16673 
16674 	/* REO command and status rings */
16675 	entries = wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx);
16676 	if (dp_srng_alloc(soc, &soc->reo_cmd_ring, REO_CMD, entries, 0)) {
16677 		dp_init_err("%pK: dp_srng_alloc failed for reo_cmd_ring", soc);
16678 		goto fail1;
16679 	}
16680 
16681 	entries = wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx);
16682 	if (dp_srng_alloc(soc, &soc->reo_status_ring, REO_STATUS,
16683 			  entries, 0)) {
16684 		dp_init_err("%pK: dp_srng_alloc failed for reo_status_ring", soc);
16685 		goto fail1;
16686 	}
16687 
16688 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
16689 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
16690 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc_cfg_ctx);
16691 
16692 	/* Disable cached desc if NSS offload is enabled */
16693 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
16694 		cached = 0;
16695 
16696 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
16697 		if (dp_alloc_tx_ring_pair_by_index(soc, i))
16698 			goto fail1;
16699 	}
16700 
16701 	/* IPA rings for TCL_TX and TX_COMP will be allocated here */
16702 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
16703 		if (dp_alloc_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
16704 			goto fail1;
16705 
16706 		if (dp_ipa_alloc_alt_tx_ring(soc))
16707 			goto fail1;
16708 	}
16709 
16710 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
16711 		/* Setup REO destination ring */
16712 		if (dp_srng_alloc(soc, &soc->reo_dest_ring[i], REO_DST,
16713 				  reo_dst_ring_size, cached)) {
16714 			dp_init_err("%pK: dp_srng_alloc failed for reo_dest_ring", soc);
16715 			goto fail1;
16716 		}
16717 	}
16718 
16719 	if (soc->arch_ops.txrx_soc_srng_alloc) {
16720 		if (soc->arch_ops.txrx_soc_srng_alloc(soc)) {
16721 			dp_init_err("%pK: dp_srng_alloc failed for arch rings",
16722 				    soc);
16723 			goto fail1;
16724 		}
16725 	}
16726 
16727 	return QDF_STATUS_SUCCESS;
16728 
16729 fail1:
16730 	dp_soc_srng_free(soc);
16731 	return QDF_STATUS_E_NOMEM;
16732 }
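
/*
 * Illustrative sketch (dp_example_soc_srng_lifecycle is hypothetical,
 * not part of the driver): the intended soc srng lifecycle is
 * alloc -> init -> deinit -> free. Both alloc and init unwind their
 * own partial work on failure, so callers only release what completed.
 */
static inline QDF_STATUS dp_example_soc_srng_lifecycle(struct dp_soc *soc)
{
	if (dp_soc_srng_alloc(soc) != QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_NOMEM;

	if (dp_soc_srng_init(soc) != QDF_STATUS_SUCCESS) {
		/* init already ran dp_soc_srng_deinit() internally */
		dp_soc_srng_free(soc);
		return QDF_STATUS_E_FAILURE;
	}

	dp_soc_srng_deinit(soc);
	dp_soc_srng_free(soc);
	return QDF_STATUS_SUCCESS;
}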
16733 
16734 static void dp_soc_cfg_dump(struct dp_soc *soc, uint32_t target_type)
16735 {
16736 	dp_init_info("DP soc Dump for Target = %d", target_type);
16737 	dp_init_info("ast_override_support = %d, da_war_enabled = %d,",
16738 		     soc->ast_override_support, soc->da_war_enabled);
16739 
16740 	wlan_cfg_dp_soc_ctx_dump(soc->wlan_cfg_ctx);
16741 }
16742 
16743 /**
16744  * dp_soc_cfg_init() - initialize target specific configuration
16745  *		       during dp_soc_init
16746  * @soc: dp soc handle
16747  */
16748 static void dp_soc_cfg_init(struct dp_soc *soc)
16749 {
16750 	uint32_t target_type;
16751 
16752 	target_type = hal_get_target_type(soc->hal_soc);
16753 	switch (target_type) {
16754 	case TARGET_TYPE_QCA6290:
16755 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
16756 					       REO_DST_RING_SIZE_QCA6290);
16757 		soc->ast_override_support = 1;
16758 		soc->da_war_enabled = false;
16759 		break;
16760 	case TARGET_TYPE_QCA6390:
16761 	case TARGET_TYPE_QCA6490:
16762 	case TARGET_TYPE_QCA6750:
16763 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
16764 					       REO_DST_RING_SIZE_QCA6290);
16765 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
16766 		soc->ast_override_support = 1;
16767 		if (soc->cdp_soc.ol_ops->get_con_mode &&
16768 		    soc->cdp_soc.ol_ops->get_con_mode() ==
16769 		    QDF_GLOBAL_MONITOR_MODE) {
16770 			int int_ctx;
16771 
16772 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
16773 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
16774 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
16775 			}
16776 		}
16777 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
16778 		break;
16779 	case TARGET_TYPE_KIWI:
16780 	case TARGET_TYPE_MANGO:
16781 		soc->ast_override_support = 1;
16782 		soc->per_tid_basize_max_tid = 8;
16783 
16784 		if (soc->cdp_soc.ol_ops->get_con_mode &&
16785 		    soc->cdp_soc.ol_ops->get_con_mode() ==
16786 		    QDF_GLOBAL_MONITOR_MODE) {
16787 			int int_ctx;
16788 
16789 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS;
16790 			     int_ctx++) {
16791 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
16792 				if (dp_is_monitor_mode_using_poll(soc))
16793 					soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
16794 			}
16795 		}
16796 
16797 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
16798 		soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev = 1;
16799 		break;
16800 	case TARGET_TYPE_QCA8074:
16801 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
16802 		soc->da_war_enabled = true;
16803 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
16804 		break;
16805 	case TARGET_TYPE_QCA8074V2:
16806 	case TARGET_TYPE_QCA6018:
16807 	case TARGET_TYPE_QCA9574:
16808 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
16809 		soc->ast_override_support = 1;
16810 		soc->per_tid_basize_max_tid = 8;
16811 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
16812 		soc->da_war_enabled = false;
16813 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
16814 		break;
16815 	case TARGET_TYPE_QCN9000:
16816 		soc->ast_override_support = 1;
16817 		soc->da_war_enabled = false;
16818 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
16819 		soc->per_tid_basize_max_tid = 8;
16820 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
16821 		soc->lmac_polled_mode = 0;
16822 		soc->wbm_release_desc_rx_sg_support = 1;
16823 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
16824 		break;
16825 	case TARGET_TYPE_QCA5018:
16826 	case TARGET_TYPE_QCN6122:
16827 	case TARGET_TYPE_QCN9160:
16828 		soc->ast_override_support = 1;
16829 		soc->da_war_enabled = false;
16830 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
16831 		soc->per_tid_basize_max_tid = 8;
16832 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS_11AX;
16833 		soc->disable_mac1_intr = 1;
16834 		soc->disable_mac2_intr = 1;
16835 		soc->wbm_release_desc_rx_sg_support = 1;
16836 		break;
16837 	case TARGET_TYPE_QCN9224:
16838 		soc->ast_override_support = 1;
16839 		soc->da_war_enabled = false;
16840 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
16841 		soc->per_tid_basize_max_tid = 8;
16842 		soc->wbm_release_desc_rx_sg_support = 1;
16843 		soc->rxdma2sw_rings_not_supported = 1;
16844 		soc->wbm_sg_last_msdu_war = 1;
16845 		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
16846 		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
16847 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
16848 		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
16849 		soc->host_ast_db_enable = cfg_get(soc->ctrl_psoc,
16850 						  CFG_DP_HOST_AST_DB_ENABLE);
16851 		break;
16852 	case TARGET_TYPE_QCA5332:
16853 		soc->ast_override_support = 1;
16854 		soc->da_war_enabled = false;
16855 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
16856 		soc->per_tid_basize_max_tid = 8;
16857 		soc->wbm_release_desc_rx_sg_support = 1;
16858 		soc->rxdma2sw_rings_not_supported = 1;
16859 		soc->wbm_sg_last_msdu_war = 1;
16860 		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
16861 		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
16862 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS_5332;
16863 		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
16864 		soc->host_ast_db_enable = cfg_get(soc->ctrl_psoc,
16865 						  CFG_DP_HOST_AST_DB_ENABLE);
16866 		break;
16867 	default:
16868 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
16869 		qdf_assert_always(0);
16870 		break;
16871 	}
16872 	dp_soc_cfg_dump(soc, target_type);
16873 }
16874 
16875 /**
16876  * dp_soc_cfg_attach() - set target specific configuration in
16877  *			 dp soc cfg.
16878  * @soc: dp soc handle
16879  */
16880 static void dp_soc_cfg_attach(struct dp_soc *soc)
16881 {
16882 	int target_type;
16883 	int nss_cfg = 0;
16884 
16885 	target_type = hal_get_target_type(soc->hal_soc);
16886 	switch (target_type) {
16887 	case TARGET_TYPE_QCA6290:
16888 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
16889 					       REO_DST_RING_SIZE_QCA6290);
16890 		break;
16891 	case TARGET_TYPE_QCA6390:
16892 	case TARGET_TYPE_QCA6490:
16893 	case TARGET_TYPE_QCA6750:
16894 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
16895 					       REO_DST_RING_SIZE_QCA6290);
16896 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
16897 		break;
16898 	case TARGET_TYPE_KIWI:
16899 	case TARGET_TYPE_MANGO:
16900 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
16901 		break;
16902 	case TARGET_TYPE_QCA8074:
16903 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
16904 		break;
16905 	case TARGET_TYPE_QCA8074V2:
16906 	case TARGET_TYPE_QCA6018:
16907 	case TARGET_TYPE_QCA9574:
16908 	case TARGET_TYPE_QCN6122:
16909 	case TARGET_TYPE_QCN9160:
16910 	case TARGET_TYPE_QCA5018:
16911 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
16912 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
16913 		break;
16914 	case TARGET_TYPE_QCN9000:
16915 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
16916 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
16917 		break;
16918 	case TARGET_TYPE_QCN9224:
16919 	case TARGET_TYPE_QCA5332:
16920 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
16921 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
16922 		break;
16923 	default:
16924 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
16925 		qdf_assert_always(0);
16926 		break;
16927 	}
16928 
16929 	if (soc->cdp_soc.ol_ops->get_soc_nss_cfg)
16930 		nss_cfg = soc->cdp_soc.ol_ops->get_soc_nss_cfg(soc->ctrl_psoc);
16931 
16932 	wlan_cfg_set_dp_soc_nss_cfg(soc->wlan_cfg_ctx, nss_cfg);
16933 
16934 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
16935 		wlan_cfg_set_num_tx_desc_pool(soc->wlan_cfg_ctx, 0);
16936 		wlan_cfg_set_num_tx_ext_desc_pool(soc->wlan_cfg_ctx, 0);
16937 		wlan_cfg_set_num_tx_desc(soc->wlan_cfg_ctx, 0);
16938 		wlan_cfg_set_num_tx_ext_desc(soc->wlan_cfg_ctx, 0);
16939 		soc->init_tcl_cmd_cred_ring = false;
16940 		soc->num_tcl_data_rings =
16941 			wlan_cfg_num_nss_tcl_data_rings(soc->wlan_cfg_ctx);
16942 		soc->num_reo_dest_rings =
16943 			wlan_cfg_num_nss_reo_dest_rings(soc->wlan_cfg_ctx);
16944 
16945 	} else {
16946 		soc->init_tcl_cmd_cred_ring = true;
16947 		soc->num_tx_comp_rings =
16948 			wlan_cfg_num_tx_comp_rings(soc->wlan_cfg_ctx);
16949 		soc->num_tcl_data_rings =
16950 			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
16951 		soc->num_reo_dest_rings =
16952 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
16953 	}
16954 
16955 	soc->arch_ops.soc_cfg_attach(soc);
16956 }
16957 
16958 static inline  void dp_pdev_set_default_reo(struct dp_pdev *pdev)
16959 {
16960 	struct dp_soc *soc = pdev->soc;
16961 
16962 	switch (pdev->pdev_id) {
16963 	case 0:
16964 		pdev->reo_dest =
16965 			wlan_cfg_radio0_default_reo_get(soc->wlan_cfg_ctx);
16966 		break;
16967 
16968 	case 1:
16969 		pdev->reo_dest =
16970 			wlan_cfg_radio1_default_reo_get(soc->wlan_cfg_ctx);
16971 		break;
16972 
16973 	case 2:
16974 		pdev->reo_dest =
16975 			wlan_cfg_radio2_default_reo_get(soc->wlan_cfg_ctx);
16976 		break;
16977 
16978 	default:
16979 		dp_init_err("%pK: Invalid pdev_id %d for reo selection",
16980 			    soc, pdev->pdev_id);
16981 		break;
16982 	}
16983 }
16984 
16985 static QDF_STATUS dp_pdev_init(struct cdp_soc_t *txrx_soc,
16986 				      HTC_HANDLE htc_handle,
16987 				      qdf_device_t qdf_osdev,
16988 				      uint8_t pdev_id)
16989 {
16990 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
16991 	int nss_cfg;
16992 	void *sojourn_buf;
16993 	QDF_STATUS ret;
16994 
16995 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
16996 	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
16997 
16998 	soc_cfg_ctx = soc->wlan_cfg_ctx;
16999 	pdev->soc = soc;
17000 	pdev->pdev_id = pdev_id;
17001 
17002 	/*
17003 	 * Variable to prevent double pdev deinitialization during
	 * radio detach execution, i.e. in the absence of any vdev.
17005 	 */
17006 	pdev->pdev_deinit = 0;
17007 
17008 	if (dp_wdi_event_attach(pdev)) {
17009 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "dp_wdi_event_attach failed");
17011 		goto fail0;
17012 	}
17013 
17014 	if (dp_pdev_srng_init(pdev)) {
17015 		dp_init_err("%pK: Failed to initialize pdev srng rings", soc);
17016 		goto fail1;
17017 	}
17018 
17019 	/* Initialize descriptors in TCL Rings used by IPA */
17020 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
17021 		hal_tx_init_data_ring(soc->hal_soc,
17022 				      soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng);
17023 		dp_ipa_hal_tx_init_alt_data_ring(soc);
17024 	}
17025 
17026 	/*
17027 	 * Initialize command/credit ring descriptor
17028 	 * Command/CREDIT ring also used for sending DATA cmds
17029 	 */
17030 	dp_tx_init_cmd_credit_ring(soc);
17031 
17032 	dp_tx_pdev_init(pdev);
17033 
17034 	/*
17035 	 * set nss pdev config based on soc config
17036 	 */
17037 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
17038 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
17039 					 (nss_cfg & (1 << pdev_id)));
17040 	pdev->target_pdev_id =
17041 		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
17042 
17043 	if (soc->preferred_hw_mode == WMI_HOST_HW_MODE_2G_PHYB &&
17044 	    pdev->lmac_id == PHYB_2G_LMAC_ID) {
17045 		pdev->target_pdev_id = PHYB_2G_TARGET_PDEV_ID;
17046 	}
17047 
17048 	/* Reset the cpu ring map if radio is NSS offloaded */
17049 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
17050 		dp_soc_reset_cpu_ring_map(soc);
17051 		dp_soc_reset_intr_mask(soc);
17052 	}
17053 
	/* Reset the IPA vlan rx ring interrupt mask */
17055 	dp_soc_reset_ipa_vlan_intr_mask(soc);
17056 
17057 	TAILQ_INIT(&pdev->vdev_list);
17058 	qdf_spinlock_create(&pdev->vdev_list_lock);
17059 	pdev->vdev_count = 0;
17060 	pdev->is_lro_hash_configured = 0;
17061 
17062 	qdf_spinlock_create(&pdev->tx_mutex);
17063 	pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_INVALID_LMAC_ID;
17064 	pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_INVALID_LMAC_ID;
17065 	pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_INVALID_LMAC_ID;
17066 
17067 	DP_STATS_INIT(pdev);
17068 
17069 	dp_local_peer_id_pool_init(pdev);
17070 
17071 	dp_dscp_tid_map_setup(pdev);
17072 	dp_pcp_tid_map_setup(pdev);
17073 
17074 	/* set the reo destination during initialization */
17075 	dp_pdev_set_default_reo(pdev);
17076 
17077 	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
17078 
17079 	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
17080 			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
17081 			      TRUE);
17082 
17083 	if (!pdev->sojourn_buf) {
17084 		dp_init_err("%pK: Failed to allocate sojourn buf", soc);
17085 		goto fail2;
17086 	}
17087 	sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
17088 	qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));
17089 
17090 	qdf_event_create(&pdev->fw_peer_stats_event);
17091 	qdf_event_create(&pdev->fw_stats_event);
17092 	qdf_event_create(&pdev->fw_obss_stats_event);
17093 
17094 	pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
17095 
17096 	if (dp_rxdma_ring_setup(soc, pdev)) {
17097 		dp_init_err("%pK: RXDMA ring config failed", soc);
17098 		goto fail3;
17099 	}
17100 
17101 	if (dp_init_ipa_rx_refill_buf_ring(soc, pdev))
17102 		goto fail3;
17103 
17104 	if (dp_ipa_ring_resource_setup(soc, pdev))
17105 		goto fail4;
17106 
17107 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
17108 		dp_init_err("%pK: dp_ipa_uc_attach failed", soc);
17109 		goto fail4;
17110 	}
17111 
17112 	ret = dp_rx_fst_attach(soc, pdev);
17113 	if ((ret != QDF_STATUS_SUCCESS) &&
17114 	    (ret != QDF_STATUS_E_NOSUPPORT)) {
17115 		dp_init_err("%pK: RX Flow Search Table attach failed: pdev %d err %d",
17116 			    soc, pdev_id, ret);
17117 		goto fail5;
17118 	}
17119 
17120 	if (dp_pdev_bkp_stats_attach(pdev) != QDF_STATUS_SUCCESS) {
17121 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
17122 			  FL("dp_pdev_bkp_stats_attach failed"));
17123 		goto fail6;
17124 	}
17125 
17126 	if (dp_monitor_pdev_init(pdev)) {
17127 		dp_init_err("%pK: dp_monitor_pdev_init failed\n", soc);
17128 		goto fail7;
17129 	}
17130 
17131 	/* initialize sw rx descriptors */
17132 	dp_rx_pdev_desc_pool_init(pdev);
17133 	/* allocate buffers and replenish the RxDMA ring */
17134 	dp_rx_pdev_buffers_alloc(pdev);
17135 
17136 	dp_init_tso_stats(pdev);
17137 
17138 	pdev->rx_fast_flag = false;
17139 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
17140 		qdf_dma_mem_stats_read(),
17141 		qdf_heap_mem_stats_read(),
17142 		qdf_skb_total_mem_stats_read());
17143 
17144 	return QDF_STATUS_SUCCESS;
17145 fail7:
17146 	dp_pdev_bkp_stats_detach(pdev);
17147 fail6:
17148 	dp_rx_fst_detach(soc, pdev);
17149 fail5:
17150 	dp_ipa_uc_detach(soc, pdev);
17151 fail4:
17152 	dp_deinit_ipa_rx_refill_buf_ring(soc, pdev);
17153 fail3:
17154 	dp_rxdma_ring_cleanup(soc, pdev);
17155 	qdf_nbuf_free(pdev->sojourn_buf);
17156 fail2:
17157 	qdf_spinlock_destroy(&pdev->tx_mutex);
17158 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
17159 	dp_pdev_srng_deinit(pdev);
17160 fail1:
17161 	dp_wdi_event_detach(pdev);
17162 fail0:
17163 	return QDF_STATUS_E_FAILURE;
17164 }
17165 
/**
 * dp_pdev_init_wifi3() - Init txrx pdev
 * @txrx_soc: Datapath soc handle
 * @htc_handle: HTC handle for host-target interface
 * @qdf_osdev: QDF OS device
 * @pdev_id: pdev id
 *
 * Return: QDF_STATUS
 */
17174 static QDF_STATUS dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
17175 				     HTC_HANDLE htc_handle,
17176 				     qdf_device_t qdf_osdev,
17177 				     uint8_t pdev_id)
17178 {
17179 	return dp_pdev_init(txrx_soc, htc_handle, qdf_osdev, pdev_id);
17180 }
17181 
17182