xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision 839714c413056bc9b82af766295b4ffabe28bbbf)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <wlan_ipa_obj_mgmt_api.h>
21 #include <qdf_types.h>
22 #include <qdf_lock.h>
23 #include <qdf_net_types.h>
24 #include <qdf_lro.h>
25 #include <qdf_module.h>
26 #include <hal_hw_headers.h>
27 #include <hal_api.h>
28 #include <hif.h>
29 #include <htt.h>
30 #include <wdi_event.h>
31 #include <queue.h>
32 #include "dp_types.h"
33 #include "dp_internal.h"
34 #include "dp_tx.h"
35 #include "dp_tx_desc.h"
36 #include "dp_rx.h"
37 #ifdef DP_RATETABLE_SUPPORT
38 #include "dp_ratetable.h"
39 #endif
40 #include <cdp_txrx_handle.h>
41 #include <wlan_cfg.h>
42 #include <wlan_utility.h>
43 #include "cdp_txrx_cmn_struct.h"
44 #include "cdp_txrx_stats_struct.h"
45 #include "cdp_txrx_cmn_reg.h"
46 #include <qdf_util.h>
47 #include "dp_peer.h"
48 #include "htt_stats.h"
49 #include "dp_htt.h"
50 #ifdef WLAN_SUPPORT_RX_FISA
51 #include <wlan_dp_fisa_rx.h>
52 #endif
53 #include "htt_ppdu_stats.h"
54 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
55 #include "cfg_ucfg_api.h"
56 #include <wlan_module_ids.h>
57 
58 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
59 #include "cdp_txrx_flow_ctrl_v2.h"
60 #else
61 
62 static inline void
63 cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
64 {
65 	return;
66 }
67 #endif
68 #ifdef WIFI_MONITOR_SUPPORT
69 #include <dp_mon.h>
70 #endif
71 #include "dp_ipa.h"
72 #ifdef FEATURE_WDS
73 #include "dp_txrx_wds.h"
74 #endif
75 #ifdef WLAN_SUPPORT_MSCS
76 #include "dp_mscs.h"
77 #endif
78 #ifdef WLAN_SUPPORT_MESH_LATENCY
79 #include "dp_mesh_latency.h"
80 #endif
81 #ifdef WLAN_SUPPORT_SCS
82 #include "dp_scs.h"
83 #endif
84 #ifdef ATH_SUPPORT_IQUE
85 #include "dp_txrx_me.h"
86 #endif
87 #if defined(DP_CON_MON)
88 #ifndef REMOVE_PKT_LOG
89 #include <pktlog_ac_api.h>
90 #include <pktlog_ac.h>
91 #endif
92 #endif
93 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
94 #include <wlan_dp_swlm.h>
95 #endif
96 #ifdef CONFIG_SAWF_DEF_QUEUES
97 #include "dp_sawf.h"
98 #endif
99 #ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
100 #include <target_if_dp.h>
101 #endif
102 
103 #ifdef WLAN_FEATURE_STATS_EXT
104 #define INIT_RX_HW_STATS_LOCK(_soc) \
105 	qdf_spinlock_create(&(_soc)->rx_hw_stats_lock)
106 #define DEINIT_RX_HW_STATS_LOCK(_soc) \
107 	qdf_spinlock_destroy(&(_soc)->rx_hw_stats_lock)
108 #else
109 #define INIT_RX_HW_STATS_LOCK(_soc)  /* no op */
110 #define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */
111 #endif
112 
113 #if defined(DP_PEER_EXTENDED_API) || defined(WLAN_DP_PENDING_MEM_FLUSH)
114 #define SET_PEER_REF_CNT_ONE(_peer) \
115 	qdf_atomic_set(&(_peer)->ref_cnt, 1)
116 #else
117 #define SET_PEER_REF_CNT_ONE(_peer)
118 #endif
119 
120 #ifdef WLAN_SYSFS_DP_STATS
121 /* Sysfs event wait time for a firmware stats request, in milliseconds */
122 #define WLAN_SYSFS_STAT_REQ_WAIT_MS 3000
123 #endif
124 
125 #ifdef QCA_DP_ENABLE_TX_COMP_RING4
126 #define TXCOMP_RING4_NUM 3
127 #else
128 #define TXCOMP_RING4_NUM WBM2SW_TXCOMP_RING4_NUM
129 #endif
130 
131 #ifdef QCA_DP_TX_FW_METADATA_V2
132 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
133 		HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
134 #else
135 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
136 		HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
137 #endif
138 
139 QDF_COMPILE_TIME_ASSERT(max_rx_rings_check,
140 			MAX_REO_DEST_RINGS == CDP_MAX_RX_RINGS);
141 
142 QDF_COMPILE_TIME_ASSERT(max_tx_rings_check,
143 			MAX_TCL_DATA_RINGS == CDP_MAX_TX_COMP_RINGS);
144 
145 #define dp_init_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_INIT, params)
146 #define dp_init_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_INIT, params)
147 #define dp_init_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_INIT, params)
148 #define dp_init_info(params...) \
149 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_INIT, ## params)
150 #define dp_init_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_INIT, params)
151 
152 #define dp_vdev_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_VDEV, params)
153 #define dp_vdev_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_VDEV, params)
154 #define dp_vdev_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_VDEV, params)
155 #define dp_vdev_info(params...) \
156 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_VDEV, ## params)
157 #define dp_vdev_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_VDEV, params)
158 
159 void dp_configure_arch_ops(struct dp_soc *soc);
160 qdf_size_t dp_get_soc_context_size(uint16_t device_id);
161 
162 /*
163  * The max size of cdp_peer_stats_param_t is limited to 16 bytes.
164  * If the requested stats would exceed this size limit,
165  * dp_txrx_get_peer_stats() must be used instead.
166  */
167 QDF_COMPILE_TIME_ASSERT(cdp_peer_stats_param_t_max_size,
168 			(sizeof(cdp_peer_stats_param_t) <= 16));
169 
170 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
171 /*
172  * If WLAN_CFG_INT_NUM_CONTEXTS is changed, HIF_NUM_INT_CONTEXTS
173  * should also be updated accordingly.
174  */
175 QDF_COMPILE_TIME_ASSERT(num_intr_grps,
176 			HIF_NUM_INT_CONTEXTS == WLAN_CFG_INT_NUM_CONTEXTS);
177 
178 /*
179  * HIF_EVENT_HIST_MAX should always be power of 2
180  */
181 QDF_COMPILE_TIME_ASSERT(hif_event_history_size,
182 			(HIF_EVENT_HIST_MAX & (HIF_EVENT_HIST_MAX - 1)) == 0);
183 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
184 
185 /*
186  * If WLAN_CFG_INT_NUM_CONTEXTS is changed,
187  * WLAN_CFG_INT_NUM_CONTEXTS_MAX should also be updated
188  */
189 QDF_COMPILE_TIME_ASSERT(wlan_cfg_num_int_ctxs,
190 			WLAN_CFG_INT_NUM_CONTEXTS_MAX >=
191 			WLAN_CFG_INT_NUM_CONTEXTS);
192 
193 static QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl);
194 static QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl);
195 
196 static void dp_pdev_srng_deinit(struct dp_pdev *pdev);
197 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev);
198 static void dp_pdev_srng_free(struct dp_pdev *pdev);
199 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev);
200 
201 static void dp_soc_srng_deinit(struct dp_soc *soc);
202 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc);
203 static void dp_soc_srng_free(struct dp_soc *soc);
204 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc);
205 
206 static void dp_soc_cfg_init(struct dp_soc *soc);
207 static void dp_soc_cfg_attach(struct dp_soc *soc);
208 
209 static inline
210 QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
211 				struct cdp_pdev_attach_params *params);
212 
213 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id);
214 
215 static QDF_STATUS
216 dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
217 		   HTC_HANDLE htc_handle,
218 		   qdf_device_t qdf_osdev,
219 		   uint8_t pdev_id);
220 
221 static QDF_STATUS
222 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id, int force);
223 
224 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc);
225 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc);
226 
227 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
228 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc,
229 				       uint8_t pdev_id,
230 				       int force);
231 static struct dp_soc *
232 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
233 	      struct cdp_soc_attach_params *params);
234 static inline QDF_STATUS dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl,
235 					      uint8_t vdev_id,
236 					      uint8_t *peer_mac_addr,
237 					      enum cdp_peer_type peer_type);
238 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
239 				       uint8_t vdev_id,
240 				       uint8_t *peer_mac, uint32_t bitmap,
241 				       enum cdp_peer_type peer_type);
242 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
243 				bool unmap_only,
244 				bool mlo_peers_only);
245 #ifdef ENABLE_VERBOSE_DEBUG
246 bool is_dp_verbose_debug_enabled;
247 #endif
248 
249 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
250 static bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
251 static void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
252 			   bool enable);
253 static inline void
254 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
255 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats);
256 static inline void
257 dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
258 #endif
259 
260 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
261 						uint8_t index);
262 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index);
263 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index);
264 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
265 						 uint8_t index);
266 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
267 					    enum hal_ring_type ring_type,
268 					    int ring_num);
269 #ifdef DP_UMAC_HW_RESET_SUPPORT
270 static QDF_STATUS dp_umac_reset_action_trigger_recovery(struct dp_soc *soc);
271 static QDF_STATUS dp_umac_reset_handle_pre_reset(struct dp_soc *soc);
272 static QDF_STATUS dp_umac_reset_handle_post_reset(struct dp_soc *soc);
273 static QDF_STATUS dp_umac_reset_handle_post_reset_complete(struct dp_soc *soc);
274 #endif
275 
276 #define DP_INTR_POLL_TIMER_MS	5
277 
278 #define MON_VDEV_TIMER_INIT 0x1
279 #define MON_VDEV_TIMER_RUNNING 0x2
280 
281 #define DP_MCS_LENGTH (6*MAX_MCS)
282 
283 #define DP_CURR_FW_STATS_AVAIL 19
284 #define DP_HTT_DBG_EXT_STATS_MAX 256
285 #define DP_MAX_SLEEP_TIME 100
286 #ifndef QCA_WIFI_3_0_EMU
287 #define SUSPEND_DRAIN_WAIT 500
288 #else
289 #define SUSPEND_DRAIN_WAIT 3000
290 #endif
291 
292 #ifdef IPA_OFFLOAD
293 /* Exclude IPA rings from the interrupt context */
294 #define TX_RING_MASK_VAL	0xb
295 #define RX_RING_MASK_VAL	0x7
296 #else
297 #define TX_RING_MASK_VAL	0xF
298 #define RX_RING_MASK_VAL	0xF
299 #endif
300 
301 #define STR_MAXLEN	64
302 
303 #define RNG_ERR		"SRNG setup failed for"
304 
305 /*
306  * default_dscp_tid_map - Default DSCP-TID mapping
307  *
308  * DSCP        TID
309  * 000000      0
310  * 001000      1
311  * 010000      2
312  * 011000      3
313  * 100000      4
314  * 101000      5
315  * 110000      6
316  * 111000      7
317  */
318 static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
319 	0, 0, 0, 0, 0, 0, 0, 0,
320 	1, 1, 1, 1, 1, 1, 1, 1,
321 	2, 2, 2, 2, 2, 2, 2, 2,
322 	3, 3, 3, 3, 3, 3, 3, 3,
323 	4, 4, 4, 4, 4, 4, 4, 4,
324 	5, 5, 5, 5, 5, 5, 5, 5,
325 	6, 6, 6, 6, 6, 6, 6, 6,
326 	7, 7, 7, 7, 7, 7, 7, 7,
327 };
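
/*
 * Illustrative sketch (not part of the driver): the map is indexed
 * directly by the 6-bit DSCP value, so a lookup is a single array read.
 * For example, DSCP 46 (0b101110, Expedited Forwarding) falls in the
 * 101xxx block above and resolves to TID 5:
 *
 *	uint8_t tid = default_dscp_tid_map[46];	-> tid == 5
 */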
328 
329 /*
330  * default_pcp_tid_map - Default PCP-TID mapping
331  *
332  * PCP     TID
333  * 000      0
334  * 001      1
335  * 010      2
336  * 011      3
337  * 100      4
338  * 101      5
339  * 110      6
340  * 111      7
341  */
342 static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
343 	0, 1, 2, 3, 4, 5, 6, 7,
344 };
345 
346 /*
347  * CPU to TX ring map
348  */
349 uint8_t
350 dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
351 	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
352 	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
353 	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
354 	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
355 	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
356 #ifdef WLAN_TX_PKT_CAPTURE_ENH
357 	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
358 #endif
359 };
360 
361 qdf_export_symbol(dp_cpu_ring_map);
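
/*
 * Illustrative sketch (not part of the driver): dp_cpu_ring_map is
 * indexed first by the configured cpu-ring map id and then by the
 * interrupt context number, yielding the TCL data ring to transmit on.
 * With map id 0, for example, interrupt context 1 is steered to ring 1:
 *
 *	uint8_t tcl_ring = dp_cpu_ring_map[0][1];	-> tcl_ring == 0x1
 */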
362 
363 /**
364  * enum dp_stats_type - Select the type of statistics
365  * @STATS_FW: Firmware-based statistic
366  * @STATS_HOST: Host-based statistic
367  * @STATS_TYPE_MAX: maximum enumeration
368  */
369 enum dp_stats_type {
370 	STATS_FW = 0,
371 	STATS_HOST = 1,
372 	STATS_TYPE_MAX = 2,
373 };
374 
375 /**
376  * enum dp_fw_stats - General Firmware statistics options
377  * @TXRX_FW_STATS_INVALID: statistic is not available
378  */
379 enum dp_fw_stats {
380 	TXRX_FW_STATS_INVALID	= -1,
381 };
382 
383 /*
384  * dp_stats_mapping_table - Firmware and Host statistics
385  * currently supported
386  */
387 const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
388 	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
389 	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
390 	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
391 	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
392 	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
393 	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
394 	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
395 	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
396 	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
397 	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
398 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
399 	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
400 	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
401 	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
402 	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
403 	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
404 	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
405 	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
406 	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
407 	/* Last ENUM for HTT FW STATS */
408 	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
409 	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
410 	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
411 	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
412 	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
413 	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
414 	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
415 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
416 	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
417 	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
418 	{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
419 	{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
420 	{TXRX_FW_STATS_INVALID, TXRX_NAPI_STATS},
421 	{TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
422 	{TXRX_FW_STATS_INVALID, TXRX_SOC_FSE_STATS},
423 	{TXRX_FW_STATS_INVALID, TXRX_HAL_REG_WRITE_STATS},
424 	{TXRX_FW_STATS_INVALID, TXRX_SOC_REO_HW_DESC_DUMP},
425 	{TXRX_FW_STATS_INVALID, TXRX_SOC_WBM_IDLE_HPTP_DUMP},
426 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_USAGE_WM_STATS},
427 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT, TXRX_HOST_STATS_INVALID},
428 	{HTT_DBG_EXT_STATS_TX_SOUNDING_INFO, TXRX_HOST_STATS_INVALID}
429 };
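
/*
 * Illustrative sketch (not part of the driver): each row of
 * dp_stats_mapping_table pairs a firmware stats id (column STATS_FW)
 * with a host stats id (column STATS_HOST) for one stats request value;
 * at most one of the two columns is valid per row. A dispatcher can
 * select the path roughly like this:
 *
 *	int fw_id = dp_stats_mapping_table[value][STATS_FW];
 *	if (fw_id != TXRX_FW_STATS_INVALID)
 *		... issue an HTT firmware stats request for fw_id ...
 *	else
 *		... fall back to the host stats id in column STATS_HOST ...
 */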
430 
431 /* MCL specific functions */
432 #if defined(DP_CON_MON)
433 
434 #ifdef DP_CON_MON_MSI_ENABLED
435 /**
436  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
437  * @soc: pointer to dp_soc handle
438  * @intr_ctx_num: interrupt context number for which mon mask is needed
439  *
440  * For MCL builds with DP_CON_MON_MSI_ENABLED, monitor mode rings are
441  * serviced directly in interrupt (MSI) context rather than in a polled
442  * timer context.
443  *
444  * This function therefore returns the configured rx mon ring mask for
445  * the given interrupt context, so that regular interrupt processing
446  * also covers the monitor mode rings.
447  *
448  * Return: mon mask value for the interrupt context
449  */
450 static inline uint32_t
451 dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
452 {
453 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
454 }
455 #else
456 /**
457  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
458  * @soc: pointer to dp_soc handle
459  * @intr_ctx_num: interrupt context number for which mon mask is needed
460  *
461  * For MCL, monitor mode rings are processed in a polled timer context.
462  * This function returns 0, since in interrupt mode (softirq-based RX)
463  * we do not want to process monitor mode rings in a softirq.
464  *
465  * So, when packet log is enabled for SAP/STA/P2P modes, regular
466  * interrupt processing will not process monitor mode rings; they are
467  * handled in a separate timer context instead.
468  *
469  * Return: 0
470  */
471 static inline uint32_t
472 dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
473 {
474 	return 0;
475 }
476 #endif
477 
478 #ifdef IPA_OFFLOAD
479 /**
480  * dp_get_num_rx_contexts() - get number of RX contexts
481  * @soc_hdl: cdp opaque soc handle
482  *
483  * Return: number of RX contexts
484  */
485 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
486 {
487 	int num_rx_contexts;
488 	uint32_t reo_ring_map;
489 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
490 
491 	reo_ring_map = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
492 
493 	switch (soc->arch_id) {
494 	case CDP_ARCH_TYPE_BE:
495 		/* 2 REO rings are used for IPA */
496 		reo_ring_map &=  ~(BIT(3) | BIT(7));
497 
498 		break;
499 	case CDP_ARCH_TYPE_LI:
500 		/* 1 REO ring is used for IPA */
501 		reo_ring_map &=  ~BIT(3);
502 		break;
503 	default:
504 		dp_err("unknown arch_id 0x%x", soc->arch_id);
505 		QDF_BUG(0);
506 	}
507 	/*
508 	 * qdf_get_hweight32 is preferred over qdf_get_hweight8 in case the
509 	 * map is scaled up in the future
510 	 */
511 	num_rx_contexts = qdf_get_hweight32(reo_ring_map);
512 
513 	return num_rx_contexts;
514 }
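
/*
 * Worked example (a sketch, assuming a LI target with REO rings 0-3
 * enabled): reo_ring_map starts as 0xF; clearing BIT(3) for the
 * IPA-owned ring leaves 0x7, and qdf_get_hweight32(0x7) yields 3 RX
 * contexts for the host.
 */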
515 #else
516 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
517 {
518 	int num_rx_contexts;
519 	uint32_t reo_config;
520 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
521 
522 	reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
523 	/*
524 	 * qdf_get_hweight32 is preferred over qdf_get_hweight8 in case the
525 	 * map is scaled up in the future
526 	 */
527 	num_rx_contexts = qdf_get_hweight32(reo_config);
528 
529 	return num_rx_contexts;
530 }
531 #endif
532 
533 #else
534 
535 /**
536  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
537  * @soc: pointer to dp_soc handle
538  * @intr_ctx_num: interrupt context number for which mon mask is needed
539  *
540  * Return: mon mask value
541  */
542 static inline
543 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
544 {
545 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
546 }
547 
548 void dp_soc_reset_mon_intr_mask(struct dp_soc *soc)
549 {
550 	int i;
551 
552 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
553 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
554 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
555 	}
556 }
557 
558 qdf_export_symbol(dp_soc_reset_mon_intr_mask);
559 
560 /**
561  * dp_service_lmac_rings() - timer to reap lmac rings
562  * @arg: SoC Handle
563  *
564  * Return: None
565  *
566  */
567 static void dp_service_lmac_rings(void *arg)
568 {
569 	struct dp_soc *soc = (struct dp_soc *)arg;
570 	int ring = 0, i;
571 	struct dp_pdev *pdev = NULL;
572 	union dp_rx_desc_list_elem_t *desc_list = NULL;
573 	union dp_rx_desc_list_elem_t *tail = NULL;
574 
575 	/* Process LMAC interrupts */
576 	for (ring = 0; ring < MAX_NUM_LMAC_HW; ring++) {
577 		int mac_for_pdev = ring;
578 		struct dp_srng *rx_refill_buf_ring;
579 
580 		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
581 		if (!pdev)
582 			continue;
583 
584 		rx_refill_buf_ring = &soc->rx_refill_buf_ring[mac_for_pdev];
585 
586 		dp_monitor_process(soc, NULL, mac_for_pdev,
587 				   QCA_NAPI_BUDGET);
588 
589 		for (i = 0;
590 		     i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
591 			dp_rxdma_err_process(&soc->intr_ctx[i], soc,
592 					     mac_for_pdev,
593 					     QCA_NAPI_BUDGET);
594 
595 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF,
596 						  mac_for_pdev))
597 			dp_rx_buffers_replenish(soc, mac_for_pdev,
598 						rx_refill_buf_ring,
599 						&soc->rx_desc_buf[mac_for_pdev],
600 						0, &desc_list, &tail, false);
601 	}
602 
603 	qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
604 }
605 
606 #endif
607 
608 #ifdef FEATURE_MEC
609 void dp_peer_mec_flush_entries(struct dp_soc *soc)
610 {
611 	unsigned int index;
612 	struct dp_mec_entry *mecentry, *mecentry_next;
613 
614 	TAILQ_HEAD(, dp_mec_entry) free_list;
615 	TAILQ_INIT(&free_list);
616 
617 	if (!soc->mec_hash.mask)
618 		return;
619 
620 	if (!soc->mec_hash.bins)
621 		return;
622 
623 	if (!qdf_atomic_read(&soc->mec_cnt))
624 		return;
625 
626 	qdf_spin_lock_bh(&soc->mec_lock);
627 	for (index = 0; index <= soc->mec_hash.mask; index++) {
628 		if (!TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
629 			TAILQ_FOREACH_SAFE(mecentry, &soc->mec_hash.bins[index],
630 					   hash_list_elem, mecentry_next) {
631 				dp_peer_mec_detach_entry(soc, mecentry, &free_list);
632 			}
633 		}
634 	}
635 	qdf_spin_unlock_bh(&soc->mec_lock);
636 
637 	dp_peer_mec_free_list(soc, &free_list);
638 }
639 
640 /**
641  * dp_print_mec_stats() - Dump MEC entries in table
642  * @soc: Datapath soc handle
643  *
644  * Return: none
645  */
646 static void dp_print_mec_stats(struct dp_soc *soc)
647 {
648 	int i;
649 	uint32_t index;
650 	struct dp_mec_entry *mecentry = NULL, *mec_list;
651 	uint32_t num_entries = 0;
652 
653 	DP_PRINT_STATS("MEC Stats:");
654 	DP_PRINT_STATS("   Entries Added   = %d", soc->stats.mec.added);
655 	DP_PRINT_STATS("   Entries Deleted = %d", soc->stats.mec.deleted);
656 
657 	if (!qdf_atomic_read(&soc->mec_cnt))
658 		return;
659 
660 	mec_list = qdf_mem_malloc(sizeof(*mecentry) * DP_PEER_MAX_MEC_ENTRY);
661 	if (!mec_list) {
662 		dp_peer_warn("%pK: failed to allocate mec_list", soc);
663 		return;
664 	}
665 
666 	DP_PRINT_STATS("MEC Table:");
667 	for (index = 0; index <= soc->mec_hash.mask; index++) {
668 		qdf_spin_lock_bh(&soc->mec_lock);
669 		if (TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
670 			qdf_spin_unlock_bh(&soc->mec_lock);
671 			continue;
672 		}
673 
674 		TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index],
675 			      hash_list_elem) {
676 			qdf_mem_copy(&mec_list[num_entries], mecentry,
677 				     sizeof(*mecentry));
678 			num_entries++;
679 		}
680 		qdf_spin_unlock_bh(&soc->mec_lock);
681 	}
682 
683 	if (!num_entries) {
684 		qdf_mem_free(mec_list);
685 		return;
686 	}
687 
688 	for (i = 0; i < num_entries; i++) {
689 		DP_PRINT_STATS("%6d mac_addr = " QDF_MAC_ADDR_FMT
690 			       " is_active = %d pdev_id = %d vdev_id = %d",
691 			       i,
692 			       QDF_MAC_ADDR_REF(mec_list[i].mac_addr.raw),
693 			       mec_list[i].is_active,
694 			       mec_list[i].pdev_id,
695 			       mec_list[i].vdev_id);
696 	}
697 	qdf_mem_free(mec_list);
698 }
699 #else
700 static void dp_print_mec_stats(struct dp_soc *soc)
701 {
702 }
703 #endif
704 
705 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
706 				 uint8_t vdev_id,
707 				 uint8_t *peer_mac,
708 				 uint8_t *mac_addr,
709 				 enum cdp_txrx_ast_entry_type type,
710 				 uint32_t flags)
711 {
712 	int ret = -1;
713 	QDF_STATUS status = QDF_STATUS_SUCCESS;
714 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
715 						       peer_mac, 0, vdev_id,
716 						       DP_MOD_ID_CDP);
717 
718 	if (!peer) {
719 		dp_peer_debug("Peer is NULL!");
720 		return ret;
721 	}
722 
723 	status = dp_peer_add_ast((struct dp_soc *)soc_hdl,
724 				 peer,
725 				 mac_addr,
726 				 type,
727 				 flags);
728 	if ((status == QDF_STATUS_SUCCESS) ||
729 	    (status == QDF_STATUS_E_ALREADY) ||
730 	    (status == QDF_STATUS_E_AGAIN))
731 		ret = 0;
732 
733 	dp_hmwds_ast_add_notify(peer, mac_addr,
734 				type, status, false);
735 
736 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
737 
738 	return ret;
739 }
740 
741 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
742 						uint8_t vdev_id,
743 						uint8_t *peer_mac,
744 						uint8_t *wds_macaddr,
745 						uint32_t flags)
746 {
747 	int status = -1;
748 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
749 	struct dp_ast_entry  *ast_entry = NULL;
750 	struct dp_peer *peer;
751 
752 	if (soc->ast_offload_support)
753 		return status;
754 
755 	peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
756 				      peer_mac, 0, vdev_id,
757 				      DP_MOD_ID_CDP);
758 
759 	if (!peer) {
760 		dp_peer_debug("Peer is NULL!");
761 		return status;
762 	}
763 
764 	qdf_spin_lock_bh(&soc->ast_lock);
765 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
766 						    peer->vdev->pdev->pdev_id);
767 
768 	if (ast_entry) {
769 		status = dp_peer_update_ast(soc,
770 					    peer,
771 					    ast_entry, flags);
772 	}
773 	qdf_spin_unlock_bh(&soc->ast_lock);
774 
775 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
776 
777 	return status;
778 }
779 
780 /**
781  * dp_peer_reset_ast_entries() - Deletes all HMWDS entries for a peer
782  * @soc:		Datapath SOC handle
783  * @peer:		DP peer
784  * @arg:		callback argument
785  *
786  * Return: None
787  */
788 static void
789 dp_peer_reset_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
790 {
791 	struct dp_ast_entry *ast_entry = NULL;
792 	struct dp_ast_entry *tmp_ast_entry;
793 
794 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) {
795 		if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
796 		    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
797 			dp_peer_del_ast(soc, ast_entry);
798 	}
799 }
800 
801 /**
802  * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry
803  * @soc_hdl:		Datapath SOC handle
804  * @wds_macaddr:	WDS entry MAC Address
805  * @peer_mac_addr:	peer MAC Address
806  * @vdev_id:		id of vdev handle
807  *
808  * Return: QDF_STATUS
809  */
810 static QDF_STATUS dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
811 					 uint8_t *wds_macaddr,
812 					 uint8_t *peer_mac_addr,
813 					 uint8_t vdev_id)
814 {
815 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
816 	struct dp_ast_entry *ast_entry = NULL;
817 	struct dp_peer *peer;
818 	struct dp_pdev *pdev;
819 	struct dp_vdev *vdev;
820 
821 	if (soc->ast_offload_support)
822 		return QDF_STATUS_E_FAILURE;
823 
824 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
825 
826 	if (!vdev)
827 		return QDF_STATUS_E_FAILURE;
828 
829 	pdev = vdev->pdev;
830 
831 	if (peer_mac_addr) {
832 		peer = dp_peer_find_hash_find(soc, peer_mac_addr,
833 					      0, vdev->vdev_id,
834 					      DP_MOD_ID_CDP);
835 		if (!peer) {
836 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
837 			return QDF_STATUS_E_FAILURE;
838 		}
839 
840 		qdf_spin_lock_bh(&soc->ast_lock);
841 		dp_peer_reset_ast_entries(soc, peer, NULL);
842 		qdf_spin_unlock_bh(&soc->ast_lock);
843 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
844 	} else if (wds_macaddr) {
845 		qdf_spin_lock_bh(&soc->ast_lock);
846 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
847 							    pdev->pdev_id);
848 
849 		if (ast_entry) {
850 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
851 			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
852 				dp_peer_del_ast(soc, ast_entry);
853 		}
854 		qdf_spin_unlock_bh(&soc->ast_lock);
855 	}
856 
857 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
858 	return QDF_STATUS_SUCCESS;
859 }
860 
861 /**
862  * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entries
863  * @soc_hdl:		Datapath SOC handle
864  * @vdev_id:		id of vdev object
865  *
866  * Return: QDF_STATUS
867  */
868 static QDF_STATUS
869 dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
870 			     uint8_t vdev_id)
871 {
872 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
873 
874 	if (soc->ast_offload_support)
875 		return QDF_STATUS_SUCCESS;
876 
877 	qdf_spin_lock_bh(&soc->ast_lock);
878 
879 	dp_soc_iterate_peer(soc, dp_peer_reset_ast_entries, NULL,
880 			    DP_MOD_ID_CDP);
881 	qdf_spin_unlock_bh(&soc->ast_lock);
882 
883 	return QDF_STATUS_SUCCESS;
884 }
885 
886 /**
887  * dp_peer_flush_ast_entries() - Delete all wds and hmwds ast entries of a peer
888  * @soc:		Datapath SOC
889  * @peer:		Datapath peer
890  * @arg:		arg to callback
891  *
892  * Return: None
893  */
894 static void
895 dp_peer_flush_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
896 {
897 	struct dp_ast_entry *ase = NULL;
898 	struct dp_ast_entry *temp_ase;
899 
900 	DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
901 		if ((ase->type ==
902 			CDP_TXRX_AST_TYPE_STATIC) ||
903 			(ase->type ==
904 			 CDP_TXRX_AST_TYPE_SELF) ||
905 			(ase->type ==
906 			 CDP_TXRX_AST_TYPE_STA_BSS))
907 			continue;
908 		dp_peer_del_ast(soc, ase);
909 	}
910 }
911 
912 /**
913  * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entries
914  * @soc_hdl:		Datapath SOC handle
915  *
916  * Return: None
917  */
918 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
919 {
920 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
921 
922 	qdf_spin_lock_bh(&soc->ast_lock);
923 
924 	dp_soc_iterate_peer(soc, dp_peer_flush_ast_entries, NULL,
925 			    DP_MOD_ID_CDP);
926 
927 	qdf_spin_unlock_bh(&soc->ast_lock);
928 	dp_peer_mec_flush_entries(soc);
929 }
930 
931 #if defined(IPA_WDS_EASYMESH_FEATURE) && defined(FEATURE_AST)
932 /**
933  * dp_peer_send_wds_disconnect() - Send Disconnect event to IPA for each peer
934  * @soc: Datapath SOC
935  * @peer: Datapath peer
936  *
937  * Return: None
938  */
939 static void
940 dp_peer_send_wds_disconnect(struct dp_soc *soc, struct dp_peer *peer)
941 {
942 	struct dp_ast_entry *ase = NULL;
943 	struct dp_ast_entry *temp_ase;
944 
945 	DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
946 		if (ase->type == CDP_TXRX_AST_TYPE_WDS) {
947 			soc->cdp_soc.ol_ops->peer_send_wds_disconnect(soc->ctrl_psoc,
948 								      ase->mac_addr.raw,
949 								      ase->vdev_id);
950 		}
951 	}
952 }
953 #elif defined(FEATURE_AST)
954 static void
955 dp_peer_send_wds_disconnect(struct dp_soc *soc, struct dp_peer *peer)
956 {
957 }
958 #endif
959 
960 /**
961  * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
962  *                                       and return ast entry information
963  *                                       of first ast entry found in the
964  *                                       table with given mac address
965  * @soc_hdl: data path soc handle
966  * @ast_mac_addr: AST entry mac address
967  * @ast_entry_info: ast entry information
968  *
969  * Return: true if ast entry found with ast_mac_addr
970  *          false if ast entry not found
971  */
972 static bool dp_peer_get_ast_info_by_soc_wifi3
973 	(struct cdp_soc_t *soc_hdl,
974 	 uint8_t *ast_mac_addr,
975 	 struct cdp_ast_entry_info *ast_entry_info)
976 {
977 	struct dp_ast_entry *ast_entry = NULL;
978 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
979 	struct dp_peer *peer = NULL;
980 
981 	if (soc->ast_offload_support)
982 		return false;
983 
984 	qdf_spin_lock_bh(&soc->ast_lock);
985 
986 	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
987 	if ((!ast_entry) ||
988 	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
989 		qdf_spin_unlock_bh(&soc->ast_lock);
990 		return false;
991 	}
992 
993 	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
994 				     DP_MOD_ID_AST);
995 	if (!peer) {
996 		qdf_spin_unlock_bh(&soc->ast_lock);
997 		return false;
998 	}
999 
1000 	ast_entry_info->type = ast_entry->type;
1001 	ast_entry_info->pdev_id = ast_entry->pdev_id;
1002 	ast_entry_info->vdev_id = ast_entry->vdev_id;
1003 	ast_entry_info->peer_id = ast_entry->peer_id;
1004 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
1005 		     &peer->mac_addr.raw[0],
1006 		     QDF_MAC_ADDR_SIZE);
1007 	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
1008 	qdf_spin_unlock_bh(&soc->ast_lock);
1009 	return true;
1010 }
1011 
1012 /**
1013  * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
1014  *                                          and return ast entry information
1015  *                                          if mac address and pdev_id matches
1016  * @soc_hdl: data path soc handle
1017  * @ast_mac_addr: AST entry mac address
1018  * @pdev_id: pdev_id
1019  * @ast_entry_info: ast entry information
1020  *
1021  * Return: true if ast entry found with ast_mac_addr
1022  *          false if ast entry not found
1023  */
1024 static bool dp_peer_get_ast_info_by_pdevid_wifi3
1025 		(struct cdp_soc_t *soc_hdl,
1026 		 uint8_t *ast_mac_addr,
1027 		 uint8_t pdev_id,
1028 		 struct cdp_ast_entry_info *ast_entry_info)
1029 {
1030 	struct dp_ast_entry *ast_entry;
1031 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
1032 	struct dp_peer *peer = NULL;
1033 
1034 	if (soc->ast_offload_support)
1035 		return false;
1036 
1037 	qdf_spin_lock_bh(&soc->ast_lock);
1038 
1039 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr,
1040 						    pdev_id);
1041 
1042 	if ((!ast_entry) ||
1043 	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
1044 		qdf_spin_unlock_bh(&soc->ast_lock);
1045 		return false;
1046 	}
1047 
1048 	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
1049 				     DP_MOD_ID_AST);
1050 	if (!peer) {
1051 		qdf_spin_unlock_bh(&soc->ast_lock);
1052 		return false;
1053 	}
1054 
1055 	ast_entry_info->type = ast_entry->type;
1056 	ast_entry_info->pdev_id = ast_entry->pdev_id;
1057 	ast_entry_info->vdev_id = ast_entry->vdev_id;
1058 	ast_entry_info->peer_id = ast_entry->peer_id;
1059 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
1060 		     &peer->mac_addr.raw[0],
1061 		     QDF_MAC_ADDR_SIZE);
1062 	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
1063 	qdf_spin_unlock_bh(&soc->ast_lock);
1064 	return true;
1065 }
1066 
1067 /**
1068  * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table
1069  *                            with given mac address
1070  * @soc_handle: data path soc handle
1071  * @mac_addr: AST entry mac address
1072  * @callback: callback function to be called on ast delete response from FW
1073  * @cookie: argument to be passed to callback
1074  *
1075  * Return: QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
1076  *          is sent
1077  *          QDF_STATUS_E_INVAL if ast entry not found
1078  */
1079 static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
1080 					       uint8_t *mac_addr,
1081 					       txrx_ast_free_cb callback,
1082 					       void *cookie)
1084 {
1085 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1086 	struct dp_ast_entry *ast_entry = NULL;
1087 	txrx_ast_free_cb cb = NULL;
1088 	void *arg = NULL;
1089 
1090 	if (soc->ast_offload_support)
1091 		return -QDF_STATUS_E_INVAL;
1092 
1093 	qdf_spin_lock_bh(&soc->ast_lock);
1094 	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
1095 	if (!ast_entry) {
1096 		qdf_spin_unlock_bh(&soc->ast_lock);
1097 		return -QDF_STATUS_E_INVAL;
1098 	}
1099 
1100 	if (ast_entry->callback) {
1101 		cb = ast_entry->callback;
1102 		arg = ast_entry->cookie;
1103 	}
1104 
1105 	ast_entry->callback = callback;
1106 	ast_entry->cookie = cookie;
1107 
1108 	/*
1109 	 * If delete_in_progress is set, an AST delete was already sent to
1110 	 * the target and the host is waiting for a response; do not send
1111 	 * the delete again.
1112 	 */
1113 	if (!ast_entry->delete_in_progress)
1114 		dp_peer_del_ast(soc, ast_entry);
1115 
1116 	qdf_spin_unlock_bh(&soc->ast_lock);
1117 	if (cb) {
1118 		cb(soc->ctrl_psoc,
1119 		   dp_soc_to_cdp_soc(soc),
1120 		   arg,
1121 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1122 	}
1123 	return QDF_STATUS_SUCCESS;
1124 }
1125 
1126 /**
1127  * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
1128  *                                   table if mac address and pdev_id matches
1129  * @soc_handle: data path soc handle
1130  * @mac_addr: AST entry mac address
1131  * @pdev_id: pdev id
1132  * @callback: callback function to be called on ast delete response from FW
1133  * @cookie: argument to be passed to callback
1134  *
1135  * Return: QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
1136  *          is sent
1137  *          QDF_STATUS_E_INVAL if ast entry not found
1138  */
1140 static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
1141 						uint8_t *mac_addr,
1142 						uint8_t pdev_id,
1143 						txrx_ast_free_cb callback,
1144 						void *cookie)
1146 {
1147 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1148 	struct dp_ast_entry *ast_entry;
1149 	txrx_ast_free_cb cb = NULL;
1150 	void *arg = NULL;
1151 
1152 	if (soc->ast_offload_support)
1153 		return -QDF_STATUS_E_INVAL;
1154 
1155 	qdf_spin_lock_bh(&soc->ast_lock);
1156 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);
1157 
1158 	if (!ast_entry) {
1159 		qdf_spin_unlock_bh(&soc->ast_lock);
1160 		return -QDF_STATUS_E_INVAL;
1161 	}
1162 
1163 	if (ast_entry->callback) {
1164 		cb = ast_entry->callback;
1165 		arg = ast_entry->cookie;
1166 	}
1167 
1168 	ast_entry->callback = callback;
1169 	ast_entry->cookie = cookie;
1170 
1171 	/*
1172 	 * If delete_in_progress is set, an AST delete was already sent to
1173 	 * the target and the host is waiting for a response; do not send
1174 	 * the delete again.
1175 	 */
1176 	if (!ast_entry->delete_in_progress)
1177 		dp_peer_del_ast(soc, ast_entry);
1178 
1179 	qdf_spin_unlock_bh(&soc->ast_lock);
1180 
1181 	if (cb) {
1182 		cb(soc->ctrl_psoc,
1183 		   dp_soc_to_cdp_soc(soc),
1184 		   arg,
1185 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1186 	}
1187 	return QDF_STATUS_SUCCESS;
1188 }
1189 
1190 /**
1191  * dp_peer_HMWDS_ast_entry_del() - delete the ast entry from soc AST hash
1192  *                                 table if HMWDS rem-addr command is issued
1193  *
1194  * @soc_handle: data path soc handle
1195  * @vdev_id: vdev id
1196  * @wds_macaddr: AST entry mac address to delete
1197  * @type: cdp_txrx_ast_entry_type to send to FW
1198  * @delete_in_fw: flag to indicate AST entry deletion in FW
1199  *
1200  * Return: QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
1201  *         is sent
1202  *         QDF_STATUS_E_INVAL if ast entry not found
1203  */
1204 static QDF_STATUS dp_peer_HMWDS_ast_entry_del(struct cdp_soc_t *soc_handle,
1205 					      uint8_t vdev_id,
1206 					      uint8_t *wds_macaddr,
1207 					      uint8_t type,
1208 					      uint8_t delete_in_fw)
1209 {
1210 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1211 
1212 	if (soc->ast_offload_support) {
1213 		dp_del_wds_entry_wrapper(soc, vdev_id, wds_macaddr, type,
1214 					 delete_in_fw);
1215 		return QDF_STATUS_SUCCESS;
1216 	}
1217 
1218 	return -QDF_STATUS_E_INVAL;
1219 }
1220 
1221 /**
1222  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs
1223  * @ring_num: ring num of the ring being queried
1224  * @grp_mask: the grp_mask array for the ring type in question.
1225  *
1226  * The grp_mask array is indexed by group number and the bit fields correspond
1227  * to ring numbers. We are finding which interrupt group a ring belongs to.
1228  *
1229  * Return: the index in the grp_mask array whose mask contains the ring
1230  * number, or -QDF_STATUS_E_NOENT if no entry is found
1231  */
1232 static int dp_srng_find_ring_in_mask(int ring_num, uint8_t *grp_mask)
1233 {
1234 	int ext_group_num;
1235 	uint8_t mask = 1 << ring_num;
1236 
1237 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
1238 	     ext_group_num++) {
1239 		if (mask & grp_mask[ext_group_num])
1240 			return ext_group_num;
1241 	}
1242 
1243 	return -QDF_STATUS_E_NOENT;
1244 }
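
/*
 * Worked example (a sketch with hypothetical mask values): with
 * grp_mask = { 0x1, 0x2, 0x4, ... }, ring 1 gives mask = 1 << 1 = 0x2,
 * which matches grp_mask[1], so the ring is serviced by ext_group 1.
 */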
1245 
1246 /**
1247  * dp_is_msi_group_number_invalid() - check whether msi_group_number is valid
1248  * @soc: dp_soc
1249  * @msi_group_number: MSI group number.
1250  * @msi_data_count: MSI data count.
1251  *
1252  * Return: true if msi_group_number is invalid.
1253  */
1254 static bool dp_is_msi_group_number_invalid(struct dp_soc *soc,
1255 					   int msi_group_number,
1256 					   int msi_data_count)
1257 {
1258 	if (soc && soc->osdev && soc->osdev->dev &&
1259 	    pld_is_one_msi(soc->osdev->dev))
1260 		return false;
1261 
1262 	return msi_group_number > msi_data_count;
1263 }
1264 
1265 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1266 /**
1267  * dp_is_reo_ring_num_in_nf_grp1() - Check if the current reo ring is part of
1268  *				rx_near_full_grp1 mask
1269  * @soc: Datapath SoC Handle
1270  * @ring_num: REO ring number
1271  *
1272  * Return: 1 if the ring_num belongs to reo_nf_grp1,
1273  *	   0, otherwise.
1274  */
1275 static inline int
1276 dp_is_reo_ring_num_in_nf_grp1(struct dp_soc *soc, int ring_num)
1277 {
1278 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_1 & (1 << ring_num));
1279 }
1280 
1281 /**
1282  * dp_is_reo_ring_num_in_nf_grp2() - Check if the current reo ring is part of
1283  *				rx_near_full_grp2 mask
1284  * @soc: Datapath SoC Handle
1285  * @ring_num: REO ring number
1286  *
1287  * Return: 1 if the ring_num belongs to reo_nf_grp2,
1288  *	   0, otherwise.
1289  */
1290 static inline int
1291 dp_is_reo_ring_num_in_nf_grp2(struct dp_soc *soc, int ring_num)
1292 {
1293 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_2 & (1 << ring_num));
1294 }
1295 
1296 /**
1297  * dp_srng_get_near_full_irq_mask() - Get near-full irq mask for a particular
1298  *				ring type and number
1299  * @soc: Datapath SoC handle
1300  * @ring_type: SRNG type
1301  * @ring_num: ring num
1302  *
1303  * Return: near-full irq mask pointer
1304  */
1305 static inline
1306 uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
1307 					enum hal_ring_type ring_type,
1308 					int ring_num)
1309 {
1310 	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
1311 	uint8_t wbm2_sw_rx_rel_ring_id;
1312 	uint8_t *nf_irq_mask = NULL;
1313 
1314 	switch (ring_type) {
1315 	case WBM2SW_RELEASE:
1316 		wbm2_sw_rx_rel_ring_id =
1317 			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
1318 		if (ring_num != wbm2_sw_rx_rel_ring_id) {
1319 			nf_irq_mask = &soc->wlan_cfg_ctx->
1320 					int_tx_ring_near_full_irq_mask[0];
1321 		}
1322 		break;
1323 	case REO_DST:
1324 		if (dp_is_reo_ring_num_in_nf_grp1(soc, ring_num))
1325 			nf_irq_mask =
1326 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_1_mask[0];
1327 		else if (dp_is_reo_ring_num_in_nf_grp2(soc, ring_num))
1328 			nf_irq_mask =
1329 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_2_mask[0];
1330 		else
1331 			qdf_assert(0);
1332 		break;
1333 	default:
1334 		break;
1335 	}
1336 
1337 	return nf_irq_mask;
1338 }
1339 
1340 /**
1341  * dp_srng_set_msi2_ring_params() - Set the msi2 addr/data in the ring params
1342  * @soc: Datapath SoC handle
1343  * @ring_params: srng params handle
1344  * @msi2_addr: MSI2 addr to be set for the SRNG
1345  * @msi2_data: MSI2 data to be set for the SRNG
1346  *
1347  * Return: None
1348  */
1349 static inline
1350 void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
1351 				  struct hal_srng_params *ring_params,
1352 				  qdf_dma_addr_t msi2_addr,
1353 				  uint32_t msi2_data)
1354 {
1355 	ring_params->msi2_addr = msi2_addr;
1356 	ring_params->msi2_data = msi2_data;
1357 }
1358 
1359 /**
1360  * dp_srng_msi2_setup() - Setup MSI2 details for near full IRQ of an SRNG
1361  * @soc: Datapath SoC handle
1362  * @ring_params: ring_params for SRNG
1363  * @ring_type: SRNG type
1364  * @ring_num: ring number for the SRNG
1365  * @nf_msi_grp_num: near full msi group number
1366  *
1367  * Return: None
1368  */
1369 static inline void
1370 dp_srng_msi2_setup(struct dp_soc *soc,
1371 		   struct hal_srng_params *ring_params,
1372 		   int ring_type, int ring_num, int nf_msi_grp_num)
1373 {
1374 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
1375 	int msi_data_count, ret;
1376 
1377 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1378 					  &msi_data_count, &msi_data_start,
1379 					  &msi_irq_start);
1380 	if (ret)
1381 		return;
1382 
1383 	if (nf_msi_grp_num < 0) {
1384 		dp_init_info("%pK: ring near full IRQ not part of an ext_group; ring_type: %d, ring_num %d",
1385 			     soc, ring_type, ring_num);
1386 		ring_params->msi2_addr = 0;
1387 		ring_params->msi2_data = 0;
1388 		return;
1389 	}
1390 
1391 	if (dp_is_msi_group_number_invalid(soc, nf_msi_grp_num,
1392 					   msi_data_count)) {
1393 		dp_init_warn("%pK: 2 msi_groups will share an msi for near full IRQ; msi_group_num %d",
1394 			     soc, nf_msi_grp_num);
1395 		QDF_ASSERT(0);
1396 	}
1397 
1398 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
1399 
1400 	ring_params->nf_irq_support = 1;
1401 	ring_params->msi2_addr = addr_low;
1402 	ring_params->msi2_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
1403 	ring_params->msi2_data = (nf_msi_grp_num % msi_data_count)
1404 		+ msi_data_start;
1405 	ring_params->flags |= HAL_SRNG_MSI_INTR;
1406 }
1407 
1408 /* Percentage of ring entries considered as nearly full */
1409 #define DP_NF_HIGH_THRESH_PERCENTAGE	75
1410 /* Percentage of ring entries considered as critically full */
1411 #define DP_NF_CRIT_THRESH_PERCENTAGE	90
1412 /* Percentage of ring entries considered as safe threshold */
1413 #define DP_NF_SAFE_THRESH_PERCENTAGE	50
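
/*
 * Worked example (a sketch): for a 1024-entry ring the thresholds
 * computed below come out to high_thresh = 1024 * 75 / 100 = 768,
 * crit_thresh = 1024 * 90 / 100 = 921 (integer division) and
 * safe_thresh = 1024 * 50 / 100 = 512 entries.
 */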
1414 
1415 /**
1416  * dp_srng_configure_nf_interrupt_thresholds() - Configure the thresholds for
1417  *			near full irq
1418  * @soc: Datapath SoC handle
1419  * @ring_params: ring params for SRNG
1420  * @ring_type: ring type
1421  */
1422 static inline void
1423 dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
1424 					  struct hal_srng_params *ring_params,
1425 					  int ring_type)
1426 {
1427 	if (ring_params->nf_irq_support) {
1428 		ring_params->high_thresh = (ring_params->num_entries *
1429 					    DP_NF_HIGH_THRESH_PERCENTAGE) / 100;
1430 		ring_params->crit_thresh = (ring_params->num_entries *
1431 					    DP_NF_CRIT_THRESH_PERCENTAGE) / 100;
1432 		ring_params->safe_thresh = (ring_params->num_entries *
1433 					    DP_NF_SAFE_THRESH_PERCENTAGE) / 100;
1434 	}
1435 }
1436 
1437 /**
1438  * dp_srng_set_nf_thresholds() - Set the near full thresholds to srng data
1439  *			structure from the ring params
1440  * @soc: Datapath SoC handle
1441  * @srng: SRNG handle
1442  * @ring_params: ring params for a SRNG
1443  *
1444  * Return: None
1445  */
1446 static inline void
1447 dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
1448 			  struct hal_srng_params *ring_params)
1449 {
1450 	srng->crit_thresh = ring_params->crit_thresh;
1451 	srng->safe_thresh = ring_params->safe_thresh;
1452 }
1453 
1454 #else
1455 static inline
1456 uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
1457 					enum hal_ring_type ring_type,
1458 					int ring_num)
1459 {
1460 	return NULL;
1461 }
1462 
1463 static inline
1464 void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
1465 				  struct hal_srng_params *ring_params,
1466 				  qdf_dma_addr_t msi2_addr,
1467 				  uint32_t msi2_data)
1468 {
1469 }
1470 
1471 static inline void
1472 dp_srng_msi2_setup(struct dp_soc *soc,
1473 		   struct hal_srng_params *ring_params,
1474 		   int ring_type, int ring_num, int nf_msi_grp_num)
1475 {
1476 }
1477 
1478 static inline void
1479 dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
1480 					  struct hal_srng_params *ring_params,
1481 					  int ring_type)
1482 {
1483 }
1484 
1485 static inline void
1486 dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
1487 			  struct hal_srng_params *ring_params)
1488 {
1489 }
1490 #endif
1491 
1492 static int dp_srng_calculate_msi_group(struct dp_soc *soc,
1493 				       enum hal_ring_type ring_type,
1494 				       int ring_num,
1495 				       int *reg_msi_grp_num,
1496 				       bool nf_irq_support,
1497 				       int *nf_msi_grp_num)
1498 {
1499 	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
1500 	uint8_t *grp_mask, *nf_irq_mask = NULL;
1501 	bool nf_irq_enabled = false;
1502 	uint8_t wbm2_sw_rx_rel_ring_id;
1503 
1504 	switch (ring_type) {
1505 	case WBM2SW_RELEASE:
1506 		wbm2_sw_rx_rel_ring_id =
1507 			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
1508 		if (ring_num == wbm2_sw_rx_rel_ring_id) {
1509 			/* dp_rx_wbm_err_process - soc->rx_rel_ring */
1510 			grp_mask = &cfg_ctx->int_rx_wbm_rel_ring_mask[0];
1511 			ring_num = 0;
1512 		} else if (ring_num == WBM2_SW_PPE_REL_RING_ID) {
1513 			grp_mask = &cfg_ctx->int_ppeds_wbm_release_ring_mask[0];
1514 			ring_num = 0;
1515 		} else { /* dp_tx_comp_handler - soc->tx_comp_ring */
1516 			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
1517 			nf_irq_mask = dp_srng_get_near_full_irq_mask(soc,
1518 								     ring_type,
1519 								     ring_num);
1520 			if (nf_irq_mask)
1521 				nf_irq_enabled = true;
1522 
1523 			/*
1524 			 * Using ring 4 as 4th tx completion ring since ring 3
1525 			 * is Rx error ring
1526 			 */
1527 			if (ring_num == WBM2SW_TXCOMP_RING4_NUM)
1528 				ring_num = TXCOMP_RING4_NUM;
1529 		}
1530 	break;
1531 
1532 	case REO_EXCEPTION:
1533 		/* dp_rx_err_process - &soc->reo_exception_ring */
1534 		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
1535 	break;
1536 
1537 	case REO_DST:
1538 		/* dp_rx_process - soc->reo_dest_ring */
1539 		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
1540 		nf_irq_mask = dp_srng_get_near_full_irq_mask(soc, ring_type,
1541 							     ring_num);
1542 		if (nf_irq_mask)
1543 			nf_irq_enabled = true;
1544 	break;
1545 
1546 	case REO_STATUS:
1547 		/* dp_reo_status_ring_handler - soc->reo_status_ring */
1548 		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
1549 	break;
1550 
1551 	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
1552 	case RXDMA_MONITOR_STATUS:
1553 	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
1554 	case RXDMA_MONITOR_DST:
1555 		/* dp_mon_process */
1556 		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
1557 	break;
1558 	case TX_MONITOR_DST:
1559 		/* dp_tx_mon_process */
1560 		grp_mask = &soc->wlan_cfg_ctx->int_tx_mon_ring_mask[0];
1561 	break;
1562 	case RXDMA_DST:
1563 		/* dp_rxdma_err_process */
1564 		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
1565 	break;
1566 
1567 	case RXDMA_BUF:
1568 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
1569 	break;
1570 
1571 	case RXDMA_MONITOR_BUF:
1572 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
1573 	break;
1574 
1575 	case TX_MONITOR_BUF:
1576 		grp_mask = &soc->wlan_cfg_ctx->int_host2txmon_ring_mask[0];
1577 	break;
1578 
1579 	case REO2PPE:
1580 		grp_mask = &soc->wlan_cfg_ctx->int_reo2ppe_ring_mask[0];
1581 	break;
1582 
1583 	case PPE2TCL:
1584 		grp_mask = &soc->wlan_cfg_ctx->int_ppe2tcl_ring_mask[0];
1585 	break;
1586 
1587 	case TCL_DATA:
1588 	/* CMD_CREDIT_RING is used as command in 8074 and credit in 9000 */
1589 	case TCL_CMD_CREDIT:
1590 	case REO_CMD:
1591 	case SW2WBM_RELEASE:
1592 	case WBM_IDLE_LINK:
1593 		/* normally empty SW_TO_HW rings */
1594 		return -QDF_STATUS_E_NOENT;
1595 	break;
1596 
1597 	case TCL_STATUS:
1598 	case REO_REINJECT:
1599 		/* misc unused rings */
1600 		return -QDF_STATUS_E_NOENT;
1601 	break;
1602 
1603 	case CE_SRC:
1604 	case CE_DST:
1605 	case CE_DST_STATUS:
1606 		/* CE_rings - currently handled by hif */
1607 	default:
1608 		return -QDF_STATUS_E_NOENT;
1609 	break;
1610 	}
1611 
1612 	*reg_msi_grp_num = dp_srng_find_ring_in_mask(ring_num, grp_mask);
1613 
1614 	if (nf_irq_support && nf_irq_enabled) {
1615 		*nf_msi_grp_num = dp_srng_find_ring_in_mask(ring_num,
1616 							    nf_irq_mask);
1617 	}
1618 
1619 	return QDF_STATUS_SUCCESS;
1620 }
1621 
1622 /**
1623  * dp_get_num_msi_available() - API to get number of MSIs available
1624  * @soc: DP soc Handle
1625  * @interrupt_mode: Mode of interrupts
1626  *
1627  * Return: Number of MSIs available or 0 in case of integrated
1628  */
1629 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
1630 static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
1631 {
1632 	return 0;
1633 }
1634 #else
1635 static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
1636 {
1637 	int msi_data_count;
1638 	int msi_data_start;
1639 	int msi_irq_start;
1640 	int ret;
1641 
1642 	if (interrupt_mode == DP_INTR_INTEGRATED) {
1643 		return 0;
1644 	} else if (interrupt_mode == DP_INTR_MSI || interrupt_mode ==
1645 		   DP_INTR_POLL) {
1646 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1647 						  &msi_data_count,
1648 						  &msi_data_start,
1649 						  &msi_irq_start);
1650 		if (ret) {
1651 			qdf_err("Unable to get DP MSI assignment %d",
1652 				interrupt_mode);
1653 			return -EINVAL;
1654 		}
1655 		return msi_data_count;
1656 	}
1657 	qdf_err("Interrupt mode invalid %d", interrupt_mode);
1658 	return -EINVAL;
1659 }
1660 #endif
1661 
1662 #if defined(IPA_OFFLOAD) && defined(IPA_WDI3_VLAN_SUPPORT)
1663 static void
1664 dp_ipa_vlan_srng_msi_setup(struct hal_srng_params *ring_params, int ring_type,
1665 			   int ring_num)
1666 {
1667 	if (wlan_ipa_is_vlan_enabled()) {
1668 		if ((ring_type == REO_DST) &&
1669 				(ring_num == IPA_ALT_REO_DEST_RING_IDX)) {
1670 			ring_params->msi_addr = 0;
1671 			ring_params->msi_data = 0;
1672 			ring_params->flags &= ~HAL_SRNG_MSI_INTR;
1673 		}
1674 	}
1675 }
1676 #else
1677 static inline void
1678 dp_ipa_vlan_srng_msi_setup(struct hal_srng_params *ring_params, int ring_type,
1679 			   int ring_num)
1680 {
1681 }
1682 #endif
1683 
1684 static void dp_srng_msi_setup(struct dp_soc *soc, struct dp_srng *srng,
1685 			      struct hal_srng_params *ring_params,
1686 			      int ring_type, int ring_num)
1687 {
1688 	int reg_msi_grp_num;
1689 	/*
1690 	 * nf_msi_grp_num needs to be initialized with a negative value,
1691 	 * to avoid configuring near-full msi for WBM2SW3 ring
1692 	 */
1693 	int nf_msi_grp_num = -1;
1694 	int msi_data_count;
1695 	int ret;
1696 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
1697 	bool nf_irq_support;
1698 	int vector;
1699 
1700 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1701 					    &msi_data_count, &msi_data_start,
1702 					    &msi_irq_start);
1703 
1704 	if (ret)
1705 		return;
1706 
1707 	nf_irq_support = hal_srng_is_near_full_irq_supported(soc->hal_soc,
1708 							     ring_type,
1709 							     ring_num);
1710 	ret = dp_srng_calculate_msi_group(soc, ring_type, ring_num,
1711 					  &reg_msi_grp_num,
1712 					  nf_irq_support,
1713 					  &nf_msi_grp_num);
1714 	if (ret < 0) {
1715 		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d, ring_num %d",
1716 			     soc, ring_type, ring_num);
1717 		ring_params->msi_addr = 0;
1718 		ring_params->msi_data = 0;
1719 		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
1720 		return;
1721 	}
1722 
1723 	if (reg_msi_grp_num < 0) {
1724 		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d, ring_num %d",
1725 			     soc, ring_type, ring_num);
1726 		ring_params->msi_addr = 0;
1727 		ring_params->msi_data = 0;
1728 		goto configure_msi2;
1729 	}
1730 
1731 	if (dp_is_msi_group_number_invalid(soc, reg_msi_grp_num,
1732 					   msi_data_count)) {
1733 		dp_init_warn("%pK: 2 msi_groups will share an msi; msi_group_num %d",
1734 			     soc, reg_msi_grp_num);
1735 		QDF_ASSERT(0);
1736 	}
1737 
1738 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
1739 
1740 	ring_params->msi_addr = addr_low;
1741 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
1742 	ring_params->msi_data = (reg_msi_grp_num % msi_data_count)
1743 		+ msi_data_start;
1744 	ring_params->flags |= HAL_SRNG_MSI_INTR;
1745 
1746 	dp_ipa_vlan_srng_msi_setup(ring_params, ring_type, ring_num);
1747 
1748 	dp_debug("ring type %u ring_num %u msi->data %u msi_addr %llx",
1749 		 ring_type, ring_num, ring_params->msi_data,
1750 		 (uint64_t)ring_params->msi_addr);
1751 
1752 	vector = msi_irq_start + (reg_msi_grp_num % msi_data_count);
1753 	if (soc->arch_ops.dp_register_ppeds_interrupts)
1754 		if (soc->arch_ops.dp_register_ppeds_interrupts(soc, srng,
1755 							       vector,
1756 							       ring_type,
1757 							       ring_num))
1758 			return;
1759 
1760 configure_msi2:
1761 	if (!nf_irq_support) {
1762 		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
1763 		return;
1764 	}
1765 
1766 	dp_srng_msi2_setup(soc, ring_params, ring_type, ring_num,
1767 			   nf_msi_grp_num);
1768 }
1769 
1770 #ifdef FEATURE_AST
1771 /**
1772  * dp_print_mlo_ast_stats() - Print AST stats for MLO peers
1773  *
1774  * @soc: core DP soc context
1775  *
1776  * Return: void
1777  */
1778 static void dp_print_mlo_ast_stats(struct dp_soc *soc)
1779 {
1780 	if (soc->arch_ops.print_mlo_ast_stats)
1781 		soc->arch_ops.print_mlo_ast_stats(soc);
1782 }
1783 
1784 void
1785 dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
1786 {
1787 	struct dp_ast_entry *ase, *tmp_ase;
1788 	uint32_t num_entries = 0;
1789 	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
1790 			"NONE", "STATIC", "SELF", "WDS", "HMWDS", "BSS",
1791 			"DA", "HMWDS_SEC", "MLD"};
1792 
1793 	DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
1794 	    DP_PRINT_STATS("%6d mac_addr = "QDF_MAC_ADDR_FMT
1795 		    " peer_mac_addr = "QDF_MAC_ADDR_FMT
1796 		    " peer_id = %u"
1797 		    " type = %s"
1798 		    " next_hop = %d"
1799 		    " is_active = %d"
1800 		    " ast_idx = %d"
1801 		    " ast_hash = %d"
1802 		    " delete_in_progress = %d"
1803 		    " pdev_id = %d"
1804 		    " vdev_id = %d",
1805 		    ++num_entries,
1806 		    QDF_MAC_ADDR_REF(ase->mac_addr.raw),
1807 		    QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1808 		    ase->peer_id,
1809 		    type[ase->type],
1810 		    ase->next_hop,
1811 		    ase->is_active,
1812 		    ase->ast_idx,
1813 		    ase->ast_hash_value,
1814 		    ase->delete_in_progress,
1815 		    ase->pdev_id,
1816 		    ase->vdev_id);
1817 	}
1818 }
1819 
1820 void dp_print_ast_stats(struct dp_soc *soc)
1821 {
1822 	DP_PRINT_STATS("AST Stats:");
1823 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
1824 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
1825 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
1826 	DP_PRINT_STATS("	Entries MAP ERR  = %d", soc->stats.ast.map_err);
1827 	DP_PRINT_STATS("	Entries Mismatch ERR  = %d",
1828 		       soc->stats.ast.ast_mismatch);
1829 
1830 	DP_PRINT_STATS("AST Table:");
1831 
1832 	qdf_spin_lock_bh(&soc->ast_lock);
1833 
1834 	dp_soc_iterate_peer(soc, dp_print_peer_ast_entries, NULL,
1835 			    DP_MOD_ID_GENERIC_STATS);
1836 
1837 	qdf_spin_unlock_bh(&soc->ast_lock);
1838 
1839 	dp_print_mlo_ast_stats(soc);
1840 }
1841 #else
1842 void dp_print_ast_stats(struct dp_soc *soc)
1843 {
1844 	DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST");
1845 	return;
1846 }
1847 #endif
1848 
1849 /**
1850  * dp_print_peer_info() - Dump peer info
1851  * @soc: Datapath soc handle
1852  * @peer: Datapath peer handle
1853  * @arg: argument to iter function
1854  *
1855  * Return: void
1856  */
1857 static void
1858 dp_print_peer_info(struct dp_soc *soc, struct dp_peer *peer, void *arg)
1859 {
1860 	struct dp_txrx_peer *txrx_peer = NULL;
1861 
1862 	txrx_peer = dp_get_txrx_peer(peer);
1863 	if (!txrx_peer)
1864 		return;
1865 
1866 	DP_PRINT_STATS(" peer id = %d"
1867 		       " peer_mac_addr = "QDF_MAC_ADDR_FMT
1868 		       " nawds_enabled = %d"
1869 		       " bss_peer = %d"
1870 		       " wds_enabled = %d"
1871 		       " tx_cap_enabled = %d"
1872 		       " rx_cap_enabled = %d",
1873 		       peer->peer_id,
1874 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1875 		       txrx_peer->nawds_enabled,
1876 		       txrx_peer->bss_peer,
1877 		       txrx_peer->wds_enabled,
1878 		       dp_monitor_is_tx_cap_enabled(peer),
1879 		       dp_monitor_is_rx_cap_enabled(peer));
1880 }
1881 
1882 /**
1883  * dp_print_peer_table() - Dump all Peer stats
1884  * @vdev: Datapath Vdev handle
1885  *
1886  * Return: void
1887  */
1888 static void dp_print_peer_table(struct dp_vdev *vdev)
1889 {
1890 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
1891 	dp_vdev_iterate_peer(vdev, dp_print_peer_info, NULL,
1892 			     DP_MOD_ID_GENERIC_STATS);
1893 }
1894 
1895 /**
1896  * dp_srng_configure_pointer_update_thresholds() - Retrieve pointer
1897  * update threshold value from wlan_cfg_ctx
1898  * @soc: device handle
1899  * @ring_params: per ring specific parameters
1900  * @ring_type: Ring type
1901  * @ring_num: Ring number for a given ring type
1902  * @num_entries: number of entries to fill
1903  *
1904  * Fill the ring params with the pointer update threshold
1905  * configuration parameters available in wlan_cfg_ctx
1906  *
1907  * Return: None
1908  */
1909 static void
1910 dp_srng_configure_pointer_update_thresholds(
1911 				struct dp_soc *soc,
1912 				struct hal_srng_params *ring_params,
1913 				int ring_type, int ring_num,
1914 				int num_entries)
1915 {
1916 	if (ring_type == REO_DST) {
1917 		ring_params->pointer_timer_threshold =
1918 			wlan_cfg_get_pointer_timer_threshold_rx(
1919 						soc->wlan_cfg_ctx);
1920 		ring_params->pointer_num_threshold =
1921 			wlan_cfg_get_pointer_num_threshold_rx(
1922 						soc->wlan_cfg_ctx);
1923 	}
1924 }
1925 
1926 #ifdef WLAN_DP_PER_RING_TYPE_CONFIG
1927 /**
1928  * dp_srng_configure_interrupt_thresholds() - Retrieve interrupt
1929  * threshold values from the wlan_srng_cfg table for each ring type
1930  * @soc: device handle
1931  * @ring_params: per ring specific parameters
1932  * @ring_type: Ring type
1933  * @ring_num: Ring number for a given ring type
1934  * @num_entries: number of entries to fill
1935  *
1936  * Fill the ring params with the interrupt threshold
1937  * configuration parameters available in the per ring type wlan_srng_cfg
1938  * table.
1939  *
1940  * Return: None
1941  */
1942 static void
1943 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
1944 				       struct hal_srng_params *ring_params,
1945 				       int ring_type, int ring_num,
1946 				       int num_entries)
1947 {
1948 	uint8_t wbm2_sw_rx_rel_ring_id;
1949 
1950 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);
1951 
1952 	if (ring_type == REO_DST) {
1953 		ring_params->intr_timer_thres_us =
1954 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1955 		ring_params->intr_batch_cntr_thres_entries =
1956 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1957 	} else if (ring_type == WBM2SW_RELEASE &&
1958 		   (ring_num == wbm2_sw_rx_rel_ring_id)) {
1959 		ring_params->intr_timer_thres_us =
1960 				wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1961 		ring_params->intr_batch_cntr_thres_entries =
1962 				wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1963 	} else {
1964 		ring_params->intr_timer_thres_us =
1965 				soc->wlan_srng_cfg[ring_type].timer_threshold;
1966 		ring_params->intr_batch_cntr_thres_entries =
1967 				soc->wlan_srng_cfg[ring_type].batch_count_threshold;
1968 	}
1969 	ring_params->low_threshold =
1970 			soc->wlan_srng_cfg[ring_type].low_threshold;
1971 	if (ring_params->low_threshold)
1972 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1973 
1974 	dp_srng_configure_nf_interrupt_thresholds(soc, ring_params, ring_type);
1975 }
1976 #else
1977 static void
1978 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
1979 				       struct hal_srng_params *ring_params,
1980 				       int ring_type, int ring_num,
1981 				       int num_entries)
1982 {
1983 	uint8_t wbm2_sw_rx_rel_ring_id;
1984 	bool rx_refill_lt_disable;
1985 
1986 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);
1987 
1988 	if (ring_type == REO_DST || ring_type == REO2PPE) {
1989 		ring_params->intr_timer_thres_us =
1990 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1991 		ring_params->intr_batch_cntr_thres_entries =
1992 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1993 	} else if (ring_type == WBM2SW_RELEASE &&
1994 		   (ring_num < wbm2_sw_rx_rel_ring_id ||
1995 		   ring_num == WBM2SW_TXCOMP_RING4_NUM ||
1996 		   ring_num == WBM2_SW_PPE_REL_RING_ID)) {
1997 		ring_params->intr_timer_thres_us =
1998 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
1999 		ring_params->intr_batch_cntr_thres_entries =
2000 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
2001 	} else if (ring_type == RXDMA_BUF) {
2002 		rx_refill_lt_disable =
2003 			wlan_cfg_get_dp_soc_rxdma_refill_lt_disable
2004 							(soc->wlan_cfg_ctx);
2005 		ring_params->intr_timer_thres_us =
2006 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
2007 
2008 		if (!rx_refill_lt_disable) {
2009 			ring_params->low_threshold = num_entries >> 3;
2010 			ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
2011 			ring_params->intr_batch_cntr_thres_entries = 0;
2012 		}
2013 	} else {
2014 		ring_params->intr_timer_thres_us =
2015 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
2016 		ring_params->intr_batch_cntr_thres_entries =
2017 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
2018 	}
2019 
	/* These rings do not require interrupts to the host.
	 * Zero out their interrupt thresholds.
	 */
2021 	switch (ring_type) {
2022 	case REO_REINJECT:
2023 	case REO_CMD:
2024 	case TCL_DATA:
2025 	case TCL_CMD_CREDIT:
2026 	case TCL_STATUS:
2027 	case WBM_IDLE_LINK:
2028 	case SW2WBM_RELEASE:
2029 	case SW2RXDMA_NEW:
2030 		ring_params->intr_timer_thres_us = 0;
2031 		ring_params->intr_batch_cntr_thres_entries = 0;
2032 		break;
2033 	case PPE2TCL:
2034 		ring_params->intr_timer_thres_us =
2035 			wlan_cfg_get_int_timer_threshold_ppe2tcl(soc->wlan_cfg_ctx);
2036 		ring_params->intr_batch_cntr_thres_entries =
2037 			wlan_cfg_get_int_batch_threshold_ppe2tcl(soc->wlan_cfg_ctx);
2038 		break;
2039 	}
2040 
2041 	/* Enable low threshold interrupts for rx buffer rings (regular and
	 * monitor buffer rings).
2043 	 * TODO: See if this is required for any other ring
2044 	 */
	if ((ring_type == RXDMA_MONITOR_BUF) ||
	    (ring_type == RXDMA_MONITOR_STATUS) ||
	    (ring_type == TX_MONITOR_BUF)) {
2048 		/* TODO: Setting low threshold to 1/8th of ring size
2049 		 * see if this needs to be configurable
2050 		 */
2051 		ring_params->low_threshold = num_entries >> 3;
2052 		ring_params->intr_timer_thres_us =
2053 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
2054 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
2055 		ring_params->intr_batch_cntr_thres_entries = 0;
2056 	}
2057 
2058 	/* During initialisation monitor rings are only filled with
2059 	 * MON_BUF_MIN_ENTRIES entries. So low threshold needs to be set to
2060 	 * a value less than that. Low threshold value is reconfigured again
2061 	 * to 1/8th of the ring size when monitor vap is created.
2062 	 */
2063 	if (ring_type == RXDMA_MONITOR_BUF)
2064 		ring_params->low_threshold = MON_BUF_MIN_ENTRIES >> 1;
2065 
	/* In case of PCI chipsets, we don't have PPDU end interrupts,
	 * so the MONITOR_STATUS ring is reaped by receiving MSI from the
	 * srng. Keep the batch threshold at 4 so that an interrupt is
	 * received for every 4 entries in the MONITOR_STATUS ring
2070 	 */
2071 	if ((ring_type == RXDMA_MONITOR_STATUS) &&
2072 	    (soc->intr_mode == DP_INTR_MSI))
2073 		ring_params->intr_batch_cntr_thres_entries = 4;
2074 }
2075 #endif
2076 
2077 #ifdef DP_MEM_PRE_ALLOC
2078 
2079 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
2080 			   size_t ctxt_size)
2081 {
2082 	void *ctxt_mem;
2083 
2084 	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_context) {
2085 		dp_warn("dp_prealloc_get_context null!");
2086 		goto dynamic_alloc;
2087 	}
2088 
2089 	ctxt_mem = soc->cdp_soc.ol_ops->dp_prealloc_get_context(ctxt_type,
2090 								ctxt_size);
2091 
2092 	if (ctxt_mem)
2093 		goto end;
2094 
2095 dynamic_alloc:
2096 	dp_info("switch to dynamic-alloc for type %d, size %zu",
2097 		ctxt_type, ctxt_size);
2098 	ctxt_mem = qdf_mem_malloc(ctxt_size);
2099 end:
2100 	return ctxt_mem;
2101 }
2102 
2103 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
2104 			 void *vaddr)
2105 {
2106 	QDF_STATUS status;
2107 
2108 	if (soc->cdp_soc.ol_ops->dp_prealloc_put_context) {
2109 		status = soc->cdp_soc.ol_ops->dp_prealloc_put_context(
2110 								ctxt_type,
2111 								vaddr);
2112 	} else {
2113 		dp_warn("dp_prealloc_put_context null!");
2114 		status = QDF_STATUS_E_NOSUPPORT;
2115 	}
2116 
2117 	if (QDF_IS_STATUS_ERROR(status)) {
2118 		dp_info("Context type %d not pre-allocated", ctxt_type);
2119 		qdf_mem_free(vaddr);
2120 	}
2121 }
2122 
2123 static inline
2124 void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
2125 					   struct dp_srng *srng,
2126 					   uint32_t ring_type)
2127 {
2128 	void *mem;
2129 
2130 	qdf_assert(!srng->is_mem_prealloc);
2131 
2132 	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_consistent) {
2133 		dp_warn("dp_prealloc_get_consistent is null!");
2134 		goto qdf;
2135 	}
2136 
2137 	mem =
2138 		soc->cdp_soc.ol_ops->dp_prealloc_get_consistent
2139 						(&srng->alloc_size,
2140 						 &srng->base_vaddr_unaligned,
2141 						 &srng->base_paddr_unaligned,
2142 						 &srng->base_paddr_aligned,
2143 						 DP_RING_BASE_ALIGN, ring_type);
2144 
2145 	if (mem) {
2146 		srng->is_mem_prealloc = true;
2147 		goto end;
2148 	}
2149 qdf:
	mem = qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
2151 						&srng->base_vaddr_unaligned,
2152 						&srng->base_paddr_unaligned,
2153 						&srng->base_paddr_aligned,
2154 						DP_RING_BASE_ALIGN);
2155 end:
2156 	dp_info("%s memory %pK dp_srng %pK ring_type %d alloc_size %d num_entries %d",
2157 		srng->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", mem,
2158 		srng, ring_type, srng->alloc_size, srng->num_entries);
2159 	return mem;
2160 }
2161 
2162 static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
2163 					       struct dp_srng *srng)
2164 {
2165 	if (srng->is_mem_prealloc) {
2166 		if (!soc->cdp_soc.ol_ops->dp_prealloc_put_consistent) {
2167 			dp_warn("dp_prealloc_put_consistent is null!");
2168 			QDF_BUG(0);
2169 			return;
2170 		}
2171 		soc->cdp_soc.ol_ops->dp_prealloc_put_consistent
2172 						(srng->alloc_size,
2173 						 srng->base_vaddr_unaligned,
2174 						 srng->base_paddr_unaligned);
2175 
2176 	} else {
2177 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2178 					srng->alloc_size,
2179 					srng->base_vaddr_unaligned,
2180 					srng->base_paddr_unaligned, 0);
2181 	}
2182 }
2183 
2184 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
2185 				   enum dp_desc_type desc_type,
2186 				   struct qdf_mem_multi_page_t *pages,
2187 				   size_t element_size,
2188 				   uint32_t element_num,
2189 				   qdf_dma_context_t memctxt,
2190 				   bool cacheable)
2191 {
2192 	if (!soc->cdp_soc.ol_ops->dp_get_multi_pages) {
2193 		dp_warn("dp_get_multi_pages is null!");
2194 		goto qdf;
2195 	}
2196 
2197 	pages->num_pages = 0;
2198 	pages->is_mem_prealloc = 0;
2199 	soc->cdp_soc.ol_ops->dp_get_multi_pages(desc_type,
2200 						element_size,
2201 						element_num,
2202 						pages,
2203 						cacheable);
2204 	if (pages->num_pages)
2205 		goto end;
2206 
2207 qdf:
2208 	qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size,
2209 				  element_num, memctxt, cacheable);
2210 end:
2211 	dp_info("%s desc_type %d element_size %d element_num %d cacheable %d",
2212 		pages->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc",
2213 		desc_type, (int)element_size, element_num, cacheable);
2214 }
2215 
2216 void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
2217 				  enum dp_desc_type desc_type,
2218 				  struct qdf_mem_multi_page_t *pages,
2219 				  qdf_dma_context_t memctxt,
2220 				  bool cacheable)
2221 {
2222 	if (pages->is_mem_prealloc) {
2223 		if (!soc->cdp_soc.ol_ops->dp_put_multi_pages) {
2224 			dp_warn("dp_put_multi_pages is null!");
2225 			QDF_BUG(0);
2226 			return;
2227 		}
2228 
2229 		soc->cdp_soc.ol_ops->dp_put_multi_pages(desc_type, pages);
2230 		qdf_mem_zero(pages, sizeof(*pages));
2231 	} else {
2232 		qdf_mem_multi_pages_free(soc->osdev, pages,
2233 					 memctxt, cacheable);
2234 	}
2235 }
2236 
2237 #else
2238 
2239 static inline
2240 void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
2241 					   struct dp_srng *srng,
2242 					   uint32_t ring_type)
2243 
2244 {
2245 	void *mem;
2246 
2247 	mem = qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
2248 					       &srng->base_vaddr_unaligned,
2249 					       &srng->base_paddr_unaligned,
2250 					       &srng->base_paddr_aligned,
2251 					       DP_RING_BASE_ALIGN);
2252 	if (mem)
2253 		qdf_mem_set(srng->base_vaddr_unaligned, 0, srng->alloc_size);
2254 
2255 	return mem;
2256 }
2257 
2258 static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
2259 					       struct dp_srng *srng)
2260 {
2261 	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2262 				srng->alloc_size,
2263 				srng->base_vaddr_unaligned,
2264 				srng->base_paddr_unaligned, 0);
2265 }
2266 
2267 #endif /* DP_MEM_PRE_ALLOC */
2268 
2269 #ifdef QCA_SUPPORT_WDS_EXTENDED
2270 bool dp_vdev_is_wds_ext_enabled(struct dp_vdev *vdev)
2271 {
2272 	return vdev->wds_ext_enabled;
2273 }
2274 #else
2275 bool dp_vdev_is_wds_ext_enabled(struct dp_vdev *vdev)
2276 {
2277 	return false;
2278 }
2279 #endif
2280 
2281 void dp_pdev_update_fast_rx_flag(struct dp_soc *soc, struct dp_pdev *pdev)
2282 {
2283 	struct dp_vdev *vdev = NULL;
2284 	uint8_t rx_fast_flag = true;
2285 
2286 	if (wlan_cfg_is_rx_flow_tag_enabled(soc->wlan_cfg_ctx)) {
2287 		rx_fast_flag = false;
2288 		goto update_flag;
2289 	}
2290 
	/* Check if protocol tagging is enabled */
2292 	if (pdev->is_rx_protocol_tagging_enabled) {
2293 		rx_fast_flag = false;
2294 		goto update_flag;
2295 	}
2296 
2297 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
2298 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
2299 		/* Check if any VDEV has NAWDS enabled */
2300 		if (vdev->nawds_enabled) {
2301 			rx_fast_flag = false;
2302 			break;
2303 		}
2304 
2305 		/* Check if any VDEV has multipass enabled */
2306 		if (vdev->multipass_en) {
2307 			rx_fast_flag = false;
2308 			break;
2309 		}
2310 
2311 		/* Check if any VDEV has mesh enabled */
2312 		if (vdev->mesh_vdev) {
2313 			rx_fast_flag = false;
2314 			break;
2315 		}
2316 
2317 		/* Check if any VDEV has WDS ext enabled */
2318 		if (dp_vdev_is_wds_ext_enabled(vdev)) {
2319 			rx_fast_flag = false;
2320 			break;
2321 		}
2322 	}
2323 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
2324 
2325 update_flag:
2326 	dp_init_info("Updated Rx fast flag to %u", rx_fast_flag);
2327 	pdev->rx_fast_flag = rx_fast_flag;
2328 }
2329 
2330 void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng)
2331 {
2332 	if (srng->alloc_size && srng->base_vaddr_unaligned) {
2333 		if (!srng->cached) {
2334 			dp_srng_mem_free_consistent(soc, srng);
2335 		} else {
2336 			qdf_mem_free(srng->base_vaddr_unaligned);
2337 		}
2338 		srng->alloc_size = 0;
2339 		srng->base_vaddr_unaligned = NULL;
2340 	}
2341 	srng->hal_srng = NULL;
2342 }
2343 
2344 qdf_export_symbol(dp_srng_free);
2345 
2346 #ifdef DISABLE_MON_RING_MSI_CFG
2347 /**
2348  * dp_skip_msi_cfg() - Check if msi cfg has to be skipped for ring_type
2349  * @soc: DP SoC context
 * @ring_type: srng type
2351  *
 * Return: true if msi cfg should be skipped for the srng type, else false
2353  */
2354 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
2355 {
2356 	if (ring_type == RXDMA_MONITOR_STATUS)
2357 		return true;
2358 
2359 	return false;
2360 }
2361 #else
2362 #ifdef DP_CON_MON_MSI_ENABLED
2363 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
2364 {
2365 	if (soc->cdp_soc.ol_ops->get_con_mode &&
2366 	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE) {
2367 		if (ring_type == REO_DST || ring_type == RXDMA_DST)
2368 			return true;
2369 	} else if (ring_type == RXDMA_MONITOR_STATUS) {
2370 		return true;
2371 	}
2372 
2373 	return false;
2374 }
2375 #else
2376 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
2377 {
2378 	return false;
2379 }
2380 #endif /* DP_CON_MON_MSI_ENABLED */
2381 #endif /* DISABLE_MON_RING_MSI_CFG */
2382 
2383 QDF_STATUS dp_srng_init_idx(struct dp_soc *soc, struct dp_srng *srng,
2384 			    int ring_type, int ring_num, int mac_id,
2385 			    uint32_t idx)
2386 {
	bool idle_check;
	hal_soc_handle_t hal_soc = soc->hal_soc;
2390 	struct hal_srng_params ring_params;
2391 
2392 	if (srng->hal_srng) {
2393 		dp_init_err("%pK: Ring type: %d, num:%d is already initialized",
2394 			    soc, ring_type, ring_num);
2395 		return QDF_STATUS_SUCCESS;
2396 	}
2397 
2398 	/* memset the srng ring to zero */
2399 	qdf_mem_zero(srng->base_vaddr_unaligned, srng->alloc_size);
2400 
2401 	qdf_mem_zero(&ring_params, sizeof(struct hal_srng_params));
2402 	ring_params.ring_base_paddr = srng->base_paddr_aligned;
2403 	ring_params.ring_base_vaddr = srng->base_vaddr_aligned;
2404 
2405 	ring_params.num_entries = srng->num_entries;
2406 
2407 	dp_info("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
2408 		ring_type, ring_num,
2409 		(void *)ring_params.ring_base_vaddr,
2410 		(void *)ring_params.ring_base_paddr,
2411 		ring_params.num_entries);
2412 
2413 	if (soc->intr_mode == DP_INTR_MSI && !dp_skip_msi_cfg(soc, ring_type)) {
2414 		dp_srng_msi_setup(soc, srng, &ring_params, ring_type, ring_num);
2415 		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
2416 				 ring_type, ring_num);
2417 	} else {
2418 		ring_params.msi_data = 0;
2419 		ring_params.msi_addr = 0;
2420 		dp_srng_set_msi2_ring_params(soc, &ring_params, 0, 0);
2421 		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
2422 				 ring_type, ring_num);
2423 	}
2424 
2425 	dp_srng_configure_interrupt_thresholds(soc, &ring_params,
2426 					       ring_type, ring_num,
2427 					       srng->num_entries);
2428 
2429 	dp_srng_set_nf_thresholds(soc, srng, &ring_params);
2430 	dp_srng_configure_pointer_update_thresholds(soc, &ring_params,
2431 						    ring_type, ring_num,
2432 						    srng->num_entries);
2433 
2434 	if (srng->cached)
2435 		ring_params.flags |= HAL_SRNG_CACHED_DESC;
2436 
2437 	idle_check = dp_check_umac_reset_in_progress(soc);
2438 
2439 	srng->hal_srng = hal_srng_setup_idx(hal_soc, ring_type, ring_num,
2440 					    mac_id, &ring_params, idle_check,
2441 					    idx);
2442 
2443 	if (!srng->hal_srng) {
2444 		dp_srng_free(soc, srng);
2445 		return QDF_STATUS_E_FAILURE;
2446 	}
2447 
2448 	return QDF_STATUS_SUCCESS;
2449 }
2450 
2451 qdf_export_symbol(dp_srng_init_idx);
2452 
2453 QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng, int ring_type,
2454 			int ring_num, int mac_id)
2455 {
2456 	return dp_srng_init_idx(soc, srng, ring_type, ring_num, mac_id, 0);
2457 }
2458 
2459 qdf_export_symbol(dp_srng_init);
2460 
2461 QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
2462 			 int ring_type, uint32_t num_entries,
2463 			 bool cached)
2464 {
2465 	hal_soc_handle_t hal_soc = soc->hal_soc;
2466 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
2467 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
2468 
2469 	if (srng->base_vaddr_unaligned) {
2470 		dp_init_err("%pK: Ring type: %d, is already allocated",
2471 			    soc, ring_type);
2472 		return QDF_STATUS_SUCCESS;
2473 	}
2474 
2475 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
2476 	srng->hal_srng = NULL;
2477 	srng->alloc_size = num_entries * entry_size;
2478 	srng->num_entries = num_entries;
2479 	srng->cached = cached;
2480 
2481 	if (!cached) {
2482 		srng->base_vaddr_aligned =
2483 		    dp_srng_aligned_mem_alloc_consistent(soc,
2484 							 srng,
2485 							 ring_type);
2486 	} else {
2487 		srng->base_vaddr_aligned = qdf_aligned_malloc(
2488 					&srng->alloc_size,
2489 					&srng->base_vaddr_unaligned,
2490 					&srng->base_paddr_unaligned,
2491 					&srng->base_paddr_aligned,
2492 					DP_RING_BASE_ALIGN);
2493 	}
2494 
2495 	if (!srng->base_vaddr_aligned)
2496 		return QDF_STATUS_E_NOMEM;
2497 
2498 	return QDF_STATUS_SUCCESS;
2499 }
2500 
2501 qdf_export_symbol(dp_srng_alloc);
2502 
2503 void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
2504 		    int ring_type, int ring_num)
2505 {
2506 	if (!srng->hal_srng) {
2507 		dp_init_err("%pK: Ring type: %d, num:%d not setup",
2508 			    soc, ring_type, ring_num);
2509 		return;
2510 	}
2511 
2512 	if (soc->arch_ops.dp_free_ppeds_interrupts)
2513 		soc->arch_ops.dp_free_ppeds_interrupts(soc, srng, ring_type,
2514 						       ring_num);
2515 
2516 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
2517 	srng->hal_srng = NULL;
2518 }
2519 
2520 qdf_export_symbol(dp_srng_deinit);
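
/*
 * SRNG lifecycle as used throughout this file (illustrative sketch; the
 * REO_DST type, entry count and ring/mac ids are arbitrary examples):
 *
 *	struct dp_srng srng = {0};
 *
 *	if (dp_srng_alloc(soc, &srng, REO_DST, 1024, false))
 *		goto fail;
 *	if (dp_srng_init(soc, &srng, REO_DST, 0, 0))
 *		goto fail;	/* init frees the ring memory on failure */
 *	...
 *	dp_srng_deinit(soc, &srng, REO_DST, 0);
 *	dp_srng_free(soc, &srng);
 */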
2521 
2522 /* TODO: Need this interface from HIF */
2523 void *hif_get_hal_handle(struct hif_opaque_softc *hif_handle);
2524 
2525 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
2526 int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
2527 			 hal_ring_handle_t hal_ring_hdl)
2528 {
2529 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2530 	uint32_t hp, tp;
2531 	uint8_t ring_id;
2532 
2533 	if (!int_ctx)
2534 		return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
2535 
2536 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
2537 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2538 
2539 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
2540 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_START);
2541 
2542 	return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
2543 }
2544 
2545 void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
2546 			hal_ring_handle_t hal_ring_hdl)
2547 {
2548 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2549 	uint32_t hp, tp;
2550 	uint8_t ring_id;
2551 
2552 	if (!int_ctx)
2553 		return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
2554 
2555 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
2556 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2557 
2558 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
2559 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_END);
2560 
2561 	return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
2562 }
2563 
2564 static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
2565 					      uint8_t hist_group_id)
2566 {
2567 	hif_record_event(dp_soc->hif_handle, hist_group_id,
2568 			 0, 0, 0, HIF_EVENT_TIMER_ENTRY);
2569 }
2570 
2571 static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
2572 					     uint8_t hist_group_id)
2573 {
2574 	hif_record_event(dp_soc->hif_handle, hist_group_id,
2575 			 0, 0, 0, HIF_EVENT_TIMER_EXIT);
2576 }
2577 #else
2578 
2579 static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
2580 					      uint8_t hist_group_id)
2581 {
2582 }
2583 
2584 static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
2585 					     uint8_t hist_group_id)
2586 {
2587 }
2588 
2589 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
2590 
2591 enum timer_yield_status
2592 dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
2593 			  uint64_t start_time)
2594 {
2595 	uint64_t cur_time = qdf_get_log_timestamp();
2596 
2597 	if (!work_done)
2598 		return DP_TIMER_WORK_DONE;
2599 
2600 	if (cur_time - start_time > DP_MAX_TIMER_EXEC_TIME_TICKS)
2601 		return DP_TIMER_TIME_EXHAUST;
2602 
2603 	return DP_TIMER_NO_YIELD;
2604 }
2605 
2606 qdf_export_symbol(dp_should_timer_irq_yield);
2607 
2608 static int dp_process_rxdma_dst_ring(struct dp_soc *soc,
2609 				     struct dp_intr *int_ctx,
2610 				     int mac_for_pdev,
2611 				     int total_budget)
2612 {
2613 	return dp_rxdma_err_process(int_ctx, soc, mac_for_pdev,
2614 				    total_budget);
2615 }
2616 
2617 /**
2618  * dp_process_lmac_rings() - Process LMAC rings
2619  * @int_ctx: interrupt context
2620  * @total_budget: budget of work which can be done
2621  *
2622  * Return: work done
2623  */
2624 static int dp_process_lmac_rings(struct dp_intr *int_ctx, int total_budget)
2625 {
2626 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2627 	struct dp_soc *soc = int_ctx->soc;
2628 	uint32_t remaining_quota = total_budget;
2629 	struct dp_pdev *pdev = NULL;
2630 	uint32_t work_done  = 0;
2631 	int budget = total_budget;
2632 	int ring = 0;
2633 
2634 	/* Process LMAC interrupts */
	for (ring = 0; ring < MAX_NUM_LMAC_HW; ring++) {
2636 		int mac_for_pdev = ring;
2637 
2638 		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
2639 		if (!pdev)
2640 			continue;
2641 		if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
2642 			work_done = dp_monitor_process(soc, int_ctx,
2643 						       mac_for_pdev,
2644 						       remaining_quota);
2645 			if (work_done)
2646 				intr_stats->num_rx_mon_ring_masks++;
2647 			budget -= work_done;
2648 			if (budget <= 0)
2649 				goto budget_done;
2650 			remaining_quota = budget;
2651 		}
2652 
2653 		if (int_ctx->tx_mon_ring_mask & (1 << mac_for_pdev)) {
2654 			work_done = dp_tx_mon_process(soc, int_ctx,
2655 						      mac_for_pdev,
2656 						      remaining_quota);
2657 			if (work_done)
2658 				intr_stats->num_tx_mon_ring_masks++;
2659 			budget -= work_done;
2660 			if (budget <= 0)
2661 				goto budget_done;
2662 			remaining_quota = budget;
2663 		}
2664 
2665 		if (int_ctx->rxdma2host_ring_mask &
2666 				(1 << mac_for_pdev)) {
2667 			work_done = dp_process_rxdma_dst_ring(soc, int_ctx,
2668 							      mac_for_pdev,
2669 							      remaining_quota);
2670 			if (work_done)
2671 				intr_stats->num_rxdma2host_ring_masks++;
2672 			budget -=  work_done;
2673 			if (budget <= 0)
2674 				goto budget_done;
2675 			remaining_quota = budget;
2676 		}
2677 
2678 		if (int_ctx->host2rxdma_ring_mask & (1 << mac_for_pdev)) {
2679 			union dp_rx_desc_list_elem_t *desc_list = NULL;
2680 			union dp_rx_desc_list_elem_t *tail = NULL;
2681 			struct dp_srng *rx_refill_buf_ring;
2682 			struct rx_desc_pool *rx_desc_pool;
2683 
2684 			rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
2685 			if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
2686 				rx_refill_buf_ring =
2687 					&soc->rx_refill_buf_ring[mac_for_pdev];
2688 			else
2689 				rx_refill_buf_ring =
2690 					&soc->rx_refill_buf_ring[pdev->lmac_id];
2691 
2692 			intr_stats->num_host2rxdma_ring_masks++;
2693 			dp_rx_buffers_lt_replenish_simple(soc, mac_for_pdev,
2694 							  rx_refill_buf_ring,
2695 							  rx_desc_pool,
2696 							  0,
2697 							  &desc_list,
2698 							  &tail);
2699 		}
2700 
2701 	}
2702 
2703 	if (int_ctx->host2rxdma_mon_ring_mask)
2704 		dp_rx_mon_buf_refill(int_ctx);
2705 
2706 	if (int_ctx->host2txmon_ring_mask)
2707 		dp_tx_mon_buf_refill(int_ctx);
2708 
2709 budget_done:
2710 	return total_budget - budget;
2711 }
2712 
2713 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
2714 /**
2715  * dp_service_near_full_srngs() - Bottom half handler to process the near
2716  *				full IRQ on a SRNG
2717  * @dp_ctx: Datapath SoC handle
2718  * @dp_budget: Number of SRNGs which can be processed in a single attempt
2719  *		without rescheduling
2720  * @cpu: cpu id
2721  *
2722  * Return: remaining budget/quota for the soc device
2723  */
2724 static
2725 uint32_t dp_service_near_full_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
2726 {
2727 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2728 	struct dp_soc *soc = int_ctx->soc;
2729 
2730 	/*
	 * The dp_service_near_full_srngs arch op is always expected to be
	 * initialized when the NEAR FULL IRQ feature is enabled.
2733 	 */
2734 	return soc->arch_ops.dp_service_near_full_srngs(soc, int_ctx,
2735 							dp_budget);
2736 }
2737 #endif
2738 
2739 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2740 
2741 /**
2742  * dp_srng_get_cpu() - Get the smp processor id for srng processing
2743  *
2744  * Return: smp processor id
2745  */
2746 static inline int dp_srng_get_cpu(void)
2747 {
2748 	return smp_processor_id();
2749 }
2750 
2751 /**
2752  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
2753  * @dp_ctx: DP SOC handle
2754  * @dp_budget: Number of frames/descriptors that can be processed in one shot
2755  * @cpu: CPU on which this instance is running
2756  *
2757  * Return: remaining budget/quota for the soc device
2758  */
2759 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
2760 {
2761 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2762 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2763 	struct dp_soc *soc = int_ctx->soc;
2764 	int ring = 0;
2765 	int index;
2766 	uint32_t work_done  = 0;
2767 	int budget = dp_budget;
2768 	uint8_t tx_mask = int_ctx->tx_ring_mask;
2769 	uint8_t rx_mask = int_ctx->rx_ring_mask;
2770 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
2771 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
2772 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
2773 	uint32_t remaining_quota = dp_budget;
2774 
2775 	qdf_atomic_set_bit(cpu, &soc->service_rings_running);
2776 
2777 	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x\n",
2778 			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
2779 			 reo_status_mask,
2780 			 int_ctx->rx_mon_ring_mask,
2781 			 int_ctx->host2rxdma_ring_mask,
2782 			 int_ctx->rxdma2host_ring_mask);
2783 
	/* Process Tx completion interrupts first to return buffers */
2785 	for (index = 0; index < soc->num_tx_comp_rings; index++) {
2786 		if (!(1 << wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) & tx_mask))
2787 			continue;
2788 		work_done = dp_tx_comp_handler(int_ctx,
2789 					       soc,
2790 					       soc->tx_comp_ring[index].hal_srng,
2791 					       index, remaining_quota);
2792 		if (work_done) {
2793 			intr_stats->num_tx_ring_masks[index]++;
2794 			dp_verbose_debug("tx mask 0x%x index %d, budget %d, work_done %d",
2795 					 tx_mask, index, budget,
2796 					 work_done);
2797 		}
2798 		budget -= work_done;
2799 		if (budget <= 0)
2800 			goto budget_done;
2801 
2802 		remaining_quota = budget;
2803 	}
2804 
2805 	/* Process REO Exception ring interrupt */
2806 	if (rx_err_mask) {
2807 		work_done = dp_rx_err_process(int_ctx, soc,
2808 					      soc->reo_exception_ring.hal_srng,
2809 					      remaining_quota);
2810 
2811 		if (work_done) {
2812 			intr_stats->num_rx_err_ring_masks++;
2813 			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
2814 					 work_done, budget);
2815 		}
2816 
2817 		budget -=  work_done;
2818 		if (budget <= 0) {
2819 			goto budget_done;
2820 		}
2821 		remaining_quota = budget;
2822 	}
2823 
2824 	/* Process Rx WBM release ring interrupt */
2825 	if (rx_wbm_rel_mask) {
2826 		work_done = dp_rx_wbm_err_process(int_ctx, soc,
2827 						  soc->rx_rel_ring.hal_srng,
2828 						  remaining_quota);
2829 
2830 		if (work_done) {
2831 			intr_stats->num_rx_wbm_rel_ring_masks++;
2832 			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
2833 					 work_done, budget);
2834 		}
2835 
2836 		budget -=  work_done;
2837 		if (budget <= 0) {
2838 			goto budget_done;
2839 		}
2840 		remaining_quota = budget;
2841 	}
2842 
2843 	/* Process Rx interrupts */
2844 	if (rx_mask) {
2845 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
2846 			if (!(rx_mask & (1 << ring)))
2847 				continue;
2848 			work_done = soc->arch_ops.dp_rx_process(int_ctx,
2849 						  soc->reo_dest_ring[ring].hal_srng,
2850 						  ring,
2851 						  remaining_quota);
2852 			if (work_done) {
2853 				intr_stats->num_rx_ring_masks[ring]++;
2854 				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
2855 						 rx_mask, ring,
2856 						 work_done, budget);
2857 				budget -=  work_done;
2858 				if (budget <= 0)
2859 					goto budget_done;
2860 				remaining_quota = budget;
2861 			}
2862 		}
2863 	}
2864 
2865 	if (reo_status_mask) {
2866 		if (dp_reo_status_ring_handler(int_ctx, soc))
2867 			int_ctx->intr_stats.num_reo_status_ring_masks++;
2868 	}
2869 
2870 	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
2871 		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
2872 		if (work_done) {
2873 			budget -=  work_done;
2874 			if (budget <= 0)
2875 				goto budget_done;
2876 			remaining_quota = budget;
2877 		}
2878 	}
2879 
2880 	qdf_lro_flush(int_ctx->lro_ctx);
2881 	intr_stats->num_masks++;
2882 
2883 budget_done:
2884 	qdf_atomic_clear_bit(cpu, &soc->service_rings_running);
2885 
2886 	if (soc->notify_fw_callback)
2887 		soc->notify_fw_callback(soc);
2888 
2889 	return dp_budget - budget;
2890 }
2891 
2892 #else /* QCA_HOST_MODE_WIFI_DISABLED */
2893 
2894 /**
2895  * dp_srng_get_cpu() - Get the smp processor id for srng processing
2896  *
2897  * Return: smp processor id
2898  */
2899 static inline int dp_srng_get_cpu(void)
2900 {
2901 	return 0;
2902 }
2903 
2904 /**
2905  * dp_service_srngs() - Top level handler for DP Monitor Ring interrupts
2906  * @dp_ctx: DP SOC handle
2907  * @dp_budget: Number of frames/descriptors that can be processed in one shot
2908  * @cpu: CPU on which this instance is running
2909  *
2910  * Return: remaining budget/quota for the soc device
2911  */
2912 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
2913 {
2914 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2915 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2916 	struct dp_soc *soc = int_ctx->soc;
2917 	uint32_t remaining_quota = dp_budget;
2918 	uint32_t work_done  = 0;
2919 	int budget = dp_budget;
2920 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
2921 
2922 	if (reo_status_mask) {
2923 		if (dp_reo_status_ring_handler(int_ctx, soc))
2924 			int_ctx->intr_stats.num_reo_status_ring_masks++;
2925 	}
2926 
2927 	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
2928 		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
2929 		if (work_done) {
2930 			budget -=  work_done;
2931 			if (budget <= 0)
2932 				goto budget_done;
2933 			remaining_quota = budget;
2934 		}
2935 	}
2936 
2937 	qdf_lro_flush(int_ctx->lro_ctx);
2938 	intr_stats->num_masks++;
2939 
2940 budget_done:
2941 	return dp_budget - budget;
2942 }
2943 
2944 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2945 
2946 /**
2947  * dp_interrupt_timer() - timer poll for interrupts
2948  * @arg: SoC Handle
2949  *
 * Return: none
2952  */
2953 static void dp_interrupt_timer(void *arg)
2954 {
2955 	struct dp_soc *soc = (struct dp_soc *) arg;
2956 	struct dp_pdev *pdev = soc->pdev_list[0];
2957 	enum timer_yield_status yield = DP_TIMER_NO_YIELD;
2958 	uint32_t work_done  = 0, total_work_done = 0;
2959 	int budget = 0xffff, i;
2960 	uint32_t remaining_quota = budget;
2961 	uint64_t start_time;
2962 	uint32_t lmac_id = DP_MON_INVALID_LMAC_ID;
2963 	uint8_t dp_intr_id = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
2964 	uint32_t lmac_iter;
2965 	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
2966 	enum reg_wifi_band mon_band;
2967 	int cpu = dp_srng_get_cpu();
2968 
2969 	/*
	 * this logic puts all data path interfacing rings (UMAC/LMAC)
	 * and monitor rings into polling mode when NSS offload is disabled
2972 	 */
2973 	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx) &&
2974 	    !wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
2975 		if (qdf_atomic_read(&soc->cmn_init_done)) {
2976 			for (i = 0; i < wlan_cfg_get_num_contexts(
2977 						soc->wlan_cfg_ctx); i++)
2978 				dp_service_srngs(&soc->intr_ctx[i], 0xffff,
2979 						 cpu);
2980 
2981 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
2982 		}
2983 		return;
2984 	}
2985 
2986 	if (!qdf_atomic_read(&soc->cmn_init_done))
2987 		return;
2988 
2989 	if (dp_monitor_is_chan_band_known(pdev)) {
2990 		mon_band = dp_monitor_get_chan_band(pdev);
2991 		lmac_id = pdev->ch_band_lmac_id_mapping[mon_band];
2992 		if (qdf_likely(lmac_id != DP_MON_INVALID_LMAC_ID)) {
2993 			dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id];
2994 			dp_srng_record_timer_entry(soc, dp_intr_id);
2995 		}
2996 	}
2997 
2998 	start_time = qdf_get_log_timestamp();
2999 	dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);
3000 
3001 	while (yield == DP_TIMER_NO_YIELD) {
3002 		for (lmac_iter = 0; lmac_iter < max_mac_rings; lmac_iter++) {
3003 			if (lmac_iter == lmac_id)
3004 				work_done = dp_monitor_process(soc,
3005 						&soc->intr_ctx[dp_intr_id],
3006 						lmac_iter, remaining_quota);
3007 			else
3008 				work_done =
3009 					dp_monitor_drop_packets_for_mac(pdev,
3010 							     lmac_iter,
3011 							     remaining_quota);
3012 			if (work_done) {
3013 				budget -=  work_done;
3014 				if (budget <= 0) {
3015 					yield = DP_TIMER_WORK_EXHAUST;
3016 					goto budget_done;
3017 				}
3018 				remaining_quota = budget;
3019 				total_work_done += work_done;
3020 			}
3021 		}
3022 
3023 		yield = dp_should_timer_irq_yield(soc, total_work_done,
3024 						  start_time);
3025 		total_work_done = 0;
3026 	}
3027 
3028 budget_done:
3029 	if (yield == DP_TIMER_WORK_EXHAUST ||
3030 	    yield == DP_TIMER_TIME_EXHAUST)
3031 		qdf_timer_mod(&soc->int_timer, 1);
3032 	else
3033 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
3034 
3035 	if (lmac_id != DP_MON_INVALID_LMAC_ID)
3036 		dp_srng_record_timer_exit(soc, dp_intr_id);
3037 }
3038 
3039 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
3040 static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
3041 					struct dp_intr *intr_ctx)
3042 {
3043 	if (intr_ctx->rx_mon_ring_mask)
3044 		return true;
3045 
3046 	return false;
3047 }
3048 #else
3049 static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
3050 					struct dp_intr *intr_ctx)
3051 {
3052 	return false;
3053 }
3054 #endif
3055 
3056 /**
3057  * dp_soc_attach_poll() - Register handlers for DP interrupts
3058  * @txrx_soc: DP SOC handle
3059  *
 * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
 * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
 * rx_monitor_ring mask to indicate the rings that are processed by the handler.
3063  *
3064  * Return: 0 for success, nonzero for failure.
3065  */
3066 static QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc)
3067 {
3068 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3069 	int i;
3070 	int lmac_id = 0;
3071 
3072 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
3073 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
3074 	soc->intr_mode = DP_INTR_POLL;
3075 
3076 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
3077 		soc->intr_ctx[i].dp_intr_id = i;
3078 		soc->intr_ctx[i].tx_ring_mask =
3079 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
3080 		soc->intr_ctx[i].rx_ring_mask =
3081 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
3082 		soc->intr_ctx[i].rx_mon_ring_mask =
3083 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
3084 		soc->intr_ctx[i].rx_err_ring_mask =
3085 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
3086 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
3087 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
3088 		soc->intr_ctx[i].reo_status_ring_mask =
3089 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
3090 		soc->intr_ctx[i].rxdma2host_ring_mask =
3091 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
3092 		soc->intr_ctx[i].soc = soc;
3093 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
3094 
3095 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
3096 			hif_event_history_init(soc->hif_handle, i);
3097 			soc->mon_intr_id_lmac_map[lmac_id] = i;
3098 			lmac_id++;
3099 		}
3100 	}
3101 
3102 	qdf_timer_init(soc->osdev, &soc->int_timer,
3103 			dp_interrupt_timer, (void *)soc,
3104 			QDF_TIMER_TYPE_WAKE_APPS);
3105 
3106 	return QDF_STATUS_SUCCESS;
3107 }
3108 
3109 /**
3110  * dp_soc_set_interrupt_mode() - Set the interrupt mode in soc
3111  * @soc: DP soc handle
3112  *
3113  * Set the appropriate interrupt mode flag in the soc
3114  */
3115 static void dp_soc_set_interrupt_mode(struct dp_soc *soc)
3116 {
3117 	uint32_t msi_base_data, msi_vector_start;
3118 	int msi_vector_count, ret;
3119 
3120 	soc->intr_mode = DP_INTR_INTEGRATED;
3121 
3122 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
3123 	    (dp_is_monitor_mode_using_poll(soc) &&
3124 	     soc->cdp_soc.ol_ops->get_con_mode &&
3125 	     soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)) {
3126 		soc->intr_mode = DP_INTR_POLL;
3127 	} else {
3128 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
3129 						  &msi_vector_count,
3130 						  &msi_base_data,
3131 						  &msi_vector_start);
3132 		if (ret)
3133 			return;
3134 
3135 		soc->intr_mode = DP_INTR_MSI;
3136 	}
3137 }
3138 
3139 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc);
3140 #if defined(DP_INTR_POLL_BOTH)
3141 /**
3142  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
3143  * @txrx_soc: DP SOC handle
3144  *
3145  * Call the appropriate attach function based on the mode of operation.
 * This is a workaround (WAR) for enabling monitor mode.
3147  *
3148  * Return: 0 for success. nonzero for failure.
3149  */
3150 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
3151 {
3152 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3153 
3154 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
3155 	    (dp_is_monitor_mode_using_poll(soc) &&
3156 	     soc->cdp_soc.ol_ops->get_con_mode &&
3157 	     soc->cdp_soc.ol_ops->get_con_mode() ==
3158 	     QDF_GLOBAL_MONITOR_MODE)) {
3159 		dp_info("Poll mode");
3160 		return dp_soc_attach_poll(txrx_soc);
3161 	} else {
3162 		dp_info("Interrupt  mode");
3163 		return dp_soc_interrupt_attach(txrx_soc);
3164 	}
3165 }
3166 #else
3167 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
3168 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
3169 {
3170 	return dp_soc_attach_poll(txrx_soc);
3171 }
3172 #else
3173 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
3174 {
3175 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3176 
3177 	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx))
3178 		return dp_soc_attach_poll(txrx_soc);
3179 	else
3180 		return dp_soc_interrupt_attach(txrx_soc);
3181 }
3182 #endif
3183 #endif
3184 
3185 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
3186 /**
3187  * dp_soc_interrupt_map_calculate_wifi3_pci_legacy() -
3188  * Calculate interrupt map for legacy interrupts
3189  * @soc: DP soc handle
3190  * @intr_ctx_num: Interrupt context number
3191  * @irq_id_map: IRQ map
3192  * @num_irq_r: Number of interrupts assigned for this context
3193  *
3194  * Return: void
3195  */
3196 static void dp_soc_interrupt_map_calculate_wifi3_pci_legacy(struct dp_soc *soc,
3197 							    int intr_ctx_num,
3198 							    int *irq_id_map,
3199 							    int *num_irq_r)
3200 {
3201 	int j;
3202 	int num_irq = 0;
3203 	int tx_mask = wlan_cfg_get_tx_ring_mask(
3204 					soc->wlan_cfg_ctx, intr_ctx_num);
3205 	int rx_mask = wlan_cfg_get_rx_ring_mask(
3206 					soc->wlan_cfg_ctx, intr_ctx_num);
3207 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
3208 					soc->wlan_cfg_ctx, intr_ctx_num);
3209 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
3210 					soc->wlan_cfg_ctx, intr_ctx_num);
3211 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
3212 					soc->wlan_cfg_ctx, intr_ctx_num);
3213 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
3214 					soc->wlan_cfg_ctx, intr_ctx_num);
3215 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
3216 					soc->wlan_cfg_ctx, intr_ctx_num);
3217 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
3218 					soc->wlan_cfg_ctx, intr_ctx_num);
3219 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
3220 					soc->wlan_cfg_ctx, intr_ctx_num);
3221 	soc->intr_mode = DP_INTR_LEGACY_VIRTUAL_IRQ;
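	/*
	 * Each set bit j in a ring mask selects ring j of that ring type.
	 * The legacy IRQ ids are assumed to be laid out contiguously in
	 * descending order per ring type, so ring j maps to (ring0_id - j).
	 */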
3222 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
3223 		if (tx_mask & (1 << j))
3224 			irq_id_map[num_irq++] = (wbm2sw0_release - j);
3225 		if (rx_mask & (1 << j))
3226 			irq_id_map[num_irq++] = (reo2sw1_intr - j);
3227 		if (rx_mon_mask & (1 << j))
3228 			irq_id_map[num_irq++] = (rxmon2sw_p0_dest0 - j);
3229 		if (rx_err_ring_mask & (1 << j))
3230 			irq_id_map[num_irq++] = (reo2sw0_intr - j);
3231 		if (rx_wbm_rel_ring_mask & (1 << j))
3232 			irq_id_map[num_irq++] = (wbm2sw5_release - j);
3233 		if (reo_status_ring_mask & (1 << j))
3234 			irq_id_map[num_irq++] = (reo_status - j);
3235 		if (rxdma2host_ring_mask & (1 << j))
3236 			irq_id_map[num_irq++] = (rxdma2sw_dst_ring0 - j);
3237 		if (host2rxdma_ring_mask & (1 << j))
3238 			irq_id_map[num_irq++] = (sw2rxdma_0 - j);
3239 		if (host2rxdma_mon_ring_mask & (1 << j))
3240 			irq_id_map[num_irq++] = (sw2rxmon_src_ring - j);
3241 	}
3242 	*num_irq_r = num_irq;
3243 }
3244 #else
3245 static void dp_soc_interrupt_map_calculate_wifi3_pci_legacy(struct dp_soc *soc,
3246 							    int intr_ctx_num,
3247 							    int *irq_id_map,
3248 							    int *num_irq_r)
3249 {
3250 }
3251 #endif
3252 
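/**
 * dp_soc_interrupt_map_calculate_integrated() - Calculate interrupt map
 * for integrated (line) interrupts
 * @soc: DP soc handle
 * @intr_ctx_num: Interrupt context number
 * @irq_id_map: IRQ map to fill
 * @num_irq_r: Number of interrupts assigned for this context
 *
 * Return: void
 */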
3253 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
3254 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
3255 {
3256 	int j;
3257 	int num_irq = 0;
3258 
3259 	int tx_mask =
3260 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
3261 	int rx_mask =
3262 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
3263 	int rx_mon_mask =
3264 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
3265 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
3266 					soc->wlan_cfg_ctx, intr_ctx_num);
3267 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
3268 					soc->wlan_cfg_ctx, intr_ctx_num);
3269 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
3270 					soc->wlan_cfg_ctx, intr_ctx_num);
3271 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
3272 					soc->wlan_cfg_ctx, intr_ctx_num);
3273 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
3274 					soc->wlan_cfg_ctx, intr_ctx_num);
3275 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
3276 					soc->wlan_cfg_ctx, intr_ctx_num);
3277 	int host2txmon_ring_mask = wlan_cfg_get_host2txmon_ring_mask(
3278 					soc->wlan_cfg_ctx, intr_ctx_num);
3279 	int txmon2host_mon_ring_mask = wlan_cfg_get_tx_mon_ring_mask(
3280 					soc->wlan_cfg_ctx, intr_ctx_num);
3281 
3282 	soc->intr_mode = DP_INTR_INTEGRATED;
3283 
3284 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
3285 
3286 		if (tx_mask & (1 << j)) {
3287 			irq_id_map[num_irq++] =
3288 				(wbm2host_tx_completions_ring1 - j);
3289 		}
3290 
3291 		if (rx_mask & (1 << j)) {
3292 			irq_id_map[num_irq++] =
3293 				(reo2host_destination_ring1 - j);
3294 		}
3295 
3296 		if (rxdma2host_ring_mask & (1 << j)) {
3297 			irq_id_map[num_irq++] =
3298 				rxdma2host_destination_ring_mac1 - j;
3299 		}
3300 
3301 		if (host2rxdma_ring_mask & (1 << j)) {
3302 			irq_id_map[num_irq++] =
3303 				host2rxdma_host_buf_ring_mac1 -	j;
3304 		}
3305 
3306 		if (host2rxdma_mon_ring_mask & (1 << j)) {
3307 			irq_id_map[num_irq++] =
3308 				host2rxdma_monitor_ring1 - j;
3309 		}
3310 
3311 		if (rx_mon_mask & (1 << j)) {
3312 			irq_id_map[num_irq++] =
3313 				ppdu_end_interrupts_mac1 - j;
3314 			irq_id_map[num_irq++] =
3315 				rxdma2host_monitor_status_ring_mac1 - j;
3316 			irq_id_map[num_irq++] =
3317 				rxdma2host_monitor_destination_mac1 - j;
3318 		}
3319 
3320 		if (rx_wbm_rel_ring_mask & (1 << j))
3321 			irq_id_map[num_irq++] = wbm2host_rx_release;
3322 
3323 		if (rx_err_ring_mask & (1 << j))
3324 			irq_id_map[num_irq++] = reo2host_exception;
3325 
3326 		if (reo_status_ring_mask & (1 << j))
3327 			irq_id_map[num_irq++] = reo2host_status;
3328 
3329 		if (host2txmon_ring_mask & (1 << j))
3330 			irq_id_map[num_irq++] = host2tx_monitor_ring1;
3331 
3332 		if (txmon2host_mon_ring_mask & (1 << j)) {
3333 			irq_id_map[num_irq++] =
3334 				(txmon2host_monitor_destination_mac1 - j);
3335 		}
3336 
3337 	}
3338 	*num_irq_r = num_irq;
3339 }
3340 
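/**
 * dp_soc_interrupt_map_calculate_msi() - Calculate interrupt map for MSI
 * @soc: DP soc handle
 * @intr_ctx_num: Interrupt context number
 * @irq_id_map: IRQ map to fill
 * @num_irq_r: Number of interrupts assigned for this context
 * @msi_vector_count: Number of MSI vectors available to DP
 * @msi_vector_start: First MSI vector assigned to DP
 *
 * All rings serviced by one interrupt context share a single MSI vector,
 * so at most one IRQ id is emitted per context.
 *
 * Return: void
 */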
3341 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
3342 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
3343 		int msi_vector_count, int msi_vector_start)
3344 {
3345 	int tx_mask = wlan_cfg_get_tx_ring_mask(
3346 					soc->wlan_cfg_ctx, intr_ctx_num);
3347 	int rx_mask = wlan_cfg_get_rx_ring_mask(
3348 					soc->wlan_cfg_ctx, intr_ctx_num);
3349 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
3350 					soc->wlan_cfg_ctx, intr_ctx_num);
3351 	int tx_mon_mask = wlan_cfg_get_tx_mon_ring_mask(
3352 					soc->wlan_cfg_ctx, intr_ctx_num);
3353 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
3354 					soc->wlan_cfg_ctx, intr_ctx_num);
3355 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
3356 					soc->wlan_cfg_ctx, intr_ctx_num);
3357 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
3358 					soc->wlan_cfg_ctx, intr_ctx_num);
3359 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
3360 					soc->wlan_cfg_ctx, intr_ctx_num);
3361 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
3362 					soc->wlan_cfg_ctx, intr_ctx_num);
3363 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
3364 					soc->wlan_cfg_ctx, intr_ctx_num);
3365 	int rx_near_full_grp_1_mask =
3366 		wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
3367 						     intr_ctx_num);
3368 	int rx_near_full_grp_2_mask =
3369 		wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
3370 						     intr_ctx_num);
3371 	int tx_ring_near_full_mask =
3372 		wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
3373 						    intr_ctx_num);
3374 
3375 	int host2txmon_ring_mask =
3376 		wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx,
3377 						  intr_ctx_num);
3378 	unsigned int vector =
3379 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
3380 	int num_irq = 0;
3381 
3382 	soc->intr_mode = DP_INTR_MSI;
3383 
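	/*
	 * Illustrative vector arithmetic with hypothetical platform values:
	 * msi_vector_start = 1 and msi_vector_count = 8 would map interrupt
	 * context 10 onto vector (10 % 8) + 1 = 3.
	 */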
3384 	if (tx_mask | rx_mask | rx_mon_mask | tx_mon_mask | rx_err_ring_mask |
3385 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask |
3386 	    host2rxdma_ring_mask | host2rxdma_mon_ring_mask |
3387 	    rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
3388 	    tx_ring_near_full_mask | host2txmon_ring_mask)
3389 		irq_id_map[num_irq++] =
3390 			pld_get_msi_irq(soc->osdev->dev, vector);
3391 
3392 	*num_irq_r = num_irq;
3393 }
3394 
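/**
 * dp_soc_interrupt_map_calculate() - Resolve the IRQ map for a context
 * @soc: DP soc handle
 * @intr_ctx_num: Interrupt context number
 * @irq_id_map: IRQ map to fill
 * @num_irq: Number of interrupts assigned for this context
 *
 * Selection order: PCI legacy (INTx) when enabled, else MSI when a user
 * MSI assignment for "DP" is available, else integrated (line) interrupts.
 *
 * Return: void
 */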
3395 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
3396 				    int *irq_id_map, int *num_irq)
3397 {
3398 	int msi_vector_count, ret;
3399 	uint32_t msi_base_data, msi_vector_start;
3400 
3401 	if (pld_get_enable_intx(soc->osdev->dev)) {
3402 		return dp_soc_interrupt_map_calculate_wifi3_pci_legacy(soc,
3403 				intr_ctx_num, irq_id_map, num_irq);
3404 	}
3405 
3406 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
3407 					    &msi_vector_count,
3408 					    &msi_base_data,
3409 					    &msi_vector_start);
3410 	if (ret)
3411 		return dp_soc_interrupt_map_calculate_integrated(soc,
3412 				intr_ctx_num, irq_id_map, num_irq);
3413 
3414 	else
3415 		dp_soc_interrupt_map_calculate_msi(soc,
3416 				intr_ctx_num, irq_id_map, num_irq,
3417 				msi_vector_count, msi_vector_start);
3418 }
3419 
3420 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
3421 /**
 * dp_soc_near_full_interrupt_attach() - Register handler for DP near-full IRQ
 * @soc: DP soc handle
 * @num_irq: Number of IRQs in the map
3425  * @irq_id_map: IRQ map
3426  * @intr_id: interrupt context ID
3427  *
3428  * Return: 0 for success. nonzero for failure.
3429  */
3430 static inline int
3431 dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
3432 				  int irq_id_map[], int intr_id)
3433 {
3434 	return hif_register_ext_group(soc->hif_handle,
3435 				      num_irq, irq_id_map,
3436 				      dp_service_near_full_srngs,
3437 				      &soc->intr_ctx[intr_id], "dp_nf_intr",
3438 				      HIF_EXEC_NAPI_TYPE,
3439 				      QCA_NAPI_DEF_SCALE_BIN_SHIFT);
3440 }
3441 #else
3442 static inline int
3443 dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
3444 				  int *irq_id_map, int intr_id)
3445 {
3446 	return 0;
3447 }
3448 #endif
3449 
3450 #ifdef DP_CON_MON_MSI_SKIP_SET
3451 static inline bool dp_skip_rx_mon_ring_mask_set(struct dp_soc *soc)
3452 {
	if (!soc->cdp_soc.ol_ops->get_con_mode)
		return false;

	return soc->cdp_soc.ol_ops->get_con_mode() !=
			QDF_GLOBAL_MONITOR_MODE;
3455 }
3456 #else
3457 static inline bool dp_skip_rx_mon_ring_mask_set(struct dp_soc *soc)
3458 {
3459 	return false;
3460 }
3461 #endif
3462 
3463 /**
3464  * dp_soc_ppeds_stop() - Stop PPE DS processing
3465  * @soc_handle: DP SOC handle
3466  *
3467  * Return: none
3468  */
3469 static void dp_soc_ppeds_stop(struct cdp_soc_t *soc_handle)
3470 {
3471 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
3472 
3473 	if (soc->arch_ops.txrx_soc_ppeds_stop)
3474 		soc->arch_ops.txrx_soc_ppeds_stop(soc);
3475 }
3476 
3477 void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
3478 {
3479 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3480 	int i;
3481 
3482 	if (soc->intr_mode == DP_INTR_POLL) {
3483 		qdf_timer_free(&soc->int_timer);
3484 	} else {
3485 		hif_deconfigure_ext_group_interrupts(soc->hif_handle);
3486 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
3487 		hif_deregister_exec_group(soc->hif_handle, "dp_nf_intr");
3488 	}
3489 
3490 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
3491 		soc->intr_ctx[i].tx_ring_mask = 0;
3492 		soc->intr_ctx[i].rx_ring_mask = 0;
3493 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
3494 		soc->intr_ctx[i].rx_err_ring_mask = 0;
3495 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
3496 		soc->intr_ctx[i].reo_status_ring_mask = 0;
3497 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
3498 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
3499 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
3500 		soc->intr_ctx[i].rx_near_full_grp_1_mask = 0;
3501 		soc->intr_ctx[i].rx_near_full_grp_2_mask = 0;
3502 		soc->intr_ctx[i].tx_ring_near_full_mask = 0;
3503 		soc->intr_ctx[i].tx_mon_ring_mask = 0;
3504 		soc->intr_ctx[i].host2txmon_ring_mask = 0;
3505 		soc->intr_ctx[i].umac_reset_intr_mask = 0;
3506 
3507 		hif_event_history_deinit(soc->hif_handle, i);
3508 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
3509 	}
3510 
3511 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
3512 		    sizeof(soc->mon_intr_id_lmac_map),
3513 		    DP_MON_INVALID_LMAC_ID);
3514 }
3515 
3516 /**
3517  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
3518  * @txrx_soc: DP SOC handle
3519  *
3520  * The host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" NAPI
3521  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
3522  * rx_mon_ring_mask to indicate the rings that are processed by the handler.
3523  *
3524  * Return: 0 for success. nonzero for failure.
3525  */
3526 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
3527 {
3528 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3529 
3530 	int i = 0;
3531 	int num_irq = 0;
3532 	int rx_err_ring_intr_ctxt_id = HIF_MAX_GROUP;
3533 	int lmac_id = 0;
3534 	int napi_scale;
3535 
3536 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
3537 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
3538 
3539 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
3540 		int ret = 0;
3541 
3542 		/* Map of IRQ ids registered with one interrupt context */
3543 		int irq_id_map[HIF_MAX_GRP_IRQ];
3544 
3545 		int tx_mask =
3546 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
3547 		int rx_mask =
3548 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
3549 		int rx_mon_mask =
3550 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
3551 		int tx_mon_ring_mask =
3552 			wlan_cfg_get_tx_mon_ring_mask(soc->wlan_cfg_ctx, i);
3553 		int rx_err_ring_mask =
3554 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
3555 		int rx_wbm_rel_ring_mask =
3556 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
3557 		int reo_status_ring_mask =
3558 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
3559 		int rxdma2host_ring_mask =
3560 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
3561 		int host2rxdma_ring_mask =
3562 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
3563 		int host2rxdma_mon_ring_mask =
3564 			wlan_cfg_get_host2rxdma_mon_ring_mask(
3565 				soc->wlan_cfg_ctx, i);
3566 		int rx_near_full_grp_1_mask =
3567 			wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
3568 							     i);
3569 		int rx_near_full_grp_2_mask =
3570 			wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
3571 							     i);
3572 		int tx_ring_near_full_mask =
3573 			wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
3574 							    i);
3575 		int host2txmon_ring_mask =
3576 			wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx, i);
3577 		int umac_reset_intr_mask =
3578 			wlan_cfg_get_umac_reset_intr_mask(soc->wlan_cfg_ctx, i);
3579 
3580 		if (dp_skip_rx_mon_ring_mask_set(soc))
3581 			rx_mon_mask = 0;
3582 
3583 		soc->intr_ctx[i].dp_intr_id = i;
3584 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
3585 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
3586 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
3587 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
3588 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
3589 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
3590 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
3591 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
3592 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
3593 			 host2rxdma_mon_ring_mask;
3594 		soc->intr_ctx[i].rx_near_full_grp_1_mask =
3595 						rx_near_full_grp_1_mask;
3596 		soc->intr_ctx[i].rx_near_full_grp_2_mask =
3597 						rx_near_full_grp_2_mask;
3598 		soc->intr_ctx[i].tx_ring_near_full_mask =
3599 						tx_ring_near_full_mask;
3600 		soc->intr_ctx[i].tx_mon_ring_mask = tx_mon_ring_mask;
3601 		soc->intr_ctx[i].host2txmon_ring_mask = host2txmon_ring_mask;
3602 		soc->intr_ctx[i].umac_reset_intr_mask = umac_reset_intr_mask;
3603 
3604 		soc->intr_ctx[i].soc = soc;
3605 
3606 		num_irq = 0;
3607 
3608 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
3609 					       &num_irq);
3610 
3611 		if (rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
3612 		    tx_ring_near_full_mask) {
3613 			ret = dp_soc_near_full_interrupt_attach(soc, num_irq,
3614 								irq_id_map, i);
3615 		} else {
3616 			napi_scale = wlan_cfg_get_napi_scale_factor(
3617 							    soc->wlan_cfg_ctx);
3618 			if (!napi_scale)
3619 				napi_scale = QCA_NAPI_DEF_SCALE_BIN_SHIFT;
3620 
3621 			ret = hif_register_ext_group(soc->hif_handle,
3622 				num_irq, irq_id_map, dp_service_srngs,
3623 				&soc->intr_ctx[i], "dp_intr",
3624 				HIF_EXEC_NAPI_TYPE, napi_scale);
3625 		}
3626 
3627 		dp_debug(" int ctx %u num_irq %u irq_id_map %u %u",
3628 			 i, num_irq, irq_id_map[0], irq_id_map[1]);
3629 
3630 		if (ret) {
3631 			dp_init_err("%pK: failed, ret = %d", soc, ret);
3632 			dp_soc_interrupt_detach(txrx_soc);
3633 			return QDF_STATUS_E_FAILURE;
3634 		}
3635 
3636 		hif_event_history_init(soc->hif_handle, i);
3637 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
3638 
3639 		if (rx_err_ring_mask)
3640 			rx_err_ring_intr_ctxt_id = i;
3641 
3642 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
3643 			soc->mon_intr_id_lmac_map[lmac_id] = i;
3644 			lmac_id++;
3645 		}
3646 	}
3647 
3648 	hif_configure_ext_group_interrupts(soc->hif_handle);
3649 	if (rx_err_ring_intr_ctxt_id != HIF_MAX_GROUP)
3650 		hif_config_irq_clear_cpu_affinity(soc->hif_handle,
3651 						  rx_err_ring_intr_ctxt_id, 0);
3652 
3653 	return QDF_STATUS_SUCCESS;
3654 }
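
/*
 * Usage sketch (hypothetical caller, error handling elided): attach and
 * detach are expected to pair up around the SoC lifetime.
 *
 *	if (dp_soc_interrupt_attach((struct cdp_soc_t *)soc) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAILURE;
 *	...
 *	dp_soc_interrupt_detach((struct cdp_soc_t *)soc);
 */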
3655 
3656 #define AVG_MAX_MPDUS_PER_TID 128
3657 #define AVG_TIDS_PER_CLIENT 2
3658 #define AVG_FLOWS_PER_TID 2
3659 #define AVG_MSDUS_PER_FLOW 128
3660 #define AVG_MSDUS_PER_MPDU 4
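
/*
 * Worked example of how the AVG_* knobs size the link descriptor pool
 * (the hardware parameters here are hypothetical): with max_clients = 64
 * and num_mpdus_per_link_desc = 6,
 *
 *	num_mpdu_link_descs = (64 * AVG_TIDS_PER_CLIENT *
 *			       AVG_MAX_MPDUS_PER_TID) / 6
 *			    = (64 * 2 * 128) / 6 = 2730
 *
 * The analogous MPDU-queue and Tx/Rx MSDU terms are summed into
 * num_entries in dp_hw_link_desc_pool_banks_alloc() below and then
 * rounded up to a power of two.
 */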
3661 
3662 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id)
3663 {
3664 	struct qdf_mem_multi_page_t *pages;
3665 
3666 	if (mac_id != WLAN_INVALID_PDEV_ID) {
3667 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3668 	} else {
3669 		pages = &soc->link_desc_pages;
3670 	}
3671 
3672 	if (!pages) {
3673 		dp_err("can not get link desc pages");
3674 		QDF_ASSERT(0);
3675 		return;
3676 	}
3677 
3678 	if (pages->dma_pages) {
3679 		wlan_minidump_remove((void *)
3680 				     pages->dma_pages->page_v_addr_start,
3681 				     pages->num_pages * pages->page_size,
3682 				     soc->ctrl_psoc,
3683 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3684 				     "hw_link_desc_bank");
3685 		dp_desc_multi_pages_mem_free(soc, DP_HW_LINK_DESC_TYPE,
3686 					     pages, 0, false);
3687 	}
3688 }
3689 
3690 qdf_export_symbol(dp_hw_link_desc_pool_banks_free);
3691 
3692 QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc, uint32_t mac_id)
3693 {
3694 	hal_soc_handle_t hal_soc = soc->hal_soc;
3695 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
3696 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
3697 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
3698 	uint32_t num_mpdus_per_link_desc = hal_num_mpdus_per_link_desc(hal_soc);
3699 	uint32_t num_msdus_per_link_desc = hal_num_msdus_per_link_desc(hal_soc);
3700 	uint32_t num_mpdu_links_per_queue_desc =
3701 		hal_num_mpdu_links_per_queue_desc(hal_soc);
3702 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
3703 	uint32_t *total_link_descs, total_mem_size;
3704 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
3705 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
3706 	uint32_t num_entries;
3707 	struct qdf_mem_multi_page_t *pages;
3708 	struct dp_srng *dp_srng;
3709 	uint8_t minidump_str[MINIDUMP_STR_SIZE];
3710 
3711 	/* Only Tx queue descriptors are allocated from the common link
3712 	 * descriptor pool. Rx queue descriptors (REO queue extension
3713 	 * descriptors) are not included here because they are expected to be
3714 	 * allocated contiguously with the REO queue descriptors.
3715 	 */
3716 	if (mac_id != WLAN_INVALID_PDEV_ID) {
3717 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3718 		/* dp_monitor_get_link_desc_pages returns NULL only
3719 		 * if the monitor SOC is NULL
3720 		 */
3721 		if (!pages) {
3722 			dp_err("can not get link desc pages");
3723 			QDF_ASSERT(0);
3724 			return QDF_STATUS_E_FAULT;
3725 		}
3726 		dp_srng = &soc->rxdma_mon_desc_ring[mac_id];
3727 		num_entries = dp_srng->alloc_size /
3728 			hal_srng_get_entrysize(soc->hal_soc,
3729 					       RXDMA_MONITOR_DESC);
3730 		total_link_descs = dp_monitor_get_total_link_descs(soc, mac_id);
3731 		qdf_str_lcopy(minidump_str, "mon_link_desc_bank",
3732 			      MINIDUMP_STR_SIZE);
3733 	} else {
3734 		num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3735 			AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
3736 
3737 		num_mpdu_queue_descs = num_mpdu_link_descs /
3738 			num_mpdu_links_per_queue_desc;
3739 
3740 		num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3741 			AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
3742 			num_msdus_per_link_desc;
3743 
3744 		num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3745 			AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
3746 
3747 		num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
3748 			num_tx_msdu_link_descs + num_rx_msdu_link_descs;
3749 
3750 		pages = &soc->link_desc_pages;
3751 		total_link_descs = &soc->total_link_descs;
3752 		qdf_str_lcopy(minidump_str, "link_desc_bank",
3753 			      MINIDUMP_STR_SIZE);
3754 	}
3755 
3756 	/* If link descriptor banks are already allocated, return from here */
3757 	if (pages->num_pages)
3758 		return QDF_STATUS_SUCCESS;
3759 
3760 	/* Round up to power of 2 */
3761 	*total_link_descs = 1;
3762 	while (*total_link_descs < num_entries)
3763 		*total_link_descs <<= 1;
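
	/* For illustration: num_entries = 2730 rounds *total_link_descs up
	 * to 4096 here.
	 */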
3764 
3765 	dp_init_info("%pK: total_link_descs: %u, link_desc_size: %d",
3766 		     soc, *total_link_descs, link_desc_size);
3767 	total_mem_size =  *total_link_descs * link_desc_size;
3768 	total_mem_size += link_desc_align;
3769 
3770 	dp_init_info("%pK: total_mem_size: %d",
3771 		     soc, total_mem_size);
3772 
3773 	dp_set_max_page_size(pages, max_alloc_size);
3774 	dp_desc_multi_pages_mem_alloc(soc, DP_HW_LINK_DESC_TYPE,
3775 				      pages,
3776 				      link_desc_size,
3777 				      *total_link_descs,
3778 				      0, false);
3779 	if (!pages->num_pages) {
3780 		dp_err("Multi page alloc fail for hw link desc pool");
3781 		return QDF_STATUS_E_FAULT;
3782 	}
3783 
3784 	wlan_minidump_log(pages->dma_pages->page_v_addr_start,
3785 			  pages->num_pages * pages->page_size,
3786 			  soc->ctrl_psoc,
3787 			  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3788 			  "hw_link_desc_bank");
3789 
3790 	return QDF_STATUS_SUCCESS;
3791 }
3792 
3793 /**
3794  * dp_hw_link_desc_ring_free() - Free h/w link desc rings
3795  * @soc: DP SOC handle
3796  *
3797  * Return: none
3798  */
3799 static void dp_hw_link_desc_ring_free(struct dp_soc *soc)
3800 {
3801 	uint32_t i;
3802 	uint32_t size = soc->wbm_idle_scatter_buf_size;
3803 	void *vaddr = soc->wbm_idle_link_ring.base_vaddr_unaligned;
3804 	qdf_dma_addr_t paddr;
3805 
3806 	if (soc->wbm_idle_scatter_buf_base_vaddr[0]) {
3807 		for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
3808 			vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
3809 			paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
3810 			if (vaddr) {
3811 				qdf_mem_free_consistent(soc->osdev,
3812 							soc->osdev->dev,
3813 							size,
3814 							vaddr,
3815 							paddr,
3816 							0);
3817 				vaddr = NULL;
3818 			}
3819 		}
3820 	} else {
3821 		wlan_minidump_remove(soc->wbm_idle_link_ring.base_vaddr_unaligned,
3822 				     soc->wbm_idle_link_ring.alloc_size,
3823 				     soc->ctrl_psoc,
3824 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3825 				     "wbm_idle_link_ring");
3826 		dp_srng_free(soc, &soc->wbm_idle_link_ring);
3827 	}
3828 }
3829 
3830 /**
3831  * dp_hw_link_desc_ring_alloc() - Allocate hw link desc rings
3832  * @soc: DP SOC handle
3833  *
3834  * Allocate memory for WBM_IDLE_LINK srng ring if the number of
3835  * link descriptors is less then the max_allocated size. else
3836  * allocate memory for wbm_idle_scatter_buffer.
3837  *
3838  * Return: QDF_STATUS_SUCCESS: success
3839  *         QDF_STATUS_E_NO_MEM: No memory (Failure)
3840  */
3841 static QDF_STATUS dp_hw_link_desc_ring_alloc(struct dp_soc *soc)
3842 {
3843 	uint32_t entry_size, i;
3844 	uint32_t total_mem_size;
3845 	qdf_dma_addr_t *baseaddr = NULL;
3846 	struct dp_srng *dp_srng;
3847 	uint32_t ring_type;
3848 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
3849 	uint32_t tlds;
3850 
3851 	ring_type = WBM_IDLE_LINK;
3852 	dp_srng = &soc->wbm_idle_link_ring;
3853 	tlds = soc->total_link_descs;
3854 
3855 	entry_size = hal_srng_get_entrysize(soc->hal_soc, ring_type);
3856 	total_mem_size = entry_size * tlds;
3857 
3858 	if (total_mem_size <= max_alloc_size) {
3859 		if (dp_srng_alloc(soc, dp_srng, ring_type, tlds, 0)) {
3860 			dp_init_err("%pK: Link desc idle ring setup failed",
3861 				    soc);
3862 			goto fail;
3863 		}
3864 
3865 		wlan_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
3866 				  soc->wbm_idle_link_ring.alloc_size,
3867 				  soc->ctrl_psoc,
3868 				  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3869 				  "wbm_idle_link_ring");
3870 	} else {
3871 		uint32_t num_scatter_bufs;
3872 		uint32_t buf_size = 0;
3873 
3874 		soc->wbm_idle_scatter_buf_size =
3875 			hal_idle_list_scatter_buf_size(soc->hal_soc);
3876 		hal_idle_scatter_buf_num_entries(
3877 					soc->hal_soc,
3878 					soc->wbm_idle_scatter_buf_size);
3879 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
3880 					soc->hal_soc, total_mem_size,
3881 					soc->wbm_idle_scatter_buf_size);
3882 
3883 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
3884 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3885 				  FL("scatter bufs size out of bounds"));
3886 			goto fail;
3887 		}
3888 
3889 		for (i = 0; i < num_scatter_bufs; i++) {
3890 			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
3891 			buf_size = soc->wbm_idle_scatter_buf_size;
3892 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
3893 				qdf_mem_alloc_consistent(soc->osdev,
3894 							 soc->osdev->dev,
3895 							 buf_size,
3896 							 baseaddr);
3897 
3898 			if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
3899 				QDF_TRACE(QDF_MODULE_ID_DP,
3900 					  QDF_TRACE_LEVEL_ERROR,
3901 					  FL("Scatter lst memory alloc fail"));
3902 				goto fail;
3903 			}
3904 		}
3905 		soc->num_scatter_bufs = num_scatter_bufs;
3906 	}
3907 	return QDF_STATUS_SUCCESS;
3908 
3909 fail:
3910 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
3911 		void *vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
3912 		qdf_dma_addr_t paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
3913 
3914 		if (vaddr) {
3915 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
3916 						soc->wbm_idle_scatter_buf_size,
3917 						vaddr,
3918 						paddr, 0);
3919 			vaddr = NULL;
3920 		}
3921 	}
3922 	return QDF_STATUS_E_NOMEM;
3923 }
3924 
3925 qdf_export_symbol(dp_hw_link_desc_pool_banks_alloc);
3926 
3927 /**
3928  * dp_hw_link_desc_ring_init() - Initialize hw link desc rings
3929  * @soc: DP SOC handle
3930  *
3931  * Return: QDF_STATUS_SUCCESS: success
3932  *         QDF_STATUS_E_FAILURE: failure
3933  */
3934 static QDF_STATUS dp_hw_link_desc_ring_init(struct dp_soc *soc)
3935 {
3936 	struct dp_srng *dp_srng = &soc->wbm_idle_link_ring;
3937 
3938 	if (dp_srng->base_vaddr_unaligned) {
3939 		if (dp_srng_init(soc, dp_srng, WBM_IDLE_LINK, 0, 0))
3940 			return QDF_STATUS_E_FAILURE;
3941 	}
3942 	return QDF_STATUS_SUCCESS;
3943 }
3944 
3945 /**
3946  * dp_hw_link_desc_ring_deinit() - Reset hw link desc rings
3947  * @soc: DP SOC handle
3948  *
3949  * Return: None
3950  */
3951 static void dp_hw_link_desc_ring_deinit(struct dp_soc *soc)
3952 {
3953 	dp_srng_deinit(soc, &soc->wbm_idle_link_ring, WBM_IDLE_LINK, 0);
3954 }
3955 
3956 void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id)
3957 {
3958 	uint32_t cookie = 0;
3959 	uint32_t page_idx = 0;
3960 	struct qdf_mem_multi_page_t *pages;
3961 	struct qdf_mem_dma_page_t *dma_pages;
3962 	uint32_t offset = 0;
3963 	uint32_t count = 0;
3964 	uint32_t desc_id = 0;
3965 	void *desc_srng;
3966 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
3967 	uint32_t *total_link_descs_addr;
3968 	uint32_t total_link_descs;
3969 	uint32_t scatter_buf_num;
3970 	uint32_t num_entries_per_buf = 0;
3971 	uint32_t rem_entries;
3972 	uint32_t num_descs_per_page;
3973 	uint32_t num_scatter_bufs = 0;
3974 	uint8_t *scatter_buf_ptr;
3975 	void *desc;
3976 
3977 	num_scatter_bufs = soc->num_scatter_bufs;
3978 
3979 	if (mac_id == WLAN_INVALID_PDEV_ID) {
3980 		pages = &soc->link_desc_pages;
3981 		total_link_descs = soc->total_link_descs;
3982 		desc_srng = soc->wbm_idle_link_ring.hal_srng;
3983 	} else {
3984 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3985 		/* dp_monitor_get_link_desc_pages returns NULL only
3986 		 * if the monitor SOC is NULL
3987 		 */
3988 		if (!pages) {
3989 			dp_err("can not get link desc pages");
3990 			QDF_ASSERT(0);
3991 			return;
3992 		}
3993 		total_link_descs_addr =
3994 				dp_monitor_get_total_link_descs(soc, mac_id);
3995 		total_link_descs = *total_link_descs_addr;
3996 		desc_srng = soc->rxdma_mon_desc_ring[mac_id].hal_srng;
3997 	}
3998 
3999 	dma_pages = pages->dma_pages;
4000 	do {
4001 		qdf_mem_zero(dma_pages[page_idx].page_v_addr_start,
4002 			     pages->page_size);
4003 		page_idx++;
4004 	} while (page_idx < pages->num_pages);
4005 
4006 	if (desc_srng) {
4007 		hal_srng_access_start_unlocked(soc->hal_soc, desc_srng);
4008 		page_idx = 0;
4009 		count = 0;
4010 		offset = 0;
4011 		pages = &soc->link_desc_pages;
4012 		while ((desc = hal_srng_src_get_next(soc->hal_soc,
4013 						     desc_srng)) &&
4014 			(count < total_link_descs)) {
4015 			page_idx = count / pages->num_element_per_page;
4016 			if (desc_id == pages->num_element_per_page)
4017 				desc_id = 0;
4018 
4019 			offset = count % pages->num_element_per_page;
4020 			cookie = LINK_DESC_COOKIE(desc_id, page_idx,
4021 						  soc->link_desc_id_start);
4022 
4023 			hal_set_link_desc_addr(soc->hal_soc, desc, cookie,
4024 					       dma_pages[page_idx].page_p_addr
4025 					       + (offset * link_desc_size),
4026 					       soc->idle_link_bm_id);
4027 			count++;
4028 			desc_id++;
4029 		}
4030 		hal_srng_access_end_unlocked(soc->hal_soc, desc_srng);
4031 	} else {
4032 		/* Populate idle list scatter buffers with link descriptor
4033 		 * pointers
4034 		 */
4035 		scatter_buf_num = 0;
4036 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
4037 					soc->hal_soc,
4038 					soc->wbm_idle_scatter_buf_size);
4039 
4040 		scatter_buf_ptr = (uint8_t *)(
4041 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
4042 		rem_entries = num_entries_per_buf;
4043 		pages = &soc->link_desc_pages;
4044 		page_idx = 0; count = 0;
4045 		offset = 0;
4046 		num_descs_per_page = pages->num_element_per_page;
4047 
4048 		while (count < total_link_descs) {
4049 			page_idx = count / num_descs_per_page;
4050 			offset = count % num_descs_per_page;
4051 			if (desc_id == pages->num_element_per_page)
4052 				desc_id = 0;
4053 
4054 			cookie = LINK_DESC_COOKIE(desc_id, page_idx,
4055 						  soc->link_desc_id_start);
4056 			hal_set_link_desc_addr(soc->hal_soc,
4057 					       (void *)scatter_buf_ptr,
4058 					       cookie,
4059 					       dma_pages[page_idx].page_p_addr +
4060 					       (offset * link_desc_size),
4061 					       soc->idle_link_bm_id);
4062 			rem_entries--;
4063 			if (rem_entries) {
4064 				scatter_buf_ptr += link_desc_size;
4065 			} else {
4066 				rem_entries = num_entries_per_buf;
4067 				scatter_buf_num++;
4068 				if (scatter_buf_num >= num_scatter_bufs)
4069 					break;
4070 				scatter_buf_ptr = (uint8_t *)
4071 					(soc->wbm_idle_scatter_buf_base_vaddr[
4072 					 scatter_buf_num]);
4073 			}
4074 			count++;
4075 			desc_id++;
4076 		}
4077 		/* Setup link descriptor idle list in HW */
4078 		hal_setup_link_idle_list(soc->hal_soc,
4079 			soc->wbm_idle_scatter_buf_base_paddr,
4080 			soc->wbm_idle_scatter_buf_base_vaddr,
4081 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
4082 			(uint32_t)(scatter_buf_ptr -
4083 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
4084 			scatter_buf_num-1])), total_link_descs);
4085 	}
4086 }
4087 
4088 qdf_export_symbol(dp_link_desc_ring_replenish);
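
/*
 * Illustrative call order (a sketch, not mandated by this file): allocate
 * the banks with dp_hw_link_desc_pool_banks_alloc(), set up the WBM idle
 * ring via dp_hw_link_desc_ring_alloc()/dp_hw_link_desc_ring_init(), then
 * seed the idle list with dp_link_desc_ring_replenish(soc,
 * WLAN_INVALID_PDEV_ID).
 */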
4089 
4090 #ifdef IPA_OFFLOAD
4091 #define USE_1_IPA_RX_REO_RING 1
4092 #define USE_2_IPA_RX_REO_RINGS 2
4093 #define REO_DST_RING_SIZE_QCA6290 1023
4094 #ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
4095 #define REO_DST_RING_SIZE_QCA8074 1023
4096 #define REO_DST_RING_SIZE_QCN9000 2048
4097 #else
4098 #define REO_DST_RING_SIZE_QCA8074 8
4099 #define REO_DST_RING_SIZE_QCN9000 8
4100 #endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
4101 
4102 #ifdef IPA_WDI3_TX_TWO_PIPES
4103 #ifdef DP_MEMORY_OPT
4104 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
4105 {
4106 	return dp_init_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
4107 }
4108 
4109 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
4110 {
4111 	dp_deinit_tx_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
4112 }
4113 
4114 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
4115 {
4116 	return dp_alloc_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
4117 }
4118 
4119 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
4120 {
4121 	dp_free_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
4122 }
4123 
4124 #else /* !DP_MEMORY_OPT */
4125 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
4126 {
4127 	return 0;
4128 }
4129 
4130 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
4131 {
4132 }
4133 
4134 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
4135 {
4136 	return 0;
4137 }
4138 
4139 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
4140 {
4141 }
4142 #endif /* DP_MEMORY_OPT */
4143 
4144 static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
4145 {
4146 	hal_tx_init_data_ring(soc->hal_soc,
4147 			      soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng);
4148 }
4149 
4150 #else /* !IPA_WDI3_TX_TWO_PIPES */
4151 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
4152 {
4153 	return 0;
4154 }
4155 
4156 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
4157 {
4158 }
4159 
4160 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
4161 {
4162 	return 0;
4163 }
4164 
4165 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
4166 {
4167 }
4168 
4169 static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
4170 {
4171 }
4172 
4173 #endif /* IPA_WDI3_TX_TWO_PIPES */
4174 
4175 #else
4176 
4177 #define REO_DST_RING_SIZE_QCA6290 1024
4178 
4179 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
4180 {
4181 	return 0;
4182 }
4183 
4184 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
4185 {
4186 }
4187 
4188 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
4189 {
4190 	return 0;
4191 }
4192 
4193 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
4194 {
4195 }
4196 
4197 static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
4198 {
4199 }
4200 
4201 #endif /* IPA_OFFLOAD */
4202 
4203 /**
4204  * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
4205  * @soc: Datapath soc handler
4206  *
4207  * This API resets the default CPU ring map.
4208  */
4210 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
4211 {
4212 	uint8_t i;
4213 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4214 
4215 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
4216 		switch (nss_config) {
4217 		case dp_nss_cfg_first_radio:
4218 			/*
4219 			 * Setting Tx ring map for one nss offloaded radio
4220 			 */
4221 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
4222 			break;
4223 
4224 		case dp_nss_cfg_second_radio:
4225 			/*
4226 			 * Setting Tx ring map for the second nss offloaded radio
4227 			 */
4228 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
4229 			break;
4230 
4231 		case dp_nss_cfg_dbdc:
4232 			/*
4233 			 * Setting Tx ring map for 2 nss offloaded radios
4234 			 */
4235 			soc->tx_ring_map[i] =
4236 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
4237 			break;
4238 
4239 		case dp_nss_cfg_dbtc:
4240 			/*
4241 			 * Setting Tx ring map for 3 nss offloaded radios
4242 			 */
4243 			soc->tx_ring_map[i] =
4244 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
4245 			break;
4246 
4247 		default:
4248 			dp_err("tx_ring_map failed due to invalid nss cfg");
4249 			break;
4250 		}
4251 	}
4252 }
4253 
4254 /**
4255  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
4256  * @soc: DP soc handle
4257  * @ring_type: ring type
4258  * @ring_num: ring_num
4259  *
4260  * Return: 0 if the ring is not offloaded, non-0 if it is offloaded
4261  */
4262 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
4263 					    enum hal_ring_type ring_type, int ring_num)
4264 {
4265 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4266 	uint8_t status = 0;
4267 
4268 	switch (ring_type) {
4269 	case WBM2SW_RELEASE:
4270 	case REO_DST:
4271 	case RXDMA_BUF:
4272 	case REO_EXCEPTION:
4273 		status = ((nss_config) & (1 << ring_num));
4274 		break;
4275 	default:
4276 		break;
4277 	}
4278 
4279 	return status;
4280 }
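
/*
 * Example, assuming (for illustration only) that dp_nss_cfg_dbdc encodes
 * as the two-bit map 0x3: REO_DST ring 1 yields (0x3 & (1 << 1)) = 0x2,
 * i.e. offloaded, while ring 2 yields (0x3 & (1 << 2)) = 0.
 */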
4281 
4282 /**
4283  * dp_soc_disable_unused_mac_intr_mask() - reset interrupt mask for
4284  *					  unused WMAC hw rings
4285  * @soc: DP Soc handle
4286  * @mac_num: wmac num
4287  *
4288  * Return: Return void
4289  */
4290 static void dp_soc_disable_unused_mac_intr_mask(struct dp_soc *soc,
4291 						int mac_num)
4292 {
4293 	uint8_t *grp_mask = NULL;
4294 	int group_number;
4295 
4296 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
4297 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4298 	wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
4299 					  group_number, 0x0);
4300 
4301 	grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
4302 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4303 	wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
4304 				      group_number, 0x0);
4305 
4306 	grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
4307 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4308 	wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
4309 					  group_number, 0x0);
4310 
4311 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
4312 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4313 	wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
4314 					      group_number, 0x0);
4315 }
4316 
4317 #ifdef IPA_OFFLOAD
4318 #ifdef IPA_WDI3_VLAN_SUPPORT
4319 /**
4320  * dp_soc_reset_ipa_vlan_intr_mask() - reset interrupt mask for IPA offloaded
4321  *                                     ring for vlan tagged traffic
4322  * @soc: DP Soc handle
4323  *
4324  * Return: Return void
4325  */
4326 static void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
4327 {
4328 	uint8_t *grp_mask = NULL;
4329 	int group_number, mask;
4330 
4331 	if (!wlan_ipa_is_vlan_enabled())
4332 		return;
4333 
4334 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
4335 
4336 	group_number = dp_srng_find_ring_in_mask(IPA_ALT_REO_DEST_RING_IDX, grp_mask);
4337 	if (group_number < 0) {
4338 		dp_init_debug("%pK: ring not part of any group; ring_type: %d, ring_num %d",
4339 			      soc, REO_DST, IPA_ALT_REO_DEST_RING_IDX);
4340 		return;
4341 	}
4342 
4343 	mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
4344 
4345 	/* reset the interrupt mask for offloaded ring */
4346 	mask &= (~(1 << IPA_ALT_REO_DEST_RING_IDX));
4347 
4348 	/*
4349 	 * set the interrupt mask to zero for rx offloaded radio.
4350 	 */
4351 	wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
4352 }
4353 #else
4354 static inline
4355 void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
4356 { }
4357 #endif /* IPA_WDI3_VLAN_SUPPORT */
4358 #else
4359 static inline
4360 void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
4361 { }
4362 #endif /* IPA_OFFLOAD */
4363 
4364 /**
4365  * dp_soc_reset_intr_mask() - reset interrupt mask
4366  * @soc: DP Soc handle
4367  *
4368  * Return: Return void
4369  */
4370 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
4371 {
4372 	uint8_t j;
4373 	uint8_t *grp_mask = NULL;
4374 	int group_number, mask, num_ring;
4375 
4376 	/* number of tx ring */
4377 	num_ring = soc->num_tcl_data_rings;
4378 
4379 	/*
4380 	 * group mask for tx completion  ring.
4381 	 */
4382 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
4383 
4384 	/* loop and reset the mask for only offloaded ring */
4385 	for (j = 0; j < WLAN_CFG_NUM_TCL_DATA_RINGS; j++) {
4386 		/*
4387 		 * Group number corresponding to tx offloaded ring.
4388 		 */
4389 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
4390 		if (group_number < 0) {
4391 			dp_init_debug("%pK: ring not part of any group; ring_type: %d, ring_num %d",
4392 				      soc, WBM2SW_RELEASE, j);
4393 			continue;
4394 		}
4395 
4396 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
4397 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j) &&
4398 		    (!mask)) {
4399 			continue;
4400 		}
4401 
4402 		/* reset the tx mask for offloaded ring */
4403 		mask &= (~(1 << j));
4404 
4405 		/*
4406 		 * reset the interrupt mask for offloaded ring.
4407 		 */
4408 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
4409 	}
4410 
4411 	/* number of rx rings */
4412 	num_ring = soc->num_reo_dest_rings;
4413 
4414 	/*
4415 	 * group mask for reo destination ring.
4416 	 */
4417 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
4418 
4419 	/* loop and reset the mask for only offloaded ring */
4420 	for (j = 0; j < WLAN_CFG_NUM_REO_DEST_RING; j++) {
4421 		/*
4422 		 * Group number corresponding to rx offloaded ring.
4423 		 */
4424 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
4425 		if (group_number < 0) {
4426 			dp_init_debug("%pK: ring not part of any group; ring_type: %d, ring_num %d",
4427 				      soc, REO_DST, j);
4428 			continue;
4429 		}
4430 
4431 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
4432 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j) &&
4433 		    (!mask)) {
4434 			continue;
4435 		}
4436 
4437 		/* reset the interrupt mask for offloaded ring */
4438 		mask &= (~(1 << j));
4439 
4440 		/*
4441 		 * set the interrupt mask to zero for rx offloaded radio.
4442 		 */
4443 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
4444 	}
4445 
4446 	/*
4447 	 * group mask for Rx buffer refill ring
4448 	 */
4449 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
4450 
4451 	/* loop and reset the mask for only offloaded ring */
4452 	for (j = 0; j < MAX_PDEV_CNT; j++) {
4453 		int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
4454 
4455 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
4456 			continue;
4457 		}
4458 
4459 		/*
4460 		 * Group number corresponding to rx offloaded ring.
4461 		 */
4462 		group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask);
4463 		if (group_number < 0) {
4464 			dp_init_debug("%pK: ring not part of any group; ring_type: %d, ring_num %d",
4465 				      soc, RXDMA_BUF, lmac_id);
4466 			continue;
4467 		}
4468 
4469 		/* set the interrupt mask for offloaded ring */
4470 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
4471 				group_number);
4472 		mask &= (~(1 << lmac_id));
4473 
4474 		/*
4475 		 * set the interrupt mask to zero for rx offloaded radio.
4476 		 */
4477 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
4478 			group_number, mask);
4479 	}
4480 
4481 	grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
4482 
4483 	for (j = 0; j < num_ring; j++) {
4484 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_EXCEPTION, j)) {
4485 			continue;
4486 		}
4487 
4488 		/*
4489 		 * Group number corresponding to rx err ring.
4490 		 */
4491 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
4492 		if (group_number < 0) {
4493 			dp_init_debug("%pK: ring not part of any group; ring_type: %d, ring_num %d",
4494 				      soc, REO_EXCEPTION, j);
4495 			continue;
4496 		}
4497 
4498 		wlan_cfg_set_rx_err_ring_mask(soc->wlan_cfg_ctx,
4499 					      group_number, 0);
4500 	}
4501 }
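
/*
 * Mask arithmetic used by the reset helpers above, for illustration: if a
 * group's rx ring mask is 0x7 and ring j = 1 is NSS offloaded, then
 * mask &= ~(1 << 1) leaves 0x5, so only rings 0 and 2 keep raising host
 * interrupts.
 */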
4502 
4503 #ifdef IPA_OFFLOAD
4504 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap0,
4505 			 uint32_t *remap1, uint32_t *remap2)
4506 {
4507 	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX] = {
4508 				REO_REMAP_SW1, REO_REMAP_SW2, REO_REMAP_SW3,
4509 				REO_REMAP_SW5, REO_REMAP_SW6, REO_REMAP_SW7};
4510 
4511 	switch (soc->arch_id) {
4512 	case CDP_ARCH_TYPE_BE:
4513 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4514 					      soc->num_reo_dest_rings -
4515 					      USE_2_IPA_RX_REO_RINGS, remap1,
4516 					      remap2);
4517 		break;
4518 
4519 	case CDP_ARCH_TYPE_LI:
4520 		if (wlan_ipa_is_vlan_enabled()) {
4521 			hal_compute_reo_remap_ix2_ix3(
4522 					soc->hal_soc, ring,
4523 					soc->num_reo_dest_rings -
4524 					USE_2_IPA_RX_REO_RINGS, remap1,
4525 					remap2);
4526 
4527 		} else {
4528 			hal_compute_reo_remap_ix2_ix3(
4529 					soc->hal_soc, ring,
4530 					soc->num_reo_dest_rings -
4531 					USE_1_IPA_RX_REO_RING, remap1,
4532 					remap2);
4533 		}
4534 
4535 		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);
4536 		break;
4537 	default:
4538 		dp_err("unknown arch_id 0x%x", soc->arch_id);
4539 		QDF_BUG(0);
4540 
4541 	}
4542 
4543 	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
4544 
4545 	return true;
4546 }
4547 
4548 #ifdef IPA_WDI3_TX_TWO_PIPES
4549 static bool dp_ipa_is_alt_tx_ring(int index)
4550 {
4551 	return index == IPA_TX_ALT_RING_IDX;
4552 }
4553 
4554 static bool dp_ipa_is_alt_tx_comp_ring(int index)
4555 {
4556 	return index == IPA_TX_ALT_COMP_RING_IDX;
4557 }
4558 #else /* !IPA_WDI3_TX_TWO_PIPES */
4559 static bool dp_ipa_is_alt_tx_ring(int index)
4560 {
4561 	return false;
4562 }
4563 
4564 static bool dp_ipa_is_alt_tx_comp_ring(int index)
4565 {
4566 	return false;
4567 }
4568 #endif /* IPA_WDI3_TX_TWO_PIPES */
4569 
4570 /**
4571  * dp_ipa_get_tx_ring_size() - Get Tx ring size for IPA
4572  *
4573  * @tx_ring_num: Tx ring number
4574  * @tx_ipa_ring_sz: Return param only updated for IPA.
4575  * @soc_cfg_ctx: dp soc cfg context
4576  *
4577  * Return: None
4578  */
4579 static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz,
4580 				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4581 {
4582 	if (!soc_cfg_ctx->ipa_enabled)
4583 		return;
4584 
4585 	if (tx_ring_num == IPA_TCL_DATA_RING_IDX)
4586 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_ring_size(soc_cfg_ctx);
4587 	else if (dp_ipa_is_alt_tx_ring(tx_ring_num))
4588 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_alt_ring_size(soc_cfg_ctx);
4589 }
4590 
4591 /**
4592  * dp_ipa_get_tx_comp_ring_size() - Get Tx comp ring size for IPA
4593  *
4594  * @tx_comp_ring_num: Tx comp ring number
4595  * @tx_comp_ipa_ring_sz: Return param only updated for IPA.
4596  * @soc_cfg_ctx: dp soc cfg context
4597  *
4598  * Return: None
4599  */
4600 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
4601 					 int *tx_comp_ipa_ring_sz,
4602 				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4603 {
4604 	if (!soc_cfg_ctx->ipa_enabled)
4605 		return;
4606 
4607 	if (tx_comp_ring_num == IPA_TCL_DATA_RING_IDX)
4608 		*tx_comp_ipa_ring_sz =
4609 				wlan_cfg_ipa_tx_comp_ring_size(soc_cfg_ctx);
4610 	else if (dp_ipa_is_alt_tx_comp_ring(tx_comp_ring_num))
4611 		*tx_comp_ipa_ring_sz =
4612 				wlan_cfg_ipa_tx_alt_comp_ring_size(soc_cfg_ctx);
4613 }
4614 #else
4615 static uint8_t dp_reo_ring_selection(uint32_t value, uint32_t *ring)
4616 {
4617 	uint8_t num = 0;
4618 
4619 	switch (value) {
4620 	/* should we have all the different possible ring configs */
4621 	case 0xFF:
4622 		num = 8;
4623 		ring[0] = REO_REMAP_SW1;
4624 		ring[1] = REO_REMAP_SW2;
4625 		ring[2] = REO_REMAP_SW3;
4626 		ring[3] = REO_REMAP_SW4;
4627 		ring[4] = REO_REMAP_SW5;
4628 		ring[5] = REO_REMAP_SW6;
4629 		ring[6] = REO_REMAP_SW7;
4630 		ring[7] = REO_REMAP_SW8;
4631 		break;
4632 
4633 	case 0x3F:
4634 		num = 6;
4635 		ring[0] = REO_REMAP_SW1;
4636 		ring[1] = REO_REMAP_SW2;
4637 		ring[2] = REO_REMAP_SW3;
4638 		ring[3] = REO_REMAP_SW4;
4639 		ring[4] = REO_REMAP_SW5;
4640 		ring[5] = REO_REMAP_SW6;
4641 		break;
4642 
4643 	case 0xF:
4644 		num = 4;
4645 		ring[0] = REO_REMAP_SW1;
4646 		ring[1] = REO_REMAP_SW2;
4647 		ring[2] = REO_REMAP_SW3;
4648 		ring[3] = REO_REMAP_SW4;
4649 		break;
4650 	case 0xE:
4651 		num = 3;
4652 		ring[0] = REO_REMAP_SW2;
4653 		ring[1] = REO_REMAP_SW3;
4654 		ring[2] = REO_REMAP_SW4;
4655 		break;
4656 	case 0xD:
4657 		num = 3;
4658 		ring[0] = REO_REMAP_SW1;
4659 		ring[1] = REO_REMAP_SW3;
4660 		ring[2] = REO_REMAP_SW4;
4661 		break;
4662 	case 0xC:
4663 		num = 2;
4664 		ring[0] = REO_REMAP_SW3;
4665 		ring[1] = REO_REMAP_SW4;
4666 		break;
4667 	case 0xB:
4668 		num = 3;
4669 		ring[0] = REO_REMAP_SW1;
4670 		ring[1] = REO_REMAP_SW2;
4671 		ring[2] = REO_REMAP_SW4;
4672 		break;
4673 	case 0xA:
4674 		num = 2;
4675 		ring[0] = REO_REMAP_SW2;
4676 		ring[1] = REO_REMAP_SW4;
4677 		break;
4678 	case 0x9:
4679 		num = 2;
4680 		ring[0] = REO_REMAP_SW1;
4681 		ring[1] = REO_REMAP_SW4;
4682 		break;
4683 	case 0x8:
4684 		num = 1;
4685 		ring[0] = REO_REMAP_SW4;
4686 		break;
4687 	case 0x7:
4688 		num = 3;
4689 		ring[0] = REO_REMAP_SW1;
4690 		ring[1] = REO_REMAP_SW2;
4691 		ring[2] = REO_REMAP_SW3;
4692 		break;
4693 	case 0x6:
4694 		num = 2;
4695 		ring[0] = REO_REMAP_SW2;
4696 		ring[1] = REO_REMAP_SW3;
4697 		break;
4698 	case 0x5:
4699 		num = 2;
4700 		ring[0] = REO_REMAP_SW1;
4701 		ring[1] = REO_REMAP_SW3;
4702 		break;
4703 	case 0x4:
4704 		num = 1;
4705 		ring[0] = REO_REMAP_SW3;
4706 		break;
4707 	case 0x3:
4708 		num = 2;
4709 		ring[0] = REO_REMAP_SW1;
4710 		ring[1] = REO_REMAP_SW2;
4711 		break;
4712 	case 0x2:
4713 		num = 1;
4714 		ring[0] = REO_REMAP_SW2;
4715 		break;
4716 	case 0x1:
4717 		num = 1;
4718 		ring[0] = REO_REMAP_SW1;
4719 		break;
4720 	default:
4721 		dp_err("unknown reo ring map 0x%x", value);
4722 		QDF_BUG(0);
4723 	}
4724 	return num;
4725 }
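
/*
 * For example, dp_reo_ring_selection(0xD, ring) returns num = 3 with
 * ring[] = { REO_REMAP_SW1, REO_REMAP_SW3, REO_REMAP_SW4 }; 0xD is the
 * value the dp_nss_cfg_second_radio case below produces when rings 0-3
 * are all enabled in reo_config.
 */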
4726 
4727 bool dp_reo_remap_config(struct dp_soc *soc,
4728 			 uint32_t *remap0,
4729 			 uint32_t *remap1,
4730 			 uint32_t *remap2)
4731 {
4732 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4733 	uint32_t reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
4734 	uint8_t num;
4735 	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX];
4736 	uint32_t value;
4737 
4738 	switch (offload_radio) {
4739 	case dp_nss_cfg_default:
4740 		value = reo_config & WLAN_CFG_NUM_REO_RINGS_MAP_MAX;
4741 		num = dp_reo_ring_selection(value, ring);
4742 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4743 					      num, remap1, remap2);
4744 		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);
4745 
4746 		break;
4747 	case dp_nss_cfg_first_radio:
4748 		value = reo_config & 0xE;
4749 		num = dp_reo_ring_selection(value, ring);
4750 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4751 					      num, remap1, remap2);
4752 
4753 		break;
4754 	case dp_nss_cfg_second_radio:
4755 		value = reo_config & 0xD;
4756 		num = dp_reo_ring_selection(value, ring);
4757 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4758 					      num, remap1, remap2);
4759 
4760 		break;
4761 	case dp_nss_cfg_dbdc:
4762 	case dp_nss_cfg_dbtc:
4763 		/* return false if both or all are offloaded to NSS */
4764 		return false;
4765 
4766 	}
4767 
4768 	dp_debug("remap1 %x remap2 %x offload_radio %u",
4769 		 *remap1, *remap2, offload_radio);
4770 	return true;
4771 }
4772 
4773 static void dp_ipa_get_tx_ring_size(int ring_num, int *tx_ipa_ring_sz,
4774 				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4775 {
4776 }
4777 
4778 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
4779 					 int *tx_comp_ipa_ring_sz,
4780 				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4781 {
4782 }
4783 #endif /* IPA_OFFLOAD */
4784 
4785 /**
4786  * dp_reo_frag_dst_set() - configure reo register to set the
4787  *                        fragment destination ring
4788  * @soc: Datapath soc
4789  * @frag_dst_ring: output parameter to set fragment destination ring
4790  *
4791  * Based on the offload_radio setting, the fragment destination ring is selected as follows:
4792  * 0 - TCL
4793  * 1 - SW1
4794  * 2 - SW2
4795  * 3 - SW3
4796  * 4 - SW4
4797  * 5 - Release
4798  * 6 - FW
4799  * 7 - alternate select
4800  *
4801  * Return: void
4802  */
4803 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
4804 {
4805 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4806 
4807 	switch (offload_radio) {
4808 	case dp_nss_cfg_default:
4809 		*frag_dst_ring = REO_REMAP_TCL;
4810 		break;
4811 	case dp_nss_cfg_first_radio:
4812 		/*
4813 		 * This configuration is valid for single band radio which
4814 		 * is also NSS offload.
4815 		 */
4816 	case dp_nss_cfg_dbdc:
4817 	case dp_nss_cfg_dbtc:
4818 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
4819 		break;
4820 	default:
4821 		dp_init_err("%pK: dp_reo_frag_dst_set invalid offload radio config", soc);
4822 		break;
4823 	}
4824 }
4825 
4826 #ifdef ENABLE_VERBOSE_DEBUG
4827 static void dp_enable_verbose_debug(struct dp_soc *soc)
4828 {
4829 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
4830 
4831 	soc_cfg_ctx = soc->wlan_cfg_ctx;
4832 
4833 	if (soc_cfg_ctx->per_pkt_trace & dp_verbose_debug_mask)
4834 		is_dp_verbose_debug_enabled = true;
4835 
4836 	if (soc_cfg_ctx->per_pkt_trace & hal_verbose_debug_mask)
4837 		hal_set_verbose_debug(true);
4838 	else
4839 		hal_set_verbose_debug(false);
4840 }
4841 #else
4842 static void dp_enable_verbose_debug(struct dp_soc *soc)
4843 {
4844 }
4845 #endif
4846 
4847 #ifdef WLAN_FEATURE_STATS_EXT
4848 static inline void dp_create_ext_stats_event(struct dp_soc *soc)
4849 {
4850 	qdf_event_create(&soc->rx_hw_stats_event);
4851 }
4852 #else
4853 static inline void dp_create_ext_stats_event(struct dp_soc *soc)
4854 {
4855 }
4856 #endif
4857 
4858 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index)
4859 {
4860 	int tcl_ring_num, wbm_ring_num;
4861 
4862 	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
4863 						index,
4864 						&tcl_ring_num,
4865 						&wbm_ring_num);
4866 
4867 	if (tcl_ring_num == -1) {
4868 		dp_err("incorrect tcl ring num for index %u", index);
4869 		return;
4870 	}
4871 
4872 	wlan_minidump_remove(soc->tcl_data_ring[index].base_vaddr_unaligned,
4873 			     soc->tcl_data_ring[index].alloc_size,
4874 			     soc->ctrl_psoc,
4875 			     WLAN_MD_DP_SRNG_TCL_DATA,
4876 			     "tcl_data_ring");
4877 	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
4878 	dp_srng_deinit(soc, &soc->tcl_data_ring[index], TCL_DATA,
4879 		       tcl_ring_num);
4880 
4881 	if (wbm_ring_num == INVALID_WBM_RING_NUM)
4882 		return;
4883 
4884 	wlan_minidump_remove(soc->tx_comp_ring[index].base_vaddr_unaligned,
4885 			     soc->tx_comp_ring[index].alloc_size,
4886 			     soc->ctrl_psoc,
4887 			     WLAN_MD_DP_SRNG_TX_COMP,
4888 			     "tcl_comp_ring");
4889 	dp_srng_deinit(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4890 		       wbm_ring_num);
4891 }
4892 
4893 /**
4894  * dp_init_tx_ring_pair_by_index() - Initialize a tcl data/wbm completion
4895  * ring pair
4896  * @soc: DP soc pointer
4897  * @index: index of soc->tcl_data_ring or soc->tx_comp_ring to initialize
4898  *
4899  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
4900  */
4901 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
4902 						uint8_t index)
4903 {
4904 	int tcl_ring_num, wbm_ring_num;
4905 	uint8_t bm_id;
4906 
4907 	if (index >= MAX_TCL_DATA_RINGS) {
4908 		dp_err("unexpected index!");
4909 		QDF_BUG(0);
4910 		goto fail1;
4911 	}
4912 
4913 	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
4914 						index,
4915 						&tcl_ring_num,
4916 						&wbm_ring_num);
4917 
4918 	if (tcl_ring_num == -1) {
4919 		dp_err("incorrect tcl ring num for index %u", index);
4920 		goto fail1;
4921 	}
4922 
4923 	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
4924 	if (dp_srng_init(soc, &soc->tcl_data_ring[index], TCL_DATA,
4925 			 tcl_ring_num, 0)) {
4926 		dp_err("dp_srng_init failed for tcl_data_ring");
4927 		goto fail1;
4928 	}
4929 	wlan_minidump_log(soc->tcl_data_ring[index].base_vaddr_unaligned,
4930 			  soc->tcl_data_ring[index].alloc_size,
4931 			  soc->ctrl_psoc,
4932 			  WLAN_MD_DP_SRNG_TCL_DATA,
4933 			  "tcl_data_ring");
4934 
4935 	if (wbm_ring_num == INVALID_WBM_RING_NUM)
4936 		goto set_rbm;
4937 
4938 	if (dp_srng_init(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4939 			 wbm_ring_num, 0)) {
4940 		dp_err("dp_srng_init failed for tx_comp_ring");
4941 		goto fail1;
4942 	}
4943 
4944 	wlan_minidump_log(soc->tx_comp_ring[index].base_vaddr_unaligned,
4945 			  soc->tx_comp_ring[index].alloc_size,
4946 			  soc->ctrl_psoc,
4947 			  WLAN_MD_DP_SRNG_TX_COMP,
4948 			  "tcl_comp_ring");
4949 set_rbm:
4950 	bm_id = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_ring_num);
4951 
4952 	soc->arch_ops.tx_implicit_rbm_set(soc, tcl_ring_num, bm_id);
4953 
4954 	return QDF_STATUS_SUCCESS;
4955 
4956 fail1:
4957 	return QDF_STATUS_E_FAILURE;
4958 }
4959 
4960 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index)
4961 {
4962 	dp_debug("index %u", index);
4963 	dp_srng_free(soc, &soc->tcl_data_ring[index]);
4964 	dp_srng_free(soc, &soc->tx_comp_ring[index]);
4965 }
4966 
4967 /**
4968  * dp_alloc_tx_ring_pair_by_index() - Allocate a tcl data/wbm2sw ring pair
4969  * for the given index
4970  * @soc: DP soc pointer
4971  * @index: index of soc->tcl_data_ring or soc->tx_comp_ring to allocate
4972  *
4973  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
4974  */
4975 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
4976 						 uint8_t index)
4977 {
4978 	int tx_ring_size;
4979 	int tx_comp_ring_size;
4980 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
4981 	int cached = 0;
4982 
4983 	if (index >= MAX_TCL_DATA_RINGS) {
4984 		dp_err("unexpected index!");
4985 		QDF_BUG(0);
4986 		goto fail1;
4987 	}
4988 
4989 	dp_debug("index %u", index);
4990 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
4991 	dp_ipa_get_tx_ring_size(index, &tx_ring_size, soc_cfg_ctx);
4992 
4993 	if (dp_srng_alloc(soc, &soc->tcl_data_ring[index], TCL_DATA,
4994 			  tx_ring_size, cached)) {
4995 		dp_err("dp_srng_alloc failed for tcl_data_ring");
4996 		goto fail1;
4997 	}
4998 
4999 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
5000 	dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size, soc_cfg_ctx);
5001 	/* Enable cached Tx completion ring descriptors if NSS offload is disabled */
5002 	if (!wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
5003 		cached = WLAN_CFG_DST_RING_CACHED_DESC;
5004 
5005 	if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) ==
5006 	    INVALID_WBM_RING_NUM)
5007 		return QDF_STATUS_SUCCESS;
5008 
5009 	if (dp_srng_alloc(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
5010 			  tx_comp_ring_size, cached)) {
5011 		dp_err("dp_srng_alloc failed for tx_comp_ring");
5012 		goto fail1;
5013 	}
5014 
5015 	return QDF_STATUS_SUCCESS;
5016 
5017 fail1:
5018 	return QDF_STATUS_E_FAILURE;
5019 }
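
/*
 * Life cycle sketch for one Tx ring pair (hypothetical caller, error
 * handling elided; QDF_STATUS_SUCCESS is zero, so a nonzero return means
 * failure):
 *
 *	if (dp_alloc_tx_ring_pair_by_index(soc, idx))
 *		goto fail;
 *	if (dp_init_tx_ring_pair_by_index(soc, idx))
 *		goto free_rings;
 *	...
 *	dp_deinit_tx_pair_by_index(soc, idx);
 * free_rings:
 *	dp_free_tx_ring_pair_by_index(soc, idx);
 */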
5020 
5021 static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
5022 {
5023 	struct cdp_lro_hash_config lro_hash;
5024 	QDF_STATUS status;
5025 
5026 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
5027 	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
5028 	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
5029 		dp_err("LRO, GRO and RX hash disabled");
5030 		return QDF_STATUS_E_FAILURE;
5031 	}
5032 
5033 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
5034 
5035 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
5036 	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
5037 		lro_hash.lro_enable = 1;
5038 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
5039 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
5040 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
5041 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
5042 	}
5043 
5044 	soc->arch_ops.get_rx_hash_key(soc, &lro_hash);
5045 
5046 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
5047 
5048 	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
5049 		QDF_BUG(0);
5050 		dp_err("lro_hash_config not configured");
5051 		return QDF_STATUS_E_FAILURE;
5052 	}
5053 
5054 	status = soc->cdp_soc.ol_ops->lro_hash_config(soc->ctrl_psoc,
5055 						      pdev->pdev_id,
5056 						      &lro_hash);
5057 	if (!QDF_IS_STATUS_SUCCESS(status)) {
5058 		dp_err("failed to send lro_hash_config to FW %u", status);
5059 		return status;
5060 	}
5061 
5062 	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
5063 		lro_hash.lro_enable, lro_hash.tcp_flag,
5064 		lro_hash.tcp_flag_mask);
5065 
5066 	dp_info("toeplitz_hash_ipv4:");
5067 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5068 			   lro_hash.toeplitz_hash_ipv4,
5069 			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
5070 			   LRO_IPV4_SEED_ARR_SZ));
5071 
5072 	dp_info("toeplitz_hash_ipv6:");
5073 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5074 			   lro_hash.toeplitz_hash_ipv6,
5075 			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
5076 			   LRO_IPV6_SEED_ARR_SZ));
5077 
5078 	return status;
5079 }
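
/*
 * Illustration of the flag fields above, stated as an assumption about
 * the usual FW/HW interpretation: a TCP segment is LRO-eligible when
 * (tcp_flags & tcp_flag_mask) == tcp_flag, i.e. ACK set and
 * FIN/SYN/RST/URG/ECE/CWR all clear.
 */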
5080 
5081 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
5082 /**
5083  * dp_reap_timer_init() - initialize the reap timer
5084  * @soc: data path SoC handle
5085  *
5086  * Return: void
5087  */
5088 static void dp_reap_timer_init(struct dp_soc *soc)
5089 {
5090 	/*
5091 	 * Timer to reap rxdma status rings.
5092 	 * Needed until we enable ppdu end interrupts
5093 	 */
5094 	dp_monitor_reap_timer_init(soc);
5095 	dp_monitor_vdev_timer_init(soc);
5096 }
5097 
5098 /**
5099  * dp_reap_timer_deinit() - de-initialize the reap timer
5100  * @soc: data path SoC handle
5101  *
5102  * Return: void
5103  */
5104 static void dp_reap_timer_deinit(struct dp_soc *soc)
5105 {
5106 	dp_monitor_reap_timer_deinit(soc);
5107 }
5108 #else
5109 /* WIN use case */
5110 static void dp_reap_timer_init(struct dp_soc *soc)
5111 {
5112 	/* Configure LMAC rings in Polled mode */
5113 	if (soc->lmac_polled_mode) {
5114 		/*
5115 		 * Timer to reap lmac rings.
5116 		 */
5117 		qdf_timer_init(soc->osdev, &soc->lmac_reap_timer,
5118 			       dp_service_lmac_rings, (void *)soc,
5119 			       QDF_TIMER_TYPE_WAKE_APPS);
5120 		soc->lmac_timer_init = 1;
5121 		qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
5122 	}
5123 }
5124 
5125 static void dp_reap_timer_deinit(struct dp_soc *soc)
5126 {
5127 	if (soc->lmac_timer_init) {
5128 		qdf_timer_stop(&soc->lmac_reap_timer);
5129 		qdf_timer_free(&soc->lmac_reap_timer);
5130 		soc->lmac_timer_init = 0;
5131 	}
5132 }
5133 #endif
5134 
5135 #ifdef QCA_HOST2FW_RXBUF_RING
5136 /**
5137  * dp_rxdma_ring_alloc() - allocate the RXDMA rings
5138  * @soc: data path SoC handle
5139  * @pdev: Physical device handle
5140  *
5141  * Return: 0 - success, > 0 - failure
5142  */
5143 static int dp_rxdma_ring_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
5144 {
5145 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
5146 	int max_mac_rings;
5147 	int i;
5148 	int ring_size;
5149 
5150 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
5151 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
5152 	ring_size =  wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx);
5153 
5154 	for (i = 0; i < max_mac_rings; i++) {
5155 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
5156 		if (dp_srng_alloc(soc, &pdev->rx_mac_buf_ring[i],
5157 				  RXDMA_BUF, ring_size, 0)) {
5158 			dp_init_err("%pK: failed rx mac ring setup", soc);
5159 			return QDF_STATUS_E_FAILURE;
5160 		}
5161 	}
5162 	return QDF_STATUS_SUCCESS;
5163 }
5164 
5165 /**
5166  * dp_rxdma_ring_setup() - configure the RXDMA rings
5167  * @soc: data path SoC handle
5168  * @pdev: Physical device handle
5169  *
5170  * Return: 0 - success, > 0 - failure
5171  */
5172 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
5173 {
5174 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
5175 	int max_mac_rings;
5176 	int i;
5177 
5178 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
5179 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
5180 
5181 	for (i = 0; i < max_mac_rings; i++) {
5182 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
5183 		if (dp_srng_init(soc, &pdev->rx_mac_buf_ring[i],
5184 				 RXDMA_BUF, 1, i)) {
5185 			dp_init_err("%pK: failed rx mac ring setup", soc);
5186 			return QDF_STATUS_E_FAILURE;
5187 		}
5188 	}
5189 	return QDF_STATUS_SUCCESS;
5190 }
5191 
5192 /**
5193  * dp_rxdma_ring_cleanup() - Deinit the RXDMA rings and reap timer
5194  * @soc: data path SoC handle
5195  * @pdev: Physical device handle
5196  *
5197  * Return: void
5198  */
5199 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
5200 {
5201 	int i;
5202 
5203 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
5204 		dp_srng_deinit(soc, &pdev->rx_mac_buf_ring[i], RXDMA_BUF, 1);
5205 
5206 	dp_reap_timer_deinit(soc);
5207 }
5208 
5209 /**
5210  * dp_rxdma_ring_free() - Free the RXDMA rings
5211  * @pdev: Physical device handle
5212  *
5213  * Return: void
5214  */
5215 static void dp_rxdma_ring_free(struct dp_pdev *pdev)
5216 {
5217 	int i;
5218 
5219 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
5220 		dp_srng_free(pdev->soc, &pdev->rx_mac_buf_ring[i]);
5221 }
5222 
5223 #else
5224 static int dp_rxdma_ring_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
5225 {
5226 	return QDF_STATUS_SUCCESS;
5227 }
5228 
5229 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
5230 {
5231 	return QDF_STATUS_SUCCESS;
5232 }
5233 
5234 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
5235 {
5236 	dp_reap_timer_deinit(soc);
5237 }
5238 
5239 static void dp_rxdma_ring_free(struct dp_pdev *pdev)
5240 {
5241 }
5242 #endif
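
/*
 * Pairing note (illustrative): dp_rxdma_ring_alloc()/dp_rxdma_ring_free()
 * manage the memory of the per-MAC Rx buffer rings, while
 * dp_rxdma_ring_setup()/dp_rxdma_ring_cleanup() handle their init and
 * deinit; cleanup also tears down the reap timer via
 * dp_reap_timer_deinit().
 */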
5243 
5244 /**
5245  * dp_dscp_tid_map_setup() - Initialize the dscp-tid maps
5246  * @pdev: DP_PDEV handle
5247  *
5248  * Return: void
5249  */
5250 static inline void
5251 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
5252 {
5253 	uint8_t map_id;
5254 	struct dp_soc *soc = pdev->soc;
5255 
5256 	if (!soc)
5257 		return;
5258 
5259 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
5260 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
5261 			     default_dscp_tid_map,
5262 			     sizeof(default_dscp_tid_map));
5263 	}
5264 
5265 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
5266 		hal_tx_set_dscp_tid_map(soc->hal_soc,
5267 					default_dscp_tid_map,
5268 					map_id);
5269 	}
5270 }
5271 
5272 /**
5273  * dp_pcp_tid_map_setup() - Initialize the pcp-tid maps
5274  * @pdev: DP_PDEV handle
5275  *
5276  * Return: void
5277  */
5278 static inline void
5279 dp_pcp_tid_map_setup(struct dp_pdev *pdev)
5280 {
5281 	struct dp_soc *soc = pdev->soc;
5282 
5283 	if (!soc)
5284 		return;
5285 
5286 	qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
5287 		     sizeof(default_pcp_tid_map));
5288 	hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
5289 }
5290 
5291 #ifdef IPA_OFFLOAD
5292 /**
5293  * dp_setup_ipa_rx_refill_buf_ring() - Setup the second Rx refill buffer ring
5294  * @soc: data path instance
5295  * @pdev: core txrx pdev context
5296  *
5297  * Return: QDF_STATUS_SUCCESS: success
5298  *         QDF_STATUS_E_RESOURCES: Error return
5299  */
5300 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5301 					   struct dp_pdev *pdev)
5302 {
5303 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5304 	int entries;
5305 
5306 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5307 		soc_cfg_ctx = soc->wlan_cfg_ctx;
5308 		entries =
5309 			wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
5310 
5311 		/* Setup second Rx refill buffer ring */
5312 		if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
5313 				  entries, 0)) {
5314 			dp_init_err("%pK: dp_srng_alloc failed second "
5315 				    "rx refill ring", soc);
5316 			return QDF_STATUS_E_FAILURE;
5317 		}
5318 	}
5319 
5320 	return QDF_STATUS_SUCCESS;
5321 }
5322 
5323 #ifdef IPA_WDI3_VLAN_SUPPORT
5324 static int dp_setup_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5325 					       struct dp_pdev *pdev)
5326 {
5327 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5328 	int entries;
5329 
5330 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5331 	    wlan_ipa_is_vlan_enabled()) {
5332 		soc_cfg_ctx = soc->wlan_cfg_ctx;
5333 		entries =
5334 			wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
5335 
5336 		/* Setup second Rx refill buffer ring */
5337 		if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF,
5338 				  entries, 0)) {
5339 			dp_init_err("%pK: alloc failed for 3rd rx refill ring",
5340 				    soc);
5341 			return QDF_STATUS_E_FAILURE;
5342 		}
5343 	}
5344 
5345 	return QDF_STATUS_SUCCESS;
5346 }
5347 
5348 static int dp_init_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5349 					      struct dp_pdev *pdev)
5350 {
5351 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5352 	    wlan_ipa_is_vlan_enabled()) {
5353 		if (dp_srng_init(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF,
5354 				 IPA_RX_ALT_REFILL_BUF_RING_IDX,
5355 				 pdev->pdev_id)) {
5356 			dp_init_err("%pK: init failed for 3rd rx refill ring",
5357 				    soc);
5358 			return QDF_STATUS_E_FAILURE;
5359 		}
5360 	}
5361 
5362 	return QDF_STATUS_SUCCESS;
5363 }
5364 
5365 static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5366 						 struct dp_pdev *pdev)
5367 {
5368 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5369 	    wlan_ipa_is_vlan_enabled())
5370 		dp_srng_deinit(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF, 0);
5371 }
5372 
5373 static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5374 					       struct dp_pdev *pdev)
5375 {
5376 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5377 	    wlan_ipa_is_vlan_enabled())
5378 		dp_srng_free(soc, &pdev->rx_refill_buf_ring3);
5379 }
5380 #else
5381 static int dp_setup_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5382 					       struct dp_pdev *pdev)
5383 {
5384 	return QDF_STATUS_SUCCESS;
5385 }
5386 
5387 static int dp_init_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5388 					      struct dp_pdev *pdev)
5389 {
5390 	return QDF_STATUS_SUCCESS;
5391 }
5392 
5393 static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5394 						 struct dp_pdev *pdev)
5395 {
5396 }
5397 
5398 static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5399 					       struct dp_pdev *pdev)
5400 {
5401 }
5402 #endif
5403 
5404 /**
5405  * dp_deinit_ipa_rx_refill_buf_ring() - Deinit second Rx refill buffer ring
5406  * @soc: data path instance
5407  * @pdev: core txrx pdev context
5408  *
5409  * Return: void
5410  */
5411 static void dp_deinit_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5412 					     struct dp_pdev *pdev)
5413 {
5414 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
5415 		dp_srng_deinit(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 0);
5416 }
5417 
5418 /**
5419  * dp_init_ipa_rx_refill_buf_ring() - Init second Rx refill buffer ring
5420  * @soc: data path instance
5421  * @pdev: core txrx pdev context
5422  *
5423  * Return: QDF_STATUS_SUCCESS: success
5424  *         QDF_STATUS_E_FAILURE: Error return
5425  */
5426 static int dp_init_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5427 					  struct dp_pdev *pdev)
5428 {
5429 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5430 		if (dp_srng_init(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
5431 				 IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id)) {
5432 			dp_init_err("%pK: dp_srng_init failed second "
5433 				    "rx refill ring", soc);
5434 			return QDF_STATUS_E_FAILURE;
5435 		}
5436 	}
5437 
5438 	if (dp_init_ipa_rx_alt_refill_buf_ring(soc, pdev)) {
5439 		dp_deinit_ipa_rx_refill_buf_ring(soc, pdev);
5440 		return QDF_STATUS_E_FAILURE;
5441 	}
5442 
5443 	return QDF_STATUS_SUCCESS;
5444 }
5445 
5446 /**
5447  * dp_free_ipa_rx_refill_buf_ring() - Free second Rx refill buffer ring
5448  * @soc: data path instance
5449  * @pdev: core txrx pdev context
5450  *
5451  * Return: void
5452  */
5453 static void dp_free_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5454 					   struct dp_pdev *pdev)
5455 {
5456 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
5457 		dp_srng_free(soc, &pdev->rx_refill_buf_ring2);
5458 }
5459 #else
5460 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5461 					   struct dp_pdev *pdev)
5462 {
5463 	return QDF_STATUS_SUCCESS;
5464 }
5465 
5466 static int dp_init_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5467 					  struct dp_pdev *pdev)
5468 {
5469 	return QDF_STATUS_SUCCESS;
5470 }
5471 
5472 static void dp_deinit_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5473 					     struct dp_pdev *pdev)
5474 {
5475 }
5476 
5477 static void dp_free_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5478 					   struct dp_pdev *pdev)
5479 {
5480 }
5481 
5482 static int dp_setup_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5483 					       struct dp_pdev *pdev)
5484 {
5485 	return QDF_STATUS_SUCCESS;
5486 }
5487 
5488 static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5489 						 struct dp_pdev *pdev)
5490 {
5491 }
5492 
5493 static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5494 					       struct dp_pdev *pdev)
5495 {
5496 }
5497 #endif
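
/*
 * Lifecycle sketch (example-only; the function name is hypothetical):
 * the IPA refill rings follow the same alloc -> init -> deinit -> free
 * pairing as the other pdev SRNGs, with an init failure unwinding the
 * earlier alloc.
 */
static inline QDF_STATUS
example_ipa_refill_ring_bringup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))	/* alloc */
		return QDF_STATUS_E_FAILURE;

	if (dp_init_ipa_rx_refill_buf_ring(soc, pdev)) {	/* init */
		dp_free_ipa_rx_refill_buf_ring(soc, pdev);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}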
5498 
5499 #ifdef WLAN_FEATURE_DP_CFG_EVENT_HISTORY
5500 
5501 /**
5502  * dp_soc_cfg_history_attach() - Allocate and attach datapath config events
5503  *				 history
5504  * @soc: DP soc handle
5505  *
5506  * Return: None
5507  */
5508 static void dp_soc_cfg_history_attach(struct dp_soc *soc)
5509 {
5510 	dp_soc_frag_history_attach(soc, &soc->cfg_event_history,
5511 				   DP_CFG_EVT_HIST_MAX_SLOTS,
5512 				   DP_CFG_EVT_HIST_PER_SLOT_MAX,
5513 				   sizeof(struct dp_cfg_event),
5514 				   true, DP_CFG_EVENT_HIST_TYPE);
5515 }
5516 
5517 /**
5518  * dp_soc_cfg_history_detach() - Detach and free DP config events history
5519  * @soc: DP soc handle
5520  *
5521  * Return: none
5522  */
5523 static void dp_soc_cfg_history_detach(struct dp_soc *soc)
5524 {
5525 	dp_soc_frag_history_detach(soc, &soc->cfg_event_history,
5526 				   DP_CFG_EVT_HIST_MAX_SLOTS,
5527 				   true, DP_CFG_EVENT_HIST_TYPE);
5528 }
5529 
5530 #else
5531 static void dp_soc_cfg_history_attach(struct dp_soc *soc)
5532 {
5533 }
5534 
5535 static void dp_soc_cfg_history_detach(struct dp_soc *soc)
5536 {
5537 }
5538 #endif
5539 
5540 #ifdef DP_TX_HW_DESC_HISTORY
5541 /**
5542  * dp_soc_tx_hw_desc_history_attach() - Attach TX HW descriptor history
5544  * @soc: DP soc handle
5545  *
5546  * Return: None
5547  */
5548 static void dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc)
5549 {
5550 	dp_soc_frag_history_attach(soc, &soc->tx_hw_desc_history,
5551 				   DP_TX_HW_DESC_HIST_MAX_SLOTS,
5552 				   DP_TX_HW_DESC_HIST_PER_SLOT_MAX,
5553 				   sizeof(struct dp_tx_hw_desc_evt),
5554 				   true, DP_TX_HW_DESC_HIST_TYPE);
5555 }
5556 
5557 static void dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc)
5558 {
5559 	dp_soc_frag_history_detach(soc, &soc->tx_hw_desc_history,
5560 				   DP_TX_HW_DESC_HIST_MAX_SLOTS,
5561 				   true, DP_TX_HW_DESC_HIST_TYPE);
5562 }
5563 
5564 #else /* DP_TX_HW_DESC_HISTORY */
5565 static inline void
5566 dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc)
5567 {
5568 }
5569 
5570 static inline void
5571 dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc)
5572 {
5573 }
5574 #endif /* DP_TX_HW_DESC_HISTORY */
5575 
5576 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
5577 #ifndef RX_DEFRAG_DO_NOT_REINJECT
5578 /**
5579  * dp_soc_rx_reinject_ring_history_attach() - Attach the reo reinject ring
5580  *					    history.
5581  * @soc: DP soc handle
5582  *
5583  * Return: None
5584  */
5585 static void dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
5586 {
5587 	soc->rx_reinject_ring_history =
5588 		dp_context_alloc_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
5589 				     sizeof(struct dp_rx_reinject_history));
5590 	if (soc->rx_reinject_ring_history)
5591 		qdf_atomic_init(&soc->rx_reinject_ring_history->index);
5592 }
5593 #else /* RX_DEFRAG_DO_NOT_REINJECT */
5594 static inline void
5595 dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
5596 {
5597 }
5598 #endif /* RX_DEFRAG_DO_NOT_REINJECT */
5599 
5600 /**
5601  * dp_soc_rx_history_attach() - Attach the ring history record buffers
5602  * @soc: DP soc structure
5603  *
5604  * This function allocates the memory for recording the rx ring, rx error
5605  * ring and the reinject ring entries. There is no error returned in case
5606  * of allocation failure since the record function checks if the history is
5607  * initialized or not. We do not want to fail the driver load in case of
5608  * failure to allocate memory for debug history.
5609  *
5610  * Return: None
5611  */
5612 static void dp_soc_rx_history_attach(struct dp_soc *soc)
5613 {
5614 	int i;
5615 	uint32_t rx_ring_hist_size;
5616 	uint32_t rx_refill_ring_hist_size;
5617 
5618 	rx_ring_hist_size = sizeof(*soc->rx_ring_history[0]);
5619 	rx_refill_ring_hist_size = sizeof(*soc->rx_refill_ring_history[0]);
5620 
5621 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
5622 		soc->rx_ring_history[i] = dp_context_alloc_mem(
5623 				soc, DP_RX_RING_HIST_TYPE, rx_ring_hist_size);
5624 		if (soc->rx_ring_history[i])
5625 			qdf_atomic_init(&soc->rx_ring_history[i]->index);
5626 	}
5627 
5628 	soc->rx_err_ring_history = dp_context_alloc_mem(
5629 			soc, DP_RX_ERR_RING_HIST_TYPE, rx_ring_hist_size);
5630 	if (soc->rx_err_ring_history)
5631 		qdf_atomic_init(&soc->rx_err_ring_history->index);
5632 
5633 	dp_soc_rx_reinject_ring_history_attach(soc);
5634 
5635 	for (i = 0; i < MAX_PDEV_CNT; i++) {
5636 		soc->rx_refill_ring_history[i] = dp_context_alloc_mem(
5637 						soc,
5638 						DP_RX_REFILL_RING_HIST_TYPE,
5639 						rx_refill_ring_hist_size);
5640 
5641 		if (soc->rx_refill_ring_history[i])
5642 			qdf_atomic_init(&soc->rx_refill_ring_history[i]->index);
5643 	}
5644 }
5645 
5646 static void dp_soc_rx_history_detach(struct dp_soc *soc)
5647 {
5648 	int i;
5649 
5650 	for (i = 0; i < MAX_REO_DEST_RINGS; i++)
5651 		dp_context_free_mem(soc, DP_RX_RING_HIST_TYPE,
5652 				    soc->rx_ring_history[i]);
5653 
5654 	dp_context_free_mem(soc, DP_RX_ERR_RING_HIST_TYPE,
5655 			    soc->rx_err_ring_history);
5656 
5657 	/*
5658 	 * No need for a featurized detach since qdf_mem_free takes
5659 	 * care of NULL pointer.
5660 	 */
5661 	dp_context_free_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
5662 			    soc->rx_reinject_ring_history);
5663 
5664 	for (i = 0; i < MAX_PDEV_CNT; i++)
5665 		dp_context_free_mem(soc, DP_RX_REFILL_RING_HIST_TYPE,
5666 				    soc->rx_refill_ring_history[i]);
5667 }
5668 
5669 #else
5670 static inline void dp_soc_rx_history_attach(struct dp_soc *soc)
5671 {
5672 }
5673 
5674 static inline void dp_soc_rx_history_detach(struct dp_soc *soc)
5675 {
5676 }
5677 #endif
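
#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
/*
 * Recording sketch (example-only; the helper name is hypothetical):
 * record sites treat a NULL history as "feature off", which is why
 * dp_soc_rx_history_attach() does not fail the driver load on
 * allocation error.
 */
static inline void example_rx_ring_hist_claim(struct dp_soc *soc,
					      uint8_t ring_num)
{
	if (!soc->rx_ring_history[ring_num])
		return;	/* attach-time allocation failed; silently skip */

	/* atomically claim the next slot; real recorders wrap the index */
	(void)qdf_atomic_inc_return(&soc->rx_ring_history[ring_num]->index);
}
#endif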
5678 
5679 #ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
5680 /**
5681  * dp_soc_mon_status_ring_history_attach() - Attach the monitor status
5682  *					     buffer record history.
5683  * @soc: DP soc handle
5684  *
5685  * This function allocates memory to track the events for a monitor
5686  * status buffer before it is parsed and freed.
5687  *
5688  * Return: None
5689  */
5690 static void dp_soc_mon_status_ring_history_attach(struct dp_soc *soc)
5691 {
5692 	soc->mon_status_ring_history = dp_context_alloc_mem(soc,
5693 				DP_MON_STATUS_BUF_HIST_TYPE,
5694 				sizeof(struct dp_mon_status_ring_history));
5695 	if (!soc->mon_status_ring_history) {
5696 		dp_err("Failed to alloc memory for mon status ring history");
5697 		return;
5698 	}
5699 }
5700 
5701 /**
5702  * dp_soc_mon_status_ring_history_detach() - Detach the monitor status buffer
5703  *					     record history.
5704  * @soc: DP soc handle
5705  *
5706  * Return: None
5707  */
5708 static void dp_soc_mon_status_ring_history_detach(struct dp_soc *soc)
5709 {
5710 	dp_context_free_mem(soc, DP_MON_STATUS_BUF_HIST_TYPE,
5711 			    soc->mon_status_ring_history);
5712 }
5713 #else
5714 static void dp_soc_mon_status_ring_history_attach(struct dp_soc *soc)
5715 {
5716 }
5717 
5718 static void dp_soc_mon_status_ring_history_detach(struct dp_soc *soc)
5719 {
5720 }
5721 #endif
5722 
5723 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
5724 /**
5725  * dp_soc_tx_history_attach() - Attach the ring history record buffers
5726  * @soc: DP soc structure
5727  *
5728  * This function allocates the memory for recording the tx tcl ring and
5729  * the tx comp ring entries. There is no error returned in case
5730  * of allocation failure since the record function checks if the history is
5731  * initialized or not. We do not want to fail the driver load in case of
5732  * failure to allocate memory for debug history.
5733  *
5734  * Return: None
5735  */
5736 static void dp_soc_tx_history_attach(struct dp_soc *soc)
5737 {
5738 	dp_soc_frag_history_attach(soc, &soc->tx_tcl_history,
5739 				   DP_TX_TCL_HIST_MAX_SLOTS,
5740 				   DP_TX_TCL_HIST_PER_SLOT_MAX,
5741 				   sizeof(struct dp_tx_desc_event),
5742 				   true, DP_TX_TCL_HIST_TYPE);
5743 	dp_soc_frag_history_attach(soc, &soc->tx_comp_history,
5744 				   DP_TX_COMP_HIST_MAX_SLOTS,
5745 				   DP_TX_COMP_HIST_PER_SLOT_MAX,
5746 				   sizeof(struct dp_tx_desc_event),
5747 				   true, DP_TX_COMP_HIST_TYPE);
5748 }
5749 
5750 /**
5751  * dp_soc_tx_history_detach() - Detach the ring history record buffers
5752  * @soc: DP soc structure
5753  *
5754  * This function frees the memory for recording the tx tcl ring and
5755  * the tx comp ring entries.
5756  *
5757  * Return: None
5758  */
5759 static void dp_soc_tx_history_detach(struct dp_soc *soc)
5760 {
5761 	dp_soc_frag_history_detach(soc, &soc->tx_tcl_history,
5762 				   DP_TX_TCL_HIST_MAX_SLOTS,
5763 				   true, DP_TX_TCL_HIST_TYPE);
5764 	dp_soc_frag_history_detach(soc, &soc->tx_comp_history,
5765 				   DP_TX_COMP_HIST_MAX_SLOTS,
5766 				   true, DP_TX_COMP_HIST_TYPE);
5767 }
5768 
5769 #else
5770 static inline void dp_soc_tx_history_attach(struct dp_soc *soc)
5771 {
5772 }
5773 
5774 static inline void dp_soc_tx_history_detach(struct dp_soc *soc)
5775 {
5776 }
5777 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
5778 
5779 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
5780 QDF_STATUS
5781 dp_rx_fst_attach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev)
5782 {
5783 	struct dp_rx_fst *rx_fst = NULL;
5784 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
5785 
5786 	/* For Lithium the below API is not registered,
5787 	 * hence FST attach happens for each pdev.
5788 	 */
5789 	if (!soc->arch_ops.dp_get_rx_fst)
5790 		return dp_rx_fst_attach(soc, pdev);
5791 
5792 	rx_fst = soc->arch_ops.dp_get_rx_fst();
5793 
5794 	/* For BE the FST attach is called only once per
5795 	 * ML context; if rx_fst is already registered,
5796 	 * increase the ref count and return.
5797 	 */
5798 	if (rx_fst) {
5799 		soc->rx_fst = rx_fst;
5800 		pdev->rx_fst = rx_fst;
5801 		soc->arch_ops.dp_rx_fst_ref();
5802 	} else {
5803 		ret = dp_rx_fst_attach(soc, pdev);
5804 		if ((ret != QDF_STATUS_SUCCESS) &&
5805 		    (ret != QDF_STATUS_E_NOSUPPORT))
5806 			return ret;
5807 
5808 		soc->arch_ops.dp_set_rx_fst(soc->rx_fst);
5809 		soc->arch_ops.dp_rx_fst_ref();
5810 	}
5811 	return ret;
5812 }
5813 
5814 void
5815 dp_rx_fst_detach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev)
5816 {
5817 	struct dp_rx_fst *rx_fst = NULL;
5818 
5819 	/* For Lithium the below API is not registered,
5820 	 * hence FST detach happens for each pdev.
5821 	 */
5822 	if (!soc->arch_ops.dp_get_rx_fst) {
5823 		dp_rx_fst_detach(soc, pdev);
5824 		return;
5825 	}
5826 
5827 	rx_fst = soc->arch_ops.dp_get_rx_fst();
5828 
5829 	/* For BE the FST detach is done only when the
5830 	 * last reference is released (deref returns 1).
5831 	 */
5832 	if (rx_fst) {
5833 		if (soc->arch_ops.dp_rx_fst_deref() == 1)
5834 			dp_rx_fst_detach(soc, pdev);
5835 	}
5836 	pdev->rx_fst = NULL;
5837 }
5838 #elif defined(WLAN_SUPPORT_RX_FISA)
5839 QDF_STATUS
5840 dp_rx_fst_attach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev)
5841 {
5842 	return dp_rx_fst_attach(soc, pdev);
5843 }
5844 
5845 void
5846 dp_rx_fst_detach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev)
5847 {
5848 	dp_rx_fst_detach(soc, pdev);
5849 }
5850 #else
5851 QDF_STATUS
5852 dp_rx_fst_attach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev)
5853 {
5854 	return QDF_STATUS_SUCCESS;
5855 }
5856 
5857 void
5858 dp_rx_fst_detach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev)
5859 {
5860 }
5861 #endif
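
/*
 * Pairing sketch (example-only; the function name is hypothetical):
 * per-pdev FST attach/detach always go through the wrappers so that the
 * BE ref count stays balanced across pdevs sharing one ML context.
 */
static inline QDF_STATUS
example_fst_bringup_teardown(struct dp_soc *soc, struct dp_pdev *pdev)
{
	QDF_STATUS ret = dp_rx_fst_attach_wrapper(soc, pdev);

	if (QDF_IS_STATUS_ERROR(ret) && ret != QDF_STATUS_E_NOSUPPORT)
		return ret;

	/* ... pdev operates; on teardown the wrapper derefs or detaches */
	dp_rx_fst_detach_wrapper(soc, pdev);

	return QDF_STATUS_SUCCESS;
}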
5862 
5863 /**
5864  * dp_pdev_attach_wifi3() - attach txrx pdev
5865  * @txrx_soc: Datapath SOC handle
5866  * @params: Params for PDEV attach
5867  *
5868  * Return: QDF_STATUS
5869  */
5870 static inline
5871 QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
5872 				struct cdp_pdev_attach_params *params)
5873 {
5874 	qdf_size_t pdev_context_size;
5875 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5876 	struct dp_pdev *pdev = NULL;
5877 	uint8_t pdev_id = params->pdev_id;
5878 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5879 	int nss_cfg;
5880 	QDF_STATUS ret;
5881 
5882 	pdev_context_size =
5883 		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_PDEV);
5884 	if (pdev_context_size)
5885 		pdev = dp_context_alloc_mem(soc, DP_PDEV_TYPE,
5886 					    pdev_context_size);
5887 
5888 	if (!pdev) {
5889 		dp_init_err("%pK: DP PDEV memory allocation failed",
5890 			    soc);
5891 		goto fail0;
5892 	}
5893 	wlan_minidump_log(pdev, sizeof(*pdev), soc->ctrl_psoc,
5894 			  WLAN_MD_DP_PDEV, "dp_pdev");
5895 
5896 	soc_cfg_ctx = soc->wlan_cfg_ctx;
5897 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
5898 
5899 	if (!pdev->wlan_cfg_ctx) {
5900 		dp_init_err("%pK: pdev cfg_attach failed", soc);
5901 		goto fail1;
5902 	}
5903 
5904 	/*
5905 	 * set nss pdev config based on soc config
5906 	 */
5907 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
5908 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
5909 					 (nss_cfg & (1 << pdev_id)));
5910 
5911 	pdev->soc = soc;
5912 	pdev->pdev_id = pdev_id;
5913 	soc->pdev_list[pdev_id] = pdev;
5914 
5915 	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
5916 	soc->pdev_count++;
5917 
5918 	/* Allocate memory for pdev srng rings */
5919 	if (dp_pdev_srng_alloc(pdev)) {
5920 		dp_init_err("%pK: dp_pdev_srng_alloc failed", soc);
5921 		goto fail2;
5922 	}
5923 
5924 	/* Setup second Rx refill buffer ring */
5925 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev)) {
5926 		dp_init_err("%pK: dp_srng_alloc failed rxrefill2 ring",
5927 			    soc);
5928 		goto fail3;
5929 	}
5930 
5931 	/* Allocate memory for pdev rxdma rings */
5932 	if (dp_rxdma_ring_alloc(soc, pdev)) {
5933 		dp_init_err("%pK: dp_rxdma_ring_alloc failed", soc);
5934 		goto fail4;
5935 	}
5936 
5937 	/* Rx specific init */
5938 	if (dp_rx_pdev_desc_pool_alloc(pdev)) {
5939 		dp_init_err("%pK: dp_rx_pdev_attach failed", soc);
5940 		goto fail4;
5941 	}
5942 
5943 	if (dp_monitor_pdev_attach(pdev)) {
5944 		dp_init_err("%pK: dp_monitor_pdev_attach failed", soc);
5945 		goto fail5;
5946 	}
5947 
5948 	soc->arch_ops.txrx_pdev_attach(pdev, params);
5949 
5950 	/* Setup third Rx refill buffer ring */
5951 	if (dp_setup_ipa_rx_alt_refill_buf_ring(soc, pdev)) {
5952 		dp_init_err("%pK: dp_srng_alloc failed rxrefill3 ring",
5953 			    soc);
5954 		goto fail6;
5955 	}
5956 
5957 	ret = dp_rx_fst_attach_wrapper(soc, pdev);
5958 	if ((ret != QDF_STATUS_SUCCESS) && (ret != QDF_STATUS_E_NOSUPPORT)) {
5959 		dp_init_err("%pK: RX FST attach failed: pdev %d err %d",
5960 			    soc, pdev_id, ret);
5961 		goto fail7;
5962 	}
5963 
5964 	return QDF_STATUS_SUCCESS;
5965 
5966 fail7:
5967 	dp_free_ipa_rx_alt_refill_buf_ring(soc, pdev);
5968 fail6:
5969 	dp_monitor_pdev_detach(pdev);
5970 fail5:
5971 	dp_rx_pdev_desc_pool_free(pdev);
5972 fail4:
5973 	dp_rxdma_ring_free(pdev);
5974 	dp_free_ipa_rx_refill_buf_ring(soc, pdev);
5975 fail3:
5976 	dp_pdev_srng_free(pdev);
5977 fail2:
5978 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
5979 fail1:
5980 	soc->pdev_list[pdev_id] = NULL;
5981 	qdf_mem_free(pdev);
5982 fail0:
5983 	return QDF_STATUS_E_FAILURE;
5984 }
5985 
5986 /**
5987  * dp_pdev_flush_pending_vdevs() - Flush all delete pending vdevs in pdev
5988  * @pdev: Datapath PDEV handle
5989  *
5990  * This is the last chance to flush all pending dp vdevs/peers;
5991  * peer/vdev leak cases such as Non-SSR with a missing peer unmap
5992  * are covered here.
5993  *
5994  * Return: None
5995  */
5996 static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
5997 {
5998 	struct dp_soc *soc = pdev->soc;
5999 	struct dp_vdev *vdev_arr[MAX_VDEV_CNT] = {0};
6000 	uint32_t i = 0;
6001 	uint32_t num_vdevs = 0;
6002 	struct dp_vdev *vdev = NULL;
6003 
6004 	if (TAILQ_EMPTY(&soc->inactive_vdev_list))
6005 		return;
6006 
6007 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
6008 	TAILQ_FOREACH(vdev, &soc->inactive_vdev_list,
6009 		      inactive_list_elem) {
6010 		if (vdev->pdev != pdev)
6011 			continue;
6012 
6013 		vdev_arr[num_vdevs] = vdev;
6014 		num_vdevs++;
6015 		/* take reference to free */
6016 		dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CDP);
6017 	}
6018 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
6019 
6020 	for (i = 0; i < num_vdevs; i++) {
6021 		dp_vdev_flush_peers((struct cdp_vdev *)vdev_arr[i], 0, 0);
6022 		dp_vdev_unref_delete(soc, vdev_arr[i], DP_MOD_ID_CDP);
6023 	}
6024 }
6025 
6026 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
6027 /**
6028  * dp_vdev_stats_hw_offload_target_config() - Send HTT command to FW
6029  *                                          for enable/disable of HW vdev stats
6030  * @soc: Datapath soc handle
6031  * @pdev_id: INVALID_PDEV_ID for all pdevs or 0,1,2 for individual pdev
6032  * @enable: flag to represent enable/disable of hw vdev stats
6033  *
6034  * Return: none
6035  */
6036 static void dp_vdev_stats_hw_offload_target_config(struct dp_soc *soc,
6037 						   uint8_t pdev_id,
6038 						   bool enable)
6039 {
6040 	/* Check SOC level config for HW offload vdev stats support */
6041 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
6042 		dp_debug("%pK: HW vdev stats offload is disabled", soc);
6043 		return;
6044 	}
6045 
6046 	/* Send HTT command to FW for enable of stats */
6047 	dp_h2t_hw_vdev_stats_config_send(soc, pdev_id, enable, false, 0);
6048 }
6049 
6050 /**
6051  * dp_vdev_stats_hw_offload_target_clear() - Clear HW vdev stats on target
6052  * @soc: Datapath soc handle
6053  * @pdev_id: pdev_id (0,1,2)
6054  * @vdev_id_bitmask: bitmask with vdev_id(s) for which stats are to be
6055  *                   cleared on HW
6056  *
6057  * Return: none
6058  */
6059 static
6060 void dp_vdev_stats_hw_offload_target_clear(struct dp_soc *soc, uint8_t pdev_id,
6061 					   uint64_t vdev_id_bitmask)
6062 {
6063 	/* Check SOC level config for HW offload vdev stats support */
6064 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
6065 		dp_debug("%pK: HW vdev stats offload is disabled", soc);
6066 		return;
6067 	}
6068 
6069 	/* Send HTT command to FW for reset of stats */
6070 	dp_h2t_hw_vdev_stats_config_send(soc, pdev_id, true, true,
6071 					 vdev_id_bitmask);
6072 }
6073 #else
6074 static void
6075 dp_vdev_stats_hw_offload_target_config(struct dp_soc *soc, uint8_t pdev_id,
6076 				       bool enable)
6077 {
6078 }
6079 
6080 static
6081 void dp_vdev_stats_hw_offload_target_clear(struct dp_soc *soc, uint8_t pdev_id,
6082 					   uint64_t vdev_id_bitmask)
6083 {
6084 }
6085 #endif /*QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT */
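
/*
 * Usage sketch (example-only; the call site is hypothetical): clearing
 * the HW stats of vdev 3 on pdev 0 passes a one-hot vdev_id bitmask.
 */
static inline void example_clear_vdev3_hw_stats(struct dp_soc *soc)
{
	dp_vdev_stats_hw_offload_target_clear(soc, 0, 1ULL << 3);
}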
6086 
6087 /**
6088  * dp_pdev_deinit() - Deinit txrx pdev
6089  * @txrx_pdev: Datapath PDEV handle
6090  * @force: Force deinit
6091  *
6092  * Return: None
6093  */
6094 static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
6095 {
6096 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
6097 	qdf_nbuf_t curr_nbuf, next_nbuf;
6098 
6099 	if (pdev->pdev_deinit)
6100 		return;
6101 
6102 	dp_tx_me_exit(pdev);
6103 	dp_rx_pdev_buffers_free(pdev);
6104 	dp_rx_pdev_desc_pool_deinit(pdev);
6105 	dp_pdev_bkp_stats_detach(pdev);
6106 	qdf_event_destroy(&pdev->fw_peer_stats_event);
6107 	qdf_event_destroy(&pdev->fw_stats_event);
6108 	qdf_event_destroy(&pdev->fw_obss_stats_event);
6109 	if (pdev->sojourn_buf)
6110 		qdf_nbuf_free(pdev->sojourn_buf);
6111 
6112 	dp_pdev_flush_pending_vdevs(pdev);
6113 	dp_tx_desc_flush(pdev, NULL, true);
6114 
6115 	qdf_spinlock_destroy(&pdev->tx_mutex);
6116 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
6117 
6118 	dp_monitor_pdev_deinit(pdev);
6119 
6120 	dp_pdev_srng_deinit(pdev);
6121 
6122 	dp_ipa_uc_detach(pdev->soc, pdev);
6123 	dp_deinit_ipa_rx_alt_refill_buf_ring(pdev->soc, pdev);
6124 	dp_deinit_ipa_rx_refill_buf_ring(pdev->soc, pdev);
6125 	dp_rxdma_ring_cleanup(pdev->soc, pdev);
6126 
6127 	curr_nbuf = pdev->invalid_peer_head_msdu;
6128 	while (curr_nbuf) {
6129 		next_nbuf = qdf_nbuf_next(curr_nbuf);
6130 		dp_rx_nbuf_free(curr_nbuf);
6131 		curr_nbuf = next_nbuf;
6132 	}
6133 	pdev->invalid_peer_head_msdu = NULL;
6134 	pdev->invalid_peer_tail_msdu = NULL;
6135 
6136 	dp_wdi_event_detach(pdev);
6137 	pdev->pdev_deinit = 1;
6138 }
6139 
6140 /**
6141  * dp_pdev_deinit_wifi3() - Deinit txrx pdev
6142  * @psoc: Datapath psoc handle
6143  * @pdev_id: Id of datapath PDEV handle
6144  * @force: Force deinit
6145  *
6146  * Return: QDF_STATUS
6147  */
6148 static QDF_STATUS
6149 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
6150 		     int force)
6151 {
6152 	struct dp_pdev *txrx_pdev;
6153 
6154 	txrx_pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
6155 						       pdev_id);
6156 
6157 	if (!txrx_pdev)
6158 		return QDF_STATUS_E_FAILURE;
6159 
6160 	dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force);
6161 
6162 	return QDF_STATUS_SUCCESS;
6163 }
6164 
6165 /**
6166  * dp_pdev_post_attach() - Do post pdev attach after dev_alloc_name
6167  * @txrx_pdev: Datapath PDEV handle
6168  *
6169  * Return: None
6170  */
6171 static void dp_pdev_post_attach(struct cdp_pdev *txrx_pdev)
6172 {
6173 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
6174 
6175 	dp_monitor_tx_capture_debugfs_init(pdev);
6176 
6177 	if (dp_pdev_htt_stats_dbgfs_init(pdev)) {
6178 		dp_init_err("%pK: Failed to initialize pdev HTT stats debugfs", pdev->soc);
6179 	}
6180 }
6181 
6182 /**
6183  * dp_pdev_post_attach_wifi3() - attach txrx pdev post
6184  * @soc: Datapath soc handle
6185  * @pdev_id: pdev id of pdev
6186  *
6187  * Return: QDF_STATUS
6188  */
6189 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *soc,
6190 				     uint8_t pdev_id)
6191 {
6192 	struct dp_pdev *pdev;
6193 
6194 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
6195 						  pdev_id);
6196 
6197 	if (!pdev) {
6198 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
6199 			    (struct dp_soc *)soc, pdev_id);
6200 		return QDF_STATUS_E_FAILURE;
6201 	}
6202 
6203 	dp_pdev_post_attach((struct cdp_pdev *)pdev);
6204 	return QDF_STATUS_SUCCESS;
6205 }
6206 
6207 /**
6208  * dp_pdev_detach() - Complete rest of pdev detach
6209  * @txrx_pdev: Datapath PDEV handle
6210  * @force: Force deinit
6211  *
6212  * Return: None
6213  */
6214 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
6215 {
6216 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
6217 	struct dp_soc *soc = pdev->soc;
6218 
6219 	dp_rx_fst_detach_wrapper(soc, pdev);
6220 	dp_pdev_htt_stats_dbgfs_deinit(pdev);
6221 	dp_rx_pdev_desc_pool_free(pdev);
6222 	dp_monitor_pdev_detach(pdev);
6223 	dp_rxdma_ring_free(pdev);
6224 	dp_free_ipa_rx_refill_buf_ring(soc, pdev);
6225 	dp_free_ipa_rx_alt_refill_buf_ring(soc, pdev);
6226 	dp_pdev_srng_free(pdev);
6227 
6228 	soc->pdev_count--;
6229 	soc->pdev_list[pdev->pdev_id] = NULL;
6230 
6231 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
6232 	wlan_minidump_remove(pdev, sizeof(*pdev), soc->ctrl_psoc,
6233 			     WLAN_MD_DP_PDEV, "dp_pdev");
6234 	dp_context_free_mem(soc, DP_PDEV_TYPE, pdev);
6235 }
6236 
6237 /**
6238  * dp_pdev_detach_wifi3() - detach txrx pdev
6239  * @psoc: Datapath soc handle
6240  * @pdev_id: pdev id of pdev
6241  * @force: Force detach
6242  *
6243  * Return: QDF_STATUS
6244  */
6245 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
6246 				       int force)
6247 {
6248 	struct dp_pdev *pdev;
6249 	struct dp_soc *soc = (struct dp_soc *)psoc;
6250 
6251 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
6252 						  pdev_id);
6253 
6254 	if (!pdev) {
6255 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
6256 			    (struct dp_soc *)psoc, pdev_id);
6257 		return QDF_STATUS_E_FAILURE;
6258 	}
6259 
6260 	soc->arch_ops.txrx_pdev_detach(pdev);
6261 
6262 	dp_pdev_detach((struct cdp_pdev *)pdev, force);
6263 	return QDF_STATUS_SUCCESS;
6264 }
6265 
6266 #ifndef DP_UMAC_HW_RESET_SUPPORT
6267 static inline
6268 #endif
6269 void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
6270 {
6271 	struct reo_desc_list_node *desc;
6272 	struct dp_rx_tid *rx_tid;
6273 
6274 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
6275 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
6276 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
6277 		rx_tid = &desc->rx_tid;
6278 		qdf_mem_unmap_nbytes_single(soc->osdev,
6279 			rx_tid->hw_qdesc_paddr,
6280 			QDF_DMA_BIDIRECTIONAL,
6281 			rx_tid->hw_qdesc_alloc_size);
6282 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
6283 		qdf_mem_free(desc);
6284 	}
6285 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
6286 	qdf_list_destroy(&soc->reo_desc_freelist);
6287 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
6288 }
6289 
6290 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
6291 /**
6292  * dp_reo_desc_deferred_freelist_create() - Initialize the resources used
6293  *                                          for deferred reo desc list
6294  * @soc: Datapath soc handle
6295  *
6296  * Return: void
6297  */
6298 static void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
6299 {
6300 	qdf_spinlock_create(&soc->reo_desc_deferred_freelist_lock);
6301 	qdf_list_create(&soc->reo_desc_deferred_freelist,
6302 			REO_DESC_DEFERRED_FREELIST_SIZE);
6303 	soc->reo_desc_deferred_freelist_init = true;
6304 }
6305 
6306 /**
6307  * dp_reo_desc_deferred_freelist_destroy() - loop the deferred free list &
6308  *                                           free the leftover REO QDESCs
6309  * @soc: Datapath soc handle
6310  *
6311  * Return: void
6312  */
6313 static void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
6314 {
6315 	struct reo_desc_deferred_freelist_node *desc;
6316 
6317 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
6318 	soc->reo_desc_deferred_freelist_init = false;
6319 	while (qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
6320 	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
6321 		qdf_mem_unmap_nbytes_single(soc->osdev,
6322 					    desc->hw_qdesc_paddr,
6323 					    QDF_DMA_BIDIRECTIONAL,
6324 					    desc->hw_qdesc_alloc_size);
6325 		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
6326 		qdf_mem_free(desc);
6327 	}
6328 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
6329 
6330 	qdf_list_destroy(&soc->reo_desc_deferred_freelist);
6331 	qdf_spinlock_destroy(&soc->reo_desc_deferred_freelist_lock);
6332 }
6333 #else
6334 static inline void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
6335 {
6336 }
6337 
6338 static inline void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
6339 {
6340 }
6341 #endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
6342 
6343 /**
6344  * dp_soc_reset_txrx_ring_map() - reset tx ring map
6345  * @soc: DP SOC handle
6346  *
6347  */
6348 static void dp_soc_reset_txrx_ring_map(struct dp_soc *soc)
6349 {
6350 	uint32_t i;
6351 
6352 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
6353 		soc->tx_ring_map[i] = 0;
6354 }
6355 
6356 /**
6357  * dp_soc_print_inactive_objects() - prints inactive peer and vdev list
6358  * @soc: DP SOC handle
6359  *
6360  */
6361 static void dp_soc_print_inactive_objects(struct dp_soc *soc)
6362 {
6363 	struct dp_peer *peer = NULL;
6364 	struct dp_peer *tmp_peer = NULL;
6365 	struct dp_vdev *vdev = NULL;
6366 	struct dp_vdev *tmp_vdev = NULL;
6367 	int i = 0;
6368 	uint32_t count;
6369 
6370 	if (TAILQ_EMPTY(&soc->inactive_peer_list) &&
6371 	    TAILQ_EMPTY(&soc->inactive_vdev_list))
6372 		return;
6373 
6374 	TAILQ_FOREACH_SAFE(peer, &soc->inactive_peer_list,
6375 			   inactive_list_elem, tmp_peer) {
6376 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
6377 			count = qdf_atomic_read(&peer->mod_refs[i]);
6378 			if (count)
6379 				DP_PRINT_STATS("peer %pK Module id %u ==> %u",
6380 					       peer, i, count);
6381 		}
6382 	}
6383 
6384 	TAILQ_FOREACH_SAFE(vdev, &soc->inactive_vdev_list,
6385 			   inactive_list_elem, tmp_vdev) {
6386 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
6387 			count = qdf_atomic_read(&vdev->mod_refs[i]);
6388 			if (count)
6389 				DP_PRINT_STATS("vdev %pK Module id %u ==> %u",
6390 					       vdev, i, count);
6391 		}
6392 	}
6393 	QDF_BUG(0);
6394 }
6395 
6396 /**
6397  * dp_soc_deinit() - Deinitialize txrx SOC
6398  * @txrx_soc: Opaque DP SOC handle
6399  *
6400  * Return: None
6401  */
6402 static void dp_soc_deinit(void *txrx_soc)
6403 {
6404 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6405 	struct htt_soc *htt_soc = soc->htt_handle;
6406 
6407 	qdf_atomic_set(&soc->cmn_init_done, 0);
6408 
6409 	if (soc->arch_ops.txrx_soc_ppeds_stop)
6410 		soc->arch_ops.txrx_soc_ppeds_stop(soc);
6411 
6412 	soc->arch_ops.txrx_soc_deinit(soc);
6413 
6414 	dp_monitor_soc_deinit(soc);
6415 
6416 	/* free peer tables & AST tables allocated during peer_map_attach */
6417 	if (soc->peer_map_attach_success) {
6418 		dp_peer_find_detach(soc);
6419 		soc->arch_ops.txrx_peer_map_detach(soc);
6420 		soc->peer_map_attach_success = FALSE;
6421 	}
6422 
6423 	qdf_flush_work(&soc->htt_stats.work);
6424 	qdf_disable_work(&soc->htt_stats.work);
6425 
6426 	qdf_spinlock_destroy(&soc->htt_stats.lock);
6427 
6428 	dp_soc_reset_txrx_ring_map(soc);
6429 
6430 	dp_reo_desc_freelist_destroy(soc);
6431 	dp_reo_desc_deferred_freelist_destroy(soc);
6432 
6433 	DEINIT_RX_HW_STATS_LOCK(soc);
6434 
6435 	qdf_spinlock_destroy(&soc->ast_lock);
6436 
6437 	dp_peer_mec_spinlock_destroy(soc);
6438 
6439 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
6440 
6441 	qdf_nbuf_queue_free(&soc->invalid_buf_queue);
6442 
6443 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
6444 
6445 	qdf_spinlock_destroy(&soc->vdev_map_lock);
6446 
6447 	dp_reo_cmdlist_destroy(soc);
6448 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
6449 
6450 	dp_soc_tx_desc_sw_pools_deinit(soc);
6451 
6452 	dp_soc_srng_deinit(soc);
6453 
6454 	dp_hw_link_desc_ring_deinit(soc);
6455 
6456 	dp_soc_print_inactive_objects(soc);
6457 	qdf_spinlock_destroy(&soc->inactive_peer_list_lock);
6458 	qdf_spinlock_destroy(&soc->inactive_vdev_list_lock);
6459 
6460 	htt_soc_htc_dealloc(soc->htt_handle);
6461 
6462 	htt_soc_detach(htt_soc);
6463 
6464 	/* Free wbm sg list and reset flags in down path */
6465 	dp_rx_wbm_sg_list_deinit(soc);
6466 
6467 	wlan_minidump_remove(soc, sizeof(*soc), soc->ctrl_psoc,
6468 			     WLAN_MD_DP_SOC, "dp_soc");
6469 }
6470 
6471 /**
6472  * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
6473  * @txrx_soc: Opaque DP SOC handle
6474  *
6475  * Return: None
6476  */
6477 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc)
6478 {
6479 	dp_soc_deinit(txrx_soc);
6480 }
6481 
6482 /**
6483  * dp_soc_detach() - Detach rest of txrx SOC
6484  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
6485  *
6486  * Return: None
6487  */
6488 static void dp_soc_detach(struct cdp_soc_t *txrx_soc)
6489 {
6490 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6491 
6492 	soc->arch_ops.txrx_soc_detach(soc);
6493 
6494 	dp_runtime_deinit();
6495 
6496 	dp_sysfs_deinitialize_stats(soc);
6497 	dp_soc_swlm_detach(soc);
6498 	dp_soc_tx_desc_sw_pools_free(soc);
6499 	dp_soc_srng_free(soc);
6500 	dp_hw_link_desc_ring_free(soc);
6501 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
6502 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
6503 	dp_soc_tx_hw_desc_history_detach(soc);
6504 	dp_soc_tx_history_detach(soc);
6505 	dp_soc_mon_status_ring_history_detach(soc);
6506 	dp_soc_rx_history_detach(soc);
6507 	dp_soc_cfg_history_detach(soc);
6508 
6509 	if (!dp_monitor_modularized_enable()) {
6510 		dp_mon_soc_detach_wrapper(soc);
6511 	}
6512 
6513 	qdf_mem_free(soc->cdp_soc.ops);
6514 	qdf_mem_common_free(soc);
6515 }
6516 
6517 /**
6518  * dp_soc_detach_wifi3() - Detach txrx SOC
6519  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
6520  *
6521  * Return: None
6522  */
6523 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc)
6524 {
6525 	dp_soc_detach(txrx_soc);
6526 }
6527 
6528 #ifdef QCA_HOST2FW_RXBUF_RING
6529 static inline void
6530 dp_htt_setup_rxdma_err_dst_ring(struct dp_soc *soc, int mac_id,
6531 				int lmac_id)
6532 {
6533 	if (soc->rxdma_err_dst_ring[lmac_id].hal_srng)
6534 		htt_srng_setup(soc->htt_handle, mac_id,
6535 			       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
6536 			       RXDMA_DST);
6537 }
6538 
6539 #ifdef IPA_WDI3_VLAN_SUPPORT
6540 static inline
6541 void dp_rxdma_setup_refill_ring3(struct dp_soc *soc,
6542 				 struct dp_pdev *pdev,
6543 				 uint8_t idx)
6544 {
6545 	if (pdev->rx_refill_buf_ring3.hal_srng)
6546 		htt_srng_setup(soc->htt_handle, idx,
6547 			       pdev->rx_refill_buf_ring3.hal_srng,
6548 			       RXDMA_BUF);
6549 }
6550 #else
6551 static inline
6552 void dp_rxdma_setup_refill_ring3(struct dp_soc *soc,
6553 				 struct dp_pdev *pdev,
6554 				 uint8_t idx)
6555 { }
6556 #endif
6557 
6558 /**
6559  * dp_rxdma_ring_config() - configure the RX DMA rings
6560  * @soc: data path SoC handle
6561  *
6562  * This function is used to configure the MAC rings.
6563  * On MCL, the host provides buffers in the Host2FW ring;
6564  * FW refills (copies) buffers to the ring and updates the
6565  * ring_idx in a register.
6566  *
6567  * Return: zero on success, non-zero on failure
6568  */
6569 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
6570 {
6571 	int i;
6572 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6573 
6574 	for (i = 0; i < MAX_PDEV_CNT; i++) {
6575 		struct dp_pdev *pdev = soc->pdev_list[i];
6576 
6577 		if (pdev) {
6578 			int mac_id;
6579 			int max_mac_rings =
6580 				 wlan_cfg_get_num_mac_rings
6581 				(pdev->wlan_cfg_ctx);
6582 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);
6583 
6584 			htt_srng_setup(soc->htt_handle, i,
6585 				       soc->rx_refill_buf_ring[lmac_id]
6586 				       .hal_srng,
6587 				       RXDMA_BUF);
6588 
6589 			if (pdev->rx_refill_buf_ring2.hal_srng)
6590 				htt_srng_setup(soc->htt_handle, i,
6591 					       pdev->rx_refill_buf_ring2
6592 					       .hal_srng,
6593 					       RXDMA_BUF);
6594 
6595 			dp_rxdma_setup_refill_ring3(soc, pdev, i);
6596 
6597 			dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);
6598 			dp_err("pdev_id %d max_mac_rings %d",
6599 			       pdev->pdev_id, max_mac_rings);
6600 
6601 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
6602 				int mac_for_pdev =
6603 					dp_get_mac_id_for_pdev(mac_id,
6604 							       pdev->pdev_id);
6605 				/*
6606 				 * Obtain lmac id from pdev to access the LMAC
6607 				 * ring in soc context
6608 				 */
6609 				lmac_id =
6610 				dp_get_lmac_id_for_pdev_id(soc,
6611 							   mac_id,
6612 							   pdev->pdev_id);
6613 				QDF_TRACE(QDF_MODULE_ID_TXRX,
6614 					 QDF_TRACE_LEVEL_ERROR,
6615 					 FL("mac_id %d"), mac_for_pdev);
6616 
6617 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
6618 					 pdev->rx_mac_buf_ring[mac_id]
6619 						.hal_srng,
6620 					 RXDMA_BUF);
6621 
6622 				if (!soc->rxdma2sw_rings_not_supported)
6623 					dp_htt_setup_rxdma_err_dst_ring(soc,
6624 						mac_for_pdev, lmac_id);
6625 
6626 				/* Configure monitor mode rings */
6627 				status = dp_monitor_htt_srng_setup(soc, pdev,
6628 								   lmac_id,
6629 								   mac_for_pdev);
6630 				if (status != QDF_STATUS_SUCCESS) {
6631 					dp_err("Failed to send htt monitor messages to target");
6632 					return status;
6633 				}
6634 
6635 			}
6636 		}
6637 	}
6638 
6639 	dp_reap_timer_init(soc);
6640 	return status;
6641 }
6642 #else
6643 /* This is only for WIN */
6644 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
6645 {
6646 	int i;
6647 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6648 	int mac_for_pdev;
6649 	int lmac_id;
6650 
6651 	/* Configure monitor mode rings */
6652 	dp_monitor_soc_htt_srng_setup(soc);
6653 
6654 	for (i = 0; i < MAX_PDEV_CNT; i++) {
6655 		struct dp_pdev *pdev =  soc->pdev_list[i];
6656 
6657 		if (!pdev)
6658 			continue;
6659 
6660 		mac_for_pdev = i;
6661 		lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);
6662 
6663 		if (soc->rx_refill_buf_ring[lmac_id].hal_srng)
6664 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
6665 				       soc->rx_refill_buf_ring[lmac_id].
6666 				       hal_srng, RXDMA_BUF);
6667 
6668 		/* Configure monitor mode rings */
6669 		dp_monitor_htt_srng_setup(soc, pdev,
6670 					  lmac_id,
6671 					  mac_for_pdev);
6672 		if (!soc->rxdma2sw_rings_not_supported)
6673 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
6674 				       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
6675 				       RXDMA_DST);
6676 	}
6677 
6678 	dp_reap_timer_init(soc);
6679 	return status;
6680 }
6681 #endif
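
/*
 * Mapping sketch (example-only; the helper name is hypothetical and the
 * dp_get_*_id helpers are assumed from dp_internal.h): on DBS targets a
 * pdev can own several MAC rings, and each (mac_id, pdev_id) pair is
 * translated into an HTT mac index and a SoC lmac ring index.
 */
static inline void example_dump_mac_ring_ids(struct dp_soc *soc,
					     struct dp_pdev *pdev)
{
	int mac_id;
	int max_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);

	for (mac_id = 0; mac_id < max_rings; mac_id++)
		dp_info("mac %d -> mac_for_pdev %d lmac %d", mac_id,
			dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id),
			dp_get_lmac_id_for_pdev_id(soc, mac_id,
						   pdev->pdev_id));
}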
6682 
6683 /**
6684  * dp_rx_target_fst_config() - configure the RXOLE Flow Search Engine
6685  * @soc: data path SoC handle
6686  *
6687  * This function is used to configure the FSE HW block in RX OLE on a
6688  * per pdev basis. Here, we will be programming parameters related to
6689  * the Flow Search Table.
6690  *
6692  * Return: zero on success, non-zero on failure
6693  */
6694 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
6695 static QDF_STATUS
6696 dp_rx_target_fst_config(struct dp_soc *soc)
6697 {
6698 	int i;
6699 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6700 
6701 	for (i = 0; i < MAX_PDEV_CNT; i++) {
6702 		struct dp_pdev *pdev = soc->pdev_list[i];
6703 
6704 		/* Flow search is not enabled if NSS offload is enabled */
6705 		if (pdev &&
6706 		    !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
6707 			status = dp_rx_flow_send_fst_fw_setup(pdev->soc, pdev);
6708 			if (status != QDF_STATUS_SUCCESS)
6709 				break;
6710 		}
6711 	}
6712 	return status;
6713 }
6714 #elif defined(WLAN_SUPPORT_RX_FISA)
6715 /**
6716  * dp_rx_target_fst_config() - Configure RX OLE FSE engine in HW
6717  * @soc: SoC handle
6718  *
6719  * Return: Success
6720  */
6721 static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
6722 {
6723 	QDF_STATUS status;
6724 	struct dp_rx_fst *fst = soc->rx_fst;
6725 
6726 	/* Check if it is enabled in the INI */
6727 	if (!soc->fisa_enable) {
6728 		dp_err("RX FISA feature is disabled");
6729 		return QDF_STATUS_E_NOSUPPORT;
6730 	}
6731 
6732 	status = dp_rx_flow_send_fst_fw_setup(soc, soc->pdev_list[0]);
6733 	if (QDF_IS_STATUS_ERROR(status)) {
6734 		dp_err("dp_rx_flow_send_fst_fw_setup failed %d",
6735 		       status);
6736 		return status;
6737 	}
6738 
6739 	if (soc->fst_cmem_base) {
6740 		soc->fst_in_cmem = true;
6741 		dp_rx_fst_update_cmem_params(soc, fst->max_entries,
6742 					     soc->fst_cmem_base & 0xffffffff,
6743 					     soc->fst_cmem_base >> 32);
6744 	}
6745 	return status;
6746 }
6747 
6748 #define FISA_MAX_TIMEOUT 0xffffffff
6749 #define FISA_DISABLE_TIMEOUT 0
6750 static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
6751 {
6752 	struct dp_htt_rx_fisa_cfg fisa_config;
6753 
6754 	fisa_config.pdev_id = 0;
6755 	fisa_config.fisa_timeout = FISA_MAX_TIMEOUT;
6756 
6757 	return dp_htt_rx_fisa_config(soc->pdev_list[0], &fisa_config);
6758 }
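
/*
 * Sketch (example-only; the helper name is hypothetical): disabling FISA
 * aggregation would send the same HTT message with FISA_DISABLE_TIMEOUT
 * in place of FISA_MAX_TIMEOUT.
 */
static inline QDF_STATUS example_rx_fisa_disable(struct dp_soc *soc)
{
	struct dp_htt_rx_fisa_cfg fisa_config;

	fisa_config.pdev_id = 0;
	fisa_config.fisa_timeout = FISA_DISABLE_TIMEOUT;

	return dp_htt_rx_fisa_config(soc->pdev_list[0], &fisa_config);
}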
6759 
6760 #else /* !WLAN_SUPPORT_RX_FISA */
6761 static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
6762 {
6763 	return QDF_STATUS_SUCCESS;
6764 }
6765 #endif /* !WLAN_SUPPORT_RX_FISA */
6766 
6767 #ifndef WLAN_SUPPORT_RX_FISA
6768 static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
6769 {
6770 	return QDF_STATUS_SUCCESS;
6771 }
6772 
6773 static QDF_STATUS dp_rx_dump_fisa_stats(struct dp_soc *soc)
6774 {
6775 	return QDF_STATUS_SUCCESS;
6776 }
6777 
6778 static void dp_rx_dump_fisa_table(struct dp_soc *soc)
6779 {
6780 }
6781 
6782 static void dp_suspend_fse_cache_flush(struct dp_soc *soc)
6783 {
6784 }
6785 
6786 static void dp_resume_fse_cache_flush(struct dp_soc *soc)
6787 {
6788 }
6789 #endif /* !WLAN_SUPPORT_RX_FISA */
6790 
6791 #ifndef WLAN_DP_FEATURE_SW_LATENCY_MGR
6792 static inline QDF_STATUS dp_print_swlm_stats(struct dp_soc *soc)
6793 {
6794 	return QDF_STATUS_SUCCESS;
6795 }
6796 #endif /* !WLAN_DP_FEATURE_SW_LATENCY_MGR */
6797 
6798 #ifdef WLAN_SUPPORT_PPEDS
6799 /**
6800  * dp_soc_target_ppe_rxole_rxdma_cfg() - Configure the RxOLe and RxDMA for PPE
6801  * @soc: DP Tx/Rx handle
6802  *
6803  * Return: QDF_STATUS
6804  */
6805 static
6806 QDF_STATUS dp_soc_target_ppe_rxole_rxdma_cfg(struct dp_soc *soc)
6807 {
6808 	struct dp_htt_rxdma_rxole_ppe_config htt_cfg = {0};
6809 	QDF_STATUS status;
6810 
6811 	/*
6812 	 * Program RxDMA to override the reo destination indication
6813 	 * with REO2PPE_DST_IND, when use_ppe is set to 1 in RX_MSDU_END,
6814 	 * thereby driving the packet to REO2PPE ring.
6815 	 * If the MSDU is spanning more than 1 buffer, then this
6816 	 * override is not done.
6817 	 */
6818 	htt_cfg.override = 1;
6819 	htt_cfg.reo_destination_indication = REO2PPE_DST_IND;
6820 	htt_cfg.multi_buffer_msdu_override_en = 0;
6821 
6822 	/*
6823 	 * Override use_ppe to 0 in RxOLE for the following
6824 	 * cases.
6825 	 */
6826 	htt_cfg.intra_bss_override = 1;
6827 	htt_cfg.decap_raw_override = 1;
6828 	htt_cfg.decap_nwifi_override = 1;
6829 	htt_cfg.ip_frag_override = 1;
6830 
6831 	status = dp_htt_rxdma_rxole_ppe_cfg_set(soc, &htt_cfg);
6832 	if (status != QDF_STATUS_SUCCESS)
6833 		dp_err("RxOLE and RxDMA PPE config failed %d", status);
6834 
6835 	return status;
6836 }
6837 
6838 static inline
6839 void dp_soc_txrx_peer_setup(enum wlan_op_mode vdev_opmode, struct dp_soc *soc,
6840 			    struct dp_peer *peer)
6841 {
6842 	if (((vdev_opmode == wlan_op_mode_ap) ||
6843 	     (vdev_opmode == wlan_op_mode_sta)) &&
6844 	     (soc->arch_ops.txrx_peer_setup)) {
6845 		if (soc->arch_ops.txrx_peer_setup(soc, peer)
6846 				!= QDF_STATUS_SUCCESS) {
6847 			dp_err("unable to setup target peer features");
6848 			qdf_assert_always(0);
6849 		}
6850 	}
6851 }
6852 #else
6853 static inline
6854 QDF_STATUS dp_soc_target_ppe_rxole_rxdma_cfg(struct dp_soc *soc)
6855 {
6856 	return QDF_STATUS_SUCCESS;
6857 }
6858 
6859 static inline
6860 void dp_soc_txrx_peer_setup(enum wlan_op_mode vdev_opmode, struct dp_soc *soc,
6861 			    struct dp_peer *peer)
6862 {
6863 }
6864 #endif /* WLAN_SUPPORT_PPEDS */
6865 
6866 #ifdef DP_UMAC_HW_RESET_SUPPORT
6867 static void dp_register_umac_reset_handlers(struct dp_soc *soc)
6868 {
6869 	dp_umac_reset_register_rx_action_callback(soc,
6870 					dp_umac_reset_action_trigger_recovery,
6871 					UMAC_RESET_ACTION_DO_TRIGGER_RECOVERY);
6872 
6873 	dp_umac_reset_register_rx_action_callback(soc,
6874 		dp_umac_reset_handle_pre_reset, UMAC_RESET_ACTION_DO_PRE_RESET);
6875 
6876 	dp_umac_reset_register_rx_action_callback(soc,
6877 					dp_umac_reset_handle_post_reset,
6878 					UMAC_RESET_ACTION_DO_POST_RESET_START);
6879 
6880 	dp_umac_reset_register_rx_action_callback(soc,
6881 				dp_umac_reset_handle_post_reset_complete,
6882 				UMAC_RESET_ACTION_DO_POST_RESET_COMPLETE);
6884 }
6885 #else
6886 static void dp_register_umac_reset_handlers(struct dp_soc *soc)
6887 {
6888 }
6889 #endif
6890 /**
6891  * dp_soc_attach_target_wifi3() - SOC initialization in the target
6892  * @cdp_soc: Opaque Datapath SOC handle
6893  *
6894  * Return: zero on success, non-zero on failure
6895  */
6896 static QDF_STATUS
6897 dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
6898 {
6899 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
6900 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6901 	struct hal_reo_params reo_params;
6902 
6903 	htt_soc_attach_target(soc->htt_handle);
6904 
6905 	status = dp_soc_target_ppe_rxole_rxdma_cfg(soc);
6906 	if (status != QDF_STATUS_SUCCESS) {
6907 		dp_err("Failed to send htt RxOLE and RxDMA messages to target");
6908 		return status;
6909 	}
6910 
6911 	status = dp_rxdma_ring_config(soc);
6912 	if (status != QDF_STATUS_SUCCESS) {
6913 		dp_err("Failed to send htt srng setup messages to target");
6914 		return status;
6915 	}
6916 
6917 	status = soc->arch_ops.dp_rxdma_ring_sel_cfg(soc);
6918 	if (status != QDF_STATUS_SUCCESS) {
6919 		dp_err("Failed to send htt ring config message to target");
6920 		return status;
6921 	}
6922 
6923 	status = dp_soc_umac_reset_init(soc);
6924 	if (status != QDF_STATUS_SUCCESS &&
6925 	    status != QDF_STATUS_E_NOSUPPORT) {
6926 		dp_err("Failed to initialize UMAC reset");
6927 		return status;
6928 	}
6929 
6930 	dp_register_umac_reset_handlers(soc);
6931 
6932 	status = dp_rx_target_fst_config(soc);
6933 	if (status != QDF_STATUS_SUCCESS &&
6934 	    status != QDF_STATUS_E_NOSUPPORT) {
6935 		dp_err("Failed to send htt fst setup config message to target");
6936 		return status;
6937 	}
6938 
6939 	if (status == QDF_STATUS_SUCCESS) {
6940 		status = dp_rx_fisa_config(soc);
6941 		if (status != QDF_STATUS_SUCCESS) {
6942 			dp_err("Failed to send htt FISA config message to target");
6943 			return status;
6944 		}
6945 	}
6946 
6947 	DP_STATS_INIT(soc);
6948 
6949 	dp_runtime_init(soc);
6950 
6951 	/* Enable HW vdev offload stats if feature is supported */
6952 	dp_vdev_stats_hw_offload_target_config(soc, INVALID_PDEV_ID, true);
6953 
6954 	/* initialize work queue for stats processing */
6955 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
6956 
6957 	wlan_cfg_soc_update_tgt_params(soc->wlan_cfg_ctx,
6958 				       soc->ctrl_psoc);
6959 	/* Setup HW REO */
6960 	qdf_mem_zero(&reo_params, sizeof(reo_params));
6961 
6962 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
6963 		/*
6964 		 * Reo ring remap is not required if both radios
6965 		 * are offloaded to NSS
6966 		 */
6967 
6968 		if (soc->arch_ops.reo_remap_config(soc, &reo_params.remap0,
6969 						   &reo_params.remap1,
6970 						   &reo_params.remap2))
6971 			reo_params.rx_hash_enabled = true;
6972 		else
6973 			reo_params.rx_hash_enabled = false;
6974 	}
6975 
6976 	/*
6977 	 * set the fragment destination ring
6978 	 */
6979 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
6980 
6981 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
6982 		reo_params.alt_dst_ind_0 = REO_REMAP_RELEASE;
6983 
6984 	reo_params.reo_qref = &soc->reo_qref;
6985 	hal_reo_setup(soc->hal_soc, &reo_params, 1);
6986 
6987 	hal_reo_set_err_dst_remap(soc->hal_soc);
6988 
6989 	soc->features.pn_in_reo_dest = hal_reo_enable_pn_in_dest(soc->hal_soc);
6990 
6991 	return QDF_STATUS_SUCCESS;
6992 }
6993 
6994 /**
6995  * dp_vdev_id_map_tbl_add() - Add vdev into vdev_id table
6996  * @soc: SoC handle
6997  * @vdev: vdev handle
6998  * @vdev_id: vdev_id
6999  *
7000  * Return: None
7001  */
7002 static void dp_vdev_id_map_tbl_add(struct dp_soc *soc,
7003 				   struct dp_vdev *vdev,
7004 				   uint8_t vdev_id)
7005 {
7006 	QDF_ASSERT(vdev_id < MAX_VDEV_CNT);
7007 
7008 	qdf_spin_lock_bh(&soc->vdev_map_lock);
7009 
7010 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
7011 			QDF_STATUS_SUCCESS) {
7012 		dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK vdev_id %u",
7013 			     soc, vdev, vdev_id);
7014 		qdf_spin_unlock_bh(&soc->vdev_map_lock);
7015 		return;
7016 	}
7017 
7018 	if (!soc->vdev_id_map[vdev_id])
7019 		soc->vdev_id_map[vdev_id] = vdev;
7020 	else
7021 		QDF_ASSERT(0);
7022 
7023 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
7024 }
7025 
7026 /**
7027  * dp_vdev_id_map_tbl_remove() - remove vdev from vdev_id table
7028  * @soc: SoC handle
7029  * @vdev: vdev handle
7030  *
7031  * Return: None
7032  */
7033 static void dp_vdev_id_map_tbl_remove(struct dp_soc *soc,
7034 				      struct dp_vdev *vdev)
7035 {
7036 	qdf_spin_lock_bh(&soc->vdev_map_lock);
7037 	QDF_ASSERT(soc->vdev_id_map[vdev->vdev_id] == vdev);
7038 
7039 	soc->vdev_id_map[vdev->vdev_id] = NULL;
7040 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
7041 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
7042 }
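
/*
 * Lookup sketch (example-only; the helper name is hypothetical): readers
 * resolve a vdev from its id under the same lock and must take their own
 * reference before use; the caller releases it with
 * dp_vdev_unref_delete().
 */
static inline struct dp_vdev *example_vdev_by_id(struct dp_soc *soc,
						 uint8_t vdev_id)
{
	struct dp_vdev *vdev;

	qdf_spin_lock_bh(&soc->vdev_map_lock);
	vdev = soc->vdev_id_map[vdev_id];
	if (vdev && dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CDP) !=
	    QDF_STATUS_SUCCESS)
		vdev = NULL;
	qdf_spin_unlock_bh(&soc->vdev_map_lock);

	return vdev;
}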
7043 
7044 /**
7045  * dp_vdev_pdev_list_add() - add vdev into pdev's list
7046  * @soc: soc handle
7047  * @pdev: pdev handle
7048  * @vdev: vdev handle
7049  *
7050  * Return: none
7051  */
7052 static void dp_vdev_pdev_list_add(struct dp_soc *soc,
7053 				  struct dp_pdev *pdev,
7054 				  struct dp_vdev *vdev)
7055 {
7056 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
7057 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
7058 			QDF_STATUS_SUCCESS) {
7059 		dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK",
7060 			     soc, vdev);
7061 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
7062 		return;
7063 	}
7064 	/* add this vdev into the pdev's list */
7065 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
7066 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
7067 }
7068 
7069 /**
7070  * dp_vdev_pdev_list_remove() - remove vdev from pdev's list
7071  * @soc: SoC handle
7072  * @pdev: pdev handle
7073  * @vdev: VDEV handle
7074  *
7075  * Return: none
7076  */
7077 static void dp_vdev_pdev_list_remove(struct dp_soc *soc,
7078 				     struct dp_pdev *pdev,
7079 				     struct dp_vdev *vdev)
7080 {
7081 	uint8_t found = 0;
7082 	struct dp_vdev *tmpvdev = NULL;
7083 
7084 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
7085 	TAILQ_FOREACH(tmpvdev, &pdev->vdev_list, vdev_list_elem) {
7086 		if (tmpvdev == vdev) {
7087 			found = 1;
7088 			break;
7089 		}
7090 	}
7091 
7092 	if (found) {
7093 		TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
7094 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
7095 	} else {
7096 		dp_vdev_debug("%pK: vdev:%pK not found in pdev:%pK vdevlist:%pK",
7097 			      soc, vdev, pdev, &pdev->vdev_list);
7098 		QDF_ASSERT(0);
7099 	}
7100 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
7101 }
7102 
7103 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
7104 /**
7105  * dp_vdev_init_rx_eapol() - initializing osif_rx_eapol
7106  * @vdev: Datapath VDEV handle
7107  *
7108  * Return: None
7109  */
7110 static inline void dp_vdev_init_rx_eapol(struct dp_vdev *vdev)
7111 {
7112 	vdev->osif_rx_eapol = NULL;
7113 }
7114 
7115 /**
7116  * dp_vdev_register_rx_eapol() - Register VDEV operations for rx_eapol
7117  * @vdev: DP vdev handle
7118  * @txrx_ops: Tx and Rx operations
7119  *
7120  * Return: None
7121  */
7122 static inline void dp_vdev_register_rx_eapol(struct dp_vdev *vdev,
7123 					     struct ol_txrx_ops *txrx_ops)
7124 {
7125 	vdev->osif_rx_eapol = txrx_ops->rx.rx_eapol;
7126 }
7127 #else
7128 static inline void dp_vdev_init_rx_eapol(struct dp_vdev *vdev)
7129 {
7130 }
7131 
7132 static inline void dp_vdev_register_rx_eapol(struct dp_vdev *vdev,
7133 					     struct ol_txrx_ops *txrx_ops)
7134 {
7135 }
7136 #endif
7137 
7138 #ifdef WLAN_FEATURE_11BE_MLO
7139 static inline void dp_vdev_save_mld_addr(struct dp_vdev *vdev,
7140 					 struct cdp_vdev_info *vdev_info)
7141 {
7142 	if (vdev_info->mld_mac_addr)
7143 		qdf_mem_copy(&vdev->mld_mac_addr.raw[0],
7144 			     vdev_info->mld_mac_addr, QDF_MAC_ADDR_SIZE);
7145 }
7146 #else
7147 static inline void dp_vdev_save_mld_addr(struct dp_vdev *vdev,
7148 					 struct cdp_vdev_info *vdev_info)
7149 {
7150 }
7152 #endif
7153 
7154 #ifdef DP_TRAFFIC_END_INDICATION
7155 /**
7156  * dp_tx_vdev_traffic_end_indication_attach() - Initialize data end indication
7157  *                                              related members in VDEV
7158  * @vdev: DP vdev handle
7159  *
7160  * Return: None
7161  */
7162 static inline void
7163 dp_tx_vdev_traffic_end_indication_attach(struct dp_vdev *vdev)
7164 {
7165 	qdf_nbuf_queue_init(&vdev->end_ind_pkt_q);
7166 }
7167 
7168 /**
7169  * dp_tx_vdev_traffic_end_indication_detach() - De-init data end indication
7170  *                                              related members in VDEV
7171  * @vdev: DP vdev handle
7172  *
7173  * Return: None
7174  */
7175 static inline void
7176 dp_tx_vdev_traffic_end_indication_detach(struct dp_vdev *vdev)
7177 {
7178 	qdf_nbuf_t nbuf;
7179 
7180 	while ((nbuf = qdf_nbuf_queue_remove(&vdev->end_ind_pkt_q)) != NULL)
7181 		qdf_nbuf_free(nbuf);
7182 }
7183 #else
7184 static inline void
7185 dp_tx_vdev_traffic_end_indication_attach(struct dp_vdev *vdev)
7186 {}
7187 
7188 static inline void
7189 dp_tx_vdev_traffic_end_indication_detach(struct dp_vdev *vdev)
7190 {}
7191 #endif
7192 
7193 /**
7194  * dp_vdev_attach_wifi3() - attach txrx vdev
7195  * @cdp_soc: CDP SoC context
7196  * @pdev_id: PDEV ID for vdev creation
7197  * @vdev_info: parameters used for vdev creation
7198  *
7199  * Return: status
7200  */
7201 static QDF_STATUS dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc,
7202 				       uint8_t pdev_id,
7203 				       struct cdp_vdev_info *vdev_info)
7204 {
7205 	int i = 0;
7206 	qdf_size_t vdev_context_size;
7207 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
7208 	struct dp_pdev *pdev =
7209 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7210 						   pdev_id);
7211 	struct dp_vdev *vdev;
7212 	uint8_t *vdev_mac_addr = vdev_info->vdev_mac_addr;
7213 	uint8_t vdev_id = vdev_info->vdev_id;
7214 	enum wlan_op_mode op_mode = vdev_info->op_mode;
7215 	enum wlan_op_subtype subtype = vdev_info->subtype;
7216 	uint8_t vdev_stats_id = vdev_info->vdev_stats_id;
7217 
7218 	vdev_context_size =
7219 		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_VDEV);
7220 	vdev = qdf_mem_malloc(vdev_context_size);
7221 
7222 	if (!pdev) {
7223 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
7224 			    cdp_soc, pdev_id);
7225 		qdf_mem_free(vdev);
7226 		goto fail0;
7227 	}
7228 
7229 	if (!vdev) {
7230 		dp_init_err("%pK: DP VDEV memory allocation failed",
7231 			    cdp_soc);
7232 		goto fail0;
7233 	}
7234 
7235 	wlan_minidump_log(vdev, sizeof(*vdev), soc->ctrl_psoc,
7236 			  WLAN_MD_DP_VDEV, "dp_vdev");
7237 
7238 	vdev->pdev = pdev;
7239 	vdev->vdev_id = vdev_id;
7240 	vdev->vdev_stats_id = vdev_stats_id;
7241 	vdev->opmode = op_mode;
7242 	vdev->subtype = subtype;
7243 	vdev->osdev = soc->osdev;
7244 
7245 	vdev->osif_rx = NULL;
7246 	vdev->osif_rsim_rx_decap = NULL;
7247 	vdev->osif_get_key = NULL;
7248 	vdev->osif_tx_free_ext = NULL;
7249 	vdev->osif_vdev = NULL;
7250 
7251 	vdev->delete.pending = 0;
7252 	vdev->safemode = 0;
7253 	vdev->drop_unenc = 1;
7254 	vdev->sec_type = cdp_sec_type_none;
7255 	vdev->multipass_en = false;
7256 	vdev->wrap_vdev = false;
7257 	dp_vdev_init_rx_eapol(vdev);
7258 	qdf_atomic_init(&vdev->ref_cnt);
7259 	for (i = 0; i < DP_MOD_ID_MAX; i++)
7260 		qdf_atomic_init(&vdev->mod_refs[i]);
7261 
	/* Take one reference for create */
7263 	qdf_atomic_inc(&vdev->ref_cnt);
7264 	qdf_atomic_inc(&vdev->mod_refs[DP_MOD_ID_CONFIG]);
7265 	vdev->num_peers = 0;
7266 #ifdef notyet
7267 	vdev->filters_num = 0;
7268 #endif
7269 	vdev->lmac_id = pdev->lmac_id;
7270 
7271 	qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE);
7272 
7273 	dp_vdev_save_mld_addr(vdev, vdev_info);
7274 
7275 	/* TODO: Initialize default HTT meta data that will be used in
7276 	 * TCL descriptors for packets transmitted from this VDEV
7277 	 */
7278 
7279 	qdf_spinlock_create(&vdev->peer_list_lock);
7280 	TAILQ_INIT(&vdev->peer_list);
7281 	dp_peer_multipass_list_init(vdev);
7282 	if ((soc->intr_mode == DP_INTR_POLL) &&
7283 	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
7284 		if ((pdev->vdev_count == 0) ||
7285 		    (wlan_op_mode_monitor == vdev->opmode))
7286 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7287 	} else if (dp_soc_get_con_mode(soc) == QDF_GLOBAL_MISSION_MODE &&
7288 		   soc->intr_mode == DP_INTR_MSI &&
7289 		   wlan_op_mode_monitor == vdev->opmode) {
7290 		/* Timer to reap status ring in mission mode */
7291 		dp_monitor_vdev_timer_start(soc);
7292 	}
7293 
7294 	dp_vdev_id_map_tbl_add(soc, vdev, vdev_id);
7295 
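	/*
	 * Monitor vdevs use only the monitor datapath: perform the monitor
	 * attach and return without the regular tx/rx initialization below.
	 */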
7296 	if (wlan_op_mode_monitor == vdev->opmode) {
7297 		if (dp_monitor_vdev_attach(vdev) == QDF_STATUS_SUCCESS) {
7298 			dp_monitor_pdev_set_mon_vdev(vdev);
7299 			return dp_monitor_vdev_set_monitor_mode_buf_rings(pdev);
7300 		}
7301 		return QDF_STATUS_E_FAILURE;
7302 	}
7303 
7304 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
7305 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
7306 	vdev->dscp_tid_map_id = 0;
7307 	vdev->mcast_enhancement_en = 0;
7308 	vdev->igmp_mcast_enhanc_en = 0;
7309 	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
7310 	vdev->prev_tx_enq_tstamp = 0;
7311 	vdev->prev_rx_deliver_tstamp = 0;
7312 	vdev->skip_sw_tid_classification = DP_TX_HW_DSCP_TID_MAP_VALID;
7313 	dp_tx_vdev_traffic_end_indication_attach(vdev);
7314 
7315 	dp_vdev_pdev_list_add(soc, pdev, vdev);
7316 	pdev->vdev_count++;
7317 
7318 	if (wlan_op_mode_sta != vdev->opmode &&
7319 	    wlan_op_mode_ndi != vdev->opmode)
7320 		vdev->ap_bridge_enabled = true;
7321 	else
7322 		vdev->ap_bridge_enabled = false;
7323 	dp_init_info("%pK: wlan_cfg_ap_bridge_enabled %d",
7324 		     cdp_soc, vdev->ap_bridge_enabled);
7325 
7326 	dp_tx_vdev_attach(vdev);
7327 
7328 	dp_monitor_vdev_attach(vdev);
7329 	if (!pdev->is_lro_hash_configured) {
7330 		if (QDF_IS_STATUS_SUCCESS(dp_lro_hash_setup(soc, pdev)))
7331 			pdev->is_lro_hash_configured = true;
7332 		else
7333 			dp_err("LRO hash setup failure!");
7334 	}
7335 
7336 	dp_cfg_event_record_vdev_evt(soc, DP_CFG_EVENT_VDEV_ATTACH, vdev);
7337 	dp_info("Created vdev %pK ("QDF_MAC_ADDR_FMT") vdev_id %d", vdev,
7338 		QDF_MAC_ADDR_REF(vdev->mac_addr.raw), vdev->vdev_id);
7339 	DP_STATS_INIT(vdev);
7340 
7341 	if (QDF_IS_STATUS_ERROR(soc->arch_ops.txrx_vdev_attach(soc, vdev)))
7342 		goto fail0;
7343 
7344 	if (wlan_op_mode_sta == vdev->opmode)
7345 		dp_peer_create_wifi3((struct cdp_soc_t *)soc, vdev_id,
7346 				     vdev->mac_addr.raw, CDP_LINK_PEER_TYPE);
7347 
7348 	dp_pdev_update_fast_rx_flag(soc, pdev);
7349 
7350 	return QDF_STATUS_SUCCESS;
7351 
7352 fail0:
7353 	return QDF_STATUS_E_FAILURE;
7354 }
7355 
7356 #ifndef QCA_HOST_MODE_WIFI_DISABLED
7357 /**
7358  * dp_vdev_fetch_tx_handler() - Fetch Tx handlers
7359  * @vdev: struct dp_vdev *
7360  * @soc: struct dp_soc *
 * @ctx: struct ol_txrx_hardtart_ctxt *
 *
 * Return: None
 */
7363 static inline void dp_vdev_fetch_tx_handler(struct dp_vdev *vdev,
7364 					    struct dp_soc *soc,
7365 					    struct ol_txrx_hardtart_ctxt *ctx)
7366 {
7367 	/* Enable vdev_id check only for ap, if flag is enabled */
7368 	if (vdev->mesh_vdev)
7369 		ctx->tx = dp_tx_send_mesh;
7370 	else if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
7371 		 (vdev->opmode == wlan_op_mode_ap)) {
7372 		ctx->tx = dp_tx_send_vdev_id_check;
7373 		ctx->tx_fast = dp_tx_send_vdev_id_check;
7374 	} else {
7375 		ctx->tx = dp_tx_send;
7376 		ctx->tx_fast = soc->arch_ops.dp_tx_send_fast;
7377 	}
7378 
	/* Avoid vdev_id check in the regular exception path */
7380 	if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
7381 	    (vdev->opmode == wlan_op_mode_ap))
7382 		ctx->tx_exception = dp_tx_send_exception_vdev_id_check;
7383 	else
7384 		ctx->tx_exception = dp_tx_send_exception;
7385 }
7386 
7387 /**
7388  * dp_vdev_register_tx_handler() - Register Tx handler
7389  * @vdev: struct dp_vdev *
7390  * @soc: struct dp_soc *
 * @txrx_ops: struct ol_txrx_ops *
 *
 * Return: None
 */
7393 static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
7394 					       struct dp_soc *soc,
7395 					       struct ol_txrx_ops *txrx_ops)
7396 {
7397 	struct ol_txrx_hardtart_ctxt ctx = {0};
7398 
7399 	dp_vdev_fetch_tx_handler(vdev, soc, &ctx);
7400 
7401 	txrx_ops->tx.tx = ctx.tx;
7402 	txrx_ops->tx.tx_fast = ctx.tx_fast;
7403 	txrx_ops->tx.tx_exception = ctx.tx_exception;
7404 
7405 	dp_info("Configure tx_vdev_id_chk_handler Feature Flag: %d and mode:%d for vdev_id:%d",
7406 		wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx),
7407 		vdev->opmode, vdev->vdev_id);
7408 }
7409 #else /* QCA_HOST_MODE_WIFI_DISABLED */
7410 static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
7411 					       struct dp_soc *soc,
7412 					       struct ol_txrx_ops *txrx_ops)
7413 {
7414 }
7415 
7416 static inline void dp_vdev_fetch_tx_handler(struct dp_vdev *vdev,
7417 					    struct dp_soc *soc,
7418 					    struct ol_txrx_hardtart_ctxt *ctx)
7419 {
7420 }
7421 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
7422 
7423 /**
7424  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
7425  * @soc_hdl: Datapath soc handle
7426  * @vdev_id: id of Datapath VDEV handle
7427  * @osif_vdev: OSIF vdev handle
7428  * @txrx_ops: Tx and Rx operations
7429  *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
7431  */
7432 static QDF_STATUS dp_vdev_register_wifi3(struct cdp_soc_t *soc_hdl,
7433 					 uint8_t vdev_id,
7434 					 ol_osif_vdev_handle osif_vdev,
7435 					 struct ol_txrx_ops *txrx_ops)
7436 {
7437 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
7439 						      DP_MOD_ID_CDP);
7440 
7441 	if (!vdev)
7442 		return QDF_STATUS_E_FAILURE;
7443 
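	/* Cache the OS interface handle and rx/tx callbacks in the vdev */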
7444 	vdev->osif_vdev = osif_vdev;
7445 	vdev->osif_rx = txrx_ops->rx.rx;
7446 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
7447 	vdev->osif_rx_flush = txrx_ops->rx.rx_flush;
7448 	vdev->osif_gro_flush = txrx_ops->rx.rx_gro_flush;
7449 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
7450 	vdev->osif_fisa_rx = txrx_ops->rx.osif_fisa_rx;
7451 	vdev->osif_fisa_flush = txrx_ops->rx.osif_fisa_flush;
7452 	vdev->osif_get_key = txrx_ops->get_key;
7453 	dp_monitor_vdev_register_osif(vdev, txrx_ops);
7454 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
7455 	vdev->tx_comp = txrx_ops->tx.tx_comp;
7456 	vdev->stats_cb = txrx_ops->rx.stats_rx;
7457 	vdev->tx_classify_critical_pkt_cb =
7458 		txrx_ops->tx.tx_classify_critical_pkt_cb;
7459 #ifdef notyet
7460 #if ATH_SUPPORT_WAPI
7461 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
7462 #endif
7463 #endif
7464 #ifdef UMAC_SUPPORT_PROXY_ARP
7465 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
7466 #endif
7467 	vdev->me_convert = txrx_ops->me_convert;
7468 	vdev->get_tsf_time = txrx_ops->get_tsf_time;
7469 
7470 	dp_vdev_register_rx_eapol(vdev, txrx_ops);
7471 
7472 	dp_vdev_register_tx_handler(vdev, soc, txrx_ops);
7473 
7474 	dp_init_info("%pK: DP Vdev Register success", soc);
7475 
7476 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7477 	return QDF_STATUS_SUCCESS;
7478 }
7479 
7480 #ifdef WLAN_FEATURE_11BE_MLO
7481 void dp_peer_delete(struct dp_soc *soc,
7482 		    struct dp_peer *peer,
7483 		    void *arg)
7484 {
7485 	if (!peer->valid)
7486 		return;
7487 
7488 	dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
7489 			     peer->vdev->vdev_id,
7490 			     peer->mac_addr.raw, 0,
7491 			     peer->peer_type);
7492 }
7493 #else
7494 void dp_peer_delete(struct dp_soc *soc,
7495 		    struct dp_peer *peer,
7496 		    void *arg)
7497 {
7498 	if (!peer->valid)
7499 		return;
7500 
7501 	dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
7502 			     peer->vdev->vdev_id,
7503 			     peer->mac_addr.raw, 0,
7504 			     CDP_LINK_PEER_TYPE);
7505 }
7506 #endif
7507 
7508 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
7509 void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg)
7510 {
7511 	if (!peer->valid)
7512 		return;
7513 
7514 	if (IS_MLO_DP_LINK_PEER(peer))
7515 		dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
7516 				     peer->vdev->vdev_id,
7517 				     peer->mac_addr.raw, 0,
7518 				     CDP_LINK_PEER_TYPE);
7519 }
7520 #else
7521 void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg)
7522 {
7523 }
7524 #endif
7525 /**
 * dp_vdev_flush_peers() - Forcibly flush peers of vdev
7527  * @vdev_handle: Datapath VDEV handle
7528  * @unmap_only: Flag to indicate "only unmap"
7529  * @mlo_peers_only: true if only MLO peers should be flushed
7530  *
7531  * Return: void
7532  */
7533 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
7534 				bool unmap_only,
7535 				bool mlo_peers_only)
7536 {
7537 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7538 	struct dp_pdev *pdev = vdev->pdev;
7539 	struct dp_soc *soc = pdev->soc;
7540 	struct dp_peer *peer;
7541 	uint32_t i = 0;
7542 
7544 	if (!unmap_only) {
7545 		if (!mlo_peers_only)
7546 			dp_vdev_iterate_peer_lock_safe(vdev,
7547 						       dp_peer_delete,
7548 						       NULL,
7549 						       DP_MOD_ID_CDP);
7550 		else
7551 			dp_vdev_iterate_peer_lock_safe(vdev,
7552 						       dp_mlo_peer_delete,
7553 						       NULL,
7554 						       DP_MOD_ID_CDP);
7555 	}
7556 
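	/*
	 * Walk the peer-id table and unmap any peers still mapped on this
	 * vdev; these references were held due to missing unmap events
	 * from the target, so force the ref count back to one so the peer
	 * memory is freed once the last caller reference is dropped.
	 */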
7557 	for (i = 0; i < soc->max_peer_id ; i++) {
7558 		peer = __dp_peer_get_ref_by_id(soc, i, DP_MOD_ID_CDP);
7559 
7560 		if (!peer)
7561 			continue;
7562 
7563 		if (peer->vdev != vdev) {
7564 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7565 			continue;
7566 		}
7567 
7568 		if (!mlo_peers_only) {
7569 			dp_info("peer: " QDF_MAC_ADDR_FMT " is getting unmap",
7570 				QDF_MAC_ADDR_REF(peer->mac_addr.raw));
7571 			dp_rx_peer_unmap_handler(soc, i,
7572 						 vdev->vdev_id,
7573 						 peer->mac_addr.raw, 0,
7574 						 DP_PEER_WDS_COUNT_INVALID);
7575 			SET_PEER_REF_CNT_ONE(peer);
7576 		} else if (IS_MLO_DP_LINK_PEER(peer) ||
7577 			   IS_MLO_DP_MLD_PEER(peer)) {
7578 			dp_info("peer: " QDF_MAC_ADDR_FMT " is getting unmap",
7579 				QDF_MAC_ADDR_REF(peer->mac_addr.raw));
7580 			dp_rx_peer_unmap_handler(soc, i,
7581 						 vdev->vdev_id,
7582 						 peer->mac_addr.raw, 0,
7583 						 DP_PEER_WDS_COUNT_INVALID);
7584 			SET_PEER_REF_CNT_ONE(peer);
7585 		}
7586 
7587 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7588 	}
7589 }
7590 
7591 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
7592 /**
7593  * dp_txrx_alloc_vdev_stats_id()- Allocate vdev_stats_id
7594  * @soc_hdl: Datapath soc handle
7595  * @vdev_stats_id: Address of vdev_stats_id
7596  *
7597  * Return: QDF_STATUS
7598  */
7599 static QDF_STATUS dp_txrx_alloc_vdev_stats_id(struct cdp_soc_t *soc_hdl,
7600 					      uint8_t *vdev_stats_id)
7601 {
7602 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7603 	uint8_t id = 0;
7604 
7605 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
7606 		*vdev_stats_id = CDP_INVALID_VDEV_STATS_ID;
7607 		return QDF_STATUS_E_FAILURE;
7608 	}
7609 
7610 	while (id < CDP_MAX_VDEV_STATS_ID) {
7611 		if (!qdf_atomic_test_and_set_bit(id, &soc->vdev_stats_id_map)) {
7612 			*vdev_stats_id = id;
7613 			return QDF_STATUS_SUCCESS;
7614 		}
7615 		id++;
7616 	}
7617 
7618 	*vdev_stats_id = CDP_INVALID_VDEV_STATS_ID;
7619 	return QDF_STATUS_E_FAILURE;
7620 }
7621 
7622 /**
7623  * dp_txrx_reset_vdev_stats_id() - Reset vdev_stats_id in dp_soc
7624  * @soc_hdl: Datapath soc handle
7625  * @vdev_stats_id: vdev_stats_id to reset in dp_soc
7626  *
7627  * Return: none
7628  */
7629 static void dp_txrx_reset_vdev_stats_id(struct cdp_soc_t *soc_hdl,
7630 					uint8_t vdev_stats_id)
7631 {
7632 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7633 
7634 	if ((!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) ||
7635 	    (vdev_stats_id >= CDP_MAX_VDEV_STATS_ID))
7636 		return;
7637 
7638 	qdf_atomic_clear_bit(vdev_stats_id, &soc->vdev_stats_id_map);
7639 }
7640 #else
7641 static void dp_txrx_reset_vdev_stats_id(struct cdp_soc_t *soc,
7642 					uint8_t vdev_stats_id)
7643 {}
7644 #endif
7645 /**
7646  * dp_vdev_detach_wifi3() - Detach txrx vdev
7647  * @cdp_soc: Datapath soc handle
7648  * @vdev_id: VDEV Id
7649  * @callback: Callback OL_IF on completion of detach
 * @cb_context: Callback context
 *
 * Return: QDF_STATUS
 */
7653 static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc,
7654 				       uint8_t vdev_id,
7655 				       ol_txrx_vdev_delete_cb callback,
7656 				       void *cb_context)
7657 {
7658 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
7659 	struct dp_pdev *pdev;
7660 	struct dp_neighbour_peer *peer = NULL;
7661 	struct dp_peer *vap_self_peer = NULL;
7662 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
7663 						     DP_MOD_ID_CDP);
7664 
7665 	if (!vdev)
7666 		return QDF_STATUS_E_FAILURE;
7667 
7668 	soc->arch_ops.txrx_vdev_detach(soc, vdev);
7669 
7670 	pdev = vdev->pdev;
7671 
7672 	vap_self_peer = dp_sta_vdev_self_peer_ref_n_get(soc, vdev,
7673 							DP_MOD_ID_CONFIG);
7674 	if (vap_self_peer) {
7675 		qdf_spin_lock_bh(&soc->ast_lock);
7676 		if (vap_self_peer->self_ast_entry) {
7677 			dp_peer_del_ast(soc, vap_self_peer->self_ast_entry);
7678 			vap_self_peer->self_ast_entry = NULL;
7679 		}
7680 		qdf_spin_unlock_bh(&soc->ast_lock);
7681 
7682 		dp_peer_delete_wifi3((struct cdp_soc_t *)soc, vdev->vdev_id,
7683 				     vap_self_peer->mac_addr.raw, 0,
7684 				     CDP_LINK_PEER_TYPE);
7685 		dp_peer_unref_delete(vap_self_peer, DP_MOD_ID_CONFIG);
7686 	}
7687 
	/*
	 * If the target is hung, flush all peers before detaching the vdev;
	 * this frees all references held due to missing unmap commands
	 * from the target.
	 */
7693 	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
7694 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, false, false);
7695 	else if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
7696 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, true, false);
7697 
7698 	/* indicate that the vdev needs to be deleted */
7699 	vdev->delete.pending = 1;
7700 	dp_rx_vdev_detach(vdev);
	/*
	 * Keep this after dp_rx_vdev_detach(): the callback invoked from
	 * dp_rx_vdev_detach() still needs to look up the vdev pointer by
	 * vdev_id.
	 */
7706 	dp_vdev_id_map_tbl_remove(soc, vdev);
7707 
7708 	dp_monitor_neighbour_peer_list_remove(pdev, vdev, peer);
7709 
7710 	dp_txrx_reset_vdev_stats_id(cdp_soc, vdev->vdev_stats_id);
7711 
7712 	dp_tx_vdev_multipass_deinit(vdev);
7713 	dp_tx_vdev_traffic_end_indication_detach(vdev);
7714 
7715 	if (vdev->vdev_dp_ext_handle) {
7716 		qdf_mem_free(vdev->vdev_dp_ext_handle);
7717 		vdev->vdev_dp_ext_handle = NULL;
7718 	}
7719 	vdev->delete.callback = callback;
7720 	vdev->delete.context = cb_context;
7721 
7722 	if (vdev->opmode != wlan_op_mode_monitor)
7723 		dp_vdev_pdev_list_remove(soc, pdev, vdev);
7724 
7725 	pdev->vdev_count--;
7726 	/* release reference taken above for find */
7727 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7728 
7729 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
7730 	TAILQ_INSERT_TAIL(&soc->inactive_vdev_list, vdev, inactive_list_elem);
7731 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
7732 
7733 	dp_cfg_event_record_vdev_evt(soc, DP_CFG_EVENT_VDEV_DETACH, vdev);
7734 	dp_info("detach vdev %pK id %d pending refs %d",
7735 		vdev, vdev->vdev_id, qdf_atomic_read(&vdev->ref_cnt));
7736 
7737 	/* release reference taken at dp_vdev_create */
7738 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
7739 
7740 	return QDF_STATUS_SUCCESS;
7741 }
7742 
7743 #ifdef WLAN_FEATURE_11BE_MLO
7744 /**
 * is_dp_peer_can_reuse() - check whether the dp_peer matches the conditions for reuse
7746  * @vdev: Target DP vdev handle
7747  * @peer: DP peer handle to be checked
7748  * @peer_mac_addr: Target peer mac address
7749  * @peer_type: Target peer type
7750  *
7751  * Return: true - if match, false - not match
7752  */
7753 static inline
7754 bool is_dp_peer_can_reuse(struct dp_vdev *vdev,
7755 			  struct dp_peer *peer,
7756 			  uint8_t *peer_mac_addr,
7757 			  enum cdp_peer_type peer_type)
7758 {
7759 	if (peer->bss_peer && (peer->vdev == vdev) &&
7760 	    (peer->peer_type == peer_type) &&
7761 	    (qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw,
7762 			 QDF_MAC_ADDR_SIZE) == 0))
7763 		return true;
7764 
7765 	return false;
7766 }
7767 #else
7768 static inline
7769 bool is_dp_peer_can_reuse(struct dp_vdev *vdev,
7770 			  struct dp_peer *peer,
7771 			  uint8_t *peer_mac_addr,
7772 			  enum cdp_peer_type peer_type)
7773 {
7774 	if (peer->bss_peer && (peer->vdev == vdev) &&
7775 	    (qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw,
7776 			 QDF_MAC_ADDR_SIZE) == 0))
7777 		return true;
7778 
7779 	return false;
7780 }
7781 #endif
7782 
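/**
 * dp_peer_can_reuse() - find a reusable peer on the inactive list
 * @vdev: Target DP vdev handle
 * @peer_mac_addr: Target peer mac address
 * @peer_type: Target peer type
 *
 * Search the soc inactive peer list for a bss peer matching this vdev,
 * MAC address and peer type; on a match, take a DP_MOD_ID_CONFIG
 * reference and remove the peer from the inactive list.
 *
 * Return: matching dp_peer with reference held, or NULL if none found
 */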
7783 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
7784 						uint8_t *peer_mac_addr,
7785 						enum cdp_peer_type peer_type)
7786 {
7787 	struct dp_peer *peer;
7788 	struct dp_soc *soc = vdev->pdev->soc;
7789 
7790 	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
7791 	TAILQ_FOREACH(peer, &soc->inactive_peer_list,
7792 		      inactive_list_elem) {
7793 
		/* reuse bss peer only when vdev matches */
		if (is_dp_peer_can_reuse(vdev, peer,
					 peer_mac_addr, peer_type)) {
			/* increment ref count for cdp_peer_create */
7798 			if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG) ==
7799 						QDF_STATUS_SUCCESS) {
7800 				TAILQ_REMOVE(&soc->inactive_peer_list, peer,
7801 					     inactive_list_elem);
7802 				qdf_spin_unlock_bh
7803 					(&soc->inactive_peer_list_lock);
7804 				return peer;
7805 			}
7806 		}
7807 	}
7808 
7809 	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
7810 	return NULL;
7811 }
7812 
7813 #ifdef FEATURE_AST
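/**
 * dp_peer_ast_handle_roam_del() - delete the WDS AST entry for a roamed peer
 * @soc: Datapath soc handle
 * @pdev: Datapath pdev handle
 * @peer_mac_addr: peer MAC address
 *
 * Return: None
 */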
7814 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
7815 					       struct dp_pdev *pdev,
7816 					       uint8_t *peer_mac_addr)
7817 {
7818 	struct dp_ast_entry *ast_entry;
7819 
7820 	if (soc->ast_offload_support)
7821 		return;
7822 
7823 	qdf_spin_lock_bh(&soc->ast_lock);
7824 	if (soc->ast_override_support)
7825 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
7826 							    pdev->pdev_id);
7827 	else
7828 		ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
7829 
7830 	if (ast_entry && ast_entry->next_hop && !ast_entry->delete_in_progress)
7831 		dp_peer_del_ast(soc, ast_entry);
7832 
7833 	qdf_spin_unlock_bh(&soc->ast_lock);
7834 }
7835 #else
7836 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
7837 					       struct dp_pdev *pdev,
7838 					       uint8_t *peer_mac_addr)
7839 {
7840 }
7841 #endif
7842 
7843 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
7844 /**
7845  * dp_peer_hw_txrx_stats_init() - Initialize hw_txrx_stats_en in dp_peer
7846  * @soc: Datapath soc handle
7847  * @txrx_peer: Datapath peer handle
7848  *
7849  * Return: none
7850  */
7851 static inline
7852 void dp_peer_hw_txrx_stats_init(struct dp_soc *soc,
7853 				struct dp_txrx_peer *txrx_peer)
7854 {
7855 	txrx_peer->hw_txrx_stats_en =
7856 		wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx);
7857 }
7858 #else
7859 static inline
7860 void dp_peer_hw_txrx_stats_init(struct dp_soc *soc,
7861 				struct dp_txrx_peer *txrx_peer)
7862 {
7863 	txrx_peer->hw_txrx_stats_en = 0;
7864 }
7865 #endif
7866 
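/**
 * dp_txrx_peer_detach() - detach and free the txrx_peer context of a peer
 * @soc: Datapath soc handle
 * @peer: Datapath peer handle
 *
 * Return: QDF_STATUS
 */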
7867 static QDF_STATUS dp_txrx_peer_detach(struct dp_soc *soc, struct dp_peer *peer)
7868 {
7869 	struct dp_txrx_peer *txrx_peer;
7870 	struct dp_pdev *pdev;
7871 	struct cdp_txrx_peer_params_update params = {0};
7872 
7873 	/* dp_txrx_peer exists for mld peer and legacy peer */
7874 	if (peer->txrx_peer) {
7875 		txrx_peer = peer->txrx_peer;
7876 		peer->txrx_peer = NULL;
7877 		pdev = txrx_peer->vdev->pdev;
7878 
7879 		params.osif_vdev = (void *)peer->vdev->osif_vdev;
7880 		params.peer_mac = peer->mac_addr.raw;
7881 
7882 		dp_wdi_event_handler(WDI_EVENT_PEER_DELETE, soc,
7883 				     (void *)&params, peer->peer_id,
7884 				     WDI_NO_VAL, pdev->pdev_id);
7885 
7886 		dp_peer_defrag_rx_tids_deinit(txrx_peer);
		/* Deallocate the extended stats context */
7890 		dp_peer_delay_stats_ctx_dealloc(soc, txrx_peer);
7891 		dp_peer_rx_bufq_resources_deinit(txrx_peer);
7892 		dp_peer_jitter_stats_ctx_dealloc(pdev, txrx_peer);
7893 		dp_peer_sawf_stats_ctx_free(soc, txrx_peer);
7894 
7895 		qdf_mem_free(txrx_peer);
7896 	}
7897 
7898 	return QDF_STATUS_SUCCESS;
7899 }
7900 
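/**
 * dp_txrx_peer_calculate_stats_size() - get the number of stats entries
 * @soc: Datapath soc handle
 * @peer: Datapath peer handle
 *
 * An MLD peer with per-link stats enabled needs one entry for each
 * possible link plus one aggregate entry; all other peers need a
 * single entry.
 *
 * Return: number of dp_peer_stats entries to allocate
 */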
7901 static inline
7902 uint8_t dp_txrx_peer_calculate_stats_size(struct dp_soc *soc,
7903 					  struct dp_peer *peer)
7904 {
7905 	if ((wlan_cfg_is_peer_link_stats_enabled(soc->wlan_cfg_ctx)) &&
7906 	    IS_MLO_DP_MLD_PEER(peer)) {
7907 		return (DP_MAX_MLO_LINKS + 1);
7908 	}
7909 	return 1;
7910 }
7911 
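/**
 * dp_txrx_peer_attach() - allocate and initialize the txrx_peer of a peer
 * @soc: Datapath soc handle
 * @peer: Datapath peer handle
 *
 * Return: QDF_STATUS
 */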
7912 static QDF_STATUS dp_txrx_peer_attach(struct dp_soc *soc, struct dp_peer *peer)
7913 {
7914 	struct dp_txrx_peer *txrx_peer;
7915 	struct dp_pdev *pdev;
7916 	struct cdp_txrx_peer_params_update params = {0};
7917 	uint8_t stats_arr_size = 0;
7918 
7919 	stats_arr_size = dp_txrx_peer_calculate_stats_size(soc, peer);
7920 
7921 	txrx_peer = (struct dp_txrx_peer *)qdf_mem_malloc(sizeof(*txrx_peer) +
7922 							  (stats_arr_size *
7923 							   sizeof(struct dp_peer_stats)));
7924 
7925 	if (!txrx_peer)
7926 		return QDF_STATUS_E_NOMEM; /* failure */
7927 
7928 	txrx_peer->peer_id = HTT_INVALID_PEER;
7929 	/* initialize the peer_id */
7930 	txrx_peer->vdev = peer->vdev;
7931 	pdev = peer->vdev->pdev;
7932 	txrx_peer->stats_arr_size = stats_arr_size;
7933 
7934 	DP_TXRX_PEER_STATS_INIT(txrx_peer,
7935 				(txrx_peer->stats_arr_size *
7936 				sizeof(struct dp_peer_stats)));
7937 
7938 	if (!IS_DP_LEGACY_PEER(peer))
7939 		txrx_peer->is_mld_peer = 1;
7940 
7941 	dp_wds_ext_peer_init(txrx_peer);
7942 	dp_peer_rx_bufq_resources_init(txrx_peer);
7943 	dp_peer_hw_txrx_stats_init(soc, txrx_peer);
	/*
	 * Allocate the peer extended stats context. Fall through in
	 * case of failure, as this object is not a hard requirement
	 * for regular statistics updates.
	 */
7949 	if (dp_peer_delay_stats_ctx_alloc(soc, txrx_peer) !=
7950 					  QDF_STATUS_SUCCESS)
7951 		dp_warn("peer delay_stats ctx alloc failed");
7952 
	/*
	 * Allocate memory for jitter stats. Fall through in
	 * case of failure, as this object is not a hard requirement
	 * for regular statistics updates.
	 */
7958 	if (dp_peer_jitter_stats_ctx_alloc(pdev, txrx_peer) !=
7959 					   QDF_STATUS_SUCCESS)
7960 		dp_warn("peer jitter_stats ctx alloc failed");
7961 
7962 	dp_set_peer_isolation(txrx_peer, false);
7963 
7964 	dp_peer_defrag_rx_tids_init(txrx_peer);
7965 
7966 	if (dp_peer_sawf_stats_ctx_alloc(soc, txrx_peer) != QDF_STATUS_SUCCESS)
7967 		dp_warn("peer sawf stats alloc failed");
7968 
7969 	dp_txrx_peer_attach_add(soc, peer, txrx_peer);
7970 
7971 	params.peer_mac = peer->mac_addr.raw;
7972 	params.osif_vdev = (void *)peer->vdev->osif_vdev;
7973 	params.chip_id = dp_mlo_get_chip_id(soc);
7974 	params.pdev_id = peer->vdev->pdev->pdev_id;
7975 
7976 	dp_wdi_event_handler(WDI_EVENT_TXRX_PEER_CREATE, soc,
7977 			     (void *)&params, peer->peer_id,
7978 			     WDI_NO_VAL, params.pdev_id);
7979 
7980 	return QDF_STATUS_SUCCESS;
7981 }
7982 
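/**
 * dp_txrx_peer_stats_clr() - clear cumulative and per-link peer stats
 * @txrx_peer: Datapath txrx_peer handle
 *
 * Return: None
 */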
7983 static inline
7984 void dp_txrx_peer_stats_clr(struct dp_txrx_peer *txrx_peer)
7985 {
7986 	if (!txrx_peer)
7987 		return;
7988 
7989 	txrx_peer->tx_failed = 0;
7990 	txrx_peer->comp_pkt.num = 0;
7991 	txrx_peer->comp_pkt.bytes = 0;
7992 	txrx_peer->to_stack.num = 0;
7993 	txrx_peer->to_stack.bytes = 0;
7994 
7995 	DP_TXRX_PEER_STATS_CLR(txrx_peer,
7996 			       (txrx_peer->stats_arr_size *
7997 			       sizeof(struct dp_peer_stats)));
7998 	dp_peer_delay_stats_ctx_clr(txrx_peer);
7999 	dp_peer_jitter_stats_ctx_clr(txrx_peer);
8000 }
8001 
8002 /**
8003  * dp_peer_create_wifi3() - attach txrx peer
8004  * @soc_hdl: Datapath soc handle
8005  * @vdev_id: id of vdev
8006  * @peer_mac_addr: Peer MAC address
8007  * @peer_type: link or MLD peer type
8008  *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS error code on failure
8010  */
8011 static QDF_STATUS
8012 dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8013 		     uint8_t *peer_mac_addr, enum cdp_peer_type peer_type)
8014 {
8015 	struct dp_peer *peer;
8016 	int i;
8017 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8018 	struct dp_pdev *pdev;
8019 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
8020 	struct dp_vdev *vdev = NULL;
8021 
8022 	if (!peer_mac_addr)
8023 		return QDF_STATUS_E_FAILURE;
8024 
8025 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
8026 
8027 	if (!vdev)
8028 		return QDF_STATUS_E_FAILURE;
8029 
8030 	pdev = vdev->pdev;
8031 	soc = pdev->soc;
8032 
8033 	/*
8034 	 * If a peer entry with given MAC address already exists,
8035 	 * reuse the peer and reset the state of peer.
8036 	 */
8037 	peer = dp_peer_can_reuse(vdev, peer_mac_addr, peer_type);
8038 
8039 	if (peer) {
8040 		qdf_atomic_init(&peer->is_default_route_set);
8041 		dp_peer_cleanup(vdev, peer);
8042 
8043 		dp_peer_vdev_list_add(soc, vdev, peer);
8044 		dp_peer_find_hash_add(soc, peer);
8045 
8046 		if (dp_peer_rx_tids_create(peer) != QDF_STATUS_SUCCESS) {
8047 			dp_alert("RX tid alloc fail for peer %pK (" QDF_MAC_ADDR_FMT ")",
8048 				 peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
8049 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8050 			return QDF_STATUS_E_FAILURE;
8051 		}
8052 
8053 		if (IS_MLO_DP_MLD_PEER(peer))
8054 			dp_mld_peer_init_link_peers_info(peer);
8055 
8056 		qdf_spin_lock_bh(&soc->ast_lock);
8057 		dp_peer_delete_ast_entries(soc, peer);
8058 		qdf_spin_unlock_bh(&soc->ast_lock);
8059 
8060 		if ((vdev->opmode == wlan_op_mode_sta) &&
8061 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
8062 		     QDF_MAC_ADDR_SIZE)) {
8063 			ast_type = CDP_TXRX_AST_TYPE_SELF;
8064 		}
8065 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
8066 
8067 		peer->valid = 1;
8068 		peer->is_tdls_peer = false;
8069 		dp_local_peer_id_alloc(pdev, peer);
8070 
8071 		qdf_spinlock_create(&peer->peer_info_lock);
8072 
8073 		DP_STATS_INIT(peer);
8074 
		/*
		 * In tx_monitor mode, a filter may be set for an unassociated
		 * peer. When that peer becomes associated, the tx_cap_enabled
		 * flag needs to be updated to support peer filtering.
		 */
8080 		if (!IS_MLO_DP_MLD_PEER(peer)) {
8081 			dp_monitor_peer_tx_capture_filter_check(pdev, peer);
8082 			dp_monitor_peer_reset_stats(soc, peer);
8083 		}
8084 
8085 		if (peer->txrx_peer) {
8086 			dp_peer_rx_bufq_resources_init(peer->txrx_peer);
8087 			dp_txrx_peer_stats_clr(peer->txrx_peer);
8088 			dp_set_peer_isolation(peer->txrx_peer, false);
8089 			dp_wds_ext_peer_init(peer->txrx_peer);
8090 			dp_peer_hw_txrx_stats_init(soc, peer->txrx_peer);
8091 		}
8092 
8093 		dp_cfg_event_record_peer_evt(soc, DP_CFG_EVENT_PEER_CREATE,
8094 					     peer, vdev, 1);
8095 		dp_info("vdev %pK Reused peer %pK ("QDF_MAC_ADDR_FMT
8096 			") vdev_ref_cnt "
8097 			"%d peer_ref_cnt: %d",
8098 			vdev, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
8099 			qdf_atomic_read(&vdev->ref_cnt),
8100 			qdf_atomic_read(&peer->ref_cnt));
8101 			dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
8102 
8103 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8104 		return QDF_STATUS_SUCCESS;
8105 	} else {
		/*
		 * When a STA roams from an RPTR AP to the ROOT AP and vice
		 * versa, we need to remove the AST entry which was earlier
		 * added as a WDS entry.
		 * If an AST entry exists, but no peer entry exists with the
		 * given MAC address, we can deduce that it is a WDS entry.
		 */
8113 		dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
8114 	}
8115 
8116 #ifdef notyet
8117 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
8118 		soc->mempool_ol_ath_peer);
8119 #else
8120 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
8121 #endif
	if (!peer) {
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_E_FAILURE; /* failure */
	}

	/* log the peer for minidump collection only after the allocation
	 * has been verified
	 */
	wlan_minidump_log(peer,
			  sizeof(*peer),
			  soc->ctrl_psoc,
			  WLAN_MD_DP_PEER, "dp_peer");
8130 
8131 	qdf_mem_zero(peer, sizeof(struct dp_peer));
8132 
8133 	/* store provided params */
8134 	peer->vdev = vdev;
8135 
8136 	/* initialize the peer_id */
8137 	peer->peer_id = HTT_INVALID_PEER;
8138 
8139 	qdf_mem_copy(
8140 		&peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE);
8141 
8142 	DP_PEER_SET_TYPE(peer, peer_type);
8143 	if (IS_MLO_DP_MLD_PEER(peer)) {
8144 		if (dp_txrx_peer_attach(soc, peer) !=
8145 				QDF_STATUS_SUCCESS)
8146 			goto fail; /* failure */
8147 
8148 		dp_mld_peer_init_link_peers_info(peer);
8149 	} else if (dp_monitor_peer_attach(soc, peer) !=
8150 				QDF_STATUS_SUCCESS)
8151 		dp_warn("peer monitor ctx alloc failed");
8152 
8153 	TAILQ_INIT(&peer->ast_entry_list);
8154 
8155 	/* get the vdev reference for new peer */
8156 	dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CHILD);
8157 
8158 	if ((vdev->opmode == wlan_op_mode_sta) &&
8159 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
8160 			 QDF_MAC_ADDR_SIZE)) {
8161 		ast_type = CDP_TXRX_AST_TYPE_SELF;
8162 	}
8163 	qdf_spinlock_create(&peer->peer_state_lock);
8164 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
8165 	qdf_spinlock_create(&peer->peer_info_lock);
8166 
8167 	/* reset the ast index to flowid table */
8168 	dp_peer_reset_flowq_map(peer);
8169 
8170 	qdf_atomic_init(&peer->ref_cnt);
8171 
8172 	for (i = 0; i < DP_MOD_ID_MAX; i++)
8173 		qdf_atomic_init(&peer->mod_refs[i]);
8174 
8175 	/* keep one reference for attach */
8176 	qdf_atomic_inc(&peer->ref_cnt);
8177 	qdf_atomic_inc(&peer->mod_refs[DP_MOD_ID_CONFIG]);
8178 
8179 	dp_peer_vdev_list_add(soc, vdev, peer);
8180 
8181 	/* TODO: See if hash based search is required */
8182 	dp_peer_find_hash_add(soc, peer);
8183 
8184 	/* Initialize the peer state */
8185 	peer->state = OL_TXRX_PEER_STATE_DISC;
8186 
8187 	dp_cfg_event_record_peer_evt(soc, DP_CFG_EVENT_PEER_CREATE,
8188 				     peer, vdev, 0);
8189 	dp_info("vdev %pK created peer %pK ("QDF_MAC_ADDR_FMT") vdev_ref_cnt "
8190 		"%d peer_ref_cnt: %d",
8191 		vdev, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
8192 		qdf_atomic_read(&vdev->ref_cnt),
8193 		qdf_atomic_read(&peer->ref_cnt));
	/*
	 * For every peer MAP message, search and set bss_peer
	 */
8197 	if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
8198 			QDF_MAC_ADDR_SIZE) == 0 &&
8199 			(wlan_op_mode_sta != vdev->opmode)) {
8200 		dp_info("vdev bss_peer!!");
8201 		peer->bss_peer = 1;
8202 		if (peer->txrx_peer)
8203 			peer->txrx_peer->bss_peer = 1;
8204 	}
8205 
8206 	if (wlan_op_mode_sta == vdev->opmode &&
8207 	    qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
8208 			QDF_MAC_ADDR_SIZE) == 0) {
8209 		peer->sta_self_peer = 1;
8210 	}
8211 
8212 	if (dp_peer_rx_tids_create(peer) != QDF_STATUS_SUCCESS) {
8213 		dp_alert("RX tid alloc fail for peer %pK (" QDF_MAC_ADDR_FMT ")",
8214 			 peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
8215 		goto fail;
8216 	}
8217 
8218 	peer->valid = 1;
8219 	dp_local_peer_id_alloc(pdev, peer);
8220 	DP_STATS_INIT(peer);
8221 
8222 	if (dp_peer_sawf_ctx_alloc(soc, peer) != QDF_STATUS_SUCCESS)
8223 		dp_warn("peer sawf context alloc failed");
8224 
8225 	dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
8226 
8227 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8228 
8229 	return QDF_STATUS_SUCCESS;
8230 fail:
8231 	qdf_mem_free(peer);
8232 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8233 
8234 	return QDF_STATUS_E_FAILURE;
8235 }
8236 
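/**
 * dp_peer_legacy_setup() - attach a txrx_peer for a non-MLO peer
 * @soc: Datapath soc handle
 * @peer: Datapath peer handle
 *
 * Return: QDF_STATUS
 */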
8237 static QDF_STATUS dp_peer_legacy_setup(struct dp_soc *soc, struct dp_peer *peer)
8238 {
8239 	/* txrx_peer might exist already in peer reuse case */
8240 	if (peer->txrx_peer)
8241 		return QDF_STATUS_SUCCESS;
8242 
8243 	if (dp_txrx_peer_attach(soc, peer) !=
8244 				QDF_STATUS_SUCCESS) {
8245 		dp_err("peer txrx ctx alloc failed");
8246 		return QDF_STATUS_E_FAILURE;
8247 	}
8248 
8249 	return QDF_STATUS_SUCCESS;
8250 }
8251 
8252 #ifdef WLAN_FEATURE_11BE_MLO
8253 QDF_STATUS dp_peer_mlo_setup(
8254 			struct dp_soc *soc,
8255 			struct dp_peer *peer,
8256 			uint8_t vdev_id,
8257 			struct cdp_peer_setup_info *setup_info)
8258 {
8259 	struct dp_peer *mld_peer = NULL;
8260 	struct cdp_txrx_peer_params_update params = {0};
8261 
8262 	/* Non-MLO connection, do nothing */
8263 	if (!setup_info || !setup_info->mld_peer_mac)
8264 		return QDF_STATUS_SUCCESS;
8265 
8266 	dp_cfg_event_record_peer_setup_evt(soc, DP_CFG_EVENT_MLO_SETUP,
8267 					   peer, NULL, vdev_id, setup_info);
8268 	dp_info("link peer: " QDF_MAC_ADDR_FMT "mld peer: " QDF_MAC_ADDR_FMT
8269 		"first_link %d, primary_link %d",
8270 		QDF_MAC_ADDR_REF(peer->mac_addr.raw),
8271 		QDF_MAC_ADDR_REF(setup_info->mld_peer_mac),
8272 		setup_info->is_first_link,
8273 		setup_info->is_primary_link);
8274 
8275 	/* if this is the first link peer */
8276 	if (setup_info->is_first_link)
8277 		/* create MLD peer */
8278 		dp_peer_create_wifi3((struct cdp_soc_t *)soc,
8279 				     vdev_id,
8280 				     setup_info->mld_peer_mac,
8281 				     CDP_MLD_PEER_TYPE);
8282 
8283 	if (peer->vdev->opmode == wlan_op_mode_sta &&
8284 	    setup_info->is_primary_link) {
8285 		struct cdp_txrx_peer_params_update params = {0};
8286 
8287 		params.chip_id = dp_mlo_get_chip_id(soc);
8288 		params.pdev_id = peer->vdev->pdev->pdev_id;
8289 		params.osif_vdev = peer->vdev->osif_vdev;
8290 
8291 		dp_wdi_event_handler(
8292 				WDI_EVENT_STA_PRIMARY_UMAC_UPDATE,
8293 				soc,
8294 				(void *)&params, peer->peer_id,
8295 				WDI_NO_VAL, params.pdev_id);
8296 	}
8297 
8298 	peer->first_link = setup_info->is_first_link;
8299 	peer->primary_link = setup_info->is_primary_link;
8300 	mld_peer = dp_mld_peer_find_hash_find(soc,
8301 					      setup_info->mld_peer_mac,
8302 					      0, vdev_id, DP_MOD_ID_CDP);
8303 	if (mld_peer) {
8304 		if (setup_info->is_first_link) {
8305 			/* assign rx_tid to mld peer */
8306 			mld_peer->rx_tid = peer->rx_tid;
8307 			/* no cdp_peer_setup for MLD peer,
8308 			 * set it for addba processing
8309 			 */
8310 			qdf_atomic_set(&mld_peer->is_default_route_set, 1);
8311 		} else {
8312 			/* free link peer original rx_tids mem */
8313 			dp_peer_rx_tids_destroy(peer);
8314 			/* assign mld peer rx_tid to link peer */
8315 			peer->rx_tid = mld_peer->rx_tid;
8316 		}
8317 
8318 		if (setup_info->is_primary_link &&
8319 		    !setup_info->is_first_link) {
8320 			struct dp_vdev *prev_vdev;
			/*
			 * If the first link is not the primary link, then
			 * mld_peer->vdev needs to be updated, since the
			 * primary link's dp_vdev is not the one assigned
			 * during MLD peer creation.
			 */
8327 			prev_vdev = mld_peer->vdev;
8328 			dp_info("Primary link is not the first link. vdev: %pK,"
8329 				"vdev_id %d vdev_ref_cnt %d",
8330 				mld_peer->vdev, vdev_id,
8331 				qdf_atomic_read(&mld_peer->vdev->ref_cnt));
8332 			/* release the ref to original dp_vdev */
8333 			dp_vdev_unref_delete(soc, mld_peer->vdev,
8334 					     DP_MOD_ID_CHILD);
8335 			/*
8336 			 * get the ref to new dp_vdev,
8337 			 * increase dp_vdev ref_cnt
8338 			 */
8339 			mld_peer->vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8340 							       DP_MOD_ID_CHILD);
8341 			mld_peer->txrx_peer->vdev = mld_peer->vdev;
8342 
8343 			dp_cfg_event_record_mlo_setup_vdev_update_evt(
8344 					soc, mld_peer, prev_vdev,
8345 					mld_peer->vdev);
8346 
8347 			params.osif_vdev = (void *)peer->vdev->osif_vdev;
8348 			params.peer_mac = peer->mac_addr.raw;
8349 			params.chip_id = dp_mlo_get_chip_id(soc);
8350 			params.pdev_id = peer->vdev->pdev->pdev_id;
8351 
8352 			dp_wdi_event_handler(
8353 					WDI_EVENT_PEER_PRIMARY_UMAC_UPDATE,
8354 					soc, (void *)&params, peer->peer_id,
8355 					WDI_NO_VAL, params.pdev_id);
8356 		}
8357 
8358 		/* associate mld and link peer */
8359 		dp_link_peer_add_mld_peer(peer, mld_peer);
8360 		dp_mld_peer_add_link_peer(mld_peer, peer);
8361 
8362 		mld_peer->txrx_peer->is_mld_peer = 1;
8363 		dp_peer_unref_delete(mld_peer, DP_MOD_ID_CDP);
8364 	} else {
8365 		peer->mld_peer = NULL;
8366 		dp_err("mld peer" QDF_MAC_ADDR_FMT "not found!",
8367 		       QDF_MAC_ADDR_REF(setup_info->mld_peer_mac));
8368 		return QDF_STATUS_E_FAILURE;
8369 	}
8370 
8371 	return QDF_STATUS_SUCCESS;
8372 }
8373 
8374 /**
8375  * dp_mlo_peer_authorize() - authorize MLO peer
8376  * @soc: soc handle
8377  * @peer: pointer to link peer
8378  *
8379  * Return: void
8380  */
8381 static void dp_mlo_peer_authorize(struct dp_soc *soc,
8382 				  struct dp_peer *peer)
8383 {
8384 	int i;
8385 	struct dp_peer *link_peer = NULL;
8386 	struct dp_peer *mld_peer = peer->mld_peer;
8387 	struct dp_mld_link_peers link_peers_info;
8388 
8389 	if (!mld_peer)
8390 		return;
8391 
8392 	/* get link peers with reference */
8393 	dp_get_link_peers_ref_from_mld_peer(soc, mld_peer,
8394 					    &link_peers_info,
8395 					    DP_MOD_ID_CDP);
8396 
8397 	for (i = 0; i < link_peers_info.num_links; i++) {
8398 		link_peer = link_peers_info.link_peers[i];
8399 
8400 		if (!link_peer->authorize) {
8401 			dp_release_link_peers_ref(&link_peers_info,
8402 						  DP_MOD_ID_CDP);
8403 			mld_peer->authorize = false;
8404 			return;
8405 		}
8406 	}
8407 
	/* If we reach here, all link peers are authorized;
	 * authorize the mld peer as well.
	 */
8411 	mld_peer->authorize = true;
8412 
8413 	/* release link peers reference */
8414 	dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
8415 }
8416 #endif
8417 
8418 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
8419 				  enum cdp_host_reo_dest_ring *reo_dest,
8420 				  bool *hash_based)
8421 {
8422 	struct dp_soc *soc;
8423 	struct dp_pdev *pdev;
8424 
8425 	pdev = vdev->pdev;
8426 	soc = pdev->soc;
	/*
	 * Hash-based steering is disabled for radios which are offloaded
	 * to NSS.
	 */
8431 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
8432 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
8433 
	/*
	 * The line below ensures the proper reo_dest ring is chosen for
	 * cases where the Toeplitz hash cannot be generated (e.g. non
	 * TCP/UDP packets).
	 */
8438 	*reo_dest = pdev->reo_dest;
8439 }
8440 
8441 #ifdef IPA_OFFLOAD
8442 /**
8443  * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
8444  * @vdev: Virtual device
8445  *
8446  * Return: true if the vdev is of subtype P2P
8447  *	   false if the vdev is of any other subtype
8448  */
8449 static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
8450 {
8451 	if (vdev->subtype == wlan_op_subtype_p2p_device ||
8452 	    vdev->subtype == wlan_op_subtype_p2p_cli ||
8453 	    vdev->subtype == wlan_op_subtype_p2p_go)
8454 		return true;
8455 
8456 	return false;
8457 }
8458 
8459 /**
8460  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
8461  * @vdev: Datapath VDEV handle
 * @setup_info: peer setup info (for MLO)
 * @reo_dest: pointer to default reo_dest ring for vdev to be populated
 * @hash_based: pointer to hash value (enabled/disabled) to be populated
 * @lmac_peer_id_msb: pointer to lmac peer id MSB to be populated
 *
 * If IPA is enabled in the ini, for SAP mode, disable hash-based steering
 * and use the default reo_dest ring for RX. Use config values for other
 * modes.
8469  *
8470  * Return: None
8471  */
8472 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
8473 				       struct cdp_peer_setup_info *setup_info,
8474 				       enum cdp_host_reo_dest_ring *reo_dest,
8475 				       bool *hash_based,
8476 				       uint8_t *lmac_peer_id_msb)
8477 {
8478 	struct dp_soc *soc;
8479 	struct dp_pdev *pdev;
8480 
8481 	pdev = vdev->pdev;
8482 	soc = pdev->soc;
8483 
8484 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
8485 
8486 	/* For P2P-GO interfaces we do not need to change the REO
8487 	 * configuration even if IPA config is enabled
8488 	 */
8489 	if (dp_is_vdev_subtype_p2p(vdev))
8490 		return;
8491 
8492 	/*
8493 	 * If IPA is enabled, disable hash-based flow steering and set
8494 	 * reo_dest_ring_4 as the REO ring to receive packets on.
8495 	 * IPA is configured to reap reo_dest_ring_4.
8496 	 *
8497 	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
8498 	 * value enum value is from 1 - 4.
8499 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
8500 	 */
8501 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
8502 		if (vdev->opmode == wlan_op_mode_ap) {
8503 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
8504 			*hash_based = 0;
8505 		} else if (vdev->opmode == wlan_op_mode_sta &&
8506 			   dp_ipa_is_mdm_platform()) {
8507 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
8508 		} else if (vdev->opmode == wlan_op_mode_sta &&
8509 			   (!dp_ipa_is_mdm_platform())) {
8510 			dp_debug("opt_dp: default reo ring is set");
8511 		}
8512 	}
8513 }
8514 
8515 #else
8516 
8517 /**
8518  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
8519  * @vdev: Datapath VDEV handle
 * @setup_info: peer setup info (for MLO)
 * @reo_dest: pointer to default reo_dest ring for vdev to be populated
 * @hash_based: pointer to hash value (enabled/disabled) to be populated
 * @lmac_peer_id_msb: pointer to lmac peer id MSB to be populated
 *
 * Use system config values for hash-based steering.
 *
 * Return: None
8527  */
8528 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
8529 				       struct cdp_peer_setup_info *setup_info,
8530 				       enum cdp_host_reo_dest_ring *reo_dest,
8531 				       bool *hash_based,
8532 				       uint8_t *lmac_peer_id_msb)
8533 {
8534 	struct dp_soc *soc = vdev->pdev->soc;
8535 
8536 	soc->arch_ops.peer_get_reo_hash(vdev, setup_info, reo_dest, hash_based,
8537 					lmac_peer_id_msb);
8538 }
8539 #endif /* IPA_OFFLOAD */
8540 
8541 /**
8542  * dp_peer_setup_wifi3() - initialize the peer
8543  * @soc_hdl: soc handle object
8544  * @vdev_id: vdev_id of vdev object
8545  * @peer_mac: Peer's mac address
8546  * @setup_info: peer setup info for MLO
8547  *
8548  * Return: QDF_STATUS
8549  */
8550 static QDF_STATUS
8551 dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8552 		    uint8_t *peer_mac,
8553 		    struct cdp_peer_setup_info *setup_info)
8554 {
8555 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8556 	struct dp_pdev *pdev;
8557 	bool hash_based = 0;
8558 	enum cdp_host_reo_dest_ring reo_dest;
8559 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8560 	struct dp_vdev *vdev = NULL;
8561 	struct dp_peer *peer =
8562 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
8563 					       DP_MOD_ID_CDP);
8564 	struct dp_peer *mld_peer = NULL;
8565 	enum wlan_op_mode vdev_opmode;
8566 	uint8_t lmac_peer_id_msb = 0;
8567 
8568 	if (!peer)
8569 		return QDF_STATUS_E_FAILURE;
8570 
8571 	vdev = peer->vdev;
8572 	if (!vdev) {
8573 		status = QDF_STATUS_E_FAILURE;
8574 		goto fail;
8575 	}
8576 
8577 	/* save vdev related member in case vdev freed */
8578 	vdev_opmode = vdev->opmode;
8579 	pdev = vdev->pdev;
8580 	dp_peer_setup_get_reo_hash(vdev, setup_info,
8581 				   &reo_dest, &hash_based,
8582 				   &lmac_peer_id_msb);
8583 
8584 	dp_cfg_event_record_peer_setup_evt(soc, DP_CFG_EVENT_PEER_SETUP,
8585 					   peer, vdev, vdev->vdev_id,
8586 					   setup_info);
8587 	dp_info("pdev: %d vdev :%d opmode:%u peer %pK (" QDF_MAC_ADDR_FMT ") "
8588 		"hash-based-steering:%d default-reo_dest:%u",
8589 		pdev->pdev_id, vdev->vdev_id,
8590 		vdev->opmode, peer,
8591 		QDF_MAC_ADDR_REF(peer->mac_addr.raw), hash_based, reo_dest);
8592 
	/*
	 * There are corner cases where AD1 = AD2 = "VAP's address",
	 * i.e. both devices have the same MAC address. In these
	 * cases we want such packets to be processed by the NULL Q
	 * handler, which is the REO2TCL ring. For this reason we
	 * should not set up reo_queues and the default route for the
	 * bss_peer.
	 */
8600 	if (!IS_MLO_DP_MLD_PEER(peer))
8601 		dp_monitor_peer_tx_init(pdev, peer);
8602 
8603 	if (!setup_info)
8604 		if (dp_peer_legacy_setup(soc, peer) !=
8605 				QDF_STATUS_SUCCESS) {
8606 			status = QDF_STATUS_E_RESOURCES;
8607 			goto fail;
8608 		}
8609 
8610 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
8611 		status = QDF_STATUS_E_FAILURE;
8612 		goto fail;
8613 	}
8614 
8615 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
8616 		/* TODO: Check the destination ring number to be passed to FW */
8617 		soc->cdp_soc.ol_ops->peer_set_default_routing(
8618 				soc->ctrl_psoc,
8619 				peer->vdev->pdev->pdev_id,
8620 				peer->mac_addr.raw,
8621 				peer->vdev->vdev_id, hash_based, reo_dest,
8622 				lmac_peer_id_msb);
8623 	}
8624 
8625 	qdf_atomic_set(&peer->is_default_route_set, 1);
8626 
8627 	status = dp_peer_mlo_setup(soc, peer, vdev->vdev_id, setup_info);
8628 	if (QDF_IS_STATUS_ERROR(status)) {
8629 		dp_peer_err("peer mlo setup failed");
8630 		qdf_assert_always(0);
8631 	}
8632 
8633 	if (vdev_opmode != wlan_op_mode_monitor) {
8634 		/* In case of MLD peer, switch peer to mld peer and
8635 		 * do peer_rx_init.
8636 		 */
8637 		if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
8638 		    IS_MLO_DP_LINK_PEER(peer)) {
8639 			if (setup_info && setup_info->is_first_link) {
8640 				mld_peer = DP_GET_MLD_PEER_FROM_PEER(peer);
8641 				if (mld_peer)
8642 					dp_peer_rx_init(pdev, mld_peer);
8643 				else
8644 					dp_peer_err("MLD peer null. Primary link peer:%pK", peer);
8645 			}
8646 		} else {
8647 			dp_peer_rx_init(pdev, peer);
8648 		}
8649 	}
8650 
8651 	dp_soc_txrx_peer_setup(vdev_opmode, soc, peer);
8652 
8653 	if (!IS_MLO_DP_MLD_PEER(peer))
8654 		dp_peer_ppdu_delayed_ba_init(peer);
8655 
8656 fail:
8657 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8658 	return status;
8659 }
8660 
8661 /**
8662  * dp_cp_peer_del_resp_handler() - Handle the peer delete response
8663  * @soc_hdl: Datapath SOC handle
8664  * @vdev_id: id of virtual device object
8665  * @mac_addr: Mac address of the peer
8666  *
8667  * Return: QDF_STATUS
8668  */
8669 static QDF_STATUS dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl,
8670 					      uint8_t vdev_id,
8671 					      uint8_t *mac_addr)
8672 {
8673 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8674 	struct dp_ast_entry  *ast_entry = NULL;
8675 	txrx_ast_free_cb cb = NULL;
8676 	void *cookie;
8677 
8678 	if (soc->ast_offload_support)
8679 		return QDF_STATUS_E_INVAL;
8680 
8681 	qdf_spin_lock_bh(&soc->ast_lock);
8682 
8683 	ast_entry =
8684 		dp_peer_ast_hash_find_by_vdevid(soc, mac_addr,
8685 						vdev_id);
8686 
	/* In the case of qwrap we have multiple BSS peers
	 * with the same MAC address.
	 *
	 * The AST entry for this MAC address is created
	 * for only one peer, hence it may be NULL here.
	 */
8693 	if ((!ast_entry || !ast_entry->delete_in_progress) ||
8694 	    (ast_entry->peer_id != HTT_INVALID_PEER)) {
8695 		qdf_spin_unlock_bh(&soc->ast_lock);
8696 		return QDF_STATUS_E_FAILURE;
8697 	}
8698 
8699 	if (ast_entry->is_mapped)
8700 		soc->ast_table[ast_entry->ast_idx] = NULL;
8701 
8702 	DP_STATS_INC(soc, ast.deleted, 1);
8703 	dp_peer_ast_hash_remove(soc, ast_entry);
8704 
8705 	cb = ast_entry->callback;
8706 	cookie = ast_entry->cookie;
8707 	ast_entry->callback = NULL;
8708 	ast_entry->cookie = NULL;
8709 
8710 	soc->num_ast_entries--;
8711 	qdf_spin_unlock_bh(&soc->ast_lock);
8712 
8713 	if (cb) {
8714 		cb(soc->ctrl_psoc,
8715 		   dp_soc_to_cdp_soc(soc),
8716 		   cookie,
8717 		   CDP_TXRX_AST_DELETED);
8718 	}
8719 	qdf_mem_free(ast_entry);
8720 
8721 	return QDF_STATUS_SUCCESS;
8722 }
8723 
8724 /**
8725  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
8726  * @txrx_soc: cdp soc handle
8727  * @ac: Access category
8728  * @value: timeout value in millisec
8729  *
8730  * Return: void
8731  */
8732 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
8733 				    uint8_t ac, uint32_t value)
8734 {
8735 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
8736 
8737 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
8738 }
8739 
8740 /**
8741  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
8742  * @txrx_soc: cdp soc handle
8743  * @ac: access category
8744  * @value: timeout value in millisec
8745  *
8746  * Return: void
8747  */
8748 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
8749 				    uint8_t ac, uint32_t *value)
8750 {
8751 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
8752 
8753 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
8754 }
8755 
8756 /**
8757  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
8758  * @txrx_soc: cdp soc handle
8759  * @pdev_id: id of physical device object
8760  * @val: reo destination ring index (1 - 4)
8761  *
8762  * Return: QDF_STATUS
8763  */
8764 static QDF_STATUS
8765 dp_set_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id,
8766 		     enum cdp_host_reo_dest_ring val)
8767 {
8768 	struct dp_pdev *pdev =
8769 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
8770 						   pdev_id);
8771 
8772 	if (pdev) {
8773 		pdev->reo_dest = val;
8774 		return QDF_STATUS_SUCCESS;
8775 	}
8776 
8777 	return QDF_STATUS_E_FAILURE;
8778 }
8779 
8780 /**
8781  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
8782  * @txrx_soc: cdp soc handle
8783  * @pdev_id: id of physical device object
8784  *
8785  * Return: reo destination ring index
8786  */
8787 static enum cdp_host_reo_dest_ring
8788 dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id)
8789 {
8790 	struct dp_pdev *pdev =
8791 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
8792 						   pdev_id);
8793 
8794 	if (pdev)
8795 		return pdev->reo_dest;
8796 	else
8797 		return cdp_host_reo_dest_ring_unknown;
8798 }
8799 
8800 #ifdef WLAN_SUPPORT_MSCS
8801 /**
8802  * dp_record_mscs_params() - Record MSCS parameters sent by the STA in
8803  * the MSCS Request to the AP.
8804  * @soc_hdl: Datapath soc handle
8805  * @peer_mac: STA Mac address
8806  * @vdev_id: ID of the vdev handle
8807  * @mscs_params: Structure having MSCS parameters obtained
8808  * from handshake
8809  * @active: Flag to set MSCS active/inactive
8810  *
 * The AP records these parameters and compares them against the MSDUs
 * sent by the STA, so that downlink traffic is sent with the correct
 * user priority.
8814  *
8815  * Return: QDF_STATUS - Success/Invalid
8816  */
8817 static QDF_STATUS
8818 dp_record_mscs_params(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
8819 		      uint8_t vdev_id, struct cdp_mscs_params *mscs_params,
8820 		      bool active)
8821 {
8822 	struct dp_peer *peer;
8823 	QDF_STATUS status = QDF_STATUS_E_INVAL;
8824 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8825 
8826 	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
8827 				      DP_MOD_ID_CDP);
8828 
8829 	if (!peer) {
8830 		dp_err("Peer is NULL!");
8831 		goto fail;
8832 	}
8833 	if (!active) {
8834 		dp_info("MSCS Procedure is terminated");
8835 		peer->mscs_active = active;
8836 		goto fail;
8837 	}
8838 
8839 	if (mscs_params->classifier_type == IEEE80211_TCLAS_MASK_CLA_TYPE_4) {
8840 		/* Populate entries inside IPV4 database first */
8841 		peer->mscs_ipv4_parameter.user_priority_bitmap =
8842 			mscs_params->user_pri_bitmap;
8843 		peer->mscs_ipv4_parameter.user_priority_limit =
8844 			mscs_params->user_pri_limit;
8845 		peer->mscs_ipv4_parameter.classifier_mask =
8846 			mscs_params->classifier_mask;
8847 
8848 		/* Populate entries inside IPV6 database */
8849 		peer->mscs_ipv6_parameter.user_priority_bitmap =
8850 			mscs_params->user_pri_bitmap;
8851 		peer->mscs_ipv6_parameter.user_priority_limit =
8852 			mscs_params->user_pri_limit;
8853 		peer->mscs_ipv6_parameter.classifier_mask =
8854 			mscs_params->classifier_mask;
8855 		peer->mscs_active = 1;
8856 		dp_info("\n\tMSCS Procedure request based parameters for "QDF_MAC_ADDR_FMT"\n"
8857 			"\tClassifier_type = %d\tUser priority bitmap = %x\n"
8858 			"\tUser priority limit = %x\tClassifier mask = %x",
8859 			QDF_MAC_ADDR_REF(peer_mac),
8860 			mscs_params->classifier_type,
8861 			peer->mscs_ipv4_parameter.user_priority_bitmap,
8862 			peer->mscs_ipv4_parameter.user_priority_limit,
8863 			peer->mscs_ipv4_parameter.classifier_mask);
8864 	}
8865 
8866 	status = QDF_STATUS_SUCCESS;
8867 fail:
8868 	if (peer)
8869 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8870 	return status;
8871 }
8872 #endif
8873 
8874 /**
8875  * dp_get_sec_type() - Get the security type
8876  * @soc: soc handle
8877  * @vdev_id: id of dp handle
8878  * @peer_mac: mac of datapath PEER handle
 * @sec_idx: Security id (mcast, ucast)
 *
 * Return: sec_type - Security type
8882  */
8883 static int dp_get_sec_type(struct cdp_soc_t *soc, uint8_t vdev_id,
8884 			   uint8_t *peer_mac, uint8_t sec_idx)
8885 {
8886 	int sec_type = 0;
8887 	struct dp_peer *peer =
8888 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
8889 						       peer_mac, 0, vdev_id,
8890 						       DP_MOD_ID_CDP);
8891 
8892 	if (!peer) {
8893 		dp_cdp_err("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
8894 		return sec_type;
8895 	}
8896 
8897 	if (!peer->txrx_peer) {
8898 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8899 		dp_peer_debug("%pK: txrx peer is NULL!\n", soc);
8900 		return sec_type;
8901 	}
8902 	sec_type = peer->txrx_peer->security[sec_idx].sec_type;
8903 
8904 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8905 	return sec_type;
8906 }
8907 
8908 /**
8909  * dp_peer_authorize() - authorize txrx peer
8910  * @soc_hdl: soc handle
8911  * @vdev_id: id of dp handle
8912  * @peer_mac: mac of datapath PEER handle
8913  * @authorize: flag to authorize (nonzero) or deauthorize (zero) the peer
8914  *
8915  * Return: QDF_STATUS
8916  *
8917  */
8918 static QDF_STATUS
8919 dp_peer_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8920 		  uint8_t *peer_mac, uint32_t authorize)
8921 {
8922 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8923 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8924 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac,
8925 							      0, vdev_id,
8926 							      DP_MOD_ID_CDP);
8927 
8928 	if (!peer) {
8929 		dp_cdp_debug("%pK: Peer is NULL!\n", soc);
8930 		status = QDF_STATUS_E_FAILURE;
8931 	} else {
8932 		peer->authorize = authorize ? 1 : 0;
8933 		if (peer->txrx_peer)
8934 			peer->txrx_peer->authorize = peer->authorize;
8935 
8936 		if (!peer->authorize)
8937 			dp_peer_flush_frags(soc_hdl, vdev_id, peer_mac);
8938 
8939 		dp_mlo_peer_authorize(soc, peer);
8940 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8941 	}
8942 
8943 	return status;
8944 }
8945 
8946 /**
8947  * dp_peer_get_authorize() - get peer authorize status
8948  * @soc_hdl: soc handle
8949  * @vdev_id: id of dp handle
8950  * @peer_mac: mac of datapath PEER handle
8951  *
8952  * Return: true if peer is authorized, false otherwise
8953  */
8954 static bool
8955 dp_peer_get_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8956 		      uint8_t *peer_mac)
8957 {
8958 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8959 	bool authorize = false;
8960 	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
8961 						      0, vdev_id,
8962 						      DP_MOD_ID_CDP);
8963 
8964 	if (!peer) {
8965 		dp_cdp_debug("%pK: Peer is NULL!\n", soc);
8966 		return authorize;
8967 	}
8968 
8969 	authorize = peer->authorize;
8970 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8971 
8972 	return authorize;
8973 }
8974 
8975 void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev,
8976 			  enum dp_mod_id mod_id)
8977 {
8978 	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
8979 	void *vdev_delete_context = NULL;
8980 	uint8_t vdev_id = vdev->vdev_id;
8981 	struct dp_pdev *pdev = vdev->pdev;
8982 	struct dp_vdev *tmp_vdev = NULL;
8983 	uint8_t found = 0;
8984 
8985 	QDF_ASSERT(qdf_atomic_dec_return(&vdev->mod_refs[mod_id]) >= 0);
8986 
8987 	/* Return if this is not the last reference*/
8988 	if (!qdf_atomic_dec_and_test(&vdev->ref_cnt))
8989 		return;
8990 
8991 	/*
8992 	 * delete.pending must already be set, since the last reference
8993 	 * is expected to be released only after cdp_vdev_detach() is called.
8994 	 *
8995 	 * If this assert is hit, there is a reference-count issue.
8996 	 */
8997 	QDF_ASSERT(vdev->delete.pending);
8998 
8999 	vdev_delete_cb = vdev->delete.callback;
9000 	vdev_delete_context = vdev->delete.context;
9001 
9002 	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")- its last peer is done",
9003 		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
9004 
9005 	if (wlan_op_mode_monitor == vdev->opmode) {
9006 		dp_monitor_vdev_delete(soc, vdev);
9007 		goto free_vdev;
9008 	}
9009 
9010 	/* all peers are gone, go ahead and delete it */
9011 	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
9012 			FLOW_TYPE_VDEV, vdev_id);
9013 	dp_tx_vdev_detach(vdev);
9014 	dp_monitor_vdev_detach(vdev);
9015 
9016 free_vdev:
9017 	qdf_spinlock_destroy(&vdev->peer_list_lock);
9018 
9019 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
9020 	TAILQ_FOREACH(tmp_vdev, &soc->inactive_vdev_list,
9021 		      inactive_list_elem) {
9022 		if (tmp_vdev == vdev) {
9023 			found = 1;
9024 			break;
9025 		}
9026 	}
9027 	if (found)
9028 		TAILQ_REMOVE(&soc->inactive_vdev_list, vdev,
9029 			     inactive_list_elem);
9030 	/* delete this vdev from the list */
9031 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
9032 
9033 	dp_cfg_event_record_vdev_evt(soc, DP_CFG_EVENT_VDEV_UNREF_DEL,
9034 				     vdev);
9035 	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")",
9036 		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
9037 	wlan_minidump_remove(vdev, sizeof(*vdev), soc->ctrl_psoc,
9038 			     WLAN_MD_DP_VDEV, "dp_vdev");
9039 	qdf_mem_free(vdev);
9040 	vdev = NULL;
9041 
9042 	if (vdev_delete_cb)
9043 		vdev_delete_cb(vdev_delete_context);
9044 }
9045 
9046 qdf_export_symbol(dp_vdev_unref_delete);
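
/*
 * Reference-counting sketch (illustrative): every successful
 * dp_vdev_get_ref_by_id() must be paired with a dp_vdev_unref_delete()
 * using the same module id. The vdev is actually freed only once the
 * last reference drops and delete.pending was set via vdev detach.
 *
 *	struct dp_vdev *vdev;
 *
 *	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
 *	if (!vdev)
 *		return QDF_STATUS_E_FAILURE;
 *	... use vdev ...
 *	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
 */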
9047 
9048 void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id mod_id)
9049 {
9050 	struct dp_vdev *vdev = peer->vdev;
9051 	struct dp_pdev *pdev = vdev->pdev;
9052 	struct dp_soc *soc = pdev->soc;
9053 	uint16_t peer_id;
9054 	struct dp_peer *tmp_peer;
9055 	bool found = false;
9056 
9057 	if (mod_id > DP_MOD_ID_RX)
9058 		QDF_ASSERT(qdf_atomic_dec_return(&peer->mod_refs[mod_id]) >= 0);
9059 
9060 	/*
9061 	 * Hold the lock all the way from checking if the peer ref count
9062 	 * is zero until the peer references are removed from the hash
9063 	 * table and vdev list (if the peer ref count is zero).
9064 	 * This protects against a new HL tx operation starting to use the
9065 	 * peer object just after this function concludes it's done being used.
9066 	 * Furthermore, the lock needs to be held while checking whether the
9067 	 * vdev's list of peers is empty, to make sure that list is not modified
9068 	 * concurrently with the empty check.
9069 	 */
9070 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
9071 		peer_id = peer->peer_id;
9072 
9073 		/*
9074 		 * Make sure that the reference to the peer in
9075 		 * peer object map is removed
9076 		 */
9077 		QDF_ASSERT(peer_id == HTT_INVALID_PEER);
9078 
9079 		dp_peer_info("Deleting peer %pK ("QDF_MAC_ADDR_FMT")", peer,
9080 			     QDF_MAC_ADDR_REF(peer->mac_addr.raw));
9081 
9082 		dp_peer_sawf_ctx_free(soc, peer);
9083 
9084 		wlan_minidump_remove(peer, sizeof(*peer), soc->ctrl_psoc,
9085 				     WLAN_MD_DP_PEER, "dp_peer");
9086 
9087 		qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
9088 		TAILQ_FOREACH(tmp_peer, &soc->inactive_peer_list,
9089 			      inactive_list_elem) {
9090 			if (tmp_peer == peer) {
9091 				found = 1;
9092 				break;
9093 			}
9094 		}
9095 		if (found)
9096 			TAILQ_REMOVE(&soc->inactive_peer_list, peer,
9097 				     inactive_list_elem);
9098 		/* delete this peer from the list */
9099 		qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
9100 		DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
9101 		dp_peer_update_state(soc, peer, DP_PEER_STATE_FREED);
9102 
9103 		/* cleanup the peer data */
9104 		dp_peer_cleanup(vdev, peer);
9105 
9106 		if (!IS_MLO_DP_MLD_PEER(peer))
9107 			dp_monitor_peer_detach(soc, peer);
9108 
9109 		qdf_spinlock_destroy(&peer->peer_state_lock);
9110 
9111 		dp_txrx_peer_detach(soc, peer);
9112 		dp_cfg_event_record_peer_evt(soc, DP_CFG_EVENT_PEER_UNREF_DEL,
9113 					     peer, vdev, 0);
9114 		qdf_mem_free(peer);
9115 
9116 		/*
9117 		 * Decrement ref count taken at peer create
9118 		 */
9119 		dp_peer_info("Deleted peer. Unref vdev %pK, vdev_ref_cnt %d",
9120 			     vdev, qdf_atomic_read(&vdev->ref_cnt));
9121 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CHILD);
9122 	}
9123 }
9124 
9125 qdf_export_symbol(dp_peer_unref_delete);
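
/*
 * The same pairing rule applies to peers (illustrative sketch): a
 * reference obtained from any dp_peer_*_find API pins the peer until the
 * matching unref; the teardown above runs only on the last reference.
 *
 *	struct dp_peer *peer;
 *
 *	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
 *	if (!peer)
 *		return;
 *	... read peer state ...
 *	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 */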
9126 
9127 void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle,
9128 			       enum dp_mod_id mod_id)
9129 {
9130 	dp_peer_unref_delete((struct dp_peer *)handle, mod_id);
9131 }
9132 
9133 qdf_export_symbol(dp_txrx_peer_unref_delete);
9134 
9135 /**
9136  * dp_peer_delete_wifi3() - Delete txrx peer
9137  * @soc_hdl: soc handle
9138  * @vdev_id: id of dp handle
9139  * @peer_mac: mac of datapath PEER handle
9140  * @bitmap: bitmap indicating special handling of request.
9141  * @peer_type: peer type (link or MLD)
9142  * Return: QDF_STATUS
9143  */
9144 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
9145 				       uint8_t vdev_id,
9146 				       uint8_t *peer_mac, uint32_t bitmap,
9147 				       enum cdp_peer_type peer_type)
9148 {
9149 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9150 	struct dp_peer *peer;
9151 	struct cdp_peer_info peer_info = { 0 };
9152 	struct dp_vdev *vdev = NULL;
9153 
9154 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac,
9155 				 false, peer_type);
9156 	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
9157 
9158 	/* Peer can be null for monitor vap mac address */
9159 	if (!peer) {
9160 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
9161 			  "%s: Invalid peer\n", __func__);
9162 		return QDF_STATUS_E_FAILURE;
9163 	}
9164 
9165 	if (!peer->valid) {
9166 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9167 		dp_err("Invalid peer: "QDF_MAC_ADDR_FMT,
9168 			QDF_MAC_ADDR_REF(peer_mac));
9169 		return QDF_STATUS_E_ALREADY;
9170 	}
9171 
9172 	vdev = peer->vdev;
9173 
9174 	if (!vdev) {
9175 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9176 		return QDF_STATUS_E_FAILURE;
9177 	}
9178 
9179 	peer->valid = 0;
9180 
9181 	dp_cfg_event_record_peer_evt(soc, DP_CFG_EVENT_PEER_DELETE, peer,
9182 				     vdev, 0);
9183 	dp_init_info("%pK: peer %pK (" QDF_MAC_ADDR_FMT ") pending-refs %d",
9184 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
9185 		     qdf_atomic_read(&peer->ref_cnt));
9186 
9187 	dp_peer_rx_reo_shared_qaddr_delete(soc, peer);
9188 
9189 	dp_local_peer_id_free(peer->vdev->pdev, peer);
9190 
9191 	/* Drop all rx packets before deleting peer */
9192 	dp_clear_peer_internal(soc, peer);
9193 
9194 	qdf_spinlock_destroy(&peer->peer_info_lock);
9195 	dp_peer_multipass_list_remove(peer);
9196 
9197 	/* remove the reference to the peer from the hash table */
9198 	dp_peer_find_hash_remove(soc, peer);
9199 
9200 	dp_peer_vdev_list_remove(soc, vdev, peer);
9201 
9202 	dp_peer_mlo_delete(peer);
9203 
9204 	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
9205 	TAILQ_INSERT_TAIL(&soc->inactive_peer_list, peer,
9206 			  inactive_list_elem);
9207 	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
9208 
9209 	/*
9210 	 * Remove the reference added during peer_attach.
9211 	 * The peer will still be left allocated until the
9212 	 * PEER_UNMAP message arrives to remove the other
9213 	 * reference, added by the PEER_MAP message.
9214 	 */
9215 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
9216 	/*
9217 	 * Remove the reference taken above
9218 	 */
9219 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9220 
9221 	return QDF_STATUS_SUCCESS;
9222 }
9223 
9224 #ifdef DP_RX_UDP_OVER_PEER_ROAM
9225 static QDF_STATUS dp_update_roaming_peer_wifi3(struct cdp_soc_t *soc_hdl,
9226 					       uint8_t vdev_id,
9227 					       uint8_t *peer_mac,
9228 					       uint32_t auth_status)
9229 {
9230 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9231 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9232 						     DP_MOD_ID_CDP);
9233 	if (!vdev)
9234 		return QDF_STATUS_E_FAILURE;
9235 
9236 	vdev->roaming_peer_status = auth_status;
9237 	qdf_mem_copy(vdev->roaming_peer_mac.raw, peer_mac,
9238 		     QDF_MAC_ADDR_SIZE);
9239 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9240 
9241 	return QDF_STATUS_SUCCESS;
9242 }
9243 #endif
9244 /**
9245  * dp_get_vdev_mac_addr_wifi3() - Get vdev MAC address
9246  * @soc_hdl: Datapath soc handle
9247  * @vdev_id: virtual interface id
9248  *
9249  * Return: MAC address on success, NULL on failure.
9250  *
9251  */
9252 static uint8_t *dp_get_vdev_mac_addr_wifi3(struct cdp_soc_t *soc_hdl,
9253 					   uint8_t vdev_id)
9254 {
9255 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9256 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9257 						     DP_MOD_ID_CDP);
9258 	uint8_t *mac = NULL;
9259 
9260 	if (!vdev)
9261 		return NULL;
9262 
9263 	mac = vdev->mac_addr.raw;
9264 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9265 
9266 	return mac;
9267 }
9268 
9269 /**
9270  * dp_vdev_set_wds() - Enable/disable WDS on the vdev
9271  * @soc_hdl: DP soc handle
9272  * @vdev_id: id of DP VDEV handle
9273  * @val: WDS enable value (nonzero to enable)
9274  *
9275  * Return: QDF_STATUS
9276  */
9277 static int dp_vdev_set_wds(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
9278 			   uint32_t val)
9279 {
9280 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9281 	struct dp_vdev *vdev =
9282 		dp_vdev_get_ref_by_id((struct dp_soc *)soc, vdev_id,
9283 				      DP_MOD_ID_CDP);
9284 
9285 	if (!vdev)
9286 		return QDF_STATUS_E_FAILURE;
9287 
9288 	vdev->wds_enabled = val;
9289 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9290 
9291 	return QDF_STATUS_SUCCESS;
9292 }
9293 
9294 static int dp_get_opmode(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
9295 {
9296 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9297 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9298 						     DP_MOD_ID_CDP);
9299 	int opmode;
9300 
9301 	if (!vdev) {
9302 		dp_err_rl("vdev for id %d is NULL", vdev_id);
9303 		return -EINVAL;
9304 	}
9305 	opmode = vdev->opmode;
9306 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9307 
9308 	return opmode;
9309 }
9310 
9311 /**
9312  * dp_get_os_rx_handles_from_vdev_wifi3() - Get os rx handles for a vdev
9313  * @soc_hdl: ol_txrx_soc_handle handle
9314  * @vdev_id: vdev id for which os rx handles are needed
9315  * @stack_fn_p: pointer to stack function pointer
9316  * @osif_vdev_p: pointer to ol_osif_vdev_handle
9317  *
9318  * Return: void
9319  */
9320 static
9321 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_soc_t *soc_hdl,
9322 					  uint8_t vdev_id,
9323 					  ol_txrx_rx_fp *stack_fn_p,
9324 					  ol_osif_vdev_handle *osif_vdev_p)
9325 {
9326 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9327 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9328 						     DP_MOD_ID_CDP);
9329 
9330 	if (qdf_unlikely(!vdev)) {
9331 		*stack_fn_p = NULL;
9332 		*osif_vdev_p = NULL;
9333 		return;
9334 	}
9335 	*stack_fn_p = vdev->osif_rx_stack;
9336 	*osif_vdev_p = vdev->osif_vdev;
9337 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9338 }
9339 
9340 /**
9341  * dp_get_ctrl_pdev_from_vdev_wifi3() - Get control pdev of vdev
9342  * @soc_hdl: datapath soc handle
9343  * @vdev_id: virtual device/interface id
9344  *
9345  * Return: Handle to control pdev
9346  */
9347 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(
9348 						struct cdp_soc_t *soc_hdl,
9349 						uint8_t vdev_id)
9350 {
9351 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9352 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9353 						     DP_MOD_ID_CDP);
9354 	struct dp_pdev *pdev;
9355 
9356 	if (!vdev)
9357 		return NULL;
9358 
9359 	pdev = vdev->pdev;
9360 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9361 	return pdev ? (struct cdp_cfg *)pdev->wlan_cfg_ctx : NULL;
9362 }
9363 
9364 /**
9365  * dp_get_tx_pending() - read pending tx
9366  * @pdev_handle: Datapath PDEV handle
9367  *
9368  * Return: outstanding tx
9369  */
9370 static int32_t dp_get_tx_pending(struct cdp_pdev *pdev_handle)
9371 {
9372 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
9373 
9374 	return qdf_atomic_read(&pdev->num_tx_outstanding);
9375 }
9376 
9377 /**
9378  * dp_get_peer_mac_from_peer_id() - get peer mac
9379  * @soc: CDP SoC handle
9380  * @peer_id: Peer ID
9381  * @peer_mac: MAC addr of PEER
9382  *
9383  * Return: QDF_STATUS
9384  */
9385 static QDF_STATUS dp_get_peer_mac_from_peer_id(struct cdp_soc_t *soc,
9386 					       uint32_t peer_id,
9387 					       uint8_t *peer_mac)
9388 {
9389 	struct dp_peer *peer;
9390 
9391 	if (soc && peer_mac) {
9392 		peer = dp_peer_get_ref_by_id((struct dp_soc *)soc,
9393 					     (uint16_t)peer_id,
9394 					     DP_MOD_ID_CDP);
9395 		if (peer) {
9396 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
9397 				     QDF_MAC_ADDR_SIZE);
9398 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9399 			return QDF_STATUS_SUCCESS;
9400 		}
9401 	}
9402 
9403 	return QDF_STATUS_E_FAILURE;
9404 }
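
/*
 * Usage sketch (illustrative): resolving a target-reported peer id back
 * to its MAC address; peer_id here is hypothetical.
 *
 *	uint8_t mac[QDF_MAC_ADDR_SIZE];
 *
 *	if (dp_get_peer_mac_from_peer_id(soc, peer_id, mac) ==
 *	    QDF_STATUS_SUCCESS)
 *		dp_info("peer mac "QDF_MAC_ADDR_FMT, QDF_MAC_ADDR_REF(mac));
 */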
9405 
9406 #ifdef MESH_MODE_SUPPORT
9407 static
9408 void dp_vdev_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
9409 {
9410 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
9411 
9412 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
9413 	vdev->mesh_vdev = val;
9414 	if (val)
9415 		vdev->skip_sw_tid_classification |=
9416 			DP_TX_MESH_ENABLED;
9417 	else
9418 		vdev->skip_sw_tid_classification &=
9419 			~DP_TX_MESH_ENABLED;
9420 }
9421 
9422 /**
9423  * dp_vdev_set_mesh_rx_filter() - to set the mesh rx filter
9424  * @vdev_hdl: virtual device object
9425  * @val: value to be set
9426  *
9427  * Return: void
9428  */
9429 static
9430 void dp_vdev_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
9431 {
9432 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
9433 
9434 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
9435 	vdev->mesh_rx_filter = val;
9436 }
9437 #endif
9438 
9439 /**
9440  * dp_vdev_set_hlos_tid_override() - to set hlos tid override
9441  * @vdev: virtual device object
9442  * @val: value to be set
9443  *
9444  * Return: void
9445  */
9446 static
9447 void dp_vdev_set_hlos_tid_override(struct dp_vdev *vdev, uint32_t val)
9448 {
9449 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
9450 	if (val)
9451 		vdev->skip_sw_tid_classification |=
9452 			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
9453 	else
9454 		vdev->skip_sw_tid_classification &=
9455 			~DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
9456 }
9457 
9458 /**
9459  * dp_vdev_get_hlos_tid_override() - to get hlos tid override flag
9460  * @vdev_hdl: virtual device object
9461  *
9462  * Return: 1 if this flag is set
9463  */
9464 static
9465 uint8_t dp_vdev_get_hlos_tid_override(struct cdp_vdev *vdev_hdl)
9466 {
9467 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
9468 
9469 	return !!(vdev->skip_sw_tid_classification &
9470 			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED);
9471 }
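
/*
 * Flag semantics sketch (illustrative): skip_sw_tid_classification is a
 * bitmap, so the mesh and HLOS TID override bits can be set and cleared
 * independently of each other:
 *
 *	dp_vdev_set_hlos_tid_override(vdev_hdl, 1);
 *	(DP_TXRX_HLOS_TID_OVERRIDE_ENABLED is now set;
 *	 DP_TX_MESH_ENABLED is left untouched)
 *	dp_vdev_get_hlos_tid_override(vdev_hdl);	returns 1
 */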
9472 
9473 #ifdef VDEV_PEER_PROTOCOL_COUNT
9474 static void dp_enable_vdev_peer_protocol_count(struct cdp_soc_t *soc_hdl,
9475 					       int8_t vdev_id,
9476 					       bool enable)
9477 {
9478 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9479 	struct dp_vdev *vdev;
9480 
9481 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
9482 	if (!vdev)
9483 		return;
9484 
9485 	dp_info("enable %d vdev_id %d", enable, vdev_id);
9486 	vdev->peer_protocol_count_track = enable;
9487 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9488 }
9489 
9490 static void dp_enable_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
9491 						   int8_t vdev_id,
9492 						   int drop_mask)
9493 {
9494 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9495 	struct dp_vdev *vdev;
9496 
9497 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
9498 	if (!vdev)
9499 		return;
9500 
9501 	dp_info("drop_mask %d vdev_id %d", drop_mask, vdev_id);
9502 	vdev->peer_protocol_count_dropmask = drop_mask;
9503 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9504 }
9505 
9506 static int dp_is_vdev_peer_protocol_count_enabled(struct cdp_soc_t *soc_hdl,
9507 						  int8_t vdev_id)
9508 {
9509 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9510 	struct dp_vdev *vdev;
9511 	int peer_protocol_count_track;
9512 
9513 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
9514 	if (!vdev)
9515 		return 0;
9516 
9517 	dp_info("enable %d vdev_id %d", vdev->peer_protocol_count_track,
9518 		vdev_id);
9519 	peer_protocol_count_track =
9520 		vdev->peer_protocol_count_track;
9521 
9522 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9523 	return peer_protocol_count_track;
9524 }
9525 
9526 static int dp_get_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
9527 					       int8_t vdev_id)
9528 {
9529 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9530 	struct dp_vdev *vdev;
9531 	int peer_protocol_count_dropmask;
9532 
9533 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
9534 	if (!vdev)
9535 		return 0;
9536 
9537 	dp_info("drop_mask %d vdev_id %d", vdev->peer_protocol_count_dropmask,
9538 		vdev_id);
9539 	peer_protocol_count_dropmask =
9540 		vdev->peer_protocol_count_dropmask;
9541 
9542 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9543 	return peer_protocol_count_dropmask;
9544 }
9545 
9546 #endif
9547 
9548 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data)
9549 {
9550 	uint8_t pdev_count;
9551 
9552 	for (pdev_count = 0; pdev_count < MAX_PDEV_CNT; pdev_count++) {
9553 		if (soc->pdev_list[pdev_count] &&
9554 		    soc->pdev_list[pdev_count] == data)
9555 			return true;
9556 	}
9557 	return false;
9558 }
9559 
9560 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
9561 	union hal_reo_status *reo_status)
9562 {
9563 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
9564 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
9565 
9566 	if (!dp_check_pdev_exists(soc, pdev)) {
9567 		dp_err_rl("pdev doesn't exist");
9568 		return;
9569 	}
9570 
9571 	if (!qdf_atomic_read(&soc->cmn_init_done))
9572 		return;
9573 
9574 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
9575 		DP_PRINT_STATS("REO stats failure %d",
9576 			       queue_status->header.status);
9577 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
9578 		return;
9579 	}
9580 
9581 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
9582 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
9583 
9584 }
9585 
9586 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
9587 			     struct cdp_vdev_stats *vdev_stats)
9588 {
9589 
9590 	if (!vdev || !vdev->pdev)
9591 		return;
9592 
9593 
9594 	dp_update_vdev_ingress_stats(vdev);
9595 
9596 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
9597 
9598 	dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, vdev_stats,
9599 			     DP_MOD_ID_GENERIC_STATS);
9600 
9601 	dp_update_vdev_rate_stats(vdev_stats, &vdev->stats);
9602 
9603 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9604 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
9605 			     vdev_stats, vdev->vdev_id,
9606 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
9607 #endif
9608 }
9609 
9610 void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
9611 {
9612 	struct dp_vdev *vdev = NULL;
9613 	struct dp_soc *soc;
9614 	struct cdp_vdev_stats *vdev_stats =
9615 			qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
9616 
9617 	if (!vdev_stats) {
9618 		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
9619 			   pdev->soc);
9620 		return;
9621 	}
9622 
9623 	soc = pdev->soc;
9624 
9625 	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
9626 	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
9627 	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
9628 	qdf_mem_zero(&pdev->stats.rx_i, sizeof(pdev->stats.rx_i));
9629 
9630 	if (dp_monitor_is_enable_mcopy_mode(pdev))
9631 		dp_monitor_invalid_peer_update_pdev_stats(soc, pdev);
9632 
9633 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
9634 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
9635 
9636 		dp_aggregate_vdev_stats(vdev, vdev_stats);
9637 		dp_update_pdev_stats(pdev, vdev_stats);
9638 		dp_update_pdev_ingress_stats(pdev, vdev);
9639 	}
9640 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
9641 	qdf_mem_free(vdev_stats);
9642 
9643 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9644 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
9645 			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
9646 #endif
9647 }
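
/*
 * Aggregation flow sketch (illustrative): pdev stats are rebuilt from
 * scratch on every call - the tx/rx blocks are zeroed first, then each
 * vdev on the pdev is folded in, which in turn iterates that vdev's
 * peers, roughly:
 *
 *	for each vdev in pdev->vdev_list:
 *		dp_aggregate_vdev_stats(vdev, vdev_stats)
 *			-> dp_update_vdev_ingress_stats(vdev)
 *			-> dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, ..)
 *		dp_update_pdev_stats(pdev, vdev_stats)
 *		dp_update_pdev_ingress_stats(pdev, vdev)
 */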
9648 
9649 /**
9650  * dp_vdev_getstats() - get vdev packet level stats
9651  * @vdev_handle: Datapath VDEV handle
9652  * @stats: cdp network device stats structure
9653  *
9654  * Return: QDF_STATUS
9655  */
9656 static QDF_STATUS dp_vdev_getstats(struct cdp_vdev *vdev_handle,
9657 				   struct cdp_dev_stats *stats)
9658 {
9659 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
9660 	struct dp_pdev *pdev;
9661 	struct dp_soc *soc;
9662 	struct cdp_vdev_stats *vdev_stats;
9663 
9664 	if (!vdev)
9665 		return QDF_STATUS_E_FAILURE;
9666 
9667 	pdev = vdev->pdev;
9668 	if (!pdev)
9669 		return QDF_STATUS_E_FAILURE;
9670 
9671 	soc = pdev->soc;
9672 
9673 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
9674 
9675 	if (!vdev_stats) {
9676 		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
9677 			   soc);
9678 		return QDF_STATUS_E_FAILURE;
9679 	}
9680 
9681 	dp_aggregate_vdev_stats(vdev, vdev_stats);
9682 
9683 	stats->tx_packets = vdev_stats->tx.comp_pkt.num;
9684 	stats->tx_bytes = vdev_stats->tx.comp_pkt.bytes;
9685 
9686 	stats->tx_errors = vdev_stats->tx.tx_failed;
9687 	stats->tx_dropped = vdev_stats->tx_i.dropped.dropped_pkt.num +
9688 			    vdev_stats->tx_i.sg.dropped_host.num +
9689 			    vdev_stats->tx_i.mcast_en.dropped_map_error +
9690 			    vdev_stats->tx_i.mcast_en.dropped_self_mac +
9691 			    vdev_stats->tx_i.mcast_en.dropped_send_fail +
9692 			    vdev_stats->tx.nawds_mcast_drop;
9693 
9694 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
9695 		stats->rx_packets = vdev_stats->rx.to_stack.num;
9696 		stats->rx_bytes = vdev_stats->rx.to_stack.bytes;
9697 	} else {
9698 		stats->rx_packets = vdev_stats->rx_i.reo_rcvd_pkt.num +
9699 				    vdev_stats->rx_i.null_q_desc_pkt.num +
9700 				    vdev_stats->rx_i.routed_eapol_pkt.num;
9701 		stats->rx_bytes = vdev_stats->rx_i.reo_rcvd_pkt.bytes +
9702 				  vdev_stats->rx_i.null_q_desc_pkt.bytes +
9703 				  vdev_stats->rx_i.routed_eapol_pkt.bytes;
9704 	}
9705 
9706 	stats->rx_errors = vdev_stats->rx.err.mic_err +
9707 			   vdev_stats->rx.err.decrypt_err +
9708 			   vdev_stats->rx.err.fcserr +
9709 			   vdev_stats->rx.err.pn_err +
9710 			   vdev_stats->rx.err.oor_err +
9711 			   vdev_stats->rx.err.jump_2k_err +
9712 			   vdev_stats->rx.err.rxdma_wifi_parse_err;
9713 
9714 	stats->rx_dropped = vdev_stats->rx.mec_drop.num +
9715 			    vdev_stats->rx.multipass_rx_pkt_drop +
9716 			    vdev_stats->rx.peer_unauth_rx_pkt_drop +
9717 			    vdev_stats->rx.policy_check_drop +
9718 			    vdev_stats->rx.nawds_mcast_drop +
9719 			    vdev_stats->rx.mcast_3addr_drop;
9720 
9721 	qdf_mem_free(vdev_stats);
9722 
9723 	return QDF_STATUS_SUCCESS;
9724 }
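
/*
 * Mapping note (illustrative): the non-obvious branch above is the rx
 * side - when vdev stats are HW-offloaded, rx.to_stack is not maintained
 * by the host, so the ingress counters are summed instead:
 *
 *	rx_packets = rx_i.reo_rcvd_pkt.num
 *		   + rx_i.null_q_desc_pkt.num
 *		   + rx_i.routed_eapol_pkt.num
 */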
9725 
9726 /**
9727  * dp_pdev_getstats() - get pdev packet level stats
9728  * @pdev_handle: Datapath PDEV handle
9729  * @stats: cdp network device stats structure
9730  *
9731  * Return: none
9732  */
9733 static void dp_pdev_getstats(struct cdp_pdev *pdev_handle,
9734 			     struct cdp_dev_stats *stats)
9735 {
9736 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
9737 
9738 	dp_aggregate_pdev_stats(pdev);
9739 
9740 	stats->tx_packets = pdev->stats.tx.comp_pkt.num;
9741 	stats->tx_bytes = pdev->stats.tx.comp_pkt.bytes;
9742 
9743 	stats->tx_errors = pdev->stats.tx.tx_failed;
9744 	stats->tx_dropped = pdev->stats.tx_i.dropped.dropped_pkt.num +
9745 			    pdev->stats.tx_i.sg.dropped_host.num +
9746 			    pdev->stats.tx_i.mcast_en.dropped_map_error +
9747 			    pdev->stats.tx_i.mcast_en.dropped_self_mac +
9748 			    pdev->stats.tx_i.mcast_en.dropped_send_fail +
9749 			    pdev->stats.tx.nawds_mcast_drop +
9750 			    pdev->stats.tso_stats.dropped_host.num;
9751 
9752 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(pdev->soc->wlan_cfg_ctx)) {
9753 		stats->rx_packets = pdev->stats.rx.to_stack.num;
9754 		stats->rx_bytes = pdev->stats.rx.to_stack.bytes;
9755 	} else {
9756 		stats->rx_packets = pdev->stats.rx_i.reo_rcvd_pkt.num +
9757 				    pdev->stats.rx_i.null_q_desc_pkt.num +
9758 				    pdev->stats.rx_i.routed_eapol_pkt.num;
9759 		stats->rx_bytes = pdev->stats.rx_i.reo_rcvd_pkt.bytes +
9760 				  pdev->stats.rx_i.null_q_desc_pkt.bytes +
9761 				  pdev->stats.rx_i.routed_eapol_pkt.bytes;
9762 	}
9763 
9764 	stats->rx_errors = pdev->stats.err.ip_csum_err +
9765 		pdev->stats.err.tcp_udp_csum_err +
9766 		pdev->stats.rx.err.mic_err +
9767 		pdev->stats.rx.err.decrypt_err +
9768 		pdev->stats.rx.err.fcserr +
9769 		pdev->stats.rx.err.pn_err +
9770 		pdev->stats.rx.err.oor_err +
9771 		pdev->stats.rx.err.jump_2k_err +
9772 		pdev->stats.rx.err.rxdma_wifi_parse_err;
9773 	stats->rx_dropped = pdev->stats.dropped.msdu_not_done +
9774 		pdev->stats.dropped.mec +
9775 		pdev->stats.dropped.mesh_filter +
9776 		pdev->stats.dropped.wifi_parse +
9777 		pdev->stats.dropped.mon_rx_drop +
9778 		pdev->stats.dropped.mon_radiotap_update_err +
9779 		pdev->stats.rx.mec_drop.num +
9780 		pdev->stats.rx.multipass_rx_pkt_drop +
9781 		pdev->stats.rx.peer_unauth_rx_pkt_drop +
9782 		pdev->stats.rx.policy_check_drop +
9783 		pdev->stats.rx.nawds_mcast_drop +
9784 		pdev->stats.rx.mcast_3addr_drop;
9785 }
9786 
9787 /**
9788  * dp_get_device_stats() - get interface level packet stats
9789  * @soc_hdl: soc handle
9790  * @id: vdev_id or pdev_id based on type
9791  * @stats: cdp network device stats structure
9792  * @type: device type pdev/vdev
9793  *
9794  * Return: QDF_STATUS
9795  */
9796 static QDF_STATUS dp_get_device_stats(struct cdp_soc_t *soc_hdl, uint8_t id,
9797 				      struct cdp_dev_stats *stats,
9798 				      uint8_t type)
9799 {
9800 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9801 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
9802 	struct dp_vdev *vdev;
9803 
9804 	switch (type) {
9805 	case UPDATE_VDEV_STATS:
9806 		vdev = dp_vdev_get_ref_by_id(soc, id, DP_MOD_ID_CDP);
9807 
9808 		if (vdev) {
9809 			status = dp_vdev_getstats((struct cdp_vdev *)vdev,
9810 						  stats);
9811 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9812 		}
9813 		return status;
9814 	case UPDATE_PDEV_STATS:
9815 		{
9816 			struct dp_pdev *pdev =
9817 				dp_get_pdev_from_soc_pdev_id_wifi3(
9818 						(struct dp_soc *)soc,
9819 						 id);
9820 			if (pdev) {
9821 				dp_pdev_getstats((struct cdp_pdev *)pdev,
9822 						 stats);
9823 				return QDF_STATUS_SUCCESS;
9824 			}
9825 		}
9826 		break;
9827 	default:
9828 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9829 			"apstats cannot be updated for this input "
9830 			"type %d", type);
9831 		break;
9832 	}
9833 
9834 	return QDF_STATUS_E_FAILURE;
9835 }
9836 
9837 const
9838 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
9839 {
9840 	switch (ring_type) {
9841 	case REO_DST:
9842 		return "Reo_dst";
9843 	case REO_EXCEPTION:
9844 		return "Reo_exception";
9845 	case REO_CMD:
9846 		return "Reo_cmd";
9847 	case REO_REINJECT:
9848 		return "Reo_reinject";
9849 	case REO_STATUS:
9850 		return "Reo_status";
9851 	case WBM2SW_RELEASE:
9852 		return "wbm2sw_release";
9853 	case TCL_DATA:
9854 		return "tcl_data";
9855 	case TCL_CMD_CREDIT:
9856 		return "tcl_cmd_credit";
9857 	case TCL_STATUS:
9858 		return "tcl_status";
9859 	case SW2WBM_RELEASE:
9860 		return "sw2wbm_release";
9861 	case RXDMA_BUF:
9862 		return "Rxdma_buf";
9863 	case RXDMA_DST:
9864 		return "Rxdma_dst";
9865 	case RXDMA_MONITOR_BUF:
9866 		return "Rxdma_monitor_buf";
9867 	case RXDMA_MONITOR_DESC:
9868 		return "Rxdma_monitor_desc";
9869 	case RXDMA_MONITOR_STATUS:
9870 		return "Rxdma_monitor_status";
9871 	case RXDMA_MONITOR_DST:
9872 		return "Rxdma_monitor_destination";
9873 	case WBM_IDLE_LINK:
9874 		return "WBM_hw_idle_link";
9875 	case PPE2TCL:
9876 		return "PPE2TCL";
9877 	case REO2PPE:
9878 		return "REO2PPE";
9879 	case TX_MONITOR_DST:
9880 		return "tx_monitor_destination";
9881 	case TX_MONITOR_BUF:
9882 		return "tx_monitor_buf";
9883 	default:
9884 		dp_err("Invalid ring type");
9885 		break;
9886 	}
9887 	return "Invalid";
9888 }
9889 
9890 void dp_print_napi_stats(struct dp_soc *soc)
9891 {
9892 	hif_print_napi_stats(soc->hif_handle);
9893 }
9894 
9895 /**
9896  * dp_txrx_host_peer_stats_clr() - Reinitialize the txrx peer stats
9897  * @soc: Datapath soc
9898  * @peer: Datapath peer
9899  * @arg: argument to iter function
9900  *
9901  * Return: none
9902  */
9903 static inline void
9904 dp_txrx_host_peer_stats_clr(struct dp_soc *soc,
9905 			    struct dp_peer *peer,
9906 			    void *arg)
9907 {
9908 	struct dp_txrx_peer *txrx_peer = NULL;
9909 	struct dp_peer *tgt_peer = NULL;
9910 	struct cdp_interface_peer_stats peer_stats_intf;
9911 
9912 	qdf_mem_zero(&peer_stats_intf, sizeof(struct cdp_interface_peer_stats));
9913 
9914 	DP_STATS_CLR(peer);
9915 	/* Clear monitor peer stats */
9916 	dp_monitor_peer_reset_stats(soc, peer);
9917 
9918 	/* Clear MLD peer stats only when link peer is primary */
9919 	if (dp_peer_is_primary_link_peer(peer)) {
9920 		tgt_peer = dp_get_tgt_peer_from_peer(peer);
9921 		if (tgt_peer) {
9922 			DP_STATS_CLR(tgt_peer);
9923 			txrx_peer = tgt_peer->txrx_peer;
9924 			dp_txrx_peer_stats_clr(txrx_peer);
9925 		}
9926 	}
9927 
9928 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9929 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, peer->vdev->pdev->soc,
9930 			     &peer_stats_intf,  peer->peer_id,
9931 			     UPDATE_PEER_STATS, peer->vdev->pdev->pdev_id);
9932 #endif
9933 }
9934 
9935 #ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING
9936 static inline void dp_srng_clear_ring_usage_wm_stats(struct dp_soc *soc)
9937 {
9938 	int ring;
9939 
9940 	for (ring = 0; ring < soc->num_reo_dest_rings; ring++)
9941 		hal_srng_clear_ring_usage_wm_locked(soc->hal_soc,
9942 					    soc->reo_dest_ring[ring].hal_srng);
9943 }
9944 #else
9945 static inline void dp_srng_clear_ring_usage_wm_stats(struct dp_soc *soc)
9946 {
9947 }
9948 #endif
9949 
9950 /**
9951  * dp_txrx_host_stats_clr() - Reinitialize the txrx stats
9952  * @vdev: DP_VDEV handle
9953  * @soc: DP_SOC handle
9954  *
9955  * Return: QDF_STATUS
9956  */
9957 static inline QDF_STATUS
9958 dp_txrx_host_stats_clr(struct dp_vdev *vdev, struct dp_soc *soc)
9959 {
9960 	if (!vdev || !vdev->pdev)
9961 		return QDF_STATUS_E_FAILURE;
9962 
9963 	/*
9964 	 * If NSS offload is enabled, first send a message to the NSS FW
9965 	 * to clear its statistics; the corresponding host statistics are
9966 	 * then cleared below.
9967 	 */
9968 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
9969 		if (soc->cdp_soc.ol_ops->nss_stats_clr)
9970 			soc->cdp_soc.ol_ops->nss_stats_clr(soc->ctrl_psoc,
9971 							   vdev->vdev_id);
9972 	}
9973 
9974 	dp_vdev_stats_hw_offload_target_clear(soc, vdev->pdev->pdev_id,
9975 					      (1 << vdev->vdev_id));
9976 
9977 	DP_STATS_CLR(vdev->pdev);
9978 	DP_STATS_CLR(vdev->pdev->soc);
9979 	DP_STATS_CLR(vdev);
9980 
9981 	hif_clear_napi_stats(vdev->pdev->soc->hif_handle);
9982 
9983 	dp_vdev_iterate_peer(vdev, dp_txrx_host_peer_stats_clr, NULL,
9984 			     DP_MOD_ID_GENERIC_STATS);
9985 
9986 	dp_srng_clear_ring_usage_wm_stats(soc);
9987 
9988 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9989 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
9990 			     &vdev->stats,  vdev->vdev_id,
9991 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
9992 #endif
9993 	return QDF_STATUS_SUCCESS;
9994 }
9995 
9996 /**
9997  * dp_get_peer_calibr_stats()- Get peer calibrated stats
9998  * @peer: Datapath peer
9999  * @peer_stats: buffer for peer stats
10000  *
10001  * Return: none
10002  */
10003 static inline
10004 void dp_get_peer_calibr_stats(struct dp_peer *peer,
10005 			      struct cdp_peer_stats *peer_stats)
10006 {
10007 	struct dp_peer *tgt_peer;
10008 
10009 	tgt_peer = dp_get_tgt_peer_from_peer(peer);
10010 	if (!tgt_peer)
10011 		return;
10012 
10013 	peer_stats->tx.last_per = tgt_peer->stats.tx.last_per;
10014 	peer_stats->tx.tx_bytes_success_last =
10015 				tgt_peer->stats.tx.tx_bytes_success_last;
10016 	peer_stats->tx.tx_data_success_last =
10017 					tgt_peer->stats.tx.tx_data_success_last;
10018 	peer_stats->tx.tx_byte_rate = tgt_peer->stats.tx.tx_byte_rate;
10019 	peer_stats->tx.tx_data_rate = tgt_peer->stats.tx.tx_data_rate;
10020 	peer_stats->tx.tx_data_ucast_last =
10021 					tgt_peer->stats.tx.tx_data_ucast_last;
10022 	peer_stats->tx.tx_data_ucast_rate =
10023 					tgt_peer->stats.tx.tx_data_ucast_rate;
10024 	peer_stats->tx.inactive_time = tgt_peer->stats.tx.inactive_time;
10025 	peer_stats->rx.rx_bytes_success_last =
10026 				tgt_peer->stats.rx.rx_bytes_success_last;
10027 	peer_stats->rx.rx_data_success_last =
10028 				tgt_peer->stats.rx.rx_data_success_last;
10029 	peer_stats->rx.rx_byte_rate = tgt_peer->stats.rx.rx_byte_rate;
10030 	peer_stats->rx.rx_data_rate = tgt_peer->stats.rx.rx_data_rate;
10031 }
10032 
10033 /**
10034  * dp_get_peer_basic_stats()- Get peer basic stats
10035  * @peer: Datapath peer
10036  * @peer_stats: buffer for peer stats
10037  *
10038  * Return: none
10039  */
10040 static inline
10041 void dp_get_peer_basic_stats(struct dp_peer *peer,
10042 			     struct cdp_peer_stats *peer_stats)
10043 {
10044 	struct dp_txrx_peer *txrx_peer;
10045 
10046 	txrx_peer = dp_get_txrx_peer(peer);
10047 	if (!txrx_peer)
10048 		return;
10049 
10050 	peer_stats->tx.comp_pkt.num += txrx_peer->comp_pkt.num;
10051 	peer_stats->tx.comp_pkt.bytes += txrx_peer->comp_pkt.bytes;
10052 	peer_stats->tx.tx_failed += txrx_peer->tx_failed;
10053 	peer_stats->rx.to_stack.num += txrx_peer->to_stack.num;
10054 	peer_stats->rx.to_stack.bytes += txrx_peer->to_stack.bytes;
10055 }
10056 
10057 #ifdef QCA_ENHANCED_STATS_SUPPORT
10058 /**
10059  * dp_get_peer_per_pkt_stats()- Get peer per pkt stats
10060  * @peer: Datapath peer
10061  * @peer_stats: buffer for peer stats
10062  *
10063  * Return: none
10064  */
10065 static inline
10066 void dp_get_peer_per_pkt_stats(struct dp_peer *peer,
10067 			       struct cdp_peer_stats *peer_stats)
10068 {
10069 	struct dp_txrx_peer *txrx_peer;
10070 	struct dp_peer_per_pkt_stats *per_pkt_stats;
10071 	uint8_t inx = 0, link_id = 0;
10072 	struct dp_pdev *pdev;
10073 	struct dp_soc *soc;
10074 	uint8_t stats_arr_size;
10075 
10076 	txrx_peer = dp_get_txrx_peer(peer);
10077 	pdev = peer->vdev->pdev;
10078 
10079 	if (!txrx_peer)
10080 		return;
10081 
10082 	if (!IS_MLO_DP_LINK_PEER(peer)) {
10083 		stats_arr_size = txrx_peer->stats_arr_size;
10084 		for (inx = 0; inx < stats_arr_size; inx++) {
10085 			per_pkt_stats = &txrx_peer->stats[inx].per_pkt_stats;
10086 			DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats);
10087 		}
10088 	} else {
10089 		soc = pdev->soc;
10090 		link_id = dp_get_peer_hw_link_id(soc, pdev);
10091 		per_pkt_stats =
10092 			&txrx_peer->stats[link_id].per_pkt_stats;
10093 		DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats);
10094 	}
10095 }
10096 
10097 #ifdef WLAN_FEATURE_11BE_MLO
10098 /**
10099  * dp_get_peer_extd_stats()- Get peer extd stats
10100  * @peer: Datapath peer
10101  * @peer_stats: buffer for peer stats
10102  *
10103  * Return: none
10104  */
10105 static inline
10106 void dp_get_peer_extd_stats(struct dp_peer *peer,
10107 			    struct cdp_peer_stats *peer_stats)
10108 {
10109 	struct dp_soc *soc = peer->vdev->pdev->soc;
10110 
10111 	if (IS_MLO_DP_MLD_PEER(peer)) {
10112 		uint8_t i;
10113 		struct dp_peer *link_peer;
10114 		struct dp_soc *link_peer_soc;
10115 		struct dp_mld_link_peers link_peers_info;
10116 
10117 		dp_get_link_peers_ref_from_mld_peer(soc, peer,
10118 						    &link_peers_info,
10119 						    DP_MOD_ID_CDP);
10120 		for (i = 0; i < link_peers_info.num_links; i++) {
10121 			link_peer = link_peers_info.link_peers[i];
10122 			link_peer_soc = link_peer->vdev->pdev->soc;
10123 			dp_monitor_peer_get_stats(link_peer_soc, link_peer,
10124 						  peer_stats,
10125 						  UPDATE_PEER_STATS);
10126 		}
10127 		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
10128 	} else {
10129 		dp_monitor_peer_get_stats(soc, peer, peer_stats,
10130 					  UPDATE_PEER_STATS);
10131 	}
10132 }
10133 #else
10134 static inline
10135 void dp_get_peer_extd_stats(struct dp_peer *peer,
10136 			    struct cdp_peer_stats *peer_stats)
10137 {
10138 	struct dp_soc *soc = peer->vdev->pdev->soc;
10139 
10140 	dp_monitor_peer_get_stats(soc, peer, peer_stats, UPDATE_PEER_STATS);
10141 }
10142 #endif
10143 #else
10144 static inline
10145 void dp_get_peer_per_pkt_stats(struct dp_peer *peer,
10146 			       struct cdp_peer_stats *peer_stats)
10147 {
10148 	struct dp_txrx_peer *txrx_peer;
10149 	struct dp_peer_per_pkt_stats *per_pkt_stats;
10150 
10151 	txrx_peer = dp_get_txrx_peer(peer);
10152 	if (!txrx_peer)
10153 		return;
10154 
10155 	per_pkt_stats = &txrx_peer->stats[0].per_pkt_stats;
10156 	DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats);
10157 }
10158 
10159 static inline
10160 void dp_get_peer_extd_stats(struct dp_peer *peer,
10161 			    struct cdp_peer_stats *peer_stats)
10162 {
10163 	struct dp_txrx_peer *txrx_peer;
10164 	struct dp_peer_extd_stats *extd_stats;
10165 
10166 	txrx_peer = dp_get_txrx_peer(peer);
10167 	if (qdf_unlikely(!txrx_peer)) {
10168 		dp_err_rl("txrx_peer NULL");
10169 		return;
10170 	}
10171 
10172 	extd_stats = &txrx_peer->stats[0].extd_stats;
10173 	DP_UPDATE_EXTD_STATS(peer_stats, extd_stats);
10174 }
10175 #endif
10176 
10177 /**
10178  * dp_get_peer_tx_per() - Get peer tx packet error rate (PER)
10179  * @peer_stats: buffer for peer stats
10180  *
10181  * Return: none
10182  */
10183 static inline
10184 void dp_get_peer_tx_per(struct cdp_peer_stats *peer_stats)
10185 {
10186 	if (peer_stats->tx.tx_success.num + peer_stats->tx.retries > 0)
10187 		peer_stats->tx.per = (peer_stats->tx.retries * 100) /
10188 				  (peer_stats->tx.tx_success.num +
10189 				   peer_stats->tx.retries);
10190 	else
10191 		peer_stats->tx.per = 0;
10192 }
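
/*
 * Worked example (illustrative numbers): with tx_success.num = 900 and
 * retries = 100 there are 1000 attempts in total, so
 *
 *	per = (100 * 100) / (900 + 100) = 10
 *
 * i.e. a 10% packet error rate. The guard above avoids a divide-by-zero
 * before any frame has been attempted.
 */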
10193 
10194 void dp_get_peer_stats(struct dp_peer *peer, struct cdp_peer_stats *peer_stats)
10195 {
10196 	dp_get_peer_calibr_stats(peer, peer_stats);
10197 
10198 	dp_get_peer_basic_stats(peer, peer_stats);
10199 
10200 	dp_get_peer_per_pkt_stats(peer, peer_stats);
10201 
10202 	dp_get_peer_extd_stats(peer, peer_stats);
10203 
10204 	dp_get_peer_tx_per(peer_stats);
10205 }
10206 
10207 /**
10208  * dp_get_host_peer_stats() - function to print peer stats
10209  * @soc: dp_soc handle
10210  * @mac_addr: mac address of the peer
10211  *
10212  * Return: QDF_STATUS
10213  */
10214 static QDF_STATUS
10215 dp_get_host_peer_stats(struct cdp_soc_t *soc, uint8_t *mac_addr)
10216 {
10217 	struct dp_peer *peer = NULL;
10218 	struct cdp_peer_stats *peer_stats = NULL;
10219 	struct cdp_peer_info peer_info = { 0 };
10220 
10221 	if (!mac_addr) {
10222 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
10223 			  "%s: NULL peer mac addr\n", __func__);
10224 		return QDF_STATUS_E_FAILURE;
10225 	}
10226 
10227 	DP_PEER_INFO_PARAMS_INIT(&peer_info, DP_VDEV_ALL, mac_addr, false,
10228 				 CDP_WILD_PEER_TYPE);
10229 
10230 	peer = dp_peer_hash_find_wrapper((struct dp_soc *)soc, &peer_info,
10231 					 DP_MOD_ID_CDP);
10232 	if (!peer) {
10233 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
10234 			  "%s: Invalid peer\n", __func__);
10235 		return QDF_STATUS_E_FAILURE;
10236 	}
10237 
10238 	peer_stats = qdf_mem_malloc(sizeof(struct cdp_peer_stats));
10239 	if (!peer_stats) {
10240 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
10241 			  "%s: Memory allocation failed for cdp_peer_stats\n",
10242 			  __func__);
10243 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10244 		return QDF_STATUS_E_NOMEM;
10245 	}
10246 
10247 	qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats));
10248 
10249 	dp_get_peer_stats(peer, peer_stats);
10250 	dp_print_peer_stats(peer, peer_stats);
10251 
10252 	dp_peer_rxtid_stats(dp_get_tgt_peer_from_peer(peer),
10253 			    dp_rx_tid_stats_cb, NULL);
10254 
10255 	qdf_mem_free(peer_stats);
10256 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10257 
10258 	return QDF_STATUS_SUCCESS;
10259 }
10260 
10261 /**
10262  * dp_dump_wbm_idle_hptp() - dump WBM idle link ring HW head/tail pointers
10263  * @soc: dp soc.
10264  * @pdev: dp pdev.
10265  *
10266  * Return: None.
10267  */
10268 static void
10269 dp_dump_wbm_idle_hptp(struct dp_soc *soc, struct dp_pdev *pdev)
10270 {
10271 	uint32_t hw_head;
10272 	uint32_t hw_tail;
10273 	struct dp_srng *srng;
10274 
10275 	if (!soc) {
10276 		dp_err("soc is NULL");
10277 		return;
10278 	}
10279 
10280 	if (!pdev) {
10281 		dp_err("pdev is NULL");
10282 		return;
10283 	}
10284 
10285 	srng = &pdev->soc->wbm_idle_link_ring;
10286 	if (!srng) {
10287 		dp_err("wbm_idle_link_ring srng is NULL");
10288 		return;
10289 	}
10290 
10291 	hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &hw_head,
10292 			&hw_tail, WBM_IDLE_LINK);
10293 
10294 	dp_debug("WBM_IDLE_LINK: HW hp: %d, HW tp: %d",
10295 			hw_head, hw_tail);
10296 }
10297 
10298 
10299 /**
10300  * dp_txrx_stats_help() - Print usage help for the txrx_stats command
10301  *
10302  * Return: None
10303  */
10304 static void dp_txrx_stats_help(void)
10305 {
10306 	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
10307 	dp_info("stats_option:");
10308 	dp_info("  1 -- HTT Tx Statistics");
10309 	dp_info("  2 -- HTT Rx Statistics");
10310 	dp_info("  3 -- HTT Tx HW Queue Statistics");
10311 	dp_info("  4 -- HTT Tx HW Sched Statistics");
10312 	dp_info("  5 -- HTT Error Statistics");
10313 	dp_info("  6 -- HTT TQM Statistics");
10314 	dp_info("  7 -- HTT TQM CMDQ Statistics");
10315 	dp_info("  8 -- HTT TX_DE_CMN Statistics");
10316 	dp_info("  9 -- HTT Tx Rate Statistics");
10317 	dp_info(" 10 -- HTT Rx Rate Statistics");
10318 	dp_info(" 11 -- HTT Peer Statistics");
10319 	dp_info(" 12 -- HTT Tx SelfGen Statistics");
10320 	dp_info(" 13 -- HTT Tx MU HWQ Statistics");
10321 	dp_info(" 14 -- HTT RING_IF_INFO Statistics");
10322 	dp_info(" 15 -- HTT SRNG Statistics");
10323 	dp_info(" 16 -- HTT SFM Info Statistics");
10324 	dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
10325 	dp_info(" 18 -- HTT Peer List Details");
10326 	dp_info(" 20 -- Clear Host Statistics");
10327 	dp_info(" 21 -- Host Rx Rate Statistics");
10328 	dp_info(" 22 -- Host Tx Rate Statistics");
10329 	dp_info(" 23 -- Host Tx Statistics");
10330 	dp_info(" 24 -- Host Rx Statistics");
10331 	dp_info(" 25 -- Host AST Statistics");
10332 	dp_info(" 26 -- Host SRNG PTR Statistics");
10333 	dp_info(" 27 -- Host Mon Statistics");
10334 	dp_info(" 28 -- Host REO Queue Statistics");
10335 	dp_info(" 29 -- Host Soc cfg param Statistics");
10336 	dp_info(" 30 -- Host pdev cfg param Statistics");
10337 	dp_info(" 31 -- Host NAPI stats");
10338 	dp_info(" 32 -- Host Interrupt stats");
10339 	dp_info(" 33 -- Host FISA stats");
10340 	dp_info(" 34 -- Host Register Work stats");
10341 	dp_info(" 35 -- HW REO Queue stats");
10342 	dp_info(" 36 -- Host WBM IDLE link desc ring HP/TP");
10343 	dp_info(" 37 -- Host SRNG usage watermark stats");
10344 }
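
/*
 * Example invocation (illustrative; the interface name is hypothetical):
 *
 *	iwpriv wlan0 txrx_stats 24 0	<- dump Host Rx Statistics, mac_id 0
 */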
10345 
10346 #ifdef DP_UMAC_HW_RESET_SUPPORT
10347 /**
10348  * dp_umac_rst_skel_enable_update() - Update skel dbg flag for umac reset
10349  * @soc: dp soc handle
10350  * @en: enable/disable
10351  *
10352  * Return: void
10353  */
10354 static void dp_umac_rst_skel_enable_update(struct dp_soc *soc, bool en)
10355 {
10356 	soc->umac_reset_ctx.skel_enable = en;
10357 	dp_cdp_debug("UMAC HW reset debug skeleton code enabled :%u",
10358 		     soc->umac_reset_ctx.skel_enable);
10359 }
10360 
10361 /**
10362  * dp_umac_rst_skel_enable_get() - Get skel dbg flag for umac reset
10363  * @soc: dp soc handle
10364  *
10365  * Return: enable/disable flag
10366  */
10367 static bool dp_umac_rst_skel_enable_get(struct dp_soc *soc)
10368 {
10369 	return soc->umac_reset_ctx.skel_enable;
10370 }
10371 #else
10372 static void dp_umac_rst_skel_enable_update(struct dp_soc *soc, bool en)
10373 {
10374 }
10375 
10376 static bool dp_umac_rst_skel_enable_get(struct dp_soc *soc)
10377 {
10378 	return false;
10379 }
10380 #endif
10381 
10382 /**
10383  * dp_print_host_stats() - Function to print the stats aggregated at host
10384  * @vdev: DP_VDEV handle
10385  * @req: host stats type
10386  * @soc: dp soc handle
10387  *
10388  * Return: 0 on success; prints an error message on invalid input
10389  */
10390 static int
10391 dp_print_host_stats(struct dp_vdev *vdev,
10392 		    struct cdp_txrx_stats_req *req,
10393 		    struct dp_soc *soc)
10394 {
10395 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
10396 	enum cdp_host_txrx_stats type =
10397 			dp_stats_mapping_table[req->stats][STATS_HOST];
10398 
10399 	dp_aggregate_pdev_stats(pdev);
10400 
10401 	switch (type) {
10402 	case TXRX_CLEAR_STATS:
10403 		dp_txrx_host_stats_clr(vdev, soc);
10404 		break;
10405 	case TXRX_RX_RATE_STATS:
10406 		dp_print_rx_rates(vdev);
10407 		break;
10408 	case TXRX_TX_RATE_STATS:
10409 		dp_print_tx_rates(vdev);
10410 		break;
10411 	case TXRX_TX_HOST_STATS:
10412 		dp_print_pdev_tx_stats(pdev);
10413 		dp_print_soc_tx_stats(pdev->soc);
10414 		dp_print_global_desc_count();
10415 		break;
10416 	case TXRX_RX_HOST_STATS:
10417 		dp_print_pdev_rx_stats(pdev);
10418 		dp_print_soc_rx_stats(pdev->soc);
10419 		break;
10420 	case TXRX_AST_STATS:
10421 		dp_print_ast_stats(pdev->soc);
10422 		dp_print_mec_stats(pdev->soc);
10423 		dp_print_peer_table(vdev);
10424 		break;
10425 	case TXRX_SRNG_PTR_STATS:
10426 		dp_print_ring_stats(pdev);
10427 		break;
10428 	case TXRX_RX_MON_STATS:
10429 		dp_monitor_print_pdev_rx_mon_stats(pdev);
10430 		break;
10431 	case TXRX_REO_QUEUE_STATS:
10432 		dp_get_host_peer_stats((struct cdp_soc_t *)pdev->soc,
10433 				       req->peer_addr);
10434 		break;
10435 	case TXRX_SOC_CFG_PARAMS:
10436 		dp_print_soc_cfg_params(pdev->soc);
10437 		break;
10438 	case TXRX_PDEV_CFG_PARAMS:
10439 		dp_print_pdev_cfg_params(pdev);
10440 		break;
10441 	case TXRX_NAPI_STATS:
10442 		dp_print_napi_stats(pdev->soc);
10443 		break;
10444 	case TXRX_SOC_INTERRUPT_STATS:
10445 		dp_print_soc_interrupt_stats(pdev->soc);
10446 		break;
10447 	case TXRX_SOC_FSE_STATS:
10448 		dp_rx_dump_fisa_table(pdev->soc);
10449 		break;
10450 	case TXRX_HAL_REG_WRITE_STATS:
10451 		hal_dump_reg_write_stats(pdev->soc->hal_soc);
10452 		hal_dump_reg_write_srng_stats(pdev->soc->hal_soc);
10453 		break;
10454 	case TXRX_SOC_REO_HW_DESC_DUMP:
10455 		dp_get_rx_reo_queue_info((struct cdp_soc_t *)pdev->soc,
10456 					 vdev->vdev_id);
10457 		break;
10458 	case TXRX_SOC_WBM_IDLE_HPTP_DUMP:
10459 		dp_dump_wbm_idle_hptp(pdev->soc, pdev);
10460 		break;
10461 	case TXRX_SRNG_USAGE_WM_STATS:
10462 		/* Dump usage watermark stats for all SRNGs */
10463 		dp_dump_srng_high_wm_stats(soc, 0xFF);
10464 		break;
10465 	default:
10466 		dp_info("Wrong Input For TxRx Host Stats");
10467 		dp_txrx_stats_help();
10468 		break;
10469 	}
10470 	return 0;
10471 }
10472 
10473 /**
10474  * dp_pdev_tid_stats_ingress_inc() - increment ingress_stack counter
10475  * @pdev: pdev handle
10476  * @val: increase in value
10477  *
10478  * Return: void
10479  */
10480 static void
10481 dp_pdev_tid_stats_ingress_inc(struct dp_pdev *pdev, uint32_t val)
10482 {
10483 	pdev->stats.tid_stats.ingress_stack += val;
10484 }
10485 
10486 /**
10487  * dp_pdev_tid_stats_osif_drop() - increment osif_drop counter
10488  * @pdev: pdev handle
10489  * @val: increase in value
10490  *
10491  * Return: void
10492  */
10493 static void
10494 dp_pdev_tid_stats_osif_drop(struct dp_pdev *pdev, uint32_t val)
10495 {
10496 	pdev->stats.tid_stats.osif_drop += val;
10497 }
10498 
10499 /**
10500  * dp_get_fw_peer_stats() - function to request peer stats from firmware
10501  * @soc: soc handle
10502  * @pdev_id: id of the pdev handle
10503  * @mac_addr: mac address of the peer
10504  * @cap: Type of htt stats requested
10505  * @is_wait: if set, wait on completion from firmware response
10506  *
10507  * Currently only MAC-ID-based requests are supported:
10508  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
10509  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
10510  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
10511  *
10512  * Return: QDF_STATUS
10513  */
10514 static QDF_STATUS
10515 dp_get_fw_peer_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
10516 		     uint8_t *mac_addr,
10517 		     uint32_t cap, uint32_t is_wait)
10518 {
10519 	int i;
10520 	uint32_t config_param0 = 0;
10521 	uint32_t config_param1 = 0;
10522 	uint32_t config_param2 = 0;
10523 	uint32_t config_param3 = 0;
10524 	struct dp_pdev *pdev =
10525 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10526 						   pdev_id);
10527 
10528 	if (!pdev)
10529 		return QDF_STATUS_E_FAILURE;
10530 
10531 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
10532 	config_param0 |= (1 << (cap + 1));
10533 
10534 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
10535 		config_param1 |= (1 << i);
10536 	}
10537 
10538 	config_param2 |= (mac_addr[0] & 0x000000ff);
10539 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
10540 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
10541 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
10542 
10543 	config_param3 |= (mac_addr[4] & 0x000000ff);
10544 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
10545 
10546 	if (is_wait) {
10547 		qdf_event_reset(&pdev->fw_peer_stats_event);
10548 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
10549 					  config_param0, config_param1,
10550 					  config_param2, config_param3,
10551 					  0, DBG_STATS_COOKIE_DP_STATS, 0);
10552 		qdf_wait_single_event(&pdev->fw_peer_stats_event,
10553 				      DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
10554 	} else {
10555 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
10556 					  config_param0, config_param1,
10557 					  config_param2, config_param3,
10558 					  0, DBG_STATS_COOKIE_DEFAULT, 0);
10559 	}
10560 
10561 	return QDF_STATUS_SUCCESS;
10562 
10563 }
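
/*
 * Packing sketch (illustrative): a peer MAC of aa:bb:cc:dd:ee:ff is
 * packed little-endian across the two config words sent to the target:
 *
 *	config_param2 = 0xddccbbaa	(mac[0] in bits 7:0 ... mac[3] in
 *					 bits 31:24)
 *	config_param3 = 0x0000ffee	(mac[4] in bits 7:0, mac[5] in
 *					 bits 15:8)
 */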
10564 
10565 /* This struct definition will be removed from here
10566  * once it gets added to the FW headers */
10567 struct httstats_cmd_req {
10568 	uint32_t config_param0;
10569 	uint32_t config_param1;
10570 	uint32_t config_param2;
10571 	uint32_t config_param3;
10572 	int cookie;
10573 	u_int8_t stats_id;
10574 };
10575 
10576 /**
10577  * dp_get_htt_stats() - function to process the HTT stats request
10578  * @soc: DP soc handle
10579  * @pdev_id: id of pdev handle
10580  * @data: pointer to request data
10581  * @data_len: length for request data
10582  *
10583  * Return: QDF_STATUS
10584  */
10585 static QDF_STATUS
10586 dp_get_htt_stats(struct cdp_soc_t *soc, uint8_t pdev_id, void *data,
10587 		 uint32_t data_len)
10588 {
10589 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
10590 	struct dp_pdev *pdev =
10591 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10592 						   pdev_id);
10593 
10594 	if (!pdev)
10595 		return QDF_STATUS_E_FAILURE;
10596 
10597 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
10598 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
10599 				req->config_param0, req->config_param1,
10600 				req->config_param2, req->config_param3,
10601 				req->cookie, DBG_STATS_COOKIE_DEFAULT, 0);
10602 
10603 	return QDF_STATUS_SUCCESS;
10604 }
10605 
10606 /**
10607  * dp_set_pdev_tidmap_prty_wifi3() - update tidmap priority in pdev
10608  * @pdev: DP_PDEV handle
10609  * @prio: tidmap priority value passed by the user
10610  *
10611  * Return: QDF_STATUS_SUCCESS on success
10612  */
10613 static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct dp_pdev *pdev,
10614 						uint8_t prio)
10615 {
10616 	struct dp_soc *soc = pdev->soc;
10617 
10618 	soc->tidmap_prty = prio;
10619 
10620 	hal_tx_set_tidmap_prty(soc->hal_soc, prio);
10621 	return QDF_STATUS_SUCCESS;
10622 }
10623 
10624 /**
10625  * dp_get_peer_param() - function to get parameters in peer
10626  * @cdp_soc: DP soc handle
10627  * @vdev_id: id of vdev handle
10628  * @peer_mac: peer mac address
10629  * @param: parameter type to be fetched
10630  * @val: address of buffer to hold the value
10631  *
10632  * Return: QDF_STATUS
10633  */
10634 static QDF_STATUS dp_get_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
10635 				    uint8_t *peer_mac,
10636 				    enum cdp_peer_param_type param,
10637 				    cdp_config_param_type *val)
10638 {
10639 	return QDF_STATUS_SUCCESS;
10640 }
10641 
10642 /**
10643  * dp_set_peer_param() - function to set parameters in peer
10644  * @cdp_soc: DP soc handle
10645  * @vdev_id: id of vdev handle
10646  * @peer_mac: peer mac address
10647  * @param: parameter type to be set
10648  * @val: value of parameter to be set
10649  *
10650  * Return: QDF_STATUS
10651  */
10652 static QDF_STATUS dp_set_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
10653 				    uint8_t *peer_mac,
10654 				    enum cdp_peer_param_type param,
10655 				    cdp_config_param_type val)
10656 {
10657 	struct dp_peer *peer =
10658 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
10659 						       peer_mac, 0, vdev_id,
10660 						       DP_MOD_ID_CDP);
10661 	struct dp_txrx_peer *txrx_peer;
10662 
10663 	if (!peer)
10664 		return QDF_STATUS_E_FAILURE;
10665 
10666 	txrx_peer = peer->txrx_peer;
10667 	if (!txrx_peer) {
10668 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10669 		return QDF_STATUS_E_FAILURE;
10670 	}
10671 
10672 	switch (param) {
10673 	case CDP_CONFIG_NAWDS:
10674 		txrx_peer->nawds_enabled = val.cdp_peer_param_nawds;
10675 		break;
10676 	case CDP_CONFIG_ISOLATION:
10677 		dp_set_peer_isolation(txrx_peer, val.cdp_peer_param_isolation);
10678 		break;
10679 	case CDP_CONFIG_IN_TWT:
10680 		txrx_peer->in_twt = !!(val.cdp_peer_param_in_twt);
10681 		break;
10682 	default:
10683 		break;
10684 	}
10685 
10686 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10687 
10688 	return QDF_STATUS_SUCCESS;
10689 }
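
/*
 * Illustrative caller sketch (not part of the driver):
 * cdp_config_param_type is a union, so only the member matching the
 * parameter type is filled in. `soc_hdl`, `vdev_id` and `peer_mac`
 * are assumed valid here.
 *
 *	cdp_config_param_type val = {0};
 *
 *	val.cdp_peer_param_isolation = true;
 *	dp_set_peer_param(soc_hdl, vdev_id, peer_mac,
 *			  CDP_CONFIG_ISOLATION, val);
 */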
10690 
10691 /**
10692  * dp_get_pdev_param() - function to get parameters from pdev
10693  * @cdp_soc: DP soc handle
10694  * @pdev_id: id of pdev handle
10695  * @param: parameter type to be retrieved
10696  * @val: buffer for value
10697  *
10698  * Return: status
10699  */
10700 static QDF_STATUS dp_get_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
10701 				    enum cdp_pdev_param_type param,
10702 				    cdp_config_param_type *val)
10703 {
10704 	struct cdp_pdev *pdev = (struct cdp_pdev *)
10705 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
10706 						   pdev_id);
10707 	if (!pdev)
10708 		return QDF_STATUS_E_FAILURE;
10709 
10710 	switch (param) {
10711 	case CDP_CONFIG_VOW:
10712 		val->cdp_pdev_param_cfg_vow =
10713 				((struct dp_pdev *)pdev)->delay_stats_flag;
10714 		break;
10715 	case CDP_TX_PENDING:
10716 		val->cdp_pdev_param_tx_pending = dp_get_tx_pending(pdev);
10717 		break;
10718 	case CDP_FILTER_MCAST_DATA:
10719 		val->cdp_pdev_param_fltr_mcast =
10720 				dp_monitor_pdev_get_filter_mcast_data(pdev);
10721 		break;
10722 	case CDP_FILTER_NO_DATA:
10723 		val->cdp_pdev_param_fltr_none =
10724 				dp_monitor_pdev_get_filter_non_data(pdev);
10725 		break;
10726 	case CDP_FILTER_UCAST_DATA:
10727 		val->cdp_pdev_param_fltr_ucast =
10728 				dp_monitor_pdev_get_filter_ucast_data(pdev);
10729 		break;
10730 	case CDP_MONITOR_CHANNEL:
10731 		val->cdp_pdev_param_monitor_chan =
10732 			dp_monitor_get_chan_num((struct dp_pdev *)pdev);
10733 		break;
10734 	case CDP_MONITOR_FREQUENCY:
10735 		val->cdp_pdev_param_mon_freq =
10736 			dp_monitor_get_chan_freq((struct dp_pdev *)pdev);
10737 		break;
10738 	default:
10739 		return QDF_STATUS_E_FAILURE;
10740 	}
10741 
10742 	return QDF_STATUS_SUCCESS;
10743 }
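
/*
 * Illustrative caller sketch (not part of the driver): reading the
 * pending TX descriptor count on pdev 0, assuming a valid `soc_hdl`.
 *
 *	cdp_config_param_type val = {0};
 *	uint32_t tx_pending = 0;
 *
 *	if (dp_get_pdev_param(soc_hdl, 0, CDP_TX_PENDING, &val) ==
 *	    QDF_STATUS_SUCCESS)
 *		tx_pending = val.cdp_pdev_param_tx_pending;
 */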
10744 
10745 /**
10746  * dp_set_pdev_param() - function to set parameters in pdev
10747  * @cdp_soc: DP soc handle
10748  * @pdev_id: id of pdev handle
10749  * @param: parameter type to be set
10750  * @val: value of parameter to be set
10751  *
10752  * Return: 0 for success. nonzero for failure.
10753  */
10754 static QDF_STATUS dp_set_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
10755 				    enum cdp_pdev_param_type param,
10756 				    cdp_config_param_type val)
10757 {
10758 	int target_type;
10759 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
10760 	struct dp_pdev *pdev =
10761 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
10762 						   pdev_id);
10763 	enum reg_wifi_band chan_band;
10764 
10765 	if (!pdev)
10766 		return QDF_STATUS_E_FAILURE;
10767 
10768 	target_type = hal_get_target_type(soc->hal_soc);
10769 	switch (target_type) {
10770 	case TARGET_TYPE_QCA6750:
10771 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC0_LMAC_ID;
10772 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
10773 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
10774 		break;
10775 	case TARGET_TYPE_KIWI:
10776 	case TARGET_TYPE_MANGO:
10777 	case TARGET_TYPE_PEACH:
10778 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC0_LMAC_ID;
10779 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
10780 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
10781 		break;
10782 	default:
10783 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC1_LMAC_ID;
10784 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
10785 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
10786 		break;
10787 	}
10788 
10789 	switch (param) {
10790 	case CDP_CONFIG_TX_CAPTURE:
10791 		return dp_monitor_config_debug_sniffer(pdev,
10792 						val.cdp_pdev_param_tx_capture);
10793 	case CDP_CONFIG_DEBUG_SNIFFER:
10794 		return dp_monitor_config_debug_sniffer(pdev,
10795 						val.cdp_pdev_param_dbg_snf);
10796 	case CDP_CONFIG_BPR_ENABLE:
10797 		return dp_monitor_set_bpr_enable(pdev,
10798 						 val.cdp_pdev_param_bpr_enable);
10799 	case CDP_CONFIG_PRIMARY_RADIO:
10800 		pdev->is_primary = val.cdp_pdev_param_primary_radio;
10801 		break;
10802 	case CDP_CONFIG_CAPTURE_LATENCY:
10803 		pdev->latency_capture_enable = val.cdp_pdev_param_cptr_latcy;
10804 		break;
10805 	case CDP_INGRESS_STATS:
10806 		dp_pdev_tid_stats_ingress_inc(pdev,
10807 					      val.cdp_pdev_param_ingrs_stats);
10808 		break;
10809 	case CDP_OSIF_DROP:
10810 		dp_pdev_tid_stats_osif_drop(pdev,
10811 					    val.cdp_pdev_param_osif_drop);
10812 		break;
10813 	case CDP_CONFIG_ENH_RX_CAPTURE:
10814 		return dp_monitor_config_enh_rx_capture(pdev,
10815 						val.cdp_pdev_param_en_rx_cap);
10816 	case CDP_CONFIG_ENH_TX_CAPTURE:
10817 		return dp_monitor_config_enh_tx_capture(pdev,
10818 						val.cdp_pdev_param_en_tx_cap);
10819 	case CDP_CONFIG_HMMC_TID_OVERRIDE:
10820 		pdev->hmmc_tid_override_en = val.cdp_pdev_param_hmmc_tid_ovrd;
10821 		break;
10822 	case CDP_CONFIG_HMMC_TID_VALUE:
10823 		pdev->hmmc_tid = val.cdp_pdev_param_hmmc_tid;
10824 		break;
10825 	case CDP_CHAN_NOISE_FLOOR:
10826 		pdev->chan_noise_floor = val.cdp_pdev_param_chn_noise_flr;
10827 		break;
10828 	case CDP_TIDMAP_PRTY:
10829 		dp_set_pdev_tidmap_prty_wifi3(pdev,
10830 					      val.cdp_pdev_param_tidmap_prty);
10831 		break;
10832 	case CDP_FILTER_NEIGH_PEERS:
10833 		dp_monitor_set_filter_neigh_peers(pdev,
10834 					val.cdp_pdev_param_fltr_neigh_peers);
10835 		break;
10836 	case CDP_MONITOR_CHANNEL:
10837 		dp_monitor_set_chan_num(pdev, val.cdp_pdev_param_monitor_chan);
10838 		break;
10839 	case CDP_MONITOR_FREQUENCY:
10840 		chan_band = wlan_reg_freq_to_band(val.cdp_pdev_param_mon_freq);
10841 		dp_monitor_set_chan_freq(pdev, val.cdp_pdev_param_mon_freq);
10842 		dp_monitor_set_chan_band(pdev, chan_band);
10843 		break;
10844 	case CDP_CONFIG_BSS_COLOR:
10845 		dp_monitor_set_bsscolor(pdev, val.cdp_pdev_param_bss_color);
10846 		break;
10847 	case CDP_SET_ATF_STATS_ENABLE:
10848 		dp_monitor_set_atf_stats_enable(pdev,
10849 					val.cdp_pdev_param_atf_stats_enable);
10850 		break;
10851 	case CDP_CONFIG_SPECIAL_VAP:
10852 		dp_monitor_pdev_config_scan_spcl_vap(pdev,
10853 					val.cdp_pdev_param_config_special_vap);
10854 		dp_monitor_vdev_set_monitor_mode_buf_rings(pdev);
10855 		break;
10856 	case CDP_RESET_SCAN_SPCL_VAP_STATS_ENABLE:
10857 		dp_monitor_pdev_reset_scan_spcl_vap_stats_enable(pdev,
10858 				val.cdp_pdev_param_reset_scan_spcl_vap_stats_enable);
10859 		break;
10860 	case CDP_CONFIG_ENHANCED_STATS_ENABLE:
10861 		pdev->enhanced_stats_en = val.cdp_pdev_param_enhanced_stats_enable;
10862 		break;
10863 	case CDP_ISOLATION:
10864 		pdev->isolation = val.cdp_pdev_param_isolation;
10865 		break;
10866 	case CDP_CONFIG_UNDECODED_METADATA_CAPTURE_ENABLE:
10867 		return dp_monitor_config_undecoded_metadata_capture(pdev,
10868 				val.cdp_pdev_param_undecoded_metadata_enable);
10870 	default:
10871 		return QDF_STATUS_E_INVAL;
10872 	}
10873 	return QDF_STATUS_SUCCESS;
10874 }
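
/*
 * Illustrative caller sketch (not part of the driver): setting the
 * monitor frequency also derives and programs the channel band
 * internally (see the CDP_MONITOR_FREQUENCY case above). `soc_hdl`
 * is assumed valid; 5180 MHz is just an example frequency.
 *
 *	cdp_config_param_type val = {0};
 *
 *	val.cdp_pdev_param_mon_freq = 5180;
 *	dp_set_pdev_param(soc_hdl, 0, CDP_MONITOR_FREQUENCY, val);
 */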
10875 
10876 #ifdef QCA_UNDECODED_METADATA_SUPPORT
10877 static
10878 QDF_STATUS dp_set_pdev_phyrx_error_mask(struct cdp_soc_t *cdp_soc,
10879 					uint8_t pdev_id, uint32_t mask,
10880 					uint32_t mask_cont)
10881 {
10882 	struct dp_pdev *pdev =
10883 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
10884 						   pdev_id);
10885 
10886 	if (!pdev)
10887 		return QDF_STATUS_E_FAILURE;
10888 
10889 	return dp_monitor_config_undecoded_metadata_phyrx_error_mask(pdev,
10890 				mask, mask_cont);
10891 }
10892 
10893 static
10894 QDF_STATUS dp_get_pdev_phyrx_error_mask(struct cdp_soc_t *cdp_soc,
10895 					uint8_t pdev_id, uint32_t *mask,
10896 					uint32_t *mask_cont)
10897 {
10898 	struct dp_pdev *pdev =
10899 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
10900 						   pdev_id);
10901 
10902 	if (!pdev)
10903 		return QDF_STATUS_E_FAILURE;
10904 
10905 	return dp_monitor_get_undecoded_metadata_phyrx_error_mask(pdev,
10906 				mask, mask_cont);
10907 }
10908 #endif
10909 
10910 #ifdef QCA_PEER_EXT_STATS
10911 static void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
10912 					  qdf_nbuf_t nbuf)
10913 {
10914 	struct dp_peer *peer = NULL;
10915 	uint16_t peer_id, ring_id;
10916 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
10917 	struct dp_peer_delay_stats *delay_stats = NULL;
10918 
10919 	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
10920 	if (peer_id > soc->max_peer_id)
10921 		return;
10922 
10923 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
10924 	if (qdf_unlikely(!peer))
10925 		return;
10926 
10927 	if (qdf_unlikely(!peer->txrx_peer)) {
10928 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10929 		return;
10930 	}
10931 
10932 	if (qdf_likely(peer->txrx_peer->delay_stats)) {
10933 		delay_stats = peer->txrx_peer->delay_stats;
10934 		ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
10935 		dp_rx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
10936 					nbuf);
10937 	}
10938 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10939 }
10940 #else
10941 static inline void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
10942 						 qdf_nbuf_t nbuf)
10943 {
10944 }
10945 #endif
10946 
10947 /**
10948  * dp_calculate_delay_stats() - function to calculate rx delay stats
10949  * @cdp_soc: DP soc handle
10950  * @vdev_id: id of DP vdev handle
10951  * @nbuf: skb
10952  *
10953  * Return: QDF_STATUS
10954  */
10955 static QDF_STATUS
10956 dp_calculate_delay_stats(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
10957 			 qdf_nbuf_t nbuf)
10958 {
10959 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
10960 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10961 						     DP_MOD_ID_CDP);
10962 
10963 	if (!vdev)
10964 		return QDF_STATUS_SUCCESS;
10965 
10966 	if (vdev->pdev->delay_stats_flag)
10967 		dp_rx_compute_delay(vdev, nbuf);
10968 	else
10969 		dp_rx_update_peer_delay_stats(soc, nbuf);
10970 
10971 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10972 	return QDF_STATUS_SUCCESS;
10973 }
10974 
10975 /**
10976  * dp_get_vdev_param() - function to get parameters from vdev
10977  * @cdp_soc: DP soc handle
10978  * @vdev_id: id of DP vdev handle
10979  * @param: parameter type to get value
10980  * @val: buffer address
10981  *
10982  * Return: status
10983  */
10984 static QDF_STATUS dp_get_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
10985 				    enum cdp_vdev_param_type param,
10986 				    cdp_config_param_type *val)
10987 {
10988 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
10989 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10990 						     DP_MOD_ID_CDP);
10991 
10992 	if (!vdev)
10993 		return QDF_STATUS_E_FAILURE;
10994 
10995 	switch (param) {
10996 	case CDP_ENABLE_WDS:
10997 		val->cdp_vdev_param_wds = vdev->wds_enabled;
10998 		break;
10999 	case CDP_ENABLE_MEC:
11000 		val->cdp_vdev_param_mec = vdev->mec_enabled;
11001 		break;
11002 	case CDP_ENABLE_DA_WAR:
11003 		val->cdp_vdev_param_da_war = vdev->pdev->soc->da_war_enabled;
11004 		break;
11005 	case CDP_ENABLE_IGMP_MCAST_EN:
11006 		val->cdp_vdev_param_igmp_mcast_en = vdev->igmp_mcast_enhanc_en;
11007 		break;
11008 	case CDP_ENABLE_MCAST_EN:
11009 		val->cdp_vdev_param_mcast_en = vdev->mcast_enhancement_en;
11010 		break;
11011 	case CDP_ENABLE_HLOS_TID_OVERRIDE:
11012 		val->cdp_vdev_param_hlos_tid_override =
11013 			    dp_vdev_get_hlos_tid_override((struct cdp_vdev *)vdev);
11014 		break;
11015 	case CDP_ENABLE_PEER_AUTHORIZE:
11016 		val->cdp_vdev_param_peer_authorize =
11017 			    vdev->peer_authorize;
11018 		break;
11019 	case CDP_TX_ENCAP_TYPE:
11020 		val->cdp_vdev_param_tx_encap = vdev->tx_encap_type;
11021 		break;
11022 	case CDP_ENABLE_CIPHER:
11023 		val->cdp_vdev_param_cipher_en = vdev->sec_type;
11024 		break;
11025 #ifdef WLAN_SUPPORT_MESH_LATENCY
11026 	case CDP_ENABLE_PEER_TID_LATENCY:
11027 		val->cdp_vdev_param_peer_tid_latency_enable =
11028 			vdev->peer_tid_latency_enabled;
11029 		break;
11030 	case CDP_SET_VAP_MESH_TID:
11031 		val->cdp_vdev_param_mesh_tid =
11032 				vdev->mesh_tid_latency_config.latency_tid;
11033 		break;
11034 #endif
11035 	case CDP_DROP_3ADDR_MCAST:
11036 		val->cdp_drop_3addr_mcast = vdev->drop_3addr_mcast;
11037 		break;
11038 	case CDP_SET_MCAST_VDEV:
11039 		soc->arch_ops.txrx_get_vdev_mcast_param(soc, vdev, val);
11040 		break;
11041 #ifdef QCA_SUPPORT_WDS_EXTENDED
11042 	case CDP_DROP_TX_MCAST:
11043 		val->cdp_drop_tx_mcast = vdev->drop_tx_mcast;
11044 		break;
11045 #endif
11046 
11047 #ifdef MESH_MODE_SUPPORT
11048 	case CDP_MESH_RX_FILTER:
11049 		val->cdp_vdev_param_mesh_rx_filter = vdev->mesh_rx_filter;
11050 		break;
11051 	case CDP_MESH_MODE:
11052 		val->cdp_vdev_param_mesh_mode = vdev->mesh_vdev;
11053 		break;
11054 #endif
11055 	case CDP_ENABLE_NAWDS:
11056 		val->cdp_vdev_param_nawds = vdev->nawds_enabled;
11057 		break;
11058 
11059 	case CDP_ENABLE_WRAP:
11060 		val->cdp_vdev_param_wrap = vdev->wrap_vdev;
11061 		break;
11062 
11063 #ifdef DP_TRAFFIC_END_INDICATION
11064 	case CDP_ENABLE_TRAFFIC_END_INDICATION:
11065 		val->cdp_vdev_param_traffic_end_ind = vdev->traffic_end_ind_en;
11066 		break;
11067 #endif
11068 
11069 	default:
11070 		dp_cdp_err("%pK: param value %d is wrong",
11071 			   soc, param);
11072 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11073 		return QDF_STATUS_E_FAILURE;
11074 	}
11075 
11076 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11077 	return QDF_STATUS_SUCCESS;
11078 }
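
/*
 * Illustrative caller sketch (not part of the driver): querying
 * whether WDS is enabled on a vdev, assuming a valid `soc_hdl` and
 * `vdev_id`.
 *
 *	cdp_config_param_type val = {0};
 *	bool wds_enabled = false;
 *
 *	if (dp_get_vdev_param(soc_hdl, vdev_id, CDP_ENABLE_WDS, &val) ==
 *	    QDF_STATUS_SUCCESS)
 *		wds_enabled = val.cdp_vdev_param_wds;
 */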
11079 
11080 /**
11081  * dp_set_vdev_param() - function to set parameters in vdev
11082  * @cdp_soc: DP soc handle
11083  * @vdev_id: id of DP vdev handle
11084  * @param: parameter type to be set
11085  * @val: value of parameter to be set
11086  *
11087  * Return: QDF_STATUS
11088  */
11089 static QDF_STATUS
11090 dp_set_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
11091 		  enum cdp_vdev_param_type param, cdp_config_param_type val)
11092 {
11093 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
11094 	struct dp_vdev *vdev =
11095 		dp_vdev_get_ref_by_id(dsoc, vdev_id, DP_MOD_ID_CDP);
11096 	uint32_t var = 0;
11097 
11098 	if (!vdev)
11099 		return QDF_STATUS_E_FAILURE;
11100 
11101 	switch (param) {
11102 	case CDP_ENABLE_WDS:
11103 		dp_cdp_err("%pK: wds_enable %d for vdev(%pK) id(%d)\n",
11104 			   dsoc, val.cdp_vdev_param_wds, vdev, vdev->vdev_id);
11105 		vdev->wds_enabled = val.cdp_vdev_param_wds;
11106 		break;
11107 	case CDP_ENABLE_MEC:
11108 		dp_cdp_err("%pK: mec_enable %d for vdev(%pK) id(%d)\n",
11109 			   dsoc, val.cdp_vdev_param_mec, vdev, vdev->vdev_id);
11110 		vdev->mec_enabled = val.cdp_vdev_param_mec;
11111 		break;
11112 	case CDP_ENABLE_DA_WAR:
11113 		dp_cdp_err("%pK: da_war_enable %d for vdev(%pK) id(%d)\n",
11114 			   dsoc, val.cdp_vdev_param_da_war, vdev, vdev->vdev_id);
11115 		vdev->pdev->soc->da_war_enabled = val.cdp_vdev_param_da_war;
11116 		dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
11117 					     vdev->pdev->soc));
11118 		break;
11119 	case CDP_ENABLE_NAWDS:
11120 		vdev->nawds_enabled = val.cdp_vdev_param_nawds;
11121 		break;
11122 	case CDP_ENABLE_MCAST_EN:
11123 		vdev->mcast_enhancement_en = val.cdp_vdev_param_mcast_en;
11124 		break;
11125 	case CDP_ENABLE_IGMP_MCAST_EN:
11126 		vdev->igmp_mcast_enhanc_en = val.cdp_vdev_param_igmp_mcast_en;
11127 		break;
11128 	case CDP_ENABLE_PROXYSTA:
11129 		vdev->proxysta_vdev = val.cdp_vdev_param_proxysta;
11130 		break;
11131 	case CDP_UPDATE_TDLS_FLAGS:
11132 		vdev->tdls_link_connected = val.cdp_vdev_param_tdls_flags;
11133 		break;
11134 	case CDP_CFG_WDS_AGING_TIMER:
11135 		var = val.cdp_vdev_param_aging_tmr;
11136 		if (!var)
11137 			qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
11138 		else if (var != vdev->wds_aging_timer_val)
11139 			qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, var);
11140 
11141 		vdev->wds_aging_timer_val = var;
11142 		break;
11143 	case CDP_ENABLE_AP_BRIDGE:
11144 		if (wlan_op_mode_sta != vdev->opmode)
11145 			vdev->ap_bridge_enabled = val.cdp_vdev_param_ap_brdg_en;
11146 		else
11147 			vdev->ap_bridge_enabled = false;
11148 		break;
11149 	case CDP_ENABLE_CIPHER:
11150 		vdev->sec_type = val.cdp_vdev_param_cipher_en;
11151 		break;
11152 	case CDP_ENABLE_QWRAP_ISOLATION:
11153 		vdev->isolation_vdev = val.cdp_vdev_param_qwrap_isolation;
11154 		break;
11155 	case CDP_UPDATE_MULTIPASS:
11156 		vdev->multipass_en = val.cdp_vdev_param_update_multipass;
11157 		break;
11158 	case CDP_TX_ENCAP_TYPE:
11159 		vdev->tx_encap_type = val.cdp_vdev_param_tx_encap;
11160 		break;
11161 	case CDP_RX_DECAP_TYPE:
11162 		vdev->rx_decap_type = val.cdp_vdev_param_rx_decap;
11163 		break;
11164 	case CDP_TID_VDEV_PRTY:
11165 		vdev->tidmap_prty = val.cdp_vdev_param_tidmap_prty;
11166 		break;
11167 	case CDP_TIDMAP_TBL_ID:
11168 		vdev->tidmap_tbl_id = val.cdp_vdev_param_tidmap_tbl_id;
11169 		break;
11170 #ifdef MESH_MODE_SUPPORT
11171 	case CDP_MESH_RX_FILTER:
11172 		dp_vdev_set_mesh_rx_filter((struct cdp_vdev *)vdev,
11173 					   val.cdp_vdev_param_mesh_rx_filter);
11174 		break;
11175 	case CDP_MESH_MODE:
11176 		dp_vdev_set_mesh_mode((struct cdp_vdev *)vdev,
11177 				      val.cdp_vdev_param_mesh_mode);
11178 		break;
11179 #endif
11180 	case CDP_ENABLE_HLOS_TID_OVERRIDE:
11181 		dp_info("vdev_id %d enable hlos tid override %d", vdev_id,
11182 			val.cdp_vdev_param_hlos_tid_override);
11183 		dp_vdev_set_hlos_tid_override(vdev,
11184 				val.cdp_vdev_param_hlos_tid_override);
11185 		break;
11186 #ifdef QCA_SUPPORT_WDS_EXTENDED
11187 	case CDP_CFG_WDS_EXT:
11188 		if (vdev->opmode == wlan_op_mode_ap)
11189 			vdev->wds_ext_enabled = val.cdp_vdev_param_wds_ext;
11190 		break;
11191 	case CDP_DROP_TX_MCAST:
11192 		dp_info("vdev_id %d drop tx mcast :%d", vdev_id,
11193 			val.cdp_drop_tx_mcast);
11194 		vdev->drop_tx_mcast = val.cdp_drop_tx_mcast;
11195 		break;
11196 #endif
11197 	case CDP_ENABLE_PEER_AUTHORIZE:
11198 		vdev->peer_authorize = val.cdp_vdev_param_peer_authorize;
11199 		break;
11200 #ifdef WLAN_SUPPORT_MESH_LATENCY
11201 	case CDP_ENABLE_PEER_TID_LATENCY:
11202 		dp_info("vdev_id %d enable peer tid latency %d", vdev_id,
11203 			val.cdp_vdev_param_peer_tid_latency_enable);
11204 		vdev->peer_tid_latency_enabled =
11205 			val.cdp_vdev_param_peer_tid_latency_enable;
11206 		break;
11207 	case CDP_SET_VAP_MESH_TID:
11208 		dp_info("vdev_id %d set vap mesh tid %d", vdev_id,
11209 			val.cdp_vdev_param_mesh_tid);
11210 		vdev->mesh_tid_latency_config.latency_tid
11211 				= val.cdp_vdev_param_mesh_tid;
11212 		break;
11213 #endif
11214 #ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE
11215 	case CDP_SKIP_BAR_UPDATE_AP:
11216 		dp_info("vdev_id %d skip BAR update: %u", vdev_id,
11217 			val.cdp_skip_bar_update);
11218 		vdev->skip_bar_update = val.cdp_skip_bar_update;
11219 		vdev->skip_bar_update_last_ts = 0;
11220 		break;
11221 #endif
11222 	case CDP_DROP_3ADDR_MCAST:
11223 		dp_info("vdev_id %d drop 3 addr mcast :%d", vdev_id,
11224 			val.cdp_drop_3addr_mcast);
11225 		vdev->drop_3addr_mcast = val.cdp_drop_3addr_mcast;
11226 		break;
11227 	case CDP_ENABLE_WRAP:
11228 		vdev->wrap_vdev = val.cdp_vdev_param_wrap;
11229 		break;
11230 #ifdef DP_TRAFFIC_END_INDICATION
11231 	case CDP_ENABLE_TRAFFIC_END_INDICATION:
11232 		vdev->traffic_end_ind_en = val.cdp_vdev_param_traffic_end_ind;
11233 		break;
11234 #endif
11235 #ifdef FEATURE_DIRECT_LINK
11236 	case CDP_VDEV_TX_TO_FW:
11237 		dp_info("vdev_id %d to_fw :%d", vdev_id, val.cdp_vdev_tx_to_fw);
11238 		vdev->to_fw = val.cdp_vdev_tx_to_fw;
11239 		break;
11240 #endif
11241 	default:
11242 		break;
11243 	}
11244 
11245 	dp_tx_vdev_update_search_flags((struct dp_vdev *)vdev);
11246 	dsoc->arch_ops.txrx_set_vdev_param(dsoc, vdev, param, val);
11247 
11248 	/* Update PDEV flags as VDEV flags are updated */
11249 	dp_pdev_update_fast_rx_flag(dsoc, vdev->pdev);
11250 	dp_vdev_unref_delete(dsoc, vdev, DP_MOD_ID_CDP);
11251 
11252 	return QDF_STATUS_SUCCESS;
11253 }
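
/*
 * Illustrative caller sketch (not part of the driver): enabling NAWDS
 * on a vdev. Note that every set also refreshes the TX search flags
 * and the pdev fast-RX flag (see the tail of the function above).
 * `soc_hdl` and `vdev_id` are assumed valid.
 *
 *	cdp_config_param_type val = {0};
 *
 *	val.cdp_vdev_param_nawds = 1;
 *	dp_set_vdev_param(soc_hdl, vdev_id, CDP_ENABLE_NAWDS, val);
 */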
11254 
11255 /**
11256  * dp_set_psoc_param() - function to set parameters in psoc
11257  * @cdp_soc: DP soc handle
11258  * @param: parameter type to be set
11259  * @val: value of parameter to be set
11260  *
11261  * Return: QDF_STATUS
11262  */
11263 static QDF_STATUS
11264 dp_set_psoc_param(struct cdp_soc_t *cdp_soc,
11265 		  enum cdp_psoc_param_type param, cdp_config_param_type val)
11266 {
11267 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
11268 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->wlan_cfg_ctx;
11269 
11270 	switch (param) {
11271 	case CDP_ENABLE_RATE_STATS:
11272 		soc->peerstats_enabled = val.cdp_psoc_param_en_rate_stats;
11273 		break;
11274 	case CDP_SET_NSS_CFG:
11275 		wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx,
11276 					    val.cdp_psoc_param_en_nss_cfg);
11277 		/*
11278 		 * TODO: mask this out based on the per-radio offload config
11279 		 */
11280 		switch (val.cdp_psoc_param_en_nss_cfg) {
11281 		case dp_nss_cfg_default:
11282 			break;
11283 		case dp_nss_cfg_first_radio:
11284 		/*
11285 		 * This configuration is valid for a single-band radio
11286 		 * which is also NSS offloaded.
11287 		 */
11288 		case dp_nss_cfg_dbdc:
11289 		case dp_nss_cfg_dbtc:
11290 			wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
11291 			wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
11292 			wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
11293 			wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
11294 			break;
11295 		default:
11296 			dp_cdp_err("%pK: Invalid offload config %d",
11297 				   soc, val.cdp_psoc_param_en_nss_cfg);
11298 		}
11299 
11300 		dp_cdp_err("%pK: nss-wifi<0> nss config is enabled", soc);
11302 		break;
11303 	case CDP_SET_PREFERRED_HW_MODE:
11304 		soc->preferred_hw_mode = val.cdp_psoc_param_preferred_hw_mode;
11305 		break;
11306 	case CDP_IPA_ENABLE:
11307 		soc->wlan_cfg_ctx->ipa_enabled = val.cdp_ipa_enabled;
11308 		break;
11309 	case CDP_CFG_VDEV_STATS_HW_OFFLOAD:
11310 		wlan_cfg_set_vdev_stats_hw_offload_config(wlan_cfg_ctx,
11311 				val.cdp_psoc_param_vdev_stats_hw_offload);
11312 		break;
11313 	case CDP_SAWF_ENABLE:
11314 		wlan_cfg_set_sawf_config(wlan_cfg_ctx, val.cdp_sawf_enabled);
11315 		break;
11316 	case CDP_UMAC_RST_SKEL_ENABLE:
11317 		dp_umac_rst_skel_enable_update(soc, val.cdp_umac_rst_skel);
11318 		break;
11319 	case CDP_SAWF_STATS:
11320 		wlan_cfg_set_sawf_stats_config(wlan_cfg_ctx,
11321 					       val.cdp_sawf_stats);
11322 		break;
11323 	default:
11324 		break;
11325 	}
11326 
11327 	return QDF_STATUS_SUCCESS;
11328 }
11329 
11330 /**
11331  * dp_get_psoc_param() - function to get parameters in soc
11332  * @cdp_soc: DP soc handle
11333  * @param: parameter type to be retrieved
11334  * @val: address of buffer
11335  *
11336  * Return: status
11337  */
11338 static QDF_STATUS dp_get_psoc_param(struct cdp_soc_t *cdp_soc,
11339 				    enum cdp_psoc_param_type param,
11340 				    cdp_config_param_type *val)
11341 {
11342 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
11343 
11344 	if (!soc)
11345 		return QDF_STATUS_E_FAILURE;
11346 
11347 	switch (param) {
11348 	case CDP_CFG_PEER_EXT_STATS:
11349 		val->cdp_psoc_param_pext_stats =
11350 			wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
11351 		break;
11352 	case CDP_CFG_VDEV_STATS_HW_OFFLOAD:
11353 		val->cdp_psoc_param_vdev_stats_hw_offload =
11354 			wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx);
11355 		break;
11356 	case CDP_UMAC_RST_SKEL_ENABLE:
11357 		val->cdp_umac_rst_skel = dp_umac_rst_skel_enable_get(soc);
11358 		break;
11359 	case CDP_PPEDS_ENABLE:
11360 		val->cdp_psoc_param_ppeds_enabled =
11361 			wlan_cfg_get_dp_soc_is_ppeds_enabled(soc->wlan_cfg_ctx);
11362 		break;
11363 	default:
11364 		dp_warn("Invalid param");
11365 		break;
11366 	}
11367 
11368 	return QDF_STATUS_SUCCESS;
11369 }
11370 
11371 /**
11372  * dp_set_vdev_dscp_tid_map_wifi3() - Update Map ID selected for particular vdev
11373  * @cdp_soc: CDP SOC handle
11374  * @vdev_id: id of DP_VDEV handle
11375  * @map_id: ID of map that needs to be updated
11376  *
11377  * Return: QDF_STATUS
11378  */
11379 static QDF_STATUS dp_set_vdev_dscp_tid_map_wifi3(ol_txrx_soc_handle cdp_soc,
11380 						 uint8_t vdev_id,
11381 						 uint8_t map_id)
11382 {
11383 	cdp_config_param_type val;
11384 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
11385 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11386 						     DP_MOD_ID_CDP);
11387 	if (vdev) {
11388 		vdev->dscp_tid_map_id = map_id;
11389 		val.cdp_vdev_param_dscp_tid_map_id = map_id;
11390 		soc->arch_ops.txrx_set_vdev_param(soc,
11391 						  vdev,
11392 						  CDP_UPDATE_DSCP_TO_TID_MAP,
11393 						  val);
11394 		/* Update flag for transmit tid classification */
11395 		if (vdev->dscp_tid_map_id < soc->num_hw_dscp_tid_map)
11396 			vdev->skip_sw_tid_classification |=
11397 				DP_TX_HW_DSCP_TID_MAP_VALID;
11398 		else
11399 			vdev->skip_sw_tid_classification &=
11400 				~DP_TX_HW_DSCP_TID_MAP_VALID;
11401 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11402 		return QDF_STATUS_SUCCESS;
11403 	}
11404 
11405 	return QDF_STATUS_E_FAILURE;
11406 }
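
/*
 * Illustrative note (not part of the driver): whether TX TID
 * classification stays in HW or falls back to SW depends only on the
 * chosen map id. E.g. assuming a hypothetical num_hw_dscp_tid_map of 2:
 *
 *	dp_set_vdev_dscp_tid_map_wifi3(soc_hdl, vdev_id, 1);  (HW map)
 *	dp_set_vdev_dscp_tid_map_wifi3(soc_hdl, vdev_id, 4);  (SW fallback)
 *
 * `soc_hdl` and `vdev_id` are assumed valid.
 */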
11407 
11408 #ifdef DP_RATETABLE_SUPPORT
11409 static int dp_txrx_get_ratekbps(int preamb, int mcs,
11410 				int htflag, int gintval)
11411 {
11412 	uint32_t rix;
11413 	uint16_t ratecode;
11414 	enum cdp_punctured_modes punc_mode = NO_PUNCTURE;
11415 
11416 	return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1,
11417 			       (uint8_t)preamb, 1, punc_mode,
11418 			       &rix, &ratecode);
11419 }
11420 #else
11421 static int dp_txrx_get_ratekbps(int preamb, int mcs,
11422 				int htflag, int gintval)
11423 {
11424 	return 0;
11425 }
11426 #endif
11427 
11428 /**
11429  * dp_txrx_get_pdev_stats() - Returns cdp_pdev_stats
11430  * @soc: DP soc handle
11431  * @pdev_id: id of DP pdev handle
11432  * @pdev_stats: buffer to copy to
11433  *
11434  * Return: status success/failure
11435  */
11436 static QDF_STATUS
11437 dp_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
11438 		       struct cdp_pdev_stats *pdev_stats)
11439 {
11440 	struct dp_pdev *pdev =
11441 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11442 						   pdev_id);
11443 	if (!pdev)
11444 		return QDF_STATUS_E_FAILURE;
11445 
11446 	dp_aggregate_pdev_stats(pdev);
11447 
11448 	qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats));
11449 	return QDF_STATUS_SUCCESS;
11450 }
11451 
11452 /**
11453  * dp_txrx_update_vdev_me_stats() - Update vdev ME stats sent from CDP
11454  * @vdev: DP vdev handle
11455  * @buf: buffer containing specific stats structure
11456  *
11457  * Return: void
11458  */
11459 static void dp_txrx_update_vdev_me_stats(struct dp_vdev *vdev,
11460 					 void *buf)
11461 {
11462 	struct cdp_tx_ingress_stats *host_stats = NULL;
11463 
11464 	if (!buf) {
11465 		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
11466 		return;
11467 	}
11468 	host_stats = (struct cdp_tx_ingress_stats *)buf;
11469 
11470 	DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt,
11471 			 host_stats->mcast_en.mcast_pkt.num,
11472 			 host_stats->mcast_en.mcast_pkt.bytes);
11473 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error,
11474 		     host_stats->mcast_en.dropped_map_error);
11475 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_self_mac,
11476 		     host_stats->mcast_en.dropped_self_mac);
11477 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_send_fail,
11478 		     host_stats->mcast_en.dropped_send_fail);
11479 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast,
11480 		     host_stats->mcast_en.ucast);
11481 	DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc,
11482 		     host_stats->mcast_en.fail_seg_alloc);
11483 	DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail,
11484 		     host_stats->mcast_en.clone_fail);
11485 }
11486 
11487 /**
11488  * dp_txrx_update_vdev_igmp_me_stats() - Update vdev IGMP ME stats sent from CDP
11489  * @vdev: DP vdev handle
11490  * @buf: buffer containing specific stats structure
11491  *
11492  * Return: void
11493  */
11494 static void dp_txrx_update_vdev_igmp_me_stats(struct dp_vdev *vdev,
11495 					      void *buf)
11496 {
11497 	struct cdp_tx_ingress_stats *host_stats = NULL;
11498 
11499 	if (!buf) {
11500 		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
11501 		return;
11502 	}
11503 	host_stats = (struct cdp_tx_ingress_stats *)buf;
11504 
11505 	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_rcvd,
11506 		     host_stats->igmp_mcast_en.igmp_rcvd);
11507 	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_ucast_converted,
11508 		     host_stats->igmp_mcast_en.igmp_ucast_converted);
11509 }
11510 
11511 /**
11512  * dp_txrx_update_vdev_host_stats() - Update stats sent through CDP
11513  * @soc_hdl: DP soc handle
11514  * @vdev_id: id of DP vdev handle
11515  * @buf: buffer containing specific stats structure
11516  * @stats_id: stats type
11517  *
11518  * Return: QDF_STATUS
11519  */
11520 static QDF_STATUS dp_txrx_update_vdev_host_stats(struct cdp_soc_t *soc_hdl,
11521 						 uint8_t vdev_id,
11522 						 void *buf,
11523 						 uint16_t stats_id)
11524 {
11525 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11526 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11527 						     DP_MOD_ID_CDP);
11528 
11529 	if (!vdev) {
11530 		dp_cdp_err("%pK: Invalid vdev handle", soc);
11531 		return QDF_STATUS_E_FAILURE;
11532 	}
11533 
11534 	switch (stats_id) {
11535 	case DP_VDEV_STATS_PKT_CNT_ONLY:
11536 		break;
11537 	case DP_VDEV_STATS_TX_ME:
11538 		dp_txrx_update_vdev_me_stats(vdev, buf);
11539 		dp_txrx_update_vdev_igmp_me_stats(vdev, buf);
11540 		break;
11541 	default:
11542 		qdf_info("Invalid stats_id %d", stats_id);
11543 		break;
11544 	}
11545 
11546 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11547 	return QDF_STATUS_SUCCESS;
11548 }
11549 
11550 /**
11551  * dp_txrx_get_peer_stats() - get cdp_peer_stats for the given peer
11552  * @soc: soc handle
11553  * @vdev_id: id of vdev handle
11554  * @peer_mac: mac of DP_PEER handle
11555  * @peer_stats: buffer to copy to
11556  *
11557  * Return: status success/failure
11558  */
11559 static QDF_STATUS
11560 dp_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
11561 		       uint8_t *peer_mac, struct cdp_peer_stats *peer_stats)
11562 {
11563 	struct dp_peer *peer = NULL;
11564 	struct cdp_peer_info peer_info = { 0 };
11565 
11566 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac, false,
11567 				 CDP_WILD_PEER_TYPE);
11568 
11569 	peer = dp_peer_hash_find_wrapper((struct dp_soc *)soc, &peer_info,
11570 					 DP_MOD_ID_CDP);
11571 
11572 	qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats));
11573 
11574 	if (!peer)
11575 		return QDF_STATUS_E_FAILURE;
11576 
11577 	dp_get_peer_stats(peer, peer_stats);
11578 
11579 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11580 
11581 	return QDF_STATUS_SUCCESS;
11582 }
11583 
11584 /**
11585  * dp_txrx_get_peer_stats_param() - get the specified cdp_peer_stats
11586  * @soc: soc handle
11587  * @vdev_id: vdev_id of vdev object
11588  * @peer_mac: mac address of the peer
11589  * @type: enum of required stats
11590  * @buf: buffer to hold the value
11591  *
11592  * Return: status success/failure
11593  */
11594 static QDF_STATUS
11595 dp_txrx_get_peer_stats_param(struct cdp_soc_t *soc, uint8_t vdev_id,
11596 			     uint8_t *peer_mac, enum cdp_peer_stats_type type,
11597 			     cdp_peer_stats_param_t *buf)
11598 {
11599 	QDF_STATUS ret;
11600 	struct dp_peer *peer = NULL;
11601 	struct cdp_peer_info peer_info = { 0 };
11602 
11603 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac, false,
11604 				 CDP_WILD_PEER_TYPE);
11605 
11606 	peer = dp_peer_hash_find_wrapper((struct dp_soc *)soc, &peer_info,
11607 					 DP_MOD_ID_CDP);
11608 
11609 	if (!peer) {
11610 		dp_peer_err("%pK: Invalid Peer for Mac " QDF_MAC_ADDR_FMT,
11611 			    soc, QDF_MAC_ADDR_REF(peer_mac));
11612 		return QDF_STATUS_E_FAILURE;
11613 	}
11614 
11615 	if (type >= cdp_peer_per_pkt_stats_min &&
11616 	    type < cdp_peer_per_pkt_stats_max) {
11617 		ret = dp_txrx_get_peer_per_pkt_stats_param(peer, type, buf);
11618 	} else if (type >= cdp_peer_extd_stats_min &&
11619 		   type < cdp_peer_extd_stats_max) {
11620 		ret = dp_txrx_get_peer_extd_stats_param(peer, type, buf);
11621 	} else {
11622 		dp_err("%pK: Invalid stat type requested", soc);
11623 		ret = QDF_STATUS_E_FAILURE;
11624 	}
11625 
11626 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11627 
11628 	return ret;
11629 }
11630 
11631 /**
11632  * dp_txrx_reset_peer_stats() - reset cdp_peer_stats for a particular peer
11633  * @soc_hdl: soc handle
11634  * @vdev_id: id of vdev handle
11635  * @peer_mac: mac of DP_PEER handle
11636  *
11637  * Return: QDF_STATUS
11638  */
11639 #ifdef WLAN_FEATURE_11BE_MLO
11640 static QDF_STATUS
11641 dp_txrx_reset_peer_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
11642 			 uint8_t *peer_mac)
11643 {
11644 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11645 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11646 	struct dp_peer *peer =
11647 			dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
11648 						       vdev_id, DP_MOD_ID_CDP);
11649 
11650 	if (!peer)
11651 		return QDF_STATUS_E_FAILURE;
11652 
11653 	DP_STATS_CLR(peer);
11654 	dp_txrx_peer_stats_clr(peer->txrx_peer);
11655 
11656 	if (IS_MLO_DP_MLD_PEER(peer)) {
11657 		uint8_t i;
11658 		struct dp_peer *link_peer;
11659 		struct dp_soc *link_peer_soc;
11660 		struct dp_mld_link_peers link_peers_info;
11661 
11662 		dp_get_link_peers_ref_from_mld_peer(soc, peer,
11663 						    &link_peers_info,
11664 						    DP_MOD_ID_CDP);
11665 		for (i = 0; i < link_peers_info.num_links; i++) {
11666 			link_peer = link_peers_info.link_peers[i];
11667 			link_peer_soc = link_peer->vdev->pdev->soc;
11668 
11669 			DP_STATS_CLR(link_peer);
11670 			dp_monitor_peer_reset_stats(link_peer_soc, link_peer);
11671 		}
11672 
11673 		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
11674 	} else {
11675 		dp_monitor_peer_reset_stats(soc, peer);
11676 	}
11677 
11678 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11679 
11680 	return status;
11681 }
11682 #else
11683 static QDF_STATUS
11684 dp_txrx_reset_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
11685 			 uint8_t *peer_mac)
11686 {
11687 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11688 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
11689 						      peer_mac, 0, vdev_id,
11690 						      DP_MOD_ID_CDP);
11691 
11692 	if (!peer)
11693 		return QDF_STATUS_E_FAILURE;
11694 
11695 	DP_STATS_CLR(peer);
11696 	dp_txrx_peer_stats_clr(peer->txrx_peer);
11697 	dp_monitor_peer_reset_stats((struct dp_soc *)soc, peer);
11698 
11699 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11700 
11701 	return status;
11702 }
11703 #endif
11704 
11705 /**
11706  * dp_txrx_get_vdev_stats() - Update buffer with cdp_vdev_stats
11707  * @soc_hdl: CDP SoC handle
11708  * @vdev_id: vdev Id
11709  * @buf: buffer for vdev stats
11710  * @is_aggregate: are aggregate stats being collected
11711  *
11712  * Return: 0 on success, 1 on failure
11713  */
11714 static int dp_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
11715 				  void *buf, bool is_aggregate)
11716 {
11717 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11718 	struct cdp_vdev_stats *vdev_stats;
11719 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11720 						     DP_MOD_ID_CDP);
11721 
11722 	if (!vdev)
11723 		return 1;
11724 
11725 	vdev_stats = (struct cdp_vdev_stats *)buf;
11726 
11727 	if (is_aggregate) {
11728 		dp_aggregate_vdev_stats(vdev, buf);
11729 	} else {
11730 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
11731 	}
11732 
11733 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11734 	return 0;
11735 }
11736 
11737 /**
11738  * dp_get_total_per() - get total PER (packet error rate)
11739  * @soc: DP soc handle
11740  * @pdev_id: id of DP_PDEV handle
11741  *
11742  * Return: error rate in percent, from TX retries and successful packets
11743  */
11744 static int dp_get_total_per(struct cdp_soc_t *soc, uint8_t pdev_id)
11745 {
11746 	struct dp_pdev *pdev =
11747 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11748 						   pdev_id);
11749 
11750 	if (!pdev)
11751 		return 0;
11752 
11753 	dp_aggregate_pdev_stats(pdev);
11754 	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
11755 		return 0;
11756 	return ((pdev->stats.tx.retries * 100) /
11757 		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
11758 }
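
/*
 * Worked example (illustrative): with pdev->stats.tx.tx_success.num ==
 * 900 and pdev->stats.tx.retries == 100, the function returns
 * (100 * 100) / (900 + 100) = 10, i.e. a 10% error rate.
 */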
11759 
11760 /**
11761  * dp_txrx_stats_publish() - publish pdev stats into a buffer
11762  * @soc: DP soc handle
11763  * @pdev_id: id of DP_PDEV handle
11764  * @buf: to hold pdev_stats
11765  *
11766  * Return: TXRX_STATS_LEVEL on success, TXRX_STATS_LEVEL_OFF otherwise
11767  */
11768 static int
11769 dp_txrx_stats_publish(struct cdp_soc_t *soc, uint8_t pdev_id,
11770 		      struct cdp_stats_extd *buf)
11771 {
11772 	struct cdp_txrx_stats_req req = {0,};
11773 	QDF_STATUS status;
11774 	struct dp_pdev *pdev =
11775 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11776 						   pdev_id);
11777 
11778 	if (!pdev)
11779 		return TXRX_STATS_LEVEL_OFF;
11780 
11781 	if (pdev->pending_fw_stats_response)
11782 		return TXRX_STATS_LEVEL_OFF;
11783 
11784 	dp_aggregate_pdev_stats(pdev);
11785 
11786 	pdev->pending_fw_stats_response = true;
11787 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
11788 	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
11789 	pdev->fw_stats_tlv_bitmap_rcvd = 0;
11790 	qdf_event_reset(&pdev->fw_stats_event);
11791 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
11792 				req.param1, req.param2, req.param3, 0,
11793 				req.cookie_val, 0);
11794 
11795 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
11796 	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
11797 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
11798 				req.param1, req.param2, req.param3, 0,
11799 				req.cookie_val, 0);
11800 
11801 	status =
11802 		qdf_wait_single_event(&pdev->fw_stats_event, DP_MAX_SLEEP_TIME);
11803 
11804 	if (status != QDF_STATUS_SUCCESS) {
11805 		if (status == QDF_STATUS_E_TIMEOUT)
11806 			qdf_debug("TIMEOUT_OCCURS");
11807 		pdev->pending_fw_stats_response = false;
11808 		return TXRX_STATS_LEVEL_OFF;
11809 	}
11810 	qdf_mem_copy(buf, &pdev->stats, sizeof(struct cdp_pdev_stats));
11811 	pdev->pending_fw_stats_response = false;
11812 
11813 	return TXRX_STATS_LEVEL;
11814 }
11815 
11816 /**
11817  * dp_get_obss_stats() - Get Pdev OBSS stats from Fw
11818  * @soc: DP soc handle
11819  * @pdev_id: id of DP_PDEV handle
11820  * @buf: to hold pdev obss stats
11821  * @req: Pointer to CDP TxRx stats
11822  *
11823  * Return: status
11824  */
11825 static QDF_STATUS
11826 dp_get_obss_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
11827 		  struct cdp_pdev_obss_pd_stats_tlv *buf,
11828 		  struct cdp_txrx_stats_req *req)
11829 {
11830 	QDF_STATUS status;
11831 	struct dp_pdev *pdev =
11832 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11833 						   pdev_id);
11834 
11835 	if (!pdev)
11836 		return QDF_STATUS_E_INVAL;
11837 
11838 	if (pdev->pending_fw_obss_stats_response)
11839 		return QDF_STATUS_E_AGAIN;
11840 
11841 	pdev->pending_fw_obss_stats_response = true;
11842 	req->stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_OBSS_PD_STATS;
11843 	req->cookie_val = DBG_STATS_COOKIE_HTT_OBSS;
11844 	qdf_event_reset(&pdev->fw_obss_stats_event);
11845 	status = dp_h2t_ext_stats_msg_send(pdev, req->stats, req->param0,
11846 					   req->param1, req->param2,
11847 					   req->param3, 0, req->cookie_val,
11848 					   req->mac_id);
11849 	if (QDF_IS_STATUS_ERROR(status)) {
11850 		pdev->pending_fw_obss_stats_response = false;
11851 		return status;
11852 	}
11853 	status =
11854 		qdf_wait_single_event(&pdev->fw_obss_stats_event,
11855 				      DP_MAX_SLEEP_TIME);
11856 
11857 	if (status != QDF_STATUS_SUCCESS) {
11858 		if (status == QDF_STATUS_E_TIMEOUT)
11859 			qdf_debug("TIMEOUT_OCCURS");
11860 		pdev->pending_fw_obss_stats_response = false;
11861 		return QDF_STATUS_E_TIMEOUT;
11862 	}
11863 	qdf_mem_copy(buf, &pdev->stats.htt_tx_pdev_stats.obss_pd_stats_tlv,
11864 		     sizeof(struct cdp_pdev_obss_pd_stats_tlv));
11865 	pdev->pending_fw_obss_stats_response = false;
11866 	return status;
11867 }
11868 
11869 /**
11870  * dp_clear_pdev_obss_pd_stats() - Clear pdev obss stats
11871  * @soc: DP soc handle
11872  * @pdev_id: id of DP_PDEV handle
11873  * @req: Pointer to CDP TxRx stats request; mac_id will be
11874  *	 pre-filled and should not be overwritten
11875  *
11876  * Return: status
11877  */
11878 static QDF_STATUS
11879 dp_clear_pdev_obss_pd_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
11880 			    struct cdp_txrx_stats_req *req)
11881 {
11882 	struct dp_pdev *pdev =
11883 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11884 						   pdev_id);
11885 	uint32_t cookie_val = DBG_STATS_COOKIE_DEFAULT;
11886 
11887 	if (!pdev)
11888 		return QDF_STATUS_E_INVAL;
11889 
11890 	/*
11891 	 * For the HTT_DBG_EXT_STATS_RESET command, the FW expects
11892 	 * param0 through param3 to be configured per the rule below:
11893 	 *
11894 	 * PARAM:
11895 	 *   - config_param0 : start_offset (stats type)
11896 	 *   - config_param1 : stats bmask from start offset
11897 	 *   - config_param2 : stats bmask from start offset + 32
11898 	 *   - config_param3 : stats bmask from start offset + 64
11899 	 */
11900 	req->stats = (enum cdp_stats)HTT_DBG_EXT_STATS_RESET;
11901 	req->param0 = HTT_DBG_EXT_STATS_PDEV_OBSS_PD_STATS;
11902 	req->param1 = 0x00000001;
11903 
11904 	return dp_h2t_ext_stats_msg_send(pdev, req->stats, req->param0,
11905 				  req->param1, req->param2, req->param3, 0,
11906 				cookie_val, req->mac_id);
11907 }
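
/*
 * Illustrative note (not part of the driver): with the values set
 * above, param0 = HTT_DBG_EXT_STATS_PDEV_OBSS_PD_STATS picks the
 * start offset (stats type) and param1 = 0x00000001 sets only bit 0
 * of the bmask at that offset, so exactly the OBSS PD stats are
 * reset.
 */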
11908 
11909 /**
11910  * dp_set_pdev_dscp_tid_map_wifi3() - update dscp tid map in pdev
11911  * @soc_handle: soc handle
11912  * @pdev_id: id of DP_PDEV handle
11913  * @map_id: ID of map that needs to be updated
11914  * @tos: index value in map
11915  * @tid: tid value passed by the user
11916  *
11917  * Return: QDF_STATUS
11918  */
11919 static QDF_STATUS
11920 dp_set_pdev_dscp_tid_map_wifi3(struct cdp_soc_t *soc_handle,
11921 			       uint8_t pdev_id,
11922 			       uint8_t map_id,
11923 			       uint8_t tos, uint8_t tid)
11924 {
11925 	uint8_t dscp;
11926 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
11927 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11928 
11929 	if (!pdev)
11930 		return QDF_STATUS_E_FAILURE;
11931 
11932 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
11933 	pdev->dscp_tid_map[map_id][dscp] = tid;
11934 
11935 	if (map_id < soc->num_hw_dscp_tid_map)
11936 		hal_tx_update_dscp_tid(soc->hal_soc, tid,
11937 				       map_id, dscp);
11938 	else
11939 		return QDF_STATUS_E_FAILURE;
11940 
11941 	return QDF_STATUS_SUCCESS;
11942 }
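
/*
 * Worked example (illustrative, assuming DP_IP_DSCP_SHIFT == 2 and
 * DP_IP_DSCP_MASK == 0x3f): for tos = 0xB8 (DSCP 46, EF), dscp =
 * (0xB8 >> 2) & 0x3f = 46, so a hypothetical caller doing
 *
 *	dp_set_pdev_dscp_tid_map_wifi3(soc_hdl, 0, 0, 0xB8, 6);
 *
 * maps EF traffic to TID 6 in map 0 (and programs HW when map 0 is a
 * HW map).
 */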
11943 
11944 #ifdef WLAN_SYSFS_DP_STATS
11945 /**
11946  * dp_sysfs_event_trigger() - wait for the firmware response to a
11947  * sysfs stats request
11948  * @soc: soc handle
11949  * @cookie_val: cookie value
11950  *
11951  * Return: QDF_STATUS
11952  */
11953 static QDF_STATUS
11954 dp_sysfs_event_trigger(struct dp_soc *soc, uint32_t cookie_val)
11955 {
11956 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11957 	/* wait for firmware response for sysfs stats request */
11958 	if (cookie_val == DBG_SYSFS_STATS_COOKIE) {
11959 		if (!soc) {
11960 			dp_cdp_err("soc is NULL");
11961 			return QDF_STATUS_E_FAILURE;
11962 		}
11963 		/* wait for event completion */
11964 		status = qdf_wait_single_event(&soc->sysfs_config->sysfs_txrx_fw_request_done,
11965 					       WLAN_SYSFS_STAT_REQ_WAIT_MS);
11966 		if (status == QDF_STATUS_SUCCESS)
11967 			dp_cdp_info("sysfs_txrx_fw_request_done event completed");
11968 		else if (status == QDF_STATUS_E_TIMEOUT)
11969 			dp_cdp_warn("sysfs_txrx_fw_request_done event expired");
11970 		else
11971 			dp_cdp_warn("sysfs_txrx_fw_request_done event error code %d", status);
11972 	}
11973 
11974 	return status;
11975 }
11976 #else /* WLAN_SYSFS_DP_STATS */
11977 static QDF_STATUS
11978 dp_sysfs_event_trigger(struct dp_soc *soc, uint32_t cookie_val)
11979 {
11980 	return QDF_STATUS_SUCCESS;
11981 }
11982 #endif /* WLAN_SYSFS_DP_STATS */
11983 
11984 /**
11985  * dp_fw_stats_process() - Process TXRX FW stats request.
11986  * @vdev: DP VDEV handle
11987  * @req: stats request
11988  *
11989  * Return: QDF_STATUS
11990  */
11991 static QDF_STATUS
11992 dp_fw_stats_process(struct dp_vdev *vdev,
11993 		    struct cdp_txrx_stats_req *req)
11994 {
11995 	struct dp_pdev *pdev = NULL;
11996 	struct dp_soc *soc = NULL;
11997 	uint32_t stats = req->stats;
11998 	uint8_t mac_id = req->mac_id;
11999 	uint32_t cookie_val = DBG_STATS_COOKIE_DEFAULT;
12000 
12001 	if (!vdev) {
12002 		DP_TRACE(NONE, "VDEV not found");
12003 		return QDF_STATUS_E_FAILURE;
12004 	}
12005 
12006 	pdev = vdev->pdev;
12007 	if (!pdev) {
12008 		DP_TRACE(NONE, "PDEV not found");
12009 		return QDF_STATUS_E_FAILURE;
12010 	}
12011 
12012 	soc = pdev->soc;
12013 	if (!soc) {
12014 		DP_TRACE(NONE, "soc not found");
12015 		return QDF_STATUS_E_FAILURE;
12016 	}
12017 
12018 	/* In case request is from host sysfs for displaying stats on console */
12019 	if (req->cookie_val == DBG_SYSFS_STATS_COOKIE)
12020 		cookie_val = DBG_SYSFS_STATS_COOKIE;
12021 
12022 	/*
12023 	 * For the HTT_DBG_EXT_STATS_RESET command, the FW expects
12024 	 * param0 through param3 to be configured per the rule below:
12025 	 *
12026 	 * PARAM:
12027 	 *   - config_param0 : start_offset (stats type)
12028 	 *   - config_param1 : stats bmask from start offset
12029 	 *   - config_param2 : stats bmask from start offset + 32
12030 	 *   - config_param3 : stats bmask from start offset + 64
12031 	 */
12032 	if (req->stats == CDP_TXRX_STATS_0) {
12033 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
12034 		req->param1 = 0xFFFFFFFF;
12035 		req->param2 = 0xFFFFFFFF;
12036 		req->param3 = 0xFFFFFFFF;
12037 	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
12038 		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
12039 	}
12040 
12041 	if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT) {
12042 		dp_h2t_ext_stats_msg_send(pdev,
12043 					  HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT,
12044 					  req->param0, req->param1, req->param2,
12045 					  req->param3, 0, cookie_val,
12046 					  mac_id);
12047 	} else {
12048 		dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
12049 					  req->param1, req->param2, req->param3,
12050 					  0, cookie_val, mac_id);
12051 	}
12052 
12053 	dp_sysfs_event_trigger(soc, cookie_val);
12054 
12055 	return QDF_STATUS_SUCCESS;
12056 }
12057 
12058 /**
12059  * dp_txrx_stats_request() - function to map a stats request to firmware and/or host stats
12060  * @soc_handle: soc handle
12061  * @vdev_id: virtual device ID
12062  * @req: stats request
12063  *
12064  * Return: QDF_STATUS
12065  */
12066 static
12067 QDF_STATUS dp_txrx_stats_request(struct cdp_soc_t *soc_handle,
12068 				 uint8_t vdev_id,
12069 				 struct cdp_txrx_stats_req *req)
12070 {
12071 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_handle);
12072 	int host_stats;
12073 	int fw_stats;
12074 	enum cdp_stats stats;
12075 	int num_stats;
12076 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12077 						     DP_MOD_ID_CDP);
12078 	QDF_STATUS status = QDF_STATUS_E_INVAL;
12079 
12080 	if (!vdev || !req) {
12081 		dp_cdp_err("%pK: Invalid vdev/req instance", soc);
12082 		status = QDF_STATUS_E_INVAL;
12083 		goto fail0;
12084 	}
12085 
12086 	if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) {
12087 		dp_err("Invalid mac id request");
12088 		status = QDF_STATUS_E_INVAL;
12089 		goto fail0;
12090 	}
12091 
12092 	stats = req->stats;
12093 	if (stats >= CDP_TXRX_MAX_STATS) {
12094 		status = QDF_STATUS_E_INVAL;
12095 		goto fail0;
12096 	}
12097 
12098 	/*
12099 	 * DP_CURR_FW_STATS_AVAIL: number of FW stats currently available;
12100 	 *			must be updated if new FW HTT stats are added
12101 	 */
12102 	if (stats > CDP_TXRX_STATS_HTT_MAX)
12103 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
12104 
12105 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
12106 
12107 	if (stats >= num_stats) {
12108 		dp_cdp_err("%pK : Invalid stats option: %d", soc, stats);
12109 		status = QDF_STATUS_E_INVAL;
12110 		goto fail0;
12111 	}
12112 
12113 	req->stats = stats;
12114 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
12115 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
12116 
12117 	dp_info("stats: %u fw_stats_type: %d host_stats: %d",
12118 		stats, fw_stats, host_stats);
12119 
12120 	if (fw_stats != TXRX_FW_STATS_INVALID) {
12121 		/* update request with FW stats type */
12122 		req->stats = fw_stats;
12123 		status = dp_fw_stats_process(vdev, req);
12124 	} else if ((host_stats != TXRX_HOST_STATS_INVALID) &&
12125 			(host_stats <= TXRX_HOST_STATS_MAX))
12126 		status = dp_print_host_stats(vdev, req, soc);
12127 	else
12128 		dp_cdp_info("%pK: Wrong Input for TxRx Stats", soc);
12129 fail0:
12130 	if (vdev)
12131 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12132 	return status;
12133 }
12134 
12135 /**
12136  * dp_txrx_dump_stats() - Dump statistics
12137  * @psoc: CDP soc handle
12138  * @value: Statistics option
12139  * @level: verbosity level
12140  */
12141 static QDF_STATUS dp_txrx_dump_stats(struct cdp_soc_t *psoc, uint16_t value,
12142 				     enum qdf_stats_verbosity_level level)
12143 {
12144 	struct dp_soc *soc =
12145 		(struct dp_soc *)psoc;
12146 	QDF_STATUS status = QDF_STATUS_SUCCESS;
12147 
12148 	if (!soc) {
12149 		dp_cdp_err("%pK: soc is NULL", soc);
12150 		return QDF_STATUS_E_INVAL;
12151 	}
12152 
12153 	switch (value) {
12154 	case CDP_TXRX_PATH_STATS:
12155 		dp_txrx_path_stats(soc);
12156 		dp_print_soc_interrupt_stats(soc);
12157 		hal_dump_reg_write_stats(soc->hal_soc);
12158 		dp_pdev_print_tx_delay_stats(soc);
12159 		/* Dump usage watermark stats for core TX/RX SRNGs */
12160 		dp_dump_srng_high_wm_stats(soc, (1 << REO_DST));
12161 		dp_print_fisa_stats(soc);
12162 		break;
12163 
12164 	case CDP_RX_RING_STATS:
12165 		dp_print_per_ring_stats(soc);
12166 		break;
12167 
12168 	case CDP_TXRX_TSO_STATS:
12169 		dp_print_tso_stats(soc, level);
12170 		break;
12171 
12172 	case CDP_DUMP_TX_FLOW_POOL_INFO:
12173 		if (level == QDF_STATS_VERBOSITY_LEVEL_HIGH)
12174 			cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
12175 		else
12176 			dp_tx_dump_flow_pool_info_compact(soc);
12177 		break;
12178 
12179 	case CDP_DP_NAPI_STATS:
12180 		dp_print_napi_stats(soc);
12181 		break;
12182 
12183 	case CDP_TXRX_DESC_STATS:
12184 		/* TODO: NOT IMPLEMENTED */
12185 		break;
12186 
12187 	case CDP_DP_RX_FISA_STATS:
12188 		dp_rx_dump_fisa_stats(soc);
12189 		break;
12190 
12191 	case CDP_DP_SWLM_STATS:
12192 		dp_print_swlm_stats(soc);
12193 		break;
12194 
12195 	case CDP_DP_TX_HW_LATENCY_STATS:
12196 		dp_pdev_print_tx_delay_stats(soc);
12197 		break;
12198 
12199 	default:
12200 		status = QDF_STATUS_E_INVAL;
12201 		break;
12202 	}
12203 
12204 	return status;
12206 }
12207 
12208 #ifdef WLAN_SYSFS_DP_STATS
12209 static
12210 void dp_sysfs_get_stat_type(struct dp_soc *soc, uint32_t *mac_id,
12211 			    uint32_t *stat_type)
12212 {
12213 	qdf_spinlock_acquire(&soc->sysfs_config->rw_stats_lock);
12214 	*stat_type = soc->sysfs_config->stat_type_requested;
12215 	*mac_id   = soc->sysfs_config->mac_id;
12216 
12217 	qdf_spinlock_release(&soc->sysfs_config->rw_stats_lock);
12218 }
12219 
12220 static
12221 void dp_sysfs_update_config_buf_params(struct dp_soc *soc,
12222 				       uint32_t curr_len,
12223 				       uint32_t max_buf_len,
12224 				       char *buf)
12225 {
12226 	qdf_spinlock_acquire(&soc->sysfs_config->sysfs_write_user_buffer);
12227 	/* set sysfs_config parameters */
12228 	soc->sysfs_config->buf = buf;
12229 	soc->sysfs_config->curr_buffer_length = curr_len;
12230 	soc->sysfs_config->max_buffer_length = max_buf_len;
12231 	qdf_spinlock_release(&soc->sysfs_config->sysfs_write_user_buffer);
12232 }
12233 
12234 static
12235 QDF_STATUS dp_sysfs_fill_stats(ol_txrx_soc_handle soc_hdl,
12236 			       char *buf, uint32_t buf_size)
12237 {
12238 	uint32_t mac_id = 0;
12239 	uint32_t stat_type = 0;
12240 	uint32_t fw_stats = 0;
12241 	uint32_t host_stats = 0;
12242 	enum cdp_stats stats;
12243 	struct cdp_txrx_stats_req req;
12244 	uint32_t num_stats;
12245 	struct dp_soc *soc = NULL;
12246 
12247 	if (!soc_hdl) {
12248 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
12249 		return QDF_STATUS_E_INVAL;
12250 	}
12251 
12252 	soc = cdp_soc_t_to_dp_soc(soc_hdl);
12253 
12254 	if (!soc) {
12255 		dp_cdp_err("%pK: soc is NULL", soc);
12256 		return QDF_STATUS_E_INVAL;
12257 	}
12258 
12259 	dp_sysfs_get_stat_type(soc, &mac_id, &stat_type);
12260 
12261 	stats = stat_type;
12262 	if (stats >= CDP_TXRX_MAX_STATS) {
12263 		dp_cdp_info("sysfs stat type requested is invalid");
12264 		return QDF_STATUS_E_INVAL;
12265 	}
12266 	/*
12267 	 * DP_CURR_FW_STATS_AVAIL: number of FW stats currently available;
12268 	 *			must be updated if new FW HTT stats are added
12269 	 */
12270 	if (stats > CDP_TXRX_STATS_HTT_MAX)
12271 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
12272 
12273 	num_stats = QDF_ARRAY_SIZE(dp_stats_mapping_table);
12274 
12275 	if (stats >= num_stats) {
12276 		dp_cdp_err("%pK : Invalid stats option: %d, max num stats: %d",
12277 				soc, stats, num_stats);
12278 		return QDF_STATUS_E_INVAL;
12279 	}
12280 
12281 	/* build request */
12282 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
12283 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
12284 
12285 	req.stats = stat_type;
12286 	req.mac_id = mac_id;
12287 	/* request stats to be printed */
12288 	qdf_mutex_acquire(&soc->sysfs_config->sysfs_read_lock);
12289 
12290 	if (fw_stats != TXRX_FW_STATS_INVALID) {
12291 		/* update request with FW stats type */
12292 		req.cookie_val = DBG_SYSFS_STATS_COOKIE;
12293 	} else if ((host_stats != TXRX_HOST_STATS_INVALID) &&
12294 			(host_stats <= TXRX_HOST_STATS_MAX)) {
12295 		req.cookie_val = DBG_STATS_COOKIE_DEFAULT;
12296 		soc->sysfs_config->process_id = qdf_get_current_pid();
12297 		soc->sysfs_config->printing_mode = PRINTING_MODE_ENABLED;
12298 	}
12299 
12300 	dp_sysfs_update_config_buf_params(soc, 0, buf_size, buf);
12301 
12302 	dp_txrx_stats_request(soc_hdl, mac_id, &req);
12303 	soc->sysfs_config->process_id = 0;
12304 	soc->sysfs_config->printing_mode = PRINTING_MODE_DISABLED;
12305 
12306 	dp_sysfs_update_config_buf_params(soc, 0, 0, NULL);
12307 
12308 	qdf_mutex_release(&soc->sysfs_config->sysfs_read_lock);
12309 	return QDF_STATUS_SUCCESS;
12310 }
12311 
12312 static
12313 QDF_STATUS dp_sysfs_set_stat_type(ol_txrx_soc_handle soc_hdl,
12314 				  uint32_t stat_type, uint32_t mac_id)
12315 {
12316 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12317 
12318 	if (!soc_hdl) {
12319 		dp_cdp_err("%pK: soc is NULL", soc);
12320 		return QDF_STATUS_E_INVAL;
12321 	}
12322 
12323 	qdf_spinlock_acquire(&soc->sysfs_config->rw_stats_lock);
12324 
12325 	soc->sysfs_config->stat_type_requested = stat_type;
12326 	soc->sysfs_config->mac_id = mac_id;
12327 
12328 	qdf_spinlock_release(&soc->sysfs_config->rw_stats_lock);
12329 
12330 	return QDF_STATUS_SUCCESS;
12331 }
12332 
12333 static
12334 QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl)
12335 {
12336 	struct dp_soc *soc;
12337 	QDF_STATUS status;
12338 
12339 	if (!soc_hdl) {
12340 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
12341 		return QDF_STATUS_E_INVAL;
12342 	}
12343 
12344 	soc = soc_hdl;
12345 
12346 	soc->sysfs_config = qdf_mem_malloc(sizeof(struct sysfs_stats_config));
12347 	if (!soc->sysfs_config) {
12348 		dp_cdp_err("failed to allocate memory for sysfs_config");
12349 		return QDF_STATUS_E_NOMEM;
12350 	}
12351 
12352 	/* create event for fw stats request from sysfs */
12353 	status = qdf_event_create(&soc->sysfs_config->sysfs_txrx_fw_request_done);
12354 	if (status != QDF_STATUS_SUCCESS) {
12355 		dp_cdp_err("failed to create event sysfs_txrx_fw_request_done");
12356 		qdf_mem_free(soc->sysfs_config);
12357 		soc->sysfs_config = NULL;
12358 		return QDF_STATUS_E_FAILURE;
12359 	}
12360 
12361 	qdf_spinlock_create(&soc->sysfs_config->rw_stats_lock);
12362 	qdf_mutex_create(&soc->sysfs_config->sysfs_read_lock);
12363 	qdf_spinlock_create(&soc->sysfs_config->sysfs_write_user_buffer);
12364 
12365 	return QDF_STATUS_SUCCESS;
12366 }
12367 
12368 static
12369 QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl)
12370 {
12371 	struct dp_soc *soc;
12372 	QDF_STATUS status;
12373 
12374 	if (!soc_hdl) {
12375 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
12376 		return QDF_STATUS_E_INVAL;
12377 	}
12378 
12379 	soc = soc_hdl;
12380 	if (!soc->sysfs_config) {
12381 		dp_cdp_err("soc->sysfs_config is NULL");
12382 		return QDF_STATUS_E_FAILURE;
12383 	}
12384 
12385 	status = qdf_event_destroy(&soc->sysfs_config->sysfs_txrx_fw_request_done);
12386 	if (status != QDF_STATUS_SUCCESS)
12387 		dp_cdp_err("Failed to destroy event sysfs_txrx_fw_request_done");
12388 
12389 	qdf_mutex_destroy(&soc->sysfs_config->sysfs_read_lock);
12390 	qdf_spinlock_destroy(&soc->sysfs_config->rw_stats_lock);
12391 	qdf_spinlock_destroy(&soc->sysfs_config->sysfs_write_user_buffer);
12392 
12393 	qdf_mem_free(soc->sysfs_config);
12394 
12395 	return QDF_STATUS_SUCCESS;
12396 }
12397 
12398 #else /* WLAN_SYSFS_DP_STATS */
12399 
12400 static
12401 QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl)
12402 {
12403 	return QDF_STATUS_SUCCESS;
12404 }
12405 
12406 static
12407 QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl)
12408 {
12409 	return QDF_STATUS_SUCCESS;
12410 }
12411 #endif /* WLAN_SYSFS_DP_STATS */
12412 
12413 /**
12414  * dp_txrx_clear_dump_stats() - clear dumpStats
12415  * @soc_hdl: soc handle
12416  * @pdev_id: pdev ID
12417  * @value: stats option
12418  *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_INVAL for an
 * unknown stats option
12420  */
12421 static
12422 QDF_STATUS dp_txrx_clear_dump_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12423 				    uint8_t value)
12424 {
12425 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12426 	QDF_STATUS status = QDF_STATUS_SUCCESS;
12427 
12428 	if (!soc) {
12429 		dp_err("soc is NULL");
12430 		return QDF_STATUS_E_INVAL;
12431 	}
12432 
12433 	switch (value) {
12434 	case CDP_TXRX_TSO_STATS:
12435 		dp_txrx_clear_tso_stats(soc);
12436 		break;
12437 
12438 	case CDP_DP_TX_HW_LATENCY_STATS:
12439 		dp_pdev_clear_tx_delay_stats(soc);
12440 		break;
12441 
12442 	default:
12443 		status = QDF_STATUS_E_INVAL;
12444 		break;
12445 	}
12446 
12447 	return status;
12448 }
12449 
12450 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
12451 /**
 * dp_update_flow_control_parameters() - API to store datapath
 *                            flow control parameters
12454  * @soc: soc handle
12455  * @params: ini parameter handle
12456  *
12457  * Return: void
12458  */
12459 static inline
12460 void dp_update_flow_control_parameters(struct dp_soc *soc,
12461 				struct cdp_config_params *params)
12462 {
12463 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
12464 					params->tx_flow_stop_queue_threshold;
12465 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
12466 					params->tx_flow_start_queue_offset;
12467 }
12468 #else
12469 static inline
12470 void dp_update_flow_control_parameters(struct dp_soc *soc,
12471 				struct cdp_config_params *params)
12472 {
12473 }
12474 #endif
12475 
12476 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
12477 /* Max packet limit for TX Comp packet loop (dp_tx_comp_handler) */
12478 #define DP_TX_COMP_LOOP_PKT_LIMIT_MAX 1024
12479 
12480 /* Max packet limit for RX REAP Loop (dp_rx_process) */
12481 #define DP_RX_REAP_LOOP_PKT_LIMIT_MAX 1024
12482 
12483 static
12484 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
12485 					struct cdp_config_params *params)
12486 {
12487 	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit =
12488 				params->tx_comp_loop_pkt_limit;
12489 
12490 	if (params->tx_comp_loop_pkt_limit < DP_TX_COMP_LOOP_PKT_LIMIT_MAX)
12491 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = true;
12492 	else
12493 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = false;
12494 
12495 	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit =
12496 				params->rx_reap_loop_pkt_limit;
12497 
12498 	if (params->rx_reap_loop_pkt_limit < DP_RX_REAP_LOOP_PKT_LIMIT_MAX)
12499 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = true;
12500 	else
12501 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = false;
12502 
12503 	soc->wlan_cfg_ctx->rx_hp_oos_update_limit =
12504 				params->rx_hp_oos_update_limit;
12505 
12506 	dp_info("tx_comp_loop_pkt_limit %u tx_comp_enable_eol_data_check %u rx_reap_loop_pkt_limit %u rx_enable_eol_data_check %u rx_hp_oos_update_limit %u",
12507 		soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit,
12508 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check,
12509 		soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit,
12510 		soc->wlan_cfg_ctx->rx_enable_eol_data_check,
12511 		soc->wlan_cfg_ctx->rx_hp_oos_update_limit);
12512 }
12513 
12514 static void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
12515 				      uint32_t rx_limit)
12516 {
12517 	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit = tx_limit;
12518 	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit = rx_limit;
12519 }
12520 
12521 #else
12522 static inline
12523 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
12524 					struct cdp_config_params *params)
12525 { }
12526 
12527 static inline
12528 void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
12529 			       uint32_t rx_limit)
12530 {
12531 }
12532 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
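
/*
 * Worked example (illustrative): with tx_comp_loop_pkt_limit = 64 and
 * rx_reap_loop_pkt_limit = 128 (both below the 1024 maxima above), the
 * end-of-loop data checks are enabled, so the TX completion and RX reap
 * loops re-check the ring after hitting the 64/128 packet budgets
 * instead of exiting unconditionally. Limits at or above the maxima
 * disable that check.
 */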
12533 
12534 /**
12535  * dp_update_config_parameters() - API to store datapath
12536  *                            config parameters
12537  * @psoc: soc handle
12538  * @params: ini parameter handle
12539  *
 * Return: QDF_STATUS
12541  */
12542 static
12543 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
12544 				struct cdp_config_params *params)
12545 {
12546 	struct dp_soc *soc = (struct dp_soc *)psoc;
12547 
12548 	if (!(soc)) {
12549 		dp_cdp_err("%pK: Invalid handle", soc);
12550 		return QDF_STATUS_E_INVAL;
12551 	}
12552 
12553 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
12554 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
12555 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
12556 	soc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload =
12557 				params->p2p_tcp_udp_checksumoffload;
12558 	soc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload =
12559 				params->nan_tcp_udp_checksumoffload;
12560 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
12561 				params->tcp_udp_checksumoffload;
12562 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
12563 	soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
12564 	soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
12565 
12566 	dp_update_rx_soft_irq_limit_params(soc, params);
12567 	dp_update_flow_control_parameters(soc, params);
12568 
12569 	return QDF_STATUS_SUCCESS;
12570 }
12571 
12572 static struct cdp_wds_ops dp_ops_wds = {
12573 	.vdev_set_wds = dp_vdev_set_wds,
12574 #ifdef WDS_VENDOR_EXTENSION
12575 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
12576 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
12577 #endif
12578 };
12579 
12580 /**
 * dp_txrx_data_tx_cb_set() - set the callback for non-standard tx
12582  * @soc_hdl: datapath soc handle
12583  * @vdev_id: virtual interface id
12584  * @callback: callback function
12585  * @ctxt: callback context
12586  *
12587  */
12588 static void
12589 dp_txrx_data_tx_cb_set(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
12590 		       ol_txrx_data_tx_cb callback, void *ctxt)
12591 {
12592 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12593 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12594 						     DP_MOD_ID_CDP);
12595 
12596 	if (!vdev)
12597 		return;
12598 
12599 	vdev->tx_non_std_data_callback.func = callback;
12600 	vdev->tx_non_std_data_callback.ctxt = ctxt;
12601 
12602 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12603 }
12604 
12605 /**
12606  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
12607  * @soc: datapath soc handle
12608  * @pdev_id: id of datapath pdev handle
12609  *
12610  * Return: opaque pointer to dp txrx handle
12611  */
12612 static void *dp_pdev_get_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id)
12613 {
12614 	struct dp_pdev *pdev =
12615 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
12616 						   pdev_id);
12617 	if (qdf_unlikely(!pdev))
12618 		return NULL;
12619 
12620 	return pdev->dp_txrx_handle;
12621 }
12622 
12623 /**
12624  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
12625  * @soc: datapath soc handle
12626  * @pdev_id: id of datapath pdev handle
12627  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
12628  *
12629  * Return: void
12630  */
12631 static void
12632 dp_pdev_set_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id,
12633 			   void *dp_txrx_hdl)
12634 {
12635 	struct dp_pdev *pdev =
12636 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
12637 						   pdev_id);
12638 
12639 	if (!pdev)
12640 		return;
12641 
12642 	pdev->dp_txrx_handle = dp_txrx_hdl;
12643 }
12644 
12645 /**
12646  * dp_vdev_get_dp_ext_handle() - get dp handle from vdev
12647  * @soc_hdl: datapath soc handle
12648  * @vdev_id: vdev id
12649  *
12650  * Return: opaque pointer to dp txrx handle
12651  */
12652 static void *dp_vdev_get_dp_ext_handle(ol_txrx_soc_handle soc_hdl,
12653 				       uint8_t vdev_id)
12654 {
12655 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12656 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12657 						     DP_MOD_ID_CDP);
12658 	void *dp_ext_handle;
12659 
12660 	if (!vdev)
12661 		return NULL;
12662 	dp_ext_handle = vdev->vdev_dp_ext_handle;
12663 
12664 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12665 	return dp_ext_handle;
12666 }
12667 
12668 /**
12669  * dp_vdev_set_dp_ext_handle() - set dp handle in vdev
12670  * @soc_hdl: datapath soc handle
12671  * @vdev_id: vdev id
 * @size: size of the advanced dp handle to allocate
12673  *
12674  * Return: QDF_STATUS
12675  */
12676 static QDF_STATUS
12677 dp_vdev_set_dp_ext_handle(ol_txrx_soc_handle soc_hdl, uint8_t vdev_id,
12678 			  uint16_t size)
12679 {
12680 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12681 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12682 						     DP_MOD_ID_CDP);
12683 	void *dp_ext_handle;
12684 
12685 	if (!vdev)
12686 		return QDF_STATUS_E_FAILURE;
12687 
12688 	dp_ext_handle = qdf_mem_malloc(size);
12689 
12690 	if (!dp_ext_handle) {
12691 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12692 		return QDF_STATUS_E_FAILURE;
12693 	}
12694 
12695 	vdev->vdev_dp_ext_handle = dp_ext_handle;
12696 
12697 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12698 	return QDF_STATUS_SUCCESS;
12699 }
12700 
12701 /**
12702  * dp_vdev_inform_ll_conn() - Inform vdev to add/delete a latency critical
12703  *			      connection for this vdev
12704  * @soc_hdl: CDP soc handle
12705  * @vdev_id: vdev ID
12706  * @action: Add/Delete action
12707  *
12708  * Return: QDF_STATUS.
12709  */
12710 static QDF_STATUS
12711 dp_vdev_inform_ll_conn(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
12712 		       enum vdev_ll_conn_actions action)
12713 {
12714 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12715 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12716 						     DP_MOD_ID_CDP);
12717 
12718 	if (!vdev) {
12719 		dp_err("LL connection action for invalid vdev %d", vdev_id);
12720 		return QDF_STATUS_E_FAILURE;
12721 	}
12722 
12723 	switch (action) {
12724 	case CDP_VDEV_LL_CONN_ADD:
12725 		vdev->num_latency_critical_conn++;
12726 		break;
12727 
12728 	case CDP_VDEV_LL_CONN_DEL:
12729 		vdev->num_latency_critical_conn--;
12730 		break;
12731 
12732 	default:
12733 		dp_err("LL connection action invalid %d", action);
12734 		break;
12735 	}
12736 
12737 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12738 	return QDF_STATUS_SUCCESS;
12739 }
12740 
12741 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
12742 /**
12743  * dp_soc_set_swlm_enable() - Enable/Disable SWLM if initialized.
12744  * @soc_hdl: CDP Soc handle
12745  * @value: Enable/Disable value
12746  *
12747  * Return: QDF_STATUS
12748  */
12749 static QDF_STATUS dp_soc_set_swlm_enable(struct cdp_soc_t *soc_hdl,
12750 					 uint8_t value)
12751 {
12752 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12753 
12754 	if (!soc->swlm.is_init) {
12755 		dp_err("SWLM is not initialized");
12756 		return QDF_STATUS_E_FAILURE;
12757 	}
12758 
12759 	soc->swlm.is_enabled = !!value;
12760 
12761 	return QDF_STATUS_SUCCESS;
12762 }
12763 
12764 /**
12765  * dp_soc_is_swlm_enabled() - Check if SWLM is enabled.
12766  * @soc_hdl: CDP Soc handle
12767  *
 * Return: 1 if SWLM is enabled, 0 otherwise
12769  */
12770 static uint8_t dp_soc_is_swlm_enabled(struct cdp_soc_t *soc_hdl)
12771 {
12772 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12773 
12774 	return soc->swlm.is_enabled;
12775 }
12776 #endif
12777 
12778 /**
12779  * dp_display_srng_info() - Dump the srng HP TP info
12780  * @soc_hdl: CDP Soc handle
12781  *
12782  * This function dumps the SW hp/tp values for the important rings.
 * HW hp/tp values are not dumped, since reading them can trigger a
 * READ NOC error when the UMAC is in low power state. MCC does not
 * have device force wake working yet.
12786  *
12787  * Return: none
12788  */
12789 static void dp_display_srng_info(struct cdp_soc_t *soc_hdl)
12790 {
12791 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12792 	hal_soc_handle_t hal_soc = soc->hal_soc;
12793 	uint32_t hp, tp, i;
12794 
12795 	dp_info("SRNG HP-TP data:");
12796 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
12797 		hal_get_sw_hptp(hal_soc, soc->tcl_data_ring[i].hal_srng,
12798 				&tp, &hp);
12799 		dp_info("TCL DATA ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
12800 
12801 		if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, i) ==
12802 		    INVALID_WBM_RING_NUM)
12803 			continue;
12804 
12805 		hal_get_sw_hptp(hal_soc, soc->tx_comp_ring[i].hal_srng,
12806 				&tp, &hp);
12807 		dp_info("TX comp ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
12808 	}
12809 
12810 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
12811 		hal_get_sw_hptp(hal_soc, soc->reo_dest_ring[i].hal_srng,
12812 				&tp, &hp);
12813 		dp_info("REO DST ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
12814 	}
12815 
12816 	hal_get_sw_hptp(hal_soc, soc->reo_exception_ring.hal_srng, &tp, &hp);
12817 	dp_info("REO exception ring: hp=0x%x, tp=0x%x", hp, tp);
12818 
12819 	hal_get_sw_hptp(hal_soc, soc->rx_rel_ring.hal_srng, &tp, &hp);
12820 	dp_info("WBM RX release ring: hp=0x%x, tp=0x%x", hp, tp);
12821 
12822 	hal_get_sw_hptp(hal_soc, soc->wbm_desc_rel_ring.hal_srng, &tp, &hp);
12823 	dp_info("WBM desc release ring: hp=0x%x, tp=0x%x", hp, tp);
12824 }
12825 
12826 /**
12827  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
12828  * @soc_handle: datapath soc handle
12829  *
12830  * Return: opaque pointer to external dp (non-core DP)
12831  */
12832 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
12833 {
12834 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12835 
12836 	return soc->external_txrx_handle;
12837 }
12838 
12839 /**
12840  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
12841  * @soc_handle: datapath soc handle
12842  * @txrx_handle: opaque pointer to external dp (non-core DP)
12843  *
12844  * Return: void
12845  */
12846 static void
12847 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
12848 {
12849 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12850 
12851 	soc->external_txrx_handle = txrx_handle;
12852 }
12853 
12854 /**
12855  * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping
12856  * @soc_hdl: datapath soc handle
12857  * @pdev_id: id of the datapath pdev handle
12858  * @lmac_id: lmac id
12859  *
12860  * Return: QDF_STATUS
12861  */
12862 static QDF_STATUS
12863 dp_soc_map_pdev_to_lmac
12864 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12865 	 uint32_t lmac_id)
12866 {
12867 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12868 
12869 	wlan_cfg_set_hw_mac_idx(soc->wlan_cfg_ctx,
12870 				pdev_id,
12871 				lmac_id);
12872 
	/* Set host PDEV ID for lmac_id */
12874 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
12875 			      pdev_id,
12876 			      lmac_id);
12877 
12878 	return QDF_STATUS_SUCCESS;
12879 }
12880 
12881 /**
12882  * dp_soc_handle_pdev_mode_change() - Update pdev to lmac mapping
12883  * @soc_hdl: datapath soc handle
12884  * @pdev_id: id of the datapath pdev handle
12885  * @lmac_id: lmac id
12886  *
12887  * In the event of a dynamic mode change, update the pdev to lmac mapping
12888  *
12889  * Return: QDF_STATUS
12890  */
12891 static QDF_STATUS
12892 dp_soc_handle_pdev_mode_change
12893 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12894 	 uint32_t lmac_id)
12895 {
12896 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12897 	struct dp_vdev *vdev = NULL;
12898 	uint8_t hw_pdev_id, mac_id;
12899 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc,
12900 								  pdev_id);
12901 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
12902 
12903 	if (qdf_unlikely(!pdev))
12904 		return QDF_STATUS_E_FAILURE;
12905 
12906 	pdev->lmac_id = lmac_id;
12907 	pdev->target_pdev_id =
12908 		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
12909 	dp_info(" mode change %d %d\n", pdev->pdev_id, pdev->lmac_id);
12910 
	/* Set host PDEV ID for lmac_id */
12912 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
12913 			      pdev->pdev_id,
12914 			      lmac_id);
12915 
12916 	hw_pdev_id =
12917 		dp_get_target_pdev_id_for_host_pdev_id(soc,
12918 						       pdev->pdev_id);
12919 
12920 	/*
12921 	 * When NSS offload is enabled, send pdev_id->lmac_id
12922 	 * and pdev_id to hw_pdev_id to NSS FW
12923 	 */
12924 	if (nss_config) {
12925 		mac_id = pdev->lmac_id;
12926 		if (soc->cdp_soc.ol_ops->pdev_update_lmac_n_target_pdev_id)
12927 			soc->cdp_soc.ol_ops->
12928 				pdev_update_lmac_n_target_pdev_id(
12929 				soc->ctrl_psoc,
12930 				&pdev_id, &mac_id, &hw_pdev_id);
12931 	}
12932 
12933 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
12934 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
12935 		DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
12936 					       hw_pdev_id);
12937 		vdev->lmac_id = pdev->lmac_id;
12938 	}
12939 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
12940 
12941 	return QDF_STATUS_SUCCESS;
12942 }
12943 
12944 /**
12945  * dp_soc_set_pdev_status_down() - set pdev down/up status
12946  * @soc: datapath soc handle
12947  * @pdev_id: id of datapath pdev handle
12948  * @is_pdev_down: pdev down/up status
12949  *
12950  * Return: QDF_STATUS
12951  */
12952 static QDF_STATUS
12953 dp_soc_set_pdev_status_down(struct cdp_soc_t *soc, uint8_t pdev_id,
12954 			    bool is_pdev_down)
12955 {
12956 	struct dp_pdev *pdev =
12957 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
12958 						   pdev_id);
12959 	if (!pdev)
12960 		return QDF_STATUS_E_FAILURE;
12961 
12962 	pdev->is_pdev_down = is_pdev_down;
12963 	return QDF_STATUS_SUCCESS;
12964 }
12965 
12966 /**
12967  * dp_get_cfg_capabilities() - get dp capabilities
12968  * @soc_handle: datapath soc handle
12969  * @dp_caps: enum for dp capabilities
12970  *
12971  * Return: bool to determine if dp caps is enabled
12972  */
12973 static bool
12974 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
12975 			enum cdp_capabilities dp_caps)
12976 {
12977 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12978 
12979 	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
12980 }
12981 
12982 #ifdef FEATURE_AST
12983 static QDF_STATUS
12984 dp_peer_teardown_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
12985 		       uint8_t *peer_mac)
12986 {
12987 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12988 	QDF_STATUS status = QDF_STATUS_SUCCESS;
12989 	struct dp_peer *peer =
12990 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
12991 					       DP_MOD_ID_CDP);
12992 
	/* Peer can be NULL for the monitor VAP MAC address */
12994 	if (!peer) {
12995 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
12996 			  "%s: Invalid peer\n", __func__);
12997 		return QDF_STATUS_E_FAILURE;
12998 	}
12999 
13000 	dp_peer_update_state(soc, peer, DP_PEER_STATE_LOGICAL_DELETE);
13001 
13002 	qdf_spin_lock_bh(&soc->ast_lock);
13003 	dp_peer_send_wds_disconnect(soc, peer);
13004 	dp_peer_delete_ast_entries(soc, peer);
13005 	qdf_spin_unlock_bh(&soc->ast_lock);
13006 
13007 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
13008 	return status;
13009 }
13010 #endif
13011 
13012 #ifndef WLAN_SUPPORT_RX_TAG_STATISTICS
13013 /**
 * dp_dump_pdev_rx_protocol_tag_stats() - dump the number of packets tagged
 * for a given protocol type (RX_PROTOCOL_TAG_ALL indicates all protocols)
13016  * @soc: cdp_soc handle
13017  * @pdev_id: id of cdp_pdev handle
13018  * @protocol_type: protocol type for which stats should be displayed
13019  *
13020  * Return: none
13021  */
13022 static inline void
13023 dp_dump_pdev_rx_protocol_tag_stats(struct cdp_soc_t  *soc, uint8_t pdev_id,
13024 				   uint16_t protocol_type)
13025 {
13026 }
13027 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
13028 
13029 #ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
13030 /**
13031  * dp_update_pdev_rx_protocol_tag() - Add/remove a protocol tag that should be
13032  * applied to the desired protocol type packets
13033  * @soc: soc handle
13034  * @pdev_id: id of cdp_pdev handle
 * @enable_rx_protocol_tag: bitmask of protocol types enabled for tagging;
 * zero disables the feature, non-zero enables it
13038  * @protocol_type: new protocol type for which the tag is being added
13039  * @tag: user configured tag for the new protocol
13040  *
13041  * Return: Success
13042  */
13043 static inline QDF_STATUS
13044 dp_update_pdev_rx_protocol_tag(struct cdp_soc_t  *soc, uint8_t pdev_id,
13045 			       uint32_t enable_rx_protocol_tag,
13046 			       uint16_t protocol_type,
13047 			       uint16_t tag)
13048 {
13049 	return QDF_STATUS_SUCCESS;
13050 }
13051 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
13052 
13053 #ifndef WLAN_SUPPORT_RX_FLOW_TAG
13054 /**
13055  * dp_set_rx_flow_tag() - add/delete a flow
13056  * @cdp_soc: CDP soc handle
13057  * @pdev_id: id of cdp_pdev handle
13058  * @flow_info: flow tuple that is to be added to/deleted from flow search table
13059  *
13060  * Return: Success
13061  */
13062 static inline QDF_STATUS
13063 dp_set_rx_flow_tag(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
13064 		   struct cdp_rx_flow_info *flow_info)
13065 {
13066 	return QDF_STATUS_SUCCESS;
13067 }
13068 /**
13069  * dp_dump_rx_flow_tag_stats() - dump the number of packets tagged for
13070  * given flow 5-tuple
13071  * @cdp_soc: soc handle
13072  * @pdev_id: id of cdp_pdev handle
13073  * @flow_info: flow 5-tuple for which stats should be displayed
13074  *
13075  * Return: Success
13076  */
13077 static inline QDF_STATUS
13078 dp_dump_rx_flow_tag_stats(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
13079 			  struct cdp_rx_flow_info *flow_info)
13080 {
13081 	return QDF_STATUS_SUCCESS;
13082 }
13083 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
13084 
13085 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
13086 					   uint32_t max_peers,
13087 					   uint32_t max_ast_index,
13088 					   uint8_t peer_map_unmap_versions)
13089 {
13090 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
13091 	QDF_STATUS status;
13092 
13093 	soc->max_peers = max_peers;
13094 
13095 	wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);
13096 
13097 	status = soc->arch_ops.txrx_peer_map_attach(soc);
13098 	if (!QDF_IS_STATUS_SUCCESS(status)) {
13099 		dp_err("failure in allocating peer tables");
13100 		return QDF_STATUS_E_FAILURE;
13101 	}
13102 
13103 	dp_info("max_peers %u, calculated max_peers %u max_ast_index: %u\n",
13104 		max_peers, soc->max_peer_id, max_ast_index);
13105 
13106 	status = dp_peer_find_attach(soc);
13107 	if (!QDF_IS_STATUS_SUCCESS(status)) {
13108 		dp_err("Peer find attach failure");
13109 		goto fail;
13110 	}
13111 
13112 	soc->peer_map_unmap_versions = peer_map_unmap_versions;
13113 	soc->peer_map_attach_success = TRUE;
13114 
13115 	return QDF_STATUS_SUCCESS;
13116 fail:
13117 	soc->arch_ops.txrx_peer_map_detach(soc);
13118 
13119 	return status;
13120 }
13121 
13122 static QDF_STATUS dp_soc_set_param(struct cdp_soc_t  *soc_hdl,
13123 				   enum cdp_soc_param_t param,
13124 				   uint32_t value)
13125 {
13126 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
13127 
13128 	switch (param) {
13129 	case DP_SOC_PARAM_MSDU_EXCEPTION_DESC:
13130 		soc->num_msdu_exception_desc = value;
13131 		dp_info("num_msdu exception_desc %u",
13132 			value);
13133 		break;
13134 	case DP_SOC_PARAM_CMEM_FSE_SUPPORT:
13135 		if (wlan_cfg_is_fst_in_cmem_enabled(soc->wlan_cfg_ctx))
13136 			soc->fst_in_cmem = !!value;
13137 		dp_info("FW supports CMEM FSE %u", value);
13138 		break;
13139 	case DP_SOC_PARAM_MAX_AST_AGEOUT:
13140 		soc->max_ast_ageout_count = value;
13141 		dp_info("Max ast ageout count %u", soc->max_ast_ageout_count);
13142 		break;
13143 	case DP_SOC_PARAM_EAPOL_OVER_CONTROL_PORT:
13144 		soc->eapol_over_control_port = value;
13145 		dp_info("Eapol over control_port:%d",
13146 			soc->eapol_over_control_port);
13147 		break;
13148 	case DP_SOC_PARAM_MULTI_PEER_GRP_CMD_SUPPORT:
13149 		soc->multi_peer_grp_cmd_supported = value;
13150 		dp_info("Multi Peer group command support:%d",
13151 			soc->multi_peer_grp_cmd_supported);
13152 		break;
13153 	case DP_SOC_PARAM_RSSI_DBM_CONV_SUPPORT:
13154 		soc->features.rssi_dbm_conv_support = value;
13155 		dp_info("Rssi dbm conversion support:%u",
13156 			soc->features.rssi_dbm_conv_support);
13157 		break;
13158 	case DP_SOC_PARAM_UMAC_HW_RESET_SUPPORT:
13159 		soc->features.umac_hw_reset_support = value;
13160 		dp_info("UMAC HW reset support :%u",
13161 			soc->features.umac_hw_reset_support);
13162 		break;
13163 	default:
13164 		dp_info("not handled param %d ", param);
13165 		break;
13166 	}
13167 
13168 	return QDF_STATUS_SUCCESS;
13169 }
13170 
13171 static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
13172 				      void *stats_ctx)
13173 {
13174 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
13175 
13176 	soc->rate_stats_ctx = (struct cdp_soc_rate_stats_ctx *)stats_ctx;
13177 }
13178 
13179 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
13180 /**
13181  * dp_peer_flush_rate_stats_req() - Flush peer rate stats
13182  * @soc: Datapath SOC handle
13183  * @peer: Datapath peer
13184  * @arg: argument to iter function
13185  *
 * Return: void
13187  */
13188 static void
13189 dp_peer_flush_rate_stats_req(struct dp_soc *soc, struct dp_peer *peer,
13190 			     void *arg)
13191 {
13192 	if (peer->bss_peer)
13193 		return;
13194 
13195 	dp_wdi_event_handler(
13196 		WDI_EVENT_FLUSH_RATE_STATS_REQ,
13197 		soc, dp_monitor_peer_get_peerstats_ctx(soc, peer),
13198 		peer->peer_id,
13199 		WDI_NO_VAL, peer->vdev->pdev->pdev_id);
13200 }
13201 
13202 /**
13203  * dp_flush_rate_stats_req() - Flush peer rate stats in pdev
13204  * @soc_hdl: Datapath SOC handle
13205  * @pdev_id: pdev_id
13206  *
13207  * Return: QDF_STATUS
13208  */
13209 static QDF_STATUS dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
13210 					  uint8_t pdev_id)
13211 {
13212 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
13213 	struct dp_pdev *pdev =
13214 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
13215 						   pdev_id);
13216 	if (!pdev)
13217 		return QDF_STATUS_E_FAILURE;
13218 
13219 	dp_pdev_iterate_peer(pdev, dp_peer_flush_rate_stats_req, NULL,
13220 			     DP_MOD_ID_CDP);
13221 
13222 	return QDF_STATUS_SUCCESS;
13223 }
13224 #else
13225 static inline QDF_STATUS
13226 dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
13227 			uint8_t pdev_id)
13228 {
13229 	return QDF_STATUS_SUCCESS;
13230 }
13231 #endif
13232 
13233 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
13234 #ifdef WLAN_FEATURE_11BE_MLO
13235 /**
 * dp_get_peer_extd_rate_link_stats() - request peer extended rate and
 *				link stats (issues a WDI flush event)
13238  * @soc_hdl: dp soc handler
13239  * @mac_addr: mac address of peer
13240  *
13241  * Return: QDF_STATUS
13242  */
13243 static QDF_STATUS
13244 dp_get_peer_extd_rate_link_stats(struct cdp_soc_t *soc_hdl, uint8_t *mac_addr)
13245 {
13246 	uint8_t i;
13247 	struct dp_peer *link_peer;
13248 	struct dp_soc *link_peer_soc;
13249 	struct dp_mld_link_peers link_peers_info;
13250 	struct dp_peer *peer = NULL;
13251 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
13252 	struct cdp_peer_info peer_info = { 0 };
13253 
13254 	if (!mac_addr) {
13255 		dp_err("NULL peer mac addr\n");
13256 		return QDF_STATUS_E_FAILURE;
13257 	}
13258 
13259 	DP_PEER_INFO_PARAMS_INIT(&peer_info, DP_VDEV_ALL, mac_addr, false,
13260 				 CDP_WILD_PEER_TYPE);
13261 
13262 	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
13263 	if (!peer) {
13264 		dp_err("Invalid peer\n");
13265 		return QDF_STATUS_E_FAILURE;
13266 	}
13267 
13268 	if (IS_MLO_DP_MLD_PEER(peer)) {
13269 		dp_get_link_peers_ref_from_mld_peer(soc, peer,
13270 						    &link_peers_info,
13271 						    DP_MOD_ID_CDP);
13272 		for (i = 0; i < link_peers_info.num_links; i++) {
13273 			link_peer = link_peers_info.link_peers[i];
13274 			link_peer_soc = link_peer->vdev->pdev->soc;
13275 			dp_wdi_event_handler(WDI_EVENT_FLUSH_RATE_STATS_REQ,
13276 					     link_peer_soc,
13277 					     dp_monitor_peer_get_peerstats_ctx
13278 					     (link_peer_soc, link_peer),
13279 					     link_peer->peer_id,
13280 					     WDI_NO_VAL,
13281 					     link_peer->vdev->pdev->pdev_id);
13282 		}
13283 		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
13284 	} else {
13285 		dp_wdi_event_handler(
13286 				WDI_EVENT_FLUSH_RATE_STATS_REQ, soc,
13287 				dp_monitor_peer_get_peerstats_ctx(soc, peer),
13288 				peer->peer_id,
13289 				WDI_NO_VAL, peer->vdev->pdev->pdev_id);
13290 	}
13291 
13292 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
13293 	return QDF_STATUS_SUCCESS;
13294 }
13295 #else
13296 static QDF_STATUS
13297 dp_get_peer_extd_rate_link_stats(struct cdp_soc_t *soc_hdl, uint8_t *mac_addr)
13298 {
13299 	struct dp_peer *peer = NULL;
13300 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
13301 
13302 	if (!mac_addr) {
13303 		dp_err("NULL peer mac addr\n");
13304 		return QDF_STATUS_E_FAILURE;
13305 	}
13306 
13307 	peer = dp_peer_find_hash_find(soc, mac_addr, 0,
13308 				      DP_VDEV_ALL, DP_MOD_ID_CDP);
13309 	if (!peer) {
13310 		dp_err("Invalid peer\n");
13311 		return QDF_STATUS_E_FAILURE;
13312 	}
13313 
13314 	dp_wdi_event_handler(
13315 			WDI_EVENT_FLUSH_RATE_STATS_REQ, soc,
13316 			dp_monitor_peer_get_peerstats_ctx(soc, peer),
13317 			peer->peer_id,
13318 			WDI_NO_VAL, peer->vdev->pdev->pdev_id);
13319 
13320 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
13321 	return QDF_STATUS_SUCCESS;
13322 }
13323 #endif
13324 #else
13325 static inline QDF_STATUS
13326 dp_get_peer_extd_rate_link_stats(struct cdp_soc_t *soc_hdl, uint8_t *mac_addr)
13327 {
13328 	return QDF_STATUS_SUCCESS;
13329 }
13330 #endif
13331 
13332 static void *dp_peer_get_peerstats_ctx(struct cdp_soc_t *soc_hdl,
13333 				       uint8_t vdev_id,
13334 				       uint8_t *mac_addr)
13335 {
13336 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
13337 	struct dp_peer *peer;
13338 	void *peerstats_ctx = NULL;
13339 
13340 	if (mac_addr) {
13341 		peer = dp_peer_find_hash_find(soc, mac_addr,
13342 					      0, vdev_id,
13343 					      DP_MOD_ID_CDP);
13344 		if (!peer)
13345 			return NULL;
13346 
13347 		if (!IS_MLO_DP_MLD_PEER(peer))
13348 			peerstats_ctx = dp_monitor_peer_get_peerstats_ctx(soc,
13349 									  peer);
13350 
13351 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
13352 	}
13353 
13354 	return peerstats_ctx;
13355 }
13356 
13357 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
13358 static QDF_STATUS dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
13359 					   uint8_t pdev_id,
13360 					   void *buf)
13361 {
13362 	 dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS,
13363 			      (struct dp_soc *)soc, buf, HTT_INVALID_PEER,
13364 			      WDI_NO_VAL, pdev_id);
13365 	return QDF_STATUS_SUCCESS;
13366 }
13367 #else
13368 static inline QDF_STATUS
13369 dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
13370 			 uint8_t pdev_id,
13371 			 void *buf)
13372 {
13373 	return QDF_STATUS_SUCCESS;
13374 }
13375 #endif
13376 
13377 static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle)
13378 {
13379 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
13380 
13381 	return soc->rate_stats_ctx;
13382 }
13383 
13384 /**
13385  * dp_get_cfg() - get dp cfg
13386  * @soc: cdp soc handle
13387  * @cfg: cfg enum
13388  *
13389  * Return: cfg value
13390  */
13391 static uint32_t dp_get_cfg(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg)
13392 {
13393 	struct dp_soc *dpsoc = (struct dp_soc *)soc;
13394 	uint32_t value = 0;
13395 
13396 	switch (cfg) {
13397 	case cfg_dp_enable_data_stall:
13398 		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
13399 		break;
13400 	case cfg_dp_enable_p2p_ip_tcp_udp_checksum_offload:
13401 		value = dpsoc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload;
13402 		break;
13403 	case cfg_dp_enable_nan_ip_tcp_udp_checksum_offload:
13404 		value = dpsoc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload;
13405 		break;
13406 	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
13407 		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
13408 		break;
13409 	case cfg_dp_disable_legacy_mode_csum_offload:
13410 		value = dpsoc->wlan_cfg_ctx->
13411 					legacy_mode_checksumoffload_disable;
13412 		break;
13413 	case cfg_dp_tso_enable:
13414 		value = dpsoc->wlan_cfg_ctx->tso_enabled;
13415 		break;
13416 	case cfg_dp_lro_enable:
13417 		value = dpsoc->wlan_cfg_ctx->lro_enabled;
13418 		break;
13419 	case cfg_dp_gro_enable:
13420 		value = dpsoc->wlan_cfg_ctx->gro_enabled;
13421 		break;
13422 	case cfg_dp_tc_based_dyn_gro_enable:
13423 		value = dpsoc->wlan_cfg_ctx->tc_based_dynamic_gro;
13424 		break;
13425 	case cfg_dp_tc_ingress_prio:
13426 		value = dpsoc->wlan_cfg_ctx->tc_ingress_prio;
13427 		break;
13428 	case cfg_dp_sg_enable:
13429 		value = dpsoc->wlan_cfg_ctx->sg_enabled;
13430 		break;
13431 	case cfg_dp_tx_flow_start_queue_offset:
13432 		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
13433 		break;
13434 	case cfg_dp_tx_flow_stop_queue_threshold:
13435 		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
13436 		break;
13437 	case cfg_dp_disable_intra_bss_fwd:
13438 		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
13439 		break;
13440 	case cfg_dp_pktlog_buffer_size:
13441 		value = dpsoc->wlan_cfg_ctx->pktlog_buffer_size;
13442 		break;
13443 	case cfg_dp_wow_check_rx_pending:
13444 		value = dpsoc->wlan_cfg_ctx->wow_check_rx_pending_enable;
13445 		break;
13446 	default:
13447 		value =  0;
13448 	}
13449 
13450 	return value;
13451 }
13452 
13453 #ifdef PEER_FLOW_CONTROL
13454 /**
13455  * dp_tx_flow_ctrl_configure_pdev() - Configure flow control params
13456  * @soc_handle: datapath soc handle
13457  * @pdev_id: id of datapath pdev handle
13458  * @param: ol ath params
13459  * @value: value of the flag
13460  * @buff: Buffer to be passed
13461  *
 * This function mirrors the legacy implementation, where a single
 * function is used both to display stats and to update pdev params.
13464  *
13465  * Return: 0 for success. nonzero for failure.
13466  */
13467 static uint32_t dp_tx_flow_ctrl_configure_pdev(struct cdp_soc_t *soc_handle,
13468 					       uint8_t pdev_id,
13469 					       enum _dp_param_t param,
13470 					       uint32_t value, void *buff)
13471 {
13472 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
13473 	struct dp_pdev *pdev =
13474 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
13475 						   pdev_id);
13476 
13477 	if (qdf_unlikely(!pdev))
13478 		return 1;
13479 
13480 	soc = pdev->soc;
13481 	if (!soc)
13482 		return 1;
13483 
13484 	switch (param) {
13485 #ifdef QCA_ENH_V3_STATS_SUPPORT
13486 	case DP_PARAM_VIDEO_DELAY_STATS_FC:
13487 		if (value)
13488 			pdev->delay_stats_flag = true;
13489 		else
13490 			pdev->delay_stats_flag = false;
13491 		break;
13492 	case DP_PARAM_VIDEO_STATS_FC:
13493 		qdf_print("------- TID Stats ------\n");
13494 		dp_pdev_print_tid_stats(pdev);
13495 		qdf_print("------ Delay Stats ------\n");
13496 		dp_pdev_print_delay_stats(pdev);
13497 		qdf_print("------ Rx Error Stats ------\n");
13498 		dp_pdev_print_rx_error_stats(pdev);
13499 		break;
13500 #endif
13501 	case DP_PARAM_TOTAL_Q_SIZE:
13502 		{
13503 			uint32_t tx_min, tx_max;
13504 
13505 			tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx);
13506 			tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
13507 
13508 			if (!buff) {
13509 				if ((value >= tx_min) && (value <= tx_max)) {
13510 					pdev->num_tx_allowed = value;
13511 				} else {
13512 					dp_tx_info("%pK: Failed to update num_tx_allowed, Q_min = %d Q_max = %d",
13513 						   soc, tx_min, tx_max);
13514 					break;
13515 				}
13516 			} else {
13517 				*(int *)buff = pdev->num_tx_allowed;
13518 			}
13519 		}
13520 		break;
13521 	default:
13522 		dp_tx_info("%pK: not handled param %d ", soc, param);
13523 		break;
13524 	}
13525 
13526 	return 0;
13527 }
13528 #endif
13529 
13530 /**
13531  * dp_set_pdev_pcp_tid_map_wifi3() - update pcp tid map in pdev
13532  * @psoc: dp soc handle
13533  * @pdev_id: id of DP_PDEV handle
13534  * @pcp: pcp value
13535  * @tid: tid value passed by the user
13536  *
13537  * Return: QDF_STATUS_SUCCESS on success
13538  */
13539 static QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
13540 						uint8_t pdev_id,
13541 						uint8_t pcp, uint8_t tid)
13542 {
13543 	struct dp_soc *soc = (struct dp_soc *)psoc;
13544 
13545 	soc->pcp_tid_map[pcp] = tid;
13546 
13547 	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
13548 	return QDF_STATUS_SUCCESS;
13549 }
13550 
13551 /**
13552  * dp_set_vdev_pcp_tid_map_wifi3() - update pcp tid map in vdev
13553  * @soc_hdl: DP soc handle
13554  * @vdev_id: id of DP_VDEV handle
13555  * @pcp: pcp value
13556  * @tid: tid value passed by the user
13557  *
13558  * Return: QDF_STATUS_SUCCESS on success
13559  */
13560 static QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc_hdl,
13561 						uint8_t vdev_id,
13562 						uint8_t pcp, uint8_t tid)
13563 {
13564 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13565 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
13566 						     DP_MOD_ID_CDP);
13567 
13568 	if (!vdev)
13569 		return QDF_STATUS_E_FAILURE;
13570 
13571 	vdev->pcp_tid_map[pcp] = tid;
13572 
13573 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
13574 	return QDF_STATUS_SUCCESS;
13575 }
13576 
13577 #if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
13578 static void dp_drain_txrx(struct cdp_soc_t *soc_handle)
13579 {
13580 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
13581 	uint32_t cur_tx_limit, cur_rx_limit;
13582 	uint32_t budget = 0xffff;
13583 	uint32_t val;
13584 	int i;
13585 	int cpu = dp_srng_get_cpu();
13586 
13587 	cur_tx_limit = soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit;
13588 	cur_rx_limit = soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit;
13589 
13590 	/* Temporarily increase soft irq limits when going to drain
13591 	 * the UMAC/LMAC SRNGs and restore them after polling.
13592 	 * Though the budget is on higher side, the TX/RX reaping loops
13593 	 * will not execute longer as both TX and RX would be suspended
13594 	 * by the time this API is called.
13595 	 */
13596 	dp_update_soft_irq_limits(soc, budget, budget);
13597 
13598 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
13599 		dp_service_srngs(&soc->intr_ctx[i], budget, cpu);
13600 
13601 	dp_update_soft_irq_limits(soc, cur_tx_limit, cur_rx_limit);
13602 
	/* Do a dummy read at offset 0; this ensures all pending
	 * writes (HP/TP) are flushed before the read returns.
	 */
13606 	val = HAL_REG_READ((struct hal_soc *)soc->hal_soc, 0);
13607 	dp_debug("Register value at offset 0: %u\n", val);
13608 }
13609 #endif
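
/*
 * Usage note (sketch): dp_drain_txrx() is meant for the suspend /
 * runtime-PM path, after TX and RX have already been stopped at the
 * source; one possible ordering:
 *
 *	dp_set_tx_pause(soc_hdl, true);
 *	dp_drain_txrx(soc_hdl);
 *	// hardware may now enter low power state
 *
 * Pausing first bounds the reap loops that the drain services with the
 * temporarily raised budget.
 */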
13610 
13611 #ifdef DP_UMAC_HW_RESET_SUPPORT
13612 /**
 * dp_reset_interrupt_ring_masks() - Reset interrupt ring masks
13614  * @soc: dp soc handle
13615  *
13616  * Return: void
13617  */
13618 static void dp_reset_interrupt_ring_masks(struct dp_soc *soc)
13619 {
13620 	struct dp_intr_bkp *intr_bkp;
13621 	struct dp_intr *intr_ctx;
13622 	int num_ctxt = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
13623 	int i;
13624 
13625 	intr_bkp =
13626 	(struct dp_intr_bkp *)qdf_mem_malloc_atomic(sizeof(struct dp_intr_bkp) *
13627 			num_ctxt);
13628 
13629 	qdf_assert_always(intr_bkp);
13630 
13631 	soc->umac_reset_ctx.intr_ctx_bkp = intr_bkp;
13632 	for (i = 0; i < num_ctxt; i++) {
13633 		intr_ctx = &soc->intr_ctx[i];
13634 
13635 		intr_bkp->tx_ring_mask = intr_ctx->tx_ring_mask;
13636 		intr_bkp->rx_ring_mask = intr_ctx->rx_ring_mask;
13637 		intr_bkp->rx_mon_ring_mask = intr_ctx->rx_mon_ring_mask;
13638 		intr_bkp->rx_err_ring_mask = intr_ctx->rx_err_ring_mask;
13639 		intr_bkp->rx_wbm_rel_ring_mask = intr_ctx->rx_wbm_rel_ring_mask;
13640 		intr_bkp->reo_status_ring_mask = intr_ctx->reo_status_ring_mask;
13641 		intr_bkp->rxdma2host_ring_mask = intr_ctx->rxdma2host_ring_mask;
13642 		intr_bkp->host2rxdma_ring_mask = intr_ctx->host2rxdma_ring_mask;
13643 		intr_bkp->host2rxdma_mon_ring_mask =
13644 					intr_ctx->host2rxdma_mon_ring_mask;
13645 		intr_bkp->tx_mon_ring_mask = intr_ctx->tx_mon_ring_mask;
13646 
13647 		intr_ctx->tx_ring_mask = 0;
13648 		intr_ctx->rx_ring_mask = 0;
13649 		intr_ctx->rx_mon_ring_mask = 0;
13650 		intr_ctx->rx_err_ring_mask = 0;
13651 		intr_ctx->rx_wbm_rel_ring_mask = 0;
13652 		intr_ctx->reo_status_ring_mask = 0;
13653 		intr_ctx->rxdma2host_ring_mask = 0;
13654 		intr_ctx->host2rxdma_ring_mask = 0;
13655 		intr_ctx->host2rxdma_mon_ring_mask = 0;
13656 		intr_ctx->tx_mon_ring_mask = 0;
13657 
13658 		intr_bkp++;
13659 	}
13660 }
13661 
13662 /**
 * dp_restore_interrupt_ring_masks() - Restore interrupt ring masks
13664  * @soc: dp soc handle
13665  *
13666  * Return: void
13667  */
13668 static void dp_restore_interrupt_ring_masks(struct dp_soc *soc)
13669 {
13670 	struct dp_intr_bkp *intr_bkp = soc->umac_reset_ctx.intr_ctx_bkp;
13671 	struct dp_intr_bkp *intr_bkp_base = intr_bkp;
13672 	struct dp_intr *intr_ctx;
13673 	int num_ctxt = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
13674 	int i;
13675 
13676 	if (!intr_bkp)
13677 		return;
13678 
13679 	for (i = 0; i < num_ctxt; i++) {
13680 		intr_ctx = &soc->intr_ctx[i];
13681 
13682 		intr_ctx->tx_ring_mask = intr_bkp->tx_ring_mask;
13683 		intr_ctx->rx_ring_mask = intr_bkp->rx_ring_mask;
13684 		intr_ctx->rx_mon_ring_mask = intr_bkp->rx_mon_ring_mask;
13685 		intr_ctx->rx_err_ring_mask = intr_bkp->rx_err_ring_mask;
13686 		intr_ctx->rx_wbm_rel_ring_mask = intr_bkp->rx_wbm_rel_ring_mask;
13687 		intr_ctx->reo_status_ring_mask = intr_bkp->reo_status_ring_mask;
13688 		intr_ctx->rxdma2host_ring_mask = intr_bkp->rxdma2host_ring_mask;
13689 		intr_ctx->host2rxdma_ring_mask = intr_bkp->host2rxdma_ring_mask;
13690 		intr_ctx->host2rxdma_mon_ring_mask =
13691 			intr_bkp->host2rxdma_mon_ring_mask;
13692 		intr_ctx->tx_mon_ring_mask = intr_bkp->tx_mon_ring_mask;
13693 
13694 		intr_bkp++;
13695 	}
13696 
13697 	qdf_mem_free(intr_bkp_base);
13698 	soc->umac_reset_ctx.intr_ctx_bkp = NULL;
13699 }
13700 
13701 /**
13702  * dp_resume_tx_hardstart() - Restore the old Tx hardstart functions
13703  * @soc: dp soc handle
13704  *
13705  * Return: void
13706  */
13707 static void dp_resume_tx_hardstart(struct dp_soc *soc)
13708 {
13709 	struct dp_vdev *vdev;
13710 	struct ol_txrx_hardtart_ctxt ctxt = {0};
13711 	struct cdp_ctrl_objmgr_psoc *psoc = soc->ctrl_psoc;
13712 	int i;
13713 
13714 	for (i = 0; i < MAX_PDEV_CNT; i++) {
13715 		struct dp_pdev *pdev = soc->pdev_list[i];
13716 
13717 		if (!pdev)
13718 			continue;
13719 
13720 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
13721 			uint8_t vdev_id = vdev->vdev_id;
13722 
13723 			dp_vdev_fetch_tx_handler(vdev, soc, &ctxt);
13724 			soc->cdp_soc.ol_ops->dp_update_tx_hardstart(psoc,
13725 								    vdev_id,
13726 								    &ctxt);
13727 		}
13728 	}
13729 }
13730 
13731 /**
13732  * dp_pause_tx_hardstart() - Register Tx hardstart functions to drop packets
13733  * @soc: dp soc handle
13734  *
13735  * Return: void
13736  */
13737 static void dp_pause_tx_hardstart(struct dp_soc *soc)
13738 {
13739 	struct dp_vdev *vdev;
13740 	struct ol_txrx_hardtart_ctxt ctxt;
13741 	struct cdp_ctrl_objmgr_psoc *psoc = soc->ctrl_psoc;
13742 	int i;
13743 
13744 	ctxt.tx = &dp_tx_drop;
13745 	ctxt.tx_fast = &dp_tx_drop;
13746 	ctxt.tx_exception = &dp_tx_exc_drop;
13747 
13748 	for (i = 0; i < MAX_PDEV_CNT; i++) {
13749 		struct dp_pdev *pdev = soc->pdev_list[i];
13750 
13751 		if (!pdev)
13752 			continue;
13753 
13754 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
13755 			uint8_t vdev_id = vdev->vdev_id;
13756 
13757 			soc->cdp_soc.ol_ops->dp_update_tx_hardstart(psoc,
13758 								    vdev_id,
13759 								    &ctxt);
13760 		}
13761 	}
13762 }
13763 
13764 /**
13765  * dp_unregister_notify_umac_pre_reset_fw_callback() - unregister notify_fw_cb
13766  * @soc: dp soc handle
13767  *
13768  * Return: void
13769  */
13770 static inline
13771 void dp_unregister_notify_umac_pre_reset_fw_callback(struct dp_soc *soc)
13772 {
13773 	soc->notify_fw_callback = NULL;
13774 }
13775 
13776 /**
13777  * dp_check_n_notify_umac_prereset_done() - Send pre reset done to firmware
13778  * @soc: dp soc handle
13779  *
13780  * Return: void
13781  */
13782 static inline
13783 void dp_check_n_notify_umac_prereset_done(struct dp_soc *soc)
13784 {
	/* Some CPU(s) are still processing the umac rings */
13786 	if (soc->service_rings_running)
13787 		return;
13788 
13789 	/* Notify the firmware that Umac pre reset is complete */
13790 	dp_umac_reset_notify_action_completion(soc,
13791 					       UMAC_RESET_ACTION_DO_PRE_RESET);
13792 
13793 	/* Unregister the callback */
13794 	dp_unregister_notify_umac_pre_reset_fw_callback(soc);
13795 }
13796 
13797 /**
13798  * dp_register_notify_umac_pre_reset_fw_callback() - register notify_fw_cb
13799  * @soc: dp soc handle
13800  *
13801  * Return: void
13802  */
13803 static inline
13804 void dp_register_notify_umac_pre_reset_fw_callback(struct dp_soc *soc)
13805 {
13806 	soc->notify_fw_callback = dp_check_n_notify_umac_prereset_done;
13807 }
13808 
13809 #ifdef DP_UMAC_HW_HARD_RESET
13810 /**
13811  * dp_set_umac_regs() - Reinitialize host umac registers
13812  * @soc: dp soc handle
13813  *
13814  * Return: void
13815  */
13816 static void dp_set_umac_regs(struct dp_soc *soc)
13817 {
13818 	int i;
13819 	struct hal_reo_params reo_params;
13820 
13821 	qdf_mem_zero(&reo_params, sizeof(reo_params));
13822 
13823 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
13824 		if (soc->arch_ops.reo_remap_config(soc, &reo_params.remap0,
13825 						   &reo_params.remap1,
13826 						   &reo_params.remap2))
13827 			reo_params.rx_hash_enabled = true;
13828 		else
13829 			reo_params.rx_hash_enabled = false;
13830 	}
13831 
13832 	reo_params.reo_qref = &soc->reo_qref;
13833 	hal_reo_setup(soc->hal_soc, &reo_params, 0);
13834 
13835 	soc->arch_ops.dp_cc_reg_cfg_init(soc, true);
13836 
13837 	for (i = 0; i < PCP_TID_MAP_MAX; i++)
13838 		hal_tx_update_pcp_tid_map(soc->hal_soc, soc->pcp_tid_map[i], i);
13839 
13840 	for (i = 0; i < MAX_PDEV_CNT; i++) {
13841 		struct dp_vdev *vdev = NULL;
13842 		struct dp_pdev *pdev = soc->pdev_list[i];
13843 
13844 		if (!pdev)
13845 			continue;
13846 
13847 		for (i = 0; i < soc->num_hw_dscp_tid_map; i++)
13848 			hal_tx_set_dscp_tid_map(soc->hal_soc,
13849 						pdev->dscp_tid_map[i], i);
13850 
13851 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
13852 			soc->arch_ops.dp_bank_reconfig(soc, vdev);
13853 			soc->arch_ops.dp_reconfig_tx_vdev_mcast_ctrl(soc,
13854 								      vdev);
13855 		}
13856 	}
13857 }
13858 #else
13859 static void dp_set_umac_regs(struct dp_soc *soc)
13860 {
13861 }
13862 #endif
13863 
13864 /**
13865  * dp_reinit_rings() - Reinitialize host managed rings
13866  * @soc: dp soc handle
13867  *
 * Return: void
13869  */
13870 static void dp_reinit_rings(struct dp_soc *soc)
13871 {
13872 	unsigned long end;
13873 
13874 	dp_soc_srng_deinit(soc);
13875 	dp_hw_link_desc_ring_deinit(soc);
13876 
13877 	/* Busy wait for 2 ms to make sure the rings are in idle state
13878 	 * before we enable them again
13879 	 */
13880 	end = jiffies + msecs_to_jiffies(2);
13881 	while (time_before(jiffies, end))
13882 		;
13883 
13884 	dp_hw_link_desc_ring_init(soc);
13885 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
13886 	dp_soc_srng_init(soc);
13887 }
13888 
13889 /**
13890  * dp_umac_reset_action_trigger_recovery() - Handle FW Umac recovery trigger
13891  * @soc: dp soc handle
13892  *
13893  * Return: QDF_STATUS
13894  */
13895 static QDF_STATUS dp_umac_reset_action_trigger_recovery(struct dp_soc *soc)
13896 {
13897 	enum umac_reset_action action = UMAC_RESET_ACTION_DO_TRIGGER_RECOVERY;
13898 
13899 	return dp_umac_reset_notify_action_completion(soc, action);
13900 }
13901 
13902 /**
13903  * dp_umac_reset_handle_pre_reset() - Handle Umac prereset interrupt from FW
13904  * @soc: dp soc handle
13905  *
13906  * Return: QDF_STATUS
13907  */
13908 static QDF_STATUS dp_umac_reset_handle_pre_reset(struct dp_soc *soc)
13909 {
13910 	if (wlan_cfg_get_dp_soc_is_ppeds_enabled(soc->wlan_cfg_ctx)) {
13911 		dp_err("Umac reset is currently not supported in DS config");
13912 		qdf_assert_always(0);
13913 	}
13914 
13915 	dp_reset_interrupt_ring_masks(soc);
13916 
13917 	dp_pause_tx_hardstart(soc);
13918 	dp_pause_reo_send_cmd(soc);
13919 
13920 	dp_check_n_notify_umac_prereset_done(soc);
13921 
13922 	soc->umac_reset_ctx.nbuf_list = NULL;
13923 
13924 	return QDF_STATUS_SUCCESS;
13925 }
13926 
13927 /**
13928  * dp_umac_reset_handle_post_reset() - Handle Umac postreset interrupt from FW
13929  * @soc: dp soc handle
13930  *
13931  * Return: QDF_STATUS
13932  */
13933 static QDF_STATUS dp_umac_reset_handle_post_reset(struct dp_soc *soc)
13934 {
13935 	if (!soc->umac_reset_ctx.skel_enable) {
13936 		qdf_nbuf_t *nbuf_list = &soc->umac_reset_ctx.nbuf_list;
13937 
13938 		dp_set_umac_regs(soc);
13939 
13940 		dp_reinit_rings(soc);
13941 
13942 		dp_rx_desc_reuse(soc, nbuf_list);
13943 
13944 		dp_cleanup_reo_cmd_module(soc);
13945 
13946 		dp_tx_desc_pool_cleanup(soc, nbuf_list);
13947 
13948 		dp_reset_tid_q_setup(soc);
13949 	}
13950 
13951 	return dp_umac_reset_notify_action_completion(soc,
13952 					UMAC_RESET_ACTION_DO_POST_RESET_START);
13953 }
13954 
13955 /**
13956  * dp_umac_reset_handle_post_reset_complete() - Handle Umac postreset_complete
13957  *						interrupt from FW
13958  * @soc: dp soc handle
13959  *
13960  * Return: QDF_STATUS
13961  */
13962 static QDF_STATUS dp_umac_reset_handle_post_reset_complete(struct dp_soc *soc)
13963 {
13964 	QDF_STATUS status;
13965 	qdf_nbuf_t nbuf_list = soc->umac_reset_ctx.nbuf_list;
13966 
13967 	soc->umac_reset_ctx.nbuf_list = NULL;
13968 
13969 	dp_resume_reo_send_cmd(soc);
13970 
13971 	dp_restore_interrupt_ring_masks(soc);
13972 
13973 	dp_resume_tx_hardstart(soc);
13974 
13975 	status = dp_umac_reset_notify_action_completion(soc,
13976 				UMAC_RESET_ACTION_DO_POST_RESET_COMPLETE);
13977 
13978 	while (nbuf_list) {
13979 		qdf_nbuf_t nbuf = nbuf_list->next;
13980 
13981 		qdf_nbuf_free(nbuf_list);
13982 		nbuf_list = nbuf;
13983 	}
13984 
13985 	dp_umac_reset_info("Umac reset done on soc %pK\n trigger start : %u us "
13986 			   "trigger done : %u us prereset : %u us\n"
13987 			   "postreset : %u us \n postreset complete: %u us \n",
13988 			   soc,
13989 			   soc->umac_reset_ctx.ts.trigger_done -
13990 			   soc->umac_reset_ctx.ts.trigger_start,
13991 			   soc->umac_reset_ctx.ts.pre_reset_done -
13992 			   soc->umac_reset_ctx.ts.pre_reset_start,
13993 			   soc->umac_reset_ctx.ts.post_reset_done -
13994 			   soc->umac_reset_ctx.ts.post_reset_start,
13995 			   soc->umac_reset_ctx.ts.post_reset_complete_done -
13996 			   soc->umac_reset_ctx.ts.post_reset_complete_start);
13997 
13998 	return status;
13999 }
14000 #endif
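
/*
 * Sequence summary (illustrative): the FW-driven Umac reset handshake
 * implemented above proceeds in three stages:
 *
 *	dp_umac_reset_handle_pre_reset()           mask interrupts, pause
 *	                                           TX hardstart and REO
 *	dp_umac_reset_handle_post_reset()          reprogram umac regs,
 *	                                           reinit rings, reuse
 *	                                           RX/TX descriptors
 *	dp_umac_reset_handle_post_reset_complete() resume REO/TX, restore
 *	                                           masks, free stale nbufs
 *
 * Each stage ends with dp_umac_reset_notify_action_completion() so the
 * FW can gate the next stage.
 */
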
14001 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
14002 static void
14003 dp_set_pkt_capture_mode(struct cdp_soc_t *soc_handle, bool val)
14004 {
14005 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
14006 
14007 	soc->wlan_cfg_ctx->pkt_capture_mode = val;
14008 }
14009 #endif
14010 
14011 #ifdef HW_TX_DELAY_STATS_ENABLE
14012 /**
14013  * dp_enable_disable_vdev_tx_delay_stats() - Start/Stop tx delay stats capture
14014  * @soc_hdl: DP soc handle
14015  * @vdev_id: vdev id
14016  * @value: value
14017  *
14018  * Return: None
14019  */
14020 static void
14021 dp_enable_disable_vdev_tx_delay_stats(struct cdp_soc_t *soc_hdl,
14022 				      uint8_t vdev_id,
14023 				      uint8_t value)
14024 {
14025 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14026 	struct dp_vdev *vdev = NULL;
14027 
14028 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
14029 	if (!vdev)
14030 		return;
14031 
14032 	vdev->hw_tx_delay_stats_enabled = value;
14033 
14034 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
14035 }
14036 
14037 /**
14038  * dp_check_vdev_tx_delay_stats_enabled() - check the feature is enabled or not
14039  * @soc_hdl: DP soc handle
14040  * @vdev_id: vdev id
14041  *
14042  * Return: 1 if enabled, 0 if disabled
14043  */
14044 static uint8_t
14045 dp_check_vdev_tx_delay_stats_enabled(struct cdp_soc_t *soc_hdl,
14046 				     uint8_t vdev_id)
14047 {
14048 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14049 	struct dp_vdev *vdev;
14050 	uint8_t ret_val = 0;
14051 
14052 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
14053 	if (!vdev)
14054 		return ret_val;
14055 
14056 	ret_val = vdev->hw_tx_delay_stats_enabled;
14057 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
14058 
14059 	return ret_val;
14060 }
14061 #endif
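
/*
 * Example (sketch): the two hooks above pair a user-triggered enable
 * with the query used by the stats dump path:
 *
 *	dp_enable_disable_vdev_tx_delay_stats(soc_hdl, vdev_id, 1);
 *	if (dp_check_vdev_tx_delay_stats_enabled(soc_hdl, vdev_id))
 *		; // per-vdev HW TX delay stats are being accumulated
 */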
14062 
14063 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
14064 static void
14065 dp_recovery_vdev_flush_peers(struct cdp_soc_t *cdp_soc,
14066 			     uint8_t vdev_id,
14067 			     bool mlo_peers_only)
14068 {
14069 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
14070 	struct dp_vdev *vdev;
14071 
14072 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
14073 
14074 	if (!vdev)
14075 		return;
14076 
14077 	dp_vdev_flush_peers((struct cdp_vdev *)vdev, false, mlo_peers_only);
14078 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
14079 }
14080 #endif
14081 #ifdef QCA_GET_TSF_VIA_REG
14082 /**
14083  * dp_get_tsf_time() - get tsf time
14084  * @soc_hdl: Datapath soc handle
14085  * @tsf_id: TSF identifier
14086  * @mac_id: mac_id
14087  * @tsf: pointer to update tsf value
14088  * @tsf_sync_soc_time: pointer to update tsf sync time
14089  *
14090  * Return: None.
14091  */
14092 static inline void
14093 dp_get_tsf_time(struct cdp_soc_t *soc_hdl, uint32_t tsf_id, uint32_t mac_id,
14094 		uint64_t *tsf, uint64_t *tsf_sync_soc_time)
14095 {
14096 	hal_get_tsf_time(((struct dp_soc *)soc_hdl)->hal_soc, tsf_id, mac_id,
14097 			 tsf, tsf_sync_soc_time);
14098 }
14099 #else
14100 static inline void
14101 dp_get_tsf_time(struct cdp_soc_t *soc_hdl, uint32_t tsf_id, uint32_t mac_id,
14102 		uint64_t *tsf, uint64_t *tsf_sync_soc_time)
14103 {
14104 }
14105 #endif
14106 
14107 /**
14108  * dp_get_tsf2_scratch_reg() - get tsf2 offset from the scratch register
14109  * @soc_hdl: Datapath soc handle
14110  * @mac_id: mac_id
14111  * @value: pointer to update tsf2 offset value
14112  *
14113  * Return: None.
14114  */
14115 static inline void
14116 dp_get_tsf2_scratch_reg(struct cdp_soc_t *soc_hdl, uint8_t mac_id,
14117 			uint64_t *value)
14118 {
14119 	hal_get_tsf2_offset(((struct dp_soc *)soc_hdl)->hal_soc, mac_id, value);
14120 }
14121 
14122 /**
14123  * dp_get_tqm_scratch_reg() - get tqm offset from the scratch register
14124  * @soc_hdl: Datapath soc handle
14125  * @value: pointer to update tqm offset value
14126  *
14127  * Return: None.
14128  */
14129 static inline void
14130 dp_get_tqm_scratch_reg(struct cdp_soc_t *soc_hdl, uint64_t *value)
14131 {
14132 	hal_get_tqm_offset(((struct dp_soc *)soc_hdl)->hal_soc, value);
14133 }
14134 
14135 /**
14136  * dp_set_tx_pause() - Pause or resume tx path
14137  * @soc_hdl: Datapath soc handle
14138  * @flag: set or clear is_tx_pause
14139  *
14140  * Return: None.
14141  */
14142 static inline
14143 void dp_set_tx_pause(struct cdp_soc_t *soc_hdl, bool flag)
14144 {
14145 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14146 
14147 	soc->is_tx_pause = flag;
14148 }
14149 
14150 static struct cdp_cmn_ops dp_ops_cmn = {
14151 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
14152 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
14153 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
14154 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
14155 	.txrx_pdev_post_attach = dp_pdev_post_attach_wifi3,
14156 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
14157 	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
14158 	.txrx_peer_create = dp_peer_create_wifi3,
14159 	.txrx_peer_setup = dp_peer_setup_wifi3,
14160 #ifdef FEATURE_AST
14161 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
14162 #else
14163 	.txrx_peer_teardown = NULL,
14164 #endif
14165 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
14166 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
14167 	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
14168 	.txrx_peer_get_ast_info_by_pdev =
14169 		dp_peer_get_ast_info_by_pdevid_wifi3,
14170 	.txrx_peer_ast_delete_by_soc =
14171 		dp_peer_ast_entry_del_by_soc,
14172 	.txrx_peer_ast_delete_by_pdev =
14173 		dp_peer_ast_entry_del_by_pdev,
14174 	.txrx_peer_HMWDS_ast_delete = dp_peer_HMWDS_ast_entry_del,
14175 	.txrx_peer_delete = dp_peer_delete_wifi3,
14176 #ifdef DP_RX_UDP_OVER_PEER_ROAM
14177 	.txrx_update_roaming_peer = dp_update_roaming_peer_wifi3,
14178 #endif
14179 	.txrx_vdev_register = dp_vdev_register_wifi3,
14180 	.txrx_soc_detach = dp_soc_detach_wifi3,
14181 	.txrx_soc_deinit = dp_soc_deinit_wifi3,
14182 	.txrx_soc_init = dp_soc_init_wifi3,
14183 #ifndef QCA_HOST_MODE_WIFI_DISABLED
14184 	.txrx_tso_soc_attach = dp_tso_soc_attach,
14185 	.txrx_tso_soc_detach = dp_tso_soc_detach,
14186 	.tx_send = dp_tx_send,
14187 	.tx_send_exc = dp_tx_send_exception,
14188 #endif
14189 	.set_tx_pause = dp_set_tx_pause,
14190 	.txrx_pdev_init = dp_pdev_init_wifi3,
14191 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
14192 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
14193 	.txrx_ath_getstats = dp_get_device_stats,
14194 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
14195 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
14196 	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
14197 	.delba_process = dp_delba_process_wifi3,
14198 	.set_addba_response = dp_set_addba_response,
14199 	.flush_cache_rx_queue = NULL,
14200 	.tid_update_ba_win_size = dp_rx_tid_update_ba_win_size,
14201 	/* TODO: get APIs for dscp-tid need to be added */
14202 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
14203 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
14204 	.txrx_get_total_per = dp_get_total_per,
14205 	.txrx_stats_request = dp_txrx_stats_request,
14206 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
14207 	.display_stats = dp_txrx_dump_stats,
14208 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
14209 	.txrx_intr_detach = dp_soc_interrupt_detach,
14210 	.txrx_ppeds_stop = dp_soc_ppeds_stop,
14211 	.set_pn_check = dp_set_pn_check_wifi3,
14212 	.set_key_sec_type = dp_set_key_sec_type_wifi3,
14213 	.update_config_parameters = dp_update_config_parameters,
14214 	/* TODO: Add other functions */
14215 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
14216 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
14217 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
14218 	.get_vdev_dp_ext_txrx_handle = dp_vdev_get_dp_ext_handle,
14219 	.set_vdev_dp_ext_txrx_handle = dp_vdev_set_dp_ext_handle,
14220 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
14221 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
14222 	.map_pdev_to_lmac = dp_soc_map_pdev_to_lmac,
14223 	.handle_mode_change = dp_soc_handle_pdev_mode_change,
14224 	.set_pdev_status_down = dp_soc_set_pdev_status_down,
14225 	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
14226 	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
14227 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
14228 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
14229 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
14230 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
14231 	.set_soc_param = dp_soc_set_param,
14232 	.txrx_get_os_rx_handles_from_vdev =
14233 					dp_get_os_rx_handles_from_vdev_wifi3,
14234 	.delba_tx_completion = dp_delba_tx_completion_wifi3,
14235 	.get_dp_capabilities = dp_get_cfg_capabilities,
14236 	.txrx_get_cfg = dp_get_cfg,
14237 	.set_rate_stats_ctx = dp_soc_set_rate_stats_ctx,
14238 	.get_rate_stats_ctx = dp_soc_get_rate_stats_ctx,
14239 	.txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats,
14240 	.txrx_flush_rate_stats_request = dp_flush_rate_stats_req,
14241 	.txrx_peer_get_peerstats_ctx = dp_peer_get_peerstats_ctx,
14242 
14243 	.set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3,
14244 	.set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3,
14245 
14246 	.txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler,
14247 #ifdef QCA_MULTIPASS_SUPPORT
14248 	.set_vlan_groupkey = dp_set_vlan_groupkey,
14249 #endif
14250 	.get_peer_mac_list = dp_get_peer_mac_list,
14251 	.get_peer_id = dp_get_peer_id,
14252 #ifdef QCA_SUPPORT_WDS_EXTENDED
14253 	.set_wds_ext_peer_rx = dp_wds_ext_set_peer_rx,
14254 	.get_wds_ext_peer_osif_handle = dp_wds_ext_get_peer_osif_handle,
14255 #endif /* QCA_SUPPORT_WDS_EXTENDED */
14256 
14257 #if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
14258 	.txrx_drain = dp_drain_txrx,
14259 #endif
14260 #if defined(FEATURE_RUNTIME_PM)
14261 	.set_rtpm_tput_policy = dp_set_rtpm_tput_policy_requirement,
14262 #endif
14263 #ifdef WLAN_SYSFS_DP_STATS
14264 	.txrx_sysfs_fill_stats = dp_sysfs_fill_stats,
14265 	.txrx_sysfs_set_stat_type = dp_sysfs_set_stat_type,
14266 #endif /* WLAN_SYSFS_DP_STATS */
14267 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
14268 	.set_pkt_capture_mode = dp_set_pkt_capture_mode,
14269 #endif
14270 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
14271 	.txrx_recovery_vdev_flush_peers = dp_recovery_vdev_flush_peers,
14272 #endif
14273 	.txrx_umac_reset_deinit = dp_soc_umac_reset_deinit,
14274 	.txrx_get_tsf_time = dp_get_tsf_time,
14275 	.txrx_get_tsf2_offset = dp_get_tsf2_scratch_reg,
14276 	.txrx_get_tqm_offset = dp_get_tqm_scratch_reg,
14277 };
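/*
 * Illustrative sketch, not part of the driver: consumers reach the dp_*
 * handlers registered above through the cdp ops tables instead of calling
 * them directly. The hypothetical helper below (all "example_*" names are
 * assumptions) shows that dispatch pattern for txrx_get_tsf_time.
 */
#if 0
static inline void
example_query_tsf(struct cdp_soc_t *soc, uint32_t tsf_id, uint32_t mac_id)
{
	uint64_t tsf = 0, sync_time = 0;

	/* ops members are feature-conditional, so NULL-check the chain */
	if (soc && soc->ops && soc->ops->cmn_drv_ops &&
	    soc->ops->cmn_drv_ops->txrx_get_tsf_time)
		soc->ops->cmn_drv_ops->txrx_get_tsf_time(soc, tsf_id, mac_id,
							 &tsf, &sync_time);
}
#endif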
14278 
14279 static struct cdp_ctrl_ops dp_ops_ctrl = {
14280 	.txrx_peer_authorize = dp_peer_authorize,
14281 	.txrx_peer_get_authorize = dp_peer_get_authorize,
14282 #ifdef VDEV_PEER_PROTOCOL_COUNT
14283 	.txrx_enable_peer_protocol_count = dp_enable_vdev_peer_protocol_count,
14284 	.txrx_set_peer_protocol_drop_mask =
14285 		dp_enable_vdev_peer_protocol_drop_mask,
14286 	.txrx_is_peer_protocol_count_enabled =
14287 		dp_is_vdev_peer_protocol_count_enabled,
14288 	.txrx_get_peer_protocol_drop_mask = dp_get_vdev_peer_protocol_drop_mask,
14289 #endif
14290 	.txrx_set_vdev_param = dp_set_vdev_param,
14291 	.txrx_set_psoc_param = dp_set_psoc_param,
14292 	.txrx_get_psoc_param = dp_get_psoc_param,
14293 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
14294 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
14295 	.txrx_get_sec_type = dp_get_sec_type,
14296 	.txrx_wdi_event_sub = dp_wdi_event_sub,
14297 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
14298 	.txrx_set_pdev_param = dp_set_pdev_param,
14299 	.txrx_get_pdev_param = dp_get_pdev_param,
14300 	.txrx_set_peer_param = dp_set_peer_param,
14301 	.txrx_get_peer_param = dp_get_peer_param,
14302 #ifdef VDEV_PEER_PROTOCOL_COUNT
14303 	.txrx_peer_protocol_cnt = dp_peer_stats_update_protocol_cnt,
14304 #endif
14305 #ifdef WLAN_SUPPORT_MSCS
14306 	.txrx_record_mscs_params = dp_record_mscs_params,
14307 #endif
14308 	.set_key = dp_set_michael_key,
14309 	.txrx_get_vdev_param = dp_get_vdev_param,
14310 	.calculate_delay_stats = dp_calculate_delay_stats,
14311 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
14312 	.txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag,
14313 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
14314 	.txrx_dump_pdev_rx_protocol_tag_stats =
14315 				dp_dump_pdev_rx_protocol_tag_stats,
14316 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
14317 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
14318 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
14319 	.txrx_set_rx_flow_tag = dp_set_rx_flow_tag,
14320 	.txrx_dump_rx_flow_tag_stats = dp_dump_rx_flow_tag_stats,
14321 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
14322 #ifdef QCA_MULTIPASS_SUPPORT
14323 	.txrx_peer_set_vlan_id = dp_peer_set_vlan_id,
14324 #endif /*QCA_MULTIPASS_SUPPORT*/
14325 #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
14326 	.txrx_set_delta_tsf = dp_set_delta_tsf,
14327 #endif
14328 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
14329 	.txrx_set_tsf_ul_delay_report = dp_set_tsf_ul_delay_report,
14330 	.txrx_get_uplink_delay = dp_get_uplink_delay,
14331 #endif
14332 #ifdef QCA_UNDECODED_METADATA_SUPPORT
14333 	.txrx_set_pdev_phyrx_error_mask = dp_set_pdev_phyrx_error_mask,
14334 	.txrx_get_pdev_phyrx_error_mask = dp_get_pdev_phyrx_error_mask,
14335 #endif
14336 	.txrx_peer_flush_frags = dp_peer_flush_frags,
14337 };
14338 
14339 static struct cdp_me_ops dp_ops_me = {
14340 #ifndef QCA_HOST_MODE_WIFI_DISABLED
14341 #ifdef ATH_SUPPORT_IQUE
14342 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
14343 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
14344 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
14345 #endif
14346 #endif
14347 };
14348 
14349 static struct cdp_host_stats_ops dp_ops_host_stats = {
14350 	.txrx_per_peer_stats = dp_get_host_peer_stats,
14351 	.get_fw_peer_stats = dp_get_fw_peer_stats,
14352 	.get_htt_stats = dp_get_htt_stats,
14353 	.txrx_stats_publish = dp_txrx_stats_publish,
14354 	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
14355 	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
14356 	.txrx_get_soc_stats = dp_txrx_get_soc_stats,
14357 	.txrx_get_peer_stats_param = dp_txrx_get_peer_stats_param,
14358 	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
14359 	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
14360 #if defined(IPA_OFFLOAD) && defined(QCA_ENHANCED_STATS_SUPPORT)
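	/*
	 * Note: the three designators below intentionally repeat entries
	 * above; in C, the last designated initializer for a member wins,
	 * so the IPA variants override the defaults when this block is
	 * compiled in.
	 */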
14361 	.txrx_get_peer_stats = dp_ipa_txrx_get_peer_stats,
14362 	.txrx_get_vdev_stats  = dp_ipa_txrx_get_vdev_stats,
14363 	.txrx_get_pdev_stats = dp_ipa_txrx_get_pdev_stats,
14364 #endif
14365 	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
14366 	.txrx_update_vdev_stats = dp_txrx_update_vdev_host_stats,
14367 	.txrx_get_peer_delay_stats = dp_txrx_get_peer_delay_stats,
14368 	.txrx_get_peer_jitter_stats = dp_txrx_get_peer_jitter_stats,
14369 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
14370 	.txrx_alloc_vdev_stats_id = dp_txrx_alloc_vdev_stats_id,
14371 	.txrx_reset_vdev_stats_id = dp_txrx_reset_vdev_stats_id,
14372 #endif
14373 #ifdef WLAN_TX_PKT_CAPTURE_ENH
14374 	.get_peer_tx_capture_stats = dp_peer_get_tx_capture_stats,
14375 	.get_pdev_tx_capture_stats = dp_pdev_get_tx_capture_stats,
14376 #endif /* WLAN_TX_PKT_CAPTURE_ENH */
14377 #ifdef HW_TX_DELAY_STATS_ENABLE
14378 	.enable_disable_vdev_tx_delay_stats =
14379 				dp_enable_disable_vdev_tx_delay_stats,
14380 	.is_tx_delay_stats_enabled = dp_check_vdev_tx_delay_stats_enabled,
14381 #endif
14382 	.txrx_get_pdev_tid_stats = dp_pdev_get_tid_stats,
14383 #ifdef WLAN_TELEMETRY_STATS_SUPPORT
14384 	.txrx_pdev_telemetry_stats = dp_get_pdev_telemetry_stats,
14385 	.txrx_peer_telemetry_stats = dp_get_peer_telemetry_stats,
14386 	.txrx_pdev_deter_stats = dp_get_pdev_deter_stats,
14387 	.txrx_peer_deter_stats = dp_get_peer_deter_stats,
14388 	.txrx_update_pdev_chan_util_stats = dp_update_pdev_chan_util_stats,
14389 #endif
14390 	.txrx_get_peer_extd_rate_link_stats =
14391 					dp_get_peer_extd_rate_link_stats,
14392 	.get_pdev_obss_stats = dp_get_obss_stats,
14393 	.clear_pdev_obss_pd_stats = dp_clear_pdev_obss_pd_stats,
14394 	/* TODO */
14395 };
14396 
14397 static struct cdp_raw_ops dp_ops_raw = {
14398 	/* TODO */
14399 };
14400 
14401 #ifdef PEER_FLOW_CONTROL
14402 static struct cdp_pflow_ops dp_ops_pflow = {
14403 	dp_tx_flow_ctrl_configure_pdev,
14404 };
14405 #endif
14406 
14407 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
14408 static struct cdp_cfr_ops dp_ops_cfr = {
14409 	.txrx_cfr_filter = NULL,
14410 	.txrx_get_cfr_rcc = dp_get_cfr_rcc,
14411 	.txrx_set_cfr_rcc = dp_set_cfr_rcc,
14412 	.txrx_get_cfr_dbg_stats = dp_get_cfr_dbg_stats,
14413 	.txrx_clear_cfr_dbg_stats = dp_clear_cfr_dbg_stats,
14414 };
14415 #endif
14416 
14417 #ifdef WLAN_SUPPORT_MSCS
14418 static struct cdp_mscs_ops dp_ops_mscs = {
14419 	.mscs_peer_lookup_n_get_priority = dp_mscs_peer_lookup_n_get_priority,
14420 };
14421 #endif
14422 
14423 #ifdef WLAN_SUPPORT_MESH_LATENCY
14424 static struct cdp_mesh_latency_ops dp_ops_mesh_latency = {
14425 	.mesh_latency_update_peer_parameter =
14426 		dp_mesh_latency_update_peer_parameter,
14427 };
14428 #endif
14429 
14430 #ifdef WLAN_SUPPORT_SCS
14431 static struct cdp_scs_ops dp_ops_scs = {
14432 	.scs_peer_lookup_n_rule_match = dp_scs_peer_lookup_n_rule_match,
14433 };
14434 #endif
14435 
14436 #ifdef CONFIG_SAWF_DEF_QUEUES
14437 static struct cdp_sawf_ops dp_ops_sawf = {
14438 	.sawf_def_queues_map_req = dp_sawf_def_queues_map_req,
14439 	.sawf_def_queues_unmap_req = dp_sawf_def_queues_unmap_req,
14440 	.sawf_def_queues_get_map_report =
14441 		dp_sawf_def_queues_get_map_report,
14442 #ifdef CONFIG_SAWF_STATS
14443 	.txrx_get_peer_sawf_delay_stats = dp_sawf_get_peer_delay_stats,
14444 	.txrx_get_peer_sawf_tx_stats = dp_sawf_get_peer_tx_stats,
14445 	.sawf_mpdu_stats_req = dp_sawf_mpdu_stats_req,
14446 	.sawf_mpdu_details_stats_req = dp_sawf_mpdu_details_stats_req,
14447 	.txrx_sawf_set_mov_avg_params = dp_sawf_set_mov_avg_params,
14448 	.txrx_sawf_set_sla_params = dp_sawf_set_sla_params,
14449 	.txrx_sawf_init_telemtery_params = dp_sawf_init_telemetry_params,
14450 	.telemetry_get_throughput_stats = dp_sawf_get_tx_stats,
14451 	.telemetry_get_mpdu_stats = dp_sawf_get_mpdu_sched_stats,
14452 	.telemetry_get_drop_stats = dp_sawf_get_drop_stats,
14453 	.peer_config_ul = dp_sawf_peer_config_ul,
14454 	.swaf_peer_is_sla_configured = dp_swaf_peer_is_sla_configured,
14455 #endif
14456 };
14457 #endif
14458 
14459 #if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
14460 /**
14461  * dp_flush_ring_hptp() - Update ring shadow register
14462  *			  HP/TP address on runtime
14463  *			  resume
14464  * @soc: DP soc context
14465  * @hal_srng: srng
14466  *
14467  * Return: None
14468  */
14469 static
14470 void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
14471 {
14472 	if (hal_srng && hal_srng_get_clear_event(hal_srng,
14473 						 HAL_SRNG_FLUSH_EVENT)) {
14474 		/* Acquire the lock */
14475 		hal_srng_access_start(soc->hal_soc, hal_srng);
14476 
14477 		hal_srng_access_end(soc->hal_soc, hal_srng);
14478 
14479 		hal_srng_set_flush_last_ts(hal_srng);
14480 
14481 		dp_debug("flushed");
14482 	}
14483 }
14484 #endif
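/*
 * Illustrative sketch, not part of the driver: dp_flush_ring_hptp() only
 * acts on rings that were flagged with HAL_SRNG_FLUSH_EVENT beforehand, as
 * dp_runtime_suspend() does below. The hypothetical helper restates that
 * set-then-flush pairing for the TCL data rings.
 */
#if 0
static inline void example_flush_all_tcl(struct dp_soc *soc)
{
	int i;

	for (i = 0; i < soc->num_tcl_data_rings; i++) {
		/* mark the ring so dp_flush_ring_hptp() will act on it */
		hal_srng_set_event(soc->tcl_data_ring[i].hal_srng,
				   HAL_SRNG_FLUSH_EVENT);
		/* empty access start/end cycle writes back shadow HP/TP */
		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
	}
}
#endif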
14485 
14486 #ifdef DP_TX_TRACKING
14487 
14488 #define DP_TX_COMP_MAX_LATENCY_MS 60000
14489 /**
14490  * dp_tx_comp_delay_check() - calculate time latency for tx completion per pkt
14491  * @tx_desc: tx descriptor
14492  *
14493  * Calculate the tx completion latency per packet and trigger self recovery
14494  * when the delay exceeds the threshold value.
14495  *
14496  * Return: True if delay is more than threshold
14497  */
14498 static bool dp_tx_comp_delay_check(struct dp_tx_desc_s *tx_desc)
14499 {
14500 	uint64_t time_latency, timestamp_tick = tx_desc->timestamp_tick;
14501 	qdf_ktime_t current_time = qdf_ktime_real_get();
14502 	qdf_ktime_t timestamp = tx_desc->timestamp;
14503 
14504 	if (dp_tx_pkt_tracepoints_enabled()) {
14505 		if (!timestamp)
14506 			return false;
14507 
14508 		time_latency = qdf_ktime_to_ms(current_time) -
14509 				qdf_ktime_to_ms(timestamp);
14510 		if (time_latency >= DP_TX_COMP_MAX_LATENCY_MS) {
14511 			dp_err_rl("enqueued: %llu ms, current: %llu ms",
14512 				  qdf_ktime_to_ms(timestamp), qdf_ktime_to_ms(current_time));
14513 			return true;
14514 		}
14515 	} else {
14516 		if (!timestamp_tick)
14517 			return false;
14518 
14519 		current_time = qdf_system_ticks();
14520 		time_latency = qdf_system_ticks_to_msecs(current_time -
14521 							 timestamp_tick);
14522 		if (time_latency >= DP_TX_COMP_MAX_LATENCY_MS) {
14523 			dp_err_rl("enqueued: %u ms, current: %u ms",
14524 				  qdf_system_ticks_to_msecs(timestamp_tick),
14525 				  qdf_system_ticks_to_msecs(current_time));
14526 			return true;
14527 		}
14528 	}
14529 
14530 	return false;
14531 }
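/*
 * Illustrative sketch, not part of the driver: the helper above uses two
 * time bases - qdf_ktime_t in ms when tx tracepoints are enabled, and
 * scheduler ticks otherwise. The hypothetical helper below restates the
 * tick-based arithmetic: convert the tick delta, not the raw tick values,
 * to milliseconds before comparing against the threshold.
 */
#if 0
static inline bool example_tx_comp_expired(uint64_t enqueue_tick)
{
	uint64_t now = qdf_system_ticks();

	return qdf_system_ticks_to_msecs(now - enqueue_tick) >=
	       DP_TX_COMP_MAX_LATENCY_MS;
}
#endif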
14532 
14533 /**
14534  * dp_find_missing_tx_comp() - check for leaked descriptor in tx path
14535  * @soc: DP SOC context
14536  *
14537  * Parse through descriptors in all pools and validate magic number and
14538  * completion time. Trigger self recovery if magic value is corrupted.
14539  *
14540  * Return: None.
14541  */
14542 static void dp_find_missing_tx_comp(struct dp_soc *soc)
14543 {
14544 	uint8_t i;
14545 	uint32_t j;
14546 	uint32_t num_desc, page_id, offset;
14547 	uint16_t num_desc_per_page;
14548 	struct dp_tx_desc_s *tx_desc = NULL;
14549 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
14550 
14551 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
14552 		tx_desc_pool = &soc->tx_desc[i];
14553 		if (!(tx_desc_pool->pool_size) ||
14554 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
14555 		    !(tx_desc_pool->desc_pages.cacheable_pages))
14556 			continue;
14557 
14558 		num_desc = tx_desc_pool->pool_size;
14559 		num_desc_per_page =
14560 			tx_desc_pool->desc_pages.num_element_per_page;
14561 		for (j = 0; j < num_desc; j++) {
14562 			page_id = j / num_desc_per_page;
14563 			offset = j % num_desc_per_page;
14564 
14565 			if (qdf_unlikely(!(tx_desc_pool->
14566 					 desc_pages.cacheable_pages)))
14567 				break;
14568 
14569 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
14570 			if (tx_desc->magic == DP_TX_MAGIC_PATTERN_FREE) {
14571 				continue;
14572 			} else if (tx_desc->magic ==
14573 				   DP_TX_MAGIC_PATTERN_INUSE) {
14574 				if (dp_tx_comp_delay_check(tx_desc)) {
14575 					dp_err_rl("Tx completion not rcvd for id: %u",
14576 						  tx_desc->id);
14577 					if (tx_desc->vdev_id == DP_INVALID_VDEV_ID) {
14578 						tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
14579 						dp_err_rl("Freed tx_desc %u",
14580 							  tx_desc->id);
14581 						dp_tx_comp_free_buf(soc,
14582 								    tx_desc,
14583 								    false);
14584 						dp_tx_desc_release(tx_desc, i);
14585 						DP_STATS_INC(soc,
14586 							     tx.tx_comp_force_freed, 1);
14587 					}
14588 				}
14589 			} else {
14590 				dp_err_rl("tx desc %u corrupted, flags: 0x%x",
14591 					  tx_desc->id, tx_desc->flags);
14592 			}
14593 		}
14594 	}
14595 }
14596 #else
14597 static inline void dp_find_missing_tx_comp(struct dp_soc *soc)
14598 {
14599 }
14600 #endif
14601 
14602 #ifdef FEATURE_RUNTIME_PM
14603 /**
14604  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
14605  * @soc_hdl: Datapath soc handle
14606  * @pdev_id: id of data path pdev handle
14607  *
14608  * DP is ready to runtime suspend if there are no pending TX packets.
14609  *
14610  * Return: QDF_STATUS
14611  */
14612 static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
14613 {
14614 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14615 	struct dp_pdev *pdev;
14616 	uint8_t i;
14617 	int32_t tx_pending;
14618 
14619 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14620 	if (!pdev) {
14621 		dp_err("pdev is NULL");
14622 		return QDF_STATUS_E_INVAL;
14623 	}
14624 
14625 	/* Abort if there are any pending TX packets */
14626 	tx_pending = dp_get_tx_pending(dp_pdev_to_cdp_pdev(pdev));
14627 	if (tx_pending) {
14628 		dp_info_rl("%pK: Abort suspend due to pending TX packets %d",
14629 			   soc, tx_pending);
14630 		dp_find_missing_tx_comp(soc);
14631 		/* perform a force flush if tx is pending */
14632 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
14633 			hal_srng_set_event(soc->tcl_data_ring[i].hal_srng,
14634 					   HAL_SRNG_FLUSH_EVENT);
14635 			dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
14636 		}
14637 		qdf_atomic_set(&soc->tx_pending_rtpm, 0);
14638 
14639 		return QDF_STATUS_E_AGAIN;
14640 	}
14641 
14642 	if (dp_runtime_get_refcount(soc)) {
14643 		dp_init_info("refcount: %d", dp_runtime_get_refcount(soc));
14644 
14645 		return QDF_STATUS_E_AGAIN;
14646 	}
14647 
14648 	if (soc->intr_mode == DP_INTR_POLL)
14649 		qdf_timer_stop(&soc->int_timer);
14650 
14651 	dp_rx_fst_update_pm_suspend_status(soc, true);
14652 
14653 	return QDF_STATUS_SUCCESS;
14654 }
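/*
 * Illustrative sketch, not part of the driver: runtime PM frameworks treat
 * QDF_STATUS_E_AGAIN from the suspend path as "busy, retry later" rather
 * than a hard failure. A hypothetical caller could map the status like so.
 */
#if 0
static int example_try_runtime_suspend(struct cdp_soc_t *soc, uint8_t pdev_id)
{
	QDF_STATUS status = dp_runtime_suspend(soc, pdev_id);

	/* pending tx or held refs: not an error, just try again later */
	if (status == QDF_STATUS_E_AGAIN)
		return -EBUSY;

	return QDF_IS_STATUS_SUCCESS(status) ? 0 : -EINVAL;
}
#endif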
14655 
14656 #define DP_FLUSH_WAIT_CNT 10
14657 #define DP_RUNTIME_SUSPEND_WAIT_MS 10
14658 /**
14659  * dp_runtime_resume() - ensure DP is ready to runtime resume
14660  * @soc_hdl: Datapath soc handle
14661  * @pdev_id: id of data path pdev handle
14662  *
14663  * Resume DP for runtime PM.
14664  *
14665  * Return: QDF_STATUS
14666  */
14667 static QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
14668 {
14669 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14670 	int i, suspend_wait = 0;
14671 
14672 	if (soc->intr_mode == DP_INTR_POLL)
14673 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
14674 
14675 	/*
14676 	 * Wait until the dp runtime refcount becomes zero or the wait times
14677 	 * out, then flush the tx left pending from runtime suspend.
14678 	 */
14679 	while (dp_runtime_get_refcount(soc) &&
14680 	       suspend_wait < DP_FLUSH_WAIT_CNT) {
14681 		qdf_sleep(DP_RUNTIME_SUSPEND_WAIT_MS);
14682 		suspend_wait++;
14683 	}
14684 
14685 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
14686 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
14687 	}
14688 	qdf_atomic_set(&soc->tx_pending_rtpm, 0);
14689 
14690 	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
14691 	dp_rx_fst_update_pm_suspend_status(soc, false);
14692 
14693 	return QDF_STATUS_SUCCESS;
14694 }
14695 #endif /* FEATURE_RUNTIME_PM */
14696 
14697 /**
14698  * dp_tx_get_success_ack_stats() - get tx success completion count
14699  * @soc_hdl: Datapath soc handle
14700  * @vdev_id: vdev identifier
14701  *
14702  * Return: tx success ack count
14703  */
14704 static uint32_t dp_tx_get_success_ack_stats(struct cdp_soc_t *soc_hdl,
14705 					    uint8_t vdev_id)
14706 {
14707 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14708 	struct cdp_vdev_stats *vdev_stats = NULL;
14709 	uint32_t tx_success;
14710 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
14711 						     DP_MOD_ID_CDP);
14712 
14713 	if (!vdev) {
14714 		dp_cdp_err("%pK: Invalid vdev id %d", soc, vdev_id);
14715 		return 0;
14716 	}
14717 
14718 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
14719 	if (!vdev_stats) {
14720 		dp_cdp_err("%pK: DP alloc failure - unable to allocate vdev stats", soc);
14721 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
14722 		return 0;
14723 	}
14724 
14725 	dp_aggregate_vdev_stats(vdev, vdev_stats);
14726 
14727 	tx_success = vdev_stats->tx.tx_success.num;
14728 	qdf_mem_free(vdev_stats);
14729 
14730 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
14731 	return tx_success;
14732 }
14733 
14734 #ifdef WLAN_SUPPORT_DATA_STALL
14735 /**
14736  * dp_register_data_stall_detect_cb() - register data stall callback
14737  * @soc_hdl: Datapath soc handle
14738  * @pdev_id: id of data path pdev handle
14739  * @data_stall_detect_callback: data stall callback function
14740  *
14741  * Return: QDF_STATUS Enumeration
14742  */
14743 static
14744 QDF_STATUS dp_register_data_stall_detect_cb(
14745 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
14746 			data_stall_detect_cb data_stall_detect_callback)
14747 {
14748 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14749 	struct dp_pdev *pdev;
14750 
14751 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14752 	if (!pdev) {
14753 		dp_err("pdev NULL!");
14754 		return QDF_STATUS_E_INVAL;
14755 	}
14756 
14757 	pdev->data_stall_detect_callback = data_stall_detect_callback;
14758 	return QDF_STATUS_SUCCESS;
14759 }
14760 
14761 /**
14762  * dp_deregister_data_stall_detect_cb() - de-register data stall callback
14763  * @soc_hdl: Datapath soc handle
14764  * @pdev_id: id of data path pdev handle
14765  * @data_stall_detect_callback: data stall callback function
14766  *
14767  * Return: QDF_STATUS Enumeration
14768  */
14769 static
14770 QDF_STATUS dp_deregister_data_stall_detect_cb(
14771 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
14772 			data_stall_detect_cb data_stall_detect_callback)
14773 {
14774 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14775 	struct dp_pdev *pdev;
14776 
14777 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14778 	if (!pdev) {
14779 		dp_err("pdev NULL!");
14780 		return QDF_STATUS_E_INVAL;
14781 	}
14782 
14783 	pdev->data_stall_detect_callback = NULL;
14784 	return QDF_STATUS_SUCCESS;
14785 }
14786 
14787 /**
14788  * dp_txrx_post_data_stall_event() - post data stall event
14789  * @soc_hdl: Datapath soc handle
14790  * @indicator: Module triggering data stall
14791  * @data_stall_type: data stall event type
14792  * @pdev_id: pdev id
14793  * @vdev_id_bitmap: vdev id bitmap
14794  * @recovery_type: data stall recovery type
14795  *
14796  * Return: None
14797  */
14798 static void
14799 dp_txrx_post_data_stall_event(struct cdp_soc_t *soc_hdl,
14800 			      enum data_stall_log_event_indicator indicator,
14801 			      enum data_stall_log_event_type data_stall_type,
14802 			      uint32_t pdev_id, uint32_t vdev_id_bitmap,
14803 			      enum data_stall_log_recovery_type recovery_type)
14804 {
14805 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14806 	struct data_stall_event_info data_stall_info;
14807 	struct dp_pdev *pdev;
14808 
14809 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14810 	if (!pdev) {
14811 		dp_err("pdev NULL!");
14812 		return;
14813 	}
14814 
14815 	if (!pdev->data_stall_detect_callback) {
14816 		dp_err("data stall cb not registered!");
14817 		return;
14818 	}
14819 
14820 	dp_info("data_stall_type: %x pdev_id: %d",
14821 		data_stall_type, pdev_id);
14822 
14823 	data_stall_info.indicator = indicator;
14824 	data_stall_info.data_stall_type = data_stall_type;
14825 	data_stall_info.vdev_id_bitmap = vdev_id_bitmap;
14826 	data_stall_info.pdev_id = pdev_id;
14827 	data_stall_info.recovery_type = recovery_type;
14828 
14829 	pdev->data_stall_detect_callback(&data_stall_info);
14830 }
14831 #endif /* WLAN_SUPPORT_DATA_STALL */
14832 
14833 #ifdef WLAN_FEATURE_STATS_EXT
14834 /* rx hw stats event wait timeout in ms */
14835 #define DP_REO_STATUS_STATS_TIMEOUT 850
14836 /**
14837  * dp_txrx_ext_stats_request() - request dp txrx extended stats
14838  * @soc_hdl: soc handle
14839  * @pdev_id: pdev id
14840  * @req: stats request
14841  *
14842  * Return: QDF_STATUS
14843  */
14844 static QDF_STATUS
14845 dp_txrx_ext_stats_request(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
14846 			  struct cdp_txrx_ext_stats *req)
14847 {
14848 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
14849 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14850 	int i = 0;
14851 	int tcl_ring_full = 0;
14852 
14853 	if (!pdev) {
14854 		dp_err("pdev is null");
14855 		return QDF_STATUS_E_INVAL;
14856 	}
14857 
14858 	dp_aggregate_pdev_stats(pdev);
14859 
14860 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++)
14861 		tcl_ring_full += soc->stats.tx.tcl_ring_full[i];
14862 
14863 	req->tx_msdu_enqueue = pdev->stats.tx_i.processed.num;
14864 	req->tx_msdu_overflow = tcl_ring_full;
14865 	/* Error rate at LMAC */
14866 	req->rx_mpdu_received = soc->ext_stats.rx_mpdu_received +
14867 				pdev->stats.err.fw_reported_rxdma_error;
14868 	/* only count error source from RXDMA */
14869 	req->rx_mpdu_error = pdev->stats.err.fw_reported_rxdma_error;
14870 
14871 	/* Error rate at above the MAC */
14872 	req->rx_mpdu_delivered = soc->ext_stats.rx_mpdu_received;
14873 	req->rx_mpdu_missed = pdev->stats.err.reo_error;
14874 
14875 	dp_info("ext stats: tx_msdu_enq = %u, tx_msdu_overflow = %u, "
14876 		"rx_mpdu_receive = %u, rx_mpdu_delivered = %u, "
14877 		"rx_mpdu_missed = %u, rx_mpdu_error = %u",
14878 		req->tx_msdu_enqueue,
14879 		req->tx_msdu_overflow,
14880 		req->rx_mpdu_received,
14881 		req->rx_mpdu_delivered,
14882 		req->rx_mpdu_missed,
14883 		req->rx_mpdu_error);
14884 
14885 	return QDF_STATUS_SUCCESS;
14886 }
14887 
14888 /**
14889  * dp_rx_hw_stats_cb() - rx hw stats request response callback
14890  * @soc: soc handle
14891  * @cb_ctxt: callback context
14892  * @reo_status: reo command response status
14893  *
14894  * Return: None
14895  */
14896 static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
14897 			      union hal_reo_status *reo_status)
14898 {
14899 	struct dp_req_rx_hw_stats_t *rx_hw_stats = cb_ctxt;
14900 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
14901 	bool is_query_timeout;
14902 
14903 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
14904 	is_query_timeout = rx_hw_stats->is_query_timeout;
14905 	/* free the cb_ctxt once all pending tid stats queries are received */
14906 	if (qdf_atomic_dec_and_test(&rx_hw_stats->pending_tid_stats_cnt)) {
14907 		if (!is_query_timeout) {
14908 			qdf_event_set(&soc->rx_hw_stats_event);
14909 			soc->is_last_stats_ctx_init = false;
14910 		}
14911 
14912 		qdf_mem_free(rx_hw_stats);
14913 	}
14914 
14915 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
14916 		dp_info("REO stats failure %d",
14917 			queue_status->header.status);
14918 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14919 		return;
14920 	}
14921 
14922 	if (!is_query_timeout) {
14923 		soc->ext_stats.rx_mpdu_received +=
14924 					queue_status->mpdu_frms_cnt;
14925 		soc->ext_stats.rx_mpdu_missed +=
14926 					queue_status->hole_cnt;
14927 	}
14928 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14929 }
14930 
14931 /**
14932  * dp_request_rx_hw_stats() - request rx hardware stats
14933  * @soc_hdl: soc handle
14934  * @vdev_id: vdev id
14935  *
14936  * Return: QDF_STATUS
14937  */
14938 static QDF_STATUS
14939 dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
14940 {
14941 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
14942 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
14943 						     DP_MOD_ID_CDP);
14944 	struct dp_peer *peer = NULL;
14945 	QDF_STATUS status;
14946 	struct dp_req_rx_hw_stats_t *rx_hw_stats;
14947 	int rx_stats_sent_cnt = 0;
14948 	uint32_t last_rx_mpdu_received;
14949 	uint32_t last_rx_mpdu_missed;
14950 
14951 	if (!vdev) {
14952 		dp_err("vdev is null for vdev_id: %u", vdev_id);
14953 		status = QDF_STATUS_E_INVAL;
14954 		goto out;
14955 	}
14956 
14957 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_CDP);
14958 
14959 	if (!peer) {
14960 		dp_err("Peer is NULL");
14961 		status = QDF_STATUS_E_INVAL;
14962 		goto out;
14963 	}
14964 
14965 	rx_hw_stats = qdf_mem_malloc(sizeof(*rx_hw_stats));
14966 
14967 	if (!rx_hw_stats) {
14968 		dp_err("malloc failed for hw stats structure");
14969 		status = QDF_STATUS_E_INVAL;
14970 		goto out;
14971 	}
14972 
14973 	qdf_event_reset(&soc->rx_hw_stats_event);
14974 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
14975 	/* save the last soc cumulative stats and reset it to 0 */
14976 	last_rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
14977 	last_rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
14978 	soc->ext_stats.rx_mpdu_received = 0;
14979 	soc->ext_stats.rx_mpdu_missed = 0;
14980 
14981 	dp_debug("HW stats query start");
14982 	rx_stats_sent_cnt =
14983 		dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, rx_hw_stats);
14984 	if (!rx_stats_sent_cnt) {
14985 		dp_err("no tid stats sent successfully");
14986 		qdf_mem_free(rx_hw_stats);
14987 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14988 		status = QDF_STATUS_E_INVAL;
14989 		goto out;
14990 	}
14991 	qdf_atomic_set(&rx_hw_stats->pending_tid_stats_cnt,
14992 		       rx_stats_sent_cnt);
14993 	rx_hw_stats->is_query_timeout = false;
14994 	soc->is_last_stats_ctx_init = true;
14995 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14996 
14997 	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
14998 				       DP_REO_STATUS_STATS_TIMEOUT);
14999 	dp_debug("HW stats query end with %d", rx_stats_sent_cnt);
15000 
15001 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
15002 	if (status != QDF_STATUS_SUCCESS) {
15003 		dp_info("partial rx hw stats event collected with %d",
15004 			qdf_atomic_read(
15005 				&rx_hw_stats->pending_tid_stats_cnt));
15006 		if (soc->is_last_stats_ctx_init)
15007 			rx_hw_stats->is_query_timeout = true;
15008 		/*
15009 		 * If query timeout happened, use the last saved stats
15010 		 * for this time query.
15011 		 */
15012 		soc->ext_stats.rx_mpdu_received = last_rx_mpdu_received;
15013 		soc->ext_stats.rx_mpdu_missed = last_rx_mpdu_missed;
15014 		DP_STATS_INC(soc, rx.rx_hw_stats_timeout, 1);
15015 
15016 	}
15017 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
15018 
15019 out:
15020 	if (peer)
15021 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
15022 	if (vdev)
15023 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
15024 	DP_STATS_INC(soc, rx.rx_hw_stats_requested, 1);
15025 
15026 	return status;
15027 }
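/*
 * Illustrative sketch, not part of the driver: dp_request_rx_hw_stats()
 * and dp_rx_hw_stats_cb() form a fan-out/fan-in pattern - one REO query
 * is posted per TID, an atomic counts the pending responses, and the last
 * response signals the waiter and frees the shared context. A minimal
 * generic restatement (all "example_*" names are assumptions):
 */
#if 0
struct example_fanin {
	qdf_atomic_t pending;	/* outstanding responses */
	qdf_event_t *done;	/* waiter's completion event */
};

static void example_fanin_complete(struct example_fanin *ctx)
{
	/* only the final responder signals and releases the context */
	if (qdf_atomic_dec_and_test(&ctx->pending)) {
		qdf_event_set(ctx->done);
		qdf_mem_free(ctx);
	}
}
#endif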
15028 
15029 /**
15030  * dp_reset_rx_hw_ext_stats() - Reset rx hardware ext stats
15031  * @soc_hdl: soc handle
15032  *
15033  * Return: None
15034  */
15035 static
15036 void dp_reset_rx_hw_ext_stats(struct cdp_soc_t *soc_hdl)
15037 {
15038 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
15039 
15040 	soc->ext_stats.rx_mpdu_received = 0;
15041 	soc->ext_stats.rx_mpdu_missed = 0;
15042 }
15043 #endif /* WLAN_FEATURE_STATS_EXT */
15044 
15045 static
15046 uint32_t dp_get_tx_rings_grp_bitmap(struct cdp_soc_t *soc_hdl)
15047 {
15048 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
15049 
15050 	return soc->wlan_cfg_ctx->tx_rings_grp_bitmap;
15051 }
15052 
15053 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
15054 /**
15055  * dp_mark_first_wakeup_packet() - set flag to indicate that
15056  *    fw supports marking the first packet after wow wakeup
15057  * @soc_hdl: Datapath soc handle
15058  * @pdev_id: id of data path pdev handle
15059  * @value: 1 for enabled / 0 for disabled
15060  *
15061  * Return: None
15062  */
15063 static void dp_mark_first_wakeup_packet(struct cdp_soc_t *soc_hdl,
15064 					uint8_t pdev_id, uint8_t value)
15065 {
15066 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15067 	struct dp_pdev *pdev;
15068 
15069 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15070 	if (!pdev) {
15071 		dp_err("pdev is NULL");
15072 		return;
15073 	}
15074 
15075 	pdev->is_first_wakeup_packet = value;
15076 }
15077 #endif
15078 
15079 #ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
15080 /**
15081  * dp_set_peer_txq_flush_config() - Set the peer txq flush configuration
15082  * @soc_hdl: Opaque handle to the DP soc object
15083  * @vdev_id: VDEV identifier
15084  * @mac: MAC address of the peer
15085  * @ac: access category mask
15086  * @tid: TID mask
15087  * @policy: Flush policy
15088  *
15089  * Return: 0 on success, errno on failure
15090  */
15091 static int dp_set_peer_txq_flush_config(struct cdp_soc_t *soc_hdl,
15092 					uint8_t vdev_id, uint8_t *mac,
15093 					uint8_t ac, uint32_t tid,
15094 					enum cdp_peer_txq_flush_policy policy)
15095 {
15096 	struct dp_soc *soc;
15097 
15098 	if (!soc_hdl) {
15099 		dp_err("soc is null");
15100 		return -EINVAL;
15101 	}
15102 	soc = cdp_soc_t_to_dp_soc(soc_hdl);
15103 	return target_if_peer_txq_flush_config(soc->ctrl_psoc, vdev_id,
15104 					       mac, ac, tid, policy);
15105 }
15106 #endif
15107 
15108 #ifdef CONNECTIVITY_PKTLOG
15109 /**
15110  * dp_register_packetdump_callback() - registers
15111  *  tx data packet, tx mgmt. packet and rx data packet
15112  *  dump callback handlers.
15113  *
15114  * @soc_hdl: Datapath soc handle
15115  * @pdev_id: id of data path pdev handle
15116  * @dp_tx_packetdump_cb: tx packetdump cb
15117  * @dp_rx_packetdump_cb: rx packetdump cb
15118  *
15119  * This function is used to register tx data pkt, tx mgmt
15120  * pkt and rx data pkt dump callbacks
15121  *
15122  * Return: None
15123  *
15124  */
15125 static inline
15126 void dp_register_packetdump_callback(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
15127 				     ol_txrx_pktdump_cb dp_tx_packetdump_cb,
15128 				     ol_txrx_pktdump_cb dp_rx_packetdump_cb)
15129 {
15130 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15131 	struct dp_pdev *pdev;
15132 
15133 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15134 	if (!pdev) {
15135 		dp_err("pdev is NULL!");
15136 		return;
15137 	}
15138 
15139 	pdev->dp_tx_packetdump_cb = dp_tx_packetdump_cb;
15140 	pdev->dp_rx_packetdump_cb = dp_rx_packetdump_cb;
15141 }
15142 
15143 /**
15144  * dp_deregister_packetdump_callback() - deregisters
15145  *  tx data packet, tx mgmt. packet and rx data packet
15146  *  dump callback handlers
15147  * @soc_hdl: Datapath soc handle
15148  * @pdev_id: id of data path pdev handle
15149  *
15150  * This function is used to deregister tx data pkt,
15151  * tx mgmt pkt and rx data pkt dump callbacks
15152  *
15153  * Return: None
15154  *
15155  */
15156 static inline
15157 void dp_deregister_packetdump_callback(struct cdp_soc_t *soc_hdl,
15158 				       uint8_t pdev_id)
15159 {
15160 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15161 	struct dp_pdev *pdev;
15162 
15163 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15164 	if (!pdev) {
15165 		dp_err("pdev is NULL!");
15166 		return;
15167 	}
15168 
15169 	pdev->dp_tx_packetdump_cb = NULL;
15170 	pdev->dp_rx_packetdump_cb = NULL;
15171 }
15172 #endif
15173 
15174 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
15175 /**
15176  * dp_set_bus_vote_lvl_high() - Take a vote on bus bandwidth from dp
15177  * @soc_hdl: Datapath soc handle
15178  * @high: whether the bus bw is high or not
15179  *
15180  * Return: void
15181  */
15182 static void
15183 dp_set_bus_vote_lvl_high(ol_txrx_soc_handle soc_hdl, bool high)
15184 {
15185 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15186 
15187 	soc->high_throughput = high;
15188 }
15189 
15190 /**
15191  * dp_get_bus_vote_lvl_high() - get the current bus bandwidth vote level
15192  * @soc_hdl: Datapath soc handle
15193  *
15194  * Return: bool
15195  */
15196 static bool
15197 dp_get_bus_vote_lvl_high(ol_txrx_soc_handle soc_hdl)
15198 {
15199 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15200 
15201 	return soc->high_throughput;
15202 }
15203 #endif
15204 
15205 #ifdef DP_PEER_EXTENDED_API
15206 static struct cdp_misc_ops dp_ops_misc = {
15207 #ifdef FEATURE_WLAN_TDLS
15208 	.tx_non_std = dp_tx_non_std,
15209 #endif /* FEATURE_WLAN_TDLS */
15210 	.get_opmode = dp_get_opmode,
15211 #ifdef FEATURE_RUNTIME_PM
15212 	.runtime_suspend = dp_runtime_suspend,
15213 	.runtime_resume = dp_runtime_resume,
15214 #endif /* FEATURE_RUNTIME_PM */
15215 	.get_num_rx_contexts = dp_get_num_rx_contexts,
15216 	.get_tx_ack_stats = dp_tx_get_success_ack_stats,
15217 #ifdef WLAN_SUPPORT_DATA_STALL
15218 	.txrx_data_stall_cb_register = dp_register_data_stall_detect_cb,
15219 	.txrx_data_stall_cb_deregister = dp_deregister_data_stall_detect_cb,
15220 	.txrx_post_data_stall_event = dp_txrx_post_data_stall_event,
15221 #endif
15222 
15223 #ifdef WLAN_FEATURE_STATS_EXT
15224 	.txrx_ext_stats_request = dp_txrx_ext_stats_request,
15225 	.request_rx_hw_stats = dp_request_rx_hw_stats,
15226 	.reset_rx_hw_ext_stats = dp_reset_rx_hw_ext_stats,
15227 #endif /* WLAN_FEATURE_STATS_EXT */
15228 	.vdev_inform_ll_conn = dp_vdev_inform_ll_conn,
15229 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
15230 	.set_swlm_enable = dp_soc_set_swlm_enable,
15231 	.is_swlm_enabled = dp_soc_is_swlm_enabled,
15232 #endif
15233 	.display_txrx_hw_info = dp_display_srng_info,
15234 	.get_tx_rings_grp_bitmap = dp_get_tx_rings_grp_bitmap,
15235 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
15236 	.mark_first_wakeup_packet = dp_mark_first_wakeup_packet,
15237 #endif
15238 #ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
15239 	.set_peer_txq_flush_config = dp_set_peer_txq_flush_config,
15240 #endif
15241 #ifdef CONNECTIVITY_PKTLOG
15242 	.register_pktdump_cb = dp_register_packetdump_callback,
15243 	.unregister_pktdump_cb = dp_deregister_packetdump_callback,
15244 #endif
15245 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
15246 	.set_bus_vote_lvl_high = dp_set_bus_vote_lvl_high,
15247 	.get_bus_vote_lvl_high = dp_get_bus_vote_lvl_high,
15248 #endif
15249 };
15250 #endif
15251 
15252 #ifdef DP_FLOW_CTL
15253 static struct cdp_flowctl_ops dp_ops_flowctl = {
15254 	/* WIFI 3.0 DP implement as required. */
15255 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
15256 	.flow_pool_map_handler = dp_tx_flow_pool_map,
15257 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
15258 	.register_pause_cb = dp_txrx_register_pause_cb,
15259 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
15260 	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
15261 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
15262 };
15263 
15264 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
15265 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
15266 };
15267 #endif
15268 
15269 #ifdef IPA_OFFLOAD
15270 static struct cdp_ipa_ops dp_ops_ipa = {
15271 	.ipa_get_resource = dp_ipa_get_resource,
15272 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
15273 	.ipa_iounmap_doorbell_vaddr = dp_ipa_iounmap_doorbell_vaddr,
15274 	.ipa_op_response = dp_ipa_op_response,
15275 	.ipa_register_op_cb = dp_ipa_register_op_cb,
15276 	.ipa_deregister_op_cb = dp_ipa_deregister_op_cb,
15277 	.ipa_get_stat = dp_ipa_get_stat,
15278 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
15279 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
15280 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
15281 	.ipa_setup = dp_ipa_setup,
15282 	.ipa_cleanup = dp_ipa_cleanup,
15283 	.ipa_setup_iface = dp_ipa_setup_iface,
15284 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
15285 	.ipa_enable_pipes = dp_ipa_enable_pipes,
15286 	.ipa_disable_pipes = dp_ipa_disable_pipes,
15287 	.ipa_set_perf_level = dp_ipa_set_perf_level,
15288 	.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd,
15289 	.ipa_tx_buf_smmu_mapping = dp_ipa_tx_buf_smmu_mapping,
15290 	.ipa_tx_buf_smmu_unmapping = dp_ipa_tx_buf_smmu_unmapping,
15291 #ifdef QCA_ENHANCED_STATS_SUPPORT
15292 	.ipa_update_peer_rx_stats = dp_ipa_update_peer_rx_stats,
15293 #endif
15294 #ifdef IPA_OPT_WIFI_DP
15295 	.ipa_rx_super_rule_setup = dp_ipa_rx_super_rule_setup,
15296 	.ipa_pcie_link_up = dp_ipa_pcie_link_up,
15297 	.ipa_pcie_link_down = dp_ipa_pcie_link_down,
15298 #endif
15299 #ifdef IPA_WDS_EASYMESH_FEATURE
15300 	.ipa_ast_create = dp_ipa_ast_create,
15301 #endif
15302 	.ipa_get_wdi_version = dp_ipa_get_wdi_version,
15303 };
15304 #endif
15305 
15306 #ifdef DP_POWER_SAVE
15307 static QDF_STATUS dp_bus_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
15308 {
15309 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15310 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15311 	int timeout = SUSPEND_DRAIN_WAIT;
15312 	int drain_wait_delay = 50; /* 50 ms */
15313 	int32_t tx_pending;
15314 
15315 	if (qdf_unlikely(!pdev)) {
15316 		dp_err("pdev is NULL");
15317 		return QDF_STATUS_E_INVAL;
15318 	}
15319 
15320 	/* Abort if there are any pending TX packets */
15321 	while ((tx_pending = dp_get_tx_pending((struct cdp_pdev *)pdev))) {
15322 		qdf_sleep(drain_wait_delay);
15323 		if (timeout <= 0) {
15324 			dp_info("TX frames are pending %d, abort suspend",
15325 				tx_pending);
15326 			dp_find_missing_tx_comp(soc);
15327 			return QDF_STATUS_E_TIMEOUT;
15328 		}
15329 		timeout = timeout - drain_wait_delay;
15330 	}
15331 
15332 	if (soc->intr_mode == DP_INTR_POLL)
15333 		qdf_timer_stop(&soc->int_timer);
15334 
15335 	/* Stop monitor reap timer and reap any pending frames in ring */
15336 	dp_monitor_reap_timer_suspend(soc);
15337 
15338 	dp_suspend_fse_cache_flush(soc);
15339 	dp_rx_fst_update_pm_suspend_status(soc, true);
15340 
15341 	return QDF_STATUS_SUCCESS;
15342 }
15343 
15344 static QDF_STATUS dp_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
15345 {
15346 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15347 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15348 	uint8_t i;
15349 
15350 	if (qdf_unlikely(!pdev)) {
15351 		dp_err("pdev is NULL");
15352 		return QDF_STATUS_E_INVAL;
15353 	}
15354 
15355 	if (soc->intr_mode == DP_INTR_POLL)
15356 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
15357 
15358 	/* Start monitor reap timer */
15359 	dp_monitor_reap_timer_start(soc, CDP_MON_REAP_SOURCE_ANY);
15360 
15361 	dp_resume_fse_cache_flush(soc);
15362 
15363 	for (i = 0; i < soc->num_tcl_data_rings; i++)
15364 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
15365 
15366 	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
15367 	dp_rx_fst_update_pm_suspend_status(soc, false);
15368 
15369 	dp_rx_fst_requeue_wq(soc);
15370 
15371 	return QDF_STATUS_SUCCESS;
15372 }
15373 
15374 /**
15375  * dp_process_wow_ack_rsp() - process wow ack response
15376  * @soc_hdl: datapath soc handle
15377  * @pdev_id: data path pdev handle id
15378  *
15379  * Return: none
15380  */
15381 static void dp_process_wow_ack_rsp(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
15382 {
15383 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15384 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15385 
15386 	if (qdf_unlikely(!pdev)) {
15387 		dp_err("pdev is NULL");
15388 		return;
15389 	}
15390 
15391 	/*
15392 	 * As part of wow enable, FW disables the mon status ring; on the wow
15393 	 * ack response from FW, reap the mon status ring to make sure no
15394 	 * packets are left pending in the ring.
15395 	 */
15396 	dp_monitor_reap_timer_suspend(soc);
15397 }
15398 
15399 /**
15400  * dp_process_target_suspend_req() - process target suspend request
15401  * @soc_hdl: datapath soc handle
15402  * @pdev_id: data path pdev handle id
15403  *
15404  * Return: none
15405  */
15406 static void dp_process_target_suspend_req(struct cdp_soc_t *soc_hdl,
15407 					  uint8_t pdev_id)
15408 {
15409 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15410 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15411 
15412 	if (qdf_unlikely(!pdev)) {
15413 		dp_err("pdev is NULL");
15414 		return;
15415 	}
15416 
15417 	/* Stop monitor reap timer and reap any pending frames in ring */
15418 	dp_monitor_reap_timer_suspend(soc);
15419 }
15420 
15421 static struct cdp_bus_ops dp_ops_bus = {
15422 	.bus_suspend = dp_bus_suspend,
15423 	.bus_resume = dp_bus_resume,
15424 	.process_wow_ack_rsp = dp_process_wow_ack_rsp,
15425 	.process_target_suspend_req = dp_process_target_suspend_req
15426 };
15427 #endif
15428 
15429 #ifdef DP_FLOW_CTL
15430 static struct cdp_throttle_ops dp_ops_throttle = {
15431 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
15432 };
15433 
15434 static struct cdp_cfg_ops dp_ops_cfg = {
15435 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
15436 };
15437 #endif
15438 
15439 #ifdef DP_PEER_EXTENDED_API
15440 static struct cdp_ocb_ops dp_ops_ocb = {
15441 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
15442 };
15443 
15444 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
15445 	.clear_stats = dp_txrx_clear_dump_stats,
15446 };
15447 
15448 static struct cdp_peer_ops dp_ops_peer = {
15449 	.register_peer = dp_register_peer,
15450 	.clear_peer = dp_clear_peer,
15451 	.find_peer_exist = dp_find_peer_exist,
15452 	.find_peer_exist_on_vdev = dp_find_peer_exist_on_vdev,
15453 	.find_peer_exist_on_other_vdev = dp_find_peer_exist_on_other_vdev,
15454 	.peer_state_update = dp_peer_state_update,
15455 	.get_vdevid = dp_get_vdevid,
15456 	.get_vdev_by_peer_addr = dp_get_vdev_by_peer_addr,
15457 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
15458 	.get_peer_state = dp_get_peer_state,
15459 	.peer_flush_frags = dp_peer_flush_frags,
15460 	.set_peer_as_tdls_peer = dp_set_peer_as_tdls_peer,
15461 };
15462 #endif
15463 
15464 static void dp_soc_txrx_ops_attach(struct dp_soc *soc)
15465 {
15466 	soc->cdp_soc.ops->cmn_drv_ops = &dp_ops_cmn;
15467 	soc->cdp_soc.ops->ctrl_ops = &dp_ops_ctrl;
15468 	soc->cdp_soc.ops->me_ops = &dp_ops_me;
15469 	soc->cdp_soc.ops->host_stats_ops = &dp_ops_host_stats;
15470 	soc->cdp_soc.ops->wds_ops = &dp_ops_wds;
15471 	soc->cdp_soc.ops->raw_ops = &dp_ops_raw;
15472 #ifdef PEER_FLOW_CONTROL
15473 	soc->cdp_soc.ops->pflow_ops = &dp_ops_pflow;
15474 #endif /* PEER_FLOW_CONTROL */
15475 #ifdef DP_PEER_EXTENDED_API
15476 	soc->cdp_soc.ops->misc_ops = &dp_ops_misc;
15477 	soc->cdp_soc.ops->ocb_ops = &dp_ops_ocb;
15478 	soc->cdp_soc.ops->peer_ops = &dp_ops_peer;
15479 	soc->cdp_soc.ops->mob_stats_ops = &dp_ops_mob_stats;
15480 #endif
15481 #ifdef DP_FLOW_CTL
15482 	soc->cdp_soc.ops->cfg_ops = &dp_ops_cfg;
15483 	soc->cdp_soc.ops->flowctl_ops = &dp_ops_flowctl;
15484 	soc->cdp_soc.ops->l_flowctl_ops = &dp_ops_l_flowctl;
15485 	soc->cdp_soc.ops->throttle_ops = &dp_ops_throttle;
15486 #endif
15487 #ifdef IPA_OFFLOAD
15488 	soc->cdp_soc.ops->ipa_ops = &dp_ops_ipa;
15489 #endif
15490 #ifdef DP_POWER_SAVE
15491 	soc->cdp_soc.ops->bus_ops = &dp_ops_bus;
15492 #endif
15493 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
15494 	soc->cdp_soc.ops->cfr_ops = &dp_ops_cfr;
15495 #endif
15496 #ifdef WLAN_SUPPORT_MSCS
15497 	soc->cdp_soc.ops->mscs_ops = &dp_ops_mscs;
15498 #endif
15499 #ifdef WLAN_SUPPORT_MESH_LATENCY
15500 	soc->cdp_soc.ops->mesh_latency_ops = &dp_ops_mesh_latency;
15501 #endif
15502 #ifdef CONFIG_SAWF_DEF_QUEUES
15503 	soc->cdp_soc.ops->sawf_ops = &dp_ops_sawf;
15504 #endif
15505 #ifdef WLAN_SUPPORT_SCS
15506 	soc->cdp_soc.ops->scs_ops = &dp_ops_scs;
15507 #endif
15508 }
15509 
15510 void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
15511 {
15512 	uint32_t i;
15513 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
15514 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
15515 	}
15516 }
15517 
15518 qdf_export_symbol(dp_soc_set_txrx_ring_map);
15519 
15520 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
15521 	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574) || \
15522 	defined(QCA_WIFI_QCA5332)
15523 /**
15524  * dp_soc_attach_wifi3() - Attach txrx SOC
15525  * @ctrl_psoc: Opaque SOC handle from control plane
15526  * @params: SOC attach params
15527  *
15528  * Return: DP SOC handle on success, NULL on failure
15529  */
15530 struct cdp_soc_t *
15531 dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
15532 		    struct cdp_soc_attach_params *params)
15533 {
15534 	struct dp_soc *dp_soc = NULL;
15535 
15536 	dp_soc = dp_soc_attach(ctrl_psoc, params);
15537 
15538 	return dp_soc_to_cdp_soc_t(dp_soc);
15539 }
15540 
15541 static inline void dp_soc_set_def_pdev(struct dp_soc *soc)
15542 {
15543 	int lmac_id;
15544 
15545 	for (lmac_id = 0; lmac_id < MAX_NUM_LMAC_HW; lmac_id++) {
15546 		/* Set default host PDEV ID for lmac_id */
15547 		wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
15548 				      INVALID_PDEV_ID, lmac_id);
15549 	}
15550 }
15551 
15552 static uint32_t
15553 dp_get_link_desc_id_start(uint16_t arch_id)
15554 {
15555 	switch (arch_id) {
15556 	case CDP_ARCH_TYPE_LI:
15557 		return LINK_DESC_ID_START_21_BITS_COOKIE;
15558 	case CDP_ARCH_TYPE_BE:
15559 		return LINK_DESC_ID_START_20_BITS_COOKIE;
15560 	default:
15561 		dp_err("unknown arch_id 0x%x", arch_id);
15562 		QDF_BUG(0);
15563 		return LINK_DESC_ID_START_21_BITS_COOKIE;
15564 	}
15565 }
15566 
15567 /**
15568  * dp_soc_attach() - Attach txrx SOC
15569  * @ctrl_psoc: Opaque SOC handle from control plane
15570  * @params: SOC attach params
15571  *
15572  * Return: DP SOC handle on success, NULL on failure
15573  */
15574 static struct dp_soc *
15575 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
15576 	      struct cdp_soc_attach_params *params)
15577 {
15578 	struct dp_soc *soc =  NULL;
15579 	uint16_t arch_id;
15580 	struct hif_opaque_softc *hif_handle = params->hif_handle;
15581 	qdf_device_t qdf_osdev = params->qdf_osdev;
15582 	struct ol_if_ops *ol_ops = params->ol_ops;
15583 	uint16_t device_id = params->device_id;
15584 
15585 	if (!hif_handle) {
15586 		dp_err("HIF handle is NULL");
15587 		goto fail0;
15588 	}
15589 	arch_id = cdp_get_arch_type_from_devid(device_id);
15590 	soc = qdf_mem_common_alloc(dp_get_soc_context_size(device_id));
15591 	if (!soc) {
15592 		dp_err("DP SOC memory allocation failed");
15593 		goto fail0;
15594 	}
15595 
15596 	dp_info("soc memory allocated %pK", soc);
15597 	soc->hif_handle = hif_handle;
15598 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
15599 	if (!soc->hal_soc)
15600 		goto fail1;
15601 
15602 	hif_get_cmem_info(soc->hif_handle,
15603 			  &soc->cmem_base,
15604 			  &soc->cmem_total_size);
15605 	soc->cmem_avail_size = soc->cmem_total_size;
15606 	soc->device_id = device_id;
15607 	soc->cdp_soc.ops =
15608 		(struct cdp_ops *)qdf_mem_malloc(sizeof(struct cdp_ops));
15609 	if (!soc->cdp_soc.ops)
15610 		goto fail1;
15611 
15612 	dp_soc_txrx_ops_attach(soc);
15613 	soc->cdp_soc.ol_ops = ol_ops;
15614 	soc->ctrl_psoc = ctrl_psoc;
15615 	soc->osdev = qdf_osdev;
15616 	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
15617 	hal_rx_get_tlv_size(soc->hal_soc, &soc->rx_pkt_tlv_size,
15618 			    &soc->rx_mon_pkt_tlv_size);
15619 	soc->idle_link_bm_id = hal_get_idle_link_bm_id(soc->hal_soc,
15620 						       params->mlo_chip_id);
15621 	soc->features.dmac_cmn_src_rxbuf_ring_enabled =
15622 		hal_dmac_cmn_src_rxbuf_ring_get(soc->hal_soc);
15623 	soc->arch_id = arch_id;
15624 	soc->link_desc_id_start =
15625 			dp_get_link_desc_id_start(soc->arch_id);
15626 	dp_configure_arch_ops(soc);
15627 
15628 	/* Reset wbm sg list and flags */
15629 	dp_rx_wbm_sg_list_reset(soc);
15630 
15631 	dp_soc_cfg_history_attach(soc);
15632 	dp_soc_tx_hw_desc_history_attach(soc);
15633 	dp_soc_rx_history_attach(soc);
15634 	dp_soc_mon_status_ring_history_attach(soc);
15635 	dp_soc_tx_history_attach(soc);
15636 	wlan_set_srng_cfg(&soc->wlan_srng_cfg);
15637 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
15638 	if (!soc->wlan_cfg_ctx) {
15639 		dp_err("wlan_cfg_ctx failed");
15640 		goto fail2;
15641 	}
15642 	dp_soc_cfg_attach(soc);
15643 
15644 	if (dp_hw_link_desc_pool_banks_alloc(soc, WLAN_INVALID_PDEV_ID)) {
15645 		dp_err("failed to allocate link desc pool banks");
15646 		goto fail3;
15647 	}
15648 
15649 	if (dp_hw_link_desc_ring_alloc(soc)) {
15650 		dp_err("failed to allocate link_desc_ring");
15651 		goto fail4;
15652 	}
15653 
15654 	if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_attach(soc,
15655 								 params))) {
15656 		dp_err("unable to do target specific attach");
15657 		goto fail5;
15658 	}
15659 
15660 	if (dp_soc_srng_alloc(soc)) {
15661 		dp_err("failed to allocate soc srng rings");
15662 		goto fail6;
15663 	}
15664 
15665 	if (dp_soc_tx_desc_sw_pools_alloc(soc)) {
15666 		dp_err("dp_soc_tx_desc_sw_pools_alloc failed");
15667 		goto fail7;
15668 	}
15669 
15670 	if (!dp_monitor_modularized_enable()) {
15671 		if (dp_mon_soc_attach_wrapper(soc)) {
15672 			dp_err("failed to attach monitor");
15673 			goto fail8;
15674 		}
15675 	}
15676 
15677 	if (hal_reo_shared_qaddr_setup((hal_soc_handle_t)soc->hal_soc,
15678 				       &soc->reo_qref)
15679 	    != QDF_STATUS_SUCCESS) {
15680 		dp_err("unable to setup reo shared qaddr");
15681 		goto fail9;
15682 	}
15683 
15684 	if (dp_sysfs_initialize_stats(soc) != QDF_STATUS_SUCCESS) {
15685 		dp_err("failed to initialize dp stats sysfs file");
15686 		dp_sysfs_deinitialize_stats(soc);
15687 	}
15688 
15689 	dp_soc_swlm_attach(soc);
15690 	dp_soc_set_interrupt_mode(soc);
15691 	dp_soc_set_def_pdev(soc);
15692 
15693 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
15694 		qdf_dma_mem_stats_read(),
15695 		qdf_heap_mem_stats_read(),
15696 		qdf_skb_total_mem_stats_read());
15697 
15698 	return soc;
15699 fail9:
15700 	if (!dp_monitor_modularized_enable())
15701 		dp_mon_soc_detach_wrapper(soc);
15702 fail8:
15703 	dp_soc_tx_desc_sw_pools_free(soc);
15704 fail7:
15705 	dp_soc_srng_free(soc);
15706 fail6:
15707 	soc->arch_ops.txrx_soc_detach(soc);
15708 fail5:
15709 	dp_hw_link_desc_ring_free(soc);
15710 fail4:
15711 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
15712 fail3:
15713 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
15714 fail2:
15715 	qdf_mem_free(soc->cdp_soc.ops);
15716 fail1:
15717 	qdf_mem_common_free(soc);
15718 fail0:
15719 	return NULL;
15720 }
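/*
 * Illustrative sketch, not part of the driver: dp_soc_attach() above uses
 * the classic goto-unwind idiom - each failure label releases exactly what
 * was acquired before it, in reverse order, so every exit path frees all
 * resources exactly once. A reduced hypothetical example of the same shape:
 */
#if 0
struct example_ctx {
	void *a;
	void *b;
};

static struct example_ctx *example_attach(void)
{
	struct example_ctx *ctx = qdf_mem_malloc(sizeof(*ctx));

	if (!ctx)
		goto fail0;
	ctx->a = qdf_mem_malloc(64);
	if (!ctx->a)
		goto fail1;
	ctx->b = qdf_mem_malloc(128);
	if (!ctx->b)
		goto fail2;
	return ctx;

fail2:
	qdf_mem_free(ctx->a);	/* undo allocations in reverse order */
fail1:
	qdf_mem_free(ctx);
fail0:
	return NULL;
}
#endif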
15721 
15722 /**
15723  * dp_soc_init() - Initialize txrx SOC
15724  * @soc: Opaque DP SOC handle
15725  * @htc_handle: Opaque HTC handle
15726  * @hif_handle: Opaque HIF handle
15727  *
15728  * Return: DP SOC handle on success, NULL on failure
15729  */
15730 static void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
15731 			 struct hif_opaque_softc *hif_handle)
15732 {
15733 	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
15734 	bool is_monitor_mode = false;
15735 	uint8_t i;
15736 	int num_dp_msi;
15737 
15738 	wlan_minidump_log(soc, sizeof(*soc), soc->ctrl_psoc,
15739 			  WLAN_MD_DP_SOC, "dp_soc");
15740 
15741 	soc->hif_handle = hif_handle;
15742 
15743 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
15744 	if (!soc->hal_soc)
15745 		goto fail0;
15746 
15747 	if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_init(soc))) {
15748 		dp_err("unable to do target specific init");
15749 		goto fail0;
15750 	}
15751 
15752 	htt_soc = htt_soc_attach(soc, htc_handle);
15753 	if (!htt_soc)
15754 		goto fail1;
15755 
15756 	soc->htt_handle = htt_soc;
15757 
15758 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
15759 		goto fail2;
15760 
15761 	htt_set_htc_handle(htt_soc, htc_handle);
15762 
15763 	dp_soc_cfg_init(soc);
15764 
15765 	dp_monitor_soc_cfg_init(soc);
15766 	/* Reset/Initialize wbm sg list and flags */
15767 	dp_rx_wbm_sg_list_reset(soc);
15768 
15769 	/* Note: Any SRNG ring initialization should happen only after
15770 	 * Interrupt mode is set and followed by filling up the
15771 	 * interrupt mask. IT SHOULD ALWAYS BE IN THIS ORDER.
15772 	 */
15773 	dp_soc_set_interrupt_mode(soc);
15774 	if (soc->cdp_soc.ol_ops->get_con_mode &&
15775 	    soc->cdp_soc.ol_ops->get_con_mode() ==
15776 	    QDF_GLOBAL_MONITOR_MODE) {
15777 		is_monitor_mode = true;
15778 		soc->curr_rx_pkt_tlv_size = soc->rx_mon_pkt_tlv_size;
15779 	} else {
15780 		soc->curr_rx_pkt_tlv_size = soc->rx_pkt_tlv_size;
15781 	}
15782 
15783 	num_dp_msi = dp_get_num_msi_available(soc, soc->intr_mode);
15784 	if (num_dp_msi < 0) {
15785 		dp_init_err("%pK: dp_interrupt assignment failed", soc);
15786 		goto fail3;
15787 	}
15788 
15789 	wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, num_dp_msi,
15790 				     soc->intr_mode, is_monitor_mode);
15791 
15792 	/* initialize WBM_IDLE_LINK ring */
15793 	if (dp_hw_link_desc_ring_init(soc)) {
15794 		dp_init_err("%pK: dp_hw_link_desc_ring_init failed", soc);
15795 		goto fail3;
15796 	}
15797 
15798 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
15799 
15800 	if (dp_soc_srng_init(soc)) {
15801 		dp_init_err("%pK: dp_soc_srng_init failed", soc);
15802 		goto fail4;
15803 	}
15804 
15805 	if (htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
15806 			       htt_get_htc_handle(htt_soc),
15807 			       soc->hal_soc, soc->osdev) == NULL)
15808 		goto fail5;
15809 
15810 	/* Initialize descriptors in TCL Rings */
15811 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
15812 		hal_tx_init_data_ring(soc->hal_soc,
15813 				      soc->tcl_data_ring[i].hal_srng);
15814 	}
15815 
15816 	if (dp_soc_tx_desc_sw_pools_init(soc)) {
15817 		dp_init_err("%pK: dp_soc_tx_desc_sw_pools_init failed", soc);
15818 		goto fail6;
15819 	}
15820 
15821 	if (soc->arch_ops.txrx_soc_ppeds_start) {
15822 		if (soc->arch_ops.txrx_soc_ppeds_start(soc)) {
15823 			dp_init_err("%pK: ppeds start failed", soc);
15824 			goto fail7;
15825 		}
15826 	}
15827 
15828 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
15829 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
15830 	soc->cce_disable = false;
15831 	soc->max_ast_ageout_count = MAX_AST_AGEOUT_COUNT;
15832 
15833 	soc->sta_mode_search_policy = DP_TX_ADDR_SEARCH_ADDR_POLICY;
15834 	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
15835 	qdf_spinlock_create(&soc->vdev_map_lock);
15836 	qdf_atomic_init(&soc->num_tx_outstanding);
15837 	qdf_atomic_init(&soc->num_tx_exception);
15838 	soc->num_tx_allowed =
15839 		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);
15840 	soc->num_tx_spl_allowed =
15841 		wlan_cfg_get_dp_soc_tx_spl_device_limit(soc->wlan_cfg_ctx);
15842 	soc->num_reg_tx_allowed = soc->num_tx_allowed - soc->num_tx_spl_allowed;
15843 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
15844 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
15845 				CDP_CFG_MAX_PEER_ID);
15846 
15847 		if (ret != -EINVAL)
15848 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
15849 
15850 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
15851 				CDP_CFG_CCE_DISABLE);
15852 		if (ret == 1)
15853 			soc->cce_disable = true;
15854 	}
15855 
15856 	/*
15857 	 * Skip registering hw ring interrupts for WMAC2 on IPQ6018
15858 	 * and IPQ5018, as WMAC2 is not present on these platforms.
15859 	 */
15860 	if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018 ||
15861 	    soc->disable_mac2_intr)
15862 		dp_soc_disable_unused_mac_intr_mask(soc, 0x2);
15863 
15864 	/*
15865 	 * Skip registering hw ring interrupts for WMAC1 on IPQ5018,
15866 	 * as WMAC1 is not present on this platform.
15867 	 */
15868 	if (soc->disable_mac1_intr)
15869 		dp_soc_disable_unused_mac_intr_mask(soc, 0x1);
15870 
15871 	/* setup the global rx defrag waitlist */
15872 	TAILQ_INIT(&soc->rx.defrag.waitlist);
15873 	soc->rx.defrag.timeout_ms =
15874 		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
15875 	soc->rx.defrag.next_flush_ms = 0;
15876 	soc->rx.flags.defrag_timeout_check =
15877 		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
15878 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
15879 
15880 	dp_monitor_soc_init(soc);
15881 
15882 	qdf_atomic_set(&soc->cmn_init_done, 1);
15883 
15884 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
15885 
15886 	qdf_spinlock_create(&soc->ast_lock);
15887 	dp_peer_mec_spinlock_create(soc);
15888 
15889 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
15890 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
15891 	INIT_RX_HW_STATS_LOCK(soc);
15892 
15893 	qdf_nbuf_queue_init(&soc->invalid_buf_queue);
15894 	/* fill the tx/rx cpu ring map*/
15895 	dp_soc_set_txrx_ring_map(soc);
15896 
15897 	TAILQ_INIT(&soc->inactive_peer_list);
15898 	qdf_spinlock_create(&soc->inactive_peer_list_lock);
15899 	TAILQ_INIT(&soc->inactive_vdev_list);
15900 	qdf_spinlock_create(&soc->inactive_vdev_list_lock);
15901 	qdf_spinlock_create(&soc->htt_stats.lock);
15902 	/* initialize work queue for stats processing */
15903 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
15904 
15905 	dp_reo_desc_deferred_freelist_create(soc);
15906 
15907 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
15908 		qdf_dma_mem_stats_read(),
15909 		qdf_heap_mem_stats_read(),
15910 		qdf_skb_total_mem_stats_read());
15911 
15912 	soc->vdev_stats_id_map = 0;
15913 
15914 	return soc;
15915 fail7:
15916 	dp_soc_tx_desc_sw_pools_deinit(soc);
15917 fail6:
15918 	htt_soc_htc_dealloc(soc->htt_handle);
15919 fail5:
15920 	dp_soc_srng_deinit(soc);
15921 fail4:
15922 	dp_hw_link_desc_ring_deinit(soc);
15923 fail3:
15924 	htt_htc_pkt_pool_free(htt_soc);
15925 fail2:
15926 	htt_soc_detach(htt_soc);
15927 fail1:
15928 	soc->arch_ops.txrx_soc_deinit(soc);
15929 fail0:
15930 	return NULL;
15931 }
15932 
15933 void *dp_soc_init_wifi3(struct cdp_soc_t *soc,
15934 			struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
15935 			struct hif_opaque_softc *hif_handle,
15936 			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
15937 			struct ol_if_ops *ol_ops, uint16_t device_id)
15938 {
15939 	return dp_soc_init((struct dp_soc *)soc, htc_handle, hif_handle);
15940 }
15941 
15942 #endif
15943 
15944 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
15945 {
15946 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
15947 		return (mac_id < MAX_PDEV_CNT) ? soc->pdev_list[mac_id] : NULL;
15948 
15949 	/* Typically for MCL, as there is only 1 PDEV */
15950 	return soc->pdev_list[0];
15951 }
15952 
15953 void dp_update_num_mac_rings_for_dbs(struct dp_soc *soc,
15954 				     int *max_mac_rings)
15955 {
15956 	bool dbs_enable = false;
15957 
15958 	if (soc->cdp_soc.ol_ops->is_hw_dbs_capable)
15959 		dbs_enable = soc->cdp_soc.ol_ops->
15960 				is_hw_dbs_capable((void *)soc->ctrl_psoc);
15961 
15962 	*max_mac_rings = dbs_enable ? (*max_mac_rings) : 1;
15963 	dp_info("dbs_enable %d, max_mac_rings %d",
15964 		dbs_enable, *max_mac_rings);
15965 }
15966 
15967 qdf_export_symbol(dp_update_num_mac_rings_for_dbs);
15968 
15969 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
15970 /**
15971  * dp_get_cfr_rcc() - get cfr rcc config
15972  * @soc_hdl: Datapath soc handle
15973  * @pdev_id: id of objmgr pdev
15974  *
15975  * Return: true/false based on cfr mode setting
15976  */
15977 static
15978 bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
15979 {
15980 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15981 	struct dp_pdev *pdev = NULL;
15982 
15983 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15984 	if (!pdev) {
15985 		dp_err("pdev is NULL");
15986 		return false;
15987 	}
15988 
15989 	return pdev->cfr_rcc_mode;
15990 }
15991 
15992 /**
15993  * dp_set_cfr_rcc() - enable/disable cfr rcc config
15994  * @soc_hdl: Datapath soc handle
15995  * @pdev_id: id of objmgr pdev
15996  * @enable: Enable/Disable cfr rcc mode
15997  *
15998  * Return: none
15999  */
16000 static
16001 void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, bool enable)
16002 {
16003 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
16004 	struct dp_pdev *pdev = NULL;
16005 
16006 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
16007 	if (!pdev) {
16008 		dp_err("pdev is NULL");
16009 		return;
16010 	}
16011 
16012 	pdev->cfr_rcc_mode = enable;
16013 }
16014 
16015 /**
16016  * dp_get_cfr_dbg_stats() - Get the debug statistics for CFR
16017  * @soc_hdl: Datapath soc handle
16018  * @pdev_id: id of data path pdev handle
16019  * @cfr_rcc_stats: CFR RCC debug statistics buffer
16020  *
16021  * Return: none
16022  */
16023 static inline void
16024 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
16025 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats)
16026 {
16027 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
16028 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
16029 
16030 	if (!pdev) {
16031 		dp_err("Invalid pdev");
16032 		return;
16033 	}
16034 
16035 	qdf_mem_copy(cfr_rcc_stats, &pdev->stats.rcc,
16036 		     sizeof(struct cdp_cfr_rcc_stats));
16037 }
16038 
16039 /**
16040  * dp_clear_cfr_dbg_stats() - Clear debug statistics for CFR
16041  * @soc_hdl: Datapath soc handle
16042  * @pdev_id: id of data path pdev handle
16043  *
16044  * Return: none
16045  */
16046 static void dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl,
16047 				   uint8_t pdev_id)
16048 {
16049 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
16050 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
16051 
16052 	if (!pdev) {
16053 		dp_err("dp pdev is NULL");
16054 		return;
16055 	}
16056 
16057 	qdf_mem_zero(&pdev->stats.rcc, sizeof(pdev->stats.rcc));
16058 }
16059 #endif
16060 
16061 /**
16062  * dp_bucket_index() - Return delay bucket index for a measured delay
16063  *
16064  * @delay: delay measured
16065  * @array: array of bucket thresholds the delay is matched against
16066  * @delay_in_us: flag to indicate whether the delay is in ms or us
16067  *
16068  * Return: index
16069  */
16070 static uint8_t
16071 dp_bucket_index(uint32_t delay, uint16_t *array, bool delay_in_us)
16072 {
16073 	uint8_t i = CDP_DELAY_BUCKET_0;
16074 	uint32_t thr_low, thr_high;
16075 
16076 	for (; i < CDP_DELAY_BUCKET_MAX - 1; i++) {
16077 		thr_low = array[i];
16078 		thr_high = array[i + 1];
16079 
16080 		if (delay_in_us) {
16081 			thr_low = thr_low * USEC_PER_MSEC;
16082 			thr_high = thr_high * USEC_PER_MSEC;
16083 		}
16084 		if (delay >= thr_low && delay <= thr_high)
16085 			return i;
16086 	}
16087 	return (CDP_DELAY_BUCKET_MAX - 1);
16088 }
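
/*
 * Worked example, assuming the cdp_sw_enq_delay table defined below
 * ({0, 1, 2, ..., 12} ms): with delay_in_us == false, a delay of 5 ms
 * matches the i = 4 iteration (thr_low = 4, thr_high = 5), so bucket 4
 * is returned. With delay_in_us == true the same thresholds are scaled
 * by USEC_PER_MSEC, so a delay of 5000 us lands in the same bucket.
 * Any delay beyond the last threshold falls through the loop into the
 * overflow bucket, CDP_DELAY_BUCKET_MAX - 1.
 */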
16089 
16090 #ifdef HW_TX_DELAY_STATS_ENABLE
16091 /*
16092  * cdp_fw_to_hw_delay_range
16093  * Fw to hw delay ranges in milliseconds
16094  */
16095 static uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
16096 	0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
16097 #else
16098 static uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
16099 	0, 2, 4, 6, 8, 10, 20, 30, 40, 50, 100, 250, 500};
16100 #endif
16101 
16102 /*
16103  * cdp_sw_enq_delay_range
16104  * Software enqueue delay ranges in milliseconds
16105  */
16106 static uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
16107 	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
16108 
16109 /*
16110  * cdp_intfrm_delay_range
16111  * Interframe delay ranges in milliseconds
16112  */
16113 static uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
16114 	0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};
16115 
16116 /**
16117  * dp_fill_delay_buckets() - Fill delay statistics bucket for each
16118  *				type of delay
16119  * @tstats: tid tx stats
16120  * @rstats: tid rx stats
16121  * @delay: measured delay, in ms or us per @delay_in_us
16122  * @tid: tid value
16123  * @mode: type of tx delay mode
16124  * @ring_id: ring number
16125  * @delay_in_us: flag to indicate whether the delay is in ms or us
16126  *
16127  * Return: pointer to cdp_delay_stats structure
16128  */
16129 static struct cdp_delay_stats *
16130 dp_fill_delay_buckets(struct cdp_tid_tx_stats *tstats,
16131 		      struct cdp_tid_rx_stats *rstats, uint32_t delay,
16132 		      uint8_t tid, uint8_t mode, uint8_t ring_id,
16133 		      bool delay_in_us)
16134 {
16135 	uint8_t delay_index = 0;
16136 	struct cdp_delay_stats *stats = NULL;
16137 
16138 	/*
16139 	 * Update delay stats in proper bucket
16140 	 */
16141 	switch (mode) {
16142 	/* Software Enqueue delay ranges */
16143 	case CDP_DELAY_STATS_SW_ENQ:
16144 		if (!tstats)
16145 			break;
16146 
16147 		delay_index = dp_bucket_index(delay, cdp_sw_enq_delay,
16148 					      delay_in_us);
16149 		tstats->swq_delay.delay_bucket[delay_index]++;
16150 		stats = &tstats->swq_delay;
16151 		break;
16152 
16153 	/* Tx Completion delay ranges */
16154 	case CDP_DELAY_STATS_FW_HW_TRANSMIT:
16155 		if (!tstats)
16156 			break;
16157 
16158 		delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay,
16159 					      delay_in_us);
16160 		tstats->hwtx_delay.delay_bucket[delay_index]++;
16161 		stats = &tstats->hwtx_delay;
16162 		break;
16163 
16164 	/* Interframe tx delay ranges */
16165 	case CDP_DELAY_STATS_TX_INTERFRAME:
16166 		if (!tstats)
16167 			break;
16168 
16169 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
16170 					      delay_in_us);
16171 		tstats->intfrm_delay.delay_bucket[delay_index]++;
16172 		stats = &tstats->intfrm_delay;
16173 		break;
16174 
16175 	/* Interframe rx delay ranges */
16176 	case CDP_DELAY_STATS_RX_INTERFRAME:
16177 		if (!rstats)
16178 			break;
16179 
16180 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
16181 					      delay_in_us);
16182 		rstats->intfrm_delay.delay_bucket[delay_index]++;
16183 		stats = &rstats->intfrm_delay;
16184 		break;
16185 
16186 	/* Delay from ring reap to indication to the network stack */
16187 	case CDP_DELAY_STATS_REAP_STACK:
16188 		if (!rstats)
16189 			break;
16190 
16191 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
16192 					      delay_in_us);
16193 		rstats->to_stack_delay.delay_bucket[delay_index]++;
16194 		stats = &rstats->to_stack_delay;
16195 		break;
16196 	default:
16197 		dp_debug("Incorrect delay mode: %d", mode);
16198 	}
16199 
16200 	return stats;
16201 }
16202 
16203 void dp_update_delay_stats(struct cdp_tid_tx_stats *tstats,
16204 			   struct cdp_tid_rx_stats *rstats, uint32_t delay,
16205 			   uint8_t tid, uint8_t mode, uint8_t ring_id,
16206 			   bool delay_in_us)
16207 {
16208 	struct cdp_delay_stats *dstats = NULL;
16209 
16210 	/*
16211 	 * Delay ranges are different for different delay modes
16212 	 * Get the correct index to update delay bucket
16213 	 */
16214 	dstats = dp_fill_delay_buckets(tstats, rstats, delay, tid, mode,
16215 				       ring_id, delay_in_us);
16216 	if (qdf_unlikely(!dstats))
16217 		return;
16218 
16219 	if (delay != 0) {
16220 		/*
16221 		 * Compute minimum, average and maximum
16222 		 * delay
16223 		 */
16224 		if (delay < dstats->min_delay)
16225 			dstats->min_delay = delay;
16226 
16227 		if (delay > dstats->max_delay)
16228 			dstats->max_delay = delay;
16229 
16230 		/*
16231 		 * Average over delay measured till now
16232 		 */
16233 		if (!dstats->avg_delay)
16234 			dstats->avg_delay = delay;
16235 		else
16236 			dstats->avg_delay = ((delay + dstats->avg_delay) >> 1);
16237 	}
16238 }
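
/*
 * The running average above, avg = (delay + avg) >> 1, is an
 * exponentially weighted moving average with weight 1/2 rather than an
 * arithmetic mean over all samples; the first non-zero sample seeds
 * the average. For example, successive delays of 10, 20 and 30 ms give
 * avg = 10, then (20 + 10) >> 1 = 15, then (30 + 15) >> 1 = 22, so
 * recent samples dominate older ones.
 */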
16239 
16240 uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
16241 			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
16242 			      u_int16_t mac_cnt, bool limit)
16243 {
16244 	struct dp_soc *dp_soc = (struct dp_soc *)soc;
16245 	struct dp_vdev *vdev =
16246 		dp_vdev_get_ref_by_id(dp_soc, vdev_id, DP_MOD_ID_CDP);
16247 	struct dp_peer *peer;
16248 	uint16_t new_mac_cnt = 0;
16249 
16250 	if (!vdev)
16251 		return new_mac_cnt;
16252 
16253 	if (limit && (vdev->num_peers > mac_cnt))
16254 		return 0;
16255 
16256 	qdf_spin_lock_bh(&vdev->peer_list_lock);
16257 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
16258 		if (peer->bss_peer)
16259 			continue;
16260 		if (new_mac_cnt < mac_cnt) {
16261 			WLAN_ADDR_COPY(newmac[new_mac_cnt], peer->mac_addr.raw);
16262 			new_mac_cnt++;
16263 		}
16264 	}
16265 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
16266 	dp_vdev_unref_delete(dp_soc, vdev, DP_MOD_ID_CDP);
16267 	return new_mac_cnt;
16268 }
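
/*
 * A minimal usage sketch for dp_get_peer_mac_list(); the buffer size
 * of 8 entries is arbitrary and purely illustrative:
 *
 *	uint8_t macs[8][QDF_MAC_ADDR_SIZE];
 *	uint16_t n;
 *
 *	n = dp_get_peer_mac_list(soc, vdev_id, macs, 8, false);
 *
 * With limit == true the call returns 0 instead of a partial list
 * whenever the vdev holds more peers than the buffer can take. BSS
 * peers are always skipped, so 'n' counts client peers only.
 */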
16269 
16270 uint16_t dp_get_peer_id(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *mac)
16271 {
16272 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
16273 						       mac, 0, vdev_id,
16274 						       DP_MOD_ID_CDP);
16275 	uint16_t peer_id = HTT_INVALID_PEER;
16276 
16277 	if (!peer) {
16278 		dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
16279 		return peer_id;
16280 	}
16281 
16282 	peer_id = peer->peer_id;
16283 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
16284 	return peer_id;
16285 }
16286 
16287 #ifdef QCA_SUPPORT_WDS_EXTENDED
16288 QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc,
16289 				  uint8_t vdev_id,
16290 				  uint8_t *mac,
16291 				  ol_txrx_rx_fp rx,
16292 				  ol_osif_peer_handle osif_peer)
16293 {
16294 	struct dp_txrx_peer *txrx_peer = NULL;
16295 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
16296 						       mac, 0, vdev_id,
16297 						       DP_MOD_ID_CDP);
16298 	QDF_STATUS status = QDF_STATUS_E_INVAL;
16299 
16300 	if (!peer) {
16301 		dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
16302 		return status;
16303 	}
16304 
16305 	txrx_peer = dp_get_txrx_peer(peer);
16306 	if (!txrx_peer) {
16307 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
16308 		return status;
16309 	}
16310 
16311 	if (rx) {
16312 		if (txrx_peer->osif_rx) {
16313 			status = QDF_STATUS_E_ALREADY;
16314 		} else {
16315 			txrx_peer->osif_rx = rx;
16316 			status = QDF_STATUS_SUCCESS;
16317 		}
16318 	} else {
16319 		if (txrx_peer->osif_rx) {
16320 			txrx_peer->osif_rx = NULL;
16321 			status = QDF_STATUS_SUCCESS;
16322 		} else {
16323 			status = QDF_STATUS_E_ALREADY;
16324 		}
16325 	}
16326 
16327 	txrx_peer->wds_ext.osif_peer = osif_peer;
16328 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
16329 
16330 	return status;
16331 }
16332 
16333 QDF_STATUS dp_wds_ext_get_peer_osif_handle(
16334 				ol_txrx_soc_handle soc,
16335 				uint8_t vdev_id,
16336 				uint8_t *mac,
16337 				ol_osif_peer_handle *osif_peer)
16338 {
16339 	struct dp_soc *dp_soc = (struct dp_soc *)soc;
16340 	struct dp_txrx_peer *txrx_peer = NULL;
16341 	struct dp_peer *peer = dp_peer_find_hash_find(dp_soc,
16342 						      mac, 0, vdev_id,
16343 						      DP_MOD_ID_CDP);
16344 
16345 	if (!peer) {
16346 		dp_cdp_debug("%pK: Peer is NULL!\n", dp_soc);
16347 		return QDF_STATUS_E_INVAL;
16348 	}
16349 
16350 	txrx_peer = dp_get_txrx_peer(peer);
16351 	if (!txrx_peer) {
16352 		dp_cdp_debug("%pK: TXRX Peer is NULL!\n", dp_soc);
16353 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
16354 		return QDF_STATUS_E_INVAL;
16355 	}
16356 
16357 	*osif_peer = txrx_peer->wds_ext.osif_peer;
16358 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
16359 
16360 	return QDF_STATUS_SUCCESS;
16361 }
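
/*
 * Registration semantics of dp_wds_ext_set_peer_rx() in brief: a
 * non-NULL 'rx' registers the OSIF rx callback, a NULL 'rx'
 * deregisters it, and QDF_STATUS_E_ALREADY is returned when the
 * requested state is already in effect (double register or double
 * deregister). The osif_peer handle is updated unconditionally in
 * either case.
 */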
16362 #endif /* QCA_SUPPORT_WDS_EXTENDED */
16363 
16364 /**
16365  * dp_pdev_srng_deinit() - de-initialize all pdev srng rings including
16366  *			   monitor rings
16367  * @pdev: Datapath pdev handle
16368  *
16369  */
16370 static void dp_pdev_srng_deinit(struct dp_pdev *pdev)
16371 {
16372 	struct dp_soc *soc = pdev->soc;
16373 	uint8_t i;
16374 
16375 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled)
16376 		dp_srng_deinit(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
16377 			       RXDMA_BUF,
16378 			       pdev->lmac_id);
16379 
16380 	if (!soc->rxdma2sw_rings_not_supported) {
16381 		for (i = 0;
16382 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
16383 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
16384 								 pdev->pdev_id);
16385 
16386 			wlan_minidump_remove(soc->rxdma_err_dst_ring[lmac_id].
16387 							base_vaddr_unaligned,
16388 					     soc->rxdma_err_dst_ring[lmac_id].
16389 								alloc_size,
16390 					     soc->ctrl_psoc,
16391 					     WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
16392 					     "rxdma_err_dst");
16393 			dp_srng_deinit(soc, &soc->rxdma_err_dst_ring[lmac_id],
16394 				       RXDMA_DST, lmac_id);
16395 		}
16396 	}
16397 
16398 
16399 }
16400 
16401 /**
16402  * dp_pdev_srng_init() - initialize all pdev srng rings including
16403  *			   monitor rings
16404  * @pdev: Datapath pdev handle
16405  *
16406  * Return: QDF_STATUS_SUCCESS on success
16407  *	   QDF_STATUS_E_NOMEM on failure
16408  */
16409 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev)
16410 {
16411 	struct dp_soc *soc = pdev->soc;
16412 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
16413 	uint32_t i;
16414 
16415 	soc_cfg_ctx = soc->wlan_cfg_ctx;
16416 
16417 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
16418 		if (dp_srng_init(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
16419 				 RXDMA_BUF, 0, pdev->lmac_id)) {
16420 			dp_init_err("%pK: dp_srng_init failed rx refill ring",
16421 				    soc);
16422 			goto fail1;
16423 		}
16424 	}
16425 
16426 	/* LMAC RxDMA to SW Rings configuration */
16427 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
16428 		/* Only valid for MCL */
16429 		pdev = soc->pdev_list[0];
16430 
16431 	if (!soc->rxdma2sw_rings_not_supported) {
16432 		for (i = 0;
16433 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
16434 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
16435 								 pdev->pdev_id);
16436 			struct dp_srng *srng =
16437 				&soc->rxdma_err_dst_ring[lmac_id];
16438 
16439 			if (srng->hal_srng)
16440 				continue;
16441 
16442 			if (dp_srng_init(soc, srng, RXDMA_DST, 0, lmac_id)) {
16443 				dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring",
16444 					    soc);
16445 				goto fail1;
16446 			}
16447 			wlan_minidump_log(soc->rxdma_err_dst_ring[lmac_id].
16448 						base_vaddr_unaligned,
16449 					  soc->rxdma_err_dst_ring[lmac_id].
16450 						alloc_size,
16451 					  soc->ctrl_psoc,
16452 					  WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
16453 					  "rxdma_err_dst");
16454 		}
16455 	}
16456 	return QDF_STATUS_SUCCESS;
16457 
16458 fail1:
16459 	dp_pdev_srng_deinit(pdev);
16460 	return QDF_STATUS_E_NOMEM;
16461 }
16462 
16463 /**
16464  * dp_pdev_srng_free() - free all pdev srng rings including monitor rings
16465  * @pdev: Datapath pdev handle
16466  *
16467  */
16468 static void dp_pdev_srng_free(struct dp_pdev *pdev)
16469 {
16470 	struct dp_soc *soc = pdev->soc;
16471 	uint8_t i;
16472 
16473 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled)
16474 		dp_srng_free(soc, &soc->rx_refill_buf_ring[pdev->lmac_id]);
16475 
16476 	if (!soc->rxdma2sw_rings_not_supported) {
16477 		for (i = 0;
16478 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
16479 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
16480 								 pdev->pdev_id);
16481 
16482 			dp_srng_free(soc, &soc->rxdma_err_dst_ring[lmac_id]);
16483 		}
16484 	}
16485 }
16486 
16487 /**
16488  * dp_pdev_srng_alloc() - allocate memory for all pdev srng rings including
16489  *			  monitor rings
16490  * @pdev: Datapath pdev handle
16491  *
16492  * Return: QDF_STATUS_SUCCESS on success
16493  *	   QDF_STATUS_E_NOMEM on failure
16494  */
16495 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev)
16496 {
16497 	struct dp_soc *soc = pdev->soc;
16498 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
16499 	uint32_t ring_size;
16500 	uint32_t i;
16501 
16502 	soc_cfg_ctx = soc->wlan_cfg_ctx;
16503 
16504 	ring_size = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
16505 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
16506 		if (dp_srng_alloc(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
16507 				  RXDMA_BUF, ring_size, 0)) {
16508 			dp_init_err("%pK: dp_srng_alloc failed rx refill ring",
16509 				    soc);
16510 			goto fail1;
16511 		}
16512 	}
16513 
16514 	ring_size = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
16515 	/* LMAC RxDMA to SW Rings configuration */
16516 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
16517 		/* Only valid for MCL */
16518 		pdev = soc->pdev_list[0];
16519 
16520 	if (!soc->rxdma2sw_rings_not_supported) {
16521 		for (i = 0;
16522 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
16523 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
16524 								 pdev->pdev_id);
16525 			struct dp_srng *srng =
16526 				&soc->rxdma_err_dst_ring[lmac_id];
16527 
16528 			if (srng->base_vaddr_unaligned)
16529 				continue;
16530 
16531 			if (dp_srng_alloc(soc, srng, RXDMA_DST, ring_size, 0)) {
16532 				dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring",
16533 					    soc);
16534 				goto fail1;
16535 			}
16536 		}
16537 	}
16538 
16539 	return QDF_STATUS_SUCCESS;
16540 fail1:
16541 	dp_pdev_srng_free(pdev);
16542 	return QDF_STATUS_E_NOMEM;
16543 }
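
/*
 * The pdev SRNG helpers above follow the two-phase lifecycle used
 * throughout this file: dp_pdev_srng_alloc()/dp_pdev_srng_free() deal
 * only with ring memory, while dp_pdev_srng_init()/dp_pdev_srng_deinit()
 * attach and detach the rings from HAL. Teardown must therefore run
 * deinit before free, mirroring alloc before init on the way up. A
 * sketch of the expected sequencing (hypothetical caller, for
 * illustration only; the real callers spread these across the pdev
 * attach/detach paths):
 *
 *	if (dp_pdev_srng_alloc(pdev))
 *		return QDF_STATUS_E_NOMEM;
 *	if (dp_pdev_srng_init(pdev)) {
 *		dp_pdev_srng_free(pdev);
 *		return QDF_STATUS_E_FAILURE;
 *	}
 *	...
 *	dp_pdev_srng_deinit(pdev);
 *	dp_pdev_srng_free(pdev);
 */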
16544 
16545 #ifndef WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG
16546 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
16547 {
16548 	QDF_STATUS status;
16549 
16550 	if (soc->init_tcl_cmd_cred_ring) {
16551 		status =  dp_srng_init(soc, &soc->tcl_cmd_credit_ring,
16552 				       TCL_CMD_CREDIT, 0, 0);
16553 		if (QDF_IS_STATUS_ERROR(status))
16554 			return status;
16555 
16556 		wlan_minidump_log(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
16557 				  soc->tcl_cmd_credit_ring.alloc_size,
16558 				  soc->ctrl_psoc,
16559 				  WLAN_MD_DP_SRNG_TCL_CMD,
16560 				  "tcl_cmd_credit_ring");
16561 	}
16562 
16563 	return QDF_STATUS_SUCCESS;
16564 }
16565 
16566 static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
16567 {
16568 	if (soc->init_tcl_cmd_cred_ring) {
16569 		wlan_minidump_remove(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
16570 				     soc->tcl_cmd_credit_ring.alloc_size,
16571 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_CMD,
16572 				     "tcl_cmd_credit_ring");
16573 		dp_srng_deinit(soc, &soc->tcl_cmd_credit_ring,
16574 			       TCL_CMD_CREDIT, 0);
16575 	}
16576 }
16577 
16578 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
16579 {
16580 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
16581 	uint32_t entries;
16582 	QDF_STATUS status;
16583 
16584 	entries = wlan_cfg_get_dp_soc_tcl_cmd_credit_ring_size(soc_cfg_ctx);
16585 	if (soc->init_tcl_cmd_cred_ring) {
16586 		status = dp_srng_alloc(soc, &soc->tcl_cmd_credit_ring,
16587 				       TCL_CMD_CREDIT, entries, 0);
16588 		if (QDF_IS_STATUS_ERROR(status))
16589 			return status;
16590 	}
16591 
16592 	return QDF_STATUS_SUCCESS;
16593 }
16594 
16595 static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
16596 {
16597 	if (soc->init_tcl_cmd_cred_ring)
16598 		dp_srng_free(soc, &soc->tcl_cmd_credit_ring);
16599 }
16600 
16601 static inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
16602 {
16603 	if (soc->init_tcl_cmd_cred_ring)
16604 		hal_tx_init_cmd_credit_ring(soc->hal_soc,
16605 					    soc->tcl_cmd_credit_ring.hal_srng);
16606 }
16607 #else
16608 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
16609 {
16610 	return QDF_STATUS_SUCCESS;
16611 }
16612 
16613 static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
16614 {
16615 }
16616 
16617 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
16618 {
16619 	return QDF_STATUS_SUCCESS;
16620 }
16621 
16622 static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
16623 {
16624 }
16625 
16626 static inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
16627 {
16628 }
16629 #endif
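
/*
 * With WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG defined, the stubs above
 * compile the TCL command/credit ring out entirely: the alloc and init
 * variants report QDF_STATUS_SUCCESS without creating anything, so
 * callers need no feature-specific branches. The TCL status ring block
 * below applies the same no-op stub pattern.
 */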
16630 
16631 #ifndef WLAN_DP_DISABLE_TCL_STATUS_SRNG
16632 static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
16633 {
16634 	QDF_STATUS status;
16635 
16636 	status =  dp_srng_init(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0);
16637 	if (QDF_IS_STATUS_ERROR(status))
16638 		return status;
16639 
16640 	wlan_minidump_log(soc->tcl_status_ring.base_vaddr_unaligned,
16641 			  soc->tcl_status_ring.alloc_size,
16642 			  soc->ctrl_psoc,
16643 			  WLAN_MD_DP_SRNG_TCL_STATUS,
16644 			  "tcl_status_ring");
16645 
16646 	return QDF_STATUS_SUCCESS;
16647 }
16648 
16649 static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
16650 {
16651 	wlan_minidump_remove(soc->tcl_status_ring.base_vaddr_unaligned,
16652 			     soc->tcl_status_ring.alloc_size,
16653 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_STATUS,
16654 			     "tcl_status_ring");
16655 	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
16656 }
16657 
16658 static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
16659 {
16660 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
16661 	uint32_t entries;
16662 	QDF_STATUS status = QDF_STATUS_SUCCESS;
16663 
16664 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
16665 	status = dp_srng_alloc(soc, &soc->tcl_status_ring,
16666 			       TCL_STATUS, entries, 0);
16667 
16668 	return status;
16669 }
16670 
16671 static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
16672 {
16673 	dp_srng_free(soc, &soc->tcl_status_ring);
16674 }
16675 #else
16676 static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
16677 {
16678 	return QDF_STATUS_SUCCESS;
16679 }
16680 
16681 static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
16682 {
16683 }
16684 
16685 static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
16686 {
16687 	return QDF_STATUS_SUCCESS;
16688 }
16689 
16690 static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
16691 {
16692 }
16693 #endif
16694 
16695 /**
16696  * dp_soc_srng_deinit() - de-initialize soc srng rings
16697  * @soc: Datapath soc handle
16698  *
16699  */
16700 static void dp_soc_srng_deinit(struct dp_soc *soc)
16701 {
16702 	uint32_t i;
16703 
16704 	if (soc->arch_ops.txrx_soc_srng_deinit)
16705 		soc->arch_ops.txrx_soc_srng_deinit(soc);
16706 
16707 	/* Free the ring memories */
16708 	/* Common rings */
16709 	wlan_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
16710 			     soc->wbm_desc_rel_ring.alloc_size,
16711 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_WBM_DESC_REL,
16712 			     "wbm_desc_rel_ring");
16713 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
16714 
16715 	/* Tx data rings */
16716 	for (i = 0; i < soc->num_tcl_data_rings; i++)
16717 		dp_deinit_tx_pair_by_index(soc, i);
16718 
16719 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
16720 		dp_deinit_tx_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
16721 		dp_ipa_deinit_alt_tx_ring(soc);
16722 	}
16723 
16724 	/* TCL command and status rings */
16725 	dp_soc_tcl_cmd_cred_srng_deinit(soc);
16726 	dp_soc_tcl_status_srng_deinit(soc);
16727 
16728 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
16729 		/* TODO: Get number of rings and ring sizes
16730 		 * from wlan_cfg
16731 		 */
16732 		wlan_minidump_remove(soc->reo_dest_ring[i].base_vaddr_unaligned,
16733 				     soc->reo_dest_ring[i].alloc_size,
16734 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_DEST,
16735 				     "reo_dest_ring");
16736 		dp_srng_deinit(soc, &soc->reo_dest_ring[i], REO_DST, i);
16737 	}
16738 
16739 	/* REO reinjection ring */
16740 	wlan_minidump_remove(soc->reo_reinject_ring.base_vaddr_unaligned,
16741 			     soc->reo_reinject_ring.alloc_size,
16742 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_REINJECT,
16743 			     "reo_reinject_ring");
16744 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
16745 
16746 	/* Rx release ring */
16747 	wlan_minidump_remove(soc->rx_rel_ring.base_vaddr_unaligned,
16748 			     soc->rx_rel_ring.alloc_size,
16749 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_RX_REL,
16750 			     "reo_release_ring");
16751 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
16752 
16753 	/* Rx exception ring */
16754 	/* TODO: Better to store ring_type and ring_num in
16755 	 * dp_srng during setup
16756 	 */
16757 	wlan_minidump_remove(soc->reo_exception_ring.base_vaddr_unaligned,
16758 			     soc->reo_exception_ring.alloc_size,
16759 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_EXCEPTION,
16760 			     "reo_exception_ring");
16761 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
16762 
16763 	/* REO command and status rings */
16764 	wlan_minidump_remove(soc->reo_cmd_ring.base_vaddr_unaligned,
16765 			     soc->reo_cmd_ring.alloc_size,
16766 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_CMD,
16767 			     "reo_cmd_ring");
16768 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
16769 	wlan_minidump_remove(soc->reo_status_ring.base_vaddr_unaligned,
16770 			     soc->reo_status_ring.alloc_size,
16771 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_STATUS,
16772 			     "reo_status_ring");
16773 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
16774 }
16775 
16776 /**
16777  * dp_soc_srng_init() - Initialize soc level srng rings
16778  * @soc: Datapath soc handle
16779  *
16780  * Return: QDF_STATUS_SUCCESS on success
16781  *	   QDF_STATUS_E_FAILURE on failure
16782  */
16783 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc)
16784 {
16785 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
16786 	uint8_t i;
16787 	uint8_t wbm2_sw_rx_rel_ring_id;
16788 
16789 	soc_cfg_ctx = soc->wlan_cfg_ctx;
16790 
16791 	dp_enable_verbose_debug(soc);
16792 
16793 	/* WBM descriptor release ring */
16794 	if (dp_srng_init(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0)) {
16795 		dp_init_err("%pK: dp_srng_init failed for wbm_desc_rel_ring", soc);
16796 		goto fail1;
16797 	}
16798 
16799 	wlan_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
16800 			  soc->wbm_desc_rel_ring.alloc_size,
16801 			  soc->ctrl_psoc,
16802 			  WLAN_MD_DP_SRNG_WBM_DESC_REL,
16803 			  "wbm_desc_rel_ring");
16804 
16805 	/* TCL command and status rings */
16806 	if (dp_soc_tcl_cmd_cred_srng_init(soc)) {
16807 		dp_init_err("%pK: dp_srng_init failed for tcl_cmd_ring", soc);
16808 		goto fail1;
16809 	}
16810 
16811 	if (dp_soc_tcl_status_srng_init(soc)) {
16812 		dp_init_err("%pK: dp_srng_init failed for tcl_status_ring", soc);
16813 		goto fail1;
16814 	}
16815 
16816 	/* REO reinjection ring */
16817 	if (dp_srng_init(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0)) {
16818 		dp_init_err("%pK: dp_srng_init failed for reo_reinject_ring", soc);
16819 		goto fail1;
16820 	}
16821 
16822 	wlan_minidump_log(soc->reo_reinject_ring.base_vaddr_unaligned,
16823 			  soc->reo_reinject_ring.alloc_size,
16824 			  soc->ctrl_psoc,
16825 			  WLAN_MD_DP_SRNG_REO_REINJECT,
16826 			  "reo_reinject_ring");
16827 
16828 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc_cfg_ctx);
16829 	/* Rx release ring */
16830 	if (dp_srng_init(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
16831 			 wbm2_sw_rx_rel_ring_id, 0)) {
16832 		dp_init_err("%pK: dp_srng_init failed for rx_rel_ring", soc);
16833 		goto fail1;
16834 	}
16835 
16836 	wlan_minidump_log(soc->rx_rel_ring.base_vaddr_unaligned,
16837 			  soc->rx_rel_ring.alloc_size,
16838 			  soc->ctrl_psoc,
16839 			  WLAN_MD_DP_SRNG_RX_REL,
16840 			  "reo_release_ring");
16841 
16842 	/* Rx exception ring */
16843 	if (dp_srng_init(soc, &soc->reo_exception_ring,
16844 			 REO_EXCEPTION, 0, MAX_REO_DEST_RINGS)) {
16845 		dp_init_err("%pK: dp_srng_init failed - reo_exception", soc);
16846 		goto fail1;
16847 	}
16848 
16849 	wlan_minidump_log(soc->reo_exception_ring.base_vaddr_unaligned,
16850 			  soc->reo_exception_ring.alloc_size,
16851 			  soc->ctrl_psoc,
16852 			  WLAN_MD_DP_SRNG_REO_EXCEPTION,
16853 			  "reo_exception_ring");
16854 
16855 	/* REO command and status rings */
16856 	if (dp_srng_init(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0)) {
16857 		dp_init_err("%pK: dp_srng_init failed for reo_cmd_ring", soc);
16858 		goto fail1;
16859 	}
16860 
16861 	wlan_minidump_log(soc->reo_cmd_ring.base_vaddr_unaligned,
16862 			  soc->reo_cmd_ring.alloc_size,
16863 			  soc->ctrl_psoc,
16864 			  WLAN_MD_DP_SRNG_REO_CMD,
16865 			  "reo_cmd_ring");
16866 
16867 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
16868 	TAILQ_INIT(&soc->rx.reo_cmd_list);
16869 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
16870 
16871 	if (dp_srng_init(soc, &soc->reo_status_ring, REO_STATUS, 0, 0)) {
16872 		dp_init_err("%pK: dp_srng_init failed for reo_status_ring", soc);
16873 		goto fail1;
16874 	}
16875 
16876 	wlan_minidump_log(soc->reo_status_ring.base_vaddr_unaligned,
16877 			  soc->reo_status_ring.alloc_size,
16878 			  soc->ctrl_psoc,
16879 			  WLAN_MD_DP_SRNG_REO_STATUS,
16880 			  "reo_status_ring");
16881 
16882 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
16883 		if (dp_init_tx_ring_pair_by_index(soc, i))
16884 			goto fail1;
16885 	}
16886 
16887 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
16888 		if (dp_init_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
16889 			goto fail1;
16890 
16891 		if (dp_ipa_init_alt_tx_ring(soc))
16892 			goto fail1;
16893 	}
16894 
16895 	dp_create_ext_stats_event(soc);
16896 
16897 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
16898 		/* Initialize REO destination ring */
16899 		if (dp_srng_init(soc, &soc->reo_dest_ring[i], REO_DST, i, 0)) {
16900 			dp_init_err("%pK: dp_srng_init failed for reo_dest_ring", soc);
16901 			goto fail1;
16902 		}
16903 
16904 		wlan_minidump_log(soc->reo_dest_ring[i].base_vaddr_unaligned,
16905 				  soc->reo_dest_ring[i].alloc_size,
16906 				  soc->ctrl_psoc,
16907 				  WLAN_MD_DP_SRNG_REO_DEST,
16908 				  "reo_dest_ring");
16909 	}
16910 
16911 	if (soc->arch_ops.txrx_soc_srng_init) {
16912 		if (soc->arch_ops.txrx_soc_srng_init(soc)) {
16913 			dp_init_err("%pK: dp_srng_init failed for arch rings",
16914 				    soc);
16915 			goto fail1;
16916 		}
16917 	}
16918 
16919 	return QDF_STATUS_SUCCESS;
16920 fail1:
16921 	/*
16922 	 * Cleanup will be done as part of soc_detach, which will
16923 	 * be called on pdev attach failure
16924 	 */
16925 	dp_soc_srng_deinit(soc);
16926 	return QDF_STATUS_E_FAILURE;
16927 }
16928 
16929 /**
16930  * dp_soc_srng_free() - free soc level srng rings
16931  * @soc: Datapath soc handle
16932  *
16933  */
16934 static void dp_soc_srng_free(struct dp_soc *soc)
16935 {
16936 	uint32_t i;
16937 
16938 	if (soc->arch_ops.txrx_soc_srng_free)
16939 		soc->arch_ops.txrx_soc_srng_free(soc);
16940 
16941 	dp_srng_free(soc, &soc->wbm_desc_rel_ring);
16942 
16943 	for (i = 0; i < soc->num_tcl_data_rings; i++)
16944 		dp_free_tx_ring_pair_by_index(soc, i);
16945 
16946 	/* Free IPA rings for TCL_TX and TCL_COMPL ring */
16947 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
16948 		dp_free_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
16949 		dp_ipa_free_alt_tx_ring(soc);
16950 	}
16951 
16952 	dp_soc_tcl_cmd_cred_srng_free(soc);
16953 	dp_soc_tcl_status_srng_free(soc);
16954 
16955 	for (i = 0; i < soc->num_reo_dest_rings; i++)
16956 		dp_srng_free(soc, &soc->reo_dest_ring[i]);
16957 
16958 	dp_srng_free(soc, &soc->reo_reinject_ring);
16959 	dp_srng_free(soc, &soc->rx_rel_ring);
16960 
16961 	dp_srng_free(soc, &soc->reo_exception_ring);
16962 
16963 	dp_srng_free(soc, &soc->reo_cmd_ring);
16964 	dp_srng_free(soc, &soc->reo_status_ring);
16965 }
16966 
16967 /**
16968  * dp_soc_srng_alloc() - Allocate memory for soc level srng rings
16969  * @soc: Datapath soc handle
16970  *
16971  * Return: QDF_STATUS_SUCCESS on success
16972  *	   QDF_STATUS_E_NOMEM on failure
16973  */
16974 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc)
16975 {
16976 	uint32_t entries;
16977 	uint32_t i;
16978 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
16979 	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
16980 	uint32_t reo_dst_ring_size;
16981 
16982 	soc_cfg_ctx = soc->wlan_cfg_ctx;
16983 
16984 	/* sw2wbm link descriptor release ring */
16985 	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
16986 	if (dp_srng_alloc(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE,
16987 			  entries, 0)) {
16988 		dp_init_err("%pK: dp_srng_alloc failed for wbm_desc_rel_ring", soc);
16989 		goto fail1;
16990 	}
16991 
16992 	/* TCL command and status rings */
16993 	if (dp_soc_tcl_cmd_cred_srng_alloc(soc)) {
16994 		dp_init_err("%pK: dp_srng_alloc failed for tcl_cmd_ring", soc);
16995 		goto fail1;
16996 	}
16997 
16998 	if (dp_soc_tcl_status_srng_alloc(soc)) {
16999 		dp_init_err("%pK: dp_srng_alloc failed for tcl_status_ring", soc);
17000 		goto fail1;
17001 	}
17002 
17003 	/* REO reinjection ring */
17004 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
17005 	if (dp_srng_alloc(soc, &soc->reo_reinject_ring, REO_REINJECT,
17006 			  entries, 0)) {
17007 		dp_init_err("%pK: dp_srng_alloc failed for reo_reinject_ring", soc);
17008 		goto fail1;
17009 	}
17010 
17011 	/* Rx release ring */
17012 	entries = wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx);
17013 	if (dp_srng_alloc(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
17014 			  entries, 0)) {
17015 		dp_init_err("%pK: dp_srng_alloc failed for rx_rel_ring", soc);
17016 		goto fail1;
17017 	}
17018 
17019 	/* Rx exception ring */
17020 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
17021 	if (dp_srng_alloc(soc, &soc->reo_exception_ring, REO_EXCEPTION,
17022 			  entries, 0)) {
17023 		dp_init_err("%pK: dp_srng_alloc failed - reo_exception", soc);
17024 		goto fail1;
17025 	}
17026 
17027 	/* REO command and status rings */
17028 	entries = wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx);
17029 	if (dp_srng_alloc(soc, &soc->reo_cmd_ring, REO_CMD, entries, 0)) {
17030 		dp_init_err("%pK: dp_srng_alloc failed for reo_cmd_ring", soc);
17031 		goto fail1;
17032 	}
17033 
17034 	entries = wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx);
17035 	if (dp_srng_alloc(soc, &soc->reo_status_ring, REO_STATUS,
17036 			  entries, 0)) {
17037 		dp_init_err("%pK: dp_srng_alloc failed for reo_status_ring", soc);
17038 		goto fail1;
17039 	}
17040 
17041 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc_cfg_ctx);
17042 
17043 	/* Disable cached desc if NSS offload is enabled */
17044 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
17045 		cached = 0;
17046 
17047 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
17048 		if (dp_alloc_tx_ring_pair_by_index(soc, i))
17049 			goto fail1;
17050 	}
17051 
17052 	/* IPA rings for TCL_TX and TX_COMP will be allocated here */
17053 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
17054 		if (dp_alloc_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
17055 			goto fail1;
17056 
17057 		if (dp_ipa_alloc_alt_tx_ring(soc))
17058 			goto fail1;
17059 	}
17060 
17061 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
17062 		/* Setup REO destination ring */
17063 		if (dp_srng_alloc(soc, &soc->reo_dest_ring[i], REO_DST,
17064 				  reo_dst_ring_size, cached)) {
17065 			dp_init_err("%pK: dp_srng_alloc failed for reo_dest_ring", soc);
17066 			goto fail1;
17067 		}
17068 	}
17069 
17070 	if (soc->arch_ops.txrx_soc_srng_alloc) {
17071 		if (soc->arch_ops.txrx_soc_srng_alloc(soc)) {
17072 			dp_init_err("%pK: dp_srng_alloc failed for arch rings",
17073 				    soc);
17074 			goto fail1;
17075 		}
17076 	}
17077 
17078 	return QDF_STATUS_SUCCESS;
17079 
17080 fail1:
17081 	dp_soc_srng_free(soc);
17082 	return QDF_STATUS_E_NOMEM;
17083 }
17084 
17085 static void dp_soc_cfg_dump(struct dp_soc *soc, uint32_t target_type)
17086 {
17087 	dp_init_info("DP soc Dump for Target = %d", target_type);
17088 	dp_init_info("ast_override_support = %d, da_war_enabled = %d,",
17089 		     soc->ast_override_support, soc->da_war_enabled);
17090 
17091 	wlan_cfg_dp_soc_ctx_dump(soc->wlan_cfg_ctx);
17092 }
17093 
17094 /**
17095  * dp_soc_cfg_init() - initialize target specific configuration
17096  *		       during dp_soc_init
17097  * @soc: dp soc handle
17098  */
17099 static void dp_soc_cfg_init(struct dp_soc *soc)
17100 {
17101 	uint32_t target_type;
17102 
17103 	target_type = hal_get_target_type(soc->hal_soc);
17104 	switch (target_type) {
17105 	case TARGET_TYPE_QCA6290:
17106 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
17107 					       REO_DST_RING_SIZE_QCA6290);
17108 		soc->ast_override_support = 1;
17109 		soc->da_war_enabled = false;
17110 		break;
17111 	case TARGET_TYPE_QCA6390:
17112 	case TARGET_TYPE_QCA6490:
17113 	case TARGET_TYPE_QCA6750:
17114 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
17115 					       REO_DST_RING_SIZE_QCA6290);
17116 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
17117 		soc->ast_override_support = 1;
17118 		if (soc->cdp_soc.ol_ops->get_con_mode &&
17119 		    soc->cdp_soc.ol_ops->get_con_mode() ==
17120 		    QDF_GLOBAL_MONITOR_MODE) {
17121 			int int_ctx;
17122 
17123 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
17124 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
17125 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
17126 			}
17127 		}
17128 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
17129 		break;
17130 	case TARGET_TYPE_KIWI:
17131 	case TARGET_TYPE_MANGO:
17132 	case TARGET_TYPE_PEACH:
17133 		soc->ast_override_support = 1;
17134 		soc->per_tid_basize_max_tid = 8;
17135 
17136 		if (soc->cdp_soc.ol_ops->get_con_mode &&
17137 		    soc->cdp_soc.ol_ops->get_con_mode() ==
17138 		    QDF_GLOBAL_MONITOR_MODE) {
17139 			int int_ctx;
17140 
17141 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS;
17142 			     int_ctx++) {
17143 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
17144 				if (dp_is_monitor_mode_using_poll(soc))
17145 					soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
17146 			}
17147 		}
17148 
17149 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
17150 		soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev = 1;
17151 		break;
17152 	case TARGET_TYPE_QCA8074:
17153 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
17154 		soc->da_war_enabled = true;
17155 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
17156 		break;
17157 	case TARGET_TYPE_QCA8074V2:
17158 	case TARGET_TYPE_QCA6018:
17159 	case TARGET_TYPE_QCA9574:
17160 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
17161 		soc->ast_override_support = 1;
17162 		soc->per_tid_basize_max_tid = 8;
17163 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
17164 		soc->da_war_enabled = false;
17165 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
17166 		break;
17167 	case TARGET_TYPE_QCN9000:
17168 		soc->ast_override_support = 1;
17169 		soc->da_war_enabled = false;
17170 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
17171 		soc->per_tid_basize_max_tid = 8;
17172 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
17173 		soc->lmac_polled_mode = 0;
17174 		soc->wbm_release_desc_rx_sg_support = 1;
17175 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
17176 		break;
17177 	case TARGET_TYPE_QCA5018:
17178 	case TARGET_TYPE_QCN6122:
17179 	case TARGET_TYPE_QCN9160:
17180 		soc->ast_override_support = 1;
17181 		soc->da_war_enabled = false;
17182 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
17183 		soc->per_tid_basize_max_tid = 8;
17184 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS_11AX;
17185 		soc->disable_mac1_intr = 1;
17186 		soc->disable_mac2_intr = 1;
17187 		soc->wbm_release_desc_rx_sg_support = 1;
17188 		break;
17189 	case TARGET_TYPE_QCN9224:
17190 		soc->ast_override_support = 1;
17191 		soc->da_war_enabled = false;
17192 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
17193 		soc->per_tid_basize_max_tid = 8;
17194 		soc->wbm_release_desc_rx_sg_support = 1;
17195 		soc->rxdma2sw_rings_not_supported = 1;
17196 		soc->wbm_sg_last_msdu_war = 1;
17197 		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
17198 		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
17199 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
17200 		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
17201 		soc->host_ast_db_enable = cfg_get(soc->ctrl_psoc,
17202 						  CFG_DP_HOST_AST_DB_ENABLE);
17203 		soc->features.wds_ext_ast_override_enable = true;
17204 		break;
17205 	case TARGET_TYPE_QCA5332:
17206 		soc->ast_override_support = 1;
17207 		soc->da_war_enabled = false;
17208 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
17209 		soc->per_tid_basize_max_tid = 8;
17210 		soc->wbm_release_desc_rx_sg_support = 1;
17211 		soc->rxdma2sw_rings_not_supported = 1;
17212 		soc->wbm_sg_last_msdu_war = 1;
17213 		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
17214 		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
17215 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS_5332;
17216 		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
17217 		soc->host_ast_db_enable = cfg_get(soc->ctrl_psoc,
17218 						  CFG_DP_HOST_AST_DB_ENABLE);
17219 		soc->features.wds_ext_ast_override_enable = true;
17220 		break;
17221 	default:
17222 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
17223 		qdf_assert_always(0);
17224 		break;
17225 	}
17226 	dp_soc_cfg_dump(soc, target_type);
17227 }
17228 
17229 /**
17230  * dp_soc_cfg_attach() - set target specific configuration in
17231  *			 dp soc cfg.
17232  * @soc: dp soc handle
17233  */
17234 static void dp_soc_cfg_attach(struct dp_soc *soc)
17235 {
17236 	int target_type;
17237 	int nss_cfg = 0;
17238 
17239 	target_type = hal_get_target_type(soc->hal_soc);
17240 	switch (target_type) {
17241 	case TARGET_TYPE_QCA6290:
17242 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
17243 					       REO_DST_RING_SIZE_QCA6290);
17244 		break;
17245 	case TARGET_TYPE_QCA6390:
17246 	case TARGET_TYPE_QCA6490:
17247 	case TARGET_TYPE_QCA6750:
17248 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
17249 					       REO_DST_RING_SIZE_QCA6290);
17250 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
17251 		break;
17252 	case TARGET_TYPE_KIWI:
17253 	case TARGET_TYPE_MANGO:
17254 	case TARGET_TYPE_PEACH:
17255 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
17256 		break;
17257 	case TARGET_TYPE_QCA8074:
17258 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
17259 		break;
17260 	case TARGET_TYPE_QCA8074V2:
17261 	case TARGET_TYPE_QCA6018:
17262 	case TARGET_TYPE_QCA9574:
17263 	case TARGET_TYPE_QCN6122:
17264 	case TARGET_TYPE_QCN9160:
17265 	case TARGET_TYPE_QCA5018:
17266 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
17267 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
17268 		break;
17269 	case TARGET_TYPE_QCN9000:
17270 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
17271 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
17272 		break;
17273 	case TARGET_TYPE_QCN9224:
17274 	case TARGET_TYPE_QCA5332:
17275 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
17276 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
17277 		break;
17278 	default:
17279 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
17280 		qdf_assert_always(0);
17281 		break;
17282 	}
17283 
17284 	if (soc->cdp_soc.ol_ops->get_soc_nss_cfg)
17285 		nss_cfg = soc->cdp_soc.ol_ops->get_soc_nss_cfg(soc->ctrl_psoc);
17286 
17287 	wlan_cfg_set_dp_soc_nss_cfg(soc->wlan_cfg_ctx, nss_cfg);
17288 
17289 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
17290 		wlan_cfg_set_num_tx_desc_pool(soc->wlan_cfg_ctx, 0);
17291 		wlan_cfg_set_num_tx_ext_desc_pool(soc->wlan_cfg_ctx, 0);
17292 		wlan_cfg_set_num_tx_desc(soc->wlan_cfg_ctx, 0);
17293 		wlan_cfg_set_num_tx_ext_desc(soc->wlan_cfg_ctx, 0);
17294 		soc->init_tcl_cmd_cred_ring = false;
17295 		soc->num_tcl_data_rings =
17296 			wlan_cfg_num_nss_tcl_data_rings(soc->wlan_cfg_ctx);
17297 		soc->num_reo_dest_rings =
17298 			wlan_cfg_num_nss_reo_dest_rings(soc->wlan_cfg_ctx);
17299 
17300 	} else {
17301 		soc->init_tcl_cmd_cred_ring = true;
17302 		soc->num_tx_comp_rings =
17303 			wlan_cfg_num_tx_comp_rings(soc->wlan_cfg_ctx);
17304 		soc->num_tcl_data_rings =
17305 			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
17306 		soc->num_reo_dest_rings =
17307 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
17308 	}
17309 
17310 	soc->arch_ops.soc_cfg_attach(soc);
17311 }
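
/*
 * When the SoC is NSS offloaded (nss_cfg != 0), dp_soc_cfg_attach()
 * above zeroes the host TX descriptor pool sizes and skips the TCL
 * command/credit ring, since the offload engine owns the TX data path;
 * the TCL data and REO destination ring counts are then taken from the
 * NSS-specific wlan_cfg accessors instead of the regular ones.
 */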
17312 
17313 static inline  void dp_pdev_set_default_reo(struct dp_pdev *pdev)
17314 {
17315 	struct dp_soc *soc = pdev->soc;
17316 
17317 	switch (pdev->pdev_id) {
17318 	case 0:
17319 		pdev->reo_dest =
17320 			wlan_cfg_radio0_default_reo_get(soc->wlan_cfg_ctx);
17321 		break;
17322 
17323 	case 1:
17324 		pdev->reo_dest =
17325 			wlan_cfg_radio1_default_reo_get(soc->wlan_cfg_ctx);
17326 		break;
17327 
17328 	case 2:
17329 		pdev->reo_dest =
17330 			wlan_cfg_radio2_default_reo_get(soc->wlan_cfg_ctx);
17331 		break;
17332 
17333 	default:
17334 		dp_init_err("%pK: Invalid pdev_id %d for reo selection",
17335 			    soc, pdev->pdev_id);
17336 		break;
17337 	}
17338 }
17339 
17340 static QDF_STATUS dp_pdev_init(struct cdp_soc_t *txrx_soc,
17341 				      HTC_HANDLE htc_handle,
17342 				      qdf_device_t qdf_osdev,
17343 				      uint8_t pdev_id)
17344 {
17345 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
17346 	int nss_cfg;
17347 	void *sojourn_buf;
17348 
17349 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
17350 	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
17351 
17352 	soc_cfg_ctx = soc->wlan_cfg_ctx;
17353 	pdev->soc = soc;
17354 	pdev->pdev_id = pdev_id;
17355 
17356 	/*
17357 	 * Variable to prevent double pdev deinitialization during
17358 	 * radio detach execution, i.e. in the absence of any vdev.
17359 	 */
17360 	pdev->pdev_deinit = 0;
17361 
17362 	if (dp_wdi_event_attach(pdev)) {
17363 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
17364 			  "dp_wdi_event_attach failed");
17365 		goto fail0;
17366 	}
17367 
17368 	if (dp_pdev_srng_init(pdev)) {
17369 		dp_init_err("%pK: Failed to initialize pdev srng rings", soc);
17370 		goto fail1;
17371 	}
17372 
17373 	/* Initialize descriptors in TCL Rings used by IPA */
17374 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
17375 		hal_tx_init_data_ring(soc->hal_soc,
17376 				      soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng);
17377 		dp_ipa_hal_tx_init_alt_data_ring(soc);
17378 	}
17379 
17380 	/*
17381 	 * Initialize command/credit ring descriptor
17382 	 * Command/CREDIT ring also used for sending DATA cmds
17383 	 */
17384 	dp_tx_init_cmd_credit_ring(soc);
17385 
17386 	dp_tx_pdev_init(pdev);
17387 
17388 	/*
17389 	 * set nss pdev config based on soc config
17390 	 */
17391 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
17392 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
17393 					 (nss_cfg & (1 << pdev_id)));
17394 	pdev->target_pdev_id =
17395 		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
17396 
17397 	if (soc->preferred_hw_mode == WMI_HOST_HW_MODE_2G_PHYB &&
17398 	    pdev->lmac_id == PHYB_2G_LMAC_ID) {
17399 		pdev->target_pdev_id = PHYB_2G_TARGET_PDEV_ID;
17400 	}
17401 
17402 	/* Reset the cpu ring map if radio is NSS offloaded */
17403 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
17404 		dp_soc_reset_cpu_ring_map(soc);
17405 		dp_soc_reset_intr_mask(soc);
17406 	}
17407 
17408 	/* Reset the IPA vlan ring interrupt mask */
17409 	dp_soc_reset_ipa_vlan_intr_mask(soc);
17410 
17411 	TAILQ_INIT(&pdev->vdev_list);
17412 	qdf_spinlock_create(&pdev->vdev_list_lock);
17413 	pdev->vdev_count = 0;
17414 	pdev->is_lro_hash_configured = 0;
17415 
17416 	qdf_spinlock_create(&pdev->tx_mutex);
17417 	pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_INVALID_LMAC_ID;
17418 	pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_INVALID_LMAC_ID;
17419 	pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_INVALID_LMAC_ID;
17420 
17421 	DP_STATS_INIT(pdev);
17422 
17423 	dp_local_peer_id_pool_init(pdev);
17424 
17425 	dp_dscp_tid_map_setup(pdev);
17426 	dp_pcp_tid_map_setup(pdev);
17427 
17428 	/* set the reo destination during initialization */
17429 	dp_pdev_set_default_reo(pdev);
17430 
17431 	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
17432 
17433 	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
17434 			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
17435 			      TRUE);
17436 
17437 	if (!pdev->sojourn_buf) {
17438 		dp_init_err("%pK: Failed to allocate sojourn buf", soc);
17439 		goto fail2;
17440 	}
17441 	sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
17442 	qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));
17443 
17444 	qdf_event_create(&pdev->fw_peer_stats_event);
17445 	qdf_event_create(&pdev->fw_stats_event);
17446 	qdf_event_create(&pdev->fw_obss_stats_event);
17447 
	pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
	pdev->num_tx_spl_allowed =
		wlan_cfg_get_num_tx_spl_desc(soc->wlan_cfg_ctx);
	pdev->num_reg_tx_allowed =
		pdev->num_tx_allowed - pdev->num_tx_spl_allowed;
	if (dp_rxdma_ring_setup(soc, pdev)) {
		dp_init_err("%pK: RXDMA ring config failed", soc);
		goto fail3;
	}

	if (dp_init_ipa_rx_refill_buf_ring(soc, pdev))
		goto fail3;

	if (dp_ipa_ring_resource_setup(soc, pdev))
		goto fail4;

	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
		dp_init_err("%pK: dp_ipa_uc_attach failed", soc);
		goto fail4;
	}

	if (dp_pdev_bkp_stats_attach(pdev) != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_pdev_bkp_stats_attach failed"));
		goto fail5;
	}

	if (dp_monitor_pdev_init(pdev)) {
		dp_init_err("%pK: dp_monitor_pdev_init failed", soc);
		goto fail6;
	}

	/* initialize sw rx descriptors */
	dp_rx_pdev_desc_pool_init(pdev);
	/* allocate buffers and replenish the RxDMA ring */
	dp_rx_pdev_buffers_alloc(pdev);

	dp_init_tso_stats(pdev);

	pdev->rx_fast_flag = false;
	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
		qdf_dma_mem_stats_read(),
		qdf_heap_mem_stats_read(),
		qdf_skb_total_mem_stats_read());

	return QDF_STATUS_SUCCESS;
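	/*
	 * Error path: each label undoes the steps that completed before the
	 * corresponding failure point, in reverse order of initialization.
	 */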
fail6:
	dp_pdev_bkp_stats_detach(pdev);
fail5:
	dp_ipa_uc_detach(soc, pdev);
fail4:
	dp_deinit_ipa_rx_refill_buf_ring(soc, pdev);
fail3:
	dp_rxdma_ring_cleanup(soc, pdev);
	qdf_nbuf_free(pdev->sojourn_buf);
fail2:
	qdf_spinlock_destroy(&pdev->tx_mutex);
	qdf_spinlock_destroy(&pdev->vdev_list_lock);
	dp_pdev_srng_deinit(pdev);
fail1:
	dp_wdi_event_detach(pdev);
fail0:
	return QDF_STATUS_E_FAILURE;
}

/**
 * dp_pdev_init_wifi3() - Init txrx pdev
 * @txrx_soc: opaque DP SoC handle
 * @htc_handle: HTC handle for host-target interface
 * @qdf_osdev: QDF OS device
 * @pdev_id: pdev Id
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
				     HTC_HANDLE htc_handle,
				     qdf_device_t qdf_osdev,
				     uint8_t pdev_id)
{
	return dp_pdev_init(txrx_soc, htc_handle, qdf_osdev, pdev_id);
}
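
/*
 * Thin cdp-facing wrapper: this keeps the signature exported through the
 * cdp common ops separate from the internal dp_pdev_init() implementation.
 */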

#ifdef FEATURE_DIRECT_LINK
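/**
 * dp_setup_direct_link_refill_ring() - Allocate and initialize the direct
 *	link refill ring (rx_refill_buf_ring4) and register it with the
 *	target via HTT
 * @soc_hdl: CDP SoC handle
 * @pdev_id: pdev id
 *
 * Return: pointer to the refill srng on success, NULL on failure
 */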
struct dp_srng *dp_setup_direct_link_refill_ring(struct cdp_soc_t *soc_hdl,
						 uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev) {
		dp_err("DP pdev is NULL");
		return NULL;
	}

	if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring4,
			  RXDMA_BUF, DIRECT_LINK_REFILL_RING_ENTRIES, false)) {
		dp_err("SRNG alloc failed for rx_refill_buf_ring4");
		return NULL;
	}

	if (dp_srng_init(soc, &pdev->rx_refill_buf_ring4,
			 RXDMA_BUF, DIRECT_LINK_REFILL_RING_IDX, 0)) {
		dp_err("SRNG init failed for rx_refill_buf_ring4");
		dp_srng_free(soc, &pdev->rx_refill_buf_ring4);
		return NULL;
	}

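	/*
	 * Hand the ring over to the target; on failure, unwind the srng
	 * init and alloc done above.
	 */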
	if (htt_srng_setup(soc->htt_handle, pdev_id,
			   pdev->rx_refill_buf_ring4.hal_srng, RXDMA_BUF)) {
		dp_srng_deinit(soc, &pdev->rx_refill_buf_ring4, RXDMA_BUF,
			       DIRECT_LINK_REFILL_RING_IDX);
		dp_srng_free(soc, &pdev->rx_refill_buf_ring4);
		return NULL;
	}

	return &pdev->rx_refill_buf_ring4;
}

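/**
 * dp_destroy_direct_link_refill_ring() - Deinitialize and free the direct
 *	link refill ring (rx_refill_buf_ring4)
 * @soc_hdl: CDP SoC handle
 * @pdev_id: pdev id
 *
 * Return: None
 */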
void dp_destroy_direct_link_refill_ring(struct cdp_soc_t *soc_hdl,
					uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev) {
		dp_err("DP pdev is NULL");
		return;
	}

	dp_srng_deinit(soc, &pdev->rx_refill_buf_ring4, RXDMA_BUF, 0);
	dp_srng_free(soc, &pdev->rx_refill_buf_ring4);
}
#endif
