/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <wlan_ipa_obj_mgmt_api.h>
#include <qdf_types.h>
#include <qdf_lock.h>
#include <qdf_net_types.h>
#include <qdf_lro.h>
#include <qdf_module.h>
#include <hal_hw_headers.h>
#include <hal_api.h>
#include <hif.h>
#include <htt.h>
#include <wdi_event.h>
#include <queue.h>
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_rx.h"
#ifdef DP_RATETABLE_SUPPORT
#include "dp_ratetable.h"
#endif
#include <cdp_txrx_handle.h>
#include <wlan_cfg.h>
#include <wlan_utility.h>
#include "cdp_txrx_cmn_struct.h"
#include "cdp_txrx_stats_struct.h"
#include "cdp_txrx_cmn_reg.h"
#include <qdf_util.h>
#include "dp_peer.h"
#include "htt_stats.h"
#include "dp_htt.h"
#ifdef WLAN_SUPPORT_RX_FISA
#include <dp_fisa_rx.h>
#endif
#include "htt_ppdu_stats.h"
#include "qdf_mem.h"   /* qdf_mem_malloc,free */
#include "cfg_ucfg_api.h"
#include <wlan_module_ids.h>

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#include "cdp_txrx_flow_ctrl_v2.h"
#else

static inline void
cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
{
	return;
}
#endif
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif
#include "dp_ipa.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#ifdef WLAN_SUPPORT_MSCS
#include "dp_mscs.h"
#endif
#ifdef WLAN_SUPPORT_MESH_LATENCY
#include "dp_mesh_latency.h"
#endif
#ifdef WLAN_SUPPORT_SCS
#include "dp_scs.h"
#endif
#ifdef ATH_SUPPORT_IQUE
#include "dp_txrx_me.h"
#endif
#if defined(DP_CON_MON)
#ifndef REMOVE_PKT_LOG
#include <pktlog_ac_api.h>
#include <pktlog_ac.h>
#endif
#endif
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
#include <wlan_dp_swlm.h>
#endif
#ifdef CONFIG_SAWF_DEF_QUEUES
#include "dp_sawf.h"
#endif
#ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
#include <target_if_dp.h>
#endif

#ifdef WLAN_FEATURE_STATS_EXT
#define INIT_RX_HW_STATS_LOCK(_soc) \
	qdf_spinlock_create(&(_soc)->rx_hw_stats_lock)
#define DEINIT_RX_HW_STATS_LOCK(_soc) \
	qdf_spinlock_destroy(&(_soc)->rx_hw_stats_lock)
#else
#define INIT_RX_HW_STATS_LOCK(_soc)  /* no op */
#define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */
#endif

#if defined(DP_PEER_EXTENDED_API) || defined(WLAN_DP_PENDING_MEM_FLUSH)
#define SET_PEER_REF_CNT_ONE(_peer) \
	qdf_atomic_set(&(_peer)->ref_cnt, 1)
#else
#define SET_PEER_REF_CNT_ONE(_peer)
#endif
#ifdef WLAN_SYSFS_DP_STATS
/* sysfs event wait time for a firmware stat request, in milliseconds */
#define WLAN_SYSFS_STAT_REQ_WAIT_MS 3000
#endif

#ifdef QCA_DP_ENABLE_TX_COMP_RING4
#define TXCOMP_RING4_NUM 3
#else
#define TXCOMP_RING4_NUM WBM2SW_TXCOMP_RING4_NUM
#endif

#ifdef QCA_DP_TX_FW_METADATA_V2
#define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
		HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
#else
#define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
		HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
#endif

QDF_COMPILE_TIME_ASSERT(max_rx_rings_check,
			MAX_REO_DEST_RINGS == CDP_MAX_RX_RINGS);

QDF_COMPILE_TIME_ASSERT(max_tx_rings_check,
			MAX_TCL_DATA_RINGS == CDP_MAX_TX_COMP_RINGS);

#define dp_init_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_INIT, params)
#define dp_init_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_INIT, params)
#define dp_init_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_INIT, params)
#define dp_init_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_INIT, ## params)
#define dp_init_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_INIT, params)

#define dp_vdev_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_VDEV, params)
#define dp_vdev_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_VDEV, params)
#define dp_vdev_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_VDEV, params)
#define dp_vdev_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_VDEV, ## params)
#define dp_vdev_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_VDEV, params)

void dp_configure_arch_ops(struct dp_soc *soc);
qdf_size_t dp_get_soc_context_size(uint16_t device_id);

/*
 * The max size of cdp_peer_stats_param_t is limited to 16 bytes.
 * If the buffer size exceeds this limit, dp_txrx_get_peer_stats
 * is to be used instead.
 */
QDF_COMPILE_TIME_ASSERT(cdp_peer_stats_param_t_max_size,
			(sizeof(cdp_peer_stats_param_t) <= 16));

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
/*
 * If WLAN_CFG_INT_NUM_CONTEXTS is changed, HIF_NUM_INT_CONTEXTS
 * also should be updated accordingly
 */
QDF_COMPILE_TIME_ASSERT(num_intr_grps,
			HIF_NUM_INT_CONTEXTS == WLAN_CFG_INT_NUM_CONTEXTS);

/*
 * HIF_EVENT_HIST_MAX should always be a power of 2
 */
QDF_COMPILE_TIME_ASSERT(hif_event_history_size,
			(HIF_EVENT_HIST_MAX & (HIF_EVENT_HIST_MAX - 1)) == 0);
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */

/*
 * If WLAN_CFG_INT_NUM_CONTEXTS is changed,
 * WLAN_CFG_INT_NUM_CONTEXTS_MAX should also be updated
 */
QDF_COMPILE_TIME_ASSERT(wlan_cfg_num_int_ctxs,
			WLAN_CFG_INT_NUM_CONTEXTS_MAX >=
			WLAN_CFG_INT_NUM_CONTEXTS);

static QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl);
static QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl);

static void dp_pdev_srng_deinit(struct dp_pdev *pdev);
static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev);
static void dp_pdev_srng_free(struct dp_pdev *pdev);
static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev);

static void dp_soc_srng_deinit(struct dp_soc *soc);
static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc);
static void dp_soc_srng_free(struct dp_soc *soc);
static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc);

static void dp_soc_cfg_init(struct dp_soc *soc);
static void dp_soc_cfg_attach(struct dp_soc *soc);

static inline
QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
				struct cdp_pdev_attach_params *params);

static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id);

static QDF_STATUS
dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
		   HTC_HANDLE htc_handle,
		   qdf_device_t qdf_osdev,
		   uint8_t pdev_id);

static QDF_STATUS
dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id, int force);

static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc);
static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc);

void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
		  struct hif_opaque_softc *hif_handle);
static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc,
				       uint8_t pdev_id,
				       int force);
static struct dp_soc *
dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
	      struct cdp_soc_attach_params *params);
static inline QDF_STATUS dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl,
					      uint8_t vdev_id,
					      uint8_t *peer_mac_addr,
					      enum cdp_peer_type peer_type);
static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
				       uint8_t vdev_id,
				       uint8_t *peer_mac, uint32_t bitmap,
				       enum cdp_peer_type peer_type);
static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
				bool unmap_only,
				bool mlo_peers_only);
#ifdef ENABLE_VERBOSE_DEBUG
bool is_dp_verbose_debug_enabled;
#endif

#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
static bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
static void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			   bool enable);
static inline void
dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		     struct cdp_cfr_rcc_stats *cfr_rcc_stats);
static inline void
dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
#endif

static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
						uint8_t index);
static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index);
static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index);
static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
						 uint8_t index);
static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
					    enum hal_ring_type ring_type,
					    int ring_num);
#ifdef FEATURE_AST
void dp_print_mlo_ast_stats(struct dp_soc *soc);
#endif

#ifdef DP_UMAC_HW_RESET_SUPPORT
static QDF_STATUS dp_umac_reset_handle_pre_reset(struct dp_soc *soc);
static QDF_STATUS dp_umac_reset_handle_post_reset(struct dp_soc *soc);
static QDF_STATUS dp_umac_reset_handle_post_reset_complete(struct dp_soc *soc);
#endif

#define DP_INTR_POLL_TIMER_MS	5

#define MON_VDEV_TIMER_INIT 0x1
#define MON_VDEV_TIMER_RUNNING 0x2

#define DP_MCS_LENGTH (6*MAX_MCS)

#define DP_CURR_FW_STATS_AVAIL 19
#define DP_HTT_DBG_EXT_STATS_MAX 256
#define DP_MAX_SLEEP_TIME 100
#ifndef QCA_WIFI_3_0_EMU
#define SUSPEND_DRAIN_WAIT 500
#else
#define SUSPEND_DRAIN_WAIT 3000
#endif

#ifdef IPA_OFFLOAD
/* Exclude IPA rings from the interrupt context */
#define TX_RING_MASK_VAL	0xb
#define RX_RING_MASK_VAL	0x7
#else
#define TX_RING_MASK_VAL	0xF
#define RX_RING_MASK_VAL	0xF
#endif

#define STR_MAXLEN	64

#define RNG_ERR		"SRNG setup failed for"

/**
 * default_dscp_tid_map - Default DSCP-TID mapping
 *
 * DSCP        TID
 * 000000      0
 * 001000      1
 * 010000      2
 * 011000      3
 * 100000      4
 * 101000      5
 * 110000      6
 * 111000      7
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7,
};

/**
 * default_pcp_tid_map - Default PCP-TID mapping
 *
 * PCP     TID
 * 000      0
 * 001      1
 * 010      2
 * 011      3
 * 100      4
 * 101      5
 * 110      6
 * 111      7
 */
static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
	0, 1, 2, 3, 4, 5, 6, 7,
};

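/*
 * Illustrative sketch (not part of the driver): how the default maps
 * above would be consulted. Assuming the caller has already extracted
 * the 6-bit DSCP from the IPv4 TOS byte (tos >> 2), or the 3-bit PCP
 * from an 802.1Q tag, the TID is a direct table lookup:
 *
 *	uint8_t dscp = (tos >> 2) & 0x3f;
 *	uint8_t tid = default_dscp_tid_map[dscp];  (DSCP 46 (EF) -> TID 5)
 *
 *	uint8_t pcp = (vlan_tci >> 13) & 0x7;
 *	uint8_t tid = default_pcp_tid_map[pcp];    (PCP n -> TID n)
 */
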
/**
 * dp_cpu_ring_map - CPU to TX ring map
 */
uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
#endif
};

qdf_export_symbol(dp_cpu_ring_map);

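/*
 * Illustrative sketch (not part of the driver): dp_cpu_ring_map is
 * indexed first by map type (one of the DP_NSS_CPU_RING_MAP_MAX rows;
 * map_idx below is a hypothetical index) and then by interrupt context
 * number:
 *
 *	uint8_t tx_ring = dp_cpu_ring_map[map_idx][ctx];
 *
 * e.g. row 0, ctx 1 selects TCL ring 0x1, while row 3 pins every
 * context to ring 0x2.
 */
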
/**
 * dp_stats_type - Select the type of statistics
 */
enum dp_stats_type {
	STATS_FW = 0,
	STATS_HOST = 1,
	STATS_TYPE_MAX = 2,
};

/**
 * dp_fw_stats - General Firmware statistics options
 */
enum dp_fw_stats {
	TXRX_FW_STATS_INVALID	= -1,
};

/**
 * dp_stats_mapping_table - Firmware and Host statistics
 * currently supported
 */
const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
	/* Last ENUM for HTT FW STATS */
	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
	{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
	{TXRX_FW_STATS_INVALID, TXRX_NAPI_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_FSE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_HAL_REG_WRITE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_REO_HW_DESC_DUMP},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_WBM_IDLE_HPTP_DUMP},
	{TXRX_FW_STATS_INVALID, TXRX_SRNG_USAGE_WM_STATS},
	{HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_SOUNDING_INFO, TXRX_HOST_STATS_INVALID}
};

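/*
 * Illustrative sketch (not part of the driver): each row of
 * dp_stats_mapping_table pairs the firmware stats id (STATS_FW column)
 * with the host stats id (STATS_HOST column) for one stats request; the
 * INVALID sentinels mark the side that does not apply. A hypothetical
 * dispatcher could look like:
 *
 *	int fw_id = dp_stats_mapping_table[req][STATS_FW];
 *	int host_id = dp_stats_mapping_table[req][STATS_HOST];
 *
 *	if (fw_id != TXRX_FW_STATS_INVALID)
 *		request_fw_stats(fw_id);     (hypothetical helper)
 *	else if (host_id != TXRX_HOST_STATS_INVALID)
 *		print_host_stats(host_id);   (hypothetical helper)
 */
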
/* MCL specific functions */
#if defined(DP_CON_MON)

#ifdef DP_CON_MON_MSI_ENABLED
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * With MSI enabled for the console monitor, monitor mode rings are
 * serviced from interrupt context, so this function returns the monitor
 * ring mask configured for the given interrupt context instead of 0.
 *
 * Return: mon ring mask configured for the interrupt context
 */
static inline uint32_t
dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
}
#else
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * For MCL, monitor mode rings are processed in timer contexts (polled).
 * This function returns 0 since, in interrupt mode (softirq-based RX),
 * we do not want to process monitor mode rings in a softirq.
 *
 * So, in case packet log is enabled for SAP/STA/P2P modes,
 * regular interrupt processing will not process monitor mode rings. It
 * would be done in a separate timer context.
 *
 * Return: 0
 */
static inline uint32_t
dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return 0;
}
#endif

#ifdef IPA_OFFLOAD
/**
 * dp_get_num_rx_contexts() - get number of RX contexts
 * @soc_hdl: cdp opaque soc handle
 *
 * Return: number of RX contexts
 */
static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
{
	int num_rx_contexts;
	uint32_t reo_ring_map;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	reo_ring_map = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);

	switch (soc->arch_id) {
	case CDP_ARCH_TYPE_BE:
		/* 2 REO rings are used for IPA */
		reo_ring_map &= ~(BIT(3) | BIT(7));

		break;
	case CDP_ARCH_TYPE_LI:
		/* 1 REO ring is used for IPA */
		reo_ring_map &= ~BIT(3);
		break;
	default:
		dp_err("unknown arch_id 0x%x", soc->arch_id);
		QDF_BUG(0);
	}
	/*
	 * qdf_get_hweight32 is preferred over qdf_get_hweight8 in case the
	 * map is scaled in the future
	 */
	num_rx_contexts = qdf_get_hweight32(reo_ring_map);

	return num_rx_contexts;
}
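
/*
 * Worked example for the masking above (illustrative only): with a REO
 * ring map of 0xF on a LI target, clearing BIT(3) for IPA leaves 0x7,
 * so qdf_get_hweight32(0x7) yields 3 RX contexts; on a BE target with
 * map 0xFF, clearing BIT(3) | BIT(7) leaves 0x77, i.e. 6 RX contexts.
 */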
#else
static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
{
	int num_rx_contexts;
	uint32_t reo_config;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
	/*
	 * qdf_get_hweight32 is preferred over qdf_get_hweight8 in case the
	 * map is scaled in the future
	 */
	num_rx_contexts = qdf_get_hweight32(reo_config);

	return num_rx_contexts;
}
#endif

#else

/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * Return: mon mask value
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
}

/**
 * dp_soc_reset_mon_intr_mask() - reset mon intr mask
 * @soc: pointer to dp_soc handle
 *
 * Return: none
 */
void dp_soc_reset_mon_intr_mask(struct dp_soc *soc)
{
	int i;

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].rx_mon_ring_mask = 0;
		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
	}
}

qdf_export_symbol(dp_soc_reset_mon_intr_mask);

/*
 * dp_service_lmac_rings() - timer to reap lmac rings
 * @arg: SoC Handle
 *
 * Return: none
 */
static void dp_service_lmac_rings(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;
	int ring = 0, i;
	struct dp_pdev *pdev = NULL;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	/* Process LMAC interrupts */
	for (ring = 0; ring < MAX_NUM_LMAC_HW; ring++) {
		int mac_for_pdev = ring;
		struct dp_srng *rx_refill_buf_ring;

		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
		if (!pdev)
			continue;

		rx_refill_buf_ring = &soc->rx_refill_buf_ring[mac_for_pdev];

		dp_monitor_process(soc, NULL, mac_for_pdev,
				   QCA_NAPI_BUDGET);

		for (i = 0;
		     i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
			dp_rxdma_err_process(&soc->intr_ctx[i], soc,
					     mac_for_pdev,
					     QCA_NAPI_BUDGET);

		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF,
						  mac_for_pdev))
			dp_rx_buffers_replenish(soc, mac_for_pdev,
						rx_refill_buf_ring,
						&soc->rx_desc_buf[mac_for_pdev],
						0, &desc_list, &tail, false);
	}

	qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
}

#endif

#ifdef FEATURE_MEC
void dp_peer_mec_flush_entries(struct dp_soc *soc)
{
	unsigned int index;
	struct dp_mec_entry *mecentry, *mecentry_next;

	TAILQ_HEAD(, dp_mec_entry) free_list;
	TAILQ_INIT(&free_list);

	if (!soc->mec_hash.mask)
		return;

	if (!soc->mec_hash.bins)
		return;

	if (!qdf_atomic_read(&soc->mec_cnt))
		return;

	qdf_spin_lock_bh(&soc->mec_lock);
	for (index = 0; index <= soc->mec_hash.mask; index++) {
		if (!TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
			TAILQ_FOREACH_SAFE(mecentry, &soc->mec_hash.bins[index],
					   hash_list_elem, mecentry_next) {
				dp_peer_mec_detach_entry(soc, mecentry,
							 &free_list);
			}
		}
	}
	qdf_spin_unlock_bh(&soc->mec_lock);

	dp_peer_mec_free_list(soc, &free_list);
}

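/*
 * Note on the pattern above (illustrative, not additional driver logic):
 * entries are only detached onto the local free_list while mec_lock is
 * held, and the actual free runs after the unlock so that memory release
 * never happens under the spinlock. A caller simply invokes:
 *
 *	dp_peer_mec_flush_entries(soc);
 *
 * as dp_wds_flush_ast_table_wifi3() does further below.
 */
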
/**
 * dp_print_mec_stats() - Dump MEC entries in table
 * @soc: Datapath soc handle
 *
 * Return: none
 */
static void dp_print_mec_stats(struct dp_soc *soc)
{
	int i;
	uint32_t index;
	struct dp_mec_entry *mecentry = NULL, *mec_list;
	uint32_t num_entries = 0;

	DP_PRINT_STATS("MEC Stats:");
	DP_PRINT_STATS("   Entries Added   = %d", soc->stats.mec.added);
	DP_PRINT_STATS("   Entries Deleted = %d", soc->stats.mec.deleted);

	if (!qdf_atomic_read(&soc->mec_cnt))
		return;

	mec_list = qdf_mem_malloc(sizeof(*mecentry) * DP_PEER_MAX_MEC_ENTRY);
	if (!mec_list) {
		dp_peer_warn("%pK: failed to allocate mec_list", soc);
		return;
	}

	DP_PRINT_STATS("MEC Table:");
	for (index = 0; index <= soc->mec_hash.mask; index++) {
		qdf_spin_lock_bh(&soc->mec_lock);
		if (TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
			qdf_spin_unlock_bh(&soc->mec_lock);
			continue;
		}

		TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index],
			      hash_list_elem) {
			qdf_mem_copy(&mec_list[num_entries], mecentry,
				     sizeof(*mecentry));
			num_entries++;
		}
		qdf_spin_unlock_bh(&soc->mec_lock);
	}

	if (!num_entries) {
		qdf_mem_free(mec_list);
		return;
	}

	for (i = 0; i < num_entries; i++) {
		DP_PRINT_STATS("%6d mac_addr = " QDF_MAC_ADDR_FMT
			       " is_active = %d pdev_id = %d vdev_id = %d",
			       i,
			       QDF_MAC_ADDR_REF(mec_list[i].mac_addr.raw),
			       mec_list[i].is_active,
			       mec_list[i].pdev_id,
			       mec_list[i].vdev_id);
	}
	qdf_mem_free(mec_list);
}
#else
static void dp_print_mec_stats(struct dp_soc *soc)
{
}
#endif

static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
				 uint8_t vdev_id,
				 uint8_t *peer_mac,
				 uint8_t *mac_addr,
				 enum cdp_txrx_ast_entry_type type,
				 uint32_t flags)
{
	int ret = -1;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
						       peer_mac, 0, vdev_id,
						       DP_MOD_ID_CDP);

	if (!peer) {
		dp_peer_debug("Peer is NULL!");
		return ret;
	}

	status = dp_peer_add_ast((struct dp_soc *)soc_hdl,
				 peer,
				 mac_addr,
				 type,
				 flags);
	if ((status == QDF_STATUS_SUCCESS) ||
	    (status == QDF_STATUS_E_ALREADY) ||
	    (status == QDF_STATUS_E_AGAIN))
		ret = 0;

	dp_hmwds_ast_add_notify(peer, mac_addr,
				type, status, false);

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return ret;
}

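/*
 * Illustrative sketch (not part of the driver): the lookup/unref
 * discipline used above. A dp_peer_find_hash_find() caller owns a
 * reference tagged with its module id and must drop it on every path:
 *
 *	struct dp_peer *peer;
 *
 *	peer = dp_peer_find_hash_find(soc, mac, 0, vdev_id,
 *				      DP_MOD_ID_CDP);
 *	if (!peer)
 *		return;                  (no reference to release)
 *	... use peer ...
 *	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 */
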
static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
						uint8_t vdev_id,
						uint8_t *peer_mac,
						uint8_t *wds_macaddr,
						uint32_t flags)
{
	int status = -1;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry  *ast_entry = NULL;
	struct dp_peer *peer;

	if (soc->ast_offload_support)
		return status;

	peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
				      peer_mac, 0, vdev_id,
				      DP_MOD_ID_CDP);

	if (!peer) {
		dp_peer_debug("Peer is NULL!");
		return status;
	}

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
						    peer->vdev->pdev->pdev_id);

	if (ast_entry) {
		status = dp_peer_update_ast(soc,
					    peer,
					    ast_entry, flags);
	}
	qdf_spin_unlock_bh(&soc->ast_lock);

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return status;
}

/*
 * dp_peer_reset_ast_entries() - Deletes all HMWDS entries for a peer
 * @soc:		Datapath SOC handle
 * @peer:		DP peer
 * @arg:		callback argument
 *
 * Return: None
 */
static void
dp_peer_reset_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_ast_entry *tmp_ast_entry;

	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) {
		if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
		    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
			dp_peer_del_ast(soc, ast_entry);
	}
}

/*
 * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry
 * @soc_hdl:		Datapath SOC handle
 * @wds_macaddr:	WDS entry MAC Address
 * @peer_mac_addr:	peer MAC Address
 * @vdev_id:		id of vdev handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
					 uint8_t *wds_macaddr,
					 uint8_t *peer_mac_addr,
					 uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_peer *peer;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;

	if (soc->ast_offload_support)
		return QDF_STATUS_E_FAILURE;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	pdev = vdev->pdev;

	if (peer_mac_addr) {
		peer = dp_peer_find_hash_find(soc, peer_mac_addr,
					      0, vdev->vdev_id,
					      DP_MOD_ID_CDP);
		if (!peer) {
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
			return QDF_STATUS_E_FAILURE;
		}

		qdf_spin_lock_bh(&soc->ast_lock);
		dp_peer_reset_ast_entries(soc, peer, NULL);
		qdf_spin_unlock_bh(&soc->ast_lock);
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	} else if (wds_macaddr) {
		qdf_spin_lock_bh(&soc->ast_lock);
		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
							    pdev->pdev_id);

		if (ast_entry) {
			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
				dp_peer_del_ast(soc, ast_entry);
		}
		qdf_spin_unlock_bh(&soc->ast_lock);
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entry
 * @soc_hdl:		Datapath SOC handle
 * @vdev_id:		id of vdev object
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_wds_reset_ast_table_wifi3(struct cdp_soc_t *soc_hdl,
			     uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	if (soc->ast_offload_support)
		return QDF_STATUS_SUCCESS;

	qdf_spin_lock_bh(&soc->ast_lock);

	dp_soc_iterate_peer(soc, dp_peer_reset_ast_entries, NULL,
			    DP_MOD_ID_CDP);
	qdf_spin_unlock_bh(&soc->ast_lock);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_peer_flush_ast_entries() - Delete all wds and hmwds ast entries of a peer
 * @soc:		Datapath SOC
 * @peer:		Datapath peer
 * @arg:		arg to callback
 *
 * Return: None
 */
static void
dp_peer_flush_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
	struct dp_ast_entry *ase = NULL;
	struct dp_ast_entry *temp_ase;

	DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
		if ((ase->type ==
			CDP_TXRX_AST_TYPE_STATIC) ||
			(ase->type ==
			 CDP_TXRX_AST_TYPE_SELF) ||
			(ase->type ==
			 CDP_TXRX_AST_TYPE_STA_BSS))
			continue;
		dp_peer_del_ast(soc, ase);
	}
}

/*
 * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entry
 * @soc_hdl:		Datapath SOC handle
 *
 * Return: None
 */
static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t *soc_hdl)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	qdf_spin_lock_bh(&soc->ast_lock);

	dp_soc_iterate_peer(soc, dp_peer_flush_ast_entries, NULL,
			    DP_MOD_ID_CDP);

	qdf_spin_unlock_bh(&soc->ast_lock);
	dp_peer_mec_flush_entries(soc);
}

#if defined(IPA_WDS_EASYMESH_FEATURE) && defined(FEATURE_AST)
/*
 * dp_peer_send_wds_disconnect() - Send Disconnect event to IPA for each peer
 * @soc: Datapath SOC
 * @peer: Datapath peer
 *
 * Return: None
 */
static void
dp_peer_send_wds_disconnect(struct dp_soc *soc, struct dp_peer *peer)
{
	struct dp_ast_entry *ase = NULL;
	struct dp_ast_entry *temp_ase;

	DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
		if (ase->type == CDP_TXRX_AST_TYPE_WDS) {
			soc->cdp_soc.ol_ops->peer_send_wds_disconnect(soc->ctrl_psoc,
								      ase->mac_addr.raw,
								      ase->vdev_id);
		}
	}
}
#elif defined(FEATURE_AST)
static void
dp_peer_send_wds_disconnect(struct dp_soc *soc, struct dp_peer *peer)
{
}
#endif

/**
 * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
 *                                       and return ast entry information
 *                                       of first ast entry found in the
 *                                       table with given mac address
 * @soc_hdl: data path soc handle
 * @ast_mac_addr: AST entry mac address
 * @ast_entry_info: ast entry information
 *
 * Return: true if ast entry found with ast_mac_addr
 *         false if ast entry not found
 */
static bool dp_peer_get_ast_info_by_soc_wifi3
	(struct cdp_soc_t *soc_hdl,
	 uint8_t *ast_mac_addr,
	 struct cdp_ast_entry_info *ast_entry_info)
{
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_peer *peer = NULL;

	if (soc->ast_offload_support)
		return false;

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
	if ((!ast_entry) ||
	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}

	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
				     DP_MOD_ID_AST);
	if (!peer) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}

	ast_entry_info->type = ast_entry->type;
	ast_entry_info->pdev_id = ast_entry->pdev_id;
	ast_entry_info->vdev_id = ast_entry->vdev_id;
	ast_entry_info->peer_id = ast_entry->peer_id;
	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
		     &peer->mac_addr.raw[0],
		     QDF_MAC_ADDR_SIZE);
	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
	qdf_spin_unlock_bh(&soc->ast_lock);
	return true;
}

/**
 * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
 *                                          and return ast entry information
 *                                          if mac address and pdev_id matches
 * @soc_hdl: data path soc handle
 * @ast_mac_addr: AST entry mac address
 * @pdev_id: pdev_id
 * @ast_entry_info: ast entry information
 *
 * Return: true if ast entry found with ast_mac_addr
 *         false if ast entry not found
 */
static bool dp_peer_get_ast_info_by_pdevid_wifi3
		(struct cdp_soc_t *soc_hdl,
		 uint8_t *ast_mac_addr,
		 uint8_t pdev_id,
		 struct cdp_ast_entry_info *ast_entry_info)
{
	struct dp_ast_entry *ast_entry;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_peer *peer = NULL;

	if (soc->ast_offload_support)
		return false;

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr,
						    pdev_id);

	if ((!ast_entry) ||
	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}

	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
				     DP_MOD_ID_AST);
	if (!peer) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}

	ast_entry_info->type = ast_entry->type;
	ast_entry_info->pdev_id = ast_entry->pdev_id;
	ast_entry_info->vdev_id = ast_entry->vdev_id;
	ast_entry_info->peer_id = ast_entry->peer_id;
	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
		     &peer->mac_addr.raw[0],
		     QDF_MAC_ADDR_SIZE);
	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
	qdf_spin_unlock_bh(&soc->ast_lock);
	return true;
}

/**
 * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table
 *                                  with given mac address
 * @soc_handle: data path soc handle
 * @mac_addr: AST entry mac address
 * @callback: callback function to be called on ast delete response from FW
 * @cookie: argument to be passed to callback
 *
 * Return: QDF_STATUS_SUCCESS if ast entry found with mac_addr and delete
 *         is sent
 *         QDF_STATUS_E_INVAL if ast entry not found
 */
static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
					       uint8_t *mac_addr,
					       txrx_ast_free_cb callback,
					       void *cookie)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_ast_entry *ast_entry = NULL;
	txrx_ast_free_cb cb = NULL;
	void *arg = NULL;

	if (soc->ast_offload_support)
		return -QDF_STATUS_E_INVAL;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return -QDF_STATUS_E_INVAL;
	}

	if (ast_entry->callback) {
		cb = ast_entry->callback;
		arg = ast_entry->cookie;
	}

	ast_entry->callback = callback;
	ast_entry->cookie = cookie;

	/*
	 * If delete_in_progress is set, an AST delete was already sent to
	 * the target and the host is waiting for a response; do not send
	 * the delete again.
	 */
	if (!ast_entry->delete_in_progress)
		dp_peer_del_ast(soc, ast_entry);

	qdf_spin_unlock_bh(&soc->ast_lock);
	if (cb) {
		cb(soc->ctrl_psoc,
		   dp_soc_to_cdp_soc(soc),
		   arg,
		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
	}
	return QDF_STATUS_SUCCESS;
}

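/*
 * Illustrative sketch (not part of the driver): a caller-supplied
 * txrx_ast_free_cb, assuming the prototype matches how the callback is
 * invoked above (ctrl psoc, cdp soc, cookie, status):
 *
 *	static void my_ast_free_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
 *				   struct cdp_soc *soc, void *cookie,
 *				   enum cdp_ast_free_status status)
 *	{
 *		... release the context held in cookie ...
 *	}
 *
 *	dp_peer_ast_entry_del_by_soc(soc_handle, mac, my_ast_free_cb, ctx);
 *
 * Note that a callback it replaces is fired immediately with
 * CDP_TXRX_AST_DELETE_IN_PROGRESS, as done above.
 */
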
/**
 * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
 *                                   table if mac address and pdev_id matches
 * @soc_handle: data path soc handle
 * @mac_addr: AST entry mac address
 * @pdev_id: pdev id
 * @callback: callback function to be called on ast delete response from FW
 * @cookie: argument to be passed to callback
 *
 * Return: QDF_STATUS_SUCCESS if ast entry found with mac_addr and delete
 *         is sent
 *         QDF_STATUS_E_INVAL if ast entry not found
 */
static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
						uint8_t *mac_addr,
						uint8_t pdev_id,
						txrx_ast_free_cb callback,
						void *cookie)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_ast_entry *ast_entry;
	txrx_ast_free_cb cb = NULL;
	void *arg = NULL;

	if (soc->ast_offload_support)
		return -QDF_STATUS_E_INVAL;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);

	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return -QDF_STATUS_E_INVAL;
	}

	if (ast_entry->callback) {
		cb = ast_entry->callback;
		arg = ast_entry->cookie;
	}

	ast_entry->callback = callback;
	ast_entry->cookie = cookie;

	/*
	 * If delete_in_progress is set, an AST delete was already sent to
	 * the target and the host is waiting for a response; do not send
	 * the delete again.
	 */
	if (!ast_entry->delete_in_progress)
		dp_peer_del_ast(soc, ast_entry);

	qdf_spin_unlock_bh(&soc->ast_lock);

	if (cb) {
		cb(soc->ctrl_psoc,
		   dp_soc_to_cdp_soc(soc),
		   arg,
		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
 * @ring_num: ring num of the ring being queried
 * @grp_mask: the grp_mask array for the ring type in question.
 *
 * The grp_mask array is indexed by group number and the bit fields correspond
 * to ring numbers.  We are finding which interrupt group a ring belongs to.
 *
 * Return: the index in the grp_mask array containing the ring number,
 * -QDF_STATUS_E_NOENT if no entry is found
 */
static int dp_srng_find_ring_in_mask(int ring_num, uint8_t *grp_mask)
{
	int ext_group_num;
	uint8_t mask = 1 << ring_num;

	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
	     ext_group_num++) {
		if (mask & grp_mask[ext_group_num])
			return ext_group_num;
	}

	return -QDF_STATUS_E_NOENT;
}

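/*
 * Worked example (illustrative only): for ring_num 2 the probe mask is
 * 1 << 2 = 0x4. With grp_mask = {0x3, 0x4, 0x8, ...}, group 0 misses
 * (0x3 & 0x4 == 0) and group 1 matches (0x4 & 0x4), so the ring is
 * serviced by interrupt context 1.
 */
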
/**
 * dp_is_msi_group_number_invalid() - check msi_group_number valid or not
 * @msi_group_number: MSI group number.
 * @msi_data_count: MSI data count.
 *
 * Return: true if msi_group_number is invalid.
 */
#ifdef WLAN_ONE_MSI_VECTOR
static bool dp_is_msi_group_number_invalid(int msi_group_number,
					   int msi_data_count)
{
	return false;
}
#else
static bool dp_is_msi_group_number_invalid(int msi_group_number,
					   int msi_data_count)
{
	return msi_group_number > msi_data_count;
}
#endif

#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * dp_is_reo_ring_num_in_nf_grp1() - Check if the current reo ring is part of
 *				rx_near_full_grp1 mask
 * @soc: Datapath SoC Handle
 * @ring_num: REO ring number
 *
 * Return: 1 if the ring_num belongs to reo_nf_grp1,
 *	   0, otherwise.
 */
static inline int
dp_is_reo_ring_num_in_nf_grp1(struct dp_soc *soc, int ring_num)
{
	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_1 & (1 << ring_num));
}

/**
 * dp_is_reo_ring_num_in_nf_grp2() - Check if the current reo ring is part of
 *				rx_near_full_grp2 mask
 * @soc: Datapath SoC Handle
 * @ring_num: REO ring number
 *
 * Return: 1 if the ring_num belongs to reo_nf_grp2,
 *	   0, otherwise.
 */
static inline int
dp_is_reo_ring_num_in_nf_grp2(struct dp_soc *soc, int ring_num)
{
	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_2 & (1 << ring_num));
}

/**
 * dp_srng_get_near_full_irq_mask() - Get near-full irq mask for a particular
 *				ring type and number
 * @soc: Datapath SoC handle
 * @ring_type: SRNG type
 * @ring_num: ring num
 *
 * Return: near full irq mask pointer
 */
static inline
uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
					enum hal_ring_type ring_type,
					int ring_num)
{
	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
	uint8_t wbm2_sw_rx_rel_ring_id;
	uint8_t *nf_irq_mask = NULL;

	switch (ring_type) {
	case WBM2SW_RELEASE:
		wbm2_sw_rx_rel_ring_id =
			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
		if (ring_num != wbm2_sw_rx_rel_ring_id) {
			nf_irq_mask = &soc->wlan_cfg_ctx->
					int_tx_ring_near_full_irq_mask[0];
		}
		break;
	case REO_DST:
		if (dp_is_reo_ring_num_in_nf_grp1(soc, ring_num))
			nf_irq_mask =
			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_1_mask[0];
		else if (dp_is_reo_ring_num_in_nf_grp2(soc, ring_num))
			nf_irq_mask =
			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_2_mask[0];
		else
			qdf_assert(0);
		break;
	default:
		break;
	}

	return nf_irq_mask;
}

/**
 * dp_srng_set_msi2_ring_params() - Set the msi2 addr/data in the ring params
 * @soc: Datapath SoC handle
 * @ring_params: srng params handle
 * @msi2_addr: MSI2 addr to be set for the SRNG
 * @msi2_data: MSI2 data to be set for the SRNG
 *
 * Return: None
 */
static inline
void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
				  struct hal_srng_params *ring_params,
				  qdf_dma_addr_t msi2_addr,
				  uint32_t msi2_data)
{
	ring_params->msi2_addr = msi2_addr;
	ring_params->msi2_data = msi2_data;
}

/**
 * dp_srng_msi2_setup() - Setup MSI2 details for near full IRQ of an SRNG
 * @soc: Datapath SoC handle
 * @ring_params: ring_params for SRNG
 * @ring_type: SRNG type
 * @ring_num: ring number for the SRNG
 * @nf_msi_grp_num: near full msi group number
 *
 * Return: None
 */
static inline void
dp_srng_msi2_setup(struct dp_soc *soc,
		   struct hal_srng_params *ring_params,
		   int ring_type, int ring_num, int nf_msi_grp_num)
{
	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
	int msi_data_count, ret;

	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);
	if (ret)
		return;

	if (nf_msi_grp_num < 0) {
		dp_init_info("%pK: ring near full IRQ not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi2_addr = 0;
		ring_params->msi2_data = 0;
		return;
	}

	if (dp_is_msi_group_number_invalid(nf_msi_grp_num, msi_data_count)) {
		dp_init_warn("%pK: 2 msi_groups will share an msi for near full IRQ; msi_group_num %d",
			     soc, nf_msi_grp_num);
		QDF_ASSERT(0);
	}

	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

	ring_params->nf_irq_support = 1;
	ring_params->msi2_addr = addr_low;
	ring_params->msi2_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi2_data = (nf_msi_grp_num % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;
}

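/*
 * Worked example for the MSI2 programming above (illustrative only):
 * assume pld_get_user_msi_assignment() reported msi_data_count = 3 and
 * msi_data_start = 1. For nf_msi_grp_num = 7:
 *
 *	msi2_addr = addr_low | ((uint64_t)addr_high << 32);
 *	msi2_data = (7 % 3) + 1 = 2;
 *
 * i.e. interrupt groups beyond the vector count share MSI vectors
 * modulo msi_data_count, which is the sharing case the
 * dp_is_msi_group_number_invalid() warning above flags.
 */
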
/* Percentage of ring entries considered as nearly full */
#define DP_NF_HIGH_THRESH_PERCENTAGE	75
/* Percentage of ring entries considered as critically full */
#define DP_NF_CRIT_THRESH_PERCENTAGE	90
/* Percentage of ring entries considered as safe threshold */
#define DP_NF_SAFE_THRESH_PERCENTAGE	50

/**
 * dp_srng_configure_nf_interrupt_thresholds() - Configure the thresholds for
 *			near full irq
 * @soc: Datapath SoC handle
 * @ring_params: ring params for SRNG
 * @ring_type: ring type
 *
 * Return: None
 */
static inline void
dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
					  struct hal_srng_params *ring_params,
					  int ring_type)
{
	if (ring_params->nf_irq_support) {
		ring_params->high_thresh = (ring_params->num_entries *
					    DP_NF_HIGH_THRESH_PERCENTAGE) / 100;
		ring_params->crit_thresh = (ring_params->num_entries *
					    DP_NF_CRIT_THRESH_PERCENTAGE) / 100;
		ring_params->safe_thresh = (ring_params->num_entries *
					    DP_NF_SAFE_THRESH_PERCENTAGE) / 100;
	}
}

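/*
 * Worked example (illustrative only): a ring with num_entries = 1024
 * gets high_thresh = 768 (75%), crit_thresh = 921 (90%, integer
 * division) and safe_thresh = 512 (50%). Reading the threshold names,
 * high/crit mark near-full occupancy levels while safe is the level at
 * which the ring is considered drained again.
 */
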
/**
 * dp_srng_set_nf_thresholds() - Set the near full thresholds to srng data
 *			structure from the ring params
 * @soc: Datapath SoC handle
 * @srng: SRNG handle
 * @ring_params: ring params for a SRNG
 *
 * Return: None
 */
static inline void
dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
			  struct hal_srng_params *ring_params)
{
	srng->crit_thresh = ring_params->crit_thresh;
	srng->safe_thresh = ring_params->safe_thresh;
}

#else
static inline
uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
					enum hal_ring_type ring_type,
					int ring_num)
{
	return NULL;
}

static inline
void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
				  struct hal_srng_params *ring_params,
				  qdf_dma_addr_t msi2_addr,
				  uint32_t msi2_data)
{
}

static inline void
dp_srng_msi2_setup(struct dp_soc *soc,
		   struct hal_srng_params *ring_params,
		   int ring_type, int ring_num, int nf_msi_grp_num)
{
}

static inline void
dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
					  struct hal_srng_params *ring_params,
					  int ring_type)
{
}

static inline void
dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
			  struct hal_srng_params *ring_params)
{
}
#endif

static int dp_srng_calculate_msi_group(struct dp_soc *soc,
				       enum hal_ring_type ring_type,
				       int ring_num,
				       int *reg_msi_grp_num,
				       bool nf_irq_support,
				       int *nf_msi_grp_num)
{
	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
	uint8_t *grp_mask, *nf_irq_mask = NULL;
	bool nf_irq_enabled = false;
	uint8_t wbm2_sw_rx_rel_ring_id;

	switch (ring_type) {
	case WBM2SW_RELEASE:
		wbm2_sw_rx_rel_ring_id =
			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
		if (ring_num == wbm2_sw_rx_rel_ring_id) {
			/* dp_rx_wbm_err_process - soc->rx_rel_ring */
			grp_mask = &cfg_ctx->int_rx_wbm_rel_ring_mask[0];
			ring_num = 0;
		} else { /* dp_tx_comp_handler - soc->tx_comp_ring */
			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
			nf_irq_mask = dp_srng_get_near_full_irq_mask(soc,
								     ring_type,
								     ring_num);
			if (nf_irq_mask)
				nf_irq_enabled = true;

			/*
			 * Using ring 4 as 4th tx completion ring since ring 3
			 * is Rx error ring
			 */
			if (ring_num == WBM2SW_TXCOMP_RING4_NUM)
				ring_num = TXCOMP_RING4_NUM;
		}
	break;

	case REO_EXCEPTION:
		/* dp_rx_err_process - &soc->reo_exception_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
	break;

	case REO_DST:
		/* dp_rx_process - soc->reo_dest_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
		nf_irq_mask = dp_srng_get_near_full_irq_mask(soc, ring_type,
							     ring_num);
		if (nf_irq_mask)
			nf_irq_enabled = true;
	break;

	case REO_STATUS:
		/* dp_reo_status_ring_handler - soc->reo_status_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
	break;

	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
	case RXDMA_MONITOR_STATUS:
	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
	case RXDMA_MONITOR_DST:
		/* dp_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
	break;
	case TX_MONITOR_DST:
		/* dp_tx_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_tx_mon_ring_mask[0];
	break;
	case RXDMA_DST:
		/* dp_rxdma_err_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
	break;

	case RXDMA_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
	break;

	case RXDMA_MONITOR_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
	break;

	case TX_MONITOR_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2txmon_ring_mask[0];
	break;

	case TCL_DATA:
	/* CMD_CREDIT_RING is used as command in 8074 and credit in 9000 */
	case TCL_CMD_CREDIT:
	case REO_CMD:
	case SW2WBM_RELEASE:
	case WBM_IDLE_LINK:
		/* normally empty SW_TO_HW rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case TCL_STATUS:
	case REO_REINJECT:
		/* misc unused rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case CE_SRC:
	case CE_DST:
	case CE_DST_STATUS:
		/* CE_rings - currently handled by hif */
	default:
		return -QDF_STATUS_E_NOENT;
	break;
	}

	*reg_msi_grp_num = dp_srng_find_ring_in_mask(ring_num, grp_mask);

	if (nf_irq_support && nf_irq_enabled) {
		*nf_msi_grp_num = dp_srng_find_ring_in_mask(ring_num,
							    nf_irq_mask);
	}

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_get_num_msi_available() - API to get number of MSIs available
 * @dp_soc: DP soc Handle
 * @interrupt_mode: Mode of interrupts
 *
 * Return: Number of MSIs available or 0 in case of integrated
 */
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
{
	return 0;
}
#else
/*
 * dp_get_num_msi_available() - API to get number of MSIs available
 * @dp_soc: DP soc Handle
 * @interrupt_mode: Mode of interrupts
 *
 * Return: Number of MSIs available or 0 in case of integrated
 */
static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
{
	int msi_data_count;
	int msi_data_start;
	int msi_irq_start;
	int ret;

	if (interrupt_mode == DP_INTR_INTEGRATED) {
		return 0;
	} else if (interrupt_mode == DP_INTR_MSI || interrupt_mode ==
		   DP_INTR_POLL) {
		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
						  &msi_data_count,
						  &msi_data_start,
						  &msi_irq_start);
		if (ret) {
			qdf_err("Unable to get DP MSI assignment %d",
				interrupt_mode);
			return -EINVAL;
		}
		return msi_data_count;
	}
	qdf_err("Interrupt mode invalid %d", interrupt_mode);
	return -EINVAL;
}
#endif

static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
			      *ring_params, int ring_type, int ring_num)
{
	int reg_msi_grp_num;
	/*
	 * nf_msi_grp_num needs to be initialized with negative value,
	 * to avoid configuring near-full msi for WBM2SW3 ring
	 */
	int nf_msi_grp_num = -1;
	int msi_data_count;
	int ret;
	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
	bool nf_irq_support;

	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);

	if (ret)
		return;

	nf_irq_support = hal_srng_is_near_full_irq_supported(soc->hal_soc,
							     ring_type,
							     ring_num);
	ret = dp_srng_calculate_msi_group(soc, ring_type, ring_num,
					  &reg_msi_grp_num,
					  nf_irq_support,
					  &nf_msi_grp_num);
	if (ret < 0) {
		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
		return;
	}

	if (reg_msi_grp_num < 0) {
		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		goto configure_msi2;
	}

	if (dp_is_msi_group_number_invalid(reg_msi_grp_num, msi_data_count)) {
		dp_init_warn("%pK: 2 msi_groups will share an msi; msi_group_num %d",
			     soc, reg_msi_grp_num);
		QDF_ASSERT(0);
	}

	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = (reg_msi_grp_num % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;

	dp_debug("ring type %u ring_num %u msi->data %u msi_addr %llx",
		 ring_type, ring_num, ring_params->msi_data,
		 (uint64_t)ring_params->msi_addr);

configure_msi2:
	if (!nf_irq_support) {
		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
		return;
	}

	dp_srng_msi2_setup(soc, ring_params, ring_type, ring_num,
			   nf_msi_grp_num);
}

1712 #ifdef FEATURE_AST
1713 /**
1714  * dp_print_mlo_ast_stats() - Print AST stats for MLO peers
1715  *
1716  * @soc : core DP soc context
1717  *
1718  * Return: void
1719  */
1720 void dp_print_mlo_ast_stats(struct dp_soc *soc)
1721 {
1722 	if (soc->arch_ops.print_mlo_ast_stats)
1723 		soc->arch_ops.print_mlo_ast_stats(soc);
1724 }
1725 
1726 /**
1727  * dp_print_peer_ast_entries() - Dump AST entries of peer
1728  * @soc: Datapath soc handle
1729  * @peer: Datapath peer
1730  * @arg: argument to iterate function
1731  *
1732  * return void
1733  */
1734 void
1735 dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
1736 {
1737 	struct dp_ast_entry *ase, *tmp_ase;
1738 	uint32_t num_entries = 0;
1739 	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
1740 			"NONE", "STATIC", "SELF", "WDS", "HMWDS", "BSS",
1741 			"DA", "HMWDS_SEC", "MLD"};
1742 
1743 	DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
1744 	    DP_PRINT_STATS("%6d mac_addr = "QDF_MAC_ADDR_FMT
1745 		    " peer_mac_addr = "QDF_MAC_ADDR_FMT
1746 		    " peer_id = %u"
1747 		    " type = %s"
1748 		    " next_hop = %d"
1749 		    " is_active = %d"
1750 		    " ast_idx = %d"
1751 		    " ast_hash = %d"
1752 		    " delete_in_progress = %d"
1753 		    " pdev_id = %d"
1754 		    " vdev_id = %d",
1755 		    ++num_entries,
1756 		    QDF_MAC_ADDR_REF(ase->mac_addr.raw),
1757 		    QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1758 		    ase->peer_id,
1759 		    type[ase->type],
1760 		    ase->next_hop,
1761 		    ase->is_active,
1762 		    ase->ast_idx,
1763 		    ase->ast_hash_value,
1764 		    ase->delete_in_progress,
1765 		    ase->pdev_id,
1766 		    ase->vdev_id);
1767 	}
1768 }
1769 
1770 /**
1771  * dp_print_ast_stats() - Dump AST table contents
1772  * @soc: Datapath soc handle
1773  *
1774  * return void
1775  */
1776 void dp_print_ast_stats(struct dp_soc *soc)
1777 {
1778 	DP_PRINT_STATS("AST Stats:");
1779 	DP_PRINT_STATS("	Entries Added        = %d", soc->stats.ast.added);
1780 	DP_PRINT_STATS("	Entries Deleted      = %d", soc->stats.ast.deleted);
1781 	DP_PRINT_STATS("	Entries Aged-out     = %d", soc->stats.ast.aged_out);
1782 	DP_PRINT_STATS("	Entries MAP ERR      = %d", soc->stats.ast.map_err);
1783 	DP_PRINT_STATS("	Entries Mismatch ERR = %d",
1784 		       soc->stats.ast.ast_mismatch);
1785 
1786 	DP_PRINT_STATS("AST Table:");
1787 
1788 	qdf_spin_lock_bh(&soc->ast_lock);
1789 
1790 	dp_soc_iterate_peer(soc, dp_print_peer_ast_entries, NULL,
1791 			    DP_MOD_ID_GENERIC_STATS);
1792 
1793 	qdf_spin_unlock_bh(&soc->ast_lock);
1794 
1795 	dp_print_mlo_ast_stats(soc);
1796 }
1797 #else
1798 void dp_print_ast_stats(struct dp_soc *soc)
1799 {
1800 	DP_PRINT_STATS("AST Stats not available. Enable FEATURE_AST");
1801 	return;
1802 }
1803 #endif
1804 
1805 /**
1806  * dp_print_peer_info() - Dump peer info
1807  * @soc: Datapath soc handle
1808  * @peer: Datapath peer handle
1809  * @arg: argument to iter function
1810  *
1811  * Return: void
1812  */
1813 static void
1814 dp_print_peer_info(struct dp_soc *soc, struct dp_peer *peer, void *arg)
1815 {
1816 	struct dp_txrx_peer *txrx_peer = NULL;
1817 
1818 	txrx_peer = dp_get_txrx_peer(peer);
1819 	if (!txrx_peer)
1820 		return;
1821 
1822 	DP_PRINT_STATS(" peer id = %d"
1823 		       " peer_mac_addr = "QDF_MAC_ADDR_FMT
1824 		       " nawds_enabled = %d"
1825 		       " bss_peer = %d"
1826 		       " wds_enabled = %d"
1827 		       " tx_cap_enabled = %d"
1828 		       " rx_cap_enabled = %d",
1829 		       peer->peer_id,
1830 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1831 		       txrx_peer->nawds_enabled,
1832 		       txrx_peer->bss_peer,
1833 		       txrx_peer->wds_enabled,
1834 		       dp_monitor_is_tx_cap_enabled(peer),
1835 		       dp_monitor_is_rx_cap_enabled(peer));
1836 }
1837 
1838 /**
1839  * dp_print_peer_table() - Dump all Peer stats
1840  * @vdev: Datapath Vdev handle
1841  *
1842  * Return: void
1843  */
1844 static void dp_print_peer_table(struct dp_vdev *vdev)
1845 {
1846 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
1847 	dp_vdev_iterate_peer(vdev, dp_print_peer_info, NULL,
1848 			     DP_MOD_ID_GENERIC_STATS);
1849 }
1850 
1851 #ifdef WLAN_DP_PER_RING_TYPE_CONFIG
1852 /**
1853  * dp_srng_configure_interrupt_thresholds() - Retrieve interrupt
1854  * threshold values from the wlan_srng_cfg table for each ring type
1855  * @soc: device handle
1856  * @ring_params: per ring specific parameters
1857  * @ring_type: Ring type
1858  * @ring_num: Ring number for a given ring type
 * @num_entries: Number of entries in the ring
1859  *
1860  * Fill the ring params with the interrupt threshold
1861  * configuration parameters available in the per ring type wlan_srng_cfg
1862  * table.
1863  *
1864  * Return: None
1865  */
1866 static void
1867 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
1868 				       struct hal_srng_params *ring_params,
1869 				       int ring_type, int ring_num,
1870 				       int num_entries)
1871 {
1872 	uint8_t wbm2_sw_rx_rel_ring_id;
1873 
1874 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);
1875 
1876 	if (ring_type == REO_DST) {
1877 		ring_params->intr_timer_thres_us =
1878 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1879 		ring_params->intr_batch_cntr_thres_entries =
1880 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1881 	} else if (ring_type == WBM2SW_RELEASE &&
1882 		   (ring_num == wbm2_sw_rx_rel_ring_id)) {
1883 		ring_params->intr_timer_thres_us =
1884 				wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1885 		ring_params->intr_batch_cntr_thres_entries =
1886 				wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1887 	} else {
1888 		ring_params->intr_timer_thres_us =
1889 				soc->wlan_srng_cfg[ring_type].timer_threshold;
1890 		ring_params->intr_batch_cntr_thres_entries =
1891 				soc->wlan_srng_cfg[ring_type].batch_count_threshold;
1892 	}
1893 	ring_params->low_threshold =
1894 			soc->wlan_srng_cfg[ring_type].low_threshold;
1895 	if (ring_params->low_threshold)
1896 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1897 
1898 	dp_srng_configure_nf_interrupt_thresholds(soc, ring_params, ring_type);
1899 }
1900 #else
1901 static void
1902 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
1903 				       struct hal_srng_params *ring_params,
1904 				       int ring_type, int ring_num,
1905 				       int num_entries)
1906 {
1907 	uint8_t wbm2_sw_rx_rel_ring_id;
1908 
1909 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);
1910 
1911 	if (ring_type == REO_DST) {
1912 		ring_params->intr_timer_thres_us =
1913 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1914 		ring_params->intr_batch_cntr_thres_entries =
1915 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1916 	} else if (ring_type == WBM2SW_RELEASE &&
1917 		   (ring_num < wbm2_sw_rx_rel_ring_id ||
1918 		   ring_num == WBM2SW_TXCOMP_RING4_NUM)) {
1919 		ring_params->intr_timer_thres_us =
1920 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
1921 		ring_params->intr_batch_cntr_thres_entries =
1922 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
1923 	} else {
1924 		ring_params->intr_timer_thres_us =
1925 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1926 		ring_params->intr_batch_cntr_thres_entries =
1927 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1928 	}
1929 
1930 	/* These rings do not require an interrupt to the host; zero the thresholds */
1931 	switch (ring_type) {
1932 	case REO_REINJECT:
1933 	case REO_CMD:
1934 	case TCL_DATA:
1935 	case TCL_CMD_CREDIT:
1936 	case TCL_STATUS:
1937 	case WBM_IDLE_LINK:
1938 	case SW2WBM_RELEASE:
1939 	case PPE2TCL:
1940 	case SW2RXDMA_NEW:
1941 		ring_params->intr_timer_thres_us = 0;
1942 		ring_params->intr_batch_cntr_thres_entries = 0;
1943 		break;
1944 	}
1945 
1946 	/* Enable low threshold interrupts for rx buffer rings (regular and
1947 	 * monitor buffer rings).
1948 	 * TODO: See if this is required for any other ring
1949 	 */
1950 	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
1951 	    (ring_type == RXDMA_MONITOR_STATUS) ||
1952 	    (ring_type == TX_MONITOR_BUF)) {
1953 		/* TODO: Setting low threshold to 1/8th of ring size
1954 		 * see if this needs to be configurable
1955 		 */
1956 		ring_params->low_threshold = num_entries >> 3;
1957 		ring_params->intr_timer_thres_us =
1958 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1959 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1960 		ring_params->intr_batch_cntr_thres_entries = 0;
1961 	}
1962 
1963 	/* During initialisation monitor rings are only filled with
1964 	 * MON_BUF_MIN_ENTRIES entries. So low threshold needs to be set to
1965 	 * a value less than that. Low threshold value is reconfigured again
1966 	 * to 1/8th of the ring size when monitor vap is created.
1967 	 */
1968 	if (ring_type == RXDMA_MONITOR_BUF)
1969 		ring_params->low_threshold = MON_BUF_MIN_ENTRIES >> 1;
1970 
1971 	/* In case of PCI chipsets, we don't have PPDU end interrupts,
1972 	 * so the MONITOR STATUS ring is reaped by receiving MSI from srng.
1973 	 * Keep the batch threshold at 4 so that an interrupt is received
1974 	 * for every 4 entries on the MONITOR_STATUS ring
1975 	 */
1976 	if ((ring_type == RXDMA_MONITOR_STATUS) &&
1977 	    (soc->intr_mode == DP_INTR_MSI))
1978 		ring_params->intr_batch_cntr_thres_entries = 4;
1979 }
1980 #endif
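
/*
 * Worked example for the low-threshold sizing above (illustrative ring
 * size, not taken from any target config): a 2048-entry RXDMA_BUF ring
 * gets
 *
 *	low_threshold = 2048 >> 3 = 256 entries
 *
 * so the HAL raises a low-threshold interrupt when the refill ring runs
 * low, while batch counting stays disabled for that ring
 * (intr_batch_cntr_thres_entries = 0).
 */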
1981 
1982 #ifdef DP_MEM_PRE_ALLOC
1983 
1984 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
1985 			   size_t ctxt_size)
1986 {
1987 	void *ctxt_mem;
1988 
1989 	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_context) {
1990 		dp_warn("dp_prealloc_get_context null!");
1991 		goto dynamic_alloc;
1992 	}
1993 
1994 	ctxt_mem = soc->cdp_soc.ol_ops->dp_prealloc_get_context(ctxt_type,
1995 								ctxt_size);
1996 
1997 	if (ctxt_mem)
1998 		goto end;
1999 
2000 dynamic_alloc:
2001 	dp_info("Pre-alloc type %d, size %zu failed, need dynamic-alloc",
2002 		ctxt_type, ctxt_size);
2003 	ctxt_mem = qdf_mem_malloc(ctxt_size);
2004 end:
2005 	return ctxt_mem;
2006 }
2007 
2008 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
2009 			 void *vaddr)
2010 {
2011 	QDF_STATUS status;
2012 
2013 	if (soc->cdp_soc.ol_ops->dp_prealloc_put_context) {
2014 		status = soc->cdp_soc.ol_ops->dp_prealloc_put_context(
2015 								ctxt_type,
2016 								vaddr);
2017 	} else {
2018 		dp_warn("dp_prealloc_put_context null!");
2019 		status = QDF_STATUS_E_NOSUPPORT;
2020 	}
2021 
2022 	if (QDF_IS_STATUS_ERROR(status)) {
2023 		dp_info("Context type %d not pre-allocated", ctxt_type);
2024 		qdf_mem_free(vaddr);
2025 	}
2026 }
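
/*
 * Usage sketch for the context helpers above (hypothetical caller; the
 * DP_PDEV_TYPE id and error handling are illustrative only): memory from
 * dp_context_alloc_mem() must be returned via dp_context_free_mem() so a
 * pre-allocated context goes back to the pool rather than to the heap.
 *
 *	struct dp_pdev *pdev;
 *
 *	pdev = dp_context_alloc_mem(soc, DP_PDEV_TYPE, sizeof(*pdev));
 *	if (!pdev)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	dp_context_free_mem(soc, DP_PDEV_TYPE, pdev);
 */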
2027 
2028 static inline
2029 void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
2030 					   struct dp_srng *srng,
2031 					   uint32_t ring_type)
2032 {
2033 	void *mem;
2034 
2035 	qdf_assert(!srng->is_mem_prealloc);
2036 
2037 	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_consistent) {
2038 		dp_warn("dp_prealloc_get_consistent is null!");
2039 		goto qdf;
2040 	}
2041 
2042 	mem =
2043 		soc->cdp_soc.ol_ops->dp_prealloc_get_consistent
2044 						(&srng->alloc_size,
2045 						 &srng->base_vaddr_unaligned,
2046 						 &srng->base_paddr_unaligned,
2047 						 &srng->base_paddr_aligned,
2048 						 DP_RING_BASE_ALIGN, ring_type);
2049 
2050 	if (mem) {
2051 		srng->is_mem_prealloc = true;
2052 		goto end;
2053 	}
2054 qdf:
2055 	mem = qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
2056 						&srng->base_vaddr_unaligned,
2057 						&srng->base_paddr_unaligned,
2058 						&srng->base_paddr_aligned,
2059 						DP_RING_BASE_ALIGN);
2060 end:
2061 	dp_info("%s memory %pK dp_srng %pK ring_type %d alloc_size %d num_entries %d",
2062 		srng->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", mem,
2063 		srng, ring_type, srng->alloc_size, srng->num_entries);
2064 	return mem;
2065 }
2066 
2067 static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
2068 					       struct dp_srng *srng)
2069 {
2070 	if (srng->is_mem_prealloc) {
2071 		if (!soc->cdp_soc.ol_ops->dp_prealloc_put_consistent) {
2072 			dp_warn("dp_prealloc_put_consistent is null!");
2073 			QDF_BUG(0);
2074 			return;
2075 		}
2076 		soc->cdp_soc.ol_ops->dp_prealloc_put_consistent
2077 						(srng->alloc_size,
2078 						 srng->base_vaddr_unaligned,
2079 						 srng->base_paddr_unaligned);
2080 
2081 	} else {
2082 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2083 					srng->alloc_size,
2084 					srng->base_vaddr_unaligned,
2085 					srng->base_paddr_unaligned, 0);
2086 	}
2087 }
2088 
2089 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
2090 				   enum dp_desc_type desc_type,
2091 				   struct qdf_mem_multi_page_t *pages,
2092 				   size_t element_size,
2093 				   uint32_t element_num,
2094 				   qdf_dma_context_t memctxt,
2095 				   bool cacheable)
2096 {
2097 	if (!soc->cdp_soc.ol_ops->dp_get_multi_pages) {
2098 		dp_warn("dp_get_multi_pages is null!");
2099 		goto qdf;
2100 	}
2101 
2102 	pages->num_pages = 0;
2103 	pages->is_mem_prealloc = 0;
2104 	soc->cdp_soc.ol_ops->dp_get_multi_pages(desc_type,
2105 						element_size,
2106 						element_num,
2107 						pages,
2108 						cacheable);
2109 	if (pages->num_pages)
2110 		goto end;
2111 
2112 qdf:
2113 	qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size,
2114 				  element_num, memctxt, cacheable);
2115 end:
2116 	dp_info("%s desc_type %d element_size %d element_num %d cacheable %d",
2117 		pages->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc",
2118 		desc_type, (int)element_size, element_num, cacheable);
2119 }
2120 
2121 void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
2122 				  enum dp_desc_type desc_type,
2123 				  struct qdf_mem_multi_page_t *pages,
2124 				  qdf_dma_context_t memctxt,
2125 				  bool cacheable)
2126 {
2127 	if (pages->is_mem_prealloc) {
2128 		if (!soc->cdp_soc.ol_ops->dp_put_multi_pages) {
2129 			dp_warn("dp_put_multi_pages is null!");
2130 			QDF_BUG(0);
2131 			return;
2132 		}
2133 
2134 		soc->cdp_soc.ol_ops->dp_put_multi_pages(desc_type, pages);
2135 		qdf_mem_zero(pages, sizeof(*pages));
2136 	} else {
2137 		qdf_mem_multi_pages_free(soc->osdev, pages,
2138 					 memctxt, cacheable);
2139 	}
2140 }
2141 
2142 #else
2143 
2144 static inline
2145 void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
2146 					   struct dp_srng *srng,
2147 					   uint32_t ring_type)
2149 {
2150 	void *mem;
2151 
2152 	mem = qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
2153 					       &srng->base_vaddr_unaligned,
2154 					       &srng->base_paddr_unaligned,
2155 					       &srng->base_paddr_aligned,
2156 					       DP_RING_BASE_ALIGN);
2157 	if (mem)
2158 		qdf_mem_set(srng->base_vaddr_unaligned, 0, srng->alloc_size);
2159 
2160 	return mem;
2161 }
2162 
2163 static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
2164 					       struct dp_srng *srng)
2165 {
2166 	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2167 				srng->alloc_size,
2168 				srng->base_vaddr_unaligned,
2169 				srng->base_paddr_unaligned, 0);
2170 }
2171 
2172 #endif /* DP_MEM_PRE_ALLOC */
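
/*
 * Usage sketch for the descriptor page-pool helpers above (hypothetical
 * caller; the desc type and element count are illustrative): the same
 * pages struct is threaded through alloc and free, and is_mem_prealloc
 * records which backend must release the memory.
 *
 *	struct qdf_mem_multi_page_t pages = {0};
 *
 *	dp_desc_multi_pages_mem_alloc(soc, DP_TX_DESC_TYPE, &pages,
 *				      sizeof(struct dp_tx_desc_s),
 *				      num_elem, 0, false);
 *	if (!pages.num_pages)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	dp_desc_multi_pages_mem_free(soc, DP_TX_DESC_TYPE, &pages, 0, false);
 */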
2173 
2174 #ifdef QCA_SUPPORT_WDS_EXTENDED
2175 static bool dp_vdev_is_wds_ext_enabled(struct dp_vdev *vdev)
2176 {
2177 	return vdev->wds_ext_enabled;
2178 }
2179 #else
2180 static bool dp_vdev_is_wds_ext_enabled(struct dp_vdev *vdev)
2181 {
2182 	return false;
2183 }
2184 #endif
2185 
2186 void dp_pdev_update_fast_rx_flag(struct dp_soc *soc, struct dp_pdev *pdev)
2187 {
2188 	struct dp_vdev *vdev = NULL;
2189 	uint8_t rx_fast_flag = true;
2190 
2191 	if (wlan_cfg_is_rx_flow_tag_enabled(soc->wlan_cfg_ctx)) {
2192 		rx_fast_flag = false;
2193 		goto update_flag;
2194 	}
2195 
2196 	/* Check if protocol tagging is enabled */
2197 	if (pdev->is_rx_protocol_tagging_enabled) {
2198 		rx_fast_flag = false;
2199 		goto update_flag;
2200 	}
2201 
2202 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
2203 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
2204 		/* Check if any VDEV has NAWDS enabled */
2205 		if (vdev->nawds_enabled) {
2206 			rx_fast_flag = false;
2207 			break;
2208 		}
2209 
2210 		/* Check if any VDEV has multipass enabled */
2211 		if (vdev->multipass_en) {
2212 			rx_fast_flag = false;
2213 			break;
2214 		}
2215 
2216 		/* Check if any VDEV has mesh enabled */
2217 		if (vdev->mesh_vdev) {
2218 			rx_fast_flag = false;
2219 			break;
2220 		}
2221 
2222 		/* Check if any VDEV has WDS ext enabled */
2223 		if (dp_vdev_is_wds_ext_enabled(vdev)) {
2224 			rx_fast_flag = false;
2225 			break;
2226 		}
2227 	}
2228 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
2229 
2230 update_flag:
2231 	dp_init_info("Updated Rx fast flag to %u", rx_fast_flag);
2232 	pdev->rx_fast_flag = rx_fast_flag;
2233 }
2234 
2235 /*
2236  * dp_srng_free() - Free SRNG memory
2237  * @soc: Data path soc handle
2238  * @srng: SRNG pointer
2239  *
2240  * Return: None
2241  */
2242 void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng)
2243 {
2244 	if (srng->alloc_size && srng->base_vaddr_unaligned) {
2245 		if (!srng->cached) {
2246 			dp_srng_mem_free_consistent(soc, srng);
2247 		} else {
2248 			qdf_mem_free(srng->base_vaddr_unaligned);
2249 		}
2250 		srng->alloc_size = 0;
2251 		srng->base_vaddr_unaligned = NULL;
2252 	}
2253 	srng->hal_srng = NULL;
2254 }
2255 
2256 qdf_export_symbol(dp_srng_free);
2257 
2258 #ifdef DISABLE_MON_RING_MSI_CFG
2259 /*
2260  * dp_skip_msi_cfg() - Check if msi cfg has to be skipped for ring_type
2261  * @soc: DP soc handle
 * @ring_type: srng type
2262  *
2263  * Return: True if msi cfg should be skipped for srng type else false
2264  */
2265 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
2266 {
2267 	if (ring_type == RXDMA_MONITOR_STATUS)
2268 		return true;
2269 
2270 	return false;
2271 }
2272 #else
2273 #ifdef DP_CON_MON_MSI_ENABLED
2274 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
2275 {
2276 	if (soc->cdp_soc.ol_ops->get_con_mode &&
2277 	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE) {
2278 		if (ring_type == REO_DST || ring_type == RXDMA_DST)
2279 			return true;
2280 	} else if (ring_type == RXDMA_MONITOR_STATUS) {
2281 		return true;
2282 	}
2283 
2284 	return false;
2285 }
2286 #else
2287 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
2288 {
2289 	return false;
2290 }
2291 #endif /* DP_CON_MON_MSI_ENABLED */
2292 #endif /* DISABLE_MON_RING_MSI_CFG */
2293 
2294 #ifdef DP_UMAC_HW_RESET_SUPPORT
2295 static bool dp_check_umac_reset_in_progress(struct dp_soc *soc)
2296 {
2297 	return !!soc->umac_reset_ctx.intr_ctx_bkp;
2298 }
2299 #else
2300 static bool dp_check_umac_reset_in_progress(struct dp_soc *soc)
2301 {
2302 	return false;
2303 }
2304 #endif
2305 
2306 /*
2307  * dp_srng_init() - Initialize SRNG
2308  * @soc: Data path soc handle
2309  * @srng: SRNG pointer
2310  * @ring_type: Ring Type
2311  * @ring_num: Ring number
2312  * @mac_id: mac id
2313  *
2314  * Return: QDF_STATUS
2315  */
2316 QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng,
2317 			int ring_type, int ring_num, int mac_id)
2318 {
2319 	bool idle_check;
2320 
2321 	hal_soc_handle_t hal_soc = soc->hal_soc;
2322 	struct hal_srng_params ring_params;
2323 
2324 	if (srng->hal_srng) {
2325 		dp_init_err("%pK: Ring type: %d, num:%d is already initialized",
2326 			    soc, ring_type, ring_num);
2327 		return QDF_STATUS_SUCCESS;
2328 	}
2329 
2330 	/* memset the srng ring to zero */
2331 	qdf_mem_zero(srng->base_vaddr_unaligned, srng->alloc_size);
2332 
2333 	qdf_mem_zero(&ring_params, sizeof(struct hal_srng_params));
2334 	ring_params.ring_base_paddr = srng->base_paddr_aligned;
2335 	ring_params.ring_base_vaddr = srng->base_vaddr_aligned;
2336 
2337 	ring_params.num_entries = srng->num_entries;
2338 
2339 	dp_info("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
2340 		ring_type, ring_num,
2341 		(void *)ring_params.ring_base_vaddr,
2342 		(void *)ring_params.ring_base_paddr,
2343 		ring_params.num_entries);
2344 
2345 	if (soc->intr_mode == DP_INTR_MSI && !dp_skip_msi_cfg(soc, ring_type)) {
2346 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
2347 		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
2348 				 ring_type, ring_num);
2349 	} else {
2350 		ring_params.msi_data = 0;
2351 		ring_params.msi_addr = 0;
2352 		dp_srng_set_msi2_ring_params(soc, &ring_params, 0, 0);
2353 		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
2354 				 ring_type, ring_num);
2355 	}
2356 
2357 	dp_srng_configure_interrupt_thresholds(soc, &ring_params,
2358 					       ring_type, ring_num,
2359 					       srng->num_entries);
2360 
2361 	dp_srng_set_nf_thresholds(soc, srng, &ring_params);
2362 
2363 	if (srng->cached)
2364 		ring_params.flags |= HAL_SRNG_CACHED_DESC;
2365 
2366 	idle_check = dp_check_umac_reset_in_progress(soc);
2367 
2368 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
2369 					mac_id, &ring_params, idle_check);
2370 
2371 	if (!srng->hal_srng) {
2372 		dp_srng_free(soc, srng);
2373 		return QDF_STATUS_E_FAILURE;
2374 	}
2375 
2376 	return QDF_STATUS_SUCCESS;
2377 }
2378 
2379 qdf_export_symbol(dp_srng_init);
2380 
2381 /*
2382  * dp_srng_alloc() - Allocate memory for SRNG
2383  * @soc: Data path soc handle
2384  * @srng: SRNG pointer
2385  * @ring_type: Ring Type
2386  * @num_entries: Number of entries
2387  * @cached: cached flag variable
2388  *
2389  * Return: QDF_STATUS
2390  */
2391 QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
2392 			 int ring_type, uint32_t num_entries,
2393 			 bool cached)
2394 {
2395 	hal_soc_handle_t hal_soc = soc->hal_soc;
2396 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
2397 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
2398 
2399 	if (srng->base_vaddr_unaligned) {
2400 		dp_init_err("%pK: Ring type: %d, is already allocated",
2401 			    soc, ring_type);
2402 		return QDF_STATUS_SUCCESS;
2403 	}
2404 
2405 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
2406 	srng->hal_srng = NULL;
2407 	srng->alloc_size = num_entries * entry_size;
2408 	srng->num_entries = num_entries;
2409 	srng->cached = cached;
2410 
2411 	if (!cached) {
2412 		srng->base_vaddr_aligned =
2413 		    dp_srng_aligned_mem_alloc_consistent(soc,
2414 							 srng,
2415 							 ring_type);
2416 	} else {
2417 		srng->base_vaddr_aligned = qdf_aligned_malloc(
2418 					&srng->alloc_size,
2419 					&srng->base_vaddr_unaligned,
2420 					&srng->base_paddr_unaligned,
2421 					&srng->base_paddr_aligned,
2422 					DP_RING_BASE_ALIGN);
2423 	}
2424 
2425 	if (!srng->base_vaddr_aligned)
2426 		return QDF_STATUS_E_NOMEM;
2427 
2428 	return QDF_STATUS_SUCCESS;
2429 }
2430 
2431 qdf_export_symbol(dp_srng_alloc);
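
/*
 * Typical SRNG lifecycle built from the helpers above (sketch only; the
 * ring type, size and ids are illustrative). QDF_STATUS_SUCCESS is zero,
 * so the returns can be tested directly:
 *
 *	struct dp_srng srng = {0};
 *
 *	if (dp_srng_alloc(soc, &srng, REO_DST, 1024, false))
 *		return QDF_STATUS_E_NOMEM;
 *	if (dp_srng_init(soc, &srng, REO_DST, 0, 0))
 *		return QDF_STATUS_E_FAILURE;
 *	...
 *	dp_srng_deinit(soc, &srng, REO_DST, 0);
 *	dp_srng_free(soc, &srng);
 *
 * Note that dp_srng_init() already frees the ring memory on its own
 * failure path, so no explicit dp_srng_free() is needed in that branch.
 */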
2432 
2433 /*
2434  * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
2435  * @soc: DP SOC handle
2436  * @srng: source ring structure
2437  * @ring_type: type of ring
2438  * @ring_num: ring number
2439  *
2440  * Return: None
2441  */
2442 void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
2443 		    int ring_type, int ring_num)
2444 {
2445 	if (!srng->hal_srng) {
2446 		dp_init_err("%pK: Ring type: %d, num:%d not setup",
2447 			    soc, ring_type, ring_num);
2448 		return;
2449 	}
2450 
2451 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
2452 	srng->hal_srng = NULL;
2453 }
2454 
2455 qdf_export_symbol(dp_srng_deinit);
2456 
2457 /* TODO: Need this interface from HIF */
2458 void *hif_get_hal_handle(struct hif_opaque_softc *hif_handle);
2459 
2460 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
2461 int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
2462 			 hal_ring_handle_t hal_ring_hdl)
2463 {
2464 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2465 	uint32_t hp, tp;
2466 	uint8_t ring_id;
2467 
2468 	if (!int_ctx)
2469 		return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
2470 
2471 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
2472 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2473 
2474 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
2475 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_START);
2476 
2477 	return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
2478 }
2479 
2480 void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
2481 			hal_ring_handle_t hal_ring_hdl)
2482 {
2483 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2484 	uint32_t hp, tp;
2485 	uint8_t ring_id;
2486 
2487 	if (!int_ctx)
2488 		return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
2489 
2490 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
2491 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2492 
2493 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
2494 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_END);
2495 
2496 	return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
2497 }
2498 
2499 static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
2500 					      uint8_t hist_group_id)
2501 {
2502 	hif_record_event(dp_soc->hif_handle, hist_group_id,
2503 			 0, 0, 0, HIF_EVENT_TIMER_ENTRY);
2504 }
2505 
2506 static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
2507 					     uint8_t hist_group_id)
2508 {
2509 	hif_record_event(dp_soc->hif_handle, hist_group_id,
2510 			 0, 0, 0, HIF_EVENT_TIMER_EXIT);
2511 }
2512 #else
2513 
2514 static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
2515 					      uint8_t hist_group_id)
2516 {
2517 }
2518 
2519 static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
2520 					     uint8_t hist_group_id)
2521 {
2522 }
2523 
2524 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
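
/*
 * Pairing sketch for the access helpers above (hypothetical reap loop;
 * descriptor handling elided): every successful access_start must be
 * matched by an access_end so the SRNG pointers are committed and the
 * event history records both edges.
 *
 *	if (dp_srng_access_start(int_ctx, soc, hal_ring_hdl))
 *		return 0;
 *	while ((desc = hal_srng_dst_get_next(soc->hal_soc, hal_ring_hdl)))
 *		...process desc...;
 *	dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
 */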
2525 
2526 /*
2527  * dp_should_timer_irq_yield() - Decide if the bottom half should yield
2528  * @soc: DP soc handle
2529  * @work_done: work done in softirq context
2530  * @start_time: start time for the softirq
2531  *
2532  * Return: enum with yield code
2533  */
2534 enum timer_yield_status
2535 dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
2536 			  uint64_t start_time)
2537 {
2538 	uint64_t cur_time = qdf_get_log_timestamp();
2539 
2540 	if (!work_done)
2541 		return DP_TIMER_WORK_DONE;
2542 
2543 	if (cur_time - start_time > DP_MAX_TIMER_EXEC_TIME_TICKS)
2544 		return DP_TIMER_TIME_EXHAUST;
2545 
2546 	return DP_TIMER_NO_YIELD;
2547 }
2548 
2549 qdf_export_symbol(dp_should_timer_irq_yield);
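
/*
 * Consumption sketch for the yield helper above (mirrors the pattern in
 * dp_interrupt_timer() below; names are illustrative):
 *
 *	start_time = qdf_get_log_timestamp();
 *	while (yield == DP_TIMER_NO_YIELD) {
 *		...reap rings, accumulate total_work_done...
 *		yield = dp_should_timer_irq_yield(soc, total_work_done,
 *						  start_time);
 *	}
 *
 * On DP_TIMER_WORK_EXHAUST/DP_TIMER_TIME_EXHAUST the caller re-arms
 * quickly; otherwise it falls back to the normal poll period.
 */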
2550 
2551 static int dp_process_rxdma_dst_ring(struct dp_soc *soc,
2552 				     struct dp_intr *int_ctx,
2553 				     int mac_for_pdev,
2554 				     int total_budget)
2555 {
2556 	return dp_rxdma_err_process(int_ctx, soc, mac_for_pdev,
2557 				    total_budget);
2558 }
2559 
2560 /**
2561  * dp_process_lmac_rings() - Process LMAC rings
2562  * @int_ctx: interrupt context
2563  * @total_budget: budget of work which can be done
2564  *
2565  * Return: work done
2566  */
2567 static int dp_process_lmac_rings(struct dp_intr *int_ctx, int total_budget)
2568 {
2569 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2570 	struct dp_soc *soc = int_ctx->soc;
2571 	uint32_t remaining_quota = total_budget;
2572 	struct dp_pdev *pdev = NULL;
2573 	uint32_t work_done  = 0;
2574 	int budget = total_budget;
2575 	int ring = 0;
2576 
2577 	/* Process LMAC interrupts */
2578 	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
2579 		int mac_for_pdev = ring;
2580 
2581 		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
2582 		if (!pdev)
2583 			continue;
2584 		if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
2585 			work_done = dp_monitor_process(soc, int_ctx,
2586 						       mac_for_pdev,
2587 						       remaining_quota);
2588 			if (work_done)
2589 				intr_stats->num_rx_mon_ring_masks++;
2590 			budget -= work_done;
2591 			if (budget <= 0)
2592 				goto budget_done;
2593 			remaining_quota = budget;
2594 		}
2595 
2596 		if (int_ctx->tx_mon_ring_mask & (1 << mac_for_pdev)) {
2597 			work_done = dp_tx_mon_process(soc, int_ctx,
2598 						      mac_for_pdev,
2599 						      remaining_quota);
2600 			if (work_done)
2601 				intr_stats->num_tx_mon_ring_masks++;
2602 			budget -= work_done;
2603 			if (budget <= 0)
2604 				goto budget_done;
2605 			remaining_quota = budget;
2606 		}
2607 
2608 		if (int_ctx->rxdma2host_ring_mask &
2609 				(1 << mac_for_pdev)) {
2610 			work_done = dp_process_rxdma_dst_ring(soc, int_ctx,
2611 							      mac_for_pdev,
2612 							      remaining_quota);
2613 			if (work_done)
2614 				intr_stats->num_rxdma2host_ring_masks++;
2615 			budget -=  work_done;
2616 			if (budget <= 0)
2617 				goto budget_done;
2618 			remaining_quota = budget;
2619 		}
2620 
2621 		if (int_ctx->host2rxdma_ring_mask & (1 << mac_for_pdev)) {
2622 			union dp_rx_desc_list_elem_t *desc_list = NULL;
2623 			union dp_rx_desc_list_elem_t *tail = NULL;
2624 			struct dp_srng *rx_refill_buf_ring;
2625 			struct rx_desc_pool *rx_desc_pool;
2626 
2627 			rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
2628 			if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
2629 				rx_refill_buf_ring =
2630 					&soc->rx_refill_buf_ring[mac_for_pdev];
2631 			else
2632 				rx_refill_buf_ring =
2633 					&soc->rx_refill_buf_ring[pdev->lmac_id];
2634 
2635 			intr_stats->num_host2rxdma_ring_masks++;
2636 			dp_rx_buffers_lt_replenish_simple(soc, mac_for_pdev,
2637 							  rx_refill_buf_ring,
2638 							  rx_desc_pool,
2639 							  0,
2640 							  &desc_list,
2641 							  &tail);
2642 		}
2643 
2644 	}
2645 
2646 	if (int_ctx->host2rxdma_mon_ring_mask)
2647 		dp_rx_mon_buf_refill(int_ctx);
2648 
2649 	if (int_ctx->host2txmon_ring_mask)
2650 		dp_tx_mon_buf_refill(int_ctx);
2651 
2652 budget_done:
2653 	return total_budget - budget;
2654 }
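
/*
 * Budget arithmetic above, with illustrative numbers: given
 * total_budget = 64, a monitor pass that reaps 40 entries leaves
 * budget = 24 for the remaining lmac rings, and the caller receives
 * total_budget - budget = 40 as the work done, whether or not the
 * budget_done label was reached.
 */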
2655 
2656 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
2657 /**
2658  * dp_service_near_full_srngs() - Bottom half handler to process the near
2659  *				full IRQ on a SRNG
2660  * @dp_ctx: DP interrupt context handle
2661  * @dp_budget: Number of SRNGs which can be processed in a single attempt
2662  *		without rescheduling
2663  * @cpu: cpu id
2664  *
2665  * Return: remaining budget/quota for the soc device
2666  */
2667 static
2668 uint32_t dp_service_near_full_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
2669 {
2670 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2671 	struct dp_soc *soc = int_ctx->soc;
2672 
2673 	/*
2674 	 * dp_service_near_full_srngs arch ops should be initialized always
2675 	 * if the NEAR FULL IRQ feature is enabled.
2676 	 */
2677 	return soc->arch_ops.dp_service_near_full_srngs(soc, int_ctx,
2678 							dp_budget);
2679 }
2680 #endif
2681 
2682 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2683 
2684 /*
2685  * dp_srng_get_cpu() - Get the smp processor id for srng processing
2686  *
2687  * Return: smp processor id
2688  */
2689 static inline int dp_srng_get_cpu(void)
2690 {
2691 	return smp_processor_id();
2692 }
2693 
2694 /*
2695  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
2696  * @dp_ctx: DP interrupt context handle
2697  * @budget: Number of frames/descriptors that can be processed in one shot
2698  * @cpu: CPU on which this instance is running
2699  *
2700  * Return: remaining budget/quota for the soc device
2701  */
2702 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
2703 {
2704 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2705 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2706 	struct dp_soc *soc = int_ctx->soc;
2707 	int ring = 0;
2708 	int index;
2709 	uint32_t work_done  = 0;
2710 	int budget = dp_budget;
2711 	uint8_t tx_mask = int_ctx->tx_ring_mask;
2712 	uint8_t rx_mask = int_ctx->rx_ring_mask;
2713 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
2714 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
2715 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
2716 	uint32_t remaining_quota = dp_budget;
2717 
2718 	qdf_atomic_set_bit(cpu, &soc->service_rings_running);
2719 
2720 	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x\n",
2721 			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
2722 			 reo_status_mask,
2723 			 int_ctx->rx_mon_ring_mask,
2724 			 int_ctx->host2rxdma_ring_mask,
2725 			 int_ctx->rxdma2host_ring_mask);
2726 
2727 	/* Process Tx completion interrupts first to return back buffers */
2728 	for (index = 0; index < soc->num_tx_comp_rings; index++) {
2729 		if (!(1 << wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) & tx_mask))
2730 			continue;
2731 		work_done = dp_tx_comp_handler(int_ctx,
2732 					       soc,
2733 					       soc->tx_comp_ring[index].hal_srng,
2734 					       index, remaining_quota);
2735 		if (work_done) {
2736 			intr_stats->num_tx_ring_masks[index]++;
2737 			dp_verbose_debug("tx mask 0x%x index %d, budget %d, work_done %d",
2738 					 tx_mask, index, budget,
2739 					 work_done);
2740 		}
2741 		budget -= work_done;
2742 		if (budget <= 0)
2743 			goto budget_done;
2744 
2745 		remaining_quota = budget;
2746 	}
2747 
2748 	/* Process REO Exception ring interrupt */
2749 	if (rx_err_mask) {
2750 		work_done = dp_rx_err_process(int_ctx, soc,
2751 					      soc->reo_exception_ring.hal_srng,
2752 					      remaining_quota);
2753 
2754 		if (work_done) {
2755 			intr_stats->num_rx_err_ring_masks++;
2756 			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
2757 					 work_done, budget);
2758 		}
2759 
2760 		budget -=  work_done;
2761 		if (budget <= 0) {
2762 			goto budget_done;
2763 		}
2764 		remaining_quota = budget;
2765 	}
2766 
2767 	/* Process Rx WBM release ring interrupt */
2768 	if (rx_wbm_rel_mask) {
2769 		work_done = dp_rx_wbm_err_process(int_ctx, soc,
2770 						  soc->rx_rel_ring.hal_srng,
2771 						  remaining_quota);
2772 
2773 		if (work_done) {
2774 			intr_stats->num_rx_wbm_rel_ring_masks++;
2775 			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
2776 					 work_done, budget);
2777 		}
2778 
2779 		budget -=  work_done;
2780 		if (budget <= 0) {
2781 			goto budget_done;
2782 		}
2783 		remaining_quota = budget;
2784 	}
2785 
2786 	/* Process Rx interrupts */
2787 	if (rx_mask) {
2788 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
2789 			if (!(rx_mask & (1 << ring)))
2790 				continue;
2791 			work_done = soc->arch_ops.dp_rx_process(int_ctx,
2792 						  soc->reo_dest_ring[ring].hal_srng,
2793 						  ring,
2794 						  remaining_quota);
2795 			if (work_done) {
2796 				intr_stats->num_rx_ring_masks[ring]++;
2797 				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
2798 						 rx_mask, ring,
2799 						 work_done, budget);
2800 				budget -=  work_done;
2801 				if (budget <= 0)
2802 					goto budget_done;
2803 				remaining_quota = budget;
2804 			}
2805 		}
2806 	}
2807 
2808 	if (reo_status_mask) {
2809 		if (dp_reo_status_ring_handler(int_ctx, soc))
2810 			int_ctx->intr_stats.num_reo_status_ring_masks++;
2811 	}
2812 
2813 	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
2814 		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
2815 		if (work_done) {
2816 			budget -=  work_done;
2817 			if (budget <= 0)
2818 				goto budget_done;
2819 			remaining_quota = budget;
2820 		}
2821 	}
2822 
2823 	qdf_lro_flush(int_ctx->lro_ctx);
2824 	intr_stats->num_masks++;
2825 
2826 budget_done:
2827 	qdf_atomic_clear_bit(cpu, &soc->service_rings_running);
2828 
2829 	if (soc->notify_fw_callback)
2830 		soc->notify_fw_callback(soc);
2831 
2832 	return dp_budget - budget;
2833 }
2834 
2835 #else /* QCA_HOST_MODE_WIFI_DISABLED */
2836 
2837 /*
2838  * dp_srng_get_cpu() - Get the smp processor id for srng processing
2839  *
2840  * Return: smp processor id
2841  */
2842 static inline int dp_srng_get_cpu(void)
2843 {
2844 	return 0;
2845 }
2846 
2847 /*
2848  * dp_service_srngs() - Top level handler for DP Monitor Ring interrupts
2849  * @dp_ctx: DP interrupt context handle
2850  * @budget: Number of frames/descriptors that can be processed in one shot
 * @cpu: CPU on which this instance is running
2851  *
2852  * Return: remaining budget/quota for the soc device
2853  */
2854 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
2855 {
2856 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2857 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2858 	struct dp_soc *soc = int_ctx->soc;
2859 	uint32_t remaining_quota = dp_budget;
2860 	uint32_t work_done  = 0;
2861 	int budget = dp_budget;
2862 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
2863 
2864 	if (reo_status_mask) {
2865 		if (dp_reo_status_ring_handler(int_ctx, soc))
2866 			int_ctx->intr_stats.num_reo_status_ring_masks++;
2867 	}
2868 
2869 	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
2870 		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
2871 		if (work_done) {
2872 			budget -=  work_done;
2873 			if (budget <= 0)
2874 				goto budget_done;
2875 			remaining_quota = budget;
2876 		}
2877 	}
2878 
2879 	qdf_lro_flush(int_ctx->lro_ctx);
2880 	intr_stats->num_masks++;
2881 
2882 budget_done:
2883 	return dp_budget - budget;
2884 }
2885 
2886 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2887 
2888 /* dp_interrupt_timer() - timer poll for interrupts
2889  *
2890  * @arg: SoC Handle
2891  *
2892  * Return: None
2894  */
2895 static void dp_interrupt_timer(void *arg)
2896 {
2897 	struct dp_soc *soc = (struct dp_soc *) arg;
2898 	struct dp_pdev *pdev = soc->pdev_list[0];
2899 	enum timer_yield_status yield = DP_TIMER_NO_YIELD;
2900 	uint32_t work_done  = 0, total_work_done = 0;
2901 	int budget = 0xffff, i;
2902 	uint32_t remaining_quota = budget;
2903 	uint64_t start_time;
2904 	uint32_t lmac_id = DP_MON_INVALID_LMAC_ID;
2905 	uint8_t dp_intr_id = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
2906 	uint32_t lmac_iter;
2907 	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
2908 	enum reg_wifi_band mon_band;
2909 	int cpu = dp_srng_get_cpu();
2910 
2911 	/*
2912 	 * this logic puts all data path interfacing rings (UMAC/LMAC)
2913 	 * and monitor rings into polling mode when NSS offload is disabled
2914 	 */
2915 	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx) &&
2916 	    !wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
2917 		if (qdf_atomic_read(&soc->cmn_init_done)) {
2918 			for (i = 0; i < wlan_cfg_get_num_contexts(
2919 						soc->wlan_cfg_ctx); i++)
2920 				dp_service_srngs(&soc->intr_ctx[i], 0xffff,
2921 						 cpu);
2922 
2923 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
2924 		}
2925 		return;
2926 	}
2927 
2928 	if (!qdf_atomic_read(&soc->cmn_init_done))
2929 		return;
2930 
2931 	if (dp_monitor_is_chan_band_known(pdev)) {
2932 		mon_band = dp_monitor_get_chan_band(pdev);
2933 		lmac_id = pdev->ch_band_lmac_id_mapping[mon_band];
2934 		if (qdf_likely(lmac_id != DP_MON_INVALID_LMAC_ID)) {
2935 			dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id];
2936 			dp_srng_record_timer_entry(soc, dp_intr_id);
2937 		}
2938 	}
2939 
2940 	start_time = qdf_get_log_timestamp();
2941 	dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);
2942 
2943 	while (yield == DP_TIMER_NO_YIELD) {
2944 		for (lmac_iter = 0; lmac_iter < max_mac_rings; lmac_iter++) {
2945 			if (lmac_iter == lmac_id)
2946 				work_done = dp_monitor_process(soc,
2947 						&soc->intr_ctx[dp_intr_id],
2948 						lmac_iter, remaining_quota);
2949 			else
2950 				work_done =
2951 					dp_monitor_drop_packets_for_mac(pdev,
2952 							     lmac_iter,
2953 							     remaining_quota);
2954 			if (work_done) {
2955 				budget -=  work_done;
2956 				if (budget <= 0) {
2957 					yield = DP_TIMER_WORK_EXHAUST;
2958 					goto budget_done;
2959 				}
2960 				remaining_quota = budget;
2961 				total_work_done += work_done;
2962 			}
2963 		}
2964 
2965 		yield = dp_should_timer_irq_yield(soc, total_work_done,
2966 						  start_time);
2967 		total_work_done = 0;
2968 	}
2969 
2970 budget_done:
2971 	if (yield == DP_TIMER_WORK_EXHAUST ||
2972 	    yield == DP_TIMER_TIME_EXHAUST)
2973 		qdf_timer_mod(&soc->int_timer, 1);
2974 	else
2975 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
2976 
2977 	if (lmac_id != DP_MON_INVALID_LMAC_ID)
2978 		dp_srng_record_timer_exit(soc, dp_intr_id);
2979 }
2980 
2981 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
2982 static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
2983 					struct dp_intr *intr_ctx)
2984 {
2985 	if (intr_ctx->rx_mon_ring_mask)
2986 		return true;
2987 
2988 	return false;
2989 }
2990 #else
2991 static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
2992 					struct dp_intr *intr_ctx)
2993 {
2994 	return false;
2995 }
2996 #endif
2997 
2998 /*
2999  * dp_soc_attach_poll() - Register handlers for DP interrupts
3000  * @txrx_soc: DP SOC handle
3001  *
3002  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
3003  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
3004  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
3005  *
3006  * Return: 0 for success, nonzero for failure.
3007  */
3008 static QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc)
3009 {
3010 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3011 	int i;
3012 	int lmac_id = 0;
3013 
3014 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
3015 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
3016 	soc->intr_mode = DP_INTR_POLL;
3017 
3018 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
3019 		soc->intr_ctx[i].dp_intr_id = i;
3020 		soc->intr_ctx[i].tx_ring_mask =
3021 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
3022 		soc->intr_ctx[i].rx_ring_mask =
3023 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
3024 		soc->intr_ctx[i].rx_mon_ring_mask =
3025 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
3026 		soc->intr_ctx[i].rx_err_ring_mask =
3027 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
3028 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
3029 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
3030 		soc->intr_ctx[i].reo_status_ring_mask =
3031 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
3032 		soc->intr_ctx[i].rxdma2host_ring_mask =
3033 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
3034 		soc->intr_ctx[i].soc = soc;
3035 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
3036 
3037 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
3038 			hif_event_history_init(soc->hif_handle, i);
3039 			soc->mon_intr_id_lmac_map[lmac_id] = i;
3040 			lmac_id++;
3041 		}
3042 	}
3043 
3044 	qdf_timer_init(soc->osdev, &soc->int_timer,
3045 			dp_interrupt_timer, (void *)soc,
3046 			QDF_TIMER_TYPE_WAKE_APPS);
3047 
3048 	return QDF_STATUS_SUCCESS;
3049 }
3050 
3051 /**
3052  * dp_soc_set_interrupt_mode() - Set the interrupt mode in soc
3053  * @soc: DP soc handle
3054  *
3055  * Set the appropriate interrupt mode flag in the soc
3056  */
3057 static void dp_soc_set_interrupt_mode(struct dp_soc *soc)
3058 {
3059 	uint32_t msi_base_data, msi_vector_start;
3060 	int msi_vector_count, ret;
3061 
3062 	soc->intr_mode = DP_INTR_INTEGRATED;
3063 
3064 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
3065 	    (dp_is_monitor_mode_using_poll(soc) &&
3066 	     soc->cdp_soc.ol_ops->get_con_mode &&
3067 	     soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)) {
3068 		soc->intr_mode = DP_INTR_POLL;
3069 	} else {
3070 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
3071 						  &msi_vector_count,
3072 						  &msi_base_data,
3073 						  &msi_vector_start);
3074 		if (ret)
3075 			return;
3076 
3077 		soc->intr_mode = DP_INTR_MSI;
3078 	}
3079 }
3080 
3081 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc);
3082 #if defined(DP_INTR_POLL_BOTH)
3083 /*
3084  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
3085  * @txrx_soc: DP SOC handle
3086  *
3087  * Call the appropriate attach function based on the mode of operation.
3088  * This is a WAR for enabling monitor mode.
3089  *
3090  * Return: 0 for success. nonzero for failure.
3091  */
3092 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
3093 {
3094 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3095 
3096 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
3097 	    (dp_is_monitor_mode_using_poll(soc) &&
3098 	     soc->cdp_soc.ol_ops->get_con_mode &&
3099 	     soc->cdp_soc.ol_ops->get_con_mode() ==
3100 	     QDF_GLOBAL_MONITOR_MODE)) {
3101 		dp_info("Poll mode");
3102 		return dp_soc_attach_poll(txrx_soc);
3103 	} else {
3104 		dp_info("Interrupt mode");
3105 		return dp_soc_interrupt_attach(txrx_soc);
3106 	}
3107 }
3108 #else
3109 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
3110 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
3111 {
3112 	return dp_soc_attach_poll(txrx_soc);
3113 }
3114 #else
3115 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
3116 {
3117 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3118 
3119 	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx))
3120 		return dp_soc_attach_poll(txrx_soc);
3121 	else
3122 		return dp_soc_interrupt_attach(txrx_soc);
3123 }
3124 #endif
3125 #endif
3126 
3127 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
3128 /**
3129  * dp_soc_interrupt_map_calculate_wifi3_pci_legacy()
3130  * Calculate interrupt map for legacy interrupts
3131  * @soc: DP soc handle
3132  * @intr_ctx_num: Interrupt context number
3133  * @irq_id_map: IRQ map
3134  * @num_irq_r: Number of interrupts assigned for this context
3135  *
3136  * Return: void
3137  */
3138 static void dp_soc_interrupt_map_calculate_wifi3_pci_legacy(struct dp_soc *soc,
3139 							    int intr_ctx_num,
3140 							    int *irq_id_map,
3141 							    int *num_irq_r)
3142 {
3143 	int j;
3144 	int num_irq = 0;
3145 	int tx_mask = wlan_cfg_get_tx_ring_mask(
3146 					soc->wlan_cfg_ctx, intr_ctx_num);
3147 	int rx_mask = wlan_cfg_get_rx_ring_mask(
3148 					soc->wlan_cfg_ctx, intr_ctx_num);
3149 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
3150 					soc->wlan_cfg_ctx, intr_ctx_num);
3151 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
3152 					soc->wlan_cfg_ctx, intr_ctx_num);
3153 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
3154 					soc->wlan_cfg_ctx, intr_ctx_num);
3155 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
3156 					soc->wlan_cfg_ctx, intr_ctx_num);
3157 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
3158 					soc->wlan_cfg_ctx, intr_ctx_num);
3159 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
3160 					soc->wlan_cfg_ctx, intr_ctx_num);
3161 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
3162 					soc->wlan_cfg_ctx, intr_ctx_num);
3163 	soc->intr_mode = DP_INTR_LEGACY_VIRTUAL_IRQ;
3164 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
3165 		if (tx_mask & (1 << j))
3166 			irq_id_map[num_irq++] = (wbm2sw0_release - j);
3167 		if (rx_mask & (1 << j))
3168 			irq_id_map[num_irq++] = (reo2sw1_intr - j);
3169 		if (rx_mon_mask & (1 << j))
3170 			irq_id_map[num_irq++] = (rxmon2sw_p0_dest0 - j);
3171 		if (rx_err_ring_mask & (1 << j))
3172 			irq_id_map[num_irq++] = (reo2sw0_intr - j);
3173 		if (rx_wbm_rel_ring_mask & (1 << j))
3174 			irq_id_map[num_irq++] = (wbm2sw5_release - j);
3175 		if (reo_status_ring_mask & (1 << j))
3176 			irq_id_map[num_irq++] = (reo_status - j);
3177 		if (rxdma2host_ring_mask & (1 << j))
3178 			irq_id_map[num_irq++] = (rxdma2sw_dst_ring0 - j);
3179 		if (host2rxdma_ring_mask & (1 << j))
3180 			irq_id_map[num_irq++] = (sw2rxdma_0 - j);
3181 		if (host2rxdma_mon_ring_mask & (1 << j))
3182 			irq_id_map[num_irq++] = (sw2rxmon_src_ring - j);
3183 	}
3184 	*num_irq_r = num_irq;
3185 }
3186 #else
3187 /**
3188  * dp_soc_interrupt_map_calculate_wifi3_pci_legacy()
3189  * Calculate interrupt map for legacy interrupts
3190  * @soc: DP soc handle
3191  * @intr_ctx_num: Interrupt context number
3192  * @irq_id_map: IRQ map
3193  * @num_irq_r: Number of interrupts assigned for this context
3194  *
3195  * Return: void
3196  */
3197 static void dp_soc_interrupt_map_calculate_wifi3_pci_legacy(struct dp_soc *soc,
3198 							    int intr_ctx_num,
3199 							    int *irq_id_map,
3200 							    int *num_irq_r)
3201 {
3202 }
3203 #endif
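
/*
 * Mapping example for the legacy calculation above (illustrative mask):
 * with tx_mask = 0x3 in one context, bits j = 0 and j = 1 both match and
 * the context is handed two consecutive IRQ lines,
 *
 *	irq_id_map[0] = wbm2sw0_release - 0;
 *	irq_id_map[1] = wbm2sw0_release - 1;
 *
 * i.e. each set bit pulls in the IRQ of the corresponding ring instance.
 */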
3204 
3205 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
3206 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
3207 {
3208 	int j;
3209 	int num_irq = 0;
3210 
3211 	int tx_mask =
3212 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
3213 	int rx_mask =
3214 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
3215 	int rx_mon_mask =
3216 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
3217 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
3218 					soc->wlan_cfg_ctx, intr_ctx_num);
3219 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
3220 					soc->wlan_cfg_ctx, intr_ctx_num);
3221 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
3222 					soc->wlan_cfg_ctx, intr_ctx_num);
3223 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
3224 					soc->wlan_cfg_ctx, intr_ctx_num);
3225 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
3226 					soc->wlan_cfg_ctx, intr_ctx_num);
3227 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
3228 					soc->wlan_cfg_ctx, intr_ctx_num);
3229 
3230 	soc->intr_mode = DP_INTR_INTEGRATED;
3231 
3232 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
3233 
3234 		if (tx_mask & (1 << j)) {
3235 			irq_id_map[num_irq++] =
3236 				(wbm2host_tx_completions_ring1 - j);
3237 		}
3238 
3239 		if (rx_mask & (1 << j)) {
3240 			irq_id_map[num_irq++] =
3241 				(reo2host_destination_ring1 - j);
3242 		}
3243 
3244 		if (rxdma2host_ring_mask & (1 << j)) {
3245 			irq_id_map[num_irq++] =
3246 				rxdma2host_destination_ring_mac1 - j;
3247 		}
3248 
3249 		if (host2rxdma_ring_mask & (1 << j)) {
3250 			irq_id_map[num_irq++] =
3251 				host2rxdma_host_buf_ring_mac1 -	j;
3252 		}
3253 
3254 		if (host2rxdma_mon_ring_mask & (1 << j)) {
3255 			irq_id_map[num_irq++] =
3256 				host2rxdma_monitor_ring1 - j;
3257 		}
3258 
3259 		if (rx_mon_mask & (1 << j)) {
3260 			irq_id_map[num_irq++] =
3261 				ppdu_end_interrupts_mac1 - j;
3262 			irq_id_map[num_irq++] =
3263 				rxdma2host_monitor_status_ring_mac1 - j;
3264 			irq_id_map[num_irq++] =
3265 				rxdma2host_monitor_destination_mac1 - j;
3266 		}
3267 
3268 		if (rx_wbm_rel_ring_mask & (1 << j))
3269 			irq_id_map[num_irq++] = wbm2host_rx_release;
3270 
3271 		if (rx_err_ring_mask & (1 << j))
3272 			irq_id_map[num_irq++] = reo2host_exception;
3273 
3274 		if (reo_status_ring_mask & (1 << j))
3275 			irq_id_map[num_irq++] = reo2host_status;
3276 
3277 	}
3278 	*num_irq_r = num_irq;
3279 }
3280 
3281 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
3282 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
3283 		int msi_vector_count, int msi_vector_start)
3284 {
3285 	int tx_mask = wlan_cfg_get_tx_ring_mask(
3286 					soc->wlan_cfg_ctx, intr_ctx_num);
3287 	int rx_mask = wlan_cfg_get_rx_ring_mask(
3288 					soc->wlan_cfg_ctx, intr_ctx_num);
3289 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
3290 					soc->wlan_cfg_ctx, intr_ctx_num);
3291 	int tx_mon_mask = wlan_cfg_get_tx_mon_ring_mask(
3292 					soc->wlan_cfg_ctx, intr_ctx_num);
3293 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
3294 					soc->wlan_cfg_ctx, intr_ctx_num);
3295 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
3296 					soc->wlan_cfg_ctx, intr_ctx_num);
3297 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
3298 					soc->wlan_cfg_ctx, intr_ctx_num);
3299 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
3300 					soc->wlan_cfg_ctx, intr_ctx_num);
3301 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
3302 					soc->wlan_cfg_ctx, intr_ctx_num);
3303 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
3304 					soc->wlan_cfg_ctx, intr_ctx_num);
3305 	int rx_near_full_grp_1_mask =
3306 		wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
3307 						     intr_ctx_num);
3308 	int rx_near_full_grp_2_mask =
3309 		wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
3310 						     intr_ctx_num);
3311 	int tx_ring_near_full_mask =
3312 		wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
3313 						    intr_ctx_num);
3314 
3315 	int host2txmon_ring_mask =
3316 		wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx,
3317 						  intr_ctx_num);
3318 	unsigned int vector =
3319 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
3320 	int num_irq = 0;
3321 
3322 	soc->intr_mode = DP_INTR_MSI;
3323 
3324 	if (tx_mask | rx_mask | rx_mon_mask | tx_mon_mask | rx_err_ring_mask |
3325 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask |
3326 	    host2rxdma_ring_mask | host2rxdma_mon_ring_mask |
3327 	    rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
3328 	    tx_ring_near_full_mask | host2txmon_ring_mask)
3329 		irq_id_map[num_irq++] =
3330 			pld_get_msi_irq(soc->osdev->dev, vector);
3331 
3332 	*num_irq_r = num_irq;
3333 }
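
/*
 * Worked example for the vector spreading above (illustrative MSI grant):
 * with msi_vector_count = 3 and msi_vector_start = 1, the interrupt
 * contexts land on
 *
 *	intr_ctx_num 0 -> (0 % 3) + 1 = vector 1
 *	intr_ctx_num 1 -> (1 % 3) + 1 = vector 2
 *	intr_ctx_num 2 -> (2 % 3) + 1 = vector 3
 *	intr_ctx_num 3 -> (3 % 3) + 1 = vector 1 (wraps)
 *
 * so all rings grouped into one context share that context's single
 * MSI vector.
 */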
3334 
3335 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
3336 				    int *irq_id_map, int *num_irq)
3337 {
3338 	int msi_vector_count, ret;
3339 	uint32_t msi_base_data, msi_vector_start;
3340 
3341 	if (pld_get_enable_intx(soc->osdev->dev)) {
3342 		return dp_soc_interrupt_map_calculate_wifi3_pci_legacy(soc,
3343 				intr_ctx_num, irq_id_map, num_irq);
3344 	}
3345 
3346 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
3347 					    &msi_vector_count,
3348 					    &msi_base_data,
3349 					    &msi_vector_start);
3350 	if (ret)
3351 		return dp_soc_interrupt_map_calculate_integrated(soc,
3352 				intr_ctx_num, irq_id_map, num_irq);
3353 
3354 	else
3355 		dp_soc_interrupt_map_calculate_msi(soc,
3356 				intr_ctx_num, irq_id_map, num_irq,
3357 				msi_vector_count, msi_vector_start);
3358 }
3359 
3360 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
3361 /**
3362  * dp_soc_near_full_interrupt_attach() - Register handler for DP near-full irq
3363  * @soc: DP soc handle
3364  * @num_irq: number of IRQs
3365  * @irq_id_map: IRQ map
3366  * @intr_id: interrupt context ID
3367  *
3368  * Return: 0 for success. nonzero for failure.
3369  */
3370 static inline int
3371 dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
3372 				  int irq_id_map[], int intr_id)
3373 {
3374 	return hif_register_ext_group(soc->hif_handle,
3375 				      num_irq, irq_id_map,
3376 				      dp_service_near_full_srngs,
3377 				      &soc->intr_ctx[intr_id], "dp_nf_intr",
3378 				      HIF_EXEC_NAPI_TYPE,
3379 				      QCA_NAPI_DEF_SCALE_BIN_SHIFT);
3380 }
3381 #else
3382 static inline int
3383 dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
3384 				  int *irq_id_map, int intr_id)
3385 {
3386 	return 0;
3387 }
3388 #endif
3389 
3390 #ifdef DP_CON_MON_MSI_SKIP_SET
3391 static inline bool dp_skip_rx_mon_ring_mask_set(struct dp_soc *soc)
3392 {
3393 	return !!(soc->cdp_soc.ol_ops->get_con_mode() !=
3394 			QDF_GLOBAL_MONITOR_MODE);
3395 }
3396 #else
3397 static inline bool dp_skip_rx_mon_ring_mask_set(struct dp_soc *soc)
3398 {
3399 	return false;
3400 }
3401 #endif
3402 
3403 /*
3404  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
3405  * @txrx_soc: DP SOC handle
3406  *
3407  * Return: none
3408  */
3409 static void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
3410 {
3411 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3412 	int i;
3413 
3414 	if (soc->intr_mode == DP_INTR_POLL) {
3415 		qdf_timer_free(&soc->int_timer);
3416 	} else {
3417 		hif_deconfigure_ext_group_interrupts(soc->hif_handle);
3418 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
3419 		hif_deregister_exec_group(soc->hif_handle, "dp_nf_intr");
3420 	}
3421 
3422 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
3423 		soc->intr_ctx[i].tx_ring_mask = 0;
3424 		soc->intr_ctx[i].rx_ring_mask = 0;
3425 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
3426 		soc->intr_ctx[i].rx_err_ring_mask = 0;
3427 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
3428 		soc->intr_ctx[i].reo_status_ring_mask = 0;
3429 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
3430 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
3431 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
3432 		soc->intr_ctx[i].rx_near_full_grp_1_mask = 0;
3433 		soc->intr_ctx[i].rx_near_full_grp_2_mask = 0;
3434 		soc->intr_ctx[i].tx_ring_near_full_mask = 0;
3435 		soc->intr_ctx[i].tx_mon_ring_mask = 0;
3436 		soc->intr_ctx[i].host2txmon_ring_mask = 0;
3437 		soc->intr_ctx[i].umac_reset_intr_mask = 0;
3438 
3439 		hif_event_history_deinit(soc->hif_handle, i);
3440 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
3441 	}
3442 
3443 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
3444 		    sizeof(soc->mon_intr_id_lmac_map),
3445 		    DP_MON_INVALID_LMAC_ID);
3446 }
3447 
3448 /*
3449  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
3450  * @txrx_soc: DP SOC handle
3451  *
3452  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
3453  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
3454  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
3455  *
3456  * Return: 0 for success. nonzero for failure.
3457  */
3458 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
3459 {
3460 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3461 
3462 	int i = 0;
3463 	int num_irq = 0;
3464 	int rx_err_ring_intr_ctxt_id = HIF_MAX_GROUP;
3465 	int lmac_id = 0;
3466 	int napi_scale;
3467 
3468 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
3469 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
3470 
3471 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
3472 		int ret = 0;
3473 
3474 		/* Map of IRQ ids registered with one interrupt context */
3475 		int irq_id_map[HIF_MAX_GRP_IRQ];
3476 
3477 		int tx_mask =
3478 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
3479 		int rx_mask =
3480 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
3481 		int rx_mon_mask =
3482 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
3483 		int tx_mon_ring_mask =
3484 			wlan_cfg_get_tx_mon_ring_mask(soc->wlan_cfg_ctx, i);
3485 		int rx_err_ring_mask =
3486 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
3487 		int rx_wbm_rel_ring_mask =
3488 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
3489 		int reo_status_ring_mask =
3490 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
3491 		int rxdma2host_ring_mask =
3492 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
3493 		int host2rxdma_ring_mask =
3494 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
3495 		int host2rxdma_mon_ring_mask =
3496 			wlan_cfg_get_host2rxdma_mon_ring_mask(
3497 				soc->wlan_cfg_ctx, i);
3498 		int rx_near_full_grp_1_mask =
3499 			wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
3500 							     i);
3501 		int rx_near_full_grp_2_mask =
3502 			wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
3503 							     i);
3504 		int tx_ring_near_full_mask =
3505 			wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
3506 							    i);
3507 		int host2txmon_ring_mask =
3508 			wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx, i);
3509 		int umac_reset_intr_mask =
3510 			wlan_cfg_get_umac_reset_intr_mask(soc->wlan_cfg_ctx, i);
3511 
3512 		if (dp_skip_rx_mon_ring_mask_set(soc))
3513 			rx_mon_mask = 0;
3514 
3515 		soc->intr_ctx[i].dp_intr_id = i;
3516 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
3517 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
3518 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
3519 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
3520 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
3521 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
3522 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
3523 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
3524 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
3525 			 host2rxdma_mon_ring_mask;
3526 		soc->intr_ctx[i].rx_near_full_grp_1_mask =
3527 						rx_near_full_grp_1_mask;
3528 		soc->intr_ctx[i].rx_near_full_grp_2_mask =
3529 						rx_near_full_grp_2_mask;
3530 		soc->intr_ctx[i].tx_ring_near_full_mask =
3531 						tx_ring_near_full_mask;
3532 		soc->intr_ctx[i].tx_mon_ring_mask = tx_mon_ring_mask;
3533 		soc->intr_ctx[i].host2txmon_ring_mask = host2txmon_ring_mask;
3534 		soc->intr_ctx[i].umac_reset_intr_mask = umac_reset_intr_mask;
3535 
3536 		soc->intr_ctx[i].soc = soc;
3537 
3538 		num_irq = 0;
3539 
3540 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
3541 					       &num_irq);
3542 
3543 		if (rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
3544 		    tx_ring_near_full_mask) {
3545 			dp_soc_near_full_interrupt_attach(soc, num_irq,
3546 							  irq_id_map, i);
3547 		} else {
3548 			napi_scale = wlan_cfg_get_napi_scale_factor(
3549 							    soc->wlan_cfg_ctx);
3550 			if (!napi_scale)
3551 				napi_scale = QCA_NAPI_DEF_SCALE_BIN_SHIFT;
3552 
3553 			ret = hif_register_ext_group(soc->hif_handle,
3554 				num_irq, irq_id_map, dp_service_srngs,
3555 				&soc->intr_ctx[i], "dp_intr",
3556 				HIF_EXEC_NAPI_TYPE, napi_scale);
3557 		}
3558 
3559 		dp_debug(" int ctx %u num_irq %u irq_id_map %u %u",
3560 			 i, num_irq, irq_id_map[0], irq_id_map[1]);
3561 
3562 		if (ret) {
3563 			dp_init_err("%pK: failed, ret = %d", soc, ret);
3564 			dp_soc_interrupt_detach(txrx_soc);
3565 			return QDF_STATUS_E_FAILURE;
3566 		}
3567 
3568 		hif_event_history_init(soc->hif_handle, i);
3569 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
3570 
3571 		if (rx_err_ring_mask)
3572 			rx_err_ring_intr_ctxt_id = i;
3573 
3574 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
3575 			soc->mon_intr_id_lmac_map[lmac_id] = i;
3576 			lmac_id++;
3577 		}
3578 	}
3579 
3580 	hif_configure_ext_group_interrupts(soc->hif_handle);
3581 	if (rx_err_ring_intr_ctxt_id != HIF_MAX_GROUP)
3582 		hif_config_irq_clear_cpu_affinity(soc->hif_handle,
3583 						  rx_err_ring_intr_ctxt_id, 0);
3584 
3585 	return QDF_STATUS_SUCCESS;
3586 }
3587 
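/*
 * Illustrative note (not part of the driver): dp_service_srngs(), registered
 * above as the ext-group handler, consults these per-context masks; e.g. a
 * context whose tx_ring_mask is 0x3 services Tx completions for rings 0 and
 * 1, while a zero mask skips that ring class entirely for the context.
 */
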
3588 #define AVG_MAX_MPDUS_PER_TID 128
3589 #define AVG_TIDS_PER_CLIENT 2
3590 #define AVG_FLOWS_PER_TID 2
3591 #define AVG_MSDUS_PER_FLOW 128
3592 #define AVG_MSDUS_PER_MPDU 4
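
/*
 * Sizing example (illustrative; both values are configuration dependent):
 * assuming max_clients = 64 and num_mpdus_per_link_desc = 6,
 * num_mpdu_link_descs works out to (64 * 2 * 128) / 6 = 2730.
 * dp_hw_link_desc_pool_banks_alloc() sums such terms into num_entries and
 * rounds the total up to a power of 2.
 */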
3593 
3594 /*
3595  * dp_hw_link_desc_pool_banks_free() - Free h/w link desc pool banks
3596  * @soc: DP SOC handle
3597  * @mac_id: mac id
3598  *
3599  * Return: none
3600  */
3601 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id)
3602 {
3603 	struct qdf_mem_multi_page_t *pages;
3604 
3605 	if (mac_id != WLAN_INVALID_PDEV_ID) {
3606 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3607 	} else {
3608 		pages = &soc->link_desc_pages;
3609 	}
3610 
3611 	if (!pages) {
3612 		dp_err("can not get link desc pages");
3613 		QDF_ASSERT(0);
3614 		return;
3615 	}
3616 
3617 	if (pages->dma_pages) {
3618 		wlan_minidump_remove((void *)
3619 				     pages->dma_pages->page_v_addr_start,
3620 				     pages->num_pages * pages->page_size,
3621 				     soc->ctrl_psoc,
3622 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3623 				     "hw_link_desc_bank");
3624 		dp_desc_multi_pages_mem_free(soc, DP_HW_LINK_DESC_TYPE,
3625 					     pages, 0, false);
3626 	}
3627 }
3628 
3629 qdf_export_symbol(dp_hw_link_desc_pool_banks_free);
3630 
3631 /*
3632  * dp_hw_link_desc_pool_banks_alloc() - Allocate h/w link desc pool banks
3633  * @soc: DP SOC handle
3634  * @mac_id: mac id
3635  *
3636  * Allocates memory pages for link descriptors; the page size is 4K for
3637  * MCL and 2MB for WIN. If the mac_id is invalid, link descriptor pages
3638  * are allocated for regular RX/TX, and if a valid mac_id is given, link
3639  * descriptors are allocated for RX monitor mode.
3640  *
3641  * Return: QDF_STATUS_SUCCESS: Success
3642  *	   QDF_STATUS_E_FAILURE: Failure
3643  */
3644 QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc, uint32_t mac_id)
3645 {
3646 	hal_soc_handle_t hal_soc = soc->hal_soc;
3647 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
3648 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
3649 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
3650 	uint32_t num_mpdus_per_link_desc = hal_num_mpdus_per_link_desc(hal_soc);
3651 	uint32_t num_msdus_per_link_desc = hal_num_msdus_per_link_desc(hal_soc);
3652 	uint32_t num_mpdu_links_per_queue_desc =
3653 		hal_num_mpdu_links_per_queue_desc(hal_soc);
3654 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
3655 	uint32_t *total_link_descs, total_mem_size;
3656 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
3657 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
3658 	uint32_t num_entries;
3659 	struct qdf_mem_multi_page_t *pages;
3660 	struct dp_srng *dp_srng;
3661 	uint8_t minidump_str[MINIDUMP_STR_SIZE];
3662 
3663 	/* Only Tx queue descriptors are allocated from the common link
3664 	 * descriptor pool. Rx queue descriptors (REO queue extension
3665 	 * descriptors) are not included here because they are expected to be
3666 	 * allocated contiguously with the REO queue descriptors.
3667 	 */
3668 	if (mac_id != WLAN_INVALID_PDEV_ID) {
3669 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3670 		/* dp_monitor_get_link_desc_pages returns NULL only
3671 		 * if the monitor SOC is NULL
3672 		 */
3673 		if (!pages) {
3674 			dp_err("can not get link desc pages");
3675 			QDF_ASSERT(0);
3676 			return QDF_STATUS_E_FAULT;
3677 		}
3678 		dp_srng = &soc->rxdma_mon_desc_ring[mac_id];
3679 		num_entries = dp_srng->alloc_size /
3680 			hal_srng_get_entrysize(soc->hal_soc,
3681 					       RXDMA_MONITOR_DESC);
3682 		total_link_descs = dp_monitor_get_total_link_descs(soc, mac_id);
3683 		qdf_str_lcopy(minidump_str, "mon_link_desc_bank",
3684 			      MINIDUMP_STR_SIZE);
3685 	} else {
3686 		num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3687 			AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
3688 
3689 		num_mpdu_queue_descs = num_mpdu_link_descs /
3690 			num_mpdu_links_per_queue_desc;
3691 
3692 		num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3693 			AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
3694 			num_msdus_per_link_desc;
3695 
3696 		num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3697 			AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / num_msdus_per_link_desc;
3698 
3699 		num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
3700 			num_tx_msdu_link_descs + num_rx_msdu_link_descs;
3701 
3702 		pages = &soc->link_desc_pages;
3703 		total_link_descs = &soc->total_link_descs;
3704 		qdf_str_lcopy(minidump_str, "link_desc_bank",
3705 			      MINIDUMP_STR_SIZE);
3706 	}
3707 
3708 	/* If link descriptor banks are allocated, return from here */
3709 	if (pages->num_pages)
3710 		return QDF_STATUS_SUCCESS;
3711 
3712 	/* Round up to power of 2 */
3713 	*total_link_descs = 1;
3714 	while (*total_link_descs < num_entries)
3715 		*total_link_descs <<= 1;
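	/* e.g. num_entries = 3000 rounds *total_link_descs up to 4096 */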
3716 
3717 	dp_init_info("%pK: total_link_descs: %u, link_desc_size: %d",
3718 		     soc, *total_link_descs, link_desc_size);
3719 	total_mem_size =  *total_link_descs * link_desc_size;
3720 	total_mem_size += link_desc_align;
3721 
3722 	dp_init_info("%pK: total_mem_size: %d",
3723 		     soc, total_mem_size);
3724 
3725 	dp_set_max_page_size(pages, max_alloc_size);
3726 	dp_desc_multi_pages_mem_alloc(soc, DP_HW_LINK_DESC_TYPE,
3727 				      pages,
3728 				      link_desc_size,
3729 				      *total_link_descs,
3730 				      0, false);
3731 	if (!pages->num_pages) {
3732 		dp_err("Multi page alloc fail for hw link desc pool");
3733 		return QDF_STATUS_E_FAULT;
3734 	}
3735 
3736 	wlan_minidump_log(pages->dma_pages->page_v_addr_start,
3737 			  pages->num_pages * pages->page_size,
3738 			  soc->ctrl_psoc,
3739 			  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3740 			  "hw_link_desc_bank");
3741 
3742 	return QDF_STATUS_SUCCESS;
3743 }
3744 
3745 /*
3746  * dp_hw_link_desc_ring_free() - Free h/w link desc rings
3747  * @soc: DP SOC handle
3748  *
3749  * Return: none
3750  */
3751 static void dp_hw_link_desc_ring_free(struct dp_soc *soc)
3752 {
3753 	uint32_t i;
3754 	uint32_t size = soc->wbm_idle_scatter_buf_size;
3755 	void *vaddr = soc->wbm_idle_link_ring.base_vaddr_unaligned;
3756 	qdf_dma_addr_t paddr;
3757 
3758 	if (soc->wbm_idle_scatter_buf_base_vaddr[0]) {
3759 		for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
3760 			vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
3761 			paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
3762 			if (vaddr) {
3763 				qdf_mem_free_consistent(soc->osdev,
3764 							soc->osdev->dev,
3765 							size,
3766 							vaddr,
3767 							paddr,
3768 							0);
3769 				vaddr = NULL;
3770 			}
3771 		}
3772 	} else {
3773 		wlan_minidump_remove(soc->wbm_idle_link_ring.base_vaddr_unaligned,
3774 				     soc->wbm_idle_link_ring.alloc_size,
3775 				     soc->ctrl_psoc,
3776 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3777 				     "wbm_idle_link_ring");
3778 		dp_srng_free(soc, &soc->wbm_idle_link_ring);
3779 	}
3780 }
3781 
3782 /*
3783  * dp_hw_link_desc_ring_alloc() - Allocate hw link desc rings
3784  * @soc: DP SOC handle
3785  *
3786  * Allocate memory for the WBM_IDLE_LINK srng ring if the memory needed
3787  * for the link descriptors is less than the max alloc size; otherwise
3788  * allocate memory for the wbm_idle_scatter_buffers.
3789  *
3790  * Return: QDF_STATUS_SUCCESS: success
3791  *         QDF_STATUS_E_NO_MEM: No memory (Failure)
3792  */
3793 static QDF_STATUS dp_hw_link_desc_ring_alloc(struct dp_soc *soc)
3794 {
3795 	uint32_t entry_size, i;
3796 	uint32_t total_mem_size;
3797 	qdf_dma_addr_t *baseaddr = NULL;
3798 	struct dp_srng *dp_srng;
3799 	uint32_t ring_type;
3800 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
3801 	uint32_t tlds;
3802 
3803 	ring_type = WBM_IDLE_LINK;
3804 	dp_srng = &soc->wbm_idle_link_ring;
3805 	tlds = soc->total_link_descs;
3806 
3807 	entry_size = hal_srng_get_entrysize(soc->hal_soc, ring_type);
3808 	total_mem_size = entry_size * tlds;
3809 
3810 	if (total_mem_size <= max_alloc_size) {
3811 		if (dp_srng_alloc(soc, dp_srng, ring_type, tlds, 0)) {
3812 			dp_init_err("%pK: Link desc idle ring setup failed",
3813 				    soc);
3814 			goto fail;
3815 		}
3816 
3817 		wlan_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
3818 				  soc->wbm_idle_link_ring.alloc_size,
3819 				  soc->ctrl_psoc,
3820 				  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3821 				  "wbm_idle_link_ring");
3822 	} else {
3823 		uint32_t num_scatter_bufs;
3824 		uint32_t num_entries_per_buf;
3825 		uint32_t buf_size = 0;
3826 
3827 		soc->wbm_idle_scatter_buf_size =
3828 			hal_idle_list_scatter_buf_size(soc->hal_soc);
3829 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
3830 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
3831 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
3832 					soc->hal_soc, total_mem_size,
3833 					soc->wbm_idle_scatter_buf_size);
3834 
3835 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
3836 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3837 				  FL("scatter bufs size out of bounds"));
3838 			goto fail;
3839 		}
3840 
3841 		for (i = 0; i < num_scatter_bufs; i++) {
3842 			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
3843 			buf_size = soc->wbm_idle_scatter_buf_size;
3844 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
3845 				qdf_mem_alloc_consistent(soc->osdev,
3846 							 soc->osdev->dev,
3847 							 buf_size,
3848 							 baseaddr);
3849 
3850 			if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
3851 				QDF_TRACE(QDF_MODULE_ID_DP,
3852 					  QDF_TRACE_LEVEL_ERROR,
3853 					  FL("Scatter lst memory alloc fail"));
3854 				goto fail;
3855 			}
3856 		}
3857 		soc->num_scatter_bufs = num_scatter_bufs;
3858 	}
3859 	return QDF_STATUS_SUCCESS;
3860 
3861 fail:
3862 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
3863 		void *vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
3864 		qdf_dma_addr_t paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
3865 
3866 		if (vaddr) {
3867 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
3868 						soc->wbm_idle_scatter_buf_size,
3869 						vaddr,
3870 						paddr, 0);
3871 			vaddr = NULL;
3872 		}
3873 	}
3874 	return QDF_STATUS_E_NOMEM;
3875 }
3876 
3877 qdf_export_symbol(dp_hw_link_desc_pool_banks_alloc);
3878 
3879 /*
3880  * dp_hw_link_desc_ring_init() - Initialize hw link desc rings
3881  * @soc: DP SOC handle
3882  *
3883  * Return: QDF_STATUS_SUCCESS: success
3884  *         QDF_STATUS_E_FAILURE: failure
3885  */
3886 static QDF_STATUS dp_hw_link_desc_ring_init(struct dp_soc *soc)
3887 {
3888 	struct dp_srng *dp_srng = &soc->wbm_idle_link_ring;
3889 
3890 	if (dp_srng->base_vaddr_unaligned) {
3891 		if (dp_srng_init(soc, dp_srng, WBM_IDLE_LINK, 0, 0))
3892 			return QDF_STATUS_E_FAILURE;
3893 	}
3894 	return QDF_STATUS_SUCCESS;
3895 }
3896 
3897 /*
3898  * dp_hw_link_desc_ring_deinit() - Reset hw link desc rings
3899  * @soc: DP SOC handle
3900  *
3901  * Return: None
3902  */
3903 static void dp_hw_link_desc_ring_deinit(struct dp_soc *soc)
3904 {
3905 	dp_srng_deinit(soc, &soc->wbm_idle_link_ring, WBM_IDLE_LINK, 0);
3906 }
3907 
3908 /*
3909  * dp_link_desc_ring_replenish() - Replenish hw link desc rings
3910  * @soc: DP SOC handle
3911  * @mac_id: mac id
3912  *
3913  * Return: None
3914  */
3915 void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id)
3916 {
3917 	uint32_t cookie = 0;
3918 	uint32_t page_idx = 0;
3919 	struct qdf_mem_multi_page_t *pages;
3920 	struct qdf_mem_dma_page_t *dma_pages;
3921 	uint32_t offset = 0;
3922 	uint32_t count = 0;
3923 	uint32_t desc_id = 0;
3924 	void *desc_srng;
3925 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
3926 	uint32_t *total_link_descs_addr;
3927 	uint32_t total_link_descs;
3928 	uint32_t scatter_buf_num;
3929 	uint32_t num_entries_per_buf = 0;
3930 	uint32_t rem_entries;
3931 	uint32_t num_descs_per_page;
3932 	uint32_t num_scatter_bufs = 0;
3933 	uint8_t *scatter_buf_ptr;
3934 	void *desc;
3935 
3936 	num_scatter_bufs = soc->num_scatter_bufs;
3937 
3938 	if (mac_id == WLAN_INVALID_PDEV_ID) {
3939 		pages = &soc->link_desc_pages;
3940 		total_link_descs = soc->total_link_descs;
3941 		desc_srng = soc->wbm_idle_link_ring.hal_srng;
3942 	} else {
3943 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3944 		/* dp_monitor_get_link_desc_pages returns NULL only
3945 		 * if the monitor SOC is NULL
3946 		 */
3947 		if (!pages) {
3948 			dp_err("can not get link desc pages");
3949 			QDF_ASSERT(0);
3950 			return;
3951 		}
3952 		total_link_descs_addr =
3953 				dp_monitor_get_total_link_descs(soc, mac_id);
3954 		total_link_descs = *total_link_descs_addr;
3955 		desc_srng = soc->rxdma_mon_desc_ring[mac_id].hal_srng;
3956 	}
3957 
3958 	dma_pages = pages->dma_pages;
3959 	do {
3960 		qdf_mem_zero(dma_pages[page_idx].page_v_addr_start,
3961 			     pages->page_size);
3962 		page_idx++;
3963 	} while (page_idx < pages->num_pages);
3964 
3965 	if (desc_srng) {
3966 		hal_srng_access_start_unlocked(soc->hal_soc, desc_srng);
3967 		page_idx = 0;
3968 		count = 0;
3969 		offset = 0;
3970 		pages = &soc->link_desc_pages;
3971 		while ((desc = hal_srng_src_get_next(soc->hal_soc,
3972 						     desc_srng)) &&
3973 			(count < total_link_descs)) {
3974 			page_idx = count / pages->num_element_per_page;
3975 			if (desc_id == pages->num_element_per_page)
3976 				desc_id = 0;
3977 
3978 			offset = count % pages->num_element_per_page;
3979 			cookie = LINK_DESC_COOKIE(desc_id, page_idx,
3980 						  soc->link_desc_id_start);
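			/*
			 * The cookie packs desc_id, page_idx and the per-SOC
			 * link_desc_id_start so that a descriptor returned by
			 * HW can be mapped back to its pool page.
			 */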
3981 
3982 			hal_set_link_desc_addr(soc->hal_soc, desc, cookie,
3983 					       dma_pages[page_idx].page_p_addr
3984 					       + (offset * link_desc_size),
3985 					       soc->idle_link_bm_id);
3986 			count++;
3987 			desc_id++;
3988 		}
3989 		hal_srng_access_end_unlocked(soc->hal_soc, desc_srng);
3990 	} else {
3991 		/* Populate idle list scatter buffers with link descriptor
3992 		 * pointers
3993 		 */
3994 		scatter_buf_num = 0;
3995 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
3996 					soc->hal_soc,
3997 					soc->wbm_idle_scatter_buf_size);
3998 
3999 		scatter_buf_ptr = (uint8_t *)(
4000 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
4001 		rem_entries = num_entries_per_buf;
4002 		pages = &soc->link_desc_pages;
4003 		page_idx = 0; count = 0;
4004 		offset = 0;
4005 		num_descs_per_page = pages->num_element_per_page;
4006 
4007 		while (count < total_link_descs) {
4008 			page_idx = count / num_descs_per_page;
4009 			offset = count % num_descs_per_page;
4010 			if (desc_id == pages->num_element_per_page)
4011 				desc_id = 0;
4012 
4013 			cookie = LINK_DESC_COOKIE(desc_id, page_idx,
4014 						  soc->link_desc_id_start);
4015 			hal_set_link_desc_addr(soc->hal_soc,
4016 					       (void *)scatter_buf_ptr,
4017 					       cookie,
4018 					       dma_pages[page_idx].page_p_addr +
4019 					       (offset * link_desc_size),
4020 					       soc->idle_link_bm_id);
4021 			rem_entries--;
4022 			if (rem_entries) {
4023 				scatter_buf_ptr += link_desc_size;
4024 			} else {
4025 				rem_entries = num_entries_per_buf;
4026 				scatter_buf_num++;
4027 				if (scatter_buf_num >= num_scatter_bufs)
4028 					break;
4029 				scatter_buf_ptr = (uint8_t *)
4030 					(soc->wbm_idle_scatter_buf_base_vaddr[
4031 					 scatter_buf_num]);
4032 			}
4033 			count++;
4034 			desc_id++;
4035 		}
4036 		/* Setup link descriptor idle list in HW */
4037 		hal_setup_link_idle_list(soc->hal_soc,
4038 			soc->wbm_idle_scatter_buf_base_paddr,
4039 			soc->wbm_idle_scatter_buf_base_vaddr,
4040 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
4041 			(uint32_t)(scatter_buf_ptr -
4042 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
4043 			scatter_buf_num-1])), total_link_descs);
4044 	}
4045 }
4046 
4047 qdf_export_symbol(dp_link_desc_ring_replenish);
4048 
4049 #ifdef IPA_OFFLOAD
4050 #define USE_1_IPA_RX_REO_RING 1
4051 #define USE_2_IPA_RX_REO_RINGS 2
4052 #define REO_DST_RING_SIZE_QCA6290 1023
4053 #ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
4054 #define REO_DST_RING_SIZE_QCA8074 1023
4055 #define REO_DST_RING_SIZE_QCN9000 2048
4056 #else
4057 #define REO_DST_RING_SIZE_QCA8074 8
4058 #define REO_DST_RING_SIZE_QCN9000 8
4059 #endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
4060 
4061 #ifdef IPA_WDI3_TX_TWO_PIPES
4062 #ifdef DP_MEMORY_OPT
4063 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
4064 {
4065 	return dp_init_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
4066 }
4067 
4068 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
4069 {
4070 	dp_deinit_tx_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
4071 }
4072 
4073 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
4074 {
4075 	return dp_alloc_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
4076 }
4077 
4078 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
4079 {
4080 	dp_free_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
4081 }
4082 
4083 #else /* !DP_MEMORY_OPT */
4084 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
4085 {
4086 	return 0;
4087 }
4088 
4089 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
4090 {
4091 }
4092 
4093 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
4094 {
4095 	return 0;
4096 }
4097 
4098 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
4099 {
4100 }
4101 #endif /* DP_MEMORY_OPT */
4102 
4103 static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
4104 {
4105 	hal_tx_init_data_ring(soc->hal_soc,
4106 			      soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng);
4107 }
4108 
4109 #else /* !IPA_WDI3_TX_TWO_PIPES */
4110 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
4111 {
4112 	return 0;
4113 }
4114 
4115 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
4116 {
4117 }
4118 
4119 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
4120 {
4121 	return 0;
4122 }
4123 
4124 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
4125 {
4126 }
4127 
4128 static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
4129 {
4130 }
4131 
4132 #endif /* IPA_WDI3_TX_TWO_PIPES */
4133 
4134 #else
4135 
4136 #define REO_DST_RING_SIZE_QCA6290 1024
4137 
4138 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
4139 {
4140 	return 0;
4141 }
4142 
4143 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
4144 {
4145 }
4146 
4147 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
4148 {
4149 	return 0;
4150 }
4151 
4152 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
4153 {
4154 }
4155 
4156 static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
4157 {
4158 }
4159 
4160 #endif /* IPA_OFFLOAD */
4161 
4162 /*
4163  * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
4164  * @soc: Datapath soc handle
4165  *
4166  * This API resets the default cpu ring map
4167  */
4168 
4169 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
4170 {
4171 	uint8_t i;
4172 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4173 
4174 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
4175 		switch (nss_config) {
4176 		case dp_nss_cfg_first_radio:
4177 			/*
4178 			 * Setting Tx ring map for one nss offloaded radio
4179 			 */
4180 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
4181 			break;
4182 
4183 		case dp_nss_cfg_second_radio:
4184 			/*
4185 			 * Setting Tx ring for two nss offloaded radios
4186 			 */
4187 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
4188 			break;
4189 
4190 		case dp_nss_cfg_dbdc:
4191 			/*
4192 			 * Setting Tx ring map for 2 nss offloaded radios
4193 			 */
4194 			soc->tx_ring_map[i] =
4195 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
4196 			break;
4197 
4198 		case dp_nss_cfg_dbtc:
4199 			/*
4200 			 * Setting Tx ring map for 3 nss offloaded radios
4201 			 */
4202 			soc->tx_ring_map[i] =
4203 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
4204 			break;
4205 
4206 		default:
4207 			dp_err("tx_ring_map failed due to invalid nss cfg");
4208 			break;
4209 		}
4210 	}
4211 }
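
/*
 * Illustrative effect: for dp_nss_cfg_dbdc, the selected dp_cpu_ring_map
 * row steers host Tx onto the TCL data rings not owned by the NSS offload,
 * so host and offload traffic do not contend for the same ring.
 */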
4212 
4213 /*
4214  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
4215  * @soc: DP soc handle
4216  * @ring_type: ring type
4217  * @ring_num: ring number
4218  *
4219  * Return: non-zero if the ring is offloaded to NSS, 0 otherwise
4220  */
4221 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
4222 {
4223 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4224 	uint8_t status = 0;
4225 
4226 	switch (ring_type) {
4227 	case WBM2SW_RELEASE:
4228 	case REO_DST:
4229 	case RXDMA_BUF:
4230 	case REO_EXCEPTION:
4231 		status = ((nss_config) & (1 << ring_num));
4232 		break;
4233 	default:
4234 		break;
4235 	}
4236 
4237 	return status;
4238 }
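
/*
 * Illustrative example: with nss_config = 0x3 (dp_nss_cfg_dbdc), REO_DST
 * rings 0 and 1 are treated as NSS-offloaded, and dp_soc_reset_intr_mask()
 * below clears their bits from the host interrupt masks.
 */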
4239 
4240 /*
4241  * dp_soc_disable_unused_mac_intr_mask() - reset interrupt mask for
4242  *					  unused WMAC hw rings
4243  * @soc: DP SoC handle
4244  * @mac_num: WMAC number
4245  *
4246  * Return: void
4247  */
4248 static void dp_soc_disable_unused_mac_intr_mask(struct dp_soc *soc,
4249 						int mac_num)
4250 {
4251 	uint8_t *grp_mask = NULL;
4252 	int group_number;
4253 
4254 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
4255 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4256 	wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
4257 					  group_number, 0x0);
4258 
4259 	grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
4260 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4261 	wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
4262 				      group_number, 0x0);
4263 
4264 	grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
4265 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4266 	wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
4267 					  group_number, 0x0);
4268 
4269 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
4270 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4271 	wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
4272 					      group_number, 0x0);
4273 }
4274 
4275 #ifdef IPA_OFFLOAD
4276 #ifdef IPA_WDI3_VLAN_SUPPORT
4277 /*
4278  * dp_soc_reset_ipa_vlan_intr_mask() - reset interrupt mask for IPA offloaded
4279  * ring for vlan tagged traffic
4280  * @soc: DP SoC handle
4281  *
4282  * Return: void
4283  */
4284 static void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
4285 {
4286 	uint8_t *grp_mask = NULL;
4287 	int group_number, mask;
4288 
4289 	if (!wlan_ipa_is_vlan_enabled())
4290 		return;
4291 
4292 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
4293 
4294 	group_number = dp_srng_find_ring_in_mask(IPA_ALT_REO_DEST_RING_IDX, grp_mask);
4295 	if (group_number < 0) {
4296 		dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4297 			      soc, REO_DST, IPA_ALT_REO_DEST_RING_IDX);
4298 		return;
4299 	}
4300 
4301 	mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
4302 
4303 	/* reset the interrupt mask for offloaded ring */
4304 	mask &= (~(1 << IPA_ALT_REO_DEST_RING_IDX));
4305 
4306 	/*
4307 	 * set the interrupt mask to zero for rx offloaded radio.
4308 	 */
4309 	wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
4310 }
4311 #else
4312 static inline
4313 void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
4314 { }
4315 #endif /* IPA_WDI3_VLAN_SUPPORT */
4316 #else
4317 static inline
4318 void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
4319 { }
4320 #endif /* IPA_OFFLOAD */
4321 
4322 /*
4323  * dp_soc_reset_intr_mask() - reset interrupt mask
4324  * @soc: DP SoC handle
4325  *
4326  * Return: void
4327  */
4328 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
4329 {
4330 	uint8_t j;
4331 	uint8_t *grp_mask = NULL;
4332 	int group_number, mask, num_ring;
4333 
4334 	/* number of tx ring */
4335 	num_ring = soc->num_tcl_data_rings;
4336 
4337 	/*
4338 	 * group mask for tx completion  ring.
4339 	 */
4340 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
4341 
4342 	/* loop and reset the mask for only offloaded ring */
4343 	for (j = 0; j < WLAN_CFG_NUM_TCL_DATA_RINGS; j++) {
4344 		/*
4345 		 * Group number corresponding to tx offloaded ring.
4346 		 */
4347 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
4348 		if (group_number < 0) {
4349 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4350 				      soc, WBM2SW_RELEASE, j);
4351 			continue;
4352 		}
4353 
4354 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
4355 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j) &&
4356 		    (!mask)) {
4357 			continue;
4358 		}
4359 
4360 		/* reset the tx mask for offloaded ring */
4361 		mask &= (~(1 << j));
4362 
4363 		/*
4364 		 * reset the interrupt mask for offloaded ring.
4365 		 */
4366 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
4367 	}
4368 
4369 	/* number of rx rings */
4370 	num_ring = soc->num_reo_dest_rings;
4371 
4372 	/*
4373 	 * group mask for reo destination ring.
4374 	 */
4375 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
4376 
4377 	/* loop and reset the mask for only offloaded ring */
4378 	for (j = 0; j < WLAN_CFG_NUM_REO_DEST_RING; j++) {
4379 		/*
4380 		 * Group number corresponding to rx offloaded ring.
4381 		 */
4382 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
4383 		if (group_number < 0) {
4384 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4385 				      soc, REO_DST, j);
4386 			continue;
4387 		}
4388 
4389 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
4390 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j) &&
4391 		    (!mask)) {
4392 			continue;
4393 		}
4394 
4395 		/* reset the interrupt mask for offloaded ring */
4396 		mask &= (~(1 << j));
4397 
4398 		/*
4399 		 * set the interrupt mask to zero for rx offloaded radio.
4400 		 */
4401 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
4402 	}
4403 
4404 	/*
4405 	 * group mask for Rx buffer refill ring
4406 	 */
4407 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
4408 
4409 	/* loop and reset the mask for only offloaded ring */
4410 	for (j = 0; j < MAX_PDEV_CNT; j++) {
4411 		int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
4412 
4413 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
4414 			continue;
4415 		}
4416 
4417 		/*
4418 		 * Group number corresponding to rx offloaded ring.
4419 		 */
4420 		group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask);
4421 		if (group_number < 0) {
4422 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4423 				      soc, RXDMA_BUF, lmac_id);
4424 			continue;
4425 		}
4426 
4427 		/* set the interrupt mask for offloaded ring */
4428 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
4429 				group_number);
4430 		mask &= (~(1 << lmac_id));
4431 
4432 		/*
4433 		 * set the interrupt mask to zero for rx offloaded radio.
4434 		 */
4435 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
4436 			group_number, mask);
4437 	}
4438 
4439 	grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
4440 
4441 	for (j = 0; j < num_ring; j++) {
4442 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_EXCEPTION, j)) {
4443 			continue;
4444 		}
4445 
4446 		/*
4447 		 * Group number corresponding to rx err ring.
4448 		 */
4449 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
4450 		if (group_number < 0) {
4451 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4452 				      soc, REO_EXCEPTION, j);
4453 			continue;
4454 		}
4455 
4456 		wlan_cfg_set_rx_err_ring_mask(soc->wlan_cfg_ctx,
4457 					      group_number, 0);
4458 	}
4459 }
4460 
4461 #ifdef IPA_OFFLOAD
4462 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap0,
4463 			 uint32_t *remap1, uint32_t *remap2)
4464 {
4465 	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX] = {
4466 				REO_REMAP_SW1, REO_REMAP_SW2, REO_REMAP_SW3,
4467 				REO_REMAP_SW5, REO_REMAP_SW6, REO_REMAP_SW7};
4468 
4469 	switch (soc->arch_id) {
4470 	case CDP_ARCH_TYPE_BE:
4471 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4472 					      soc->num_reo_dest_rings -
4473 					      USE_2_IPA_RX_REO_RINGS, remap1,
4474 					      remap2);
4475 		break;
4476 
4477 	case CDP_ARCH_TYPE_LI:
4478 		if (wlan_ipa_is_vlan_enabled()) {
4479 			hal_compute_reo_remap_ix2_ix3(
4480 					soc->hal_soc, ring,
4481 					soc->num_reo_dest_rings -
4482 					USE_2_IPA_RX_REO_RINGS, remap1,
4483 					remap2);
4484 
4485 		} else {
4486 			hal_compute_reo_remap_ix2_ix3(
4487 					soc->hal_soc, ring,
4488 					soc->num_reo_dest_rings -
4489 					USE_1_IPA_RX_REO_RING, remap1,
4490 					remap2);
4491 		}
4492 
4493 		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);
4494 		break;
4495 	default:
4496 		dp_err("unkonwn arch_id 0x%x", soc->arch_id);
4497 		QDF_BUG(0);
4498 
4499 	}
4500 
4501 	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
4502 
4503 	return true;
4504 }
4505 
4506 #ifdef IPA_WDI3_TX_TWO_PIPES
4507 static bool dp_ipa_is_alt_tx_ring(int index)
4508 {
4509 	return index == IPA_TX_ALT_RING_IDX;
4510 }
4511 
4512 static bool dp_ipa_is_alt_tx_comp_ring(int index)
4513 {
4514 	return index == IPA_TX_ALT_COMP_RING_IDX;
4515 }
4516 #else /* !IPA_WDI3_TX_TWO_PIPES */
4517 static bool dp_ipa_is_alt_tx_ring(int index)
4518 {
4519 	return false;
4520 }
4521 
4522 static bool dp_ipa_is_alt_tx_comp_ring(int index)
4523 {
4524 	return false;
4525 }
4526 #endif /* IPA_WDI3_TX_TWO_PIPES */
4527 
4528 /**
4529  * dp_ipa_get_tx_ring_size() - Get Tx ring size for IPA
4530  *
4531  * @tx_ring_num: Tx ring number
4532  * @tx_ipa_ring_sz: Return param only updated for IPA.
4533  * @soc_cfg_ctx: dp soc cfg context
4534  *
4535  * Return: None
4536  */
4537 static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz,
4538 				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4539 {
4540 	if (!soc_cfg_ctx->ipa_enabled)
4541 		return;
4542 
4543 	if (tx_ring_num == IPA_TCL_DATA_RING_IDX)
4544 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_ring_size(soc_cfg_ctx);
4545 	else if (dp_ipa_is_alt_tx_ring(tx_ring_num))
4546 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_alt_ring_size(soc_cfg_ctx);
4547 }
4548 
4549 /**
4550  * dp_ipa_get_tx_comp_ring_size() - Get Tx comp ring size for IPA
4551  *
4552  * @tx_comp_ring_num: Tx comp ring number
4553  * @tx_comp_ipa_ring_sz: Return param only updated for IPA.
4554  * @soc_cfg_ctx: dp soc cfg context
4555  *
4556  * Return: None
4557  */
4558 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
4559 					 int *tx_comp_ipa_ring_sz,
4560 				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4561 {
4562 	if (!soc_cfg_ctx->ipa_enabled)
4563 		return;
4564 
4565 	if (tx_comp_ring_num == IPA_TCL_DATA_RING_IDX)
4566 		*tx_comp_ipa_ring_sz =
4567 				wlan_cfg_ipa_tx_comp_ring_size(soc_cfg_ctx);
4568 	else if (dp_ipa_is_alt_tx_comp_ring(tx_comp_ring_num))
4569 		*tx_comp_ipa_ring_sz =
4570 				wlan_cfg_ipa_tx_alt_comp_ring_size(soc_cfg_ctx);
4571 }
4572 #else
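/*
 * dp_reo_ring_selection() - map a REO destination ring bitmap to ring IDs
 * @value: bitmap of SW rings to use (bit n selects REO_REMAP_SW(n+1))
 * @ring: output array of REO_REMAP_* ring IDs
 *
 * Return: number of entries populated in @ring
 */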
4573 static uint8_t dp_reo_ring_selection(uint32_t value, uint32_t *ring)
4574 {
4575 	uint8_t num = 0;
4576 
4577 	switch (value) {
4578 	/* should we have all the different possible ring configs */
4579 	case 0xFF:
4580 		num = 8;
4581 		ring[0] = REO_REMAP_SW1;
4582 		ring[1] = REO_REMAP_SW2;
4583 		ring[2] = REO_REMAP_SW3;
4584 		ring[3] = REO_REMAP_SW4;
4585 		ring[4] = REO_REMAP_SW5;
4586 		ring[5] = REO_REMAP_SW6;
4587 		ring[6] = REO_REMAP_SW7;
4588 		ring[7] = REO_REMAP_SW8;
4589 		break;
4590 
4591 	case 0x3F:
4592 		num = 6;
4593 		ring[0] = REO_REMAP_SW1;
4594 		ring[1] = REO_REMAP_SW2;
4595 		ring[2] = REO_REMAP_SW3;
4596 		ring[3] = REO_REMAP_SW4;
4597 		ring[4] = REO_REMAP_SW5;
4598 		ring[5] = REO_REMAP_SW6;
4599 		break;
4600 
4601 	case 0xF:
4602 		num = 4;
4603 		ring[0] = REO_REMAP_SW1;
4604 		ring[1] = REO_REMAP_SW2;
4605 		ring[2] = REO_REMAP_SW3;
4606 		ring[3] = REO_REMAP_SW4;
4607 		break;
4608 	case 0xE:
4609 		num = 3;
4610 		ring[0] = REO_REMAP_SW2;
4611 		ring[1] = REO_REMAP_SW3;
4612 		ring[2] = REO_REMAP_SW4;
4613 		break;
4614 	case 0xD:
4615 		num = 3;
4616 		ring[0] = REO_REMAP_SW1;
4617 		ring[1] = REO_REMAP_SW3;
4618 		ring[2] = REO_REMAP_SW4;
4619 		break;
4620 	case 0xC:
4621 		num = 2;
4622 		ring[0] = REO_REMAP_SW3;
4623 		ring[1] = REO_REMAP_SW4;
4624 		break;
4625 	case 0xB:
4626 		num = 3;
4627 		ring[0] = REO_REMAP_SW1;
4628 		ring[1] = REO_REMAP_SW2;
4629 		ring[2] = REO_REMAP_SW4;
4630 		break;
4631 	case 0xA:
4632 		num = 2;
4633 		ring[0] = REO_REMAP_SW2;
4634 		ring[1] = REO_REMAP_SW4;
4635 		break;
4636 	case 0x9:
4637 		num = 2;
4638 		ring[0] = REO_REMAP_SW1;
4639 		ring[1] = REO_REMAP_SW4;
4640 		break;
4641 	case 0x8:
4642 		num = 1;
4643 		ring[0] = REO_REMAP_SW4;
4644 		break;
4645 	case 0x7:
4646 		num = 3;
4647 		ring[0] = REO_REMAP_SW1;
4648 		ring[1] = REO_REMAP_SW2;
4649 		ring[2] = REO_REMAP_SW3;
4650 		break;
4651 	case 0x6:
4652 		num = 2;
4653 		ring[0] = REO_REMAP_SW2;
4654 		ring[1] = REO_REMAP_SW3;
4655 		break;
4656 	case 0x5:
4657 		num = 2;
4658 		ring[0] = REO_REMAP_SW1;
4659 		ring[1] = REO_REMAP_SW3;
4660 		break;
4661 	case 0x4:
4662 		num = 1;
4663 		ring[0] = REO_REMAP_SW3;
4664 		break;
4665 	case 0x3:
4666 		num = 2;
4667 		ring[0] = REO_REMAP_SW1;
4668 		ring[1] = REO_REMAP_SW2;
4669 		break;
4670 	case 0x2:
4671 		num = 1;
4672 		ring[0] = REO_REMAP_SW2;
4673 		break;
4674 	case 0x1:
4675 		num = 1;
4676 		ring[0] = REO_REMAP_SW1;
4677 		break;
4678 	default:
4679 		dp_err("unkonwn reo ring map 0x%x", value);
4680 		QDF_BUG(0);
4681 	}
4682 	return num;
4683 }
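
/*
 * Illustrative mapping: reo_config = 0x5 selects two destination rings,
 * REO_REMAP_SW1 and REO_REMAP_SW3, which dp_reo_remap_config() below then
 * spreads across the REO remap IX2/IX3 registers.
 */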
4684 
4685 bool dp_reo_remap_config(struct dp_soc *soc,
4686 			 uint32_t *remap0,
4687 			 uint32_t *remap1,
4688 			 uint32_t *remap2)
4689 {
4690 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4691 	uint32_t reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
4692 	uint8_t num;
4693 	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX];
4694 	uint32_t value;
4695 
4696 	switch (offload_radio) {
4697 	case dp_nss_cfg_default:
4698 		value = reo_config & WLAN_CFG_NUM_REO_RINGS_MAP_MAX;
4699 		num = dp_reo_ring_selection(value, ring);
4700 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4701 					      num, remap1, remap2);
4702 		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);
4703 
4704 		break;
4705 	case dp_nss_cfg_first_radio:
4706 		value = reo_config & 0xE;
4707 		num = dp_reo_ring_selection(value, ring);
4708 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4709 					      num, remap1, remap2);
4710 
4711 		break;
4712 	case dp_nss_cfg_second_radio:
4713 		value = reo_config & 0xD;
4714 		num = dp_reo_ring_selection(value, ring);
4715 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4716 					      num, remap1, remap2);
4717 
4718 		break;
4719 	case dp_nss_cfg_dbdc:
4720 	case dp_nss_cfg_dbtc:
4721 		/* return false if both or all are offloaded to NSS */
4722 		return false;
4723 
4724 	}
4725 
4726 	dp_debug("remap1 %x remap2 %x offload_radio %u",
4727 		 *remap1, *remap2, offload_radio);
4728 	return true;
4729 }
4730 
4731 static void dp_ipa_get_tx_ring_size(int ring_num, int *tx_ipa_ring_sz,
4732 				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4733 {
4734 }
4735 
4736 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
4737 					 int *tx_comp_ipa_ring_sz,
4738 				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4739 {
4740 }
4741 #endif /* IPA_OFFLOAD */
4742 
4743 /*
4744  * dp_reo_frag_dst_set() - configure reo register to set the
4745  *                        fragment destination ring
4746  * @soc: Datapath soc
4747  * @frag_dst_ring: output parameter to set the fragment destination ring
4748  *
4749  * Based on offload_radio below, the fragment destination ring is selected:
4750  * 0 - TCL
4751  * 1 - SW1
4752  * 2 - SW2
4753  * 3 - SW3
4754  * 4 - SW4
4755  * 5 - Release
4756  * 6 - FW
4757  * 7 - alternate select
4758  *
4759  * return: void
4760  */
4761 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
4762 {
4763 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4764 
4765 	switch (offload_radio) {
4766 	case dp_nss_cfg_default:
4767 		*frag_dst_ring = REO_REMAP_TCL;
4768 		break;
4769 	case dp_nss_cfg_first_radio:
4770 		/*
4771 		 * This configuration is valid for single band radio which
4772 		 * is also NSS offload.
4773 		 */
4774 	case dp_nss_cfg_dbdc:
4775 	case dp_nss_cfg_dbtc:
4776 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
4777 		break;
4778 	default:
4779 		dp_init_err("%pK: dp_reo_frag_dst_set invalid offload radio config", soc);
4780 		break;
4781 	}
4782 }
4783 
4784 #ifdef ENABLE_VERBOSE_DEBUG
4785 static void dp_enable_verbose_debug(struct dp_soc *soc)
4786 {
4787 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
4788 
4789 	soc_cfg_ctx = soc->wlan_cfg_ctx;
4790 
4791 	if (soc_cfg_ctx->per_pkt_trace & dp_verbose_debug_mask)
4792 		is_dp_verbose_debug_enabled = true;
4793 
4794 	if (soc_cfg_ctx->per_pkt_trace & hal_verbose_debug_mask)
4795 		hal_set_verbose_debug(true);
4796 	else
4797 		hal_set_verbose_debug(false);
4798 }
4799 #else
4800 static void dp_enable_verbose_debug(struct dp_soc *soc)
4801 {
4802 }
4803 #endif
4804 
4805 #ifdef WLAN_FEATURE_STATS_EXT
4806 static inline void dp_create_ext_stats_event(struct dp_soc *soc)
4807 {
4808 	qdf_event_create(&soc->rx_hw_stats_event);
4809 }
4810 #else
4811 static inline void dp_create_ext_stats_event(struct dp_soc *soc)
4812 {
4813 }
4814 #endif
4815 
4816 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index)
4817 {
4818 	int tcl_ring_num, wbm_ring_num;
4819 
4820 	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
4821 						index,
4822 						&tcl_ring_num,
4823 						&wbm_ring_num);
4824 
4825 	if (tcl_ring_num == -1) {
4826 		dp_err("incorrect tcl ring num for index %u", index);
4827 		return;
4828 	}
4829 
4830 	wlan_minidump_remove(soc->tcl_data_ring[index].base_vaddr_unaligned,
4831 			     soc->tcl_data_ring[index].alloc_size,
4832 			     soc->ctrl_psoc,
4833 			     WLAN_MD_DP_SRNG_TCL_DATA,
4834 			     "tcl_data_ring");
4835 	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
4836 	dp_srng_deinit(soc, &soc->tcl_data_ring[index], TCL_DATA,
4837 		       tcl_ring_num);
4838 
4839 	if (wbm_ring_num == INVALID_WBM_RING_NUM)
4840 		return;
4841 
4842 	wlan_minidump_remove(soc->tx_comp_ring[index].base_vaddr_unaligned,
4843 			     soc->tx_comp_ring[index].alloc_size,
4844 			     soc->ctrl_psoc,
4845 			     WLAN_MD_DP_SRNG_TX_COMP,
4846 			     "tcl_comp_ring");
4847 	dp_srng_deinit(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4848 		       wbm_ring_num);
4849 }
4850 
4851 /**
4852  * dp_init_tx_ring_pair_by_index() - The function inits tcl data/wbm completion
4853  * ring pair
4854  * @soc: DP soc pointer
4855  * @index: index of soc->tcl_data or soc->tx_comp to initialize
4856  *
4857  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
4858  */
4859 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
4860 						uint8_t index)
4861 {
4862 	int tcl_ring_num, wbm_ring_num;
4863 	uint8_t bm_id;
4864 
4865 	if (index >= MAX_TCL_DATA_RINGS) {
4866 		dp_err("unexpected index!");
4867 		QDF_BUG(0);
4868 		goto fail1;
4869 	}
4870 
4871 	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
4872 						index,
4873 						&tcl_ring_num,
4874 						&wbm_ring_num);
4875 
4876 	if (tcl_ring_num == -1) {
4877 		dp_err("incorrect tcl ring num for index %u", index);
4878 		goto fail1;
4879 	}
4880 
4881 	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
4882 	if (dp_srng_init(soc, &soc->tcl_data_ring[index], TCL_DATA,
4883 			 tcl_ring_num, 0)) {
4884 		dp_err("dp_srng_init failed for tcl_data_ring");
4885 		goto fail1;
4886 	}
4887 	wlan_minidump_log(soc->tcl_data_ring[index].base_vaddr_unaligned,
4888 			  soc->tcl_data_ring[index].alloc_size,
4889 			  soc->ctrl_psoc,
4890 			  WLAN_MD_DP_SRNG_TCL_DATA,
4891 			  "tcl_data_ring");
4892 
4893 	if (wbm_ring_num == INVALID_WBM_RING_NUM)
4894 		goto set_rbm;
4895 
4896 	if (dp_srng_init(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4897 			 wbm_ring_num, 0)) {
4898 		dp_err("dp_srng_init failed for tx_comp_ring");
4899 		goto fail1;
4900 	}
4901 
4902 	wlan_minidump_log(soc->tx_comp_ring[index].base_vaddr_unaligned,
4903 			  soc->tx_comp_ring[index].alloc_size,
4904 			  soc->ctrl_psoc,
4905 			  WLAN_MD_DP_SRNG_TX_COMP,
4906 			  "tcl_comp_ring");
4907 set_rbm:
4908 	bm_id = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_ring_num);
4909 
4910 	soc->arch_ops.tx_implicit_rbm_set(soc, tcl_ring_num, bm_id);
4911 
4912 	return QDF_STATUS_SUCCESS;
4913 
4914 fail1:
4915 	return QDF_STATUS_E_FAILURE;
4916 }
4917 
4918 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index)
4919 {
4920 	dp_debug("index %u", index);
4921 	dp_srng_free(soc, &soc->tcl_data_ring[index]);
4922 	dp_srng_free(soc, &soc->tx_comp_ring[index]);
4923 }
4924 
4925 /**
4926  * dp_alloc_tx_ring_pair_by_index() - The function allocates tcl data/wbm2sw
4927  * ring pair for the given "index"
4928  * @soc: DP soc pointer
4929  * @index: index of soc->tcl_data or soc->tx_comp to initialize
4930  *
4931  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
4932  */
4933 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
4934 						 uint8_t index)
4935 {
4936 	int tx_ring_size;
4937 	int tx_comp_ring_size;
4938 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
4939 	int cached = 0;
4940 
4941 	if (index >= MAX_TCL_DATA_RINGS) {
4942 		dp_err("unexpected index!");
4943 		QDF_BUG(0);
4944 		goto fail1;
4945 	}
4946 
4947 	dp_debug("index %u", index);
4948 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
4949 	dp_ipa_get_tx_ring_size(index, &tx_ring_size, soc_cfg_ctx);
4950 
4951 	if (dp_srng_alloc(soc, &soc->tcl_data_ring[index], TCL_DATA,
4952 			  tx_ring_size, cached)) {
4953 		dp_err("dp_srng_alloc failed for tcl_data_ring");
4954 		goto fail1;
4955 	}
4956 
4957 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
4958 	dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size, soc_cfg_ctx);
4959 	/* Enable cached TCL desc if NSS offload is disabled */
4960 	if (!wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
4961 		cached = WLAN_CFG_DST_RING_CACHED_DESC;
4962 
4963 	if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) ==
4964 	    INVALID_WBM_RING_NUM)
4965 		return QDF_STATUS_SUCCESS;
4966 
4967 	if (dp_srng_alloc(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4968 			  tx_comp_ring_size, cached)) {
4969 		dp_err("dp_srng_alloc failed for tx_comp_ring");
4970 		goto fail1;
4971 	}
4972 
4973 	return QDF_STATUS_SUCCESS;
4974 
4975 fail1:
4976 	return QDF_STATUS_E_FAILURE;
4977 }
4978 
4979 static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
4980 {
4981 	struct cdp_lro_hash_config lro_hash;
4982 	QDF_STATUS status;
4983 
4984 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
4985 	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
4986 	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
4987 		dp_err("LRO, GRO and RX hash disabled");
4988 		return QDF_STATUS_E_FAILURE;
4989 	}
4990 
4991 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
4992 
4993 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
4994 	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
4995 		lro_hash.lro_enable = 1;
4996 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
4997 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
4998 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
4999 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
5000 	}
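
	/*
	 * With the flag/mask pair above, only segments whose TCP flags,
	 * under the mask, equal a plain ACK (SYN/FIN/RST/URG/ECE/CWR clear;
	 * PSH is not masked) are eligible for LRO/GRO aggregation.
	 */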
5001 
5002 	soc->arch_ops.get_rx_hash_key(soc, &lro_hash);
5003 
5004 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
5005 
5006 	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
5007 		QDF_BUG(0);
5008 		dp_err("lro_hash_config not configured");
5009 		return QDF_STATUS_E_FAILURE;
5010 	}
5011 
5012 	status = soc->cdp_soc.ol_ops->lro_hash_config(soc->ctrl_psoc,
5013 						      pdev->pdev_id,
5014 						      &lro_hash);
5015 	if (!QDF_IS_STATUS_SUCCESS(status)) {
5016 		dp_err("failed to send lro_hash_config to FW %u", status);
5017 		return status;
5018 	}
5019 
5020 	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
5021 		lro_hash.lro_enable, lro_hash.tcp_flag,
5022 		lro_hash.tcp_flag_mask);
5023 
5024 	dp_info("toeplitz_hash_ipv4:");
5025 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5026 			   lro_hash.toeplitz_hash_ipv4,
5027 			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
5028 			   LRO_IPV4_SEED_ARR_SZ));
5029 
5030 	dp_info("toeplitz_hash_ipv6:");
5031 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5032 			   lro_hash.toeplitz_hash_ipv6,
5033 			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
5034 			   LRO_IPV6_SEED_ARR_SZ));
5035 
5036 	return status;
5037 }
5038 
5039 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
5040 /*
5041  * dp_reap_timer_init() - initialize the reap timer
5042  * @soc: data path SoC handle
5043  *
5044  * Return: void
5045  */
5046 static void dp_reap_timer_init(struct dp_soc *soc)
5047 {
5048 	/*
5049 	 * Timer to reap rxdma status rings.
5050 	 * Needed until we enable ppdu end interrupts
5051 	 */
5052 	dp_monitor_reap_timer_init(soc);
5053 	dp_monitor_vdev_timer_init(soc);
5054 }
5055 
5056 /*
5057  * dp_reap_timer_deinit() - de-initialize the reap timer
5058  * @soc: data path SoC handle
5059  *
5060  * Return: void
5061  */
5062 static void dp_reap_timer_deinit(struct dp_soc *soc)
5063 {
5064 	dp_monitor_reap_timer_deinit(soc);
5065 }
5066 #else
5067 /* WIN use case */
5068 static void dp_reap_timer_init(struct dp_soc *soc)
5069 {
5070 	/* Configure LMAC rings in Polled mode */
5071 	if (soc->lmac_polled_mode) {
5072 		/*
5073 		 * Timer to reap lmac rings.
5074 		 */
5075 		qdf_timer_init(soc->osdev, &soc->lmac_reap_timer,
5076 			       dp_service_lmac_rings, (void *)soc,
5077 			       QDF_TIMER_TYPE_WAKE_APPS);
5078 		soc->lmac_timer_init = 1;
5079 		qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
5080 	}
5081 }
5082 
5083 static void dp_reap_timer_deinit(struct dp_soc *soc)
5084 {
5085 	if (soc->lmac_timer_init) {
5086 		qdf_timer_stop(&soc->lmac_reap_timer);
5087 		qdf_timer_free(&soc->lmac_reap_timer);
5088 		soc->lmac_timer_init = 0;
5089 	}
5090 }
5091 #endif
5092 
5093 #ifdef QCA_HOST2FW_RXBUF_RING
5094 /*
5095  * dp_rxdma_ring_alloc() - allocate the RXDMA rings
5096  * @soc: data path SoC handle
5097  * @pdev: Physical device handle
5098  *
5099  * Return: 0 - success, > 0 - failure
5100  */
5101 static int dp_rxdma_ring_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
5102 {
5103 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
5104 	int max_mac_rings;
5105 	int i;
5106 	int ring_size;
5107 
5108 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
5109 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
5110 	ring_size =  wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx);
5111 
5112 	for (i = 0; i < max_mac_rings; i++) {
5113 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
5114 		if (dp_srng_alloc(soc, &pdev->rx_mac_buf_ring[i],
5115 				  RXDMA_BUF, ring_size, 0)) {
5116 			dp_init_err("%pK: failed rx mac ring setup", soc);
5117 			return QDF_STATUS_E_FAILURE;
5118 		}
5119 	}
5120 	return QDF_STATUS_SUCCESS;
5121 }
5122 
5123 /*
5124  * dp_rxdma_ring_setup() - configure the RXDMA rings
5125  * @soc: data path SoC handle
5126  * @pdev: Physical device handle
5127  *
5128  * Return: 0 - success, > 0 - failure
5129  */
5130 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
5131 {
5132 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
5133 	int max_mac_rings;
5134 	int i;
5135 
5136 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
5137 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
5138 
5139 	for (i = 0; i < max_mac_rings; i++) {
5140 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
5141 		if (dp_srng_init(soc, &pdev->rx_mac_buf_ring[i],
5142 				 RXDMA_BUF, 1, i)) {
5143 			dp_init_err("%pK: failed rx mac ring setup", soc);
5144 			return QDF_STATUS_E_FAILURE;
5145 		}
5146 	}
5147 	return QDF_STATUS_SUCCESS;
5148 }
5149 
5150 /*
5151  * dp_rxdma_ring_cleanup() - Deinit the RXDMA rings and reap timer
5152  * @soc: data path SoC handle
5153  * @pdev: Physical device handle
5154  *
5155  * Return: void
5156  */
5157 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
5158 {
5159 	int i;
5160 
5161 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
5162 		dp_srng_deinit(soc, &pdev->rx_mac_buf_ring[i], RXDMA_BUF, 1);
5163 
5164 	dp_reap_timer_deinit(soc);
5165 }
5166 
5167 /*
5168  * dp_rxdma_ring_free() - Free the RXDMA rings
5169  * @pdev: Physical device handle
5170  *
5171  * Return: void
5172  */
5173 static void dp_rxdma_ring_free(struct dp_pdev *pdev)
5174 {
5175 	int i;
5176 
5177 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
5178 		dp_srng_free(pdev->soc, &pdev->rx_mac_buf_ring[i]);
5179 }
5180 
5181 #else
5182 static int dp_rxdma_ring_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
5183 {
5184 	return QDF_STATUS_SUCCESS;
5185 }
5186 
5187 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
5188 {
5189 	return QDF_STATUS_SUCCESS;
5190 }
5191 
5192 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
5193 {
5194 	dp_reap_timer_deinit(soc);
5195 }
5196 
5197 static void dp_rxdma_ring_free(struct dp_pdev *pdev)
5198 {
5199 }
5200 #endif
5201 
5202 /**
5203  * dp_dscp_tid_map_setup() - Initialize the dscp-tid maps
5204  * @pdev: DP_PDEV handle
5205  *
5206  * Return: void
5207  */
5208 static inline void
5209 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
5210 {
5211 	uint8_t map_id;
5212 	struct dp_soc *soc = pdev->soc;
5213 
5214 	if (!soc)
5215 		return;
5216 
5217 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
5218 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
5219 			     default_dscp_tid_map,
5220 			     sizeof(default_dscp_tid_map));
5221 	}
5222 
5223 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
5224 		hal_tx_set_dscp_tid_map(soc->hal_soc,
5225 					default_dscp_tid_map,
5226 					map_id);
5227 	}
5228 }
5229 
5230 /**
5231  * dp_pcp_tid_map_setup() - Initialize the pcp-tid maps
5232  * @pdev: DP_PDEV handle
5233  *
5234  * Return: void
5235  */
5236 static inline void
5237 dp_pcp_tid_map_setup(struct dp_pdev *pdev)
5238 {
5239 	struct dp_soc *soc = pdev->soc;
5240 
5241 	if (!soc)
5242 		return;
5243 
5244 	qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
5245 		     sizeof(default_pcp_tid_map));
5246 	hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
5247 }
5248 
5249 #ifdef IPA_OFFLOAD
5250 /**
5251  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
5252  * @soc: data path instance
5253  * @pdev: core txrx pdev context
5254  *
5255  * Return: QDF_STATUS_SUCCESS: success
5256  *         QDF_STATUS_E_RESOURCES: Error return
5257  */
5258 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5259 					   struct dp_pdev *pdev)
5260 {
5261 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5262 	int entries;
5263 
5264 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5265 		soc_cfg_ctx = soc->wlan_cfg_ctx;
5266 		entries =
5267 			wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
5268 
5269 		/* Setup second Rx refill buffer ring */
5270 		if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
5271 				  entries, 0)) {
5272 			dp_init_err("%pK: dp_srng_alloc failed second"
5273 				    "rx refill ring", soc);
5274 			return QDF_STATUS_E_FAILURE;
5275 		}
5276 	}
5277 
5278 	return QDF_STATUS_SUCCESS;
5279 }
5280 
5281 #ifdef IPA_WDI3_VLAN_SUPPORT
5282 static int dp_setup_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5283 					       struct dp_pdev *pdev)
5284 {
5285 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5286 	int entries;
5287 
5288 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5289 	    wlan_ipa_is_vlan_enabled()) {
5290 		soc_cfg_ctx = soc->wlan_cfg_ctx;
5291 		entries =
5292 			wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
5293 
5294 		/* Setup second Rx refill buffer ring */
5295 		if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF,
5296 				  entries, 0)) {
5297 			dp_init_err("%pK: alloc failed for 3rd rx refill ring",
5298 				    soc);
5299 			return QDF_STATUS_E_FAILURE;
5300 		}
5301 	}
5302 
5303 	return QDF_STATUS_SUCCESS;
5304 }
5305 
5306 static int dp_init_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5307 					      struct dp_pdev *pdev)
5308 {
5309 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5310 	    wlan_ipa_is_vlan_enabled()) {
5311 		if (dp_srng_init(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF,
5312 				 IPA_RX_ALT_REFILL_BUF_RING_IDX,
5313 				 pdev->pdev_id)) {
5314 			dp_init_err("%pK: init failed for 3rd rx refill ring",
5315 				    soc);
5316 			return QDF_STATUS_E_FAILURE;
5317 		}
5318 	}
5319 
5320 	return QDF_STATUS_SUCCESS;
5321 }
5322 
5323 static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5324 						 struct dp_pdev *pdev)
5325 {
5326 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5327 	    wlan_ipa_is_vlan_enabled())
5328 		dp_srng_deinit(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF, 0);
5329 }
5330 
5331 static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5332 					       struct dp_pdev *pdev)
5333 {
5334 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5335 	    wlan_ipa_is_vlan_enabled())
5336 		dp_srng_free(soc, &pdev->rx_refill_buf_ring3);
5337 }
5338 #else
5339 static int dp_setup_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5340 					       struct dp_pdev *pdev)
5341 {
5342 	return QDF_STATUS_SUCCESS;
5343 }
5344 
5345 static int dp_init_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5346 					      struct dp_pdev *pdev)
5347 {
5348 	return QDF_STATUS_SUCCESS;
5349 }
5350 
5351 static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5352 						 struct dp_pdev *pdev)
5353 {
5354 }
5355 
5356 static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5357 					       struct dp_pdev *pdev)
5358 {
5359 }
5360 #endif
5361 
5362 /**
5363  * dp_deinit_ipa_rx_refill_buf_ring - deinit second Rx refill buffer ring
5364  * @soc: data path instance
5365  * @pdev: core txrx pdev context
5366  *
5367  * Return: void
5368  */
5369 static void dp_deinit_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5370 					     struct dp_pdev *pdev)
5371 {
5372 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
5373 		dp_srng_deinit(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 0);
5374 }
5375 
5376 /**
5377  * dp_init_ipa_rx_refill_buf_ring - Init second Rx refill buffer ring
5378  * @soc: data path instance
5379  * @pdev: core txrx pdev context
5380  *
5381  * Return: QDF_STATUS_SUCCESS: success
5382  *         QDF_STATUS_E_FAILURE: Error return
5383  */
5384 static int dp_init_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5385 					  struct dp_pdev *pdev)
5386 {
5387 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5388 		if (dp_srng_init(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
5389 				 IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id)) {
5390 			dp_init_err("%pK: dp_srng_init failed second "
5391 				    "rx refill ring", soc);
5392 			return QDF_STATUS_E_FAILURE;
5393 		}
5394 	}
5395 
5396 	if (dp_init_ipa_rx_alt_refill_buf_ring(soc, pdev)) {
5397 		dp_deinit_ipa_rx_refill_buf_ring(soc, pdev);
5398 		return QDF_STATUS_E_FAILURE;
5399 	}
5400 
5401 	return QDF_STATUS_SUCCESS;
5402 }
5403 
5404 /**
5405  * dp_free_ipa_rx_refill_buf_ring - free second Rx refill buffer ring
5406  * @soc: data path instance
5407  * @pdev: core txrx pdev context
5408  *
5409  * Return: void
5410  */
5411 static void dp_free_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5412 					   struct dp_pdev *pdev)
5413 {
5414 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
5415 		dp_srng_free(soc, &pdev->rx_refill_buf_ring2);
5416 }
5417 #else
5418 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5419 					   struct dp_pdev *pdev)
5420 {
5421 	return QDF_STATUS_SUCCESS;
5422 }
5423 
5424 static int dp_init_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5425 					  struct dp_pdev *pdev)
5426 {
5427 	return QDF_STATUS_SUCCESS;
5428 }
5429 
5430 static void dp_deinit_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5431 					     struct dp_pdev *pdev)
5432 {
5433 }
5434 
5435 static void dp_free_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5436 					   struct dp_pdev *pdev)
5437 {
5438 }
5439 
5440 static int dp_setup_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5441 					       struct dp_pdev *pdev)
5442 {
5443 	return QDF_STATUS_SUCCESS;
5444 }
5445 
5446 static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5447 						 struct dp_pdev *pdev)
5448 {
5449 }
5450 
5451 static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5452 					       struct dp_pdev *pdev)
5453 {
5454 }
5455 #endif
5456 
5457 #ifdef DP_TX_HW_DESC_HISTORY
5458 /**
5459  * dp_soc_tx_hw_desc_history_attach - Attach TX HW descriptor history
5460  *
5461  * @soc: DP soc handle
5462  *
5463  * Return: None
5464  */
5465 static void dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc)
5466 {
5467 	dp_soc_frag_history_attach(soc, &soc->tx_hw_desc_history,
5468 				   DP_TX_HW_DESC_HIST_MAX_SLOTS,
5469 				   DP_TX_HW_DESC_HIST_PER_SLOT_MAX,
5470 				   sizeof(struct dp_tx_hw_desc_evt),
5471 				   true, DP_TX_HW_DESC_HIST_TYPE);
5472 }
5473 
5474 static void dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc)
5475 {
5476 	dp_soc_frag_history_detach(soc, &soc->tx_hw_desc_history,
5477 				   DP_TX_HW_DESC_HIST_MAX_SLOTS,
5478 				   true, DP_TX_HW_DESC_HIST_TYPE);
5479 }
5480 
5481 #else /* DP_TX_HW_DESC_HISTORY */
5482 static inline void
5483 dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc)
5484 {
5485 }
5486 
5487 static inline void
5488 dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc)
5489 {
5490 }
5491 #endif /* DP_TX_HW_DESC_HISTORY */
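
/*
 * Hedged sketch of the fragmented-history layout assumed above: the
 * history is carved into MAX_SLOTS allocations of PER_SLOT_MAX entries
 * each, so a monotonically increasing write index resolves to a slot
 * and an offset within it, roughly:
 *
 *	idx  = qdf_atomic_inc_return(&hist->index);
 *	slot = (idx / DP_TX_HW_DESC_HIST_PER_SLOT_MAX) %
 *	       DP_TX_HW_DESC_HIST_MAX_SLOTS;
 *	off  = idx % DP_TX_HW_DESC_HIST_PER_SLOT_MAX;
 *
 * The actual record helper lives elsewhere; field names here are
 * illustrative only.
 */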
5492 
5493 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
5494 #ifndef RX_DEFRAG_DO_NOT_REINJECT
5495 /**
5496  * dp_soc_rx_reinject_ring_history_attach - Attach the reo reinject ring
5497  *					    history.
5498  * @soc: DP soc handle
5499  *
5500  * Return: None
5501  */
5502 static void dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
5503 {
5504 	soc->rx_reinject_ring_history =
5505 		dp_context_alloc_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
5506 				     sizeof(struct dp_rx_reinject_history));
5507 	if (soc->rx_reinject_ring_history)
5508 		qdf_atomic_init(&soc->rx_reinject_ring_history->index);
5509 }
5510 #else /* RX_DEFRAG_DO_NOT_REINJECT */
5511 static inline void
5512 dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
5513 {
5514 }
5515 #endif /* RX_DEFRAG_DO_NOT_REINJECT */
5516 
5517 /**
5518  * dp_soc_rx_history_attach() - Attach the ring history record buffers
5519  * @soc: DP soc structure
5520  *
5521  * This function allocates the memory for recording the rx ring, rx error
5522  * ring and the reinject ring entries. There is no error returned in case
5523  * of allocation failure since the record function checks if the history is
5524  * initialized or not. We do not want to fail the driver load in case of
5525  * failure to allocate memory for debug history.
5526  *
5527  * Return: None
5528  */
5529 static void dp_soc_rx_history_attach(struct dp_soc *soc)
5530 {
5531 	int i;
5532 	uint32_t rx_ring_hist_size;
5533 	uint32_t rx_refill_ring_hist_size;
5534 
5535 	rx_ring_hist_size = sizeof(*soc->rx_ring_history[0]);
5536 	rx_refill_ring_hist_size = sizeof(*soc->rx_refill_ring_history[0]);
5537 
5538 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
5539 		soc->rx_ring_history[i] = dp_context_alloc_mem(
5540 				soc, DP_RX_RING_HIST_TYPE, rx_ring_hist_size);
5541 		if (soc->rx_ring_history[i])
5542 			qdf_atomic_init(&soc->rx_ring_history[i]->index);
5543 	}
5544 
5545 	soc->rx_err_ring_history = dp_context_alloc_mem(
5546 			soc, DP_RX_ERR_RING_HIST_TYPE, rx_ring_hist_size);
5547 	if (soc->rx_err_ring_history)
5548 		qdf_atomic_init(&soc->rx_err_ring_history->index);
5549 
5550 	dp_soc_rx_reinject_ring_history_attach(soc);
5551 
5552 	for (i = 0; i < MAX_PDEV_CNT; i++) {
5553 		soc->rx_refill_ring_history[i] = dp_context_alloc_mem(
5554 						soc,
5555 						DP_RX_REFILL_RING_HIST_TYPE,
5556 						rx_refill_ring_hist_size);
5557 
5558 		if (soc->rx_refill_ring_history[i])
5559 			qdf_atomic_init(&soc->rx_refill_ring_history[i]->index);
5560 	}
5561 }
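
/*
 * Hedged sketch of the guarded-record pattern the comment above relies
 * on: when an allocation failed, the corresponding recorder simply
 * becomes a no-op instead of failing driver load:
 *
 *	if (!soc->rx_ring_history[ring_id])
 *		return;
 *	idx = qdf_atomic_inc_return(&soc->rx_ring_history[ring_id]->index);
 *	(store the entry at idx modulo the history depth)
 */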
5562 
5563 static void dp_soc_rx_history_detach(struct dp_soc *soc)
5564 {
5565 	int i;
5566 
5567 	for (i = 0; i < MAX_REO_DEST_RINGS; i++)
5568 		dp_context_free_mem(soc, DP_RX_RING_HIST_TYPE,
5569 				    soc->rx_ring_history[i]);
5570 
5571 	dp_context_free_mem(soc, DP_RX_ERR_RING_HIST_TYPE,
5572 			    soc->rx_err_ring_history);
5573 
5574 	/*
5575 	 * No need for a featurized detach since qdf_mem_free takes
5576 	 * care of NULL pointer.
5577 	 */
5578 	dp_context_free_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
5579 			    soc->rx_reinject_ring_history);
5580 
5581 	for (i = 0; i < MAX_PDEV_CNT; i++)
5582 		dp_context_free_mem(soc, DP_RX_REFILL_RING_HIST_TYPE,
5583 				    soc->rx_refill_ring_history[i]);
5584 }
5585 
5586 #else
5587 static inline void dp_soc_rx_history_attach(struct dp_soc *soc)
5588 {
5589 }
5590 
5591 static inline void dp_soc_rx_history_detach(struct dp_soc *soc)
5592 {
5593 }
5594 #endif
5595 
5596 #ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
5597 /**
5598  * dp_soc_mon_status_ring_history_attach() - Attach the monitor status
5599  *					     buffer record history.
5600  * @soc: DP soc handle
5601  *
5602  * This function allocates memory to track the event for a monitor
5603  * status buffer, before it is parsed and freed.
5604  *
5605  * Return: None
5606  */
5607 static void dp_soc_mon_status_ring_history_attach(struct dp_soc *soc)
5608 {
5609 	soc->mon_status_ring_history = dp_context_alloc_mem(soc,
5610 				DP_MON_STATUS_BUF_HIST_TYPE,
5611 				sizeof(struct dp_mon_status_ring_history));
5612 	if (!soc->mon_status_ring_history) {
5613 		dp_err("Failed to alloc memory for mon status ring history");
5614 		return;
5615 	}
5616 }
5617 
5618 /**
5619  * dp_soc_mon_status_ring_history_detach() - Detach the monitor status buffer
5620  *					     record history.
5621  * @soc: DP soc handle
5622  *
5623  * Return: None
5624  */
5625 static void dp_soc_mon_status_ring_history_detach(struct dp_soc *soc)
5626 {
5627 	dp_context_free_mem(soc, DP_MON_STATUS_BUF_HIST_TYPE,
5628 			    soc->mon_status_ring_history);
5629 }
5630 #else
5631 static void dp_soc_mon_status_ring_history_attach(struct dp_soc *soc)
5632 {
5633 }
5634 
5635 static void dp_soc_mon_status_ring_history_detach(struct dp_soc *soc)
5636 {
5637 }
5638 #endif
5639 
5640 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
5641 /**
5642  * dp_soc_tx_history_attach() - Attach the ring history record buffers
5643  * @soc: DP soc structure
5644  *
5645  * This function allocates the memory for recording the tx tcl ring and
5646  * the tx comp ring entries. There is no error returned in case
5647  * of allocation failure since the record function checks if the history is
5648  * initialized or not. We do not want to fail the driver load in case of
5649  * failure to allocate memory for debug history.
5650  *
5651  * Return: None
5652  */
5653 static void dp_soc_tx_history_attach(struct dp_soc *soc)
5654 {
5655 	dp_soc_frag_history_attach(soc, &soc->tx_tcl_history,
5656 				   DP_TX_TCL_HIST_MAX_SLOTS,
5657 				   DP_TX_TCL_HIST_PER_SLOT_MAX,
5658 				   sizeof(struct dp_tx_desc_event),
5659 				   true, DP_TX_TCL_HIST_TYPE);
5660 	dp_soc_frag_history_attach(soc, &soc->tx_comp_history,
5661 				   DP_TX_COMP_HIST_MAX_SLOTS,
5662 				   DP_TX_COMP_HIST_PER_SLOT_MAX,
5663 				   sizeof(struct dp_tx_desc_event),
5664 				   true, DP_TX_COMP_HIST_TYPE);
5665 }
5666 
5667 /**
5668  * dp_soc_tx_history_detach() - Detach the ring history record buffers
5669  * @soc: DP soc structure
5670  *
5671  * This function frees the memory for recording the tx tcl ring and
5672  * the tx comp ring entries.
5673  *
5674  * Return: None
5675  */
5676 static void dp_soc_tx_history_detach(struct dp_soc *soc)
5677 {
5678 	dp_soc_frag_history_detach(soc, &soc->tx_tcl_history,
5679 				   DP_TX_TCL_HIST_MAX_SLOTS,
5680 				   true, DP_TX_TCL_HIST_TYPE);
5681 	dp_soc_frag_history_detach(soc, &soc->tx_comp_history,
5682 				   DP_TX_COMP_HIST_MAX_SLOTS,
5683 				   true, DP_TX_COMP_HIST_TYPE);
5684 }
5685 
5686 #else
5687 static inline void dp_soc_tx_history_attach(struct dp_soc *soc)
5688 {
5689 }
5690 
5691 static inline void dp_soc_tx_history_detach(struct dp_soc *soc)
5692 {
5693 }
5694 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
5695 
5696 /**
5697  * dp_pdev_attach_wifi3() - attach txrx pdev
5698  * @txrx_soc: Datapath SOC handle
5699  * @params: Params for PDEV attach
5700  *
5701  * Return: QDF_STATUS
5702  */
5703 static inline
5704 QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
5705 				struct cdp_pdev_attach_params *params)
5706 {
5707 	qdf_size_t pdev_context_size;
5708 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5709 	struct dp_pdev *pdev = NULL;
5710 	uint8_t pdev_id = params->pdev_id;
5711 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5712 	int nss_cfg;
5713 
5714 	pdev_context_size =
5715 		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_PDEV);
5716 	if (pdev_context_size)
5717 		pdev = dp_context_alloc_mem(soc, DP_PDEV_TYPE, pdev_context_size);
5718 
5719 	if (!pdev) {
5720 		dp_init_err("%pK: DP PDEV memory allocation failed",
5721 			    soc);
5722 		goto fail0;
5723 	}
5724 	wlan_minidump_log(pdev, sizeof(*pdev), soc->ctrl_psoc,
5725 			  WLAN_MD_DP_PDEV, "dp_pdev");
5726 
5727 	soc_cfg_ctx = soc->wlan_cfg_ctx;
5728 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
5729 
5730 	if (!pdev->wlan_cfg_ctx) {
5731 		dp_init_err("%pK: pdev cfg_attach failed", soc);
5732 		goto fail1;
5733 	}
5734 
5735 	/*
5736 	 * set nss pdev config based on soc config
5737 	 */
5738 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
5739 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
5740 					 (nss_cfg & (1 << pdev_id)));
5741 
5742 	pdev->soc = soc;
5743 	pdev->pdev_id = pdev_id;
5744 	soc->pdev_list[pdev_id] = pdev;
5745 
5746 	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
5747 	soc->pdev_count++;
5748 
5749 	/* Allocate memory for pdev srng rings */
5750 	if (dp_pdev_srng_alloc(pdev)) {
5751 		dp_init_err("%pK: dp_pdev_srng_alloc failed", soc);
5752 		goto fail2;
5753 	}
5754 
5755 	/* Setup second Rx refill buffer ring */
5756 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev)) {
5757 		dp_init_err("%pK: dp_srng_alloc failed rxrefill2 ring",
5758 			    soc);
5759 		goto fail3;
5760 	}
5761 
5762 	/* Allocate memory for pdev rxdma rings */
5763 	if (dp_rxdma_ring_alloc(soc, pdev)) {
5764 		dp_init_err("%pK: dp_rxdma_ring_alloc failed", soc);
5765 		goto fail4;
5766 	}
5767 
5768 	/* Rx specific init */
5769 	if (dp_rx_pdev_desc_pool_alloc(pdev)) {
5770 		dp_init_err("%pK: dp_rx_pdev_attach failed", soc);
5771 		goto fail4;
5772 	}
5773 
5774 	if (dp_monitor_pdev_attach(pdev)) {
5775 		dp_init_err("%pK: dp_monitor_pdev_attach failed", soc);
5776 		goto fail5;
5777 	}
5778 
5779 	soc->arch_ops.txrx_pdev_attach(pdev, params);
5780 
5781 	/* Setup third Rx refill buffer ring */
5782 	if (dp_setup_ipa_rx_alt_refill_buf_ring(soc, pdev)) {
5783 		dp_init_err("%pK: dp_srng_alloc failed rxrefill3 ring",
5784 			    soc);
5785 		goto fail6;
5786 	}
5787 
5788 	return QDF_STATUS_SUCCESS;
5789 
5790 fail6:
5791 	dp_monitor_pdev_detach(pdev);
5792 fail5:
5793 	dp_rx_pdev_desc_pool_free(pdev);
5794 fail4:
5795 	dp_rxdma_ring_free(pdev);
5796 	dp_free_ipa_rx_refill_buf_ring(soc, pdev);
5797 fail3:
5798 	dp_pdev_srng_free(pdev);
5799 fail2:
5800 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
5801 fail1:
5802 	soc->pdev_list[pdev_id] = NULL;
5803 	qdf_mem_free(pdev);
5804 fail0:
5805 	return QDF_STATUS_E_FAILURE;
5806 }
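
/*
 * Note on the error handling above: the failN labels unwind strictly in
 * reverse order of acquisition (the usual goto-cleanup idiom), so each
 * failure point only ever has to undo the steps that already succeeded.
 */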
5807 
5808 /**
5809  * dp_pdev_flush_pending_vdevs() - Flush all delete pending vdevs in pdev
5810  * @pdev: Datapath PDEV handle
5811  *
5812  * This is the last chance to flush all pending dp vdevs/peers;
5813  * it covers peer/vdev leak cases such as Non-SSR with a missing
5814  * peer unmap.
5815  *
5816  * Return: None
5817  */
5818 static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
5819 {
5820 	struct dp_soc *soc = pdev->soc;
5821 	struct dp_vdev *vdev_arr[MAX_VDEV_CNT] = {0};
5822 	uint32_t i = 0;
5823 	uint32_t num_vdevs = 0;
5824 	struct dp_vdev *vdev = NULL;
5825 
5826 	if (TAILQ_EMPTY(&soc->inactive_vdev_list))
5827 		return;
5828 
5829 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
5830 	TAILQ_FOREACH(vdev, &soc->inactive_vdev_list,
5831 		      inactive_list_elem) {
5832 		if (vdev->pdev != pdev)
5833 			continue;
5834 
5835 		vdev_arr[num_vdevs] = vdev;
5836 		num_vdevs++;
5837 		/* take reference to free */
5838 		dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CDP);
5839 	}
5840 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
5841 
5842 	for (i = 0; i < num_vdevs; i++) {
5843 		dp_vdev_flush_peers((struct cdp_vdev *)vdev_arr[i], 0, 0);
5844 		dp_vdev_unref_delete(soc, vdev_arr[i], DP_MOD_ID_CDP);
5845 	}
5846 }
5847 
5848 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
5849 /**
5850  * dp_vdev_stats_hw_offload_target_config() - Send HTT command to FW
5851  *                                          for enable/disable of HW vdev stats
5852  * @soc: Datapath soc handle
5853  * @pdev_id: INVALID_PDEV_ID for all pdevs or 0,1,2 for individual pdev
5854  * @enable: flag to represent enable/disable of hw vdev stats
5855  *
5856  * Return: none
5857  */
5858 static void dp_vdev_stats_hw_offload_target_config(struct dp_soc *soc,
5859 						   uint8_t pdev_id,
5860 						   bool enable)
5861 {
5862 	/* Check SOC level config for HW offload vdev stats support */
5863 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
5864 		dp_debug("%pK: HW vdev offload stats is disabled", soc);
5865 		return;
5866 	}
5867 
5868 	/* Send HTT command to FW for enable of stats */
5869 	dp_h2t_hw_vdev_stats_config_send(soc, pdev_id, enable, false, 0);
5870 }
5871 
5872 /**
5873  * dp_vdev_stats_hw_offload_target_clear() - Clear HW vdev stats on target
5874  * @soc: Datapath soc handle
5875  * @pdev_id: pdev_id (0,1,2)
5876  * @vdev_id_bitmask: bitmask of vdev_id(s) whose stats are to be cleared on HW
5877  *
5878  * Return: none
5879  */
5880 static
5881 void dp_vdev_stats_hw_offload_target_clear(struct dp_soc *soc, uint8_t pdev_id,
5882 					   uint64_t vdev_id_bitmask)
5883 {
5884 	/* Check SOC level config for HW offload vdev stats support */
5885 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
5886 		dp_debug("%pK: HW vdev offload stats is disabled", soc);
5887 		return;
5888 	}
5889 
5890 	/* Send HTT command to FW for reset of stats */
5891 	dp_h2t_hw_vdev_stats_config_send(soc, pdev_id, true, true,
5892 					 vdev_id_bitmask);
5893 }
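
/*
 * Example with hypothetical values: clearing HW stats for vdev 0 and
 * vdev 3 on pdev 0 would look like
 *
 *	dp_vdev_stats_hw_offload_target_clear(soc, 0,
 *					      (1ULL << 0) | (1ULL << 3));
 */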
5894 #else
5895 static void
5896 dp_vdev_stats_hw_offload_target_config(struct dp_soc *soc, uint8_t pdev_id,
5897 				       bool enable)
5898 {
5899 }
5900 
5901 static
5902 void dp_vdev_stats_hw_offload_target_clear(struct dp_soc *soc, uint8_t pdev_id,
5903 					   uint64_t vdev_id_bitmask)
5904 {
5905 }
5906 #endif /*QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT */
5907 
5908 /**
5909  * dp_pdev_deinit() - Deinit txrx pdev
5910  * @txrx_pdev: Datapath PDEV handle
5911  * @force: Force deinit
5912  *
5913  * Return: None
5914  */
5915 static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
5916 {
5917 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
5918 	qdf_nbuf_t curr_nbuf, next_nbuf;
5919 
5920 	if (pdev->pdev_deinit)
5921 		return;
5922 
5923 	dp_tx_me_exit(pdev);
5924 	dp_rx_fst_detach(pdev->soc, pdev);
5925 	dp_rx_pdev_buffers_free(pdev);
5926 	dp_rx_pdev_desc_pool_deinit(pdev);
5927 	dp_pdev_bkp_stats_detach(pdev);
5928 	qdf_event_destroy(&pdev->fw_peer_stats_event);
5929 	qdf_event_destroy(&pdev->fw_stats_event);
5930 	if (pdev->sojourn_buf)
5931 		qdf_nbuf_free(pdev->sojourn_buf);
5932 
5933 	dp_pdev_flush_pending_vdevs(pdev);
5934 	dp_tx_desc_flush(pdev, NULL, true);
5935 
5936 	qdf_spinlock_destroy(&pdev->tx_mutex);
5937 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
5938 
5939 	dp_monitor_pdev_deinit(pdev);
5940 
5941 	dp_pdev_srng_deinit(pdev);
5942 
5943 	dp_ipa_uc_detach(pdev->soc, pdev);
5944 	dp_deinit_ipa_rx_alt_refill_buf_ring(pdev->soc, pdev);
5945 	dp_deinit_ipa_rx_refill_buf_ring(pdev->soc, pdev);
5946 	dp_rxdma_ring_cleanup(pdev->soc, pdev);
5947 
5948 	curr_nbuf = pdev->invalid_peer_head_msdu;
5949 	while (curr_nbuf) {
5950 		next_nbuf = qdf_nbuf_next(curr_nbuf);
5951 		dp_rx_nbuf_free(curr_nbuf);
5952 		curr_nbuf = next_nbuf;
5953 	}
5954 	pdev->invalid_peer_head_msdu = NULL;
5955 	pdev->invalid_peer_tail_msdu = NULL;
5956 
5957 	dp_wdi_event_detach(pdev);
5958 	pdev->pdev_deinit = 1;
5959 }
5960 
5961 /**
5962  * dp_pdev_deinit_wifi3() - Deinit txrx pdev
5963  * @psoc: Datapath psoc handle
5964  * @pdev_id: Id of datapath PDEV handle
5965  * @force: Force deinit
5966  *
5967  * Return: QDF_STATUS
5968  */
5969 static QDF_STATUS
5970 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
5971 		     int force)
5972 {
5973 	struct dp_pdev *txrx_pdev;
5974 
5975 	txrx_pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
5976 						       pdev_id);
5977 
5978 	if (!txrx_pdev)
5979 		return QDF_STATUS_E_FAILURE;
5980 
5981 	dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force);
5982 
5983 	return QDF_STATUS_SUCCESS;
5984 }
5985 
5986 /*
5987  * dp_pdev_post_attach() - Do post pdev attach after dev_alloc_name
5988  * @txrx_pdev: Datapath PDEV handle
5989  *
5990  * Return: None
5991  */
5992 static void dp_pdev_post_attach(struct cdp_pdev *txrx_pdev)
5993 {
5994 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
5995 
5996 	dp_monitor_tx_capture_debugfs_init(pdev);
5997 
5998 	if (dp_pdev_htt_stats_dbgfs_init(pdev)) {
5999 		dp_init_err("%pK: Failed to initialize pdev HTT stats debugfs", pdev->soc);
6000 	}
6001 }
6002 
6003 /*
6004  * dp_pdev_post_attach_wifi3() - post-attach handling for txrx pdev
6005  * @soc: Datapath soc handle
6006  * @pdev_id: pdev id of pdev
6007  *
6008  * Return: QDF_STATUS
6009  */
6010 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *soc,
6011 				     uint8_t pdev_id)
6012 {
6013 	struct dp_pdev *pdev;
6014 
6015 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
6016 						  pdev_id);
6017 
6018 	if (!pdev) {
6019 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
6020 			    (struct dp_soc *)soc, pdev_id);
6021 		return QDF_STATUS_E_FAILURE;
6022 	}
6023 
6024 	dp_pdev_post_attach((struct cdp_pdev *)pdev);
6025 	return QDF_STATUS_SUCCESS;
6026 }
6027 
6028 /*
6029  * dp_pdev_detach() - Complete rest of pdev detach
6030  * @txrx_pdev: Datapath PDEV handle
6031  * @force: Force deinit
6032  *
6033  * Return: None
6034  */
6035 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
6036 {
6037 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
6038 	struct dp_soc *soc = pdev->soc;
6039 
6040 	dp_pdev_htt_stats_dbgfs_deinit(pdev);
6041 	dp_rx_pdev_desc_pool_free(pdev);
6042 	dp_monitor_pdev_detach(pdev);
6043 	dp_rxdma_ring_free(pdev);
6044 	dp_free_ipa_rx_refill_buf_ring(soc, pdev);
6045 	dp_free_ipa_rx_alt_refill_buf_ring(soc, pdev);
6046 	dp_pdev_srng_free(pdev);
6047 
6048 	soc->pdev_count--;
6049 	soc->pdev_list[pdev->pdev_id] = NULL;
6050 
6051 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
6052 	wlan_minidump_remove(pdev, sizeof(*pdev), soc->ctrl_psoc,
6053 			     WLAN_MD_DP_PDEV, "dp_pdev");
6054 	dp_context_free_mem(soc, DP_PDEV_TYPE, pdev);
6055 }
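
/*
 * Hedged note: dp_pdev_deinit() above tears down init-time state while
 * dp_pdev_detach() releases attach-time allocations, mirroring the
 * alloc/init split at pdev bring-up; deinit is expected to run before
 * detach.
 */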
6056 
6057 /*
6058  * dp_pdev_detach_wifi3() - detach txrx pdev
6059  * @psoc: Datapath soc handle
6060  * @pdev_id: pdev id of pdev
6061  * @force: Force detach
6062  *
6063  * Return: QDF_STATUS
6064  */
6065 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
6066 				       int force)
6067 {
6068 	struct dp_pdev *pdev;
6069 	struct dp_soc *soc = (struct dp_soc *)psoc;
6070 
6071 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
6072 						  pdev_id);
6073 
6074 	if (!pdev) {
6075 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
6076 			    (struct dp_soc *)psoc, pdev_id);
6077 		return QDF_STATUS_E_FAILURE;
6078 	}
6079 
6080 	soc->arch_ops.txrx_pdev_detach(pdev);
6081 
6082 	dp_pdev_detach((struct cdp_pdev *)pdev, force);
6083 	return QDF_STATUS_SUCCESS;
6084 }
6085 
6086 /*
6087  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from the freelist
6088  * @soc: DP SOC handle
6089  */
6090 #ifndef DP_UMAC_HW_RESET_SUPPORT
6091 static inline
6092 #endif
6093 void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
6094 {
6095 	struct reo_desc_list_node *desc;
6096 	struct dp_rx_tid *rx_tid;
6097 
6098 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
6099 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
6100 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
6101 		rx_tid = &desc->rx_tid;
6102 		qdf_mem_unmap_nbytes_single(soc->osdev,
6103 			rx_tid->hw_qdesc_paddr,
6104 			QDF_DMA_BIDIRECTIONAL,
6105 			rx_tid->hw_qdesc_alloc_size);
6106 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
6107 		qdf_mem_free(desc);
6108 	}
6109 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
6110 	qdf_list_destroy(&soc->reo_desc_freelist);
6111 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
6112 }
6113 
6114 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
6115 /*
6116  * dp_reo_desc_deferred_freelist_create() - Initialize the resources used
6117  *                                          for deferred reo desc list
6118  * @soc: Datapath soc handle
6119  *
6120  * Return: void
6121  */
6122 static void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
6123 {
6124 	qdf_spinlock_create(&soc->reo_desc_deferred_freelist_lock);
6125 	qdf_list_create(&soc->reo_desc_deferred_freelist,
6126 			REO_DESC_DEFERRED_FREELIST_SIZE);
6127 	soc->reo_desc_deferred_freelist_init = true;
6128 }
6129 
6130 /*
6131  * dp_reo_desc_deferred_freelist_destroy() - loop the deferred free list &
6132  *                                           free the leftover REO QDESCs
6133  * @soc: Datapath soc handle
6134  *
6135  * Return: void
6136  */
6137 static void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
6138 {
6139 	struct reo_desc_deferred_freelist_node *desc;
6140 
6141 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
6142 	soc->reo_desc_deferred_freelist_init = false;
6143 	while (qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
6144 	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
6145 		qdf_mem_unmap_nbytes_single(soc->osdev,
6146 					    desc->hw_qdesc_paddr,
6147 					    QDF_DMA_BIDIRECTIONAL,
6148 					    desc->hw_qdesc_alloc_size);
6149 		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
6150 		qdf_mem_free(desc);
6151 	}
6152 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
6153 
6154 	qdf_list_destroy(&soc->reo_desc_deferred_freelist);
6155 	qdf_spinlock_destroy(&soc->reo_desc_deferred_freelist_lock);
6156 }
6157 #else
6158 static inline void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
6159 {
6160 }
6161 
6162 static inline void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
6163 {
6164 }
6165 #endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
6166 
6167 /*
6168  * dp_soc_reset_txrx_ring_map() - reset tx ring map
6169  * @soc: DP SOC handle
6170  *
6171  */
6172 static void dp_soc_reset_txrx_ring_map(struct dp_soc *soc)
6173 {
6174 	uint32_t i;
6175 
6176 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
6177 		soc->tx_ring_map[i] = 0;
6178 }
6179 
6180 /*
6181  * dp_soc_print_inactive_objects() - prints inactive peer and vdev list
6182  * @soc: DP SOC handle
6183  *
6184  */
6185 static void dp_soc_print_inactive_objects(struct dp_soc *soc)
6186 {
6187 	struct dp_peer *peer = NULL;
6188 	struct dp_peer *tmp_peer = NULL;
6189 	struct dp_vdev *vdev = NULL;
6190 	struct dp_vdev *tmp_vdev = NULL;
6191 	int i = 0;
6192 	uint32_t count;
6193 
6194 	if (TAILQ_EMPTY(&soc->inactive_peer_list) &&
6195 	    TAILQ_EMPTY(&soc->inactive_vdev_list))
6196 		return;
6197 
6198 	TAILQ_FOREACH_SAFE(peer, &soc->inactive_peer_list,
6199 			   inactive_list_elem, tmp_peer) {
6200 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
6201 			count = qdf_atomic_read(&peer->mod_refs[i]);
6202 			if (count)
6203 				DP_PRINT_STATS("peer %pK Module id %u ==> %u",
6204 					       peer, i, count);
6205 		}
6206 	}
6207 
6208 	TAILQ_FOREACH_SAFE(vdev, &soc->inactive_vdev_list,
6209 			   inactive_list_elem, tmp_vdev) {
6210 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
6211 			count = qdf_atomic_read(&vdev->mod_refs[i]);
6212 			if (count)
6213 				DP_PRINT_STATS("vdev %pK Module id %u ==> %u",
6214 					       vdev, i, count);
6215 		}
6216 	}
6217 	QDF_BUG(0);
6218 }
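
/*
 * Illustrative output (format taken from the DP_PRINT_STATS calls
 * above), for a leaked peer reference:
 *
 *	peer 0x... Module id 5 ==> 2
 *
 * i.e. the peer still holds two references taken under module id 5,
 * which is why QDF_BUG(0) fires to flag the leak.
 */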
6219 
6220 /**
6221  * dp_soc_deinit() - Deinitialize txrx SOC
6222  * @txrx_soc: Opaque DP SOC handle
6223  *
6224  * Return: None
6225  */
6226 static void dp_soc_deinit(void *txrx_soc)
6227 {
6228 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6229 	struct htt_soc *htt_soc = soc->htt_handle;
6230 
6231 	qdf_atomic_set(&soc->cmn_init_done, 0);
6232 
6233 	soc->arch_ops.txrx_soc_deinit(soc);
6234 
6235 	dp_monitor_soc_deinit(soc);
6236 
6237 	/* free peer tables & AST tables allocated during peer_map_attach */
6238 	if (soc->peer_map_attach_success) {
6239 		dp_peer_find_detach(soc);
6240 		soc->arch_ops.txrx_peer_map_detach(soc);
6241 		soc->peer_map_attach_success = FALSE;
6242 	}
6243 
6244 	qdf_flush_work(&soc->htt_stats.work);
6245 	qdf_disable_work(&soc->htt_stats.work);
6246 
6247 	qdf_spinlock_destroy(&soc->htt_stats.lock);
6248 
6249 	dp_soc_reset_txrx_ring_map(soc);
6250 
6251 	dp_reo_desc_freelist_destroy(soc);
6252 	dp_reo_desc_deferred_freelist_destroy(soc);
6253 
6254 	DEINIT_RX_HW_STATS_LOCK(soc);
6255 
6256 	qdf_spinlock_destroy(&soc->ast_lock);
6257 
6258 	dp_peer_mec_spinlock_destroy(soc);
6259 
6260 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
6261 
6262 	qdf_nbuf_queue_free(&soc->invalid_buf_queue);
6263 
6264 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
6265 
6266 	qdf_spinlock_destroy(&soc->vdev_map_lock);
6267 
6268 	dp_reo_cmdlist_destroy(soc);
6269 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
6270 
6271 	dp_soc_tx_desc_sw_pools_deinit(soc);
6272 
6273 	dp_soc_srng_deinit(soc);
6274 
6275 	dp_hw_link_desc_ring_deinit(soc);
6276 
6277 	dp_soc_print_inactive_objects(soc);
6278 	qdf_spinlock_destroy(&soc->inactive_peer_list_lock);
6279 	qdf_spinlock_destroy(&soc->inactive_vdev_list_lock);
6280 
6281 	htt_soc_htc_dealloc(soc->htt_handle);
6282 
6283 	htt_soc_detach(htt_soc);
6284 
6285 	/* Free wbm sg list and reset flags in down path */
6286 	dp_rx_wbm_sg_list_deinit(soc);
6287 
6288 	wlan_minidump_remove(soc, sizeof(*soc), soc->ctrl_psoc,
6289 			     WLAN_MD_DP_SOC, "dp_soc");
6290 }
6291 
6292 /**
6293  * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
6294  * @txrx_soc: Opaque DP SOC handle
6295  *
6296  * Return: None
6297  */
6298 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc)
6299 {
6300 	dp_soc_deinit(txrx_soc);
6301 }
6302 
6303 /*
6304  * dp_soc_detach() - Detach rest of txrx SOC
6305  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
6306  *
6307  * Return: None
6308  */
6309 static void dp_soc_detach(struct cdp_soc_t *txrx_soc)
6310 {
6311 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6312 
6313 	soc->arch_ops.txrx_soc_detach(soc);
6314 
6315 	dp_runtime_deinit();
6316 
6317 	dp_sysfs_deinitialize_stats(soc);
6318 	dp_soc_swlm_detach(soc);
6319 	dp_soc_tx_desc_sw_pools_free(soc);
6320 	dp_soc_srng_free(soc);
6321 	dp_hw_link_desc_ring_free(soc);
6322 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
6323 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
6324 	dp_soc_tx_hw_desc_history_detach(soc);
6325 	dp_soc_tx_history_detach(soc);
6326 	dp_soc_mon_status_ring_history_detach(soc);
6327 	dp_soc_rx_history_detach(soc);
6328 
6329 	if (!dp_monitor_modularized_enable()) {
6330 		dp_mon_soc_detach_wrapper(soc);
6331 	}
6332 
6333 	qdf_mem_free(soc->cdp_soc.ops);
6334 	qdf_mem_free(soc);
6335 }
6336 
6337 /*
6338  * dp_soc_detach_wifi3() - Detach txrx SOC
6339  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
6340  *
6341  * Return: None
6342  */
6343 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc)
6344 {
6345 	dp_soc_detach(txrx_soc);
6346 }
6347 
6348 /*
6349  * dp_rxdma_ring_config() - configure the RX DMA rings
6350  *
6351  * This function is used to configure the MAC rings.
6352  * On MCL, the host provides buffers in the Host2FW ring;
6353  * FW refills (copies) buffers to the ring and updates the
6354  * ring_idx in a register.
6355  *
6356  * @soc: data path SoC handle
6357  *
6358  * Return: zero on success, non-zero on failure
6359  */
6360 #ifdef QCA_HOST2FW_RXBUF_RING
6361 static inline void
6362 dp_htt_setup_rxdma_err_dst_ring(struct dp_soc *soc, int mac_id,
6363 				int lmac_id)
6364 {
6365 	if (soc->rxdma_err_dst_ring[lmac_id].hal_srng)
6366 		htt_srng_setup(soc->htt_handle, mac_id,
6367 			       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
6368 			       RXDMA_DST);
6369 }
6370 
6371 #ifdef IPA_WDI3_VLAN_SUPPORT
6372 static inline
6373 void dp_rxdma_setup_refill_ring3(struct dp_soc *soc,
6374 				 struct dp_pdev *pdev,
6375 				 uint8_t idx)
6376 {
6377 	if (pdev->rx_refill_buf_ring3.hal_srng)
6378 		htt_srng_setup(soc->htt_handle, idx,
6379 			       pdev->rx_refill_buf_ring3.hal_srng,
6380 			       RXDMA_BUF);
6381 }
6382 #else
6383 static inline
6384 void dp_rxdma_setup_refill_ring3(struct dp_soc *soc,
6385 				 struct dp_pdev *pdev,
6386 				 uint8_t idx)
6387 { }
6388 #endif
6389 
6390 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
6391 {
6392 	int i;
6393 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6394 
6395 	for (i = 0; i < MAX_PDEV_CNT; i++) {
6396 		struct dp_pdev *pdev = soc->pdev_list[i];
6397 
6398 		if (pdev) {
6399 			int mac_id;
6400 			int max_mac_rings =
6401 				 wlan_cfg_get_num_mac_rings
6402 				(pdev->wlan_cfg_ctx);
6403 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);
6404 
6405 			htt_srng_setup(soc->htt_handle, i,
6406 				       soc->rx_refill_buf_ring[lmac_id]
6407 				       .hal_srng,
6408 				       RXDMA_BUF);
6409 
6410 			if (pdev->rx_refill_buf_ring2.hal_srng)
6411 				htt_srng_setup(soc->htt_handle, i,
6412 					       pdev->rx_refill_buf_ring2
6413 					       .hal_srng,
6414 					       RXDMA_BUF);
6415 
6416 			dp_rxdma_setup_refill_ring3(soc, pdev, i);
6417 
6418 			dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);
6419 			dp_err("pdev_id %d max_mac_rings %d",
6420 			       pdev->pdev_id, max_mac_rings);
6421 
6422 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
6423 				int mac_for_pdev =
6424 					dp_get_mac_id_for_pdev(mac_id,
6425 							       pdev->pdev_id);
6426 				/*
6427 				 * Obtain lmac id from pdev to access the LMAC
6428 				 * ring in soc context
6429 				 */
6430 				lmac_id =
6431 				dp_get_lmac_id_for_pdev_id(soc,
6432 							   mac_id,
6433 							   pdev->pdev_id);
6434 				QDF_TRACE(QDF_MODULE_ID_TXRX,
6435 					 QDF_TRACE_LEVEL_ERROR,
6436 					 FL("mac_id %d"), mac_for_pdev);
6437 
6438 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
6439 					 pdev->rx_mac_buf_ring[mac_id]
6440 						.hal_srng,
6441 					 RXDMA_BUF);
6442 
6443 				if (!soc->rxdma2sw_rings_not_supported)
6444 					dp_htt_setup_rxdma_err_dst_ring(soc,
6445 						mac_for_pdev, lmac_id);
6446 
6447 				/* Configure monitor mode rings */
6448 				status = dp_monitor_htt_srng_setup(soc, pdev,
6449 								   lmac_id,
6450 								   mac_for_pdev);
6451 				if (status != QDF_STATUS_SUCCESS) {
6452 					dp_err("Failed to send htt monitor messages to target");
6453 					return status;
6454 				}
6455 
6456 			}
6457 		}
6458 	}
6459 
6460 	dp_reap_timer_init(soc);
6461 	return status;
6462 }
6463 #else
6464 /* This is only for WIN */
6465 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
6466 {
6467 	int i;
6468 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6469 	int mac_for_pdev;
6470 	int lmac_id;
6471 
6472 	/* Configure monitor mode rings */
6473 	dp_monitor_soc_htt_srng_setup(soc);
6474 
6475 	for (i = 0; i < MAX_PDEV_CNT; i++) {
6476 		struct dp_pdev *pdev =  soc->pdev_list[i];
6477 
6478 		if (!pdev)
6479 			continue;
6480 
6481 		mac_for_pdev = i;
6482 		lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);
6483 
6484 		if (soc->rx_refill_buf_ring[lmac_id].hal_srng)
6485 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
6486 				       soc->rx_refill_buf_ring[lmac_id].
6487 				       hal_srng, RXDMA_BUF);
6488 
6489 		/* Configure monitor mode rings */
6490 		dp_monitor_htt_srng_setup(soc, pdev,
6491 					  lmac_id,
6492 					  mac_for_pdev);
6493 		if (!soc->rxdma2sw_rings_not_supported)
6494 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
6495 				       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
6496 				       RXDMA_DST);
6497 	}
6498 
6499 	dp_reap_timer_init(soc);
6500 	return status;
6501 }
6502 #endif
6503 
6504 /*
6505  * dp_rx_target_fst_config() - configure the RXOLE Flow Search Engine
6506  *
6507  * This function is used to configure the FSE HW block in RX OLE on a
6508  * per pdev basis. Here, we will be programming parameters related to
6509  * the Flow Search Table.
6510  *
6511  * @soc: data path SoC handle
6512  *
6513  * Return: zero on success, non-zero on failure
6514  */
6515 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
6516 static QDF_STATUS
6517 dp_rx_target_fst_config(struct dp_soc *soc)
6518 {
6519 	int i;
6520 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6521 
6522 	for (i = 0; i < MAX_PDEV_CNT; i++) {
6523 		struct dp_pdev *pdev = soc->pdev_list[i];
6524 
6525 		/* Flow search is not enabled if NSS offload is enabled */
6526 		if (pdev &&
6527 		    !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
6528 			status = dp_rx_flow_send_fst_fw_setup(pdev->soc, pdev);
6529 			if (status != QDF_STATUS_SUCCESS)
6530 				break;
6531 		}
6532 	}
6533 	return status;
6534 }
6535 #elif defined(WLAN_SUPPORT_RX_FISA)
6536 /**
6537  * dp_rx_target_fst_config() - Configure RX OLE FSE engine in HW
6538  * @soc: SoC handle
6539  *
6540  * Return: Success
6541  */
6542 static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
6543 {
6544 	QDF_STATUS status;
6545 	struct dp_rx_fst *fst = soc->rx_fst;
6546 
6547 	/* Check if it is enabled in the INI */
6548 	if (!soc->fisa_enable) {
6549 		dp_err("RX FISA feature is disabled");
6550 		return QDF_STATUS_E_NOSUPPORT;
6551 	}
6552 
6553 	status = dp_rx_flow_send_fst_fw_setup(soc, soc->pdev_list[0]);
6554 	if (QDF_IS_STATUS_ERROR(status)) {
6555 		dp_err("dp_rx_flow_send_fst_fw_setup failed %d",
6556 		       status);
6557 		return status;
6558 	}
6559 
6560 	if (soc->fst_cmem_base) {
6561 		soc->fst_in_cmem = true;
6562 		dp_rx_fst_update_cmem_params(soc, fst->max_entries,
6563 					     soc->fst_cmem_base & 0xffffffff,
6564 					     soc->fst_cmem_base >> 32);
6565 	}
6566 	return status;
6567 }
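
/*
 * Worked example of the CMEM base split above (hypothetical address):
 * fst_cmem_base = 0x0000001234567890 yields
 *
 *	low  32 bits: 0x34567890	(fst_cmem_base & 0xffffffff)
 *	high 32 bits: 0x00000012	(fst_cmem_base >> 32)
 */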
6568 
6569 #define FISA_MAX_TIMEOUT 0xffffffff
6570 #define FISA_DISABLE_TIMEOUT 0
6571 static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
6572 {
6573 	struct dp_htt_rx_fisa_cfg fisa_config;
6574 
6575 	fisa_config.pdev_id = 0;
6576 	fisa_config.fisa_timeout = FISA_MAX_TIMEOUT;
6577 
6578 	return dp_htt_rx_fisa_config(soc->pdev_list[0], &fisa_config);
6579 }
6580 
6581 #else /* !WLAN_SUPPORT_RX_FISA */
6582 static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
6583 {
6584 	return QDF_STATUS_SUCCESS;
6585 }
6586 #endif /* !WLAN_SUPPORT_RX_FISA */
6587 
6588 #ifndef WLAN_SUPPORT_RX_FISA
6589 static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
6590 {
6591 	return QDF_STATUS_SUCCESS;
6592 }
6593 
6594 static QDF_STATUS dp_rx_dump_fisa_stats(struct dp_soc *soc)
6595 {
6596 	return QDF_STATUS_SUCCESS;
6597 }
6598 
6599 static void dp_rx_dump_fisa_table(struct dp_soc *soc)
6600 {
6601 }
6602 
6603 static void dp_suspend_fse_cache_flush(struct dp_soc *soc)
6604 {
6605 }
6606 
6607 static void dp_resume_fse_cache_flush(struct dp_soc *soc)
6608 {
6609 }
6610 #endif /* !WLAN_SUPPORT_RX_FISA */
6611 
6612 #ifndef WLAN_DP_FEATURE_SW_LATENCY_MGR
6613 static inline QDF_STATUS dp_print_swlm_stats(struct dp_soc *soc)
6614 {
6615 	return QDF_STATUS_SUCCESS;
6616 }
6617 #endif /* !WLAN_DP_FEATURE_SW_LATENCY_MGR */
6618 
6619 #ifdef WLAN_SUPPORT_PPEDS
6620 /*
6621  * dp_soc_target_ppe_rxole_rxdma_cfg() - Configure the RxOLE and RxDMA for PPE
6622  * @soc: DP Tx/Rx handle
6623  *
6624  * Return: QDF_STATUS
6625  */
6626 static
6627 QDF_STATUS dp_soc_target_ppe_rxole_rxdma_cfg(struct dp_soc *soc)
6628 {
6629 	struct dp_htt_rxdma_rxole_ppe_config htt_cfg = {0};
6630 	QDF_STATUS status;
6631 
6632 	/*
6633 	 * Program RxDMA to override the reo destination indication
6634 	 * with REO2PPE_DST_IND, when use_ppe is set to 1 in RX_MSDU_END,
6635 	 * thereby driving the packet to REO2PPE ring.
6636 	 * If the MSDU is spanning more than 1 buffer, then this
6637 	 * override is not done.
6638 	 */
6639 	htt_cfg.override = 1;
6640 	htt_cfg.reo_destination_indication = REO2PPE_DST_IND;
6641 	htt_cfg.multi_buffer_msdu_override_en = 0;
6642 
6643 	/*
6644 	 * Override use_ppe to 0 in RxOLE for the following
6645 	 * cases.
6646 	 */
6647 	htt_cfg.intra_bss_override = 1;
6648 	htt_cfg.decap_raw_override = 1;
6649 	htt_cfg.decap_nwifi_override = 1;
6650 	htt_cfg.ip_frag_override = 1;
6651 
6652 	status = dp_htt_rxdma_rxole_ppe_cfg_set(soc, &htt_cfg);
6653 	if (status != QDF_STATUS_SUCCESS)
6654 		dp_err("RxOLE and RxDMA PPE config failed %d", status);
6655 
6656 	return status;
6657 }
6658 #else
6659 static inline
6660 QDF_STATUS dp_soc_target_ppe_rxole_rxdma_cfg(struct dp_soc *soc)
6661 {
6662 	return QDF_STATUS_SUCCESS;
6663 }
6664 #endif /* WLAN_SUPPORT_PPEDS */
6665 
6666 #ifdef DP_UMAC_HW_RESET_SUPPORT
6667 static void dp_register_umac_reset_handlers(struct dp_soc *soc)
6668 {
6669 	dp_umac_reset_register_rx_action_callback(soc,
6670 		dp_umac_reset_handle_pre_reset, UMAC_RESET_ACTION_DO_PRE_RESET);
6671 
6672 	dp_umac_reset_register_rx_action_callback(soc,
6673 					dp_umac_reset_handle_post_reset,
6674 					UMAC_RESET_ACTION_DO_POST_RESET_START);
6675 
6676 	dp_umac_reset_register_rx_action_callback(soc,
6677 				dp_umac_reset_handle_post_reset_complete,
6678 				UMAC_RESET_ACTION_DO_POST_RESET_COMPLETE);
6679 
6680 }
6681 #else
6682 static void dp_register_umac_reset_handlers(struct dp_soc *soc)
6683 {
6684 }
6685 #endif
6686 /*
6687  * dp_soc_attach_target_wifi3() - SOC initialization in the target
6688  * @cdp_soc: Opaque Datapath SOC handle
6689  *
6690  * Return: zero on success, non-zero on failure
6691  */
6692 static QDF_STATUS
6693 dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
6694 {
6695 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
6696 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6697 	struct hal_reo_params reo_params;
6698 
6699 	htt_soc_attach_target(soc->htt_handle);
6700 
6701 	status = dp_soc_target_ppe_rxole_rxdma_cfg(soc);
6702 	if (status != QDF_STATUS_SUCCESS) {
6703 		dp_err("Failed to send htt RxOLE and RxDMA messages to target");
6704 		return status;
6705 	}
6706 
6707 	status = dp_rxdma_ring_config(soc);
6708 	if (status != QDF_STATUS_SUCCESS) {
6709 		dp_err("Failed to send htt srng setup messages to target");
6710 		return status;
6711 	}
6712 
6713 	status = soc->arch_ops.dp_rxdma_ring_sel_cfg(soc);
6714 	if (status != QDF_STATUS_SUCCESS) {
6715 		dp_err("Failed to send htt ring config message to target");
6716 		return status;
6717 	}
6718 
6719 	status = dp_soc_umac_reset_init(soc);
6720 	if (status != QDF_STATUS_SUCCESS &&
6721 	    status != QDF_STATUS_E_NOSUPPORT) {
6722 		dp_err("Failed to initialize UMAC reset");
6723 		return status;
6724 	}
6725 
6726 	dp_register_umac_reset_handlers(soc);
6727 
6728 	status = dp_rx_target_fst_config(soc);
6729 	if (status != QDF_STATUS_SUCCESS &&
6730 	    status != QDF_STATUS_E_NOSUPPORT) {
6731 		dp_err("Failed to send htt fst setup config message to target");
6732 		return status;
6733 	}
6734 
6735 	if (status == QDF_STATUS_SUCCESS) {
6736 		status = dp_rx_fisa_config(soc);
6737 		if (status != QDF_STATUS_SUCCESS) {
6738 			dp_err("Failed to send htt FISA config message to target");
6739 			return status;
6740 		}
6741 	}
6742 
6743 	DP_STATS_INIT(soc);
6744 
6745 	dp_runtime_init(soc);
6746 
6747 	/* Enable HW vdev offload stats if feature is supported */
6748 	dp_vdev_stats_hw_offload_target_config(soc, INVALID_PDEV_ID, true);
6749 
6750 	/* initialize work queue for stats processing */
6751 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
6752 
6753 	wlan_cfg_soc_update_tgt_params(soc->wlan_cfg_ctx,
6754 				       soc->ctrl_psoc);
6755 	/* Setup HW REO */
6756 	qdf_mem_zero(&reo_params, sizeof(reo_params));
6757 
6758 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
6759 		/*
6760 		 * Reo ring remap is not required if both radios
6761 		 * are offloaded to NSS
6762 		 */
6763 
6764 		if (soc->arch_ops.reo_remap_config(soc, &reo_params.remap0,
6765 						   &reo_params.remap1,
6766 						   &reo_params.remap2))
6767 			reo_params.rx_hash_enabled = true;
6768 		else
6769 			reo_params.rx_hash_enabled = false;
6770 	}
6771 
6772 	/*
6773 	 * set the fragment destination ring
6774 	 */
6775 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
6776 
6777 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
6778 		reo_params.alt_dst_ind_0 = REO_REMAP_RELEASE;
6779 
6780 	hal_reo_setup(soc->hal_soc, &reo_params, 1);
6781 
6782 	hal_reo_set_err_dst_remap(soc->hal_soc);
6783 
6784 	soc->features.pn_in_reo_dest = hal_reo_enable_pn_in_dest(soc->hal_soc);
6785 
6786 	return QDF_STATUS_SUCCESS;
6787 }
6788 
6789 /*
6790  * dp_vdev_id_map_tbl_add() - Add vdev into vdev_id table
6791  * @soc: SoC handle
6792  * @vdev: vdev handle
6793  * @vdev_id: vdev_id
6794  *
6795  * Return: None
6796  */
6797 static void dp_vdev_id_map_tbl_add(struct dp_soc *soc,
6798 				   struct dp_vdev *vdev,
6799 				   uint8_t vdev_id)
6800 {
6801 	QDF_ASSERT(vdev_id < MAX_VDEV_CNT);
6802 
6803 	qdf_spin_lock_bh(&soc->vdev_map_lock);
6804 
6805 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
6806 			QDF_STATUS_SUCCESS) {
6807 		dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK vdev_id %u",
6808 			     soc, vdev, vdev_id);
6809 		qdf_spin_unlock_bh(&soc->vdev_map_lock);
6810 		return;
6811 	}
6812 
6813 	if (!soc->vdev_id_map[vdev_id])
6814 		soc->vdev_id_map[vdev_id] = vdev;
6815 	else
6816 		QDF_ASSERT(0);
6817 
6818 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
6819 }
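
/*
 * Hedged sketch of the reader side (the real lookup lives in
 * dp_vdev_get_ref_by_id; this is illustrative): the same lock guards
 * the table, and the reference is taken before the lock is dropped:
 *
 *	qdf_spin_lock_bh(&soc->vdev_map_lock);
 *	vdev = soc->vdev_id_map[vdev_id];
 *	if (vdev &&
 *	    dp_vdev_get_ref(soc, vdev, mod_id) != QDF_STATUS_SUCCESS)
 *		vdev = NULL;
 *	qdf_spin_unlock_bh(&soc->vdev_map_lock);
 */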
6820 
6821 /*
6822  * dp_vdev_id_map_tbl_remove() - remove vdev from vdev_id table
6823  * @soc: SoC handle
6824  * @vdev: vdev handle
6825  *
6826  * Return: None
6827  */
6828 static void dp_vdev_id_map_tbl_remove(struct dp_soc *soc,
6829 				      struct dp_vdev *vdev)
6830 {
6831 	qdf_spin_lock_bh(&soc->vdev_map_lock);
6832 	QDF_ASSERT(soc->vdev_id_map[vdev->vdev_id] == vdev);
6833 
6834 	soc->vdev_id_map[vdev->vdev_id] = NULL;
6835 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
6836 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
6837 }
6838 
6839 /*
6840  * dp_vdev_pdev_list_add() - add vdev into pdev's list
6841  * @soc: soc handle
6842  * @pdev: pdev handle
6843  * @vdev: vdev handle
6844  *
6845  * Return: None
6846  */
6847 static void dp_vdev_pdev_list_add(struct dp_soc *soc,
6848 				  struct dp_pdev *pdev,
6849 				  struct dp_vdev *vdev)
6850 {
6851 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
6852 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
6853 			QDF_STATUS_SUCCESS) {
6854 		dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK",
6855 			     soc, vdev);
6856 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6857 		return;
6858 	}
6859 	/* add this vdev into the pdev's list */
6860 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
6861 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6862 }
6863 
6864 /*
6865  * dp_vdev_pdev_list_remove() - remove vdev from pdev's list
6866  * @soc: SoC handle
6867  * @pdev: pdev handle
6868  * @vdev: VDEV handle
6869  *
6870  * Return: none
6871  */
6872 static void dp_vdev_pdev_list_remove(struct dp_soc *soc,
6873 				     struct dp_pdev *pdev,
6874 				     struct dp_vdev *vdev)
6875 {
6876 	uint8_t found = 0;
6877 	struct dp_vdev *tmpvdev = NULL;
6878 
6879 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
6880 	TAILQ_FOREACH(tmpvdev, &pdev->vdev_list, vdev_list_elem) {
6881 		if (tmpvdev == vdev) {
6882 			found = 1;
6883 			break;
6884 		}
6885 	}
6886 
6887 	if (found) {
6888 		TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
6889 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
6890 	} else {
6891 		dp_vdev_debug("%pK: vdev:%pK not found in pdev:%pK vdevlist:%pK",
6892 			      soc, vdev, pdev, &pdev->vdev_list);
6893 		QDF_ASSERT(0);
6894 	}
6895 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6896 }
6897 
6898 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
6899 /*
6900  * dp_vdev_init_rx_eapol() - initialize osif_rx_eapol
6901  * @vdev: Datapath VDEV handle
6902  *
6903  * Return: None
6904  */
6905 static inline void dp_vdev_init_rx_eapol(struct dp_vdev *vdev)
6906 {
6907 	vdev->osif_rx_eapol = NULL;
6908 }
6909 
6910 /*
6911  * dp_vdev_register_rx_eapol() - Register VDEV operations for rx_eapol
6912  * @vdev: DP vdev handle
6913  * @txrx_ops: Tx and Rx operations
6914  *
6915  * Return: None
6916  */
6917 static inline void dp_vdev_register_rx_eapol(struct dp_vdev *vdev,
6918 					     struct ol_txrx_ops *txrx_ops)
6919 {
6920 	vdev->osif_rx_eapol = txrx_ops->rx.rx_eapol;
6921 }
6922 #else
6923 static inline void dp_vdev_init_rx_eapol(struct dp_vdev *vdev)
6924 {
6925 }
6926 
6927 static inline void dp_vdev_register_rx_eapol(struct dp_vdev *vdev,
6928 					     struct ol_txrx_ops *txrx_ops)
6929 {
6930 }
6931 #endif
6932 
6933 #ifdef WLAN_FEATURE_11BE_MLO
6934 #if defined(WLAN_MLO_MULTI_CHIP) && defined(WLAN_MCAST_MLO)
6935 static inline void dp_vdev_save_mld_info(struct dp_vdev *vdev,
6936 					 struct cdp_vdev_info *vdev_info)
6937 {
6938 	if (qdf_is_macaddr_zero((struct qdf_mac_addr *)vdev_info->mld_mac_addr))
6939 		vdev->mlo_vdev = false;
6940 	else
6941 		vdev->mlo_vdev = true;
6942 }
6943 #else
6944 static inline void dp_vdev_save_mld_info(struct dp_vdev *vdev,
6945 					 struct cdp_vdev_info *vdev_info)
6946 {
6947 }
6948 #endif
6949 static inline void dp_vdev_save_mld_addr(struct dp_vdev *vdev,
6950 					 struct cdp_vdev_info *vdev_info)
6951 {
6952 	if (vdev_info->mld_mac_addr)
6953 		qdf_mem_copy(&vdev->mld_mac_addr.raw[0],
6954 			     vdev_info->mld_mac_addr, QDF_MAC_ADDR_SIZE);
6955 
6956 	dp_vdev_save_mld_info(vdev, vdev_info);
6957 
6958 }
6959 #else
6960 static inline void dp_vdev_save_mld_addr(struct dp_vdev *vdev,
6961 					 struct cdp_vdev_info *vdev_info)
6962 {
6963 
6964 }
6965 #endif
6966 
6967 #ifdef DP_TRAFFIC_END_INDICATION
6968 /*
6969  * dp_tx_vdev_traffic_end_indication_attach() - Initialize data end
6970  *                                              indication members in VDEV
6971  * @vdev: DP vdev handle
6972  *
6973  * Return: None
6974  */
6975 static inline void
6976 dp_tx_vdev_traffic_end_indication_attach(struct dp_vdev *vdev)
6977 {
6978 	qdf_nbuf_queue_init(&vdev->end_ind_pkt_q);
6979 }
6980 
6981 /*
6982  * dp_tx_vdev_traffic_end_indication_detach() - De-init data end indication
6983  *                                              related members in VDEV
6984  * @vdev: DP vdev handle
6985  *
6986  * Return: None
6987  */
6988 static inline void
6989 dp_tx_vdev_traffic_end_indication_detach(struct dp_vdev *vdev)
6990 {
6991 	qdf_nbuf_t nbuf;
6992 
6993 	while ((nbuf = qdf_nbuf_queue_remove(&vdev->end_ind_pkt_q)) != NULL)
6994 		qdf_nbuf_free(nbuf);
6995 }
6996 #else
6997 static inline void
6998 dp_tx_vdev_traffic_end_indication_attach(struct dp_vdev *vdev)
6999 {}
7000 
7001 static inline void
7002 dp_tx_vdev_traffic_end_indication_detach(struct dp_vdev *vdev)
7003 {}
7004 #endif
7005 
7006 /**
7007  * dp_vdev_attach_wifi3() - attach txrx vdev
7008  * @cdp_soc: Datapath SOC handle
7009  * @pdev_id: PDEV ID for vdev creation
7010  * @vdev_info: parameters used for vdev creation
7011  *
7012  * Return: status
7013  */
7014 static QDF_STATUS dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc,
7015 				       uint8_t pdev_id,
7016 				       struct cdp_vdev_info *vdev_info)
7017 {
7018 	int i = 0;
7019 	qdf_size_t vdev_context_size;
7020 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
7021 	struct dp_pdev *pdev =
7022 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7023 						   pdev_id);
7024 	struct dp_vdev *vdev;
7025 	uint8_t *vdev_mac_addr = vdev_info->vdev_mac_addr;
7026 	uint8_t vdev_id = vdev_info->vdev_id;
7027 	enum wlan_op_mode op_mode = vdev_info->op_mode;
7028 	enum wlan_op_subtype subtype = vdev_info->subtype;
7029 	uint8_t vdev_stats_id = vdev_info->vdev_stats_id;
7030 
7031 	vdev_context_size =
7032 		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_VDEV);
7033 	vdev = qdf_mem_malloc(vdev_context_size);
7034 
7035 	if (!pdev) {
7036 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
7037 			    cdp_soc, pdev_id);
7038 		qdf_mem_free(vdev);
7039 		goto fail0;
7040 	}
7041 
7042 	if (!vdev) {
7043 		dp_init_err("%pK: DP VDEV memory allocation failed",
7044 			    cdp_soc);
7045 		goto fail0;
7046 	}
7047 
7048 	wlan_minidump_log(vdev, sizeof(*vdev), soc->ctrl_psoc,
7049 			  WLAN_MD_DP_VDEV, "dp_vdev");
7050 
7051 	vdev->pdev = pdev;
7052 	vdev->vdev_id = vdev_id;
7053 	vdev->vdev_stats_id = vdev_stats_id;
7054 	vdev->opmode = op_mode;
7055 	vdev->subtype = subtype;
7056 	vdev->osdev = soc->osdev;
7057 
7058 	vdev->osif_rx = NULL;
7059 	vdev->osif_rsim_rx_decap = NULL;
7060 	vdev->osif_get_key = NULL;
7061 	vdev->osif_tx_free_ext = NULL;
7062 	vdev->osif_vdev = NULL;
7063 
7064 	vdev->delete.pending = 0;
7065 	vdev->safemode = 0;
7066 	vdev->drop_unenc = 1;
7067 	vdev->sec_type = cdp_sec_type_none;
7068 	vdev->multipass_en = false;
7069 	vdev->wrap_vdev = false;
7070 	dp_vdev_init_rx_eapol(vdev);
7071 	qdf_atomic_init(&vdev->ref_cnt);
7072 	for (i = 0; i < DP_MOD_ID_MAX; i++)
7073 		qdf_atomic_init(&vdev->mod_refs[i]);
7074 
7075 	/* Take one reference for create */
7076 	qdf_atomic_inc(&vdev->ref_cnt);
7077 	qdf_atomic_inc(&vdev->mod_refs[DP_MOD_ID_CONFIG]);
7078 	vdev->num_peers = 0;
7079 #ifdef notyet
7080 	vdev->filters_num = 0;
7081 #endif
7082 	vdev->lmac_id = pdev->lmac_id;
7083 
7084 	qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE);
7085 
7086 	dp_vdev_save_mld_addr(vdev, vdev_info);
7087 
7088 	/* TODO: Initialize default HTT meta data that will be used in
7089 	 * TCL descriptors for packets transmitted from this VDEV
7090 	 */
7091 
7092 	qdf_spinlock_create(&vdev->peer_list_lock);
7093 	TAILQ_INIT(&vdev->peer_list);
7094 	dp_peer_multipass_list_init(vdev);
7095 	if ((soc->intr_mode == DP_INTR_POLL) &&
7096 	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
7097 		if ((pdev->vdev_count == 0) ||
7098 		    (wlan_op_mode_monitor == vdev->opmode))
7099 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7100 	} else if (dp_soc_get_con_mode(soc) == QDF_GLOBAL_MISSION_MODE &&
7101 		   soc->intr_mode == DP_INTR_MSI &&
7102 		   wlan_op_mode_monitor == vdev->opmode) {
7103 		/* Timer to reap status ring in mission mode */
7104 		dp_monitor_vdev_timer_start(soc);
7105 	}
7106 
7107 	dp_vdev_id_map_tbl_add(soc, vdev, vdev_id);
7108 
7109 	if (wlan_op_mode_monitor == vdev->opmode) {
7110 		if (dp_monitor_vdev_attach(vdev) == QDF_STATUS_SUCCESS) {
7111 			dp_monitor_pdev_set_mon_vdev(vdev);
7112 			return dp_monitor_vdev_set_monitor_mode_buf_rings(pdev);
7113 		}
7114 		return QDF_STATUS_E_FAILURE;
7115 	}
7116 
7117 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
7118 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
7119 	vdev->dscp_tid_map_id = 0;
7120 	vdev->mcast_enhancement_en = 0;
7121 	vdev->igmp_mcast_enhanc_en = 0;
7122 	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
7123 	vdev->prev_tx_enq_tstamp = 0;
7124 	vdev->prev_rx_deliver_tstamp = 0;
7125 	vdev->skip_sw_tid_classification = DP_TX_HW_DSCP_TID_MAP_VALID;
7126 	dp_tx_vdev_traffic_end_indication_attach(vdev);
7127 
7128 	dp_vdev_pdev_list_add(soc, pdev, vdev);
7129 	pdev->vdev_count++;
7130 
7131 	if (wlan_op_mode_sta != vdev->opmode &&
7132 	    wlan_op_mode_ndi != vdev->opmode)
7133 		vdev->ap_bridge_enabled = true;
7134 	else
7135 		vdev->ap_bridge_enabled = false;
7136 	dp_init_info("%pK: wlan_cfg_ap_bridge_enabled %d",
7137 		     cdp_soc, vdev->ap_bridge_enabled);
7138 
7139 	dp_tx_vdev_attach(vdev);
7140 
7141 	dp_monitor_vdev_attach(vdev);
7142 	if (!pdev->is_lro_hash_configured) {
7143 		if (QDF_IS_STATUS_SUCCESS(dp_lro_hash_setup(soc, pdev)))
7144 			pdev->is_lro_hash_configured = true;
7145 		else
7146 			dp_err("LRO hash setup failure!");
7147 	}
7148 
7149 	dp_info("Created vdev %pK ("QDF_MAC_ADDR_FMT")", vdev,
7150 		QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
7151 	DP_STATS_INIT(vdev);
7152 
7153 	if (QDF_IS_STATUS_ERROR(soc->arch_ops.txrx_vdev_attach(soc, vdev)))
7154 		goto fail0;
7155 
7156 	if (wlan_op_mode_sta == vdev->opmode)
7157 		dp_peer_create_wifi3((struct cdp_soc_t *)soc, vdev_id,
7158 				     vdev->mac_addr.raw, CDP_LINK_PEER_TYPE);
7159 
7160 	dp_pdev_update_fast_rx_flag(soc, pdev);
7161 
7162 	return QDF_STATUS_SUCCESS;
7163 
7164 fail0:
7165 	return QDF_STATUS_E_FAILURE;
7166 }
7167 
7168 #ifndef QCA_HOST_MODE_WIFI_DISABLED
7169 /**
7170  * dp_vdev_fetch_tx_handler() - Fetch Tx handlers
7171  * @vdev: struct dp_vdev *
7172  * @soc: struct dp_soc *
7173  * @ctx: struct ol_txrx_hardtart_ctxt *
7174  */
7175 static inline void dp_vdev_fetch_tx_handler(struct dp_vdev *vdev,
7176 					    struct dp_soc *soc,
7177 					    struct ol_txrx_hardtart_ctxt *ctx)
7178 {
7179 	/* Enable vdev_id check only for ap, if flag is enabled */
7180 	if (vdev->mesh_vdev)
7181 		ctx->tx = dp_tx_send_mesh;
7182 	else if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
7183 		 (vdev->opmode == wlan_op_mode_ap)) {
7184 		ctx->tx = dp_tx_send_vdev_id_check;
7185 		ctx->tx_fast = dp_tx_send_vdev_id_check;
7186 	} else {
7187 		ctx->tx = dp_tx_send;
7188 		if (vdev->opmode == wlan_op_mode_ap)
7189 			ctx->tx_fast = soc->arch_ops.dp_tx_send_fast;
7190 		else
7191 			ctx->tx_fast = dp_tx_send;
7192 	}
7193 
7194 	/* Avoid check in regular exception Path */
7195 	if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
7196 	    (vdev->opmode == wlan_op_mode_ap))
7197 		ctx->tx_exception = dp_tx_send_exception_vdev_id_check;
7198 	else
7199 		ctx->tx_exception = dp_tx_send_exception;
7200 }
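
/*
 * Usage sketch (illustrative only; it mirrors what
 * dp_vdev_register_tx_handler() below does): resolve the per-vdev tx
 * entry points into a local context before publishing them to the OS
 * shim layer.
 *
 *	struct ol_txrx_hardtart_ctxt ctx = {0};
 *
 *	dp_vdev_fetch_tx_handler(vdev, soc, &ctx);
 *	txrx_ops->tx.tx = ctx.tx;
 *	txrx_ops->tx.tx_fast = ctx.tx_fast;
 *	txrx_ops->tx.tx_exception = ctx.tx_exception;
 */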
7201 
7202 /**
7203  * dp_vdev_register_tx_handler() - Register Tx handler
7204  * @vdev: struct dp_vdev *
7205  * @soc: struct dp_soc *
7206  * @txrx_ops: struct ol_txrx_ops *
7207  */
7208 static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
7209 					       struct dp_soc *soc,
7210 					       struct ol_txrx_ops *txrx_ops)
7211 {
7212 	struct ol_txrx_hardtart_ctxt ctx = {0};
7213 
7214 	dp_vdev_fetch_tx_handler(vdev, soc, &ctx);
7215 
7216 	txrx_ops->tx.tx = ctx.tx;
7217 	txrx_ops->tx.tx_fast = ctx.tx_fast;
7218 	txrx_ops->tx.tx_exception = ctx.tx_exception;
7219 
7220 	dp_info("Configure tx_vdev_id_chk_handler Feature Flag: %d and mode:%d for vdev_id:%d",
7221 		wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx),
7222 		vdev->opmode, vdev->vdev_id);
7223 }
7224 #else /* QCA_HOST_MODE_WIFI_DISABLED */
7225 static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
7226 					       struct dp_soc *soc,
7227 					       struct ol_txrx_ops *txrx_ops)
7228 {
7229 }
7230 
7231 static inline void dp_vdev_fetch_tx_handler(struct dp_vdev *vdev,
7232 					    struct dp_soc *soc,
7233 					    struct ol_txrx_hardtart_ctxt *ctx)
7234 {
7235 }
7236 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
7237 
7238 /**
7239  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
7240  * @soc_hdl: Datapath soc handle
7241  * @vdev_id: id of Datapath VDEV handle
7242  * @osif_vdev: OSIF vdev handle
7243  * @txrx_ops: Tx and Rx operations
7244  *
7245  * Return: QDF_STATUS_SUCCESS on success, error status on failure
7246  */
7247 static QDF_STATUS dp_vdev_register_wifi3(struct cdp_soc_t *soc_hdl,
7248 					 uint8_t vdev_id,
7249 					 ol_osif_vdev_handle osif_vdev,
7250 					 struct ol_txrx_ops *txrx_ops)
7251 {
7252 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7253 	struct dp_vdev *vdev =	dp_vdev_get_ref_by_id(soc, vdev_id,
7254 						      DP_MOD_ID_CDP);
7255 
7256 	if (!vdev)
7257 		return QDF_STATUS_E_FAILURE;
7258 
7259 	vdev->osif_vdev = osif_vdev;
7260 	vdev->osif_rx = txrx_ops->rx.rx;
7261 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
7262 	vdev->osif_rx_flush = txrx_ops->rx.rx_flush;
7263 	vdev->osif_gro_flush = txrx_ops->rx.rx_gro_flush;
7264 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
7265 	vdev->osif_fisa_rx = txrx_ops->rx.osif_fisa_rx;
7266 	vdev->osif_fisa_flush = txrx_ops->rx.osif_fisa_flush;
7267 	vdev->osif_get_key = txrx_ops->get_key;
7268 	dp_monitor_vdev_register_osif(vdev, txrx_ops);
7269 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
7270 	vdev->tx_comp = txrx_ops->tx.tx_comp;
7271 	vdev->stats_cb = txrx_ops->rx.stats_rx;
7272 	vdev->tx_classify_critical_pkt_cb =
7273 		txrx_ops->tx.tx_classify_critical_pkt_cb;
7274 #ifdef notyet
7275 #if ATH_SUPPORT_WAPI
7276 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
7277 #endif
7278 #endif
7279 #ifdef UMAC_SUPPORT_PROXY_ARP
7280 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
7281 #endif
7282 	vdev->me_convert = txrx_ops->me_convert;
7283 	vdev->get_tsf_time = txrx_ops->get_tsf_time;
7284 
7285 	dp_vdev_register_rx_eapol(vdev, txrx_ops);
7286 
7287 	dp_vdev_register_tx_handler(vdev, soc, txrx_ops);
7288 
7289 	dp_init_info("%pK: DP Vdev Register success", soc);
7290 
7291 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7292 	return QDF_STATUS_SUCCESS;
7293 }
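
/*
 * Example usage (illustrative sketch; external callers reach this
 * through the cdp ops table rather than calling the static function
 * directly). The osif_* callback names are hypothetical placeholders
 * for the OS shim's handlers.
 *
 *	struct ol_txrx_ops txrx_ops = {0};
 *
 *	txrx_ops.rx.rx = osif_rx_cb;
 *	txrx_ops.rx.rx_stack = osif_rx_stack_cb;
 *	txrx_ops.get_key = osif_get_key_cb;
 *	if (dp_vdev_register_wifi3(soc_hdl, vdev_id, osif_vdev,
 *				   &txrx_ops) != QDF_STATUS_SUCCESS)
 *		dp_err("vdev register failed");
 */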
7294 
7295 #ifdef WLAN_FEATURE_11BE_MLO
7296 void dp_peer_delete(struct dp_soc *soc,
7297 		    struct dp_peer *peer,
7298 		    void *arg)
7299 {
7300 	if (!peer->valid)
7301 		return;
7302 
7303 	dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
7304 			     peer->vdev->vdev_id,
7305 			     peer->mac_addr.raw, 0,
7306 			     peer->peer_type);
7307 }
7308 #else
7309 void dp_peer_delete(struct dp_soc *soc,
7310 		    struct dp_peer *peer,
7311 		    void *arg)
7312 {
7313 	if (!peer->valid)
7314 		return;
7315 
7316 	dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
7317 			     peer->vdev->vdev_id,
7318 			     peer->mac_addr.raw, 0,
7319 			     CDP_LINK_PEER_TYPE);
7320 }
7321 #endif
7322 
7323 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
7324 void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg)
7325 {
7326 	if (!peer->valid)
7327 		return;
7328 
7329 	if (IS_MLO_DP_LINK_PEER(peer))
7330 		dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
7331 				     peer->vdev->vdev_id,
7332 				     peer->mac_addr.raw, 0,
7333 				     CDP_LINK_PEER_TYPE);
7334 }
7335 #else
7336 void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg)
7337 {
7338 }
7339 #endif
7340 /**
7341  * dp_vdev_flush_peers() - Forcibly flush peers of vdev
7342  * @vdev_handle: Datapath VDEV handle
7343  * @unmap_only: Flag to indicate "only unmap"
 * @mlo_peers_only: Flag to indicate "flush only MLO peers"
7344  *
7345  * Return: void
7346  */
7347 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
7348 				bool unmap_only,
7349 				bool mlo_peers_only)
7350 {
7351 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7352 	struct dp_pdev *pdev = vdev->pdev;
7353 	struct dp_soc *soc = pdev->soc;
7354 	struct dp_peer *peer;
7355 	uint32_t i = 0;
7356 
7358 	if (!unmap_only) {
7359 		if (!mlo_peers_only)
7360 			dp_vdev_iterate_peer_lock_safe(vdev,
7361 						       dp_peer_delete,
7362 						       NULL,
7363 						       DP_MOD_ID_CDP);
7364 		else
7365 			dp_vdev_iterate_peer_lock_safe(vdev,
7366 						       dp_mlo_peer_delete,
7367 						       NULL,
7368 						       DP_MOD_ID_CDP);
7369 	}
7370 
7371 	for (i = 0; i < soc->max_peer_id; i++) {
7372 		peer = __dp_peer_get_ref_by_id(soc, i, DP_MOD_ID_CDP);
7373 
7374 		if (!peer)
7375 			continue;
7376 
7377 		if (peer->vdev != vdev) {
7378 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7379 			continue;
7380 		}
7381 
7382 		if (!mlo_peers_only) {
7383 			dp_info("peer: " QDF_MAC_ADDR_FMT " is getting unmap",
7384 				QDF_MAC_ADDR_REF(peer->mac_addr.raw));
7385 			dp_rx_peer_unmap_handler(soc, i,
7386 						 vdev->vdev_id,
7387 						 peer->mac_addr.raw, 0,
7388 						 DP_PEER_WDS_COUNT_INVALID);
7389 			SET_PEER_REF_CNT_ONE(peer);
7390 		} else if (IS_MLO_DP_LINK_PEER(peer) ||
7391 			   IS_MLO_DP_MLD_PEER(peer)) {
7392 			dp_info("peer: " QDF_MAC_ADDR_FMT " is getting unmap",
7393 				QDF_MAC_ADDR_REF(peer->mac_addr.raw));
7394 			dp_rx_peer_unmap_handler(soc, i,
7395 						 vdev->vdev_id,
7396 						 peer->mac_addr.raw, 0,
7397 						 DP_PEER_WDS_COUNT_INVALID);
7398 			SET_PEER_REF_CNT_ONE(peer);
7399 		}
7400 
7401 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7402 	}
7403 }
7404 
7405 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
7406 /*
7407  * dp_txrx_alloc_vdev_stats_id() - Allocate vdev_stats_id
7408  * @soc_hdl: Datapath soc handle
7409  * @vdev_stats_id: Address of vdev_stats_id
7410  *
7411  * Return: QDF_STATUS
7412  */
7413 static QDF_STATUS dp_txrx_alloc_vdev_stats_id(struct cdp_soc_t *soc_hdl,
7414 					      uint8_t *vdev_stats_id)
7415 {
7416 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7417 	uint8_t id = 0;
7418 
7419 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
7420 		*vdev_stats_id = CDP_INVALID_VDEV_STATS_ID;
7421 		return QDF_STATUS_E_FAILURE;
7422 	}
7423 
7424 	while (id < CDP_MAX_VDEV_STATS_ID) {
7425 		if (!qdf_atomic_test_and_set_bit(id, &soc->vdev_stats_id_map)) {
7426 			*vdev_stats_id = id;
7427 			return QDF_STATUS_SUCCESS;
7428 		}
7429 		id++;
7430 	}
7431 
7432 	*vdev_stats_id = CDP_INVALID_VDEV_STATS_ID;
7433 	return QDF_STATUS_E_FAILURE;
7434 }
7435 
7436 /*
7437  * dp_txrx_reset_vdev_stats_id() - Reset vdev_stats_id in dp_soc
7438  * @soc_hdl: Datapath soc handle
7439  * @vdev_stats_id: vdev_stats_id to reset in dp_soc
7440  *
7441  * Return: none
7442  */
7443 static void dp_txrx_reset_vdev_stats_id(struct cdp_soc_t *soc_hdl,
7444 					uint8_t vdev_stats_id)
7445 {
7446 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7447 
7448 	if ((!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) ||
7449 	    (vdev_stats_id >= CDP_MAX_VDEV_STATS_ID))
7450 		return;
7451 
7452 	qdf_atomic_clear_bit(vdev_stats_id, &soc->vdev_stats_id_map);
7453 }
7454 #else
7455 static void dp_txrx_reset_vdev_stats_id(struct cdp_soc_t *soc,
7456 					uint8_t vdev_stats_id)
7457 {}
7458 #endif
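
/*
 * Lifecycle sketch (illustrative only): a vdev_stats_id is claimed from
 * the soc-wide bitmap at vdev create time and must be returned with
 * dp_txrx_reset_vdev_stats_id() on detach, as done in
 * dp_vdev_detach_wifi3() below.
 *
 *	uint8_t vdev_stats_id = CDP_INVALID_VDEV_STATS_ID;
 *
 *	if (dp_txrx_alloc_vdev_stats_id(soc_hdl, &vdev_stats_id) ==
 *	    QDF_STATUS_SUCCESS)
 *		vdev->vdev_stats_id = vdev_stats_id;
 *	...
 *	dp_txrx_reset_vdev_stats_id(soc_hdl, vdev->vdev_stats_id);
 */
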
7459 /*
7460  * dp_vdev_detach_wifi3() - Detach txrx vdev
7461  * @cdp_soc: Datapath soc handle
7462  * @vdev_id: VDEV Id
7463  * @callback: Callback OL_IF on completion of detach
7464  * @cb_context:	Callback context
7465  *
 * Return: QDF_STATUS
7466  */
7467 static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc,
7468 				       uint8_t vdev_id,
7469 				       ol_txrx_vdev_delete_cb callback,
7470 				       void *cb_context)
7471 {
7472 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
7473 	struct dp_pdev *pdev;
7474 	struct dp_neighbour_peer *peer = NULL;
7475 	struct dp_peer *vap_self_peer = NULL;
7476 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
7477 						     DP_MOD_ID_CDP);
7478 
7479 	if (!vdev)
7480 		return QDF_STATUS_E_FAILURE;
7481 
7482 	soc->arch_ops.txrx_vdev_detach(soc, vdev);
7483 
7484 	pdev = vdev->pdev;
7485 
7486 	vap_self_peer = dp_sta_vdev_self_peer_ref_n_get(soc, vdev,
7487 							DP_MOD_ID_CONFIG);
7488 	if (vap_self_peer) {
7489 		qdf_spin_lock_bh(&soc->ast_lock);
7490 		if (vap_self_peer->self_ast_entry) {
7491 			dp_peer_del_ast(soc, vap_self_peer->self_ast_entry);
7492 			vap_self_peer->self_ast_entry = NULL;
7493 		}
7494 		qdf_spin_unlock_bh(&soc->ast_lock);
7495 
7496 		dp_peer_delete_wifi3((struct cdp_soc_t *)soc, vdev->vdev_id,
7497 				     vap_self_peer->mac_addr.raw, 0,
7498 				     CDP_LINK_PEER_TYPE);
7499 		dp_peer_unref_delete(vap_self_peer, DP_MOD_ID_CONFIG);
7500 	}
7501 
7502 	/*
7503 	 * If the target is hung, flush all peers before detaching the vdev;
7504 	 * this will free all references held due to missing unmap commands
7505 	 * from the target.
7506 	 */
7507 	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
7508 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, false, false);
7509 	else if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
7510 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, true, false);
7511 
7512 	/* indicate that the vdev needs to be deleted */
7513 	vdev->delete.pending = 1;
7514 	dp_rx_vdev_detach(vdev);
7515 	/*
7516 	 * Keep this after dp_rx_vdev_detach(), as the callback invoked
7517 	 * in dp_rx_vdev_detach() still needs to look up the vdev pointer
7518 	 * by vdev_id.
7519 	 */
7520 	dp_vdev_id_map_tbl_remove(soc, vdev);
7521 
7522 	dp_monitor_neighbour_peer_list_remove(pdev, vdev, peer);
7523 
7524 	dp_txrx_reset_vdev_stats_id(cdp_soc, vdev->vdev_stats_id);
7525 
7526 	dp_tx_vdev_multipass_deinit(vdev);
7527 	dp_tx_vdev_traffic_end_indication_detach(vdev);
7528 
7529 	if (vdev->vdev_dp_ext_handle) {
7530 		qdf_mem_free(vdev->vdev_dp_ext_handle);
7531 		vdev->vdev_dp_ext_handle = NULL;
7532 	}
7533 	vdev->delete.callback = callback;
7534 	vdev->delete.context = cb_context;
7535 
7536 	if (vdev->opmode != wlan_op_mode_monitor)
7537 		dp_vdev_pdev_list_remove(soc, pdev, vdev);
7538 
7539 	pdev->vdev_count--;
7540 	/* release reference taken above for find */
7541 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7542 
7543 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
7544 	TAILQ_INSERT_TAIL(&soc->inactive_vdev_list, vdev, inactive_list_elem);
7545 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
7546 
7547 	/* release reference taken at dp_vdev_create */
7548 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
7549 
7550 	return QDF_STATUS_SUCCESS;
7551 }
7552 
7553 #ifdef WLAN_FEATURE_11BE_MLO
7554 /**
7555  * is_dp_peer_can_reuse() - check if the dp_peer matches the conditions to be reused
7556  * @vdev: Target DP vdev handle
7557  * @peer: DP peer handle to be checked
7558  * @peer_mac_addr: Target peer mac address
7559  * @peer_type: Target peer type
7560  *
7561  * Return: true - if match, false - not match
7562  */
7563 static inline
7564 bool is_dp_peer_can_reuse(struct dp_vdev *vdev,
7565 			  struct dp_peer *peer,
7566 			  uint8_t *peer_mac_addr,
7567 			  enum cdp_peer_type peer_type)
7568 {
7569 	if (peer->bss_peer && (peer->vdev == vdev) &&
7570 	    (peer->peer_type == peer_type) &&
7571 	    (qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw,
7572 			 QDF_MAC_ADDR_SIZE) == 0))
7573 		return true;
7574 
7575 	return false;
7576 }
7577 #else
7578 static inline
7579 bool is_dp_peer_can_reuse(struct dp_vdev *vdev,
7580 			  struct dp_peer *peer,
7581 			  uint8_t *peer_mac_addr,
7582 			  enum cdp_peer_type peer_type)
7583 {
7584 	if (peer->bss_peer && (peer->vdev == vdev) &&
7585 	    (qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw,
7586 			 QDF_MAC_ADDR_SIZE) == 0))
7587 		return true;
7588 
7589 	return false;
7590 }
7591 #endif
7592 
7593 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
7594 						uint8_t *peer_mac_addr,
7595 						enum cdp_peer_type peer_type)
7596 {
7597 	struct dp_peer *peer;
7598 	struct dp_soc *soc = vdev->pdev->soc;
7599 
7600 	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
7601 	TAILQ_FOREACH(peer, &soc->inactive_peer_list,
7602 		      inactive_list_elem) {
7603 
7604 		/* reuse bss peer only when vdev matches */
7605 		if (is_dp_peer_can_reuse(vdev, peer,
7606 					 peer_mac_addr, peer_type)) {
7607 			/* increment ref count for cdp_peer_create */
7608 			if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG) ==
7609 						QDF_STATUS_SUCCESS) {
7610 				TAILQ_REMOVE(&soc->inactive_peer_list, peer,
7611 					     inactive_list_elem);
7612 				qdf_spin_unlock_bh
7613 					(&soc->inactive_peer_list_lock);
7614 				return peer;
7615 			}
7616 		}
7617 	}
7618 
7619 	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
7620 	return NULL;
7621 }
7622 
7623 #ifdef FEATURE_AST
7624 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
7625 					       struct dp_pdev *pdev,
7626 					       uint8_t *peer_mac_addr)
7627 {
7628 	struct dp_ast_entry *ast_entry;
7629 
7630 	if (soc->ast_offload_support)
7631 		return;
7632 
7633 	qdf_spin_lock_bh(&soc->ast_lock);
7634 	if (soc->ast_override_support)
7635 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
7636 							    pdev->pdev_id);
7637 	else
7638 		ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
7639 
7640 	if (ast_entry && ast_entry->next_hop && !ast_entry->delete_in_progress)
7641 		dp_peer_del_ast(soc, ast_entry);
7642 
7643 	qdf_spin_unlock_bh(&soc->ast_lock);
7644 }
7645 #else
7646 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
7647 					       struct dp_pdev *pdev,
7648 					       uint8_t *peer_mac_addr)
7649 {
7650 }
7651 #endif
7652 
7653 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
7654 /*
7655  * dp_peer_hw_txrx_stats_init() - Initialize hw_txrx_stats_en in dp_txrx_peer
7656  * @soc: Datapath soc handle
7657  * @txrx_peer: Datapath txrx_peer handle
7658  *
7659  * Return: none
7660  */
7661 static inline
7662 void dp_peer_hw_txrx_stats_init(struct dp_soc *soc,
7663 				struct dp_txrx_peer *txrx_peer)
7664 {
7665 	txrx_peer->hw_txrx_stats_en =
7666 		wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx);
7667 }
7668 #else
7669 static inline
7670 void dp_peer_hw_txrx_stats_init(struct dp_soc *soc,
7671 				struct dp_txrx_peer *txrx_peer)
7672 {
7673 	txrx_peer->hw_txrx_stats_en = 0;
7674 }
7675 #endif
7676 
7677 static QDF_STATUS dp_txrx_peer_detach(struct dp_soc *soc, struct dp_peer *peer)
7678 {
7679 	struct dp_txrx_peer *txrx_peer;
7680 	struct dp_pdev *pdev;
7681 
7682 	/* dp_txrx_peer exists for mld peer and legacy peer */
7683 	if (peer->txrx_peer) {
7684 		txrx_peer = peer->txrx_peer;
7685 		peer->txrx_peer = NULL;
7686 		pdev = txrx_peer->vdev->pdev;
7687 
7688 		dp_peer_defrag_rx_tids_deinit(txrx_peer);
7689 		/*
7690 		 * Deallocate the extended stats context
7691 		 */
7692 		dp_peer_delay_stats_ctx_dealloc(soc, txrx_peer);
7693 		dp_peer_rx_bufq_resources_deinit(txrx_peer);
7694 		dp_peer_jitter_stats_ctx_dealloc(pdev, txrx_peer);
7695 		dp_peer_sawf_stats_ctx_free(soc, txrx_peer);
7696 
7697 		qdf_mem_free(txrx_peer);
7698 	}
7699 
7700 	return QDF_STATUS_SUCCESS;
7701 }
7702 
7703 static QDF_STATUS dp_txrx_peer_attach(struct dp_soc *soc, struct dp_peer *peer)
7704 {
7705 	struct dp_txrx_peer *txrx_peer;
7706 	struct dp_pdev *pdev;
7707 
7708 	txrx_peer = (struct dp_txrx_peer *)qdf_mem_malloc(sizeof(*txrx_peer));
7709 
7710 	if (!txrx_peer)
7711 		return QDF_STATUS_E_NOMEM; /* failure */
7712 
7713 	/* initialize the peer_id */
7714 	txrx_peer->peer_id = HTT_INVALID_PEER;
7715 	txrx_peer->vdev = peer->vdev;
7716 	pdev = peer->vdev->pdev;
7717 
7718 	DP_STATS_INIT(txrx_peer);
7719 
7720 	dp_wds_ext_peer_init(txrx_peer);
7721 	dp_peer_rx_bufq_resources_init(txrx_peer);
7722 	dp_peer_hw_txrx_stats_init(soc, txrx_peer);
7723 	/*
7724 	 * Allocate peer extended stats context. Fall through in
7725 	 * case of failure as it's not an implicit requirement to have
7726 	 * this object for regular statistics updates.
7727 	 */
7728 	if (dp_peer_delay_stats_ctx_alloc(soc, txrx_peer) !=
7729 					  QDF_STATUS_SUCCESS)
7730 		dp_warn("peer delay_stats ctx alloc failed");
7731 
7732 	/*
7733 	 * Allocate memory for jitter stats. Fall through in
7734 	 * case of failure as it's not an implicit requirement to have
7735 	 * this object for regular statistics updates.
7736 	 */
7737 	if (dp_peer_jitter_stats_ctx_alloc(pdev, txrx_peer) !=
7738 					   QDF_STATUS_SUCCESS)
7739 		dp_warn("peer jitter_stats ctx alloc failed");
7740 
7741 	dp_set_peer_isolation(txrx_peer, false);
7742 
7743 	dp_peer_defrag_rx_tids_init(txrx_peer);
7744 
7745 	if (dp_peer_sawf_stats_ctx_alloc(soc, txrx_peer) != QDF_STATUS_SUCCESS)
7746 		dp_warn("peer sawf stats alloc failed");
7747 
7748 	dp_txrx_peer_attach_add(soc, peer, txrx_peer);
7749 
7750 	return QDF_STATUS_SUCCESS;
7751 }
7752 
7753 static inline
7754 void dp_txrx_peer_stats_clr(struct dp_txrx_peer *txrx_peer)
7755 {
7756 	if (!txrx_peer)
7757 		return;
7758 
7759 	txrx_peer->tx_failed = 0;
7760 	txrx_peer->comp_pkt.num = 0;
7761 	txrx_peer->comp_pkt.bytes = 0;
7762 	txrx_peer->to_stack.num = 0;
7763 	txrx_peer->to_stack.bytes = 0;
7764 
7765 	DP_STATS_CLR(txrx_peer);
7766 	dp_peer_delay_stats_ctx_clr(txrx_peer);
7767 	dp_peer_jitter_stats_ctx_clr(txrx_peer);
7768 }
7769 
7770 /*
7771  * dp_peer_create_wifi3() - attach txrx peer
7772  * @soc_hdl: Datapath soc handle
7773  * @vdev_id: id of vdev
7774  * @peer_mac_addr: Peer MAC address
7775  * @peer_type: link or MLD peer type
7776  *
7777  * Return: QDF_STATUS
7778  */
7779 static QDF_STATUS
7780 dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
7781 		     uint8_t *peer_mac_addr, enum cdp_peer_type peer_type)
7782 {
7783 	struct dp_peer *peer;
7784 	int i;
7785 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7786 	struct dp_pdev *pdev;
7787 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
7788 	struct dp_vdev *vdev = NULL;
7789 
7790 	if (!peer_mac_addr)
7791 		return QDF_STATUS_E_FAILURE;
7792 
7793 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
7794 
7795 	if (!vdev)
7796 		return QDF_STATUS_E_FAILURE;
7797 
7798 	pdev = vdev->pdev;
7799 	soc = pdev->soc;
7800 
7801 	/*
7802 	 * If a peer entry with given MAC address already exists,
7803 	 * reuse the peer and reset the state of peer.
7804 	 */
7805 	peer = dp_peer_can_reuse(vdev, peer_mac_addr, peer_type);
7806 
7807 	if (peer) {
7808 		qdf_atomic_init(&peer->is_default_route_set);
7809 		dp_peer_cleanup(vdev, peer);
7810 
7811 		dp_peer_vdev_list_add(soc, vdev, peer);
7812 		dp_peer_find_hash_add(soc, peer);
7813 
7814 		dp_peer_rx_tids_create(peer);
7815 		if (IS_MLO_DP_MLD_PEER(peer))
7816 			dp_mld_peer_init_link_peers_info(peer);
7817 
7818 		qdf_spin_lock_bh(&soc->ast_lock);
7819 		dp_peer_delete_ast_entries(soc, peer);
7820 		qdf_spin_unlock_bh(&soc->ast_lock);
7821 
7822 		if ((vdev->opmode == wlan_op_mode_sta) &&
7823 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
7824 		     QDF_MAC_ADDR_SIZE)) {
7825 			ast_type = CDP_TXRX_AST_TYPE_SELF;
7826 		}
7827 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
7828 
7829 		peer->valid = 1;
7830 		peer->is_tdls_peer = false;
7831 		dp_local_peer_id_alloc(pdev, peer);
7832 
7833 		qdf_spinlock_create(&peer->peer_info_lock);
7834 
7835 		DP_STATS_INIT(peer);
7836 
7837 		/*
7838 		 * In tx_monitor mode, a filter may be set for an unassociated
7839 		 * peer; when that peer gets associated, the tx_cap_enabled
7840 		 * flag needs to be updated to support peer filtering.
7841 		 */
7842 		if (!IS_MLO_DP_MLD_PEER(peer)) {
7843 			dp_monitor_peer_tx_capture_filter_check(pdev, peer);
7844 			dp_monitor_peer_reset_stats(soc, peer);
7845 		}
7846 
7847 		if (peer->txrx_peer) {
7848 			dp_peer_rx_bufq_resources_init(peer->txrx_peer);
7849 			dp_txrx_peer_stats_clr(peer->txrx_peer);
7850 			dp_set_peer_isolation(peer->txrx_peer, false);
7851 			dp_wds_ext_peer_init(peer->txrx_peer);
7852 			dp_peer_hw_txrx_stats_init(soc, peer->txrx_peer);
7853 		}
7854 
7855 		dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
7856 
7857 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7858 		return QDF_STATUS_SUCCESS;
7859 	} else {
7860 		/*
7861 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
7862 		 * need to remove the AST entry which was earlier added as a WDS
7863 		 * entry.
7864 		 * If an AST entry exists, but no peer entry exists with the
7865 		 * given MAC address, we can deduce it to be a WDS entry.
7866 		 */
7867 		dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
7868 	}
7869 
7870 #ifdef notyet
7871 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
7872 		soc->mempool_ol_ath_peer);
7873 #else
7874 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
7875 #endif
7876 	if (!peer) {
7877 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7878 		return QDF_STATUS_E_FAILURE; /* failure */
7879 	}
7880 	wlan_minidump_log(peer,
7881 			  sizeof(*peer),
7882 			  soc->ctrl_psoc,
7883 			  WLAN_MD_DP_PEER, "dp_peer");
7884 
7885 	qdf_mem_zero(peer, sizeof(struct dp_peer));
7886 
7887 	/* store provided params */
7888 	peer->vdev = vdev;
7889 
7890 	/* initialize the peer_id */
7891 	peer->peer_id = HTT_INVALID_PEER;
7892 
7893 	qdf_mem_copy(
7894 		&peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE);
7895 
7896 	DP_PEER_SET_TYPE(peer, peer_type);
7897 	if (IS_MLO_DP_MLD_PEER(peer)) {
7898 		if (dp_txrx_peer_attach(soc, peer) !=
7899 				QDF_STATUS_SUCCESS)
7900 			goto fail; /* failure */
7901 
7902 		dp_mld_peer_init_link_peers_info(peer);
7903 	} else if (dp_monitor_peer_attach(soc, peer) !=
7904 				QDF_STATUS_SUCCESS)
7905 		dp_warn("peer monitor ctx alloc failed");
7906 
7907 	TAILQ_INIT(&peer->ast_entry_list);
7908 
7909 	/* get the vdev reference for new peer */
7910 	dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CHILD);
7911 
7912 	if ((vdev->opmode == wlan_op_mode_sta) &&
7913 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
7914 			 QDF_MAC_ADDR_SIZE)) {
7915 		ast_type = CDP_TXRX_AST_TYPE_SELF;
7916 	}
7917 	qdf_spinlock_create(&peer->peer_state_lock);
7918 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
7919 	qdf_spinlock_create(&peer->peer_info_lock);
7920 
7921 	/* reset the ast index to flowid table */
7922 	dp_peer_reset_flowq_map(peer);
7923 
7924 	qdf_atomic_init(&peer->ref_cnt);
7925 
7926 	for (i = 0; i < DP_MOD_ID_MAX; i++)
7927 		qdf_atomic_init(&peer->mod_refs[i]);
7928 
7929 	/* keep one reference for attach */
7930 	qdf_atomic_inc(&peer->ref_cnt);
7931 	qdf_atomic_inc(&peer->mod_refs[DP_MOD_ID_CONFIG]);
7932 
7933 	dp_peer_vdev_list_add(soc, vdev, peer);
7934 
7935 	/* TODO: See if hash based search is required */
7936 	dp_peer_find_hash_add(soc, peer);
7937 
7938 	/* Initialize the peer state */
7939 	peer->state = OL_TXRX_PEER_STATE_DISC;
7940 
7941 	dp_info("vdev %pK created peer %pK ("QDF_MAC_ADDR_FMT") ref_cnt: %d",
7942 		vdev, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
7943 		qdf_atomic_read(&peer->ref_cnt));
7944 	/*
7945 	 * For every peer MAP message, search and set bss_peer
7946 	 */
7947 	if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
7948 			QDF_MAC_ADDR_SIZE) == 0 &&
7949 			(wlan_op_mode_sta != vdev->opmode)) {
7950 		dp_info("vdev bss_peer!!");
7951 		peer->bss_peer = 1;
7952 		if (peer->txrx_peer)
7953 			peer->txrx_peer->bss_peer = 1;
7954 	}
7955 
7956 	if (wlan_op_mode_sta == vdev->opmode &&
7957 	    qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
7958 			QDF_MAC_ADDR_SIZE) == 0) {
7959 		peer->sta_self_peer = 1;
7960 	}
7961 
7962 	dp_peer_rx_tids_create(peer);
7963 
7964 	peer->valid = 1;
7965 	dp_local_peer_id_alloc(pdev, peer);
7966 	DP_STATS_INIT(peer);
7967 
7968 	if (dp_peer_sawf_ctx_alloc(soc, peer) != QDF_STATUS_SUCCESS)
7969 		dp_warn("peer sawf context alloc failed");
7970 
7971 	dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
7972 
7973 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7974 
7975 	return QDF_STATUS_SUCCESS;
7976 fail:
7977 	qdf_mem_free(peer);
7978 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7979 
7980 	return QDF_STATUS_E_FAILURE;
7981 }
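
/*
 * Peer lifecycle sketch (illustrative; real callers go through the cdp
 * ops table): create the peer, run setup (setup_info is NULL for the
 * legacy, non-MLO path), and delete it when the association ends.
 *
 *	dp_peer_create_wifi3(soc_hdl, vdev_id, peer_mac,
 *			     CDP_LINK_PEER_TYPE);
 *	dp_peer_setup_wifi3(soc_hdl, vdev_id, peer_mac, NULL);
 *	...
 *	dp_peer_delete_wifi3(soc_hdl, vdev_id, peer_mac, 0,
 *			     CDP_LINK_PEER_TYPE);
 */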
7982 
7983 static QDF_STATUS dp_peer_legacy_setup(struct dp_soc *soc, struct dp_peer *peer)
7984 {
7985 	/* txrx_peer might exist already in peer reuse case */
7986 	if (peer->txrx_peer)
7987 		return QDF_STATUS_SUCCESS;
7988 
7989 	if (dp_txrx_peer_attach(soc, peer) !=
7990 				QDF_STATUS_SUCCESS) {
7991 		dp_err("peer txrx ctx alloc failed");
7992 		return QDF_STATUS_E_FAILURE;
7993 	}
7994 
7995 	return QDF_STATUS_SUCCESS;
7996 }
7997 
7998 #ifdef WLAN_FEATURE_11BE_MLO
7999 QDF_STATUS dp_peer_mlo_setup(
8000 			struct dp_soc *soc,
8001 			struct dp_peer *peer,
8002 			uint8_t vdev_id,
8003 			struct cdp_peer_setup_info *setup_info)
8004 {
8005 	struct dp_peer *mld_peer = NULL;
8006 
8007 	/* Non-MLO connection, do nothing */
8008 	if (!setup_info || !setup_info->mld_peer_mac)
8009 		return QDF_STATUS_SUCCESS;
8010 
8011 	dp_info("link peer:" QDF_MAC_ADDR_FMT " mld peer:" QDF_MAC_ADDR_FMT
8012 		" first_link %d, primary_link %d",
8013 		QDF_MAC_ADDR_REF(peer->mac_addr.raw),
8014 		QDF_MAC_ADDR_REF(setup_info->mld_peer_mac),
8015 		setup_info->is_first_link,
8016 		setup_info->is_primary_link);
8017 
8018 	/* if this is the first link peer */
8019 	if (setup_info->is_first_link)
8020 		/* create MLD peer */
8021 		dp_peer_create_wifi3((struct cdp_soc_t *)soc,
8022 				     vdev_id,
8023 				     setup_info->mld_peer_mac,
8024 				     CDP_MLD_PEER_TYPE);
8025 
8026 	peer->first_link = setup_info->is_first_link;
8027 	peer->primary_link = setup_info->is_primary_link;
8028 	mld_peer = dp_mld_peer_find_hash_find(soc,
8029 					      setup_info->mld_peer_mac,
8030 					      0, vdev_id, DP_MOD_ID_CDP);
8031 	if (mld_peer) {
8032 		if (setup_info->is_first_link) {
8033 			/* assign rx_tid to mld peer */
8034 			mld_peer->rx_tid = peer->rx_tid;
8035 			/* no cdp_peer_setup for MLD peer,
8036 			 * set it for addba processing
8037 			 */
8038 			qdf_atomic_set(&mld_peer->is_default_route_set, 1);
8039 		} else {
8040 			/* free link peer original rx_tids mem */
8041 			dp_peer_rx_tids_destroy(peer);
8042 			/* assign mld peer rx_tid to link peer */
8043 			peer->rx_tid = mld_peer->rx_tid;
8044 		}
8045 
8046 		if (setup_info->is_primary_link &&
8047 		    !setup_info->is_first_link) {
8048 			/*
8049 			 * If the first link is not the primary link, then
8050 			 * mld_peer->vdev needs to be updated, as the primary
8051 			 * link dp_vdev is not the same one used during MLD
8052 			 * peer creation.
8053 			 */
8054 
8055 			/* release the ref to original dp_vdev */
8056 			dp_vdev_unref_delete(soc, mld_peer->vdev,
8057 					     DP_MOD_ID_CHILD);
8058 			/*
8059 			 * get the ref to new dp_vdev,
8060 			 * increase dp_vdev ref_cnt
8061 			 */
8062 			mld_peer->vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8063 							       DP_MOD_ID_CHILD);
8064 			mld_peer->txrx_peer->vdev = mld_peer->vdev;
8065 		}
8066 
8067 		/* associate mld and link peer */
8068 		dp_link_peer_add_mld_peer(peer, mld_peer);
8069 		dp_mld_peer_add_link_peer(mld_peer, peer);
8070 
8071 		mld_peer->txrx_peer->mld_peer = 1;
8072 		dp_peer_unref_delete(mld_peer, DP_MOD_ID_CDP);
8073 	} else {
8074 		peer->mld_peer = NULL;
8075 		dp_err("mld peer " QDF_MAC_ADDR_FMT " not found!",
8076 		       QDF_MAC_ADDR_REF(setup_info->mld_peer_mac));
8077 		return QDF_STATUS_E_FAILURE;
8078 	}
8079 
8080 	return QDF_STATUS_SUCCESS;
8081 }
8082 
8083 /*
8084  * dp_mlo_peer_authorize() - authorize MLO peer
8085  * @soc: soc handle
8086  * @peer: pointer to link peer
8087  *
8088  * Return: void
8089  */
8090 static void dp_mlo_peer_authorize(struct dp_soc *soc,
8091 				  struct dp_peer *peer)
8092 {
8093 	int i;
8094 	struct dp_peer *link_peer = NULL;
8095 	struct dp_peer *mld_peer = peer->mld_peer;
8096 	struct dp_mld_link_peers link_peers_info;
8097 
8098 	if (!mld_peer)
8099 		return;
8100 
8101 	/* get link peers with reference */
8102 	dp_get_link_peers_ref_from_mld_peer(soc, mld_peer,
8103 					    &link_peers_info,
8104 					    DP_MOD_ID_CDP);
8105 
8106 	for (i = 0; i < link_peers_info.num_links; i++) {
8107 		link_peer = link_peers_info.link_peers[i];
8108 
8109 		if (!link_peer->authorize) {
8110 			dp_release_link_peers_ref(&link_peers_info,
8111 						  DP_MOD_ID_CDP);
8112 			mld_peer->authorize = false;
8113 			return;
8114 		}
8115 	}
8116 
8117 	/* if we are here, all link peers are authorized;
8118 	 * authorize the mld_peer also
8119 	 */
8120 	mld_peer->authorize = true;
8121 
8122 	/* release link peers reference */
8123 	dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
8124 }
8125 #endif
8126 
8127 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
8128 				  enum cdp_host_reo_dest_ring *reo_dest,
8129 				  bool *hash_based)
8130 {
8131 	struct dp_soc *soc;
8132 	struct dp_pdev *pdev;
8133 
8134 	pdev = vdev->pdev;
8135 	soc = pdev->soc;
8136 	/*
8137 	 * hash based steering is disabled for Radios which are offloaded
8138 	 * to NSS
8139 	 */
8140 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
8141 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
8142 
8143 	/*
8144 	 * The assignment below ensures the proper reo_dest ring is chosen for
8145 	 * cases where a toeplitz hash cannot be generated (e.g. non TCP/UDP)
8146 	 */
8147 	*reo_dest = pdev->reo_dest;
8148 }
8149 
8150 #ifdef IPA_OFFLOAD
8151 /**
8152  * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
8153  * @vdev: Virtual device
8154  *
8155  * Return: true if the vdev is of subtype P2P
8156  *	   false if the vdev is of any other subtype
8157  */
8158 static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
8159 {
8160 	if (vdev->subtype == wlan_op_subtype_p2p_device ||
8161 	    vdev->subtype == wlan_op_subtype_p2p_cli ||
8162 	    vdev->subtype == wlan_op_subtype_p2p_go)
8163 		return true;
8164 
8165 	return false;
8166 }
8167 
8168 /*
8169  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
8170  * @vdev: Datapath VDEV handle
 * @setup_info: peer setup info for MLO
8171  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
8172  * @hash_based: pointer to hash value (enabled/disabled) to be populated
 * @lmac_peer_id_msb: lmac peer id MSB to be populated
8173  *
8174  * If IPA is enabled in ini, for SAP mode, disable hash based
8175  * steering, use default reo_dst ring for RX. Use config values for other modes.
8176  * Return: None
8177  */
8178 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
8179 				       struct cdp_peer_setup_info *setup_info,
8180 				       enum cdp_host_reo_dest_ring *reo_dest,
8181 				       bool *hash_based,
8182 				       uint8_t *lmac_peer_id_msb)
8183 {
8184 	struct dp_soc *soc;
8185 	struct dp_pdev *pdev;
8186 
8187 	pdev = vdev->pdev;
8188 	soc = pdev->soc;
8189 
8190 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
8191 
8192 	/* For P2P-GO interfaces we do not need to change the REO
8193 	 * configuration even if IPA config is enabled
8194 	 */
8195 	if (dp_is_vdev_subtype_p2p(vdev))
8196 		return;
8197 
8198 	/*
8199 	 * If IPA is enabled, disable hash-based flow steering and set
8200 	 * reo_dest_ring_4 as the REO ring to receive packets on.
8201 	 * IPA is configured to reap reo_dest_ring_4.
8202 	 *
8203 	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
8204  * enum values are from 1 - 4.
8205 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
8206 	 */
8207 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
8208 		if (vdev->opmode == wlan_op_mode_ap) {
8209 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
8210 			*hash_based = 0;
8211 		} else if (vdev->opmode == wlan_op_mode_sta &&
8212 			   dp_ipa_is_mdm_platform()) {
8213 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
8214 		}
8215 	}
8216 }
8217 
8218 #else
8219 
8220 /*
8221  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
8222  * @vdev: Datapath VDEV handle
 * @setup_info: peer setup info for MLO
8223  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
8224  * @hash_based: pointer to hash value (enabled/disabled) to be populated
 * @lmac_peer_id_msb: lmac peer id MSB to be populated
8225  *
8226  * Use system config values for hash based steering.
8227  * Return: None
8228  */
8229 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
8230 				       struct cdp_peer_setup_info *setup_info,
8231 				       enum cdp_host_reo_dest_ring *reo_dest,
8232 				       bool *hash_based,
8233 				       uint8_t *lmac_peer_id_msb)
8234 {
8235 	struct dp_soc *soc = vdev->pdev->soc;
8236 
8237 	soc->arch_ops.peer_get_reo_hash(vdev, setup_info, reo_dest, hash_based,
8238 					lmac_peer_id_msb);
8239 }
8240 #endif /* IPA_OFFLOAD */
8241 
8242 /*
8243  * dp_peer_setup_wifi3() - initialize the peer
8244  * @soc_hdl: soc handle object
8245  * @vdev_id: vdev_id of vdev object
8246  * @peer_mac: Peer's mac address
8247  * @setup_info: peer setup info for MLO
8248  *
8249  * Return: QDF_STATUS
8250  */
8251 static QDF_STATUS
8252 dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8253 		    uint8_t *peer_mac,
8254 		    struct cdp_peer_setup_info *setup_info)
8255 {
8256 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8257 	struct dp_pdev *pdev;
8258 	bool hash_based = 0;
8259 	enum cdp_host_reo_dest_ring reo_dest;
8260 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8261 	struct dp_vdev *vdev = NULL;
8262 	struct dp_peer *peer =
8263 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
8264 					       DP_MOD_ID_CDP);
8265 	struct dp_peer *mld_peer = NULL;
8266 	enum wlan_op_mode vdev_opmode;
8267 	uint8_t lmac_peer_id_msb = 0;
8268 
8269 	if (!peer)
8270 		return QDF_STATUS_E_FAILURE;
8271 
8272 	vdev = peer->vdev;
8273 	if (!vdev) {
8274 		status = QDF_STATUS_E_FAILURE;
8275 		goto fail;
8276 	}
8277 
8278 	/* save vdev related member in case vdev freed */
8279 	vdev_opmode = vdev->opmode;
8280 	pdev = vdev->pdev;
8281 	dp_peer_setup_get_reo_hash(vdev, setup_info,
8282 				   &reo_dest, &hash_based,
8283 				   &lmac_peer_id_msb);
8284 
8285 	dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u",
8286 		pdev->pdev_id, vdev->vdev_id,
8287 		vdev->opmode, hash_based, reo_dest);
8288 
8289 	/*
8290 	 * There are corner cases where AD1 = AD2 = the VAP's address,
8291 	 * i.e. both devices have the same MAC address. In these cases
8292 	 * we want such packets to be processed by the NULL Q handler,
8293 	 * which is the REO2TCL ring. For this reason we should not set
8294 	 * up reo_queues and the default route for the bss_peer.
8295 	 */
8296 	if (!IS_MLO_DP_MLD_PEER(peer))
8297 		dp_monitor_peer_tx_init(pdev, peer);
8298 
8299 	if (!setup_info)
8300 		if (dp_peer_legacy_setup(soc, peer) !=
8301 				QDF_STATUS_SUCCESS) {
8302 			status = QDF_STATUS_E_RESOURCES;
8303 			goto fail;
8304 		}
8305 
8306 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
8307 		status = QDF_STATUS_E_FAILURE;
8308 		goto fail;
8309 	}
8310 
8311 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
8312 		/* TODO: Check the destination ring number to be passed to FW */
8313 		soc->cdp_soc.ol_ops->peer_set_default_routing(
8314 				soc->ctrl_psoc,
8315 				peer->vdev->pdev->pdev_id,
8316 				peer->mac_addr.raw,
8317 				peer->vdev->vdev_id, hash_based, reo_dest,
8318 				lmac_peer_id_msb);
8319 	}
8320 
8321 	qdf_atomic_set(&peer->is_default_route_set, 1);
8322 
8323 	status = dp_peer_mlo_setup(soc, peer, vdev->vdev_id, setup_info);
8324 	if (QDF_IS_STATUS_ERROR(status)) {
8325 		dp_peer_err("peer mlo setup failed");
8326 		qdf_assert_always(0);
8327 	}
8328 
8329 	if (vdev_opmode != wlan_op_mode_monitor) {
8330 		/* In case of MLD peer, switch peer to mld peer and
8331 		 * do peer_rx_init.
8332 		 */
8333 		if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
8334 		    IS_MLO_DP_LINK_PEER(peer)) {
8335 			if (setup_info && setup_info->is_first_link) {
8336 				mld_peer = DP_GET_MLD_PEER_FROM_PEER(peer);
8337 				if (mld_peer)
8338 					dp_peer_rx_init(pdev, mld_peer);
8339 				else
8340 					dp_peer_err("MLD peer null. Primary link peer:%pK", peer);
8341 			}
8342 		} else {
8343 			dp_peer_rx_init(pdev, peer);
8344 		}
8345 	}
8346 
8347 	if (!IS_MLO_DP_MLD_PEER(peer))
8348 		dp_peer_ppdu_delayed_ba_init(peer);
8349 
8350 fail:
8351 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8352 	return status;
8353 }
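
/*
 * MLO setup sketch (illustrative only): for the first link peer of an
 * MLD peer, the caller passes a populated cdp_peer_setup_info; mld_mac
 * and link_peer_mac are hypothetical placeholders.
 *
 *	struct cdp_peer_setup_info setup_info = {0};
 *
 *	setup_info.mld_peer_mac = mld_mac;
 *	setup_info.is_first_link = 1;
 *	setup_info.is_primary_link = 1;
 *	dp_peer_setup_wifi3(soc_hdl, vdev_id, link_peer_mac, &setup_info);
 */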
8354 
8355 /*
8356  * dp_cp_peer_del_resp_handler() - Handle the peer delete response
8357  * @soc_hdl: Datapath SOC handle
8358  * @vdev_id: id of virtual device object
8359  * @mac_addr: Mac address of the peer
8360  *
8361  * Return: QDF_STATUS
8362  */
8363 static QDF_STATUS dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl,
8364 					      uint8_t vdev_id,
8365 					      uint8_t *mac_addr)
8366 {
8367 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8368 	struct dp_ast_entry  *ast_entry = NULL;
8369 	txrx_ast_free_cb cb = NULL;
8370 	void *cookie;
8371 
8372 	if (soc->ast_offload_support)
8373 		return QDF_STATUS_E_INVAL;
8374 
8375 	qdf_spin_lock_bh(&soc->ast_lock);
8376 
8377 	ast_entry =
8378 		dp_peer_ast_hash_find_by_vdevid(soc, mac_addr,
8379 						vdev_id);
8380 
8381 	/* in the case of qwrap we have multiple BSS peers
8382 	 * with the same mac address
8383 	 *
8384 	 * an AST entry for this mac address will be created
8385 	 * only for one peer, hence it will be NULL here
8386 	 */
8387 	if ((!ast_entry || !ast_entry->delete_in_progress) ||
8388 	    (ast_entry->peer_id != HTT_INVALID_PEER)) {
8389 		qdf_spin_unlock_bh(&soc->ast_lock);
8390 		return QDF_STATUS_E_FAILURE;
8391 	}
8392 
8393 	if (ast_entry->is_mapped)
8394 		soc->ast_table[ast_entry->ast_idx] = NULL;
8395 
8396 	DP_STATS_INC(soc, ast.deleted, 1);
8397 	dp_peer_ast_hash_remove(soc, ast_entry);
8398 
8399 	cb = ast_entry->callback;
8400 	cookie = ast_entry->cookie;
8401 	ast_entry->callback = NULL;
8402 	ast_entry->cookie = NULL;
8403 
8404 	soc->num_ast_entries--;
8405 	qdf_spin_unlock_bh(&soc->ast_lock);
8406 
8407 	if (cb) {
8408 		cb(soc->ctrl_psoc,
8409 		   dp_soc_to_cdp_soc(soc),
8410 		   cookie,
8411 		   CDP_TXRX_AST_DELETED);
8412 	}
8413 	qdf_mem_free(ast_entry);
8414 
8415 	return QDF_STATUS_SUCCESS;
8416 }
8417 
8418 /*
8419  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
8420  * @txrx_soc: cdp soc handle
8421  * @ac: Access category
8422  * @value: timeout value in millisec
8423  *
8424  * Return: void
8425  */
8426 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
8427 				    uint8_t ac, uint32_t value)
8428 {
8429 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
8430 
8431 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
8432 }
8433 
8434 /*
8435  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
8436  * @txrx_soc: cdp soc handle
8437  * @ac: access category
8438  * @value: timeout value in millisec
8439  *
8440  * Return: void
8441  */
8442 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
8443 				    uint8_t ac, uint32_t *value)
8444 {
8445 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
8446 
8447 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
8448 }
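
/*
 * Example (illustrative sketch): set a 1000 ms BA aging timeout for an
 * access category and read it back. AC index 0 is used here as a
 * stand-in for the caller's access-category identifier.
 *
 *	uint32_t ba_timeout = 0;
 *
 *	dp_set_ba_aging_timeout(txrx_soc, 0, 1000);
 *	dp_get_ba_aging_timeout(txrx_soc, 0, &ba_timeout);
 */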
8449 
8450 /*
8451  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
8452  * @txrx_soc: cdp soc handle
8453  * @pdev_id: id of physical device object
8454  * @val: reo destination ring index (1 - 4)
8455  *
8456  * Return: QDF_STATUS
8457  */
8458 static QDF_STATUS
8459 dp_set_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id,
8460 		     enum cdp_host_reo_dest_ring val)
8461 {
8462 	struct dp_pdev *pdev =
8463 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
8464 						   pdev_id);
8465 
8466 	if (pdev) {
8467 		pdev->reo_dest = val;
8468 		return QDF_STATUS_SUCCESS;
8469 	}
8470 
8471 	return QDF_STATUS_E_FAILURE;
8472 }
8473 
8474 /*
8475  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
8476  * @txrx_soc: cdp soc handle
8477  * @pdev_id: id of physical device object
8478  *
8479  * Return: reo destination ring index
8480  */
8481 static enum cdp_host_reo_dest_ring
8482 dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id)
8483 {
8484 	struct dp_pdev *pdev =
8485 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
8486 						   pdev_id);
8487 
8488 	if (pdev)
8489 		return pdev->reo_dest;
8490 	else
8491 		return cdp_host_reo_dest_ring_unknown;
8492 }
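
/*
 * Example (illustrative sketch, assuming the cdp_host_reo_dest_ring
 * enum exposes ring values 1 - 4): steer non-hash-steered rx for a
 * pdev to a specific REO ring and read the setting back.
 *
 *	if (dp_set_pdev_reo_dest(txrx_soc, pdev_id,
 *				 cdp_host_reo_dest_ring_2) ==
 *	    QDF_STATUS_SUCCESS)
 *		QDF_ASSERT(dp_get_pdev_reo_dest(txrx_soc, pdev_id) ==
 *			   cdp_host_reo_dest_ring_2);
 */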
8493 
8494 #ifdef WLAN_SUPPORT_MSCS
8495 /*
8496  * dp_record_mscs_params() - Record the MSCS parameters sent by the STA
8497  * in the MSCS Request to the AP. The AP makes a note of these
8498  * parameters while comparing the MSDUs sent by the STA, to send the
8499  * downlink traffic with the correct user priority.
8500  * @soc_hdl: Datapath soc handle
8501  * @peer_mac: STA MAC address
8502  * @vdev_id: ID of the vdev handle
8503  * @mscs_params: Structure having MSCS parameters obtained from handshake
8504  * @active: Flag to set MSCS active/inactive
8505  *
8506  * Return: QDF_STATUS - Success/Invalid
8507  */
8508 static QDF_STATUS
8509 dp_record_mscs_params(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
8510 		      uint8_t vdev_id, struct cdp_mscs_params *mscs_params,
8511 		      bool active)
8512 {
8513 	struct dp_peer *peer;
8514 	QDF_STATUS status = QDF_STATUS_E_INVAL;
8515 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8516 
8517 	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
8518 				      DP_MOD_ID_CDP);
8519 
8520 	if (!peer) {
8521 		dp_err("Peer is NULL!");
8522 		goto fail;
8523 	}
8524 	if (!active) {
8525 		dp_info("MSCS Procedure is terminated");
8526 		peer->mscs_active = active;
8527 		goto fail;
8528 	}
8529 
8530 	if (mscs_params->classifier_type == IEEE80211_TCLAS_MASK_CLA_TYPE_4) {
8531 		/* Populate entries inside IPV4 database first */
8532 		peer->mscs_ipv4_parameter.user_priority_bitmap =
8533 			mscs_params->user_pri_bitmap;
8534 		peer->mscs_ipv4_parameter.user_priority_limit =
8535 			mscs_params->user_pri_limit;
8536 		peer->mscs_ipv4_parameter.classifier_mask =
8537 			mscs_params->classifier_mask;
8538 
8539 		/* Populate entries inside IPV6 database */
8540 		peer->mscs_ipv6_parameter.user_priority_bitmap =
8541 			mscs_params->user_pri_bitmap;
8542 		peer->mscs_ipv6_parameter.user_priority_limit =
8543 			mscs_params->user_pri_limit;
8544 		peer->mscs_ipv6_parameter.classifier_mask =
8545 			mscs_params->classifier_mask;
8546 		peer->mscs_active = 1;
8547 		dp_info("\n\tMSCS Procedure request based parameters for "QDF_MAC_ADDR_FMT"\n"
8548 			"\tClassifier_type = %d\tUser priority bitmap = %x\n"
8549 			"\tUser priority limit = %x\tClassifier mask = %x",
8550 			QDF_MAC_ADDR_REF(peer_mac),
8551 			mscs_params->classifier_type,
8552 			peer->mscs_ipv4_parameter.user_priority_bitmap,
8553 			peer->mscs_ipv4_parameter.user_priority_limit,
8554 			peer->mscs_ipv4_parameter.classifier_mask);
8555 	}
8556 
8557 	status = QDF_STATUS_SUCCESS;
8558 fail:
8559 	if (peer)
8560 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8561 	return status;
8562 }
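
/*
 * Example (illustrative sketch; the field values are hypothetical):
 * record type-4 classifier MSCS parameters learnt from an MSCS Request
 * handshake.
 *
 *	struct cdp_mscs_params params = {0};
 *
 *	params.classifier_type = IEEE80211_TCLAS_MASK_CLA_TYPE_4;
 *	params.user_pri_bitmap = 0x3f;
 *	params.user_pri_limit = 0x7;
 *	params.classifier_mask = 0x5f;
 *	dp_record_mscs_params(soc_hdl, peer_mac, vdev_id, &params, true);
 */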
8563 #endif
8564 
8565 /*
8566  * dp_get_sec_type() - Get the security type
8567  * @soc: soc handle
8568  * @vdev_id: id of dp handle
8569  * @peer_mac: mac of datapath PEER handle
8570  * @sec_idx: Security id (mcast, ucast)
8571  *
8572  * Return: Security type
8573  */
8574 static int dp_get_sec_type(struct cdp_soc_t *soc, uint8_t vdev_id,
8575 			   uint8_t *peer_mac, uint8_t sec_idx)
8576 {
8577 	int sec_type = 0;
8578 	struct dp_peer *peer =
8579 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
8580 						       peer_mac, 0, vdev_id,
8581 						       DP_MOD_ID_CDP);
8582 
8583 	if (!peer) {
8584 		dp_cdp_err("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
8585 		return sec_type;
8586 	}
8587 
8588 	if (!peer->txrx_peer) {
8589 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8590 		dp_peer_debug("%pK: txrx peer is NULL!\n", soc);
8591 		return sec_type;
8592 	}
8593 	sec_type = peer->txrx_peer->security[sec_idx].sec_type;
8594 
8595 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8596 	return sec_type;
8597 }
8598 
8599 /*
8600  * dp_peer_authorize() - authorize txrx peer
8601  * @soc_hdl: soc handle
8602  * @vdev_id: id of dp handle
8603  * @peer_mac: mac of datapath PEER handle
8604  * @authorize: flag to authorize (1) or unauthorize (0) the peer
8605  *
 * Return: QDF_STATUS
8606  */
8607 static QDF_STATUS
8608 dp_peer_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8609 		  uint8_t *peer_mac, uint32_t authorize)
8610 {
8611 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8612 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8613 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac,
8614 							      0, vdev_id,
8615 							      DP_MOD_ID_CDP);
8616 
8617 	if (!peer) {
8618 		dp_cdp_debug("%pK: Peer is NULL!\n", soc);
8619 		status = QDF_STATUS_E_FAILURE;
8620 	} else {
8621 		peer->authorize = authorize ? 1 : 0;
8622 		if (peer->txrx_peer)
8623 			peer->txrx_peer->authorize = peer->authorize;
8624 
8625 		if (!peer->authorize)
8626 			dp_peer_flush_frags(soc_hdl, vdev_id, peer_mac);
8627 
8628 		dp_mlo_peer_authorize(soc, peer);
8629 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8630 	}
8631 
8632 	return status;
8633 }
8634 
8635 /*
8636  * dp_peer_get_authorize() - get peer authorize status
8637  * @soc_hdl: soc handle
8638  * @vdev_id: id of dp handle
8639  * @peer_mac: mac of datapath PEER handle
8640  *
8641  * Return: true if peer is authorized, false otherwise
8642  */
8643 static bool
8644 dp_peer_get_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8645 		      uint8_t *peer_mac)
8646 {
8647 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8648 	bool authorize = false;
8649 	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
8650 						      0, vdev_id,
8651 						      DP_MOD_ID_CDP);
8652 
8653 	if (!peer) {
8654 		dp_cdp_debug("%pK: Peer is NULL!\n", soc);
8655 		return authorize;
8656 	}
8657 
8658 	authorize = peer->authorize;
8659 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8660 
8661 	return authorize;
8662 }
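
/*
 * Example (illustrative only): authorize a peer once key installation
 * completes and verify the flag took effect.
 *
 *	if (dp_peer_authorize(soc_hdl, vdev_id, peer_mac, 1) ==
 *	    QDF_STATUS_SUCCESS)
 *		QDF_ASSERT(dp_peer_get_authorize(soc_hdl, vdev_id,
 *						 peer_mac));
 */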
8663 
8664 /**
8665  * dp_vdev_unref_delete() - check and process vdev delete
8666  * @soc: DP specific soc pointer
8667  * @vdev: DP specific vdev pointer
8668  * @mod_id: module id
8669  *
 * Return: void
8670  */
8671 void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev,
8672 			  enum dp_mod_id mod_id)
8673 {
8674 	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
8675 	void *vdev_delete_context = NULL;
8676 	uint8_t vdev_id = vdev->vdev_id;
8677 	struct dp_pdev *pdev = vdev->pdev;
8678 	struct dp_vdev *tmp_vdev = NULL;
8679 	uint8_t found = 0;
8680 
8681 	QDF_ASSERT(qdf_atomic_dec_return(&vdev->mod_refs[mod_id]) >= 0);
8682 
8683 	/* Return if this is not the last reference*/
8684 	if (!qdf_atomic_dec_and_test(&vdev->ref_cnt))
8685 		return;
8686 
8687 	/*
8688 	 * delete.pending should already be set, as the last reference
8689 	 * must be released only after cdp_vdev_detach() is called.
8690 	 *
8691 	 * If this assert is hit there is a ref count issue.
8692 	 */
8693 	QDF_ASSERT(vdev->delete.pending);
8694 
8695 	vdev_delete_cb = vdev->delete.callback;
8696 	vdev_delete_context = vdev->delete.context;
8697 
8698 	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")- its last peer is done",
8699 		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
8700 
8701 	if (wlan_op_mode_monitor == vdev->opmode) {
8702 		dp_monitor_vdev_delete(soc, vdev);
8703 		goto free_vdev;
8704 	}
8705 
8706 	/* all peers are gone, go ahead and delete it */
8707 	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
8708 			FLOW_TYPE_VDEV, vdev_id);
8709 	dp_tx_vdev_detach(vdev);
8710 	dp_monitor_vdev_detach(vdev);
8711 
8712 free_vdev:
8713 	qdf_spinlock_destroy(&vdev->peer_list_lock);
8714 
8715 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
8716 	TAILQ_FOREACH(tmp_vdev, &soc->inactive_vdev_list,
8717 		      inactive_list_elem) {
8718 		if (tmp_vdev == vdev) {
8719 			found = 1;
8720 			break;
8721 		}
8722 	}
8723 	if (found)
8724 		TAILQ_REMOVE(&soc->inactive_vdev_list, vdev,
8725 			     inactive_list_elem);
8726 	/* delete this vdev from the inactive list */
8727 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
8728 
8729 	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")",
8730 		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
8731 	wlan_minidump_remove(vdev, sizeof(*vdev), soc->ctrl_psoc,
8732 			     WLAN_MD_DP_VDEV, "dp_vdev");
8733 	qdf_mem_free(vdev);
8734 	vdev = NULL;
8735 
8736 	if (vdev_delete_cb)
8737 		vdev_delete_cb(vdev_delete_context);
8738 }
8739 
8740 qdf_export_symbol(dp_vdev_unref_delete);
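
/*
 * Reference usage pattern (as used throughout this file): every
 * dp_vdev_get_ref_by_id() must be balanced by a dp_vdev_unref_delete()
 * with the same module id; the vdev is freed only when the last
 * reference drops after delete.pending has been set by vdev detach.
 *
 *	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
 *						     DP_MOD_ID_CDP);
 *	if (vdev) {
 *		... use vdev ...
 *		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
 *	}
 */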
8741 
8742 /*
8743  * dp_peer_unref_delete() - unref and delete peer
8744  * @peer: Datapath peer handle
8745  * @mod_id: ID of module releasing reference
8746  *
 * Return: void
8747  */
8748 void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id mod_id)
8749 {
8750 	struct dp_vdev *vdev = peer->vdev;
8751 	struct dp_pdev *pdev = vdev->pdev;
8752 	struct dp_soc *soc = pdev->soc;
8753 	uint16_t peer_id;
8754 	struct dp_peer *tmp_peer;
8755 	bool found = false;
8756 
8757 	if (mod_id > DP_MOD_ID_RX)
8758 		QDF_ASSERT(qdf_atomic_dec_return(&peer->mod_refs[mod_id]) >= 0);
8759 
8760 	/*
8761 	 * Hold the lock all the way from checking if the peer ref count
8762 	 * is zero until the peer references are removed from the hash
8763 	 * table and vdev list (if the peer ref count is zero).
8764 	 * This protects against a new HL tx operation starting to use the
8765 	 * peer object just after this function concludes it's done being used.
8766 	 * Furthermore, the lock needs to be held while checking whether the
8767 	 * vdev's list of peers is empty, to make sure that list is not modified
8768 	 * concurrently with the empty check.
8769 	 */
8770 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
8771 		peer_id = peer->peer_id;
8772 
8773 		/*
8774 		 * Make sure that the reference to the peer in
8775 		 * peer object map is removed
8776 		 */
8777 		QDF_ASSERT(peer_id == HTT_INVALID_PEER);
8778 
8779 		dp_peer_debug("Deleting peer %pK ("QDF_MAC_ADDR_FMT")", peer,
8780 			      QDF_MAC_ADDR_REF(peer->mac_addr.raw));
8781 
8782 		dp_peer_sawf_ctx_free(soc, peer);
8783 
8784 		wlan_minidump_remove(peer, sizeof(*peer), soc->ctrl_psoc,
8785 				     WLAN_MD_DP_PEER, "dp_peer");
8786 
8787 		qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
8788 		TAILQ_FOREACH(tmp_peer, &soc->inactive_peer_list,
8789 			      inactive_list_elem) {
8790 			if (tmp_peer == peer) {
8791 				found = 1;
8792 				break;
8793 			}
8794 		}
8795 		if (found)
8796 			TAILQ_REMOVE(&soc->inactive_peer_list, peer,
8797 				     inactive_list_elem);
8798 		/* delete this peer from the list */
8799 		qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
8800 		DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
8801 		dp_peer_update_state(soc, peer, DP_PEER_STATE_FREED);
8802 
8803 		/* cleanup the peer data */
8804 		dp_peer_cleanup(vdev, peer);
8805 
8806 		if (!IS_MLO_DP_MLD_PEER(peer))
8807 			dp_monitor_peer_detach(soc, peer);
8808 
8809 		qdf_spinlock_destroy(&peer->peer_state_lock);
8810 
8811 		dp_txrx_peer_detach(soc, peer);
8812 		qdf_mem_free(peer);
8813 
8814 		/*
8815 		 * Decrement ref count taken at peer create
8816 		 */
8817 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CHILD);
8818 	}
8819 }
8820 
8821 qdf_export_symbol(dp_peer_unref_delete);
8822 
8823 /*
8824  * dp_txrx_peer_unref_delete() - unref and delete peer
8825  * @handle: Datapath txrx ref handle
8826  * @mod_id: Module ID of the caller
8827  *
8828  */
8829 void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle,
8830 			       enum dp_mod_id mod_id)
8831 {
8832 	dp_peer_unref_delete((struct dp_peer *)handle, mod_id);
8833 }
8834 
8835 qdf_export_symbol(dp_txrx_peer_unref_delete);
8836 
8837 /*
8838  * dp_peer_delete_wifi3() - Delete txrx peer
8839  * @soc_hdl: soc handle
8840  * @vdev_id: id of dp handle
8841  * @peer_mac: mac of datapath PEER handle
8842  * @bitmap: bitmap indicating special handling of request.
8843  * @peer_type: peer type (link or MLD)
8844  *
8845  */
8846 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
8847 				       uint8_t vdev_id,
8848 				       uint8_t *peer_mac, uint32_t bitmap,
8849 				       enum cdp_peer_type peer_type)
8850 {
8851 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8852 	struct dp_peer *peer;
8853 	struct cdp_peer_info peer_info = { 0 };
8854 	struct dp_vdev *vdev = NULL;
8855 
8856 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac,
8857 				 false, peer_type);
8858 	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
8859 
8860 	/* Peer can be null for monitor vap mac address */
8861 	if (!peer) {
8862 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
8863 			  "%s: Invalid peer\n", __func__);
8864 		return QDF_STATUS_E_FAILURE;
8865 	}
8866 
8867 	if (!peer->valid) {
8868 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8869 		dp_err("Invalid peer: "QDF_MAC_ADDR_FMT,
8870 			QDF_MAC_ADDR_REF(peer_mac));
8871 		return QDF_STATUS_E_ALREADY;
8872 	}
8873 
8874 	vdev = peer->vdev;
8875 
8876 	if (!vdev) {
8877 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8878 		return QDF_STATUS_E_FAILURE;
8879 	}
8880 
8881 	peer->valid = 0;
8882 
8883 	dp_init_info("%pK: peer %pK (" QDF_MAC_ADDR_FMT ")",
8884 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
8885 
8886 	dp_local_peer_id_free(peer->vdev->pdev, peer);
8887 
8888 	/* Drop all rx packets before deleting peer */
8889 	dp_clear_peer_internal(soc, peer);
8890 
8891 	qdf_spinlock_destroy(&peer->peer_info_lock);
8892 	dp_peer_multipass_list_remove(peer);
8893 
8894 	/* remove the reference to the peer from the hash table */
8895 	dp_peer_find_hash_remove(soc, peer);
8896 
8897 	dp_peer_vdev_list_remove(soc, vdev, peer);
8898 
8899 	dp_peer_mlo_delete(peer);
8900 
8901 	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
8902 	TAILQ_INSERT_TAIL(&soc->inactive_peer_list, peer,
8903 			  inactive_list_elem);
8904 	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
8905 
8906 	/*
8907 	 * Remove the reference added during peer_attach.
8908 	 * The peer will still be left allocated until the
8909 	 * PEER_UNMAP message arrives to remove the other
8910 	 * reference, added by the PEER_MAP message.
8911 	 */
8912 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
8913 	/*
8914 	 * Remove the reference taken above
8915 	 */
8916 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8917 
8918 	return QDF_STATUS_SUCCESS;
8919 }
8920 
8921 #ifdef DP_RX_UDP_OVER_PEER_ROAM
8922 static QDF_STATUS dp_update_roaming_peer_wifi3(struct cdp_soc_t *soc_hdl,
8923 					       uint8_t vdev_id,
8924 					       uint8_t *peer_mac,
8925 					       uint32_t auth_status)
8926 {
8927 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8928 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8929 						     DP_MOD_ID_CDP);
8930 	if (!vdev)
8931 		return QDF_STATUS_E_FAILURE;
8932 
8933 	vdev->roaming_peer_status = auth_status;
8934 	qdf_mem_copy(vdev->roaming_peer_mac.raw, peer_mac,
8935 		     QDF_MAC_ADDR_SIZE);
8936 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8937 
8938 	return QDF_STATUS_SUCCESS;
8939 }
8940 #endif
8941 /*
8942  * dp_get_vdev_mac_addr_wifi3() - Get vdev MAC address
8943  * @soc_hdl: Datapath soc handle
8944  * @vdev_id: virtual interface id
8945  *
8946  * Return: MAC address on success, NULL on failure.
8947  *
8948  */
8949 static uint8_t *dp_get_vdev_mac_addr_wifi3(struct cdp_soc_t *soc_hdl,
8950 					   uint8_t vdev_id)
8951 {
8952 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8953 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8954 						     DP_MOD_ID_CDP);
8955 	uint8_t *mac = NULL;
8956 
8957 	if (!vdev)
8958 		return NULL;
8959 
8960 	mac = vdev->mac_addr.raw;
8961 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8962 
8963 	return mac;
8964 }
8965 
8966 /*
8967  * dp_vdev_set_wds() - Enable/disable WDS for the vdev
8968  * @soc_hdl: DP soc handle
8969  * @vdev_id: id of DP VDEV handle
8970  * @val: WDS enable value
8971  *
8972  * Return: QDF_STATUS
8973  */
8974 static int dp_vdev_set_wds(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8975 			   uint32_t val)
8976 {
8977 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8978 	struct dp_vdev *vdev =
8979 		dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
8981 
8982 	if (!vdev)
8983 		return QDF_STATUS_E_FAILURE;
8984 
8985 	vdev->wds_enabled = val;
8986 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8987 
8988 	return QDF_STATUS_SUCCESS;
8989 }
8990 
8991 static int dp_get_opmode(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
8992 {
8993 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8994 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8995 						     DP_MOD_ID_CDP);
8996 	int opmode;
8997 
8998 	if (!vdev) {
8999 		dp_err("vdev for id %d is NULL", vdev_id);
9000 		return -EINVAL;
9001 	}
9002 	opmode = vdev->opmode;
9003 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9004 
9005 	return opmode;
9006 }
9007 
9008 /**
9009  * dp_get_os_rx_handles_from_vdev_wifi3() - Get os rx handles for a vdev
9010  * @soc_hdl: datapath soc handle
9011  * @vdev_id: vdev id for which os rx handles are needed
9012  * @stack_fn_p: pointer to stack function pointer
9013  * @osif_vdev_p: pointer to ol_osif_vdev_handle
9014  *
9015  * Return: void
9016  */
9017 static
9018 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_soc_t *soc_hdl,
9019 					  uint8_t vdev_id,
9020 					  ol_txrx_rx_fp *stack_fn_p,
9021 					  ol_osif_vdev_handle *osif_vdev_p)
9022 {
9023 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9024 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9025 						     DP_MOD_ID_CDP);
9026 
9027 	if (qdf_unlikely(!vdev)) {
9028 		*stack_fn_p = NULL;
9029 		*osif_vdev_p = NULL;
9030 		return;
9031 	}
9032 	*stack_fn_p = vdev->osif_rx_stack;
9033 	*osif_vdev_p = vdev->osif_vdev;
9034 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9035 }
9036 
9037 /**
9038  * dp_get_ctrl_pdev_from_vdev_wifi3() - Get control pdev of vdev
9039  * @soc_hdl: datapath soc handle
9040  * @vdev_id: virtual device/interface id
9041  *
9042  * Return: Handle to control pdev
9043  */
9044 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(
9045 						struct cdp_soc_t *soc_hdl,
9046 						uint8_t vdev_id)
9047 {
9048 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9049 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9050 						     DP_MOD_ID_CDP);
9051 	struct dp_pdev *pdev;
9052 
9053 	if (!vdev)
9054 		return NULL;
9055 
9056 	pdev = vdev->pdev;
9057 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9058 	return pdev ? (struct cdp_cfg *)pdev->wlan_cfg_ctx : NULL;
9059 }
9060 
9061 /**
9062  * dp_get_tx_pending() - read pending tx
9063  * @pdev_handle: Datapath PDEV handle
9064  *
9065  * Return: outstanding tx
9066  */
9067 static int32_t dp_get_tx_pending(struct cdp_pdev *pdev_handle)
9068 {
9069 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
9070 
9071 	return qdf_atomic_read(&pdev->num_tx_outstanding);
9072 }
9073 
9074 /**
9075  * dp_get_peer_mac_from_peer_id() - get peer mac
9076  * @soc: Datapath SOC handle
9077  * @peer_id: Peer ID
9078  * @peer_mac: MAC addr of PEER
9079  *
9080  * Return: QDF_STATUS
9081  */
9082 static QDF_STATUS dp_get_peer_mac_from_peer_id(struct cdp_soc_t *soc,
9083 					       uint32_t peer_id,
9084 					       uint8_t *peer_mac)
9085 {
9086 	struct dp_peer *peer;
9087 
9088 	if (soc && peer_mac) {
9089 		peer = dp_peer_get_ref_by_id((struct dp_soc *)soc,
9090 					     (uint16_t)peer_id,
9091 					     DP_MOD_ID_CDP);
9092 		if (peer) {
9093 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
9094 				     QDF_MAC_ADDR_SIZE);
9095 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9096 			return QDF_STATUS_SUCCESS;
9097 		}
9098 	}
9099 
9100 	return QDF_STATUS_E_FAILURE;
9101 }
9102 
9103 #ifdef MESH_MODE_SUPPORT
9104 static
9105 void dp_vdev_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
9106 {
9107 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
9108 
9109 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
9110 	vdev->mesh_vdev = val;
9111 	if (val)
9112 		vdev->skip_sw_tid_classification |=
9113 			DP_TX_MESH_ENABLED;
9114 	else
9115 		vdev->skip_sw_tid_classification &=
9116 			~DP_TX_MESH_ENABLED;
9117 }
9118 
9119 /*
9120  * dp_vdev_set_mesh_rx_filter() - to set the mesh rx filter
9121  * @vdev_hdl: virtual device object
9122  * @val: value to be set
9123  *
9124  * Return: void
9125  */
9126 static
9127 void dp_vdev_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
9128 {
9129 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
9130 
9131 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
9132 	vdev->mesh_rx_filter = val;
9133 }
9134 #endif
9135 
9136 /*
9137  * dp_vdev_set_hlos_tid_override() - to set hlos tid override
9138  * @vdev_hdl: virtual device object
9139  * @val: value to be set
9140  *
9141  * Return: void
9142  */
9143 static
9144 void dp_vdev_set_hlos_tid_override(struct dp_vdev *vdev, uint32_t val)
9145 {
9146 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
9147 	if (val)
9148 		vdev->skip_sw_tid_classification |=
9149 			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
9150 	else
9151 		vdev->skip_sw_tid_classification &=
9152 			~DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
9153 }
9154 
9155 /*
9156  * dp_vdev_get_hlos_tid_override() - to get hlos tid override flag
9157  * @vdev_hdl: virtual device object
9159  *
9160  * Return: 1 if this flag is set
9161  */
9162 static
9163 uint8_t dp_vdev_get_hlos_tid_override(struct cdp_vdev *vdev_hdl)
9164 {
9165 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
9166 
9167 	return !!(vdev->skip_sw_tid_classification &
9168 			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED);
9169 }
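
/*
 * Note: skip_sw_tid_classification is a bitmask, so the mesh flag and the
 * HLOS TID override flag above compose independently. For example,
 * enabling both leaves the field equal to
 * (DP_TX_MESH_ENABLED | DP_TXRX_HLOS_TID_OVERRIDE_ENABLED), and clearing
 * one does not disturb the other.
 */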
9170 
9171 #ifdef VDEV_PEER_PROTOCOL_COUNT
9172 static void dp_enable_vdev_peer_protocol_count(struct cdp_soc_t *soc_hdl,
9173 					       int8_t vdev_id,
9174 					       bool enable)
9175 {
9176 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9177 	struct dp_vdev *vdev;
9178 
9179 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
9180 	if (!vdev)
9181 		return;
9182 
9183 	dp_info("enable %d vdev_id %d", enable, vdev_id);
9184 	vdev->peer_protocol_count_track = enable;
9185 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9186 }
9187 
9188 static void dp_enable_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
9189 						   int8_t vdev_id,
9190 						   int drop_mask)
9191 {
9192 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9193 	struct dp_vdev *vdev;
9194 
9195 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
9196 	if (!vdev)
9197 		return;
9198 
9199 	dp_info("drop_mask %d vdev_id %d", drop_mask, vdev_id);
9200 	vdev->peer_protocol_count_dropmask = drop_mask;
9201 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9202 }
9203 
9204 static int dp_is_vdev_peer_protocol_count_enabled(struct cdp_soc_t *soc_hdl,
9205 						  int8_t vdev_id)
9206 {
9207 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9208 	struct dp_vdev *vdev;
9209 	int peer_protocol_count_track;
9210 
9211 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
9212 	if (!vdev)
9213 		return 0;
9214 
9215 	dp_info("enable %d vdev_id %d", vdev->peer_protocol_count_track,
9216 		vdev_id);
9217 	peer_protocol_count_track =
9218 		vdev->peer_protocol_count_track;
9219 
9220 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9221 	return peer_protocol_count_track;
9222 }
9223 
9224 static int dp_get_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
9225 					       int8_t vdev_id)
9226 {
9227 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9228 	struct dp_vdev *vdev;
9229 	int peer_protocol_count_dropmask;
9230 
9231 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
9232 	if (!vdev)
9233 		return 0;
9234 
9235 	dp_info("drop_mask %d vdev_id %d", vdev->peer_protocol_count_dropmask,
9236 		vdev_id);
9237 	peer_protocol_count_dropmask =
9238 		vdev->peer_protocol_count_dropmask;
9239 
9240 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9241 	return peer_protocol_count_dropmask;
9242 }
9243 
9244 #endif
9245 
9246 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data)
9247 {
9248 	uint8_t pdev_count;
9249 
9250 	for (pdev_count = 0; pdev_count < MAX_PDEV_CNT; pdev_count++) {
9251 		if (soc->pdev_list[pdev_count] &&
9252 		    soc->pdev_list[pdev_count] == data)
9253 			return true;
9254 	}
9255 	return false;
9256 }
9257 
9258 /**
9259  * dp_rx_bar_stats_cb(): BAR received stats callback
9260  * @soc: SOC handle
9261  * @cb_ctxt: Call back context
9262  * @reo_status: Reo status
9263  *
9264  * return: void
9265  */
9266 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
9267 	union hal_reo_status *reo_status)
9268 {
9269 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
9270 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
9271 
9272 	if (!dp_check_pdev_exists(soc, pdev)) {
9273 		dp_err_rl("pdev doesn't exist");
9274 		return;
9275 	}
9276 
9277 	if (!qdf_atomic_read(&soc->cmn_init_done))
9278 		return;
9279 
9280 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
9281 		DP_PRINT_STATS("REO stats failure %d",
9282 			       queue_status->header.status);
9283 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
9284 		return;
9285 	}
9286 
9287 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
9288 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
9289 
9290 }
9291 
9292 /**
9293  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
9294  * @vdev: DP VDEV handle
9295  * @vdev_stats: buffer to hold the aggregated vdev stats
9296  * return: void
9297  */
9298 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
9299 			     struct cdp_vdev_stats *vdev_stats)
9300 {
9301 	struct dp_soc *soc = NULL;
9302 
9303 	if (!vdev || !vdev->pdev)
9304 		return;
9305 
9306 	soc = vdev->pdev->soc;
9307 
9308 	dp_update_vdev_ingress_stats(vdev);
9309 
9310 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
9311 
9312 	dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, vdev_stats,
9313 			     DP_MOD_ID_GENERIC_STATS);
9314 
9315 	dp_update_vdev_rate_stats(vdev_stats, &vdev->stats);
9316 
9317 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9318 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
9319 			     vdev_stats, vdev->vdev_id,
9320 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
9321 #endif
9322 }
9323 
9324 void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
9325 {
9326 	struct dp_vdev *vdev = NULL;
9327 	struct dp_soc *soc;
9328 	struct cdp_vdev_stats *vdev_stats =
9329 			qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
9330 
9331 	if (!vdev_stats) {
9332 		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
9333 			   pdev->soc);
9334 		return;
9335 	}
9336 
9337 	soc = pdev->soc;
9338 
9339 	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
9340 	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
9341 	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
9342 	qdf_mem_zero(&pdev->stats.rx_i, sizeof(pdev->stats.rx_i));
9343 
9344 	if (dp_monitor_is_enable_mcopy_mode(pdev))
9345 		dp_monitor_invalid_peer_update_pdev_stats(soc, pdev);
9346 
9347 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
9348 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
9349 
9350 		dp_aggregate_vdev_stats(vdev, vdev_stats);
9351 		dp_update_pdev_stats(pdev, vdev_stats);
9352 		dp_update_pdev_ingress_stats(pdev, vdev);
9353 	}
9354 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
9355 	qdf_mem_free(vdev_stats);
9356 
9357 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9358 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
9359 			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
9360 #endif
9361 }
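
/*
 * Illustrative sketch (hypothetical helper, not used by the datapath): the
 * vdev-list walk above must hold pdev->vdev_list_lock across the whole
 * TAILQ_FOREACH, since vdevs can be attached/detached concurrently.
 */
static inline uint32_t dp_count_vdevs_sketch(struct dp_pdev *pdev)
{
	struct dp_vdev *vdev;
	uint32_t count = 0;

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem)
		count++;
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	return count;
}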
9362 
9363 /**
9364  * dp_vdev_getstats() - get vdev packet level stats
9365  * @vdev_handle: Datapath VDEV handle
9366  * @stats: cdp network device stats structure
9367  *
9368  * Return: QDF_STATUS
9369  */
9370 static QDF_STATUS dp_vdev_getstats(struct cdp_vdev *vdev_handle,
9371 				   struct cdp_dev_stats *stats)
9372 {
9373 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
9374 	struct dp_pdev *pdev;
9375 	struct dp_soc *soc;
9376 	struct cdp_vdev_stats *vdev_stats;
9377 
9378 	if (!vdev)
9379 		return QDF_STATUS_E_FAILURE;
9380 
9381 	pdev = vdev->pdev;
9382 	if (!pdev)
9383 		return QDF_STATUS_E_FAILURE;
9384 
9385 	soc = pdev->soc;
9386 
9387 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
9388 
9389 	if (!vdev_stats) {
9390 		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
9391 			   soc);
9392 		return QDF_STATUS_E_FAILURE;
9393 	}
9394 
9395 	dp_aggregate_vdev_stats(vdev, vdev_stats);
9396 
9397 	stats->tx_packets = vdev_stats->tx.comp_pkt.num;
9398 	stats->tx_bytes = vdev_stats->tx.comp_pkt.bytes;
9399 
9400 	stats->tx_errors = vdev_stats->tx.tx_failed;
9401 	stats->tx_dropped = vdev_stats->tx_i.dropped.dropped_pkt.num +
9402 			    vdev_stats->tx_i.sg.dropped_host.num +
9403 			    vdev_stats->tx_i.mcast_en.dropped_map_error +
9404 			    vdev_stats->tx_i.mcast_en.dropped_self_mac +
9405 			    vdev_stats->tx_i.mcast_en.dropped_send_fail +
9406 			    vdev_stats->tx.nawds_mcast_drop;
9407 
9408 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
9409 		stats->rx_packets = vdev_stats->rx.to_stack.num;
9410 		stats->rx_bytes = vdev_stats->rx.to_stack.bytes;
9411 	} else {
9412 		stats->rx_packets = vdev_stats->rx_i.reo_rcvd_pkt.num +
9413 				    vdev_stats->rx_i.null_q_desc_pkt.num +
9414 				    vdev_stats->rx_i.routed_eapol_pkt.num;
9415 		stats->rx_bytes = vdev_stats->rx_i.reo_rcvd_pkt.bytes +
9416 				  vdev_stats->rx_i.null_q_desc_pkt.bytes +
9417 				  vdev_stats->rx_i.routed_eapol_pkt.bytes;
9418 	}
9419 
9420 	stats->rx_errors = vdev_stats->rx.err.mic_err +
9421 			   vdev_stats->rx.err.decrypt_err +
9422 			   vdev_stats->rx.err.fcserr +
9423 			   vdev_stats->rx.err.pn_err +
9424 			   vdev_stats->rx.err.oor_err +
9425 			   vdev_stats->rx.err.jump_2k_err +
9426 			   vdev_stats->rx.err.rxdma_wifi_parse_err;
9427 
9428 	stats->rx_dropped = vdev_stats->rx.mec_drop.num +
9429 			    vdev_stats->rx.multipass_rx_pkt_drop +
9430 			    vdev_stats->rx.peer_unauth_rx_pkt_drop +
9431 			    vdev_stats->rx.policy_check_drop +
9432 			    vdev_stats->rx.nawds_mcast_drop +
9433 			    vdev_stats->rx.mcast_3addr_drop;
9434 
9435 	qdf_mem_free(vdev_stats);
9436 
9437 	return QDF_STATUS_SUCCESS;
9438 }
9439 
9440 /**
9441  * dp_pdev_getstats() - get pdev packet level stats
9442  * @pdev_handle: Datapath PDEV handle
9443  * @stats: cdp network device stats structure
9444  *
9445  * Return: QDF_STATUS
9446  */
9447 static void dp_pdev_getstats(struct cdp_pdev *pdev_handle,
9448 			     struct cdp_dev_stats *stats)
9449 {
9450 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
9451 
9452 	dp_aggregate_pdev_stats(pdev);
9453 
9454 	stats->tx_packets = pdev->stats.tx.comp_pkt.num;
9455 	stats->tx_bytes = pdev->stats.tx.comp_pkt.bytes;
9456 
9457 	stats->tx_errors = pdev->stats.tx.tx_failed;
9458 	stats->tx_dropped = pdev->stats.tx_i.dropped.dropped_pkt.num +
9459 			    pdev->stats.tx_i.sg.dropped_host.num +
9460 			    pdev->stats.tx_i.mcast_en.dropped_map_error +
9461 			    pdev->stats.tx_i.mcast_en.dropped_self_mac +
9462 			    pdev->stats.tx_i.mcast_en.dropped_send_fail +
9463 			    pdev->stats.tx.nawds_mcast_drop +
9464 			    pdev->stats.tso_stats.dropped_host.num;
9465 
9466 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(pdev->soc->wlan_cfg_ctx)) {
9467 		stats->rx_packets = pdev->stats.rx.to_stack.num;
9468 		stats->rx_bytes = pdev->stats.rx.to_stack.bytes;
9469 	} else {
9470 		stats->rx_packets = pdev->stats.rx_i.reo_rcvd_pkt.num +
9471 				    pdev->stats.rx_i.null_q_desc_pkt.num +
9472 				    pdev->stats.rx_i.routed_eapol_pkt.num;
9473 		stats->rx_bytes = pdev->stats.rx_i.reo_rcvd_pkt.bytes +
9474 				  pdev->stats.rx_i.null_q_desc_pkt.bytes +
9475 				  pdev->stats.rx_i.routed_eapol_pkt.bytes;
9476 	}
9477 
9478 	stats->rx_errors = pdev->stats.err.ip_csum_err +
9479 		pdev->stats.err.tcp_udp_csum_err +
9480 		pdev->stats.rx.err.mic_err +
9481 		pdev->stats.rx.err.decrypt_err +
9482 		pdev->stats.rx.err.fcserr +
9483 		pdev->stats.rx.err.pn_err +
9484 		pdev->stats.rx.err.oor_err +
9485 		pdev->stats.rx.err.jump_2k_err +
9486 		pdev->stats.rx.err.rxdma_wifi_parse_err;
9487 	stats->rx_dropped = pdev->stats.dropped.msdu_not_done +
9488 		pdev->stats.dropped.mec +
9489 		pdev->stats.dropped.mesh_filter +
9490 		pdev->stats.dropped.wifi_parse +
9491 		pdev->stats.dropped.mon_rx_drop +
9492 		pdev->stats.dropped.mon_radiotap_update_err +
9493 		pdev->stats.rx.mec_drop.num +
9494 		pdev->stats.rx.multipass_rx_pkt_drop +
9495 		pdev->stats.rx.peer_unauth_rx_pkt_drop +
9496 		pdev->stats.rx.policy_check_drop +
9497 		pdev->stats.rx.nawds_mcast_drop +
9498 		pdev->stats.rx.mcast_3addr_drop;
9499 }
9500 
9501 /**
9502  * dp_get_device_stats() - get interface level packet stats
9503  * @soc: soc handle
9504  * @id : vdev_id or pdev_id based on type
9505  * @stats: cdp network device stats structure
9506  * @type: device type pdev/vdev
9507  *
9508  * Return: QDF_STATUS
9509  */
9510 static QDF_STATUS dp_get_device_stats(struct cdp_soc_t *soc_hdl, uint8_t id,
9511 				      struct cdp_dev_stats *stats,
9512 				      uint8_t type)
9513 {
9514 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9515 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
9516 	struct dp_vdev *vdev;
9517 
9518 	switch (type) {
9519 	case UPDATE_VDEV_STATS:
9520 		vdev = dp_vdev_get_ref_by_id(soc, id, DP_MOD_ID_CDP);
9521 
9522 		if (vdev) {
9523 			status = dp_vdev_getstats((struct cdp_vdev *)vdev,
9524 						  stats);
9525 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9526 		}
9527 		return status;
9528 	case UPDATE_PDEV_STATS:
9529 		{
9530 			struct dp_pdev *pdev =
9531 				dp_get_pdev_from_soc_pdev_id_wifi3(
9532 						(struct dp_soc *)soc,
9533 						 id);
9534 			if (pdev) {
9535 				dp_pdev_getstats((struct cdp_pdev *)pdev,
9536 						 stats);
9537 				return QDF_STATUS_SUCCESS;
9538 			}
9539 		}
9540 		break;
9541 	default:
9542 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9543 			"apstats cannot be updated for this input "
9544 			"type %d", type);
9545 		break;
9546 	}
9547 
9548 	return QDF_STATUS_E_FAILURE;
9549 }
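
/*
 * Illustrative usage sketch (hypothetical wrapper): @id is interpreted as
 * a vdev_id for UPDATE_VDEV_STATS and as a pdev_id for UPDATE_PDEV_STATS,
 * so a caller picks the type matching the identifier it holds.
 */
static inline QDF_STATUS
dp_get_vdev_dev_stats_sketch(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			     struct cdp_dev_stats *stats)
{
	return dp_get_device_stats(soc_hdl, vdev_id, stats,
				   UPDATE_VDEV_STATS);
}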
9550 
9551 const
9552 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
9553 {
9554 	switch (ring_type) {
9555 	case REO_DST:
9556 		return "Reo_dst";
9557 	case REO_EXCEPTION:
9558 		return "Reo_exception";
9559 	case REO_CMD:
9560 		return "Reo_cmd";
9561 	case REO_REINJECT:
9562 		return "Reo_reinject";
9563 	case REO_STATUS:
9564 		return "Reo_status";
9565 	case WBM2SW_RELEASE:
9566 		return "wbm2sw_release";
9567 	case TCL_DATA:
9568 		return "tcl_data";
9569 	case TCL_CMD_CREDIT:
9570 		return "tcl_cmd_credit";
9571 	case TCL_STATUS:
9572 		return "tcl_status";
9573 	case SW2WBM_RELEASE:
9574 		return "sw2wbm_release";
9575 	case RXDMA_BUF:
9576 		return "Rxdma_buf";
9577 	case RXDMA_DST:
9578 		return "Rxdma_dst";
9579 	case RXDMA_MONITOR_BUF:
9580 		return "Rxdma_monitor_buf";
9581 	case RXDMA_MONITOR_DESC:
9582 		return "Rxdma_monitor_desc";
9583 	case RXDMA_MONITOR_STATUS:
9584 		return "Rxdma_monitor_status";
9585 	case RXDMA_MONITOR_DST:
9586 		return "Rxdma_monitor_destination";
9587 	case WBM_IDLE_LINK:
9588 		return "WBM_hw_idle_link";
9589 	default:
9590 		dp_err("Invalid ring type");
9591 		break;
9592 	}
9593 	return "Invalid";
9594 }
9595 
9596 /*
9597  * dp_print_napi_stats(): print NAPI stats
9598  * @soc: soc handle
9599  */
9600 void dp_print_napi_stats(struct dp_soc *soc)
9601 {
9602 	hif_print_napi_stats(soc->hif_handle);
9603 }
9604 
9605 /**
9606  * dp_txrx_host_peer_stats_clr(): Reinitialize the txrx peer stats
9607  * @soc: Datapath soc
9608  * @peer: Datapath peer
9609  * @arg: argument to iter function
9610  *
9611  * Return: None
9612  */
9613 static inline void
9614 dp_txrx_host_peer_stats_clr(struct dp_soc *soc,
9615 			    struct dp_peer *peer,
9616 			    void *arg)
9617 {
9618 	struct dp_txrx_peer *txrx_peer = NULL;
9619 	struct dp_peer *tgt_peer = NULL;
9620 	struct cdp_interface_peer_stats peer_stats_intf;
9621 
9622 	qdf_mem_zero(&peer_stats_intf, sizeof(struct cdp_interface_peer_stats));
9623 
9624 	DP_STATS_CLR(peer);
9625 	/* Clear monitor peer stats */
9626 	dp_monitor_peer_reset_stats(soc, peer);
9627 
9628 	/* Clear MLD peer stats only when link peer is primary */
9629 	if (dp_peer_is_primary_link_peer(peer)) {
9630 		tgt_peer = dp_get_tgt_peer_from_peer(peer);
9631 		if (tgt_peer) {
9632 			DP_STATS_CLR(tgt_peer);
9633 			txrx_peer = tgt_peer->txrx_peer;
9634 			dp_txrx_peer_stats_clr(txrx_peer);
9635 		}
9636 	}
9637 
9638 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9639 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, peer->vdev->pdev->soc,
9640 			     &peer_stats_intf,  peer->peer_id,
9641 			     UPDATE_PEER_STATS, peer->vdev->pdev->pdev_id);
9642 #endif
9643 }
9644 
9645 #ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING
9646 static inline void dp_srng_clear_ring_usage_wm_stats(struct dp_soc *soc)
9647 {
9648 	int ring;
9649 
9650 	for (ring = 0; ring < soc->num_reo_dest_rings; ring++)
9651 		hal_srng_clear_ring_usage_wm_locked(soc->hal_soc,
9652 					    soc->reo_dest_ring[ring].hal_srng);
9653 }
9654 #else
9655 static inline void dp_srng_clear_ring_usage_wm_stats(struct dp_soc *soc)
9656 {
9657 }
9658 #endif
9659 
9660 /**
9661  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
9662  * @vdev: DP_VDEV handle
9663  * @soc: DP_SOC handle
9664  *
9665  * Return: QDF_STATUS
9666  */
9667 static inline QDF_STATUS
9668 dp_txrx_host_stats_clr(struct dp_vdev *vdev, struct dp_soc *soc)
9669 {
9670 	if (!vdev || !vdev->pdev)
9671 		return QDF_STATUS_E_FAILURE;
9672 
9673 	/*
9674 	 * If NSS offload is enabled, send a message to NSS FW to clear
9675 	 * the stats. Once NSS FW clears the statistics, clear the host
9676 	 * statistics as well.
9677 	 */
9678 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
9679 		if (soc->cdp_soc.ol_ops->nss_stats_clr)
9680 			soc->cdp_soc.ol_ops->nss_stats_clr(soc->ctrl_psoc,
9681 							   vdev->vdev_id);
9682 	}
9683 
9684 	dp_vdev_stats_hw_offload_target_clear(soc, vdev->pdev->pdev_id,
9685 					      (1 << vdev->vdev_id));
9686 
9687 	DP_STATS_CLR(vdev->pdev);
9688 	DP_STATS_CLR(vdev->pdev->soc);
9689 	DP_STATS_CLR(vdev);
9690 
9691 	hif_clear_napi_stats(vdev->pdev->soc->hif_handle);
9692 
9693 	dp_vdev_iterate_peer(vdev, dp_txrx_host_peer_stats_clr, NULL,
9694 			     DP_MOD_ID_GENERIC_STATS);
9695 
9696 	dp_srng_clear_ring_usage_wm_stats(soc);
9697 
9698 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9699 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
9700 			     &vdev->stats,  vdev->vdev_id,
9701 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
9702 #endif
9703 	return QDF_STATUS_SUCCESS;
9704 }
9705 
9706 /**
9707  * dp_get_peer_calibr_stats() - Get peer calibrated stats
9708  * @peer: Datapath peer
9709  * @peer_stats: buffer for peer stats
9710  *
9711  * Return: none
9712  */
9713 static inline
9714 void dp_get_peer_calibr_stats(struct dp_peer *peer,
9715 			      struct cdp_peer_stats *peer_stats)
9716 {
9717 	struct dp_peer *tgt_peer;
9718 
9719 	tgt_peer = dp_get_tgt_peer_from_peer(peer);
9720 	if (!tgt_peer)
9721 		return;
9722 
9723 	peer_stats->tx.last_per = tgt_peer->stats.tx.last_per;
9724 	peer_stats->tx.tx_bytes_success_last =
9725 				tgt_peer->stats.tx.tx_bytes_success_last;
9726 	peer_stats->tx.tx_data_success_last =
9727 					tgt_peer->stats.tx.tx_data_success_last;
9728 	peer_stats->tx.tx_byte_rate = tgt_peer->stats.tx.tx_byte_rate;
9729 	peer_stats->tx.tx_data_rate = tgt_peer->stats.tx.tx_data_rate;
9730 	peer_stats->tx.tx_data_ucast_last =
9731 					tgt_peer->stats.tx.tx_data_ucast_last;
9732 	peer_stats->tx.tx_data_ucast_rate =
9733 					tgt_peer->stats.tx.tx_data_ucast_rate;
9734 	peer_stats->tx.inactive_time = tgt_peer->stats.tx.inactive_time;
9735 	peer_stats->rx.rx_bytes_success_last =
9736 				tgt_peer->stats.rx.rx_bytes_success_last;
9737 	peer_stats->rx.rx_data_success_last =
9738 				tgt_peer->stats.rx.rx_data_success_last;
9739 	peer_stats->rx.rx_byte_rate = tgt_peer->stats.rx.rx_byte_rate;
9740 	peer_stats->rx.rx_data_rate = tgt_peer->stats.rx.rx_data_rate;
9741 }
9742 
9743 /**
9744  * dp_get_peer_basic_stats() - Get peer basic stats
9745  * @peer: Datapath peer
9746  * @peer_stats: buffer for peer stats
9747  *
9748  * Return: none
9749  */
9750 #ifdef QCA_ENHANCED_STATS_SUPPORT
9751 static inline
9752 void dp_get_peer_basic_stats(struct dp_peer *peer,
9753 			     struct cdp_peer_stats *peer_stats)
9754 {
9755 	struct dp_txrx_peer *txrx_peer;
9756 
9757 	txrx_peer = dp_get_txrx_peer(peer);
9758 	if (!txrx_peer)
9759 		return;
9760 
9761 	peer_stats->tx.comp_pkt.num += txrx_peer->comp_pkt.num;
9762 	peer_stats->tx.comp_pkt.bytes += txrx_peer->comp_pkt.bytes;
9763 	peer_stats->tx.tx_failed += txrx_peer->tx_failed;
9764 	peer_stats->rx.to_stack.num += txrx_peer->to_stack.num;
9765 	peer_stats->rx.to_stack.bytes += txrx_peer->to_stack.bytes;
9766 }
9767 #else
9768 static inline
9769 void dp_get_peer_basic_stats(struct dp_peer *peer,
9770 			     struct cdp_peer_stats *peer_stats)
9771 {
9772 	struct dp_txrx_peer *txrx_peer;
9773 
9774 	txrx_peer = peer->txrx_peer;
9775 	if (!txrx_peer)
9776 		return;
9777 
9778 	peer_stats->tx.comp_pkt.num += txrx_peer->comp_pkt.num;
9779 	peer_stats->tx.comp_pkt.bytes += txrx_peer->comp_pkt.bytes;
9780 	peer_stats->tx.tx_failed += txrx_peer->tx_failed;
9781 	peer_stats->rx.to_stack.num += txrx_peer->to_stack.num;
9782 	peer_stats->rx.to_stack.bytes += txrx_peer->to_stack.bytes;
9783 }
9784 #endif
9785 
9786 /**
9787  * dp_get_peer_per_pkt_stats() - Get peer per pkt stats
9788  * @peer: Datapath peer
9789  * @peer_stats: buffer for peer stats
9790  *
9791  * Return: none
9792  */
9793 #ifdef QCA_ENHANCED_STATS_SUPPORT
9794 static inline
9795 void dp_get_peer_per_pkt_stats(struct dp_peer *peer,
9796 			       struct cdp_peer_stats *peer_stats)
9797 {
9798 	struct dp_txrx_peer *txrx_peer;
9799 	struct dp_peer_per_pkt_stats *per_pkt_stats;
9800 
9801 	txrx_peer = dp_get_txrx_peer(peer);
9802 	if (!txrx_peer)
9803 		return;
9804 
9805 	per_pkt_stats = &txrx_peer->stats.per_pkt_stats;
9806 	DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats);
9807 }
9808 #else
9809 static inline
9810 void dp_get_peer_per_pkt_stats(struct dp_peer *peer,
9811 			       struct cdp_peer_stats *peer_stats)
9812 {
9813 	struct dp_txrx_peer *txrx_peer;
9814 	struct dp_peer_per_pkt_stats *per_pkt_stats;
9815 
9816 	txrx_peer = peer->txrx_peer;
9817 	if (!txrx_peer)
9818 		return;
9819 
9820 	per_pkt_stats = &txrx_peer->stats.per_pkt_stats;
9821 	DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats);
9822 }
9823 #endif
9824 
9825 /**
9826  * dp_get_peer_extd_stats() - Get peer extd stats
9827  * @peer: Datapath peer
9828  * @peer_stats: buffer for peer stats
9829  *
9830  * Return: none
9831  */
9832 #ifdef QCA_ENHANCED_STATS_SUPPORT
9833 #ifdef WLAN_FEATURE_11BE_MLO
9834 static inline
9835 void dp_get_peer_extd_stats(struct dp_peer *peer,
9836 			    struct cdp_peer_stats *peer_stats)
9837 {
9838 	struct dp_soc *soc = peer->vdev->pdev->soc;
9839 
9840 	if (IS_MLO_DP_MLD_PEER(peer)) {
9841 		uint8_t i;
9842 		struct dp_peer *link_peer;
9843 		struct dp_soc *link_peer_soc;
9844 		struct dp_mld_link_peers link_peers_info;
9845 
9846 		dp_get_link_peers_ref_from_mld_peer(soc, peer,
9847 						    &link_peers_info,
9848 						    DP_MOD_ID_CDP);
9849 		for (i = 0; i < link_peers_info.num_links; i++) {
9850 			link_peer = link_peers_info.link_peers[i];
9851 			link_peer_soc = link_peer->vdev->pdev->soc;
9852 			dp_monitor_peer_get_stats(link_peer_soc, link_peer,
9853 						  peer_stats,
9854 						  UPDATE_PEER_STATS);
9855 		}
9856 		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
9857 	} else {
9858 		dp_monitor_peer_get_stats(soc, peer, peer_stats,
9859 					  UPDATE_PEER_STATS);
9860 	}
9861 }
9862 #else
9863 static inline
9864 void dp_get_peer_extd_stats(struct dp_peer *peer,
9865 			    struct cdp_peer_stats *peer_stats)
9866 {
9867 	struct dp_soc *soc = peer->vdev->pdev->soc;
9868 
9869 	dp_monitor_peer_get_stats(soc, peer, peer_stats, UPDATE_PEER_STATS);
9870 }
9871 #endif
9872 #else
9873 static inline
9874 void dp_get_peer_extd_stats(struct dp_peer *peer,
9875 			    struct cdp_peer_stats *peer_stats)
9876 {
9877 	struct dp_txrx_peer *txrx_peer;
9878 	struct dp_peer_extd_stats *extd_stats;
9879 
9880 	txrx_peer = peer->txrx_peer;
9881 	if (!txrx_peer)
9882 		return;
9883 
9884 	extd_stats = &txrx_peer->stats.extd_stats;
9885 	DP_UPDATE_EXTD_STATS(peer_stats, extd_stats);
9886 }
9887 #endif
9888 
9889 /**
9890  * dp_get_peer_tx_per() - Get peer packet error ratio
9891  * @peer_stats: buffer for peer stats
9892  *
9893  * Return: none
9894  */
9895 static inline
9896 void dp_get_peer_tx_per(struct cdp_peer_stats *peer_stats)
9897 {
9898 	if (peer_stats->tx.tx_success.num + peer_stats->tx.retries > 0)
9899 		peer_stats->tx.per = (peer_stats->tx.retries * 100) /
9900 				  (peer_stats->tx.tx_success.num +
9901 				   peer_stats->tx.retries);
9902 	else
9903 		peer_stats->tx.per = 0;
9904 }
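
/*
 * Worked example for the PER computation above: with tx_success.num = 900
 * and retries = 100, per = (100 * 100) / (900 + 100) = 10. The integer
 * division truncates, so fractional percentages round down.
 */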
9905 
9906 /**
9907  * dp_get_peer_stats() - Get peer stats
9908  * @peer: Datapath peer
9909  * @peer_stats: buffer for peer stats
9910  *
9911  * Return: none
9912  */
9913 static inline
9914 void dp_get_peer_stats(struct dp_peer *peer, struct cdp_peer_stats *peer_stats)
9915 {
9916 	dp_get_peer_calibr_stats(peer, peer_stats);
9917 
9918 	dp_get_peer_basic_stats(peer, peer_stats);
9919 
9920 	dp_get_peer_per_pkt_stats(peer, peer_stats);
9921 
9922 	dp_get_peer_extd_stats(peer, peer_stats);
9923 
9924 	dp_get_peer_tx_per(peer_stats);
9925 }
9926 
9927 /*
9928  * dp_get_host_peer_stats() - function to print peer stats
9929  * @soc: cdp soc handle
9930  * @mac_addr: mac address of the peer
9931  *
9932  * Return: QDF_STATUS
9933  */
9934 static QDF_STATUS
9935 dp_get_host_peer_stats(struct cdp_soc_t *soc, uint8_t *mac_addr)
9936 {
9937 	struct dp_peer *peer = NULL;
9938 	struct cdp_peer_stats *peer_stats = NULL;
9939 
9940 	if (!mac_addr) {
9941 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9942 			  "%s: NULL peer mac addr\n", __func__);
9943 		return QDF_STATUS_E_FAILURE;
9944 	}
9945 
9946 	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
9947 				      mac_addr, 0,
9948 				      DP_VDEV_ALL,
9949 				      DP_MOD_ID_CDP);
9950 	if (!peer) {
9951 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9952 			  "%s: Invalid peer\n", __func__);
9953 		return QDF_STATUS_E_FAILURE;
9954 	}
9955 
9956 	peer_stats = qdf_mem_malloc(sizeof(struct cdp_peer_stats));
9957 	if (!peer_stats) {
9958 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9959 			  "%s: Memory allocation failed for cdp_peer_stats\n",
9960 			  __func__);
9961 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9962 		return QDF_STATUS_E_NOMEM;
9963 	}
9964 
9965 	qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats));
9966 
9967 	dp_get_peer_stats(peer, peer_stats);
9968 	dp_print_peer_stats(peer, peer_stats);
9969 
9970 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
9971 
9972 	qdf_mem_free(peer_stats);
9973 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9974 
9975 	return QDF_STATUS_SUCCESS;
9976 }
9977 
9978 /**
9979  * dp_dump_wbm_idle_hptp() - dump wbm idle ring, HW hp/tp info.
9980  * @soc: dp soc.
9981  * @pdev: dp pdev.
9982  *
9983  * Return: None.
9984  */
9985 static void
9986 dp_dump_wbm_idle_hptp(struct dp_soc *soc, struct dp_pdev *pdev)
9987 {
9988 	uint32_t hw_head;
9989 	uint32_t hw_tail;
9990 	struct dp_srng *srng;
9991 
9992 	if (!soc) {
9993 		dp_err("soc is NULL");
9994 		return;
9995 	}
9996 
9997 	if (!pdev) {
9998 		dp_err("pdev is NULL");
9999 		return;
10000 	}
10001 
10002 	srng = &pdev->soc->wbm_idle_link_ring;
10003 	if (!srng->hal_srng) {
10004 		dp_err("wbm_idle_link_ring hal_srng is NULL");
10005 		return;
10006 	}
10007 
10008 	hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &hw_head,
10009 			&hw_tail, WBM_IDLE_LINK);
10010 
10011 	dp_debug("WBM_IDLE_LINK: HW hp: %d, HW tp: %d",
10012 			hw_head, hw_tail);
10013 }
10014 
10015 
10016 /**
10017  * dp_txrx_stats_help() - Helper function for Txrx_Stats
10018  *
10019  * Return: None
10020  */
10021 static void dp_txrx_stats_help(void)
10022 {
10023 	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
10024 	dp_info("stats_option:");
10025 	dp_info("  1 -- HTT Tx Statistics");
10026 	dp_info("  2 -- HTT Rx Statistics");
10027 	dp_info("  3 -- HTT Tx HW Queue Statistics");
10028 	dp_info("  4 -- HTT Tx HW Sched Statistics");
10029 	dp_info("  5 -- HTT Error Statistics");
10030 	dp_info("  6 -- HTT TQM Statistics");
10031 	dp_info("  7 -- HTT TQM CMDQ Statistics");
10032 	dp_info("  8 -- HTT TX_DE_CMN Statistics");
10033 	dp_info("  9 -- HTT Tx Rate Statistics");
10034 	dp_info(" 10 -- HTT Rx Rate Statistics");
10035 	dp_info(" 11 -- HTT Peer Statistics");
10036 	dp_info(" 12 -- HTT Tx SelfGen Statistics");
10037 	dp_info(" 13 -- HTT Tx MU HWQ Statistics");
10038 	dp_info(" 14 -- HTT RING_IF_INFO Statistics");
10039 	dp_info(" 15 -- HTT SRNG Statistics");
10040 	dp_info(" 16 -- HTT SFM Info Statistics");
10041 	dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
10042 	dp_info(" 18 -- HTT Peer List Details");
10043 	dp_info(" 20 -- Clear Host Statistics");
10044 	dp_info(" 21 -- Host Rx Rate Statistics");
10045 	dp_info(" 22 -- Host Tx Rate Statistics");
10046 	dp_info(" 23 -- Host Tx Statistics");
10047 	dp_info(" 24 -- Host Rx Statistics");
10048 	dp_info(" 25 -- Host AST Statistics");
10049 	dp_info(" 26 -- Host SRNG PTR Statistics");
10050 	dp_info(" 27 -- Host Mon Statistics");
10051 	dp_info(" 28 -- Host REO Queue Statistics");
10052 	dp_info(" 29 -- Host Soc cfg param Statistics");
10053 	dp_info(" 30 -- Host pdev cfg param Statistics");
10054 	dp_info(" 31 -- Host NAPI stats");
10055 	dp_info(" 32 -- Host Interrupt stats");
10056 	dp_info(" 33 -- Host FISA stats");
10057 	dp_info(" 34 -- Host Register Work stats");
10058 	dp_info(" 35 -- HW REO Queue stats");
10059 	dp_info(" 36 -- Host WBM IDLE link desc ring HP/TP");
10060 	dp_info(" 37 -- Host SRNG usage watermark stats");
10061 }
10062 
10063 /**
10064  * dp_print_host_stats() - Function to print the stats aggregated at host
10065  * @vdev: DP_VDEV handle
10066  * @req: host stats type
10067  * @soc: dp soc handler
10068  *
10069  * Return: 0 on success, print error message in case of failure
10070  */
10071 static int
10072 dp_print_host_stats(struct dp_vdev *vdev,
10073 		    struct cdp_txrx_stats_req *req,
10074 		    struct dp_soc *soc)
10075 {
10076 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
10077 	enum cdp_host_txrx_stats type =
10078 			dp_stats_mapping_table[req->stats][STATS_HOST];
10079 
10080 	dp_aggregate_pdev_stats(pdev);
10081 
10082 	switch (type) {
10083 	case TXRX_CLEAR_STATS:
10084 		dp_txrx_host_stats_clr(vdev, soc);
10085 		break;
10086 	case TXRX_RX_RATE_STATS:
10087 		dp_print_rx_rates(vdev);
10088 		break;
10089 	case TXRX_TX_RATE_STATS:
10090 		dp_print_tx_rates(vdev);
10091 		break;
10092 	case TXRX_TX_HOST_STATS:
10093 		dp_print_pdev_tx_stats(pdev);
10094 		dp_print_soc_tx_stats(pdev->soc);
10095 		break;
10096 	case TXRX_RX_HOST_STATS:
10097 		dp_print_pdev_rx_stats(pdev);
10098 		dp_print_soc_rx_stats(pdev->soc);
10099 		break;
10100 	case TXRX_AST_STATS:
10101 		dp_print_ast_stats(pdev->soc);
10102 		dp_print_mec_stats(pdev->soc);
10103 		dp_print_peer_table(vdev);
10104 		break;
10105 	case TXRX_SRNG_PTR_STATS:
10106 		dp_print_ring_stats(pdev);
10107 		break;
10108 	case TXRX_RX_MON_STATS:
10109 		dp_monitor_print_pdev_rx_mon_stats(pdev);
10110 		break;
10111 	case TXRX_REO_QUEUE_STATS:
10112 		dp_get_host_peer_stats((struct cdp_soc_t *)pdev->soc,
10113 				       req->peer_addr);
10114 		break;
10115 	case TXRX_SOC_CFG_PARAMS:
10116 		dp_print_soc_cfg_params(pdev->soc);
10117 		break;
10118 	case TXRX_PDEV_CFG_PARAMS:
10119 		dp_print_pdev_cfg_params(pdev);
10120 		break;
10121 	case TXRX_NAPI_STATS:
10122 		dp_print_napi_stats(pdev->soc);
10123 		break;
10124 	case TXRX_SOC_INTERRUPT_STATS:
10125 		dp_print_soc_interrupt_stats(pdev->soc);
10126 		break;
10127 	case TXRX_SOC_FSE_STATS:
10128 		dp_rx_dump_fisa_table(pdev->soc);
10129 		break;
10130 	case TXRX_HAL_REG_WRITE_STATS:
10131 		hal_dump_reg_write_stats(pdev->soc->hal_soc);
10132 		hal_dump_reg_write_srng_stats(pdev->soc->hal_soc);
10133 		break;
10134 	case TXRX_SOC_REO_HW_DESC_DUMP:
10135 		dp_get_rx_reo_queue_info((struct cdp_soc_t *)pdev->soc,
10136 					 vdev->vdev_id);
10137 		break;
10138 	case TXRX_SOC_WBM_IDLE_HPTP_DUMP:
10139 		dp_dump_wbm_idle_hptp(pdev->soc, pdev);
10140 		break;
10141 	case TXRX_SRNG_USAGE_WM_STATS:
10142 		/* Dump usage watermark stats for all SRNGs */
10143 		dp_dump_srng_high_wm_stats(soc, 0xFF);
10144 		break;
10145 	default:
10146 		dp_info("Wrong Input For TxRx Host Stats");
10147 		dp_txrx_stats_help();
10148 		break;
10149 	}
10150 	return 0;
10151 }
10152 
10153 /*
10154  * dp_pdev_tid_stats_ingress_inc() - increment ingress stack counter
10155  * @pdev: pdev handle
10156  * @val: increase in value
10157  *
10158  * Return: void
10159  */
10160 static void
10161 dp_pdev_tid_stats_ingress_inc(struct dp_pdev *pdev, uint32_t val)
10162 {
10163 	pdev->stats.tid_stats.ingress_stack += val;
10164 }
10165 
10166 /*
10167  * dp_pdev_tid_stats_osif_drop() - increment osif drop counter
10168  * @pdev: pdev handle
10169  * @val: increase in value
10170  *
10171  * Return: void
10172  */
10173 static void
10174 dp_pdev_tid_stats_osif_drop(struct dp_pdev *pdev, uint32_t val)
10175 {
10176 	pdev->stats.tid_stats.osif_drop += val;
10177 }
10178 
10179 /*
10180  * dp_get_fw_peer_stats() - function to request peer stats from firmware
10181  * @soc: soc handle
10182  * @pdev_id: id of the pdev handle
10183  * @mac_addr: mac address of the peer
10184  * @cap: Type of htt stats requested
10185  * @is_wait: if set, wait on completion from firmware response
10186  *
10187  * Currently supports only MAC-address-based requests. @cap selects:
10188  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
10189  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
10190  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
10191  *
10192  * Return: QDF_STATUS
10193  */
10194 static QDF_STATUS
10195 dp_get_fw_peer_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
10196 		     uint8_t *mac_addr,
10197 		     uint32_t cap, uint32_t is_wait)
10198 {
10199 	int i;
10200 	uint32_t config_param0 = 0;
10201 	uint32_t config_param1 = 0;
10202 	uint32_t config_param2 = 0;
10203 	uint32_t config_param3 = 0;
10204 	struct dp_pdev *pdev =
10205 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10206 						   pdev_id);
10207 
10208 	if (!pdev)
10209 		return QDF_STATUS_E_FAILURE;
10210 
10211 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
10212 	config_param0 |= (1 << (cap + 1));
10213 
10214 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
10215 		config_param1 |= (1 << i);
10216 	}
10217 
10218 	config_param2 |= (mac_addr[0] & 0x000000ff);
10219 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
10220 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
10221 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
10222 
10223 	config_param3 |= (mac_addr[4] & 0x000000ff);
10224 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
10225 
10226 	if (is_wait) {
10227 		qdf_event_reset(&pdev->fw_peer_stats_event);
10228 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
10229 					  config_param0, config_param1,
10230 					  config_param2, config_param3,
10231 					  0, DBG_STATS_COOKIE_DP_STATS, 0);
10232 		qdf_wait_single_event(&pdev->fw_peer_stats_event,
10233 				      DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
10234 	} else {
10235 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
10236 					  config_param0, config_param1,
10237 					  config_param2, config_param3,
10238 					  0, DBG_STATS_COOKIE_DEFAULT, 0);
10239 	}
10240 
10241 	return QDF_STATUS_SUCCESS;
10242 
10243 }
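
/*
 * Illustrative sketch (hypothetical helper) of the MAC packing performed
 * above: bytes 0..3 of the address land little-endian in config_param2 and
 * bytes 4..5 in config_param3. For MAC 00:11:22:33:44:55 this gives
 * config_param2 = 0x33221100 and config_param3 = 0x00005544.
 */
static inline void dp_pack_peer_mac_sketch(const uint8_t *mac_addr,
					   uint32_t *param2, uint32_t *param3)
{
	*param2 = (uint32_t)mac_addr[0] |
		  ((uint32_t)mac_addr[1] << 8) |
		  ((uint32_t)mac_addr[2] << 16) |
		  ((uint32_t)mac_addr[3] << 24);
	*param3 = (uint32_t)mac_addr[4] |
		  ((uint32_t)mac_addr[5] << 8);
}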
10244 
10245 /* This struct definition will be removed from here
10246  * once it gets added in FW headers */
10247 struct httstats_cmd_req {
10248     uint32_t    config_param0;
10249     uint32_t    config_param1;
10250     uint32_t    config_param2;
10251     uint32_t    config_param3;
10252     int cookie;
10253     u_int8_t    stats_id;
10254 };
10255 
10256 /*
10257  * dp_get_htt_stats: function to process the htt stats request
10258  * @soc: DP soc handle
10259  * @pdev_id: id of pdev handle
10260  * @data: pointer to request data
10261  * @data_len: length for request data
10262  *
10263  * return: QDF_STATUS
10264  */
10265 static QDF_STATUS
10266 dp_get_htt_stats(struct cdp_soc_t *soc, uint8_t pdev_id, void *data,
10267 		 uint32_t data_len)
10268 {
10269 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
10270 	struct dp_pdev *pdev =
10271 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10272 						   pdev_id);
10273 
10274 	if (!pdev)
10275 		return QDF_STATUS_E_FAILURE;
10276 
10277 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
10278 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
10279 				req->config_param0, req->config_param1,
10280 				req->config_param2, req->config_param3,
10281 				req->cookie, DBG_STATS_COOKIE_DEFAULT, 0);
10282 
10283 	return QDF_STATUS_SUCCESS;
10284 }
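
/*
 * Illustrative sketch (hypothetical helper; field values shown are
 * placeholders): building the request blob that dp_get_htt_stats()
 * consumes. stats_id selects the HTT debug stats type and the config
 * params are forwarded verbatim to the target.
 */
static inline void dp_fill_httstats_req_sketch(struct httstats_cmd_req *req,
					       u_int8_t stats_id)
{
	qdf_mem_zero(req, sizeof(*req));
	req->stats_id = stats_id;
	req->cookie = 0;	/* caller-chosen correlation cookie */
}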
10285 
10286 /**
10287  * dp_set_pdev_tidmap_prty_wifi3(): update tidmap priority in pdev
10288  * @pdev: DP_PDEV handle
10289  * @prio: tidmap priority value passed by the user
10290  *
10291  * Return: QDF_STATUS_SUCCESS on success
10292  */
10293 static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct dp_pdev *pdev,
10294 						uint8_t prio)
10295 {
10296 	struct dp_soc *soc = pdev->soc;
10297 
10298 	soc->tidmap_prty = prio;
10299 
10300 	hal_tx_set_tidmap_prty(soc->hal_soc, prio);
10301 	return QDF_STATUS_SUCCESS;
10302 }
10303 
10304 /*
10305  * dp_get_peer_param: function to get parameters of a peer
10306  * @cdp_soc: DP soc handle
10307  * @vdev_id: id of vdev handle
10308  * @peer_mac: peer mac address
10309  * @param: parameter type to be retrieved
10310  * @val: address of buffer to hold the retrieved value
10311  *
10312  * Return: QDF_STATUS
10313  */
10314 static QDF_STATUS dp_get_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
10315 				    uint8_t *peer_mac,
10316 				    enum cdp_peer_param_type param,
10317 				    cdp_config_param_type *val)
10318 {
10319 	return QDF_STATUS_SUCCESS;
10320 }
10321 
10322 /*
10323  * dp_set_peer_param: function to set parameters in peer
10324  * @cdp_soc: DP soc handle
10325  * @vdev_id: id of vdev handle
10326  * @peer_mac: peer mac address
10327  * @param: parameter type to be set
10328  * @val: value of parameter to be set
10329  *
10330  * Return: 0 on success, nonzero on failure.
10331  */
10332 static QDF_STATUS dp_set_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
10333 				    uint8_t *peer_mac,
10334 				    enum cdp_peer_param_type param,
10335 				    cdp_config_param_type val)
10336 {
10337 	struct dp_peer *peer =
10338 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
10339 						       peer_mac, 0, vdev_id,
10340 						       DP_MOD_ID_CDP);
10341 	struct dp_txrx_peer *txrx_peer;
10342 
10343 	if (!peer)
10344 		return QDF_STATUS_E_FAILURE;
10345 
10346 	txrx_peer = peer->txrx_peer;
10347 	if (!txrx_peer) {
10348 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10349 		return QDF_STATUS_E_FAILURE;
10350 	}
10351 
10352 	switch (param) {
10353 	case CDP_CONFIG_NAWDS:
10354 		txrx_peer->nawds_enabled = val.cdp_peer_param_nawds;
10355 		break;
10356 	case CDP_CONFIG_ISOLATION:
10357 		dp_set_peer_isolation(txrx_peer, val.cdp_peer_param_isolation);
10358 		break;
10359 	case CDP_CONFIG_IN_TWT:
10360 		txrx_peer->in_twt = !!(val.cdp_peer_param_in_twt);
10361 		break;
10362 	default:
10363 		break;
10364 	}
10365 
10366 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10367 
10368 	return QDF_STATUS_SUCCESS;
10369 }
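
/*
 * Illustrative usage sketch (hypothetical wrapper): cdp_config_param_type
 * is a union, so a caller fills exactly the member matching the parameter
 * being set. Shown here for CDP_CONFIG_ISOLATION.
 */
static inline QDF_STATUS
dp_peer_set_isolation_sketch(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
			     uint8_t *peer_mac, bool isolate)
{
	cdp_config_param_type val = {0};

	val.cdp_peer_param_isolation = isolate;
	return dp_set_peer_param(cdp_soc, vdev_id, peer_mac,
				 CDP_CONFIG_ISOLATION, val);
}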
10370 
10371 /*
10372  * dp_get_pdev_param: function to get parameters from pdev
10373  * @cdp_soc: DP soc handle
10374  * @pdev_id: id of pdev handle
10375  * @param: parameter type to be retrieved
10376  * @val: buffer for the retrieved value
10377  *
10378  * Return: status
10379  */
10380 static QDF_STATUS dp_get_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
10381 				    enum cdp_pdev_param_type param,
10382 				    cdp_config_param_type *val)
10383 {
10384 	struct cdp_pdev *pdev = (struct cdp_pdev *)
10385 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
10386 						   pdev_id);
10387 	if (!pdev)
10388 		return QDF_STATUS_E_FAILURE;
10389 
10390 	switch (param) {
10391 	case CDP_CONFIG_VOW:
10392 		val->cdp_pdev_param_cfg_vow =
10393 				((struct dp_pdev *)pdev)->delay_stats_flag;
10394 		break;
10395 	case CDP_TX_PENDING:
10396 		val->cdp_pdev_param_tx_pending = dp_get_tx_pending(pdev);
10397 		break;
10398 	case CDP_FILTER_MCAST_DATA:
10399 		val->cdp_pdev_param_fltr_mcast =
10400 				dp_monitor_pdev_get_filter_mcast_data(pdev);
10401 		break;
10402 	case CDP_FILTER_NO_DATA:
10403 		val->cdp_pdev_param_fltr_none =
10404 				dp_monitor_pdev_get_filter_non_data(pdev);
10405 		break;
10406 	case CDP_FILTER_UCAST_DATA:
10407 		val->cdp_pdev_param_fltr_ucast =
10408 				dp_monitor_pdev_get_filter_ucast_data(pdev);
10409 		break;
10410 	case CDP_MONITOR_CHANNEL:
10411 		val->cdp_pdev_param_monitor_chan =
10412 			dp_monitor_get_chan_num((struct dp_pdev *)pdev);
10413 		break;
10414 	case CDP_MONITOR_FREQUENCY:
10415 		val->cdp_pdev_param_mon_freq =
10416 			dp_monitor_get_chan_freq((struct dp_pdev *)pdev);
10417 		break;
10418 	default:
10419 		return QDF_STATUS_E_FAILURE;
10420 	}
10421 
10422 	return QDF_STATUS_SUCCESS;
10423 }
10424 
10425 /*
10426  * dp_set_pdev_param: function to set parameters in pdev
10427  * @cdp_soc: DP soc handle
10428  * @pdev_id: id of pdev handle
10429  * @param: parameter type to be set
10430  * @val: value of parameter to be set
10431  *
10432  * Return: 0 on success, nonzero on failure.
10433  */
10434 static QDF_STATUS dp_set_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
10435 				    enum cdp_pdev_param_type param,
10436 				    cdp_config_param_type val)
10437 {
10438 	int target_type;
10439 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
10440 	struct dp_pdev *pdev =
10441 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
10442 						   pdev_id);
10443 	enum reg_wifi_band chan_band;
10444 
10445 	if (!pdev)
10446 		return QDF_STATUS_E_FAILURE;
10447 
10448 	target_type = hal_get_target_type(soc->hal_soc);
10449 	switch (target_type) {
10450 	case TARGET_TYPE_QCA6750:
10451 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC0_LMAC_ID;
10452 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
10453 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
10454 		break;
10455 	case TARGET_TYPE_KIWI:
10456 	case TARGET_TYPE_MANGO:
10457 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC0_LMAC_ID;
10458 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
10459 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
10460 		break;
10461 	default:
10462 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC1_LMAC_ID;
10463 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
10464 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
10465 		break;
10466 	}
10467 
10468 	switch (param) {
10469 	case CDP_CONFIG_TX_CAPTURE:
10470 		return dp_monitor_config_debug_sniffer(pdev,
10471 						val.cdp_pdev_param_tx_capture);
10472 	case CDP_CONFIG_DEBUG_SNIFFER:
10473 		return dp_monitor_config_debug_sniffer(pdev,
10474 						val.cdp_pdev_param_dbg_snf);
10475 	case CDP_CONFIG_BPR_ENABLE:
10476 		return dp_monitor_set_bpr_enable(pdev,
10477 						 val.cdp_pdev_param_bpr_enable);
10478 	case CDP_CONFIG_PRIMARY_RADIO:
10479 		pdev->is_primary = val.cdp_pdev_param_primary_radio;
10480 		break;
10481 	case CDP_CONFIG_CAPTURE_LATENCY:
10482 		pdev->latency_capture_enable = val.cdp_pdev_param_cptr_latcy;
10483 		break;
10484 	case CDP_INGRESS_STATS:
10485 		dp_pdev_tid_stats_ingress_inc(pdev,
10486 					      val.cdp_pdev_param_ingrs_stats);
10487 		break;
10488 	case CDP_OSIF_DROP:
10489 		dp_pdev_tid_stats_osif_drop(pdev,
10490 					    val.cdp_pdev_param_osif_drop);
10491 		break;
10492 	case CDP_CONFIG_ENH_RX_CAPTURE:
10493 		return dp_monitor_config_enh_rx_capture(pdev,
10494 						val.cdp_pdev_param_en_rx_cap);
10495 	case CDP_CONFIG_ENH_TX_CAPTURE:
10496 		return dp_monitor_config_enh_tx_capture(pdev,
10497 						val.cdp_pdev_param_en_tx_cap);
10498 	case CDP_CONFIG_HMMC_TID_OVERRIDE:
10499 		pdev->hmmc_tid_override_en = val.cdp_pdev_param_hmmc_tid_ovrd;
10500 		break;
10501 	case CDP_CONFIG_HMMC_TID_VALUE:
10502 		pdev->hmmc_tid = val.cdp_pdev_param_hmmc_tid;
10503 		break;
10504 	case CDP_CHAN_NOISE_FLOOR:
10505 		pdev->chan_noise_floor = val.cdp_pdev_param_chn_noise_flr;
10506 		break;
10507 	case CDP_TIDMAP_PRTY:
10508 		dp_set_pdev_tidmap_prty_wifi3(pdev,
10509 					      val.cdp_pdev_param_tidmap_prty);
10510 		break;
10511 	case CDP_FILTER_NEIGH_PEERS:
10512 		dp_monitor_set_filter_neigh_peers(pdev,
10513 					val.cdp_pdev_param_fltr_neigh_peers);
10514 		break;
10515 	case CDP_MONITOR_CHANNEL:
10516 		dp_monitor_set_chan_num(pdev, val.cdp_pdev_param_monitor_chan);
10517 		break;
10518 	case CDP_MONITOR_FREQUENCY:
10519 		chan_band = wlan_reg_freq_to_band(val.cdp_pdev_param_mon_freq);
10520 		dp_monitor_set_chan_freq(pdev, val.cdp_pdev_param_mon_freq);
10521 		dp_monitor_set_chan_band(pdev, chan_band);
10522 		break;
10523 	case CDP_CONFIG_BSS_COLOR:
10524 		dp_monitor_set_bsscolor(pdev, val.cdp_pdev_param_bss_color);
10525 		break;
10526 	case CDP_SET_ATF_STATS_ENABLE:
10527 		dp_monitor_set_atf_stats_enable(pdev,
10528 					val.cdp_pdev_param_atf_stats_enable);
10529 		break;
10530 	case CDP_CONFIG_SPECIAL_VAP:
10531 		dp_monitor_pdev_config_scan_spcl_vap(pdev,
10532 					val.cdp_pdev_param_config_special_vap);
10533 		dp_monitor_vdev_set_monitor_mode_buf_rings(pdev);
10534 		break;
10535 	case CDP_RESET_SCAN_SPCL_VAP_STATS_ENABLE:
10536 		dp_monitor_pdev_reset_scan_spcl_vap_stats_enable(pdev,
10537 				val.cdp_pdev_param_reset_scan_spcl_vap_stats_enable);
10538 		break;
10539 	case CDP_CONFIG_ENHANCED_STATS_ENABLE:
10540 		pdev->enhanced_stats_en = val.cdp_pdev_param_enhanced_stats_enable;
10541 		break;
10542 	case CDP_ISOLATION:
10543 		pdev->isolation = val.cdp_pdev_param_isolation;
10544 		break;
10545 	case CDP_CONFIG_UNDECODED_METADATA_CAPTURE_ENABLE:
10546 		return dp_monitor_config_undecoded_metadata_capture(pdev,
10547 				val.cdp_pdev_param_undecoded_metadata_enable);
10549 	default:
10550 		return QDF_STATUS_E_INVAL;
10551 	}
10552 	return QDF_STATUS_SUCCESS;
10553 }
10554 
10555 #ifdef QCA_UNDECODED_METADATA_SUPPORT
10556 static
10557 QDF_STATUS dp_set_pdev_phyrx_error_mask(struct cdp_soc_t *cdp_soc,
10558 					uint8_t pdev_id, uint32_t mask,
10559 					uint32_t mask_cont)
10560 {
10561 	struct dp_pdev *pdev =
10562 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
10563 						   pdev_id);
10564 
10565 	if (!pdev)
10566 		return QDF_STATUS_E_FAILURE;
10567 
10568 	return dp_monitor_config_undecoded_metadata_phyrx_error_mask(pdev,
10569 				mask, mask_cont);
10570 }
10571 
10572 static
10573 QDF_STATUS dp_get_pdev_phyrx_error_mask(struct cdp_soc_t *cdp_soc,
10574 					uint8_t pdev_id, uint32_t *mask,
10575 					uint32_t *mask_cont)
10576 {
10577 	struct dp_pdev *pdev =
10578 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
10579 						   pdev_id);
10580 
10581 	if (!pdev)
10582 		return QDF_STATUS_E_FAILURE;
10583 
10584 	return dp_monitor_get_undecoded_metadata_phyrx_error_mask(pdev,
10585 				mask, mask_cont);
10586 }
10587 #endif
10588 
10589 #ifdef QCA_PEER_EXT_STATS
10590 static void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
10591 					  qdf_nbuf_t nbuf)
10592 {
10593 	struct dp_peer *peer = NULL;
10594 	uint16_t peer_id, ring_id;
10595 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
10596 	struct dp_peer_delay_stats *delay_stats = NULL;
10597 
10598 	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
10599 	if (peer_id > soc->max_peer_id)
10600 		return;
10601 
10602 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
10603 	if (qdf_unlikely(!peer))
10604 		return;
10605 
10606 	if (qdf_unlikely(!peer->txrx_peer)) {
10607 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10608 		return;
10609 	}
10610 
10611 	if (qdf_likely(peer->txrx_peer->delay_stats)) {
10612 		delay_stats = peer->txrx_peer->delay_stats;
10613 		ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
10614 		dp_rx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
10615 					nbuf);
10616 	}
10617 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10618 }
10619 #else
10620 static inline void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
10621 						 qdf_nbuf_t nbuf)
10622 {
10623 }
10624 #endif
10625 
10626 /*
10627  * dp_calculate_delay_stats: function to compute rx delay stats
10628  * @cdp_soc: DP soc handle
10629  * @vdev_id: id of DP vdev handle
10630  * @nbuf: skb
10631  *
10632  * Return: QDF_STATUS
10633  */
10634 static QDF_STATUS
10635 dp_calculate_delay_stats(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
10636 			 qdf_nbuf_t nbuf)
10637 {
10638 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
10639 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10640 						     DP_MOD_ID_CDP);
10641 
10642 	if (!vdev)
10643 		return QDF_STATUS_SUCCESS;
10644 
10645 	if (vdev->pdev->delay_stats_flag)
10646 		dp_rx_compute_delay(vdev, nbuf);
10647 	else
10648 		dp_rx_update_peer_delay_stats(soc, nbuf);
10649 
10650 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10651 	return QDF_STATUS_SUCCESS;
10652 }
10653 
10654 /*
10655  * dp_get_vdev_param: function to get parameters from vdev
10656  * @cdp_soc: DP soc handle
10657  * @vdev_id: id of DP vdev handle
10658  * @param: parameter type whose value is requested
10659  * @val: buffer address to hold the value
10660  *
10661  * return: status
10662  */
10663 static QDF_STATUS dp_get_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
10664 				    enum cdp_vdev_param_type param,
10665 				    cdp_config_param_type *val)
10666 {
10667 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
10668 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10669 						     DP_MOD_ID_CDP);
10670 
10671 	if (!vdev)
10672 		return QDF_STATUS_E_FAILURE;
10673 
10674 	switch (param) {
10675 	case CDP_ENABLE_WDS:
10676 		val->cdp_vdev_param_wds = vdev->wds_enabled;
10677 		break;
10678 	case CDP_ENABLE_MEC:
10679 		val->cdp_vdev_param_mec = vdev->mec_enabled;
10680 		break;
10681 	case CDP_ENABLE_DA_WAR:
10682 		val->cdp_vdev_param_da_war = vdev->pdev->soc->da_war_enabled;
10683 		break;
10684 	case CDP_ENABLE_IGMP_MCAST_EN:
10685 		val->cdp_vdev_param_igmp_mcast_en = vdev->igmp_mcast_enhanc_en;
10686 		break;
10687 	case CDP_ENABLE_MCAST_EN:
10688 		val->cdp_vdev_param_mcast_en = vdev->mcast_enhancement_en;
10689 		break;
10690 	case CDP_ENABLE_HLOS_TID_OVERRIDE:
10691 		val->cdp_vdev_param_hlos_tid_override =
10692 			    dp_vdev_get_hlos_tid_override((struct cdp_vdev *)vdev);
10693 		break;
10694 	case CDP_ENABLE_PEER_AUTHORIZE:
10695 		val->cdp_vdev_param_peer_authorize =
10696 			    vdev->peer_authorize;
10697 		break;
10698 	case CDP_TX_ENCAP_TYPE:
10699 		val->cdp_vdev_param_tx_encap = vdev->tx_encap_type;
10700 		break;
10701 	case CDP_ENABLE_CIPHER:
10702 		val->cdp_vdev_param_cipher_en = vdev->sec_type;
10703 		break;
10704 #ifdef WLAN_SUPPORT_MESH_LATENCY
10705 	case CDP_ENABLE_PEER_TID_LATENCY:
10706 		val->cdp_vdev_param_peer_tid_latency_enable =
10707 			vdev->peer_tid_latency_enabled;
10708 		break;
10709 	case CDP_SET_VAP_MESH_TID:
10710 		val->cdp_vdev_param_mesh_tid =
10711 				vdev->mesh_tid_latency_config.latency_tid;
10712 		break;
10713 #endif
10714 	case CDP_DROP_3ADDR_MCAST:
10715 		val->cdp_drop_3addr_mcast = vdev->drop_3addr_mcast;
10716 		break;
10717 	default:
10718 		dp_cdp_err("%pK: param value %d is wrong",
10719 			   soc, param);
10720 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10721 		return QDF_STATUS_E_FAILURE;
10722 	}
10723 
10724 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10725 	return QDF_STATUS_SUCCESS;
10726 }
10727 
10728 /*
10729  * dp_set_vdev_param: function to set parameters in vdev
10730  * @cdp_soc : DP soc handle
10731  * @vdev_id: id of DP vdev handle
10732  * @param: parameter type to be set
10733  * @val: value
10734  *
10735  * return: QDF_STATUS
10736  */
10737 static QDF_STATUS
10738 dp_set_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
10739 		  enum cdp_vdev_param_type param, cdp_config_param_type val)
10740 {
10741 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
10742 	struct dp_vdev *vdev =
10743 		dp_vdev_get_ref_by_id(dsoc, vdev_id, DP_MOD_ID_CDP);
10744 	uint32_t var = 0;
10745 
10746 	if (!vdev)
10747 		return QDF_STATUS_E_FAILURE;
10748 
10749 	switch (param) {
10750 	case CDP_ENABLE_WDS:
10751 		dp_cdp_err("%pK: wds_enable %d for vdev(%pK) id(%d)\n",
10752 			   dsoc, val.cdp_vdev_param_wds, vdev, vdev->vdev_id);
10753 		vdev->wds_enabled = val.cdp_vdev_param_wds;
10754 		break;
10755 	case CDP_ENABLE_MEC:
10756 		dp_cdp_err("%pK: mec_enable %d for vdev(%pK) id(%d)\n",
10757 			   dsoc, val.cdp_vdev_param_mec, vdev, vdev->vdev_id);
10758 		vdev->mec_enabled = val.cdp_vdev_param_mec;
10759 		break;
10760 	case CDP_ENABLE_DA_WAR:
10761 		dp_cdp_err("%pK: da_war_enable %d for vdev(%pK) id(%d)\n",
10762 			   dsoc, val.cdp_vdev_param_da_war, vdev, vdev->vdev_id);
10763 		vdev->pdev->soc->da_war_enabled = val.cdp_vdev_param_da_war;
10764 		dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
10765 					     vdev->pdev->soc));
10766 		break;
10767 	case CDP_ENABLE_NAWDS:
10768 		vdev->nawds_enabled = val.cdp_vdev_param_nawds;
10769 		break;
10770 	case CDP_ENABLE_MCAST_EN:
10771 		vdev->mcast_enhancement_en = val.cdp_vdev_param_mcast_en;
10772 		break;
10773 	case CDP_ENABLE_IGMP_MCAST_EN:
10774 		vdev->igmp_mcast_enhanc_en = val.cdp_vdev_param_igmp_mcast_en;
10775 		break;
10776 	case CDP_ENABLE_PROXYSTA:
10777 		vdev->proxysta_vdev = val.cdp_vdev_param_proxysta;
10778 		break;
10779 	case CDP_UPDATE_TDLS_FLAGS:
10780 		vdev->tdls_link_connected = val.cdp_vdev_param_tdls_flags;
10781 		break;
10782 	case CDP_CFG_WDS_AGING_TIMER:
10783 		var = val.cdp_vdev_param_aging_tmr;
10784 		if (!var)
10785 			qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
10786 		else if (var != vdev->wds_aging_timer_val)
10787 			qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, var);
10788 
10789 		vdev->wds_aging_timer_val = var;
10790 		break;
10791 	case CDP_ENABLE_AP_BRIDGE:
10792 		if (wlan_op_mode_sta != vdev->opmode)
10793 			vdev->ap_bridge_enabled = val.cdp_vdev_param_ap_brdg_en;
10794 		else
10795 			vdev->ap_bridge_enabled = false;
10796 		break;
10797 	case CDP_ENABLE_CIPHER:
10798 		vdev->sec_type = val.cdp_vdev_param_cipher_en;
10799 		break;
10800 	case CDP_ENABLE_QWRAP_ISOLATION:
10801 		vdev->isolation_vdev = val.cdp_vdev_param_qwrap_isolation;
10802 		break;
10803 	case CDP_UPDATE_MULTIPASS:
10804 		vdev->multipass_en = val.cdp_vdev_param_update_multipass;
10805 		break;
10806 	case CDP_TX_ENCAP_TYPE:
10807 		vdev->tx_encap_type = val.cdp_vdev_param_tx_encap;
10808 		break;
10809 	case CDP_RX_DECAP_TYPE:
10810 		vdev->rx_decap_type = val.cdp_vdev_param_rx_decap;
10811 		break;
10812 	case CDP_TID_VDEV_PRTY:
10813 		vdev->tidmap_prty = val.cdp_vdev_param_tidmap_prty;
10814 		break;
10815 	case CDP_TIDMAP_TBL_ID:
10816 		vdev->tidmap_tbl_id = val.cdp_vdev_param_tidmap_tbl_id;
10817 		break;
10818 #ifdef MESH_MODE_SUPPORT
10819 	case CDP_MESH_RX_FILTER:
10820 		dp_vdev_set_mesh_rx_filter((struct cdp_vdev *)vdev,
10821 					   val.cdp_vdev_param_mesh_rx_filter);
10822 		break;
10823 	case CDP_MESH_MODE:
10824 		dp_vdev_set_mesh_mode((struct cdp_vdev *)vdev,
10825 				      val.cdp_vdev_param_mesh_mode);
10826 		break;
10827 #endif
10828 	case CDP_ENABLE_HLOS_TID_OVERRIDE:
10829 		dp_info("vdev_id %d enable hlos tid override %d", vdev_id,
10830 			val.cdp_vdev_param_hlos_tid_override);
10831 		dp_vdev_set_hlos_tid_override(vdev,
10832 				val.cdp_vdev_param_hlos_tid_override);
10833 		break;
10834 #ifdef QCA_SUPPORT_WDS_EXTENDED
10835 	case CDP_CFG_WDS_EXT:
10836 		if (vdev->opmode == wlan_op_mode_ap)
10837 			vdev->wds_ext_enabled = val.cdp_vdev_param_wds_ext;
10838 		break;
10839 #endif
10840 	case CDP_ENABLE_PEER_AUTHORIZE:
10841 		vdev->peer_authorize = val.cdp_vdev_param_peer_authorize;
10842 		break;
10843 #ifdef WLAN_SUPPORT_MESH_LATENCY
10844 	case CDP_ENABLE_PEER_TID_LATENCY:
10845 		dp_info("vdev_id %d enable peer tid latency %d", vdev_id,
10846 			val.cdp_vdev_param_peer_tid_latency_enable);
10847 		vdev->peer_tid_latency_enabled =
10848 			val.cdp_vdev_param_peer_tid_latency_enable;
10849 		break;
10850 	case CDP_SET_VAP_MESH_TID:
10851 		dp_info("vdev_id %d set vap mesh tid %d", vdev_id,
10852 			val.cdp_vdev_param_mesh_tid);
10853 		vdev->mesh_tid_latency_config.latency_tid
10854 				= val.cdp_vdev_param_mesh_tid;
10855 		break;
10856 #endif
10857 #ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE
10858 	case CDP_SKIP_BAR_UPDATE_AP:
10859 		dp_info("vdev_id %d skip BAR update: %u", vdev_id,
10860 			val.cdp_skip_bar_update);
10861 		vdev->skip_bar_update = val.cdp_skip_bar_update;
10862 		vdev->skip_bar_update_last_ts = 0;
10863 		break;
10864 #endif
10865 	case CDP_DROP_3ADDR_MCAST:
10866 		dp_info("vdev_id %d drop 3 addr mcast :%d", vdev_id,
10867 			val.cdp_drop_3addr_mcast);
10868 		vdev->drop_3addr_mcast = val.cdp_drop_3addr_mcast;
10869 		break;
10870 	case CDP_ENABLE_WRAP:
10871 		vdev->wrap_vdev = val.cdp_vdev_param_wrap;
10872 		break;
10873 #ifdef DP_TRAFFIC_END_INDICATION
10874 	case CDP_ENABLE_TRAFFIC_END_INDICATION:
10875 		vdev->traffic_end_ind_en = val.cdp_vdev_param_traffic_end_ind;
10876 		break;
10877 #endif
10878 	default:
10879 		break;
10880 	}
10881 
10882 	dp_tx_vdev_update_search_flags((struct dp_vdev *)vdev);
10883 	dsoc->arch_ops.txrx_set_vdev_param(dsoc, vdev, param, val);
10884 
10885 	/* Update PDEV flags as VDEV flags are updated */
10886 	dp_pdev_update_fast_rx_flag(dsoc, vdev->pdev);
10887 	dp_vdev_unref_delete(dsoc, vdev, DP_MOD_ID_CDP);
10888 
10889 	return QDF_STATUS_SUCCESS;
10890 }
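/*
 * Example (illustrative sketch, assuming the cdp_txrx_set_vdev_param()
 * wrapper from cdp_txrx_cmn.h): the value travels by union, so only the
 * member matching the parameter type should be populated:
 *
 *	cdp_config_param_type val;
 *
 *	val.cdp_vdev_param_mec = 1;
 *	cdp_txrx_set_vdev_param(soc, vdev_id, CDP_ENABLE_MEC, val);
 *
 * The vdev is resolved by id and referenced/released internally, so the
 * caller holds no DP object pointers.
 */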
10891 
10892 /*
10893  * dp_set_psoc_param: function to set parameters in psoc
10894  * @cdp_soc : DP soc handle
10895  * @param: parameter type to be set
10896  * @val: value of parameter to be set
10897  *
10898  * return: QDF_STATUS
10899  */
10900 static QDF_STATUS
10901 dp_set_psoc_param(struct cdp_soc_t *cdp_soc,
10902 		  enum cdp_psoc_param_type param, cdp_config_param_type val)
10903 {
10904 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
10905 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->wlan_cfg_ctx;
10906 
10907 	switch (param) {
10908 	case CDP_ENABLE_RATE_STATS:
10909 		soc->peerstats_enabled = val.cdp_psoc_param_en_rate_stats;
10910 		break;
10911 	case CDP_SET_NSS_CFG:
10912 		wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx,
10913 					    val.cdp_psoc_param_en_nss_cfg);
10914 		/*
10915 		 * TODO: mask this out per offloaded radio
10916 		 */
10917 		switch (val.cdp_psoc_param_en_nss_cfg) {
10918 		case dp_nss_cfg_default:
10919 			break;
10920 		case dp_nss_cfg_first_radio:
10921 		/*
10922 		 * This configuration is valid for a single-band radio which
10923 		 * is also NSS offloaded.
10924 		 */
10925 		case dp_nss_cfg_dbdc:
10926 		case dp_nss_cfg_dbtc:
10927 			wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
10928 			wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
10929 			wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
10930 			wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
10931 			break;
10932 		default:
10933 			dp_cdp_err("%pK: Invalid offload config %d",
10934 				   soc, val.cdp_psoc_param_en_nss_cfg);
10935 		}
10936 
10937 		dp_cdp_err("%pK: nss-wifi<0> nss config is enabled", soc);
10939 		break;
10940 	case CDP_SET_PREFERRED_HW_MODE:
10941 		soc->preferred_hw_mode = val.cdp_psoc_param_preferred_hw_mode;
10942 		break;
10943 	case CDP_IPA_ENABLE:
10944 		soc->wlan_cfg_ctx->ipa_enabled = val.cdp_ipa_enabled;
10945 		break;
10946 	case CDP_CFG_VDEV_STATS_HW_OFFLOAD:
10947 		wlan_cfg_set_vdev_stats_hw_offload_config(wlan_cfg_ctx,
10948 				val.cdp_psoc_param_vdev_stats_hw_offload);
10949 		break;
10950 	case CDP_SAWF_ENABLE:
10951 		wlan_cfg_set_sawf_config(wlan_cfg_ctx, val.cdp_sawf_enabled);
10952 		break;
10953 	default:
10954 		break;
10955 	}
10956 
10957 	return QDF_STATUS_SUCCESS;
10958 }
10959 
10960 /*
10961  * dp_get_psoc_param: function to get parameters in soc
10962  * @cdp_soc : DP soc handle
10963  * @param: parameter type to be set
10964  * @val: address of buffer
10965  *
10966  * return: status
10967  */
10968 static QDF_STATUS dp_get_psoc_param(struct cdp_soc_t *cdp_soc,
10969 				    enum cdp_psoc_param_type param,
10970 				    cdp_config_param_type *val)
10971 {
10972 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
10973 
10974 	if (!soc)
10975 		return QDF_STATUS_E_FAILURE;
10976 
10977 	switch (param) {
10978 	case CDP_CFG_PEER_EXT_STATS:
10979 		val->cdp_psoc_param_pext_stats =
10980 			wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
10981 		break;
10982 	case CDP_CFG_VDEV_STATS_HW_OFFLOAD:
10983 		val->cdp_psoc_param_vdev_stats_hw_offload =
10984 			wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx);
10985 		break;
10986 	default:
10987 		dp_warn("Invalid param");
10988 		break;
10989 	}
10990 
10991 	return QDF_STATUS_SUCCESS;
10992 }
10993 
10994 /*
10995  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
10996  * @cdp_soc: DP soc handle
10997  * @vdev_id: id of DP_VDEV handle
10998  * @map_id: ID of map that needs to be updated
10999  *
11000  * Return: QDF_STATUS
11001  */
11002 static QDF_STATUS dp_set_vdev_dscp_tid_map_wifi3(ol_txrx_soc_handle cdp_soc,
11003 						 uint8_t vdev_id,
11004 						 uint8_t map_id)
11005 {
11006 	cdp_config_param_type val;
11007 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
11008 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11009 						     DP_MOD_ID_CDP);
11010 	if (vdev) {
11011 		vdev->dscp_tid_map_id = map_id;
11012 		val.cdp_vdev_param_dscp_tid_map_id = map_id;
11013 		soc->arch_ops.txrx_set_vdev_param(soc,
11014 						  vdev,
11015 						  CDP_UPDATE_DSCP_TO_TID_MAP,
11016 						  val);
11017 		/* Update flag for transmit tid classification */
11018 		if (vdev->dscp_tid_map_id < soc->num_hw_dscp_tid_map)
11019 			vdev->skip_sw_tid_classification |=
11020 				DP_TX_HW_DSCP_TID_MAP_VALID;
11021 		else
11022 			vdev->skip_sw_tid_classification &=
11023 				~DP_TX_HW_DSCP_TID_MAP_VALID;
11024 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11025 		return QDF_STATUS_SUCCESS;
11026 	}
11027 
11028 	return QDF_STATUS_E_FAILURE;
11029 }
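/*
 * Worked case for the flag update above, assuming num_hw_dscp_tid_map == 2:
 * map_id 0 or 1 is HW-backed, so DP_TX_HW_DSCP_TID_MAP_VALID is set in
 * skip_sw_tid_classification and the TX path can skip SW TID classification;
 * any larger map_id clears the bit and the TID is classified in software.
 */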
11030 
11031 #ifdef DP_RATETABLE_SUPPORT
11032 static int dp_txrx_get_ratekbps(int preamb, int mcs,
11033 				int htflag, int gintval)
11034 {
11035 	uint32_t rix;
11036 	uint16_t ratecode;
11037 	enum cdp_punctured_modes punc_mode = NO_PUNCTURE;
11038 
11039 	return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1,
11040 			       (uint8_t)preamb, 1, punc_mode,
11041 			       &rix, &ratecode);
11042 }
11043 #else
11044 static int dp_txrx_get_ratekbps(int preamb, int mcs,
11045 				int htflag, int gintval)
11046 {
11047 	return 0;
11048 }
11049 #endif
11050 
11051 /* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
11052  * @soc: DP soc handle
11053  * @pdev_id: id of DP pdev handle
11054  * @pdev_stats: buffer to copy to
11055  *
11056  * return : status success/failure
11057  */
11058 static QDF_STATUS
11059 dp_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
11060 		       struct cdp_pdev_stats *pdev_stats)
11061 {
11062 	struct dp_pdev *pdev =
11063 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11064 						   pdev_id);
11065 	if (!pdev)
11066 		return QDF_STATUS_E_FAILURE;
11067 
11068 	dp_aggregate_pdev_stats(pdev);
11069 
11070 	qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats));
11071 	return QDF_STATUS_SUCCESS;
11072 }
11073 
11074 /* dp_txrx_update_vdev_me_stats(): Update vdev ME stats sent from CDP
11075  * @vdev: DP vdev handle
11076  * @buf: buffer containing specific stats structure
11077  *
11078  * Returns: void
11079  */
11080 static void dp_txrx_update_vdev_me_stats(struct dp_vdev *vdev,
11081 					 void *buf)
11082 {
11083 	struct cdp_tx_ingress_stats *host_stats = NULL;
11084 
11085 	if (!buf) {
11086 		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
11087 		return;
11088 	}
11089 	host_stats = (struct cdp_tx_ingress_stats *)buf;
11090 
11091 	DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt,
11092 			 host_stats->mcast_en.mcast_pkt.num,
11093 			 host_stats->mcast_en.mcast_pkt.bytes);
11094 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error,
11095 		     host_stats->mcast_en.dropped_map_error);
11096 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_self_mac,
11097 		     host_stats->mcast_en.dropped_self_mac);
11098 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_send_fail,
11099 		     host_stats->mcast_en.dropped_send_fail);
11100 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast,
11101 		     host_stats->mcast_en.ucast);
11102 	DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc,
11103 		     host_stats->mcast_en.fail_seg_alloc);
11104 	DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail,
11105 		     host_stats->mcast_en.clone_fail);
11106 }
11107 
11108 /* dp_txrx_update_vdev_igmp_me_stats(): Update vdev IGMP ME stats sent from CDP
11109  * @vdev: DP vdev handle
11110  * @buf: buffer containing specific stats structure
11111  *
11112  * Returns: void
11113  */
11114 static void dp_txrx_update_vdev_igmp_me_stats(struct dp_vdev *vdev,
11115 					      void *buf)
11116 {
11117 	struct cdp_tx_ingress_stats *host_stats = NULL;
11118 
11119 	if (!buf) {
11120 		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
11121 		return;
11122 	}
11123 	host_stats = (struct cdp_tx_ingress_stats *)buf;
11124 
11125 	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_rcvd,
11126 		     host_stats->igmp_mcast_en.igmp_rcvd);
11127 	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_ucast_converted,
11128 		     host_stats->igmp_mcast_en.igmp_ucast_converted);
11129 }
11130 
11131 /* dp_txrx_update_vdev_host_stats(): Update stats sent through CDP
11132  * @soc_hdl: DP soc handle
11133  * @vdev_id: id of DP vdev handle
11134  * @buf: buffer containing specific stats structure
11135  * @stats_id: stats type
11136  *
11137  * Returns: QDF_STATUS
11138  */
11139 static QDF_STATUS dp_txrx_update_vdev_host_stats(struct cdp_soc_t *soc_hdl,
11140 						 uint8_t vdev_id,
11141 						 void *buf,
11142 						 uint16_t stats_id)
11143 {
11144 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11145 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11146 						     DP_MOD_ID_CDP);
11147 
11148 	if (!vdev) {
11149 		dp_cdp_err("%pK: Invalid vdev handle", soc);
11150 		return QDF_STATUS_E_FAILURE;
11151 	}
11152 
11153 	switch (stats_id) {
11154 	case DP_VDEV_STATS_PKT_CNT_ONLY:
11155 		break;
11156 	case DP_VDEV_STATS_TX_ME:
11157 		dp_txrx_update_vdev_me_stats(vdev, buf);
11158 		dp_txrx_update_vdev_igmp_me_stats(vdev, buf);
11159 		break;
11160 	default:
11161 		qdf_info("Invalid stats_id %d", stats_id);
11162 		break;
11163 	}
11164 
11165 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11166 	return QDF_STATUS_SUCCESS;
11167 }
11168 
11169 /* dp_txrx_get_peer_stats - will return cdp_peer_stats
11170  * @soc: soc handle
11171  * @vdev_id: id of vdev handle
11172  * @peer_mac: mac of DP_PEER handle
11173  * @peer_stats: buffer to copy to
11174  * return : status success/failure
11175  */
11176 static QDF_STATUS
11177 dp_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
11178 		       uint8_t *peer_mac, struct cdp_peer_stats *peer_stats)
11179 {
11180 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11181 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
11182 						       peer_mac, 0, vdev_id,
11183 						       DP_MOD_ID_CDP);
11184 
11185 	qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats));
11186 
11187 	if (!peer)
11188 		return QDF_STATUS_E_FAILURE;
11189 
11190 	dp_get_peer_stats(peer, peer_stats);
11191 
11192 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11193 
11194 	return status;
11195 }
11196 
11197 /* dp_txrx_get_peer_stats_param - will return specified cdp_peer_stats
11198  * @soc: soc handle
11199  * @vdev_id: vdev_id of vdev object
11200  * @peer_mac: mac address of the peer
11201  * @type: enum of required stats
11202  * @buf: buffer to hold the value
11203  * return : status success/failure
11204  */
11205 static QDF_STATUS
11206 dp_txrx_get_peer_stats_param(struct cdp_soc_t *soc, uint8_t vdev_id,
11207 			     uint8_t *peer_mac, enum cdp_peer_stats_type type,
11208 			     cdp_peer_stats_param_t *buf)
11209 {
11210 	QDF_STATUS ret;
11211 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
11212 						      peer_mac, 0, vdev_id,
11213 						      DP_MOD_ID_CDP);
11214 
11215 	if (!peer) {
11216 		dp_peer_err("%pK: Invalid Peer for Mac " QDF_MAC_ADDR_FMT,
11217 			    soc, QDF_MAC_ADDR_REF(peer_mac));
11218 		return QDF_STATUS_E_FAILURE;
11219 	}
11220 
11221 	if (type >= cdp_peer_per_pkt_stats_min &&
11222 	    type < cdp_peer_per_pkt_stats_max) {
11223 		ret = dp_txrx_get_peer_per_pkt_stats_param(peer, type, buf);
11224 	} else if (type >= cdp_peer_extd_stats_min &&
11225 		   type < cdp_peer_extd_stats_max) {
11226 		ret = dp_txrx_get_peer_extd_stats_param(peer, type, buf);
11227 	} else {
11228 		dp_err("%pK: Invalid stat type requested", soc);
11229 		ret = QDF_STATUS_E_FAILURE;
11230 	}
11231 
11232 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11233 
11234 	return ret;
11235 }
11236 
11237 /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
11238  * @soc: soc handle
11239  * @vdev_id: id of vdev handle
11240  * @peer_mac: mac of DP_PEER handle
11241  *
11242  * return : QDF_STATUS
11243  */
11244 #ifdef WLAN_FEATURE_11BE_MLO
11245 static QDF_STATUS
11246 dp_txrx_reset_peer_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
11247 			 uint8_t *peer_mac)
11248 {
11249 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11250 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11251 	struct dp_peer *peer =
11252 			dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
11253 						       vdev_id, DP_MOD_ID_CDP);
11254 
11255 	if (!peer)
11256 		return QDF_STATUS_E_FAILURE;
11257 
11258 	DP_STATS_CLR(peer);
11259 	dp_txrx_peer_stats_clr(peer->txrx_peer);
11260 
11261 	if (IS_MLO_DP_MLD_PEER(peer)) {
11262 		uint8_t i;
11263 		struct dp_peer *link_peer;
11264 		struct dp_soc *link_peer_soc;
11265 		struct dp_mld_link_peers link_peers_info;
11266 
11267 		dp_get_link_peers_ref_from_mld_peer(soc, peer,
11268 						    &link_peers_info,
11269 						    DP_MOD_ID_CDP);
11270 		for (i = 0; i < link_peers_info.num_links; i++) {
11271 			link_peer = link_peers_info.link_peers[i];
11272 			link_peer_soc = link_peer->vdev->pdev->soc;
11273 
11274 			DP_STATS_CLR(link_peer);
11275 			dp_monitor_peer_reset_stats(link_peer_soc, link_peer);
11276 		}
11277 
11278 		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
11279 	} else {
11280 		dp_monitor_peer_reset_stats(soc, peer);
11281 	}
11282 
11283 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11284 
11285 	return status;
11286 }
11287 #else
11288 static QDF_STATUS
11289 dp_txrx_reset_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
11290 			 uint8_t *peer_mac)
11291 {
11292 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11293 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
11294 						      peer_mac, 0, vdev_id,
11295 						      DP_MOD_ID_CDP);
11296 
11297 	if (!peer)
11298 		return QDF_STATUS_E_FAILURE;
11299 
11300 	DP_STATS_CLR(peer);
11301 	dp_txrx_peer_stats_clr(peer->txrx_peer);
11302 	dp_monitor_peer_reset_stats((struct dp_soc *)soc, peer);
11303 
11304 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11305 
11306 	return status;
11307 }
11308 #endif
11309 
11310 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
11311  * @soc_hdl: DP soc handle
11312  * @vdev_id: id of DP vdev handle
11313  * @buf: buffer for vdev stats
11314  * @is_aggregate: whether aggregated vdev stats are requested
11315  * return : int
11316  */
11316 static int dp_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
11317 				  void *buf, bool is_aggregate)
11318 {
11319 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11320 	struct cdp_vdev_stats *vdev_stats;
11321 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11322 						     DP_MOD_ID_CDP);
11323 
11324 	if (!vdev)
11325 		return 1;
11326 
11327 	vdev_stats = (struct cdp_vdev_stats *)buf;
11328 
11329 	if (is_aggregate) {
11330 		dp_aggregate_vdev_stats(vdev, buf);
11331 	} else {
11332 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
11333 	}
11334 
11335 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11336 	return 0;
11337 }
11338 
11339 /*
11340  * dp_get_total_per(): get total PER (packet error rate)
11341  * @soc: DP soc handle
11342  * @pdev_id: id of DP_PDEV handle
11343  *
11344  * Return: % error rate using retries per packet and success packets
11345  */
11346 static int dp_get_total_per(struct cdp_soc_t *soc, uint8_t pdev_id)
11347 {
11348 	struct dp_pdev *pdev =
11349 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11350 						   pdev_id);
11351 
11352 	if (!pdev)
11353 		return 0;
11354 
11355 	dp_aggregate_pdev_stats(pdev);
11356 	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
11357 		return 0;
11358 	return ((pdev->stats.tx.retries * 100) /
11359 		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
11360 }
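/*
 * Worked example of the PER computation above: with tx.retries = 25 and
 * tx.tx_success.num = 75, PER = (25 * 100) / (75 + 25) = 25, i.e. a 25%
 * error rate.
 */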
11361 
11362 /*
11363  * dp_txrx_stats_publish(): publish pdev stats into a buffer
11364  * @soc: DP soc handle
11365  * @pdev_id: id of DP_PDEV handle
11366  * @buf: to hold pdev_stats
11367  *
11368  * Return: int
11369  */
11370 static int
11371 dp_txrx_stats_publish(struct cdp_soc_t *soc, uint8_t pdev_id,
11372 		      struct cdp_stats_extd *buf)
11373 {
11374 	struct cdp_txrx_stats_req req = {0,};
11375 	QDF_STATUS status;
11376 	struct dp_pdev *pdev =
11377 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11378 						   pdev_id);
11379 
11380 	if (!pdev)
11381 		return TXRX_STATS_LEVEL_OFF;
11382 
11383 	if (pdev->pending_fw_stats_response)
11384 		return TXRX_STATS_LEVEL_OFF;
11385 
11386 	dp_aggregate_pdev_stats(pdev);
11387 
11388 	pdev->pending_fw_stats_response = true;
11389 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
11390 	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
11391 	pdev->fw_stats_tlv_bitmap_rcvd = 0;
11392 	qdf_event_reset(&pdev->fw_stats_event);
11393 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
11394 				req.param1, req.param2, req.param3, 0,
11395 				req.cookie_val, 0);
11396 
11397 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
11398 	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
11399 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
11400 				req.param1, req.param2, req.param3, 0,
11401 				req.cookie_val, 0);
11402 
11403 	status =
11404 		qdf_wait_single_event(&pdev->fw_stats_event, DP_MAX_SLEEP_TIME);
11405 
11406 	if (status != QDF_STATUS_SUCCESS) {
11407 		if (status == QDF_STATUS_E_TIMEOUT)
11408 			qdf_debug("TIMEOUT_OCCURS");
11409 		pdev->pending_fw_stats_response = false;
11410 		return TXRX_STATS_LEVEL_OFF;
11411 	}
11412 	qdf_mem_copy(buf, &pdev->stats, sizeof(struct cdp_pdev_stats));
11413 	pdev->pending_fw_stats_response = false;
11414 
11415 	return TXRX_STATS_LEVEL;
11416 }
11417 
11418 /**
11419  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
11420  * @soc: soc handle
11421  * @pdev_id: id of DP_PDEV handle
11422  * @map_id: ID of map that needs to be updated
11423  * @tos: index value in map
11424  * @tid: tid value passed by the user
11425  *
11426  * Return: QDF_STATUS
11427  */
11428 static QDF_STATUS
11429 dp_set_pdev_dscp_tid_map_wifi3(struct cdp_soc_t *soc_handle,
11430 			       uint8_t pdev_id,
11431 			       uint8_t map_id,
11432 			       uint8_t tos, uint8_t tid)
11433 {
11434 	uint8_t dscp;
11435 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
11436 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11437 
11438 	if (!pdev)
11439 		return QDF_STATUS_E_FAILURE;
11440 
11441 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
11442 	pdev->dscp_tid_map[map_id][dscp] = tid;
11443 
11444 	if (map_id < soc->num_hw_dscp_tid_map)
11445 		hal_tx_update_dscp_tid(soc->hal_soc, tid,
11446 				       map_id, dscp);
11447 	else
11448 		return QDF_STATUS_E_FAILURE;
11449 
11450 	return QDF_STATUS_SUCCESS;
11451 }
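/*
 * Worked example of the DSCP extraction above, assuming the usual
 * DP_IP_DSCP_SHIFT of 2 and DP_IP_DSCP_MASK of 0x3f: a TOS byte of 0xb8
 * (DSCP EF) yields dscp = (0xb8 >> 2) & 0x3f = 46, so entry 46 of the
 * selected map is set to the caller's tid.
 */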
11452 
11453 #ifdef WLAN_SYSFS_DP_STATS
11454 /*
11455  * dp_sysfs_event_trigger(): Wait for the firmware response to a sysfs
11456  * stats request.
11457  * @soc: soc handle
11458  * @cookie_val: cookie value
11459  *
11460  * Return: QDF_STATUS
11461  */
11462 static QDF_STATUS
11463 dp_sysfs_event_trigger(struct dp_soc *soc, uint32_t cookie_val)
11464 {
11465 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11466 	/* wait for firmware response for sysfs stats request */
11467 	if (cookie_val == DBG_SYSFS_STATS_COOKIE) {
11468 		if (!soc) {
11469 			dp_cdp_err("soc is NULL");
11470 			return QDF_STATUS_E_FAILURE;
11471 		}
11472 		/* wait for event completion */
11473 		status = qdf_wait_single_event(&soc->sysfs_config->sysfs_txrx_fw_request_done,
11474 					       WLAN_SYSFS_STAT_REQ_WAIT_MS);
11475 		if (status == QDF_STATUS_SUCCESS)
11476 			dp_cdp_info("sysfs_txrx_fw_request_done event completed");
11477 		else if (status == QDF_STATUS_E_TIMEOUT)
11478 			dp_cdp_warn("sysfs_txrx_fw_request_done event expired");
11479 		else
11480 			dp_cdp_warn("sysfs_txrx_fw_request_done event error code %d", status);
11481 	}
11482 
11483 	return status;
11484 }
11485 #else /* WLAN_SYSFS_DP_STATS */
11486 /*
11487  * dp_sysfs_event_trigger(): Wait for the firmware response to a sysfs
11488  * stats request (no-op stub when WLAN_SYSFS_DP_STATS is disabled).
11489  * @soc: soc handle
11490  * @cookie_val: cookie value
11491  *
11492  * Return: QDF_STATUS
11493  */
11494 static QDF_STATUS
11495 dp_sysfs_event_trigger(struct dp_soc *soc, uint32_t cookie_val)
11496 {
11497 	return QDF_STATUS_SUCCESS;
11498 }
11499 #endif /* WLAN_SYSFS_DP_STATS */
11500 
11501 /**
11502  * dp_fw_stats_process(): Process TXRX FW stats request.
11503  * @vdev_handle: DP VDEV handle
11504  * @req: stats request
11505  *
11506  * return: QDF_STATUS
11507  */
11508 static QDF_STATUS
11509 dp_fw_stats_process(struct dp_vdev *vdev,
11510 		    struct cdp_txrx_stats_req *req)
11511 {
11512 	struct dp_pdev *pdev = NULL;
11513 	struct dp_soc *soc = NULL;
11514 	uint32_t stats = req->stats;
11515 	uint8_t mac_id = req->mac_id;
11516 	uint32_t cookie_val = DBG_STATS_COOKIE_DEFAULT;
11517 
11518 	if (!vdev) {
11519 		DP_TRACE(NONE, "VDEV not found");
11520 		return QDF_STATUS_E_FAILURE;
11521 	}
11522 
11523 	pdev = vdev->pdev;
11524 	if (!pdev) {
11525 		DP_TRACE(NONE, "PDEV not found");
11526 		return QDF_STATUS_E_FAILURE;
11527 	}
11528 
11529 	soc = pdev->soc;
11530 	if (!soc) {
11531 		DP_TRACE(NONE, "soc not found");
11532 		return QDF_STATUS_E_FAILURE;
11533 	}
11534 
11535 	/* In case request is from host sysfs for displaying stats on console */
11536 	if (req->cookie_val == DBG_SYSFS_STATS_COOKIE)
11537 		cookie_val = DBG_SYSFS_STATS_COOKIE;
11538 
11539 	/*
11540 	 * For the HTT_DBG_EXT_STATS_RESET command, FW needs config_param0
11541 	 * through config_param3 to be set according to the rule below:
11542 	 *
11543 	 * PARAM:
11544 	 *   - config_param0 : start_offset (stats type)
11545 	 *   - config_param1 : stats bmask from start offset
11546 	 *   - config_param2 : stats bmask from start offset + 32
11547 	 *   - config_param3 : stats bmask from start offset + 64
11548 	 */
11549 	if (req->stats == CDP_TXRX_STATS_0) {
11550 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
11551 		req->param1 = 0xFFFFFFFF;
11552 		req->param2 = 0xFFFFFFFF;
11553 		req->param3 = 0xFFFFFFFF;
11554 	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
11555 		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
11556 	}
11557 
11558 	if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT) {
11559 		dp_h2t_ext_stats_msg_send(pdev,
11560 					  HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT,
11561 					  req->param0, req->param1, req->param2,
11562 					  req->param3, 0, cookie_val,
11563 					  mac_id);
11564 	} else {
11565 		dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
11566 					  req->param1, req->param2, req->param3,
11567 					  0, cookie_val, mac_id);
11568 	}
11569 
11570 	dp_sysfs_event_trigger(soc, cookie_val);
11571 
11572 	return QDF_STATUS_SUCCESS;
11573 }
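/*
 * Illustration of the config_param encoding documented above: param1-param3
 * are 32-bit TLV bitmasks relative to the start offset in param0. A
 * hypothetical request for only the first two TLVs at the start offset
 * would set:
 *
 *	req->param1 = 0x3;	(TLVs at bit offsets 0 and 1)
 *	req->param2 = 0;	(bit offsets 32..63 excluded)
 *	req->param3 = 0;	(bit offsets 64..95 excluded)
 */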
11574 
11575 /**
11576  * dp_txrx_stats_request - function to map to firmware and host stats
11577  * @soc: soc handle
11578  * @vdev_id: virtual device ID
11579  * @req: stats request
11580  *
11581  * Return: QDF_STATUS
11582  */
11583 static
11584 QDF_STATUS dp_txrx_stats_request(struct cdp_soc_t *soc_handle,
11585 				 uint8_t vdev_id,
11586 				 struct cdp_txrx_stats_req *req)
11587 {
11588 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_handle);
11589 	int host_stats;
11590 	int fw_stats;
11591 	enum cdp_stats stats;
11592 	int num_stats;
11593 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11594 						     DP_MOD_ID_CDP);
11595 	QDF_STATUS status = QDF_STATUS_E_INVAL;
11596 
11597 	if (!vdev || !req) {
11598 		dp_cdp_err("%pK: Invalid vdev/req instance", soc);
11599 		status = QDF_STATUS_E_INVAL;
11600 		goto fail0;
11601 	}
11602 
11603 	if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) {
11604 		dp_err("Invalid mac id request");
11605 		status = QDF_STATUS_E_INVAL;
11606 		goto fail0;
11607 	}
11608 
11609 	stats = req->stats;
11610 	if (stats >= CDP_TXRX_MAX_STATS) {
11611 		status = QDF_STATUS_E_INVAL;
11612 		goto fail0;
11613 	}
11614 
11615 	/*
11616 	 * DP_CURR_FW_STATS_AVAIL: no. of FW stats currently available;
11617 	 *			has to be updated if new FW HTT stats are added
11618 	 */
11619 	if (stats > CDP_TXRX_STATS_HTT_MAX)
11620 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
11621 
11622 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
11623 
11624 	if (stats >= num_stats) {
11625 		dp_cdp_err("%pK : Invalid stats option: %d", soc, stats);
11626 		status = QDF_STATUS_E_INVAL;
11627 		goto fail0;
11628 	}
11629 
11630 	req->stats = stats;
11631 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
11632 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
11633 
11634 	dp_info("stats: %u fw_stats_type: %d host_stats: %d",
11635 		stats, fw_stats, host_stats);
11636 
11637 	if (fw_stats != TXRX_FW_STATS_INVALID) {
11638 		/* update request with FW stats type */
11639 		req->stats = fw_stats;
11640 		status = dp_fw_stats_process(vdev, req);
11641 	} else if ((host_stats != TXRX_HOST_STATS_INVALID) &&
11642 			(host_stats <= TXRX_HOST_STATS_MAX))
11643 		status = dp_print_host_stats(vdev, req, soc);
11644 	else
11645 		dp_cdp_info("%pK: Wrong Input for TxRx Stats", soc);
11646 fail0:
11647 	if (vdev)
11648 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11649 	return status;
11650 }
11651 
11652 /*
11653  * dp_txrx_dump_stats() - Dump statistics
11654  * @psoc: DP soc handle
11655  * @value: statistics option
11656  * @level: verbosity level for the dump
11655  */
11656 static QDF_STATUS dp_txrx_dump_stats(struct cdp_soc_t *psoc, uint16_t value,
11657 				     enum qdf_stats_verbosity_level level)
11658 {
11659 	struct dp_soc *soc =
11660 		(struct dp_soc *)psoc;
11661 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11662 
11663 	if (!soc) {
11664 		dp_cdp_err("%pK: soc is NULL", soc);
11665 		return QDF_STATUS_E_INVAL;
11666 	}
11667 
11668 	switch (value) {
11669 	case CDP_TXRX_PATH_STATS:
11670 		dp_txrx_path_stats(soc);
11671 		dp_print_soc_interrupt_stats(soc);
11672 		hal_dump_reg_write_stats(soc->hal_soc);
11673 		dp_pdev_print_tx_delay_stats(soc);
11674 		/* Dump usage watermark stats for core TX/RX SRNGs */
11675 		dp_dump_srng_high_wm_stats(soc, (1 << REO_DST));
11676 		dp_print_fisa_stats(soc);
11677 		break;
11678 
11679 	case CDP_RX_RING_STATS:
11680 		dp_print_per_ring_stats(soc);
11681 		break;
11682 
11683 	case CDP_TXRX_TSO_STATS:
11684 		dp_print_tso_stats(soc, level);
11685 		break;
11686 
11687 	case CDP_DUMP_TX_FLOW_POOL_INFO:
11688 		if (level == QDF_STATS_VERBOSITY_LEVEL_HIGH)
11689 			cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
11690 		else
11691 			dp_tx_dump_flow_pool_info_compact(soc);
11692 		break;
11693 
11694 	case CDP_DP_NAPI_STATS:
11695 		dp_print_napi_stats(soc);
11696 		break;
11697 
11698 	case CDP_TXRX_DESC_STATS:
11699 		/* TODO: NOT IMPLEMENTED */
11700 		break;
11701 
11702 	case CDP_DP_RX_FISA_STATS:
11703 		dp_rx_dump_fisa_stats(soc);
11704 		break;
11705 
11706 	case CDP_DP_SWLM_STATS:
11707 		dp_print_swlm_stats(soc);
11708 		break;
11709 
11710 	case CDP_DP_TX_HW_LATENCY_STATS:
11711 		dp_pdev_print_tx_delay_stats(soc);
11712 		break;
11713 
11714 	default:
11715 		status = QDF_STATUS_E_INVAL;
11716 		break;
11717 	}
11718 
11719 	return status;
11720 
11721 }
11722 
11723 #ifdef WLAN_SYSFS_DP_STATS
11724 static
11725 void dp_sysfs_get_stat_type(struct dp_soc *soc, uint32_t *mac_id,
11726 			    uint32_t *stat_type)
11727 {
11728 	qdf_spinlock_acquire(&soc->sysfs_config->rw_stats_lock);
11729 	*stat_type = soc->sysfs_config->stat_type_requested;
11730 	*mac_id   = soc->sysfs_config->mac_id;
11731 
11732 	qdf_spinlock_release(&soc->sysfs_config->rw_stats_lock);
11733 }
11734 
11735 static
11736 void dp_sysfs_update_config_buf_params(struct dp_soc *soc,
11737 				       uint32_t curr_len,
11738 				       uint32_t max_buf_len,
11739 				       char *buf)
11740 {
11741 	qdf_spinlock_acquire(&soc->sysfs_config->sysfs_write_user_buffer);
11742 	/* set sysfs_config parameters */
11743 	soc->sysfs_config->buf = buf;
11744 	soc->sysfs_config->curr_buffer_length = curr_len;
11745 	soc->sysfs_config->max_buffer_length = max_buf_len;
11746 	qdf_spinlock_release(&soc->sysfs_config->sysfs_write_user_buffer);
11747 }
11748 
11749 static
11750 QDF_STATUS dp_sysfs_fill_stats(ol_txrx_soc_handle soc_hdl,
11751 			       char *buf, uint32_t buf_size)
11752 {
11753 	uint32_t mac_id = 0;
11754 	uint32_t stat_type = 0;
11755 	uint32_t fw_stats = 0;
11756 	uint32_t host_stats = 0;
11757 	enum cdp_stats stats;
11758 	struct cdp_txrx_stats_req req;
11759 	uint32_t num_stats;
11760 	struct dp_soc *soc = NULL;
11761 
11762 	if (!soc_hdl) {
11763 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
11764 		return QDF_STATUS_E_INVAL;
11765 	}
11766 
11767 	soc = cdp_soc_t_to_dp_soc(soc_hdl);
11768 
11769 	if (!soc) {
11770 		dp_cdp_err("%pK: soc is NULL", soc);
11771 		return QDF_STATUS_E_INVAL;
11772 	}
11773 
11774 	dp_sysfs_get_stat_type(soc, &mac_id, &stat_type);
11775 
11776 	stats = stat_type;
11777 	if (stats >= CDP_TXRX_MAX_STATS) {
11778 		dp_cdp_info("sysfs stat type requested is invalid");
11779 		return QDF_STATUS_E_INVAL;
11780 	}
11781 	/*
11782 	 * DP_CURR_FW_STATS_AVAIL: no. of FW stats currently available;
11783 	 *			has to be updated if new FW HTT stats are added
11784 	 */
11785 	if (stats > CDP_TXRX_STATS_HTT_MAX)
11786 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
11787 
11788 	num_stats = QDF_ARRAY_SIZE(dp_stats_mapping_table);
11789 
11790 	if (stats >= num_stats) {
11791 		dp_cdp_err("%pK : Invalid stats option: %d, max num stats: %d",
11792 				soc, stats, num_stats);
11793 		return QDF_STATUS_E_INVAL;
11794 	}
11795 
11796 	/* build request */
11797 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
11798 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
11799 
11800 	req.stats = stat_type;
11801 	req.mac_id = mac_id;
11802 	/* request stats to be printed */
11803 	qdf_mutex_acquire(&soc->sysfs_config->sysfs_read_lock);
11804 
11805 	if (fw_stats != TXRX_FW_STATS_INVALID) {
11806 		/* update request with FW stats type */
11807 		req.cookie_val = DBG_SYSFS_STATS_COOKIE;
11808 	} else if ((host_stats != TXRX_HOST_STATS_INVALID) &&
11809 			(host_stats <= TXRX_HOST_STATS_MAX)) {
11810 		req.cookie_val = DBG_STATS_COOKIE_DEFAULT;
11811 		soc->sysfs_config->process_id = qdf_get_current_pid();
11812 		soc->sysfs_config->printing_mode = PRINTING_MODE_ENABLED;
11813 	}
11814 
11815 	dp_sysfs_update_config_buf_params(soc, 0, buf_size, buf);
11816 
11817 	dp_txrx_stats_request(soc_hdl, mac_id, &req);
11818 	soc->sysfs_config->process_id = 0;
11819 	soc->sysfs_config->printing_mode = PRINTING_MODE_DISABLED;
11820 
11821 	dp_sysfs_update_config_buf_params(soc, 0, 0, NULL);
11822 
11823 	qdf_mutex_release(&soc->sysfs_config->sysfs_read_lock);
11824 	return QDF_STATUS_SUCCESS;
11825 }
11826 
11827 static
11828 QDF_STATUS dp_sysfs_set_stat_type(ol_txrx_soc_handle soc_hdl,
11829 				  uint32_t stat_type, uint32_t mac_id)
11830 {
11831 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11832 
11833 	if (!soc_hdl) {
11834 		dp_cdp_err("%pK: soc is NULL", soc);
11835 		return QDF_STATUS_E_INVAL;
11836 	}
11837 
11838 	qdf_spinlock_acquire(&soc->sysfs_config->rw_stats_lock);
11839 
11840 	soc->sysfs_config->stat_type_requested = stat_type;
11841 	soc->sysfs_config->mac_id = mac_id;
11842 
11843 	qdf_spinlock_release(&soc->sysfs_config->rw_stats_lock);
11844 
11845 	return QDF_STATUS_SUCCESS;
11846 }
11847 
11848 static
11849 QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl)
11850 {
11851 	struct dp_soc *soc;
11852 	QDF_STATUS status;
11853 
11854 	if (!soc_hdl) {
11855 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
11856 		return QDF_STATUS_E_INVAL;
11857 	}
11858 
11859 	soc = soc_hdl;
11860 
11861 	soc->sysfs_config = qdf_mem_malloc(sizeof(struct sysfs_stats_config));
11862 	if (!soc->sysfs_config) {
11863 		dp_cdp_err("failed to allocate memory for sysfs_config");
11864 		return QDF_STATUS_E_NOMEM;
11865 	}
11866 
11867 	/* create event for fw stats request from sysfs */
11868 	status = qdf_event_create(&soc->sysfs_config->sysfs_txrx_fw_request_done);
11869 	if (status != QDF_STATUS_SUCCESS) {
11870 		dp_cdp_err("failed to create event sysfs_txrx_fw_request_done");
11871 		qdf_mem_free(soc->sysfs_config);
11872 		soc->sysfs_config = NULL;
11873 		return QDF_STATUS_E_FAILURE;
11874 	}
11875 
11876 	qdf_spinlock_create(&soc->sysfs_config->rw_stats_lock);
11877 	qdf_mutex_create(&soc->sysfs_config->sysfs_read_lock);
11878 	qdf_spinlock_create(&soc->sysfs_config->sysfs_write_user_buffer);
11879 
11880 	return QDF_STATUS_SUCCESS;
11881 }
11882 
11883 static
11884 QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl)
11885 {
11886 	struct dp_soc *soc;
11887 	QDF_STATUS status;
11888 
11889 	if (!soc_hdl) {
11890 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
11891 		return QDF_STATUS_E_INVAL;
11892 	}
11893 
11894 	soc = soc_hdl;
11895 	if (!soc->sysfs_config) {
11896 		dp_cdp_err("soc->sysfs_config is NULL");
11897 		return QDF_STATUS_E_FAILURE;
11898 	}
11899 
11900 	status = qdf_event_destroy(&soc->sysfs_config->sysfs_txrx_fw_request_done);
11901 	if (status != QDF_STATUS_SUCCESS)
11902 		dp_cdp_err("Failed to destroy event sysfs_txrx_fw_request_done");
11903 
11904 	qdf_mutex_destroy(&soc->sysfs_config->sysfs_read_lock);
11905 	qdf_spinlock_destroy(&soc->sysfs_config->rw_stats_lock);
11906 	qdf_spinlock_destroy(&soc->sysfs_config->sysfs_write_user_buffer);
11907 
11908 	qdf_mem_free(soc->sysfs_config);
11909 
11910 	return QDF_STATUS_SUCCESS;
11911 }
11912 
11913 #else /* WLAN_SYSFS_DP_STATS */
11914 
11915 static
11916 QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl)
11917 {
11918 	return QDF_STATUS_SUCCESS;
11919 }
11920 
11921 static
11922 QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl)
11923 {
11924 	return QDF_STATUS_SUCCESS;
11925 }
11926 #endif /* WLAN_SYSFS_DP_STATS */
11927 
11928 /**
11929  * dp_txrx_clear_dump_stats() - clear dump stats
11930  * @soc_hdl: soc handle
11931  * @pdev_id: id of DP pdev handle
11932  * @value: stats option
11932  *
11933  * Return: 0 - Success, non-zero - failure
11934  */
11935 static
11936 QDF_STATUS dp_txrx_clear_dump_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
11937 				    uint8_t value)
11938 {
11939 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11940 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11941 
11942 	if (!soc) {
11943 		dp_err("soc is NULL");
11944 		return QDF_STATUS_E_INVAL;
11945 	}
11946 
11947 	switch (value) {
11948 	case CDP_TXRX_TSO_STATS:
11949 		dp_txrx_clear_tso_stats(soc);
11950 		break;
11951 
11952 	case CDP_DP_TX_HW_LATENCY_STATS:
11953 		dp_pdev_clear_tx_delay_stats(soc);
11954 		break;
11955 
11956 	default:
11957 		status = QDF_STATUS_E_INVAL;
11958 		break;
11959 	}
11960 
11961 	return status;
11962 }
11963 
11964 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
11965 /**
11966  * dp_update_flow_control_parameters() - API to store datapath
11967  *                            config parameters
11968  * @soc: soc handle
11969  * @cfg: ini parameter handle
11970  *
11971  * Return: void
11972  */
11973 static inline
11974 void dp_update_flow_control_parameters(struct dp_soc *soc,
11975 				struct cdp_config_params *params)
11976 {
11977 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
11978 					params->tx_flow_stop_queue_threshold;
11979 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
11980 					params->tx_flow_start_queue_offset;
11981 }
11982 #else
11983 static inline
11984 void dp_update_flow_control_parameters(struct dp_soc *soc,
11985 				struct cdp_config_params *params)
11986 {
11987 }
11988 #endif
11989 
11990 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
11991 /* Max packet limit for TX Comp packet loop (dp_tx_comp_handler) */
11992 #define DP_TX_COMP_LOOP_PKT_LIMIT_MAX 1024
11993 
11994 /* Max packet limit for RX REAP Loop (dp_rx_process) */
11995 #define DP_RX_REAP_LOOP_PKT_LIMIT_MAX 1024
11996 
11997 static
11998 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
11999 					struct cdp_config_params *params)
12000 {
12001 	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit =
12002 				params->tx_comp_loop_pkt_limit;
12003 
12004 	if (params->tx_comp_loop_pkt_limit < DP_TX_COMP_LOOP_PKT_LIMIT_MAX)
12005 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = true;
12006 	else
12007 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = false;
12008 
12009 	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit =
12010 				params->rx_reap_loop_pkt_limit;
12011 
12012 	if (params->rx_reap_loop_pkt_limit < DP_RX_REAP_LOOP_PKT_LIMIT_MAX)
12013 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = true;
12014 	else
12015 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = false;
12016 
12017 	soc->wlan_cfg_ctx->rx_hp_oos_update_limit =
12018 				params->rx_hp_oos_update_limit;
12019 
12020 	dp_info("tx_comp_loop_pkt_limit %u tx_comp_enable_eol_data_check %u rx_reap_loop_pkt_limit %u rx_enable_eol_data_check %u rx_hp_oos_update_limit %u",
12021 		soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit,
12022 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check,
12023 		soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit,
12024 		soc->wlan_cfg_ctx->rx_enable_eol_data_check,
12025 		soc->wlan_cfg_ctx->rx_hp_oos_update_limit);
12026 }
12027 
12028 static void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
12029 				      uint32_t rx_limit)
12030 {
12031 	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit = tx_limit;
12032 	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit = rx_limit;
12033 }
12034 
12035 #else
12036 static inline
12037 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
12038 					struct cdp_config_params *params)
12039 { }
12040 
12041 static inline
12042 void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
12043 			       uint32_t rx_limit)
12044 {
12045 }
12046 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
12047 
12048 /**
12049  * dp_update_config_parameters() - API to store datapath
12050  *                            config parameters
12051  * @soc: soc handle
12052  * @cfg: ini parameter handle
12053  *
12054  * Return: status
12055  */
12056 static
12057 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
12058 				struct cdp_config_params *params)
12059 {
12060 	struct dp_soc *soc = (struct dp_soc *)psoc;
12061 
12062 	if (!(soc)) {
12063 		dp_cdp_err("%pK: Invalid handle", soc);
12064 		return QDF_STATUS_E_INVAL;
12065 	}
12066 
12067 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
12068 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
12069 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
12070 	soc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload =
12071 				params->p2p_tcp_udp_checksumoffload;
12072 	soc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload =
12073 				params->nan_tcp_udp_checksumoffload;
12074 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
12075 				params->tcp_udp_checksumoffload;
12076 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
12077 	soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
12078 	soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
12079 
12080 	dp_update_rx_soft_irq_limit_params(soc, params);
12081 	dp_update_flow_control_parameters(soc, params);
12082 
12083 	return QDF_STATUS_SUCCESS;
12084 }
12085 
12086 static struct cdp_wds_ops dp_ops_wds = {
12087 	.vdev_set_wds = dp_vdev_set_wds,
12088 #ifdef WDS_VENDOR_EXTENSION
12089 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
12090 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
12091 #endif
12092 };
12093 
12094 /*
12095  * dp_txrx_data_tx_cb_set(): set the callback for non-standard tx
12096  * @soc_hdl: datapath soc handle
12097  * @vdev_id: virtual interface id
12098  * @callback: callback function
12099  * @ctxt: callback context
12100  *
12101  * Return: void
12101  */
12102 static void
12103 dp_txrx_data_tx_cb_set(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
12104 		       ol_txrx_data_tx_cb callback, void *ctxt)
12105 {
12106 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12107 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12108 						     DP_MOD_ID_CDP);
12109 
12110 	if (!vdev)
12111 		return;
12112 
12113 	vdev->tx_non_std_data_callback.func = callback;
12114 	vdev->tx_non_std_data_callback.ctxt = ctxt;
12115 
12116 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12117 }
12118 
12119 /**
12120  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
12121  * @soc: datapath soc handle
12122  * @pdev_id: id of datapath pdev handle
12123  *
12124  * Return: opaque pointer to dp txrx handle
12125  */
12126 static void *dp_pdev_get_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id)
12127 {
12128 	struct dp_pdev *pdev =
12129 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
12130 						   pdev_id);
12131 	if (qdf_unlikely(!pdev))
12132 		return NULL;
12133 
12134 	return pdev->dp_txrx_handle;
12135 }
12136 
12137 /**
12138  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
12139  * @soc: datapath soc handle
12140  * @pdev_id: id of datapath pdev handle
12141  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
12142  *
12143  * Return: void
12144  */
12145 static void
12146 dp_pdev_set_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id,
12147 			   void *dp_txrx_hdl)
12148 {
12149 	struct dp_pdev *pdev =
12150 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
12151 						   pdev_id);
12152 
12153 	if (!pdev)
12154 		return;
12155 
12156 	pdev->dp_txrx_handle = dp_txrx_hdl;
12157 }
12158 
12159 /**
12160  * dp_vdev_get_dp_ext_handle() - get dp handle from vdev
12161  * @soc: datapath soc handle
12162  * @vdev_id: vdev id
12163  *
12164  * Return: opaque pointer to dp txrx handle
12165  */
12166 static void *dp_vdev_get_dp_ext_handle(ol_txrx_soc_handle soc_hdl,
12167 				       uint8_t vdev_id)
12168 {
12169 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12170 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12171 						     DP_MOD_ID_CDP);
12172 	void *dp_ext_handle;
12173 
12174 	if (!vdev)
12175 		return NULL;
12176 	dp_ext_handle = vdev->vdev_dp_ext_handle;
12177 
12178 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12179 	return dp_ext_handle;
12180 }
12181 
12182 /**
12183  * dp_vdev_set_dp_ext_handle() - set dp handle in vdev
12184  * @soc: datapath soc handle
12185  * @vdev_id: vdev id
12186  * @size: size of advance dp handle
12187  *
12188  * Return: QDF_STATUS
12189  */
12190 static QDF_STATUS
12191 dp_vdev_set_dp_ext_handle(ol_txrx_soc_handle soc_hdl, uint8_t vdev_id,
12192 			  uint16_t size)
12193 {
12194 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12195 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12196 						     DP_MOD_ID_CDP);
12197 	void *dp_ext_handle;
12198 
12199 	if (!vdev)
12200 		return QDF_STATUS_E_FAILURE;
12201 
12202 	dp_ext_handle = qdf_mem_malloc(size);
12203 
12204 	if (!dp_ext_handle) {
12205 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12206 		return QDF_STATUS_E_FAILURE;
12207 	}
12208 
12209 	vdev->vdev_dp_ext_handle = dp_ext_handle;
12210 
12211 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12212 	return QDF_STATUS_SUCCESS;
12213 }
12214 
12215 /**
12216  * dp_vdev_inform_ll_conn() - Inform vdev to add/delete a latency critical
12217  *			      connection for this vdev
12218  * @soc_hdl: CDP soc handle
12219  * @vdev_id: vdev ID
12220  * @action: Add/Delete action
12221  *
12222  * Returns: QDF_STATUS.
12223  */
12224 static QDF_STATUS
12225 dp_vdev_inform_ll_conn(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
12226 		       enum vdev_ll_conn_actions action)
12227 {
12228 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12229 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12230 						     DP_MOD_ID_CDP);
12231 
12232 	if (!vdev) {
12233 		dp_err("LL connection action for invalid vdev %d", vdev_id);
12234 		return QDF_STATUS_E_FAILURE;
12235 	}
12236 
12237 	switch (action) {
12238 	case CDP_VDEV_LL_CONN_ADD:
12239 		vdev->num_latency_critical_conn++;
12240 		break;
12241 
12242 	case CDP_VDEV_LL_CONN_DEL:
12243 		vdev->num_latency_critical_conn--;
12244 		break;
12245 
12246 	default:
12247 		dp_err("LL connection action invalid %d", action);
12248 		break;
12249 	}
12250 
12251 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12252 	return QDF_STATUS_SUCCESS;
12253 }
12254 
12255 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
12256 /**
12257  * dp_soc_set_swlm_enable() - Enable/Disable SWLM if initialized.
12258  * @soc_hdl: CDP Soc handle
12259  * @value: Enable/Disable value
12260  *
12261  * Returns: QDF_STATUS
12262  */
12263 static QDF_STATUS dp_soc_set_swlm_enable(struct cdp_soc_t *soc_hdl,
12264 					 uint8_t value)
12265 {
12266 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12267 
12268 	if (!soc->swlm.is_init) {
12269 		dp_err("SWLM is not initialized");
12270 		return QDF_STATUS_E_FAILURE;
12271 	}
12272 
12273 	soc->swlm.is_enabled = !!value;
12274 
12275 	return QDF_STATUS_SUCCESS;
12276 }
12277 
12278 /**
12279  * dp_soc_is_swlm_enabled() - Check if SWLM is enabled.
12280  * @soc_hdl: CDP Soc handle
12281  *
12282  * Returns: QDF_STATUS
12283  */
12284 static uint8_t dp_soc_is_swlm_enabled(struct cdp_soc_t *soc_hdl)
12285 {
12286 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12287 
12288 	return soc->swlm.is_enabled;
12289 }
12290 #endif
12291 
12292 /**
12293  * dp_display_srng_info() - Dump the srng HP TP info
12294  * @soc_hdl: CDP Soc handle
12295  *
12296  * This function dumps the SW hp/tp values for the important rings.
12297  * HW hp/tp values are not being dumped, since it can lead to
12298  * READ NOC error when UMAC is in low power state. MCC does not have
12299  * device force wake working yet.
12300  *
12301  * Return: none
12302  */
12303 static void dp_display_srng_info(struct cdp_soc_t *soc_hdl)
12304 {
12305 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12306 	hal_soc_handle_t hal_soc = soc->hal_soc;
12307 	uint32_t hp, tp, i;
12308 
12309 	dp_info("SRNG HP-TP data:");
12310 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
12311 		hal_get_sw_hptp(hal_soc, soc->tcl_data_ring[i].hal_srng,
12312 				&tp, &hp);
12313 		dp_info("TCL DATA ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
12314 
12315 		if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, i) ==
12316 		    INVALID_WBM_RING_NUM)
12317 			continue;
12318 
12319 		hal_get_sw_hptp(hal_soc, soc->tx_comp_ring[i].hal_srng,
12320 				&tp, &hp);
12321 		dp_info("TX comp ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
12322 	}
12323 
12324 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
12325 		hal_get_sw_hptp(hal_soc, soc->reo_dest_ring[i].hal_srng,
12326 				&tp, &hp);
12327 		dp_info("REO DST ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
12328 	}
12329 
12330 	hal_get_sw_hptp(hal_soc, soc->reo_exception_ring.hal_srng, &tp, &hp);
12331 	dp_info("REO exception ring: hp=0x%x, tp=0x%x", hp, tp);
12332 
12333 	hal_get_sw_hptp(hal_soc, soc->rx_rel_ring.hal_srng, &tp, &hp);
12334 	dp_info("WBM RX release ring: hp=0x%x, tp=0x%x", hp, tp);
12335 
12336 	hal_get_sw_hptp(hal_soc, soc->wbm_desc_rel_ring.hal_srng, &tp, &hp);
12337 	dp_info("WBM desc release ring: hp=0x%x, tp=0x%x", hp, tp);
12338 }
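/*
 * Example output (values hypothetical), following the format strings above:
 *
 *	SRNG HP-TP data:
 *	TCL DATA ring[0]: hp=0x120, tp=0x11c
 *	TX comp ring[0]: hp=0x8e, tp=0x8e
 *	REO DST ring[0]: hp=0x40, tp=0x3c
 *	REO exception ring: hp=0x2, tp=0x2
 *	WBM RX release ring: hp=0x64, tp=0x64
 *	WBM desc release ring: hp=0x10, tp=0x10
 */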
12339 
12340 /**
12341  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
12342  * @soc_handle: datapath soc handle
12343  *
12344  * Return: opaque pointer to external dp (non-core DP)
12345  */
12346 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
12347 {
12348 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12349 
12350 	return soc->external_txrx_handle;
12351 }
12352 
12353 /**
12354  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
12355  * @soc_handle: datapath soc handle
12356  * @txrx_handle: opaque pointer to external dp (non-core DP)
12357  *
12358  * Return: void
12359  */
12360 static void
12361 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
12362 {
12363 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12364 
12365 	soc->external_txrx_handle = txrx_handle;
12366 }
12367 
12368 /**
12369  * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping
12370  * @soc_hdl: datapath soc handle
12371  * @pdev_id: id of the datapath pdev handle
12372  * @lmac_id: lmac id
12373  *
12374  * Return: QDF_STATUS
12375  */
12376 static QDF_STATUS
12377 dp_soc_map_pdev_to_lmac
12378 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12379 	 uint32_t lmac_id)
12380 {
12381 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12382 
12383 	wlan_cfg_set_hw_mac_idx(soc->wlan_cfg_ctx,
12384 				pdev_id,
12385 				lmac_id);
12386 
12387 	/*Set host PDEV ID for lmac_id*/
12388 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
12389 			      pdev_id,
12390 			      lmac_id);
12391 
12392 	return QDF_STATUS_SUCCESS;
12393 }
12394 
12395 /**
12396  * dp_soc_handle_pdev_mode_change() - Update pdev to lmac mapping
12397  * @soc_hdl: datapath soc handle
12398  * @pdev_id: id of the datapath pdev handle
12399  * @lmac_id: lmac id
12400  *
12401  * In the event of a dynamic mode change, update the pdev to lmac mapping
12402  *
12403  * Return: QDF_STATUS
12404  */
12405 static QDF_STATUS
12406 dp_soc_handle_pdev_mode_change
12407 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12408 	 uint32_t lmac_id)
12409 {
12410 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12411 	struct dp_vdev *vdev = NULL;
12412 	uint8_t hw_pdev_id, mac_id;
12413 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc,
12414 								  pdev_id);
12415 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
12416 
12417 	if (qdf_unlikely(!pdev))
12418 		return QDF_STATUS_E_FAILURE;
12419 
12420 	pdev->lmac_id = lmac_id;
12421 	pdev->target_pdev_id =
12422 		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
12423 	dp_info("mode change %d %d", pdev->pdev_id, pdev->lmac_id);
12424 
12425 	/*Set host PDEV ID for lmac_id*/
12426 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
12427 			      pdev->pdev_id,
12428 			      lmac_id);
12429 
12430 	hw_pdev_id =
12431 		dp_get_target_pdev_id_for_host_pdev_id(soc,
12432 						       pdev->pdev_id);
12433 
12434 	/*
12435 	 * When NSS offload is enabled, send pdev_id->lmac_id
12436 	 * and pdev_id to hw_pdev_id to NSS FW
12437 	 */
12438 	if (nss_config) {
12439 		mac_id = pdev->lmac_id;
12440 		if (soc->cdp_soc.ol_ops->pdev_update_lmac_n_target_pdev_id)
12441 			soc->cdp_soc.ol_ops->
12442 				pdev_update_lmac_n_target_pdev_id(
12443 				soc->ctrl_psoc,
12444 				&pdev_id, &mac_id, &hw_pdev_id);
12445 	}
12446 
12447 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
12448 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
12449 		DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
12450 					       hw_pdev_id);
12451 		vdev->lmac_id = pdev->lmac_id;
12452 	}
12453 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
12454 
12455 	return QDF_STATUS_SUCCESS;
12456 }
12457 
12458 /**
12459  * dp_soc_set_pdev_status_down() - set pdev down/up status
12460  * @soc: datapath soc handle
12461  * @pdev_id: id of datapath pdev handle
12462  * @is_pdev_down: pdev down/up status
12463  *
12464  * Return: QDF_STATUS
12465  */
12466 static QDF_STATUS
12467 dp_soc_set_pdev_status_down(struct cdp_soc_t *soc, uint8_t pdev_id,
12468 			    bool is_pdev_down)
12469 {
12470 	struct dp_pdev *pdev =
12471 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
12472 						   pdev_id);
12473 	if (!pdev)
12474 		return QDF_STATUS_E_FAILURE;
12475 
12476 	pdev->is_pdev_down = is_pdev_down;
12477 	return QDF_STATUS_SUCCESS;
12478 }
12479 
12480 /**
12481  * dp_get_cfg_capabilities() - get dp capabilities
12482  * @soc_handle: datapath soc handle
12483  * @dp_caps: enum for dp capabilities
12484  *
12485  * Return: true if the given dp capability is enabled
12486  */
12487 static bool
12488 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
12489 			enum cdp_capabilities dp_caps)
12490 {
12491 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12492 
12493 	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
12494 }
12495 
12496 #ifdef FEATURE_AST
12497 static QDF_STATUS
12498 dp_peer_teardown_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
12499 		       uint8_t *peer_mac)
12500 {
12501 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12502 	QDF_STATUS status = QDF_STATUS_SUCCESS;
12503 	struct dp_peer *peer =
12504 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
12505 					       DP_MOD_ID_CDP);
12506 
12507 	/* Peer can be null for monitor vap mac address */
12508 	if (!peer) {
12509 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
12510 			  "%s: Invalid peer\n", __func__);
12511 		return QDF_STATUS_E_FAILURE;
12512 	}
12513 
12514 	dp_peer_update_state(soc, peer, DP_PEER_STATE_LOGICAL_DELETE);
12515 
12516 	qdf_spin_lock_bh(&soc->ast_lock);
12517 	dp_peer_send_wds_disconnect(soc, peer);
12518 	dp_peer_delete_ast_entries(soc, peer);
12519 	qdf_spin_unlock_bh(&soc->ast_lock);
12520 
12521 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
12522 	return status;
12523 }
12524 #endif
12525 
12526 #ifndef WLAN_SUPPORT_RX_TAG_STATISTICS
12527 /**
12528  * dp_dump_pdev_rx_protocol_tag_stats() - dump the number of packets tagged
12529  * for a given protocol type (RX_PROTOCOL_TAG_ALL indicates all protocols)
12530  * @soc: cdp_soc handle
12531  * @pdev_id: id of cdp_pdev handle
12532  * @protocol_type: protocol type for which stats should be displayed
12533  *
12534  * Return: none
12535  */
12536 static inline void
12537 dp_dump_pdev_rx_protocol_tag_stats(struct cdp_soc_t  *soc, uint8_t pdev_id,
12538 				   uint16_t protocol_type)
12539 {
12540 }
12541 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
12542 
12543 #ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
12544 /**
12545  * dp_update_pdev_rx_protocol_tag() - Add/remove a protocol tag that should be
12546  * applied to the desired protocol type packets
12547  * @soc: soc handle
12548  * @pdev_id: id of cdp_pdev handle
12549  * @enable_rx_protocol_tag: bitmask that indicates which protocol types
12550  * are enabled for tagging. Zero disables the feature; any non-zero value
12551  * enables it.
12552  * @protocol_type: new protocol type for which the tag is being added
12553  * @tag: user configured tag for the new protocol
12554  *
12555  * Return: Success
12556  */
12557 static inline QDF_STATUS
12558 dp_update_pdev_rx_protocol_tag(struct cdp_soc_t  *soc, uint8_t pdev_id,
12559 			       uint32_t enable_rx_protocol_tag,
12560 			       uint16_t protocol_type,
12561 			       uint16_t tag)
12562 {
12563 	return QDF_STATUS_SUCCESS;
12564 }
12565 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
12566 
12567 #ifndef WLAN_SUPPORT_RX_FLOW_TAG
12568 /**
12569  * dp_set_rx_flow_tag() - add/delete a flow
12570  * @cdp_soc: soc handle
12571  * @pdev_id: id of cdp_pdev handle
12572  * @flow_info: flow tuple that is to be added to/deleted from flow search table
12573  *
12574  * Return: Success
12575  */
12576 static inline QDF_STATUS
12577 dp_set_rx_flow_tag(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
12578 		   struct cdp_rx_flow_info *flow_info)
12579 {
12580 	return QDF_STATUS_SUCCESS;
12581 }
12582 /**
12583  * dp_dump_rx_flow_tag_stats() - dump the number of packets tagged for
12584  * given flow 5-tuple
12585  * @cdp_soc: soc handle
12586  * @pdev_id: id of cdp_pdev handle
12587  * @flow_info: flow 5-tuple for which stats should be displayed
12588  *
12589  * Return: Success
12590  */
12591 static inline QDF_STATUS
12592 dp_dump_rx_flow_tag_stats(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
12593 			  struct cdp_rx_flow_info *flow_info)
12594 {
12595 	return QDF_STATUS_SUCCESS;
12596 }
12597 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
12598 
12599 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
12600 					   uint32_t max_peers,
12601 					   uint32_t max_ast_index,
12602 					   uint8_t peer_map_unmap_versions)
12603 {
12604 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12605 	QDF_STATUS status;
12606 
12607 	soc->max_peers = max_peers;
12608 
12609 	wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);
12610 
12611 	status = soc->arch_ops.txrx_peer_map_attach(soc);
12612 	if (!QDF_IS_STATUS_SUCCESS(status)) {
12613 		dp_err("failure in allocating peer tables");
12614 		return QDF_STATUS_E_FAILURE;
12615 	}
12616 
12617 	dp_info("max_peers %u, calculated max_peers %u max_ast_index: %u\n",
12618 		max_peers, soc->max_peer_id, max_ast_index);
12619 
12620 	status = dp_peer_find_attach(soc);
12621 	if (!QDF_IS_STATUS_SUCCESS(status)) {
12622 		dp_err("Peer find attach failure");
12623 		goto fail;
12624 	}
12625 
12626 	soc->peer_map_unmap_versions = peer_map_unmap_versions;
12627 	soc->peer_map_attach_success = TRUE;
12628 
12629 	return QDF_STATUS_SUCCESS;
12630 fail:
12631 	soc->arch_ops.txrx_peer_map_detach(soc);
12632 
12633 	return status;
12634 }
12635 
12636 static QDF_STATUS dp_soc_set_param(struct cdp_soc_t  *soc_hdl,
12637 				   enum cdp_soc_param_t param,
12638 				   uint32_t value)
12639 {
12640 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12641 
12642 	switch (param) {
12643 	case DP_SOC_PARAM_MSDU_EXCEPTION_DESC:
12644 		soc->num_msdu_exception_desc = value;
12645 		dp_info("num_msdu exception_desc %u",
12646 			value);
12647 		break;
12648 	case DP_SOC_PARAM_CMEM_FSE_SUPPORT:
12649 		if (wlan_cfg_is_fst_in_cmem_enabled(soc->wlan_cfg_ctx))
12650 			soc->fst_in_cmem = !!value;
12651 		dp_info("FW supports CMEM FSE %u", value);
12652 		break;
12653 	case DP_SOC_PARAM_MAX_AST_AGEOUT:
12654 		soc->max_ast_ageout_count = value;
12655 		dp_info("Max ast ageout count %u", soc->max_ast_ageout_count);
12656 		break;
12657 	case DP_SOC_PARAM_EAPOL_OVER_CONTROL_PORT:
12658 		soc->eapol_over_control_port = value;
12659 		dp_info("Eapol over control_port:%d",
12660 			soc->eapol_over_control_port);
12661 		break;
12662 	case DP_SOC_PARAM_MULTI_PEER_GRP_CMD_SUPPORT:
12663 		soc->multi_peer_grp_cmd_supported = value;
12664 		dp_info("Multi Peer group command support:%d",
12665 			soc->multi_peer_grp_cmd_supported);
12666 		break;
12667 	case DP_SOC_PARAM_RSSI_DBM_CONV_SUPPORT:
12668 		soc->features.rssi_dbm_conv_support = value;
12669 		dp_info("Rssi dbm converstion support:%u",
12670 			soc->features.rssi_dbm_conv_support);
12671 		break;
12672 	case DP_SOC_PARAM_UMAC_HW_RESET_SUPPORT:
12673 		soc->features.umac_hw_reset_support = value;
12674 		dp_info("UMAC HW reset support :%u",
12675 			soc->features.umac_hw_reset_support);
12676 		break;
12677 	default:
12678 		dp_info("not handled param %d ", param);
12679 		break;
12680 	}
12681 
12682 	return QDF_STATUS_SUCCESS;
12683 }
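/*
 * Usage sketch (illustrative only): control-path code typically reaches
 * dp_soc_set_param() through the cdp wrapper registered below as
 * dp_ops_cmn.set_soc_param; cdp_soc_set_param() is assumed to be that
 * wrapper's name, per the usual cdp convention:
 *
 *	cdp_soc_set_param(soc_hdl, DP_SOC_PARAM_MAX_AST_AGEOUT, 5);
 *	cdp_soc_set_param(soc_hdl, DP_SOC_PARAM_EAPOL_OVER_CONTROL_PORT, 1);
 *
 * Unhandled params are only logged, so callers need not feature-check
 * before setting a param that a given target may ignore.
 */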
12684 
12685 static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
12686 				      void *stats_ctx)
12687 {
12688 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12689 
12690 	soc->rate_stats_ctx = (struct cdp_soc_rate_stats_ctx *)stats_ctx;
12691 }
12692 
12693 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
12694 /**
12695  * dp_peer_flush_rate_stats_req(): Flush peer rate stats
12696  * @soc: Datapath SOC handle
12697  * @peer: Datapath peer
12698  * @arg: argument to iter function
12699  *
12700  * Return: void
12701  */
12702 static void
12703 dp_peer_flush_rate_stats_req(struct dp_soc *soc, struct dp_peer *peer,
12704 			     void *arg)
12705 {
12706 	if (peer->bss_peer)
12707 		return;
12708 
12709 	dp_wdi_event_handler(
12710 		WDI_EVENT_FLUSH_RATE_STATS_REQ,
12711 		soc, dp_monitor_peer_get_peerstats_ctx(soc, peer),
12712 		peer->peer_id,
12713 		WDI_NO_VAL, peer->vdev->pdev->pdev_id);
12714 }
12715 
12716 /**
12717  * dp_flush_rate_stats_req(): Flush peer rate stats in pdev
12718  * @soc_hdl: Datapath SOC handle
12719  * @pdev_id: pdev_id
12720  *
12721  * Return: QDF_STATUS
12722  */
12723 static QDF_STATUS dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
12724 					  uint8_t pdev_id)
12725 {
12726 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12727 	struct dp_pdev *pdev =
12728 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
12729 						   pdev_id);
12730 	if (!pdev)
12731 		return QDF_STATUS_E_FAILURE;
12732 
12733 	dp_pdev_iterate_peer(pdev, dp_peer_flush_rate_stats_req, NULL,
12734 			     DP_MOD_ID_CDP);
12735 
12736 	return QDF_STATUS_SUCCESS;
12737 }
12738 #else
12739 static inline QDF_STATUS
12740 dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
12741 			uint8_t pdev_id)
12742 {
12743 	return QDF_STATUS_SUCCESS;
12744 }
12745 #endif
12746 
12747 static void *dp_peer_get_peerstats_ctx(struct cdp_soc_t *soc_hdl,
12748 				       uint8_t vdev_id,
12749 				       uint8_t *mac_addr)
12750 {
12751 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12752 	struct dp_peer *peer;
12753 	void *peerstats_ctx = NULL;
12754 
12755 	if (mac_addr) {
12756 		peer = dp_peer_find_hash_find(soc, mac_addr,
12757 					      0, vdev_id,
12758 					      DP_MOD_ID_CDP);
12759 		if (!peer)
12760 			return NULL;
12761 
12762 		if (!IS_MLO_DP_MLD_PEER(peer))
12763 			peerstats_ctx = dp_monitor_peer_get_peerstats_ctx(soc,
12764 									  peer);
12765 
12766 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
12767 	}
12768 
12769 	return peerstats_ctx;
12770 }
12771 
12772 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
12773 static QDF_STATUS dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
12774 					   uint8_t pdev_id,
12775 					   void *buf)
12776 {
12777 	 dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS,
12778 			      (struct dp_soc *)soc, buf, HTT_INVALID_PEER,
12779 			      WDI_NO_VAL, pdev_id);
12780 	return QDF_STATUS_SUCCESS;
12781 }
12782 #else
12783 static inline QDF_STATUS
12784 dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
12785 			 uint8_t pdev_id,
12786 			 void *buf)
12787 {
12788 	return QDF_STATUS_SUCCESS;
12789 }
12790 #endif
12791 
12792 static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle)
12793 {
12794 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12795 
12796 	return soc->rate_stats_ctx;
12797 }
12798 
12799 /**
12800  * dp_get_cfg() - get dp cfg
12801  * @soc: cdp soc handle
12802  * @cfg: cfg enum
12803  *
12804  * Return: cfg value
12805  */
12806 static uint32_t dp_get_cfg(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg)
12807 {
12808 	struct dp_soc *dpsoc = (struct dp_soc *)soc;
12809 	uint32_t value = 0;
12810 
12811 	switch (cfg) {
12812 	case cfg_dp_enable_data_stall:
12813 		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
12814 		break;
12815 	case cfg_dp_enable_p2p_ip_tcp_udp_checksum_offload:
12816 		value = dpsoc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload;
12817 		break;
12818 	case cfg_dp_enable_nan_ip_tcp_udp_checksum_offload:
12819 		value = dpsoc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload;
12820 		break;
12821 	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
12822 		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
12823 		break;
12824 	case cfg_dp_disable_legacy_mode_csum_offload:
12825 		value = dpsoc->wlan_cfg_ctx->
12826 					legacy_mode_checksumoffload_disable;
12827 		break;
12828 	case cfg_dp_tso_enable:
12829 		value = dpsoc->wlan_cfg_ctx->tso_enabled;
12830 		break;
12831 	case cfg_dp_lro_enable:
12832 		value = dpsoc->wlan_cfg_ctx->lro_enabled;
12833 		break;
12834 	case cfg_dp_gro_enable:
12835 		value = dpsoc->wlan_cfg_ctx->gro_enabled;
12836 		break;
12837 	case cfg_dp_tc_based_dyn_gro_enable:
12838 		value = dpsoc->wlan_cfg_ctx->tc_based_dynamic_gro;
12839 		break;
12840 	case cfg_dp_tc_ingress_prio:
12841 		value = dpsoc->wlan_cfg_ctx->tc_ingress_prio;
12842 		break;
12843 	case cfg_dp_sg_enable:
12844 		value = dpsoc->wlan_cfg_ctx->sg_enabled;
12845 		break;
12846 	case cfg_dp_tx_flow_start_queue_offset:
12847 		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
12848 		break;
12849 	case cfg_dp_tx_flow_stop_queue_threshold:
12850 		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
12851 		break;
12852 	case cfg_dp_disable_intra_bss_fwd:
12853 		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
12854 		break;
12855 	case cfg_dp_pktlog_buffer_size:
12856 		value = dpsoc->wlan_cfg_ctx->pktlog_buffer_size;
12857 		break;
12858 	case cfg_dp_wow_check_rx_pending:
12859 		value = dpsoc->wlan_cfg_ctx->wow_check_rx_pending_enable;
12860 		break;
12861 	default:
12862 		value =  0;
12863 	}
12864 
12865 	return value;
12866 }
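/*
 * Usage sketch (illustrative only): dp_get_cfg() is exported below as
 * dp_ops_cmn.txrx_get_cfg, letting callers query individual knobs
 * without dereferencing wlan_cfg_ctx themselves. Assuming cdp_cfg_get()
 * is the corresponding cdp wrapper:
 *
 *	if (cdp_cfg_get(soc_hdl, cfg_dp_gro_enable))
 *		... enable GRO handling on the rx path ...
 *
 * Unknown enum values fall through to the default case and read as 0.
 */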
12867 
12868 #ifdef PEER_FLOW_CONTROL
12869 /**
12870  * dp_tx_flow_ctrl_configure_pdev() - Configure flow control params
12871  * @soc_handle: datapath soc handle
12872  * @pdev_id: id of datapath pdev handle
12873  * @param: ol ath params
12874  * @value: value of the flag
12875  * @buff: Buffer to be passed
12876  *
12877  * This function mirrors the legacy implementation, where a single function
12878  * is used both to display stats and to update pdev params.
12879  *
12880  * Return: 0 for success. nonzero for failure.
12881  */
12882 static uint32_t dp_tx_flow_ctrl_configure_pdev(struct cdp_soc_t *soc_handle,
12883 					       uint8_t pdev_id,
12884 					       enum _dp_param_t param,
12885 					       uint32_t value, void *buff)
12886 {
12887 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12888 	struct dp_pdev *pdev =
12889 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
12890 						   pdev_id);
12891 
12892 	if (qdf_unlikely(!pdev))
12893 		return 1;
12894 
12895 	soc = pdev->soc;
12896 	if (!soc)
12897 		return 1;
12898 
12899 	switch (param) {
12900 #ifdef QCA_ENH_V3_STATS_SUPPORT
12901 	case DP_PARAM_VIDEO_DELAY_STATS_FC:
12902 		if (value)
12903 			pdev->delay_stats_flag = true;
12904 		else
12905 			pdev->delay_stats_flag = false;
12906 		break;
12907 	case DP_PARAM_VIDEO_STATS_FC:
12908 		qdf_print("------- TID Stats ------\n");
12909 		dp_pdev_print_tid_stats(pdev);
12910 		qdf_print("------ Delay Stats ------\n");
12911 		dp_pdev_print_delay_stats(pdev);
12912 		qdf_print("------ Rx Error Stats ------\n");
12913 		dp_pdev_print_rx_error_stats(pdev);
12914 		break;
12915 #endif
12916 	case DP_PARAM_TOTAL_Q_SIZE:
12917 		{
12918 			uint32_t tx_min, tx_max;
12919 
12920 			tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx);
12921 			tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
12922 
12923 			if (!buff) {
12924 				if ((value >= tx_min) && (value <= tx_max)) {
12925 					pdev->num_tx_allowed = value;
12926 				} else {
12927 					dp_tx_info("%pK: Failed to update num_tx_allowed, Q_min = %d Q_max = %d",
12928 						   soc, tx_min, tx_max);
12929 					break;
12930 				}
12931 			} else {
12932 				*(int *)buff = pdev->num_tx_allowed;
12933 			}
12934 		}
12935 		break;
12936 	default:
12937 		dp_tx_info("%pK: not handled param %d ", soc, param);
12938 		break;
12939 	}
12940 
12941 	return 0;
12942 }
12943 #endif
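/*
 * Note on the get/set duality above (carried over from the legacy
 * implementation): for DP_PARAM_TOTAL_Q_SIZE the same entry point both
 * writes and reads, selected by @buff. A hypothetical caller:
 *
 *	int q_size;
 *
 *	dp_tx_flow_ctrl_configure_pdev(soc, 0, DP_PARAM_TOTAL_Q_SIZE,
 *				       1024, NULL);    (set, if within range)
 *	dp_tx_flow_ctrl_configure_pdev(soc, 0, DP_PARAM_TOTAL_Q_SIZE,
 *				       0, &q_size);    (get into q_size)
 */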
12944 
12945 /**
12946  * dp_set_pdev_pcp_tid_map_wifi3(): update pcp tid map in pdev
12947  * @psoc: dp soc handle
12948  * @pdev_id: id of DP_PDEV handle
12949  * @pcp: pcp value
12950  * @tid: tid value passed by the user
12951  *
12952  * Return: QDF_STATUS_SUCCESS on success
12953  */
12954 static QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
12955 						uint8_t pdev_id,
12956 						uint8_t pcp, uint8_t tid)
12957 {
12958 	struct dp_soc *soc = (struct dp_soc *)psoc;
12959 
12960 	soc->pcp_tid_map[pcp] = tid;
12961 
12962 	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
12963 	return QDF_STATUS_SUCCESS;
12964 }
12965 
12966 /**
12967  * dp_set_vdev_pcp_tid_map_wifi3(): update pcp tid map in vdev
12968  * @soc_hdl: DP soc handle
12969  * @vdev_id: id of DP_VDEV handle
12970  * @pcp: pcp value
12971  * @tid: tid value passed by the user
12972  *
12973  * Return: QDF_STATUS_SUCCESS on success
12974  */
12975 static QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc_hdl,
12976 						uint8_t vdev_id,
12977 						uint8_t pcp, uint8_t tid)
12978 {
12979 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12980 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12981 						     DP_MOD_ID_CDP);
12982 
12983 	if (!vdev)
12984 		return QDF_STATUS_E_FAILURE;
12985 
12986 	vdev->pcp_tid_map[pcp] = tid;
12987 
12988 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12989 	return QDF_STATUS_SUCCESS;
12990 }
12991 
12992 #if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
12993 static void dp_drain_txrx(struct cdp_soc_t *soc_handle)
12994 {
12995 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12996 	uint32_t cur_tx_limit, cur_rx_limit;
12997 	uint32_t budget = 0xffff;
12998 	uint32_t val;
12999 	int i;
13000 	int cpu = dp_srng_get_cpu();
13001 
13002 	cur_tx_limit = soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit;
13003 	cur_rx_limit = soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit;
13004 
13005 	/* Temporarily increase soft irq limits when going to drain
13006 	 * the UMAC/LMAC SRNGs and restore them after polling.
13007 	 * Although the budget is on the higher side, the TX/RX reaping loops
13008 	 * will not run for long, since both TX and RX are already suspended
13009 	 * by the time this API is called.
13010 	 */
13011 	dp_update_soft_irq_limits(soc, budget, budget);
13012 
13013 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
13014 		dp_service_srngs(&soc->intr_ctx[i], budget, cpu);
13015 
13016 	dp_update_soft_irq_limits(soc, cur_tx_limit, cur_rx_limit);
13017 
13018 	/* Do a dummy read at offset 0; this will ensure all
13019 	 * pending writes (HP/TP) are flushed before the read returns.
13020 	 */
13021 	val = HAL_REG_READ((struct hal_soc *)soc->hal_soc, 0);
13022 	dp_debug("Register value at offset 0: %u\n", val);
13023 }
13024 #endif
13025 
13026 #ifdef DP_UMAC_HW_RESET_SUPPORT
13027 /**
13028  * dp_reset_interrupt_ring_masks(): Reset interrupt ring masks
13029  * @soc: dp soc handle
13030  *
13031  * Return: void
13032  */
13033 static void dp_reset_interrupt_ring_masks(struct dp_soc *soc)
13034 {
13035 	struct dp_intr_bkp *intr_bkp;
13036 	struct dp_intr *intr_ctx;
13037 	int num_ctxt = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
13038 	int i;
13039 
13040 	intr_bkp =
13041 	(struct dp_intr_bkp *)qdf_mem_malloc_atomic(sizeof(struct dp_intr_bkp) *
13042 			num_ctxt);
13043 
13044 	qdf_assert_always(intr_bkp);
13045 
13046 	soc->umac_reset_ctx.intr_ctx_bkp = intr_bkp;
13047 	for (i = 0; i < num_ctxt; i++) {
13048 		intr_ctx = &soc->intr_ctx[i];
13049 
13050 		intr_bkp->tx_ring_mask = intr_ctx->tx_ring_mask;
13051 		intr_bkp->rx_ring_mask = intr_ctx->rx_ring_mask;
13052 		intr_bkp->rx_mon_ring_mask = intr_ctx->rx_mon_ring_mask;
13053 		intr_bkp->rx_err_ring_mask = intr_ctx->rx_err_ring_mask;
13054 		intr_bkp->rx_wbm_rel_ring_mask = intr_ctx->rx_wbm_rel_ring_mask;
13055 		intr_bkp->reo_status_ring_mask = intr_ctx->reo_status_ring_mask;
13056 		intr_bkp->rxdma2host_ring_mask = intr_ctx->rxdma2host_ring_mask;
13057 		intr_bkp->host2rxdma_ring_mask = intr_ctx->host2rxdma_ring_mask;
13058 		intr_bkp->host2rxdma_mon_ring_mask =
13059 					intr_ctx->host2rxdma_mon_ring_mask;
13060 		intr_bkp->tx_mon_ring_mask = intr_ctx->tx_mon_ring_mask;
13061 
13062 		intr_ctx->tx_ring_mask = 0;
13063 		intr_ctx->rx_ring_mask = 0;
13064 		intr_ctx->rx_mon_ring_mask = 0;
13065 		intr_ctx->rx_err_ring_mask = 0;
13066 		intr_ctx->rx_wbm_rel_ring_mask = 0;
13067 		intr_ctx->reo_status_ring_mask = 0;
13068 		intr_ctx->rxdma2host_ring_mask = 0;
13069 		intr_ctx->host2rxdma_ring_mask = 0;
13070 		intr_ctx->host2rxdma_mon_ring_mask = 0;
13071 		intr_ctx->tx_mon_ring_mask = 0;
13072 
13073 		intr_bkp++;
13074 	}
13075 }
13076 
13077 /**
13078  * dp_restore_interrupt_ring_masks(): Restore interrupt ring masks
13079  * @soc: dp soc handle
13080  *
13081  * Return: void
13082  */
13083 static void dp_restore_interrupt_ring_masks(struct dp_soc *soc)
13084 {
13085 	struct dp_intr_bkp *intr_bkp = soc->umac_reset_ctx.intr_ctx_bkp;
13086 	struct dp_intr_bkp *intr_bkp_base = intr_bkp;
13087 	struct dp_intr *intr_ctx;
13088 	int num_ctxt = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
13089 	int i;
13090 
13091 	qdf_assert_always(intr_bkp);
13092 
13093 	for (i = 0; i < num_ctxt; i++) {
13094 		intr_ctx = &soc->intr_ctx[i];
13095 
13096 		intr_ctx->tx_ring_mask = intr_bkp->tx_ring_mask;
13097 		intr_ctx->rx_ring_mask = intr_bkp->rx_ring_mask;
13098 		intr_ctx->rx_mon_ring_mask = intr_bkp->rx_mon_ring_mask;
13099 		intr_ctx->rx_err_ring_mask = intr_bkp->rx_err_ring_mask;
13100 		intr_ctx->rx_wbm_rel_ring_mask = intr_bkp->rx_wbm_rel_ring_mask;
13101 		intr_ctx->reo_status_ring_mask = intr_bkp->reo_status_ring_mask;
13102 		intr_ctx->rxdma2host_ring_mask = intr_bkp->rxdma2host_ring_mask;
13103 		intr_ctx->host2rxdma_ring_mask = intr_bkp->host2rxdma_ring_mask;
13104 		intr_ctx->host2rxdma_mon_ring_mask =
13105 			intr_bkp->host2rxdma_mon_ring_mask;
13106 		intr_ctx->tx_mon_ring_mask = intr_bkp->tx_mon_ring_mask;
13107 
13108 		intr_bkp++;
13109 	}
13110 
13111 	qdf_mem_free(intr_bkp_base);
13112 	soc->umac_reset_ctx.intr_ctx_bkp = NULL;
13113 }
13114 
13115 /**
13116  * dp_resume_tx_hardstart(): Restore the old Tx hardstart functions
13117  * @soc: dp soc handle
13118  *
13119  * Return: void
13120  */
13121 static void dp_resume_tx_hardstart(struct dp_soc *soc)
13122 {
13123 	struct dp_vdev *vdev;
13124 	struct ol_txrx_hardtart_ctxt ctxt = {0};
13125 	struct cdp_ctrl_objmgr_psoc *psoc = soc->ctrl_psoc;
13126 	int i;
13127 
13128 	for (i = 0; i < MAX_PDEV_CNT; i++) {
13129 		struct dp_pdev *pdev = soc->pdev_list[i];
13130 
13131 		if (!pdev)
13132 			continue;
13133 
13134 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
13135 			uint8_t vdev_id = vdev->vdev_id;
13136 
13137 			dp_vdev_fetch_tx_handler(vdev, soc, &ctxt);
13138 			soc->cdp_soc.ol_ops->dp_update_tx_hardstart(psoc,
13139 								    vdev_id,
13140 								    &ctxt);
13141 		}
13142 	}
13143 }
13144 
13145 /**
13146  * dp_pause_tx_hardstart(): Register Tx hardstart functions to drop packets
13147  * @soc: dp soc handle
13148  *
13149  * Return: void
13150  */
13151 static void dp_pause_tx_hardstart(struct dp_soc *soc)
13152 {
13153 	struct dp_vdev *vdev;
13154 	struct ol_txrx_hardtart_ctxt ctxt;
13155 	struct cdp_ctrl_objmgr_psoc *psoc = soc->ctrl_psoc;
13156 	int i;
13157 
13158 	ctxt.tx = &dp_tx_drop;
13159 	ctxt.tx_fast = &dp_tx_drop;
13160 	ctxt.tx_exception = &dp_tx_exc_drop;
13161 
13162 	for (i = 0; i < MAX_PDEV_CNT; i++) {
13163 		struct dp_pdev *pdev = soc->pdev_list[i];
13164 
13165 		if (!pdev)
13166 			continue;
13167 
13168 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
13169 			uint8_t vdev_id = vdev->vdev_id;
13170 
13171 			soc->cdp_soc.ol_ops->dp_update_tx_hardstart(psoc,
13172 								    vdev_id,
13173 								    &ctxt);
13174 		}
13175 	}
13176 }
13177 
13178 /**
13179  * dp_unregister_notify_umac_pre_reset_fw_callback(): unregister notify_fw_cb
13180  * @soc: dp soc handle
13181  *
13182  * Return: void
13183  */
13184 static inline
13185 void dp_unregister_notify_umac_pre_reset_fw_callback(struct dp_soc *soc)
13186 {
13187 	soc->notify_fw_callback = NULL;
13188 }
13189 
13190 /**
13191  * dp_check_n_notify_umac_prereset_done(): Send pre reset done to firmware
13192  * @soc: dp soc handle
13193  *
13194  * Return: void
13195  */
13196 static inline
13197 void dp_check_n_notify_umac_prereset_done(struct dp_soc *soc)
13198 {
13199 	/* Some CPU(s) are still processing the umac rings */
13200 	if (soc->service_rings_running)
13201 		return;
13202 
13203 	/* Notify the firmware that Umac pre reset is complete */
13204 	dp_umac_reset_notify_action_completion(soc,
13205 					       UMAC_RESET_ACTION_DO_PRE_RESET);
13206 
13207 	/* Unregister the callback */
13208 	dp_unregister_notify_umac_pre_reset_fw_callback(soc);
13209 }
13210 
13211 /**
13212  * dp_register_notify_umac_pre_reset_fw_callback(): register notify_fw_cb
13213  * @soc: dp soc handle
13214  *
13215  * Return: void
13216  */
13217 static inline
13218 void dp_register_notify_umac_pre_reset_fw_callback(struct dp_soc *soc)
13219 {
13220 	soc->notify_fw_callback = dp_check_n_notify_umac_prereset_done;
13221 }
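/*
 * Pre-reset notification flow (summary of the three helpers above): the
 * pre-reset handler registers dp_check_n_notify_umac_prereset_done() as
 * soc->notify_fw_callback. The callback is presumably invoked from the
 * srng service path; it bails out while soc->service_rings_running is
 * non-zero and, once no CPU is still reaping the umac rings, acks
 * UMAC_RESET_ACTION_DO_PRE_RESET to FW and unregisters itself so the
 * completion is reported exactly once.
 */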
13222 
13223 #ifdef DP_UMAC_HW_HARD_RESET
13224 /**
13225  * dp_set_umac_regs(): Reinitialize host umac registers
13226  * @soc: dp soc handle
13227  *
13228  * Return: void
13229  */
13230 static void dp_set_umac_regs(struct dp_soc *soc)
13231 {
13232 	int i, j;
13233 	struct hal_reo_params reo_params;
13234 
13235 	qdf_mem_zero(&reo_params, sizeof(reo_params));
13236 
13237 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
13238 		if (soc->arch_ops.reo_remap_config(soc, &reo_params.remap0,
13239 						   &reo_params.remap1,
13240 						   &reo_params.remap2))
13241 			reo_params.rx_hash_enabled = true;
13242 		else
13243 			reo_params.rx_hash_enabled = false;
13244 	}
13245 
13246 	hal_reo_setup(soc->hal_soc, &reo_params, 0);
13247 
13248 	soc->arch_ops.dp_cc_reg_cfg_init(soc, true);
13249 
13250 	for (i = 0; i < PCP_TID_MAP_MAX; i++)
13251 		hal_tx_update_pcp_tid_map(soc->hal_soc, soc->pcp_tid_map[i], i);
13252 
13253 	for (i = 0; i < MAX_PDEV_CNT; i++) {
13254 		struct dp_vdev *vdev = NULL;
13255 		struct dp_pdev *pdev = soc->pdev_list[i];
13256 
13257 		if (!pdev)
13258 			continue;
13259 
13260 		for (i = 0; i < soc->num_hw_dscp_tid_map; i++)
13261 			hal_tx_set_dscp_tid_map(soc->hal_soc,
13262 						pdev->dscp_tid_map[i], i);
13263 
13264 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
13265 			soc->arch_ops.dp_bank_reconfig(soc, vdev);
13266 			soc->arch_ops.dp_reconfig_tx_vdev_mcast_ctrl(soc,
13267 								      vdev);
13268 		}
13269 	}
13270 }
13271 #else
13272 static void dp_set_umac_regs(struct dp_soc *soc)
13273 {
13274 }
13275 #endif
13276 
13277 /**
13278  * dp_reinit_rings(): Reinitialize host managed rings
13279  * @soc: dp soc handle
13280  *
13281  * Return: void
13282  */
13283 static void dp_reinit_rings(struct dp_soc *soc)
13284 {
13285 	unsigned long end;
13286 
13287 	dp_soc_srng_deinit(soc);
13288 	dp_hw_link_desc_ring_deinit(soc);
13289 
13290 	/* Busy wait for 2 ms to make sure the rings are in idle state
13291 	 * before we enable them again
13292 	 */
13293 	end = jiffies + msecs_to_jiffies(2);
13294 	while (time_before(jiffies, end))
13295 		;
13296 
13297 	dp_hw_link_desc_ring_init(soc);
13298 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
13299 	dp_soc_srng_init(soc);
13300 }
13301 
13302 /**
13303  * dp_umac_reset_handle_pre_reset(): Handle Umac prereset interrupt from FW
13304  * @soc: dp soc handle
13305  *
13306  * Return: QDF_STATUS
13307  */
13308 static QDF_STATUS dp_umac_reset_handle_pre_reset(struct dp_soc *soc)
13309 {
13310 	dp_reset_interrupt_ring_masks(soc);
13311 
13312 	dp_pause_tx_hardstart(soc);
13313 	dp_pause_reo_send_cmd(soc);
13314 
13315 	dp_check_n_notify_umac_prereset_done(soc);
13316 
13317 	soc->umac_reset_ctx.nbuf_list = NULL;
13318 
13319 	return QDF_STATUS_SUCCESS;
13320 }
13321 
13322 /**
13323  * dp_umac_reset_handle_post_reset(): Handle Umac postreset interrupt from FW
13324  * @soc: dp soc handle
13325  *
13326  * Return: QDF_STATUS
13327  */
13328 static QDF_STATUS dp_umac_reset_handle_post_reset(struct dp_soc *soc)
13329 {
13330 	qdf_nbuf_t *nbuf_list = &soc->umac_reset_ctx.nbuf_list;
13331 
13332 	dp_set_umac_regs(soc);
13333 
13334 	dp_reinit_rings(soc);
13335 
13336 	dp_rx_desc_reuse(soc, nbuf_list);
13337 
13338 	dp_cleanup_reo_cmd_module(soc);
13339 
13340 	dp_tx_desc_pool_cleanup(soc, nbuf_list);
13341 
13342 	dp_reset_tid_q_setup(soc);
13343 
13344 	return dp_umac_reset_notify_action_completion(soc,
13345 					UMAC_RESET_ACTION_DO_POST_RESET_START);
13346 }
13347 
13348 /**
13349  * dp_umac_reset_handle_post_reset_complete(): Handle Umac postreset_complete
13350  *						interrupt from FW
13351  * @soc: dp soc handle
13352  *
13353  * Return: QDF_STATUS
13354  */
13355 static QDF_STATUS dp_umac_reset_handle_post_reset_complete(struct dp_soc *soc)
13356 {
13357 	QDF_STATUS status;
13358 	qdf_nbuf_t nbuf_list = soc->umac_reset_ctx.nbuf_list;
13359 
13360 	soc->umac_reset_ctx.nbuf_list = NULL;
13361 
13362 	dp_resume_reo_send_cmd(soc);
13363 
13364 	dp_restore_interrupt_ring_masks(soc);
13365 
13366 	dp_resume_tx_hardstart(soc);
13367 
13368 	status = dp_umac_reset_notify_action_completion(soc,
13369 				UMAC_RESET_ACTION_DO_POST_RESET_COMPLETE);
13370 
13371 	while (nbuf_list) {
13372 		qdf_nbuf_t nbuf = nbuf_list->next;
13373 
13374 		qdf_nbuf_free(nbuf_list);
13375 		nbuf_list = nbuf;
13376 	}
13377 
13378 	return status;
13379 }
13380 #endif
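/*
 * UMAC reset handshake, as wired up above (summary): FW interrupts drive
 * three stages in order:
 *   1. pre_reset: mask ring interrupts, swap in the packet-dropping
 *      hardstart, pause REO commands, then ack DO_PRE_RESET once the
 *      service rings go idle;
 *   2. post_reset: reprogram host umac registers, reinit the SRNGs,
 *      reuse rx descriptors and clean up tx pools into nbuf_list, then
 *      ack DO_POST_RESET_START;
 *   3. post_reset_complete: resume REO and tx, restore the interrupt
 *      masks, ack DO_POST_RESET_COMPLETE and free the deferred nbufs.
 */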
13381 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
13382 static void
13383 dp_set_pkt_capture_mode(struct cdp_soc_t *soc_handle, bool val)
13384 {
13385 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
13386 
13387 	soc->wlan_cfg_ctx->pkt_capture_mode = val;
13388 }
13389 #endif
13390 
13391 #ifdef HW_TX_DELAY_STATS_ENABLE
13392 /**
13393  * dp_enable_disable_vdev_tx_delay_stats(): Start/Stop tx delay stats capture
13394  * @soc_hdl: DP soc handle
13395  * @vdev_id: vdev id
13396  * @value: non-zero to enable, zero to disable
13397  *
13398  * Return: None
13399  */
13400 static void
13401 dp_enable_disable_vdev_tx_delay_stats(struct cdp_soc_t *soc_hdl,
13402 				      uint8_t vdev_id,
13403 				      uint8_t value)
13404 {
13405 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13406 	struct dp_vdev *vdev = NULL;
13407 
13408 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
13409 	if (!vdev)
13410 		return;
13411 
13412 	vdev->hw_tx_delay_stats_enabled = value;
13413 
13414 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
13415 }
13416 
13417 /**
13418  * dp_check_vdev_tx_delay_stats_enabled() - check whether the feature is enabled
13419  * @soc_hdl: DP soc handle
13420  * @vdev_id: vdev id
13421  *
13422  * Return: 1 if enabled, 0 if disabled
13423  */
13424 static uint8_t
13425 dp_check_vdev_tx_delay_stats_enabled(struct cdp_soc_t *soc_hdl,
13426 				     uint8_t vdev_id)
13427 {
13428 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13429 	struct dp_vdev *vdev;
13430 	uint8_t ret_val = 0;
13431 
13432 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
13433 	if (!vdev)
13434 		return ret_val;
13435 
13436 	ret_val = vdev->hw_tx_delay_stats_enabled;
13437 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
13438 
13439 	return ret_val;
13440 }
13441 #endif
13442 
13443 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
13444 static void
13445 dp_recovery_vdev_flush_peers(struct cdp_soc_t *cdp_soc,
13446 			     uint8_t vdev_id,
13447 			     bool mlo_peers_only)
13448 {
13449 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
13450 	struct dp_vdev *vdev;
13451 
13452 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
13453 
13454 	if (!vdev)
13455 		return;
13456 
13457 	dp_vdev_flush_peers((struct cdp_vdev *)vdev, false, mlo_peers_only);
13458 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
13459 }
13460 #endif
13461 
13462 static struct cdp_cmn_ops dp_ops_cmn = {
13463 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
13464 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
13465 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
13466 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
13467 	.txrx_pdev_post_attach = dp_pdev_post_attach_wifi3,
13468 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
13469 	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
13470 	.txrx_peer_create = dp_peer_create_wifi3,
13471 	.txrx_peer_setup = dp_peer_setup_wifi3,
13472 #ifdef FEATURE_AST
13473 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
13474 #else
13475 	.txrx_peer_teardown = NULL,
13476 #endif
13477 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
13478 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
13479 	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
13480 	.txrx_peer_get_ast_info_by_pdev =
13481 		dp_peer_get_ast_info_by_pdevid_wifi3,
13482 	.txrx_peer_ast_delete_by_soc =
13483 		dp_peer_ast_entry_del_by_soc,
13484 	.txrx_peer_ast_delete_by_pdev =
13485 		dp_peer_ast_entry_del_by_pdev,
13486 	.txrx_peer_delete = dp_peer_delete_wifi3,
13487 #ifdef DP_RX_UDP_OVER_PEER_ROAM
13488 	.txrx_update_roaming_peer = dp_update_roaming_peer_wifi3,
13489 #endif
13490 	.txrx_vdev_register = dp_vdev_register_wifi3,
13491 	.txrx_soc_detach = dp_soc_detach_wifi3,
13492 	.txrx_soc_deinit = dp_soc_deinit_wifi3,
13493 	.txrx_soc_init = dp_soc_init_wifi3,
13494 #ifndef QCA_HOST_MODE_WIFI_DISABLED
13495 	.txrx_tso_soc_attach = dp_tso_soc_attach,
13496 	.txrx_tso_soc_detach = dp_tso_soc_detach,
13497 	.tx_send = dp_tx_send,
13498 	.tx_send_exc = dp_tx_send_exception,
13499 #endif
13500 	.txrx_pdev_init = dp_pdev_init_wifi3,
13501 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
13502 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
13503 	.txrx_ath_getstats = dp_get_device_stats,
13504 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
13505 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
13506 	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
13507 	.delba_process = dp_delba_process_wifi3,
13508 	.set_addba_response = dp_set_addba_response,
13509 	.flush_cache_rx_queue = NULL,
13510 	.tid_update_ba_win_size = dp_rx_tid_update_ba_win_size,
13511 	/* TODO: get APIs for dscp-tid need to be added */
13512 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
13513 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
13514 	.txrx_get_total_per = dp_get_total_per,
13515 	.txrx_stats_request = dp_txrx_stats_request,
13516 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
13517 	.display_stats = dp_txrx_dump_stats,
13518 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
13519 	.txrx_intr_detach = dp_soc_interrupt_detach,
13520 	.set_pn_check = dp_set_pn_check_wifi3,
13521 	.set_key_sec_type = dp_set_key_sec_type_wifi3,
13522 	.update_config_parameters = dp_update_config_parameters,
13523 	/* TODO: Add other functions */
13524 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
13525 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
13526 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
13527 	.get_vdev_dp_ext_txrx_handle = dp_vdev_get_dp_ext_handle,
13528 	.set_vdev_dp_ext_txrx_handle = dp_vdev_set_dp_ext_handle,
13529 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
13530 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
13531 	.map_pdev_to_lmac = dp_soc_map_pdev_to_lmac,
13532 	.handle_mode_change = dp_soc_handle_pdev_mode_change,
13533 	.set_pdev_status_down = dp_soc_set_pdev_status_down,
13534 	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
13535 	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
13536 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
13537 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
13538 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
13539 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
13540 	.set_soc_param = dp_soc_set_param,
13541 	.txrx_get_os_rx_handles_from_vdev =
13542 					dp_get_os_rx_handles_from_vdev_wifi3,
13543 	.delba_tx_completion = dp_delba_tx_completion_wifi3,
13544 	.get_dp_capabilities = dp_get_cfg_capabilities,
13545 	.txrx_get_cfg = dp_get_cfg,
13546 	.set_rate_stats_ctx = dp_soc_set_rate_stats_ctx,
13547 	.get_rate_stats_ctx = dp_soc_get_rate_stats_ctx,
13548 	.txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats,
13549 	.txrx_flush_rate_stats_request = dp_flush_rate_stats_req,
13550 	.txrx_peer_get_peerstats_ctx = dp_peer_get_peerstats_ctx,
13551 
13552 	.set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3,
13553 	.set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3,
13554 
13555 	.txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler,
13556 #ifdef QCA_MULTIPASS_SUPPORT
13557 	.set_vlan_groupkey = dp_set_vlan_groupkey,
13558 #endif
13559 	.get_peer_mac_list = dp_get_peer_mac_list,
13560 	.get_peer_id = dp_get_peer_id,
13561 #ifdef QCA_SUPPORT_WDS_EXTENDED
13562 	.set_wds_ext_peer_rx = dp_wds_ext_set_peer_rx,
13563 #endif /* QCA_SUPPORT_WDS_EXTENDED */
13564 
13565 #if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
13566 	.txrx_drain = dp_drain_txrx,
13567 #endif
13568 #if defined(FEATURE_RUNTIME_PM)
13569 	.set_rtpm_tput_policy = dp_set_rtpm_tput_policy_requirement,
13570 #endif
13571 #ifdef WLAN_SYSFS_DP_STATS
13572 	.txrx_sysfs_fill_stats = dp_sysfs_fill_stats,
13573 	.txrx_sysfs_set_stat_type = dp_sysfs_set_stat_type,
13574 #endif /* WLAN_SYSFS_DP_STATS */
13575 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
13576 	.set_pkt_capture_mode = dp_set_pkt_capture_mode,
13577 #endif
13578 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
13579 	.txrx_recovery_vdev_flush_peers = dp_recovery_vdev_flush_peers,
13580 #endif
13581 	.txrx_umac_reset_deinit = dp_soc_umac_reset_deinit,
13582 };
13583 
13584 static struct cdp_ctrl_ops dp_ops_ctrl = {
13585 	.txrx_peer_authorize = dp_peer_authorize,
13586 	.txrx_peer_get_authorize = dp_peer_get_authorize,
13587 #ifdef VDEV_PEER_PROTOCOL_COUNT
13588 	.txrx_enable_peer_protocol_count = dp_enable_vdev_peer_protocol_count,
13589 	.txrx_set_peer_protocol_drop_mask =
13590 		dp_enable_vdev_peer_protocol_drop_mask,
13591 	.txrx_is_peer_protocol_count_enabled =
13592 		dp_is_vdev_peer_protocol_count_enabled,
13593 	.txrx_get_peer_protocol_drop_mask = dp_get_vdev_peer_protocol_drop_mask,
13594 #endif
13595 	.txrx_set_vdev_param = dp_set_vdev_param,
13596 	.txrx_set_psoc_param = dp_set_psoc_param,
13597 	.txrx_get_psoc_param = dp_get_psoc_param,
13598 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
13599 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
13600 	.txrx_get_sec_type = dp_get_sec_type,
13601 	.txrx_wdi_event_sub = dp_wdi_event_sub,
13602 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
13603 	.txrx_set_pdev_param = dp_set_pdev_param,
13604 	.txrx_get_pdev_param = dp_get_pdev_param,
13605 	.txrx_set_peer_param = dp_set_peer_param,
13606 	.txrx_get_peer_param = dp_get_peer_param,
13607 #ifdef VDEV_PEER_PROTOCOL_COUNT
13608 	.txrx_peer_protocol_cnt = dp_peer_stats_update_protocol_cnt,
13609 #endif
13610 #ifdef WLAN_SUPPORT_MSCS
13611 	.txrx_record_mscs_params = dp_record_mscs_params,
13612 #endif
13613 	.set_key = dp_set_michael_key,
13614 	.txrx_get_vdev_param = dp_get_vdev_param,
13615 	.calculate_delay_stats = dp_calculate_delay_stats,
13616 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
13617 	.txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag,
13618 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
13619 	.txrx_dump_pdev_rx_protocol_tag_stats =
13620 				dp_dump_pdev_rx_protocol_tag_stats,
13621 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
13622 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
13623 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
13624 	.txrx_set_rx_flow_tag = dp_set_rx_flow_tag,
13625 	.txrx_dump_rx_flow_tag_stats = dp_dump_rx_flow_tag_stats,
13626 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
13627 #ifdef QCA_MULTIPASS_SUPPORT
13628 	.txrx_peer_set_vlan_id = dp_peer_set_vlan_id,
13629 #endif /*QCA_MULTIPASS_SUPPORT*/
13630 #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
13631 	.txrx_set_delta_tsf = dp_set_delta_tsf,
13632 #endif
13633 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
13634 	.txrx_set_tsf_ul_delay_report = dp_set_tsf_ul_delay_report,
13635 	.txrx_get_uplink_delay = dp_get_uplink_delay,
13636 #endif
13637 #ifdef QCA_UNDECODED_METADATA_SUPPORT
13638 	.txrx_set_pdev_phyrx_error_mask = dp_set_pdev_phyrx_error_mask,
13639 	.txrx_get_pdev_phyrx_error_mask = dp_get_pdev_phyrx_error_mask,
13640 #endif
13641 	.txrx_peer_flush_frags = dp_peer_flush_frags,
13642 };
13643 
13644 static struct cdp_me_ops dp_ops_me = {
13645 #ifndef QCA_HOST_MODE_WIFI_DISABLED
13646 #ifdef ATH_SUPPORT_IQUE
13647 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
13648 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
13649 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
13650 #endif
13651 #endif
13652 };
13653 
13654 static struct cdp_host_stats_ops dp_ops_host_stats = {
13655 	.txrx_per_peer_stats = dp_get_host_peer_stats,
13656 	.get_fw_peer_stats = dp_get_fw_peer_stats,
13657 	.get_htt_stats = dp_get_htt_stats,
13658 	.txrx_stats_publish = dp_txrx_stats_publish,
13659 	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
13660 	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
13661 	.txrx_get_soc_stats = dp_txrx_get_soc_stats,
13662 	.txrx_get_peer_stats_param = dp_txrx_get_peer_stats_param,
13663 	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
13664 	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
13665 	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
13666 	.txrx_update_vdev_stats = dp_txrx_update_vdev_host_stats,
13667 	.txrx_get_peer_delay_stats = dp_txrx_get_peer_delay_stats,
13668 	.txrx_get_peer_jitter_stats = dp_txrx_get_peer_jitter_stats,
13669 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
13670 	.txrx_alloc_vdev_stats_id = dp_txrx_alloc_vdev_stats_id,
13671 	.txrx_reset_vdev_stats_id = dp_txrx_reset_vdev_stats_id,
13672 #endif
13673 #ifdef WLAN_TX_PKT_CAPTURE_ENH
13674 	.get_peer_tx_capture_stats = dp_peer_get_tx_capture_stats,
13675 	.get_pdev_tx_capture_stats = dp_pdev_get_tx_capture_stats,
13676 #endif /* WLAN_TX_PKT_CAPTURE_ENH */
13677 #ifdef HW_TX_DELAY_STATS_ENABLE
13678 	.enable_disable_vdev_tx_delay_stats =
13679 				dp_enable_disable_vdev_tx_delay_stats,
13680 	.is_tx_delay_stats_enabled = dp_check_vdev_tx_delay_stats_enabled,
13681 #endif
13682 	.txrx_get_pdev_tid_stats = dp_pdev_get_tid_stats,
13683 #ifdef WLAN_TELEMETRY_STATS_SUPPORT
13684 	.txrx_pdev_telemetry_stats = dp_get_pdev_telemetry_stats,
13685 	.txrx_peer_telemetry_stats = dp_get_peer_telemetry_stats,
13686 #endif
13687 	/* TODO */
13688 };
13689 
13690 static struct cdp_raw_ops dp_ops_raw = {
13691 	/* TODO */
13692 };
13693 
13694 #ifdef PEER_FLOW_CONTROL
13695 static struct cdp_pflow_ops dp_ops_pflow = {
13696 	dp_tx_flow_ctrl_configure_pdev,
13697 };
13698 #endif /* PEER_FLOW_CONTROL */
13699 
13700 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
13701 static struct cdp_cfr_ops dp_ops_cfr = {
13702 	.txrx_cfr_filter = NULL,
13703 	.txrx_get_cfr_rcc = dp_get_cfr_rcc,
13704 	.txrx_set_cfr_rcc = dp_set_cfr_rcc,
13705 	.txrx_get_cfr_dbg_stats = dp_get_cfr_dbg_stats,
13706 	.txrx_clear_cfr_dbg_stats = dp_clear_cfr_dbg_stats,
13707 };
13708 #endif
13709 
13710 #ifdef WLAN_SUPPORT_MSCS
13711 static struct cdp_mscs_ops dp_ops_mscs = {
13712 	.mscs_peer_lookup_n_get_priority = dp_mscs_peer_lookup_n_get_priority,
13713 };
13714 #endif
13715 
13716 #ifdef WLAN_SUPPORT_MESH_LATENCY
13717 static struct cdp_mesh_latency_ops dp_ops_mesh_latency = {
13718 	.mesh_latency_update_peer_parameter =
13719 		dp_mesh_latency_update_peer_parameter,
13720 };
13721 #endif
13722 
13723 #ifdef WLAN_SUPPORT_SCS
13724 static struct cdp_scs_ops dp_ops_scs = {
13725 	.scs_peer_lookup_n_rule_match = dp_scs_peer_lookup_n_rule_match,
13726 };
13727 #endif
13728 
13729 #ifdef CONFIG_SAWF_DEF_QUEUES
13730 static struct cdp_sawf_ops dp_ops_sawf = {
13731 	.sawf_def_queues_map_req = dp_sawf_def_queues_map_req,
13732 	.sawf_def_queues_unmap_req = dp_sawf_def_queues_unmap_req,
13733 	.sawf_def_queues_get_map_report =
13734 		dp_sawf_def_queues_get_map_report,
13735 #ifdef CONFIG_SAWF
13736 	.txrx_get_peer_sawf_delay_stats = dp_sawf_get_peer_delay_stats,
13737 	.txrx_get_peer_sawf_tx_stats = dp_sawf_get_peer_tx_stats,
13738 	.sawf_mpdu_stats_req = dp_sawf_mpdu_stats_req,
13739 	.sawf_mpdu_details_stats_req = dp_sawf_mpdu_details_stats_req,
13740 	.txrx_sawf_set_mov_avg_params = dp_sawf_set_mov_avg_params,
13741 	.txrx_sawf_set_sla_params = dp_sawf_set_sla_params,
13742 	.txrx_sawf_init_telemtery_params = dp_sawf_init_telemetry_params,
13743 	.telemetry_get_throughput_stats = dp_sawf_get_tx_stats,
13744 	.telemetry_get_mpdu_stats = dp_sawf_get_mpdu_sched_stats,
13745 	.telemetry_get_drop_stats = dp_sawf_get_drop_stats,
13746 #endif
13747 };
13748 #endif
13749 
13750 #if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
13751 /**
13752  * dp_flush_ring_hptp() - Update ring shadow register HP/TP address
13753  *			  when runtime resuming
13754  * @soc: DP soc context
13755  * @hal_srng: hal srng handle whose HP/TP is to be flushed
13756  *
13757  * Return: None
13758  */
13759 static
13760 void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
13761 {
13762 	if (hal_srng && hal_srng_get_clear_event(hal_srng,
13763 						 HAL_SRNG_FLUSH_EVENT)) {
13764 		/* Acquire the lock */
13765 		hal_srng_access_start(soc->hal_soc, hal_srng);
13766 
13767 		hal_srng_access_end(soc->hal_soc, hal_srng);
13768 
13769 		hal_srng_set_flush_last_ts(hal_srng);
13770 
13771 		dp_debug("flushed");
13772 	}
13773 }
13774 #endif
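/*
 * Usage note for dp_flush_ring_hptp(): callers first tag the ring with
 * hal_srng_set_event(..., HAL_SRNG_FLUSH_EVENT) (see dp_runtime_suspend()
 * below); the flush itself is an empty hal_srng_access_start()/end()
 * pair, whose access-end commits the cached HP/TP values out to the
 * shadow registers.
 */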
13775 
13776 #ifdef DP_TX_TRACKING
13777 
13778 #define DP_TX_COMP_MAX_LATENCY_MS 30000
13779 /**
13780  * dp_tx_comp_delay_check() - calculate time latency for tx completion per pkt
13781  * @tx_desc: tx descriptor
13782  *
13783  * Calculate time latency for tx completion per pkt and trigger self recovery
13784  * when the delay is more than threshold value.
13785  *
13786  * Return: True if delay is more than threshold
13787  */
13788 static bool dp_tx_comp_delay_check(struct dp_tx_desc_s *tx_desc)
13789 {
13790 	uint64_t time_latency, timestamp_tick = tx_desc->timestamp_tick;
13791 	qdf_ktime_t current_time = qdf_ktime_real_get();
13792 	qdf_ktime_t timestamp = tx_desc->timestamp;
13793 
13794 	if (!timestamp)
13795 		return false;
13796 
13797 	if (dp_tx_pkt_tracepoints_enabled()) {
13798 		time_latency = qdf_ktime_to_ms(current_time) -
13799 				qdf_ktime_to_ms(timestamp);
13800 		if (time_latency >= DP_TX_COMP_MAX_LATENCY_MS) {
13801 			dp_err_rl("enqueued: %llu ms, current : %llu ms",
13802 				  timestamp, current_time);
13803 			return true;
13804 		}
13805 	} else {
13806 		current_time = qdf_system_ticks();
13807 		time_latency = qdf_system_ticks_to_msecs(current_time -
13808 							 timestamp_tick);
13809 		if (time_latency >= DP_TX_COMP_MAX_LATENCY_MS) {
13810 			dp_err_rl("enqueued: %u ms, current : %u ms",
13811 				  qdf_system_ticks_to_msecs(timestamp),
13812 				  qdf_system_ticks_to_msecs(current_time));
13813 			return true;
13814 		}
13815 	}
13816 
13817 	return false;
13818 }
13819 
13820 #if defined(CONFIG_SLUB_DEBUG_ON)
13821 /**
13822  * dp_find_missing_tx_comp() - check for leaked descriptor in tx path
13823  * @soc: DP SOC context
13824  *
13825  * Parse through descriptors in all pools and validate magic number and
13826  * completion time. Trigger self recovery if magic value is corrupted.
13827  *
13828  * Return: None.
13829  */
13830 static void dp_find_missing_tx_comp(struct dp_soc *soc)
13831 {
13832 	uint8_t i;
13833 	uint32_t j;
13834 	uint32_t num_desc, page_id, offset;
13835 	uint16_t num_desc_per_page;
13836 	struct dp_tx_desc_s *tx_desc = NULL;
13837 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
13838 	bool send_fw_stats_cmd = false;
13839 	uint8_t vdev_id;
13840 
13841 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
13842 		tx_desc_pool = &soc->tx_desc[i];
13843 		if (!(tx_desc_pool->pool_size) ||
13844 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
13845 		    !(tx_desc_pool->desc_pages.cacheable_pages))
13846 			continue;
13847 
13848 		num_desc = tx_desc_pool->pool_size;
13849 		num_desc_per_page =
13850 			tx_desc_pool->desc_pages.num_element_per_page;
13851 		for (j = 0; j < num_desc; j++) {
13852 			page_id = j / num_desc_per_page;
13853 			offset = j % num_desc_per_page;
13854 
13855 			if (qdf_unlikely(!(tx_desc_pool->
13856 					 desc_pages.cacheable_pages)))
13857 				break;
13858 
13859 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
13860 			if (tx_desc->magic == DP_TX_MAGIC_PATTERN_FREE) {
13861 				continue;
13862 			} else if (tx_desc->magic ==
13863 				   DP_TX_MAGIC_PATTERN_INUSE) {
13864 				if (dp_tx_comp_delay_check(tx_desc)) {
13865 					dp_err_rl("Tx completion not rcvd for id: %u",
13866 						  tx_desc->id);
13867 
13868 					if (!send_fw_stats_cmd) {
13869 						send_fw_stats_cmd = true;
13870 						vdev_id = i;
13871 					}
13872 				}
13873 			} else {
13874 				dp_err_rl("tx desc %u corrupted, flags: 0x%x",
13875 				       tx_desc->id, tx_desc->flags);
13876 			}
13877 		}
13878 	}
13879 
13880 	/*
13881 	 * The unit test command to dump FW stats is required only once as the
13882 	 * stats are dumped at pdev level and not vdev level.
13883 	 */
13884 	if (send_fw_stats_cmd && soc->cdp_soc.ol_ops->dp_send_unit_test_cmd) {
13885 		uint32_t fw_stats_args[2] = {533, 1};
13886 
13887 		soc->cdp_soc.ol_ops->dp_send_unit_test_cmd(vdev_id,
13888 							   WLAN_MODULE_TX, 2,
13889 							   fw_stats_args);
13890 	}
13891 }
13892 #else
13893 static void dp_find_missing_tx_comp(struct dp_soc *soc)
13894 {
13895 	uint8_t i;
13896 	uint32_t j;
13897 	uint32_t num_desc, page_id, offset;
13898 	uint16_t num_desc_per_page;
13899 	struct dp_tx_desc_s *tx_desc = NULL;
13900 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
13901 
13902 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
13903 		tx_desc_pool = &soc->tx_desc[i];
13904 		if (!(tx_desc_pool->pool_size) ||
13905 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
13906 		    !(tx_desc_pool->desc_pages.cacheable_pages))
13907 			continue;
13908 
13909 		num_desc = tx_desc_pool->pool_size;
13910 		num_desc_per_page =
13911 			tx_desc_pool->desc_pages.num_element_per_page;
13912 		for (j = 0; j < num_desc; j++) {
13913 			page_id = j / num_desc_per_page;
13914 			offset = j % num_desc_per_page;
13915 
13916 			if (qdf_unlikely(!(tx_desc_pool->
13917 					 desc_pages.cacheable_pages)))
13918 				break;
13919 
13920 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
13921 			if (tx_desc->magic == DP_TX_MAGIC_PATTERN_FREE) {
13922 				continue;
13923 			} else if (tx_desc->magic ==
13924 				   DP_TX_MAGIC_PATTERN_INUSE) {
13925 				if (dp_tx_comp_delay_check(tx_desc)) {
13926 					dp_err_rl("Tx completion not rcvd for id: %u",
13927 						  tx_desc->id);
13928 					if (tx_desc->vdev_id == DP_INVALID_VDEV_ID) {
13929 						tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
13930 						dp_tx_comp_free_buf(soc,
13931 								    tx_desc,
13932 								    false);
13933 						dp_tx_desc_release(tx_desc, i);
13934 						DP_STATS_INC(soc,
13935 							     tx.tx_comp_force_freed, 1);
13936 						dp_err_rl("Tx completion force freed");
13937 					}
13938 				}
13939 			} else {
13940 				dp_err_rl("tx desc %u corrupted, flags: 0x%x",
13941 					  tx_desc->id, tx_desc->flags);
13942 			}
13943 		}
13944 	}
13945 }
13946 #endif /* CONFIG_SLUB_DEBUG_ON */
13947 #else
13948 static inline void dp_find_missing_tx_comp(struct dp_soc *soc)
13949 {
13950 }
13951 #endif
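/*
 * Tx tracking summary: descriptors carry a magic pattern (FREE vs
 * INUSE), and dp_find_missing_tx_comp() walks every page of every pool
 * flagging INUSE descriptors older than DP_TX_COMP_MAX_LATENCY_MS
 * (30 s). With SLUB debug on, it only requests an FW stats dump for
 * offline analysis; otherwise descriptors with no owning vdev are
 * force-freed and counted in tx.tx_comp_force_freed.
 */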
13952 
13953 #ifdef FEATURE_RUNTIME_PM
13954 /**
13955  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
13956  * @soc_hdl: Datapath soc handle
13957  * @pdev_id: id of data path pdev handle
13958  *
13959  * DP is ready to runtime suspend if there are no pending TX packets.
13960  *
13961  * Return: QDF_STATUS
13962  */
13963 static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
13964 {
13965 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13966 	struct dp_pdev *pdev;
13967 	uint8_t i;
13968 	int32_t tx_pending;
13969 
13970 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13971 	if (!pdev) {
13972 		dp_err("pdev is NULL");
13973 		return QDF_STATUS_E_INVAL;
13974 	}
13975 
13976 	/* Abort if there are any pending TX packets */
13977 	tx_pending = dp_get_tx_pending(dp_pdev_to_cdp_pdev(pdev));
13978 	if (tx_pending) {
13979 		dp_info_rl("%pK: Abort suspend due to pending TX packets %d",
13980 			   soc, tx_pending);
13981 		dp_find_missing_tx_comp(soc);
13982 		/* perform a force flush if tx is pending */
13983 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
13984 			hal_srng_set_event(soc->tcl_data_ring[i].hal_srng,
13985 					   HAL_SRNG_FLUSH_EVENT);
13986 			dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
13987 		}
13988 		qdf_atomic_set(&soc->tx_pending_rtpm, 0);
13989 
13990 		return QDF_STATUS_E_AGAIN;
13991 	}
13992 
13993 	if (dp_runtime_get_refcount(soc)) {
13994 		dp_init_info("refcount: %d", dp_runtime_get_refcount(soc));
13995 
13996 		return QDF_STATUS_E_AGAIN;
13997 	}
13998 
13999 	if (soc->intr_mode == DP_INTR_POLL)
14000 		qdf_timer_stop(&soc->int_timer);
14001 
14002 	dp_rx_fst_update_pm_suspend_status(soc, true);
14003 
14004 	return QDF_STATUS_SUCCESS;
14005 }
14006 
14007 #define DP_FLUSH_WAIT_CNT 10
14008 #define DP_RUNTIME_SUSPEND_WAIT_MS 10
14009 /**
14010  * dp_runtime_resume() - ensure DP is ready to runtime resume
14011  * @soc_hdl: Datapath soc handle
14012  * @pdev_id: id of data path pdev handle
14013  *
14014  * Resume DP for runtime PM.
14015  *
14016  * Return: QDF_STATUS
14017  */
14018 static QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
14019 {
14020 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14021 	int i, suspend_wait = 0;
14022 
14023 	if (soc->intr_mode == DP_INTR_POLL)
14024 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
14025 
14026 	/*
14027 	 * Wait until dp runtime refcount becomes zero or time out, then flush
14028 	 * pending tx for runtime suspend.
14029 	 */
14030 	while (dp_runtime_get_refcount(soc) &&
14031 	       suspend_wait < DP_FLUSH_WAIT_CNT) {
14032 		qdf_sleep(DP_RUNTIME_SUSPEND_WAIT_MS);
14033 		suspend_wait++;
14034 	}
14035 
14036 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
14037 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
14038 	}
14039 	qdf_atomic_set(&soc->tx_pending_rtpm, 0);
14040 
14041 	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
14042 	dp_rx_fst_update_pm_suspend_status(soc, false);
14043 
14044 	return QDF_STATUS_SUCCESS;
14045 }
14046 #endif /* FEATURE_RUNTIME_PM */
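/*
 * Runtime PM contract (sketch of the two entry points above): suspend
 * fails fast with QDF_STATUS_E_AGAIN while tx completions are pending
 * or DP references are held, after kicking a ring flush so that a later
 * retry can succeed; resume restarts the poll timer where applicable,
 * briefly waits out in-flight references, and flushes the TCL data and
 * REO command rings' HP/TP so the target sees anything queued while
 * suspended.
 */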
14047 
14048 /**
14049  * dp_tx_get_success_ack_stats() - get tx success completion count
14050  * @soc_hdl: Datapath soc handle
14051  * @vdev_id: vdev identifier
14052  *
14053  * Return: tx success ack count
14054  */
14055 static uint32_t dp_tx_get_success_ack_stats(struct cdp_soc_t *soc_hdl,
14056 					    uint8_t vdev_id)
14057 {
14058 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14059 	struct cdp_vdev_stats *vdev_stats = NULL;
14060 	uint32_t tx_success;
14061 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
14062 						     DP_MOD_ID_CDP);
14063 
14064 	if (!vdev) {
14065 		dp_cdp_err("%pK: Invalid vdev id %d", soc, vdev_id);
14066 		return 0;
14067 	}
14068 
14069 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
14070 	if (!vdev_stats) {
14071 		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats", soc);
14072 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
14073 		return 0;
14074 	}
14075 
14076 	dp_aggregate_vdev_stats(vdev, vdev_stats);
14077 
14078 	tx_success = vdev_stats->tx.tx_success.num;
14079 	qdf_mem_free(vdev_stats);
14080 
14081 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
14082 	return tx_success;
14083 }
14084 
14085 #ifdef WLAN_SUPPORT_DATA_STALL
14086 /**
14087  * dp_register_data_stall_detect_cb() - register data stall callback
14088  * @soc_hdl: Datapath soc handle
14089  * @pdev_id: id of data path pdev handle
14090  * @data_stall_detect_callback: data stall callback function
14091  *
14092  * Return: QDF_STATUS Enumeration
14093  */
14094 static
14095 QDF_STATUS dp_register_data_stall_detect_cb(
14096 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
14097 			data_stall_detect_cb data_stall_detect_callback)
14098 {
14099 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14100 	struct dp_pdev *pdev;
14101 
14102 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14103 	if (!pdev) {
14104 		dp_err("pdev NULL!");
14105 		return QDF_STATUS_E_INVAL;
14106 	}
14107 
14108 	pdev->data_stall_detect_callback = data_stall_detect_callback;
14109 	return QDF_STATUS_SUCCESS;
14110 }
14111 
14112 /**
14113  * dp_deregister_data_stall_detect_cb() - de-register data stall callback
14114  * @soc_hdl: Datapath soc handle
14115  * @pdev_id: id of data path pdev handle
14116  * @data_stall_detect_callback: data stall callback function
14117  *
14118  * Return: QDF_STATUS Enumeration
14119  */
14120 static
14121 QDF_STATUS dp_deregister_data_stall_detect_cb(
14122 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
14123 			data_stall_detect_cb data_stall_detect_callback)
14124 {
14125 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14126 	struct dp_pdev *pdev;
14127 
14128 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14129 	if (!pdev) {
14130 		dp_err("pdev NULL!");
14131 		return QDF_STATUS_E_INVAL;
14132 	}
14133 
14134 	pdev->data_stall_detect_callback = NULL;
14135 	return QDF_STATUS_SUCCESS;
14136 }
14137 
14138 /**
14139  * dp_txrx_post_data_stall_event() - post data stall event
14140  * @soc_hdl: Datapath soc handle
14141  * @indicator: Module triggering data stall
14142  * @data_stall_type: data stall event type
14143  * @pdev_id: pdev id
14144  * @vdev_id_bitmap: vdev id bitmap
14145  * @recovery_type: data stall recovery type
14146  *
14147  * Return: None
14148  */
14149 static void
14150 dp_txrx_post_data_stall_event(struct cdp_soc_t *soc_hdl,
14151 			      enum data_stall_log_event_indicator indicator,
14152 			      enum data_stall_log_event_type data_stall_type,
14153 			      uint32_t pdev_id, uint32_t vdev_id_bitmap,
14154 			      enum data_stall_log_recovery_type recovery_type)
14155 {
14156 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14157 	struct data_stall_event_info data_stall_info;
14158 	struct dp_pdev *pdev;
14159 
14160 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14161 	if (!pdev) {
14162 		dp_err("pdev NULL!");
14163 		return;
14164 	}
14165 
14166 	if (!pdev->data_stall_detect_callback) {
14167 		dp_err("data stall cb not registered!");
14168 		return;
14169 	}
14170 
14171 	dp_info("data_stall_type: %x pdev_id: %d",
14172 		data_stall_type, pdev_id);
14173 
14174 	data_stall_info.indicator = indicator;
14175 	data_stall_info.data_stall_type = data_stall_type;
14176 	data_stall_info.vdev_id_bitmap = vdev_id_bitmap;
14177 	data_stall_info.pdev_id = pdev_id;
14178 	data_stall_info.recovery_type = recovery_type;
14179 
14180 	pdev->data_stall_detect_callback(&data_stall_info);
14181 }
14182 #endif /* WLAN_SUPPORT_DATA_STALL */
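
/*
 * Illustrative caller-side flow for the data stall hooks above (names
 * such as my_stall_cb are assumptions, not driver symbols): a detection
 * module first registers its handler and later reports a stall, e.g.
 *
 *	cdp_soc->ops->misc_ops->txrx_data_stall_cb_register(cdp_soc, pdev_id,
 *							    my_stall_cb);
 *	...
 *	cdp_soc->ops->misc_ops->txrx_post_data_stall_event(cdp_soc, indicator,
 *			stall_type, pdev_id, vdev_id_bitmap, recovery_type);
 *
 * dp_txrx_post_data_stall_event() then packs the arguments into a
 * struct data_stall_event_info and invokes the registered callback.
 */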
14183 
14184 #ifdef WLAN_FEATURE_STATS_EXT
14185 /* rx hw stats event wait timeout in ms */
14186 #define DP_REO_STATUS_STATS_TIMEOUT 1500
14187 /**
 * dp_txrx_ext_stats_request() - request dp txrx extended stats
14189  * @soc_hdl: soc handle
14190  * @pdev_id: pdev id
14191  * @req: stats request
14192  *
14193  * Return: QDF_STATUS
14194  */
14195 static QDF_STATUS
14196 dp_txrx_ext_stats_request(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
14197 			  struct cdp_txrx_ext_stats *req)
14198 {
14199 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
14200 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14201 	int i = 0;
14202 	int tcl_ring_full = 0;
14203 
14204 	if (!pdev) {
14205 		dp_err("pdev is null");
14206 		return QDF_STATUS_E_INVAL;
14207 	}
14208 
14209 	dp_aggregate_pdev_stats(pdev);
14210 
	for (i = 0; i < MAX_TCL_DATA_RINGS; i++)
14212 		tcl_ring_full += soc->stats.tx.tcl_ring_full[i];
14213 
14214 	req->tx_msdu_enqueue = pdev->stats.tx_i.processed.num;
14215 	req->tx_msdu_overflow = tcl_ring_full;
14216 	req->rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
14217 	req->rx_mpdu_delivered = soc->ext_stats.rx_mpdu_received;
14218 	req->rx_mpdu_missed = pdev->stats.err.reo_error;
14219 	/* only count error source from RXDMA */
14220 	req->rx_mpdu_error = pdev->stats.err.rxdma_error;
14221 
14222 	dp_info("ext stats: tx_msdu_enq = %u, tx_msdu_overflow = %u, "
14223 		"tx_mpdu_recieve = %u, rx_mpdu_delivered = %u, "
14224 		"rx_mpdu_missed = %u, rx_mpdu_error = %u",
14225 		req->tx_msdu_enqueue,
14226 		req->tx_msdu_overflow,
14227 		req->rx_mpdu_received,
14228 		req->rx_mpdu_delivered,
14229 		req->rx_mpdu_missed,
14230 		req->rx_mpdu_error);
14231 
14232 	return QDF_STATUS_SUCCESS;
14233 }
14234 
14235 /**
 * dp_rx_hw_stats_cb() - response callback for an rx hw stats request
14237  * @soc: soc handle
14238  * @cb_ctxt: callback context
14239  * @reo_status: reo command response status
14240  *
14241  * Return: None
14242  */
14243 static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
14244 			      union hal_reo_status *reo_status)
14245 {
14246 	struct dp_req_rx_hw_stats_t *rx_hw_stats = cb_ctxt;
14247 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
14248 	bool is_query_timeout;
14249 
14250 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
14251 	is_query_timeout = rx_hw_stats->is_query_timeout;
	/* free the cb_ctxt once all pending tid stats queries are received */
14253 	if (qdf_atomic_dec_and_test(&rx_hw_stats->pending_tid_stats_cnt)) {
14254 		if (!is_query_timeout) {
14255 			qdf_event_set(&soc->rx_hw_stats_event);
14256 			soc->is_last_stats_ctx_init = false;
14257 		}
14258 
14259 		qdf_mem_free(rx_hw_stats);
14260 	}
14261 
14262 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
14263 		dp_info("REO stats failure %d",
14264 			queue_status->header.status);
14265 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14266 		return;
14267 	}
14268 
14269 	if (!is_query_timeout) {
14270 		soc->ext_stats.rx_mpdu_received +=
14271 					queue_status->mpdu_frms_cnt;
14272 		soc->ext_stats.rx_mpdu_missed +=
14273 					queue_status->hole_cnt;
14274 	}
14275 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14276 }
14277 
14278 /**
 * dp_request_rx_hw_stats() - request rx hardware stats
 * @soc_hdl: soc handle
 * @vdev_id: vdev id
 *
 * Return: QDF_STATUS
14284  */
14285 static QDF_STATUS
14286 dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
14287 {
14288 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
14289 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
14290 						     DP_MOD_ID_CDP);
14291 	struct dp_peer *peer = NULL;
14292 	QDF_STATUS status;
14293 	struct dp_req_rx_hw_stats_t *rx_hw_stats;
14294 	int rx_stats_sent_cnt = 0;
14295 	uint32_t last_rx_mpdu_received;
14296 	uint32_t last_rx_mpdu_missed;
14297 
14298 	if (!vdev) {
14299 		dp_err("vdev is null for vdev_id: %u", vdev_id);
14300 		status = QDF_STATUS_E_INVAL;
14301 		goto out;
14302 	}
14303 
14304 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_CDP);
14305 
14306 	if (!peer) {
14307 		dp_err("Peer is NULL");
14308 		status = QDF_STATUS_E_INVAL;
14309 		goto out;
14310 	}
14311 
14312 	rx_hw_stats = qdf_mem_malloc(sizeof(*rx_hw_stats));
14313 
14314 	if (!rx_hw_stats) {
14315 		dp_err("malloc failed for hw stats structure");
		status = QDF_STATUS_E_NOMEM;
14317 		goto out;
14318 	}
14319 
14320 	qdf_event_reset(&soc->rx_hw_stats_event);
14321 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
14322 	/* save the last soc cumulative stats and reset it to 0 */
14323 	last_rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
14324 	last_rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
	soc->ext_stats.rx_mpdu_received = 0;
	soc->ext_stats.rx_mpdu_missed = 0;
14326 
14327 	rx_stats_sent_cnt =
14328 		dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, rx_hw_stats);
14329 	if (!rx_stats_sent_cnt) {
14330 		dp_err("no tid stats sent successfully");
14331 		qdf_mem_free(rx_hw_stats);
14332 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14333 		status = QDF_STATUS_E_INVAL;
14334 		goto out;
14335 	}
14336 	qdf_atomic_set(&rx_hw_stats->pending_tid_stats_cnt,
14337 		       rx_stats_sent_cnt);
14338 	rx_hw_stats->is_query_timeout = false;
14339 	soc->is_last_stats_ctx_init = true;
14340 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14341 
14342 	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
14343 				       DP_REO_STATUS_STATS_TIMEOUT);
14344 
14345 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
14346 	if (status != QDF_STATUS_SUCCESS) {
14347 		dp_info("rx hw stats event timeout");
14348 		if (soc->is_last_stats_ctx_init)
14349 			rx_hw_stats->is_query_timeout = true;
14350 		/**
14351 		 * If query timeout happened, use the last saved stats
14352 		 * for this time query.
14353 		 */
14354 		soc->ext_stats.rx_mpdu_received = last_rx_mpdu_received;
14355 		soc->ext_stats.rx_mpdu_missed = last_rx_mpdu_missed;
14356 	}
14357 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14358 
14359 out:
14360 	if (peer)
14361 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
14362 	if (vdev)
14363 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
14364 
14365 	return status;
14366 }
14367 
14368 /**
 * dp_reset_rx_hw_ext_stats() - Reset rx hardware ext stats
14370  * @soc_hdl: soc handle
14371  *
14372  * Return: None
14373  */
14374 static
14375 void dp_reset_rx_hw_ext_stats(struct cdp_soc_t *soc_hdl)
14376 {
14377 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
14378 
14379 	soc->ext_stats.rx_mpdu_received = 0;
14380 	soc->ext_stats.rx_mpdu_missed = 0;
14381 }
14382 #endif /* WLAN_FEATURE_STATS_EXT */
14383 
14384 static
14385 uint32_t dp_get_tx_rings_grp_bitmap(struct cdp_soc_t *soc_hdl)
14386 {
14387 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
14388 
14389 	return soc->wlan_cfg_ctx->tx_rings_grp_bitmap;
14390 }
14391 
14392 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
14393 /**
14394  * dp_mark_first_wakeup_packet() - set flag to indicate that
 *    fw supports marking the first packet after wow wakeup
14396  * @soc_hdl: Datapath soc handle
14397  * @pdev_id: id of data path pdev handle
 * @value: 1 for enabled / 0 for disabled
14399  *
14400  * Return: None
14401  */
14402 static void dp_mark_first_wakeup_packet(struct cdp_soc_t *soc_hdl,
14403 					uint8_t pdev_id, uint8_t value)
14404 {
14405 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14406 	struct dp_pdev *pdev;
14407 
14408 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14409 	if (!pdev) {
14410 		dp_err("pdev is NULL");
14411 		return;
14412 	}
14413 
14414 	pdev->is_first_wakeup_packet = value;
14415 }
14416 #endif
14417 
14418 #ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
14419 /**
14420  * dp_set_peer_txq_flush_config() - Set the peer txq flush configuration
14421  * @soc_hdl: Opaque handle to the DP soc object
14422  * @vdev_id: VDEV identifier
14423  * @mac: MAC address of the peer
14424  * @ac: access category mask
14425  * @tid: TID mask
14426  * @policy: Flush policy
14427  *
14428  * Return: 0 on success, errno on failure
14429  */
14430 static int dp_set_peer_txq_flush_config(struct cdp_soc_t *soc_hdl,
14431 					uint8_t vdev_id, uint8_t *mac,
14432 					uint8_t ac, uint32_t tid,
14433 					enum cdp_peer_txq_flush_policy policy)
14434 {
14435 	struct dp_soc *soc;
14436 
14437 	if (!soc_hdl) {
14438 		dp_err("soc is null");
14439 		return -EINVAL;
14440 	}
14441 	soc = cdp_soc_t_to_dp_soc(soc_hdl);
14442 	return target_if_peer_txq_flush_config(soc->ctrl_psoc, vdev_id,
14443 					       mac, ac, tid, policy);
14444 }
14445 #endif
14446 
14447 #ifdef CONNECTIVITY_PKTLOG
14448 /**
14449  * dp_register_packetdump_callback() - registers
14450  *  tx data packet, tx mgmt. packet and rx data packet
14451  *  dump callback handler.
14452  *
14453  * @soc_hdl: Datapath soc handle
14454  * @pdev_id: id of data path pdev handle
14455  * @dp_tx_packetdump_cb: tx packetdump cb
14456  * @dp_rx_packetdump_cb: rx packetdump cb
14457  *
14458  * This function is used to register tx data pkt, tx mgmt.
14459  * pkt and rx data pkt dump callback
14460  *
14461  * Return: None
14462  *
14463  */
14464 static inline
14465 void dp_register_packetdump_callback(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
14466 				     ol_txrx_pktdump_cb dp_tx_packetdump_cb,
14467 				     ol_txrx_pktdump_cb dp_rx_packetdump_cb)
14468 {
14469 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14470 	struct dp_pdev *pdev;
14471 
14472 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14473 	if (!pdev) {
14474 		dp_err("pdev is NULL!");
14475 		return;
14476 	}
14477 
14478 	pdev->dp_tx_packetdump_cb = dp_tx_packetdump_cb;
14479 	pdev->dp_rx_packetdump_cb = dp_rx_packetdump_cb;
14480 }
14481 
14482 /**
 * dp_deregister_packetdump_callback() - deregisters
14484  *  tx data packet, tx mgmt. packet and rx data packet
14485  *  dump callback handler
14486  * @soc_hdl: Datapath soc handle
14487  * @pdev_id: id of data path pdev handle
14488  *
 * This function is used to deregister tx data pkt,
 * tx mgmt. pkt and rx data pkt dump callbacks
14491  *
14492  * Return: None
14493  *
14494  */
14495 static inline
14496 void dp_deregister_packetdump_callback(struct cdp_soc_t *soc_hdl,
14497 				       uint8_t pdev_id)
14498 {
14499 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14500 	struct dp_pdev *pdev;
14501 
14502 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14503 	if (!pdev) {
14504 		dp_err("pdev is NULL!");
14505 		return;
14506 	}
14507 
14508 	pdev->dp_tx_packetdump_cb = NULL;
14509 	pdev->dp_rx_packetdump_cb = NULL;
14510 }
14511 #endif
14512 
14513 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
14514 /**
14515  * dp_set_bus_vote_lvl_high() - Take a vote on bus bandwidth from dp
14516  * @soc_hdl: Datapath soc handle
14517  * @high: whether the bus bw is high or not
14518  *
14519  * Return: void
14520  */
14521 static void
14522 dp_set_bus_vote_lvl_high(ol_txrx_soc_handle soc_hdl, bool high)
14523 {
14524 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14525 
14526 	soc->high_throughput = high;
14527 }
14528 
14529 /**
 * dp_get_bus_vote_lvl_high() - get the bus bandwidth vote level from dp
14531  * @soc_hdl: Datapath soc handle
14532  *
14533  * Return: bool
14534  */
14535 static bool
14536 dp_get_bus_vote_lvl_high(ol_txrx_soc_handle soc_hdl)
14537 {
14538 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14539 
14540 	return soc->high_throughput;
14541 }
14542 #endif
14543 
14544 #ifdef DP_PEER_EXTENDED_API
14545 static struct cdp_misc_ops dp_ops_misc = {
14546 #ifdef FEATURE_WLAN_TDLS
14547 	.tx_non_std = dp_tx_non_std,
14548 #endif /* FEATURE_WLAN_TDLS */
14549 	.get_opmode = dp_get_opmode,
14550 #ifdef FEATURE_RUNTIME_PM
14551 	.runtime_suspend = dp_runtime_suspend,
14552 	.runtime_resume = dp_runtime_resume,
14553 #endif /* FEATURE_RUNTIME_PM */
14554 	.get_num_rx_contexts = dp_get_num_rx_contexts,
14555 	.get_tx_ack_stats = dp_tx_get_success_ack_stats,
14556 #ifdef WLAN_SUPPORT_DATA_STALL
14557 	.txrx_data_stall_cb_register = dp_register_data_stall_detect_cb,
14558 	.txrx_data_stall_cb_deregister = dp_deregister_data_stall_detect_cb,
14559 	.txrx_post_data_stall_event = dp_txrx_post_data_stall_event,
14560 #endif
14561 
14562 #ifdef WLAN_FEATURE_STATS_EXT
14563 	.txrx_ext_stats_request = dp_txrx_ext_stats_request,
14564 	.request_rx_hw_stats = dp_request_rx_hw_stats,
14565 	.reset_rx_hw_ext_stats = dp_reset_rx_hw_ext_stats,
14566 #endif /* WLAN_FEATURE_STATS_EXT */
14567 	.vdev_inform_ll_conn = dp_vdev_inform_ll_conn,
14568 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
14569 	.set_swlm_enable = dp_soc_set_swlm_enable,
14570 	.is_swlm_enabled = dp_soc_is_swlm_enabled,
14571 #endif
14572 	.display_txrx_hw_info = dp_display_srng_info,
14573 	.get_tx_rings_grp_bitmap = dp_get_tx_rings_grp_bitmap,
14574 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
14575 	.mark_first_wakeup_packet = dp_mark_first_wakeup_packet,
14576 #endif
14577 #ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
14578 	.set_peer_txq_flush_config = dp_set_peer_txq_flush_config,
14579 #endif
14580 #ifdef CONNECTIVITY_PKTLOG
14581 	.register_pktdump_cb = dp_register_packetdump_callback,
14582 	.unregister_pktdump_cb = dp_deregister_packetdump_callback,
14583 #endif
14584 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
14585 	.set_bus_vote_lvl_high = dp_set_bus_vote_lvl_high,
14586 	.get_bus_vote_lvl_high = dp_get_bus_vote_lvl_high,
14587 #endif
14588 };
14589 #endif
14590 
14591 #ifdef DP_FLOW_CTL
14592 static struct cdp_flowctl_ops dp_ops_flowctl = {
14593 	/* WIFI 3.0 DP implement as required. */
14594 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
14595 	.flow_pool_map_handler = dp_tx_flow_pool_map,
14596 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
14597 	.register_pause_cb = dp_txrx_register_pause_cb,
14598 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
14599 	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
14600 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
14601 };
14602 
14603 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
14604 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
14605 };
14606 #endif
14607 
14608 #ifdef IPA_OFFLOAD
14609 static struct cdp_ipa_ops dp_ops_ipa = {
14610 	.ipa_get_resource = dp_ipa_get_resource,
14611 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
14612 	.ipa_iounmap_doorbell_vaddr = dp_ipa_iounmap_doorbell_vaddr,
14613 	.ipa_op_response = dp_ipa_op_response,
14614 	.ipa_register_op_cb = dp_ipa_register_op_cb,
14615 	.ipa_deregister_op_cb = dp_ipa_deregister_op_cb,
14616 	.ipa_get_stat = dp_ipa_get_stat,
14617 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
14618 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
14619 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
14620 	.ipa_setup = dp_ipa_setup,
14621 	.ipa_cleanup = dp_ipa_cleanup,
14622 	.ipa_setup_iface = dp_ipa_setup_iface,
14623 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
14624 	.ipa_enable_pipes = dp_ipa_enable_pipes,
14625 	.ipa_disable_pipes = dp_ipa_disable_pipes,
14626 	.ipa_set_perf_level = dp_ipa_set_perf_level,
14627 	.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd,
14628 	.ipa_tx_buf_smmu_mapping = dp_ipa_tx_buf_smmu_mapping,
14629 	.ipa_tx_buf_smmu_unmapping = dp_ipa_tx_buf_smmu_unmapping,
14630 #ifdef IPA_WDS_EASYMESH_FEATURE
14631 	.ipa_ast_create = dp_ipa_ast_create,
14632 #endif
14633 };
14634 #endif
14635 
14636 #ifdef DP_POWER_SAVE
14637 static QDF_STATUS dp_bus_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
14638 {
14639 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14640 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14641 	int timeout = SUSPEND_DRAIN_WAIT;
14642 	int drain_wait_delay = 50; /* 50 ms */
14643 	int32_t tx_pending;
14644 
14645 	if (qdf_unlikely(!pdev)) {
14646 		dp_err("pdev is NULL");
14647 		return QDF_STATUS_E_INVAL;
14648 	}
14649 
14650 	/* Abort if there are any pending TX packets */
14651 	while ((tx_pending = dp_get_tx_pending((struct cdp_pdev *)pdev))) {
14652 		qdf_sleep(drain_wait_delay);
14653 		if (timeout <= 0) {
14654 			dp_info("TX frames are pending %d, abort suspend",
14655 				tx_pending);
14656 			dp_find_missing_tx_comp(soc);
14657 			return QDF_STATUS_E_TIMEOUT;
14658 		}
14659 		timeout = timeout - drain_wait_delay;
14660 	}
14661 
14662 	if (soc->intr_mode == DP_INTR_POLL)
14663 		qdf_timer_stop(&soc->int_timer);
14664 
14665 	/* Stop monitor reap timer and reap any pending frames in ring */
14666 	dp_monitor_reap_timer_suspend(soc);
14667 
14668 	dp_suspend_fse_cache_flush(soc);
14669 
14670 	return QDF_STATUS_SUCCESS;
14671 }
14672 
14673 static QDF_STATUS dp_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
14674 {
14675 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14676 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14677 	uint8_t i;
14678 
14679 	if (qdf_unlikely(!pdev)) {
14680 		dp_err("pdev is NULL");
14681 		return QDF_STATUS_E_INVAL;
14682 	}
14683 
14684 	if (soc->intr_mode == DP_INTR_POLL)
14685 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
14686 
14687 	/* Start monitor reap timer */
14688 	dp_monitor_reap_timer_start(soc, CDP_MON_REAP_SOURCE_ANY);
14689 
14690 	dp_resume_fse_cache_flush(soc);
14691 
14692 	for (i = 0; i < soc->num_tcl_data_rings; i++)
14693 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
14694 
14695 	return QDF_STATUS_SUCCESS;
14696 }
14697 
14698 /**
14699  * dp_process_wow_ack_rsp() - process wow ack response
14700  * @soc_hdl: datapath soc handle
14701  * @pdev_id: data path pdev handle id
14702  *
14703  * Return: none
14704  */
14705 static void dp_process_wow_ack_rsp(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
14706 {
14707 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14708 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14709 
14710 	if (qdf_unlikely(!pdev)) {
14711 		dp_err("pdev is NULL");
14712 		return;
14713 	}
14714 
14715 	/*
	 * As part of wow enable, FW disables the mon status ring. In the wow
	 * ack response from FW, reap the mon status ring to make sure no
	 * packets are pending in the ring.
14719 	 */
14720 	dp_monitor_reap_timer_suspend(soc);
14721 }
14722 
14723 /**
14724  * dp_process_target_suspend_req() - process target suspend request
14725  * @soc_hdl: datapath soc handle
14726  * @pdev_id: data path pdev handle id
14727  *
14728  * Return: none
14729  */
14730 static void dp_process_target_suspend_req(struct cdp_soc_t *soc_hdl,
14731 					  uint8_t pdev_id)
14732 {
14733 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14734 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14735 
14736 	if (qdf_unlikely(!pdev)) {
14737 		dp_err("pdev is NULL");
14738 		return;
14739 	}
14740 
14741 	/* Stop monitor reap timer and reap any pending frames in ring */
14742 	dp_monitor_reap_timer_suspend(soc);
14743 }
14744 
14745 static struct cdp_bus_ops dp_ops_bus = {
14746 	.bus_suspend = dp_bus_suspend,
14747 	.bus_resume = dp_bus_resume,
14748 	.process_wow_ack_rsp = dp_process_wow_ack_rsp,
14749 	.process_target_suspend_req = dp_process_target_suspend_req
14750 };
14751 #endif
14752 
14753 #ifdef DP_FLOW_CTL
14754 static struct cdp_throttle_ops dp_ops_throttle = {
14755 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
14756 };
14757 
14758 static struct cdp_cfg_ops dp_ops_cfg = {
14759 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
14760 };
14761 #endif
14762 
14763 #ifdef DP_PEER_EXTENDED_API
14764 static struct cdp_ocb_ops dp_ops_ocb = {
14765 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
14766 };
14767 
14768 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
14769 	.clear_stats = dp_txrx_clear_dump_stats,
14770 };
14771 
14772 static struct cdp_peer_ops dp_ops_peer = {
14773 	.register_peer = dp_register_peer,
14774 	.clear_peer = dp_clear_peer,
14775 	.find_peer_exist = dp_find_peer_exist,
14776 	.find_peer_exist_on_vdev = dp_find_peer_exist_on_vdev,
14777 	.find_peer_exist_on_other_vdev = dp_find_peer_exist_on_other_vdev,
14778 	.peer_state_update = dp_peer_state_update,
14779 	.get_vdevid = dp_get_vdevid,
14780 	.get_vdev_by_peer_addr = dp_get_vdev_by_peer_addr,
14781 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
14782 	.get_peer_state = dp_get_peer_state,
14783 	.peer_flush_frags = dp_peer_flush_frags,
14784 	.set_peer_as_tdls_peer = dp_set_peer_as_tdls_peer,
14785 };
14786 #endif
14787 
14788 static void dp_soc_txrx_ops_attach(struct dp_soc *soc)
14789 {
14790 	soc->cdp_soc.ops->cmn_drv_ops = &dp_ops_cmn;
14791 	soc->cdp_soc.ops->ctrl_ops = &dp_ops_ctrl;
14792 	soc->cdp_soc.ops->me_ops = &dp_ops_me;
14793 	soc->cdp_soc.ops->host_stats_ops = &dp_ops_host_stats;
14794 	soc->cdp_soc.ops->wds_ops = &dp_ops_wds;
14795 	soc->cdp_soc.ops->raw_ops = &dp_ops_raw;
14796 #ifdef PEER_FLOW_CONTROL
14797 	soc->cdp_soc.ops->pflow_ops = &dp_ops_pflow;
14798 #endif /* PEER_FLOW_CONTROL */
14799 #ifdef DP_PEER_EXTENDED_API
14800 	soc->cdp_soc.ops->misc_ops = &dp_ops_misc;
14801 	soc->cdp_soc.ops->ocb_ops = &dp_ops_ocb;
14802 	soc->cdp_soc.ops->peer_ops = &dp_ops_peer;
14803 	soc->cdp_soc.ops->mob_stats_ops = &dp_ops_mob_stats;
14804 #endif
14805 #ifdef DP_FLOW_CTL
14806 	soc->cdp_soc.ops->cfg_ops = &dp_ops_cfg;
14807 	soc->cdp_soc.ops->flowctl_ops = &dp_ops_flowctl;
14808 	soc->cdp_soc.ops->l_flowctl_ops = &dp_ops_l_flowctl;
14809 	soc->cdp_soc.ops->throttle_ops = &dp_ops_throttle;
14810 #endif
14811 #ifdef IPA_OFFLOAD
14812 	soc->cdp_soc.ops->ipa_ops = &dp_ops_ipa;
14813 #endif
14814 #ifdef DP_POWER_SAVE
14815 	soc->cdp_soc.ops->bus_ops = &dp_ops_bus;
14816 #endif
14817 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
14818 	soc->cdp_soc.ops->cfr_ops = &dp_ops_cfr;
14819 #endif
14820 #ifdef WLAN_SUPPORT_MSCS
14821 	soc->cdp_soc.ops->mscs_ops = &dp_ops_mscs;
14822 #endif
14823 #ifdef WLAN_SUPPORT_MESH_LATENCY
14824 	soc->cdp_soc.ops->mesh_latency_ops = &dp_ops_mesh_latency;
14825 #endif
14826 #ifdef CONFIG_SAWF_DEF_QUEUES
14827 	soc->cdp_soc.ops->sawf_ops = &dp_ops_sawf;
14828 #endif
14829 #ifdef WLAN_SUPPORT_SCS
14830 	soc->cdp_soc.ops->scs_ops = &dp_ops_scs;
14831 #endif
14832 };
14833 
14834 /*
14835  * dp_soc_set_txrx_ring_map()
14836  * @dp_soc: DP handler for soc
14837  *
14838  * Return: Void
14839  */
14840 void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
14841 {
	uint32_t i;

	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
14846 }
14847 
14848 qdf_export_symbol(dp_soc_set_txrx_ring_map);
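
/*
 * For illustration: with DP_NSS_DEFAULT_MAP, interrupt context i is
 * assigned the TCL data ring dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i],
 * giving each context a fixed tx ring. The concrete map contents live
 * in the dp_cpu_ring_map table defined elsewhere in the datapath.
 */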
14849 
14850 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
14851 	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574) || \
14852 	defined(QCA_WIFI_QCA5332)
14853 /**
14854  * dp_soc_attach_wifi3() - Attach txrx SOC
14855  * @ctrl_psoc: Opaque SOC handle from control plane
14856  * @params: SOC attach params
14857  *
14858  * Return: DP SOC handle on success, NULL on failure
14859  */
14860 struct cdp_soc_t *
14861 dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
14862 		    struct cdp_soc_attach_params *params)
14863 {
14864 	struct dp_soc *dp_soc = NULL;
14865 
14866 	dp_soc = dp_soc_attach(ctrl_psoc, params);
14867 
14868 	return dp_soc_to_cdp_soc_t(dp_soc);
14869 }
14870 
14871 static inline void dp_soc_set_def_pdev(struct dp_soc *soc)
14872 {
14873 	int lmac_id;
14874 
14875 	for (lmac_id = 0; lmac_id < MAX_NUM_LMAC_HW; lmac_id++) {
		/* Set default host PDEV ID for lmac_id */
14877 		wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
14878 				      INVALID_PDEV_ID, lmac_id);
14879 	}
14880 }
14881 
14882 static uint32_t
14883 dp_get_link_desc_id_start(uint16_t arch_id)
14884 {
14885 	switch (arch_id) {
14886 	case CDP_ARCH_TYPE_LI:
14887 		return LINK_DESC_ID_START_21_BITS_COOKIE;
14888 	case CDP_ARCH_TYPE_BE:
14889 		return LINK_DESC_ID_START_20_BITS_COOKIE;
14890 	default:
14891 		dp_err("unkonwn arch_id 0x%x", arch_id);
14892 		QDF_BUG(0);
14893 		return LINK_DESC_ID_START_21_BITS_COOKIE;
14894 	}
14895 }
14896 
14897 /**
14898  * dp_soc_attach() - Attach txrx SOC
14899  * @ctrl_psoc: Opaque SOC handle from control plane
14900  * @params: SOC attach params
14901  *
14902  * Return: DP SOC handle on success, NULL on failure
14903  */
14904 static struct dp_soc *
14905 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
14906 	      struct cdp_soc_attach_params *params)
14907 {
14908 	int int_ctx;
	struct dp_soc *soc = NULL;
14910 	uint16_t arch_id;
14911 	struct hif_opaque_softc *hif_handle = params->hif_handle;
14912 	qdf_device_t qdf_osdev = params->qdf_osdev;
14913 	struct ol_if_ops *ol_ops = params->ol_ops;
14914 	uint16_t device_id = params->device_id;
14915 
14916 	if (!hif_handle) {
14917 		dp_err("HIF handle is NULL");
14918 		goto fail0;
14919 	}
14920 	arch_id = cdp_get_arch_type_from_devid(device_id);
14921 	soc = qdf_mem_malloc(dp_get_soc_context_size(device_id));
14922 	if (!soc) {
14923 		dp_err("DP SOC memory allocation failed");
14924 		goto fail0;
14925 	}
14926 
14927 	dp_info("soc memory allocated %pK", soc);
14928 	soc->hif_handle = hif_handle;
14929 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
14930 	if (!soc->hal_soc)
14931 		goto fail1;
14932 
14933 	hif_get_cmem_info(soc->hif_handle,
14934 			  &soc->cmem_base,
14935 			  &soc->cmem_total_size);
14936 	soc->cmem_avail_size = soc->cmem_total_size;
14937 	int_ctx = 0;
14938 	soc->device_id = device_id;
14939 	soc->cdp_soc.ops =
14940 		(struct cdp_ops *)qdf_mem_malloc(sizeof(struct cdp_ops));
14941 	if (!soc->cdp_soc.ops)
14942 		goto fail1;
14943 
14944 	dp_soc_txrx_ops_attach(soc);
14945 	soc->cdp_soc.ol_ops = ol_ops;
14946 	soc->ctrl_psoc = ctrl_psoc;
14947 	soc->osdev = qdf_osdev;
14948 	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
14949 	hal_rx_get_tlv_size(soc->hal_soc, &soc->rx_pkt_tlv_size,
14950 			    &soc->rx_mon_pkt_tlv_size);
14951 	soc->idle_link_bm_id = hal_get_idle_link_bm_id(soc->hal_soc,
14952 						       params->mlo_chip_id);
14953 	soc->features.dmac_cmn_src_rxbuf_ring_enabled =
14954 		hal_dmac_cmn_src_rxbuf_ring_get(soc->hal_soc);
14955 	soc->arch_id = arch_id;
14956 	soc->link_desc_id_start =
14957 			dp_get_link_desc_id_start(soc->arch_id);
14958 	dp_configure_arch_ops(soc);
14959 
14960 	/* Reset wbm sg list and flags */
14961 	dp_rx_wbm_sg_list_reset(soc);
14962 
14963 	dp_soc_tx_hw_desc_history_attach(soc);
14964 	dp_soc_rx_history_attach(soc);
14965 	dp_soc_mon_status_ring_history_attach(soc);
14966 	dp_soc_tx_history_attach(soc);
14967 	wlan_set_srng_cfg(&soc->wlan_srng_cfg);
14968 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
14969 	if (!soc->wlan_cfg_ctx) {
14970 		dp_err("wlan_cfg_ctx failed\n");
14971 		goto fail2;
14972 	}
14973 	dp_soc_cfg_attach(soc);
14974 
14975 	if (dp_hw_link_desc_pool_banks_alloc(soc, WLAN_INVALID_PDEV_ID)) {
14976 		dp_err("failed to allocate link desc pool banks");
14977 		goto fail3;
14978 	}
14979 
14980 	if (dp_hw_link_desc_ring_alloc(soc)) {
14981 		dp_err("failed to allocate link_desc_ring");
14982 		goto fail4;
14983 	}
14984 
14985 	if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_attach(soc,
14986 								 params))) {
14987 		dp_err("unable to do target specific attach");
14988 		goto fail5;
14989 	}
14990 
14991 	if (dp_soc_srng_alloc(soc)) {
14992 		dp_err("failed to allocate soc srng rings");
14993 		goto fail6;
14994 	}
14995 
14996 	if (dp_soc_tx_desc_sw_pools_alloc(soc)) {
14997 		dp_err("dp_soc_tx_desc_sw_pools_alloc failed");
14998 		goto fail7;
14999 	}
15000 
15001 	if (!dp_monitor_modularized_enable()) {
15002 		if (dp_mon_soc_attach_wrapper(soc)) {
15003 			dp_err("failed to attach monitor");
15004 			goto fail8;
15005 		}
15006 	}
15007 
15008 	if (dp_sysfs_initialize_stats(soc) != QDF_STATUS_SUCCESS) {
15009 		dp_err("failed to initialize dp stats sysfs file");
15010 		dp_sysfs_deinitialize_stats(soc);
15011 	}
15012 
15013 	dp_soc_swlm_attach(soc);
15014 	dp_soc_set_interrupt_mode(soc);
15015 	dp_soc_set_def_pdev(soc);
15016 
15017 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
15018 		qdf_dma_mem_stats_read(),
15019 		qdf_heap_mem_stats_read(),
15020 		qdf_skb_total_mem_stats_read());
15021 
15022 	return soc;
15023 fail8:
15024 	dp_soc_tx_desc_sw_pools_free(soc);
15025 fail7:
15026 	dp_soc_srng_free(soc);
15027 fail6:
15028 	soc->arch_ops.txrx_soc_detach(soc);
15029 fail5:
15030 	dp_hw_link_desc_ring_free(soc);
15031 fail4:
15032 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
15033 fail3:
15034 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
15035 fail2:
15036 	qdf_mem_free(soc->cdp_soc.ops);
15037 fail1:
15038 	qdf_mem_free(soc);
15039 fail0:
15040 	return NULL;
15041 }
15042 
15043 /**
15044  * dp_soc_init() - Initialize txrx SOC
 * @soc: Opaque DP SOC handle
15046  * @htc_handle: Opaque HTC handle
15047  * @hif_handle: Opaque HIF handle
15048  *
15049  * Return: DP SOC handle on success, NULL on failure
15050  */
15051 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
15052 		  struct hif_opaque_softc *hif_handle)
15053 {
15054 	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
15055 	bool is_monitor_mode = false;
15056 	uint8_t i;
15057 	int num_dp_msi;
15058 
15059 	wlan_minidump_log(soc, sizeof(*soc), soc->ctrl_psoc,
15060 			  WLAN_MD_DP_SOC, "dp_soc");
15061 
15062 	soc->hif_handle = hif_handle;
15063 
15064 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
15065 	if (!soc->hal_soc)
15066 		goto fail0;
15067 
15068 	if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_init(soc))) {
15069 		dp_err("unable to do target specific init");
15070 		goto fail0;
15071 	}
15072 
15073 	htt_soc = htt_soc_attach(soc, htc_handle);
15074 	if (!htt_soc)
15075 		goto fail1;
15076 
15077 	soc->htt_handle = htt_soc;
15078 
15079 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
15080 		goto fail2;
15081 
15082 	htt_set_htc_handle(htt_soc, htc_handle);
15083 
15084 	dp_soc_cfg_init(soc);
15085 
15086 	dp_monitor_soc_cfg_init(soc);
15087 	/* Reset/Initialize wbm sg list and flags */
15088 	dp_rx_wbm_sg_list_reset(soc);
15089 
15090 	/* Note: Any SRNG ring initialization should happen only after
15091 	 * Interrupt mode is set and followed by filling up the
15092 	 * interrupt mask. IT SHOULD ALWAYS BE IN THIS ORDER.
15093 	 */
15094 	dp_soc_set_interrupt_mode(soc);
15095 	if (soc->cdp_soc.ol_ops->get_con_mode &&
15096 	    soc->cdp_soc.ol_ops->get_con_mode() ==
15097 	    QDF_GLOBAL_MONITOR_MODE)
15098 		is_monitor_mode = true;
15099 
15100 	num_dp_msi = dp_get_num_msi_available(soc, soc->intr_mode);
15101 	if (num_dp_msi < 0) {
15102 		dp_init_err("%pK: dp_interrupt assignment failed", soc);
15103 		goto fail3;
15104 	}
15105 
15106 	wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, num_dp_msi,
15107 				     soc->intr_mode, is_monitor_mode);
15108 
15109 	/* initialize WBM_IDLE_LINK ring */
15110 	if (dp_hw_link_desc_ring_init(soc)) {
15111 		dp_init_err("%pK: dp_hw_link_desc_ring_init failed", soc);
15112 		goto fail3;
15113 	}
15114 
15115 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
15116 
15117 	if (dp_soc_srng_init(soc)) {
15118 		dp_init_err("%pK: dp_soc_srng_init failed", soc);
15119 		goto fail4;
15120 	}
15121 
15122 	if (htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
15123 			       htt_get_htc_handle(htt_soc),
15124 			       soc->hal_soc, soc->osdev) == NULL)
15125 		goto fail5;
15126 
15127 	/* Initialize descriptors in TCL Rings */
15128 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
15129 		hal_tx_init_data_ring(soc->hal_soc,
15130 				      soc->tcl_data_ring[i].hal_srng);
15131 	}
15132 
15133 	if (dp_soc_tx_desc_sw_pools_init(soc)) {
15134 		dp_init_err("%pK: dp_tx_soc_attach failed", soc);
15135 		goto fail6;
15136 	}
15137 
15138 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
15139 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
15140 	soc->cce_disable = false;
15141 	soc->max_ast_ageout_count = MAX_AST_AGEOUT_COUNT;
15142 
15143 	soc->sta_mode_search_policy = DP_TX_ADDR_SEARCH_ADDR_POLICY;
15144 	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
15145 	qdf_spinlock_create(&soc->vdev_map_lock);
15146 	qdf_atomic_init(&soc->num_tx_outstanding);
15147 	qdf_atomic_init(&soc->num_tx_exception);
15148 	soc->num_tx_allowed =
15149 		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);
15150 
15151 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
15152 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
15153 				CDP_CFG_MAX_PEER_ID);
15154 
15155 		if (ret != -EINVAL)
15156 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
15157 
15158 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
15159 				CDP_CFG_CCE_DISABLE);
15160 		if (ret == 1)
15161 			soc->cce_disable = true;
15162 	}
15163 
15164 	/*
	 * Skip registering hw ring interrupts for WMAC2 on IPQ6018
	 * and IPQ5018, as WMAC2 is not present on these platforms.
15167 	 */
15168 	if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018 ||
15169 	    soc->disable_mac2_intr)
15170 		dp_soc_disable_unused_mac_intr_mask(soc, 0x2);
15171 
15172 	/*
	 * Skip registering hw ring interrupts for WMAC1 on IPQ5018,
	 * as WMAC1 is not present on this platform.
15175 	 */
15176 	if (soc->disable_mac1_intr)
15177 		dp_soc_disable_unused_mac_intr_mask(soc, 0x1);
15178 
15179 	/* setup the global rx defrag waitlist */
15180 	TAILQ_INIT(&soc->rx.defrag.waitlist);
15181 	soc->rx.defrag.timeout_ms =
15182 		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
15183 	soc->rx.defrag.next_flush_ms = 0;
15184 	soc->rx.flags.defrag_timeout_check =
15185 		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
15186 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
15187 
15188 	dp_monitor_soc_init(soc);
15189 
15190 	qdf_atomic_set(&soc->cmn_init_done, 1);
15191 
15192 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
15193 
15194 	qdf_spinlock_create(&soc->ast_lock);
15195 	dp_peer_mec_spinlock_create(soc);
15196 
15197 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
15198 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
15199 	INIT_RX_HW_STATS_LOCK(soc);
15200 
15201 	qdf_nbuf_queue_init(&soc->invalid_buf_queue);
	/* fill the tx/rx cpu ring map */
15203 	dp_soc_set_txrx_ring_map(soc);
15204 
15205 	TAILQ_INIT(&soc->inactive_peer_list);
15206 	qdf_spinlock_create(&soc->inactive_peer_list_lock);
15207 	TAILQ_INIT(&soc->inactive_vdev_list);
15208 	qdf_spinlock_create(&soc->inactive_vdev_list_lock);
15209 	qdf_spinlock_create(&soc->htt_stats.lock);
15210 	/* initialize work queue for stats processing */
15211 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
15212 
15213 	dp_reo_desc_deferred_freelist_create(soc);
15214 
15215 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
15216 		qdf_dma_mem_stats_read(),
15217 		qdf_heap_mem_stats_read(),
15218 		qdf_skb_total_mem_stats_read());
15219 
15220 	soc->vdev_stats_id_map = 0;
15221 
15222 	return soc;
15223 fail6:
15224 	htt_soc_htc_dealloc(soc->htt_handle);
15225 fail5:
15226 	dp_soc_srng_deinit(soc);
15227 fail4:
15228 	dp_hw_link_desc_ring_deinit(soc);
15229 fail3:
15230 	htt_htc_pkt_pool_free(htt_soc);
15231 fail2:
15232 	htt_soc_detach(htt_soc);
15233 fail1:
15234 	soc->arch_ops.txrx_soc_deinit(soc);
15235 fail0:
15236 	return NULL;
15237 }
15238 
15239 /**
15240  * dp_soc_init_wifi3() - Initialize txrx SOC
15241  * @soc: Opaque DP SOC handle
15242  * @ctrl_psoc: Opaque SOC handle from control plane(Unused)
15243  * @hif_handle: Opaque HIF handle
15244  * @htc_handle: Opaque HTC handle
15245  * @qdf_osdev: QDF device (Unused)
15246  * @ol_ops: Offload Operations (Unused)
15247  * @device_id: Device ID (Unused)
15248  *
15249  * Return: DP SOC handle on success, NULL on failure
15250  */
15251 void *dp_soc_init_wifi3(struct cdp_soc_t *soc,
15252 			struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
15253 			struct hif_opaque_softc *hif_handle,
15254 			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
15255 			struct ol_if_ops *ol_ops, uint16_t device_id)
15256 {
15257 	return dp_soc_init((struct dp_soc *)soc, htc_handle, hif_handle);
15258 }
15259 
15260 #endif
15261 
15262 /*
15263  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
15264  *
15265  * @soc: handle to DP soc
15266  * @mac_id: MAC id
15267  *
15268  * Return: Return pdev corresponding to MAC
15269  */
15270 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
15271 {
15272 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
15273 		return (mac_id < MAX_PDEV_CNT) ? soc->pdev_list[mac_id] : NULL;
15274 
	/* Typically for MCL as there is only 1 PDEV */
15276 	return soc->pdev_list[0];
15277 }
15278 
15279 void dp_update_num_mac_rings_for_dbs(struct dp_soc *soc,
15280 				     int *max_mac_rings)
15281 {
15282 	bool dbs_enable = false;
15283 
15284 	if (soc->cdp_soc.ol_ops->is_hw_dbs_capable)
15285 		dbs_enable = soc->cdp_soc.ol_ops->
15286 				is_hw_dbs_capable((void *)soc->ctrl_psoc);
15287 
15288 	*max_mac_rings = dbs_enable ? (*max_mac_rings) : 1;
15289 	dp_info("dbs_enable %d, max_mac_rings %d",
15290 		dbs_enable, *max_mac_rings);
15291 }
15292 
15293 qdf_export_symbol(dp_update_num_mac_rings_for_dbs);
15294 
15295 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
15296 /**
15297  * dp_get_cfr_rcc() - get cfr rcc config
15298  * @soc_hdl: Datapath soc handle
15299  * @pdev_id: id of objmgr pdev
15300  *
15301  * Return: true/false based on cfr mode setting
15302  */
15303 static
15304 bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
15305 {
15306 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15307 	struct dp_pdev *pdev = NULL;
15308 
15309 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15310 	if (!pdev) {
15311 		dp_err("pdev is NULL");
15312 		return false;
15313 	}
15314 
15315 	return pdev->cfr_rcc_mode;
15316 }
15317 
15318 /**
15319  * dp_set_cfr_rcc() - enable/disable cfr rcc config
15320  * @soc_hdl: Datapath soc handle
15321  * @pdev_id: id of objmgr pdev
15322  * @enable: Enable/Disable cfr rcc mode
15323  *
15324  * Return: none
15325  */
15326 static
15327 void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, bool enable)
15328 {
15329 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15330 	struct dp_pdev *pdev = NULL;
15331 
15332 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15333 	if (!pdev) {
15334 		dp_err("pdev is NULL");
15335 		return;
15336 	}
15337 
15338 	pdev->cfr_rcc_mode = enable;
15339 }
15340 
15341 /*
15342  * dp_get_cfr_dbg_stats - Get the debug statistics for CFR
15343  * @soc_hdl: Datapath soc handle
15344  * @pdev_id: id of data path pdev handle
15345  * @cfr_rcc_stats: CFR RCC debug statistics buffer
15346  *
15347  * Return: none
15348  */
15349 static inline void
15350 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
15351 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats)
15352 {
15353 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15354 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15355 
15356 	if (!pdev) {
15357 		dp_err("Invalid pdev");
15358 		return;
15359 	}
15360 
15361 	qdf_mem_copy(cfr_rcc_stats, &pdev->stats.rcc,
15362 		     sizeof(struct cdp_cfr_rcc_stats));
15363 }
15364 
15365 /*
15366  * dp_clear_cfr_dbg_stats - Clear debug statistics for CFR
15367  * @soc_hdl: Datapath soc handle
15368  * @pdev_id: id of data path pdev handle
15369  *
15370  * Return: none
15371  */
15372 static void dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl,
15373 				   uint8_t pdev_id)
15374 {
15375 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15376 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15377 
15378 	if (!pdev) {
15379 		dp_err("dp pdev is NULL");
15380 		return;
15381 	}
15382 
15383 	qdf_mem_zero(&pdev->stats.rcc, sizeof(pdev->stats.rcc));
15384 }
15385 #endif
15386 
15387 /**
15388  * dp_bucket_index() - Return index from array
15389  *
15390  * @delay: delay measured
15391  * @array: array used to index corresponding delay
 * @delay_in_us: flag to indicate whether the delay is in ms or us
15393  *
15394  * Return: index
15395  */
15396 static uint8_t
15397 dp_bucket_index(uint32_t delay, uint16_t *array, bool delay_in_us)
15398 {
15399 	uint8_t i = CDP_DELAY_BUCKET_0;
15400 	uint32_t thr_low, thr_high;
15401 
15402 	for (; i < CDP_DELAY_BUCKET_MAX - 1; i++) {
15403 		thr_low = array[i];
15404 		thr_high = array[i + 1];
15405 
15406 		if (delay_in_us) {
15407 			thr_low = thr_low * USEC_PER_MSEC;
15408 			thr_high = thr_high * USEC_PER_MSEC;
15409 		}
15410 		if (delay >= thr_low && delay <= thr_high)
15411 			return i;
15412 	}
15413 	return (CDP_DELAY_BUCKET_MAX - 1);
15414 }
15415 
15416 #ifdef HW_TX_DELAY_STATS_ENABLE
15417 /*
 * cdp_fw_to_hw_delay
 * FW to HW delay ranges in milliseconds
15420  */
15421 static uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
15422 	0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
15423 #else
15424 static uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
15425 	0, 2, 4, 6, 8, 10, 20, 30, 40, 50, 100, 250, 500};
15426 #endif
15427 
15428 /*
 * cdp_sw_enq_delay
15430  * Software enqueue delay ranges in milliseconds
15431  */
15432 static uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
15433 	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
15434 
15435 /*
 * cdp_intfrm_delay
15437  * Interframe delay ranges in milliseconds
15438  */
15439 static uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
15440 	0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};
15441 
15442 /**
15443  * dp_fill_delay_buckets() - Fill delay statistics bucket for each
15444  *				type of delay
15445  * @tstats: tid tx stats
15446  * @rstats: tid rx stats
 * @delay: delay in ms or us, as per @delay_in_us
15448  * @tid: tid value
15449  * @mode: type of tx delay mode
15450  * @ring_id: ring number
 * @delay_in_us: flag to indicate whether the delay is in ms or us
15452  *
15453  * Return: pointer to cdp_delay_stats structure
15454  */
15455 static struct cdp_delay_stats *
15456 dp_fill_delay_buckets(struct cdp_tid_tx_stats *tstats,
15457 		      struct cdp_tid_rx_stats *rstats, uint32_t delay,
15458 		      uint8_t tid, uint8_t mode, uint8_t ring_id,
15459 		      bool delay_in_us)
15460 {
15461 	uint8_t delay_index = 0;
15462 	struct cdp_delay_stats *stats = NULL;
15463 
15464 	/*
15465 	 * Update delay stats in proper bucket
15466 	 */
15467 	switch (mode) {
15468 	/* Software Enqueue delay ranges */
15469 	case CDP_DELAY_STATS_SW_ENQ:
15470 		if (!tstats)
15471 			break;
15472 
15473 		delay_index = dp_bucket_index(delay, cdp_sw_enq_delay,
15474 					      delay_in_us);
15475 		tstats->swq_delay.delay_bucket[delay_index]++;
15476 		stats = &tstats->swq_delay;
15477 		break;
15478 
15479 	/* Tx Completion delay ranges */
15480 	case CDP_DELAY_STATS_FW_HW_TRANSMIT:
15481 		if (!tstats)
15482 			break;
15483 
15484 		delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay,
15485 					      delay_in_us);
15486 		tstats->hwtx_delay.delay_bucket[delay_index]++;
15487 		stats = &tstats->hwtx_delay;
15488 		break;
15489 
15490 	/* Interframe tx delay ranges */
15491 	case CDP_DELAY_STATS_TX_INTERFRAME:
15492 		if (!tstats)
15493 			break;
15494 
15495 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
15496 					      delay_in_us);
15497 		tstats->intfrm_delay.delay_bucket[delay_index]++;
15498 		stats = &tstats->intfrm_delay;
15499 		break;
15500 
15501 	/* Interframe rx delay ranges */
15502 	case CDP_DELAY_STATS_RX_INTERFRAME:
15503 		if (!rstats)
15504 			break;
15505 
15506 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
15507 					      delay_in_us);
15508 		rstats->intfrm_delay.delay_bucket[delay_index]++;
15509 		stats = &rstats->intfrm_delay;
15510 		break;
15511 
15512 	/* Ring reap to indication to network stack */
15513 	case CDP_DELAY_STATS_REAP_STACK:
15514 		if (!rstats)
15515 			break;
15516 
15517 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
15518 					      delay_in_us);
15519 		rstats->to_stack_delay.delay_bucket[delay_index]++;
15520 		stats = &rstats->to_stack_delay;
15521 		break;
15522 	default:
15523 		dp_debug("Incorrect delay mode: %d", mode);
15524 	}
15525 
15526 	return stats;
15527 }
15528 
15529 void dp_update_delay_stats(struct cdp_tid_tx_stats *tstats,
15530 			   struct cdp_tid_rx_stats *rstats, uint32_t delay,
15531 			   uint8_t tid, uint8_t mode, uint8_t ring_id,
15532 			   bool delay_in_us)
15533 {
15534 	struct cdp_delay_stats *dstats = NULL;
15535 
15536 	/*
15537 	 * Delay ranges are different for different delay modes
15538 	 * Get the correct index to update delay bucket
15539 	 */
15540 	dstats = dp_fill_delay_buckets(tstats, rstats, delay, tid, mode,
15541 				       ring_id, delay_in_us);
15542 	if (qdf_unlikely(!dstats))
15543 		return;
15544 
15545 	if (delay != 0) {
15546 		/*
		 * Compute minimum, average and maximum
15548 		 * delay
15549 		 */
15550 		if (delay < dstats->min_delay)
15551 			dstats->min_delay = delay;
15552 
15553 		if (delay > dstats->max_delay)
15554 			dstats->max_delay = delay;
15555 
15556 		/*
15557 		 * Average over delay measured till now
15558 		 */
15559 		if (!dstats->avg_delay)
15560 			dstats->avg_delay = delay;
15561 		else
15562 			dstats->avg_delay = ((delay + dstats->avg_delay) >> 1);
15563 	}
15564 }
15565 
15566 /**
 * dp_get_peer_mac_list() - get the peer mac list of a vdev
15568  * @soc: Datapath soc handle
15569  * @vdev_id: vdev id
15570  * @newmac: Table of the clients mac
15571  * @mac_cnt: No. of MACs required
15572  * @limit: Limit the number of clients
15573  *
 * Return: number of clients
15575  */
15576 uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
15577 			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
15578 			      u_int16_t mac_cnt, bool limit)
15579 {
15580 	struct dp_soc *dp_soc = (struct dp_soc *)soc;
15581 	struct dp_vdev *vdev =
15582 		dp_vdev_get_ref_by_id(dp_soc, vdev_id, DP_MOD_ID_CDP);
15583 	struct dp_peer *peer;
15584 	uint16_t new_mac_cnt = 0;
15585 
15586 	if (!vdev)
15587 		return new_mac_cnt;
15588 
	if (limit && (vdev->num_peers > mac_cnt)) {
		dp_vdev_unref_delete(dp_soc, vdev, DP_MOD_ID_CDP);
		return 0;
	}
15591 
15592 	qdf_spin_lock_bh(&vdev->peer_list_lock);
15593 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
15594 		if (peer->bss_peer)
15595 			continue;
15596 		if (new_mac_cnt < mac_cnt) {
15597 			WLAN_ADDR_COPY(newmac[new_mac_cnt], peer->mac_addr.raw);
15598 			new_mac_cnt++;
15599 		}
15600 	}
15601 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
15602 	dp_vdev_unref_delete(dp_soc, vdev, DP_MOD_ID_CDP);
15603 	return new_mac_cnt;
15604 }
15605 
15606 uint16_t dp_get_peer_id(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *mac)
15607 {
15608 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
15609 						       mac, 0, vdev_id,
15610 						       DP_MOD_ID_CDP);
15611 	uint16_t peer_id = HTT_INVALID_PEER;
15612 
15613 	if (!peer) {
15614 		dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
15615 		return peer_id;
15616 	}
15617 
15618 	peer_id = peer->peer_id;
15619 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
15620 	return peer_id;
15621 }
15622 
15623 #ifdef QCA_SUPPORT_WDS_EXTENDED
15624 QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc,
15625 				  uint8_t vdev_id,
15626 				  uint8_t *mac,
15627 				  ol_txrx_rx_fp rx,
15628 				  ol_osif_peer_handle osif_peer)
15629 {
15630 	struct dp_txrx_peer *txrx_peer = NULL;
15631 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
15632 						       mac, 0, vdev_id,
15633 						       DP_MOD_ID_CDP);
15634 	QDF_STATUS status = QDF_STATUS_E_INVAL;
15635 
15636 	if (!peer) {
15637 		dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
15638 		return status;
15639 	}
15640 
15641 	txrx_peer = dp_get_txrx_peer(peer);
15642 	if (!txrx_peer) {
15643 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
15644 		return status;
15645 	}
15646 
15647 	if (rx) {
15648 		if (txrx_peer->osif_rx) {
15649 			status = QDF_STATUS_E_ALREADY;
15650 		} else {
15651 			txrx_peer->osif_rx = rx;
15652 			status = QDF_STATUS_SUCCESS;
15653 		}
15654 	} else {
15655 		if (txrx_peer->osif_rx) {
15656 			txrx_peer->osif_rx = NULL;
15657 			status = QDF_STATUS_SUCCESS;
15658 		} else {
15659 			status = QDF_STATUS_E_ALREADY;
15660 		}
15661 	}
15662 
15663 	txrx_peer->wds_ext.osif_peer = osif_peer;
15664 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
15665 
15666 	return status;
15667 }
15668 #endif /* QCA_SUPPORT_WDS_EXTENDED */
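
/*
 * Semantics note for dp_wds_ext_set_peer_rx() above: a non-NULL rx
 * registers the OSIF receive handler and a NULL rx deregisters it;
 * either direction returns QDF_STATUS_E_ALREADY when the requested
 * state is already in effect, and wds_ext.osif_peer is updated
 * unconditionally while the peer reference is held.
 */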
15669 
15670 /**
 * dp_pdev_srng_deinit() - de-initialize all pdev srng rings including
15672  *			   monitor rings
15673  * @pdev: Datapath pdev handle
15674  *
15675  */
15676 static void dp_pdev_srng_deinit(struct dp_pdev *pdev)
15677 {
15678 	struct dp_soc *soc = pdev->soc;
15679 	uint8_t i;
15680 
15681 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled)
15682 		dp_srng_deinit(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
15683 			       RXDMA_BUF,
15684 			       pdev->lmac_id);
15685 
15686 	if (!soc->rxdma2sw_rings_not_supported) {
15687 		for (i = 0;
15688 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
15689 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
15690 								 pdev->pdev_id);
15691 
15692 			wlan_minidump_remove(soc->rxdma_err_dst_ring[lmac_id].
15693 							base_vaddr_unaligned,
15694 					     soc->rxdma_err_dst_ring[lmac_id].
15695 								alloc_size,
15696 					     soc->ctrl_psoc,
15697 					     WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
15698 					     "rxdma_err_dst");
15699 			dp_srng_deinit(soc, &soc->rxdma_err_dst_ring[lmac_id],
15700 				       RXDMA_DST, lmac_id);
15701 		}
15702 	}
15703 
15704 
15705 }
15706 
15707 /**
15708  * dp_pdev_srng_init() - initialize all pdev srng rings including
15709  *			   monitor rings
15710  * @pdev: Datapath pdev handle
15711  *
 * Return: QDF_STATUS_SUCCESS on success
15713  *	   QDF_STATUS_E_NOMEM on failure
15714  */
15715 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev)
15716 {
15717 	struct dp_soc *soc = pdev->soc;
15718 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
15719 	uint32_t i;
15720 
15721 	soc_cfg_ctx = soc->wlan_cfg_ctx;
15722 
15723 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
15724 		if (dp_srng_init(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
15725 				 RXDMA_BUF, 0, pdev->lmac_id)) {
15726 			dp_init_err("%pK: dp_srng_init failed rx refill ring",
15727 				    soc);
15728 			goto fail1;
15729 		}
15730 	}
15731 
15732 	/* LMAC RxDMA to SW Rings configuration */
15733 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
15734 		/* Only valid for MCL */
15735 		pdev = soc->pdev_list[0];
15736 
15737 	if (!soc->rxdma2sw_rings_not_supported) {
15738 		for (i = 0;
15739 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
15740 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
15741 								 pdev->pdev_id);
15742 			struct dp_srng *srng =
15743 				&soc->rxdma_err_dst_ring[lmac_id];
15744 
15745 			if (srng->hal_srng)
15746 				continue;
15747 
15748 			if (dp_srng_init(soc, srng, RXDMA_DST, 0, lmac_id)) {
15749 				dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring",
15750 					    soc);
15751 				goto fail1;
15752 			}
15753 			wlan_minidump_log(soc->rxdma_err_dst_ring[lmac_id].
15754 						base_vaddr_unaligned,
15755 					  soc->rxdma_err_dst_ring[lmac_id].
15756 						alloc_size,
15757 					  soc->ctrl_psoc,
15758 					  WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
15759 					  "rxdma_err_dst");
15760 		}
15761 	}
15762 	return QDF_STATUS_SUCCESS;
15763 
15764 fail1:
15765 	dp_pdev_srng_deinit(pdev);
15766 	return QDF_STATUS_E_NOMEM;
15767 }
15768 
15769 /**
15770  * dp_pdev_srng_free() - free all pdev srng rings including monitor rings
 * @pdev: Datapath pdev handle
15772  *
15773  */
15774 static void dp_pdev_srng_free(struct dp_pdev *pdev)
15775 {
15776 	struct dp_soc *soc = pdev->soc;
15777 	uint8_t i;
15778 
15779 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled)
15780 		dp_srng_free(soc, &soc->rx_refill_buf_ring[pdev->lmac_id]);
15781 
15782 	if (!soc->rxdma2sw_rings_not_supported) {
15783 		for (i = 0;
15784 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
15785 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
15786 								 pdev->pdev_id);
15787 
15788 			dp_srng_free(soc, &soc->rxdma_err_dst_ring[lmac_id]);
15789 		}
15790 	}
15791 }
15792 
15793 /**
15794  * dp_pdev_srng_alloc() - allocate memory for all pdev srng rings including
15795  *			  monitor rings
 * @pdev: Datapath pdev handle
 *
 * Return: QDF_STATUS_SUCCESS on success
15799  *	   QDF_STATUS_E_NOMEM on failure
15800  */
15801 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev)
15802 {
15803 	struct dp_soc *soc = pdev->soc;
15804 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
15805 	uint32_t ring_size;
15806 	uint32_t i;
15807 
15808 	soc_cfg_ctx = soc->wlan_cfg_ctx;
15809 
15810 	ring_size = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
15811 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
15812 		if (dp_srng_alloc(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
15813 				  RXDMA_BUF, ring_size, 0)) {
15814 			dp_init_err("%pK: dp_srng_alloc failed rx refill ring",
15815 				    soc);
15816 			goto fail1;
15817 		}
15818 	}
15819 
15820 	ring_size = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
15821 	/* LMAC RxDMA to SW Rings configuration */
15822 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
15823 		/* Only valid for MCL */
15824 		pdev = soc->pdev_list[0];
15825 
15826 	if (!soc->rxdma2sw_rings_not_supported) {
15827 		for (i = 0;
15828 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
15829 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
15830 								 pdev->pdev_id);
15831 			struct dp_srng *srng =
15832 				&soc->rxdma_err_dst_ring[lmac_id];
15833 
15834 			if (srng->base_vaddr_unaligned)
15835 				continue;
15836 
15837 			if (dp_srng_alloc(soc, srng, RXDMA_DST, ring_size, 0)) {
15838 				dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring",
15839 					    soc);
15840 				goto fail1;
15841 			}
15842 		}
15843 	}
15844 
15845 	return QDF_STATUS_SUCCESS;
15846 fail1:
15847 	dp_pdev_srng_free(pdev);
15848 	return QDF_STATUS_E_NOMEM;
15849 }
15850 
15851 #ifndef WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG
15852 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
15853 {
15854 	QDF_STATUS status;
15855 
15856 	if (soc->init_tcl_cmd_cred_ring) {
15857 		status =  dp_srng_init(soc, &soc->tcl_cmd_credit_ring,
15858 				       TCL_CMD_CREDIT, 0, 0);
15859 		if (QDF_IS_STATUS_ERROR(status))
15860 			return status;
15861 
15862 		wlan_minidump_log(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
15863 				  soc->tcl_cmd_credit_ring.alloc_size,
15864 				  soc->ctrl_psoc,
15865 				  WLAN_MD_DP_SRNG_TCL_CMD,
15866 				  "wbm_desc_rel_ring");
15867 	}
15868 
15869 	return QDF_STATUS_SUCCESS;
15870 }
15871 
15872 static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
15873 {
15874 	if (soc->init_tcl_cmd_cred_ring) {
15875 		wlan_minidump_remove(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
15876 				     soc->tcl_cmd_credit_ring.alloc_size,
15877 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_CMD,
15878 				     "wbm_desc_rel_ring");
15879 		dp_srng_deinit(soc, &soc->tcl_cmd_credit_ring,
15880 			       TCL_CMD_CREDIT, 0);
15881 	}
15882 }
15883 
15884 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
15885 {
15886 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
15887 	uint32_t entries;
15888 	QDF_STATUS status;
15889 
15890 	entries = wlan_cfg_get_dp_soc_tcl_cmd_credit_ring_size(soc_cfg_ctx);
15891 	if (soc->init_tcl_cmd_cred_ring) {
15892 		status = dp_srng_alloc(soc, &soc->tcl_cmd_credit_ring,
15893 				       TCL_CMD_CREDIT, entries, 0);
15894 		if (QDF_IS_STATUS_ERROR(status))
15895 			return status;
15896 	}
15897 
15898 	return QDF_STATUS_SUCCESS;
15899 }
15900 
15901 static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
15902 {
15903 	if (soc->init_tcl_cmd_cred_ring)
15904 		dp_srng_free(soc, &soc->tcl_cmd_credit_ring);
15905 }
15906 
15907 static inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
15908 {
15909 	if (soc->init_tcl_cmd_cred_ring)
15910 		hal_tx_init_cmd_credit_ring(soc->hal_soc,
15911 					    soc->tcl_cmd_credit_ring.hal_srng);
15912 }
15913 #else
15914 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
15915 {
15916 	return QDF_STATUS_SUCCESS;
15917 }
15918 
15919 static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
15920 {
15921 }
15922 
15923 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
15924 {
15925 	return QDF_STATUS_SUCCESS;
15926 }
15927 
15928 static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
15929 {
15930 }
15931 
15932 static inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
15933 {
15934 }
15935 #endif
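
/*
 * Sketch of the compile-time stub pattern used above (illustrative):
 * with WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG defined, the helpers become
 * success-returning no-ops, so callers such as dp_soc_srng_init() can
 * stay unconditional:
 *
 *	if (dp_soc_tcl_cmd_cred_srng_init(soc))	// no-op stub when the
 *		goto fail1;			// ring is compiled out
 */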
15936 
15937 #ifndef WLAN_DP_DISABLE_TCL_STATUS_SRNG
15938 static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
15939 {
15940 	QDF_STATUS status;
15941 
15942 	status =  dp_srng_init(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0);
15943 	if (QDF_IS_STATUS_ERROR(status))
15944 		return status;
15945 
15946 	wlan_minidump_log(soc->tcl_status_ring.base_vaddr_unaligned,
15947 			  soc->tcl_status_ring.alloc_size,
15948 			  soc->ctrl_psoc,
15949 			  WLAN_MD_DP_SRNG_TCL_STATUS,
15950 			  "wbm_desc_rel_ring");
15951 
15952 	return QDF_STATUS_SUCCESS;
15953 }
15954 
15955 static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
15956 {
15957 	wlan_minidump_remove(soc->tcl_status_ring.base_vaddr_unaligned,
15958 			     soc->tcl_status_ring.alloc_size,
15959 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_STATUS,
15960 			     "wbm_desc_rel_ring");
15961 	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
15962 }
15963 
15964 static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
15965 {
15966 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
15967 	uint32_t entries;
15968 	QDF_STATUS status = QDF_STATUS_SUCCESS;
15969 
15970 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
15971 	status = dp_srng_alloc(soc, &soc->tcl_status_ring,
15972 			       TCL_STATUS, entries, 0);
15973 
15974 	return status;
15975 }
15976 
15977 static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
15978 {
15979 	dp_srng_free(soc, &soc->tcl_status_ring);
15980 }
15981 #else
15982 static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
15983 {
15984 	return QDF_STATUS_SUCCESS;
15985 }
15986 
15987 static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
15988 {
15989 }
15990 
15991 static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
15992 {
15993 	return QDF_STATUS_SUCCESS;
15994 }
15995 
15996 static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
15997 {
15998 }
15999 #endif
16000 
16001 /**
16002  * dp_soc_srng_deinit() - de-initialize soc srng rings
16003  * @soc: Datapath soc handle
16004  *
16005  */
16006 static void dp_soc_srng_deinit(struct dp_soc *soc)
16007 {
16008 	uint32_t i;
16009 
16010 	if (soc->arch_ops.txrx_soc_srng_deinit)
16011 		soc->arch_ops.txrx_soc_srng_deinit(soc);
16012 
16013 	/* Free the ring memories */
16014 	/* Common rings */
16015 	wlan_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
16016 			     soc->wbm_desc_rel_ring.alloc_size,
16017 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_WBM_DESC_REL,
16018 			     "wbm_desc_rel_ring");
16019 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
16020 
16021 	/* Tx data rings */
16022 	for (i = 0; i < soc->num_tcl_data_rings; i++)
16023 		dp_deinit_tx_pair_by_index(soc, i);
16024 
16025 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
16026 		dp_deinit_tx_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
16027 		dp_ipa_deinit_alt_tx_ring(soc);
16028 	}
16029 
16030 	/* TCL command and status rings */
16031 	dp_soc_tcl_cmd_cred_srng_deinit(soc);
16032 	dp_soc_tcl_status_srng_deinit(soc);
16033 
16034 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
16035 		/* TODO: Get number of rings and ring sizes
16036 		 * from wlan_cfg
16037 		 */
16038 		wlan_minidump_remove(soc->reo_dest_ring[i].base_vaddr_unaligned,
16039 				     soc->reo_dest_ring[i].alloc_size,
16040 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_DEST,
16041 				     "reo_dest_ring");
16042 		dp_srng_deinit(soc, &soc->reo_dest_ring[i], REO_DST, i);
16043 	}
16044 
16045 	/* REO reinjection ring */
16046 	wlan_minidump_remove(soc->reo_reinject_ring.base_vaddr_unaligned,
16047 			     soc->reo_reinject_ring.alloc_size,
16048 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_REINJECT,
16049 			     "reo_reinject_ring");
16050 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
16051 
16052 	/* Rx release ring */
16053 	wlan_minidump_remove(soc->rx_rel_ring.base_vaddr_unaligned,
16054 			     soc->rx_rel_ring.alloc_size,
16055 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_RX_REL,
16056 			     "reo_release_ring");
16057 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
16058 
16059 	/* Rx exception ring */
16060 	/* TODO: Better to store ring_type and ring_num in
16061 	 * dp_srng during setup
16062 	 */
16063 	wlan_minidump_remove(soc->reo_exception_ring.base_vaddr_unaligned,
16064 			     soc->reo_exception_ring.alloc_size,
16065 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_EXCEPTION,
16066 			     "reo_exception_ring");
16067 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
16068 
16069 	/* REO command and status rings */
16070 	wlan_minidump_remove(soc->reo_cmd_ring.base_vaddr_unaligned,
16071 			     soc->reo_cmd_ring.alloc_size,
16072 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_CMD,
16073 			     "reo_cmd_ring");
16074 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
16075 	wlan_minidump_remove(soc->reo_status_ring.base_vaddr_unaligned,
16076 			     soc->reo_status_ring.alloc_size,
16077 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_STATUS,
16078 			     "reo_status_ring");
16079 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
16080 }
16081 
16082 /**
16083  * dp_soc_srng_init() - Initialize soc level srng rings
16084  * @soc: Datapath soc handle
16085  *
16086  * Return: QDF_STATUS_SUCCESS on success
16087  *	   QDF_STATUS_E_FAILURE on failure
16088  */
16089 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc)
16090 {
16091 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
16092 	uint8_t i;
16093 	uint8_t wbm2_sw_rx_rel_ring_id;
16094 
16095 	soc_cfg_ctx = soc->wlan_cfg_ctx;
16096 
16097 	dp_enable_verbose_debug(soc);
16098 
16099 	/* WBM descriptor release ring */
16100 	if (dp_srng_init(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0)) {
16101 		dp_init_err("%pK: dp_srng_init failed for wbm_desc_rel_ring", soc);
16102 		goto fail1;
16103 	}
16104 
16105 	wlan_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
16106 			  soc->wbm_desc_rel_ring.alloc_size,
16107 			  soc->ctrl_psoc,
16108 			  WLAN_MD_DP_SRNG_WBM_DESC_REL,
16109 			  "wbm_desc_rel_ring");
16110 
16111 	/* TCL command and status rings */
16112 	if (dp_soc_tcl_cmd_cred_srng_init(soc)) {
16113 		dp_init_err("%pK: dp_srng_init failed for tcl_cmd_credit_ring", soc);
16114 		goto fail1;
16115 	}
16116 
16117 	if (dp_soc_tcl_status_srng_init(soc)) {
16118 		dp_init_err("%pK: dp_srng_init failed for tcl_status_ring", soc);
16119 		goto fail1;
16120 	}
16121 
16122 	/* REO reinjection ring */
16123 	if (dp_srng_init(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0)) {
16124 		dp_init_err("%pK: dp_srng_init failed for reo_reinject_ring", soc);
16125 		goto fail1;
16126 	}
16127 
16128 	wlan_minidump_log(soc->reo_reinject_ring.base_vaddr_unaligned,
16129 			  soc->reo_reinject_ring.alloc_size,
16130 			  soc->ctrl_psoc,
16131 			  WLAN_MD_DP_SRNG_REO_REINJECT,
16132 			  "reo_reinject_ring");
16133 
16134 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc_cfg_ctx);
16135 	/* Rx release ring */
16136 	if (dp_srng_init(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
16137 			 wbm2_sw_rx_rel_ring_id, 0)) {
16138 		dp_init_err("%pK: dp_srng_init failed for rx_rel_ring", soc);
16139 		goto fail1;
16140 	}
16141 
16142 	wlan_minidump_log(soc->rx_rel_ring.base_vaddr_unaligned,
16143 			  soc->rx_rel_ring.alloc_size,
16144 			  soc->ctrl_psoc,
16145 			  WLAN_MD_DP_SRNG_RX_REL,
16146 			  "reo_release_ring");
16147 
16148 	/* Rx exception ring */
16149 	if (dp_srng_init(soc, &soc->reo_exception_ring,
16150 			 REO_EXCEPTION, 0, MAX_REO_DEST_RINGS)) {
16151 		dp_init_err("%pK: dp_srng_init failed - reo_exception", soc);
16152 		goto fail1;
16153 	}
16154 
16155 	wlan_minidump_log(soc->reo_exception_ring.base_vaddr_unaligned,
16156 			  soc->reo_exception_ring.alloc_size,
16157 			  soc->ctrl_psoc,
16158 			  WLAN_MD_DP_SRNG_REO_EXCEPTION,
16159 			  "reo_exception_ring");
16160 
16161 	/* REO command and status rings */
16162 	if (dp_srng_init(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0)) {
16163 		dp_init_err("%pK: dp_srng_init failed for reo_cmd_ring", soc);
16164 		goto fail1;
16165 	}
16166 
16167 	wlan_minidump_log(soc->reo_cmd_ring.base_vaddr_unaligned,
16168 			  soc->reo_cmd_ring.alloc_size,
16169 			  soc->ctrl_psoc,
16170 			  WLAN_MD_DP_SRNG_REO_CMD,
16171 			  "reo_cmd_ring");
16172 
16173 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
16174 	TAILQ_INIT(&soc->rx.reo_cmd_list);
16175 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
16176 
16177 	if (dp_srng_init(soc, &soc->reo_status_ring, REO_STATUS, 0, 0)) {
16178 		dp_init_err("%pK: dp_srng_init failed for reo_status_ring", soc);
16179 		goto fail1;
16180 	}
16181 
16182 	wlan_minidump_log(soc->reo_status_ring.base_vaddr_unaligned,
16183 			  soc->reo_status_ring.alloc_size,
16184 			  soc->ctrl_psoc,
16185 			  WLAN_MD_DP_SRNG_REO_STATUS,
16186 			  "reo_status_ring");
16187 
16188 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
16189 		if (dp_init_tx_ring_pair_by_index(soc, i))
16190 			goto fail1;
16191 	}
16192 
16193 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
16194 		if (dp_init_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
16195 			goto fail1;
16196 
16197 		if (dp_ipa_init_alt_tx_ring(soc))
16198 			goto fail1;
16199 	}
16200 
16201 	dp_create_ext_stats_event(soc);
16202 
16203 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
16204 		/* Initialize REO destination ring */
16205 		if (dp_srng_init(soc, &soc->reo_dest_ring[i], REO_DST, i, 0)) {
16206 			dp_init_err("%pK: dp_srng_init failed for reo_dest_ring", soc);
16207 			goto fail1;
16208 		}
16209 
16210 		wlan_minidump_log(soc->reo_dest_ring[i].base_vaddr_unaligned,
16211 				  soc->reo_dest_ring[i].alloc_size,
16212 				  soc->ctrl_psoc,
16213 				  WLAN_MD_DP_SRNG_REO_DEST,
16214 				  "reo_dest_ring");
16215 	}
16216 
16217 	if (soc->arch_ops.txrx_soc_srng_init) {
16218 		if (soc->arch_ops.txrx_soc_srng_init(soc)) {
16219 			dp_init_err("%pK: dp_srng_init failed for arch rings",
16220 				    soc);
16221 			goto fail1;
16222 		}
16223 	}
16224 
16225 	return QDF_STATUS_SUCCESS;
16226 fail1:
16227 	/*
16228 	 * Cleanup will be done as part of soc_detach, which will
16229 	 * be called on pdev attach failure
16230 	 */
16231 	dp_soc_srng_deinit(soc);
16232 	return QDF_STATUS_E_FAILURE;
16233 }
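
/*
 * Illustrative sketch ("foo_ring", FOO and WLAN_MD_DP_SRNG_FOO are
 * hypothetical names): each soc-level ring initialized above follows
 * the same pattern, and dp_soc_srng_deinit() issues the matching
 * wlan_minidump_remove() before dp_srng_deinit():
 *
 *	if (dp_srng_init(soc, &soc->foo_ring, FOO, 0, 0))
 *		goto fail1;
 *	wlan_minidump_log(soc->foo_ring.base_vaddr_unaligned,
 *			  soc->foo_ring.alloc_size, soc->ctrl_psoc,
 *			  WLAN_MD_DP_SRNG_FOO, "foo_ring");
 */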
16234 
16235 /**
16236  * dp_soc_srng_free() - free soc level srng rings
16237  * @soc: Datapath soc handle
16238  *
16239  */
16240 static void dp_soc_srng_free(struct dp_soc *soc)
16241 {
16242 	uint32_t i;
16243 
16244 	if (soc->arch_ops.txrx_soc_srng_free)
16245 		soc->arch_ops.txrx_soc_srng_free(soc);
16246 
16247 	dp_srng_free(soc, &soc->wbm_desc_rel_ring);
16248 
16249 	for (i = 0; i < soc->num_tcl_data_rings; i++)
16250 		dp_free_tx_ring_pair_by_index(soc, i);
16251 
16252 	/* Free IPA rings for TCL_TX and TX_COMP ring */
16253 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
16254 		dp_free_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
16255 		dp_ipa_free_alt_tx_ring(soc);
16256 	}
16257 
16258 	dp_soc_tcl_cmd_cred_srng_free(soc);
16259 	dp_soc_tcl_status_srng_free(soc);
16260 
16261 	for (i = 0; i < soc->num_reo_dest_rings; i++)
16262 		dp_srng_free(soc, &soc->reo_dest_ring[i]);
16263 
16264 	dp_srng_free(soc, &soc->reo_reinject_ring);
16265 	dp_srng_free(soc, &soc->rx_rel_ring);
16266 
16267 	dp_srng_free(soc, &soc->reo_exception_ring);
16268 
16269 	dp_srng_free(soc, &soc->reo_cmd_ring);
16270 	dp_srng_free(soc, &soc->reo_status_ring);
16271 }
16272 
16273 /**
16274  * dp_soc_srng_alloc() - Allocate memory for soc level srng rings
16275  * @soc: Datapath soc handle
16276  *
16277  * Return: QDF_STATUS_SUCCESS on success
16278  *	   QDF_STATUS_E_NOMEM on failure
16279  */
16280 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc)
16281 {
16282 	uint32_t entries;
16283 	uint32_t i;
16284 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
16285 	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
16286 	uint32_t tx_comp_ring_size, tx_ring_size, reo_dst_ring_size;
16287 
16288 	soc_cfg_ctx = soc->wlan_cfg_ctx;
16289 
16290 	/* sw2wbm link descriptor release ring */
16291 	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
16292 	if (dp_srng_alloc(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE,
16293 			  entries, 0)) {
16294 		dp_init_err("%pK: dp_srng_alloc failed for wbm_desc_rel_ring", soc);
16295 		goto fail1;
16296 	}
16297 
16298 	/* TCL command and status rings */
16299 	if (dp_soc_tcl_cmd_cred_srng_alloc(soc)) {
16300 		dp_init_err("%pK: dp_srng_alloc failed for tcl_cmd_credit_ring", soc);
16301 		goto fail1;
16302 	}
16303 
16304 	if (dp_soc_tcl_status_srng_alloc(soc)) {
16305 		dp_init_err("%pK: dp_srng_alloc failed for tcl_status_ring", soc);
16306 		goto fail1;
16307 	}
16308 
16309 	/* REO reinjection ring */
16310 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
16311 	if (dp_srng_alloc(soc, &soc->reo_reinject_ring, REO_REINJECT,
16312 			  entries, 0)) {
16313 		dp_init_err("%pK: dp_srng_alloc failed for reo_reinject_ring", soc);
16314 		goto fail1;
16315 	}
16316 
16317 	/* Rx release ring */
16318 	entries = wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx);
16319 	if (dp_srng_alloc(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
16320 			  entries, 0)) {
16321 		dp_init_err("%pK: dp_srng_alloc failed for rx_rel_ring", soc);
16322 		goto fail1;
16323 	}
16324 
16325 	/* Rx exception ring */
16326 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
16327 	if (dp_srng_alloc(soc, &soc->reo_exception_ring, REO_EXCEPTION,
16328 			  entries, 0)) {
16329 		dp_init_err("%pK: dp_srng_alloc failed - reo_exception", soc);
16330 		goto fail1;
16331 	}
16332 
16333 	/* REO command and status rings */
16334 	entries = wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx);
16335 	if (dp_srng_alloc(soc, &soc->reo_cmd_ring, REO_CMD, entries, 0)) {
16336 		dp_init_err("%pK: dp_srng_alloc failed for reo_cmd_ring", soc);
16337 		goto fail1;
16338 	}
16339 
16340 	entries = wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx);
16341 	if (dp_srng_alloc(soc, &soc->reo_status_ring, REO_STATUS,
16342 			  entries, 0)) {
16343 		dp_init_err("%pK: dp_srng_alloc failed for reo_status_ring", soc);
16344 		goto fail1;
16345 	}
16346 
16347 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
16348 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
16349 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc_cfg_ctx);
16350 
16351 	/* Disable cached desc if NSS offload is enabled */
16352 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
16353 		cached = 0;
16354 
16355 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
16356 		if (dp_alloc_tx_ring_pair_by_index(soc, i))
16357 			goto fail1;
16358 	}
16359 
16360 	/* IPA rings for TCL_TX and TX_COMP will be allocated here */
16361 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
16362 		if (dp_alloc_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
16363 			goto fail1;
16364 
16365 		if (dp_ipa_alloc_alt_tx_ring(soc))
16366 			goto fail1;
16367 	}
16368 
16369 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
16370 		/* Setup REO destination ring */
16371 		if (dp_srng_alloc(soc, &soc->reo_dest_ring[i], REO_DST,
16372 				  reo_dst_ring_size, cached)) {
16373 			dp_init_err("%pK: dp_srng_alloc failed for reo_dest_ring", soc);
16374 			goto fail1;
16375 		}
16376 	}
16377 
16378 	if (soc->arch_ops.txrx_soc_srng_alloc) {
16379 		if (soc->arch_ops.txrx_soc_srng_alloc(soc)) {
16380 			dp_init_err("%pK: dp_srng_alloc failed for arch rings",
16381 				    soc);
16382 			goto fail1;
16383 		}
16384 	}
16385 
16386 	return QDF_STATUS_SUCCESS;
16387 
16388 fail1:
16389 	dp_soc_srng_free(soc);
16390 	return QDF_STATUS_E_NOMEM;
16391 }
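
/*
 * Illustrative ordering sketch (assumed soc attach/detach flow): the
 * four soc-level helpers mirror the pdev ones:
 *
 *	dp_soc_srng_alloc(soc);		// attach: reserve ring memory
 *	dp_soc_srng_init(soc);		// program SRNGs + minidump log
 *	...
 *	dp_soc_srng_deinit(soc);	// minidump remove + un-program
 *	dp_soc_srng_free(soc);		// detach: release ring memory
 *
 * The error paths differ by phase: dp_soc_srng_alloc() unwinds with
 * dp_soc_srng_free(), while dp_soc_srng_init() unwinds with
 * dp_soc_srng_deinit() and leaves the memory for soc detach to free.
 */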
16392 
16393 static void dp_soc_cfg_dump(struct dp_soc *soc, uint32_t target_type)
16394 {
16395 	dp_init_info("DP soc Dump for Target = %d", target_type);
16396 	dp_init_info("ast_override_support = %d, da_war_enabled = %d,",
16397 		     soc->ast_override_support, soc->da_war_enabled);
16398 
16399 	wlan_cfg_dp_soc_ctx_dump(soc->wlan_cfg_ctx);
16400 }
16401 
16402 /**
16403  * dp_soc_cfg_init() - initialize target specific configuration
16404  *		       during dp_soc_init
16405  * @soc: dp soc handle
16406  */
16407 static void dp_soc_cfg_init(struct dp_soc *soc)
16408 {
16409 	uint32_t target_type;
16410 
16411 	target_type = hal_get_target_type(soc->hal_soc);
16412 	switch (target_type) {
16413 	case TARGET_TYPE_QCA6290:
16414 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
16415 					       REO_DST_RING_SIZE_QCA6290);
16416 		soc->ast_override_support = 1;
16417 		soc->da_war_enabled = false;
16418 		break;
16419 	case TARGET_TYPE_QCA6390:
16420 	case TARGET_TYPE_QCA6490:
16421 	case TARGET_TYPE_QCA6750:
16422 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
16423 					       REO_DST_RING_SIZE_QCA6290);
16424 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
16425 		soc->ast_override_support = 1;
16426 		if (soc->cdp_soc.ol_ops->get_con_mode &&
16427 		    soc->cdp_soc.ol_ops->get_con_mode() ==
16428 		    QDF_GLOBAL_MONITOR_MODE) {
16429 			int int_ctx;
16430 
16431 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
16432 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
16433 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
16434 			}
16435 		}
16436 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
16437 		break;
16438 	case TARGET_TYPE_KIWI:
16439 	case TARGET_TYPE_MANGO:
16440 		soc->ast_override_support = 1;
16441 		soc->per_tid_basize_max_tid = 8;
16442 
16443 		if (soc->cdp_soc.ol_ops->get_con_mode &&
16444 		    soc->cdp_soc.ol_ops->get_con_mode() ==
16445 		    QDF_GLOBAL_MONITOR_MODE) {
16446 			int int_ctx;
16447 
16448 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS;
16449 			     int_ctx++) {
16450 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
16451 				if (dp_is_monitor_mode_using_poll(soc))
16452 					soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
16453 			}
16454 		}
16455 
16456 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
16457 		soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev = 1;
16458 		break;
16459 	case TARGET_TYPE_QCA8074:
16460 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
16461 		soc->da_war_enabled = true;
16462 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
16463 		break;
16464 	case TARGET_TYPE_QCA8074V2:
16465 	case TARGET_TYPE_QCA6018:
16466 	case TARGET_TYPE_QCA9574:
16467 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
16468 		soc->ast_override_support = 1;
16469 		soc->per_tid_basize_max_tid = 8;
16470 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
16471 		soc->da_war_enabled = false;
16472 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
16473 		break;
16474 	case TARGET_TYPE_QCN9000:
16475 		soc->ast_override_support = 1;
16476 		soc->da_war_enabled = false;
16477 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
16478 		soc->per_tid_basize_max_tid = 8;
16479 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
16480 		soc->lmac_polled_mode = 0;
16481 		soc->wbm_release_desc_rx_sg_support = 1;
16482 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
16483 		break;
16484 	case TARGET_TYPE_QCA5018:
16485 	case TARGET_TYPE_QCN6122:
16486 		soc->ast_override_support = 1;
16487 		soc->da_war_enabled = false;
16488 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
16489 		soc->per_tid_basize_max_tid = 8;
16490 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS_11AX;
16491 		soc->disable_mac1_intr = 1;
16492 		soc->disable_mac2_intr = 1;
16493 		soc->wbm_release_desc_rx_sg_support = 1;
16494 		break;
16495 	case TARGET_TYPE_QCN9224:
16496 		soc->ast_override_support = 1;
16497 		soc->da_war_enabled = false;
16498 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
16499 		soc->per_tid_basize_max_tid = 8;
16500 		soc->wbm_release_desc_rx_sg_support = 1;
16501 		soc->rxdma2sw_rings_not_supported = 1;
16502 		soc->wbm_sg_last_msdu_war = 1;
16503 		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
16504 		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
16505 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
16506 		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
16507 		soc->host_ast_db_enable = cfg_get(soc->ctrl_psoc,
16508 						  CFG_DP_HOST_AST_DB_ENABLE);
16509 		break;
16510 	case TARGET_TYPE_QCA5332:
16511 		soc->ast_override_support = 1;
16512 		soc->da_war_enabled = false;
16513 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
16514 		soc->per_tid_basize_max_tid = 8;
16515 		soc->wbm_release_desc_rx_sg_support = 1;
16516 		soc->rxdma2sw_rings_not_supported = 1;
16517 		soc->wbm_sg_last_msdu_war = 1;
16518 		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
16519 		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
16520 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS_5332;
16521 		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
16522 		soc->host_ast_db_enable = cfg_get(soc->ctrl_psoc,
16523 						  CFG_DP_HOST_AST_DB_ENABLE);
16524 		break;
16525 	default:
16526 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
16527 		qdf_assert_always(0);
16528 		break;
16529 	}
16530 	dp_soc_cfg_dump(soc, target_type);
16531 }
16532 
16533 /**
16534  * dp_soc_cfg_attach() - set target specific configuration in
16535  *			 dp soc cfg.
16536  * @soc: dp soc handle
16537  */
16538 static void dp_soc_cfg_attach(struct dp_soc *soc)
16539 {
16540 	int target_type;
16541 	int nss_cfg = 0;
16542 
16543 	target_type = hal_get_target_type(soc->hal_soc);
16544 	switch (target_type) {
16545 	case TARGET_TYPE_QCA6290:
16546 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
16547 					       REO_DST_RING_SIZE_QCA6290);
16548 		break;
16549 	case TARGET_TYPE_QCA6390:
16550 	case TARGET_TYPE_QCA6490:
16551 	case TARGET_TYPE_QCA6750:
16552 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
16553 					       REO_DST_RING_SIZE_QCA6290);
16554 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
16555 		break;
16556 	case TARGET_TYPE_KIWI:
16557 	case TARGET_TYPE_MANGO:
16558 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
16559 		break;
16560 	case TARGET_TYPE_QCA8074:
16561 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
16562 		break;
16563 	case TARGET_TYPE_QCA8074V2:
16564 	case TARGET_TYPE_QCA6018:
16565 	case TARGET_TYPE_QCA9574:
16566 	case TARGET_TYPE_QCN6122:
16567 	case TARGET_TYPE_QCA5018:
16568 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
16569 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
16570 		break;
16571 	case TARGET_TYPE_QCN9000:
16572 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
16573 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
16574 		break;
16575 	case TARGET_TYPE_QCN9224:
16576 	case TARGET_TYPE_QCA5332:
16577 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
16578 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
16579 		break;
16580 	default:
16581 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
16582 		qdf_assert_always(0);
16583 		break;
16584 	}
16585 
16586 	if (soc->cdp_soc.ol_ops->get_soc_nss_cfg)
16587 		nss_cfg = soc->cdp_soc.ol_ops->get_soc_nss_cfg(soc->ctrl_psoc);
16588 
16589 	wlan_cfg_set_dp_soc_nss_cfg(soc->wlan_cfg_ctx, nss_cfg);
16590 
16591 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
16592 		wlan_cfg_set_num_tx_desc_pool(soc->wlan_cfg_ctx, 0);
16593 		wlan_cfg_set_num_tx_ext_desc_pool(soc->wlan_cfg_ctx, 0);
16594 		wlan_cfg_set_num_tx_desc(soc->wlan_cfg_ctx, 0);
16595 		wlan_cfg_set_num_tx_ext_desc(soc->wlan_cfg_ctx, 0);
16596 		soc->init_tcl_cmd_cred_ring = false;
16597 		soc->num_tcl_data_rings =
16598 			wlan_cfg_num_nss_tcl_data_rings(soc->wlan_cfg_ctx);
16599 		soc->num_reo_dest_rings =
16600 			wlan_cfg_num_nss_reo_dest_rings(soc->wlan_cfg_ctx);
16601 
16602 	} else {
16603 		soc->init_tcl_cmd_cred_ring = true;
16604 		soc->num_tx_comp_rings =
16605 			wlan_cfg_num_tx_comp_rings(soc->wlan_cfg_ctx);
16606 		soc->num_tcl_data_rings =
16607 			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
16608 		soc->num_reo_dest_rings =
16609 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
16610 	}
16611 
16612 	soc->arch_ops.soc_cfg_attach(soc);
16613 }
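
/*
 * Illustrative sketch of the NSS offload knob consumed above: nss_cfg
 * is treated as a per-pdev bitmap (see the "nss_cfg & (1 << pdev_id)"
 * check in dp_pdev_init() below), and any non-zero value disables the
 * host tx descriptor pools and the TCL command/credit ring. A
 * hypothetical 2-radio soc with both radios offloaded:
 *
 *	nss_cfg = 0x3;			// bit 0 = pdev 0, bit 1 = pdev 1
 *	wlan_cfg_set_dp_soc_nss_cfg(soc->wlan_cfg_ctx, nss_cfg);
 *	// => init_tcl_cmd_cred_ring == false, num_tx_desc set to 0
 */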
16614 
16615 static inline void dp_pdev_set_default_reo(struct dp_pdev *pdev)
16616 {
16617 	struct dp_soc *soc = pdev->soc;
16618 
16619 	switch (pdev->pdev_id) {
16620 	case 0:
16621 		pdev->reo_dest =
16622 			wlan_cfg_radio0_default_reo_get(soc->wlan_cfg_ctx);
16623 		break;
16624 
16625 	case 1:
16626 		pdev->reo_dest =
16627 			wlan_cfg_radio1_default_reo_get(soc->wlan_cfg_ctx);
16628 		break;
16629 
16630 	case 2:
16631 		pdev->reo_dest =
16632 			wlan_cfg_radio2_default_reo_get(soc->wlan_cfg_ctx);
16633 		break;
16634 
16635 	default:
16636 		dp_init_err("%pK: Invalid pdev_id %d for reo selection",
16637 			    soc, pdev->pdev_id);
16638 		break;
16639 	}
16640 }
16641 
16642 static QDF_STATUS dp_pdev_init(struct cdp_soc_t *txrx_soc,
16643 				      HTC_HANDLE htc_handle,
16644 				      qdf_device_t qdf_osdev,
16645 				      uint8_t pdev_id)
16646 {
16647 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
16648 	int nss_cfg;
16649 	void *sojourn_buf;
16650 	QDF_STATUS ret;
16651 
16652 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
16653 	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
16654 
16655 	soc_cfg_ctx = soc->wlan_cfg_ctx;
16656 	pdev->soc = soc;
16657 	pdev->pdev_id = pdev_id;
16658 
16659 	/*
16660 	 * Variable to prevent double pdev deinitialization during
16661 	 * radio detach execution, i.e. in the absence of any vdev.
16662 	 */
16663 	pdev->pdev_deinit = 0;
16664 
16665 	if (dp_wdi_event_attach(pdev)) {
16666 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
16667 			  "dp_wdi_evet_attach failed");
16668 		goto fail0;
16669 	}
16670 
16671 	if (dp_pdev_srng_init(pdev)) {
16672 		dp_init_err("%pK: Failed to initialize pdev srng rings", soc);
16673 		goto fail1;
16674 	}
16675 
16676 	/* Initialize descriptors in TCL Rings used by IPA */
16677 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
16678 		hal_tx_init_data_ring(soc->hal_soc,
16679 				      soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng);
16680 		dp_ipa_hal_tx_init_alt_data_ring(soc);
16681 	}
16682 
16683 	/*
16684 	 * Initialize the command/credit ring descriptors.
16685 	 * The command/credit ring is also used for sending DATA cmds.
16686 	 */
16687 	dp_tx_init_cmd_credit_ring(soc);
16688 
16689 	dp_tx_pdev_init(pdev);
16690 
16691 	/*
16692 	 * set nss pdev config based on soc config
16693 	 */
16694 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
16695 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
16696 					 (nss_cfg & (1 << pdev_id)));
16697 	pdev->target_pdev_id =
16698 		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
16699 
16700 	if (soc->preferred_hw_mode == WMI_HOST_HW_MODE_2G_PHYB &&
16701 	    pdev->lmac_id == PHYB_2G_LMAC_ID) {
16702 		pdev->target_pdev_id = PHYB_2G_TARGET_PDEV_ID;
16703 	}
16704 
16705 	/* Reset the cpu ring map if radio is NSS offloaded */
16706 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
16707 		dp_soc_reset_cpu_ring_map(soc);
16708 		dp_soc_reset_intr_mask(soc);
16709 	}
16710 
16711 	/* Reset the IPA vlan interrupt mask */
16712 	dp_soc_reset_ipa_vlan_intr_mask(soc);
16713 
16714 	TAILQ_INIT(&pdev->vdev_list);
16715 	qdf_spinlock_create(&pdev->vdev_list_lock);
16716 	pdev->vdev_count = 0;
16717 	pdev->is_lro_hash_configured = 0;
16718 
16719 	qdf_spinlock_create(&pdev->tx_mutex);
16720 	pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_INVALID_LMAC_ID;
16721 	pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_INVALID_LMAC_ID;
16722 	pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_INVALID_LMAC_ID;
16723 
16724 	DP_STATS_INIT(pdev);
16725 
16726 	dp_local_peer_id_pool_init(pdev);
16727 
16728 	dp_dscp_tid_map_setup(pdev);
16729 	dp_pcp_tid_map_setup(pdev);
16730 
16731 	/* set the reo destination during initialization */
16732 	dp_pdev_set_default_reo(pdev);
16733 
16734 	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
16735 
16736 	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
16737 			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
16738 			      TRUE);
16739 
16740 	if (!pdev->sojourn_buf) {
16741 		dp_init_err("%pK: Failed to allocate sojourn buf", soc);
16742 		goto fail2;
16743 	}
16744 	sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
16745 	qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));
16746 
16747 	qdf_event_create(&pdev->fw_peer_stats_event);
16748 	qdf_event_create(&pdev->fw_stats_event);
16749 
16750 	pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
16751 
16752 	if (dp_rxdma_ring_setup(soc, pdev)) {
16753 		dp_init_err("%pK: RXDMA ring config failed", soc);
16754 		goto fail3;
16755 	}
16756 
16757 	if (dp_init_ipa_rx_refill_buf_ring(soc, pdev))
16758 		goto fail3;
16759 
16760 	if (dp_ipa_ring_resource_setup(soc, pdev))
16761 		goto fail4;
16762 
16763 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
16764 		dp_init_err("%pK: dp_ipa_uc_attach failed", soc);
16765 		goto fail4;
16766 	}
16767 
16768 	ret = dp_rx_fst_attach(soc, pdev);
16769 	if ((ret != QDF_STATUS_SUCCESS) &&
16770 	    (ret != QDF_STATUS_E_NOSUPPORT)) {
16771 		dp_init_err("%pK: RX Flow Search Table attach failed: pdev %d err %d",
16772 			    soc, pdev_id, ret);
16773 		goto fail5;
16774 	}
16775 
16776 	if (dp_pdev_bkp_stats_attach(pdev) != QDF_STATUS_SUCCESS) {
16777 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
16778 			  FL("dp_pdev_bkp_stats_attach failed"));
16779 		goto fail6;
16780 	}
16781 
16782 	if (dp_monitor_pdev_init(pdev)) {
16783 		dp_init_err("%pK: dp_monitor_pdev_init failed\n", soc);
16784 		goto fail7;
16785 	}
16786 
16787 	/* initialize sw rx descriptors */
16788 	dp_rx_pdev_desc_pool_init(pdev);
16789 	/* allocate buffers and replenish the RxDMA ring */
16790 	dp_rx_pdev_buffers_alloc(pdev);
16791 
16792 	dp_init_tso_stats(pdev);
16793 
16794 	pdev->rx_fast_flag = false;
16795 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
16796 		qdf_dma_mem_stats_read(),
16797 		qdf_heap_mem_stats_read(),
16798 		qdf_skb_total_mem_stats_read());
16799 
16800 	return QDF_STATUS_SUCCESS;
16801 fail7:
16802 	dp_pdev_bkp_stats_detach(pdev);
16803 fail6:
16804 	dp_rx_fst_detach(soc, pdev);
16805 fail5:
16806 	dp_ipa_uc_detach(soc, pdev);
16807 fail4:
16808 	dp_deinit_ipa_rx_refill_buf_ring(soc, pdev);
16809 fail3:
16810 	dp_rxdma_ring_cleanup(soc, pdev);
16811 	qdf_nbuf_free(pdev->sojourn_buf);
16812 fail2:
16813 	qdf_spinlock_destroy(&pdev->tx_mutex);
16814 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
16815 	dp_pdev_srng_deinit(pdev);
16816 fail1:
16817 	dp_wdi_event_detach(pdev);
16818 fail0:
16819 	return QDF_STATUS_E_FAILURE;
16820 }
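
/*
 * Illustrative note on the failN unwind chain above ("dp_step8" and
 * "fail8" are hypothetical): the labels run in strict reverse order of
 * setup, each undoing the steps that succeeded before the failing one;
 * a new setup step should add a new label on top of the chain rather
 * than reuse an existing one:
 *
 *	if (dp_step8(pdev))	// hypothetical next init step
 *		goto fail8;	// would unwind dp_monitor_pdev_init first
 */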
16821 
16822 /**
16823  * dp_pdev_init_wifi3() - Init txrx pdev
16824  * @txrx_soc: Datapath soc handle
16825  * @htc_handle: HTC handle for host-target interface
16826  * @qdf_osdev: QDF OS device
16827  * @pdev_id: pdev id
16828  * Return: QDF_STATUS
16829  */
16830 static QDF_STATUS dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
16831 				     HTC_HANDLE htc_handle,
16832 				     qdf_device_t qdf_osdev,
16833 				     uint8_t pdev_id)
16834 {
16835 	return dp_pdev_init(txrx_soc, htc_handle, qdf_osdev, pdev_id);
16836 }
16837 
16838