xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision de1e7e7e129e3f35eaee7ba04135d2734e70c50a)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <wlan_ipa_obj_mgmt_api.h>
21 #include <qdf_types.h>
22 #include <qdf_lock.h>
23 #include <qdf_net_types.h>
24 #include <qdf_lro.h>
25 #include <qdf_module.h>
26 #include <hal_hw_headers.h>
27 #include <hal_api.h>
28 #include <hif.h>
29 #include <htt.h>
30 #include <wdi_event.h>
31 #include <queue.h>
32 #include "dp_types.h"
33 #include "dp_internal.h"
34 #include "dp_tx.h"
35 #include "dp_tx_desc.h"
36 #include "dp_rx.h"
37 #ifdef DP_RATETABLE_SUPPORT
38 #include "dp_ratetable.h"
39 #endif
40 #include <cdp_txrx_handle.h>
41 #include <wlan_cfg.h>
42 #include <wlan_utility.h>
43 #include "cdp_txrx_cmn_struct.h"
44 #include "cdp_txrx_stats_struct.h"
45 #include "cdp_txrx_cmn_reg.h"
46 #include <qdf_util.h>
47 #include "dp_peer.h"
48 #include "htt_stats.h"
49 #include "dp_htt.h"
50 #ifdef WLAN_SUPPORT_RX_FISA
51 #include <wlan_dp_fisa_rx.h>
52 #endif
53 #include "htt_ppdu_stats.h"
54 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
55 #include "cfg_ucfg_api.h"
56 #include <wlan_module_ids.h>
57 
58 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
59 #include "cdp_txrx_flow_ctrl_v2.h"
60 #else
61 
62 static inline void
63 cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
64 {
65 	return;
66 }
67 #endif
68 #ifdef WIFI_MONITOR_SUPPORT
69 #include <dp_mon.h>
70 #endif
71 #include "dp_ipa.h"
72 #ifdef FEATURE_WDS
73 #include "dp_txrx_wds.h"
74 #endif
75 #ifdef WLAN_SUPPORT_MSCS
76 #include "dp_mscs.h"
77 #endif
78 #ifdef WLAN_SUPPORT_MESH_LATENCY
79 #include "dp_mesh_latency.h"
80 #endif
81 #ifdef WLAN_SUPPORT_SCS
82 #include "dp_scs.h"
83 #endif
84 #ifdef ATH_SUPPORT_IQUE
85 #include "dp_txrx_me.h"
86 #endif
87 #if defined(DP_CON_MON)
88 #ifndef REMOVE_PKT_LOG
89 #include <pktlog_ac_api.h>
90 #include <pktlog_ac.h>
91 #endif
92 #endif
93 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
94 #include <wlan_dp_swlm.h>
95 #endif
96 #ifdef CONFIG_SAWF_DEF_QUEUES
97 #include "dp_sawf.h"
98 #endif
99 #ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
100 #include <target_if_dp.h>
101 #endif
102 
103 #ifdef WLAN_FEATURE_STATS_EXT
104 #define INIT_RX_HW_STATS_LOCK(_soc) \
105 	qdf_spinlock_create(&(_soc)->rx_hw_stats_lock)
106 #define DEINIT_RX_HW_STATS_LOCK(_soc) \
107 	qdf_spinlock_destroy(&(_soc)->rx_hw_stats_lock)
108 #else
109 #define INIT_RX_HW_STATS_LOCK(_soc)  /* no op */
110 #define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */
111 #endif
112 
113 #if defined(DP_PEER_EXTENDED_API) || defined(WLAN_DP_PENDING_MEM_FLUSH)
114 #define SET_PEER_REF_CNT_ONE(_peer) \
115 	qdf_atomic_set(&(_peer)->ref_cnt, 1)
116 #else
117 #define SET_PEER_REF_CNT_ONE(_peer)
118 #endif
119 
120 #ifdef WLAN_SYSFS_DP_STATS
121 /* sysfs event wait time for firmware stat request unit milliseconds */
122 #define WLAN_SYSFS_STAT_REQ_WAIT_MS 3000
123 #endif
124 
125 #ifdef QCA_DP_ENABLE_TX_COMP_RING4
126 #define TXCOMP_RING4_NUM 3
127 #else
128 #define TXCOMP_RING4_NUM WBM2SW_TXCOMP_RING4_NUM
129 #endif
130 
131 #ifdef QCA_DP_TX_FW_METADATA_V2
132 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
133 		HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
134 #else
135 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
136 		HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
137 #endif
138 
139 QDF_COMPILE_TIME_ASSERT(max_rx_rings_check,
140 			MAX_REO_DEST_RINGS == CDP_MAX_RX_RINGS);
141 
142 QDF_COMPILE_TIME_ASSERT(max_tx_rings_check,
143 			MAX_TCL_DATA_RINGS == CDP_MAX_TX_COMP_RINGS);
144 
145 #define dp_init_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_INIT, params)
146 #define dp_init_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_INIT, params)
147 #define dp_init_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_INIT, params)
148 #define dp_init_info(params...) \
149 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_INIT, ## params)
150 #define dp_init_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_INIT, params)
151 
152 #define dp_vdev_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_VDEV, params)
153 #define dp_vdev_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_VDEV, params)
154 #define dp_vdev_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_VDEV, params)
155 #define dp_vdev_info(params...) \
156 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_VDEV, ## params)
157 #define dp_vdev_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_VDEV, params)
158 
159 void dp_configure_arch_ops(struct dp_soc *soc);
160 qdf_size_t dp_get_soc_context_size(uint16_t device_id);
161 
162 /*
163  * The max size of cdp_peer_stats_param_t is limited to 16 bytes.
164  * If the buffer size is exceeding this size limit,
165  * dp_txrx_get_peer_stats is to be used instead.
166  */
167 QDF_COMPILE_TIME_ASSERT(cdp_peer_stats_param_t_max_size,
168 			(sizeof(cdp_peer_stats_param_t) <= 16));
169 
170 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
171 /*
172  * If WLAN_CFG_INT_NUM_CONTEXTS is changed, HIF_NUM_INT_CONTEXTS
173  * also should be updated accordingly
174  */
175 QDF_COMPILE_TIME_ASSERT(num_intr_grps,
176 			HIF_NUM_INT_CONTEXTS == WLAN_CFG_INT_NUM_CONTEXTS);
177 
178 /*
179  * HIF_EVENT_HIST_MAX should always be power of 2
180  */
181 QDF_COMPILE_TIME_ASSERT(hif_event_history_size,
182 			(HIF_EVENT_HIST_MAX & (HIF_EVENT_HIST_MAX - 1)) == 0);
183 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
184 
185 /*
186  * If WLAN_CFG_INT_NUM_CONTEXTS is changed,
187  * WLAN_CFG_INT_NUM_CONTEXTS_MAX should also be updated
188  */
189 QDF_COMPILE_TIME_ASSERT(wlan_cfg_num_int_ctxs,
190 			WLAN_CFG_INT_NUM_CONTEXTS_MAX >=
191 			WLAN_CFG_INT_NUM_CONTEXTS);
192 
193 static QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl);
194 static QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl);
195 
196 static void dp_pdev_srng_deinit(struct dp_pdev *pdev);
197 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev);
198 static void dp_pdev_srng_free(struct dp_pdev *pdev);
199 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev);
200 
201 static void dp_soc_srng_deinit(struct dp_soc *soc);
202 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc);
203 static void dp_soc_srng_free(struct dp_soc *soc);
204 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc);
205 
206 static void dp_soc_cfg_init(struct dp_soc *soc);
207 static void dp_soc_cfg_attach(struct dp_soc *soc);
208 
209 static inline
210 QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
211 				struct cdp_pdev_attach_params *params);
212 
213 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id);
214 
215 static QDF_STATUS
216 dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
217 		   HTC_HANDLE htc_handle,
218 		   qdf_device_t qdf_osdev,
219 		   uint8_t pdev_id);
220 
221 static QDF_STATUS
222 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id, int force);
223 
224 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc);
225 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc);
226 
227 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
228 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc,
229 				       uint8_t pdev_id,
230 				       int force);
231 static struct dp_soc *
232 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
233 	      struct cdp_soc_attach_params *params);
234 static inline QDF_STATUS dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl,
235 					      uint8_t vdev_id,
236 					      uint8_t *peer_mac_addr,
237 					      enum cdp_peer_type peer_type);
238 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
239 				       uint8_t vdev_id,
240 				       uint8_t *peer_mac, uint32_t bitmap,
241 				       enum cdp_peer_type peer_type);
242 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
243 				bool unmap_only,
244 				bool mlo_peers_only);
245 #ifdef ENABLE_VERBOSE_DEBUG
246 bool is_dp_verbose_debug_enabled;
247 #endif
248 
249 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
250 static bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
251 static void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
252 			   bool enable);
253 static inline void
254 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
255 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats);
256 static inline void
257 dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
258 #endif
259 
260 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
261 						uint8_t index);
262 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index);
263 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index);
264 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
265 						 uint8_t index);
266 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
267 					    enum hal_ring_type ring_type,
268 					    int ring_num);
269 #ifdef DP_UMAC_HW_RESET_SUPPORT
270 static QDF_STATUS dp_umac_reset_handle_pre_reset(struct dp_soc *soc);
271 static QDF_STATUS dp_umac_reset_handle_post_reset(struct dp_soc *soc);
272 static QDF_STATUS dp_umac_reset_handle_post_reset_complete(struct dp_soc *soc);
273 #endif
274 
275 #define DP_INTR_POLL_TIMER_MS	5
276 
277 #define MON_VDEV_TIMER_INIT 0x1
278 #define MON_VDEV_TIMER_RUNNING 0x2
279 
280 #define DP_MCS_LENGTH (6*MAX_MCS)
281 
282 #define DP_CURR_FW_STATS_AVAIL 19
283 #define DP_HTT_DBG_EXT_STATS_MAX 256
284 #define DP_MAX_SLEEP_TIME 100
285 #ifndef QCA_WIFI_3_0_EMU
286 #define SUSPEND_DRAIN_WAIT 500
287 #else
288 #define SUSPEND_DRAIN_WAIT 3000
289 #endif
290 
291 #ifdef IPA_OFFLOAD
292 /* Exclude IPA rings from the interrupt context */
293 #define TX_RING_MASK_VAL	0xb
294 #define RX_RING_MASK_VAL	0x7
295 #else
296 #define TX_RING_MASK_VAL	0xF
297 #define RX_RING_MASK_VAL	0xF
298 #endif
299 
300 #define STR_MAXLEN	64
301 
302 #define RNG_ERR		"SRNG setup failed for"
303 
304 /*
305  * default_dscp_tid_map - Default DSCP-TID mapping
306  *
307  * DSCP        TID
308  * 000000      0
309  * 001000      1
310  * 010000      2
311  * 011000      3
312  * 100000      4
313  * 101000      5
314  * 110000      6
315  * 111000      7
316  */
317 static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
318 	0, 0, 0, 0, 0, 0, 0, 0,
319 	1, 1, 1, 1, 1, 1, 1, 1,
320 	2, 2, 2, 2, 2, 2, 2, 2,
321 	3, 3, 3, 3, 3, 3, 3, 3,
322 	4, 4, 4, 4, 4, 4, 4, 4,
323 	5, 5, 5, 5, 5, 5, 5, 5,
324 	6, 6, 6, 6, 6, 6, 6, 6,
325 	7, 7, 7, 7, 7, 7, 7, 7,
326 };
327 
328 /*
329  * default_pcp_tid_map - Default PCP-TID mapping
330  *
331  * PCP     TID
332  * 000      0
333  * 001      1
334  * 010      2
335  * 011      3
336  * 100      4
337  * 101      5
338  * 110      6
339  * 111      7
340  */
341 static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
342 	0, 1, 2, 3, 4, 5, 6, 7,
343 };
344 
345 /*
346  * Cpu to tx ring map
347  */
348 uint8_t
349 dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
350 	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
351 	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
352 	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
353 	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
354 	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
355 #ifdef WLAN_TX_PKT_CAPTURE_ENH
356 	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
357 #endif
358 };
359 
360 qdf_export_symbol(dp_cpu_ring_map);
361 
362 /**
363  * enum dp_stats_type - Select the type of statistics
364  * @STATS_FW: Firmware-based statistic
365  * @STATS_HOST: Host-based statistic
366  * @STATS_TYPE_MAX: maximum enumeration
367  */
368 enum dp_stats_type {
369 	STATS_FW = 0,
370 	STATS_HOST = 1,
371 	STATS_TYPE_MAX = 2,
372 };
373 
374 /**
375  * enum dp_fw_stats - General Firmware statistics options
376  * @TXRX_FW_STATS_INVALID: statistic is not available
377  */
378 enum dp_fw_stats {
379 	TXRX_FW_STATS_INVALID	= -1,
380 };
381 
382 /*
383  * dp_stats_mapping_table - Firmware and Host statistics
384  * currently supported
385  */
386 const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
387 	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
388 	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
389 	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
390 	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
391 	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
392 	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
393 	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
394 	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
395 	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
396 	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
397 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
398 	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
399 	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
400 	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
401 	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
402 	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
403 	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
404 	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
405 	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
406 	/* Last ENUM for HTT FW STATS */
407 	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
408 	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
409 	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
410 	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
411 	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
412 	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
413 	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
414 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
415 	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
416 	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
417 	{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
418 	{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
419 	{TXRX_FW_STATS_INVALID, TXRX_NAPI_STATS},
420 	{TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
421 	{TXRX_FW_STATS_INVALID, TXRX_SOC_FSE_STATS},
422 	{TXRX_FW_STATS_INVALID, TXRX_HAL_REG_WRITE_STATS},
423 	{TXRX_FW_STATS_INVALID, TXRX_SOC_REO_HW_DESC_DUMP},
424 	{TXRX_FW_STATS_INVALID, TXRX_SOC_WBM_IDLE_HPTP_DUMP},
425 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_USAGE_WM_STATS},
426 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT, TXRX_HOST_STATS_INVALID},
427 	{HTT_DBG_EXT_STATS_TX_SOUNDING_INFO, TXRX_HOST_STATS_INVALID}
428 };
429 
430 /* MCL specific functions */
431 #if defined(DP_CON_MON)
432 
433 #ifdef DP_CON_MON_MSI_ENABLED
434 /**
435  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
436  * @soc: pointer to dp_soc handle
437  * @intr_ctx_num: interrupt context number for which mon mask is needed
438  *
439  * For MCL, monitor mode rings are being processed in timer contexts (polled).
440  * This function is returning 0, since in interrupt mode(softirq based RX),
441  * we donot want to process monitor mode rings in a softirq.
442  *
443  * So, in case packet log is enabled for SAP/STA/P2P modes,
444  * regular interrupt processing will not process monitor mode rings. It would be
445  * done in a separate timer context.
446  *
447  * Return: 0
448  */
449 static inline uint32_t
450 dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
451 {
452 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
453 }
454 #else
455 /**
456  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
457  * @soc: pointer to dp_soc handle
458  * @intr_ctx_num: interrupt context number for which mon mask is needed
459  *
460  * For MCL, monitor mode rings are being processed in timer contexts (polled).
461  * This function is returning 0, since in interrupt mode(softirq based RX),
462  * we donot want to process monitor mode rings in a softirq.
463  *
464  * So, in case packet log is enabled for SAP/STA/P2P modes,
465  * regular interrupt processing will not process monitor mode rings. It would be
466  * done in a separate timer context.
467  *
468  * Return: 0
469  */
470 static inline uint32_t
471 dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
472 {
473 	return 0;
474 }
475 #endif
476 
477 #ifdef IPA_OFFLOAD
478 /**
479  * dp_get_num_rx_contexts() - get number of RX contexts
480  * @soc_hdl: cdp opaque soc handle
481  *
482  * Return: number of RX contexts
483  */
484 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
485 {
486 	int num_rx_contexts;
487 	uint32_t reo_ring_map;
488 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
489 
490 	reo_ring_map = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
491 
492 	switch (soc->arch_id) {
493 	case CDP_ARCH_TYPE_BE:
494 		/* 2 REO rings are used for IPA */
495 		reo_ring_map &=  ~(BIT(3) | BIT(7));
496 
497 		break;
498 	case CDP_ARCH_TYPE_LI:
499 		/* 1 REO ring is used for IPA */
500 		reo_ring_map &=  ~BIT(3);
501 		break;
502 	default:
503 		dp_err("unknown arch_id 0x%x", soc->arch_id);
504 		QDF_BUG(0);
505 	}
506 	/*
507 	 * qdf_get_hweight32 prefer over qdf_get_hweight8 in case map is scaled
508 	 * in future
509 	 */
510 	num_rx_contexts = qdf_get_hweight32(reo_ring_map);
511 
512 	return num_rx_contexts;
513 }
514 #else
515 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
516 {
517 	int num_rx_contexts;
518 	uint32_t reo_config;
519 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
520 
521 	reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
522 	/*
523 	 * qdf_get_hweight32 prefer over qdf_get_hweight8 in case map is scaled
524 	 * in future
525 	 */
526 	num_rx_contexts = qdf_get_hweight32(reo_config);
527 
528 	return num_rx_contexts;
529 }
530 #endif
531 
532 #else
533 
534 /**
535  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
536  * @soc: pointer to dp_soc handle
537  * @intr_ctx_num: interrupt context number for which mon mask is needed
538  *
539  * Return: mon mask value
540  */
541 static inline
542 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
543 {
544 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
545 }
546 
547 void dp_soc_reset_mon_intr_mask(struct dp_soc *soc)
548 {
549 	int i;
550 
551 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
552 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
553 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
554 	}
555 }
556 
557 qdf_export_symbol(dp_soc_reset_mon_intr_mask);
558 
559 /**
560  * dp_service_lmac_rings()- timer to reap lmac rings
561  * @arg: SoC Handle
562  *
563  * Return:
564  *
565  */
566 static void dp_service_lmac_rings(void *arg)
567 {
568 	struct dp_soc *soc = (struct dp_soc *)arg;
569 	int ring = 0, i;
570 	struct dp_pdev *pdev = NULL;
571 	union dp_rx_desc_list_elem_t *desc_list = NULL;
572 	union dp_rx_desc_list_elem_t *tail = NULL;
573 
574 	/* Process LMAC interrupts */
575 	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
576 		int mac_for_pdev = ring;
577 		struct dp_srng *rx_refill_buf_ring;
578 
579 		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
580 		if (!pdev)
581 			continue;
582 
583 		rx_refill_buf_ring = &soc->rx_refill_buf_ring[mac_for_pdev];
584 
585 		dp_monitor_process(soc, NULL, mac_for_pdev,
586 				   QCA_NAPI_BUDGET);
587 
588 		for (i = 0;
589 		     i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
590 			dp_rxdma_err_process(&soc->intr_ctx[i], soc,
591 					     mac_for_pdev,
592 					     QCA_NAPI_BUDGET);
593 
594 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF,
595 						  mac_for_pdev))
596 			dp_rx_buffers_replenish(soc, mac_for_pdev,
597 						rx_refill_buf_ring,
598 						&soc->rx_desc_buf[mac_for_pdev],
599 						0, &desc_list, &tail, false);
600 	}
601 
602 	qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
603 }
604 
605 #endif
606 
607 #ifdef FEATURE_MEC
608 void dp_peer_mec_flush_entries(struct dp_soc *soc)
609 {
610 	unsigned int index;
611 	struct dp_mec_entry *mecentry, *mecentry_next;
612 
613 	TAILQ_HEAD(, dp_mec_entry) free_list;
614 	TAILQ_INIT(&free_list);
615 
616 	if (!soc->mec_hash.mask)
617 		return;
618 
619 	if (!soc->mec_hash.bins)
620 		return;
621 
622 	if (!qdf_atomic_read(&soc->mec_cnt))
623 		return;
624 
625 	qdf_spin_lock_bh(&soc->mec_lock);
626 	for (index = 0; index <= soc->mec_hash.mask; index++) {
627 		if (!TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
628 			TAILQ_FOREACH_SAFE(mecentry, &soc->mec_hash.bins[index],
629 					   hash_list_elem, mecentry_next) {
630 			    dp_peer_mec_detach_entry(soc, mecentry, &free_list);
631 			}
632 		}
633 	}
634 	qdf_spin_unlock_bh(&soc->mec_lock);
635 
636 	dp_peer_mec_free_list(soc, &free_list);
637 }
638 
639 /**
640  * dp_print_mec_stats() - Dump MEC entries in table
641  * @soc: Datapath soc handle
642  *
643  * Return: none
644  */
645 static void dp_print_mec_stats(struct dp_soc *soc)
646 {
647 	int i;
648 	uint32_t index;
649 	struct dp_mec_entry *mecentry = NULL, *mec_list;
650 	uint32_t num_entries = 0;
651 
652 	DP_PRINT_STATS("MEC Stats:");
653 	DP_PRINT_STATS("   Entries Added   = %d", soc->stats.mec.added);
654 	DP_PRINT_STATS("   Entries Deleted = %d", soc->stats.mec.deleted);
655 
656 	if (!qdf_atomic_read(&soc->mec_cnt))
657 		return;
658 
659 	mec_list = qdf_mem_malloc(sizeof(*mecentry) * DP_PEER_MAX_MEC_ENTRY);
660 	if (!mec_list) {
661 		dp_peer_warn("%pK: failed to allocate mec_list", soc);
662 		return;
663 	}
664 
665 	DP_PRINT_STATS("MEC Table:");
666 	for (index = 0; index <= soc->mec_hash.mask; index++) {
667 		qdf_spin_lock_bh(&soc->mec_lock);
668 		if (TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
669 			qdf_spin_unlock_bh(&soc->mec_lock);
670 			continue;
671 		}
672 
673 		TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index],
674 			      hash_list_elem) {
675 			qdf_mem_copy(&mec_list[num_entries], mecentry,
676 				     sizeof(*mecentry));
677 			num_entries++;
678 		}
679 		qdf_spin_unlock_bh(&soc->mec_lock);
680 	}
681 
682 	if (!num_entries) {
683 		qdf_mem_free(mec_list);
684 		return;
685 	}
686 
687 	for (i = 0; i < num_entries; i++) {
688 		DP_PRINT_STATS("%6d mac_addr = " QDF_MAC_ADDR_FMT
689 			       " is_active = %d pdev_id = %d vdev_id = %d",
690 			       i,
691 			       QDF_MAC_ADDR_REF(mec_list[i].mac_addr.raw),
692 			       mec_list[i].is_active,
693 			       mec_list[i].pdev_id,
694 			       mec_list[i].vdev_id);
695 	}
696 	qdf_mem_free(mec_list);
697 }
698 #else
699 static void dp_print_mec_stats(struct dp_soc *soc)
700 {
701 }
702 #endif
703 
704 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
705 				 uint8_t vdev_id,
706 				 uint8_t *peer_mac,
707 				 uint8_t *mac_addr,
708 				 enum cdp_txrx_ast_entry_type type,
709 				 uint32_t flags)
710 {
711 	int ret = -1;
712 	QDF_STATUS status = QDF_STATUS_SUCCESS;
713 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
714 						       peer_mac, 0, vdev_id,
715 						       DP_MOD_ID_CDP);
716 
717 	if (!peer) {
718 		dp_peer_debug("Peer is NULL!");
719 		return ret;
720 	}
721 
722 	status = dp_peer_add_ast((struct dp_soc *)soc_hdl,
723 				 peer,
724 				 mac_addr,
725 				 type,
726 				 flags);
727 	if ((status == QDF_STATUS_SUCCESS) ||
728 	    (status == QDF_STATUS_E_ALREADY) ||
729 	    (status == QDF_STATUS_E_AGAIN))
730 		ret = 0;
731 
732 	dp_hmwds_ast_add_notify(peer, mac_addr,
733 				type, status, false);
734 
735 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
736 
737 	return ret;
738 }
739 
740 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
741 						uint8_t vdev_id,
742 						uint8_t *peer_mac,
743 						uint8_t *wds_macaddr,
744 						uint32_t flags)
745 {
746 	int status = -1;
747 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
748 	struct dp_ast_entry  *ast_entry = NULL;
749 	struct dp_peer *peer;
750 
751 	if (soc->ast_offload_support)
752 		return status;
753 
754 	peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
755 				      peer_mac, 0, vdev_id,
756 				      DP_MOD_ID_CDP);
757 
758 	if (!peer) {
759 		dp_peer_debug("Peer is NULL!");
760 		return status;
761 	}
762 
763 	qdf_spin_lock_bh(&soc->ast_lock);
764 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
765 						    peer->vdev->pdev->pdev_id);
766 
767 	if (ast_entry) {
768 		status = dp_peer_update_ast(soc,
769 					    peer,
770 					    ast_entry, flags);
771 	}
772 	qdf_spin_unlock_bh(&soc->ast_lock);
773 
774 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
775 
776 	return status;
777 }
778 
779 /**
780  * dp_peer_reset_ast_entries() - Deletes all HMWDS entries for a peer
781  * @soc:		Datapath SOC handle
782  * @peer:		DP peer
783  * @arg:		callback argument
784  *
785  * Return: None
786  */
787 static void
788 dp_peer_reset_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
789 {
790 	struct dp_ast_entry *ast_entry = NULL;
791 	struct dp_ast_entry *tmp_ast_entry;
792 
793 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) {
794 		if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
795 		    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
796 			dp_peer_del_ast(soc, ast_entry);
797 	}
798 }
799 
800 /**
801  * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry
802  * @soc_hdl:		Datapath SOC handle
803  * @wds_macaddr:	WDS entry MAC Address
804  * @peer_mac_addr:	WDS entry MAC Address
805  * @vdev_id:		id of vdev handle
806  *
807  * Return: QDF_STATUS
808  */
809 static QDF_STATUS dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
810 					 uint8_t *wds_macaddr,
811 					 uint8_t *peer_mac_addr,
812 					 uint8_t vdev_id)
813 {
814 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
815 	struct dp_ast_entry *ast_entry = NULL;
816 	struct dp_peer *peer;
817 	struct dp_pdev *pdev;
818 	struct dp_vdev *vdev;
819 
820 	if (soc->ast_offload_support)
821 		return QDF_STATUS_E_FAILURE;
822 
823 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
824 
825 	if (!vdev)
826 		return QDF_STATUS_E_FAILURE;
827 
828 	pdev = vdev->pdev;
829 
830 	if (peer_mac_addr) {
831 		peer = dp_peer_find_hash_find(soc, peer_mac_addr,
832 					      0, vdev->vdev_id,
833 					      DP_MOD_ID_CDP);
834 		if (!peer) {
835 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
836 			return QDF_STATUS_E_FAILURE;
837 		}
838 
839 		qdf_spin_lock_bh(&soc->ast_lock);
840 		dp_peer_reset_ast_entries(soc, peer, NULL);
841 		qdf_spin_unlock_bh(&soc->ast_lock);
842 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
843 	} else if (wds_macaddr) {
844 		qdf_spin_lock_bh(&soc->ast_lock);
845 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
846 							    pdev->pdev_id);
847 
848 		if (ast_entry) {
849 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
850 			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
851 				dp_peer_del_ast(soc, ast_entry);
852 		}
853 		qdf_spin_unlock_bh(&soc->ast_lock);
854 	}
855 
856 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
857 	return QDF_STATUS_SUCCESS;
858 }
859 
860 /**
861  * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entry
862  * @soc_hdl:		Datapath SOC handle
863  * @vdev_id:		id of vdev object
864  *
865  * Return: QDF_STATUS
866  */
867 static QDF_STATUS
868 dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
869 			     uint8_t vdev_id)
870 {
871 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
872 
873 	if (soc->ast_offload_support)
874 		return QDF_STATUS_SUCCESS;
875 
876 	qdf_spin_lock_bh(&soc->ast_lock);
877 
878 	dp_soc_iterate_peer(soc, dp_peer_reset_ast_entries, NULL,
879 			    DP_MOD_ID_CDP);
880 	qdf_spin_unlock_bh(&soc->ast_lock);
881 
882 	return QDF_STATUS_SUCCESS;
883 }
884 
885 /**
886  * dp_peer_flush_ast_entries() - Delete all wds and hmwds ast entries of a peer
887  * @soc:		Datapath SOC
888  * @peer:		Datapath peer
889  * @arg:		arg to callback
890  *
891  * Return: None
892  */
893 static void
894 dp_peer_flush_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
895 {
896 	struct dp_ast_entry *ase = NULL;
897 	struct dp_ast_entry *temp_ase;
898 
899 	DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
900 		if ((ase->type ==
901 			CDP_TXRX_AST_TYPE_STATIC) ||
902 			(ase->type ==
903 			 CDP_TXRX_AST_TYPE_SELF) ||
904 			(ase->type ==
905 			 CDP_TXRX_AST_TYPE_STA_BSS))
906 			continue;
907 		dp_peer_del_ast(soc, ase);
908 	}
909 }
910 
911 /**
912  * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entry
913  * @soc_hdl:		Datapath SOC handle
914  *
915  * Return: None
916  */
917 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
918 {
919 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
920 
921 	qdf_spin_lock_bh(&soc->ast_lock);
922 
923 	dp_soc_iterate_peer(soc, dp_peer_flush_ast_entries, NULL,
924 			    DP_MOD_ID_CDP);
925 
926 	qdf_spin_unlock_bh(&soc->ast_lock);
927 	dp_peer_mec_flush_entries(soc);
928 }
929 
930 #if defined(IPA_WDS_EASYMESH_FEATURE) && defined(FEATURE_AST)
931 /**
932  * dp_peer_send_wds_disconnect() - Send Disconnect event to IPA for each peer
933  * @soc: Datapath SOC
934  * @peer: Datapath peer
935  *
936  * Return: None
937  */
938 static void
939 dp_peer_send_wds_disconnect(struct dp_soc *soc, struct dp_peer *peer)
940 {
941 	struct dp_ast_entry *ase = NULL;
942 	struct dp_ast_entry *temp_ase;
943 
944 	DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
945 		if (ase->type == CDP_TXRX_AST_TYPE_WDS) {
946 			soc->cdp_soc.ol_ops->peer_send_wds_disconnect(soc->ctrl_psoc,
947 								      ase->mac_addr.raw,
948 								      ase->vdev_id);
949 		}
950 	}
951 }
952 #elif defined(FEATURE_AST)
953 static void
954 dp_peer_send_wds_disconnect(struct dp_soc *soc, struct dp_peer *peer)
955 {
956 }
957 #endif
958 
959 /**
960  * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
961  *                                       and return ast entry information
962  *                                       of first ast entry found in the
963  *                                       table with given mac address
964  * @soc_hdl: data path soc handle
965  * @ast_mac_addr: AST entry mac address
966  * @ast_entry_info: ast entry information
967  *
968  * Return: true if ast entry found with ast_mac_addr
969  *          false if ast entry not found
970  */
971 static bool dp_peer_get_ast_info_by_soc_wifi3
972 	(struct cdp_soc_t *soc_hdl,
973 	 uint8_t *ast_mac_addr,
974 	 struct cdp_ast_entry_info *ast_entry_info)
975 {
976 	struct dp_ast_entry *ast_entry = NULL;
977 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
978 	struct dp_peer *peer = NULL;
979 
980 	if (soc->ast_offload_support)
981 		return false;
982 
983 	qdf_spin_lock_bh(&soc->ast_lock);
984 
985 	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
986 	if ((!ast_entry) ||
987 	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
988 		qdf_spin_unlock_bh(&soc->ast_lock);
989 		return false;
990 	}
991 
992 	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
993 				     DP_MOD_ID_AST);
994 	if (!peer) {
995 		qdf_spin_unlock_bh(&soc->ast_lock);
996 		return false;
997 	}
998 
999 	ast_entry_info->type = ast_entry->type;
1000 	ast_entry_info->pdev_id = ast_entry->pdev_id;
1001 	ast_entry_info->vdev_id = ast_entry->vdev_id;
1002 	ast_entry_info->peer_id = ast_entry->peer_id;
1003 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
1004 		     &peer->mac_addr.raw[0],
1005 		     QDF_MAC_ADDR_SIZE);
1006 	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
1007 	qdf_spin_unlock_bh(&soc->ast_lock);
1008 	return true;
1009 }
1010 
1011 /**
1012  * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
1013  *                                          and return ast entry information
1014  *                                          if mac address and pdev_id matches
1015  * @soc_hdl: data path soc handle
1016  * @ast_mac_addr: AST entry mac address
1017  * @pdev_id: pdev_id
1018  * @ast_entry_info: ast entry information
1019  *
1020  * Return: true if ast entry found with ast_mac_addr
1021  *          false if ast entry not found
1022  */
1023 static bool dp_peer_get_ast_info_by_pdevid_wifi3
1024 		(struct cdp_soc_t *soc_hdl,
1025 		 uint8_t *ast_mac_addr,
1026 		 uint8_t pdev_id,
1027 		 struct cdp_ast_entry_info *ast_entry_info)
1028 {
1029 	struct dp_ast_entry *ast_entry;
1030 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
1031 	struct dp_peer *peer = NULL;
1032 
1033 	if (soc->ast_offload_support)
1034 		return false;
1035 
1036 	qdf_spin_lock_bh(&soc->ast_lock);
1037 
1038 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr,
1039 						    pdev_id);
1040 
1041 	if ((!ast_entry) ||
1042 	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
1043 		qdf_spin_unlock_bh(&soc->ast_lock);
1044 		return false;
1045 	}
1046 
1047 	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
1048 				     DP_MOD_ID_AST);
1049 	if (!peer) {
1050 		qdf_spin_unlock_bh(&soc->ast_lock);
1051 		return false;
1052 	}
1053 
1054 	ast_entry_info->type = ast_entry->type;
1055 	ast_entry_info->pdev_id = ast_entry->pdev_id;
1056 	ast_entry_info->vdev_id = ast_entry->vdev_id;
1057 	ast_entry_info->peer_id = ast_entry->peer_id;
1058 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
1059 		     &peer->mac_addr.raw[0],
1060 		     QDF_MAC_ADDR_SIZE);
1061 	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
1062 	qdf_spin_unlock_bh(&soc->ast_lock);
1063 	return true;
1064 }
1065 
1066 /**
1067  * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table
1068  *                            with given mac address
1069  * @soc_handle: data path soc handle
1070  * @mac_addr: AST entry mac address
1071  * @callback: callback function to called on ast delete response from FW
1072  * @cookie: argument to be passed to callback
1073  *
1074  * Return: QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
1075  *          is sent
1076  *          QDF_STATUS_E_INVAL false if ast entry not found
1077  */
1078 static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
1079 					       uint8_t *mac_addr,
1080 					       txrx_ast_free_cb callback,
1081 					       void *cookie)
1082 
1083 {
1084 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1085 	struct dp_ast_entry *ast_entry = NULL;
1086 	txrx_ast_free_cb cb = NULL;
1087 	void *arg = NULL;
1088 
1089 	if (soc->ast_offload_support)
1090 		return -QDF_STATUS_E_INVAL;
1091 
1092 	qdf_spin_lock_bh(&soc->ast_lock);
1093 	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
1094 	if (!ast_entry) {
1095 		qdf_spin_unlock_bh(&soc->ast_lock);
1096 		return -QDF_STATUS_E_INVAL;
1097 	}
1098 
1099 	if (ast_entry->callback) {
1100 		cb = ast_entry->callback;
1101 		arg = ast_entry->cookie;
1102 	}
1103 
1104 	ast_entry->callback = callback;
1105 	ast_entry->cookie = cookie;
1106 
1107 	/*
1108 	 * if delete_in_progress is set AST delete is sent to target
1109 	 * and host is waiting for response should not send delete
1110 	 * again
1111 	 */
1112 	if (!ast_entry->delete_in_progress)
1113 		dp_peer_del_ast(soc, ast_entry);
1114 
1115 	qdf_spin_unlock_bh(&soc->ast_lock);
1116 	if (cb) {
1117 		cb(soc->ctrl_psoc,
1118 		   dp_soc_to_cdp_soc(soc),
1119 		   arg,
1120 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1121 	}
1122 	return QDF_STATUS_SUCCESS;
1123 }
1124 
1125 /**
1126  * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
1127  *                                   table if mac address and pdev_id matches
1128  * @soc_handle: data path soc handle
1129  * @mac_addr: AST entry mac address
1130  * @pdev_id: pdev id
1131  * @callback: callback function to called on ast delete response from FW
1132  * @cookie: argument to be passed to callback
1133  *
1134  * Return: QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
1135  *          is sent
1136  *          QDF_STATUS_E_INVAL false if ast entry not found
1137  */
1138 
1139 static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
1140 						uint8_t *mac_addr,
1141 						uint8_t pdev_id,
1142 						txrx_ast_free_cb callback,
1143 						void *cookie)
1144 
1145 {
1146 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1147 	struct dp_ast_entry *ast_entry;
1148 	txrx_ast_free_cb cb = NULL;
1149 	void *arg = NULL;
1150 
1151 	if (soc->ast_offload_support)
1152 		return -QDF_STATUS_E_INVAL;
1153 
1154 	qdf_spin_lock_bh(&soc->ast_lock);
1155 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);
1156 
1157 	if (!ast_entry) {
1158 		qdf_spin_unlock_bh(&soc->ast_lock);
1159 		return -QDF_STATUS_E_INVAL;
1160 	}
1161 
1162 	if (ast_entry->callback) {
1163 		cb = ast_entry->callback;
1164 		arg = ast_entry->cookie;
1165 	}
1166 
1167 	ast_entry->callback = callback;
1168 	ast_entry->cookie = cookie;
1169 
1170 	/*
1171 	 * if delete_in_progress is set AST delete is sent to target
1172 	 * and host is waiting for response should not sent delete
1173 	 * again
1174 	 */
1175 	if (!ast_entry->delete_in_progress)
1176 		dp_peer_del_ast(soc, ast_entry);
1177 
1178 	qdf_spin_unlock_bh(&soc->ast_lock);
1179 
1180 	if (cb) {
1181 		cb(soc->ctrl_psoc,
1182 		   dp_soc_to_cdp_soc(soc),
1183 		   arg,
1184 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1185 	}
1186 	return QDF_STATUS_SUCCESS;
1187 }
1188 
1189 /**
1190  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs
1191  * @ring_num: ring num of the ring being queried
1192  * @grp_mask: the grp_mask array for the ring type in question.
1193  *
1194  * The grp_mask array is indexed by group number and the bit fields correspond
1195  * to ring numbers.  We are finding which interrupt group a ring belongs to.
1196  *
1197  * Return: the index in the grp_mask array with the ring number.
1198  * -QDF_STATUS_E_NOENT if no entry is found
1199  */
1200 static int dp_srng_find_ring_in_mask(int ring_num, uint8_t *grp_mask)
1201 {
1202 	int ext_group_num;
1203 	uint8_t mask = 1 << ring_num;
1204 
1205 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
1206 	     ext_group_num++) {
1207 		if (mask & grp_mask[ext_group_num])
1208 			return ext_group_num;
1209 	}
1210 
1211 	return -QDF_STATUS_E_NOENT;
1212 }
1213 
1214 /**
1215  * dp_is_msi_group_number_invalid() - check msi_group_number valid or not
1216  * @soc: dp_soc
1217  * @msi_group_number: MSI group number.
1218  * @msi_data_count: MSI data count.
1219  *
1220  * Return: true if msi_group_number is invalid.
1221  */
1222 static bool dp_is_msi_group_number_invalid(struct dp_soc *soc,
1223 					   int msi_group_number,
1224 					   int msi_data_count)
1225 {
1226 	if (soc && soc->osdev && soc->osdev->dev &&
1227 	    pld_is_one_msi(soc->osdev->dev))
1228 		return false;
1229 
1230 	return msi_group_number > msi_data_count;
1231 }
1232 
1233 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1234 /**
1235  * dp_is_reo_ring_num_in_nf_grp1() - Check if the current reo ring is part of
1236  *				rx_near_full_grp1 mask
1237  * @soc: Datapath SoC Handle
1238  * @ring_num: REO ring number
1239  *
1240  * Return: 1 if the ring_num belongs to reo_nf_grp1,
1241  *	   0, otherwise.
1242  */
1243 static inline int
1244 dp_is_reo_ring_num_in_nf_grp1(struct dp_soc *soc, int ring_num)
1245 {
1246 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_1 & (1 << ring_num));
1247 }
1248 
1249 /**
1250  * dp_is_reo_ring_num_in_nf_grp2() - Check if the current reo ring is part of
1251  *				rx_near_full_grp2 mask
1252  * @soc: Datapath SoC Handle
1253  * @ring_num: REO ring number
1254  *
1255  * Return: 1 if the ring_num belongs to reo_nf_grp2,
1256  *	   0, otherwise.
1257  */
1258 static inline int
1259 dp_is_reo_ring_num_in_nf_grp2(struct dp_soc *soc, int ring_num)
1260 {
1261 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_2 & (1 << ring_num));
1262 }
1263 
1264 /**
1265  * dp_srng_get_near_full_irq_mask() - Get near-full irq mask for a particular
1266  *				ring type and number
1267  * @soc: Datapath SoC handle
1268  * @ring_type: SRNG type
1269  * @ring_num: ring num
1270  *
1271  * Return: near-full irq mask pointer
1272  */
1273 static inline
1274 uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
1275 					enum hal_ring_type ring_type,
1276 					int ring_num)
1277 {
1278 	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
1279 	uint8_t wbm2_sw_rx_rel_ring_id;
1280 	uint8_t *nf_irq_mask = NULL;
1281 
1282 	switch (ring_type) {
1283 	case WBM2SW_RELEASE:
1284 		wbm2_sw_rx_rel_ring_id =
1285 			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
1286 		if (ring_num != wbm2_sw_rx_rel_ring_id) {
1287 			nf_irq_mask = &soc->wlan_cfg_ctx->
1288 					int_tx_ring_near_full_irq_mask[0];
1289 		}
1290 		break;
1291 	case REO_DST:
1292 		if (dp_is_reo_ring_num_in_nf_grp1(soc, ring_num))
1293 			nf_irq_mask =
1294 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_1_mask[0];
1295 		else if (dp_is_reo_ring_num_in_nf_grp2(soc, ring_num))
1296 			nf_irq_mask =
1297 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_2_mask[0];
1298 		else
1299 			qdf_assert(0);
1300 		break;
1301 	default:
1302 		break;
1303 	}
1304 
1305 	return nf_irq_mask;
1306 }
1307 
1308 /**
1309  * dp_srng_set_msi2_ring_params() - Set the msi2 addr/data in the ring params
1310  * @soc: Datapath SoC handle
1311  * @ring_params: srng params handle
1312  * @msi2_addr: MSI2 addr to be set for the SRNG
1313  * @msi2_data: MSI2 data to be set for the SRNG
1314  *
1315  * Return: None
1316  */
1317 static inline
1318 void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
1319 				  struct hal_srng_params *ring_params,
1320 				  qdf_dma_addr_t msi2_addr,
1321 				  uint32_t msi2_data)
1322 {
1323 	ring_params->msi2_addr = msi2_addr;
1324 	ring_params->msi2_data = msi2_data;
1325 }
1326 
1327 /**
1328  * dp_srng_msi2_setup() - Setup MSI2 details for near full IRQ of an SRNG
1329  * @soc: Datapath SoC handle
1330  * @ring_params: ring_params for SRNG
1331  * @ring_type: SENG type
1332  * @ring_num: ring number for the SRNG
1333  * @nf_msi_grp_num: near full msi group number
1334  *
1335  * Return: None
1336  */
1337 static inline void
1338 dp_srng_msi2_setup(struct dp_soc *soc,
1339 		   struct hal_srng_params *ring_params,
1340 		   int ring_type, int ring_num, int nf_msi_grp_num)
1341 {
1342 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
1343 	int msi_data_count, ret;
1344 
1345 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1346 					  &msi_data_count, &msi_data_start,
1347 					  &msi_irq_start);
1348 	if (ret)
1349 		return;
1350 
1351 	if (nf_msi_grp_num < 0) {
1352 		dp_init_info("%pK: ring near full IRQ not part of an ext_group; ring_type: %d,ring_num %d",
1353 			     soc, ring_type, ring_num);
1354 		ring_params->msi2_addr = 0;
1355 		ring_params->msi2_data = 0;
1356 		return;
1357 	}
1358 
1359 	if (dp_is_msi_group_number_invalid(soc, nf_msi_grp_num,
1360 					   msi_data_count)) {
1361 		dp_init_warn("%pK: 2 msi_groups will share an msi for near full IRQ; msi_group_num %d",
1362 			     soc, nf_msi_grp_num);
1363 		QDF_ASSERT(0);
1364 	}
1365 
1366 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
1367 
1368 	ring_params->nf_irq_support = 1;
1369 	ring_params->msi2_addr = addr_low;
1370 	ring_params->msi2_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
1371 	ring_params->msi2_data = (nf_msi_grp_num % msi_data_count)
1372 		+ msi_data_start;
1373 	ring_params->flags |= HAL_SRNG_MSI_INTR;
1374 }
1375 
1376 /* Percentage of ring entries considered as nearly full */
1377 #define DP_NF_HIGH_THRESH_PERCENTAGE	75
1378 /* Percentage of ring entries considered as critically full */
1379 #define DP_NF_CRIT_THRESH_PERCENTAGE	90
1380 /* Percentage of ring entries considered as safe threshold */
1381 #define DP_NF_SAFE_THRESH_PERCENTAGE	50
1382 
1383 /**
1384  * dp_srng_configure_nf_interrupt_thresholds() - Configure the thresholds for
1385  *			near full irq
1386  * @soc: Datapath SoC handle
1387  * @ring_params: ring params for SRNG
1388  * @ring_type: ring type
1389  */
1390 static inline void
1391 dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
1392 					  struct hal_srng_params *ring_params,
1393 					  int ring_type)
1394 {
1395 	if (ring_params->nf_irq_support) {
1396 		ring_params->high_thresh = (ring_params->num_entries *
1397 					    DP_NF_HIGH_THRESH_PERCENTAGE) / 100;
1398 		ring_params->crit_thresh = (ring_params->num_entries *
1399 					    DP_NF_CRIT_THRESH_PERCENTAGE) / 100;
1400 		ring_params->safe_thresh = (ring_params->num_entries *
1401 					    DP_NF_SAFE_THRESH_PERCENTAGE) /100;
1402 	}
1403 }
1404 
1405 /**
1406  * dp_srng_set_nf_thresholds() - Set the near full thresholds to srng data
1407  *			structure from the ring params
1408  * @soc: Datapath SoC handle
1409  * @srng: SRNG handle
1410  * @ring_params: ring params for a SRNG
1411  *
1412  * Return: None
1413  */
1414 static inline void
1415 dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
1416 			  struct hal_srng_params *ring_params)
1417 {
1418 	srng->crit_thresh = ring_params->crit_thresh;
1419 	srng->safe_thresh = ring_params->safe_thresh;
1420 }
1421 
1422 #else
1423 static inline
1424 uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
1425 					enum hal_ring_type ring_type,
1426 					int ring_num)
1427 {
1428 	return NULL;
1429 }
1430 
1431 static inline
1432 void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
1433 				  struct hal_srng_params *ring_params,
1434 				  qdf_dma_addr_t msi2_addr,
1435 				  uint32_t msi2_data)
1436 {
1437 }
1438 
1439 static inline void
1440 dp_srng_msi2_setup(struct dp_soc *soc,
1441 		   struct hal_srng_params *ring_params,
1442 		   int ring_type, int ring_num, int nf_msi_grp_num)
1443 {
1444 }
1445 
1446 static inline void
1447 dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
1448 					  struct hal_srng_params *ring_params,
1449 					  int ring_type)
1450 {
1451 }
1452 
1453 static inline void
1454 dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
1455 			  struct hal_srng_params *ring_params)
1456 {
1457 }
1458 #endif
1459 
1460 static int dp_srng_calculate_msi_group(struct dp_soc *soc,
1461 				       enum hal_ring_type ring_type,
1462 				       int ring_num,
1463 				       int *reg_msi_grp_num,
1464 				       bool nf_irq_support,
1465 				       int *nf_msi_grp_num)
1466 {
1467 	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
1468 	uint8_t *grp_mask, *nf_irq_mask = NULL;
1469 	bool nf_irq_enabled = false;
1470 	uint8_t wbm2_sw_rx_rel_ring_id;
1471 
1472 	switch (ring_type) {
1473 	case WBM2SW_RELEASE:
1474 		wbm2_sw_rx_rel_ring_id =
1475 			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
1476 		if (ring_num == wbm2_sw_rx_rel_ring_id) {
1477 			/* dp_rx_wbm_err_process - soc->rx_rel_ring */
1478 			grp_mask = &cfg_ctx->int_rx_wbm_rel_ring_mask[0];
1479 			ring_num = 0;
1480 		} else if (ring_num == WBM2_SW_PPE_REL_RING_ID) {
1481 			grp_mask = &cfg_ctx->int_ppeds_wbm_release_ring_mask[0];
1482 			ring_num = 0;
1483 		}  else { /* dp_tx_comp_handler - soc->tx_comp_ring */
1484 			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
1485 			nf_irq_mask = dp_srng_get_near_full_irq_mask(soc,
1486 								     ring_type,
1487 								     ring_num);
1488 			if (nf_irq_mask)
1489 				nf_irq_enabled = true;
1490 
1491 			/*
1492 			 * Using ring 4 as 4th tx completion ring since ring 3
1493 			 * is Rx error ring
1494 			 */
1495 			if (ring_num == WBM2SW_TXCOMP_RING4_NUM)
1496 				ring_num = TXCOMP_RING4_NUM;
1497 		}
1498 	break;
1499 
1500 	case REO_EXCEPTION:
1501 		/* dp_rx_err_process - &soc->reo_exception_ring */
1502 		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
1503 	break;
1504 
1505 	case REO_DST:
1506 		/* dp_rx_process - soc->reo_dest_ring */
1507 		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
1508 		nf_irq_mask = dp_srng_get_near_full_irq_mask(soc, ring_type,
1509 							     ring_num);
1510 		if (nf_irq_mask)
1511 			nf_irq_enabled = true;
1512 	break;
1513 
1514 	case REO_STATUS:
1515 		/* dp_reo_status_ring_handler - soc->reo_status_ring */
1516 		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
1517 	break;
1518 
1519 	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
1520 	case RXDMA_MONITOR_STATUS:
1521 	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
1522 	case RXDMA_MONITOR_DST:
1523 		/* dp_mon_process */
1524 		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
1525 	break;
1526 	case TX_MONITOR_DST:
1527 		/* dp_tx_mon_process */
1528 		grp_mask = &soc->wlan_cfg_ctx->int_tx_mon_ring_mask[0];
1529 	break;
1530 	case RXDMA_DST:
1531 		/* dp_rxdma_err_process */
1532 		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
1533 	break;
1534 
1535 	case RXDMA_BUF:
1536 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
1537 	break;
1538 
1539 	case RXDMA_MONITOR_BUF:
1540 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
1541 	break;
1542 
1543 	case TX_MONITOR_BUF:
1544 		grp_mask = &soc->wlan_cfg_ctx->int_host2txmon_ring_mask[0];
1545 	break;
1546 
1547 	case REO2PPE:
1548 		grp_mask = &soc->wlan_cfg_ctx->int_reo2ppe_ring_mask[0];
1549 	break;
1550 
1551 	case PPE2TCL:
1552 		grp_mask = &soc->wlan_cfg_ctx->int_ppe2tcl_ring_mask[0];
1553 	break;
1554 
1555 	case TCL_DATA:
1556 	/* CMD_CREDIT_RING is used as command in 8074 and credit in 9000 */
1557 	case TCL_CMD_CREDIT:
1558 	case REO_CMD:
1559 	case SW2WBM_RELEASE:
1560 	case WBM_IDLE_LINK:
1561 		/* normally empty SW_TO_HW rings */
1562 		return -QDF_STATUS_E_NOENT;
1563 	break;
1564 
1565 	case TCL_STATUS:
1566 	case REO_REINJECT:
1567 		/* misc unused rings */
1568 		return -QDF_STATUS_E_NOENT;
1569 	break;
1570 
1571 	case CE_SRC:
1572 	case CE_DST:
1573 	case CE_DST_STATUS:
1574 		/* CE_rings - currently handled by hif */
1575 	default:
1576 		return -QDF_STATUS_E_NOENT;
1577 	break;
1578 	}
1579 
1580 	*reg_msi_grp_num = dp_srng_find_ring_in_mask(ring_num, grp_mask);
1581 
1582 	if (nf_irq_support && nf_irq_enabled) {
1583 		*nf_msi_grp_num = dp_srng_find_ring_in_mask(ring_num,
1584 							    nf_irq_mask);
1585 	}
1586 
1587 	return QDF_STATUS_SUCCESS;
1588 }
1589 
1590 /**
1591  * dp_get_num_msi_available()- API to get number of MSIs available
1592  * @soc: DP soc Handle
1593  * @interrupt_mode: Mode of interrupts
1594  *
1595  * Return: Number of MSIs available or 0 in case of integrated
1596  */
1597 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
1598 static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
1599 {
1600 	return 0;
1601 }
1602 #else
1603 static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
1604 {
1605 	int msi_data_count;
1606 	int msi_data_start;
1607 	int msi_irq_start;
1608 	int ret;
1609 
1610 	if (interrupt_mode == DP_INTR_INTEGRATED) {
1611 		return 0;
1612 	} else if (interrupt_mode == DP_INTR_MSI || interrupt_mode ==
1613 		   DP_INTR_POLL) {
1614 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1615 						  &msi_data_count,
1616 						  &msi_data_start,
1617 						  &msi_irq_start);
1618 		if (ret) {
1619 			qdf_err("Unable to get DP MSI assignment %d",
1620 				interrupt_mode);
1621 			return -EINVAL;
1622 		}
1623 		return msi_data_count;
1624 	}
1625 	qdf_err("Interrupt mode invalid %d", interrupt_mode);
1626 	return -EINVAL;
1627 }
1628 #endif
1629 
1630 #if defined(IPA_OFFLOAD) && defined(IPA_WDI3_VLAN_SUPPORT)
1631 static void
1632 dp_ipa_vlan_srng_msi_setup(struct hal_srng_params *ring_params, int ring_type,
1633 			   int ring_num)
1634 {
1635 	if (wlan_ipa_is_vlan_enabled()) {
1636 		if ((ring_type == REO_DST) &&
1637 				(ring_num == IPA_ALT_REO_DEST_RING_IDX)) {
1638 			ring_params->msi_addr = 0;
1639 			ring_params->msi_data = 0;
1640 			ring_params->flags &= ~HAL_SRNG_MSI_INTR;
1641 		}
1642 	}
1643 }
1644 #else
1645 static inline void
1646 dp_ipa_vlan_srng_msi_setup(struct hal_srng_params *ring_params, int ring_type,
1647 			   int ring_num)
1648 {
1649 }
1650 #endif
1651 
1652 static void dp_srng_msi_setup(struct dp_soc *soc, struct dp_srng *srng,
1653 			      struct hal_srng_params *ring_params,
1654 			      int ring_type, int ring_num)
1655 {
1656 	int reg_msi_grp_num;
1657 	/*
1658 	 * nf_msi_grp_num needs to be initialized with negative value,
1659 	 * to avoid configuring near-full msi for WBM2SW3 ring
1660 	 */
1661 	int nf_msi_grp_num = -1;
1662 	int msi_data_count;
1663 	int ret;
1664 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
1665 	bool nf_irq_support;
1666 	int vector;
1667 
1668 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1669 					    &msi_data_count, &msi_data_start,
1670 					    &msi_irq_start);
1671 
1672 	if (ret)
1673 		return;
1674 
1675 	nf_irq_support = hal_srng_is_near_full_irq_supported(soc->hal_soc,
1676 							     ring_type,
1677 							     ring_num);
1678 	ret = dp_srng_calculate_msi_group(soc, ring_type, ring_num,
1679 					  &reg_msi_grp_num,
1680 					  nf_irq_support,
1681 					  &nf_msi_grp_num);
1682 	if (ret < 0) {
1683 		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
1684 			     soc, ring_type, ring_num);
1685 		ring_params->msi_addr = 0;
1686 		ring_params->msi_data = 0;
1687 		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
1688 		return;
1689 	}
1690 
1691 	if (reg_msi_grp_num < 0) {
1692 		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
1693 			     soc, ring_type, ring_num);
1694 		ring_params->msi_addr = 0;
1695 		ring_params->msi_data = 0;
1696 		goto configure_msi2;
1697 	}
1698 
1699 	if (dp_is_msi_group_number_invalid(soc, reg_msi_grp_num,
1700 					   msi_data_count)) {
1701 		dp_init_warn("%pK: 2 msi_groups will share an msi; msi_group_num %d",
1702 			     soc, reg_msi_grp_num);
1703 		QDF_ASSERT(0);
1704 	}
1705 
1706 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
1707 
1708 	ring_params->msi_addr = addr_low;
1709 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
1710 	ring_params->msi_data = (reg_msi_grp_num % msi_data_count)
1711 		+ msi_data_start;
1712 	ring_params->flags |= HAL_SRNG_MSI_INTR;
1713 
1714 	dp_ipa_vlan_srng_msi_setup(ring_params, ring_type, ring_num);
1715 
1716 	dp_debug("ring type %u ring_num %u msi->data %u msi_addr %llx",
1717 		 ring_type, ring_num, ring_params->msi_data,
1718 		 (uint64_t)ring_params->msi_addr);
1719 
1720 	vector = msi_irq_start + (reg_msi_grp_num % msi_data_count);
1721 	if (soc->arch_ops.dp_register_ppeds_interrupts)
1722 		if (soc->arch_ops.dp_register_ppeds_interrupts(soc, srng,
1723 							       vector,
1724 							       ring_type,
1725 							       ring_num))
1726 			return;
1727 
1728 configure_msi2:
1729 	if (!nf_irq_support) {
1730 		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
1731 		return;
1732 	}
1733 
1734 	dp_srng_msi2_setup(soc, ring_params, ring_type, ring_num,
1735 			   nf_msi_grp_num);
1736 }
1737 
1738 #ifdef FEATURE_AST
1739 /**
1740  * dp_print_mlo_ast_stats() - Print AST stats for MLO peers
1741  *
1742  * @soc: core DP soc context
1743  *
1744  * Return: void
1745  */
1746 static void dp_print_mlo_ast_stats(struct dp_soc *soc)
1747 {
1748 	if (soc->arch_ops.print_mlo_ast_stats)
1749 		soc->arch_ops.print_mlo_ast_stats(soc);
1750 }
1751 
1752 void
1753 dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
1754 {
1755 	struct dp_ast_entry *ase, *tmp_ase;
1756 	uint32_t num_entries = 0;
1757 	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
1758 			"NONE", "STATIC", "SELF", "WDS", "HMWDS", "BSS",
1759 			"DA", "HMWDS_SEC", "MLD"};
1760 
1761 	DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
1762 	    DP_PRINT_STATS("%6d mac_addr = "QDF_MAC_ADDR_FMT
1763 		    " peer_mac_addr = "QDF_MAC_ADDR_FMT
1764 		    " peer_id = %u"
1765 		    " type = %s"
1766 		    " next_hop = %d"
1767 		    " is_active = %d"
1768 		    " ast_idx = %d"
1769 		    " ast_hash = %d"
1770 		    " delete_in_progress = %d"
1771 		    " pdev_id = %d"
1772 		    " vdev_id = %d",
1773 		    ++num_entries,
1774 		    QDF_MAC_ADDR_REF(ase->mac_addr.raw),
1775 		    QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1776 		    ase->peer_id,
1777 		    type[ase->type],
1778 		    ase->next_hop,
1779 		    ase->is_active,
1780 		    ase->ast_idx,
1781 		    ase->ast_hash_value,
1782 		    ase->delete_in_progress,
1783 		    ase->pdev_id,
1784 		    ase->vdev_id);
1785 	}
1786 }
1787 
1788 void dp_print_ast_stats(struct dp_soc *soc)
1789 {
1790 	DP_PRINT_STATS("AST Stats:");
1791 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
1792 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
1793 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
1794 	DP_PRINT_STATS("	Entries MAP ERR  = %d", soc->stats.ast.map_err);
1795 	DP_PRINT_STATS("	Entries Mismatch ERR  = %d",
1796 		       soc->stats.ast.ast_mismatch);
1797 
1798 	DP_PRINT_STATS("AST Table:");
1799 
1800 	qdf_spin_lock_bh(&soc->ast_lock);
1801 
1802 	dp_soc_iterate_peer(soc, dp_print_peer_ast_entries, NULL,
1803 			    DP_MOD_ID_GENERIC_STATS);
1804 
1805 	qdf_spin_unlock_bh(&soc->ast_lock);
1806 
1807 	dp_print_mlo_ast_stats(soc);
1808 }
1809 #else
1810 void dp_print_ast_stats(struct dp_soc *soc)
1811 {
1812 	DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST");
1813 	return;
1814 }
1815 #endif
1816 
1817 /**
1818  * dp_print_peer_info() - Dump peer info
1819  * @soc: Datapath soc handle
1820  * @peer: Datapath peer handle
1821  * @arg: argument to iter function
1822  *
1823  * Return: void
1824  */
1825 static void
1826 dp_print_peer_info(struct dp_soc *soc, struct dp_peer *peer, void *arg)
1827 {
1828 	struct dp_txrx_peer *txrx_peer = NULL;
1829 
1830 	txrx_peer = dp_get_txrx_peer(peer);
1831 	if (!txrx_peer)
1832 		return;
1833 
1834 	DP_PRINT_STATS(" peer id = %d"
1835 		       " peer_mac_addr = "QDF_MAC_ADDR_FMT
1836 		       " nawds_enabled = %d"
1837 		       " bss_peer = %d"
1838 		       " wds_enabled = %d"
1839 		       " tx_cap_enabled = %d"
1840 		       " rx_cap_enabled = %d",
1841 		       peer->peer_id,
1842 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1843 		       txrx_peer->nawds_enabled,
1844 		       txrx_peer->bss_peer,
1845 		       txrx_peer->wds_enabled,
1846 		       dp_monitor_is_tx_cap_enabled(peer),
1847 		       dp_monitor_is_rx_cap_enabled(peer));
1848 }
1849 
1850 /**
1851  * dp_print_peer_table() - Dump all Peer stats
1852  * @vdev: Datapath Vdev handle
1853  *
1854  * Return: void
1855  */
1856 static void dp_print_peer_table(struct dp_vdev *vdev)
1857 {
1858 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
1859 	dp_vdev_iterate_peer(vdev, dp_print_peer_info, NULL,
1860 			     DP_MOD_ID_GENERIC_STATS);
1861 }
1862 
1863 #ifdef WLAN_DP_PER_RING_TYPE_CONFIG
1864 /**
1865  * dp_srng_configure_interrupt_thresholds() - Retrieve interrupt
1866  * threshold values from the wlan_srng_cfg table for each ring type
1867  * @soc: device handle
1868  * @ring_params: per ring specific parameters
1869  * @ring_type: Ring type
1870  * @ring_num: Ring number for a given ring type
1871  * @num_entries: number of entries to fill
1872  *
1873  * Fill the ring params with the interrupt threshold
1874  * configuration parameters available in the per ring type wlan_srng_cfg
1875  * table.
1876  *
1877  * Return: None
1878  */
1879 static void
1880 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
1881 				       struct hal_srng_params *ring_params,
1882 				       int ring_type, int ring_num,
1883 				       int num_entries)
1884 {
1885 	uint8_t wbm2_sw_rx_rel_ring_id;
1886 
1887 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);
1888 
1889 	if (ring_type == REO_DST) {
1890 		ring_params->intr_timer_thres_us =
1891 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1892 		ring_params->intr_batch_cntr_thres_entries =
1893 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1894 	} else if (ring_type == WBM2SW_RELEASE &&
1895 		   (ring_num == wbm2_sw_rx_rel_ring_id)) {
1896 		ring_params->intr_timer_thres_us =
1897 				wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1898 		ring_params->intr_batch_cntr_thres_entries =
1899 				wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1900 	} else {
1901 		ring_params->intr_timer_thres_us =
1902 				soc->wlan_srng_cfg[ring_type].timer_threshold;
1903 		ring_params->intr_batch_cntr_thres_entries =
1904 				soc->wlan_srng_cfg[ring_type].batch_count_threshold;
1905 	}
1906 	ring_params->low_threshold =
1907 			soc->wlan_srng_cfg[ring_type].low_threshold;
1908 	if (ring_params->low_threshold)
1909 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1910 
1911 	dp_srng_configure_nf_interrupt_thresholds(soc, ring_params, ring_type);
1912 }
1913 #else
1914 static void
1915 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
1916 				       struct hal_srng_params *ring_params,
1917 				       int ring_type, int ring_num,
1918 				       int num_entries)
1919 {
1920 	uint8_t wbm2_sw_rx_rel_ring_id;
1921 	bool rx_refill_lt_disable;
1922 
1923 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);
1924 
1925 	if (ring_type == REO_DST || ring_type == REO2PPE) {
1926 		ring_params->intr_timer_thres_us =
1927 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1928 		ring_params->intr_batch_cntr_thres_entries =
1929 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1930 	} else if (ring_type == WBM2SW_RELEASE &&
1931 		   (ring_num < wbm2_sw_rx_rel_ring_id ||
1932 		   ring_num == WBM2SW_TXCOMP_RING4_NUM ||
1933 		   ring_num == WBM2_SW_PPE_REL_RING_ID)) {
1934 		ring_params->intr_timer_thres_us =
1935 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
1936 		ring_params->intr_batch_cntr_thres_entries =
1937 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
1938 	} else if (ring_type == RXDMA_BUF) {
1939 		rx_refill_lt_disable =
1940 			wlan_cfg_get_dp_soc_rxdma_refill_lt_disable
1941 							(soc->wlan_cfg_ctx);
1942 		ring_params->intr_timer_thres_us =
1943 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1944 
1945 		if (!rx_refill_lt_disable) {
1946 			ring_params->low_threshold = num_entries >> 3;
1947 			ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1948 			ring_params->intr_batch_cntr_thres_entries = 0;
1949 		}
1950 	} else {
1951 		ring_params->intr_timer_thres_us =
1952 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1953 		ring_params->intr_batch_cntr_thres_entries =
1954 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1955 	}
1956 
1957 	/* These rings donot require interrupt to host. Make them zero */
1958 	switch (ring_type) {
1959 	case REO_REINJECT:
1960 	case REO_CMD:
1961 	case TCL_DATA:
1962 	case TCL_CMD_CREDIT:
1963 	case TCL_STATUS:
1964 	case WBM_IDLE_LINK:
1965 	case SW2WBM_RELEASE:
1966 	case SW2RXDMA_NEW:
1967 		ring_params->intr_timer_thres_us = 0;
1968 		ring_params->intr_batch_cntr_thres_entries = 0;
1969 		break;
1970 	case PPE2TCL:
1971 		ring_params->intr_timer_thres_us =
1972 			wlan_cfg_get_int_timer_threshold_ppe2tcl(soc->wlan_cfg_ctx);
1973 		ring_params->intr_batch_cntr_thres_entries =
1974 			wlan_cfg_get_int_batch_threshold_ppe2tcl(soc->wlan_cfg_ctx);
1975 		break;
1976 	}
1977 
1978 	/* Enable low threshold interrupts for rx buffer rings (regular and
1979 	 * monitor buffer rings.
1980 	 * TODO: See if this is required for any other ring
1981 	 */
1982 	if ((ring_type == RXDMA_MONITOR_BUF) ||
1983 	    (ring_type == RXDMA_MONITOR_STATUS ||
1984 	    (ring_type == TX_MONITOR_BUF))) {
1985 		/* TODO: Setting low threshold to 1/8th of ring size
1986 		 * see if this needs to be configurable
1987 		 */
1988 		ring_params->low_threshold = num_entries >> 3;
1989 		ring_params->intr_timer_thres_us =
1990 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1991 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1992 		ring_params->intr_batch_cntr_thres_entries = 0;
1993 	}
1994 
1995 	/* During initialisation monitor rings are only filled with
1996 	 * MON_BUF_MIN_ENTRIES entries. So low threshold needs to be set to
1997 	 * a value less than that. Low threshold value is reconfigured again
1998 	 * to 1/8th of the ring size when monitor vap is created.
1999 	 */
2000 	if (ring_type == RXDMA_MONITOR_BUF)
2001 		ring_params->low_threshold = MON_BUF_MIN_ENTRIES >> 1;
2002 
2003 	/* In case of PCI chipsets, we dont have PPDU end interrupts,
2004 	 * so MONITOR STATUS ring is reaped by receiving MSI from srng.
2005 	 * Keep batch threshold as 8 so that interrupt is received for
2006 	 * every 4 packets in MONITOR_STATUS ring
2007 	 */
2008 	if ((ring_type == RXDMA_MONITOR_STATUS) &&
2009 	    (soc->intr_mode == DP_INTR_MSI))
2010 		ring_params->intr_batch_cntr_thres_entries = 4;
2011 }
2012 #endif
2013 
2014 #ifdef DP_MEM_PRE_ALLOC
2015 
2016 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
2017 			   size_t ctxt_size)
2018 {
2019 	void *ctxt_mem;
2020 
2021 	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_context) {
2022 		dp_warn("dp_prealloc_get_context null!");
2023 		goto dynamic_alloc;
2024 	}
2025 
2026 	ctxt_mem = soc->cdp_soc.ol_ops->dp_prealloc_get_context(ctxt_type,
2027 								ctxt_size);
2028 
2029 	if (ctxt_mem)
2030 		goto end;
2031 
2032 dynamic_alloc:
2033 	dp_info("switch to dynamic-alloc for type %d, size %zu",
2034 		ctxt_type, ctxt_size);
2035 	ctxt_mem = qdf_mem_malloc(ctxt_size);
2036 end:
2037 	return ctxt_mem;
2038 }
2039 
2040 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
2041 			 void *vaddr)
2042 {
2043 	QDF_STATUS status;
2044 
2045 	if (soc->cdp_soc.ol_ops->dp_prealloc_put_context) {
2046 		status = soc->cdp_soc.ol_ops->dp_prealloc_put_context(
2047 								ctxt_type,
2048 								vaddr);
2049 	} else {
2050 		dp_warn("dp_prealloc_put_context null!");
2051 		status = QDF_STATUS_E_NOSUPPORT;
2052 	}
2053 
2054 	if (QDF_IS_STATUS_ERROR(status)) {
2055 		dp_info("Context type %d not pre-allocated", ctxt_type);
2056 		qdf_mem_free(vaddr);
2057 	}
2058 }
2059 
2060 static inline
2061 void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
2062 					   struct dp_srng *srng,
2063 					   uint32_t ring_type)
2064 {
2065 	void *mem;
2066 
2067 	qdf_assert(!srng->is_mem_prealloc);
2068 
2069 	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_consistent) {
2070 		dp_warn("dp_prealloc_get_consistent is null!");
2071 		goto qdf;
2072 	}
2073 
2074 	mem =
2075 		soc->cdp_soc.ol_ops->dp_prealloc_get_consistent
2076 						(&srng->alloc_size,
2077 						 &srng->base_vaddr_unaligned,
2078 						 &srng->base_paddr_unaligned,
2079 						 &srng->base_paddr_aligned,
2080 						 DP_RING_BASE_ALIGN, ring_type);
2081 
2082 	if (mem) {
2083 		srng->is_mem_prealloc = true;
2084 		goto end;
2085 	}
2086 qdf:
2087 	mem =  qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
2088 						&srng->base_vaddr_unaligned,
2089 						&srng->base_paddr_unaligned,
2090 						&srng->base_paddr_aligned,
2091 						DP_RING_BASE_ALIGN);
2092 end:
2093 	dp_info("%s memory %pK dp_srng %pK ring_type %d alloc_size %d num_entries %d",
2094 		srng->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", mem,
2095 		srng, ring_type, srng->alloc_size, srng->num_entries);
2096 	return mem;
2097 }
2098 
2099 static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
2100 					       struct dp_srng *srng)
2101 {
2102 	if (srng->is_mem_prealloc) {
2103 		if (!soc->cdp_soc.ol_ops->dp_prealloc_put_consistent) {
2104 			dp_warn("dp_prealloc_put_consistent is null!");
2105 			QDF_BUG(0);
2106 			return;
2107 		}
2108 		soc->cdp_soc.ol_ops->dp_prealloc_put_consistent
2109 						(srng->alloc_size,
2110 						 srng->base_vaddr_unaligned,
2111 						 srng->base_paddr_unaligned);
2112 
2113 	} else {
2114 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2115 					srng->alloc_size,
2116 					srng->base_vaddr_unaligned,
2117 					srng->base_paddr_unaligned, 0);
2118 	}
2119 }
2120 
2121 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
2122 				   enum dp_desc_type desc_type,
2123 				   struct qdf_mem_multi_page_t *pages,
2124 				   size_t element_size,
2125 				   uint32_t element_num,
2126 				   qdf_dma_context_t memctxt,
2127 				   bool cacheable)
2128 {
2129 	if (!soc->cdp_soc.ol_ops->dp_get_multi_pages) {
2130 		dp_warn("dp_get_multi_pages is null!");
2131 		goto qdf;
2132 	}
2133 
2134 	pages->num_pages = 0;
2135 	pages->is_mem_prealloc = 0;
2136 	soc->cdp_soc.ol_ops->dp_get_multi_pages(desc_type,
2137 						element_size,
2138 						element_num,
2139 						pages,
2140 						cacheable);
2141 	if (pages->num_pages)
2142 		goto end;
2143 
2144 qdf:
2145 	qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size,
2146 				  element_num, memctxt, cacheable);
2147 end:
2148 	dp_info("%s desc_type %d element_size %d element_num %d cacheable %d",
2149 		pages->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc",
2150 		desc_type, (int)element_size, element_num, cacheable);
2151 }
2152 
2153 void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
2154 				  enum dp_desc_type desc_type,
2155 				  struct qdf_mem_multi_page_t *pages,
2156 				  qdf_dma_context_t memctxt,
2157 				  bool cacheable)
2158 {
2159 	if (pages->is_mem_prealloc) {
2160 		if (!soc->cdp_soc.ol_ops->dp_put_multi_pages) {
2161 			dp_warn("dp_put_multi_pages is null!");
2162 			QDF_BUG(0);
2163 			return;
2164 		}
2165 
2166 		soc->cdp_soc.ol_ops->dp_put_multi_pages(desc_type, pages);
2167 		qdf_mem_zero(pages, sizeof(*pages));
2168 	} else {
2169 		qdf_mem_multi_pages_free(soc->osdev, pages,
2170 					 memctxt, cacheable);
2171 	}
2172 }
2173 
2174 #else
2175 
2176 static inline
2177 void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
2178 					   struct dp_srng *srng,
2179 					   uint32_t ring_type)
2180 
2181 {
2182 	void *mem;
2183 
2184 	mem = qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
2185 					       &srng->base_vaddr_unaligned,
2186 					       &srng->base_paddr_unaligned,
2187 					       &srng->base_paddr_aligned,
2188 					       DP_RING_BASE_ALIGN);
2189 	if (mem)
2190 		qdf_mem_set(srng->base_vaddr_unaligned, 0, srng->alloc_size);
2191 
2192 	return mem;
2193 }
2194 
2195 static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
2196 					       struct dp_srng *srng)
2197 {
2198 	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2199 				srng->alloc_size,
2200 				srng->base_vaddr_unaligned,
2201 				srng->base_paddr_unaligned, 0);
2202 }
2203 
2204 #endif /* DP_MEM_PRE_ALLOC */
2205 
2206 #ifdef QCA_SUPPORT_WDS_EXTENDED
2207 bool dp_vdev_is_wds_ext_enabled(struct dp_vdev *vdev)
2208 {
2209 	return vdev->wds_ext_enabled;
2210 }
2211 #else
2212 bool dp_vdev_is_wds_ext_enabled(struct dp_vdev *vdev)
2213 {
2214 	return false;
2215 }
2216 #endif
2217 
2218 void dp_pdev_update_fast_rx_flag(struct dp_soc *soc, struct dp_pdev *pdev)
2219 {
2220 	struct dp_vdev *vdev = NULL;
2221 	uint8_t rx_fast_flag = true;
2222 
2223 	if (wlan_cfg_is_rx_flow_tag_enabled(soc->wlan_cfg_ctx)) {
2224 		rx_fast_flag = false;
2225 		goto update_flag;
2226 	}
2227 
2228 	/* Check if protocol tagging enable */
2229 	if (pdev->is_rx_protocol_tagging_enabled) {
2230 		rx_fast_flag = false;
2231 		goto update_flag;
2232 	}
2233 
2234 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
2235 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
2236 		/* Check if any VDEV has NAWDS enabled */
2237 		if (vdev->nawds_enabled) {
2238 			rx_fast_flag = false;
2239 			break;
2240 		}
2241 
2242 		/* Check if any VDEV has multipass enabled */
2243 		if (vdev->multipass_en) {
2244 			rx_fast_flag = false;
2245 			break;
2246 		}
2247 
2248 		/* Check if any VDEV has mesh enabled */
2249 		if (vdev->mesh_vdev) {
2250 			rx_fast_flag = false;
2251 			break;
2252 		}
2253 
2254 		/* Check if any VDEV has WDS ext enabled */
2255 		if (dp_vdev_is_wds_ext_enabled(vdev)) {
2256 			rx_fast_flag = false;
2257 			break;
2258 		}
2259 	}
2260 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
2261 
2262 update_flag:
2263 	dp_init_info("Updated Rx fast flag to %u", rx_fast_flag);
2264 	pdev->rx_fast_flag = rx_fast_flag;
2265 }
2266 
2267 void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng)
2268 {
2269 	if (srng->alloc_size && srng->base_vaddr_unaligned) {
2270 		if (!srng->cached) {
2271 			dp_srng_mem_free_consistent(soc, srng);
2272 		} else {
2273 			qdf_mem_free(srng->base_vaddr_unaligned);
2274 		}
2275 		srng->alloc_size = 0;
2276 		srng->base_vaddr_unaligned = NULL;
2277 	}
2278 	srng->hal_srng = NULL;
2279 }
2280 
2281 qdf_export_symbol(dp_srng_free);
2282 
2283 #ifdef DISABLE_MON_RING_MSI_CFG
2284 /**
2285  * dp_skip_msi_cfg() - Check if msi cfg has to be skipped for ring_type
2286  * @soc: DP SoC context
2287  * @ring_type: sring type
2288  *
2289  * Return: True if msi cfg should be skipped for srng type else false
2290  */
2291 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
2292 {
2293 	if (ring_type == RXDMA_MONITOR_STATUS)
2294 		return true;
2295 
2296 	return false;
2297 }
2298 #else
2299 #ifdef DP_CON_MON_MSI_ENABLED
2300 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
2301 {
2302 	if (soc->cdp_soc.ol_ops->get_con_mode &&
2303 	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE) {
2304 		if (ring_type == REO_DST || ring_type == RXDMA_DST)
2305 			return true;
2306 	} else if (ring_type == RXDMA_MONITOR_STATUS) {
2307 		return true;
2308 	}
2309 
2310 	return false;
2311 }
2312 #else
2313 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
2314 {
2315 	return false;
2316 }
2317 #endif /* DP_CON_MON_MSI_ENABLED */
2318 #endif /* DISABLE_MON_RING_MSI_CFG */
2319 
2320 #ifdef DP_UMAC_HW_RESET_SUPPORT
2321 static bool dp_check_umac_reset_in_progress(struct dp_soc *soc)
2322 {
2323 	return !!soc->umac_reset_ctx.intr_ctx_bkp;
2324 }
2325 #else
2326 static bool dp_check_umac_reset_in_progress(struct dp_soc *soc)
2327 {
2328 	return false;
2329 }
2330 #endif
2331 
2332 QDF_STATUS dp_srng_init_idx(struct dp_soc *soc, struct dp_srng *srng,
2333 			    int ring_type, int ring_num, int mac_id,
2334 			    uint32_t idx)
2335 {
2336 	bool idle_check;
2337 
2338 	hal_soc_handle_t hal_soc = soc->hal_soc;
2339 	struct hal_srng_params ring_params;
2340 
2341 	if (srng->hal_srng) {
2342 		dp_init_err("%pK: Ring type: %d, num:%d is already initialized",
2343 			    soc, ring_type, ring_num);
2344 		return QDF_STATUS_SUCCESS;
2345 	}
2346 
2347 	/* memset the srng ring to zero */
2348 	qdf_mem_zero(srng->base_vaddr_unaligned, srng->alloc_size);
2349 
2350 	qdf_mem_zero(&ring_params, sizeof(struct hal_srng_params));
2351 	ring_params.ring_base_paddr = srng->base_paddr_aligned;
2352 	ring_params.ring_base_vaddr = srng->base_vaddr_aligned;
2353 
2354 	ring_params.num_entries = srng->num_entries;
2355 
2356 	dp_info("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
2357 		ring_type, ring_num,
2358 		(void *)ring_params.ring_base_vaddr,
2359 		(void *)ring_params.ring_base_paddr,
2360 		ring_params.num_entries);
2361 
2362 	if (soc->intr_mode == DP_INTR_MSI && !dp_skip_msi_cfg(soc, ring_type)) {
2363 		dp_srng_msi_setup(soc, srng, &ring_params, ring_type, ring_num);
2364 		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
2365 				 ring_type, ring_num);
2366 	} else {
2367 		ring_params.msi_data = 0;
2368 		ring_params.msi_addr = 0;
2369 		dp_srng_set_msi2_ring_params(soc, &ring_params, 0, 0);
2370 		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
2371 				 ring_type, ring_num);
2372 	}
2373 
2374 	dp_srng_configure_interrupt_thresholds(soc, &ring_params,
2375 					       ring_type, ring_num,
2376 					       srng->num_entries);
2377 
2378 	dp_srng_set_nf_thresholds(soc, srng, &ring_params);
2379 
2380 	if (srng->cached)
2381 		ring_params.flags |= HAL_SRNG_CACHED_DESC;
2382 
2383 	idle_check = dp_check_umac_reset_in_progress(soc);
2384 
2385 	srng->hal_srng = hal_srng_setup_idx(hal_soc, ring_type, ring_num,
2386 					    mac_id, &ring_params, idle_check,
2387 					    idx);
2388 
2389 	if (!srng->hal_srng) {
2390 		dp_srng_free(soc, srng);
2391 		return QDF_STATUS_E_FAILURE;
2392 	}
2393 
2394 	return QDF_STATUS_SUCCESS;
2395 }
2396 
2397 qdf_export_symbol(dp_srng_init_idx);
2398 
2399 QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng, int ring_type,
2400 			int ring_num, int mac_id)
2401 {
2402 	return dp_srng_init_idx(soc, srng, ring_type, ring_num, mac_id, 0);
2403 }
2404 
2405 qdf_export_symbol(dp_srng_init);
2406 
2407 QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
2408 			 int ring_type, uint32_t num_entries,
2409 			 bool cached)
2410 {
2411 	hal_soc_handle_t hal_soc = soc->hal_soc;
2412 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
2413 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
2414 
2415 	if (srng->base_vaddr_unaligned) {
2416 		dp_init_err("%pK: Ring type: %d, is already allocated",
2417 			    soc, ring_type);
2418 		return QDF_STATUS_SUCCESS;
2419 	}
2420 
2421 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
2422 	srng->hal_srng = NULL;
2423 	srng->alloc_size = num_entries * entry_size;
2424 	srng->num_entries = num_entries;
2425 	srng->cached = cached;
2426 
2427 	if (!cached) {
2428 		srng->base_vaddr_aligned =
2429 		    dp_srng_aligned_mem_alloc_consistent(soc,
2430 							 srng,
2431 							 ring_type);
2432 	} else {
2433 		srng->base_vaddr_aligned = qdf_aligned_malloc(
2434 					&srng->alloc_size,
2435 					&srng->base_vaddr_unaligned,
2436 					&srng->base_paddr_unaligned,
2437 					&srng->base_paddr_aligned,
2438 					DP_RING_BASE_ALIGN);
2439 	}
2440 
2441 	if (!srng->base_vaddr_aligned)
2442 		return QDF_STATUS_E_NOMEM;
2443 
2444 	return QDF_STATUS_SUCCESS;
2445 }
2446 
2447 qdf_export_symbol(dp_srng_alloc);
2448 
2449 void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
2450 		    int ring_type, int ring_num)
2451 {
2452 	if (!srng->hal_srng) {
2453 		dp_init_err("%pK: Ring type: %d, num:%d not setup",
2454 			    soc, ring_type, ring_num);
2455 		return;
2456 	}
2457 
2458 	if (soc->arch_ops.dp_free_ppeds_interrupts)
2459 		soc->arch_ops.dp_free_ppeds_interrupts(soc, srng, ring_type,
2460 						       ring_num);
2461 
2462 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
2463 	srng->hal_srng = NULL;
2464 }
2465 
2466 qdf_export_symbol(dp_srng_deinit);
2467 
2468 /* TODO: Need this interface from HIF */
2469 void *hif_get_hal_handle(struct hif_opaque_softc *hif_handle);
2470 
2471 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
2472 int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
2473 			 hal_ring_handle_t hal_ring_hdl)
2474 {
2475 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2476 	uint32_t hp, tp;
2477 	uint8_t ring_id;
2478 
2479 	if (!int_ctx)
2480 		return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
2481 
2482 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
2483 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2484 
2485 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
2486 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_START);
2487 
2488 	return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
2489 }
2490 
2491 void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
2492 			hal_ring_handle_t hal_ring_hdl)
2493 {
2494 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2495 	uint32_t hp, tp;
2496 	uint8_t ring_id;
2497 
2498 	if (!int_ctx)
2499 		return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
2500 
2501 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
2502 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2503 
2504 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
2505 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_END);
2506 
2507 	return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
2508 }
2509 
2510 static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
2511 					      uint8_t hist_group_id)
2512 {
2513 	hif_record_event(dp_soc->hif_handle, hist_group_id,
2514 			 0, 0, 0, HIF_EVENT_TIMER_ENTRY);
2515 }
2516 
2517 static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
2518 					     uint8_t hist_group_id)
2519 {
2520 	hif_record_event(dp_soc->hif_handle, hist_group_id,
2521 			 0, 0, 0, HIF_EVENT_TIMER_EXIT);
2522 }
2523 #else
2524 
2525 static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
2526 					      uint8_t hist_group_id)
2527 {
2528 }
2529 
2530 static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
2531 					     uint8_t hist_group_id)
2532 {
2533 }
2534 
2535 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
2536 
2537 enum timer_yield_status
2538 dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
2539 			  uint64_t start_time)
2540 {
2541 	uint64_t cur_time = qdf_get_log_timestamp();
2542 
2543 	if (!work_done)
2544 		return DP_TIMER_WORK_DONE;
2545 
2546 	if (cur_time - start_time > DP_MAX_TIMER_EXEC_TIME_TICKS)
2547 		return DP_TIMER_TIME_EXHAUST;
2548 
2549 	return DP_TIMER_NO_YIELD;
2550 }
2551 
2552 qdf_export_symbol(dp_should_timer_irq_yield);
2553 
2554 static int dp_process_rxdma_dst_ring(struct dp_soc *soc,
2555 				     struct dp_intr *int_ctx,
2556 				     int mac_for_pdev,
2557 				     int total_budget)
2558 {
2559 	return dp_rxdma_err_process(int_ctx, soc, mac_for_pdev,
2560 				    total_budget);
2561 }
2562 
2563 /**
2564  * dp_process_lmac_rings() - Process LMAC rings
2565  * @int_ctx: interrupt context
2566  * @total_budget: budget of work which can be done
2567  *
2568  * Return: work done
2569  */
2570 static int dp_process_lmac_rings(struct dp_intr *int_ctx, int total_budget)
2571 {
2572 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2573 	struct dp_soc *soc = int_ctx->soc;
2574 	uint32_t remaining_quota = total_budget;
2575 	struct dp_pdev *pdev = NULL;
2576 	uint32_t work_done  = 0;
2577 	int budget = total_budget;
2578 	int ring = 0;
2579 
2580 	/* Process LMAC interrupts */
2581 	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
2582 		int mac_for_pdev = ring;
2583 
2584 		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
2585 		if (!pdev)
2586 			continue;
2587 		if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
2588 			work_done = dp_monitor_process(soc, int_ctx,
2589 						       mac_for_pdev,
2590 						       remaining_quota);
2591 			if (work_done)
2592 				intr_stats->num_rx_mon_ring_masks++;
2593 			budget -= work_done;
2594 			if (budget <= 0)
2595 				goto budget_done;
2596 			remaining_quota = budget;
2597 		}
2598 
2599 		if (int_ctx->tx_mon_ring_mask & (1 << mac_for_pdev)) {
2600 			work_done = dp_tx_mon_process(soc, int_ctx,
2601 						      mac_for_pdev,
2602 						      remaining_quota);
2603 			if (work_done)
2604 				intr_stats->num_tx_mon_ring_masks++;
2605 			budget -= work_done;
2606 			if (budget <= 0)
2607 				goto budget_done;
2608 			remaining_quota = budget;
2609 		}
2610 
2611 		if (int_ctx->rxdma2host_ring_mask &
2612 				(1 << mac_for_pdev)) {
2613 			work_done = dp_process_rxdma_dst_ring(soc, int_ctx,
2614 							      mac_for_pdev,
2615 							      remaining_quota);
2616 			if (work_done)
2617 				intr_stats->num_rxdma2host_ring_masks++;
2618 			budget -=  work_done;
2619 			if (budget <= 0)
2620 				goto budget_done;
2621 			remaining_quota = budget;
2622 		}
2623 
2624 		if (int_ctx->host2rxdma_ring_mask & (1 << mac_for_pdev)) {
2625 			union dp_rx_desc_list_elem_t *desc_list = NULL;
2626 			union dp_rx_desc_list_elem_t *tail = NULL;
2627 			struct dp_srng *rx_refill_buf_ring;
2628 			struct rx_desc_pool *rx_desc_pool;
2629 
2630 			rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
2631 			if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
2632 				rx_refill_buf_ring =
2633 					&soc->rx_refill_buf_ring[mac_for_pdev];
2634 			else
2635 				rx_refill_buf_ring =
2636 					&soc->rx_refill_buf_ring[pdev->lmac_id];
2637 
2638 			intr_stats->num_host2rxdma_ring_masks++;
2639 			dp_rx_buffers_lt_replenish_simple(soc, mac_for_pdev,
2640 							  rx_refill_buf_ring,
2641 							  rx_desc_pool,
2642 							  0,
2643 							  &desc_list,
2644 							  &tail);
2645 		}
2646 
2647 	}
2648 
2649 	if (int_ctx->host2rxdma_mon_ring_mask)
2650 		dp_rx_mon_buf_refill(int_ctx);
2651 
2652 	if (int_ctx->host2txmon_ring_mask)
2653 		dp_tx_mon_buf_refill(int_ctx);
2654 
2655 budget_done:
2656 	return total_budget - budget;
2657 }
2658 
2659 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
2660 /**
2661  * dp_service_near_full_srngs() - Bottom half handler to process the near
2662  *				full IRQ on a SRNG
2663  * @dp_ctx: Datapath SoC handle
2664  * @dp_budget: Number of SRNGs which can be processed in a single attempt
2665  *		without rescheduling
2666  * @cpu: cpu id
2667  *
2668  * Return: remaining budget/quota for the soc device
2669  */
2670 static
2671 uint32_t dp_service_near_full_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
2672 {
2673 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2674 	struct dp_soc *soc = int_ctx->soc;
2675 
2676 	/*
2677 	 * dp_service_near_full_srngs arch ops should be initialized always
2678 	 * if the NEAR FULL IRQ feature is enabled.
2679 	 */
2680 	return soc->arch_ops.dp_service_near_full_srngs(soc, int_ctx,
2681 							dp_budget);
2682 }
2683 #endif
2684 
2685 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2686 
2687 /**
2688  * dp_srng_get_cpu() - Get the smp processor id for srng processing
2689  *
2690  * Return: smp processor id
2691  */
2692 static inline int dp_srng_get_cpu(void)
2693 {
2694 	return smp_processor_id();
2695 }
2696 
2697 /**
2698  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
2699  * @dp_ctx: DP SOC handle
2700  * @dp_budget: Number of frames/descriptors that can be processed in one shot
2701  * @cpu: CPU on which this instance is running
2702  *
2703  * Return: remaining budget/quota for the soc device
2704  */
2705 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
2706 {
2707 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2708 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2709 	struct dp_soc *soc = int_ctx->soc;
2710 	int ring = 0;
2711 	int index;
2712 	uint32_t work_done  = 0;
2713 	int budget = dp_budget;
2714 	uint8_t tx_mask = int_ctx->tx_ring_mask;
2715 	uint8_t rx_mask = int_ctx->rx_ring_mask;
2716 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
2717 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
2718 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
2719 	uint32_t remaining_quota = dp_budget;
2720 
2721 	qdf_atomic_set_bit(cpu, &soc->service_rings_running);
2722 
2723 	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x\n",
2724 			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
2725 			 reo_status_mask,
2726 			 int_ctx->rx_mon_ring_mask,
2727 			 int_ctx->host2rxdma_ring_mask,
2728 			 int_ctx->rxdma2host_ring_mask);
2729 
2730 	/* Process Tx completion interrupts first to return back buffers */
2731 	for (index = 0; index < soc->num_tx_comp_rings; index++) {
2732 		if (!(1 << wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) & tx_mask))
2733 			continue;
2734 		work_done = dp_tx_comp_handler(int_ctx,
2735 					       soc,
2736 					       soc->tx_comp_ring[index].hal_srng,
2737 					       index, remaining_quota);
2738 		if (work_done) {
2739 			intr_stats->num_tx_ring_masks[index]++;
2740 			dp_verbose_debug("tx mask 0x%x index %d, budget %d, work_done %d",
2741 					 tx_mask, index, budget,
2742 					 work_done);
2743 		}
2744 		budget -= work_done;
2745 		if (budget <= 0)
2746 			goto budget_done;
2747 
2748 		remaining_quota = budget;
2749 	}
2750 
2751 	/* Process REO Exception ring interrupt */
2752 	if (rx_err_mask) {
2753 		work_done = dp_rx_err_process(int_ctx, soc,
2754 					      soc->reo_exception_ring.hal_srng,
2755 					      remaining_quota);
2756 
2757 		if (work_done) {
2758 			intr_stats->num_rx_err_ring_masks++;
2759 			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
2760 					 work_done, budget);
2761 		}
2762 
2763 		budget -=  work_done;
2764 		if (budget <= 0) {
2765 			goto budget_done;
2766 		}
2767 		remaining_quota = budget;
2768 	}
2769 
2770 	/* Process Rx WBM release ring interrupt */
2771 	if (rx_wbm_rel_mask) {
2772 		work_done = dp_rx_wbm_err_process(int_ctx, soc,
2773 						  soc->rx_rel_ring.hal_srng,
2774 						  remaining_quota);
2775 
2776 		if (work_done) {
2777 			intr_stats->num_rx_wbm_rel_ring_masks++;
2778 			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
2779 					 work_done, budget);
2780 		}
2781 
2782 		budget -=  work_done;
2783 		if (budget <= 0) {
2784 			goto budget_done;
2785 		}
2786 		remaining_quota = budget;
2787 	}
2788 
2789 	/* Process Rx interrupts */
2790 	if (rx_mask) {
2791 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
2792 			if (!(rx_mask & (1 << ring)))
2793 				continue;
2794 			work_done = soc->arch_ops.dp_rx_process(int_ctx,
2795 						  soc->reo_dest_ring[ring].hal_srng,
2796 						  ring,
2797 						  remaining_quota);
2798 			if (work_done) {
2799 				intr_stats->num_rx_ring_masks[ring]++;
2800 				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
2801 						 rx_mask, ring,
2802 						 work_done, budget);
2803 				budget -=  work_done;
2804 				if (budget <= 0)
2805 					goto budget_done;
2806 				remaining_quota = budget;
2807 			}
2808 		}
2809 	}
2810 
2811 	if (reo_status_mask) {
2812 		if (dp_reo_status_ring_handler(int_ctx, soc))
2813 			int_ctx->intr_stats.num_reo_status_ring_masks++;
2814 	}
2815 
2816 	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
2817 		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
2818 		if (work_done) {
2819 			budget -=  work_done;
2820 			if (budget <= 0)
2821 				goto budget_done;
2822 			remaining_quota = budget;
2823 		}
2824 	}
2825 
2826 	qdf_lro_flush(int_ctx->lro_ctx);
2827 	intr_stats->num_masks++;
2828 
2829 budget_done:
2830 	qdf_atomic_clear_bit(cpu, &soc->service_rings_running);
2831 
2832 	if (soc->notify_fw_callback)
2833 		soc->notify_fw_callback(soc);
2834 
2835 	return dp_budget - budget;
2836 }
2837 
2838 #else /* QCA_HOST_MODE_WIFI_DISABLED */
2839 
2840 /**
2841  * dp_srng_get_cpu() - Get the smp processor id for srng processing
2842  *
2843  * Return: smp processor id
2844  */
2845 static inline int dp_srng_get_cpu(void)
2846 {
2847 	return 0;
2848 }
2849 
2850 /**
2851  * dp_service_srngs() - Top level handler for DP Monitor Ring interrupts
2852  * @dp_ctx: DP SOC handle
2853  * @dp_budget: Number of frames/descriptors that can be processed in one shot
2854  * @cpu: CPU on which this instance is running
2855  *
2856  * Return: remaining budget/quota for the soc device
2857  */
2858 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
2859 {
2860 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2861 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2862 	struct dp_soc *soc = int_ctx->soc;
2863 	uint32_t remaining_quota = dp_budget;
2864 	uint32_t work_done  = 0;
2865 	int budget = dp_budget;
2866 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
2867 
2868 	if (reo_status_mask) {
2869 		if (dp_reo_status_ring_handler(int_ctx, soc))
2870 			int_ctx->intr_stats.num_reo_status_ring_masks++;
2871 	}
2872 
2873 	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
2874 		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
2875 		if (work_done) {
2876 			budget -=  work_done;
2877 			if (budget <= 0)
2878 				goto budget_done;
2879 			remaining_quota = budget;
2880 		}
2881 	}
2882 
2883 	qdf_lro_flush(int_ctx->lro_ctx);
2884 	intr_stats->num_masks++;
2885 
2886 budget_done:
2887 	return dp_budget - budget;
2888 }
2889 
2890 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2891 
2892 /**
2893  * dp_interrupt_timer() - timer poll for interrupts
2894  * @arg: SoC Handle
2895  *
2896  * Return:
2897  *
2898  */
2899 static void dp_interrupt_timer(void *arg)
2900 {
2901 	struct dp_soc *soc = (struct dp_soc *) arg;
2902 	struct dp_pdev *pdev = soc->pdev_list[0];
2903 	enum timer_yield_status yield = DP_TIMER_NO_YIELD;
2904 	uint32_t work_done  = 0, total_work_done = 0;
2905 	int budget = 0xffff, i;
2906 	uint32_t remaining_quota = budget;
2907 	uint64_t start_time;
2908 	uint32_t lmac_id = DP_MON_INVALID_LMAC_ID;
2909 	uint8_t dp_intr_id = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
2910 	uint32_t lmac_iter;
2911 	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
2912 	enum reg_wifi_band mon_band;
2913 	int cpu = dp_srng_get_cpu();
2914 
2915 	/*
2916 	 * this logic makes all data path interfacing rings (UMAC/LMAC)
2917 	 * and Monitor rings polling mode when NSS offload is disabled
2918 	 */
2919 	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx) &&
2920 	    !wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
2921 		if (qdf_atomic_read(&soc->cmn_init_done)) {
2922 			for (i = 0; i < wlan_cfg_get_num_contexts(
2923 						soc->wlan_cfg_ctx); i++)
2924 				dp_service_srngs(&soc->intr_ctx[i], 0xffff,
2925 						 cpu);
2926 
2927 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
2928 		}
2929 		return;
2930 	}
2931 
2932 	if (!qdf_atomic_read(&soc->cmn_init_done))
2933 		return;
2934 
2935 	if (dp_monitor_is_chan_band_known(pdev)) {
2936 		mon_band = dp_monitor_get_chan_band(pdev);
2937 		lmac_id = pdev->ch_band_lmac_id_mapping[mon_band];
2938 		if (qdf_likely(lmac_id != DP_MON_INVALID_LMAC_ID)) {
2939 			dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id];
2940 			dp_srng_record_timer_entry(soc, dp_intr_id);
2941 		}
2942 	}
2943 
2944 	start_time = qdf_get_log_timestamp();
2945 	dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);
2946 
2947 	while (yield == DP_TIMER_NO_YIELD) {
2948 		for (lmac_iter = 0; lmac_iter < max_mac_rings; lmac_iter++) {
2949 			if (lmac_iter == lmac_id)
2950 				work_done = dp_monitor_process(soc,
2951 						&soc->intr_ctx[dp_intr_id],
2952 						lmac_iter, remaining_quota);
2953 			else
2954 				work_done =
2955 					dp_monitor_drop_packets_for_mac(pdev,
2956 							     lmac_iter,
2957 							     remaining_quota);
2958 			if (work_done) {
2959 				budget -=  work_done;
2960 				if (budget <= 0) {
2961 					yield = DP_TIMER_WORK_EXHAUST;
2962 					goto budget_done;
2963 				}
2964 				remaining_quota = budget;
2965 				total_work_done += work_done;
2966 			}
2967 		}
2968 
2969 		yield = dp_should_timer_irq_yield(soc, total_work_done,
2970 						  start_time);
2971 		total_work_done = 0;
2972 	}
2973 
2974 budget_done:
2975 	if (yield == DP_TIMER_WORK_EXHAUST ||
2976 	    yield == DP_TIMER_TIME_EXHAUST)
2977 		qdf_timer_mod(&soc->int_timer, 1);
2978 	else
2979 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
2980 
2981 	if (lmac_id != DP_MON_INVALID_LMAC_ID)
2982 		dp_srng_record_timer_exit(soc, dp_intr_id);
2983 }
2984 
2985 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
2986 static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
2987 					struct dp_intr *intr_ctx)
2988 {
2989 	if (intr_ctx->rx_mon_ring_mask)
2990 		return true;
2991 
2992 	return false;
2993 }
2994 #else
2995 static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
2996 					struct dp_intr *intr_ctx)
2997 {
2998 	return false;
2999 }
3000 #endif
3001 
3002 /**
3003  * dp_soc_attach_poll() - Register handlers for DP interrupts
3004  * @txrx_soc: DP SOC handle
3005  *
3006  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
3007  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
3008  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
3009  *
3010  * Return: 0 for success, nonzero for failure.
3011  */
3012 static QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc)
3013 {
3014 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3015 	int i;
3016 	int lmac_id = 0;
3017 
3018 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
3019 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
3020 	soc->intr_mode = DP_INTR_POLL;
3021 
3022 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
3023 		soc->intr_ctx[i].dp_intr_id = i;
3024 		soc->intr_ctx[i].tx_ring_mask =
3025 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
3026 		soc->intr_ctx[i].rx_ring_mask =
3027 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
3028 		soc->intr_ctx[i].rx_mon_ring_mask =
3029 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
3030 		soc->intr_ctx[i].rx_err_ring_mask =
3031 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
3032 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
3033 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
3034 		soc->intr_ctx[i].reo_status_ring_mask =
3035 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
3036 		soc->intr_ctx[i].rxdma2host_ring_mask =
3037 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
3038 		soc->intr_ctx[i].soc = soc;
3039 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
3040 
3041 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
3042 			hif_event_history_init(soc->hif_handle, i);
3043 			soc->mon_intr_id_lmac_map[lmac_id] = i;
3044 			lmac_id++;
3045 		}
3046 	}
3047 
3048 	qdf_timer_init(soc->osdev, &soc->int_timer,
3049 			dp_interrupt_timer, (void *)soc,
3050 			QDF_TIMER_TYPE_WAKE_APPS);
3051 
3052 	return QDF_STATUS_SUCCESS;
3053 }
3054 
3055 /**
3056  * dp_soc_set_interrupt_mode() - Set the interrupt mode in soc
3057  * @soc: DP soc handle
3058  *
3059  * Set the appropriate interrupt mode flag in the soc
3060  */
3061 static void dp_soc_set_interrupt_mode(struct dp_soc *soc)
3062 {
3063 	uint32_t msi_base_data, msi_vector_start;
3064 	int msi_vector_count, ret;
3065 
3066 	soc->intr_mode = DP_INTR_INTEGRATED;
3067 
3068 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
3069 	    (dp_is_monitor_mode_using_poll(soc) &&
3070 	     soc->cdp_soc.ol_ops->get_con_mode &&
3071 	     soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)) {
3072 		soc->intr_mode = DP_INTR_POLL;
3073 	} else {
3074 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
3075 						  &msi_vector_count,
3076 						  &msi_base_data,
3077 						  &msi_vector_start);
3078 		if (ret)
3079 			return;
3080 
3081 		soc->intr_mode = DP_INTR_MSI;
3082 	}
3083 }
3084 
3085 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc);
3086 #if defined(DP_INTR_POLL_BOTH)
3087 /**
3088  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
3089  * @txrx_soc: DP SOC handle
3090  *
3091  * Call the appropriate attach function based on the mode of operation.
3092  * This is a WAR for enabling monitor mode.
3093  *
3094  * Return: 0 for success. nonzero for failure.
3095  */
3096 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
3097 {
3098 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3099 
3100 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
3101 	    (dp_is_monitor_mode_using_poll(soc) &&
3102 	     soc->cdp_soc.ol_ops->get_con_mode &&
3103 	     soc->cdp_soc.ol_ops->get_con_mode() ==
3104 	     QDF_GLOBAL_MONITOR_MODE)) {
3105 		dp_info("Poll mode");
3106 		return dp_soc_attach_poll(txrx_soc);
3107 	} else {
3108 		dp_info("Interrupt  mode");
3109 		return dp_soc_interrupt_attach(txrx_soc);
3110 	}
3111 }
3112 #else
3113 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
3114 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
3115 {
3116 	return dp_soc_attach_poll(txrx_soc);
3117 }
3118 #else
3119 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
3120 {
3121 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3122 
3123 	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx))
3124 		return dp_soc_attach_poll(txrx_soc);
3125 	else
3126 		return dp_soc_interrupt_attach(txrx_soc);
3127 }
3128 #endif
3129 #endif
3130 
3131 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
3132 /**
3133  * dp_soc_interrupt_map_calculate_wifi3_pci_legacy() -
3134  * Calculate interrupt map for legacy interrupts
3135  * @soc: DP soc handle
3136  * @intr_ctx_num: Interrupt context number
3137  * @irq_id_map: IRQ map
3138  * @num_irq_r: Number of interrupts assigned for this context
3139  *
3140  * Return: void
3141  */
3142 static void dp_soc_interrupt_map_calculate_wifi3_pci_legacy(struct dp_soc *soc,
3143 							    int intr_ctx_num,
3144 							    int *irq_id_map,
3145 							    int *num_irq_r)
3146 {
3147 	int j;
3148 	int num_irq = 0;
3149 	int tx_mask = wlan_cfg_get_tx_ring_mask(
3150 					soc->wlan_cfg_ctx, intr_ctx_num);
3151 	int rx_mask = wlan_cfg_get_rx_ring_mask(
3152 					soc->wlan_cfg_ctx, intr_ctx_num);
3153 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
3154 					soc->wlan_cfg_ctx, intr_ctx_num);
3155 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
3156 					soc->wlan_cfg_ctx, intr_ctx_num);
3157 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
3158 					soc->wlan_cfg_ctx, intr_ctx_num);
3159 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
3160 					soc->wlan_cfg_ctx, intr_ctx_num);
3161 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
3162 					soc->wlan_cfg_ctx, intr_ctx_num);
3163 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
3164 					soc->wlan_cfg_ctx, intr_ctx_num);
3165 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
3166 					soc->wlan_cfg_ctx, intr_ctx_num);
3167 	soc->intr_mode = DP_INTR_LEGACY_VIRTUAL_IRQ;
3168 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
3169 		if (tx_mask & (1 << j))
3170 			irq_id_map[num_irq++] = (wbm2sw0_release - j);
3171 		if (rx_mask & (1 << j))
3172 			irq_id_map[num_irq++] = (reo2sw1_intr - j);
3173 		if (rx_mon_mask & (1 << j))
3174 			irq_id_map[num_irq++] = (rxmon2sw_p0_dest0 - j);
3175 		if (rx_err_ring_mask & (1 << j))
3176 			irq_id_map[num_irq++] = (reo2sw0_intr - j);
3177 		if (rx_wbm_rel_ring_mask & (1 << j))
3178 			irq_id_map[num_irq++] = (wbm2sw5_release - j);
3179 		if (reo_status_ring_mask & (1 << j))
3180 			irq_id_map[num_irq++] = (reo_status - j);
3181 		if (rxdma2host_ring_mask & (1 << j))
3182 			irq_id_map[num_irq++] = (rxdma2sw_dst_ring0 - j);
3183 		if (host2rxdma_ring_mask & (1 << j))
3184 			irq_id_map[num_irq++] = (sw2rxdma_0 - j);
3185 		if (host2rxdma_mon_ring_mask & (1 << j))
3186 			irq_id_map[num_irq++] = (sw2rxmon_src_ring - j);
3187 	}
3188 	*num_irq_r = num_irq;
3189 }
3190 #else
3191 static void dp_soc_interrupt_map_calculate_wifi3_pci_legacy(struct dp_soc *soc,
3192 							    int intr_ctx_num,
3193 							    int *irq_id_map,
3194 							    int *num_irq_r)
3195 {
3196 }
3197 #endif
3198 
3199 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
3200 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
3201 {
3202 	int j;
3203 	int num_irq = 0;
3204 
3205 	int tx_mask =
3206 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
3207 	int rx_mask =
3208 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
3209 	int rx_mon_mask =
3210 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
3211 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
3212 					soc->wlan_cfg_ctx, intr_ctx_num);
3213 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
3214 					soc->wlan_cfg_ctx, intr_ctx_num);
3215 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
3216 					soc->wlan_cfg_ctx, intr_ctx_num);
3217 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
3218 					soc->wlan_cfg_ctx, intr_ctx_num);
3219 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
3220 					soc->wlan_cfg_ctx, intr_ctx_num);
3221 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
3222 					soc->wlan_cfg_ctx, intr_ctx_num);
3223 	int host2txmon_ring_mask = wlan_cfg_get_host2txmon_ring_mask(
3224 					soc->wlan_cfg_ctx, intr_ctx_num);
3225 	int txmon2host_mon_ring_mask = wlan_cfg_get_tx_mon_ring_mask(
3226 					soc->wlan_cfg_ctx, intr_ctx_num);
3227 	int umac_reset_mask = wlan_cfg_get_umac_reset_intr_mask(
3228 					soc->wlan_cfg_ctx, intr_ctx_num);
3229 
3230 	soc->intr_mode = DP_INTR_INTEGRATED;
3231 
3232 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
3233 
3234 		if (tx_mask & (1 << j)) {
3235 			irq_id_map[num_irq++] =
3236 				(wbm2host_tx_completions_ring1 - j);
3237 		}
3238 
3239 		if (rx_mask & (1 << j)) {
3240 			irq_id_map[num_irq++] =
3241 				(reo2host_destination_ring1 - j);
3242 		}
3243 
3244 		if (rxdma2host_ring_mask & (1 << j)) {
3245 			irq_id_map[num_irq++] =
3246 				rxdma2host_destination_ring_mac1 - j;
3247 		}
3248 
3249 		if (host2rxdma_ring_mask & (1 << j)) {
3250 			irq_id_map[num_irq++] =
3251 				host2rxdma_host_buf_ring_mac1 -	j;
3252 		}
3253 
3254 		if (host2rxdma_mon_ring_mask & (1 << j)) {
3255 			irq_id_map[num_irq++] =
3256 				host2rxdma_monitor_ring1 - j;
3257 		}
3258 
3259 		if (rx_mon_mask & (1 << j)) {
3260 			irq_id_map[num_irq++] =
3261 				ppdu_end_interrupts_mac1 - j;
3262 			irq_id_map[num_irq++] =
3263 				rxdma2host_monitor_status_ring_mac1 - j;
3264 			irq_id_map[num_irq++] =
3265 				rxdma2host_monitor_destination_mac1 - j;
3266 		}
3267 
3268 		if (rx_wbm_rel_ring_mask & (1 << j))
3269 			irq_id_map[num_irq++] = wbm2host_rx_release;
3270 
3271 		if (rx_err_ring_mask & (1 << j))
3272 			irq_id_map[num_irq++] = reo2host_exception;
3273 
3274 		if (reo_status_ring_mask & (1 << j))
3275 			irq_id_map[num_irq++] = reo2host_status;
3276 
3277 		if (host2txmon_ring_mask & (1 << j))
3278 			irq_id_map[num_irq++] = host2tx_monitor_ring1;
3279 
3280 		if (txmon2host_mon_ring_mask & (1 << j)) {
3281 			irq_id_map[num_irq++] =
3282 				(txmon2host_monitor_destination_mac1 - j);
3283 		}
3284 
3285 		if (umac_reset_mask & (1 << j))
3286 			irq_id_map[num_irq++] = (umac_reset - j);
3287 
3288 	}
3289 	*num_irq_r = num_irq;
3290 }
3291 
3292 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
3293 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
3294 		int msi_vector_count, int msi_vector_start)
3295 {
3296 	int tx_mask = wlan_cfg_get_tx_ring_mask(
3297 					soc->wlan_cfg_ctx, intr_ctx_num);
3298 	int rx_mask = wlan_cfg_get_rx_ring_mask(
3299 					soc->wlan_cfg_ctx, intr_ctx_num);
3300 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
3301 					soc->wlan_cfg_ctx, intr_ctx_num);
3302 	int tx_mon_mask = wlan_cfg_get_tx_mon_ring_mask(
3303 					soc->wlan_cfg_ctx, intr_ctx_num);
3304 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
3305 					soc->wlan_cfg_ctx, intr_ctx_num);
3306 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
3307 					soc->wlan_cfg_ctx, intr_ctx_num);
3308 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
3309 					soc->wlan_cfg_ctx, intr_ctx_num);
3310 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
3311 					soc->wlan_cfg_ctx, intr_ctx_num);
3312 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
3313 					soc->wlan_cfg_ctx, intr_ctx_num);
3314 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
3315 					soc->wlan_cfg_ctx, intr_ctx_num);
3316 	int rx_near_full_grp_1_mask =
3317 		wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
3318 						     intr_ctx_num);
3319 	int rx_near_full_grp_2_mask =
3320 		wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
3321 						     intr_ctx_num);
3322 	int tx_ring_near_full_mask =
3323 		wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
3324 						    intr_ctx_num);
3325 
3326 	int host2txmon_ring_mask =
3327 		wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx,
3328 						  intr_ctx_num);
3329 	unsigned int vector =
3330 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
3331 	int num_irq = 0;
3332 
3333 	soc->intr_mode = DP_INTR_MSI;
3334 
3335 	if (tx_mask | rx_mask | rx_mon_mask | tx_mon_mask | rx_err_ring_mask |
3336 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask |
3337 	    host2rxdma_ring_mask | host2rxdma_mon_ring_mask |
3338 	    rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
3339 	    tx_ring_near_full_mask | host2txmon_ring_mask)
3340 		irq_id_map[num_irq++] =
3341 			pld_get_msi_irq(soc->osdev->dev, vector);
3342 
3343 	*num_irq_r = num_irq;
3344 }
3345 
3346 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
3347 				    int *irq_id_map, int *num_irq)
3348 {
3349 	int msi_vector_count, ret;
3350 	uint32_t msi_base_data, msi_vector_start;
3351 
3352 	if (pld_get_enable_intx(soc->osdev->dev)) {
3353 		return dp_soc_interrupt_map_calculate_wifi3_pci_legacy(soc,
3354 				intr_ctx_num, irq_id_map, num_irq);
3355 	}
3356 
3357 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
3358 					    &msi_vector_count,
3359 					    &msi_base_data,
3360 					    &msi_vector_start);
3361 	if (ret)
3362 		return dp_soc_interrupt_map_calculate_integrated(soc,
3363 				intr_ctx_num, irq_id_map, num_irq);
3364 
3365 	else
3366 		dp_soc_interrupt_map_calculate_msi(soc,
3367 				intr_ctx_num, irq_id_map, num_irq,
3368 				msi_vector_count, msi_vector_start);
3369 }
3370 
3371 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
3372 /**
3373  * dp_soc_near_full_interrupt_attach() - Register handler for DP near fill irq
3374  * @soc: DP soc handle
3375  * @num_irq: IRQ number
3376  * @irq_id_map: IRQ map
3377  * @intr_id: interrupt context ID
3378  *
3379  * Return: 0 for success. nonzero for failure.
3380  */
3381 static inline int
3382 dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
3383 				  int irq_id_map[], int intr_id)
3384 {
3385 	return hif_register_ext_group(soc->hif_handle,
3386 				      num_irq, irq_id_map,
3387 				      dp_service_near_full_srngs,
3388 				      &soc->intr_ctx[intr_id], "dp_nf_intr",
3389 				      HIF_EXEC_NAPI_TYPE,
3390 				      QCA_NAPI_DEF_SCALE_BIN_SHIFT);
3391 }
3392 #else
3393 static inline int
3394 dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
3395 				  int *irq_id_map, int intr_id)
3396 {
3397 	return 0;
3398 }
3399 #endif
3400 
3401 #ifdef DP_CON_MON_MSI_SKIP_SET
3402 static inline bool dp_skip_rx_mon_ring_mask_set(struct dp_soc *soc)
3403 {
3404 	return !!(soc->cdp_soc.ol_ops->get_con_mode() !=
3405 			QDF_GLOBAL_MONITOR_MODE);
3406 }
3407 #else
3408 static inline bool dp_skip_rx_mon_ring_mask_set(struct dp_soc *soc)
3409 {
3410 	return false;
3411 }
3412 #endif
3413 
3414 /**
3415  * dp_soc_ppeds_stop() - Stop PPE DS processing
3416  * @soc_handle: DP SOC handle
3417  *
3418  * Return: none
3419  */
3420 static void dp_soc_ppeds_stop(struct cdp_soc_t *soc_handle)
3421 {
3422 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
3423 
3424 	if (soc->arch_ops.txrx_soc_ppeds_stop)
3425 		soc->arch_ops.txrx_soc_ppeds_stop(soc);
3426 }
3427 
3428 void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
3429 {
3430 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3431 	int i;
3432 
3433 	if (soc->intr_mode == DP_INTR_POLL) {
3434 		qdf_timer_free(&soc->int_timer);
3435 	} else {
3436 		hif_deconfigure_ext_group_interrupts(soc->hif_handle);
3437 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
3438 		hif_deregister_exec_group(soc->hif_handle, "dp_nf_intr");
3439 	}
3440 
3441 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
3442 		soc->intr_ctx[i].tx_ring_mask = 0;
3443 		soc->intr_ctx[i].rx_ring_mask = 0;
3444 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
3445 		soc->intr_ctx[i].rx_err_ring_mask = 0;
3446 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
3447 		soc->intr_ctx[i].reo_status_ring_mask = 0;
3448 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
3449 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
3450 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
3451 		soc->intr_ctx[i].rx_near_full_grp_1_mask = 0;
3452 		soc->intr_ctx[i].rx_near_full_grp_2_mask = 0;
3453 		soc->intr_ctx[i].tx_ring_near_full_mask = 0;
3454 		soc->intr_ctx[i].tx_mon_ring_mask = 0;
3455 		soc->intr_ctx[i].host2txmon_ring_mask = 0;
3456 		soc->intr_ctx[i].umac_reset_intr_mask = 0;
3457 
3458 		hif_event_history_deinit(soc->hif_handle, i);
3459 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
3460 	}
3461 
3462 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
3463 		    sizeof(soc->mon_intr_id_lmac_map),
3464 		    DP_MON_INVALID_LMAC_ID);
3465 }
3466 
3467 /**
3468  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
3469  * @txrx_soc: DP SOC handle
3470  *
3471  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
3472  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
3473  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
3474  *
3475  * Return: 0 for success. nonzero for failure.
3476  */
3477 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
3478 {
3479 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3480 
3481 	int i = 0;
3482 	int num_irq = 0;
3483 	int rx_err_ring_intr_ctxt_id = HIF_MAX_GROUP;
3484 	int lmac_id = 0;
3485 	int napi_scale;
3486 
3487 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
3488 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
3489 
3490 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
3491 		int ret = 0;
3492 
3493 		/* Map of IRQ ids registered with one interrupt context */
3494 		int irq_id_map[HIF_MAX_GRP_IRQ];
3495 
3496 		int tx_mask =
3497 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
3498 		int rx_mask =
3499 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
3500 		int rx_mon_mask =
3501 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
3502 		int tx_mon_ring_mask =
3503 			wlan_cfg_get_tx_mon_ring_mask(soc->wlan_cfg_ctx, i);
3504 		int rx_err_ring_mask =
3505 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
3506 		int rx_wbm_rel_ring_mask =
3507 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
3508 		int reo_status_ring_mask =
3509 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
3510 		int rxdma2host_ring_mask =
3511 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
3512 		int host2rxdma_ring_mask =
3513 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
3514 		int host2rxdma_mon_ring_mask =
3515 			wlan_cfg_get_host2rxdma_mon_ring_mask(
3516 				soc->wlan_cfg_ctx, i);
3517 		int rx_near_full_grp_1_mask =
3518 			wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
3519 							     i);
3520 		int rx_near_full_grp_2_mask =
3521 			wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
3522 							     i);
3523 		int tx_ring_near_full_mask =
3524 			wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
3525 							    i);
3526 		int host2txmon_ring_mask =
3527 			wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx, i);
3528 		int umac_reset_intr_mask =
3529 			wlan_cfg_get_umac_reset_intr_mask(soc->wlan_cfg_ctx, i);
3530 
3531 		if (dp_skip_rx_mon_ring_mask_set(soc))
3532 			rx_mon_mask = 0;
3533 
3534 		soc->intr_ctx[i].dp_intr_id = i;
3535 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
3536 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
3537 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
3538 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
3539 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
3540 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
3541 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
3542 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
3543 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
3544 			 host2rxdma_mon_ring_mask;
3545 		soc->intr_ctx[i].rx_near_full_grp_1_mask =
3546 						rx_near_full_grp_1_mask;
3547 		soc->intr_ctx[i].rx_near_full_grp_2_mask =
3548 						rx_near_full_grp_2_mask;
3549 		soc->intr_ctx[i].tx_ring_near_full_mask =
3550 						tx_ring_near_full_mask;
3551 		soc->intr_ctx[i].tx_mon_ring_mask = tx_mon_ring_mask;
3552 		soc->intr_ctx[i].host2txmon_ring_mask = host2txmon_ring_mask;
3553 		soc->intr_ctx[i].umac_reset_intr_mask = umac_reset_intr_mask;
3554 
3555 		soc->intr_ctx[i].soc = soc;
3556 
3557 		num_irq = 0;
3558 
3559 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
3560 					       &num_irq);
3561 
3562 		if (rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
3563 		    tx_ring_near_full_mask) {
3564 			dp_soc_near_full_interrupt_attach(soc, num_irq,
3565 							  irq_id_map, i);
3566 		} else {
3567 			napi_scale = wlan_cfg_get_napi_scale_factor(
3568 							    soc->wlan_cfg_ctx);
3569 			if (!napi_scale)
3570 				napi_scale = QCA_NAPI_DEF_SCALE_BIN_SHIFT;
3571 
3572 			ret = hif_register_ext_group(soc->hif_handle,
3573 				num_irq, irq_id_map, dp_service_srngs,
3574 				&soc->intr_ctx[i], "dp_intr",
3575 				HIF_EXEC_NAPI_TYPE, napi_scale);
3576 		}
3577 
3578 		dp_debug(" int ctx %u num_irq %u irq_id_map %u %u",
3579 			 i, num_irq, irq_id_map[0], irq_id_map[1]);
3580 
3581 		if (ret) {
3582 			dp_init_err("%pK: failed, ret = %d", soc, ret);
3583 			dp_soc_interrupt_detach(txrx_soc);
3584 			return QDF_STATUS_E_FAILURE;
3585 		}
3586 
3587 		hif_event_history_init(soc->hif_handle, i);
3588 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
3589 
3590 		if (rx_err_ring_mask)
3591 			rx_err_ring_intr_ctxt_id = i;
3592 
3593 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
3594 			soc->mon_intr_id_lmac_map[lmac_id] = i;
3595 			lmac_id++;
3596 		}
3597 	}
3598 
3599 	hif_configure_ext_group_interrupts(soc->hif_handle);
3600 	if (rx_err_ring_intr_ctxt_id != HIF_MAX_GROUP)
3601 		hif_config_irq_clear_cpu_affinity(soc->hif_handle,
3602 						  rx_err_ring_intr_ctxt_id, 0);
3603 
3604 	return QDF_STATUS_SUCCESS;
3605 }
3606 
3607 #define AVG_MAX_MPDUS_PER_TID 128
3608 #define AVG_TIDS_PER_CLIENT 2
3609 #define AVG_FLOWS_PER_TID 2
3610 #define AVG_MSDUS_PER_FLOW 128
3611 #define AVG_MSDUS_PER_MPDU 4
3612 
3613 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id)
3614 {
3615 	struct qdf_mem_multi_page_t *pages;
3616 
3617 	if (mac_id != WLAN_INVALID_PDEV_ID) {
3618 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3619 	} else {
3620 		pages = &soc->link_desc_pages;
3621 	}
3622 
3623 	if (!pages) {
3624 		dp_err("can not get link desc pages");
3625 		QDF_ASSERT(0);
3626 		return;
3627 	}
3628 
3629 	if (pages->dma_pages) {
3630 		wlan_minidump_remove((void *)
3631 				     pages->dma_pages->page_v_addr_start,
3632 				     pages->num_pages * pages->page_size,
3633 				     soc->ctrl_psoc,
3634 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3635 				     "hw_link_desc_bank");
3636 		dp_desc_multi_pages_mem_free(soc, DP_HW_LINK_DESC_TYPE,
3637 					     pages, 0, false);
3638 	}
3639 }
3640 
3641 qdf_export_symbol(dp_hw_link_desc_pool_banks_free);
3642 
3643 QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc, uint32_t mac_id)
3644 {
3645 	hal_soc_handle_t hal_soc = soc->hal_soc;
3646 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
3647 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
3648 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
3649 	uint32_t num_mpdus_per_link_desc = hal_num_mpdus_per_link_desc(hal_soc);
3650 	uint32_t num_msdus_per_link_desc = hal_num_msdus_per_link_desc(hal_soc);
3651 	uint32_t num_mpdu_links_per_queue_desc =
3652 		hal_num_mpdu_links_per_queue_desc(hal_soc);
3653 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
3654 	uint32_t *total_link_descs, total_mem_size;
3655 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
3656 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
3657 	uint32_t num_entries;
3658 	struct qdf_mem_multi_page_t *pages;
3659 	struct dp_srng *dp_srng;
3660 	uint8_t minidump_str[MINIDUMP_STR_SIZE];
3661 
3662 	/* Only Tx queue descriptors are allocated from common link descriptor
3663 	 * pool Rx queue descriptors are not included in this because (REO queue
3664 	 * extension descriptors) they are expected to be allocated contiguously
3665 	 * with REO queue descriptors
3666 	 */
3667 	if (mac_id != WLAN_INVALID_PDEV_ID) {
3668 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3669 		/* dp_monitor_get_link_desc_pages returns NULL only
3670 		 * if monitor SOC is  NULL
3671 		 */
3672 		if (!pages) {
3673 			dp_err("can not get link desc pages");
3674 			QDF_ASSERT(0);
3675 			return QDF_STATUS_E_FAULT;
3676 		}
3677 		dp_srng = &soc->rxdma_mon_desc_ring[mac_id];
3678 		num_entries = dp_srng->alloc_size /
3679 			hal_srng_get_entrysize(soc->hal_soc,
3680 					       RXDMA_MONITOR_DESC);
3681 		total_link_descs = dp_monitor_get_total_link_descs(soc, mac_id);
3682 		qdf_str_lcopy(minidump_str, "mon_link_desc_bank",
3683 			      MINIDUMP_STR_SIZE);
3684 	} else {
3685 		num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3686 			AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
3687 
3688 		num_mpdu_queue_descs = num_mpdu_link_descs /
3689 			num_mpdu_links_per_queue_desc;
3690 
3691 		num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3692 			AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
3693 			num_msdus_per_link_desc;
3694 
3695 		num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3696 			AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
3697 
3698 		num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
3699 			num_tx_msdu_link_descs + num_rx_msdu_link_descs;
3700 
3701 		pages = &soc->link_desc_pages;
3702 		total_link_descs = &soc->total_link_descs;
3703 		qdf_str_lcopy(minidump_str, "link_desc_bank",
3704 			      MINIDUMP_STR_SIZE);
3705 	}
3706 
3707 	/* If link descriptor banks are allocated, return from here */
3708 	if (pages->num_pages)
3709 		return QDF_STATUS_SUCCESS;
3710 
3711 	/* Round up to power of 2 */
3712 	*total_link_descs = 1;
3713 	while (*total_link_descs < num_entries)
3714 		*total_link_descs <<= 1;
3715 
3716 	dp_init_info("%pK: total_link_descs: %u, link_desc_size: %d",
3717 		     soc, *total_link_descs, link_desc_size);
3718 	total_mem_size =  *total_link_descs * link_desc_size;
3719 	total_mem_size += link_desc_align;
3720 
3721 	dp_init_info("%pK: total_mem_size: %d",
3722 		     soc, total_mem_size);
3723 
3724 	dp_set_max_page_size(pages, max_alloc_size);
3725 	dp_desc_multi_pages_mem_alloc(soc, DP_HW_LINK_DESC_TYPE,
3726 				      pages,
3727 				      link_desc_size,
3728 				      *total_link_descs,
3729 				      0, false);
3730 	if (!pages->num_pages) {
3731 		dp_err("Multi page alloc fail for hw link desc pool");
3732 		return QDF_STATUS_E_FAULT;
3733 	}
3734 
3735 	wlan_minidump_log(pages->dma_pages->page_v_addr_start,
3736 			  pages->num_pages * pages->page_size,
3737 			  soc->ctrl_psoc,
3738 			  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3739 			  "hw_link_desc_bank");
3740 
3741 	return QDF_STATUS_SUCCESS;
3742 }
3743 
3744 /**
3745  * dp_hw_link_desc_ring_free() - Free h/w link desc rings
3746  * @soc: DP SOC handle
3747  *
3748  * Return: none
3749  */
3750 static void dp_hw_link_desc_ring_free(struct dp_soc *soc)
3751 {
3752 	uint32_t i;
3753 	uint32_t size = soc->wbm_idle_scatter_buf_size;
3754 	void *vaddr = soc->wbm_idle_link_ring.base_vaddr_unaligned;
3755 	qdf_dma_addr_t paddr;
3756 
3757 	if (soc->wbm_idle_scatter_buf_base_vaddr[0]) {
3758 		for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
3759 			vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
3760 			paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
3761 			if (vaddr) {
3762 				qdf_mem_free_consistent(soc->osdev,
3763 							soc->osdev->dev,
3764 							size,
3765 							vaddr,
3766 							paddr,
3767 							0);
3768 				vaddr = NULL;
3769 			}
3770 		}
3771 	} else {
3772 		wlan_minidump_remove(soc->wbm_idle_link_ring.base_vaddr_unaligned,
3773 				     soc->wbm_idle_link_ring.alloc_size,
3774 				     soc->ctrl_psoc,
3775 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3776 				     "wbm_idle_link_ring");
3777 		dp_srng_free(soc, &soc->wbm_idle_link_ring);
3778 	}
3779 }
3780 
3781 /**
3782  * dp_hw_link_desc_ring_alloc() - Allocate hw link desc rings
3783  * @soc: DP SOC handle
3784  *
3785  * Allocate memory for WBM_IDLE_LINK srng ring if the number of
3786  * link descriptors is less then the max_allocated size. else
3787  * allocate memory for wbm_idle_scatter_buffer.
3788  *
3789  * Return: QDF_STATUS_SUCCESS: success
3790  *         QDF_STATUS_E_NO_MEM: No memory (Failure)
3791  */
3792 static QDF_STATUS dp_hw_link_desc_ring_alloc(struct dp_soc *soc)
3793 {
3794 	uint32_t entry_size, i;
3795 	uint32_t total_mem_size;
3796 	qdf_dma_addr_t *baseaddr = NULL;
3797 	struct dp_srng *dp_srng;
3798 	uint32_t ring_type;
3799 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
3800 	uint32_t tlds;
3801 
3802 	ring_type = WBM_IDLE_LINK;
3803 	dp_srng = &soc->wbm_idle_link_ring;
3804 	tlds = soc->total_link_descs;
3805 
3806 	entry_size = hal_srng_get_entrysize(soc->hal_soc, ring_type);
3807 	total_mem_size = entry_size * tlds;
3808 
3809 	if (total_mem_size <= max_alloc_size) {
3810 		if (dp_srng_alloc(soc, dp_srng, ring_type, tlds, 0)) {
3811 			dp_init_err("%pK: Link desc idle ring setup failed",
3812 				    soc);
3813 			goto fail;
3814 		}
3815 
3816 		wlan_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
3817 				  soc->wbm_idle_link_ring.alloc_size,
3818 				  soc->ctrl_psoc,
3819 				  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3820 				  "wbm_idle_link_ring");
3821 	} else {
3822 		uint32_t num_scatter_bufs;
3823 		uint32_t buf_size = 0;
3824 
3825 		soc->wbm_idle_scatter_buf_size =
3826 			hal_idle_list_scatter_buf_size(soc->hal_soc);
3827 		hal_idle_scatter_buf_num_entries(
3828 					soc->hal_soc,
3829 					soc->wbm_idle_scatter_buf_size);
3830 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
3831 					soc->hal_soc, total_mem_size,
3832 					soc->wbm_idle_scatter_buf_size);
3833 
3834 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
3835 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3836 				  FL("scatter bufs size out of bounds"));
3837 			goto fail;
3838 		}
3839 
3840 		for (i = 0; i < num_scatter_bufs; i++) {
3841 			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
3842 			buf_size = soc->wbm_idle_scatter_buf_size;
3843 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
3844 				qdf_mem_alloc_consistent(soc->osdev,
3845 							 soc->osdev->dev,
3846 							 buf_size,
3847 							 baseaddr);
3848 
3849 			if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
3850 				QDF_TRACE(QDF_MODULE_ID_DP,
3851 					  QDF_TRACE_LEVEL_ERROR,
3852 					  FL("Scatter lst memory alloc fail"));
3853 				goto fail;
3854 			}
3855 		}
3856 		soc->num_scatter_bufs = num_scatter_bufs;
3857 	}
3858 	return QDF_STATUS_SUCCESS;
3859 
3860 fail:
3861 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
3862 		void *vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
3863 		qdf_dma_addr_t paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
3864 
3865 		if (vaddr) {
3866 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
3867 						soc->wbm_idle_scatter_buf_size,
3868 						vaddr,
3869 						paddr, 0);
3870 			vaddr = NULL;
3871 		}
3872 	}
3873 	return QDF_STATUS_E_NOMEM;
3874 }
3875 
3876 qdf_export_symbol(dp_hw_link_desc_pool_banks_alloc);
3877 
3878 /**
3879  * dp_hw_link_desc_ring_init() - Initialize hw link desc rings
3880  * @soc: DP SOC handle
3881  *
3882  * Return: QDF_STATUS_SUCCESS: success
3883  *         QDF_STATUS_E_FAILURE: failure
3884  */
3885 static QDF_STATUS dp_hw_link_desc_ring_init(struct dp_soc *soc)
3886 {
3887 	struct dp_srng *dp_srng = &soc->wbm_idle_link_ring;
3888 
3889 	if (dp_srng->base_vaddr_unaligned) {
3890 		if (dp_srng_init(soc, dp_srng, WBM_IDLE_LINK, 0, 0))
3891 			return QDF_STATUS_E_FAILURE;
3892 	}
3893 	return QDF_STATUS_SUCCESS;
3894 }
3895 
3896 /**
3897  * dp_hw_link_desc_ring_deinit() - Reset hw link desc rings
3898  * @soc: DP SOC handle
3899  *
3900  * Return: None
3901  */
3902 static void dp_hw_link_desc_ring_deinit(struct dp_soc *soc)
3903 {
3904 	dp_srng_deinit(soc, &soc->wbm_idle_link_ring, WBM_IDLE_LINK, 0);
3905 }
3906 
3907 void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id)
3908 {
3909 	uint32_t cookie = 0;
3910 	uint32_t page_idx = 0;
3911 	struct qdf_mem_multi_page_t *pages;
3912 	struct qdf_mem_dma_page_t *dma_pages;
3913 	uint32_t offset = 0;
3914 	uint32_t count = 0;
3915 	uint32_t desc_id = 0;
3916 	void *desc_srng;
3917 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
3918 	uint32_t *total_link_descs_addr;
3919 	uint32_t total_link_descs;
3920 	uint32_t scatter_buf_num;
3921 	uint32_t num_entries_per_buf = 0;
3922 	uint32_t rem_entries;
3923 	uint32_t num_descs_per_page;
3924 	uint32_t num_scatter_bufs = 0;
3925 	uint8_t *scatter_buf_ptr;
3926 	void *desc;
3927 
3928 	num_scatter_bufs = soc->num_scatter_bufs;
3929 
3930 	if (mac_id == WLAN_INVALID_PDEV_ID) {
3931 		pages = &soc->link_desc_pages;
3932 		total_link_descs = soc->total_link_descs;
3933 		desc_srng = soc->wbm_idle_link_ring.hal_srng;
3934 	} else {
3935 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3936 		/* dp_monitor_get_link_desc_pages returns NULL only
3937 		 * if monitor SOC is  NULL
3938 		 */
3939 		if (!pages) {
3940 			dp_err("can not get link desc pages");
3941 			QDF_ASSERT(0);
3942 			return;
3943 		}
3944 		total_link_descs_addr =
3945 				dp_monitor_get_total_link_descs(soc, mac_id);
3946 		total_link_descs = *total_link_descs_addr;
3947 		desc_srng = soc->rxdma_mon_desc_ring[mac_id].hal_srng;
3948 	}
3949 
3950 	dma_pages = pages->dma_pages;
3951 	do {
3952 		qdf_mem_zero(dma_pages[page_idx].page_v_addr_start,
3953 			     pages->page_size);
3954 		page_idx++;
3955 	} while (page_idx < pages->num_pages);
3956 
3957 	if (desc_srng) {
3958 		hal_srng_access_start_unlocked(soc->hal_soc, desc_srng);
3959 		page_idx = 0;
3960 		count = 0;
3961 		offset = 0;
3962 		pages = &soc->link_desc_pages;
3963 		while ((desc = hal_srng_src_get_next(soc->hal_soc,
3964 						     desc_srng)) &&
3965 			(count < total_link_descs)) {
3966 			page_idx = count / pages->num_element_per_page;
3967 			if (desc_id == pages->num_element_per_page)
3968 				desc_id = 0;
3969 
3970 			offset = count % pages->num_element_per_page;
3971 			cookie = LINK_DESC_COOKIE(desc_id, page_idx,
3972 						  soc->link_desc_id_start);
3973 
3974 			hal_set_link_desc_addr(soc->hal_soc, desc, cookie,
3975 					       dma_pages[page_idx].page_p_addr
3976 					       + (offset * link_desc_size),
3977 					       soc->idle_link_bm_id);
3978 			count++;
3979 			desc_id++;
3980 		}
3981 		hal_srng_access_end_unlocked(soc->hal_soc, desc_srng);
3982 	} else {
3983 		/* Populate idle list scatter buffers with link descriptor
3984 		 * pointers
3985 		 */
3986 		scatter_buf_num = 0;
3987 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
3988 					soc->hal_soc,
3989 					soc->wbm_idle_scatter_buf_size);
3990 
3991 		scatter_buf_ptr = (uint8_t *)(
3992 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
3993 		rem_entries = num_entries_per_buf;
3994 		pages = &soc->link_desc_pages;
3995 		page_idx = 0; count = 0;
3996 		offset = 0;
3997 		num_descs_per_page = pages->num_element_per_page;
3998 
3999 		while (count < total_link_descs) {
4000 			page_idx = count / num_descs_per_page;
4001 			offset = count % num_descs_per_page;
4002 			if (desc_id == pages->num_element_per_page)
4003 				desc_id = 0;
4004 
4005 			cookie = LINK_DESC_COOKIE(desc_id, page_idx,
4006 						  soc->link_desc_id_start);
4007 			hal_set_link_desc_addr(soc->hal_soc,
4008 					       (void *)scatter_buf_ptr,
4009 					       cookie,
4010 					       dma_pages[page_idx].page_p_addr +
4011 					       (offset * link_desc_size),
4012 					       soc->idle_link_bm_id);
4013 			rem_entries--;
4014 			if (rem_entries) {
4015 				scatter_buf_ptr += link_desc_size;
4016 			} else {
4017 				rem_entries = num_entries_per_buf;
4018 				scatter_buf_num++;
4019 				if (scatter_buf_num >= num_scatter_bufs)
4020 					break;
4021 				scatter_buf_ptr = (uint8_t *)
4022 					(soc->wbm_idle_scatter_buf_base_vaddr[
4023 					 scatter_buf_num]);
4024 			}
4025 			count++;
4026 			desc_id++;
4027 		}
4028 		/* Setup link descriptor idle list in HW */
4029 		hal_setup_link_idle_list(soc->hal_soc,
4030 			soc->wbm_idle_scatter_buf_base_paddr,
4031 			soc->wbm_idle_scatter_buf_base_vaddr,
4032 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
4033 			(uint32_t)(scatter_buf_ptr -
4034 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
4035 			scatter_buf_num-1])), total_link_descs);
4036 	}
4037 }
4038 
4039 qdf_export_symbol(dp_link_desc_ring_replenish);
4040 
4041 #ifdef IPA_OFFLOAD
4042 #define USE_1_IPA_RX_REO_RING 1
4043 #define USE_2_IPA_RX_REO_RINGS 2
4044 #define REO_DST_RING_SIZE_QCA6290 1023
4045 #ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
4046 #define REO_DST_RING_SIZE_QCA8074 1023
4047 #define REO_DST_RING_SIZE_QCN9000 2048
4048 #else
4049 #define REO_DST_RING_SIZE_QCA8074 8
4050 #define REO_DST_RING_SIZE_QCN9000 8
4051 #endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
4052 
4053 #ifdef IPA_WDI3_TX_TWO_PIPES
4054 #ifdef DP_MEMORY_OPT
4055 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
4056 {
4057 	return dp_init_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
4058 }
4059 
4060 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
4061 {
4062 	dp_deinit_tx_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
4063 }
4064 
4065 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
4066 {
4067 	return dp_alloc_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
4068 }
4069 
4070 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
4071 {
4072 	dp_free_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
4073 }
4074 
4075 #else /* !DP_MEMORY_OPT */
4076 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
4077 {
4078 	return 0;
4079 }
4080 
4081 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
4082 {
4083 }
4084 
4085 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
4086 {
4087 	return 0
4088 }
4089 
4090 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
4091 {
4092 }
4093 #endif /* DP_MEMORY_OPT */
4094 
4095 static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
4096 {
4097 	hal_tx_init_data_ring(soc->hal_soc,
4098 			      soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng);
4099 }
4100 
4101 #else /* !IPA_WDI3_TX_TWO_PIPES */
4102 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
4103 {
4104 	return 0;
4105 }
4106 
4107 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
4108 {
4109 }
4110 
4111 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
4112 {
4113 	return 0;
4114 }
4115 
4116 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
4117 {
4118 }
4119 
4120 static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
4121 {
4122 }
4123 
4124 #endif /* IPA_WDI3_TX_TWO_PIPES */
4125 
4126 #else
4127 
4128 #define REO_DST_RING_SIZE_QCA6290 1024
4129 
4130 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
4131 {
4132 	return 0;
4133 }
4134 
4135 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
4136 {
4137 }
4138 
4139 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
4140 {
4141 	return 0;
4142 }
4143 
4144 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
4145 {
4146 }
4147 
4148 static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
4149 {
4150 }
4151 
4152 #endif /* IPA_OFFLOAD */
4153 
4154 /**
4155  * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
4156  * @soc: Datapath soc handler
4157  *
4158  * This api resets the default cpu ring map
4159  */
4160 
4161 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
4162 {
4163 	uint8_t i;
4164 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4165 
4166 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
4167 		switch (nss_config) {
4168 		case dp_nss_cfg_first_radio:
4169 			/*
4170 			 * Setting Tx ring map for one nss offloaded radio
4171 			 */
4172 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
4173 			break;
4174 
4175 		case dp_nss_cfg_second_radio:
4176 			/*
4177 			 * Setting Tx ring for two nss offloaded radios
4178 			 */
4179 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
4180 			break;
4181 
4182 		case dp_nss_cfg_dbdc:
4183 			/*
4184 			 * Setting Tx ring map for 2 nss offloaded radios
4185 			 */
4186 			soc->tx_ring_map[i] =
4187 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
4188 			break;
4189 
4190 		case dp_nss_cfg_dbtc:
4191 			/*
4192 			 * Setting Tx ring map for 3 nss offloaded radios
4193 			 */
4194 			soc->tx_ring_map[i] =
4195 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
4196 			break;
4197 
4198 		default:
4199 			dp_err("tx_ring_map failed due to invalid nss cfg");
4200 			break;
4201 		}
4202 	}
4203 }
4204 
4205 /**
4206  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
4207  * @soc: DP soc handle
4208  * @ring_type: ring type
4209  * @ring_num: ring_num
4210  *
4211  * Return: 0 if the ring is not offloaded, non-0 if it is offloaded
4212  */
4213 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
4214 					    enum hal_ring_type ring_type, int ring_num)
4215 {
4216 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4217 	uint8_t status = 0;
4218 
4219 	switch (ring_type) {
4220 	case WBM2SW_RELEASE:
4221 	case REO_DST:
4222 	case RXDMA_BUF:
4223 	case REO_EXCEPTION:
4224 		status = ((nss_config) & (1 << ring_num));
4225 		break;
4226 	default:
4227 		break;
4228 	}
4229 
4230 	return status;
4231 }
4232 
4233 /**
4234  * dp_soc_disable_unused_mac_intr_mask() - reset interrupt mask for
4235  *					  unused WMAC hw rings
4236  * @soc: DP Soc handle
4237  * @mac_num: wmac num
4238  *
4239  * Return: Return void
4240  */
4241 static void dp_soc_disable_unused_mac_intr_mask(struct dp_soc *soc,
4242 						int mac_num)
4243 {
4244 	uint8_t *grp_mask = NULL;
4245 	int group_number;
4246 
4247 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
4248 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4249 	wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
4250 					  group_number, 0x0);
4251 
4252 	grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
4253 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4254 	wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
4255 				      group_number, 0x0);
4256 
4257 	grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
4258 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4259 	wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
4260 					  group_number, 0x0);
4261 
4262 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
4263 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4264 	wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
4265 					      group_number, 0x0);
4266 }
4267 
4268 #ifdef IPA_OFFLOAD
4269 #ifdef IPA_WDI3_VLAN_SUPPORT
4270 /**
4271  * dp_soc_reset_ipa_vlan_intr_mask() - reset interrupt mask for IPA offloaded
4272  *                                     ring for vlan tagged traffic
4273  * @soc: DP Soc handle
4274  *
4275  * Return: Return void
4276  */
4277 static void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
4278 {
4279 	uint8_t *grp_mask = NULL;
4280 	int group_number, mask;
4281 
4282 	if (!wlan_ipa_is_vlan_enabled())
4283 		return;
4284 
4285 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
4286 
4287 	group_number = dp_srng_find_ring_in_mask(IPA_ALT_REO_DEST_RING_IDX, grp_mask);
4288 	if (group_number < 0) {
4289 		dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4290 			      soc, REO_DST, IPA_ALT_REO_DEST_RING_IDX);
4291 		return;
4292 	}
4293 
4294 	mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
4295 
4296 	/* reset the interrupt mask for offloaded ring */
4297 	mask &= (~(1 << IPA_ALT_REO_DEST_RING_IDX));
4298 
4299 	/*
4300 	 * set the interrupt mask to zero for rx offloaded radio.
4301 	 */
4302 	wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
4303 }
4304 #else
4305 static inline
4306 void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
4307 { }
4308 #endif /* IPA_WDI3_VLAN_SUPPORT */
4309 #else
4310 static inline
4311 void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
4312 { }
4313 #endif /* IPA_OFFLOAD */
4314 
4315 /**
4316  * dp_soc_reset_intr_mask() - reset interrupt mask
4317  * @soc: DP Soc handle
4318  *
4319  * Return: Return void
4320  */
4321 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
4322 {
4323 	uint8_t j;
4324 	uint8_t *grp_mask = NULL;
4325 	int group_number, mask, num_ring;
4326 
4327 	/* number of tx ring */
4328 	num_ring = soc->num_tcl_data_rings;
4329 
4330 	/*
4331 	 * group mask for tx completion  ring.
4332 	 */
4333 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
4334 
4335 	/* loop and reset the mask for only offloaded ring */
4336 	for (j = 0; j < WLAN_CFG_NUM_TCL_DATA_RINGS; j++) {
4337 		/*
4338 		 * Group number corresponding to tx offloaded ring.
4339 		 */
4340 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
4341 		if (group_number < 0) {
4342 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4343 				      soc, WBM2SW_RELEASE, j);
4344 			continue;
4345 		}
4346 
4347 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
4348 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j) &&
4349 		    (!mask)) {
4350 			continue;
4351 		}
4352 
4353 		/* reset the tx mask for offloaded ring */
4354 		mask &= (~(1 << j));
4355 
4356 		/*
4357 		 * reset the interrupt mask for offloaded ring.
4358 		 */
4359 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
4360 	}
4361 
4362 	/* number of rx rings */
4363 	num_ring = soc->num_reo_dest_rings;
4364 
4365 	/*
4366 	 * group mask for reo destination ring.
4367 	 */
4368 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
4369 
4370 	/* loop and reset the mask for only offloaded ring */
4371 	for (j = 0; j < WLAN_CFG_NUM_REO_DEST_RING; j++) {
4372 		/*
4373 		 * Group number corresponding to rx offloaded ring.
4374 		 */
4375 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
4376 		if (group_number < 0) {
4377 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4378 				      soc, REO_DST, j);
4379 			continue;
4380 		}
4381 
4382 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
4383 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j) &&
4384 		    (!mask)) {
4385 			continue;
4386 		}
4387 
4388 		/* reset the interrupt mask for offloaded ring */
4389 		mask &= (~(1 << j));
4390 
4391 		/*
4392 		 * set the interrupt mask to zero for rx offloaded radio.
4393 		 */
4394 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
4395 	}
4396 
4397 	/*
4398 	 * group mask for Rx buffer refill ring
4399 	 */
4400 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
4401 
4402 	/* loop and reset the mask for only offloaded ring */
4403 	for (j = 0; j < MAX_PDEV_CNT; j++) {
4404 		int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
4405 
4406 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
4407 			continue;
4408 		}
4409 
4410 		/*
4411 		 * Group number corresponding to rx offloaded ring.
4412 		 */
4413 		group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask);
4414 		if (group_number < 0) {
4415 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4416 				      soc, REO_DST, lmac_id);
4417 			continue;
4418 		}
4419 
4420 		/* set the interrupt mask for offloaded ring */
4421 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
4422 				group_number);
4423 		mask &= (~(1 << lmac_id));
4424 
4425 		/*
4426 		 * set the interrupt mask to zero for rx offloaded radio.
4427 		 */
4428 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
4429 			group_number, mask);
4430 	}
4431 
4432 	grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
4433 
4434 	for (j = 0; j < num_ring; j++) {
4435 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_EXCEPTION, j)) {
4436 			continue;
4437 		}
4438 
4439 		/*
4440 		 * Group number corresponding to rx err ring.
4441 		 */
4442 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
4443 		if (group_number < 0) {
4444 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4445 				      soc, REO_EXCEPTION, j);
4446 			continue;
4447 		}
4448 
4449 		wlan_cfg_set_rx_err_ring_mask(soc->wlan_cfg_ctx,
4450 					      group_number, 0);
4451 	}
4452 }
4453 
4454 #ifdef IPA_OFFLOAD
4455 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap0,
4456 			 uint32_t *remap1, uint32_t *remap2)
4457 {
4458 	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX] = {
4459 				REO_REMAP_SW1, REO_REMAP_SW2, REO_REMAP_SW3,
4460 				REO_REMAP_SW5, REO_REMAP_SW6, REO_REMAP_SW7};
4461 
4462 	switch (soc->arch_id) {
4463 	case CDP_ARCH_TYPE_BE:
4464 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4465 					      soc->num_reo_dest_rings -
4466 					      USE_2_IPA_RX_REO_RINGS, remap1,
4467 					      remap2);
4468 		break;
4469 
4470 	case CDP_ARCH_TYPE_LI:
4471 		if (wlan_ipa_is_vlan_enabled()) {
4472 			hal_compute_reo_remap_ix2_ix3(
4473 					soc->hal_soc, ring,
4474 					soc->num_reo_dest_rings -
4475 					USE_2_IPA_RX_REO_RINGS, remap1,
4476 					remap2);
4477 
4478 		} else {
4479 			hal_compute_reo_remap_ix2_ix3(
4480 					soc->hal_soc, ring,
4481 					soc->num_reo_dest_rings -
4482 					USE_1_IPA_RX_REO_RING, remap1,
4483 					remap2);
4484 		}
4485 
4486 		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);
4487 		break;
4488 	default:
4489 		dp_err("unknown arch_id 0x%x", soc->arch_id);
4490 		QDF_BUG(0);
4491 
4492 	}
4493 
4494 	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
4495 
4496 	return true;
4497 }
4498 
4499 #ifdef IPA_WDI3_TX_TWO_PIPES
4500 static bool dp_ipa_is_alt_tx_ring(int index)
4501 {
4502 	return index == IPA_TX_ALT_RING_IDX;
4503 }
4504 
4505 static bool dp_ipa_is_alt_tx_comp_ring(int index)
4506 {
4507 	return index == IPA_TX_ALT_COMP_RING_IDX;
4508 }
4509 #else /* !IPA_WDI3_TX_TWO_PIPES */
4510 static bool dp_ipa_is_alt_tx_ring(int index)
4511 {
4512 	return false;
4513 }
4514 
4515 static bool dp_ipa_is_alt_tx_comp_ring(int index)
4516 {
4517 	return false;
4518 }
4519 #endif /* IPA_WDI3_TX_TWO_PIPES */
4520 
4521 /**
4522  * dp_ipa_get_tx_ring_size() - Get Tx ring size for IPA
4523  *
4524  * @tx_ring_num: Tx ring number
4525  * @tx_ipa_ring_sz: Return param only updated for IPA.
4526  * @soc_cfg_ctx: dp soc cfg context
4527  *
4528  * Return: None
4529  */
4530 static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz,
4531 				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4532 {
4533 	if (!soc_cfg_ctx->ipa_enabled)
4534 		return;
4535 
4536 	if (tx_ring_num == IPA_TCL_DATA_RING_IDX)
4537 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_ring_size(soc_cfg_ctx);
4538 	else if (dp_ipa_is_alt_tx_ring(tx_ring_num))
4539 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_alt_ring_size(soc_cfg_ctx);
4540 }
4541 
4542 /**
4543  * dp_ipa_get_tx_comp_ring_size() - Get Tx comp ring size for IPA
4544  *
4545  * @tx_comp_ring_num: Tx comp ring number
4546  * @tx_comp_ipa_ring_sz: Return param only updated for IPA.
4547  * @soc_cfg_ctx: dp soc cfg context
4548  *
4549  * Return: None
4550  */
4551 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
4552 					 int *tx_comp_ipa_ring_sz,
4553 				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4554 {
4555 	if (!soc_cfg_ctx->ipa_enabled)
4556 		return;
4557 
4558 	if (tx_comp_ring_num == IPA_TCL_DATA_RING_IDX)
4559 		*tx_comp_ipa_ring_sz =
4560 				wlan_cfg_ipa_tx_comp_ring_size(soc_cfg_ctx);
4561 	else if (dp_ipa_is_alt_tx_comp_ring(tx_comp_ring_num))
4562 		*tx_comp_ipa_ring_sz =
4563 				wlan_cfg_ipa_tx_alt_comp_ring_size(soc_cfg_ctx);
4564 }
4565 #else
4566 static uint8_t dp_reo_ring_selection(uint32_t value, uint32_t *ring)
4567 {
4568 	uint8_t num = 0;
4569 
4570 	switch (value) {
4571 	/* should we have all the different possible ring configs */
4572 	case 0xFF:
4573 		num = 8;
4574 		ring[0] = REO_REMAP_SW1;
4575 		ring[1] = REO_REMAP_SW2;
4576 		ring[2] = REO_REMAP_SW3;
4577 		ring[3] = REO_REMAP_SW4;
4578 		ring[4] = REO_REMAP_SW5;
4579 		ring[5] = REO_REMAP_SW6;
4580 		ring[6] = REO_REMAP_SW7;
4581 		ring[7] = REO_REMAP_SW8;
4582 		break;
4583 
4584 	case 0x3F:
4585 		num = 6;
4586 		ring[0] = REO_REMAP_SW1;
4587 		ring[1] = REO_REMAP_SW2;
4588 		ring[2] = REO_REMAP_SW3;
4589 		ring[3] = REO_REMAP_SW4;
4590 		ring[4] = REO_REMAP_SW5;
4591 		ring[5] = REO_REMAP_SW6;
4592 		break;
4593 
4594 	case 0xF:
4595 		num = 4;
4596 		ring[0] = REO_REMAP_SW1;
4597 		ring[1] = REO_REMAP_SW2;
4598 		ring[2] = REO_REMAP_SW3;
4599 		ring[3] = REO_REMAP_SW4;
4600 		break;
4601 	case 0xE:
4602 		num = 3;
4603 		ring[0] = REO_REMAP_SW2;
4604 		ring[1] = REO_REMAP_SW3;
4605 		ring[2] = REO_REMAP_SW4;
4606 		break;
4607 	case 0xD:
4608 		num = 3;
4609 		ring[0] = REO_REMAP_SW1;
4610 		ring[1] = REO_REMAP_SW3;
4611 		ring[2] = REO_REMAP_SW4;
4612 		break;
4613 	case 0xC:
4614 		num = 2;
4615 		ring[0] = REO_REMAP_SW3;
4616 		ring[1] = REO_REMAP_SW4;
4617 		break;
4618 	case 0xB:
4619 		num = 3;
4620 		ring[0] = REO_REMAP_SW1;
4621 		ring[1] = REO_REMAP_SW2;
4622 		ring[2] = REO_REMAP_SW4;
4623 		break;
4624 	case 0xA:
4625 		num = 2;
4626 		ring[0] = REO_REMAP_SW2;
4627 		ring[1] = REO_REMAP_SW4;
4628 		break;
4629 	case 0x9:
4630 		num = 2;
4631 		ring[0] = REO_REMAP_SW1;
4632 		ring[1] = REO_REMAP_SW4;
4633 		break;
4634 	case 0x8:
4635 		num = 1;
4636 		ring[0] = REO_REMAP_SW4;
4637 		break;
4638 	case 0x7:
4639 		num = 3;
4640 		ring[0] = REO_REMAP_SW1;
4641 		ring[1] = REO_REMAP_SW2;
4642 		ring[2] = REO_REMAP_SW3;
4643 		break;
4644 	case 0x6:
4645 		num = 2;
4646 		ring[0] = REO_REMAP_SW2;
4647 		ring[1] = REO_REMAP_SW3;
4648 		break;
4649 	case 0x5:
4650 		num = 2;
4651 		ring[0] = REO_REMAP_SW1;
4652 		ring[1] = REO_REMAP_SW3;
4653 		break;
4654 	case 0x4:
4655 		num = 1;
4656 		ring[0] = REO_REMAP_SW3;
4657 		break;
4658 	case 0x3:
4659 		num = 2;
4660 		ring[0] = REO_REMAP_SW1;
4661 		ring[1] = REO_REMAP_SW2;
4662 		break;
4663 	case 0x2:
4664 		num = 1;
4665 		ring[0] = REO_REMAP_SW2;
4666 		break;
4667 	case 0x1:
4668 		num = 1;
4669 		ring[0] = REO_REMAP_SW1;
4670 		break;
4671 	default:
4672 		dp_err("unknown reo ring map 0x%x", value);
4673 		QDF_BUG(0);
4674 	}
4675 	return num;
4676 }
4677 
4678 bool dp_reo_remap_config(struct dp_soc *soc,
4679 			 uint32_t *remap0,
4680 			 uint32_t *remap1,
4681 			 uint32_t *remap2)
4682 {
4683 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4684 	uint32_t reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
4685 	uint8_t num;
4686 	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX];
4687 	uint32_t value;
4688 
4689 	switch (offload_radio) {
4690 	case dp_nss_cfg_default:
4691 		value = reo_config & WLAN_CFG_NUM_REO_RINGS_MAP_MAX;
4692 		num = dp_reo_ring_selection(value, ring);
4693 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4694 					      num, remap1, remap2);
4695 		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);
4696 
4697 		break;
4698 	case dp_nss_cfg_first_radio:
4699 		value = reo_config & 0xE;
4700 		num = dp_reo_ring_selection(value, ring);
4701 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4702 					      num, remap1, remap2);
4703 
4704 		break;
4705 	case dp_nss_cfg_second_radio:
4706 		value = reo_config & 0xD;
4707 		num = dp_reo_ring_selection(value, ring);
4708 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4709 					      num, remap1, remap2);
4710 
4711 		break;
4712 	case dp_nss_cfg_dbdc:
4713 	case dp_nss_cfg_dbtc:
4714 		/* return false if both or all are offloaded to NSS */
4715 		return false;
4716 
4717 	}
4718 
4719 	dp_debug("remap1 %x remap2 %x offload_radio %u",
4720 		 *remap1, *remap2, offload_radio);
4721 	return true;
4722 }
4723 
4724 static void dp_ipa_get_tx_ring_size(int ring_num, int *tx_ipa_ring_sz,
4725 				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4726 {
4727 }
4728 
4729 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
4730 					 int *tx_comp_ipa_ring_sz,
4731 				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4732 {
4733 }
4734 #endif /* IPA_OFFLOAD */
4735 
4736 /**
4737  * dp_reo_frag_dst_set() - configure reo register to set the
4738  *                        fragment destination ring
4739  * @soc: Datapath soc
4740  * @frag_dst_ring: output parameter to set fragment destination ring
4741  *
4742  * Based on offload_radio below fragment destination rings is selected
4743  * 0 - TCL
4744  * 1 - SW1
4745  * 2 - SW2
4746  * 3 - SW3
4747  * 4 - SW4
4748  * 5 - Release
4749  * 6 - FW
4750  * 7 - alternate select
4751  *
4752  * Return: void
4753  */
4754 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
4755 {
4756 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4757 
4758 	switch (offload_radio) {
4759 	case dp_nss_cfg_default:
4760 		*frag_dst_ring = REO_REMAP_TCL;
4761 		break;
4762 	case dp_nss_cfg_first_radio:
4763 		/*
4764 		 * This configuration is valid for single band radio which
4765 		 * is also NSS offload.
4766 		 */
4767 	case dp_nss_cfg_dbdc:
4768 	case dp_nss_cfg_dbtc:
4769 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
4770 		break;
4771 	default:
4772 		dp_init_err("%pK: dp_reo_frag_dst_set invalid offload radio config", soc);
4773 		break;
4774 	}
4775 }
4776 
4777 #ifdef ENABLE_VERBOSE_DEBUG
4778 static void dp_enable_verbose_debug(struct dp_soc *soc)
4779 {
4780 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
4781 
4782 	soc_cfg_ctx = soc->wlan_cfg_ctx;
4783 
4784 	if (soc_cfg_ctx->per_pkt_trace & dp_verbose_debug_mask)
4785 		is_dp_verbose_debug_enabled = true;
4786 
4787 	if (soc_cfg_ctx->per_pkt_trace & hal_verbose_debug_mask)
4788 		hal_set_verbose_debug(true);
4789 	else
4790 		hal_set_verbose_debug(false);
4791 }
4792 #else
4793 static void dp_enable_verbose_debug(struct dp_soc *soc)
4794 {
4795 }
4796 #endif
4797 
4798 #ifdef WLAN_FEATURE_STATS_EXT
4799 static inline void dp_create_ext_stats_event(struct dp_soc *soc)
4800 {
4801 	qdf_event_create(&soc->rx_hw_stats_event);
4802 }
4803 #else
4804 static inline void dp_create_ext_stats_event(struct dp_soc *soc)
4805 {
4806 }
4807 #endif
4808 
4809 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index)
4810 {
4811 	int tcl_ring_num, wbm_ring_num;
4812 
4813 	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
4814 						index,
4815 						&tcl_ring_num,
4816 						&wbm_ring_num);
4817 
4818 	if (tcl_ring_num == -1) {
4819 		dp_err("incorrect tcl ring num for index %u", index);
4820 		return;
4821 	}
4822 
4823 	wlan_minidump_remove(soc->tcl_data_ring[index].base_vaddr_unaligned,
4824 			     soc->tcl_data_ring[index].alloc_size,
4825 			     soc->ctrl_psoc,
4826 			     WLAN_MD_DP_SRNG_TCL_DATA,
4827 			     "tcl_data_ring");
4828 	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
4829 	dp_srng_deinit(soc, &soc->tcl_data_ring[index], TCL_DATA,
4830 		       tcl_ring_num);
4831 
4832 	if (wbm_ring_num == INVALID_WBM_RING_NUM)
4833 		return;
4834 
4835 	wlan_minidump_remove(soc->tx_comp_ring[index].base_vaddr_unaligned,
4836 			     soc->tx_comp_ring[index].alloc_size,
4837 			     soc->ctrl_psoc,
4838 			     WLAN_MD_DP_SRNG_TX_COMP,
4839 			     "tcl_comp_ring");
4840 	dp_srng_deinit(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4841 		       wbm_ring_num);
4842 }
4843 
4844 /**
4845  * dp_init_tx_ring_pair_by_index() - The function inits tcl data/wbm completion
4846  * ring pair
4847  * @soc: DP soc pointer
4848  * @index: index of soc->tcl_data or soc->tx_comp to initialize
4849  *
4850  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
4851  */
4852 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
4853 						uint8_t index)
4854 {
4855 	int tcl_ring_num, wbm_ring_num;
4856 	uint8_t bm_id;
4857 
4858 	if (index >= MAX_TCL_DATA_RINGS) {
4859 		dp_err("unexpected index!");
4860 		QDF_BUG(0);
4861 		goto fail1;
4862 	}
4863 
4864 	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
4865 						index,
4866 						&tcl_ring_num,
4867 						&wbm_ring_num);
4868 
4869 	if (tcl_ring_num == -1) {
4870 		dp_err("incorrect tcl ring num for index %u", index);
4871 		goto fail1;
4872 	}
4873 
4874 	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
4875 	if (dp_srng_init(soc, &soc->tcl_data_ring[index], TCL_DATA,
4876 			 tcl_ring_num, 0)) {
4877 		dp_err("dp_srng_init failed for tcl_data_ring");
4878 		goto fail1;
4879 	}
4880 	wlan_minidump_log(soc->tcl_data_ring[index].base_vaddr_unaligned,
4881 			  soc->tcl_data_ring[index].alloc_size,
4882 			  soc->ctrl_psoc,
4883 			  WLAN_MD_DP_SRNG_TCL_DATA,
4884 			  "tcl_data_ring");
4885 
4886 	if (wbm_ring_num == INVALID_WBM_RING_NUM)
4887 		goto set_rbm;
4888 
4889 	if (dp_srng_init(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4890 			 wbm_ring_num, 0)) {
4891 		dp_err("dp_srng_init failed for tx_comp_ring");
4892 		goto fail1;
4893 	}
4894 
4895 	wlan_minidump_log(soc->tx_comp_ring[index].base_vaddr_unaligned,
4896 			  soc->tx_comp_ring[index].alloc_size,
4897 			  soc->ctrl_psoc,
4898 			  WLAN_MD_DP_SRNG_TX_COMP,
4899 			  "tcl_comp_ring");
4900 set_rbm:
4901 	bm_id = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_ring_num);
4902 
4903 	soc->arch_ops.tx_implicit_rbm_set(soc, tcl_ring_num, bm_id);
4904 
4905 	return QDF_STATUS_SUCCESS;
4906 
4907 fail1:
4908 	return QDF_STATUS_E_FAILURE;
4909 }
4910 
4911 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index)
4912 {
4913 	dp_debug("index %u", index);
4914 	dp_srng_free(soc, &soc->tcl_data_ring[index]);
4915 	dp_srng_free(soc, &soc->tx_comp_ring[index]);
4916 }
4917 
4918 /**
4919  * dp_alloc_tx_ring_pair_by_index() - The function allocs tcl data/wbm2sw
4920  * ring pair for the given "index"
4921  * @soc: DP soc pointer
4922  * @index: index of soc->tcl_data or soc->tx_comp to initialize
4923  *
4924  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
4925  */
4926 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
4927 						 uint8_t index)
4928 {
4929 	int tx_ring_size;
4930 	int tx_comp_ring_size;
4931 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
4932 	int cached = 0;
4933 
4934 	if (index >= MAX_TCL_DATA_RINGS) {
4935 		dp_err("unexpected index!");
4936 		QDF_BUG(0);
4937 		goto fail1;
4938 	}
4939 
4940 	dp_debug("index %u", index);
4941 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
4942 	dp_ipa_get_tx_ring_size(index, &tx_ring_size, soc_cfg_ctx);
4943 
4944 	if (dp_srng_alloc(soc, &soc->tcl_data_ring[index], TCL_DATA,
4945 			  tx_ring_size, cached)) {
4946 		dp_err("dp_srng_alloc failed for tcl_data_ring");
4947 		goto fail1;
4948 	}
4949 
4950 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
4951 	dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size, soc_cfg_ctx);
4952 	/* Enable cached TCL desc if NSS offload is disabled */
4953 	if (!wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
4954 		cached = WLAN_CFG_DST_RING_CACHED_DESC;
4955 
4956 	if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) ==
4957 	    INVALID_WBM_RING_NUM)
4958 		return QDF_STATUS_SUCCESS;
4959 
4960 	if (dp_srng_alloc(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4961 			  tx_comp_ring_size, cached)) {
4962 		dp_err("dp_srng_alloc failed for tx_comp_ring");
4963 		goto fail1;
4964 	}
4965 
4966 	return QDF_STATUS_SUCCESS;
4967 
4968 fail1:
4969 	return QDF_STATUS_E_FAILURE;
4970 }
4971 
4972 static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
4973 {
4974 	struct cdp_lro_hash_config lro_hash;
4975 	QDF_STATUS status;
4976 
4977 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
4978 	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
4979 	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
4980 		dp_err("LRO, GRO and RX hash disabled");
4981 		return QDF_STATUS_E_FAILURE;
4982 	}
4983 
4984 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
4985 
4986 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
4987 	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
4988 		lro_hash.lro_enable = 1;
4989 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
4990 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
4991 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
4992 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
4993 	}
4994 
4995 	soc->arch_ops.get_rx_hash_key(soc, &lro_hash);
4996 
4997 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
4998 
4999 	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
5000 		QDF_BUG(0);
5001 		dp_err("lro_hash_config not configured");
5002 		return QDF_STATUS_E_FAILURE;
5003 	}
5004 
5005 	status = soc->cdp_soc.ol_ops->lro_hash_config(soc->ctrl_psoc,
5006 						      pdev->pdev_id,
5007 						      &lro_hash);
5008 	if (!QDF_IS_STATUS_SUCCESS(status)) {
5009 		dp_err("failed to send lro_hash_config to FW %u", status);
5010 		return status;
5011 	}
5012 
5013 	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
5014 		lro_hash.lro_enable, lro_hash.tcp_flag,
5015 		lro_hash.tcp_flag_mask);
5016 
5017 	dp_info("toeplitz_hash_ipv4:");
5018 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5019 			   lro_hash.toeplitz_hash_ipv4,
5020 			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
5021 			   LRO_IPV4_SEED_ARR_SZ));
5022 
5023 	dp_info("toeplitz_hash_ipv6:");
5024 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5025 			   lro_hash.toeplitz_hash_ipv6,
5026 			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
5027 			   LRO_IPV6_SEED_ARR_SZ));
5028 
5029 	return status;
5030 }
5031 
5032 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
5033 /**
5034  * dp_reap_timer_init() - initialize the reap timer
5035  * @soc: data path SoC handle
5036  *
5037  * Return: void
5038  */
5039 static void dp_reap_timer_init(struct dp_soc *soc)
5040 {
5041 	/*
5042 	 * Timer to reap rxdma status rings.
5043 	 * Needed until we enable ppdu end interrupts
5044 	 */
5045 	dp_monitor_reap_timer_init(soc);
5046 	dp_monitor_vdev_timer_init(soc);
5047 }
5048 
5049 /**
5050  * dp_reap_timer_deinit() - de-initialize the reap timer
5051  * @soc: data path SoC handle
5052  *
5053  * Return: void
5054  */
5055 static void dp_reap_timer_deinit(struct dp_soc *soc)
5056 {
5057 	dp_monitor_reap_timer_deinit(soc);
5058 }
5059 #else
5060 /* WIN use case */
5061 static void dp_reap_timer_init(struct dp_soc *soc)
5062 {
5063 	/* Configure LMAC rings in Polled mode */
5064 	if (soc->lmac_polled_mode) {
5065 		/*
5066 		 * Timer to reap lmac rings.
5067 		 */
5068 		qdf_timer_init(soc->osdev, &soc->lmac_reap_timer,
5069 			       dp_service_lmac_rings, (void *)soc,
5070 			       QDF_TIMER_TYPE_WAKE_APPS);
5071 		soc->lmac_timer_init = 1;
5072 		qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
5073 	}
5074 }
5075 
5076 static void dp_reap_timer_deinit(struct dp_soc *soc)
5077 {
5078 	if (soc->lmac_timer_init) {
5079 		qdf_timer_stop(&soc->lmac_reap_timer);
5080 		qdf_timer_free(&soc->lmac_reap_timer);
5081 		soc->lmac_timer_init = 0;
5082 	}
5083 }
5084 #endif
5085 
5086 #ifdef QCA_HOST2FW_RXBUF_RING
5087 /**
5088  * dp_rxdma_ring_alloc() - allocate the RXDMA rings
5089  * @soc: data path SoC handle
5090  * @pdev: Physical device handle
5091  *
5092  * Return: 0 - success, > 0 - failure
5093  */
5094 static int dp_rxdma_ring_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
5095 {
5096 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
5097 	int max_mac_rings;
5098 	int i;
5099 	int ring_size;
5100 
5101 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
5102 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
5103 	ring_size =  wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx);
5104 
5105 	for (i = 0; i < max_mac_rings; i++) {
5106 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
5107 		if (dp_srng_alloc(soc, &pdev->rx_mac_buf_ring[i],
5108 				  RXDMA_BUF, ring_size, 0)) {
5109 			dp_init_err("%pK: failed rx mac ring setup", soc);
5110 			return QDF_STATUS_E_FAILURE;
5111 		}
5112 	}
5113 	return QDF_STATUS_SUCCESS;
5114 }
5115 
5116 /**
5117  * dp_rxdma_ring_setup() - configure the RXDMA rings
5118  * @soc: data path SoC handle
5119  * @pdev: Physical device handle
5120  *
5121  * Return: 0 - success, > 0 - failure
5122  */
5123 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
5124 {
5125 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
5126 	int max_mac_rings;
5127 	int i;
5128 
5129 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
5130 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
5131 
5132 	for (i = 0; i < max_mac_rings; i++) {
5133 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
5134 		if (dp_srng_init(soc, &pdev->rx_mac_buf_ring[i],
5135 				 RXDMA_BUF, 1, i)) {
5136 			dp_init_err("%pK: failed rx mac ring setup", soc);
5137 			return QDF_STATUS_E_FAILURE;
5138 		}
5139 	}
5140 	return QDF_STATUS_SUCCESS;
5141 }
5142 
5143 /**
5144  * dp_rxdma_ring_cleanup() - Deinit the RXDMA rings and reap timer
5145  * @soc: data path SoC handle
5146  * @pdev: Physical device handle
5147  *
5148  * Return: void
5149  */
5150 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
5151 {
5152 	int i;
5153 
5154 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
5155 		dp_srng_deinit(soc, &pdev->rx_mac_buf_ring[i], RXDMA_BUF, 1);
5156 
5157 	dp_reap_timer_deinit(soc);
5158 }
5159 
5160 /**
5161  * dp_rxdma_ring_free() - Free the RXDMA rings
5162  * @pdev: Physical device handle
5163  *
5164  * Return: void
5165  */
5166 static void dp_rxdma_ring_free(struct dp_pdev *pdev)
5167 {
5168 	int i;
5169 
5170 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
5171 		dp_srng_free(pdev->soc, &pdev->rx_mac_buf_ring[i]);
5172 }
5173 
5174 #else
5175 static int dp_rxdma_ring_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
5176 {
5177 	return QDF_STATUS_SUCCESS;
5178 }
5179 
5180 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
5181 {
5182 	return QDF_STATUS_SUCCESS;
5183 }
5184 
5185 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
5186 {
5187 	dp_reap_timer_deinit(soc);
5188 }
5189 
5190 static void dp_rxdma_ring_free(struct dp_pdev *pdev)
5191 {
5192 }
5193 #endif
5194 
5195 /**
5196  * dp_dscp_tid_map_setup() - Initialize the dscp-tid maps
5197  * @pdev: DP_PDEV handle
5198  *
5199  * Return: void
5200  */
5201 static inline void
5202 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
5203 {
5204 	uint8_t map_id;
5205 	struct dp_soc *soc = pdev->soc;
5206 
5207 	if (!soc)
5208 		return;
5209 
5210 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
5211 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
5212 			     default_dscp_tid_map,
5213 			     sizeof(default_dscp_tid_map));
5214 	}
5215 
5216 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
5217 		hal_tx_set_dscp_tid_map(soc->hal_soc,
5218 					default_dscp_tid_map,
5219 					map_id);
5220 	}
5221 }
5222 
5223 /**
5224  * dp_pcp_tid_map_setup() - Initialize the pcp-tid maps
5225  * @pdev: DP_PDEV handle
5226  *
5227  * Return: void
5228  */
5229 static inline void
5230 dp_pcp_tid_map_setup(struct dp_pdev *pdev)
5231 {
5232 	struct dp_soc *soc = pdev->soc;
5233 
5234 	if (!soc)
5235 		return;
5236 
5237 	qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
5238 		     sizeof(default_pcp_tid_map));
5239 	hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
5240 }
5241 
5242 #ifdef IPA_OFFLOAD
5243 /**
5244  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
5245  * @soc: data path instance
5246  * @pdev: core txrx pdev context
5247  *
5248  * Return: QDF_STATUS_SUCCESS: success
5249  *         QDF_STATUS_E_RESOURCES: Error return
5250  */
5251 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5252 					   struct dp_pdev *pdev)
5253 {
5254 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5255 	int entries;
5256 
5257 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5258 		soc_cfg_ctx = soc->wlan_cfg_ctx;
5259 		entries =
5260 			wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
5261 
5262 		/* Setup second Rx refill buffer ring */
5263 		if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
5264 				  entries, 0)) {
5265 			dp_init_err("%pK: dp_srng_alloc failed second"
5266 				    "rx refill ring", soc);
5267 			return QDF_STATUS_E_FAILURE;
5268 		}
5269 	}
5270 
5271 	return QDF_STATUS_SUCCESS;
5272 }
5273 
5274 #ifdef IPA_WDI3_VLAN_SUPPORT
5275 static int dp_setup_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5276 					       struct dp_pdev *pdev)
5277 {
5278 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5279 	int entries;
5280 
5281 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5282 	    wlan_ipa_is_vlan_enabled()) {
5283 		soc_cfg_ctx = soc->wlan_cfg_ctx;
5284 		entries =
5285 			wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
5286 
5287 		/* Setup second Rx refill buffer ring */
5288 		if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF,
5289 				  entries, 0)) {
5290 			dp_init_err("%pK: alloc failed for 3rd rx refill ring",
5291 				    soc);
5292 			return QDF_STATUS_E_FAILURE;
5293 		}
5294 	}
5295 
5296 	return QDF_STATUS_SUCCESS;
5297 }
5298 
5299 static int dp_init_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5300 					      struct dp_pdev *pdev)
5301 {
5302 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5303 	    wlan_ipa_is_vlan_enabled()) {
5304 		if (dp_srng_init(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF,
5305 				 IPA_RX_ALT_REFILL_BUF_RING_IDX,
5306 				 pdev->pdev_id)) {
5307 			dp_init_err("%pK: init failed for 3rd rx refill ring",
5308 				    soc);
5309 			return QDF_STATUS_E_FAILURE;
5310 		}
5311 	}
5312 
5313 	return QDF_STATUS_SUCCESS;
5314 }
5315 
5316 static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5317 						 struct dp_pdev *pdev)
5318 {
5319 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5320 	    wlan_ipa_is_vlan_enabled())
5321 		dp_srng_deinit(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF, 0);
5322 }
5323 
5324 static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5325 					       struct dp_pdev *pdev)
5326 {
5327 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5328 	    wlan_ipa_is_vlan_enabled())
5329 		dp_srng_free(soc, &pdev->rx_refill_buf_ring3);
5330 }
5331 #else
5332 static int dp_setup_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5333 					       struct dp_pdev *pdev)
5334 {
5335 	return QDF_STATUS_SUCCESS;
5336 }
5337 
5338 static int dp_init_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5339 					      struct dp_pdev *pdev)
5340 {
5341 	return QDF_STATUS_SUCCESS;
5342 }
5343 
5344 static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5345 						 struct dp_pdev *pdev)
5346 {
5347 }
5348 
5349 static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5350 					       struct dp_pdev *pdev)
5351 {
5352 }
5353 #endif
5354 
5355 /**
5356  * dp_deinit_ipa_rx_refill_buf_ring - deinit second Rx refill buffer ring
5357  * @soc: data path instance
5358  * @pdev: core txrx pdev context
5359  *
5360  * Return: void
5361  */
5362 static void dp_deinit_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5363 					     struct dp_pdev *pdev)
5364 {
5365 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
5366 		dp_srng_deinit(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 0);
5367 }
5368 
5369 /**
5370  * dp_init_ipa_rx_refill_buf_ring - Init second Rx refill buffer ring
5371  * @soc: data path instance
5372  * @pdev: core txrx pdev context
5373  *
5374  * Return: QDF_STATUS_SUCCESS: success
5375  *         QDF_STATUS_E_RESOURCES: Error return
5376  */
5377 static int dp_init_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5378 					  struct dp_pdev *pdev)
5379 {
5380 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5381 		if (dp_srng_init(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
5382 				 IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id)) {
5383 			dp_init_err("%pK: dp_srng_init failed second"
5384 				    "rx refill ring", soc);
5385 			return QDF_STATUS_E_FAILURE;
5386 		}
5387 	}
5388 
5389 	if (dp_init_ipa_rx_alt_refill_buf_ring(soc, pdev)) {
5390 		dp_deinit_ipa_rx_refill_buf_ring(soc, pdev);
5391 		return QDF_STATUS_E_FAILURE;
5392 	}
5393 
5394 	return QDF_STATUS_SUCCESS;
5395 }
5396 
5397 /**
5398  * dp_free_ipa_rx_refill_buf_ring - free second Rx refill buffer ring
5399  * @soc: data path instance
5400  * @pdev: core txrx pdev context
5401  *
5402  * Return: void
5403  */
5404 static void dp_free_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5405 					   struct dp_pdev *pdev)
5406 {
5407 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
5408 		dp_srng_free(soc, &pdev->rx_refill_buf_ring2);
5409 }
5410 #else
5411 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5412 					   struct dp_pdev *pdev)
5413 {
5414 	return QDF_STATUS_SUCCESS;
5415 }
5416 
5417 static int dp_init_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5418 					  struct dp_pdev *pdev)
5419 {
5420 	return QDF_STATUS_SUCCESS;
5421 }
5422 
5423 static void dp_deinit_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5424 					     struct dp_pdev *pdev)
5425 {
5426 }
5427 
5428 static void dp_free_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5429 					   struct dp_pdev *pdev)
5430 {
5431 }
5432 
5433 static int dp_setup_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5434 					       struct dp_pdev *pdev)
5435 {
5436 	return QDF_STATUS_SUCCESS;
5437 }
5438 
5439 static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5440 						 struct dp_pdev *pdev)
5441 {
5442 }
5443 
5444 static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5445 					       struct dp_pdev *pdev)
5446 {
5447 }
5448 #endif
5449 
5450 #ifdef WLAN_FEATURE_DP_CFG_EVENT_HISTORY
5451 
5452 /**
5453  * dp_soc_cfg_history_attach() - Allocate and attach datapath config events
5454  *				 history
5455  * @soc: DP soc handle
5456  *
5457  * Return: None
5458  */
5459 static void dp_soc_cfg_history_attach(struct dp_soc *soc)
5460 {
5461 	dp_soc_frag_history_attach(soc, &soc->cfg_event_history,
5462 				   DP_CFG_EVT_HIST_MAX_SLOTS,
5463 				   DP_CFG_EVT_HIST_PER_SLOT_MAX,
5464 				   sizeof(struct dp_cfg_event),
5465 				   true, DP_CFG_EVENT_HIST_TYPE);
5466 }
5467 
5468 /**
5469  * dp_soc_cfg_history_detach() - Detach and free DP config events history
5470  * @soc: DP soc handle
5471  *
5472  * Return: none
5473  */
5474 static void dp_soc_cfg_history_detach(struct dp_soc *soc)
5475 {
5476 	dp_soc_frag_history_detach(soc, &soc->cfg_event_history,
5477 				   DP_CFG_EVT_HIST_MAX_SLOTS,
5478 				   true, DP_CFG_EVENT_HIST_TYPE);
5479 }
5480 
5481 #else
5482 static void dp_soc_cfg_history_attach(struct dp_soc *soc)
5483 {
5484 }
5485 
5486 static void dp_soc_cfg_history_detach(struct dp_soc *soc)
5487 {
5488 }
5489 #endif
5490 
5491 #ifdef DP_TX_HW_DESC_HISTORY
5492 /**
5493  * dp_soc_tx_hw_desc_history_attach - Attach TX HW descriptor history
5494  *
5495  * @soc: DP soc handle
5496  *
5497  * Return: None
5498  */
5499 static void dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc)
5500 {
5501 	dp_soc_frag_history_attach(soc, &soc->tx_hw_desc_history,
5502 				   DP_TX_HW_DESC_HIST_MAX_SLOTS,
5503 				   DP_TX_HW_DESC_HIST_PER_SLOT_MAX,
5504 				   sizeof(struct dp_tx_hw_desc_evt),
5505 				   true, DP_TX_HW_DESC_HIST_TYPE);
5506 }
5507 
5508 static void dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc)
5509 {
5510 	dp_soc_frag_history_detach(soc, &soc->tx_hw_desc_history,
5511 				   DP_TX_HW_DESC_HIST_MAX_SLOTS,
5512 				   true, DP_TX_HW_DESC_HIST_TYPE);
5513 }
5514 
5515 #else /* DP_TX_HW_DESC_HISTORY */
5516 static inline void
5517 dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc)
5518 {
5519 }
5520 
5521 static inline void
5522 dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc)
5523 {
5524 }
5525 #endif /* DP_TX_HW_DESC_HISTORY */
5526 
5527 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
5528 #ifndef RX_DEFRAG_DO_NOT_REINJECT
5529 /**
5530  * dp_soc_rx_reinject_ring_history_attach - Attach the reo reinject ring
5531  *					    history.
5532  * @soc: DP soc handle
5533  *
5534  * Return: None
5535  */
5536 static void dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
5537 {
5538 	soc->rx_reinject_ring_history =
5539 		dp_context_alloc_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
5540 				     sizeof(struct dp_rx_reinject_history));
5541 	if (soc->rx_reinject_ring_history)
5542 		qdf_atomic_init(&soc->rx_reinject_ring_history->index);
5543 }
5544 #else /* RX_DEFRAG_DO_NOT_REINJECT */
5545 static inline void
5546 dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
5547 {
5548 }
5549 #endif /* RX_DEFRAG_DO_NOT_REINJECT */
5550 
5551 /**
5552  * dp_soc_rx_history_attach() - Attach the ring history record buffers
5553  * @soc: DP soc structure
5554  *
5555  * This function allocates the memory for recording the rx ring, rx error
5556  * ring and the reinject ring entries. There is no error returned in case
5557  * of allocation failure since the record function checks if the history is
5558  * initialized or not. We do not want to fail the driver load in case of
5559  * failure to allocate memory for debug history.
5560  *
5561  * Return: None
5562  */
5563 static void dp_soc_rx_history_attach(struct dp_soc *soc)
5564 {
5565 	int i;
5566 	uint32_t rx_ring_hist_size;
5567 	uint32_t rx_refill_ring_hist_size;
5568 
5569 	rx_ring_hist_size = sizeof(*soc->rx_ring_history[0]);
5570 	rx_refill_ring_hist_size = sizeof(*soc->rx_refill_ring_history[0]);
5571 
5572 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
5573 		soc->rx_ring_history[i] = dp_context_alloc_mem(
5574 				soc, DP_RX_RING_HIST_TYPE, rx_ring_hist_size);
5575 		if (soc->rx_ring_history[i])
5576 			qdf_atomic_init(&soc->rx_ring_history[i]->index);
5577 	}
5578 
5579 	soc->rx_err_ring_history = dp_context_alloc_mem(
5580 			soc, DP_RX_ERR_RING_HIST_TYPE, rx_ring_hist_size);
5581 	if (soc->rx_err_ring_history)
5582 		qdf_atomic_init(&soc->rx_err_ring_history->index);
5583 
5584 	dp_soc_rx_reinject_ring_history_attach(soc);
5585 
5586 	for (i = 0; i < MAX_PDEV_CNT; i++) {
5587 		soc->rx_refill_ring_history[i] = dp_context_alloc_mem(
5588 						soc,
5589 						DP_RX_REFILL_RING_HIST_TYPE,
5590 						rx_refill_ring_hist_size);
5591 
5592 		if (soc->rx_refill_ring_history[i])
5593 			qdf_atomic_init(&soc->rx_refill_ring_history[i]->index);
5594 	}
5595 }
5596 
5597 static void dp_soc_rx_history_detach(struct dp_soc *soc)
5598 {
5599 	int i;
5600 
5601 	for (i = 0; i < MAX_REO_DEST_RINGS; i++)
5602 		dp_context_free_mem(soc, DP_RX_RING_HIST_TYPE,
5603 				    soc->rx_ring_history[i]);
5604 
5605 	dp_context_free_mem(soc, DP_RX_ERR_RING_HIST_TYPE,
5606 			    soc->rx_err_ring_history);
5607 
5608 	/*
5609 	 * No need for a featurized detach since qdf_mem_free takes
5610 	 * care of NULL pointer.
5611 	 */
5612 	dp_context_free_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
5613 			    soc->rx_reinject_ring_history);
5614 
5615 	for (i = 0; i < MAX_PDEV_CNT; i++)
5616 		dp_context_free_mem(soc, DP_RX_REFILL_RING_HIST_TYPE,
5617 				    soc->rx_refill_ring_history[i]);
5618 }
5619 
5620 #else
5621 static inline void dp_soc_rx_history_attach(struct dp_soc *soc)
5622 {
5623 }
5624 
5625 static inline void dp_soc_rx_history_detach(struct dp_soc *soc)
5626 {
5627 }
5628 #endif
5629 
5630 #ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
5631 /**
5632  * dp_soc_mon_status_ring_history_attach() - Attach the monitor status
5633  *					     buffer record history.
5634  * @soc: DP soc handle
5635  *
5636  * This function allocates memory to track the event for a monitor
5637  * status buffer, before its parsed and freed.
5638  *
5639  * Return: None
5640  */
5641 static void dp_soc_mon_status_ring_history_attach(struct dp_soc *soc)
5642 {
5643 	soc->mon_status_ring_history = dp_context_alloc_mem(soc,
5644 				DP_MON_STATUS_BUF_HIST_TYPE,
5645 				sizeof(struct dp_mon_status_ring_history));
5646 	if (!soc->mon_status_ring_history) {
5647 		dp_err("Failed to alloc memory for mon status ring history");
5648 		return;
5649 	}
5650 }
5651 
5652 /**
5653  * dp_soc_mon_status_ring_history_detach() - Detach the monitor status buffer
5654  *					     record history.
5655  * @soc: DP soc handle
5656  *
5657  * Return: None
5658  */
5659 static void dp_soc_mon_status_ring_history_detach(struct dp_soc *soc)
5660 {
5661 	dp_context_free_mem(soc, DP_MON_STATUS_BUF_HIST_TYPE,
5662 			    soc->mon_status_ring_history);
5663 }
5664 #else
5665 static void dp_soc_mon_status_ring_history_attach(struct dp_soc *soc)
5666 {
5667 }
5668 
5669 static void dp_soc_mon_status_ring_history_detach(struct dp_soc *soc)
5670 {
5671 }
5672 #endif
5673 
5674 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
5675 /**
5676  * dp_soc_tx_history_attach() - Attach the ring history record buffers
5677  * @soc: DP soc structure
5678  *
5679  * This function allocates the memory for recording the tx tcl ring and
5680  * the tx comp ring entries. There is no error returned in case
5681  * of allocation failure since the record function checks if the history is
5682  * initialized or not. We do not want to fail the driver load in case of
5683  * failure to allocate memory for debug history.
5684  *
5685  * Return: None
5686  */
5687 static void dp_soc_tx_history_attach(struct dp_soc *soc)
5688 {
5689 	dp_soc_frag_history_attach(soc, &soc->tx_tcl_history,
5690 				   DP_TX_TCL_HIST_MAX_SLOTS,
5691 				   DP_TX_TCL_HIST_PER_SLOT_MAX,
5692 				   sizeof(struct dp_tx_desc_event),
5693 				   true, DP_TX_TCL_HIST_TYPE);
5694 	dp_soc_frag_history_attach(soc, &soc->tx_comp_history,
5695 				   DP_TX_COMP_HIST_MAX_SLOTS,
5696 				   DP_TX_COMP_HIST_PER_SLOT_MAX,
5697 				   sizeof(struct dp_tx_desc_event),
5698 				   true, DP_TX_COMP_HIST_TYPE);
5699 }
5700 
5701 /**
5702  * dp_soc_tx_history_detach() - Detach the ring history record buffers
5703  * @soc: DP soc structure
5704  *
5705  * This function frees the memory for recording the tx tcl ring and
5706  * the tx comp ring entries.
5707  *
5708  * Return: None
5709  */
5710 static void dp_soc_tx_history_detach(struct dp_soc *soc)
5711 {
5712 	dp_soc_frag_history_detach(soc, &soc->tx_tcl_history,
5713 				   DP_TX_TCL_HIST_MAX_SLOTS,
5714 				   true, DP_TX_TCL_HIST_TYPE);
5715 	dp_soc_frag_history_detach(soc, &soc->tx_comp_history,
5716 				   DP_TX_COMP_HIST_MAX_SLOTS,
5717 				   true, DP_TX_COMP_HIST_TYPE);
5718 }
5719 
5720 #else
5721 static inline void dp_soc_tx_history_attach(struct dp_soc *soc)
5722 {
5723 }
5724 
5725 static inline void dp_soc_tx_history_detach(struct dp_soc *soc)
5726 {
5727 }
5728 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
5729 
5730 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
5731 QDF_STATUS
5732 dp_rx_fst_attach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev)
5733 {
5734 	struct dp_rx_fst *rx_fst = NULL;
5735 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
5736 
5737 	/* for Lithium the below API is not registered
5738 	 * hence fst attach happens for each pdev
5739 	 */
5740 	if (!soc->arch_ops.dp_get_rx_fst)
5741 		return dp_rx_fst_attach(soc, pdev);
5742 
5743 	rx_fst = soc->arch_ops.dp_get_rx_fst(soc);
5744 
5745 	/* for BE the FST attach is called only once per
5746 	 * ML context. if rx_fst is already registered
5747 	 * increase the ref count and return.
5748 	 */
5749 	if (rx_fst) {
5750 		soc->rx_fst = rx_fst;
5751 		pdev->rx_fst = rx_fst;
5752 		soc->arch_ops.dp_rx_fst_ref(soc);
5753 	} else {
5754 		ret = dp_rx_fst_attach(soc, pdev);
5755 		if ((ret != QDF_STATUS_SUCCESS) &&
5756 		    (ret != QDF_STATUS_E_NOSUPPORT))
5757 			return ret;
5758 
5759 		soc->arch_ops.dp_set_rx_fst(soc, soc->rx_fst);
5760 		soc->arch_ops.dp_rx_fst_ref(soc);
5761 	}
5762 	return ret;
5763 }
5764 
5765 void
5766 dp_rx_fst_detach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev)
5767 {
5768 	struct dp_rx_fst *rx_fst = NULL;
5769 
5770 	/* for Lithium the below API is not registered
5771 	 * hence fst detach happens for each pdev
5772 	 */
5773 	if (!soc->arch_ops.dp_get_rx_fst) {
5774 		dp_rx_fst_detach(soc, pdev);
5775 		return;
5776 	}
5777 
5778 	rx_fst = soc->arch_ops.dp_get_rx_fst(soc);
5779 
5780 	/* for BE the FST detach is called only when last
5781 	 * ref count reaches 1.
5782 	 */
5783 	if (rx_fst) {
5784 		if (soc->arch_ops.dp_rx_fst_deref(soc) == 1)
5785 			dp_rx_fst_detach(soc, pdev);
5786 	}
5787 	pdev->rx_fst = NULL;
5788 }
5789 #elif defined(WLAN_SUPPORT_RX_FISA)
5790 QDF_STATUS
5791 dp_rx_fst_attach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev)
5792 {
5793 	return dp_rx_fst_attach(soc, pdev);
5794 }
5795 
5796 void
5797 dp_rx_fst_detach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev)
5798 {
5799 	dp_rx_fst_detach(soc, pdev);
5800 }
5801 #else
5802 QDF_STATUS
5803 dp_rx_fst_attach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev)
5804 {
5805 	return QDF_STATUS_SUCCESS;
5806 }
5807 
5808 void
5809 dp_rx_fst_detach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev)
5810 {
5811 }
5812 #endif
5813 
5814 /**
5815  * dp_pdev_attach_wifi3() - attach txrx pdev
5816  * @txrx_soc: Datapath SOC handle
5817  * @params: Params for PDEV attach
5818  *
5819  * Return: QDF_STATUS
5820  */
5821 static inline
5822 QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
5823 				struct cdp_pdev_attach_params *params)
5824 {
5825 	qdf_size_t pdev_context_size;
5826 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5827 	struct dp_pdev *pdev = NULL;
5828 	uint8_t pdev_id = params->pdev_id;
5829 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5830 	int nss_cfg;
5831 	QDF_STATUS ret;
5832 
5833 	pdev_context_size =
5834 		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_PDEV);
5835 	if (pdev_context_size)
5836 		pdev = dp_context_alloc_mem(soc, DP_PDEV_TYPE,
5837 					    pdev_context_size);
5838 
5839 	if (!pdev) {
5840 		dp_init_err("%pK: DP PDEV memory allocation failed",
5841 			    soc);
5842 		goto fail0;
5843 	}
5844 	wlan_minidump_log(pdev, sizeof(*pdev), soc->ctrl_psoc,
5845 			  WLAN_MD_DP_PDEV, "dp_pdev");
5846 
5847 	soc_cfg_ctx = soc->wlan_cfg_ctx;
5848 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
5849 
5850 	if (!pdev->wlan_cfg_ctx) {
5851 		dp_init_err("%pK: pdev cfg_attach failed", soc);
5852 		goto fail1;
5853 	}
5854 
5855 	/*
5856 	 * set nss pdev config based on soc config
5857 	 */
5858 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
5859 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
5860 					 (nss_cfg & (1 << pdev_id)));
5861 
5862 	pdev->soc = soc;
5863 	pdev->pdev_id = pdev_id;
5864 	soc->pdev_list[pdev_id] = pdev;
5865 
5866 	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
5867 	soc->pdev_count++;
5868 
5869 	/* Allocate memory for pdev srng rings */
5870 	if (dp_pdev_srng_alloc(pdev)) {
5871 		dp_init_err("%pK: dp_pdev_srng_alloc failed", soc);
5872 		goto fail2;
5873 	}
5874 
5875 	/* Setup second Rx refill buffer ring */
5876 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev)) {
5877 		dp_init_err("%pK: dp_srng_alloc failed rxrefill2 ring",
5878 			    soc);
5879 		goto fail3;
5880 	}
5881 
5882 	/* Allocate memory for pdev rxdma rings */
5883 	if (dp_rxdma_ring_alloc(soc, pdev)) {
5884 		dp_init_err("%pK: dp_rxdma_ring_alloc failed", soc);
5885 		goto fail4;
5886 	}
5887 
5888 	/* Rx specific init */
5889 	if (dp_rx_pdev_desc_pool_alloc(pdev)) {
5890 		dp_init_err("%pK: dp_rx_pdev_attach failed", soc);
5891 		goto fail4;
5892 	}
5893 
5894 	if (dp_monitor_pdev_attach(pdev)) {
5895 		dp_init_err("%pK: dp_monitor_pdev_attach failed", soc);
5896 		goto fail5;
5897 	}
5898 
5899 	soc->arch_ops.txrx_pdev_attach(pdev, params);
5900 
5901 	/* Setup third Rx refill buffer ring */
5902 	if (dp_setup_ipa_rx_alt_refill_buf_ring(soc, pdev)) {
5903 		dp_init_err("%pK: dp_srng_alloc failed rxrefill3 ring",
5904 			    soc);
5905 		goto fail6;
5906 	}
5907 
5908 	ret = dp_rx_fst_attach_wrapper(soc, pdev);
5909 	if ((ret != QDF_STATUS_SUCCESS) && (ret != QDF_STATUS_E_NOSUPPORT)) {
5910 		dp_init_err("%pK: RX FST attach failed: pdev %d err %d",
5911 			    soc, pdev_id, ret);
5912 		goto fail7;
5913 	}
5914 
5915 	return QDF_STATUS_SUCCESS;
5916 
5917 fail7:
5918 	dp_free_ipa_rx_alt_refill_buf_ring(soc, pdev);
5919 fail6:
5920 	dp_monitor_pdev_detach(pdev);
5921 fail5:
5922 	dp_rx_pdev_desc_pool_free(pdev);
5923 fail4:
5924 	dp_rxdma_ring_free(pdev);
5925 	dp_free_ipa_rx_refill_buf_ring(soc, pdev);
5926 fail3:
5927 	dp_pdev_srng_free(pdev);
5928 fail2:
5929 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
5930 fail1:
5931 	soc->pdev_list[pdev_id] = NULL;
5932 	qdf_mem_free(pdev);
5933 fail0:
5934 	return QDF_STATUS_E_FAILURE;
5935 }
5936 
5937 /**
5938  * dp_pdev_flush_pending_vdevs() - Flush all delete pending vdevs in pdev
5939  * @pdev: Datapath PDEV handle
5940  *
5941  * This is the last chance to flush all pending dp vdevs/peers,
5942  * some peer/vdev leak case like Non-SSR + peer unmap missing
5943  * will be covered here.
5944  *
5945  * Return: None
5946  */
5947 static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
5948 {
5949 	struct dp_soc *soc = pdev->soc;
5950 	struct dp_vdev *vdev_arr[MAX_VDEV_CNT] = {0};
5951 	uint32_t i = 0;
5952 	uint32_t num_vdevs = 0;
5953 	struct dp_vdev *vdev = NULL;
5954 
5955 	if (TAILQ_EMPTY(&soc->inactive_vdev_list))
5956 		return;
5957 
5958 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
5959 	TAILQ_FOREACH(vdev, &soc->inactive_vdev_list,
5960 		      inactive_list_elem) {
5961 		if (vdev->pdev != pdev)
5962 			continue;
5963 
5964 		vdev_arr[num_vdevs] = vdev;
5965 		num_vdevs++;
5966 		/* take reference to free */
5967 		dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CDP);
5968 	}
5969 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
5970 
5971 	for (i = 0; i < num_vdevs; i++) {
5972 		dp_vdev_flush_peers((struct cdp_vdev *)vdev_arr[i], 0, 0);
5973 		dp_vdev_unref_delete(soc, vdev_arr[i], DP_MOD_ID_CDP);
5974 	}
5975 }
5976 
5977 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
5978 /**
5979  * dp_vdev_stats_hw_offload_target_config() - Send HTT command to FW
5980  *                                          for enable/disable of HW vdev stats
5981  * @soc: Datapath soc handle
5982  * @pdev_id: INVALID_PDEV_ID for all pdevs or 0,1,2 for individual pdev
5983  * @enable: flag to represent enable/disable of hw vdev stats
5984  *
5985  * Return: none
5986  */
5987 static void dp_vdev_stats_hw_offload_target_config(struct dp_soc *soc,
5988 						   uint8_t pdev_id,
5989 						   bool enable)
5990 {
5991 	/* Check SOC level config for HW offload vdev stats support */
5992 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
5993 		dp_debug("%pK: HW vdev offload stats is disabled", soc);
5994 		return;
5995 	}
5996 
5997 	/* Send HTT command to FW for enable of stats */
5998 	dp_h2t_hw_vdev_stats_config_send(soc, pdev_id, enable, false, 0);
5999 }
6000 
6001 /**
6002  * dp_vdev_stats_hw_offload_target_clear() - Clear HW vdev stats on target
6003  * @soc: Datapath soc handle
6004  * @pdev_id: pdev_id (0,1,2)
6005  * @vdev_id_bitmask: bitmask with vdev_id(s) for which stats are to be
6006  *                   cleared on HW
6007  *
6008  * Return: none
6009  */
6010 static
6011 void dp_vdev_stats_hw_offload_target_clear(struct dp_soc *soc, uint8_t pdev_id,
6012 					   uint64_t vdev_id_bitmask)
6013 {
6014 	/* Check SOC level config for HW offload vdev stats support */
6015 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
6016 		dp_debug("%pK: HW vdev offload stats is disabled", soc);
6017 		return;
6018 	}
6019 
6020 	/* Send HTT command to FW for reset of stats */
6021 	dp_h2t_hw_vdev_stats_config_send(soc, pdev_id, true, true,
6022 					 vdev_id_bitmask);
6023 }
6024 #else
6025 static void
6026 dp_vdev_stats_hw_offload_target_config(struct dp_soc *soc, uint8_t pdev_id,
6027 				       bool enable)
6028 {
6029 }
6030 
6031 static
6032 void dp_vdev_stats_hw_offload_target_clear(struct dp_soc *soc, uint8_t pdev_id,
6033 					   uint64_t vdev_id_bitmask)
6034 {
6035 }
6036 #endif /*QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT */
6037 
6038 /**
6039  * dp_pdev_deinit() - Deinit txrx pdev
6040  * @txrx_pdev: Datapath PDEV handle
6041  * @force: Force deinit
6042  *
6043  * Return: None
6044  */
6045 static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
6046 {
6047 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
6048 	qdf_nbuf_t curr_nbuf, next_nbuf;
6049 
6050 	if (pdev->pdev_deinit)
6051 		return;
6052 
6053 	dp_tx_me_exit(pdev);
6054 	dp_rx_pdev_buffers_free(pdev);
6055 	dp_rx_pdev_desc_pool_deinit(pdev);
6056 	dp_pdev_bkp_stats_detach(pdev);
6057 	qdf_event_destroy(&pdev->fw_peer_stats_event);
6058 	qdf_event_destroy(&pdev->fw_stats_event);
6059 	qdf_event_destroy(&pdev->fw_obss_stats_event);
6060 	if (pdev->sojourn_buf)
6061 		qdf_nbuf_free(pdev->sojourn_buf);
6062 
6063 	dp_pdev_flush_pending_vdevs(pdev);
6064 	dp_tx_desc_flush(pdev, NULL, true);
6065 
6066 	qdf_spinlock_destroy(&pdev->tx_mutex);
6067 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
6068 
6069 	dp_monitor_pdev_deinit(pdev);
6070 
6071 	dp_pdev_srng_deinit(pdev);
6072 
6073 	dp_ipa_uc_detach(pdev->soc, pdev);
6074 	dp_deinit_ipa_rx_alt_refill_buf_ring(pdev->soc, pdev);
6075 	dp_deinit_ipa_rx_refill_buf_ring(pdev->soc, pdev);
6076 	dp_rxdma_ring_cleanup(pdev->soc, pdev);
6077 
6078 	curr_nbuf = pdev->invalid_peer_head_msdu;
6079 	while (curr_nbuf) {
6080 		next_nbuf = qdf_nbuf_next(curr_nbuf);
6081 		dp_rx_nbuf_free(curr_nbuf);
6082 		curr_nbuf = next_nbuf;
6083 	}
6084 	pdev->invalid_peer_head_msdu = NULL;
6085 	pdev->invalid_peer_tail_msdu = NULL;
6086 
6087 	dp_wdi_event_detach(pdev);
6088 	pdev->pdev_deinit = 1;
6089 }
6090 
6091 /**
6092  * dp_pdev_deinit_wifi3() - Deinit txrx pdev
6093  * @psoc: Datapath psoc handle
6094  * @pdev_id: Id of datapath PDEV handle
6095  * @force: Force deinit
6096  *
6097  * Return: QDF_STATUS
6098  */
6099 static QDF_STATUS
6100 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
6101 		     int force)
6102 {
6103 	struct dp_pdev *txrx_pdev;
6104 
6105 	txrx_pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
6106 						       pdev_id);
6107 
6108 	if (!txrx_pdev)
6109 		return QDF_STATUS_E_FAILURE;
6110 
6111 	dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force);
6112 
6113 	return QDF_STATUS_SUCCESS;
6114 }
6115 
6116 /**
6117  * dp_pdev_post_attach() - Do post pdev attach after dev_alloc_name
6118  * @txrx_pdev: Datapath PDEV handle
6119  *
6120  * Return: None
6121  */
6122 static void dp_pdev_post_attach(struct cdp_pdev *txrx_pdev)
6123 {
6124 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
6125 
6126 	dp_monitor_tx_capture_debugfs_init(pdev);
6127 
6128 	if (dp_pdev_htt_stats_dbgfs_init(pdev)) {
6129 		dp_init_err("%pK: Failed to initialize pdev HTT stats debugfs", pdev->soc);
6130 	}
6131 }
6132 
6133 /**
6134  * dp_pdev_post_attach_wifi3() - attach txrx pdev post
6135  * @soc: Datapath soc handle
6136  * @pdev_id: pdev id of pdev
6137  *
6138  * Return: QDF_STATUS
6139  */
6140 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *soc,
6141 				     uint8_t pdev_id)
6142 {
6143 	struct dp_pdev *pdev;
6144 
6145 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
6146 						  pdev_id);
6147 
6148 	if (!pdev) {
6149 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
6150 			    (struct dp_soc *)soc, pdev_id);
6151 		return QDF_STATUS_E_FAILURE;
6152 	}
6153 
6154 	dp_pdev_post_attach((struct cdp_pdev *)pdev);
6155 	return QDF_STATUS_SUCCESS;
6156 }
6157 
6158 /**
6159  * dp_pdev_detach() - Complete rest of pdev detach
6160  * @txrx_pdev: Datapath PDEV handle
6161  * @force: Force deinit
6162  *
6163  * Return: None
6164  */
6165 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
6166 {
6167 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
6168 	struct dp_soc *soc = pdev->soc;
6169 
6170 	dp_rx_fst_detach_wrapper(soc, pdev);
6171 	dp_pdev_htt_stats_dbgfs_deinit(pdev);
6172 	dp_rx_pdev_desc_pool_free(pdev);
6173 	dp_monitor_pdev_detach(pdev);
6174 	dp_rxdma_ring_free(pdev);
6175 	dp_free_ipa_rx_refill_buf_ring(soc, pdev);
6176 	dp_free_ipa_rx_alt_refill_buf_ring(soc, pdev);
6177 	dp_pdev_srng_free(pdev);
6178 
6179 	soc->pdev_count--;
6180 	soc->pdev_list[pdev->pdev_id] = NULL;
6181 
6182 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
6183 	wlan_minidump_remove(pdev, sizeof(*pdev), soc->ctrl_psoc,
6184 			     WLAN_MD_DP_PDEV, "dp_pdev");
6185 	dp_context_free_mem(soc, DP_PDEV_TYPE, pdev);
6186 }
6187 
6188 /**
6189  * dp_pdev_detach_wifi3() - detach txrx pdev
6190  * @psoc: Datapath soc handle
6191  * @pdev_id: pdev id of pdev
6192  * @force: Force detach
6193  *
6194  * Return: QDF_STATUS
6195  */
6196 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
6197 				       int force)
6198 {
6199 	struct dp_pdev *pdev;
6200 	struct dp_soc *soc = (struct dp_soc *)psoc;
6201 
6202 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
6203 						  pdev_id);
6204 
6205 	if (!pdev) {
6206 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
6207 			    (struct dp_soc *)psoc, pdev_id);
6208 		return QDF_STATUS_E_FAILURE;
6209 	}
6210 
6211 	soc->arch_ops.txrx_pdev_detach(pdev);
6212 
6213 	dp_pdev_detach((struct cdp_pdev *)pdev, force);
6214 	return QDF_STATUS_SUCCESS;
6215 }
6216 
6217 #ifndef DP_UMAC_HW_RESET_SUPPORT
6218 static inline
6219 #endif
6220 void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
6221 {
6222 	struct reo_desc_list_node *desc;
6223 	struct dp_rx_tid *rx_tid;
6224 
6225 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
6226 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
6227 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
6228 		rx_tid = &desc->rx_tid;
6229 		qdf_mem_unmap_nbytes_single(soc->osdev,
6230 			rx_tid->hw_qdesc_paddr,
6231 			QDF_DMA_BIDIRECTIONAL,
6232 			rx_tid->hw_qdesc_alloc_size);
6233 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
6234 		qdf_mem_free(desc);
6235 	}
6236 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
6237 	qdf_list_destroy(&soc->reo_desc_freelist);
6238 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
6239 }
6240 
6241 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
6242 /**
6243  * dp_reo_desc_deferred_freelist_create() - Initialize the resources used
6244  *                                          for deferred reo desc list
6245  * @soc: Datapath soc handle
6246  *
6247  * Return: void
6248  */
6249 static void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
6250 {
6251 	qdf_spinlock_create(&soc->reo_desc_deferred_freelist_lock);
6252 	qdf_list_create(&soc->reo_desc_deferred_freelist,
6253 			REO_DESC_DEFERRED_FREELIST_SIZE);
6254 	soc->reo_desc_deferred_freelist_init = true;
6255 }
6256 
6257 /**
6258  * dp_reo_desc_deferred_freelist_destroy() - loop the deferred free list &
6259  *                                           free the leftover REO QDESCs
6260  * @soc: Datapath soc handle
6261  *
6262  * Return: void
6263  */
6264 static void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
6265 {
6266 	struct reo_desc_deferred_freelist_node *desc;
6267 
6268 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
6269 	soc->reo_desc_deferred_freelist_init = false;
6270 	while (qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
6271 	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
6272 		qdf_mem_unmap_nbytes_single(soc->osdev,
6273 					    desc->hw_qdesc_paddr,
6274 					    QDF_DMA_BIDIRECTIONAL,
6275 					    desc->hw_qdesc_alloc_size);
6276 		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
6277 		qdf_mem_free(desc);
6278 	}
6279 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
6280 
6281 	qdf_list_destroy(&soc->reo_desc_deferred_freelist);
6282 	qdf_spinlock_destroy(&soc->reo_desc_deferred_freelist_lock);
6283 }
6284 #else
6285 static inline void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
6286 {
6287 }
6288 
6289 static inline void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
6290 {
6291 }
6292 #endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
6293 
6294 /**
6295  * dp_soc_reset_txrx_ring_map() - reset tx ring map
6296  * @soc: DP SOC handle
6297  *
6298  */
6299 static void dp_soc_reset_txrx_ring_map(struct dp_soc *soc)
6300 {
6301 	uint32_t i;
6302 
6303 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
6304 		soc->tx_ring_map[i] = 0;
6305 }
6306 
6307 /**
6308  * dp_soc_print_inactive_objects() - prints inactive peer and vdev list
6309  * @soc: DP SOC handle
6310  *
6311  */
6312 static void dp_soc_print_inactive_objects(struct dp_soc *soc)
6313 {
6314 	struct dp_peer *peer = NULL;
6315 	struct dp_peer *tmp_peer = NULL;
6316 	struct dp_vdev *vdev = NULL;
6317 	struct dp_vdev *tmp_vdev = NULL;
6318 	int i = 0;
6319 	uint32_t count;
6320 
6321 	if (TAILQ_EMPTY(&soc->inactive_peer_list) &&
6322 	    TAILQ_EMPTY(&soc->inactive_vdev_list))
6323 		return;
6324 
6325 	TAILQ_FOREACH_SAFE(peer, &soc->inactive_peer_list,
6326 			   inactive_list_elem, tmp_peer) {
6327 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
6328 			count = qdf_atomic_read(&peer->mod_refs[i]);
6329 			if (count)
6330 				DP_PRINT_STATS("peer %pK Module id %u ==> %u",
6331 					       peer, i, count);
6332 		}
6333 	}
6334 
6335 	TAILQ_FOREACH_SAFE(vdev, &soc->inactive_vdev_list,
6336 			   inactive_list_elem, tmp_vdev) {
6337 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
6338 			count = qdf_atomic_read(&vdev->mod_refs[i]);
6339 			if (count)
6340 				DP_PRINT_STATS("vdev %pK Module id %u ==> %u",
6341 					       vdev, i, count);
6342 		}
6343 	}
6344 	QDF_BUG(0);
6345 }
6346 
6347 /**
6348  * dp_soc_deinit() - Deinitialize txrx SOC
6349  * @txrx_soc: Opaque DP SOC handle
6350  *
6351  * Return: None
6352  */
6353 static void dp_soc_deinit(void *txrx_soc)
6354 {
6355 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6356 	struct htt_soc *htt_soc = soc->htt_handle;
6357 
6358 	qdf_atomic_set(&soc->cmn_init_done, 0);
6359 
6360 	if (soc->arch_ops.txrx_soc_ppeds_stop)
6361 		soc->arch_ops.txrx_soc_ppeds_stop(soc);
6362 
6363 	soc->arch_ops.txrx_soc_deinit(soc);
6364 
6365 	dp_monitor_soc_deinit(soc);
6366 
6367 	/* free peer tables & AST tables allocated during peer_map_attach */
6368 	if (soc->peer_map_attach_success) {
6369 		dp_peer_find_detach(soc);
6370 		soc->arch_ops.txrx_peer_map_detach(soc);
6371 		soc->peer_map_attach_success = FALSE;
6372 	}
6373 
6374 	qdf_flush_work(&soc->htt_stats.work);
6375 	qdf_disable_work(&soc->htt_stats.work);
6376 
6377 	qdf_spinlock_destroy(&soc->htt_stats.lock);
6378 
6379 	dp_soc_reset_txrx_ring_map(soc);
6380 
6381 	dp_reo_desc_freelist_destroy(soc);
6382 	dp_reo_desc_deferred_freelist_destroy(soc);
6383 
6384 	DEINIT_RX_HW_STATS_LOCK(soc);
6385 
6386 	qdf_spinlock_destroy(&soc->ast_lock);
6387 
6388 	dp_peer_mec_spinlock_destroy(soc);
6389 
6390 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
6391 
6392 	qdf_nbuf_queue_free(&soc->invalid_buf_queue);
6393 
6394 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
6395 
6396 	qdf_spinlock_destroy(&soc->vdev_map_lock);
6397 
6398 	dp_reo_cmdlist_destroy(soc);
6399 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
6400 
6401 	dp_soc_tx_desc_sw_pools_deinit(soc);
6402 
6403 	dp_soc_srng_deinit(soc);
6404 
6405 	dp_hw_link_desc_ring_deinit(soc);
6406 
6407 	dp_soc_print_inactive_objects(soc);
6408 	qdf_spinlock_destroy(&soc->inactive_peer_list_lock);
6409 	qdf_spinlock_destroy(&soc->inactive_vdev_list_lock);
6410 
6411 	htt_soc_htc_dealloc(soc->htt_handle);
6412 
6413 	htt_soc_detach(htt_soc);
6414 
6415 	/* Free wbm sg list and reset flags in down path */
6416 	dp_rx_wbm_sg_list_deinit(soc);
6417 
6418 	wlan_minidump_remove(soc, sizeof(*soc), soc->ctrl_psoc,
6419 			     WLAN_MD_DP_SOC, "dp_soc");
6420 }
6421 
6422 /**
6423  * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
6424  * @txrx_soc: Opaque DP SOC handle
6425  *
6426  * Return: None
6427  */
6428 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc)
6429 {
6430 	dp_soc_deinit(txrx_soc);
6431 }
6432 
6433 /**
6434  * dp_soc_detach() - Detach rest of txrx SOC
6435  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
6436  *
6437  * Return: None
6438  */
6439 static void dp_soc_detach(struct cdp_soc_t *txrx_soc)
6440 {
6441 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6442 
6443 	soc->arch_ops.txrx_soc_detach(soc);
6444 
6445 	dp_runtime_deinit();
6446 
6447 	dp_sysfs_deinitialize_stats(soc);
6448 	dp_soc_swlm_detach(soc);
6449 	dp_soc_tx_desc_sw_pools_free(soc);
6450 	dp_soc_srng_free(soc);
6451 	dp_hw_link_desc_ring_free(soc);
6452 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
6453 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
6454 	dp_soc_tx_hw_desc_history_detach(soc);
6455 	dp_soc_tx_history_detach(soc);
6456 	dp_soc_mon_status_ring_history_detach(soc);
6457 	dp_soc_rx_history_detach(soc);
6458 	dp_soc_cfg_history_detach(soc);
6459 
6460 	if (!dp_monitor_modularized_enable()) {
6461 		dp_mon_soc_detach_wrapper(soc);
6462 	}
6463 
6464 	qdf_mem_free(soc->cdp_soc.ops);
6465 	qdf_mem_free(soc);
6466 }
6467 
6468 /**
6469  * dp_soc_detach_wifi3() - Detach txrx SOC
6470  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
6471  *
6472  * Return: None
6473  */
6474 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc)
6475 {
6476 	dp_soc_detach(txrx_soc);
6477 }
6478 
6479 #ifdef QCA_HOST2FW_RXBUF_RING
6480 static inline void
6481 dp_htt_setup_rxdma_err_dst_ring(struct dp_soc *soc, int mac_id,
6482 				int lmac_id)
6483 {
6484 	if (soc->rxdma_err_dst_ring[lmac_id].hal_srng)
6485 		htt_srng_setup(soc->htt_handle, mac_id,
6486 			       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
6487 			       RXDMA_DST);
6488 }
6489 
6490 #ifdef IPA_WDI3_VLAN_SUPPORT
6491 static inline
6492 void dp_rxdma_setup_refill_ring3(struct dp_soc *soc,
6493 				 struct dp_pdev *pdev,
6494 				 uint8_t idx)
6495 {
6496 	if (pdev->rx_refill_buf_ring3.hal_srng)
6497 		htt_srng_setup(soc->htt_handle, idx,
6498 			       pdev->rx_refill_buf_ring3.hal_srng,
6499 			       RXDMA_BUF);
6500 }
6501 #else
6502 static inline
6503 void dp_rxdma_setup_refill_ring3(struct dp_soc *soc,
6504 				 struct dp_pdev *pdev,
6505 				 uint8_t idx)
6506 { }
6507 #endif
6508 
6509 /**
6510  * dp_rxdma_ring_config() - configure the RX DMA rings
6511  * @soc: data path SoC handle
6512  *
6513  * This function is used to configure the MAC rings.
6514  * On MCL host provides buffers in Host2FW ring
6515  * FW refills (copies) buffers to the ring and updates
6516  * ring_idx in register
6517  *
6518  * Return: zero on success, non-zero on failure
6519  */
6520 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
6521 {
6522 	int i;
6523 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6524 
6525 	for (i = 0; i < MAX_PDEV_CNT; i++) {
6526 		struct dp_pdev *pdev = soc->pdev_list[i];
6527 
6528 		if (pdev) {
6529 			int mac_id;
6530 			int max_mac_rings =
6531 				 wlan_cfg_get_num_mac_rings
6532 				(pdev->wlan_cfg_ctx);
6533 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);
6534 
6535 			htt_srng_setup(soc->htt_handle, i,
6536 				       soc->rx_refill_buf_ring[lmac_id]
6537 				       .hal_srng,
6538 				       RXDMA_BUF);
6539 
6540 			if (pdev->rx_refill_buf_ring2.hal_srng)
6541 				htt_srng_setup(soc->htt_handle, i,
6542 					       pdev->rx_refill_buf_ring2
6543 					       .hal_srng,
6544 					       RXDMA_BUF);
6545 
6546 			dp_rxdma_setup_refill_ring3(soc, pdev, i);
6547 
6548 			dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);
6549 			dp_err("pdev_id %d max_mac_rings %d",
6550 			       pdev->pdev_id, max_mac_rings);
6551 
6552 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
6553 				int mac_for_pdev =
6554 					dp_get_mac_id_for_pdev(mac_id,
6555 							       pdev->pdev_id);
6556 				/*
6557 				 * Obtain lmac id from pdev to access the LMAC
6558 				 * ring in soc context
6559 				 */
6560 				lmac_id =
6561 				dp_get_lmac_id_for_pdev_id(soc,
6562 							   mac_id,
6563 							   pdev->pdev_id);
6564 				QDF_TRACE(QDF_MODULE_ID_TXRX,
6565 					 QDF_TRACE_LEVEL_ERROR,
6566 					 FL("mac_id %d"), mac_for_pdev);
6567 
6568 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
6569 					 pdev->rx_mac_buf_ring[mac_id]
6570 						.hal_srng,
6571 					 RXDMA_BUF);
6572 
6573 				if (!soc->rxdma2sw_rings_not_supported)
6574 					dp_htt_setup_rxdma_err_dst_ring(soc,
6575 						mac_for_pdev, lmac_id);
6576 
6577 				/* Configure monitor mode rings */
6578 				status = dp_monitor_htt_srng_setup(soc, pdev,
6579 								   lmac_id,
6580 								   mac_for_pdev);
6581 				if (status != QDF_STATUS_SUCCESS) {
6582 					dp_err("Failed to send htt monitor messages to target");
6583 					return status;
6584 				}
6585 
6586 			}
6587 		}
6588 	}
6589 
6590 	dp_reap_timer_init(soc);
6591 	return status;
6592 }
6593 #else
6594 /* This is only for WIN */
6595 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
6596 {
6597 	int i;
6598 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6599 	int mac_for_pdev;
6600 	int lmac_id;
6601 
6602 	/* Configure monitor mode rings */
6603 	dp_monitor_soc_htt_srng_setup(soc);
6604 
6605 	for (i = 0; i < MAX_PDEV_CNT; i++) {
6606 		struct dp_pdev *pdev =  soc->pdev_list[i];
6607 
6608 		if (!pdev)
6609 			continue;
6610 
6611 		mac_for_pdev = i;
6612 		lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);
6613 
6614 		if (soc->rx_refill_buf_ring[lmac_id].hal_srng)
6615 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
6616 				       soc->rx_refill_buf_ring[lmac_id].
6617 				       hal_srng, RXDMA_BUF);
6618 
6619 		/* Configure monitor mode rings */
6620 		dp_monitor_htt_srng_setup(soc, pdev,
6621 					  lmac_id,
6622 					  mac_for_pdev);
6623 		if (!soc->rxdma2sw_rings_not_supported)
6624 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
6625 				       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
6626 				       RXDMA_DST);
6627 	}
6628 
6629 	dp_reap_timer_init(soc);
6630 	return status;
6631 }
6632 #endif
6633 
6634 /**
6635  * dp_rx_target_fst_config() - configure the RXOLE Flow Search Engine
6636  *
6637  * This function is used to configure the FSE HW block in RX OLE on a
6638  * per pdev basis. Here, we will be programming parameters related to
6639  * the Flow Search Table.
6640  *
6641  * @soc: data path SoC handle
6642  *
6643  * Return: zero on success, non-zero on failure
6644  */
6645 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
6646 static QDF_STATUS
6647 dp_rx_target_fst_config(struct dp_soc *soc)
6648 {
6649 	int i;
6650 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6651 
6652 	for (i = 0; i < MAX_PDEV_CNT; i++) {
6653 		struct dp_pdev *pdev = soc->pdev_list[i];
6654 
6655 		/* Flow search is not enabled if NSS offload is enabled */
6656 		if (pdev &&
6657 		    !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
6658 			status = dp_rx_flow_send_fst_fw_setup(pdev->soc, pdev);
6659 			if (status != QDF_STATUS_SUCCESS)
6660 				break;
6661 		}
6662 	}
6663 	return status;
6664 }
6665 #elif defined(WLAN_SUPPORT_RX_FISA)
6666 /**
6667  * dp_rx_target_fst_config() - Configure RX OLE FSE engine in HW
6668  * @soc: SoC handle
6669  *
6670  * Return: Success
6671  */
6672 static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
6673 {
6674 	QDF_STATUS status;
6675 	struct dp_rx_fst *fst = soc->rx_fst;
6676 
6677 	/* Check if it is enabled in the INI */
6678 	if (!soc->fisa_enable) {
6679 		dp_err("RX FISA feature is disabled");
6680 		return QDF_STATUS_E_NOSUPPORT;
6681 	}
6682 
6683 	status = dp_rx_flow_send_fst_fw_setup(soc, soc->pdev_list[0]);
6684 	if (QDF_IS_STATUS_ERROR(status)) {
6685 		dp_err("dp_rx_flow_send_fst_fw_setup failed %d",
6686 		       status);
6687 		return status;
6688 	}
6689 
6690 	if (soc->fst_cmem_base) {
6691 		soc->fst_in_cmem = true;
6692 		dp_rx_fst_update_cmem_params(soc, fst->max_entries,
6693 					     soc->fst_cmem_base & 0xffffffff,
6694 					     soc->fst_cmem_base >> 32);
6695 	}
6696 	return status;
6697 }
6698 
6699 #define FISA_MAX_TIMEOUT 0xffffffff
6700 #define FISA_DISABLE_TIMEOUT 0
6701 static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
6702 {
6703 	struct dp_htt_rx_fisa_cfg fisa_config;
6704 
6705 	fisa_config.pdev_id = 0;
6706 	fisa_config.fisa_timeout = FISA_MAX_TIMEOUT;
6707 
6708 	return dp_htt_rx_fisa_config(soc->pdev_list[0], &fisa_config);
6709 }
6710 
6711 #else /* !WLAN_SUPPORT_RX_FISA */
6712 static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
6713 {
6714 	return QDF_STATUS_SUCCESS;
6715 }
6716 #endif /* !WLAN_SUPPORT_RX_FISA */
6717 
6718 #ifndef WLAN_SUPPORT_RX_FISA
6719 static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
6720 {
6721 	return QDF_STATUS_SUCCESS;
6722 }
6723 
6724 static QDF_STATUS dp_rx_dump_fisa_stats(struct dp_soc *soc)
6725 {
6726 	return QDF_STATUS_SUCCESS;
6727 }
6728 
6729 static void dp_rx_dump_fisa_table(struct dp_soc *soc)
6730 {
6731 }
6732 
6733 static void dp_suspend_fse_cache_flush(struct dp_soc *soc)
6734 {
6735 }
6736 
6737 static void dp_resume_fse_cache_flush(struct dp_soc *soc)
6738 {
6739 }
6740 #endif /* !WLAN_SUPPORT_RX_FISA */
6741 
6742 #ifndef WLAN_DP_FEATURE_SW_LATENCY_MGR
6743 static inline QDF_STATUS dp_print_swlm_stats(struct dp_soc *soc)
6744 {
6745 	return QDF_STATUS_SUCCESS;
6746 }
6747 #endif /* !WLAN_DP_FEATURE_SW_LATENCY_MGR */
6748 
6749 #ifdef WLAN_SUPPORT_PPEDS
6750 /**
6751  * dp_soc_target_ppe_rxole_rxdma_cfg() - Configure the RxOLe and RxDMA for PPE
6752  * @soc: DP Tx/Rx handle
6753  *
6754  * Return: QDF_STATUS
6755  */
6756 static
6757 QDF_STATUS dp_soc_target_ppe_rxole_rxdma_cfg(struct dp_soc *soc)
6758 {
6759 	struct dp_htt_rxdma_rxole_ppe_config htt_cfg = {0};
6760 	QDF_STATUS status;
6761 
6762 	/*
6763 	 * Program RxDMA to override the reo destination indication
6764 	 * with REO2PPE_DST_IND, when use_ppe is set to 1 in RX_MSDU_END,
6765 	 * thereby driving the packet to REO2PPE ring.
6766 	 * If the MSDU is spanning more than 1 buffer, then this
6767 	 * override is not done.
6768 	 */
6769 	htt_cfg.override = 1;
6770 	htt_cfg.reo_destination_indication = REO2PPE_DST_IND;
6771 	htt_cfg.multi_buffer_msdu_override_en = 0;
6772 
6773 	/*
6774 	 * Override use_ppe to 0 in RxOLE for the following
6775 	 * cases.
6776 	 */
6777 	htt_cfg.intra_bss_override = 1;
6778 	htt_cfg.decap_raw_override = 1;
6779 	htt_cfg.decap_nwifi_override = 1;
6780 	htt_cfg.ip_frag_override = 1;
6781 
6782 	status = dp_htt_rxdma_rxole_ppe_cfg_set(soc, &htt_cfg);
6783 	if (status != QDF_STATUS_SUCCESS)
6784 		dp_err("RxOLE and RxDMA PPE config failed %d", status);
6785 
6786 	return status;
6787 }
6788 
6789 static inline
6790 void dp_soc_txrx_peer_setup(enum wlan_op_mode vdev_opmode, struct dp_soc *soc,
6791 			    struct dp_peer *peer)
6792 {
6793 	if (((vdev_opmode == wlan_op_mode_ap) ||
6794 	     (vdev_opmode == wlan_op_mode_sta)) &&
6795 	     (soc->arch_ops.txrx_peer_setup)) {
6796 		if (soc->arch_ops.txrx_peer_setup(soc, peer)
6797 				!= QDF_STATUS_SUCCESS) {
6798 			dp_err("unable to setup target peer features");
6799 			qdf_assert_always(0);
6800 		}
6801 	}
6802 }
6803 #else
6804 static inline
6805 QDF_STATUS dp_soc_target_ppe_rxole_rxdma_cfg(struct dp_soc *soc)
6806 {
6807 	return QDF_STATUS_SUCCESS;
6808 }
6809 
6810 static inline
6811 void dp_soc_txrx_peer_setup(enum wlan_op_mode vdev_opmode, struct dp_soc *soc,
6812 			    struct dp_peer *peer)
6813 {
6814 }
6815 #endif /* WLAN_SUPPORT_PPEDS */
6816 
6817 #ifdef DP_UMAC_HW_RESET_SUPPORT
6818 static void dp_register_umac_reset_handlers(struct dp_soc *soc)
6819 {
6820 	dp_umac_reset_register_rx_action_callback(soc,
6821 		dp_umac_reset_handle_pre_reset, UMAC_RESET_ACTION_DO_PRE_RESET);
6822 
6823 	dp_umac_reset_register_rx_action_callback(soc,
6824 					dp_umac_reset_handle_post_reset,
6825 					UMAC_RESET_ACTION_DO_POST_RESET_START);
6826 
6827 	dp_umac_reset_register_rx_action_callback(soc,
6828 				dp_umac_reset_handle_post_reset_complete,
6829 				UMAC_RESET_ACTION_DO_POST_RESET_COMPLETE);
6830 
6831 }
6832 #else
6833 static void dp_register_umac_reset_handlers(struct dp_soc *soc)
6834 {
6835 }
6836 #endif
6837 /**
6838  * dp_soc_attach_target_wifi3() - SOC initialization in the target
6839  * @cdp_soc: Opaque Datapath SOC handle
6840  *
6841  * Return: zero on success, non-zero on failure
6842  */
6843 static QDF_STATUS
6844 dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
6845 {
6846 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
6847 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6848 	struct hal_reo_params reo_params;
6849 
6850 	htt_soc_attach_target(soc->htt_handle);
6851 
6852 	status = dp_soc_target_ppe_rxole_rxdma_cfg(soc);
6853 	if (status != QDF_STATUS_SUCCESS) {
6854 		dp_err("Failed to send htt RxOLE and RxDMA messages to target");
6855 		return status;
6856 	}
6857 
6858 	status = dp_rxdma_ring_config(soc);
6859 	if (status != QDF_STATUS_SUCCESS) {
6860 		dp_err("Failed to send htt srng setup messages to target");
6861 		return status;
6862 	}
6863 
6864 	status = soc->arch_ops.dp_rxdma_ring_sel_cfg(soc);
6865 	if (status != QDF_STATUS_SUCCESS) {
6866 		dp_err("Failed to send htt ring config message to target");
6867 		return status;
6868 	}
6869 
6870 	status = dp_soc_umac_reset_init(soc);
6871 	if (status != QDF_STATUS_SUCCESS &&
6872 	    status != QDF_STATUS_E_NOSUPPORT) {
6873 		dp_err("Failed to initialize UMAC reset");
6874 		return status;
6875 	}
6876 
6877 	dp_register_umac_reset_handlers(soc);
6878 
6879 	status = dp_rx_target_fst_config(soc);
6880 	if (status != QDF_STATUS_SUCCESS &&
6881 	    status != QDF_STATUS_E_NOSUPPORT) {
6882 		dp_err("Failed to send htt fst setup config message to target");
6883 		return status;
6884 	}
6885 
6886 	if (status == QDF_STATUS_SUCCESS) {
6887 		status = dp_rx_fisa_config(soc);
6888 		if (status != QDF_STATUS_SUCCESS) {
6889 			dp_err("Failed to send htt FISA config message to target");
6890 			return status;
6891 		}
6892 	}
6893 
6894 	DP_STATS_INIT(soc);
6895 
6896 	dp_runtime_init(soc);
6897 
6898 	/* Enable HW vdev offload stats if feature is supported */
6899 	dp_vdev_stats_hw_offload_target_config(soc, INVALID_PDEV_ID, true);
6900 
6901 	/* initialize work queue for stats processing */
6902 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
6903 
6904 	wlan_cfg_soc_update_tgt_params(soc->wlan_cfg_ctx,
6905 				       soc->ctrl_psoc);
6906 	/* Setup HW REO */
6907 	qdf_mem_zero(&reo_params, sizeof(reo_params));
6908 
6909 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
6910 		/*
6911 		 * Reo ring remap is not required if both radios
6912 		 * are offloaded to NSS
6913 		 */
6914 
6915 		if (soc->arch_ops.reo_remap_config(soc, &reo_params.remap0,
6916 						   &reo_params.remap1,
6917 						   &reo_params.remap2))
6918 			reo_params.rx_hash_enabled = true;
6919 		else
6920 			reo_params.rx_hash_enabled = false;
6921 	}
6922 
6923 	/*
6924 	 * set the fragment destination ring
6925 	 */
6926 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
6927 
6928 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
6929 		reo_params.alt_dst_ind_0 = REO_REMAP_RELEASE;
6930 
6931 	reo_params.reo_qref = &soc->reo_qref;
6932 	hal_reo_setup(soc->hal_soc, &reo_params, 1);
6933 
6934 	hal_reo_set_err_dst_remap(soc->hal_soc);
6935 
6936 	soc->features.pn_in_reo_dest = hal_reo_enable_pn_in_dest(soc->hal_soc);
6937 
6938 	return QDF_STATUS_SUCCESS;
6939 }
6940 
6941 /**
6942  * dp_vdev_id_map_tbl_add() - Add vdev into vdev_id table
6943  * @soc: SoC handle
6944  * @vdev: vdev handle
6945  * @vdev_id: vdev_id
6946  *
6947  * Return: None
6948  */
6949 static void dp_vdev_id_map_tbl_add(struct dp_soc *soc,
6950 				   struct dp_vdev *vdev,
6951 				   uint8_t vdev_id)
6952 {
6953 	QDF_ASSERT(vdev_id <= MAX_VDEV_CNT);
6954 
6955 	qdf_spin_lock_bh(&soc->vdev_map_lock);
6956 
6957 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
6958 			QDF_STATUS_SUCCESS) {
6959 		dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK vdev_id %u",
6960 			     soc, vdev, vdev_id);
6961 		qdf_spin_unlock_bh(&soc->vdev_map_lock);
6962 		return;
6963 	}
6964 
6965 	if (!soc->vdev_id_map[vdev_id])
6966 		soc->vdev_id_map[vdev_id] = vdev;
6967 	else
6968 		QDF_ASSERT(0);
6969 
6970 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
6971 }
6972 
6973 /**
6974  * dp_vdev_id_map_tbl_remove() - remove vdev from vdev_id table
6975  * @soc: SoC handle
6976  * @vdev: vdev handle
6977  *
6978  * Return: None
6979  */
6980 static void dp_vdev_id_map_tbl_remove(struct dp_soc *soc,
6981 				      struct dp_vdev *vdev)
6982 {
6983 	qdf_spin_lock_bh(&soc->vdev_map_lock);
6984 	QDF_ASSERT(soc->vdev_id_map[vdev->vdev_id] == vdev);
6985 
6986 	soc->vdev_id_map[vdev->vdev_id] = NULL;
6987 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
6988 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
6989 }
6990 
6991 /**
6992  * dp_vdev_pdev_list_add() - add vdev into pdev's list
6993  * @soc: soc handle
6994  * @pdev: pdev handle
6995  * @vdev: vdev handle
6996  *
6997  * Return: none
6998  */
6999 static void dp_vdev_pdev_list_add(struct dp_soc *soc,
7000 				  struct dp_pdev *pdev,
7001 				  struct dp_vdev *vdev)
7002 {
7003 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
7004 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
7005 			QDF_STATUS_SUCCESS) {
7006 		dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK",
7007 			     soc, vdev);
7008 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
7009 		return;
7010 	}
7011 	/* add this vdev into the pdev's list */
7012 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
7013 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
7014 }
7015 
7016 /**
7017  * dp_vdev_pdev_list_remove() - remove vdev from pdev's list
7018  * @soc: SoC handle
7019  * @pdev: pdev handle
7020  * @vdev: VDEV handle
7021  *
7022  * Return: none
7023  */
7024 static void dp_vdev_pdev_list_remove(struct dp_soc *soc,
7025 				     struct dp_pdev *pdev,
7026 				     struct dp_vdev *vdev)
7027 {
7028 	uint8_t found = 0;
7029 	struct dp_vdev *tmpvdev = NULL;
7030 
7031 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
7032 	TAILQ_FOREACH(tmpvdev, &pdev->vdev_list, vdev_list_elem) {
7033 		if (tmpvdev == vdev) {
7034 			found = 1;
7035 			break;
7036 		}
7037 	}
7038 
7039 	if (found) {
7040 		TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
7041 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
7042 	} else {
7043 		dp_vdev_debug("%pK: vdev:%pK not found in pdev:%pK vdevlist:%pK",
7044 			      soc, vdev, pdev, &pdev->vdev_list);
7045 		QDF_ASSERT(0);
7046 	}
7047 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
7048 }
7049 
7050 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
7051 /**
7052  * dp_vdev_init_rx_eapol() - initializing osif_rx_eapol
7053  * @vdev: Datapath VDEV handle
7054  *
7055  * Return: None
7056  */
7057 static inline void dp_vdev_init_rx_eapol(struct dp_vdev *vdev)
7058 {
7059 	vdev->osif_rx_eapol = NULL;
7060 }
7061 
7062 /**
7063  * dp_vdev_register_rx_eapol() - Register VDEV operations for rx_eapol
7064  * @vdev: DP vdev handle
7065  * @txrx_ops: Tx and Rx operations
7066  *
7067  * Return: None
7068  */
7069 static inline void dp_vdev_register_rx_eapol(struct dp_vdev *vdev,
7070 					     struct ol_txrx_ops *txrx_ops)
7071 {
7072 	vdev->osif_rx_eapol = txrx_ops->rx.rx_eapol;
7073 }
7074 #else
7075 static inline void dp_vdev_init_rx_eapol(struct dp_vdev *vdev)
7076 {
7077 }
7078 
7079 static inline void dp_vdev_register_rx_eapol(struct dp_vdev *vdev,
7080 					     struct ol_txrx_ops *txrx_ops)
7081 {
7082 }
7083 #endif
7084 
7085 #ifdef WLAN_FEATURE_11BE_MLO
7086 static inline void dp_vdev_save_mld_addr(struct dp_vdev *vdev,
7087 					 struct cdp_vdev_info *vdev_info)
7088 {
7089 	if (vdev_info->mld_mac_addr)
7090 		qdf_mem_copy(&vdev->mld_mac_addr.raw[0],
7091 			     vdev_info->mld_mac_addr, QDF_MAC_ADDR_SIZE);
7092 }
7093 #else
7094 static inline void dp_vdev_save_mld_addr(struct dp_vdev *vdev,
7095 					 struct cdp_vdev_info *vdev_info)
7096 {
7097 
7098 }
7099 #endif
7100 
7101 #ifdef DP_TRAFFIC_END_INDICATION
7102 /**
7103  * dp_tx_vdev_traffic_end_indication_attach() - Initialize data end indication
7104  *                                              related members in VDEV
7105  * @vdev: DP vdev handle
7106  *
7107  * Return: None
7108  */
7109 static inline void
7110 dp_tx_vdev_traffic_end_indication_attach(struct dp_vdev *vdev)
7111 {
7112 	qdf_nbuf_queue_init(&vdev->end_ind_pkt_q);
7113 }
7114 
7115 /**
7116  * dp_tx_vdev_traffic_end_indication_detach() - De-init data end indication
7117  *                                              related members in VDEV
7118  * @vdev: DP vdev handle
7119  *
7120  * Return: None
7121  */
7122 static inline void
7123 dp_tx_vdev_traffic_end_indication_detach(struct dp_vdev *vdev)
7124 {
7125 	qdf_nbuf_t nbuf;
7126 
7127 	while ((nbuf = qdf_nbuf_queue_remove(&vdev->end_ind_pkt_q)) != NULL)
7128 		qdf_nbuf_free(nbuf);
7129 }
7130 #else
7131 static inline void
7132 dp_tx_vdev_traffic_end_indication_attach(struct dp_vdev *vdev)
7133 {}
7134 
7135 static inline void
7136 dp_tx_vdev_traffic_end_indication_detach(struct dp_vdev *vdev)
7137 {}
7138 #endif
7139 
7140 /**
7141  * dp_vdev_attach_wifi3() - attach txrx vdev
7142  * @cdp_soc: CDP SoC context
7143  * @pdev_id: PDEV ID for vdev creation
7144  * @vdev_info: parameters used for vdev creation
7145  *
7146  * Return: status
7147  */
7148 static QDF_STATUS dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc,
7149 				       uint8_t pdev_id,
7150 				       struct cdp_vdev_info *vdev_info)
7151 {
7152 	int i = 0;
7153 	qdf_size_t vdev_context_size;
7154 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
7155 	struct dp_pdev *pdev =
7156 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7157 						   pdev_id);
7158 	struct dp_vdev *vdev;
7159 	uint8_t *vdev_mac_addr = vdev_info->vdev_mac_addr;
7160 	uint8_t vdev_id = vdev_info->vdev_id;
7161 	enum wlan_op_mode op_mode = vdev_info->op_mode;
7162 	enum wlan_op_subtype subtype = vdev_info->subtype;
7163 	uint8_t vdev_stats_id = vdev_info->vdev_stats_id;
7164 
7165 	vdev_context_size =
7166 		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_VDEV);
7167 	vdev = qdf_mem_malloc(vdev_context_size);
7168 
7169 	if (!pdev) {
7170 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
7171 			    cdp_soc, pdev_id);
7172 		qdf_mem_free(vdev);
7173 		goto fail0;
7174 	}
7175 
7176 	if (!vdev) {
7177 		dp_init_err("%pK: DP VDEV memory allocation failed",
7178 			    cdp_soc);
7179 		goto fail0;
7180 	}
7181 
7182 	wlan_minidump_log(vdev, sizeof(*vdev), soc->ctrl_psoc,
7183 			  WLAN_MD_DP_VDEV, "dp_vdev");
7184 
7185 	vdev->pdev = pdev;
7186 	vdev->vdev_id = vdev_id;
7187 	vdev->vdev_stats_id = vdev_stats_id;
7188 	vdev->opmode = op_mode;
7189 	vdev->subtype = subtype;
7190 	vdev->osdev = soc->osdev;
7191 
7192 	vdev->osif_rx = NULL;
7193 	vdev->osif_rsim_rx_decap = NULL;
7194 	vdev->osif_get_key = NULL;
7195 	vdev->osif_tx_free_ext = NULL;
7196 	vdev->osif_vdev = NULL;
7197 
7198 	vdev->delete.pending = 0;
7199 	vdev->safemode = 0;
7200 	vdev->drop_unenc = 1;
7201 	vdev->sec_type = cdp_sec_type_none;
7202 	vdev->multipass_en = false;
7203 	vdev->wrap_vdev = false;
7204 	dp_vdev_init_rx_eapol(vdev);
7205 	qdf_atomic_init(&vdev->ref_cnt);
7206 	for (i = 0; i < DP_MOD_ID_MAX; i++)
7207 		qdf_atomic_init(&vdev->mod_refs[i]);
7208 
7209 	/* Take one reference for create*/
7210 	qdf_atomic_inc(&vdev->ref_cnt);
7211 	qdf_atomic_inc(&vdev->mod_refs[DP_MOD_ID_CONFIG]);
7212 	vdev->num_peers = 0;
7213 #ifdef notyet
7214 	vdev->filters_num = 0;
7215 #endif
7216 	vdev->lmac_id = pdev->lmac_id;
7217 
7218 	qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE);
7219 
7220 	dp_vdev_save_mld_addr(vdev, vdev_info);
7221 
7222 	/* TODO: Initialize default HTT meta data that will be used in
7223 	 * TCL descriptors for packets transmitted from this VDEV
7224 	 */
7225 
7226 	qdf_spinlock_create(&vdev->peer_list_lock);
7227 	TAILQ_INIT(&vdev->peer_list);
7228 	dp_peer_multipass_list_init(vdev);
7229 	if ((soc->intr_mode == DP_INTR_POLL) &&
7230 	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
7231 		if ((pdev->vdev_count == 0) ||
7232 		    (wlan_op_mode_monitor == vdev->opmode))
7233 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7234 	} else if (dp_soc_get_con_mode(soc) == QDF_GLOBAL_MISSION_MODE &&
7235 		   soc->intr_mode == DP_INTR_MSI &&
7236 		   wlan_op_mode_monitor == vdev->opmode) {
7237 		/* Timer to reap status ring in mission mode */
7238 		dp_monitor_vdev_timer_start(soc);
7239 	}
7240 
7241 	dp_vdev_id_map_tbl_add(soc, vdev, vdev_id);
7242 
7243 	if (wlan_op_mode_monitor == vdev->opmode) {
7244 		if (dp_monitor_vdev_attach(vdev) == QDF_STATUS_SUCCESS) {
7245 			dp_monitor_pdev_set_mon_vdev(vdev);
7246 			return dp_monitor_vdev_set_monitor_mode_buf_rings(pdev);
7247 		}
7248 		return QDF_STATUS_E_FAILURE;
7249 	}
7250 
7251 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
7252 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
7253 	vdev->dscp_tid_map_id = 0;
7254 	vdev->mcast_enhancement_en = 0;
7255 	vdev->igmp_mcast_enhanc_en = 0;
7256 	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
7257 	vdev->prev_tx_enq_tstamp = 0;
7258 	vdev->prev_rx_deliver_tstamp = 0;
7259 	vdev->skip_sw_tid_classification = DP_TX_HW_DSCP_TID_MAP_VALID;
7260 	dp_tx_vdev_traffic_end_indication_attach(vdev);
7261 
7262 	dp_vdev_pdev_list_add(soc, pdev, vdev);
7263 	pdev->vdev_count++;
7264 
7265 	if (wlan_op_mode_sta != vdev->opmode &&
7266 	    wlan_op_mode_ndi != vdev->opmode)
7267 		vdev->ap_bridge_enabled = true;
7268 	else
7269 		vdev->ap_bridge_enabled = false;
7270 	dp_init_info("%pK: wlan_cfg_ap_bridge_enabled %d",
7271 		     cdp_soc, vdev->ap_bridge_enabled);
7272 
7273 	dp_tx_vdev_attach(vdev);
7274 
7275 	dp_monitor_vdev_attach(vdev);
7276 	if (!pdev->is_lro_hash_configured) {
7277 		if (QDF_IS_STATUS_SUCCESS(dp_lro_hash_setup(soc, pdev)))
7278 			pdev->is_lro_hash_configured = true;
7279 		else
7280 			dp_err("LRO hash setup failure!");
7281 	}
7282 
7283 	dp_cfg_event_record_vdev_evt(soc, DP_CFG_EVENT_VDEV_ATTACH, vdev);
7284 	dp_info("Created vdev %pK ("QDF_MAC_ADDR_FMT") vdev_id %d", vdev,
7285 		QDF_MAC_ADDR_REF(vdev->mac_addr.raw), vdev->vdev_id);
7286 	DP_STATS_INIT(vdev);
7287 
7288 	if (QDF_IS_STATUS_ERROR(soc->arch_ops.txrx_vdev_attach(soc, vdev)))
7289 		goto fail0;
7290 
7291 	if (wlan_op_mode_sta == vdev->opmode)
7292 		dp_peer_create_wifi3((struct cdp_soc_t *)soc, vdev_id,
7293 				     vdev->mac_addr.raw, CDP_LINK_PEER_TYPE);
7294 
7295 	dp_pdev_update_fast_rx_flag(soc, pdev);
7296 
7297 	return QDF_STATUS_SUCCESS;
7298 
7299 fail0:
7300 	return QDF_STATUS_E_FAILURE;
7301 }
7302 
7303 #ifndef QCA_HOST_MODE_WIFI_DISABLED
7304 /**
7305  * dp_vdev_fetch_tx_handler() - Fetch Tx handlers
7306  * @vdev: struct dp_vdev *
7307  * @soc: struct dp_soc *
7308  * @ctx: struct ol_txrx_hardtart_ctxt *
7309  */
7310 static inline void dp_vdev_fetch_tx_handler(struct dp_vdev *vdev,
7311 					    struct dp_soc *soc,
7312 					    struct ol_txrx_hardtart_ctxt *ctx)
7313 {
7314 	/* Enable vdev_id check only for ap, if flag is enabled */
7315 	if (vdev->mesh_vdev)
7316 		ctx->tx = dp_tx_send_mesh;
7317 	else if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
7318 		 (vdev->opmode == wlan_op_mode_ap)) {
7319 		ctx->tx = dp_tx_send_vdev_id_check;
7320 		ctx->tx_fast = dp_tx_send_vdev_id_check;
7321 	} else {
7322 		ctx->tx = dp_tx_send;
7323 		ctx->tx_fast = soc->arch_ops.dp_tx_send_fast;
7324 	}
7325 
7326 	/* Avoid check in regular exception Path */
7327 	if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
7328 	    (vdev->opmode == wlan_op_mode_ap))
7329 		ctx->tx_exception = dp_tx_send_exception_vdev_id_check;
7330 	else
7331 		ctx->tx_exception = dp_tx_send_exception;
7332 }
7333 
7334 /**
7335  * dp_vdev_register_tx_handler() - Register Tx handler
7336  * @vdev: struct dp_vdev *
7337  * @soc: struct dp_soc *
7338  * @txrx_ops: struct ol_txrx_ops *
7339  */
7340 static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
7341 					       struct dp_soc *soc,
7342 					       struct ol_txrx_ops *txrx_ops)
7343 {
7344 	struct ol_txrx_hardtart_ctxt ctx = {0};
7345 
7346 	dp_vdev_fetch_tx_handler(vdev, soc, &ctx);
7347 
7348 	txrx_ops->tx.tx = ctx.tx;
7349 	txrx_ops->tx.tx_fast = ctx.tx_fast;
7350 	txrx_ops->tx.tx_exception = ctx.tx_exception;
7351 
7352 	dp_info("Configure tx_vdev_id_chk_handler Feature Flag: %d and mode:%d for vdev_id:%d",
7353 		wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx),
7354 		vdev->opmode, vdev->vdev_id);
7355 }
7356 #else /* QCA_HOST_MODE_WIFI_DISABLED */
7357 static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
7358 					       struct dp_soc *soc,
7359 					       struct ol_txrx_ops *txrx_ops)
7360 {
7361 }
7362 
7363 static inline void dp_vdev_fetch_tx_handler(struct dp_vdev *vdev,
7364 					    struct dp_soc *soc,
7365 					    struct ol_txrx_hardtart_ctxt *ctx)
7366 {
7367 }
7368 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
7369 
7370 /**
7371  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
7372  * @soc_hdl: Datapath soc handle
7373  * @vdev_id: id of Datapath VDEV handle
7374  * @osif_vdev: OSIF vdev handle
7375  * @txrx_ops: Tx and Rx operations
7376  *
7377  * Return: DP VDEV handle on success, NULL on failure
7378  */
7379 static QDF_STATUS dp_vdev_register_wifi3(struct cdp_soc_t *soc_hdl,
7380 					 uint8_t vdev_id,
7381 					 ol_osif_vdev_handle osif_vdev,
7382 					 struct ol_txrx_ops *txrx_ops)
7383 {
7384 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7385 	struct dp_vdev *vdev =	dp_vdev_get_ref_by_id(soc, vdev_id,
7386 						      DP_MOD_ID_CDP);
7387 
7388 	if (!vdev)
7389 		return QDF_STATUS_E_FAILURE;
7390 
7391 	vdev->osif_vdev = osif_vdev;
7392 	vdev->osif_rx = txrx_ops->rx.rx;
7393 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
7394 	vdev->osif_rx_flush = txrx_ops->rx.rx_flush;
7395 	vdev->osif_gro_flush = txrx_ops->rx.rx_gro_flush;
7396 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
7397 	vdev->osif_fisa_rx = txrx_ops->rx.osif_fisa_rx;
7398 	vdev->osif_fisa_flush = txrx_ops->rx.osif_fisa_flush;
7399 	vdev->osif_get_key = txrx_ops->get_key;
7400 	dp_monitor_vdev_register_osif(vdev, txrx_ops);
7401 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
7402 	vdev->tx_comp = txrx_ops->tx.tx_comp;
7403 	vdev->stats_cb = txrx_ops->rx.stats_rx;
7404 	vdev->tx_classify_critical_pkt_cb =
7405 		txrx_ops->tx.tx_classify_critical_pkt_cb;
7406 #ifdef notyet
7407 #if ATH_SUPPORT_WAPI
7408 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
7409 #endif
7410 #endif
7411 #ifdef UMAC_SUPPORT_PROXY_ARP
7412 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
7413 #endif
7414 	vdev->me_convert = txrx_ops->me_convert;
7415 	vdev->get_tsf_time = txrx_ops->get_tsf_time;
7416 
7417 	dp_vdev_register_rx_eapol(vdev, txrx_ops);
7418 
7419 	dp_vdev_register_tx_handler(vdev, soc, txrx_ops);
7420 
7421 	dp_init_info("%pK: DP Vdev Register success", soc);
7422 
7423 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7424 	return QDF_STATUS_SUCCESS;
7425 }
7426 
7427 #ifdef WLAN_FEATURE_11BE_MLO
7428 void dp_peer_delete(struct dp_soc *soc,
7429 		    struct dp_peer *peer,
7430 		    void *arg)
7431 {
7432 	if (!peer->valid)
7433 		return;
7434 
7435 	dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
7436 			     peer->vdev->vdev_id,
7437 			     peer->mac_addr.raw, 0,
7438 			     peer->peer_type);
7439 }
7440 #else
7441 void dp_peer_delete(struct dp_soc *soc,
7442 		    struct dp_peer *peer,
7443 		    void *arg)
7444 {
7445 	if (!peer->valid)
7446 		return;
7447 
7448 	dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
7449 			     peer->vdev->vdev_id,
7450 			     peer->mac_addr.raw, 0,
7451 			     CDP_LINK_PEER_TYPE);
7452 }
7453 #endif
7454 
7455 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
7456 void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg)
7457 {
7458 	if (!peer->valid)
7459 		return;
7460 
7461 	if (IS_MLO_DP_LINK_PEER(peer))
7462 		dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
7463 				     peer->vdev->vdev_id,
7464 				     peer->mac_addr.raw, 0,
7465 				     CDP_LINK_PEER_TYPE);
7466 }
7467 #else
7468 void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg)
7469 {
7470 }
7471 #endif
7472 /**
7473  * dp_vdev_flush_peers() - Forcibily Flush peers of vdev
7474  * @vdev_handle: Datapath VDEV handle
7475  * @unmap_only: Flag to indicate "only unmap"
7476  * @mlo_peers_only: true if only MLO peers should be flushed
7477  *
7478  * Return: void
7479  */
7480 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
7481 				bool unmap_only,
7482 				bool mlo_peers_only)
7483 {
7484 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7485 	struct dp_pdev *pdev = vdev->pdev;
7486 	struct dp_soc *soc = pdev->soc;
7487 	struct dp_peer *peer;
7488 	uint32_t i = 0;
7489 
7490 
7491 	if (!unmap_only) {
7492 		if (!mlo_peers_only)
7493 			dp_vdev_iterate_peer_lock_safe(vdev,
7494 						       dp_peer_delete,
7495 						       NULL,
7496 						       DP_MOD_ID_CDP);
7497 		else
7498 			dp_vdev_iterate_peer_lock_safe(vdev,
7499 						       dp_mlo_peer_delete,
7500 						       NULL,
7501 						       DP_MOD_ID_CDP);
7502 	}
7503 
7504 	for (i = 0; i < soc->max_peer_id ; i++) {
7505 		peer = __dp_peer_get_ref_by_id(soc, i, DP_MOD_ID_CDP);
7506 
7507 		if (!peer)
7508 			continue;
7509 
7510 		if (peer->vdev != vdev) {
7511 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7512 			continue;
7513 		}
7514 
7515 		if (!mlo_peers_only) {
7516 			dp_info("peer: " QDF_MAC_ADDR_FMT " is getting unmap",
7517 				QDF_MAC_ADDR_REF(peer->mac_addr.raw));
7518 			dp_rx_peer_unmap_handler(soc, i,
7519 						 vdev->vdev_id,
7520 						 peer->mac_addr.raw, 0,
7521 						 DP_PEER_WDS_COUNT_INVALID);
7522 			SET_PEER_REF_CNT_ONE(peer);
7523 		} else if (IS_MLO_DP_LINK_PEER(peer) ||
7524 			   IS_MLO_DP_MLD_PEER(peer)) {
7525 			dp_info("peer: " QDF_MAC_ADDR_FMT " is getting unmap",
7526 				QDF_MAC_ADDR_REF(peer->mac_addr.raw));
7527 			dp_rx_peer_unmap_handler(soc, i,
7528 						 vdev->vdev_id,
7529 						 peer->mac_addr.raw, 0,
7530 						 DP_PEER_WDS_COUNT_INVALID);
7531 			SET_PEER_REF_CNT_ONE(peer);
7532 		}
7533 
7534 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7535 	}
7536 }
7537 
7538 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
7539 /**
7540  * dp_txrx_alloc_vdev_stats_id()- Allocate vdev_stats_id
7541  * @soc_hdl: Datapath soc handle
7542  * @vdev_stats_id: Address of vdev_stats_id
7543  *
7544  * Return: QDF_STATUS
7545  */
7546 static QDF_STATUS dp_txrx_alloc_vdev_stats_id(struct cdp_soc_t *soc_hdl,
7547 					      uint8_t *vdev_stats_id)
7548 {
7549 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7550 	uint8_t id = 0;
7551 
7552 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
7553 		*vdev_stats_id = CDP_INVALID_VDEV_STATS_ID;
7554 		return QDF_STATUS_E_FAILURE;
7555 	}
7556 
7557 	while (id < CDP_MAX_VDEV_STATS_ID) {
7558 		if (!qdf_atomic_test_and_set_bit(id, &soc->vdev_stats_id_map)) {
7559 			*vdev_stats_id = id;
7560 			return QDF_STATUS_SUCCESS;
7561 		}
7562 		id++;
7563 	}
7564 
7565 	*vdev_stats_id = CDP_INVALID_VDEV_STATS_ID;
7566 	return QDF_STATUS_E_FAILURE;
7567 }
7568 
7569 /**
7570  * dp_txrx_reset_vdev_stats_id() - Reset vdev_stats_id in dp_soc
7571  * @soc_hdl: Datapath soc handle
7572  * @vdev_stats_id: vdev_stats_id to reset in dp_soc
7573  *
7574  * Return: none
7575  */
7576 static void dp_txrx_reset_vdev_stats_id(struct cdp_soc_t *soc_hdl,
7577 					uint8_t vdev_stats_id)
7578 {
7579 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7580 
7581 	if ((!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) ||
7582 	    (vdev_stats_id >= CDP_MAX_VDEV_STATS_ID))
7583 		return;
7584 
7585 	qdf_atomic_clear_bit(vdev_stats_id, &soc->vdev_stats_id_map);
7586 }
7587 #else
7588 static void dp_txrx_reset_vdev_stats_id(struct cdp_soc_t *soc,
7589 					uint8_t vdev_stats_id)
7590 {}
7591 #endif
7592 /**
7593  * dp_vdev_detach_wifi3() - Detach txrx vdev
7594  * @cdp_soc: Datapath soc handle
7595  * @vdev_id: VDEV Id
7596  * @callback: Callback OL_IF on completion of detach
7597  * @cb_context:	Callback context
7598  *
7599  */
7600 static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc,
7601 				       uint8_t vdev_id,
7602 				       ol_txrx_vdev_delete_cb callback,
7603 				       void *cb_context)
7604 {
7605 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
7606 	struct dp_pdev *pdev;
7607 	struct dp_neighbour_peer *peer = NULL;
7608 	struct dp_peer *vap_self_peer = NULL;
7609 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
7610 						     DP_MOD_ID_CDP);
7611 
7612 	if (!vdev)
7613 		return QDF_STATUS_E_FAILURE;
7614 
7615 	soc->arch_ops.txrx_vdev_detach(soc, vdev);
7616 
7617 	pdev = vdev->pdev;
7618 
7619 	vap_self_peer = dp_sta_vdev_self_peer_ref_n_get(soc, vdev,
7620 							DP_MOD_ID_CONFIG);
7621 	if (vap_self_peer) {
7622 		qdf_spin_lock_bh(&soc->ast_lock);
7623 		if (vap_self_peer->self_ast_entry) {
7624 			dp_peer_del_ast(soc, vap_self_peer->self_ast_entry);
7625 			vap_self_peer->self_ast_entry = NULL;
7626 		}
7627 		qdf_spin_unlock_bh(&soc->ast_lock);
7628 
7629 		dp_peer_delete_wifi3((struct cdp_soc_t *)soc, vdev->vdev_id,
7630 				     vap_self_peer->mac_addr.raw, 0,
7631 				     CDP_LINK_PEER_TYPE);
7632 		dp_peer_unref_delete(vap_self_peer, DP_MOD_ID_CONFIG);
7633 	}
7634 
7635 	/*
7636 	 * If Target is hung, flush all peers before detaching vdev
7637 	 * this will free all references held due to missing
7638 	 * unmap commands from Target
7639 	 */
7640 	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
7641 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, false, false);
7642 	else if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
7643 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, true, false);
7644 
7645 	/* indicate that the vdev needs to be deleted */
7646 	vdev->delete.pending = 1;
7647 	dp_rx_vdev_detach(vdev);
7648 	/*
7649 	 * move it after dp_rx_vdev_detach(),
7650 	 * as the call back done in dp_rx_vdev_detach()
7651 	 * still need to get vdev pointer by vdev_id.
7652 	 */
7653 	dp_vdev_id_map_tbl_remove(soc, vdev);
7654 
7655 	dp_monitor_neighbour_peer_list_remove(pdev, vdev, peer);
7656 
7657 	dp_txrx_reset_vdev_stats_id(cdp_soc, vdev->vdev_stats_id);
7658 
7659 	dp_tx_vdev_multipass_deinit(vdev);
7660 	dp_tx_vdev_traffic_end_indication_detach(vdev);
7661 
7662 	if (vdev->vdev_dp_ext_handle) {
7663 		qdf_mem_free(vdev->vdev_dp_ext_handle);
7664 		vdev->vdev_dp_ext_handle = NULL;
7665 	}
7666 	vdev->delete.callback = callback;
7667 	vdev->delete.context = cb_context;
7668 
7669 	if (vdev->opmode != wlan_op_mode_monitor)
7670 		dp_vdev_pdev_list_remove(soc, pdev, vdev);
7671 
7672 	pdev->vdev_count--;
7673 	/* release reference taken above for find */
7674 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7675 
7676 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
7677 	TAILQ_INSERT_TAIL(&soc->inactive_vdev_list, vdev, inactive_list_elem);
7678 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
7679 
7680 	dp_cfg_event_record_vdev_evt(soc, DP_CFG_EVENT_VDEV_DETACH, vdev);
7681 	dp_info("detach vdev %pK id %d pending refs %d",
7682 		vdev, vdev->vdev_id, qdf_atomic_read(&vdev->ref_cnt));
7683 
7684 	/* release reference taken at dp_vdev_create */
7685 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
7686 
7687 	return QDF_STATUS_SUCCESS;
7688 }
7689 
7690 #ifdef WLAN_FEATURE_11BE_MLO
7691 /**
7692  * is_dp_peer_can_reuse() - check if the dp_peer match condition to be reused
7693  * @vdev: Target DP vdev handle
7694  * @peer: DP peer handle to be checked
7695  * @peer_mac_addr: Target peer mac address
7696  * @peer_type: Target peer type
7697  *
7698  * Return: true - if match, false - not match
7699  */
7700 static inline
7701 bool is_dp_peer_can_reuse(struct dp_vdev *vdev,
7702 			  struct dp_peer *peer,
7703 			  uint8_t *peer_mac_addr,
7704 			  enum cdp_peer_type peer_type)
7705 {
7706 	if (peer->bss_peer && (peer->vdev == vdev) &&
7707 	    (peer->peer_type == peer_type) &&
7708 	    (qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw,
7709 			 QDF_MAC_ADDR_SIZE) == 0))
7710 		return true;
7711 
7712 	return false;
7713 }
7714 #else
7715 static inline
7716 bool is_dp_peer_can_reuse(struct dp_vdev *vdev,
7717 			  struct dp_peer *peer,
7718 			  uint8_t *peer_mac_addr,
7719 			  enum cdp_peer_type peer_type)
7720 {
7721 	if (peer->bss_peer && (peer->vdev == vdev) &&
7722 	    (qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw,
7723 			 QDF_MAC_ADDR_SIZE) == 0))
7724 		return true;
7725 
7726 	return false;
7727 }
7728 #endif
7729 
7730 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
7731 						uint8_t *peer_mac_addr,
7732 						enum cdp_peer_type peer_type)
7733 {
7734 	struct dp_peer *peer;
7735 	struct dp_soc *soc = vdev->pdev->soc;
7736 
7737 	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
7738 	TAILQ_FOREACH(peer, &soc->inactive_peer_list,
7739 		      inactive_list_elem) {
7740 
7741 		/* reuse bss peer only when vdev matches*/
7742 		if (is_dp_peer_can_reuse(vdev, peer,
7743 					 peer_mac_addr, peer_type)) {
7744 			/* increment ref count for cdp_peer_create*/
7745 			if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG) ==
7746 						QDF_STATUS_SUCCESS) {
7747 				TAILQ_REMOVE(&soc->inactive_peer_list, peer,
7748 					     inactive_list_elem);
7749 				qdf_spin_unlock_bh
7750 					(&soc->inactive_peer_list_lock);
7751 				return peer;
7752 			}
7753 		}
7754 	}
7755 
7756 	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
7757 	return NULL;
7758 }
7759 
7760 #ifdef FEATURE_AST
7761 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
7762 					       struct dp_pdev *pdev,
7763 					       uint8_t *peer_mac_addr)
7764 {
7765 	struct dp_ast_entry *ast_entry;
7766 
7767 	if (soc->ast_offload_support)
7768 		return;
7769 
7770 	qdf_spin_lock_bh(&soc->ast_lock);
7771 	if (soc->ast_override_support)
7772 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
7773 							    pdev->pdev_id);
7774 	else
7775 		ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
7776 
7777 	if (ast_entry && ast_entry->next_hop && !ast_entry->delete_in_progress)
7778 		dp_peer_del_ast(soc, ast_entry);
7779 
7780 	qdf_spin_unlock_bh(&soc->ast_lock);
7781 }
7782 #else
7783 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
7784 					       struct dp_pdev *pdev,
7785 					       uint8_t *peer_mac_addr)
7786 {
7787 }
7788 #endif
7789 
7790 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
7791 /**
7792  * dp_peer_hw_txrx_stats_init() - Initialize hw_txrx_stats_en in dp_peer
7793  * @soc: Datapath soc handle
7794  * @txrx_peer: Datapath peer handle
7795  *
7796  * Return: none
7797  */
7798 static inline
7799 void dp_peer_hw_txrx_stats_init(struct dp_soc *soc,
7800 				struct dp_txrx_peer *txrx_peer)
7801 {
7802 	txrx_peer->hw_txrx_stats_en =
7803 		wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx);
7804 }
7805 #else
7806 static inline
7807 void dp_peer_hw_txrx_stats_init(struct dp_soc *soc,
7808 				struct dp_txrx_peer *txrx_peer)
7809 {
7810 	txrx_peer->hw_txrx_stats_en = 0;
7811 }
7812 #endif
7813 
7814 static QDF_STATUS dp_txrx_peer_detach(struct dp_soc *soc, struct dp_peer *peer)
7815 {
7816 	struct dp_txrx_peer *txrx_peer;
7817 	struct dp_pdev *pdev;
7818 	struct cdp_txrx_peer_params_update params = {0};
7819 
7820 	/* dp_txrx_peer exists for mld peer and legacy peer */
7821 	if (peer->txrx_peer) {
7822 		txrx_peer = peer->txrx_peer;
7823 		peer->txrx_peer = NULL;
7824 		pdev = txrx_peer->vdev->pdev;
7825 
7826 		params.osif_vdev = (void *)peer->vdev->osif_vdev;
7827 		params.peer_mac = peer->mac_addr.raw;
7828 
7829 		dp_wdi_event_handler(WDI_EVENT_PEER_DELETE, soc,
7830 				     (void *)&params, peer->peer_id,
7831 				     WDI_NO_VAL, pdev->pdev_id);
7832 
7833 		dp_peer_defrag_rx_tids_deinit(txrx_peer);
7834 		/*
7835 		 * Deallocate the extended stats contenxt
7836 		 */
7837 		dp_peer_delay_stats_ctx_dealloc(soc, txrx_peer);
7838 		dp_peer_rx_bufq_resources_deinit(txrx_peer);
7839 		dp_peer_jitter_stats_ctx_dealloc(pdev, txrx_peer);
7840 		dp_peer_sawf_stats_ctx_free(soc, txrx_peer);
7841 
7842 		qdf_mem_free(txrx_peer);
7843 	}
7844 
7845 	return QDF_STATUS_SUCCESS;
7846 }
7847 
7848 static QDF_STATUS dp_txrx_peer_attach(struct dp_soc *soc, struct dp_peer *peer)
7849 {
7850 	struct dp_txrx_peer *txrx_peer;
7851 	struct dp_pdev *pdev;
7852 	struct cdp_txrx_peer_params_update params = {0};
7853 
7854 	txrx_peer = (struct dp_txrx_peer *)qdf_mem_malloc(sizeof(*txrx_peer));
7855 
7856 	if (!txrx_peer)
7857 		return QDF_STATUS_E_NOMEM; /* failure */
7858 
7859 	txrx_peer->peer_id = HTT_INVALID_PEER;
7860 	/* initialize the peer_id */
7861 	txrx_peer->vdev = peer->vdev;
7862 	pdev = peer->vdev->pdev;
7863 
7864 	DP_STATS_INIT(txrx_peer);
7865 
7866 	dp_wds_ext_peer_init(txrx_peer);
7867 	dp_peer_rx_bufq_resources_init(txrx_peer);
7868 	dp_peer_hw_txrx_stats_init(soc, txrx_peer);
7869 	/*
7870 	 * Allocate peer extended stats context. Fall through in
7871 	 * case of failure as its not an implicit requirement to have
7872 	 * this object for regular statistics updates.
7873 	 */
7874 	if (dp_peer_delay_stats_ctx_alloc(soc, txrx_peer) !=
7875 					  QDF_STATUS_SUCCESS)
7876 		dp_warn("peer delay_stats ctx alloc failed");
7877 
7878 	/*
7879 	 * Alloctate memory for jitter stats. Fall through in
7880 	 * case of failure as its not an implicit requirement to have
7881 	 * this object for regular statistics updates.
7882 	 */
7883 	if (dp_peer_jitter_stats_ctx_alloc(pdev, txrx_peer) !=
7884 					   QDF_STATUS_SUCCESS)
7885 		dp_warn("peer jitter_stats ctx alloc failed");
7886 
7887 	dp_set_peer_isolation(txrx_peer, false);
7888 
7889 	dp_peer_defrag_rx_tids_init(txrx_peer);
7890 
7891 	if (dp_peer_sawf_stats_ctx_alloc(soc, txrx_peer) != QDF_STATUS_SUCCESS)
7892 		dp_warn("peer sawf stats alloc failed");
7893 
7894 	dp_txrx_peer_attach_add(soc, peer, txrx_peer);
7895 
7896 	params.peer_mac = peer->mac_addr.raw;
7897 	params.osif_vdev = (void *)peer->vdev->osif_vdev;
7898 	params.chip_id = dp_mlo_get_chip_id(soc);
7899 	params.pdev_id = peer->vdev->pdev->pdev_id;
7900 
7901 	dp_wdi_event_handler(WDI_EVENT_TXRX_PEER_CREATE, soc,
7902 			     (void *)&params, peer->peer_id,
7903 			     WDI_NO_VAL, params.pdev_id);
7904 
7905 	return QDF_STATUS_SUCCESS;
7906 }
7907 
7908 static inline
7909 void dp_txrx_peer_stats_clr(struct dp_txrx_peer *txrx_peer)
7910 {
7911 	if (!txrx_peer)
7912 		return;
7913 
7914 	txrx_peer->tx_failed = 0;
7915 	txrx_peer->comp_pkt.num = 0;
7916 	txrx_peer->comp_pkt.bytes = 0;
7917 	txrx_peer->to_stack.num = 0;
7918 	txrx_peer->to_stack.bytes = 0;
7919 
7920 	DP_STATS_CLR(txrx_peer);
7921 	dp_peer_delay_stats_ctx_clr(txrx_peer);
7922 	dp_peer_jitter_stats_ctx_clr(txrx_peer);
7923 }
7924 
7925 /**
7926  * dp_peer_create_wifi3() - attach txrx peer
7927  * @soc_hdl: Datapath soc handle
7928  * @vdev_id: id of vdev
7929  * @peer_mac_addr: Peer MAC address
7930  * @peer_type: link or MLD peer type
7931  *
7932  * Return: 0 on success, -1 on failure
7933  */
7934 static QDF_STATUS
7935 dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
7936 		     uint8_t *peer_mac_addr, enum cdp_peer_type peer_type)
7937 {
7938 	struct dp_peer *peer;
7939 	int i;
7940 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7941 	struct dp_pdev *pdev;
7942 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
7943 	struct dp_vdev *vdev = NULL;
7944 
7945 	if (!peer_mac_addr)
7946 		return QDF_STATUS_E_FAILURE;
7947 
7948 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
7949 
7950 	if (!vdev)
7951 		return QDF_STATUS_E_FAILURE;
7952 
7953 	pdev = vdev->pdev;
7954 	soc = pdev->soc;
7955 
7956 	/*
7957 	 * If a peer entry with given MAC address already exists,
7958 	 * reuse the peer and reset the state of peer.
7959 	 */
7960 	peer = dp_peer_can_reuse(vdev, peer_mac_addr, peer_type);
7961 
7962 	if (peer) {
7963 		qdf_atomic_init(&peer->is_default_route_set);
7964 		dp_peer_cleanup(vdev, peer);
7965 
7966 		dp_peer_vdev_list_add(soc, vdev, peer);
7967 		dp_peer_find_hash_add(soc, peer);
7968 
7969 		if (dp_peer_rx_tids_create(peer) != QDF_STATUS_SUCCESS) {
7970 			dp_alert("RX tid alloc fail for peer %pK (" QDF_MAC_ADDR_FMT ")",
7971 				 peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
7972 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7973 			return QDF_STATUS_E_FAILURE;
7974 		}
7975 
7976 		if (IS_MLO_DP_MLD_PEER(peer))
7977 			dp_mld_peer_init_link_peers_info(peer);
7978 
7979 		qdf_spin_lock_bh(&soc->ast_lock);
7980 		dp_peer_delete_ast_entries(soc, peer);
7981 		qdf_spin_unlock_bh(&soc->ast_lock);
7982 
7983 		if ((vdev->opmode == wlan_op_mode_sta) &&
7984 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
7985 		     QDF_MAC_ADDR_SIZE)) {
7986 			ast_type = CDP_TXRX_AST_TYPE_SELF;
7987 		}
7988 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
7989 
7990 		peer->valid = 1;
7991 		peer->is_tdls_peer = false;
7992 		dp_local_peer_id_alloc(pdev, peer);
7993 
7994 		qdf_spinlock_create(&peer->peer_info_lock);
7995 
7996 		DP_STATS_INIT(peer);
7997 
7998 		/*
7999 		 * In tx_monitor mode, filter may be set for unassociated peer
8000 		 * when unassociated peer get associated peer need to
8001 		 * update tx_cap_enabled flag to support peer filter.
8002 		 */
8003 		if (!IS_MLO_DP_MLD_PEER(peer)) {
8004 			dp_monitor_peer_tx_capture_filter_check(pdev, peer);
8005 			dp_monitor_peer_reset_stats(soc, peer);
8006 		}
8007 
8008 		if (peer->txrx_peer) {
8009 			dp_peer_rx_bufq_resources_init(peer->txrx_peer);
8010 			dp_txrx_peer_stats_clr(peer->txrx_peer);
8011 			dp_set_peer_isolation(peer->txrx_peer, false);
8012 			dp_wds_ext_peer_init(peer->txrx_peer);
8013 			dp_peer_hw_txrx_stats_init(soc, peer->txrx_peer);
8014 		}
8015 
8016 		dp_cfg_event_record_peer_evt(soc, DP_CFG_EVENT_PEER_CREATE,
8017 					     peer, vdev, 1);
8018 		dp_info("vdev %pK Reused peer %pK ("QDF_MAC_ADDR_FMT
8019 			") vdev_ref_cnt "
8020 			"%d peer_ref_cnt: %d",
8021 			vdev, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
8022 			qdf_atomic_read(&vdev->ref_cnt),
8023 			qdf_atomic_read(&peer->ref_cnt));
8024 			dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
8025 
8026 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8027 		return QDF_STATUS_SUCCESS;
8028 	} else {
8029 		/*
8030 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
8031 		 * need to remove the AST entry which was earlier added as a WDS
8032 		 * entry.
8033 		 * If an AST entry exists, but no peer entry exists with a given
8034 		 * MAC addresses, we could deduce it as a WDS entry
8035 		 */
8036 		dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
8037 	}
8038 
8039 #ifdef notyet
8040 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
8041 		soc->mempool_ol_ath_peer);
8042 #else
8043 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
8044 #endif
8045 	wlan_minidump_log(peer,
8046 			  sizeof(*peer),
8047 			  soc->ctrl_psoc,
8048 			  WLAN_MD_DP_PEER, "dp_peer");
8049 	if (!peer) {
8050 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8051 		return QDF_STATUS_E_FAILURE; /* failure */
8052 	}
8053 
8054 	qdf_mem_zero(peer, sizeof(struct dp_peer));
8055 
8056 	/* store provided params */
8057 	peer->vdev = vdev;
8058 
8059 	/* initialize the peer_id */
8060 	peer->peer_id = HTT_INVALID_PEER;
8061 
8062 	qdf_mem_copy(
8063 		&peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE);
8064 
8065 	DP_PEER_SET_TYPE(peer, peer_type);
8066 	if (IS_MLO_DP_MLD_PEER(peer)) {
8067 		if (dp_txrx_peer_attach(soc, peer) !=
8068 				QDF_STATUS_SUCCESS)
8069 			goto fail; /* failure */
8070 
8071 		dp_mld_peer_init_link_peers_info(peer);
8072 	} else if (dp_monitor_peer_attach(soc, peer) !=
8073 				QDF_STATUS_SUCCESS)
8074 		dp_warn("peer monitor ctx alloc failed");
8075 
8076 	TAILQ_INIT(&peer->ast_entry_list);
8077 
8078 	/* get the vdev reference for new peer */
8079 	dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CHILD);
8080 
8081 	if ((vdev->opmode == wlan_op_mode_sta) &&
8082 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
8083 			 QDF_MAC_ADDR_SIZE)) {
8084 		ast_type = CDP_TXRX_AST_TYPE_SELF;
8085 	}
8086 	qdf_spinlock_create(&peer->peer_state_lock);
8087 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
8088 	qdf_spinlock_create(&peer->peer_info_lock);
8089 
8090 	/* reset the ast index to flowid table */
8091 	dp_peer_reset_flowq_map(peer);
8092 
8093 	qdf_atomic_init(&peer->ref_cnt);
8094 
8095 	for (i = 0; i < DP_MOD_ID_MAX; i++)
8096 		qdf_atomic_init(&peer->mod_refs[i]);
8097 
8098 	/* keep one reference for attach */
8099 	qdf_atomic_inc(&peer->ref_cnt);
8100 	qdf_atomic_inc(&peer->mod_refs[DP_MOD_ID_CONFIG]);
8101 
8102 	dp_peer_vdev_list_add(soc, vdev, peer);
8103 
8104 	/* TODO: See if hash based search is required */
8105 	dp_peer_find_hash_add(soc, peer);
8106 
8107 	/* Initialize the peer state */
8108 	peer->state = OL_TXRX_PEER_STATE_DISC;
8109 
8110 	dp_cfg_event_record_peer_evt(soc, DP_CFG_EVENT_PEER_CREATE,
8111 				     peer, vdev, 0);
8112 	dp_info("vdev %pK created peer %pK ("QDF_MAC_ADDR_FMT") vdev_ref_cnt "
8113 		"%d peer_ref_cnt: %d",
8114 		vdev, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
8115 		qdf_atomic_read(&vdev->ref_cnt),
8116 		qdf_atomic_read(&peer->ref_cnt));
8117 	/*
8118 	 * For every peer MAp message search and set if bss_peer
8119 	 */
8120 	if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
8121 			QDF_MAC_ADDR_SIZE) == 0 &&
8122 			(wlan_op_mode_sta != vdev->opmode)) {
8123 		dp_info("vdev bss_peer!!");
8124 		peer->bss_peer = 1;
8125 		if (peer->txrx_peer)
8126 			peer->txrx_peer->bss_peer = 1;
8127 	}
8128 
8129 	if (wlan_op_mode_sta == vdev->opmode &&
8130 	    qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
8131 			QDF_MAC_ADDR_SIZE) == 0) {
8132 		peer->sta_self_peer = 1;
8133 	}
8134 
8135 	if (dp_peer_rx_tids_create(peer) != QDF_STATUS_SUCCESS) {
8136 		dp_alert("RX tid alloc fail for peer %pK (" QDF_MAC_ADDR_FMT ")",
8137 			 peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
8138 		goto fail;
8139 	}
8140 
8141 	peer->valid = 1;
8142 	dp_local_peer_id_alloc(pdev, peer);
8143 	DP_STATS_INIT(peer);
8144 
8145 	if (dp_peer_sawf_ctx_alloc(soc, peer) != QDF_STATUS_SUCCESS)
8146 		dp_warn("peer sawf context alloc failed");
8147 
8148 	dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
8149 
8150 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8151 
8152 	return QDF_STATUS_SUCCESS;
8153 fail:
8154 	qdf_mem_free(peer);
8155 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8156 
8157 	return QDF_STATUS_E_FAILURE;
8158 }
8159 
8160 static QDF_STATUS dp_peer_legacy_setup(struct dp_soc *soc, struct dp_peer *peer)
8161 {
8162 	/* txrx_peer might exist already in peer reuse case */
8163 	if (peer->txrx_peer)
8164 		return QDF_STATUS_SUCCESS;
8165 
8166 	if (dp_txrx_peer_attach(soc, peer) !=
8167 				QDF_STATUS_SUCCESS) {
8168 		dp_err("peer txrx ctx alloc failed");
8169 		return QDF_STATUS_E_FAILURE;
8170 	}
8171 
8172 	return QDF_STATUS_SUCCESS;
8173 }
8174 
8175 #ifdef WLAN_FEATURE_11BE_MLO
8176 QDF_STATUS dp_peer_mlo_setup(
8177 			struct dp_soc *soc,
8178 			struct dp_peer *peer,
8179 			uint8_t vdev_id,
8180 			struct cdp_peer_setup_info *setup_info)
8181 {
8182 	struct dp_peer *mld_peer = NULL;
8183 	struct cdp_txrx_peer_params_update params = {0};
8184 
8185 	/* Non-MLO connection, do nothing */
8186 	if (!setup_info || !setup_info->mld_peer_mac)
8187 		return QDF_STATUS_SUCCESS;
8188 
8189 	dp_cfg_event_record_peer_setup_evt(soc, DP_CFG_EVENT_MLO_SETUP,
8190 					   peer, NULL, vdev_id, setup_info);
8191 	dp_info("link peer: " QDF_MAC_ADDR_FMT "mld peer: " QDF_MAC_ADDR_FMT
8192 		"first_link %d, primary_link %d",
8193 		QDF_MAC_ADDR_REF(peer->mac_addr.raw),
8194 		QDF_MAC_ADDR_REF(setup_info->mld_peer_mac),
8195 		setup_info->is_first_link,
8196 		setup_info->is_primary_link);
8197 
8198 	/* if this is the first link peer */
8199 	if (setup_info->is_first_link)
8200 		/* create MLD peer */
8201 		dp_peer_create_wifi3((struct cdp_soc_t *)soc,
8202 				     vdev_id,
8203 				     setup_info->mld_peer_mac,
8204 				     CDP_MLD_PEER_TYPE);
8205 
8206 	if (peer->vdev->opmode == wlan_op_mode_sta &&
8207 	    setup_info->is_primary_link) {
8208 		struct cdp_txrx_peer_params_update params = {0};
8209 
8210 		params.chip_id = dp_mlo_get_chip_id(soc);
8211 		params.pdev_id = peer->vdev->pdev->pdev_id;
8212 		params.osif_vdev = peer->vdev->osif_vdev;
8213 
8214 		dp_wdi_event_handler(
8215 				WDI_EVENT_STA_PRIMARY_UMAC_UPDATE,
8216 				soc,
8217 				(void *)&params, peer->peer_id,
8218 				WDI_NO_VAL, params.pdev_id);
8219 	}
8220 
8221 	peer->first_link = setup_info->is_first_link;
8222 	peer->primary_link = setup_info->is_primary_link;
8223 	mld_peer = dp_mld_peer_find_hash_find(soc,
8224 					      setup_info->mld_peer_mac,
8225 					      0, vdev_id, DP_MOD_ID_CDP);
8226 	if (mld_peer) {
8227 		if (setup_info->is_first_link) {
8228 			/* assign rx_tid to mld peer */
8229 			mld_peer->rx_tid = peer->rx_tid;
8230 			/* no cdp_peer_setup for MLD peer,
8231 			 * set it for addba processing
8232 			 */
8233 			qdf_atomic_set(&mld_peer->is_default_route_set, 1);
8234 		} else {
8235 			/* free link peer original rx_tids mem */
8236 			dp_peer_rx_tids_destroy(peer);
8237 			/* assign mld peer rx_tid to link peer */
8238 			peer->rx_tid = mld_peer->rx_tid;
8239 		}
8240 
8241 		if (setup_info->is_primary_link &&
8242 		    !setup_info->is_first_link) {
8243 			struct dp_vdev *prev_vdev;
8244 			/*
8245 			 * if first link is not the primary link,
8246 			 * then need to change mld_peer->vdev as
8247 			 * primary link dp_vdev is not same one
8248 			 * during mld peer creation.
8249 			 */
8250 			prev_vdev = mld_peer->vdev;
8251 			dp_info("Primary link is not the first link. vdev: %pK,"
8252 				"vdev_id %d vdev_ref_cnt %d",
8253 				mld_peer->vdev, vdev_id,
8254 				qdf_atomic_read(&mld_peer->vdev->ref_cnt));
8255 			/* release the ref to original dp_vdev */
8256 			dp_vdev_unref_delete(soc, mld_peer->vdev,
8257 					     DP_MOD_ID_CHILD);
8258 			/*
8259 			 * get the ref to new dp_vdev,
8260 			 * increase dp_vdev ref_cnt
8261 			 */
8262 			mld_peer->vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8263 							       DP_MOD_ID_CHILD);
8264 			mld_peer->txrx_peer->vdev = mld_peer->vdev;
8265 
8266 			dp_cfg_event_record_mlo_setup_vdev_update_evt(
8267 					soc, mld_peer, prev_vdev,
8268 					mld_peer->vdev);
8269 
8270 			params.osif_vdev = (void *)peer->vdev->osif_vdev;
8271 			params.peer_mac = peer->mac_addr.raw;
8272 			params.chip_id = dp_mlo_get_chip_id(soc);
8273 			params.pdev_id = peer->vdev->pdev->pdev_id;
8274 
8275 			dp_wdi_event_handler(
8276 					WDI_EVENT_PEER_PRIMARY_UMAC_UPDATE,
8277 					soc, (void *)&params, peer->peer_id,
8278 					WDI_NO_VAL, params.pdev_id);
8279 		}
8280 
8281 		/* associate mld and link peer */
8282 		dp_link_peer_add_mld_peer(peer, mld_peer);
8283 		dp_mld_peer_add_link_peer(mld_peer, peer);
8284 
8285 		mld_peer->txrx_peer->mld_peer = 1;
8286 		dp_peer_unref_delete(mld_peer, DP_MOD_ID_CDP);
8287 	} else {
8288 		peer->mld_peer = NULL;
8289 		dp_err("mld peer" QDF_MAC_ADDR_FMT "not found!",
8290 		       QDF_MAC_ADDR_REF(setup_info->mld_peer_mac));
8291 		return QDF_STATUS_E_FAILURE;
8292 	}
8293 
8294 	return QDF_STATUS_SUCCESS;
8295 }
8296 
8297 /**
8298  * dp_mlo_peer_authorize() - authorize MLO peer
8299  * @soc: soc handle
8300  * @peer: pointer to link peer
8301  *
8302  * Return: void
8303  */
8304 static void dp_mlo_peer_authorize(struct dp_soc *soc,
8305 				  struct dp_peer *peer)
8306 {
8307 	int i;
8308 	struct dp_peer *link_peer = NULL;
8309 	struct dp_peer *mld_peer = peer->mld_peer;
8310 	struct dp_mld_link_peers link_peers_info;
8311 
8312 	if (!mld_peer)
8313 		return;
8314 
8315 	/* get link peers with reference */
8316 	dp_get_link_peers_ref_from_mld_peer(soc, mld_peer,
8317 					    &link_peers_info,
8318 					    DP_MOD_ID_CDP);
8319 
8320 	for (i = 0; i < link_peers_info.num_links; i++) {
8321 		link_peer = link_peers_info.link_peers[i];
8322 
8323 		if (!link_peer->authorize) {
8324 			dp_release_link_peers_ref(&link_peers_info,
8325 						  DP_MOD_ID_CDP);
8326 			mld_peer->authorize = false;
8327 			return;
8328 		}
8329 	}
8330 
8331 	/* if we are here all link peers are authorized,
8332 	 * authorize ml_peer also
8333 	 */
8334 	mld_peer->authorize = true;
8335 
8336 	/* release link peers reference */
8337 	dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
8338 }
8339 #endif
8340 
8341 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
8342 				  enum cdp_host_reo_dest_ring *reo_dest,
8343 				  bool *hash_based)
8344 {
8345 	struct dp_soc *soc;
8346 	struct dp_pdev *pdev;
8347 
8348 	pdev = vdev->pdev;
8349 	soc = pdev->soc;
8350 	/*
8351 	 * hash based steering is disabled for Radios which are offloaded
8352 	 * to NSS
8353 	 */
8354 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
8355 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
8356 
8357 	/*
8358 	 * Below line of code will ensure the proper reo_dest ring is chosen
8359 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
8360 	 */
8361 	*reo_dest = pdev->reo_dest;
8362 }
8363 
8364 #ifdef IPA_OFFLOAD
8365 /**
8366  * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
8367  * @vdev: Virtual device
8368  *
8369  * Return: true if the vdev is of subtype P2P
8370  *	   false if the vdev is of any other subtype
8371  */
8372 static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
8373 {
8374 	if (vdev->subtype == wlan_op_subtype_p2p_device ||
8375 	    vdev->subtype == wlan_op_subtype_p2p_cli ||
8376 	    vdev->subtype == wlan_op_subtype_p2p_go)
8377 		return true;
8378 
8379 	return false;
8380 }
8381 
8382 /**
8383  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
8384  * @vdev: Datapath VDEV handle
8385  * @setup_info:
8386  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
8387  * @hash_based: pointer to hash value (enabled/disabled) to be populated
8388  * @lmac_peer_id_msb:
8389  *
8390  * If IPA is enabled in ini, for SAP mode, disable hash based
8391  * steering, use default reo_dst ring for RX. Use config values for other modes.
8392  *
8393  * Return: None
8394  */
8395 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
8396 				       struct cdp_peer_setup_info *setup_info,
8397 				       enum cdp_host_reo_dest_ring *reo_dest,
8398 				       bool *hash_based,
8399 				       uint8_t *lmac_peer_id_msb)
8400 {
8401 	struct dp_soc *soc;
8402 	struct dp_pdev *pdev;
8403 
8404 	pdev = vdev->pdev;
8405 	soc = pdev->soc;
8406 
8407 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
8408 
8409 	/* For P2P-GO interfaces we do not need to change the REO
8410 	 * configuration even if IPA config is enabled
8411 	 */
8412 	if (dp_is_vdev_subtype_p2p(vdev))
8413 		return;
8414 
8415 	/*
8416 	 * If IPA is enabled, disable hash-based flow steering and set
8417 	 * reo_dest_ring_4 as the REO ring to receive packets on.
8418 	 * IPA is configured to reap reo_dest_ring_4.
8419 	 *
8420 	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
8421 	 * value enum value is from 1 - 4.
8422 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
8423 	 */
8424 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
8425 		if (vdev->opmode == wlan_op_mode_ap) {
8426 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
8427 			*hash_based = 0;
8428 		} else if (vdev->opmode == wlan_op_mode_sta &&
8429 			   dp_ipa_is_mdm_platform()) {
8430 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
8431 		}
8432 	}
8433 }
8434 
8435 #else
8436 
8437 /**
8438  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
8439  * @vdev: Datapath VDEV handle
8440  * @setup_info:
8441  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
8442  * @hash_based: pointer to hash value (enabled/disabled) to be populated
8443  * @lmac_peer_id_msb:
8444  *
8445  * Use system config values for hash based steering.
8446  * Return: None
8447  */
8448 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
8449 				       struct cdp_peer_setup_info *setup_info,
8450 				       enum cdp_host_reo_dest_ring *reo_dest,
8451 				       bool *hash_based,
8452 				       uint8_t *lmac_peer_id_msb)
8453 {
8454 	struct dp_soc *soc = vdev->pdev->soc;
8455 
8456 	soc->arch_ops.peer_get_reo_hash(vdev, setup_info, reo_dest, hash_based,
8457 					lmac_peer_id_msb);
8458 }
8459 #endif /* IPA_OFFLOAD */
8460 
8461 /**
8462  * dp_peer_setup_wifi3() - initialize the peer
8463  * @soc_hdl: soc handle object
8464  * @vdev_id: vdev_id of vdev object
8465  * @peer_mac: Peer's mac address
8466  * @setup_info: peer setup info for MLO
8467  *
8468  * Return: QDF_STATUS
8469  */
8470 static QDF_STATUS
8471 dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8472 		    uint8_t *peer_mac,
8473 		    struct cdp_peer_setup_info *setup_info)
8474 {
8475 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8476 	struct dp_pdev *pdev;
8477 	bool hash_based = 0;
8478 	enum cdp_host_reo_dest_ring reo_dest;
8479 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8480 	struct dp_vdev *vdev = NULL;
8481 	struct dp_peer *peer =
8482 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
8483 					       DP_MOD_ID_CDP);
8484 	struct dp_peer *mld_peer = NULL;
8485 	enum wlan_op_mode vdev_opmode;
8486 	uint8_t lmac_peer_id_msb = 0;
8487 
8488 	if (!peer)
8489 		return QDF_STATUS_E_FAILURE;
8490 
8491 	vdev = peer->vdev;
8492 	if (!vdev) {
8493 		status = QDF_STATUS_E_FAILURE;
8494 		goto fail;
8495 	}
8496 
8497 	/* save vdev related member in case vdev freed */
8498 	vdev_opmode = vdev->opmode;
8499 	pdev = vdev->pdev;
8500 	dp_peer_setup_get_reo_hash(vdev, setup_info,
8501 				   &reo_dest, &hash_based,
8502 				   &lmac_peer_id_msb);
8503 
8504 	dp_cfg_event_record_peer_setup_evt(soc, DP_CFG_EVENT_PEER_SETUP,
8505 					   peer, vdev, vdev->vdev_id,
8506 					   setup_info);
8507 	dp_info("pdev: %d vdev :%d opmode:%u peer %pK (" QDF_MAC_ADDR_FMT ") "
8508 		"hash-based-steering:%d default-reo_dest:%u",
8509 		pdev->pdev_id, vdev->vdev_id,
8510 		vdev->opmode, peer,
8511 		QDF_MAC_ADDR_REF(peer->mac_addr.raw), hash_based, reo_dest);
8512 
8513 	/*
8514 	 * There are corner cases where the AD1 = AD2 = "VAPs address"
8515 	 * i.e both the devices have same MAC address. In these
8516 	 * cases we want such pkts to be processed in NULL Q handler
8517 	 * which is REO2TCL ring. for this reason we should
8518 	 * not setup reo_queues and default route for bss_peer.
8519 	 */
8520 	if (!IS_MLO_DP_MLD_PEER(peer))
8521 		dp_monitor_peer_tx_init(pdev, peer);
8522 
8523 	if (!setup_info)
8524 		if (dp_peer_legacy_setup(soc, peer) !=
8525 				QDF_STATUS_SUCCESS) {
8526 			status = QDF_STATUS_E_RESOURCES;
8527 			goto fail;
8528 		}
8529 
8530 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
8531 		status = QDF_STATUS_E_FAILURE;
8532 		goto fail;
8533 	}
8534 
8535 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
8536 		/* TODO: Check the destination ring number to be passed to FW */
8537 		soc->cdp_soc.ol_ops->peer_set_default_routing(
8538 				soc->ctrl_psoc,
8539 				peer->vdev->pdev->pdev_id,
8540 				peer->mac_addr.raw,
8541 				peer->vdev->vdev_id, hash_based, reo_dest,
8542 				lmac_peer_id_msb);
8543 	}
8544 
8545 	qdf_atomic_set(&peer->is_default_route_set, 1);
8546 
8547 	status = dp_peer_mlo_setup(soc, peer, vdev->vdev_id, setup_info);
8548 	if (QDF_IS_STATUS_ERROR(status)) {
8549 		dp_peer_err("peer mlo setup failed");
8550 		qdf_assert_always(0);
8551 	}
8552 
8553 	if (vdev_opmode != wlan_op_mode_monitor) {
8554 		/* In case of MLD peer, switch peer to mld peer and
8555 		 * do peer_rx_init.
8556 		 */
8557 		if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
8558 		    IS_MLO_DP_LINK_PEER(peer)) {
8559 			if (setup_info && setup_info->is_first_link) {
8560 				mld_peer = DP_GET_MLD_PEER_FROM_PEER(peer);
8561 				if (mld_peer)
8562 					dp_peer_rx_init(pdev, mld_peer);
8563 				else
8564 					dp_peer_err("MLD peer null. Primary link peer:%pK", peer);
8565 			}
8566 		} else {
8567 			dp_peer_rx_init(pdev, peer);
8568 		}
8569 	}
8570 
8571 	dp_soc_txrx_peer_setup(vdev_opmode, soc, peer);
8572 
8573 	if (!IS_MLO_DP_MLD_PEER(peer))
8574 		dp_peer_ppdu_delayed_ba_init(peer);
8575 
8576 fail:
8577 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8578 	return status;
8579 }
8580 
8581 /**
8582  * dp_cp_peer_del_resp_handler() - Handle the peer delete response
8583  * @soc_hdl: Datapath SOC handle
8584  * @vdev_id: id of virtual device object
8585  * @mac_addr: Mac address of the peer
8586  *
8587  * Return: QDF_STATUS
8588  */
8589 static QDF_STATUS dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl,
8590 					      uint8_t vdev_id,
8591 					      uint8_t *mac_addr)
8592 {
8593 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8594 	struct dp_ast_entry  *ast_entry = NULL;
8595 	txrx_ast_free_cb cb = NULL;
8596 	void *cookie;
8597 
8598 	if (soc->ast_offload_support)
8599 		return QDF_STATUS_E_INVAL;
8600 
8601 	qdf_spin_lock_bh(&soc->ast_lock);
8602 
8603 	ast_entry =
8604 		dp_peer_ast_hash_find_by_vdevid(soc, mac_addr,
8605 						vdev_id);
8606 
8607 	/* in case of qwrap we have multiple BSS peers
8608 	 * with same mac address
8609 	 *
8610 	 * AST entry for this mac address will be created
8611 	 * only for one peer hence it will be NULL here
8612 	 */
8613 	if ((!ast_entry || !ast_entry->delete_in_progress) ||
8614 	    (ast_entry->peer_id != HTT_INVALID_PEER)) {
8615 		qdf_spin_unlock_bh(&soc->ast_lock);
8616 		return QDF_STATUS_E_FAILURE;
8617 	}
8618 
8619 	if (ast_entry->is_mapped)
8620 		soc->ast_table[ast_entry->ast_idx] = NULL;
8621 
8622 	DP_STATS_INC(soc, ast.deleted, 1);
8623 	dp_peer_ast_hash_remove(soc, ast_entry);
8624 
8625 	cb = ast_entry->callback;
8626 	cookie = ast_entry->cookie;
8627 	ast_entry->callback = NULL;
8628 	ast_entry->cookie = NULL;
8629 
8630 	soc->num_ast_entries--;
8631 	qdf_spin_unlock_bh(&soc->ast_lock);
8632 
8633 	if (cb) {
8634 		cb(soc->ctrl_psoc,
8635 		   dp_soc_to_cdp_soc(soc),
8636 		   cookie,
8637 		   CDP_TXRX_AST_DELETED);
8638 	}
8639 	qdf_mem_free(ast_entry);
8640 
8641 	return QDF_STATUS_SUCCESS;
8642 }
8643 
8644 /**
8645  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
8646  * @txrx_soc: cdp soc handle
8647  * @ac: Access category
8648  * @value: timeout value in millisec
8649  *
8650  * Return: void
8651  */
8652 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
8653 				    uint8_t ac, uint32_t value)
8654 {
8655 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
8656 
8657 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
8658 }
8659 
8660 /**
8661  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
8662  * @txrx_soc: cdp soc handle
8663  * @ac: access category
8664  * @value: timeout value in millisec
8665  *
8666  * Return: void
8667  */
8668 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
8669 				    uint8_t ac, uint32_t *value)
8670 {
8671 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
8672 
8673 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
8674 }
8675 
8676 /**
8677  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
8678  * @txrx_soc: cdp soc handle
8679  * @pdev_id: id of physical device object
8680  * @val: reo destination ring index (1 - 4)
8681  *
8682  * Return: QDF_STATUS
8683  */
8684 static QDF_STATUS
8685 dp_set_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id,
8686 		     enum cdp_host_reo_dest_ring val)
8687 {
8688 	struct dp_pdev *pdev =
8689 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
8690 						   pdev_id);
8691 
8692 	if (pdev) {
8693 		pdev->reo_dest = val;
8694 		return QDF_STATUS_SUCCESS;
8695 	}
8696 
8697 	return QDF_STATUS_E_FAILURE;
8698 }
8699 
8700 /**
8701  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
8702  * @txrx_soc: cdp soc handle
8703  * @pdev_id: id of physical device object
8704  *
8705  * Return: reo destination ring index
8706  */
8707 static enum cdp_host_reo_dest_ring
8708 dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id)
8709 {
8710 	struct dp_pdev *pdev =
8711 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
8712 						   pdev_id);
8713 
8714 	if (pdev)
8715 		return pdev->reo_dest;
8716 	else
8717 		return cdp_host_reo_dest_ring_unknown;
8718 }
8719 
8720 #ifdef WLAN_SUPPORT_MSCS
8721 /**
8722  * dp_record_mscs_params() - Record MSCS parameters sent by the STA in
8723  * the MSCS Request to the AP.
8724  * @soc_hdl: Datapath soc handle
8725  * @peer_mac: STA Mac address
8726  * @vdev_id: ID of the vdev handle
8727  * @mscs_params: Structure having MSCS parameters obtained
8728  * from handshake
8729  * @active: Flag to set MSCS active/inactive
8730  *
8731  * The AP makes a note of these parameters while comparing the MSDUs
8732  * sent by the STA, to send the downlink traffic with correct User
8733  * priority.
8734  *
8735  * Return: QDF_STATUS - Success/Invalid
8736  */
8737 static QDF_STATUS
8738 dp_record_mscs_params(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
8739 		      uint8_t vdev_id, struct cdp_mscs_params *mscs_params,
8740 		      bool active)
8741 {
8742 	struct dp_peer *peer;
8743 	QDF_STATUS status = QDF_STATUS_E_INVAL;
8744 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8745 
8746 	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
8747 				      DP_MOD_ID_CDP);
8748 
8749 	if (!peer) {
8750 		dp_err("Peer is NULL!");
8751 		goto fail;
8752 	}
8753 	if (!active) {
8754 		dp_info("MSCS Procedure is terminated");
8755 		peer->mscs_active = active;
8756 		goto fail;
8757 	}
8758 
8759 	if (mscs_params->classifier_type == IEEE80211_TCLAS_MASK_CLA_TYPE_4) {
8760 		/* Populate entries inside IPV4 database first */
8761 		peer->mscs_ipv4_parameter.user_priority_bitmap =
8762 			mscs_params->user_pri_bitmap;
8763 		peer->mscs_ipv4_parameter.user_priority_limit =
8764 			mscs_params->user_pri_limit;
8765 		peer->mscs_ipv4_parameter.classifier_mask =
8766 			mscs_params->classifier_mask;
8767 
8768 		/* Populate entries inside IPV6 database */
8769 		peer->mscs_ipv6_parameter.user_priority_bitmap =
8770 			mscs_params->user_pri_bitmap;
8771 		peer->mscs_ipv6_parameter.user_priority_limit =
8772 			mscs_params->user_pri_limit;
8773 		peer->mscs_ipv6_parameter.classifier_mask =
8774 			mscs_params->classifier_mask;
8775 		peer->mscs_active = 1;
8776 		dp_info("\n\tMSCS Procedure request based parameters for "QDF_MAC_ADDR_FMT"\n"
8777 			"\tClassifier_type = %d\tUser priority bitmap = %x\n"
8778 			"\tUser priority limit = %x\tClassifier mask = %x",
8779 			QDF_MAC_ADDR_REF(peer_mac),
8780 			mscs_params->classifier_type,
8781 			peer->mscs_ipv4_parameter.user_priority_bitmap,
8782 			peer->mscs_ipv4_parameter.user_priority_limit,
8783 			peer->mscs_ipv4_parameter.classifier_mask);
8784 	}
8785 
8786 	status = QDF_STATUS_SUCCESS;
8787 fail:
8788 	if (peer)
8789 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8790 	return status;
8791 }
8792 #endif
8793 
8794 /**
8795  * dp_get_sec_type() - Get the security type
8796  * @soc: soc handle
8797  * @vdev_id: id of dp handle
8798  * @peer_mac: mac of datapath PEER handle
8799  * @sec_idx:    Security id (mcast, ucast)
8800  *
8801  * return sec_type: Security type
8802  */
8803 static int dp_get_sec_type(struct cdp_soc_t *soc, uint8_t vdev_id,
8804 			   uint8_t *peer_mac, uint8_t sec_idx)
8805 {
8806 	int sec_type = 0;
8807 	struct dp_peer *peer =
8808 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
8809 						       peer_mac, 0, vdev_id,
8810 						       DP_MOD_ID_CDP);
8811 
8812 	if (!peer) {
8813 		dp_cdp_err("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
8814 		return sec_type;
8815 	}
8816 
8817 	if (!peer->txrx_peer) {
8818 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8819 		dp_peer_debug("%pK: txrx peer is NULL!\n", soc);
8820 		return sec_type;
8821 	}
8822 	sec_type = peer->txrx_peer->security[sec_idx].sec_type;
8823 
8824 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8825 	return sec_type;
8826 }
8827 
8828 /**
8829  * dp_peer_authorize() - authorize txrx peer
8830  * @soc_hdl: soc handle
8831  * @vdev_id: id of dp handle
8832  * @peer_mac: mac of datapath PEER handle
8833  * @authorize:
8834  *
8835  * Return: QDF_STATUS
8836  *
8837  */
8838 static QDF_STATUS
8839 dp_peer_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8840 		  uint8_t *peer_mac, uint32_t authorize)
8841 {
8842 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8843 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8844 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac,
8845 							      0, vdev_id,
8846 							      DP_MOD_ID_CDP);
8847 
8848 	if (!peer) {
8849 		dp_cdp_debug("%pK: Peer is NULL!\n", soc);
8850 		status = QDF_STATUS_E_FAILURE;
8851 	} else {
8852 		peer->authorize = authorize ? 1 : 0;
8853 		if (peer->txrx_peer)
8854 			peer->txrx_peer->authorize = peer->authorize;
8855 
8856 		if (!peer->authorize)
8857 			dp_peer_flush_frags(soc_hdl, vdev_id, peer_mac);
8858 
8859 		dp_mlo_peer_authorize(soc, peer);
8860 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8861 	}
8862 
8863 	return status;
8864 }
8865 
8866 /**
8867  * dp_peer_get_authorize() - get peer authorize status
8868  * @soc_hdl: soc handle
8869  * @vdev_id: id of dp handle
8870  * @peer_mac: mac of datapath PEER handle
8871  *
8872  * Return: true is peer is authorized, false otherwise
8873  */
8874 static bool
8875 dp_peer_get_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8876 		      uint8_t *peer_mac)
8877 {
8878 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8879 	bool authorize = false;
8880 	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
8881 						      0, vdev_id,
8882 						      DP_MOD_ID_CDP);
8883 
8884 	if (!peer) {
8885 		dp_cdp_debug("%pK: Peer is NULL!\n", soc);
8886 		return authorize;
8887 	}
8888 
8889 	authorize = peer->authorize;
8890 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8891 
8892 	return authorize;
8893 }
8894 
8895 void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev,
8896 			  enum dp_mod_id mod_id)
8897 {
8898 	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
8899 	void *vdev_delete_context = NULL;
8900 	uint8_t vdev_id = vdev->vdev_id;
8901 	struct dp_pdev *pdev = vdev->pdev;
8902 	struct dp_vdev *tmp_vdev = NULL;
8903 	uint8_t found = 0;
8904 
8905 	QDF_ASSERT(qdf_atomic_dec_return(&vdev->mod_refs[mod_id]) >= 0);
8906 
8907 	/* Return if this is not the last reference*/
8908 	if (!qdf_atomic_dec_and_test(&vdev->ref_cnt))
8909 		return;
8910 
8911 	/*
8912 	 * This should be set as last reference need to released
8913 	 * after cdp_vdev_detach() is called
8914 	 *
8915 	 * if this assert is hit there is a ref count issue
8916 	 */
8917 	QDF_ASSERT(vdev->delete.pending);
8918 
8919 	vdev_delete_cb = vdev->delete.callback;
8920 	vdev_delete_context = vdev->delete.context;
8921 
8922 	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")- its last peer is done",
8923 		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
8924 
8925 	if (wlan_op_mode_monitor == vdev->opmode) {
8926 		dp_monitor_vdev_delete(soc, vdev);
8927 		goto free_vdev;
8928 	}
8929 
8930 	/* all peers are gone, go ahead and delete it */
8931 	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
8932 			FLOW_TYPE_VDEV, vdev_id);
8933 	dp_tx_vdev_detach(vdev);
8934 	dp_monitor_vdev_detach(vdev);
8935 
8936 free_vdev:
8937 	qdf_spinlock_destroy(&vdev->peer_list_lock);
8938 
8939 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
8940 	TAILQ_FOREACH(tmp_vdev, &soc->inactive_vdev_list,
8941 		      inactive_list_elem) {
8942 		if (tmp_vdev == vdev) {
8943 			found = 1;
8944 			break;
8945 		}
8946 	}
8947 	if (found)
8948 		TAILQ_REMOVE(&soc->inactive_vdev_list, vdev,
8949 			     inactive_list_elem);
8950 	/* delete this peer from the list */
8951 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
8952 
8953 	dp_cfg_event_record_vdev_evt(soc, DP_CFG_EVENT_VDEV_UNREF_DEL,
8954 				     vdev);
8955 	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")",
8956 		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
8957 	wlan_minidump_remove(vdev, sizeof(*vdev), soc->ctrl_psoc,
8958 			     WLAN_MD_DP_VDEV, "dp_vdev");
8959 	qdf_mem_free(vdev);
8960 	vdev = NULL;
8961 
8962 	if (vdev_delete_cb)
8963 		vdev_delete_cb(vdev_delete_context);
8964 }
8965 
8966 qdf_export_symbol(dp_vdev_unref_delete);
8967 
8968 void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id mod_id)
8969 {
8970 	struct dp_vdev *vdev = peer->vdev;
8971 	struct dp_pdev *pdev = vdev->pdev;
8972 	struct dp_soc *soc = pdev->soc;
8973 	uint16_t peer_id;
8974 	struct dp_peer *tmp_peer;
8975 	bool found = false;
8976 
8977 	if (mod_id > DP_MOD_ID_RX)
8978 		QDF_ASSERT(qdf_atomic_dec_return(&peer->mod_refs[mod_id]) >= 0);
8979 
8980 	/*
8981 	 * Hold the lock all the way from checking if the peer ref count
8982 	 * is zero until the peer references are removed from the hash
8983 	 * table and vdev list (if the peer ref count is zero).
8984 	 * This protects against a new HL tx operation starting to use the
8985 	 * peer object just after this function concludes it's done being used.
8986 	 * Furthermore, the lock needs to be held while checking whether the
8987 	 * vdev's list of peers is empty, to make sure that list is not modified
8988 	 * concurrently with the empty check.
8989 	 */
8990 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
8991 		peer_id = peer->peer_id;
8992 
8993 		/*
8994 		 * Make sure that the reference to the peer in
8995 		 * peer object map is removed
8996 		 */
8997 		QDF_ASSERT(peer_id == HTT_INVALID_PEER);
8998 
8999 		dp_peer_info("Deleting peer %pK ("QDF_MAC_ADDR_FMT")", peer,
9000 			     QDF_MAC_ADDR_REF(peer->mac_addr.raw));
9001 
9002 		dp_peer_sawf_ctx_free(soc, peer);
9003 
9004 		wlan_minidump_remove(peer, sizeof(*peer), soc->ctrl_psoc,
9005 				     WLAN_MD_DP_PEER, "dp_peer");
9006 
9007 		qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
9008 		TAILQ_FOREACH(tmp_peer, &soc->inactive_peer_list,
9009 			      inactive_list_elem) {
9010 			if (tmp_peer == peer) {
9011 				found = 1;
9012 				break;
9013 			}
9014 		}
9015 		if (found)
9016 			TAILQ_REMOVE(&soc->inactive_peer_list, peer,
9017 				     inactive_list_elem);
9018 		/* delete this peer from the list */
9019 		qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
9020 		DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
9021 		dp_peer_update_state(soc, peer, DP_PEER_STATE_FREED);
9022 
9023 		/* cleanup the peer data */
9024 		dp_peer_cleanup(vdev, peer);
9025 
9026 		if (!IS_MLO_DP_MLD_PEER(peer))
9027 			dp_monitor_peer_detach(soc, peer);
9028 
9029 		qdf_spinlock_destroy(&peer->peer_state_lock);
9030 
9031 		dp_txrx_peer_detach(soc, peer);
9032 		dp_cfg_event_record_peer_evt(soc, DP_CFG_EVENT_PEER_UNREF_DEL,
9033 					     peer, vdev, 0);
9034 		qdf_mem_free(peer);
9035 
9036 		/*
9037 		 * Decrement ref count taken at peer create
9038 		 */
9039 		dp_peer_info("Deleted peer. Unref vdev %pK, vdev_ref_cnt %d",
9040 			     vdev, qdf_atomic_read(&vdev->ref_cnt));
9041 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CHILD);
9042 	}
9043 }
9044 
9045 qdf_export_symbol(dp_peer_unref_delete);
9046 
9047 void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle,
9048 			       enum dp_mod_id mod_id)
9049 {
9050 	dp_peer_unref_delete((struct dp_peer *)handle, mod_id);
9051 }
9052 
9053 qdf_export_symbol(dp_txrx_peer_unref_delete);
9054 
9055 /**
9056  * dp_peer_delete_wifi3() - Delete txrx peer
9057  * @soc_hdl: soc handle
9058  * @vdev_id: id of dp handle
9059  * @peer_mac: mac of datapath PEER handle
9060  * @bitmap: bitmap indicating special handling of request.
9061  * @peer_type: peer type (link or MLD)
9062  *
9063  */
9064 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
9065 				       uint8_t vdev_id,
9066 				       uint8_t *peer_mac, uint32_t bitmap,
9067 				       enum cdp_peer_type peer_type)
9068 {
9069 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9070 	struct dp_peer *peer;
9071 	struct cdp_peer_info peer_info = { 0 };
9072 	struct dp_vdev *vdev = NULL;
9073 
9074 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac,
9075 				 false, peer_type);
9076 	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
9077 
9078 	/* Peer can be null for monitor vap mac address */
9079 	if (!peer) {
9080 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
9081 			  "%s: Invalid peer\n", __func__);
9082 		return QDF_STATUS_E_FAILURE;
9083 	}
9084 
9085 	if (!peer->valid) {
9086 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9087 		dp_err("Invalid peer: "QDF_MAC_ADDR_FMT,
9088 			QDF_MAC_ADDR_REF(peer_mac));
9089 		return QDF_STATUS_E_ALREADY;
9090 	}
9091 
9092 	vdev = peer->vdev;
9093 
9094 	if (!vdev) {
9095 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9096 		return QDF_STATUS_E_FAILURE;
9097 	}
9098 
9099 	peer->valid = 0;
9100 
9101 	dp_cfg_event_record_peer_evt(soc, DP_CFG_EVENT_PEER_DELETE, peer,
9102 				     vdev, 0);
9103 	dp_init_info("%pK: peer %pK (" QDF_MAC_ADDR_FMT ") pending-refs %d",
9104 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
9105 		     qdf_atomic_read(&peer->ref_cnt));
9106 
9107 	dp_peer_rx_reo_shared_qaddr_delete(soc, peer);
9108 
9109 	dp_local_peer_id_free(peer->vdev->pdev, peer);
9110 
9111 	/* Drop all rx packets before deleting peer */
9112 	dp_clear_peer_internal(soc, peer);
9113 
9114 	qdf_spinlock_destroy(&peer->peer_info_lock);
9115 	dp_peer_multipass_list_remove(peer);
9116 
9117 	/* remove the reference to the peer from the hash table */
9118 	dp_peer_find_hash_remove(soc, peer);
9119 
9120 	dp_peer_vdev_list_remove(soc, vdev, peer);
9121 
9122 	dp_peer_mlo_delete(peer);
9123 
9124 	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
9125 	TAILQ_INSERT_TAIL(&soc->inactive_peer_list, peer,
9126 			  inactive_list_elem);
9127 	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
9128 
9129 	/*
9130 	 * Remove the reference added during peer_attach.
9131 	 * The peer will still be left allocated until the
9132 	 * PEER_UNMAP message arrives to remove the other
9133 	 * reference, added by the PEER_MAP message.
9134 	 */
9135 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
9136 	/*
9137 	 * Remove the reference taken above
9138 	 */
9139 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9140 
9141 	return QDF_STATUS_SUCCESS;
9142 }
9143 
9144 #ifdef DP_RX_UDP_OVER_PEER_ROAM
9145 static QDF_STATUS dp_update_roaming_peer_wifi3(struct cdp_soc_t *soc_hdl,
9146 					       uint8_t vdev_id,
9147 					       uint8_t *peer_mac,
9148 					       uint32_t auth_status)
9149 {
9150 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9151 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9152 						     DP_MOD_ID_CDP);
9153 	if (!vdev)
9154 		return QDF_STATUS_E_FAILURE;
9155 
9156 	vdev->roaming_peer_status = auth_status;
9157 	qdf_mem_copy(vdev->roaming_peer_mac.raw, peer_mac,
9158 		     QDF_MAC_ADDR_SIZE);
9159 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9160 
9161 	return QDF_STATUS_SUCCESS;
9162 }
9163 #endif
9164 /**
9165  * dp_get_vdev_mac_addr_wifi3() - Detach txrx peer
9166  * @soc_hdl: Datapath soc handle
9167  * @vdev_id: virtual interface id
9168  *
9169  * Return: MAC address on success, NULL on failure.
9170  *
9171  */
9172 static uint8_t *dp_get_vdev_mac_addr_wifi3(struct cdp_soc_t *soc_hdl,
9173 					   uint8_t vdev_id)
9174 {
9175 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9176 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9177 						     DP_MOD_ID_CDP);
9178 	uint8_t *mac = NULL;
9179 
9180 	if (!vdev)
9181 		return NULL;
9182 
9183 	mac = vdev->mac_addr.raw;
9184 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9185 
9186 	return mac;
9187 }
9188 
9189 /**
9190  * dp_vdev_set_wds() - Enable per packet stats
9191  * @soc_hdl: DP soc handle
9192  * @vdev_id: id of DP VDEV handle
9193  * @val: value
9194  *
9195  * Return: none
9196  */
9197 static int dp_vdev_set_wds(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
9198 			   uint32_t val)
9199 {
9200 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9201 	struct dp_vdev *vdev =
9202 		dp_vdev_get_ref_by_id((struct dp_soc *)soc, vdev_id,
9203 				      DP_MOD_ID_CDP);
9204 
9205 	if (!vdev)
9206 		return QDF_STATUS_E_FAILURE;
9207 
9208 	vdev->wds_enabled = val;
9209 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9210 
9211 	return QDF_STATUS_SUCCESS;
9212 }
9213 
9214 static int dp_get_opmode(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
9215 {
9216 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9217 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9218 						     DP_MOD_ID_CDP);
9219 	int opmode;
9220 
9221 	if (!vdev) {
9222 		dp_err_rl("vdev for id %d is NULL", vdev_id);
9223 		return -EINVAL;
9224 	}
9225 	opmode = vdev->opmode;
9226 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9227 
9228 	return opmode;
9229 }
9230 
9231 /**
9232  * dp_get_os_rx_handles_from_vdev_wifi3() - Get os rx handles for a vdev
9233  * @soc_hdl: ol_txrx_soc_handle handle
9234  * @vdev_id: vdev id for which os rx handles are needed
9235  * @stack_fn_p: pointer to stack function pointer
9236  * @osif_vdev_p: pointer to ol_osif_vdev_handle
9237  *
9238  * Return: void
9239  */
9240 static
9241 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_soc_t *soc_hdl,
9242 					  uint8_t vdev_id,
9243 					  ol_txrx_rx_fp *stack_fn_p,
9244 					  ol_osif_vdev_handle *osif_vdev_p)
9245 {
9246 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9247 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9248 						     DP_MOD_ID_CDP);
9249 
9250 	if (qdf_unlikely(!vdev)) {
9251 		*stack_fn_p = NULL;
9252 		*osif_vdev_p = NULL;
9253 		return;
9254 	}
9255 	*stack_fn_p = vdev->osif_rx_stack;
9256 	*osif_vdev_p = vdev->osif_vdev;
9257 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9258 }
9259 
9260 /**
9261  * dp_get_ctrl_pdev_from_vdev_wifi3() - Get control pdev of vdev
9262  * @soc_hdl: datapath soc handle
9263  * @vdev_id: virtual device/interface id
9264  *
9265  * Return: Handle to control pdev
9266  */
9267 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(
9268 						struct cdp_soc_t *soc_hdl,
9269 						uint8_t vdev_id)
9270 {
9271 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9272 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9273 						     DP_MOD_ID_CDP);
9274 	struct dp_pdev *pdev;
9275 
9276 	if (!vdev)
9277 		return NULL;
9278 
9279 	pdev = vdev->pdev;
9280 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9281 	return pdev ? (struct cdp_cfg *)pdev->wlan_cfg_ctx : NULL;
9282 }
9283 
9284 /**
9285  * dp_get_tx_pending() - read pending tx
9286  * @pdev_handle: Datapath PDEV handle
9287  *
9288  * Return: outstanding tx
9289  */
9290 static int32_t dp_get_tx_pending(struct cdp_pdev *pdev_handle)
9291 {
9292 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
9293 
9294 	return qdf_atomic_read(&pdev->num_tx_outstanding);
9295 }
9296 
9297 /**
9298  * dp_get_peer_mac_from_peer_id() - get peer mac
9299  * @soc: CDP SoC handle
9300  * @peer_id: Peer ID
9301  * @peer_mac: MAC addr of PEER
9302  *
9303  * Return: QDF_STATUS
9304  */
9305 static QDF_STATUS dp_get_peer_mac_from_peer_id(struct cdp_soc_t *soc,
9306 					       uint32_t peer_id,
9307 					       uint8_t *peer_mac)
9308 {
9309 	struct dp_peer *peer;
9310 
9311 	if (soc && peer_mac) {
9312 		peer = dp_peer_get_ref_by_id((struct dp_soc *)soc,
9313 					     (uint16_t)peer_id,
9314 					     DP_MOD_ID_CDP);
9315 		if (peer) {
9316 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
9317 				     QDF_MAC_ADDR_SIZE);
9318 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9319 			return QDF_STATUS_SUCCESS;
9320 		}
9321 	}
9322 
9323 	return QDF_STATUS_E_FAILURE;
9324 }
9325 
9326 #ifdef MESH_MODE_SUPPORT
9327 static
9328 void dp_vdev_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
9329 {
9330 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
9331 
9332 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
9333 	vdev->mesh_vdev = val;
9334 	if (val)
9335 		vdev->skip_sw_tid_classification |=
9336 			DP_TX_MESH_ENABLED;
9337 	else
9338 		vdev->skip_sw_tid_classification &=
9339 			~DP_TX_MESH_ENABLED;
9340 }
9341 
9342 /**
9343  * dp_vdev_set_mesh_rx_filter() - to set the mesh rx filter
9344  * @vdev_hdl: virtual device object
9345  * @val: value to be set
9346  *
9347  * Return: void
9348  */
9349 static
9350 void dp_vdev_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
9351 {
9352 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
9353 
9354 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
9355 	vdev->mesh_rx_filter = val;
9356 }
9357 #endif
9358 
9359 /**
9360  * dp_vdev_set_hlos_tid_override() - to set hlos tid override
9361  * @vdev: virtual device object
9362  * @val: value to be set
9363  *
9364  * Return: void
9365  */
9366 static
9367 void dp_vdev_set_hlos_tid_override(struct dp_vdev *vdev, uint32_t val)
9368 {
9369 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
9370 	if (val)
9371 		vdev->skip_sw_tid_classification |=
9372 			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
9373 	else
9374 		vdev->skip_sw_tid_classification &=
9375 			~DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
9376 }
9377 
9378 /**
9379  * dp_vdev_get_hlos_tid_override() - to get hlos tid override flag
9380  * @vdev_hdl: virtual device object
9381  *
9382  * Return: 1 if this flag is set
9383  */
9384 static
9385 uint8_t dp_vdev_get_hlos_tid_override(struct cdp_vdev *vdev_hdl)
9386 {
9387 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
9388 
9389 	return !!(vdev->skip_sw_tid_classification &
9390 			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED);
9391 }
9392 
9393 #ifdef VDEV_PEER_PROTOCOL_COUNT
9394 static void dp_enable_vdev_peer_protocol_count(struct cdp_soc_t *soc_hdl,
9395 					       int8_t vdev_id,
9396 					       bool enable)
9397 {
9398 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9399 	struct dp_vdev *vdev;
9400 
9401 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
9402 	if (!vdev)
9403 		return;
9404 
9405 	dp_info("enable %d vdev_id %d", enable, vdev_id);
9406 	vdev->peer_protocol_count_track = enable;
9407 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9408 }
9409 
9410 static void dp_enable_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
9411 						   int8_t vdev_id,
9412 						   int drop_mask)
9413 {
9414 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9415 	struct dp_vdev *vdev;
9416 
9417 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
9418 	if (!vdev)
9419 		return;
9420 
9421 	dp_info("drop_mask %d vdev_id %d", drop_mask, vdev_id);
9422 	vdev->peer_protocol_count_dropmask = drop_mask;
9423 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9424 }
9425 
9426 static int dp_is_vdev_peer_protocol_count_enabled(struct cdp_soc_t *soc_hdl,
9427 						  int8_t vdev_id)
9428 {
9429 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9430 	struct dp_vdev *vdev;
9431 	int peer_protocol_count_track;
9432 
9433 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
9434 	if (!vdev)
9435 		return 0;
9436 
9437 	dp_info("enable %d vdev_id %d", vdev->peer_protocol_count_track,
9438 		vdev_id);
9439 	peer_protocol_count_track =
9440 		vdev->peer_protocol_count_track;
9441 
9442 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9443 	return peer_protocol_count_track;
9444 }
9445 
9446 static int dp_get_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
9447 					       int8_t vdev_id)
9448 {
9449 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9450 	struct dp_vdev *vdev;
9451 	int peer_protocol_count_dropmask;
9452 
9453 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
9454 	if (!vdev)
9455 		return 0;
9456 
9457 	dp_info("drop_mask %d vdev_id %d", vdev->peer_protocol_count_dropmask,
9458 		vdev_id);
9459 	peer_protocol_count_dropmask =
9460 		vdev->peer_protocol_count_dropmask;
9461 
9462 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9463 	return peer_protocol_count_dropmask;
9464 }
9465 
9466 #endif
9467 
9468 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data)
9469 {
9470 	uint8_t pdev_count;
9471 
9472 	for (pdev_count = 0; pdev_count < MAX_PDEV_CNT; pdev_count++) {
9473 		if (soc->pdev_list[pdev_count] &&
9474 		    soc->pdev_list[pdev_count] == data)
9475 			return true;
9476 	}
9477 	return false;
9478 }
9479 
9480 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
9481 	union hal_reo_status *reo_status)
9482 {
9483 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
9484 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
9485 
9486 	if (!dp_check_pdev_exists(soc, pdev)) {
9487 		dp_err_rl("pdev doesn't exist");
9488 		return;
9489 	}
9490 
9491 	if (!qdf_atomic_read(&soc->cmn_init_done))
9492 		return;
9493 
9494 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
9495 		DP_PRINT_STATS("REO stats failure %d",
9496 			       queue_status->header.status);
9497 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
9498 		return;
9499 	}
9500 
9501 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
9502 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
9503 
9504 }
9505 
9506 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
9507 			     struct cdp_vdev_stats *vdev_stats)
9508 {
9509 
9510 	if (!vdev || !vdev->pdev)
9511 		return;
9512 
9513 
9514 	dp_update_vdev_ingress_stats(vdev);
9515 
9516 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
9517 
9518 	dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, vdev_stats,
9519 			     DP_MOD_ID_GENERIC_STATS);
9520 
9521 	dp_update_vdev_rate_stats(vdev_stats, &vdev->stats);
9522 
9523 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9524 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
9525 			     vdev_stats, vdev->vdev_id,
9526 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
9527 #endif
9528 }
9529 
9530 void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
9531 {
9532 	struct dp_vdev *vdev = NULL;
9533 	struct dp_soc *soc;
9534 	struct cdp_vdev_stats *vdev_stats =
9535 			qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
9536 
9537 	if (!vdev_stats) {
9538 		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
9539 			   pdev->soc);
9540 		return;
9541 	}
9542 
9543 	soc = pdev->soc;
9544 
9545 	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
9546 	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
9547 	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
9548 	qdf_mem_zero(&pdev->stats.rx_i, sizeof(pdev->stats.rx_i));
9549 
9550 	if (dp_monitor_is_enable_mcopy_mode(pdev))
9551 		dp_monitor_invalid_peer_update_pdev_stats(soc, pdev);
9552 
9553 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
9554 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
9555 
9556 		dp_aggregate_vdev_stats(vdev, vdev_stats);
9557 		dp_update_pdev_stats(pdev, vdev_stats);
9558 		dp_update_pdev_ingress_stats(pdev, vdev);
9559 	}
9560 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
9561 	qdf_mem_free(vdev_stats);
9562 
9563 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9564 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
9565 			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
9566 #endif
9567 }
9568 
9569 /**
9570  * dp_vdev_getstats() - get vdev packet level stats
9571  * @vdev_handle: Datapath VDEV handle
9572  * @stats: cdp network device stats structure
9573  *
9574  * Return: QDF_STATUS
9575  */
9576 static QDF_STATUS dp_vdev_getstats(struct cdp_vdev *vdev_handle,
9577 				   struct cdp_dev_stats *stats)
9578 {
9579 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
9580 	struct dp_pdev *pdev;
9581 	struct dp_soc *soc;
9582 	struct cdp_vdev_stats *vdev_stats;
9583 
9584 	if (!vdev)
9585 		return QDF_STATUS_E_FAILURE;
9586 
9587 	pdev = vdev->pdev;
9588 	if (!pdev)
9589 		return QDF_STATUS_E_FAILURE;
9590 
9591 	soc = pdev->soc;
9592 
9593 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
9594 
9595 	if (!vdev_stats) {
9596 		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
9597 			   soc);
9598 		return QDF_STATUS_E_FAILURE;
9599 	}
9600 
9601 	dp_aggregate_vdev_stats(vdev, vdev_stats);
9602 
9603 	stats->tx_packets = vdev_stats->tx.comp_pkt.num;
9604 	stats->tx_bytes = vdev_stats->tx.comp_pkt.bytes;
9605 
9606 	stats->tx_errors = vdev_stats->tx.tx_failed;
9607 	stats->tx_dropped = vdev_stats->tx_i.dropped.dropped_pkt.num +
9608 			    vdev_stats->tx_i.sg.dropped_host.num +
9609 			    vdev_stats->tx_i.mcast_en.dropped_map_error +
9610 			    vdev_stats->tx_i.mcast_en.dropped_self_mac +
9611 			    vdev_stats->tx_i.mcast_en.dropped_send_fail +
9612 			    vdev_stats->tx.nawds_mcast_drop;
9613 
9614 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
9615 		stats->rx_packets = vdev_stats->rx.to_stack.num;
9616 		stats->rx_bytes = vdev_stats->rx.to_stack.bytes;
9617 	} else {
9618 		stats->rx_packets = vdev_stats->rx_i.reo_rcvd_pkt.num +
9619 				    vdev_stats->rx_i.null_q_desc_pkt.num +
9620 				    vdev_stats->rx_i.routed_eapol_pkt.num;
9621 		stats->rx_bytes = vdev_stats->rx_i.reo_rcvd_pkt.bytes +
9622 				  vdev_stats->rx_i.null_q_desc_pkt.bytes +
9623 				  vdev_stats->rx_i.routed_eapol_pkt.bytes;
9624 	}
9625 
9626 	stats->rx_errors = vdev_stats->rx.err.mic_err +
9627 			   vdev_stats->rx.err.decrypt_err +
9628 			   vdev_stats->rx.err.fcserr +
9629 			   vdev_stats->rx.err.pn_err +
9630 			   vdev_stats->rx.err.oor_err +
9631 			   vdev_stats->rx.err.jump_2k_err +
9632 			   vdev_stats->rx.err.rxdma_wifi_parse_err;
9633 
9634 	stats->rx_dropped = vdev_stats->rx.mec_drop.num +
9635 			    vdev_stats->rx.multipass_rx_pkt_drop +
9636 			    vdev_stats->rx.peer_unauth_rx_pkt_drop +
9637 			    vdev_stats->rx.policy_check_drop +
9638 			    vdev_stats->rx.nawds_mcast_drop +
9639 			    vdev_stats->rx.mcast_3addr_drop;
9640 
9641 	qdf_mem_free(vdev_stats);
9642 
9643 	return QDF_STATUS_SUCCESS;
9644 }
9645 
9646 /**
9647  * dp_pdev_getstats() - get pdev packet level stats
9648  * @pdev_handle: Datapath PDEV handle
9649  * @stats: cdp network device stats structure
9650  *
9651  * Return: QDF_STATUS
9652  */
9653 static void dp_pdev_getstats(struct cdp_pdev *pdev_handle,
9654 			     struct cdp_dev_stats *stats)
9655 {
9656 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
9657 
9658 	dp_aggregate_pdev_stats(pdev);
9659 
9660 	stats->tx_packets = pdev->stats.tx.comp_pkt.num;
9661 	stats->tx_bytes = pdev->stats.tx.comp_pkt.bytes;
9662 
9663 	stats->tx_errors = pdev->stats.tx.tx_failed;
9664 	stats->tx_dropped = pdev->stats.tx_i.dropped.dropped_pkt.num +
9665 			    pdev->stats.tx_i.sg.dropped_host.num +
9666 			    pdev->stats.tx_i.mcast_en.dropped_map_error +
9667 			    pdev->stats.tx_i.mcast_en.dropped_self_mac +
9668 			    pdev->stats.tx_i.mcast_en.dropped_send_fail +
9669 			    pdev->stats.tx.nawds_mcast_drop +
9670 			    pdev->stats.tso_stats.dropped_host.num;
9671 
9672 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(pdev->soc->wlan_cfg_ctx)) {
9673 		stats->rx_packets = pdev->stats.rx.to_stack.num;
9674 		stats->rx_bytes = pdev->stats.rx.to_stack.bytes;
9675 	} else {
9676 		stats->rx_packets = pdev->stats.rx_i.reo_rcvd_pkt.num +
9677 				    pdev->stats.rx_i.null_q_desc_pkt.num +
9678 				    pdev->stats.rx_i.routed_eapol_pkt.num;
9679 		stats->rx_bytes = pdev->stats.rx_i.reo_rcvd_pkt.bytes +
9680 				  pdev->stats.rx_i.null_q_desc_pkt.bytes +
9681 				  pdev->stats.rx_i.routed_eapol_pkt.bytes;
9682 	}
9683 
9684 	stats->rx_errors = pdev->stats.err.ip_csum_err +
9685 		pdev->stats.err.tcp_udp_csum_err +
9686 		pdev->stats.rx.err.mic_err +
9687 		pdev->stats.rx.err.decrypt_err +
9688 		pdev->stats.rx.err.fcserr +
9689 		pdev->stats.rx.err.pn_err +
9690 		pdev->stats.rx.err.oor_err +
9691 		pdev->stats.rx.err.jump_2k_err +
9692 		pdev->stats.rx.err.rxdma_wifi_parse_err;
9693 	stats->rx_dropped = pdev->stats.dropped.msdu_not_done +
9694 		pdev->stats.dropped.mec +
9695 		pdev->stats.dropped.mesh_filter +
9696 		pdev->stats.dropped.wifi_parse +
9697 		pdev->stats.dropped.mon_rx_drop +
9698 		pdev->stats.dropped.mon_radiotap_update_err +
9699 		pdev->stats.rx.mec_drop.num +
9700 		pdev->stats.rx.multipass_rx_pkt_drop +
9701 		pdev->stats.rx.peer_unauth_rx_pkt_drop +
9702 		pdev->stats.rx.policy_check_drop +
9703 		pdev->stats.rx.nawds_mcast_drop +
9704 		pdev->stats.rx.mcast_3addr_drop;
9705 }
9706 
9707 /**
9708  * dp_get_device_stats() - get interface level packet stats
9709  * @soc_hdl: soc handle
9710  * @id: vdev_id or pdev_id based on type
9711  * @stats: cdp network device stats structure
9712  * @type: device type pdev/vdev
9713  *
9714  * Return: QDF_STATUS
9715  */
9716 static QDF_STATUS dp_get_device_stats(struct cdp_soc_t *soc_hdl, uint8_t id,
9717 				      struct cdp_dev_stats *stats,
9718 				      uint8_t type)
9719 {
9720 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9721 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
9722 	struct dp_vdev *vdev;
9723 
9724 	switch (type) {
9725 	case UPDATE_VDEV_STATS:
9726 		vdev = dp_vdev_get_ref_by_id(soc, id, DP_MOD_ID_CDP);
9727 
9728 		if (vdev) {
9729 			status = dp_vdev_getstats((struct cdp_vdev *)vdev,
9730 						  stats);
9731 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9732 		}
9733 		return status;
9734 	case UPDATE_PDEV_STATS:
9735 		{
9736 			struct dp_pdev *pdev =
9737 				dp_get_pdev_from_soc_pdev_id_wifi3(
9738 						(struct dp_soc *)soc,
9739 						 id);
9740 			if (pdev) {
9741 				dp_pdev_getstats((struct cdp_pdev *)pdev,
9742 						 stats);
9743 				return QDF_STATUS_SUCCESS;
9744 			}
9745 		}
9746 		break;
9747 	default:
9748 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9749 			"apstats cannot be updated for this input "
9750 			"type %d", type);
9751 		break;
9752 	}
9753 
9754 	return QDF_STATUS_E_FAILURE;
9755 }
9756 
9757 const
9758 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
9759 {
9760 	switch (ring_type) {
9761 	case REO_DST:
9762 		return "Reo_dst";
9763 	case REO_EXCEPTION:
9764 		return "Reo_exception";
9765 	case REO_CMD:
9766 		return "Reo_cmd";
9767 	case REO_REINJECT:
9768 		return "Reo_reinject";
9769 	case REO_STATUS:
9770 		return "Reo_status";
9771 	case WBM2SW_RELEASE:
9772 		return "wbm2sw_release";
9773 	case TCL_DATA:
9774 		return "tcl_data";
9775 	case TCL_CMD_CREDIT:
9776 		return "tcl_cmd_credit";
9777 	case TCL_STATUS:
9778 		return "tcl_status";
9779 	case SW2WBM_RELEASE:
9780 		return "sw2wbm_release";
9781 	case RXDMA_BUF:
9782 		return "Rxdma_buf";
9783 	case RXDMA_DST:
9784 		return "Rxdma_dst";
9785 	case RXDMA_MONITOR_BUF:
9786 		return "Rxdma_monitor_buf";
9787 	case RXDMA_MONITOR_DESC:
9788 		return "Rxdma_monitor_desc";
9789 	case RXDMA_MONITOR_STATUS:
9790 		return "Rxdma_monitor_status";
9791 	case RXDMA_MONITOR_DST:
9792 		return "Rxdma_monitor_destination";
9793 	case WBM_IDLE_LINK:
9794 		return "WBM_hw_idle_link";
9795 	case PPE2TCL:
9796 		return "PPE2TCL";
9797 	case REO2PPE:
9798 		return "REO2PPE";
9799 	case TX_MONITOR_DST:
9800 		return "tx_monitor_destination";
9801 	case TX_MONITOR_BUF:
9802 		return "tx_monitor_buf";
9803 	default:
9804 		dp_err("Invalid ring type");
9805 		break;
9806 	}
9807 	return "Invalid";
9808 }
9809 
9810 void dp_print_napi_stats(struct dp_soc *soc)
9811 {
9812 	hif_print_napi_stats(soc->hif_handle);
9813 }
9814 
9815 /**
9816  * dp_txrx_host_peer_stats_clr() - Reinitialize the txrx peer stats
9817  * @soc: Datapath soc
9818  * @peer: Datatpath peer
9819  * @arg: argument to iter function
9820  *
9821  * Return: QDF_STATUS
9822  */
9823 static inline void
9824 dp_txrx_host_peer_stats_clr(struct dp_soc *soc,
9825 			    struct dp_peer *peer,
9826 			    void *arg)
9827 {
9828 	struct dp_txrx_peer *txrx_peer = NULL;
9829 	struct dp_peer *tgt_peer = NULL;
9830 	struct cdp_interface_peer_stats peer_stats_intf;
9831 
9832 	qdf_mem_zero(&peer_stats_intf, sizeof(struct cdp_interface_peer_stats));
9833 
9834 	DP_STATS_CLR(peer);
9835 	/* Clear monitor peer stats */
9836 	dp_monitor_peer_reset_stats(soc, peer);
9837 
9838 	/* Clear MLD peer stats only when link peer is primary */
9839 	if (dp_peer_is_primary_link_peer(peer)) {
9840 		tgt_peer = dp_get_tgt_peer_from_peer(peer);
9841 		if (tgt_peer) {
9842 			DP_STATS_CLR(tgt_peer);
9843 			txrx_peer = tgt_peer->txrx_peer;
9844 			dp_txrx_peer_stats_clr(txrx_peer);
9845 		}
9846 	}
9847 
9848 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9849 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, peer->vdev->pdev->soc,
9850 			     &peer_stats_intf,  peer->peer_id,
9851 			     UPDATE_PEER_STATS, peer->vdev->pdev->pdev_id);
9852 #endif
9853 }
9854 
9855 #ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING
9856 static inline void dp_srng_clear_ring_usage_wm_stats(struct dp_soc *soc)
9857 {
9858 	int ring;
9859 
9860 	for (ring = 0; ring < soc->num_reo_dest_rings; ring++)
9861 		hal_srng_clear_ring_usage_wm_locked(soc->hal_soc,
9862 					    soc->reo_dest_ring[ring].hal_srng);
9863 }
9864 #else
9865 static inline void dp_srng_clear_ring_usage_wm_stats(struct dp_soc *soc)
9866 {
9867 }
9868 #endif
9869 
9870 /**
9871  * dp_txrx_host_stats_clr() - Reinitialize the txrx stats
9872  * @vdev: DP_VDEV handle
9873  * @soc: DP_SOC handle
9874  *
9875  * Return: QDF_STATUS
9876  */
9877 static inline QDF_STATUS
9878 dp_txrx_host_stats_clr(struct dp_vdev *vdev, struct dp_soc *soc)
9879 {
9880 	if (!vdev || !vdev->pdev)
9881 		return QDF_STATUS_E_FAILURE;
9882 
9883 	/*
9884 	 * if NSS offload is enabled, then send message
9885 	 * to NSS FW to clear the stats. Once NSS FW clears the statistics
9886 	 * then clear host statistics.
9887 	 */
9888 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
9889 		if (soc->cdp_soc.ol_ops->nss_stats_clr)
9890 			soc->cdp_soc.ol_ops->nss_stats_clr(soc->ctrl_psoc,
9891 							   vdev->vdev_id);
9892 	}
9893 
9894 	dp_vdev_stats_hw_offload_target_clear(soc, vdev->pdev->pdev_id,
9895 					      (1 << vdev->vdev_id));
9896 
9897 	DP_STATS_CLR(vdev->pdev);
9898 	DP_STATS_CLR(vdev->pdev->soc);
9899 	DP_STATS_CLR(vdev);
9900 
9901 	hif_clear_napi_stats(vdev->pdev->soc->hif_handle);
9902 
9903 	dp_vdev_iterate_peer(vdev, dp_txrx_host_peer_stats_clr, NULL,
9904 			     DP_MOD_ID_GENERIC_STATS);
9905 
9906 	dp_srng_clear_ring_usage_wm_stats(soc);
9907 
9908 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9909 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
9910 			     &vdev->stats,  vdev->vdev_id,
9911 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
9912 #endif
9913 	return QDF_STATUS_SUCCESS;
9914 }
9915 
9916 /**
9917  * dp_get_peer_calibr_stats()- Get peer calibrated stats
9918  * @peer: Datapath peer
9919  * @peer_stats: buffer for peer stats
9920  *
9921  * Return: none
9922  */
9923 static inline
9924 void dp_get_peer_calibr_stats(struct dp_peer *peer,
9925 			      struct cdp_peer_stats *peer_stats)
9926 {
9927 	struct dp_peer *tgt_peer;
9928 
9929 	tgt_peer = dp_get_tgt_peer_from_peer(peer);
9930 	if (!tgt_peer)
9931 		return;
9932 
9933 	peer_stats->tx.last_per = tgt_peer->stats.tx.last_per;
9934 	peer_stats->tx.tx_bytes_success_last =
9935 				tgt_peer->stats.tx.tx_bytes_success_last;
9936 	peer_stats->tx.tx_data_success_last =
9937 					tgt_peer->stats.tx.tx_data_success_last;
9938 	peer_stats->tx.tx_byte_rate = tgt_peer->stats.tx.tx_byte_rate;
9939 	peer_stats->tx.tx_data_rate = tgt_peer->stats.tx.tx_data_rate;
9940 	peer_stats->tx.tx_data_ucast_last =
9941 					tgt_peer->stats.tx.tx_data_ucast_last;
9942 	peer_stats->tx.tx_data_ucast_rate =
9943 					tgt_peer->stats.tx.tx_data_ucast_rate;
9944 	peer_stats->tx.inactive_time = tgt_peer->stats.tx.inactive_time;
9945 	peer_stats->rx.rx_bytes_success_last =
9946 				tgt_peer->stats.rx.rx_bytes_success_last;
9947 	peer_stats->rx.rx_data_success_last =
9948 				tgt_peer->stats.rx.rx_data_success_last;
9949 	peer_stats->rx.rx_byte_rate = tgt_peer->stats.rx.rx_byte_rate;
9950 	peer_stats->rx.rx_data_rate = tgt_peer->stats.rx.rx_data_rate;
9951 }
9952 
9953 /**
9954  * dp_get_peer_basic_stats()- Get peer basic stats
9955  * @peer: Datapath peer
9956  * @peer_stats: buffer for peer stats
9957  *
9958  * Return: none
9959  */
9960 static inline
9961 void dp_get_peer_basic_stats(struct dp_peer *peer,
9962 			     struct cdp_peer_stats *peer_stats)
9963 {
9964 	struct dp_txrx_peer *txrx_peer;
9965 
9966 	txrx_peer = dp_get_txrx_peer(peer);
9967 	if (!txrx_peer)
9968 		return;
9969 
9970 	peer_stats->tx.comp_pkt.num += txrx_peer->comp_pkt.num;
9971 	peer_stats->tx.comp_pkt.bytes += txrx_peer->comp_pkt.bytes;
9972 	peer_stats->tx.tx_failed += txrx_peer->tx_failed;
9973 	peer_stats->rx.to_stack.num += txrx_peer->to_stack.num;
9974 	peer_stats->rx.to_stack.bytes += txrx_peer->to_stack.bytes;
9975 }
9976 
9977 /**
9978  * dp_get_peer_per_pkt_stats()- Get peer per pkt stats
9979  * @peer: Datapath peer
9980  * @peer_stats: buffer for peer stats
9981  *
9982  * Return: none
9983  */
9984 static inline
9985 void dp_get_peer_per_pkt_stats(struct dp_peer *peer,
9986 			       struct cdp_peer_stats *peer_stats)
9987 {
9988 	struct dp_txrx_peer *txrx_peer;
9989 	struct dp_peer_per_pkt_stats *per_pkt_stats;
9990 
9991 	txrx_peer = dp_get_txrx_peer(peer);
9992 	if (!txrx_peer)
9993 		return;
9994 
9995 	per_pkt_stats = &txrx_peer->stats.per_pkt_stats;
9996 	DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats);
9997 }
9998 
9999 /**
10000  * dp_get_peer_extd_stats()- Get peer extd stats
10001  * @peer: Datapath peer
10002  * @peer_stats: buffer for peer stats
10003  *
10004  * Return: none
10005  */
10006 #ifdef QCA_ENHANCED_STATS_SUPPORT
10007 #ifdef WLAN_FEATURE_11BE_MLO
10008 static inline
10009 void dp_get_peer_extd_stats(struct dp_peer *peer,
10010 			    struct cdp_peer_stats *peer_stats)
10011 {
10012 	struct dp_soc *soc = peer->vdev->pdev->soc;
10013 
10014 	if (IS_MLO_DP_MLD_PEER(peer)) {
10015 		uint8_t i;
10016 		struct dp_peer *link_peer;
10017 		struct dp_soc *link_peer_soc;
10018 		struct dp_mld_link_peers link_peers_info;
10019 
10020 		dp_get_link_peers_ref_from_mld_peer(soc, peer,
10021 						    &link_peers_info,
10022 						    DP_MOD_ID_CDP);
10023 		for (i = 0; i < link_peers_info.num_links; i++) {
10024 			link_peer = link_peers_info.link_peers[i];
10025 			link_peer_soc = link_peer->vdev->pdev->soc;
10026 			dp_monitor_peer_get_stats(link_peer_soc, link_peer,
10027 						  peer_stats,
10028 						  UPDATE_PEER_STATS);
10029 		}
10030 		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
10031 	} else {
10032 		dp_monitor_peer_get_stats(soc, peer, peer_stats,
10033 					  UPDATE_PEER_STATS);
10034 	}
10035 }
10036 #else
10037 static inline
10038 void dp_get_peer_extd_stats(struct dp_peer *peer,
10039 			    struct cdp_peer_stats *peer_stats)
10040 {
10041 	struct dp_soc *soc = peer->vdev->pdev->soc;
10042 
10043 	dp_monitor_peer_get_stats(soc, peer, peer_stats, UPDATE_PEER_STATS);
10044 }
10045 #endif
10046 #else
10047 static inline
10048 void dp_get_peer_extd_stats(struct dp_peer *peer,
10049 			    struct cdp_peer_stats *peer_stats)
10050 {
10051 	struct dp_txrx_peer *txrx_peer;
10052 	struct dp_peer_extd_stats *extd_stats;
10053 
10054 	txrx_peer = dp_get_txrx_peer(peer);
10055 	if (qdf_unlikely(!txrx_peer)) {
10056 		dp_err_rl("txrx_peer NULL");
10057 		return;
10058 	}
10059 
10060 	extd_stats = &txrx_peer->stats.extd_stats;
10061 	DP_UPDATE_EXTD_STATS(peer_stats, extd_stats);
10062 }
10063 #endif
10064 
10065 /**
10066  * dp_get_peer_tx_per()- Get peer packet error ratio
10067  * @peer_stats: buffer for peer stats
10068  *
10069  * Return: none
10070  */
10071 static inline
10072 void dp_get_peer_tx_per(struct cdp_peer_stats *peer_stats)
10073 {
10074 	if (peer_stats->tx.tx_success.num + peer_stats->tx.retries > 0)
10075 		peer_stats->tx.per = (peer_stats->tx.retries * 100) /
10076 				  (peer_stats->tx.tx_success.num +
10077 				   peer_stats->tx.retries);
10078 	else
10079 		peer_stats->tx.per = 0;
10080 }
10081 
10082 void dp_get_peer_stats(struct dp_peer *peer, struct cdp_peer_stats *peer_stats)
10083 {
10084 	dp_get_peer_calibr_stats(peer, peer_stats);
10085 
10086 	dp_get_peer_basic_stats(peer, peer_stats);
10087 
10088 	dp_get_peer_per_pkt_stats(peer, peer_stats);
10089 
10090 	dp_get_peer_extd_stats(peer, peer_stats);
10091 
10092 	dp_get_peer_tx_per(peer_stats);
10093 }
10094 
10095 /**
10096  * dp_get_host_peer_stats()- function to print peer stats
10097  * @soc: dp_soc handle
10098  * @mac_addr: mac address of the peer
10099  *
10100  * Return: QDF_STATUS
10101  */
10102 static QDF_STATUS
10103 dp_get_host_peer_stats(struct cdp_soc_t *soc, uint8_t *mac_addr)
10104 {
10105 	struct dp_peer *peer = NULL;
10106 	struct cdp_peer_stats *peer_stats = NULL;
10107 	struct cdp_peer_info peer_info = { 0 };
10108 
10109 	if (!mac_addr) {
10110 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
10111 			  "%s: NULL peer mac addr\n", __func__);
10112 		return QDF_STATUS_E_FAILURE;
10113 	}
10114 
10115 	DP_PEER_INFO_PARAMS_INIT(&peer_info, DP_VDEV_ALL, mac_addr, false,
10116 				 CDP_WILD_PEER_TYPE);
10117 
10118 	peer = dp_peer_hash_find_wrapper((struct dp_soc *)soc, &peer_info,
10119 					 DP_MOD_ID_CDP);
10120 	if (!peer) {
10121 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
10122 			  "%s: Invalid peer\n", __func__);
10123 		return QDF_STATUS_E_FAILURE;
10124 	}
10125 
10126 	peer_stats = qdf_mem_malloc(sizeof(struct cdp_peer_stats));
10127 	if (!peer_stats) {
10128 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
10129 			  "%s: Memory allocation failed for cdp_peer_stats\n",
10130 			  __func__);
10131 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10132 		return QDF_STATUS_E_NOMEM;
10133 	}
10134 
10135 	qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats));
10136 
10137 	dp_get_peer_stats(peer, peer_stats);
10138 	dp_print_peer_stats(peer, peer_stats);
10139 
10140 	dp_peer_rxtid_stats(dp_get_tgt_peer_from_peer(peer),
10141 			    dp_rx_tid_stats_cb, NULL);
10142 
10143 	qdf_mem_free(peer_stats);
10144 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10145 
10146 	return QDF_STATUS_SUCCESS;
10147 }
10148 
10149 /**
10150  * dp_dump_wbm_idle_hptp() - dump wbm idle ring, hw hp tp info.
10151  * @soc: dp soc.
10152  * @pdev: dp pdev.
10153  *
10154  * Return: None.
10155  */
10156 static void
10157 dp_dump_wbm_idle_hptp(struct dp_soc *soc, struct dp_pdev *pdev)
10158 {
10159 	uint32_t hw_head;
10160 	uint32_t hw_tail;
10161 	struct dp_srng *srng;
10162 
10163 	if (!soc) {
10164 		dp_err("soc is NULL");
10165 		return;
10166 	}
10167 
10168 	if (!pdev) {
10169 		dp_err("pdev is NULL");
10170 		return;
10171 	}
10172 
10173 	srng = &pdev->soc->wbm_idle_link_ring;
10174 	if (!srng) {
10175 		dp_err("wbm_idle_link_ring srng is NULL");
10176 		return;
10177 	}
10178 
10179 	hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &hw_head,
10180 			&hw_tail, WBM_IDLE_LINK);
10181 
10182 	dp_debug("WBM_IDLE_LINK: HW hp: %d, HW tp: %d",
10183 			hw_head, hw_tail);
10184 }
10185 
10186 
10187 /**
10188  * dp_txrx_stats_help() - Helper function for Txrx_Stats
10189  *
10190  * Return: None
10191  */
10192 static void dp_txrx_stats_help(void)
10193 {
10194 	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
10195 	dp_info("stats_option:");
10196 	dp_info("  1 -- HTT Tx Statistics");
10197 	dp_info("  2 -- HTT Rx Statistics");
10198 	dp_info("  3 -- HTT Tx HW Queue Statistics");
10199 	dp_info("  4 -- HTT Tx HW Sched Statistics");
10200 	dp_info("  5 -- HTT Error Statistics");
10201 	dp_info("  6 -- HTT TQM Statistics");
10202 	dp_info("  7 -- HTT TQM CMDQ Statistics");
10203 	dp_info("  8 -- HTT TX_DE_CMN Statistics");
10204 	dp_info("  9 -- HTT Tx Rate Statistics");
10205 	dp_info(" 10 -- HTT Rx Rate Statistics");
10206 	dp_info(" 11 -- HTT Peer Statistics");
10207 	dp_info(" 12 -- HTT Tx SelfGen Statistics");
10208 	dp_info(" 13 -- HTT Tx MU HWQ Statistics");
10209 	dp_info(" 14 -- HTT RING_IF_INFO Statistics");
10210 	dp_info(" 15 -- HTT SRNG Statistics");
10211 	dp_info(" 16 -- HTT SFM Info Statistics");
10212 	dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
10213 	dp_info(" 18 -- HTT Peer List Details");
10214 	dp_info(" 20 -- Clear Host Statistics");
10215 	dp_info(" 21 -- Host Rx Rate Statistics");
10216 	dp_info(" 22 -- Host Tx Rate Statistics");
10217 	dp_info(" 23 -- Host Tx Statistics");
10218 	dp_info(" 24 -- Host Rx Statistics");
10219 	dp_info(" 25 -- Host AST Statistics");
10220 	dp_info(" 26 -- Host SRNG PTR Statistics");
10221 	dp_info(" 27 -- Host Mon Statistics");
10222 	dp_info(" 28 -- Host REO Queue Statistics");
10223 	dp_info(" 29 -- Host Soc cfg param Statistics");
10224 	dp_info(" 30 -- Host pdev cfg param Statistics");
10225 	dp_info(" 31 -- Host NAPI stats");
10226 	dp_info(" 32 -- Host Interrupt stats");
10227 	dp_info(" 33 -- Host FISA stats");
10228 	dp_info(" 34 -- Host Register Work stats");
10229 	dp_info(" 35 -- HW REO Queue stats");
10230 	dp_info(" 36 -- Host WBM IDLE link desc ring HP/TP");
10231 	dp_info(" 37 -- Host SRNG usage watermark stats");
10232 }
10233 
10234 #ifdef DP_UMAC_HW_RESET_SUPPORT
10235 /**
10236  * dp_umac_rst_skel_enable_update() - Update skel dbg flag for umac reset
10237  * @soc: dp soc handle
10238  * @en: ebable/disable
10239  *
10240  * Return: void
10241  */
10242 static void dp_umac_rst_skel_enable_update(struct dp_soc *soc, bool en)
10243 {
10244 	soc->umac_reset_ctx.skel_enable = en;
10245 	dp_cdp_debug("UMAC HW reset debug skeleton code enabled :%u",
10246 		     soc->umac_reset_ctx.skel_enable);
10247 }
10248 
10249 /**
10250  * dp_umac_rst_skel_enable_get() - Get skel dbg flag for umac reset
10251  * @soc: dp soc handle
10252  *
10253  * Return: enable/disable flag
10254  */
10255 static bool dp_umac_rst_skel_enable_get(struct dp_soc *soc)
10256 {
10257 	return soc->umac_reset_ctx.skel_enable;
10258 }
10259 #else
10260 static void dp_umac_rst_skel_enable_update(struct dp_soc *soc, bool en)
10261 {
10262 }
10263 
10264 static bool dp_umac_rst_skel_enable_get(struct dp_soc *soc)
10265 {
10266 	return false;
10267 }
10268 #endif
10269 
10270 /**
10271  * dp_print_host_stats()- Function to print the stats aggregated at host
10272  * @vdev: DP_VDEV handle
10273  * @req: host stats type
10274  * @soc: dp soc handler
10275  *
10276  * Return: 0 on success, print error message in case of failure
10277  */
10278 static int
10279 dp_print_host_stats(struct dp_vdev *vdev,
10280 		    struct cdp_txrx_stats_req *req,
10281 		    struct dp_soc *soc)
10282 {
10283 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
10284 	enum cdp_host_txrx_stats type =
10285 			dp_stats_mapping_table[req->stats][STATS_HOST];
10286 
10287 	dp_aggregate_pdev_stats(pdev);
10288 
10289 	switch (type) {
10290 	case TXRX_CLEAR_STATS:
10291 		dp_txrx_host_stats_clr(vdev, soc);
10292 		break;
10293 	case TXRX_RX_RATE_STATS:
10294 		dp_print_rx_rates(vdev);
10295 		break;
10296 	case TXRX_TX_RATE_STATS:
10297 		dp_print_tx_rates(vdev);
10298 		break;
10299 	case TXRX_TX_HOST_STATS:
10300 		dp_print_pdev_tx_stats(pdev);
10301 		dp_print_soc_tx_stats(pdev->soc);
10302 		dp_print_global_desc_count();
10303 		break;
10304 	case TXRX_RX_HOST_STATS:
10305 		dp_print_pdev_rx_stats(pdev);
10306 		dp_print_soc_rx_stats(pdev->soc);
10307 		break;
10308 	case TXRX_AST_STATS:
10309 		dp_print_ast_stats(pdev->soc);
10310 		dp_print_mec_stats(pdev->soc);
10311 		dp_print_peer_table(vdev);
10312 		break;
10313 	case TXRX_SRNG_PTR_STATS:
10314 		dp_print_ring_stats(pdev);
10315 		break;
10316 	case TXRX_RX_MON_STATS:
10317 		dp_monitor_print_pdev_rx_mon_stats(pdev);
10318 		break;
10319 	case TXRX_REO_QUEUE_STATS:
10320 		dp_get_host_peer_stats((struct cdp_soc_t *)pdev->soc,
10321 				       req->peer_addr);
10322 		break;
10323 	case TXRX_SOC_CFG_PARAMS:
10324 		dp_print_soc_cfg_params(pdev->soc);
10325 		break;
10326 	case TXRX_PDEV_CFG_PARAMS:
10327 		dp_print_pdev_cfg_params(pdev);
10328 		break;
10329 	case TXRX_NAPI_STATS:
10330 		dp_print_napi_stats(pdev->soc);
10331 		break;
10332 	case TXRX_SOC_INTERRUPT_STATS:
10333 		dp_print_soc_interrupt_stats(pdev->soc);
10334 		break;
10335 	case TXRX_SOC_FSE_STATS:
10336 		dp_rx_dump_fisa_table(pdev->soc);
10337 		break;
10338 	case TXRX_HAL_REG_WRITE_STATS:
10339 		hal_dump_reg_write_stats(pdev->soc->hal_soc);
10340 		hal_dump_reg_write_srng_stats(pdev->soc->hal_soc);
10341 		break;
10342 	case TXRX_SOC_REO_HW_DESC_DUMP:
10343 		dp_get_rx_reo_queue_info((struct cdp_soc_t *)pdev->soc,
10344 					 vdev->vdev_id);
10345 		break;
10346 	case TXRX_SOC_WBM_IDLE_HPTP_DUMP:
10347 		dp_dump_wbm_idle_hptp(pdev->soc, pdev);
10348 		break;
10349 	case TXRX_SRNG_USAGE_WM_STATS:
10350 		/* Dump usage watermark stats for all SRNGs */
10351 		dp_dump_srng_high_wm_stats(soc, 0xFF);
10352 		break;
10353 	default:
10354 		dp_info("Wrong Input For TxRx Host Stats");
10355 		dp_txrx_stats_help();
10356 		break;
10357 	}
10358 	return 0;
10359 }
10360 
10361 /**
10362  * dp_pdev_tid_stats_ingress_inc() - increment ingress_stack counter
10363  * @pdev: pdev handle
10364  * @val: increase in value
10365  *
10366  * Return: void
10367  */
10368 static void
10369 dp_pdev_tid_stats_ingress_inc(struct dp_pdev *pdev, uint32_t val)
10370 {
10371 	pdev->stats.tid_stats.ingress_stack += val;
10372 }
10373 
10374 /**
10375  * dp_pdev_tid_stats_osif_drop() - increment osif_drop counter
10376  * @pdev: pdev handle
10377  * @val: increase in value
10378  *
10379  * Return: void
10380  */
10381 static void
10382 dp_pdev_tid_stats_osif_drop(struct dp_pdev *pdev, uint32_t val)
10383 {
10384 	pdev->stats.tid_stats.osif_drop += val;
10385 }
10386 
10387 /**
10388  * dp_get_fw_peer_stats()- function to print peer stats
10389  * @soc: soc handle
10390  * @pdev_id: id of the pdev handle
10391  * @mac_addr: mac address of the peer
10392  * @cap: Type of htt stats requested
10393  * @is_wait: if set, wait on completion from firmware response
10394  *
10395  * Currently Supporting only MAC ID based requests Only
10396  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
10397  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
10398  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
10399  *
10400  * Return: QDF_STATUS
10401  */
10402 static QDF_STATUS
10403 dp_get_fw_peer_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
10404 		     uint8_t *mac_addr,
10405 		     uint32_t cap, uint32_t is_wait)
10406 {
10407 	int i;
10408 	uint32_t config_param0 = 0;
10409 	uint32_t config_param1 = 0;
10410 	uint32_t config_param2 = 0;
10411 	uint32_t config_param3 = 0;
10412 	struct dp_pdev *pdev =
10413 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10414 						   pdev_id);
10415 
10416 	if (!pdev)
10417 		return QDF_STATUS_E_FAILURE;
10418 
10419 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
10420 	config_param0 |= (1 << (cap + 1));
10421 
10422 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
10423 		config_param1 |= (1 << i);
10424 	}
10425 
10426 	config_param2 |= (mac_addr[0] & 0x000000ff);
10427 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
10428 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
10429 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
10430 
10431 	config_param3 |= (mac_addr[4] & 0x000000ff);
10432 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
10433 
10434 	if (is_wait) {
10435 		qdf_event_reset(&pdev->fw_peer_stats_event);
10436 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
10437 					  config_param0, config_param1,
10438 					  config_param2, config_param3,
10439 					  0, DBG_STATS_COOKIE_DP_STATS, 0);
10440 		qdf_wait_single_event(&pdev->fw_peer_stats_event,
10441 				      DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
10442 	} else {
10443 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
10444 					  config_param0, config_param1,
10445 					  config_param2, config_param3,
10446 					  0, DBG_STATS_COOKIE_DEFAULT, 0);
10447 	}
10448 
10449 	return QDF_STATUS_SUCCESS;
10450 
10451 }
10452 
10453 /* This struct definition will be removed from here
10454  * once it get added in FW headers*/
10455 struct httstats_cmd_req {
10456     uint32_t    config_param0;
10457     uint32_t    config_param1;
10458     uint32_t    config_param2;
10459     uint32_t    config_param3;
10460     int cookie;
10461     u_int8_t    stats_id;
10462 };
10463 
10464 /**
10465  * dp_get_htt_stats: function to process the httstas request
10466  * @soc: DP soc handle
10467  * @pdev_id: id of pdev handle
10468  * @data: pointer to request data
10469  * @data_len: length for request data
10470  *
10471  * Return: QDF_STATUS
10472  */
10473 static QDF_STATUS
10474 dp_get_htt_stats(struct cdp_soc_t *soc, uint8_t pdev_id, void *data,
10475 		 uint32_t data_len)
10476 {
10477 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
10478 	struct dp_pdev *pdev =
10479 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10480 						   pdev_id);
10481 
10482 	if (!pdev)
10483 		return QDF_STATUS_E_FAILURE;
10484 
10485 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
10486 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
10487 				req->config_param0, req->config_param1,
10488 				req->config_param2, req->config_param3,
10489 				req->cookie, DBG_STATS_COOKIE_DEFAULT, 0);
10490 
10491 	return QDF_STATUS_SUCCESS;
10492 }
10493 
10494 /**
10495  * dp_set_pdev_tidmap_prty_wifi3() - update tidmap priority in pdev
10496  * @pdev: DP_PDEV handle
10497  * @prio: tidmap priority value passed by the user
10498  *
10499  * Return: QDF_STATUS_SUCCESS on success
10500  */
10501 static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct dp_pdev *pdev,
10502 						uint8_t prio)
10503 {
10504 	struct dp_soc *soc = pdev->soc;
10505 
10506 	soc->tidmap_prty = prio;
10507 
10508 	hal_tx_set_tidmap_prty(soc->hal_soc, prio);
10509 	return QDF_STATUS_SUCCESS;
10510 }
10511 
10512 /**
10513  * dp_get_peer_param: function to get parameters in peer
10514  * @cdp_soc: DP soc handle
10515  * @vdev_id: id of vdev handle
10516  * @peer_mac: peer mac address
10517  * @param: parameter type to be set
10518  * @val: address of buffer
10519  *
10520  * Return: val
10521  */
10522 static QDF_STATUS dp_get_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
10523 				    uint8_t *peer_mac,
10524 				    enum cdp_peer_param_type param,
10525 				    cdp_config_param_type *val)
10526 {
10527 	return QDF_STATUS_SUCCESS;
10528 }
10529 
10530 /**
10531  * dp_set_peer_param: function to set parameters in peer
10532  * @cdp_soc: DP soc handle
10533  * @vdev_id: id of vdev handle
10534  * @peer_mac: peer mac address
10535  * @param: parameter type to be set
10536  * @val: value of parameter to be set
10537  *
10538  * Return: 0 for success. nonzero for failure.
10539  */
10540 static QDF_STATUS dp_set_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
10541 				    uint8_t *peer_mac,
10542 				    enum cdp_peer_param_type param,
10543 				    cdp_config_param_type val)
10544 {
10545 	struct dp_peer *peer =
10546 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
10547 						       peer_mac, 0, vdev_id,
10548 						       DP_MOD_ID_CDP);
10549 	struct dp_txrx_peer *txrx_peer;
10550 
10551 	if (!peer)
10552 		return QDF_STATUS_E_FAILURE;
10553 
10554 	txrx_peer = peer->txrx_peer;
10555 	if (!txrx_peer) {
10556 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10557 		return QDF_STATUS_E_FAILURE;
10558 	}
10559 
10560 	switch (param) {
10561 	case CDP_CONFIG_NAWDS:
10562 		txrx_peer->nawds_enabled = val.cdp_peer_param_nawds;
10563 		break;
10564 	case CDP_CONFIG_ISOLATION:
10565 		dp_set_peer_isolation(txrx_peer, val.cdp_peer_param_isolation);
10566 		break;
10567 	case CDP_CONFIG_IN_TWT:
10568 		txrx_peer->in_twt = !!(val.cdp_peer_param_in_twt);
10569 		break;
10570 	default:
10571 		break;
10572 	}
10573 
10574 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10575 
10576 	return QDF_STATUS_SUCCESS;
10577 }
10578 
10579 /**
10580  * dp_get_pdev_param() - function to get parameters from pdev
10581  * @cdp_soc: DP soc handle
10582  * @pdev_id: id of pdev handle
10583  * @param: parameter type to be get
10584  * @val: buffer for value
10585  *
10586  * Return: status
10587  */
10588 static QDF_STATUS dp_get_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
10589 				    enum cdp_pdev_param_type param,
10590 				    cdp_config_param_type *val)
10591 {
10592 	struct cdp_pdev *pdev = (struct cdp_pdev *)
10593 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
10594 						   pdev_id);
10595 	if (!pdev)
10596 		return QDF_STATUS_E_FAILURE;
10597 
10598 	switch (param) {
10599 	case CDP_CONFIG_VOW:
10600 		val->cdp_pdev_param_cfg_vow =
10601 				((struct dp_pdev *)pdev)->delay_stats_flag;
10602 		break;
10603 	case CDP_TX_PENDING:
10604 		val->cdp_pdev_param_tx_pending = dp_get_tx_pending(pdev);
10605 		break;
10606 	case CDP_FILTER_MCAST_DATA:
10607 		val->cdp_pdev_param_fltr_mcast =
10608 				dp_monitor_pdev_get_filter_mcast_data(pdev);
10609 		break;
10610 	case CDP_FILTER_NO_DATA:
10611 		val->cdp_pdev_param_fltr_none =
10612 				dp_monitor_pdev_get_filter_non_data(pdev);
10613 		break;
10614 	case CDP_FILTER_UCAST_DATA:
10615 		val->cdp_pdev_param_fltr_ucast =
10616 				dp_monitor_pdev_get_filter_ucast_data(pdev);
10617 		break;
10618 	case CDP_MONITOR_CHANNEL:
10619 		val->cdp_pdev_param_monitor_chan =
10620 			dp_monitor_get_chan_num((struct dp_pdev *)pdev);
10621 		break;
10622 	case CDP_MONITOR_FREQUENCY:
10623 		val->cdp_pdev_param_mon_freq =
10624 			dp_monitor_get_chan_freq((struct dp_pdev *)pdev);
10625 		break;
10626 	default:
10627 		return QDF_STATUS_E_FAILURE;
10628 	}
10629 
10630 	return QDF_STATUS_SUCCESS;
10631 }
10632 
10633 /**
10634  * dp_set_pdev_param() - function to set parameters in pdev
10635  * @cdp_soc: DP soc handle
10636  * @pdev_id: id of pdev handle
10637  * @param: parameter type to be set
10638  * @val: value of parameter to be set
10639  *
10640  * Return: 0 for success. nonzero for failure.
10641  */
10642 static QDF_STATUS dp_set_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
10643 				    enum cdp_pdev_param_type param,
10644 				    cdp_config_param_type val)
10645 {
10646 	int target_type;
10647 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
10648 	struct dp_pdev *pdev =
10649 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
10650 						   pdev_id);
10651 	enum reg_wifi_band chan_band;
10652 
10653 	if (!pdev)
10654 		return QDF_STATUS_E_FAILURE;
10655 
10656 	target_type = hal_get_target_type(soc->hal_soc);
10657 	switch (target_type) {
10658 	case TARGET_TYPE_QCA6750:
10659 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC0_LMAC_ID;
10660 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
10661 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
10662 		break;
10663 	case TARGET_TYPE_KIWI:
10664 	case TARGET_TYPE_MANGO:
10665 	case TARGET_TYPE_PEACH:
10666 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC0_LMAC_ID;
10667 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
10668 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
10669 		break;
10670 	default:
10671 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC1_LMAC_ID;
10672 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
10673 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
10674 		break;
10675 	}
10676 
10677 	switch (param) {
10678 	case CDP_CONFIG_TX_CAPTURE:
10679 		return dp_monitor_config_debug_sniffer(pdev,
10680 						val.cdp_pdev_param_tx_capture);
10681 	case CDP_CONFIG_DEBUG_SNIFFER:
10682 		return dp_monitor_config_debug_sniffer(pdev,
10683 						val.cdp_pdev_param_dbg_snf);
10684 	case CDP_CONFIG_BPR_ENABLE:
10685 		return dp_monitor_set_bpr_enable(pdev,
10686 						 val.cdp_pdev_param_bpr_enable);
10687 	case CDP_CONFIG_PRIMARY_RADIO:
10688 		pdev->is_primary = val.cdp_pdev_param_primary_radio;
10689 		break;
10690 	case CDP_CONFIG_CAPTURE_LATENCY:
10691 		pdev->latency_capture_enable = val.cdp_pdev_param_cptr_latcy;
10692 		break;
10693 	case CDP_INGRESS_STATS:
10694 		dp_pdev_tid_stats_ingress_inc(pdev,
10695 					      val.cdp_pdev_param_ingrs_stats);
10696 		break;
10697 	case CDP_OSIF_DROP:
10698 		dp_pdev_tid_stats_osif_drop(pdev,
10699 					    val.cdp_pdev_param_osif_drop);
10700 		break;
10701 	case CDP_CONFIG_ENH_RX_CAPTURE:
10702 		return dp_monitor_config_enh_rx_capture(pdev,
10703 						val.cdp_pdev_param_en_rx_cap);
10704 	case CDP_CONFIG_ENH_TX_CAPTURE:
10705 		return dp_monitor_config_enh_tx_capture(pdev,
10706 						val.cdp_pdev_param_en_tx_cap);
10707 	case CDP_CONFIG_HMMC_TID_OVERRIDE:
10708 		pdev->hmmc_tid_override_en = val.cdp_pdev_param_hmmc_tid_ovrd;
10709 		break;
10710 	case CDP_CONFIG_HMMC_TID_VALUE:
10711 		pdev->hmmc_tid = val.cdp_pdev_param_hmmc_tid;
10712 		break;
10713 	case CDP_CHAN_NOISE_FLOOR:
10714 		pdev->chan_noise_floor = val.cdp_pdev_param_chn_noise_flr;
10715 		break;
10716 	case CDP_TIDMAP_PRTY:
10717 		dp_set_pdev_tidmap_prty_wifi3(pdev,
10718 					      val.cdp_pdev_param_tidmap_prty);
10719 		break;
10720 	case CDP_FILTER_NEIGH_PEERS:
10721 		dp_monitor_set_filter_neigh_peers(pdev,
10722 					val.cdp_pdev_param_fltr_neigh_peers);
10723 		break;
10724 	case CDP_MONITOR_CHANNEL:
10725 		dp_monitor_set_chan_num(pdev, val.cdp_pdev_param_monitor_chan);
10726 		break;
10727 	case CDP_MONITOR_FREQUENCY:
10728 		chan_band = wlan_reg_freq_to_band(val.cdp_pdev_param_mon_freq);
10729 		dp_monitor_set_chan_freq(pdev, val.cdp_pdev_param_mon_freq);
10730 		dp_monitor_set_chan_band(pdev, chan_band);
10731 		break;
10732 	case CDP_CONFIG_BSS_COLOR:
10733 		dp_monitor_set_bsscolor(pdev, val.cdp_pdev_param_bss_color);
10734 		break;
10735 	case CDP_SET_ATF_STATS_ENABLE:
10736 		dp_monitor_set_atf_stats_enable(pdev,
10737 					val.cdp_pdev_param_atf_stats_enable);
10738 		break;
10739 	case CDP_CONFIG_SPECIAL_VAP:
10740 		dp_monitor_pdev_config_scan_spcl_vap(pdev,
10741 					val.cdp_pdev_param_config_special_vap);
10742 		dp_monitor_vdev_set_monitor_mode_buf_rings(pdev);
10743 		break;
10744 	case CDP_RESET_SCAN_SPCL_VAP_STATS_ENABLE:
10745 		dp_monitor_pdev_reset_scan_spcl_vap_stats_enable(pdev,
10746 				val.cdp_pdev_param_reset_scan_spcl_vap_stats_enable);
10747 		break;
10748 	case CDP_CONFIG_ENHANCED_STATS_ENABLE:
10749 		pdev->enhanced_stats_en = val.cdp_pdev_param_enhanced_stats_enable;
10750 		break;
10751 	case CDP_ISOLATION:
10752 		pdev->isolation = val.cdp_pdev_param_isolation;
10753 		break;
10754 	case CDP_CONFIG_UNDECODED_METADATA_CAPTURE_ENABLE:
10755 		return dp_monitor_config_undecoded_metadata_capture(pdev,
10756 				val.cdp_pdev_param_undecoded_metadata_enable);
10757 		break;
10758 	default:
10759 		return QDF_STATUS_E_INVAL;
10760 	}
10761 	return QDF_STATUS_SUCCESS;
10762 }
10763 
10764 #ifdef QCA_UNDECODED_METADATA_SUPPORT
10765 static
10766 QDF_STATUS dp_set_pdev_phyrx_error_mask(struct cdp_soc_t *cdp_soc,
10767 					uint8_t pdev_id, uint32_t mask,
10768 					uint32_t mask_cont)
10769 {
10770 	struct dp_pdev *pdev =
10771 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
10772 						   pdev_id);
10773 
10774 	if (!pdev)
10775 		return QDF_STATUS_E_FAILURE;
10776 
10777 	return dp_monitor_config_undecoded_metadata_phyrx_error_mask(pdev,
10778 				mask, mask_cont);
10779 }
10780 
10781 static
10782 QDF_STATUS dp_get_pdev_phyrx_error_mask(struct cdp_soc_t *cdp_soc,
10783 					uint8_t pdev_id, uint32_t *mask,
10784 					uint32_t *mask_cont)
10785 {
10786 	struct dp_pdev *pdev =
10787 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
10788 						   pdev_id);
10789 
10790 	if (!pdev)
10791 		return QDF_STATUS_E_FAILURE;
10792 
10793 	return dp_monitor_get_undecoded_metadata_phyrx_error_mask(pdev,
10794 				mask, mask_cont);
10795 }
10796 #endif
10797 
10798 #ifdef QCA_PEER_EXT_STATS
10799 static void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
10800 					  qdf_nbuf_t nbuf)
10801 {
10802 	struct dp_peer *peer = NULL;
10803 	uint16_t peer_id, ring_id;
10804 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
10805 	struct dp_peer_delay_stats *delay_stats = NULL;
10806 
10807 	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
10808 	if (peer_id > soc->max_peer_id)
10809 		return;
10810 
10811 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
10812 	if (qdf_unlikely(!peer))
10813 		return;
10814 
10815 	if (qdf_unlikely(!peer->txrx_peer)) {
10816 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10817 		return;
10818 	}
10819 
10820 	if (qdf_likely(peer->txrx_peer->delay_stats)) {
10821 		delay_stats = peer->txrx_peer->delay_stats;
10822 		ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
10823 		dp_rx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
10824 					nbuf);
10825 	}
10826 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10827 }
10828 #else
10829 static inline void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
10830 						 qdf_nbuf_t nbuf)
10831 {
10832 }
10833 #endif
10834 
10835 /**
10836  * dp_calculate_delay_stats() - function to get rx delay stats
10837  * @cdp_soc: DP soc handle
10838  * @vdev_id: id of DP vdev handle
10839  * @nbuf: skb
10840  *
10841  * Return: QDF_STATUS
10842  */
10843 static QDF_STATUS
10844 dp_calculate_delay_stats(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
10845 			 qdf_nbuf_t nbuf)
10846 {
10847 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
10848 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10849 						     DP_MOD_ID_CDP);
10850 
10851 	if (!vdev)
10852 		return QDF_STATUS_SUCCESS;
10853 
10854 	if (vdev->pdev->delay_stats_flag)
10855 		dp_rx_compute_delay(vdev, nbuf);
10856 	else
10857 		dp_rx_update_peer_delay_stats(soc, nbuf);
10858 
10859 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10860 	return QDF_STATUS_SUCCESS;
10861 }
10862 
10863 /**
10864  * dp_get_vdev_param() - function to get parameters from vdev
10865  * @cdp_soc: DP soc handle
10866  * @vdev_id: id of DP vdev handle
10867  * @param: parameter type to get value
10868  * @val: buffer address
10869  *
10870  * Return: status
10871  */
10872 static QDF_STATUS dp_get_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
10873 				    enum cdp_vdev_param_type param,
10874 				    cdp_config_param_type *val)
10875 {
10876 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
10877 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10878 						     DP_MOD_ID_CDP);
10879 
10880 	if (!vdev)
10881 		return QDF_STATUS_E_FAILURE;
10882 
10883 	switch (param) {
10884 	case CDP_ENABLE_WDS:
10885 		val->cdp_vdev_param_wds = vdev->wds_enabled;
10886 		break;
10887 	case CDP_ENABLE_MEC:
10888 		val->cdp_vdev_param_mec = vdev->mec_enabled;
10889 		break;
10890 	case CDP_ENABLE_DA_WAR:
10891 		val->cdp_vdev_param_da_war = vdev->pdev->soc->da_war_enabled;
10892 		break;
10893 	case CDP_ENABLE_IGMP_MCAST_EN:
10894 		val->cdp_vdev_param_igmp_mcast_en = vdev->igmp_mcast_enhanc_en;
10895 		break;
10896 	case CDP_ENABLE_MCAST_EN:
10897 		val->cdp_vdev_param_mcast_en = vdev->mcast_enhancement_en;
10898 		break;
10899 	case CDP_ENABLE_HLOS_TID_OVERRIDE:
10900 		val->cdp_vdev_param_hlos_tid_override =
10901 			    dp_vdev_get_hlos_tid_override((struct cdp_vdev *)vdev);
10902 		break;
10903 	case CDP_ENABLE_PEER_AUTHORIZE:
10904 		val->cdp_vdev_param_peer_authorize =
10905 			    vdev->peer_authorize;
10906 		break;
10907 	case CDP_TX_ENCAP_TYPE:
10908 		val->cdp_vdev_param_tx_encap = vdev->tx_encap_type;
10909 		break;
10910 	case CDP_ENABLE_CIPHER:
10911 		val->cdp_vdev_param_cipher_en = vdev->sec_type;
10912 		break;
10913 #ifdef WLAN_SUPPORT_MESH_LATENCY
10914 	case CDP_ENABLE_PEER_TID_LATENCY:
10915 		val->cdp_vdev_param_peer_tid_latency_enable =
10916 			vdev->peer_tid_latency_enabled;
10917 		break;
10918 	case CDP_SET_VAP_MESH_TID:
10919 		val->cdp_vdev_param_mesh_tid =
10920 				vdev->mesh_tid_latency_config.latency_tid;
10921 		break;
10922 #endif
10923 	case CDP_DROP_3ADDR_MCAST:
10924 		val->cdp_drop_3addr_mcast = vdev->drop_3addr_mcast;
10925 		break;
10926 	case CDP_SET_MCAST_VDEV:
10927 		soc->arch_ops.txrx_get_vdev_mcast_param(soc, vdev, val);
10928 		break;
10929 #ifdef QCA_SUPPORT_WDS_EXTENDED
10930 	case CDP_DROP_TX_MCAST:
10931 		val->cdp_drop_tx_mcast = vdev->drop_tx_mcast;
10932 		break;
10933 #endif
10934 
10935 #ifdef MESH_MODE_SUPPORT
10936 	case CDP_MESH_RX_FILTER:
10937 		val->cdp_vdev_param_mesh_rx_filter = vdev->mesh_rx_filter;
10938 		break;
10939 	case CDP_MESH_MODE:
10940 		val->cdp_vdev_param_mesh_mode = vdev->mesh_vdev;
10941 		break;
10942 #endif
10943 	case CDP_ENABLE_NAWDS:
10944 		val->cdp_vdev_param_nawds = vdev->nawds_enabled;
10945 		break;
10946 
10947 	case CDP_ENABLE_WRAP:
10948 		val->cdp_vdev_param_wrap = vdev->wrap_vdev;
10949 		break;
10950 
10951 #ifdef DP_TRAFFIC_END_INDICATION
10952 	case CDP_ENABLE_TRAFFIC_END_INDICATION:
10953 		val->cdp_vdev_param_traffic_end_ind = vdev->traffic_end_ind_en;
10954 		break;
10955 #endif
10956 
10957 	default:
10958 		dp_cdp_err("%pK: param value %d is wrong",
10959 			   soc, param);
10960 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10961 		return QDF_STATUS_E_FAILURE;
10962 	}
10963 
10964 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10965 	return QDF_STATUS_SUCCESS;
10966 }
10967 
10968 /**
10969  * dp_set_vdev_param() - function to set parameters in vdev
10970  * @cdp_soc: DP soc handle
10971  * @vdev_id: id of DP vdev handle
10972  * @param: parameter type to get value
10973  * @val: value
10974  *
10975  * Return: QDF_STATUS
10976  */
10977 static QDF_STATUS
10978 dp_set_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
10979 		  enum cdp_vdev_param_type param, cdp_config_param_type val)
10980 {
10981 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
10982 	struct dp_vdev *vdev =
10983 		dp_vdev_get_ref_by_id(dsoc, vdev_id, DP_MOD_ID_CDP);
10984 	uint32_t var = 0;
10985 
10986 	if (!vdev)
10987 		return QDF_STATUS_E_FAILURE;
10988 
10989 	switch (param) {
10990 	case CDP_ENABLE_WDS:
10991 		dp_cdp_err("%pK: wds_enable %d for vdev(%pK) id(%d)\n",
10992 			   dsoc, val.cdp_vdev_param_wds, vdev, vdev->vdev_id);
10993 		vdev->wds_enabled = val.cdp_vdev_param_wds;
10994 		break;
10995 	case CDP_ENABLE_MEC:
10996 		dp_cdp_err("%pK: mec_enable %d for vdev(%pK) id(%d)\n",
10997 			   dsoc, val.cdp_vdev_param_mec, vdev, vdev->vdev_id);
10998 		vdev->mec_enabled = val.cdp_vdev_param_mec;
10999 		break;
11000 	case CDP_ENABLE_DA_WAR:
11001 		dp_cdp_err("%pK: da_war_enable %d for vdev(%pK) id(%d)\n",
11002 			   dsoc, val.cdp_vdev_param_da_war, vdev, vdev->vdev_id);
11003 		vdev->pdev->soc->da_war_enabled = val.cdp_vdev_param_da_war;
11004 		dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
11005 					     vdev->pdev->soc));
11006 		break;
11007 	case CDP_ENABLE_NAWDS:
11008 		vdev->nawds_enabled = val.cdp_vdev_param_nawds;
11009 		break;
11010 	case CDP_ENABLE_MCAST_EN:
11011 		vdev->mcast_enhancement_en = val.cdp_vdev_param_mcast_en;
11012 		break;
11013 	case CDP_ENABLE_IGMP_MCAST_EN:
11014 		vdev->igmp_mcast_enhanc_en = val.cdp_vdev_param_igmp_mcast_en;
11015 		break;
11016 	case CDP_ENABLE_PROXYSTA:
11017 		vdev->proxysta_vdev = val.cdp_vdev_param_proxysta;
11018 		break;
11019 	case CDP_UPDATE_TDLS_FLAGS:
11020 		vdev->tdls_link_connected = val.cdp_vdev_param_tdls_flags;
11021 		break;
11022 	case CDP_CFG_WDS_AGING_TIMER:
11023 		var = val.cdp_vdev_param_aging_tmr;
11024 		if (!var)
11025 			qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
11026 		else if (var != vdev->wds_aging_timer_val)
11027 			qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, var);
11028 
11029 		vdev->wds_aging_timer_val = var;
11030 		break;
11031 	case CDP_ENABLE_AP_BRIDGE:
11032 		if (wlan_op_mode_sta != vdev->opmode)
11033 			vdev->ap_bridge_enabled = val.cdp_vdev_param_ap_brdg_en;
11034 		else
11035 			vdev->ap_bridge_enabled = false;
11036 		break;
11037 	case CDP_ENABLE_CIPHER:
11038 		vdev->sec_type = val.cdp_vdev_param_cipher_en;
11039 		break;
11040 	case CDP_ENABLE_QWRAP_ISOLATION:
11041 		vdev->isolation_vdev = val.cdp_vdev_param_qwrap_isolation;
11042 		break;
11043 	case CDP_UPDATE_MULTIPASS:
11044 		vdev->multipass_en = val.cdp_vdev_param_update_multipass;
11045 		break;
11046 	case CDP_TX_ENCAP_TYPE:
11047 		vdev->tx_encap_type = val.cdp_vdev_param_tx_encap;
11048 		break;
11049 	case CDP_RX_DECAP_TYPE:
11050 		vdev->rx_decap_type = val.cdp_vdev_param_rx_decap;
11051 		break;
11052 	case CDP_TID_VDEV_PRTY:
11053 		vdev->tidmap_prty = val.cdp_vdev_param_tidmap_prty;
11054 		break;
11055 	case CDP_TIDMAP_TBL_ID:
11056 		vdev->tidmap_tbl_id = val.cdp_vdev_param_tidmap_tbl_id;
11057 		break;
11058 #ifdef MESH_MODE_SUPPORT
11059 	case CDP_MESH_RX_FILTER:
11060 		dp_vdev_set_mesh_rx_filter((struct cdp_vdev *)vdev,
11061 					   val.cdp_vdev_param_mesh_rx_filter);
11062 		break;
11063 	case CDP_MESH_MODE:
11064 		dp_vdev_set_mesh_mode((struct cdp_vdev *)vdev,
11065 				      val.cdp_vdev_param_mesh_mode);
11066 		break;
11067 #endif
11068 	case CDP_ENABLE_HLOS_TID_OVERRIDE:
11069 		dp_info("vdev_id %d enable hlod tid override %d", vdev_id,
11070 			val.cdp_vdev_param_hlos_tid_override);
11071 		dp_vdev_set_hlos_tid_override(vdev,
11072 				val.cdp_vdev_param_hlos_tid_override);
11073 		break;
11074 #ifdef QCA_SUPPORT_WDS_EXTENDED
11075 	case CDP_CFG_WDS_EXT:
11076 		if (vdev->opmode == wlan_op_mode_ap)
11077 			vdev->wds_ext_enabled = val.cdp_vdev_param_wds_ext;
11078 		break;
11079 	case CDP_DROP_TX_MCAST:
11080 		dp_info("vdev_id %d drop tx mcast :%d", vdev_id,
11081 			val.cdp_drop_tx_mcast);
11082 		vdev->drop_tx_mcast = val.cdp_drop_tx_mcast;
11083 		break;
11084 #endif
11085 	case CDP_ENABLE_PEER_AUTHORIZE:
11086 		vdev->peer_authorize = val.cdp_vdev_param_peer_authorize;
11087 		break;
11088 #ifdef WLAN_SUPPORT_MESH_LATENCY
11089 	case CDP_ENABLE_PEER_TID_LATENCY:
11090 		dp_info("vdev_id %d enable peer tid latency %d", vdev_id,
11091 			val.cdp_vdev_param_peer_tid_latency_enable);
11092 		vdev->peer_tid_latency_enabled =
11093 			val.cdp_vdev_param_peer_tid_latency_enable;
11094 		break;
11095 	case CDP_SET_VAP_MESH_TID:
11096 		dp_info("vdev_id %d enable peer tid latency %d", vdev_id,
11097 			val.cdp_vdev_param_mesh_tid);
11098 		vdev->mesh_tid_latency_config.latency_tid
11099 				= val.cdp_vdev_param_mesh_tid;
11100 		break;
11101 #endif
11102 #ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE
11103 	case CDP_SKIP_BAR_UPDATE_AP:
11104 		dp_info("vdev_id %d skip BAR update: %u", vdev_id,
11105 			val.cdp_skip_bar_update);
11106 		vdev->skip_bar_update = val.cdp_skip_bar_update;
11107 		vdev->skip_bar_update_last_ts = 0;
11108 		break;
11109 #endif
11110 	case CDP_DROP_3ADDR_MCAST:
11111 		dp_info("vdev_id %d drop 3 addr mcast :%d", vdev_id,
11112 			val.cdp_drop_3addr_mcast);
11113 		vdev->drop_3addr_mcast = val.cdp_drop_3addr_mcast;
11114 		break;
11115 	case CDP_ENABLE_WRAP:
11116 		vdev->wrap_vdev = val.cdp_vdev_param_wrap;
11117 		break;
11118 #ifdef DP_TRAFFIC_END_INDICATION
11119 	case CDP_ENABLE_TRAFFIC_END_INDICATION:
11120 		vdev->traffic_end_ind_en = val.cdp_vdev_param_traffic_end_ind;
11121 		break;
11122 #endif
11123 #ifdef FEATURE_DIRECT_LINK
11124 	case CDP_VDEV_TX_TO_FW:
11125 		dp_info("vdev_id %d to_fw :%d", vdev_id, val.cdp_vdev_tx_to_fw);
11126 		vdev->to_fw = val.cdp_vdev_tx_to_fw;
11127 		break;
11128 #endif
11129 	default:
11130 		break;
11131 	}
11132 
11133 	dp_tx_vdev_update_search_flags((struct dp_vdev *)vdev);
11134 	dsoc->arch_ops.txrx_set_vdev_param(dsoc, vdev, param, val);
11135 
11136 	/* Update PDEV flags as VDEV flags are updated */
11137 	dp_pdev_update_fast_rx_flag(dsoc, vdev->pdev);
11138 	dp_vdev_unref_delete(dsoc, vdev, DP_MOD_ID_CDP);
11139 
11140 	return QDF_STATUS_SUCCESS;
11141 }
11142 
11143 /**
11144  * dp_set_psoc_param: function to set parameters in psoc
11145  * @cdp_soc: DP soc handle
11146  * @param: parameter type to be set
11147  * @val: value of parameter to be set
11148  *
11149  * Return: QDF_STATUS
11150  */
11151 static QDF_STATUS
11152 dp_set_psoc_param(struct cdp_soc_t *cdp_soc,
11153 		  enum cdp_psoc_param_type param, cdp_config_param_type val)
11154 {
11155 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
11156 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->wlan_cfg_ctx;
11157 
11158 	switch (param) {
11159 	case CDP_ENABLE_RATE_STATS:
11160 		soc->peerstats_enabled = val.cdp_psoc_param_en_rate_stats;
11161 		break;
11162 	case CDP_SET_NSS_CFG:
11163 		wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx,
11164 					    val.cdp_psoc_param_en_nss_cfg);
11165 		/*
11166 		 * TODO: masked out based on the per offloaded radio
11167 		 */
11168 		switch (val.cdp_psoc_param_en_nss_cfg) {
11169 		case dp_nss_cfg_default:
11170 			break;
11171 		case dp_nss_cfg_first_radio:
11172 		/*
11173 		 * This configuration is valid for single band radio which
11174 		 * is also NSS offload.
11175 		 */
11176 		case dp_nss_cfg_dbdc:
11177 		case dp_nss_cfg_dbtc:
11178 			wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
11179 			wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
11180 			wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
11181 			wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
11182 			break;
11183 		default:
11184 			dp_cdp_err("%pK: Invalid offload config %d",
11185 				   soc, val.cdp_psoc_param_en_nss_cfg);
11186 		}
11187 
11188 			dp_cdp_err("%pK: nss-wifi<0> nss config is enabled"
11189 				   , soc);
11190 		break;
11191 	case CDP_SET_PREFERRED_HW_MODE:
11192 		soc->preferred_hw_mode = val.cdp_psoc_param_preferred_hw_mode;
11193 		break;
11194 	case CDP_IPA_ENABLE:
11195 		soc->wlan_cfg_ctx->ipa_enabled = val.cdp_ipa_enabled;
11196 		break;
11197 	case CDP_CFG_VDEV_STATS_HW_OFFLOAD:
11198 		wlan_cfg_set_vdev_stats_hw_offload_config(wlan_cfg_ctx,
11199 				val.cdp_psoc_param_vdev_stats_hw_offload);
11200 		break;
11201 	case CDP_SAWF_ENABLE:
11202 		wlan_cfg_set_sawf_config(wlan_cfg_ctx, val.cdp_sawf_enabled);
11203 		break;
11204 	case CDP_UMAC_RST_SKEL_ENABLE:
11205 		dp_umac_rst_skel_enable_update(soc, val.cdp_umac_rst_skel);
11206 		break;
11207 	case CDP_SAWF_STATS:
11208 		wlan_cfg_set_sawf_stats_config(wlan_cfg_ctx,
11209 					       val.cdp_sawf_stats);
11210 		break;
11211 	default:
11212 		break;
11213 	}
11214 
11215 	return QDF_STATUS_SUCCESS;
11216 }
11217 
11218 /**
11219  * dp_get_psoc_param: function to get parameters in soc
11220  * @cdp_soc: DP soc handle
11221  * @param: parameter type to be set
11222  * @val: address of buffer
11223  *
11224  * Return: status
11225  */
11226 static QDF_STATUS dp_get_psoc_param(struct cdp_soc_t *cdp_soc,
11227 				    enum cdp_psoc_param_type param,
11228 				    cdp_config_param_type *val)
11229 {
11230 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
11231 
11232 	if (!soc)
11233 		return QDF_STATUS_E_FAILURE;
11234 
11235 	switch (param) {
11236 	case CDP_CFG_PEER_EXT_STATS:
11237 		val->cdp_psoc_param_pext_stats =
11238 			wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
11239 		break;
11240 	case CDP_CFG_VDEV_STATS_HW_OFFLOAD:
11241 		val->cdp_psoc_param_vdev_stats_hw_offload =
11242 			wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx);
11243 		break;
11244 	case CDP_UMAC_RST_SKEL_ENABLE:
11245 		val->cdp_umac_rst_skel = dp_umac_rst_skel_enable_get(soc);
11246 		break;
11247 	case CDP_PPEDS_ENABLE:
11248 		val->cdp_psoc_param_ppeds_enabled =
11249 			wlan_cfg_get_dp_soc_is_ppeds_enabled(soc->wlan_cfg_ctx);
11250 		break;
11251 	default:
11252 		dp_warn("Invalid param");
11253 		break;
11254 	}
11255 
11256 	return QDF_STATUS_SUCCESS;
11257 }
11258 
11259 /**
11260  * dp_set_vdev_dscp_tid_map_wifi3() - Update Map ID selected for particular vdev
11261  * @cdp_soc: CDP SOC handle
11262  * @vdev_id: id of DP_VDEV handle
11263  * @map_id:ID of map that needs to be updated
11264  *
11265  * Return: QDF_STATUS
11266  */
11267 static QDF_STATUS dp_set_vdev_dscp_tid_map_wifi3(ol_txrx_soc_handle cdp_soc,
11268 						 uint8_t vdev_id,
11269 						 uint8_t map_id)
11270 {
11271 	cdp_config_param_type val;
11272 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
11273 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11274 						     DP_MOD_ID_CDP);
11275 	if (vdev) {
11276 		vdev->dscp_tid_map_id = map_id;
11277 		val.cdp_vdev_param_dscp_tid_map_id = map_id;
11278 		soc->arch_ops.txrx_set_vdev_param(soc,
11279 						  vdev,
11280 						  CDP_UPDATE_DSCP_TO_TID_MAP,
11281 						  val);
11282 		/* Update flag for transmit tid classification */
11283 		if (vdev->dscp_tid_map_id < soc->num_hw_dscp_tid_map)
11284 			vdev->skip_sw_tid_classification |=
11285 				DP_TX_HW_DSCP_TID_MAP_VALID;
11286 		else
11287 			vdev->skip_sw_tid_classification &=
11288 				~DP_TX_HW_DSCP_TID_MAP_VALID;
11289 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11290 		return QDF_STATUS_SUCCESS;
11291 	}
11292 
11293 	return QDF_STATUS_E_FAILURE;
11294 }
11295 
11296 #ifdef DP_RATETABLE_SUPPORT
11297 static int dp_txrx_get_ratekbps(int preamb, int mcs,
11298 				int htflag, int gintval)
11299 {
11300 	uint32_t rix;
11301 	uint16_t ratecode;
11302 	enum cdp_punctured_modes punc_mode = NO_PUNCTURE;
11303 
11304 	return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1,
11305 			       (uint8_t)preamb, 1, punc_mode,
11306 			       &rix, &ratecode);
11307 }
11308 #else
11309 static int dp_txrx_get_ratekbps(int preamb, int mcs,
11310 				int htflag, int gintval)
11311 {
11312 	return 0;
11313 }
11314 #endif
11315 
11316 /**
11317  * dp_txrx_get_pdev_stats() - Returns cdp_pdev_stats
11318  * @soc: DP soc handle
11319  * @pdev_id: id of DP pdev handle
11320  * @pdev_stats: buffer to copy to
11321  *
11322  * Return: status success/failure
11323  */
11324 static QDF_STATUS
11325 dp_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
11326 		       struct cdp_pdev_stats *pdev_stats)
11327 {
11328 	struct dp_pdev *pdev =
11329 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11330 						   pdev_id);
11331 	if (!pdev)
11332 		return QDF_STATUS_E_FAILURE;
11333 
11334 	dp_aggregate_pdev_stats(pdev);
11335 
11336 	qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats));
11337 	return QDF_STATUS_SUCCESS;
11338 }
11339 
11340 /**
11341  * dp_txrx_update_vdev_me_stats() - Update vdev ME stats sent from CDP
11342  * @vdev: DP vdev handle
11343  * @buf: buffer containing specific stats structure
11344  *
11345  * Return: void
11346  */
11347 static void dp_txrx_update_vdev_me_stats(struct dp_vdev *vdev,
11348 					 void *buf)
11349 {
11350 	struct cdp_tx_ingress_stats *host_stats = NULL;
11351 
11352 	if (!buf) {
11353 		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
11354 		return;
11355 	}
11356 	host_stats = (struct cdp_tx_ingress_stats *)buf;
11357 
11358 	DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt,
11359 			 host_stats->mcast_en.mcast_pkt.num,
11360 			 host_stats->mcast_en.mcast_pkt.bytes);
11361 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error,
11362 		     host_stats->mcast_en.dropped_map_error);
11363 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_self_mac,
11364 		     host_stats->mcast_en.dropped_self_mac);
11365 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_send_fail,
11366 		     host_stats->mcast_en.dropped_send_fail);
11367 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast,
11368 		     host_stats->mcast_en.ucast);
11369 	DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc,
11370 		     host_stats->mcast_en.fail_seg_alloc);
11371 	DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail,
11372 		     host_stats->mcast_en.clone_fail);
11373 }
11374 
11375 /**
11376  * dp_txrx_update_vdev_igmp_me_stats() - Update vdev IGMP ME stats sent from CDP
11377  * @vdev: DP vdev handle
11378  * @buf: buffer containing specific stats structure
11379  *
11380  * Return: void
11381  */
11382 static void dp_txrx_update_vdev_igmp_me_stats(struct dp_vdev *vdev,
11383 					      void *buf)
11384 {
11385 	struct cdp_tx_ingress_stats *host_stats = NULL;
11386 
11387 	if (!buf) {
11388 		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
11389 		return;
11390 	}
11391 	host_stats = (struct cdp_tx_ingress_stats *)buf;
11392 
11393 	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_rcvd,
11394 		     host_stats->igmp_mcast_en.igmp_rcvd);
11395 	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_ucast_converted,
11396 		     host_stats->igmp_mcast_en.igmp_ucast_converted);
11397 }
11398 
11399 /**
11400  * dp_txrx_update_vdev_host_stats() - Update stats sent through CDP
11401  * @soc_hdl: DP soc handle
11402  * @vdev_id: id of DP vdev handle
11403  * @buf: buffer containing specific stats structure
11404  * @stats_id: stats type
11405  *
11406  * Return: QDF_STATUS
11407  */
11408 static QDF_STATUS dp_txrx_update_vdev_host_stats(struct cdp_soc_t *soc_hdl,
11409 						 uint8_t vdev_id,
11410 						 void *buf,
11411 						 uint16_t stats_id)
11412 {
11413 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11414 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11415 						     DP_MOD_ID_CDP);
11416 
11417 	if (!vdev) {
11418 		dp_cdp_err("%pK: Invalid vdev handle", soc);
11419 		return QDF_STATUS_E_FAILURE;
11420 	}
11421 
11422 	switch (stats_id) {
11423 	case DP_VDEV_STATS_PKT_CNT_ONLY:
11424 		break;
11425 	case DP_VDEV_STATS_TX_ME:
11426 		dp_txrx_update_vdev_me_stats(vdev, buf);
11427 		dp_txrx_update_vdev_igmp_me_stats(vdev, buf);
11428 		break;
11429 	default:
11430 		qdf_info("Invalid stats_id %d", stats_id);
11431 		break;
11432 	}
11433 
11434 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11435 	return QDF_STATUS_SUCCESS;
11436 }
11437 
11438 /**
11439  * dp_txrx_get_peer_stats() - will return cdp_peer_stats
11440  * @soc: soc handle
11441  * @vdev_id: id of vdev handle
11442  * @peer_mac: mac of DP_PEER handle
11443  * @peer_stats: buffer to copy to
11444  *
11445  * Return: status success/failure
11446  */
11447 static QDF_STATUS
11448 dp_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
11449 		       uint8_t *peer_mac, struct cdp_peer_stats *peer_stats)
11450 {
11451 	struct dp_peer *peer = NULL;
11452 	struct cdp_peer_info peer_info = { 0 };
11453 
11454 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac, false,
11455 				 CDP_WILD_PEER_TYPE);
11456 
11457 	peer = dp_peer_hash_find_wrapper((struct dp_soc *)soc, &peer_info,
11458 					 DP_MOD_ID_CDP);
11459 
11460 	qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats));
11461 
11462 	if (!peer)
11463 		return QDF_STATUS_E_FAILURE;
11464 
11465 	dp_get_peer_stats(peer, peer_stats);
11466 
11467 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11468 
11469 	return QDF_STATUS_SUCCESS;
11470 }
11471 
11472 /**
11473  * dp_txrx_get_peer_stats_param() - will return specified cdp_peer_stats
11474  * @soc: soc handle
11475  * @vdev_id: vdev_id of vdev object
11476  * @peer_mac: mac address of the peer
11477  * @type: enum of required stats
11478  * @buf: buffer to hold the value
11479  *
11480  * Return: status success/failure
11481  */
11482 static QDF_STATUS
11483 dp_txrx_get_peer_stats_param(struct cdp_soc_t *soc, uint8_t vdev_id,
11484 			     uint8_t *peer_mac, enum cdp_peer_stats_type type,
11485 			     cdp_peer_stats_param_t *buf)
11486 {
11487 	QDF_STATUS ret;
11488 	struct dp_peer *peer = NULL;
11489 	struct cdp_peer_info peer_info = { 0 };
11490 
11491 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac, false,
11492 				 CDP_WILD_PEER_TYPE);
11493 
11494 	peer = dp_peer_hash_find_wrapper((struct dp_soc *)soc, &peer_info,
11495 				         DP_MOD_ID_CDP);
11496 
11497 	if (!peer) {
11498 		dp_peer_err("%pK: Invalid Peer for Mac " QDF_MAC_ADDR_FMT,
11499 			    soc, QDF_MAC_ADDR_REF(peer_mac));
11500 		return QDF_STATUS_E_FAILURE;
11501 	}
11502 
11503 	if (type >= cdp_peer_per_pkt_stats_min &&
11504 	    type < cdp_peer_per_pkt_stats_max) {
11505 		ret = dp_txrx_get_peer_per_pkt_stats_param(peer, type, buf);
11506 	} else if (type >= cdp_peer_extd_stats_min &&
11507 		   type < cdp_peer_extd_stats_max) {
11508 		ret = dp_txrx_get_peer_extd_stats_param(peer, type, buf);
11509 	} else {
11510 		dp_err("%pK: Invalid stat type requested", soc);
11511 		ret = QDF_STATUS_E_FAILURE;
11512 	}
11513 
11514 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11515 
11516 	return ret;
11517 }
11518 
11519 /**
11520  * dp_txrx_reset_peer_stats() - reset cdp_peer_stats for particular peer
11521  * @soc_hdl: soc handle
11522  * @vdev_id: id of vdev handle
11523  * @peer_mac: mac of DP_PEER handle
11524  *
11525  * Return: QDF_STATUS
11526  */
11527 #ifdef WLAN_FEATURE_11BE_MLO
11528 static QDF_STATUS
11529 dp_txrx_reset_peer_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
11530 			 uint8_t *peer_mac)
11531 {
11532 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11533 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11534 	struct dp_peer *peer =
11535 			dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
11536 						       vdev_id, DP_MOD_ID_CDP);
11537 
11538 	if (!peer)
11539 		return QDF_STATUS_E_FAILURE;
11540 
11541 	DP_STATS_CLR(peer);
11542 	dp_txrx_peer_stats_clr(peer->txrx_peer);
11543 
11544 	if (IS_MLO_DP_MLD_PEER(peer)) {
11545 		uint8_t i;
11546 		struct dp_peer *link_peer;
11547 		struct dp_soc *link_peer_soc;
11548 		struct dp_mld_link_peers link_peers_info;
11549 
11550 		dp_get_link_peers_ref_from_mld_peer(soc, peer,
11551 						    &link_peers_info,
11552 						    DP_MOD_ID_CDP);
11553 		for (i = 0; i < link_peers_info.num_links; i++) {
11554 			link_peer = link_peers_info.link_peers[i];
11555 			link_peer_soc = link_peer->vdev->pdev->soc;
11556 
11557 			DP_STATS_CLR(link_peer);
11558 			dp_monitor_peer_reset_stats(link_peer_soc, link_peer);
11559 		}
11560 
11561 		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
11562 	} else {
11563 		dp_monitor_peer_reset_stats(soc, peer);
11564 	}
11565 
11566 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11567 
11568 	return status;
11569 }
11570 #else
11571 static QDF_STATUS
11572 dp_txrx_reset_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
11573 			 uint8_t *peer_mac)
11574 {
11575 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11576 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
11577 						      peer_mac, 0, vdev_id,
11578 						      DP_MOD_ID_CDP);
11579 
11580 	if (!peer)
11581 		return QDF_STATUS_E_FAILURE;
11582 
11583 	DP_STATS_CLR(peer);
11584 	dp_txrx_peer_stats_clr(peer->txrx_peer);
11585 	dp_monitor_peer_reset_stats((struct dp_soc *)soc, peer);
11586 
11587 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11588 
11589 	return status;
11590 }
11591 #endif
11592 
11593 /**
11594  * dp_txrx_get_vdev_stats() - Update buffer with cdp_vdev_stats
11595  * @soc_hdl: CDP SoC handle
11596  * @vdev_id: vdev Id
11597  * @buf: buffer for vdev stats
11598  * @is_aggregate: are aggregate stats being collected
11599  *
11600  * Return: int
11601  */
11602 static int dp_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
11603 				  void *buf, bool is_aggregate)
11604 {
11605 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11606 	struct cdp_vdev_stats *vdev_stats;
11607 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11608 						     DP_MOD_ID_CDP);
11609 
11610 	if (!vdev)
11611 		return 1;
11612 
11613 	vdev_stats = (struct cdp_vdev_stats *)buf;
11614 
11615 	if (is_aggregate) {
11616 		dp_aggregate_vdev_stats(vdev, buf);
11617 	} else {
11618 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
11619 	}
11620 
11621 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11622 	return 0;
11623 }
11624 
11625 /**
11626  * dp_get_total_per() - get total per
11627  * @soc: DP soc handle
11628  * @pdev_id: id of DP_PDEV handle
11629  *
11630  * Return: % error rate using retries per packet and success packets
11631  */
11632 static int dp_get_total_per(struct cdp_soc_t *soc, uint8_t pdev_id)
11633 {
11634 	struct dp_pdev *pdev =
11635 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11636 						   pdev_id);
11637 
11638 	if (!pdev)
11639 		return 0;
11640 
11641 	dp_aggregate_pdev_stats(pdev);
11642 	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
11643 		return 0;
11644 	return ((pdev->stats.tx.retries * 100) /
11645 		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
11646 }
11647 
11648 /**
11649  * dp_txrx_stats_publish() - publish pdev stats into a buffer
11650  * @soc: DP soc handle
11651  * @pdev_id: id of DP_PDEV handle
11652  * @buf: to hold pdev_stats
11653  *
11654  * Return: int
11655  */
11656 static int
11657 dp_txrx_stats_publish(struct cdp_soc_t *soc, uint8_t pdev_id,
11658 		      struct cdp_stats_extd *buf)
11659 {
11660 	struct cdp_txrx_stats_req req = {0,};
11661 	QDF_STATUS status;
11662 	struct dp_pdev *pdev =
11663 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11664 						   pdev_id);
11665 
11666 	if (!pdev)
11667 		return TXRX_STATS_LEVEL_OFF;
11668 
11669 	if (pdev->pending_fw_stats_response)
11670 		return TXRX_STATS_LEVEL_OFF;
11671 
11672 	dp_aggregate_pdev_stats(pdev);
11673 
11674 	pdev->pending_fw_stats_response = true;
11675 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
11676 	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
11677 	pdev->fw_stats_tlv_bitmap_rcvd = 0;
11678 	qdf_event_reset(&pdev->fw_stats_event);
11679 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
11680 				req.param1, req.param2, req.param3, 0,
11681 				req.cookie_val, 0);
11682 
11683 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
11684 	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
11685 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
11686 				req.param1, req.param2, req.param3, 0,
11687 				req.cookie_val, 0);
11688 
11689 	status =
11690 		qdf_wait_single_event(&pdev->fw_stats_event, DP_MAX_SLEEP_TIME);
11691 
11692 	if (status != QDF_STATUS_SUCCESS) {
11693 		if (status == QDF_STATUS_E_TIMEOUT)
11694 			qdf_debug("TIMEOUT_OCCURS");
11695 		pdev->pending_fw_stats_response = false;
11696 		return TXRX_STATS_LEVEL_OFF;
11697 	}
11698 	qdf_mem_copy(buf, &pdev->stats, sizeof(struct cdp_pdev_stats));
11699 	pdev->pending_fw_stats_response = false;
11700 
11701 	return TXRX_STATS_LEVEL;
11702 }
11703 
11704 /**
11705  * dp_get_obss_stats() - Get Pdev OBSS stats from Fw
11706  * @soc: DP soc handle
11707  * @pdev_id: id of DP_PDEV handle
11708  * @buf: to hold pdev obss stats
11709  * @req: Pointer to CDP TxRx stats
11710  *
11711  * Return: status
11712  */
11713 static QDF_STATUS
11714 dp_get_obss_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
11715 		  struct cdp_pdev_obss_pd_stats_tlv *buf,
11716 		  struct cdp_txrx_stats_req *req)
11717 {
11718 	QDF_STATUS status;
11719 	struct dp_pdev *pdev =
11720 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11721 						   pdev_id);
11722 
11723 	if (!pdev)
11724 		return QDF_STATUS_E_INVAL;
11725 
11726 	if (pdev->pending_fw_obss_stats_response)
11727 		return QDF_STATUS_E_AGAIN;
11728 
11729 	pdev->pending_fw_obss_stats_response = true;
11730 	req->stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_OBSS_PD_STATS;
11731 	req->cookie_val = DBG_STATS_COOKIE_HTT_OBSS;
11732 	qdf_event_reset(&pdev->fw_obss_stats_event);
11733 	status = dp_h2t_ext_stats_msg_send(pdev, req->stats, req->param0,
11734 					   req->param1, req->param2,
11735 					   req->param3, 0, req->cookie_val,
11736 					   req->mac_id);
11737 	if (QDF_IS_STATUS_ERROR(status)) {
11738 		pdev->pending_fw_obss_stats_response = false;
11739 		return status;
11740 	}
11741 	status =
11742 		qdf_wait_single_event(&pdev->fw_obss_stats_event,
11743 				      DP_MAX_SLEEP_TIME);
11744 
11745 	if (status != QDF_STATUS_SUCCESS) {
11746 		if (status == QDF_STATUS_E_TIMEOUT)
11747 			qdf_debug("TIMEOUT_OCCURS");
11748 		pdev->pending_fw_obss_stats_response = false;
11749 		return QDF_STATUS_E_TIMEOUT;
11750 	}
11751 	qdf_mem_copy(buf, &pdev->stats.htt_tx_pdev_stats.obss_pd_stats_tlv,
11752 		     sizeof(struct cdp_pdev_obss_pd_stats_tlv));
11753 	pdev->pending_fw_obss_stats_response = false;
11754 	return status;
11755 }
11756 
11757 /**
11758  * dp_clear_pdev_obss_pd_stats() - Clear pdev obss stats
11759  * @soc: DP soc handle
11760  * @pdev_id: id of DP_PDEV handle
11761  * @req: Pointer to CDP TxRx stats request mac_id will be
11762  *	 pre-filled and should not be overwritten
11763  *
11764  * Return: status
11765  */
11766 static QDF_STATUS
11767 dp_clear_pdev_obss_pd_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
11768 			    struct cdp_txrx_stats_req *req)
11769 {
11770 	struct dp_pdev *pdev =
11771 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11772 						   pdev_id);
11773 	uint32_t cookie_val = DBG_STATS_COOKIE_DEFAULT;
11774 
11775 	if (!pdev)
11776 		return QDF_STATUS_E_INVAL;
11777 
11778 	/*
11779 	 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
11780 	 * from param0 to param3 according to below rule:
11781 	 *
11782 	 * PARAM:
11783 	 *   - config_param0 : start_offset (stats type)
11784 	 *   - config_param1 : stats bmask from start offset
11785 	 *   - config_param2 : stats bmask from start offset + 32
11786 	 *   - config_param3 : stats bmask from start offset + 64
11787 	 */
11788 	req->stats = (enum cdp_stats)HTT_DBG_EXT_STATS_RESET;
11789 	req->param0 = HTT_DBG_EXT_STATS_PDEV_OBSS_PD_STATS;
11790 	req->param1 = 0x00000001;
11791 
11792 	return dp_h2t_ext_stats_msg_send(pdev, req->stats, req->param0,
11793 				  req->param1, req->param2, req->param3, 0,
11794 				cookie_val, req->mac_id);
11795 }
11796 
11797 /**
11798  * dp_set_pdev_dscp_tid_map_wifi3() - update dscp tid map in pdev
11799  * @soc_handle: soc handle
11800  * @pdev_id: id of DP_PDEV handle
11801  * @map_id: ID of map that needs to be updated
11802  * @tos: index value in map
11803  * @tid: tid value passed by the user
11804  *
11805  * Return: QDF_STATUS
11806  */
11807 static QDF_STATUS
11808 dp_set_pdev_dscp_tid_map_wifi3(struct cdp_soc_t *soc_handle,
11809 			       uint8_t pdev_id,
11810 			       uint8_t map_id,
11811 			       uint8_t tos, uint8_t tid)
11812 {
11813 	uint8_t dscp;
11814 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
11815 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11816 
11817 	if (!pdev)
11818 		return QDF_STATUS_E_FAILURE;
11819 
11820 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
11821 	pdev->dscp_tid_map[map_id][dscp] = tid;
11822 
11823 	if (map_id < soc->num_hw_dscp_tid_map)
11824 		hal_tx_update_dscp_tid(soc->hal_soc, tid,
11825 				       map_id, dscp);
11826 	else
11827 		return QDF_STATUS_E_FAILURE;
11828 
11829 	return QDF_STATUS_SUCCESS;
11830 }
11831 
11832 #ifdef WLAN_SYSFS_DP_STATS
11833 /**
11834  * dp_sysfs_event_trigger() - Trigger event to wait for firmware
11835  * stats request response.
11836  * @soc: soc handle
11837  * @cookie_val: cookie value
11838  *
11839  * Return: QDF_STATUS
11840  */
11841 static QDF_STATUS
11842 dp_sysfs_event_trigger(struct dp_soc *soc, uint32_t cookie_val)
11843 {
11844 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11845 	/* wait for firmware response for sysfs stats request */
11846 	if (cookie_val == DBG_SYSFS_STATS_COOKIE) {
11847 		if (!soc) {
11848 			dp_cdp_err("soc is NULL");
11849 			return QDF_STATUS_E_FAILURE;
11850 		}
11851 		/* wait for event completion */
11852 		status = qdf_wait_single_event(&soc->sysfs_config->sysfs_txrx_fw_request_done,
11853 					       WLAN_SYSFS_STAT_REQ_WAIT_MS);
11854 		if (status == QDF_STATUS_SUCCESS)
11855 			dp_cdp_info("sysfs_txrx_fw_request_done event completed");
11856 		else if (status == QDF_STATUS_E_TIMEOUT)
11857 			dp_cdp_warn("sysfs_txrx_fw_request_done event expired");
11858 		else
11859 			dp_cdp_warn("sysfs_txrx_fw_request_done event error code %d", status);
11860 	}
11861 
11862 	return status;
11863 }
11864 #else /* WLAN_SYSFS_DP_STATS */
11865 static QDF_STATUS
11866 dp_sysfs_event_trigger(struct dp_soc *soc, uint32_t cookie_val)
11867 {
11868 	return QDF_STATUS_SUCCESS;
11869 }
11870 #endif /* WLAN_SYSFS_DP_STATS */
11871 
11872 /**
11873  * dp_fw_stats_process() - Process TXRX FW stats request.
11874  * @vdev: DP VDEV handle
11875  * @req: stats request
11876  *
11877  * Return: QDF_STATUS
11878  */
11879 static QDF_STATUS
11880 dp_fw_stats_process(struct dp_vdev *vdev,
11881 		    struct cdp_txrx_stats_req *req)
11882 {
11883 	struct dp_pdev *pdev = NULL;
11884 	struct dp_soc *soc = NULL;
11885 	uint32_t stats = req->stats;
11886 	uint8_t mac_id = req->mac_id;
11887 	uint32_t cookie_val = DBG_STATS_COOKIE_DEFAULT;
11888 
11889 	if (!vdev) {
11890 		DP_TRACE(NONE, "VDEV not found");
11891 		return QDF_STATUS_E_FAILURE;
11892 	}
11893 
11894 	pdev = vdev->pdev;
11895 	if (!pdev) {
11896 		DP_TRACE(NONE, "PDEV not found");
11897 		return QDF_STATUS_E_FAILURE;
11898 	}
11899 
11900 	soc = pdev->soc;
11901 	if (!soc) {
11902 		DP_TRACE(NONE, "soc not found");
11903 		return QDF_STATUS_E_FAILURE;
11904 	}
11905 
11906 	/* In case request is from host sysfs for displaying stats on console */
11907 	if (req->cookie_val == DBG_SYSFS_STATS_COOKIE)
11908 		cookie_val = DBG_SYSFS_STATS_COOKIE;
11909 
11910 	/*
11911 	 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
11912 	 * from param0 to param3 according to below rule:
11913 	 *
11914 	 * PARAM:
11915 	 *   - config_param0 : start_offset (stats type)
11916 	 *   - config_param1 : stats bmask from start offset
11917 	 *   - config_param2 : stats bmask from start offset + 32
11918 	 *   - config_param3 : stats bmask from start offset + 64
11919 	 */
11920 	if (req->stats == CDP_TXRX_STATS_0) {
11921 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
11922 		req->param1 = 0xFFFFFFFF;
11923 		req->param2 = 0xFFFFFFFF;
11924 		req->param3 = 0xFFFFFFFF;
11925 	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
11926 		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
11927 	}
11928 
11929 	if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT) {
11930 		dp_h2t_ext_stats_msg_send(pdev,
11931 					  HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT,
11932 					  req->param0, req->param1, req->param2,
11933 					  req->param3, 0, cookie_val,
11934 					  mac_id);
11935 	} else {
11936 		dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
11937 					  req->param1, req->param2, req->param3,
11938 					  0, cookie_val, mac_id);
11939 	}
11940 
11941 	dp_sysfs_event_trigger(soc, cookie_val);
11942 
11943 	return QDF_STATUS_SUCCESS;
11944 }
11945 
11946 /**
11947  * dp_txrx_stats_request - function to map to firmware and host stats
11948  * @soc_handle: soc handle
11949  * @vdev_id: virtual device ID
11950  * @req: stats request
11951  *
11952  * Return: QDF_STATUS
11953  */
11954 static
11955 QDF_STATUS dp_txrx_stats_request(struct cdp_soc_t *soc_handle,
11956 				 uint8_t vdev_id,
11957 				 struct cdp_txrx_stats_req *req)
11958 {
11959 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_handle);
11960 	int host_stats;
11961 	int fw_stats;
11962 	enum cdp_stats stats;
11963 	int num_stats;
11964 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11965 						     DP_MOD_ID_CDP);
11966 	QDF_STATUS status = QDF_STATUS_E_INVAL;
11967 
11968 	if (!vdev || !req) {
11969 		dp_cdp_err("%pK: Invalid vdev/req instance", soc);
11970 		status = QDF_STATUS_E_INVAL;
11971 		goto fail0;
11972 	}
11973 
11974 	if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) {
11975 		dp_err("Invalid mac id request");
11976 		status = QDF_STATUS_E_INVAL;
11977 		goto fail0;
11978 	}
11979 
11980 	stats = req->stats;
11981 	if (stats >= CDP_TXRX_MAX_STATS) {
11982 		status = QDF_STATUS_E_INVAL;
11983 		goto fail0;
11984 	}
11985 
11986 	/*
11987 	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
11988 	 *			has to be updated if new FW HTT stats added
11989 	 */
11990 	if (stats > CDP_TXRX_STATS_HTT_MAX)
11991 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
11992 
11993 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
11994 
11995 	if (stats >= num_stats) {
11996 		dp_cdp_err("%pK : Invalid stats option: %d", soc, stats);
11997 		status = QDF_STATUS_E_INVAL;
11998 		goto fail0;
11999 	}
12000 
12001 	req->stats = stats;
12002 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
12003 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
12004 
12005 	dp_info("stats: %u fw_stats_type: %d host_stats: %d",
12006 		stats, fw_stats, host_stats);
12007 
12008 	if (fw_stats != TXRX_FW_STATS_INVALID) {
12009 		/* update request with FW stats type */
12010 		req->stats = fw_stats;
12011 		status = dp_fw_stats_process(vdev, req);
12012 	} else if ((host_stats != TXRX_HOST_STATS_INVALID) &&
12013 			(host_stats <= TXRX_HOST_STATS_MAX))
12014 		status = dp_print_host_stats(vdev, req, soc);
12015 	else
12016 		dp_cdp_info("%pK: Wrong Input for TxRx Stats", soc);
12017 fail0:
12018 	if (vdev)
12019 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12020 	return status;
12021 }
12022 
12023 /**
12024  * dp_txrx_dump_stats() -  Dump statistics
12025  * @psoc: CDP soc handle
12026  * @value: Statistics option
12027  * @level: verbosity level
12028  */
12029 static QDF_STATUS dp_txrx_dump_stats(struct cdp_soc_t *psoc, uint16_t value,
12030 				     enum qdf_stats_verbosity_level level)
12031 {
12032 	struct dp_soc *soc =
12033 		(struct dp_soc *)psoc;
12034 	QDF_STATUS status = QDF_STATUS_SUCCESS;
12035 
12036 	if (!soc) {
12037 		dp_cdp_err("%pK: soc is NULL", soc);
12038 		return QDF_STATUS_E_INVAL;
12039 	}
12040 
12041 	switch (value) {
12042 	case CDP_TXRX_PATH_STATS:
12043 		dp_txrx_path_stats(soc);
12044 		dp_print_soc_interrupt_stats(soc);
12045 		hal_dump_reg_write_stats(soc->hal_soc);
12046 		dp_pdev_print_tx_delay_stats(soc);
12047 		/* Dump usage watermark stats for core TX/RX SRNGs */
12048 		dp_dump_srng_high_wm_stats(soc, (1 << REO_DST));
12049 		dp_print_fisa_stats(soc);
12050 		break;
12051 
12052 	case CDP_RX_RING_STATS:
12053 		dp_print_per_ring_stats(soc);
12054 		break;
12055 
12056 	case CDP_TXRX_TSO_STATS:
12057 		dp_print_tso_stats(soc, level);
12058 		break;
12059 
12060 	case CDP_DUMP_TX_FLOW_POOL_INFO:
12061 		if (level == QDF_STATS_VERBOSITY_LEVEL_HIGH)
12062 			cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
12063 		else
12064 			dp_tx_dump_flow_pool_info_compact(soc);
12065 		break;
12066 
12067 	case CDP_DP_NAPI_STATS:
12068 		dp_print_napi_stats(soc);
12069 		break;
12070 
12071 	case CDP_TXRX_DESC_STATS:
12072 		/* TODO: NOT IMPLEMENTED */
12073 		break;
12074 
12075 	case CDP_DP_RX_FISA_STATS:
12076 		dp_rx_dump_fisa_stats(soc);
12077 		break;
12078 
12079 	case CDP_DP_SWLM_STATS:
12080 		dp_print_swlm_stats(soc);
12081 		break;
12082 
12083 	case CDP_DP_TX_HW_LATENCY_STATS:
12084 		dp_pdev_print_tx_delay_stats(soc);
12085 		break;
12086 
12087 	default:
12088 		status = QDF_STATUS_E_INVAL;
12089 		break;
12090 	}
12091 
12092 	return status;
12093 
12094 }
12095 
12096 #ifdef WLAN_SYSFS_DP_STATS
12097 static
12098 void dp_sysfs_get_stat_type(struct dp_soc *soc, uint32_t *mac_id,
12099 			    uint32_t *stat_type)
12100 {
12101 	qdf_spinlock_acquire(&soc->sysfs_config->rw_stats_lock);
12102 	*stat_type = soc->sysfs_config->stat_type_requested;
12103 	*mac_id   = soc->sysfs_config->mac_id;
12104 
12105 	qdf_spinlock_release(&soc->sysfs_config->rw_stats_lock);
12106 }
12107 
12108 static
12109 void dp_sysfs_update_config_buf_params(struct dp_soc *soc,
12110 				       uint32_t curr_len,
12111 				       uint32_t max_buf_len,
12112 				       char *buf)
12113 {
12114 	qdf_spinlock_acquire(&soc->sysfs_config->sysfs_write_user_buffer);
12115 	/* set sysfs_config parameters */
12116 	soc->sysfs_config->buf = buf;
12117 	soc->sysfs_config->curr_buffer_length = curr_len;
12118 	soc->sysfs_config->max_buffer_length = max_buf_len;
12119 	qdf_spinlock_release(&soc->sysfs_config->sysfs_write_user_buffer);
12120 }
12121 
12122 static
12123 QDF_STATUS dp_sysfs_fill_stats(ol_txrx_soc_handle soc_hdl,
12124 			       char *buf, uint32_t buf_size)
12125 {
12126 	uint32_t mac_id = 0;
12127 	uint32_t stat_type = 0;
12128 	uint32_t fw_stats = 0;
12129 	uint32_t host_stats = 0;
12130 	enum cdp_stats stats;
12131 	struct cdp_txrx_stats_req req;
12132 	uint32_t num_stats;
12133 	struct dp_soc *soc = NULL;
12134 
12135 	if (!soc_hdl) {
12136 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
12137 		return QDF_STATUS_E_INVAL;
12138 	}
12139 
12140 	soc = cdp_soc_t_to_dp_soc(soc_hdl);
12141 
12142 	if (!soc) {
12143 		dp_cdp_err("%pK: soc is NULL", soc);
12144 		return QDF_STATUS_E_INVAL;
12145 	}
12146 
12147 	dp_sysfs_get_stat_type(soc, &mac_id, &stat_type);
12148 
12149 	stats = stat_type;
12150 	if (stats >= CDP_TXRX_MAX_STATS) {
12151 		dp_cdp_info("sysfs stat type requested is invalid");
12152 		return QDF_STATUS_E_INVAL;
12153 	}
12154 	/*
12155 	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
12156 	 *			has to be updated if new FW HTT stats added
12157 	 */
12158 	if (stats > CDP_TXRX_MAX_STATS)
12159 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
12160 
12161 	num_stats = QDF_ARRAY_SIZE(dp_stats_mapping_table);
12162 
12163 	if (stats >= num_stats) {
12164 		dp_cdp_err("%pK : Invalid stats option: %d, max num stats: %d",
12165 				soc, stats, num_stats);
12166 		return QDF_STATUS_E_INVAL;
12167 	}
12168 
12169 	/* build request */
12170 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
12171 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
12172 
12173 	req.stats = stat_type;
12174 	req.mac_id = mac_id;
12175 	/* request stats to be printed */
12176 	qdf_mutex_acquire(&soc->sysfs_config->sysfs_read_lock);
12177 
12178 	if (fw_stats != TXRX_FW_STATS_INVALID) {
12179 		/* update request with FW stats type */
12180 		req.cookie_val = DBG_SYSFS_STATS_COOKIE;
12181 	} else if ((host_stats != TXRX_HOST_STATS_INVALID) &&
12182 			(host_stats <= TXRX_HOST_STATS_MAX)) {
12183 		req.cookie_val = DBG_STATS_COOKIE_DEFAULT;
12184 		soc->sysfs_config->process_id = qdf_get_current_pid();
12185 		soc->sysfs_config->printing_mode = PRINTING_MODE_ENABLED;
12186 	}
12187 
12188 	dp_sysfs_update_config_buf_params(soc, 0, buf_size, buf);
12189 
12190 	dp_txrx_stats_request(soc_hdl, mac_id, &req);
12191 	soc->sysfs_config->process_id = 0;
12192 	soc->sysfs_config->printing_mode = PRINTING_MODE_DISABLED;
12193 
12194 	dp_sysfs_update_config_buf_params(soc, 0, 0, NULL);
12195 
12196 	qdf_mutex_release(&soc->sysfs_config->sysfs_read_lock);
12197 	return QDF_STATUS_SUCCESS;
12198 }
12199 
12200 static
12201 QDF_STATUS dp_sysfs_set_stat_type(ol_txrx_soc_handle soc_hdl,
12202 				  uint32_t stat_type, uint32_t mac_id)
12203 {
12204 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12205 
12206 	if (!soc_hdl) {
12207 		dp_cdp_err("%pK: soc is NULL", soc);
12208 		return QDF_STATUS_E_INVAL;
12209 	}
12210 
12211 	qdf_spinlock_acquire(&soc->sysfs_config->rw_stats_lock);
12212 
12213 	soc->sysfs_config->stat_type_requested = stat_type;
12214 	soc->sysfs_config->mac_id = mac_id;
12215 
12216 	qdf_spinlock_release(&soc->sysfs_config->rw_stats_lock);
12217 
12218 	return QDF_STATUS_SUCCESS;
12219 }
12220 
12221 static
12222 QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl)
12223 {
12224 	struct dp_soc *soc;
12225 	QDF_STATUS status;
12226 
12227 	if (!soc_hdl) {
12228 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
12229 		return QDF_STATUS_E_INVAL;
12230 	}
12231 
12232 	soc = soc_hdl;
12233 
12234 	soc->sysfs_config = qdf_mem_malloc(sizeof(struct sysfs_stats_config));
12235 	if (!soc->sysfs_config) {
12236 		dp_cdp_err("failed to allocate memory for sysfs_config no memory");
12237 		return QDF_STATUS_E_NOMEM;
12238 	}
12239 
12240 	status = qdf_event_create(&soc->sysfs_config->sysfs_txrx_fw_request_done);
12241 	/* create event for fw stats request from sysfs */
12242 	if (status != QDF_STATUS_SUCCESS) {
12243 		dp_cdp_err("failed to create event sysfs_txrx_fw_request_done");
12244 		qdf_mem_free(soc->sysfs_config);
12245 		soc->sysfs_config = NULL;
12246 		return QDF_STATUS_E_FAILURE;
12247 	}
12248 
12249 	qdf_spinlock_create(&soc->sysfs_config->rw_stats_lock);
12250 	qdf_mutex_create(&soc->sysfs_config->sysfs_read_lock);
12251 	qdf_spinlock_create(&soc->sysfs_config->sysfs_write_user_buffer);
12252 
12253 	return QDF_STATUS_SUCCESS;
12254 }
12255 
12256 static
12257 QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl)
12258 {
12259 	struct dp_soc *soc;
12260 	QDF_STATUS status;
12261 
12262 	if (!soc_hdl) {
12263 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
12264 		return QDF_STATUS_E_INVAL;
12265 	}
12266 
12267 	soc = soc_hdl;
12268 	if (!soc->sysfs_config) {
12269 		dp_cdp_err("soc->sysfs_config is NULL");
12270 		return QDF_STATUS_E_FAILURE;
12271 	}
12272 
12273 	status = qdf_event_destroy(&soc->sysfs_config->sysfs_txrx_fw_request_done);
12274 	if (status != QDF_STATUS_SUCCESS)
12275 		dp_cdp_err("Failed to destroy event sysfs_txrx_fw_request_done");
12276 
12277 	qdf_mutex_destroy(&soc->sysfs_config->sysfs_read_lock);
12278 	qdf_spinlock_destroy(&soc->sysfs_config->rw_stats_lock);
12279 	qdf_spinlock_destroy(&soc->sysfs_config->sysfs_write_user_buffer);
12280 
12281 	qdf_mem_free(soc->sysfs_config);
12282 
12283 	return QDF_STATUS_SUCCESS;
12284 }
12285 
12286 #else /* WLAN_SYSFS_DP_STATS */
12287 
12288 static
12289 QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl)
12290 {
12291 	return QDF_STATUS_SUCCESS;
12292 }
12293 
12294 static
12295 QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl)
12296 {
12297 	return QDF_STATUS_SUCCESS;
12298 }
12299 #endif /* WLAN_SYSFS_DP_STATS */
12300 
12301 /**
12302  * dp_txrx_clear_dump_stats() - clear dumpStats
12303  * @soc_hdl: soc handle
12304  * @pdev_id: pdev ID
12305  * @value: stats option
12306  *
12307  * Return: 0 - Success, non-zero - failure
12308  */
12309 static
12310 QDF_STATUS dp_txrx_clear_dump_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12311 				    uint8_t value)
12312 {
12313 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12314 	QDF_STATUS status = QDF_STATUS_SUCCESS;
12315 
12316 	if (!soc) {
12317 		dp_err("soc is NULL");
12318 		return QDF_STATUS_E_INVAL;
12319 	}
12320 
12321 	switch (value) {
12322 	case CDP_TXRX_TSO_STATS:
12323 		dp_txrx_clear_tso_stats(soc);
12324 		break;
12325 
12326 	case CDP_DP_TX_HW_LATENCY_STATS:
12327 		dp_pdev_clear_tx_delay_stats(soc);
12328 		break;
12329 
12330 	default:
12331 		status = QDF_STATUS_E_INVAL;
12332 		break;
12333 	}
12334 
12335 	return status;
12336 }
12337 
12338 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
12339 /**
12340  * dp_update_flow_control_parameters() - API to store datapath
12341  *                            config parameters
12342  * @soc: soc handle
12343  * @params: ini parameter handle
12344  *
12345  * Return: void
12346  */
12347 static inline
12348 void dp_update_flow_control_parameters(struct dp_soc *soc,
12349 				struct cdp_config_params *params)
12350 {
12351 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
12352 					params->tx_flow_stop_queue_threshold;
12353 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
12354 					params->tx_flow_start_queue_offset;
12355 }
12356 #else
12357 static inline
12358 void dp_update_flow_control_parameters(struct dp_soc *soc,
12359 				struct cdp_config_params *params)
12360 {
12361 }
12362 #endif
12363 
12364 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
12365 /* Max packet limit for TX Comp packet loop (dp_tx_comp_handler) */
12366 #define DP_TX_COMP_LOOP_PKT_LIMIT_MAX 1024
12367 
12368 /* Max packet limit for RX REAP Loop (dp_rx_process) */
12369 #define DP_RX_REAP_LOOP_PKT_LIMIT_MAX 1024
12370 
12371 static
12372 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
12373 					struct cdp_config_params *params)
12374 {
12375 	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit =
12376 				params->tx_comp_loop_pkt_limit;
12377 
12378 	if (params->tx_comp_loop_pkt_limit < DP_TX_COMP_LOOP_PKT_LIMIT_MAX)
12379 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = true;
12380 	else
12381 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = false;
12382 
12383 	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit =
12384 				params->rx_reap_loop_pkt_limit;
12385 
12386 	if (params->rx_reap_loop_pkt_limit < DP_RX_REAP_LOOP_PKT_LIMIT_MAX)
12387 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = true;
12388 	else
12389 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = false;
12390 
12391 	soc->wlan_cfg_ctx->rx_hp_oos_update_limit =
12392 				params->rx_hp_oos_update_limit;
12393 
12394 	dp_info("tx_comp_loop_pkt_limit %u tx_comp_enable_eol_data_check %u rx_reap_loop_pkt_limit %u rx_enable_eol_data_check %u rx_hp_oos_update_limit %u",
12395 		soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit,
12396 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check,
12397 		soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit,
12398 		soc->wlan_cfg_ctx->rx_enable_eol_data_check,
12399 		soc->wlan_cfg_ctx->rx_hp_oos_update_limit);
12400 }
12401 
12402 static void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
12403 				      uint32_t rx_limit)
12404 {
12405 	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit = tx_limit;
12406 	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit = rx_limit;
12407 }
12408 
12409 #else
12410 static inline
12411 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
12412 					struct cdp_config_params *params)
12413 { }
12414 
12415 static inline
12416 void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
12417 			       uint32_t rx_limit)
12418 {
12419 }
12420 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
12421 
12422 /**
12423  * dp_update_config_parameters() - API to store datapath
12424  *                            config parameters
12425  * @psoc: soc handle
12426  * @params: ini parameter handle
12427  *
12428  * Return: status
12429  */
12430 static
12431 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
12432 				struct cdp_config_params *params)
12433 {
12434 	struct dp_soc *soc = (struct dp_soc *)psoc;
12435 
12436 	if (!(soc)) {
12437 		dp_cdp_err("%pK: Invalid handle", soc);
12438 		return QDF_STATUS_E_INVAL;
12439 	}
12440 
12441 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
12442 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
12443 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
12444 	soc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload =
12445 				params->p2p_tcp_udp_checksumoffload;
12446 	soc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload =
12447 				params->nan_tcp_udp_checksumoffload;
12448 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
12449 				params->tcp_udp_checksumoffload;
12450 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
12451 	soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
12452 	soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
12453 
12454 	dp_update_rx_soft_irq_limit_params(soc, params);
12455 	dp_update_flow_control_parameters(soc, params);
12456 
12457 	return QDF_STATUS_SUCCESS;
12458 }
12459 
12460 static struct cdp_wds_ops dp_ops_wds = {
12461 	.vdev_set_wds = dp_vdev_set_wds,
12462 #ifdef WDS_VENDOR_EXTENSION
12463 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
12464 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
12465 #endif
12466 };
12467 
12468 /**
12469  * dp_txrx_data_tx_cb_set() - set the callback for non standard tx
12470  * @soc_hdl: datapath soc handle
12471  * @vdev_id: virtual interface id
12472  * @callback: callback function
12473  * @ctxt: callback context
12474  *
12475  */
12476 static void
12477 dp_txrx_data_tx_cb_set(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
12478 		       ol_txrx_data_tx_cb callback, void *ctxt)
12479 {
12480 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12481 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12482 						     DP_MOD_ID_CDP);
12483 
12484 	if (!vdev)
12485 		return;
12486 
12487 	vdev->tx_non_std_data_callback.func = callback;
12488 	vdev->tx_non_std_data_callback.ctxt = ctxt;
12489 
12490 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12491 }
12492 
12493 /**
12494  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
12495  * @soc: datapath soc handle
12496  * @pdev_id: id of datapath pdev handle
12497  *
12498  * Return: opaque pointer to dp txrx handle
12499  */
12500 static void *dp_pdev_get_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id)
12501 {
12502 	struct dp_pdev *pdev =
12503 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
12504 						   pdev_id);
12505 	if (qdf_unlikely(!pdev))
12506 		return NULL;
12507 
12508 	return pdev->dp_txrx_handle;
12509 }
12510 
12511 /**
12512  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
12513  * @soc: datapath soc handle
12514  * @pdev_id: id of datapath pdev handle
12515  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
12516  *
12517  * Return: void
12518  */
12519 static void
12520 dp_pdev_set_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id,
12521 			   void *dp_txrx_hdl)
12522 {
12523 	struct dp_pdev *pdev =
12524 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
12525 						   pdev_id);
12526 
12527 	if (!pdev)
12528 		return;
12529 
12530 	pdev->dp_txrx_handle = dp_txrx_hdl;
12531 }
12532 
12533 /**
12534  * dp_vdev_get_dp_ext_handle() - get dp handle from vdev
12535  * @soc_hdl: datapath soc handle
12536  * @vdev_id: vdev id
12537  *
12538  * Return: opaque pointer to dp txrx handle
12539  */
12540 static void *dp_vdev_get_dp_ext_handle(ol_txrx_soc_handle soc_hdl,
12541 				       uint8_t vdev_id)
12542 {
12543 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12544 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12545 						     DP_MOD_ID_CDP);
12546 	void *dp_ext_handle;
12547 
12548 	if (!vdev)
12549 		return NULL;
12550 	dp_ext_handle = vdev->vdev_dp_ext_handle;
12551 
12552 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12553 	return dp_ext_handle;
12554 }
12555 
12556 /**
12557  * dp_vdev_set_dp_ext_handle() - set dp handle in vdev
12558  * @soc_hdl: datapath soc handle
12559  * @vdev_id: vdev id
12560  * @size: size of advance dp handle
12561  *
12562  * Return: QDF_STATUS
12563  */
12564 static QDF_STATUS
12565 dp_vdev_set_dp_ext_handle(ol_txrx_soc_handle soc_hdl, uint8_t vdev_id,
12566 			  uint16_t size)
12567 {
12568 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12569 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12570 						     DP_MOD_ID_CDP);
12571 	void *dp_ext_handle;
12572 
12573 	if (!vdev)
12574 		return QDF_STATUS_E_FAILURE;
12575 
12576 	dp_ext_handle = qdf_mem_malloc(size);
12577 
12578 	if (!dp_ext_handle) {
12579 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12580 		return QDF_STATUS_E_FAILURE;
12581 	}
12582 
12583 	vdev->vdev_dp_ext_handle = dp_ext_handle;
12584 
12585 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12586 	return QDF_STATUS_SUCCESS;
12587 }
12588 
12589 /**
12590  * dp_vdev_inform_ll_conn() - Inform vdev to add/delete a latency critical
12591  *			      connection for this vdev
12592  * @soc_hdl: CDP soc handle
12593  * @vdev_id: vdev ID
12594  * @action: Add/Delete action
12595  *
12596  * Return: QDF_STATUS.
12597  */
12598 static QDF_STATUS
12599 dp_vdev_inform_ll_conn(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
12600 		       enum vdev_ll_conn_actions action)
12601 {
12602 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12603 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12604 						     DP_MOD_ID_CDP);
12605 
12606 	if (!vdev) {
12607 		dp_err("LL connection action for invalid vdev %d", vdev_id);
12608 		return QDF_STATUS_E_FAILURE;
12609 	}
12610 
12611 	switch (action) {
12612 	case CDP_VDEV_LL_CONN_ADD:
12613 		vdev->num_latency_critical_conn++;
12614 		break;
12615 
12616 	case CDP_VDEV_LL_CONN_DEL:
12617 		vdev->num_latency_critical_conn--;
12618 		break;
12619 
12620 	default:
12621 		dp_err("LL connection action invalid %d", action);
12622 		break;
12623 	}
12624 
12625 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12626 	return QDF_STATUS_SUCCESS;
12627 }
12628 
12629 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
12630 /**
12631  * dp_soc_set_swlm_enable() - Enable/Disable SWLM if initialized.
12632  * @soc_hdl: CDP Soc handle
12633  * @value: Enable/Disable value
12634  *
12635  * Return: QDF_STATUS
12636  */
12637 static QDF_STATUS dp_soc_set_swlm_enable(struct cdp_soc_t *soc_hdl,
12638 					 uint8_t value)
12639 {
12640 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12641 
12642 	if (!soc->swlm.is_init) {
12643 		dp_err("SWLM is not initialized");
12644 		return QDF_STATUS_E_FAILURE;
12645 	}
12646 
12647 	soc->swlm.is_enabled = !!value;
12648 
12649 	return QDF_STATUS_SUCCESS;
12650 }
12651 
12652 /**
12653  * dp_soc_is_swlm_enabled() - Check if SWLM is enabled.
12654  * @soc_hdl: CDP Soc handle
12655  *
12656  * Return: QDF_STATUS
12657  */
12658 static uint8_t dp_soc_is_swlm_enabled(struct cdp_soc_t *soc_hdl)
12659 {
12660 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12661 
12662 	return soc->swlm.is_enabled;
12663 }
12664 #endif
12665 
12666 /**
12667  * dp_display_srng_info() - Dump the srng HP TP info
12668  * @soc_hdl: CDP Soc handle
12669  *
12670  * This function dumps the SW hp/tp values for the important rings.
12671  * HW hp/tp values are not being dumped, since it can lead to
12672  * READ NOC error when UMAC is in low power state. MCC does not have
12673  * device force wake working yet.
12674  *
12675  * Return: none
12676  */
12677 static void dp_display_srng_info(struct cdp_soc_t *soc_hdl)
12678 {
12679 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12680 	hal_soc_handle_t hal_soc = soc->hal_soc;
12681 	uint32_t hp, tp, i;
12682 
12683 	dp_info("SRNG HP-TP data:");
12684 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
12685 		hal_get_sw_hptp(hal_soc, soc->tcl_data_ring[i].hal_srng,
12686 				&tp, &hp);
12687 		dp_info("TCL DATA ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
12688 
12689 		if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, i) ==
12690 		    INVALID_WBM_RING_NUM)
12691 			continue;
12692 
12693 		hal_get_sw_hptp(hal_soc, soc->tx_comp_ring[i].hal_srng,
12694 				&tp, &hp);
12695 		dp_info("TX comp ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
12696 	}
12697 
12698 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
12699 		hal_get_sw_hptp(hal_soc, soc->reo_dest_ring[i].hal_srng,
12700 				&tp, &hp);
12701 		dp_info("REO DST ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
12702 	}
12703 
12704 	hal_get_sw_hptp(hal_soc, soc->reo_exception_ring.hal_srng, &tp, &hp);
12705 	dp_info("REO exception ring: hp=0x%x, tp=0x%x", hp, tp);
12706 
12707 	hal_get_sw_hptp(hal_soc, soc->rx_rel_ring.hal_srng, &tp, &hp);
12708 	dp_info("WBM RX release ring: hp=0x%x, tp=0x%x", hp, tp);
12709 
12710 	hal_get_sw_hptp(hal_soc, soc->wbm_desc_rel_ring.hal_srng, &tp, &hp);
12711 	dp_info("WBM desc release ring: hp=0x%x, tp=0x%x", hp, tp);
12712 }
12713 
12714 /**
12715  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
12716  * @soc_handle: datapath soc handle
12717  *
12718  * Return: opaque pointer to external dp (non-core DP)
12719  */
12720 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
12721 {
12722 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12723 
12724 	return soc->external_txrx_handle;
12725 }
12726 
12727 /**
12728  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
12729  * @soc_handle: datapath soc handle
12730  * @txrx_handle: opaque pointer to external dp (non-core DP)
12731  *
12732  * Return: void
12733  */
12734 static void
12735 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
12736 {
12737 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12738 
12739 	soc->external_txrx_handle = txrx_handle;
12740 }
12741 
12742 /**
12743  * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping
12744  * @soc_hdl: datapath soc handle
12745  * @pdev_id: id of the datapath pdev handle
12746  * @lmac_id: lmac id
12747  *
12748  * Return: QDF_STATUS
12749  */
12750 static QDF_STATUS
12751 dp_soc_map_pdev_to_lmac
12752 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12753 	 uint32_t lmac_id)
12754 {
12755 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12756 
12757 	wlan_cfg_set_hw_mac_idx(soc->wlan_cfg_ctx,
12758 				pdev_id,
12759 				lmac_id);
12760 
12761 	/*Set host PDEV ID for lmac_id*/
12762 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
12763 			      pdev_id,
12764 			      lmac_id);
12765 
12766 	return QDF_STATUS_SUCCESS;
12767 }
12768 
12769 /**
12770  * dp_soc_handle_pdev_mode_change() - Update pdev to lmac mapping
12771  * @soc_hdl: datapath soc handle
12772  * @pdev_id: id of the datapath pdev handle
12773  * @lmac_id: lmac id
12774  *
12775  * In the event of a dynamic mode change, update the pdev to lmac mapping
12776  *
12777  * Return: QDF_STATUS
12778  */
12779 static QDF_STATUS
12780 dp_soc_handle_pdev_mode_change
12781 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12782 	 uint32_t lmac_id)
12783 {
12784 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12785 	struct dp_vdev *vdev = NULL;
12786 	uint8_t hw_pdev_id, mac_id;
12787 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc,
12788 								  pdev_id);
12789 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
12790 
12791 	if (qdf_unlikely(!pdev))
12792 		return QDF_STATUS_E_FAILURE;
12793 
12794 	pdev->lmac_id = lmac_id;
12795 	pdev->target_pdev_id =
12796 		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
12797 	dp_info(" mode change %d %d\n", pdev->pdev_id, pdev->lmac_id);
12798 
12799 	/*Set host PDEV ID for lmac_id*/
12800 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
12801 			      pdev->pdev_id,
12802 			      lmac_id);
12803 
12804 	hw_pdev_id =
12805 		dp_get_target_pdev_id_for_host_pdev_id(soc,
12806 						       pdev->pdev_id);
12807 
12808 	/*
12809 	 * When NSS offload is enabled, send pdev_id->lmac_id
12810 	 * and pdev_id to hw_pdev_id to NSS FW
12811 	 */
12812 	if (nss_config) {
12813 		mac_id = pdev->lmac_id;
12814 		if (soc->cdp_soc.ol_ops->pdev_update_lmac_n_target_pdev_id)
12815 			soc->cdp_soc.ol_ops->
12816 				pdev_update_lmac_n_target_pdev_id(
12817 				soc->ctrl_psoc,
12818 				&pdev_id, &mac_id, &hw_pdev_id);
12819 	}
12820 
12821 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
12822 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
12823 		DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
12824 					       hw_pdev_id);
12825 		vdev->lmac_id = pdev->lmac_id;
12826 	}
12827 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
12828 
12829 	return QDF_STATUS_SUCCESS;
12830 }
12831 
12832 /**
12833  * dp_soc_set_pdev_status_down() - set pdev down/up status
12834  * @soc: datapath soc handle
12835  * @pdev_id: id of datapath pdev handle
12836  * @is_pdev_down: pdev down/up status
12837  *
12838  * Return: QDF_STATUS
12839  */
12840 static QDF_STATUS
12841 dp_soc_set_pdev_status_down(struct cdp_soc_t *soc, uint8_t pdev_id,
12842 			    bool is_pdev_down)
12843 {
12844 	struct dp_pdev *pdev =
12845 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
12846 						   pdev_id);
12847 	if (!pdev)
12848 		return QDF_STATUS_E_FAILURE;
12849 
12850 	pdev->is_pdev_down = is_pdev_down;
12851 	return QDF_STATUS_SUCCESS;
12852 }
12853 
12854 /**
12855  * dp_get_cfg_capabilities() - get dp capabilities
12856  * @soc_handle: datapath soc handle
12857  * @dp_caps: enum for dp capabilities
12858  *
12859  * Return: bool to determine if dp caps is enabled
12860  */
12861 static bool
12862 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
12863 			enum cdp_capabilities dp_caps)
12864 {
12865 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12866 
12867 	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
12868 }
12869 
12870 #ifdef FEATURE_AST
12871 static QDF_STATUS
12872 dp_peer_teardown_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
12873 		       uint8_t *peer_mac)
12874 {
12875 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12876 	QDF_STATUS status = QDF_STATUS_SUCCESS;
12877 	struct dp_peer *peer =
12878 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
12879 					       DP_MOD_ID_CDP);
12880 
12881 	/* Peer can be null for monitor vap mac address */
12882 	if (!peer) {
12883 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
12884 			  "%s: Invalid peer\n", __func__);
12885 		return QDF_STATUS_E_FAILURE;
12886 	}
12887 
12888 	dp_peer_update_state(soc, peer, DP_PEER_STATE_LOGICAL_DELETE);
12889 
12890 	qdf_spin_lock_bh(&soc->ast_lock);
12891 	dp_peer_send_wds_disconnect(soc, peer);
12892 	dp_peer_delete_ast_entries(soc, peer);
12893 	qdf_spin_unlock_bh(&soc->ast_lock);
12894 
12895 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
12896 	return status;
12897 }
12898 #endif
12899 
12900 #ifndef WLAN_SUPPORT_RX_TAG_STATISTICS
12901 /**
12902  * dp_dump_pdev_rx_protocol_tag_stats - dump the number of packets tagged for
12903  * given protocol type (RX_PROTOCOL_TAG_ALL indicates for all protocol)
12904  * @soc: cdp_soc handle
12905  * @pdev_id: id of cdp_pdev handle
12906  * @protocol_type: protocol type for which stats should be displayed
12907  *
12908  * Return: none
12909  */
12910 static inline void
12911 dp_dump_pdev_rx_protocol_tag_stats(struct cdp_soc_t  *soc, uint8_t pdev_id,
12912 				   uint16_t protocol_type)
12913 {
12914 }
12915 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
12916 
12917 #ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
12918 /**
12919  * dp_update_pdev_rx_protocol_tag() - Add/remove a protocol tag that should be
12920  * applied to the desired protocol type packets
12921  * @soc: soc handle
12922  * @pdev_id: id of cdp_pdev handle
12923  * @enable_rx_protocol_tag: bitmask that indicates what protocol types
12924  * are enabled for tagging. zero indicates disable feature, non-zero indicates
12925  * enable feature
12926  * @protocol_type: new protocol type for which the tag is being added
12927  * @tag: user configured tag for the new protocol
12928  *
12929  * Return: Success
12930  */
12931 static inline QDF_STATUS
12932 dp_update_pdev_rx_protocol_tag(struct cdp_soc_t  *soc, uint8_t pdev_id,
12933 			       uint32_t enable_rx_protocol_tag,
12934 			       uint16_t protocol_type,
12935 			       uint16_t tag)
12936 {
12937 	return QDF_STATUS_SUCCESS;
12938 }
12939 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
12940 
12941 #ifndef WLAN_SUPPORT_RX_FLOW_TAG
12942 /**
12943  * dp_set_rx_flow_tag() - add/delete a flow
12944  * @cdp_soc: CDP soc handle
12945  * @pdev_id: id of cdp_pdev handle
12946  * @flow_info: flow tuple that is to be added to/deleted from flow search table
12947  *
12948  * Return: Success
12949  */
12950 static inline QDF_STATUS
12951 dp_set_rx_flow_tag(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
12952 		   struct cdp_rx_flow_info *flow_info)
12953 {
12954 	return QDF_STATUS_SUCCESS;
12955 }
12956 /**
12957  * dp_dump_rx_flow_tag_stats() - dump the number of packets tagged for
12958  * given flow 5-tuple
12959  * @cdp_soc: soc handle
12960  * @pdev_id: id of cdp_pdev handle
12961  * @flow_info: flow 5-tuple for which stats should be displayed
12962  *
12963  * Return: Success
12964  */
12965 static inline QDF_STATUS
12966 dp_dump_rx_flow_tag_stats(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
12967 			  struct cdp_rx_flow_info *flow_info)
12968 {
12969 	return QDF_STATUS_SUCCESS;
12970 }
12971 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
12972 
12973 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
12974 					   uint32_t max_peers,
12975 					   uint32_t max_ast_index,
12976 					   uint8_t peer_map_unmap_versions)
12977 {
12978 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12979 	QDF_STATUS status;
12980 
12981 	soc->max_peers = max_peers;
12982 
12983 	wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);
12984 
12985 	status = soc->arch_ops.txrx_peer_map_attach(soc);
12986 	if (!QDF_IS_STATUS_SUCCESS(status)) {
12987 		dp_err("failure in allocating peer tables");
12988 		return QDF_STATUS_E_FAILURE;
12989 	}
12990 
12991 	dp_info("max_peers %u, calculated max_peers %u max_ast_index: %u\n",
12992 		max_peers, soc->max_peer_id, max_ast_index);
12993 
12994 	status = dp_peer_find_attach(soc);
12995 	if (!QDF_IS_STATUS_SUCCESS(status)) {
12996 		dp_err("Peer find attach failure");
12997 		goto fail;
12998 	}
12999 
13000 	soc->peer_map_unmap_versions = peer_map_unmap_versions;
13001 	soc->peer_map_attach_success = TRUE;
13002 
13003 	return QDF_STATUS_SUCCESS;
13004 fail:
13005 	soc->arch_ops.txrx_peer_map_detach(soc);
13006 
13007 	return status;
13008 }
13009 
13010 static QDF_STATUS dp_soc_set_param(struct cdp_soc_t  *soc_hdl,
13011 				   enum cdp_soc_param_t param,
13012 				   uint32_t value)
13013 {
13014 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
13015 
13016 	switch (param) {
13017 	case DP_SOC_PARAM_MSDU_EXCEPTION_DESC:
13018 		soc->num_msdu_exception_desc = value;
13019 		dp_info("num_msdu exception_desc %u",
13020 			value);
13021 		break;
13022 	case DP_SOC_PARAM_CMEM_FSE_SUPPORT:
13023 		if (wlan_cfg_is_fst_in_cmem_enabled(soc->wlan_cfg_ctx))
13024 			soc->fst_in_cmem = !!value;
13025 		dp_info("FW supports CMEM FSE %u", value);
13026 		break;
13027 	case DP_SOC_PARAM_MAX_AST_AGEOUT:
13028 		soc->max_ast_ageout_count = value;
13029 		dp_info("Max ast ageout count %u", soc->max_ast_ageout_count);
13030 		break;
13031 	case DP_SOC_PARAM_EAPOL_OVER_CONTROL_PORT:
13032 		soc->eapol_over_control_port = value;
13033 		dp_info("Eapol over control_port:%d",
13034 			soc->eapol_over_control_port);
13035 		break;
13036 	case DP_SOC_PARAM_MULTI_PEER_GRP_CMD_SUPPORT:
13037 		soc->multi_peer_grp_cmd_supported = value;
13038 		dp_info("Multi Peer group command support:%d",
13039 			soc->multi_peer_grp_cmd_supported);
13040 		break;
13041 	case DP_SOC_PARAM_RSSI_DBM_CONV_SUPPORT:
13042 		soc->features.rssi_dbm_conv_support = value;
13043 		dp_info("Rssi dbm conversion support:%u",
13044 			soc->features.rssi_dbm_conv_support);
13045 		break;
13046 	case DP_SOC_PARAM_UMAC_HW_RESET_SUPPORT:
13047 		soc->features.umac_hw_reset_support = value;
13048 		dp_info("UMAC HW reset support :%u",
13049 			soc->features.umac_hw_reset_support);
13050 		break;
13051 	default:
13052 		dp_info("not handled param %d ", param);
13053 		break;
13054 	}
13055 
13056 	return QDF_STATUS_SUCCESS;
13057 }
13058 
13059 static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
13060 				      void *stats_ctx)
13061 {
13062 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
13063 
13064 	soc->rate_stats_ctx = (struct cdp_soc_rate_stats_ctx *)stats_ctx;
13065 }
13066 
13067 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
13068 /**
13069  * dp_peer_flush_rate_stats_req() - Flush peer rate stats
13070  * @soc: Datapath SOC handle
13071  * @peer: Datapath peer
13072  * @arg: argument to iter function
13073  *
13074  * Return: QDF_STATUS
13075  */
13076 static void
13077 dp_peer_flush_rate_stats_req(struct dp_soc *soc, struct dp_peer *peer,
13078 			     void *arg)
13079 {
13080 	if (peer->bss_peer)
13081 		return;
13082 
13083 	dp_wdi_event_handler(
13084 		WDI_EVENT_FLUSH_RATE_STATS_REQ,
13085 		soc, dp_monitor_peer_get_peerstats_ctx(soc, peer),
13086 		peer->peer_id,
13087 		WDI_NO_VAL, peer->vdev->pdev->pdev_id);
13088 }
13089 
13090 /**
13091  * dp_flush_rate_stats_req() - Flush peer rate stats in pdev
13092  * @soc_hdl: Datapath SOC handle
13093  * @pdev_id: pdev_id
13094  *
13095  * Return: QDF_STATUS
13096  */
13097 static QDF_STATUS dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
13098 					  uint8_t pdev_id)
13099 {
13100 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
13101 	struct dp_pdev *pdev =
13102 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
13103 						   pdev_id);
13104 	if (!pdev)
13105 		return QDF_STATUS_E_FAILURE;
13106 
13107 	dp_pdev_iterate_peer(pdev, dp_peer_flush_rate_stats_req, NULL,
13108 			     DP_MOD_ID_CDP);
13109 
13110 	return QDF_STATUS_SUCCESS;
13111 }
13112 #else
13113 static inline QDF_STATUS
13114 dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
13115 			uint8_t pdev_id)
13116 {
13117 	return QDF_STATUS_SUCCESS;
13118 }
13119 #endif
13120 
13121 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
13122 #ifdef WLAN_FEATURE_11BE_MLO
13123 /**
13124  * dp_get_peer_extd_rate_link_stats() - function to get peer
13125  *				extended rate and link stats
13126  * @soc_hdl: dp soc handler
13127  * @mac_addr: mac address of peer
13128  *
13129  * Return: QDF_STATUS
13130  */
13131 static QDF_STATUS
13132 dp_get_peer_extd_rate_link_stats(struct cdp_soc_t *soc_hdl, uint8_t *mac_addr)
13133 {
13134 	uint8_t i;
13135 	struct dp_peer *link_peer;
13136 	struct dp_soc *link_peer_soc;
13137 	struct dp_mld_link_peers link_peers_info;
13138 	struct dp_peer *peer = NULL;
13139 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
13140 	struct cdp_peer_info peer_info = { 0 };
13141 
13142 	if (!mac_addr) {
13143 		dp_err("NULL peer mac addr\n");
13144 		return QDF_STATUS_E_FAILURE;
13145 	}
13146 
13147 	DP_PEER_INFO_PARAMS_INIT(&peer_info, DP_VDEV_ALL, mac_addr, false,
13148 				 CDP_WILD_PEER_TYPE);
13149 
13150 	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
13151 	if (!peer) {
13152 		dp_err("Invalid peer\n");
13153 		return QDF_STATUS_E_FAILURE;
13154 	}
13155 
13156 	if (IS_MLO_DP_MLD_PEER(peer)) {
13157 		dp_get_link_peers_ref_from_mld_peer(soc, peer,
13158 						    &link_peers_info,
13159 						    DP_MOD_ID_CDP);
13160 		for (i = 0; i < link_peers_info.num_links; i++) {
13161 			link_peer = link_peers_info.link_peers[i];
13162 			link_peer_soc = link_peer->vdev->pdev->soc;
13163 			dp_wdi_event_handler(WDI_EVENT_FLUSH_RATE_STATS_REQ,
13164 					     link_peer_soc,
13165 					     dp_monitor_peer_get_peerstats_ctx
13166 					     (link_peer_soc, link_peer),
13167 					     link_peer->peer_id,
13168 					     WDI_NO_VAL,
13169 					     link_peer->vdev->pdev->pdev_id);
13170 		}
13171 		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
13172 	} else {
13173 		dp_wdi_event_handler(
13174 				WDI_EVENT_FLUSH_RATE_STATS_REQ, soc,
13175 				dp_monitor_peer_get_peerstats_ctx(soc, peer),
13176 				peer->peer_id,
13177 				WDI_NO_VAL, peer->vdev->pdev->pdev_id);
13178 	}
13179 
13180 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
13181 	return QDF_STATUS_SUCCESS;
13182 }
13183 #else
13184 static QDF_STATUS
13185 dp_get_peer_extd_rate_link_stats(struct cdp_soc_t *soc_hdl, uint8_t *mac_addr)
13186 {
13187 	struct dp_peer *peer = NULL;
13188 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
13189 
13190 	if (!mac_addr) {
13191 		dp_err("NULL peer mac addr\n");
13192 		return QDF_STATUS_E_FAILURE;
13193 	}
13194 
13195 	peer = dp_peer_find_hash_find(soc, mac_addr, 0,
13196 				      DP_VDEV_ALL, DP_MOD_ID_CDP);
13197 	if (!peer) {
13198 		dp_err("Invalid peer\n");
13199 		return QDF_STATUS_E_FAILURE;
13200 	}
13201 
13202 	dp_wdi_event_handler(
13203 			WDI_EVENT_FLUSH_RATE_STATS_REQ, soc,
13204 			dp_monitor_peer_get_peerstats_ctx(soc, peer),
13205 			peer->peer_id,
13206 			WDI_NO_VAL, peer->vdev->pdev->pdev_id);
13207 
13208 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
13209 	return QDF_STATUS_SUCCESS;
13210 }
13211 #endif
13212 #else
13213 static inline QDF_STATUS
13214 dp_get_peer_extd_rate_link_stats(struct cdp_soc_t *soc_hdl, uint8_t *mac_addr)
13215 {
13216 	return QDF_STATUS_SUCCESS;
13217 }
13218 #endif
13219 
13220 static void *dp_peer_get_peerstats_ctx(struct cdp_soc_t *soc_hdl,
13221 				       uint8_t vdev_id,
13222 				       uint8_t *mac_addr)
13223 {
13224 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
13225 	struct dp_peer *peer;
13226 	void *peerstats_ctx = NULL;
13227 
13228 	if (mac_addr) {
13229 		peer = dp_peer_find_hash_find(soc, mac_addr,
13230 					      0, vdev_id,
13231 					      DP_MOD_ID_CDP);
13232 		if (!peer)
13233 			return NULL;
13234 
13235 		if (!IS_MLO_DP_MLD_PEER(peer))
13236 			peerstats_ctx = dp_monitor_peer_get_peerstats_ctx(soc,
13237 									  peer);
13238 
13239 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
13240 	}
13241 
13242 	return peerstats_ctx;
13243 }
13244 
13245 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
13246 static QDF_STATUS dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
13247 					   uint8_t pdev_id,
13248 					   void *buf)
13249 {
13250 	 dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS,
13251 			      (struct dp_soc *)soc, buf, HTT_INVALID_PEER,
13252 			      WDI_NO_VAL, pdev_id);
13253 	return QDF_STATUS_SUCCESS;
13254 }
13255 #else
13256 static inline QDF_STATUS
13257 dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
13258 			 uint8_t pdev_id,
13259 			 void *buf)
13260 {
13261 	return QDF_STATUS_SUCCESS;
13262 }
13263 #endif
13264 
13265 static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle)
13266 {
13267 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
13268 
13269 	return soc->rate_stats_ctx;
13270 }
13271 
13272 /**
13273  * dp_get_cfg() - get dp cfg
13274  * @soc: cdp soc handle
13275  * @cfg: cfg enum
13276  *
13277  * Return: cfg value
13278  */
13279 static uint32_t dp_get_cfg(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg)
13280 {
13281 	struct dp_soc *dpsoc = (struct dp_soc *)soc;
13282 	uint32_t value = 0;
13283 
13284 	switch (cfg) {
13285 	case cfg_dp_enable_data_stall:
13286 		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
13287 		break;
13288 	case cfg_dp_enable_p2p_ip_tcp_udp_checksum_offload:
13289 		value = dpsoc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload;
13290 		break;
13291 	case cfg_dp_enable_nan_ip_tcp_udp_checksum_offload:
13292 		value = dpsoc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload;
13293 		break;
13294 	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
13295 		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
13296 		break;
13297 	case cfg_dp_disable_legacy_mode_csum_offload:
13298 		value = dpsoc->wlan_cfg_ctx->
13299 					legacy_mode_checksumoffload_disable;
13300 		break;
13301 	case cfg_dp_tso_enable:
13302 		value = dpsoc->wlan_cfg_ctx->tso_enabled;
13303 		break;
13304 	case cfg_dp_lro_enable:
13305 		value = dpsoc->wlan_cfg_ctx->lro_enabled;
13306 		break;
13307 	case cfg_dp_gro_enable:
13308 		value = dpsoc->wlan_cfg_ctx->gro_enabled;
13309 		break;
13310 	case cfg_dp_tc_based_dyn_gro_enable:
13311 		value = dpsoc->wlan_cfg_ctx->tc_based_dynamic_gro;
13312 		break;
13313 	case cfg_dp_tc_ingress_prio:
13314 		value = dpsoc->wlan_cfg_ctx->tc_ingress_prio;
13315 		break;
13316 	case cfg_dp_sg_enable:
13317 		value = dpsoc->wlan_cfg_ctx->sg_enabled;
13318 		break;
13319 	case cfg_dp_tx_flow_start_queue_offset:
13320 		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
13321 		break;
13322 	case cfg_dp_tx_flow_stop_queue_threshold:
13323 		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
13324 		break;
13325 	case cfg_dp_disable_intra_bss_fwd:
13326 		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
13327 		break;
13328 	case cfg_dp_pktlog_buffer_size:
13329 		value = dpsoc->wlan_cfg_ctx->pktlog_buffer_size;
13330 		break;
13331 	case cfg_dp_wow_check_rx_pending:
13332 		value = dpsoc->wlan_cfg_ctx->wow_check_rx_pending_enable;
13333 		break;
13334 	default:
13335 		value =  0;
13336 	}
13337 
13338 	return value;
13339 }
13340 
13341 #ifdef PEER_FLOW_CONTROL
13342 /**
13343  * dp_tx_flow_ctrl_configure_pdev() - Configure flow control params
13344  * @soc_handle: datapath soc handle
13345  * @pdev_id: id of datapath pdev handle
13346  * @param: ol ath params
13347  * @value: value of the flag
13348  * @buff: Buffer to be passed
13349  *
13350  * Implemented this function same as legacy function. In legacy code, single
13351  * function is used to display stats and update pdev params.
13352  *
13353  * Return: 0 for success. nonzero for failure.
13354  */
13355 static uint32_t dp_tx_flow_ctrl_configure_pdev(struct cdp_soc_t *soc_handle,
13356 					       uint8_t pdev_id,
13357 					       enum _dp_param_t param,
13358 					       uint32_t value, void *buff)
13359 {
13360 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
13361 	struct dp_pdev *pdev =
13362 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
13363 						   pdev_id);
13364 
13365 	if (qdf_unlikely(!pdev))
13366 		return 1;
13367 
13368 	soc = pdev->soc;
13369 	if (!soc)
13370 		return 1;
13371 
13372 	switch (param) {
13373 #ifdef QCA_ENH_V3_STATS_SUPPORT
13374 	case DP_PARAM_VIDEO_DELAY_STATS_FC:
13375 		if (value)
13376 			pdev->delay_stats_flag = true;
13377 		else
13378 			pdev->delay_stats_flag = false;
13379 		break;
13380 	case DP_PARAM_VIDEO_STATS_FC:
13381 		qdf_print("------- TID Stats ------\n");
13382 		dp_pdev_print_tid_stats(pdev);
13383 		qdf_print("------ Delay Stats ------\n");
13384 		dp_pdev_print_delay_stats(pdev);
13385 		qdf_print("------ Rx Error Stats ------\n");
13386 		dp_pdev_print_rx_error_stats(pdev);
13387 		break;
13388 #endif
13389 	case DP_PARAM_TOTAL_Q_SIZE:
13390 		{
13391 			uint32_t tx_min, tx_max;
13392 
13393 			tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx);
13394 			tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
13395 
13396 			if (!buff) {
13397 				if ((value >= tx_min) && (value <= tx_max)) {
13398 					pdev->num_tx_allowed = value;
13399 				} else {
13400 					dp_tx_info("%pK: Failed to update num_tx_allowed, Q_min = %d Q_max = %d",
13401 						   soc, tx_min, tx_max);
13402 					break;
13403 				}
13404 			} else {
13405 				*(int *)buff = pdev->num_tx_allowed;
13406 			}
13407 		}
13408 		break;
13409 	default:
13410 		dp_tx_info("%pK: not handled param %d ", soc, param);
13411 		break;
13412 	}
13413 
13414 	return 0;
13415 }
13416 #endif
13417 
13418 /**
13419  * dp_set_pdev_pcp_tid_map_wifi3() - update pcp tid map in pdev
13420  * @psoc: dp soc handle
13421  * @pdev_id: id of DP_PDEV handle
13422  * @pcp: pcp value
13423  * @tid: tid value passed by the user
13424  *
13425  * Return: QDF_STATUS_SUCCESS on success
13426  */
13427 static QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
13428 						uint8_t pdev_id,
13429 						uint8_t pcp, uint8_t tid)
13430 {
13431 	struct dp_soc *soc = (struct dp_soc *)psoc;
13432 
13433 	soc->pcp_tid_map[pcp] = tid;
13434 
13435 	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
13436 	return QDF_STATUS_SUCCESS;
13437 }
13438 
13439 /**
13440  * dp_set_vdev_pcp_tid_map_wifi3() - update pcp tid map in vdev
13441  * @soc_hdl: DP soc handle
13442  * @vdev_id: id of DP_VDEV handle
13443  * @pcp: pcp value
13444  * @tid: tid value passed by the user
13445  *
13446  * Return: QDF_STATUS_SUCCESS on success
13447  */
13448 static QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc_hdl,
13449 						uint8_t vdev_id,
13450 						uint8_t pcp, uint8_t tid)
13451 {
13452 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13453 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
13454 						     DP_MOD_ID_CDP);
13455 
13456 	if (!vdev)
13457 		return QDF_STATUS_E_FAILURE;
13458 
13459 	vdev->pcp_tid_map[pcp] = tid;
13460 
13461 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
13462 	return QDF_STATUS_SUCCESS;
13463 }
13464 
13465 #if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
13466 static void dp_drain_txrx(struct cdp_soc_t *soc_handle)
13467 {
13468 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
13469 	uint32_t cur_tx_limit, cur_rx_limit;
13470 	uint32_t budget = 0xffff;
13471 	uint32_t val;
13472 	int i;
13473 	int cpu = dp_srng_get_cpu();
13474 
13475 	cur_tx_limit = soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit;
13476 	cur_rx_limit = soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit;
13477 
13478 	/* Temporarily increase soft irq limits when going to drain
13479 	 * the UMAC/LMAC SRNGs and restore them after polling.
13480 	 * Though the budget is on higher side, the TX/RX reaping loops
13481 	 * will not execute longer as both TX and RX would be suspended
13482 	 * by the time this API is called.
13483 	 */
13484 	dp_update_soft_irq_limits(soc, budget, budget);
13485 
13486 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
13487 		dp_service_srngs(&soc->intr_ctx[i], budget, cpu);
13488 
13489 	dp_update_soft_irq_limits(soc, cur_tx_limit, cur_rx_limit);
13490 
13491 	/* Do a dummy read at offset 0; this will ensure all
13492 	 * pendings writes(HP/TP) are flushed before read returns.
13493 	 */
13494 	val = HAL_REG_READ((struct hal_soc *)soc->hal_soc, 0);
13495 	dp_debug("Register value at offset 0: %u\n", val);
13496 }
13497 #endif
13498 
13499 #ifdef DP_UMAC_HW_RESET_SUPPORT
13500 /**
13501  * dp_reset_interrupt_ring_masks() - Reset rx interrupt masks
13502  * @soc: dp soc handle
13503  *
13504  * Return: void
13505  */
13506 static void dp_reset_interrupt_ring_masks(struct dp_soc *soc)
13507 {
13508 	struct dp_intr_bkp *intr_bkp;
13509 	struct dp_intr *intr_ctx;
13510 	int num_ctxt = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
13511 	int i;
13512 
13513 	intr_bkp =
13514 	(struct dp_intr_bkp *)qdf_mem_malloc_atomic(sizeof(struct dp_intr_bkp) *
13515 			num_ctxt);
13516 
13517 	qdf_assert_always(intr_bkp);
13518 
13519 	soc->umac_reset_ctx.intr_ctx_bkp = intr_bkp;
13520 	for (i = 0; i < num_ctxt; i++) {
13521 		intr_ctx = &soc->intr_ctx[i];
13522 
13523 		intr_bkp->tx_ring_mask = intr_ctx->tx_ring_mask;
13524 		intr_bkp->rx_ring_mask = intr_ctx->rx_ring_mask;
13525 		intr_bkp->rx_mon_ring_mask = intr_ctx->rx_mon_ring_mask;
13526 		intr_bkp->rx_err_ring_mask = intr_ctx->rx_err_ring_mask;
13527 		intr_bkp->rx_wbm_rel_ring_mask = intr_ctx->rx_wbm_rel_ring_mask;
13528 		intr_bkp->reo_status_ring_mask = intr_ctx->reo_status_ring_mask;
13529 		intr_bkp->rxdma2host_ring_mask = intr_ctx->rxdma2host_ring_mask;
13530 		intr_bkp->host2rxdma_ring_mask = intr_ctx->host2rxdma_ring_mask;
13531 		intr_bkp->host2rxdma_mon_ring_mask =
13532 					intr_ctx->host2rxdma_mon_ring_mask;
13533 		intr_bkp->tx_mon_ring_mask = intr_ctx->tx_mon_ring_mask;
13534 
13535 		intr_ctx->tx_ring_mask = 0;
13536 		intr_ctx->rx_ring_mask = 0;
13537 		intr_ctx->rx_mon_ring_mask = 0;
13538 		intr_ctx->rx_err_ring_mask = 0;
13539 		intr_ctx->rx_wbm_rel_ring_mask = 0;
13540 		intr_ctx->reo_status_ring_mask = 0;
13541 		intr_ctx->rxdma2host_ring_mask = 0;
13542 		intr_ctx->host2rxdma_ring_mask = 0;
13543 		intr_ctx->host2rxdma_mon_ring_mask = 0;
13544 		intr_ctx->tx_mon_ring_mask = 0;
13545 
13546 		intr_bkp++;
13547 	}
13548 }
13549 
13550 /**
13551  * dp_restore_interrupt_ring_masks() - Restore rx interrupt masks
13552  * @soc: dp soc handle
13553  *
13554  * Return: void
13555  */
13556 static void dp_restore_interrupt_ring_masks(struct dp_soc *soc)
13557 {
13558 	struct dp_intr_bkp *intr_bkp = soc->umac_reset_ctx.intr_ctx_bkp;
13559 	struct dp_intr_bkp *intr_bkp_base = intr_bkp;
13560 	struct dp_intr *intr_ctx;
13561 	int num_ctxt = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
13562 	int i;
13563 
13564 	qdf_assert_always(intr_bkp);
13565 
13566 	for (i = 0; i < num_ctxt; i++) {
13567 		intr_ctx = &soc->intr_ctx[i];
13568 
13569 		intr_ctx->tx_ring_mask = intr_bkp->tx_ring_mask;
13570 		intr_ctx->rx_ring_mask = intr_bkp->rx_ring_mask;
13571 		intr_ctx->rx_mon_ring_mask = intr_bkp->rx_mon_ring_mask;
13572 		intr_ctx->rx_err_ring_mask = intr_bkp->rx_err_ring_mask;
13573 		intr_ctx->rx_wbm_rel_ring_mask = intr_bkp->rx_wbm_rel_ring_mask;
13574 		intr_ctx->reo_status_ring_mask = intr_bkp->reo_status_ring_mask;
13575 		intr_ctx->rxdma2host_ring_mask = intr_bkp->rxdma2host_ring_mask;
13576 		intr_ctx->host2rxdma_ring_mask = intr_bkp->host2rxdma_ring_mask;
13577 		intr_ctx->host2rxdma_mon_ring_mask =
13578 			intr_bkp->host2rxdma_mon_ring_mask;
13579 		intr_ctx->tx_mon_ring_mask = intr_bkp->tx_mon_ring_mask;
13580 
13581 		intr_bkp++;
13582 	}
13583 
13584 	qdf_mem_free(intr_bkp_base);
13585 	soc->umac_reset_ctx.intr_ctx_bkp = NULL;
13586 }
13587 
13588 /**
13589  * dp_resume_tx_hardstart() - Restore the old Tx hardstart functions
13590  * @soc: dp soc handle
13591  *
13592  * Return: void
13593  */
13594 static void dp_resume_tx_hardstart(struct dp_soc *soc)
13595 {
13596 	struct dp_vdev *vdev;
13597 	struct ol_txrx_hardtart_ctxt ctxt = {0};
13598 	struct cdp_ctrl_objmgr_psoc *psoc = soc->ctrl_psoc;
13599 	int i;
13600 
13601 	for (i = 0; i < MAX_PDEV_CNT; i++) {
13602 		struct dp_pdev *pdev = soc->pdev_list[i];
13603 
13604 		if (!pdev)
13605 			continue;
13606 
13607 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
13608 			uint8_t vdev_id = vdev->vdev_id;
13609 
13610 			dp_vdev_fetch_tx_handler(vdev, soc, &ctxt);
13611 			soc->cdp_soc.ol_ops->dp_update_tx_hardstart(psoc,
13612 								    vdev_id,
13613 								    &ctxt);
13614 		}
13615 	}
13616 }
13617 
13618 /**
13619  * dp_pause_tx_hardstart() - Register Tx hardstart functions to drop packets
13620  * @soc: dp soc handle
13621  *
13622  * Return: void
13623  */
13624 static void dp_pause_tx_hardstart(struct dp_soc *soc)
13625 {
13626 	struct dp_vdev *vdev;
13627 	struct ol_txrx_hardtart_ctxt ctxt;
13628 	struct cdp_ctrl_objmgr_psoc *psoc = soc->ctrl_psoc;
13629 	int i;
13630 
13631 	ctxt.tx = &dp_tx_drop;
13632 	ctxt.tx_fast = &dp_tx_drop;
13633 	ctxt.tx_exception = &dp_tx_exc_drop;
13634 
13635 	for (i = 0; i < MAX_PDEV_CNT; i++) {
13636 		struct dp_pdev *pdev = soc->pdev_list[i];
13637 
13638 		if (!pdev)
13639 			continue;
13640 
13641 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
13642 			uint8_t vdev_id = vdev->vdev_id;
13643 
13644 			soc->cdp_soc.ol_ops->dp_update_tx_hardstart(psoc,
13645 								    vdev_id,
13646 								    &ctxt);
13647 		}
13648 	}
13649 }
13650 
13651 /**
13652  * dp_unregister_notify_umac_pre_reset_fw_callback() - unregister notify_fw_cb
13653  * @soc: dp soc handle
13654  *
13655  * Return: void
13656  */
13657 static inline
13658 void dp_unregister_notify_umac_pre_reset_fw_callback(struct dp_soc *soc)
13659 {
13660 	soc->notify_fw_callback = NULL;
13661 }
13662 
13663 /**
13664  * dp_check_n_notify_umac_prereset_done() - Send pre reset done to firmware
13665  * @soc: dp soc handle
13666  *
13667  * Return: void
13668  */
13669 static inline
13670 void dp_check_n_notify_umac_prereset_done(struct dp_soc *soc)
13671 {
13672 	/* Some Cpu(s) is processing the umac rings*/
13673 	if (soc->service_rings_running)
13674 		return;
13675 
13676 	/* Notify the firmware that Umac pre reset is complete */
13677 	dp_umac_reset_notify_action_completion(soc,
13678 					       UMAC_RESET_ACTION_DO_PRE_RESET);
13679 
13680 	/* Unregister the callback */
13681 	dp_unregister_notify_umac_pre_reset_fw_callback(soc);
13682 }
13683 
13684 /**
13685  * dp_register_notify_umac_pre_reset_fw_callback() - register notify_fw_cb
13686  * @soc: dp soc handle
13687  *
13688  * Return: void
13689  */
13690 static inline
13691 void dp_register_notify_umac_pre_reset_fw_callback(struct dp_soc *soc)
13692 {
13693 	soc->notify_fw_callback = dp_check_n_notify_umac_prereset_done;
13694 }
13695 
13696 #ifdef DP_UMAC_HW_HARD_RESET
13697 /**
13698  * dp_set_umac_regs() - Reinitialize host umac registers
13699  * @soc: dp soc handle
13700  *
13701  * Return: void
13702  */
13703 static void dp_set_umac_regs(struct dp_soc *soc)
13704 {
13705 	int i;
13706 	struct hal_reo_params reo_params;
13707 
13708 	qdf_mem_zero(&reo_params, sizeof(reo_params));
13709 
13710 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
13711 		if (soc->arch_ops.reo_remap_config(soc, &reo_params.remap0,
13712 						   &reo_params.remap1,
13713 						   &reo_params.remap2))
13714 			reo_params.rx_hash_enabled = true;
13715 		else
13716 			reo_params.rx_hash_enabled = false;
13717 	}
13718 
13719 	reo_params.reo_qref = &soc->reo_qref;
13720 	hal_reo_setup(soc->hal_soc, &reo_params, 0);
13721 
13722 	soc->arch_ops.dp_cc_reg_cfg_init(soc, true);
13723 
13724 	for (i = 0; i < PCP_TID_MAP_MAX; i++)
13725 		hal_tx_update_pcp_tid_map(soc->hal_soc, soc->pcp_tid_map[i], i);
13726 
13727 	for (i = 0; i < MAX_PDEV_CNT; i++) {
13728 		struct dp_vdev *vdev = NULL;
13729 		struct dp_pdev *pdev = soc->pdev_list[i];
13730 
13731 		if (!pdev)
13732 			continue;
13733 
13734 		for (i = 0; i < soc->num_hw_dscp_tid_map; i++)
13735 			hal_tx_set_dscp_tid_map(soc->hal_soc,
13736 						pdev->dscp_tid_map[i], i);
13737 
13738 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
13739 			soc->arch_ops.dp_bank_reconfig(soc, vdev);
13740 			soc->arch_ops.dp_reconfig_tx_vdev_mcast_ctrl(soc,
13741 								      vdev);
13742 		}
13743 	}
13744 }
13745 #else
13746 static void dp_set_umac_regs(struct dp_soc *soc)
13747 {
13748 }
13749 #endif
13750 
13751 /**
13752  * dp_reinit_rings() - Reinitialize host managed rings
13753  * @soc: dp soc handle
13754  *
13755  * Return: QDF_STATUS
13756  */
13757 static void dp_reinit_rings(struct dp_soc *soc)
13758 {
13759 	unsigned long end;
13760 
13761 	dp_soc_srng_deinit(soc);
13762 	dp_hw_link_desc_ring_deinit(soc);
13763 
13764 	/* Busy wait for 2 ms to make sure the rings are in idle state
13765 	 * before we enable them again
13766 	 */
13767 	end = jiffies + msecs_to_jiffies(2);
13768 	while (time_before(jiffies, end))
13769 		;
13770 
13771 	dp_hw_link_desc_ring_init(soc);
13772 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
13773 	dp_soc_srng_init(soc);
13774 }
13775 
13776 /**
13777  * dp_umac_reset_handle_pre_reset() - Handle Umac prereset interrupt from FW
13778  * @soc: dp soc handle
13779  *
13780  * Return: QDF_STATUS
13781  */
13782 static QDF_STATUS dp_umac_reset_handle_pre_reset(struct dp_soc *soc)
13783 {
13784 	dp_reset_interrupt_ring_masks(soc);
13785 
13786 	dp_pause_tx_hardstart(soc);
13787 	dp_pause_reo_send_cmd(soc);
13788 
13789 	dp_check_n_notify_umac_prereset_done(soc);
13790 
13791 	soc->umac_reset_ctx.nbuf_list = NULL;
13792 
13793 	return QDF_STATUS_SUCCESS;
13794 }
13795 
13796 /**
13797  * dp_umac_reset_handle_post_reset() - Handle Umac postreset interrupt from FW
13798  * @soc: dp soc handle
13799  *
13800  * Return: QDF_STATUS
13801  */
13802 static QDF_STATUS dp_umac_reset_handle_post_reset(struct dp_soc *soc)
13803 {
13804 	if (!soc->umac_reset_ctx.skel_enable) {
13805 		qdf_nbuf_t *nbuf_list = &soc->umac_reset_ctx.nbuf_list;
13806 
13807 		dp_set_umac_regs(soc);
13808 
13809 		dp_reinit_rings(soc);
13810 
13811 		dp_rx_desc_reuse(soc, nbuf_list);
13812 
13813 		dp_cleanup_reo_cmd_module(soc);
13814 
13815 		dp_tx_desc_pool_cleanup(soc, nbuf_list);
13816 
13817 		dp_reset_tid_q_setup(soc);
13818 	}
13819 
13820 	return dp_umac_reset_notify_action_completion(soc,
13821 					UMAC_RESET_ACTION_DO_POST_RESET_START);
13822 }
13823 
13824 /**
13825  * dp_umac_reset_handle_post_reset_complete() - Handle Umac postreset_complete
13826  *						interrupt from FW
13827  * @soc: dp soc handle
13828  *
13829  * Return: QDF_STATUS
13830  */
13831 static QDF_STATUS dp_umac_reset_handle_post_reset_complete(struct dp_soc *soc)
13832 {
13833 	QDF_STATUS status;
13834 	qdf_nbuf_t nbuf_list = soc->umac_reset_ctx.nbuf_list;
13835 
13836 	soc->umac_reset_ctx.nbuf_list = NULL;
13837 
13838 	dp_resume_reo_send_cmd(soc);
13839 
13840 	dp_restore_interrupt_ring_masks(soc);
13841 
13842 	dp_resume_tx_hardstart(soc);
13843 
13844 	status = dp_umac_reset_notify_action_completion(soc,
13845 				UMAC_RESET_ACTION_DO_POST_RESET_COMPLETE);
13846 
13847 	while (nbuf_list) {
13848 		qdf_nbuf_t nbuf = nbuf_list->next;
13849 
13850 		qdf_nbuf_free(nbuf_list);
13851 		nbuf_list = nbuf;
13852 	}
13853 
13854 	dp_umac_reset_info("Umac reset done on soc %pK\n prereset : %u us\n"
13855 			   "postreset : %u us \n postreset complete: %u us \n",
13856 			   soc,
13857 			   soc->umac_reset_ctx.ts.pre_reset_done -
13858 			   soc->umac_reset_ctx.ts.pre_reset_start,
13859 			   soc->umac_reset_ctx.ts.post_reset_done -
13860 			   soc->umac_reset_ctx.ts.post_reset_start,
13861 			   soc->umac_reset_ctx.ts.post_reset_complete_done -
13862 			   soc->umac_reset_ctx.ts.post_reset_complete_start);
13863 
13864 	return status;
13865 }
13866 #endif
13867 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
13868 static void
13869 dp_set_pkt_capture_mode(struct cdp_soc_t *soc_handle, bool val)
13870 {
13871 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
13872 
13873 	soc->wlan_cfg_ctx->pkt_capture_mode = val;
13874 }
13875 #endif
13876 
13877 #ifdef HW_TX_DELAY_STATS_ENABLE
13878 /**
13879  * dp_enable_disable_vdev_tx_delay_stats() - Start/Stop tx delay stats capture
13880  * @soc_hdl: DP soc handle
13881  * @vdev_id: vdev id
13882  * @value: value
13883  *
13884  * Return: None
13885  */
13886 static void
13887 dp_enable_disable_vdev_tx_delay_stats(struct cdp_soc_t *soc_hdl,
13888 				      uint8_t vdev_id,
13889 				      uint8_t value)
13890 {
13891 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13892 	struct dp_vdev *vdev = NULL;
13893 
13894 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
13895 	if (!vdev)
13896 		return;
13897 
13898 	vdev->hw_tx_delay_stats_enabled = value;
13899 
13900 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
13901 }
13902 
13903 /**
13904  * dp_check_vdev_tx_delay_stats_enabled() - check the feature is enabled or not
13905  * @soc_hdl: DP soc handle
13906  * @vdev_id: vdev id
13907  *
13908  * Return: 1 if enabled, 0 if disabled
13909  */
13910 static uint8_t
13911 dp_check_vdev_tx_delay_stats_enabled(struct cdp_soc_t *soc_hdl,
13912 				     uint8_t vdev_id)
13913 {
13914 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13915 	struct dp_vdev *vdev;
13916 	uint8_t ret_val = 0;
13917 
13918 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
13919 	if (!vdev)
13920 		return ret_val;
13921 
13922 	ret_val = vdev->hw_tx_delay_stats_enabled;
13923 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
13924 
13925 	return ret_val;
13926 }
13927 #endif
13928 
13929 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
13930 static void
13931 dp_recovery_vdev_flush_peers(struct cdp_soc_t *cdp_soc,
13932 			     uint8_t vdev_id,
13933 			     bool mlo_peers_only)
13934 {
13935 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
13936 	struct dp_vdev *vdev;
13937 
13938 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
13939 
13940 	if (!vdev)
13941 		return;
13942 
13943 	dp_vdev_flush_peers((struct cdp_vdev *)vdev, false, mlo_peers_only);
13944 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
13945 }
13946 #endif
13947 #ifdef QCA_GET_TSF_VIA_REG
13948 /**
13949  * dp_get_tsf_time() - get tsf time
13950  * @soc_hdl: Datapath soc handle
13951  * @tsf_id: TSF identifier
13952  * @mac_id: mac_id
13953  * @tsf: pointer to update tsf value
13954  * @tsf_sync_soc_time: pointer to update tsf sync time
13955  *
13956  * Return: None.
13957  */
13958 static inline void
13959 dp_get_tsf_time(struct cdp_soc_t *soc_hdl, uint32_t tsf_id, uint32_t mac_id,
13960 		uint64_t *tsf, uint64_t *tsf_sync_soc_time)
13961 {
13962 	hal_get_tsf_time(((struct dp_soc *)soc_hdl)->hal_soc, tsf_id, mac_id,
13963 			 tsf, tsf_sync_soc_time);
13964 }
13965 #else
13966 static inline void
13967 dp_get_tsf_time(struct cdp_soc_t *soc_hdl, uint32_t tsf_id, uint32_t mac_id,
13968 		uint64_t *tsf, uint64_t *tsf_sync_soc_time)
13969 {
13970 }
13971 #endif
13972 
13973 /**
13974  * dp_get_tsf2_scratch_reg() - get tsf2 offset from the scratch register
13975  * @soc_hdl: Datapath soc handle
13976  * @mac_id: mac_id
13977  * @value: pointer to update tsf2 offset value
13978  *
13979  * Return: None.
13980  */
13981 static inline void
13982 dp_get_tsf2_scratch_reg(struct cdp_soc_t *soc_hdl, uint8_t mac_id,
13983 			uint64_t *value)
13984 {
13985 	hal_get_tsf2_offset(((struct dp_soc *)soc_hdl)->hal_soc, mac_id, value);
13986 }
13987 
13988 /**
13989  * dp_get_tqm_scratch_reg() - get tqm offset from the scratch register
13990  * @soc_hdl: Datapath soc handle
13991  * @value: pointer to update tqm offset value
13992  *
13993  * Return: None.
13994  */
13995 static inline void
13996 dp_get_tqm_scratch_reg(struct cdp_soc_t *soc_hdl, uint64_t *value)
13997 {
13998 	hal_get_tqm_offset(((struct dp_soc *)soc_hdl)->hal_soc, value);
13999 }
14000 
14001 /**
14002  * dp_set_tx_pause() - Pause or resume tx path
14003  * @soc_hdl: Datapath soc handle
14004  * @flag: set or clear is_tx_pause
14005  *
14006  * Return: None.
14007  */
14008 static inline
14009 void dp_set_tx_pause(struct cdp_soc_t *soc_hdl, bool flag)
14010 {
14011 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14012 
14013 	soc->is_tx_pause = flag;
14014 }
14015 
14016 static struct cdp_cmn_ops dp_ops_cmn = {
14017 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
14018 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
14019 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
14020 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
14021 	.txrx_pdev_post_attach = dp_pdev_post_attach_wifi3,
14022 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
14023 	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
14024 	.txrx_peer_create = dp_peer_create_wifi3,
14025 	.txrx_peer_setup = dp_peer_setup_wifi3,
14026 #ifdef FEATURE_AST
14027 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
14028 #else
14029 	.txrx_peer_teardown = NULL,
14030 #endif
14031 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
14032 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
14033 	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
14034 	.txrx_peer_get_ast_info_by_pdev =
14035 		dp_peer_get_ast_info_by_pdevid_wifi3,
14036 	.txrx_peer_ast_delete_by_soc =
14037 		dp_peer_ast_entry_del_by_soc,
14038 	.txrx_peer_ast_delete_by_pdev =
14039 		dp_peer_ast_entry_del_by_pdev,
14040 	.txrx_peer_delete = dp_peer_delete_wifi3,
14041 #ifdef DP_RX_UDP_OVER_PEER_ROAM
14042 	.txrx_update_roaming_peer = dp_update_roaming_peer_wifi3,
14043 #endif
14044 	.txrx_vdev_register = dp_vdev_register_wifi3,
14045 	.txrx_soc_detach = dp_soc_detach_wifi3,
14046 	.txrx_soc_deinit = dp_soc_deinit_wifi3,
14047 	.txrx_soc_init = dp_soc_init_wifi3,
14048 #ifndef QCA_HOST_MODE_WIFI_DISABLED
14049 	.txrx_tso_soc_attach = dp_tso_soc_attach,
14050 	.txrx_tso_soc_detach = dp_tso_soc_detach,
14051 	.tx_send = dp_tx_send,
14052 	.tx_send_exc = dp_tx_send_exception,
14053 #endif
14054 	.set_tx_pause = dp_set_tx_pause,
14055 	.txrx_pdev_init = dp_pdev_init_wifi3,
14056 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
14057 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
14058 	.txrx_ath_getstats = dp_get_device_stats,
14059 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
14060 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
14061 	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
14062 	.delba_process = dp_delba_process_wifi3,
14063 	.set_addba_response = dp_set_addba_response,
14064 	.flush_cache_rx_queue = NULL,
14065 	.tid_update_ba_win_size = dp_rx_tid_update_ba_win_size,
14066 	/* TODO: get API's for dscp-tid need to be added*/
14067 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
14068 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
14069 	.txrx_get_total_per = dp_get_total_per,
14070 	.txrx_stats_request = dp_txrx_stats_request,
14071 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
14072 	.display_stats = dp_txrx_dump_stats,
14073 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
14074 	.txrx_intr_detach = dp_soc_interrupt_detach,
14075 	.txrx_ppeds_stop = dp_soc_ppeds_stop,
14076 	.set_pn_check = dp_set_pn_check_wifi3,
14077 	.set_key_sec_type = dp_set_key_sec_type_wifi3,
14078 	.update_config_parameters = dp_update_config_parameters,
14079 	/* TODO: Add other functions */
14080 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
14081 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
14082 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
14083 	.get_vdev_dp_ext_txrx_handle = dp_vdev_get_dp_ext_handle,
14084 	.set_vdev_dp_ext_txrx_handle = dp_vdev_set_dp_ext_handle,
14085 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
14086 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
14087 	.map_pdev_to_lmac = dp_soc_map_pdev_to_lmac,
14088 	.handle_mode_change = dp_soc_handle_pdev_mode_change,
14089 	.set_pdev_status_down = dp_soc_set_pdev_status_down,
14090 	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
14091 	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
14092 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
14093 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
14094 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
14095 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
14096 	.set_soc_param = dp_soc_set_param,
14097 	.txrx_get_os_rx_handles_from_vdev =
14098 					dp_get_os_rx_handles_from_vdev_wifi3,
14099 	.delba_tx_completion = dp_delba_tx_completion_wifi3,
14100 	.get_dp_capabilities = dp_get_cfg_capabilities,
14101 	.txrx_get_cfg = dp_get_cfg,
14102 	.set_rate_stats_ctx = dp_soc_set_rate_stats_ctx,
14103 	.get_rate_stats_ctx = dp_soc_get_rate_stats_ctx,
14104 	.txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats,
14105 	.txrx_flush_rate_stats_request = dp_flush_rate_stats_req,
14106 	.txrx_peer_get_peerstats_ctx = dp_peer_get_peerstats_ctx,
14107 
14108 	.set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3,
14109 	.set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3,
14110 
14111 	.txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler,
14112 #ifdef QCA_MULTIPASS_SUPPORT
14113 	.set_vlan_groupkey = dp_set_vlan_groupkey,
14114 #endif
14115 	.get_peer_mac_list = dp_get_peer_mac_list,
14116 	.get_peer_id = dp_get_peer_id,
14117 #ifdef QCA_SUPPORT_WDS_EXTENDED
14118 	.set_wds_ext_peer_rx = dp_wds_ext_set_peer_rx,
14119 #endif /* QCA_SUPPORT_WDS_EXTENDED */
14120 
14121 #if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
14122 	.txrx_drain = dp_drain_txrx,
14123 #endif
14124 #if defined(FEATURE_RUNTIME_PM)
14125 	.set_rtpm_tput_policy = dp_set_rtpm_tput_policy_requirement,
14126 #endif
14127 #ifdef WLAN_SYSFS_DP_STATS
14128 	.txrx_sysfs_fill_stats = dp_sysfs_fill_stats,
14129 	.txrx_sysfs_set_stat_type = dp_sysfs_set_stat_type,
14130 #endif /* WLAN_SYSFS_DP_STATS */
14131 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
14132 	.set_pkt_capture_mode = dp_set_pkt_capture_mode,
14133 #endif
14134 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
14135 	.txrx_recovery_vdev_flush_peers = dp_recovery_vdev_flush_peers,
14136 #endif
14137 	.txrx_umac_reset_deinit = dp_soc_umac_reset_deinit,
14138 	.txrx_get_tsf_time = dp_get_tsf_time,
14139 	.txrx_get_tsf2_offset = dp_get_tsf2_scratch_reg,
14140 	.txrx_get_tqm_offset = dp_get_tqm_scratch_reg,
14141 };
14142 
14143 static struct cdp_ctrl_ops dp_ops_ctrl = {
14144 	.txrx_peer_authorize = dp_peer_authorize,
14145 	.txrx_peer_get_authorize = dp_peer_get_authorize,
14146 #ifdef VDEV_PEER_PROTOCOL_COUNT
14147 	.txrx_enable_peer_protocol_count = dp_enable_vdev_peer_protocol_count,
14148 	.txrx_set_peer_protocol_drop_mask =
14149 		dp_enable_vdev_peer_protocol_drop_mask,
14150 	.txrx_is_peer_protocol_count_enabled =
14151 		dp_is_vdev_peer_protocol_count_enabled,
14152 	.txrx_get_peer_protocol_drop_mask = dp_get_vdev_peer_protocol_drop_mask,
14153 #endif
14154 	.txrx_set_vdev_param = dp_set_vdev_param,
14155 	.txrx_set_psoc_param = dp_set_psoc_param,
14156 	.txrx_get_psoc_param = dp_get_psoc_param,
14157 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
14158 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
14159 	.txrx_get_sec_type = dp_get_sec_type,
14160 	.txrx_wdi_event_sub = dp_wdi_event_sub,
14161 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
14162 	.txrx_set_pdev_param = dp_set_pdev_param,
14163 	.txrx_get_pdev_param = dp_get_pdev_param,
14164 	.txrx_set_peer_param = dp_set_peer_param,
14165 	.txrx_get_peer_param = dp_get_peer_param,
14166 #ifdef VDEV_PEER_PROTOCOL_COUNT
14167 	.txrx_peer_protocol_cnt = dp_peer_stats_update_protocol_cnt,
14168 #endif
14169 #ifdef WLAN_SUPPORT_MSCS
14170 	.txrx_record_mscs_params = dp_record_mscs_params,
14171 #endif
14172 	.set_key = dp_set_michael_key,
14173 	.txrx_get_vdev_param = dp_get_vdev_param,
14174 	.calculate_delay_stats = dp_calculate_delay_stats,
14175 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
14176 	.txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag,
14177 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
14178 	.txrx_dump_pdev_rx_protocol_tag_stats =
14179 				dp_dump_pdev_rx_protocol_tag_stats,
14180 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
14181 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
14182 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
14183 	.txrx_set_rx_flow_tag = dp_set_rx_flow_tag,
14184 	.txrx_dump_rx_flow_tag_stats = dp_dump_rx_flow_tag_stats,
14185 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
14186 #ifdef QCA_MULTIPASS_SUPPORT
14187 	.txrx_peer_set_vlan_id = dp_peer_set_vlan_id,
14188 #endif /*QCA_MULTIPASS_SUPPORT*/
14189 #if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
14190 	.txrx_set_delta_tsf = dp_set_delta_tsf,
14191 #endif
14192 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
14193 	.txrx_set_tsf_ul_delay_report = dp_set_tsf_ul_delay_report,
14194 	.txrx_get_uplink_delay = dp_get_uplink_delay,
14195 #endif
14196 #ifdef QCA_UNDECODED_METADATA_SUPPORT
14197 	.txrx_set_pdev_phyrx_error_mask = dp_set_pdev_phyrx_error_mask,
14198 	.txrx_get_pdev_phyrx_error_mask = dp_get_pdev_phyrx_error_mask,
14199 #endif
14200 	.txrx_peer_flush_frags = dp_peer_flush_frags,
14201 };
14202 
14203 static struct cdp_me_ops dp_ops_me = {
14204 #ifndef QCA_HOST_MODE_WIFI_DISABLED
14205 #ifdef ATH_SUPPORT_IQUE
14206 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
14207 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
14208 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
14209 #endif
14210 #endif
14211 };
14212 
14213 static struct cdp_host_stats_ops dp_ops_host_stats = {
14214 	.txrx_per_peer_stats = dp_get_host_peer_stats,
14215 	.get_fw_peer_stats = dp_get_fw_peer_stats,
14216 	.get_htt_stats = dp_get_htt_stats,
14217 	.txrx_stats_publish = dp_txrx_stats_publish,
14218 	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
14219 	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
14220 	.txrx_get_soc_stats = dp_txrx_get_soc_stats,
14221 	.txrx_get_peer_stats_param = dp_txrx_get_peer_stats_param,
14222 	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
14223 	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
14224 #if defined(IPA_OFFLOAD) && defined(QCA_ENHANCED_STATS_SUPPORT)
14225 	.txrx_get_peer_stats = dp_ipa_txrx_get_peer_stats,
14226 	.txrx_get_vdev_stats  = dp_ipa_txrx_get_vdev_stats,
14227 	.txrx_get_pdev_stats = dp_ipa_txrx_get_pdev_stats,
14228 #endif
14229 	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
14230 	.txrx_update_vdev_stats = dp_txrx_update_vdev_host_stats,
14231 	.txrx_get_peer_delay_stats = dp_txrx_get_peer_delay_stats,
14232 	.txrx_get_peer_jitter_stats = dp_txrx_get_peer_jitter_stats,
14233 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
14234 	.txrx_alloc_vdev_stats_id = dp_txrx_alloc_vdev_stats_id,
14235 	.txrx_reset_vdev_stats_id = dp_txrx_reset_vdev_stats_id,
14236 #endif
14237 #ifdef WLAN_TX_PKT_CAPTURE_ENH
14238 	.get_peer_tx_capture_stats = dp_peer_get_tx_capture_stats,
14239 	.get_pdev_tx_capture_stats = dp_pdev_get_tx_capture_stats,
14240 #endif /* WLAN_TX_PKT_CAPTURE_ENH */
14241 #ifdef HW_TX_DELAY_STATS_ENABLE
14242 	.enable_disable_vdev_tx_delay_stats =
14243 				dp_enable_disable_vdev_tx_delay_stats,
14244 	.is_tx_delay_stats_enabled = dp_check_vdev_tx_delay_stats_enabled,
14245 #endif
14246 	.txrx_get_pdev_tid_stats = dp_pdev_get_tid_stats,
14247 #ifdef WLAN_TELEMETRY_STATS_SUPPORT
14248 	.txrx_pdev_telemetry_stats = dp_get_pdev_telemetry_stats,
14249 	.txrx_peer_telemetry_stats = dp_get_peer_telemetry_stats,
14250 	.txrx_pdev_deter_stats = dp_get_pdev_deter_stats,
14251 	.txrx_peer_deter_stats = dp_get_peer_deter_stats,
14252 	.txrx_update_pdev_chan_util_stats = dp_update_pdev_chan_util_stats,
14253 #endif
14254 	.txrx_get_peer_extd_rate_link_stats =
14255 					dp_get_peer_extd_rate_link_stats,
14256 	.get_pdev_obss_stats = dp_get_obss_stats,
14257 	.clear_pdev_obss_pd_stats = dp_clear_pdev_obss_pd_stats,
14258 	/* TODO */
14259 };
14260 
14261 static struct cdp_raw_ops dp_ops_raw = {
14262 	/* TODO */
14263 };
14264 
14265 #ifdef PEER_FLOW_CONTROL
14266 static struct cdp_pflow_ops dp_ops_pflow = {
14267 	dp_tx_flow_ctrl_configure_pdev,
14268 };
14269 #endif /* CONFIG_WIN */
14270 
14271 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
14272 static struct cdp_cfr_ops dp_ops_cfr = {
14273 	.txrx_cfr_filter = NULL,
14274 	.txrx_get_cfr_rcc = dp_get_cfr_rcc,
14275 	.txrx_set_cfr_rcc = dp_set_cfr_rcc,
14276 	.txrx_get_cfr_dbg_stats = dp_get_cfr_dbg_stats,
14277 	.txrx_clear_cfr_dbg_stats = dp_clear_cfr_dbg_stats,
14278 };
14279 #endif
14280 
14281 #ifdef WLAN_SUPPORT_MSCS
14282 static struct cdp_mscs_ops dp_ops_mscs = {
14283 	.mscs_peer_lookup_n_get_priority = dp_mscs_peer_lookup_n_get_priority,
14284 };
14285 #endif
14286 
14287 #ifdef WLAN_SUPPORT_MESH_LATENCY
14288 static struct cdp_mesh_latency_ops dp_ops_mesh_latency = {
14289 	.mesh_latency_update_peer_parameter =
14290 		dp_mesh_latency_update_peer_parameter,
14291 };
14292 #endif
14293 
14294 #ifdef WLAN_SUPPORT_SCS
14295 static struct cdp_scs_ops dp_ops_scs = {
14296 	.scs_peer_lookup_n_rule_match = dp_scs_peer_lookup_n_rule_match,
14297 };
14298 #endif
14299 
14300 #ifdef CONFIG_SAWF_DEF_QUEUES
14301 static struct cdp_sawf_ops dp_ops_sawf = {
14302 	.sawf_def_queues_map_req = dp_sawf_def_queues_map_req,
14303 	.sawf_def_queues_unmap_req = dp_sawf_def_queues_unmap_req,
14304 	.sawf_def_queues_get_map_report =
14305 		dp_sawf_def_queues_get_map_report,
14306 #ifdef CONFIG_SAWF_STATS
14307 	.txrx_get_peer_sawf_delay_stats = dp_sawf_get_peer_delay_stats,
14308 	.txrx_get_peer_sawf_tx_stats = dp_sawf_get_peer_tx_stats,
14309 	.sawf_mpdu_stats_req = dp_sawf_mpdu_stats_req,
14310 	.sawf_mpdu_details_stats_req = dp_sawf_mpdu_details_stats_req,
14311 	.txrx_sawf_set_mov_avg_params = dp_sawf_set_mov_avg_params,
14312 	.txrx_sawf_set_sla_params = dp_sawf_set_sla_params,
14313 	.txrx_sawf_init_telemtery_params = dp_sawf_init_telemetry_params,
14314 	.telemetry_get_throughput_stats = dp_sawf_get_tx_stats,
14315 	.telemetry_get_mpdu_stats = dp_sawf_get_mpdu_sched_stats,
14316 	.telemetry_get_drop_stats = dp_sawf_get_drop_stats,
14317 	.peer_config_ul = dp_sawf_peer_config_ul,
14318 	.swaf_peer_is_sla_configured = dp_swaf_peer_is_sla_configured,
14319 #endif
14320 };
14321 #endif
14322 
14323 #if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
14324 /**
14325  * dp_flush_ring_hptp() - Update ring shadow
14326  *			  register HP/TP address when runtime
14327  *                        resume
14328  * @soc: DP soc context
14329  * @hal_srng: srng
14330  *
14331  * Return: None
14332  */
14333 static
14334 void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
14335 {
14336 	if (hal_srng && hal_srng_get_clear_event(hal_srng,
14337 						 HAL_SRNG_FLUSH_EVENT)) {
14338 		/* Acquire the lock */
14339 		hal_srng_access_start(soc->hal_soc, hal_srng);
14340 
14341 		hal_srng_access_end(soc->hal_soc, hal_srng);
14342 
14343 		hal_srng_set_flush_last_ts(hal_srng);
14344 
14345 		dp_debug("flushed");
14346 	}
14347 }
14348 #endif
14349 
14350 #ifdef DP_TX_TRACKING
14351 
14352 #define DP_TX_COMP_MAX_LATENCY_MS 60000
14353 /**
14354  * dp_tx_comp_delay_check() - calculate time latency for tx completion per pkt
14355  * @tx_desc: tx descriptor
14356  *
14357  * Calculate time latency for tx completion per pkt and trigger self recovery
14358  * when the delay is more than threshold value.
14359  *
14360  * Return: True if delay is more than threshold
14361  */
14362 static bool dp_tx_comp_delay_check(struct dp_tx_desc_s *tx_desc)
14363 {
14364 	uint64_t time_latency, timestamp_tick = tx_desc->timestamp_tick;
14365 	qdf_ktime_t current_time = qdf_ktime_real_get();
14366 	qdf_ktime_t timestamp = tx_desc->timestamp;
14367 
14368 	if (dp_tx_pkt_tracepoints_enabled()) {
14369 		if (!timestamp)
14370 			return false;
14371 
14372 		time_latency = qdf_ktime_to_ms(current_time) -
14373 				qdf_ktime_to_ms(timestamp);
14374 		if (time_latency >= DP_TX_COMP_MAX_LATENCY_MS) {
14375 			dp_err_rl("enqueued: %llu ms, current : %llu ms",
14376 				  timestamp, current_time);
14377 			return true;
14378 		}
14379 	} else {
14380 		if (!timestamp_tick)
14381 			return false;
14382 
14383 		current_time = qdf_system_ticks();
14384 		time_latency = qdf_system_ticks_to_msecs(current_time -
14385 							 timestamp_tick);
14386 		if (time_latency >= DP_TX_COMP_MAX_LATENCY_MS) {
14387 			dp_err_rl("enqueued: %u ms, current : %u ms",
14388 				  qdf_system_ticks_to_msecs(timestamp_tick),
14389 				  qdf_system_ticks_to_msecs(current_time));
14390 			return true;
14391 		}
14392 	}
14393 
14394 	return false;
14395 }
14396 
14397 /**
14398  * dp_find_missing_tx_comp() - check for leaked descriptor in tx path
14399  * @soc: DP SOC context
14400  *
14401  * Parse through descriptors in all pools and validate magic number and
14402  * completion time. Trigger self recovery if magic value is corrupted.
14403  *
14404  * Return: None.
14405  */
14406 static void dp_find_missing_tx_comp(struct dp_soc *soc)
14407 {
14408 	uint8_t i;
14409 	uint32_t j;
14410 	uint32_t num_desc, page_id, offset;
14411 	uint16_t num_desc_per_page;
14412 	struct dp_tx_desc_s *tx_desc = NULL;
14413 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
14414 
14415 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
14416 		tx_desc_pool = &soc->tx_desc[i];
14417 		if (!(tx_desc_pool->pool_size) ||
14418 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
14419 		    !(tx_desc_pool->desc_pages.cacheable_pages))
14420 			continue;
14421 
14422 		num_desc = tx_desc_pool->pool_size;
14423 		num_desc_per_page =
14424 			tx_desc_pool->desc_pages.num_element_per_page;
14425 		for (j = 0; j < num_desc; j++) {
14426 			page_id = j / num_desc_per_page;
14427 			offset = j % num_desc_per_page;
14428 
14429 			if (qdf_unlikely(!(tx_desc_pool->
14430 					 desc_pages.cacheable_pages)))
14431 				break;
14432 
14433 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
14434 			if (tx_desc->magic == DP_TX_MAGIC_PATTERN_FREE) {
14435 				continue;
14436 			} else if (tx_desc->magic ==
14437 				   DP_TX_MAGIC_PATTERN_INUSE) {
14438 				if (dp_tx_comp_delay_check(tx_desc)) {
14439 					dp_err_rl("Tx completion not rcvd for id: %u",
14440 						  tx_desc->id);
14441 					if (tx_desc->vdev_id == DP_INVALID_VDEV_ID) {
14442 						tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
14443 						dp_err_rl("Freed tx_desc %u",
14444 							  tx_desc->id);
14445 						dp_tx_comp_free_buf(soc,
14446 								    tx_desc,
14447 								    false);
14448 						dp_tx_desc_release(tx_desc, i);
14449 						DP_STATS_INC(soc,
14450 							     tx.tx_comp_force_freed, 1);
14451 					}
14452 				}
14453 			} else {
14454 				dp_err_rl("tx desc %u corrupted, flags: 0x%x",
14455 					  tx_desc->id, tx_desc->flags);
14456 			}
14457 		}
14458 	}
14459 }
14460 #else
14461 static inline void dp_find_missing_tx_comp(struct dp_soc *soc)
14462 {
14463 }
14464 #endif
14465 
14466 #ifdef FEATURE_RUNTIME_PM
14467 /**
14468  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
14469  * @soc_hdl: Datapath soc handle
14470  * @pdev_id: id of data path pdev handle
14471  *
14472  * DP is ready to runtime suspend if there are no pending TX packets.
14473  *
14474  * Return: QDF_STATUS
14475  */
14476 static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
14477 {
14478 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14479 	struct dp_pdev *pdev;
14480 	uint8_t i;
14481 	int32_t tx_pending;
14482 
14483 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14484 	if (!pdev) {
14485 		dp_err("pdev is NULL");
14486 		return QDF_STATUS_E_INVAL;
14487 	}
14488 
14489 	/* Abort if there are any pending TX packets */
14490 	tx_pending = dp_get_tx_pending(dp_pdev_to_cdp_pdev(pdev));
14491 	if (tx_pending) {
14492 		dp_info_rl("%pK: Abort suspend due to pending TX packets %d",
14493 			   soc, tx_pending);
14494 		dp_find_missing_tx_comp(soc);
14495 		/* perform a force flush if tx is pending */
14496 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
14497 			hal_srng_set_event(soc->tcl_data_ring[i].hal_srng,
14498 					   HAL_SRNG_FLUSH_EVENT);
14499 			dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
14500 		}
14501 		qdf_atomic_set(&soc->tx_pending_rtpm, 0);
14502 
14503 		return QDF_STATUS_E_AGAIN;
14504 	}
14505 
14506 	if (dp_runtime_get_refcount(soc)) {
14507 		dp_init_info("refcount: %d", dp_runtime_get_refcount(soc));
14508 
14509 		return QDF_STATUS_E_AGAIN;
14510 	}
14511 
14512 	if (soc->intr_mode == DP_INTR_POLL)
14513 		qdf_timer_stop(&soc->int_timer);
14514 
14515 	dp_rx_fst_update_pm_suspend_status(soc, true);
14516 
14517 	return QDF_STATUS_SUCCESS;
14518 }
14519 
14520 #define DP_FLUSH_WAIT_CNT 10
14521 #define DP_RUNTIME_SUSPEND_WAIT_MS 10
14522 /**
14523  * dp_runtime_resume() - ensure DP is ready to runtime resume
14524  * @soc_hdl: Datapath soc handle
14525  * @pdev_id: id of data path pdev handle
14526  *
14527  * Resume DP for runtime PM.
14528  *
14529  * Return: QDF_STATUS
14530  */
14531 static QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
14532 {
14533 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14534 	int i, suspend_wait = 0;
14535 
14536 	if (soc->intr_mode == DP_INTR_POLL)
14537 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
14538 
14539 	/*
14540 	 * Wait until dp runtime refcount becomes zero or time out, then flush
14541 	 * pending tx for runtime suspend.
14542 	 */
14543 	while (dp_runtime_get_refcount(soc) &&
14544 	       suspend_wait < DP_FLUSH_WAIT_CNT) {
14545 		qdf_sleep(DP_RUNTIME_SUSPEND_WAIT_MS);
14546 		suspend_wait++;
14547 	}
14548 
14549 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
14550 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
14551 	}
14552 	qdf_atomic_set(&soc->tx_pending_rtpm, 0);
14553 
14554 	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
14555 	dp_rx_fst_update_pm_suspend_status(soc, false);
14556 
14557 	return QDF_STATUS_SUCCESS;
14558 }
14559 #endif /* FEATURE_RUNTIME_PM */
14560 
14561 /**
14562  * dp_tx_get_success_ack_stats() - get tx success completion count
14563  * @soc_hdl: Datapath soc handle
14564  * @vdev_id: vdev identifier
14565  *
14566  * Return: tx success ack count
14567  */
14568 static uint32_t dp_tx_get_success_ack_stats(struct cdp_soc_t *soc_hdl,
14569 					    uint8_t vdev_id)
14570 {
14571 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14572 	struct cdp_vdev_stats *vdev_stats = NULL;
14573 	uint32_t tx_success;
14574 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
14575 						     DP_MOD_ID_CDP);
14576 
14577 	if (!vdev) {
14578 		dp_cdp_err("%pK: Invalid vdev id %d", soc, vdev_id);
14579 		return 0;
14580 	}
14581 
14582 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
14583 	if (!vdev_stats) {
14584 		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats", soc);
14585 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
14586 		return 0;
14587 	}
14588 
14589 	dp_aggregate_vdev_stats(vdev, vdev_stats);
14590 
14591 	tx_success = vdev_stats->tx.tx_success.num;
14592 	qdf_mem_free(vdev_stats);
14593 
14594 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
14595 	return tx_success;
14596 }
14597 
14598 #ifdef WLAN_SUPPORT_DATA_STALL
14599 /**
14600  * dp_register_data_stall_detect_cb() - register data stall callback
14601  * @soc_hdl: Datapath soc handle
14602  * @pdev_id: id of data path pdev handle
14603  * @data_stall_detect_callback: data stall callback function
14604  *
14605  * Return: QDF_STATUS Enumeration
14606  */
14607 static
14608 QDF_STATUS dp_register_data_stall_detect_cb(
14609 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
14610 			data_stall_detect_cb data_stall_detect_callback)
14611 {
14612 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14613 	struct dp_pdev *pdev;
14614 
14615 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14616 	if (!pdev) {
14617 		dp_err("pdev NULL!");
14618 		return QDF_STATUS_E_INVAL;
14619 	}
14620 
14621 	pdev->data_stall_detect_callback = data_stall_detect_callback;
14622 	return QDF_STATUS_SUCCESS;
14623 }
14624 
14625 /**
14626  * dp_deregister_data_stall_detect_cb() - de-register data stall callback
14627  * @soc_hdl: Datapath soc handle
14628  * @pdev_id: id of data path pdev handle
14629  * @data_stall_detect_callback: data stall callback function
14630  *
14631  * Return: QDF_STATUS Enumeration
14632  */
14633 static
14634 QDF_STATUS dp_deregister_data_stall_detect_cb(
14635 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
14636 			data_stall_detect_cb data_stall_detect_callback)
14637 {
14638 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14639 	struct dp_pdev *pdev;
14640 
14641 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14642 	if (!pdev) {
14643 		dp_err("pdev NULL!");
14644 		return QDF_STATUS_E_INVAL;
14645 	}
14646 
14647 	pdev->data_stall_detect_callback = NULL;
14648 	return QDF_STATUS_SUCCESS;
14649 }
14650 
14651 /**
14652  * dp_txrx_post_data_stall_event() - post data stall event
14653  * @soc_hdl: Datapath soc handle
14654  * @indicator: Module triggering data stall
14655  * @data_stall_type: data stall event type
14656  * @pdev_id: pdev id
14657  * @vdev_id_bitmap: vdev id bitmap
14658  * @recovery_type: data stall recovery type
14659  *
14660  * Return: None
14661  */
14662 static void
14663 dp_txrx_post_data_stall_event(struct cdp_soc_t *soc_hdl,
14664 			      enum data_stall_log_event_indicator indicator,
14665 			      enum data_stall_log_event_type data_stall_type,
14666 			      uint32_t pdev_id, uint32_t vdev_id_bitmap,
14667 			      enum data_stall_log_recovery_type recovery_type)
14668 {
14669 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14670 	struct data_stall_event_info data_stall_info;
14671 	struct dp_pdev *pdev;
14672 
14673 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14674 	if (!pdev) {
14675 		dp_err("pdev NULL!");
14676 		return;
14677 	}
14678 
14679 	if (!pdev->data_stall_detect_callback) {
14680 		dp_err("data stall cb not registered!");
14681 		return;
14682 	}
14683 
14684 	dp_info("data_stall_type: %x pdev_id: %d",
14685 		data_stall_type, pdev_id);
14686 
14687 	data_stall_info.indicator = indicator;
14688 	data_stall_info.data_stall_type = data_stall_type;
14689 	data_stall_info.vdev_id_bitmap = vdev_id_bitmap;
14690 	data_stall_info.pdev_id = pdev_id;
14691 	data_stall_info.recovery_type = recovery_type;
14692 
14693 	pdev->data_stall_detect_callback(&data_stall_info);
14694 }
14695 #endif /* WLAN_SUPPORT_DATA_STALL */
14696 
14697 #ifdef WLAN_FEATURE_STATS_EXT
14698 /* rx hw stats event wait timeout in ms */
14699 #define DP_REO_STATUS_STATS_TIMEOUT 850
14700 /**
14701  * dp_txrx_ext_stats_request() - request dp txrx extended stats request
14702  * @soc_hdl: soc handle
14703  * @pdev_id: pdev id
14704  * @req: stats request
14705  *
14706  * Return: QDF_STATUS
14707  */
14708 static QDF_STATUS
14709 dp_txrx_ext_stats_request(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
14710 			  struct cdp_txrx_ext_stats *req)
14711 {
14712 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
14713 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14714 	int i = 0;
14715 	int tcl_ring_full = 0;
14716 
14717 	if (!pdev) {
14718 		dp_err("pdev is null");
14719 		return QDF_STATUS_E_INVAL;
14720 	}
14721 
14722 	dp_aggregate_pdev_stats(pdev);
14723 
14724 	for(i = 0 ; i < MAX_TCL_DATA_RINGS; i++)
14725 		tcl_ring_full += soc->stats.tx.tcl_ring_full[i];
14726 
14727 	req->tx_msdu_enqueue = pdev->stats.tx_i.processed.num;
14728 	req->tx_msdu_overflow = tcl_ring_full;
14729 	/* Error rate at LMAC */
14730 	req->rx_mpdu_received = soc->ext_stats.rx_mpdu_received +
14731 				pdev->stats.err.fw_reported_rxdma_error;
14732 	/* only count error source from RXDMA */
14733 	req->rx_mpdu_error = pdev->stats.err.fw_reported_rxdma_error;
14734 
14735 	/* Error rate at above the MAC */
14736 	req->rx_mpdu_delivered = soc->ext_stats.rx_mpdu_received;
14737 	req->rx_mpdu_missed = pdev->stats.err.reo_error;
14738 
14739 	dp_info("ext stats: tx_msdu_enq = %u, tx_msdu_overflow = %u, "
14740 		"rx_mpdu_receive = %u, rx_mpdu_delivered = %u, "
14741 		"rx_mpdu_missed = %u, rx_mpdu_error = %u",
14742 		req->tx_msdu_enqueue,
14743 		req->tx_msdu_overflow,
14744 		req->rx_mpdu_received,
14745 		req->rx_mpdu_delivered,
14746 		req->rx_mpdu_missed,
14747 		req->rx_mpdu_error);
14748 
14749 	return QDF_STATUS_SUCCESS;
14750 }
14751 
14752 /**
14753  * dp_rx_hw_stats_cb() - request rx hw stats response callback
14754  * @soc: soc handle
14755  * @cb_ctxt: callback context
14756  * @reo_status: reo command response status
14757  *
14758  * Return: None
14759  */
14760 static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
14761 			      union hal_reo_status *reo_status)
14762 {
14763 	struct dp_req_rx_hw_stats_t *rx_hw_stats = cb_ctxt;
14764 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
14765 	bool is_query_timeout;
14766 
14767 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
14768 	is_query_timeout = rx_hw_stats->is_query_timeout;
14769 	/* free the cb_ctxt if all pending tid stats query is received */
14770 	if (qdf_atomic_dec_and_test(&rx_hw_stats->pending_tid_stats_cnt)) {
14771 		if (!is_query_timeout) {
14772 			qdf_event_set(&soc->rx_hw_stats_event);
14773 			soc->is_last_stats_ctx_init = false;
14774 		}
14775 
14776 		qdf_mem_free(rx_hw_stats);
14777 	}
14778 
14779 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
14780 		dp_info("REO stats failure %d",
14781 			queue_status->header.status);
14782 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14783 		return;
14784 	}
14785 
14786 	if (!is_query_timeout) {
14787 		soc->ext_stats.rx_mpdu_received +=
14788 					queue_status->mpdu_frms_cnt;
14789 		soc->ext_stats.rx_mpdu_missed +=
14790 					queue_status->hole_cnt;
14791 	}
14792 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14793 }
14794 
14795 /**
14796  * dp_request_rx_hw_stats() - request rx hardware stats
14797  * @soc_hdl: soc handle
14798  * @vdev_id: vdev id
14799  *
14800  * Return: None
14801  */
14802 static QDF_STATUS
14803 dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
14804 {
14805 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
14806 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
14807 						     DP_MOD_ID_CDP);
14808 	struct dp_peer *peer = NULL;
14809 	QDF_STATUS status;
14810 	struct dp_req_rx_hw_stats_t *rx_hw_stats;
14811 	int rx_stats_sent_cnt = 0;
14812 	uint32_t last_rx_mpdu_received;
14813 	uint32_t last_rx_mpdu_missed;
14814 
14815 	if (!vdev) {
14816 		dp_err("vdev is null for vdev_id: %u", vdev_id);
14817 		status = QDF_STATUS_E_INVAL;
14818 		goto out;
14819 	}
14820 
14821 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_CDP);
14822 
14823 	if (!peer) {
14824 		dp_err("Peer is NULL");
14825 		status = QDF_STATUS_E_INVAL;
14826 		goto out;
14827 	}
14828 
14829 	rx_hw_stats = qdf_mem_malloc(sizeof(*rx_hw_stats));
14830 
14831 	if (!rx_hw_stats) {
14832 		dp_err("malloc failed for hw stats structure");
14833 		status = QDF_STATUS_E_INVAL;
14834 		goto out;
14835 	}
14836 
14837 	qdf_event_reset(&soc->rx_hw_stats_event);
14838 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
14839 	/* save the last soc cumulative stats and reset it to 0 */
14840 	last_rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
14841 	last_rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
14842 	soc->ext_stats.rx_mpdu_received = 0;
14843 	soc->ext_stats.rx_mpdu_missed = 0;
14844 
14845 	dp_debug("HW stats query start");
14846 	rx_stats_sent_cnt =
14847 		dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, rx_hw_stats);
14848 	if (!rx_stats_sent_cnt) {
14849 		dp_err("no tid stats sent successfully");
14850 		qdf_mem_free(rx_hw_stats);
14851 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14852 		status = QDF_STATUS_E_INVAL;
14853 		goto out;
14854 	}
14855 	qdf_atomic_set(&rx_hw_stats->pending_tid_stats_cnt,
14856 		       rx_stats_sent_cnt);
14857 	rx_hw_stats->is_query_timeout = false;
14858 	soc->is_last_stats_ctx_init = true;
14859 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14860 
14861 	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
14862 				       DP_REO_STATUS_STATS_TIMEOUT);
14863 	dp_debug("HW stats query end with %d", rx_stats_sent_cnt);
14864 
14865 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
14866 	if (status != QDF_STATUS_SUCCESS) {
14867 		dp_info("partial rx hw stats event collected with %d",
14868 			qdf_atomic_read(
14869 				&rx_hw_stats->pending_tid_stats_cnt));
14870 		if (soc->is_last_stats_ctx_init)
14871 			rx_hw_stats->is_query_timeout = true;
14872 		/*
14873 		 * If query timeout happened, use the last saved stats
14874 		 * for this time query.
14875 		 */
14876 		soc->ext_stats.rx_mpdu_received = last_rx_mpdu_received;
14877 		soc->ext_stats.rx_mpdu_missed = last_rx_mpdu_missed;
14878 		DP_STATS_INC(soc, rx.rx_hw_stats_timeout, 1);
14879 
14880 	}
14881 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14882 
14883 out:
14884 	if (peer)
14885 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
14886 	if (vdev)
14887 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
14888 	DP_STATS_INC(soc, rx.rx_hw_stats_requested, 1);
14889 
14890 	return status;
14891 }
14892 
14893 /**
14894  * dp_reset_rx_hw_ext_stats() - Reset rx hardware ext stats
14895  * @soc_hdl: soc handle
14896  *
14897  * Return: None
14898  */
14899 static
14900 void dp_reset_rx_hw_ext_stats(struct cdp_soc_t *soc_hdl)
14901 {
14902 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
14903 
14904 	soc->ext_stats.rx_mpdu_received = 0;
14905 	soc->ext_stats.rx_mpdu_missed = 0;
14906 }
14907 #endif /* WLAN_FEATURE_STATS_EXT */
14908 
14909 static
14910 uint32_t dp_get_tx_rings_grp_bitmap(struct cdp_soc_t *soc_hdl)
14911 {
14912 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
14913 
14914 	return soc->wlan_cfg_ctx->tx_rings_grp_bitmap;
14915 }
14916 
14917 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
14918 /**
14919  * dp_mark_first_wakeup_packet() - set flag to indicate that
14920  *    fw is compatible for marking first packet after wow wakeup
14921  * @soc_hdl: Datapath soc handle
14922  * @pdev_id: id of data path pdev handle
14923  * @value: 1 for enabled/ 0 for disabled
14924  *
14925  * Return: None
14926  */
14927 static void dp_mark_first_wakeup_packet(struct cdp_soc_t *soc_hdl,
14928 					uint8_t pdev_id, uint8_t value)
14929 {
14930 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14931 	struct dp_pdev *pdev;
14932 
14933 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14934 	if (!pdev) {
14935 		dp_err("pdev is NULL");
14936 		return;
14937 	}
14938 
14939 	pdev->is_first_wakeup_packet = value;
14940 }
14941 #endif
14942 
14943 #ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
14944 /**
14945  * dp_set_peer_txq_flush_config() - Set the peer txq flush configuration
14946  * @soc_hdl: Opaque handle to the DP soc object
14947  * @vdev_id: VDEV identifier
14948  * @mac: MAC address of the peer
14949  * @ac: access category mask
14950  * @tid: TID mask
14951  * @policy: Flush policy
14952  *
14953  * Return: 0 on success, errno on failure
14954  */
14955 static int dp_set_peer_txq_flush_config(struct cdp_soc_t *soc_hdl,
14956 					uint8_t vdev_id, uint8_t *mac,
14957 					uint8_t ac, uint32_t tid,
14958 					enum cdp_peer_txq_flush_policy policy)
14959 {
14960 	struct dp_soc *soc;
14961 
14962 	if (!soc_hdl) {
14963 		dp_err("soc is null");
14964 		return -EINVAL;
14965 	}
14966 	soc = cdp_soc_t_to_dp_soc(soc_hdl);
14967 	return target_if_peer_txq_flush_config(soc->ctrl_psoc, vdev_id,
14968 					       mac, ac, tid, policy);
14969 }
14970 #endif
14971 
14972 #ifdef CONNECTIVITY_PKTLOG
14973 /**
14974  * dp_register_packetdump_callback() - registers
14975  *  tx data packet, tx mgmt. packet and rx data packet
14976  *  dump callback handler.
14977  *
14978  * @soc_hdl: Datapath soc handle
14979  * @pdev_id: id of data path pdev handle
14980  * @dp_tx_packetdump_cb: tx packetdump cb
14981  * @dp_rx_packetdump_cb: rx packetdump cb
14982  *
14983  * This function is used to register tx data pkt, tx mgmt.
14984  * pkt and rx data pkt dump callback
14985  *
14986  * Return: None
14987  *
14988  */
14989 static inline
14990 void dp_register_packetdump_callback(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
14991 				     ol_txrx_pktdump_cb dp_tx_packetdump_cb,
14992 				     ol_txrx_pktdump_cb dp_rx_packetdump_cb)
14993 {
14994 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14995 	struct dp_pdev *pdev;
14996 
14997 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14998 	if (!pdev) {
14999 		dp_err("pdev is NULL!");
15000 		return;
15001 	}
15002 
15003 	pdev->dp_tx_packetdump_cb = dp_tx_packetdump_cb;
15004 	pdev->dp_rx_packetdump_cb = dp_rx_packetdump_cb;
15005 }
15006 
15007 /**
15008  * dp_deregister_packetdump_callback() - deregidters
15009  *  tx data packet, tx mgmt. packet and rx data packet
15010  *  dump callback handler
15011  * @soc_hdl: Datapath soc handle
15012  * @pdev_id: id of data path pdev handle
15013  *
15014  * This function is used to deregidter tx data pkt.,
15015  * tx mgmt. pkt and rx data pkt. dump callback
15016  *
15017  * Return: None
15018  *
15019  */
15020 static inline
15021 void dp_deregister_packetdump_callback(struct cdp_soc_t *soc_hdl,
15022 				       uint8_t pdev_id)
15023 {
15024 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15025 	struct dp_pdev *pdev;
15026 
15027 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15028 	if (!pdev) {
15029 		dp_err("pdev is NULL!");
15030 		return;
15031 	}
15032 
15033 	pdev->dp_tx_packetdump_cb = NULL;
15034 	pdev->dp_rx_packetdump_cb = NULL;
15035 }
15036 #endif
15037 
15038 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
15039 /**
15040  * dp_set_bus_vote_lvl_high() - Take a vote on bus bandwidth from dp
15041  * @soc_hdl: Datapath soc handle
15042  * @high: whether the bus bw is high or not
15043  *
15044  * Return: void
15045  */
15046 static void
15047 dp_set_bus_vote_lvl_high(ol_txrx_soc_handle soc_hdl, bool high)
15048 {
15049 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15050 
15051 	soc->high_throughput = high;
15052 }
15053 
15054 /**
15055  * dp_get_bus_vote_lvl_high() - get bus bandwidth vote to dp
15056  * @soc_hdl: Datapath soc handle
15057  *
15058  * Return: bool
15059  */
15060 static bool
15061 dp_get_bus_vote_lvl_high(ol_txrx_soc_handle soc_hdl)
15062 {
15063 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15064 
15065 	return soc->high_throughput;
15066 }
15067 #endif
15068 
15069 #ifdef DP_PEER_EXTENDED_API
15070 static struct cdp_misc_ops dp_ops_misc = {
15071 #ifdef FEATURE_WLAN_TDLS
15072 	.tx_non_std = dp_tx_non_std,
15073 #endif /* FEATURE_WLAN_TDLS */
15074 	.get_opmode = dp_get_opmode,
15075 #ifdef FEATURE_RUNTIME_PM
15076 	.runtime_suspend = dp_runtime_suspend,
15077 	.runtime_resume = dp_runtime_resume,
15078 #endif /* FEATURE_RUNTIME_PM */
15079 	.get_num_rx_contexts = dp_get_num_rx_contexts,
15080 	.get_tx_ack_stats = dp_tx_get_success_ack_stats,
15081 #ifdef WLAN_SUPPORT_DATA_STALL
15082 	.txrx_data_stall_cb_register = dp_register_data_stall_detect_cb,
15083 	.txrx_data_stall_cb_deregister = dp_deregister_data_stall_detect_cb,
15084 	.txrx_post_data_stall_event = dp_txrx_post_data_stall_event,
15085 #endif
15086 
15087 #ifdef WLAN_FEATURE_STATS_EXT
15088 	.txrx_ext_stats_request = dp_txrx_ext_stats_request,
15089 	.request_rx_hw_stats = dp_request_rx_hw_stats,
15090 	.reset_rx_hw_ext_stats = dp_reset_rx_hw_ext_stats,
15091 #endif /* WLAN_FEATURE_STATS_EXT */
15092 	.vdev_inform_ll_conn = dp_vdev_inform_ll_conn,
15093 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
15094 	.set_swlm_enable = dp_soc_set_swlm_enable,
15095 	.is_swlm_enabled = dp_soc_is_swlm_enabled,
15096 #endif
15097 	.display_txrx_hw_info = dp_display_srng_info,
15098 	.get_tx_rings_grp_bitmap = dp_get_tx_rings_grp_bitmap,
15099 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
15100 	.mark_first_wakeup_packet = dp_mark_first_wakeup_packet,
15101 #endif
15102 #ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
15103 	.set_peer_txq_flush_config = dp_set_peer_txq_flush_config,
15104 #endif
15105 #ifdef CONNECTIVITY_PKTLOG
15106 	.register_pktdump_cb = dp_register_packetdump_callback,
15107 	.unregister_pktdump_cb = dp_deregister_packetdump_callback,
15108 #endif
15109 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
15110 	.set_bus_vote_lvl_high = dp_set_bus_vote_lvl_high,
15111 	.get_bus_vote_lvl_high = dp_get_bus_vote_lvl_high,
15112 #endif
15113 };
15114 #endif
15115 
15116 #ifdef DP_FLOW_CTL
15117 static struct cdp_flowctl_ops dp_ops_flowctl = {
15118 	/* WIFI 3.0 DP implement as required. */
15119 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
15120 	.flow_pool_map_handler = dp_tx_flow_pool_map,
15121 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
15122 	.register_pause_cb = dp_txrx_register_pause_cb,
15123 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
15124 	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
15125 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
15126 };
15127 
15128 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
15129 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
15130 };
15131 #endif
15132 
15133 #ifdef IPA_OFFLOAD
15134 static struct cdp_ipa_ops dp_ops_ipa = {
15135 	.ipa_get_resource = dp_ipa_get_resource,
15136 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
15137 	.ipa_iounmap_doorbell_vaddr = dp_ipa_iounmap_doorbell_vaddr,
15138 	.ipa_op_response = dp_ipa_op_response,
15139 	.ipa_register_op_cb = dp_ipa_register_op_cb,
15140 	.ipa_deregister_op_cb = dp_ipa_deregister_op_cb,
15141 	.ipa_get_stat = dp_ipa_get_stat,
15142 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
15143 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
15144 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
15145 	.ipa_setup = dp_ipa_setup,
15146 	.ipa_cleanup = dp_ipa_cleanup,
15147 	.ipa_setup_iface = dp_ipa_setup_iface,
15148 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
15149 	.ipa_enable_pipes = dp_ipa_enable_pipes,
15150 	.ipa_disable_pipes = dp_ipa_disable_pipes,
15151 	.ipa_set_perf_level = dp_ipa_set_perf_level,
15152 	.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd,
15153 	.ipa_tx_buf_smmu_mapping = dp_ipa_tx_buf_smmu_mapping,
15154 	.ipa_tx_buf_smmu_unmapping = dp_ipa_tx_buf_smmu_unmapping,
15155 #ifdef QCA_ENHANCED_STATS_SUPPORT
15156 	.ipa_update_peer_rx_stats = dp_ipa_update_peer_rx_stats,
15157 #endif
15158 #ifdef IPA_WDS_EASYMESH_FEATURE
15159 	.ipa_ast_create = dp_ipa_ast_create,
15160 #endif
15161 };
15162 #endif
15163 
15164 #ifdef DP_POWER_SAVE
15165 static QDF_STATUS dp_bus_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
15166 {
15167 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15168 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15169 	int timeout = SUSPEND_DRAIN_WAIT;
15170 	int drain_wait_delay = 50; /* 50 ms */
15171 	int32_t tx_pending;
15172 
15173 	if (qdf_unlikely(!pdev)) {
15174 		dp_err("pdev is NULL");
15175 		return QDF_STATUS_E_INVAL;
15176 	}
15177 
15178 	/* Abort if there are any pending TX packets */
15179 	while ((tx_pending = dp_get_tx_pending((struct cdp_pdev *)pdev))) {
15180 		qdf_sleep(drain_wait_delay);
15181 		if (timeout <= 0) {
15182 			dp_info("TX frames are pending %d, abort suspend",
15183 				tx_pending);
15184 			dp_find_missing_tx_comp(soc);
15185 			return QDF_STATUS_E_TIMEOUT;
15186 		}
15187 		timeout = timeout - drain_wait_delay;
15188 	}
15189 
15190 	if (soc->intr_mode == DP_INTR_POLL)
15191 		qdf_timer_stop(&soc->int_timer);
15192 
15193 	/* Stop monitor reap timer and reap any pending frames in ring */
15194 	dp_monitor_reap_timer_suspend(soc);
15195 
15196 	dp_suspend_fse_cache_flush(soc);
15197 	dp_rx_fst_update_pm_suspend_status(soc, true);
15198 
15199 	return QDF_STATUS_SUCCESS;
15200 }
15201 
15202 static QDF_STATUS dp_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
15203 {
15204 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15205 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15206 	uint8_t i;
15207 
15208 	if (qdf_unlikely(!pdev)) {
15209 		dp_err("pdev is NULL");
15210 		return QDF_STATUS_E_INVAL;
15211 	}
15212 
15213 	if (soc->intr_mode == DP_INTR_POLL)
15214 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
15215 
15216 	/* Start monitor reap timer */
15217 	dp_monitor_reap_timer_start(soc, CDP_MON_REAP_SOURCE_ANY);
15218 
15219 	dp_resume_fse_cache_flush(soc);
15220 
15221 	for (i = 0; i < soc->num_tcl_data_rings; i++)
15222 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
15223 
15224 	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
15225 	dp_rx_fst_update_pm_suspend_status(soc, false);
15226 
15227 	dp_rx_fst_requeue_wq(soc);
15228 
15229 	return QDF_STATUS_SUCCESS;
15230 }
15231 
15232 /**
15233  * dp_process_wow_ack_rsp() - process wow ack response
15234  * @soc_hdl: datapath soc handle
15235  * @pdev_id: data path pdev handle id
15236  *
15237  * Return: none
15238  */
15239 static void dp_process_wow_ack_rsp(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
15240 {
15241 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15242 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15243 
15244 	if (qdf_unlikely(!pdev)) {
15245 		dp_err("pdev is NULL");
15246 		return;
15247 	}
15248 
15249 	/*
15250 	 * As part of wow enable FW disables the mon status ring and in wow ack
15251 	 * response from FW reap mon status ring to make sure no packets pending
15252 	 * in the ring.
15253 	 */
15254 	dp_monitor_reap_timer_suspend(soc);
15255 }
15256 
15257 /**
15258  * dp_process_target_suspend_req() - process target suspend request
15259  * @soc_hdl: datapath soc handle
15260  * @pdev_id: data path pdev handle id
15261  *
15262  * Return: none
15263  */
15264 static void dp_process_target_suspend_req(struct cdp_soc_t *soc_hdl,
15265 					  uint8_t pdev_id)
15266 {
15267 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15268 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15269 
15270 	if (qdf_unlikely(!pdev)) {
15271 		dp_err("pdev is NULL");
15272 		return;
15273 	}
15274 
15275 	/* Stop monitor reap timer and reap any pending frames in ring */
15276 	dp_monitor_reap_timer_suspend(soc);
15277 }
15278 
15279 static struct cdp_bus_ops dp_ops_bus = {
15280 	.bus_suspend = dp_bus_suspend,
15281 	.bus_resume = dp_bus_resume,
15282 	.process_wow_ack_rsp = dp_process_wow_ack_rsp,
15283 	.process_target_suspend_req = dp_process_target_suspend_req
15284 };
15285 #endif
15286 
15287 #ifdef DP_FLOW_CTL
15288 static struct cdp_throttle_ops dp_ops_throttle = {
15289 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
15290 };
15291 
15292 static struct cdp_cfg_ops dp_ops_cfg = {
15293 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
15294 };
15295 #endif
15296 
15297 #ifdef DP_PEER_EXTENDED_API
15298 static struct cdp_ocb_ops dp_ops_ocb = {
15299 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
15300 };
15301 
15302 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
15303 	.clear_stats = dp_txrx_clear_dump_stats,
15304 };
15305 
15306 static struct cdp_peer_ops dp_ops_peer = {
15307 	.register_peer = dp_register_peer,
15308 	.clear_peer = dp_clear_peer,
15309 	.find_peer_exist = dp_find_peer_exist,
15310 	.find_peer_exist_on_vdev = dp_find_peer_exist_on_vdev,
15311 	.find_peer_exist_on_other_vdev = dp_find_peer_exist_on_other_vdev,
15312 	.peer_state_update = dp_peer_state_update,
15313 	.get_vdevid = dp_get_vdevid,
15314 	.get_vdev_by_peer_addr = dp_get_vdev_by_peer_addr,
15315 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
15316 	.get_peer_state = dp_get_peer_state,
15317 	.peer_flush_frags = dp_peer_flush_frags,
15318 	.set_peer_as_tdls_peer = dp_set_peer_as_tdls_peer,
15319 };
15320 #endif
15321 
15322 static void dp_soc_txrx_ops_attach(struct dp_soc *soc)
15323 {
15324 	soc->cdp_soc.ops->cmn_drv_ops = &dp_ops_cmn;
15325 	soc->cdp_soc.ops->ctrl_ops = &dp_ops_ctrl;
15326 	soc->cdp_soc.ops->me_ops = &dp_ops_me;
15327 	soc->cdp_soc.ops->host_stats_ops = &dp_ops_host_stats;
15328 	soc->cdp_soc.ops->wds_ops = &dp_ops_wds;
15329 	soc->cdp_soc.ops->raw_ops = &dp_ops_raw;
15330 #ifdef PEER_FLOW_CONTROL
15331 	soc->cdp_soc.ops->pflow_ops = &dp_ops_pflow;
15332 #endif /* PEER_FLOW_CONTROL */
15333 #ifdef DP_PEER_EXTENDED_API
15334 	soc->cdp_soc.ops->misc_ops = &dp_ops_misc;
15335 	soc->cdp_soc.ops->ocb_ops = &dp_ops_ocb;
15336 	soc->cdp_soc.ops->peer_ops = &dp_ops_peer;
15337 	soc->cdp_soc.ops->mob_stats_ops = &dp_ops_mob_stats;
15338 #endif
15339 #ifdef DP_FLOW_CTL
15340 	soc->cdp_soc.ops->cfg_ops = &dp_ops_cfg;
15341 	soc->cdp_soc.ops->flowctl_ops = &dp_ops_flowctl;
15342 	soc->cdp_soc.ops->l_flowctl_ops = &dp_ops_l_flowctl;
15343 	soc->cdp_soc.ops->throttle_ops = &dp_ops_throttle;
15344 #endif
15345 #ifdef IPA_OFFLOAD
15346 	soc->cdp_soc.ops->ipa_ops = &dp_ops_ipa;
15347 #endif
15348 #ifdef DP_POWER_SAVE
15349 	soc->cdp_soc.ops->bus_ops = &dp_ops_bus;
15350 #endif
15351 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
15352 	soc->cdp_soc.ops->cfr_ops = &dp_ops_cfr;
15353 #endif
15354 #ifdef WLAN_SUPPORT_MSCS
15355 	soc->cdp_soc.ops->mscs_ops = &dp_ops_mscs;
15356 #endif
15357 #ifdef WLAN_SUPPORT_MESH_LATENCY
15358 	soc->cdp_soc.ops->mesh_latency_ops = &dp_ops_mesh_latency;
15359 #endif
15360 #ifdef CONFIG_SAWF_DEF_QUEUES
15361 	soc->cdp_soc.ops->sawf_ops = &dp_ops_sawf;
15362 #endif
15363 #ifdef WLAN_SUPPORT_SCS
15364 	soc->cdp_soc.ops->scs_ops = &dp_ops_scs;
15365 #endif
15366 };
15367 
15368 void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
15369 {
15370 	uint32_t i;
15371 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
15372 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
15373 	}
15374 }
15375 
15376 qdf_export_symbol(dp_soc_set_txrx_ring_map);
15377 
15378 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
15379 	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574) || \
15380 	defined(QCA_WIFI_QCA5332)
15381 /**
15382  * dp_soc_attach_wifi3() - Attach txrx SOC
15383  * @ctrl_psoc: Opaque SOC handle from control plane
15384  * @params: SOC attach params
15385  *
15386  * Return: DP SOC handle on success, NULL on failure
15387  */
15388 struct cdp_soc_t *
15389 dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
15390 		    struct cdp_soc_attach_params *params)
15391 {
15392 	struct dp_soc *dp_soc = NULL;
15393 
15394 	dp_soc = dp_soc_attach(ctrl_psoc, params);
15395 
15396 	return dp_soc_to_cdp_soc_t(dp_soc);
15397 }
15398 
15399 static inline void dp_soc_set_def_pdev(struct dp_soc *soc)
15400 {
15401 	int lmac_id;
15402 
15403 	for (lmac_id = 0; lmac_id < MAX_NUM_LMAC_HW; lmac_id++) {
15404 		/*Set default host PDEV ID for lmac_id*/
15405 		wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
15406 				      INVALID_PDEV_ID, lmac_id);
15407 	}
15408 }
15409 
15410 static uint32_t
15411 dp_get_link_desc_id_start(uint16_t arch_id)
15412 {
15413 	switch (arch_id) {
15414 	case CDP_ARCH_TYPE_LI:
15415 		return LINK_DESC_ID_START_21_BITS_COOKIE;
15416 	case CDP_ARCH_TYPE_BE:
15417 		return LINK_DESC_ID_START_20_BITS_COOKIE;
15418 	default:
15419 		dp_err("unknown arch_id 0x%x", arch_id);
15420 		QDF_BUG(0);
15421 		return LINK_DESC_ID_START_21_BITS_COOKIE;
15422 	}
15423 }
15424 
15425 /**
15426  * dp_soc_attach() - Attach txrx SOC
15427  * @ctrl_psoc: Opaque SOC handle from control plane
15428  * @params: SOC attach params
15429  *
15430  * Return: DP SOC handle on success, NULL on failure
15431  */
15432 static struct dp_soc *
15433 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
15434 	      struct cdp_soc_attach_params *params)
15435 {
15436 	struct dp_soc *soc =  NULL;
15437 	uint16_t arch_id;
15438 	struct hif_opaque_softc *hif_handle = params->hif_handle;
15439 	qdf_device_t qdf_osdev = params->qdf_osdev;
15440 	struct ol_if_ops *ol_ops = params->ol_ops;
15441 	uint16_t device_id = params->device_id;
15442 
15443 	if (!hif_handle) {
15444 		dp_err("HIF handle is NULL");
15445 		goto fail0;
15446 	}
15447 	arch_id = cdp_get_arch_type_from_devid(device_id);
15448 	soc = qdf_mem_malloc(dp_get_soc_context_size(device_id));
15449 	if (!soc) {
15450 		dp_err("DP SOC memory allocation failed");
15451 		goto fail0;
15452 	}
15453 
15454 	dp_info("soc memory allocated %pK", soc);
15455 	soc->hif_handle = hif_handle;
15456 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
15457 	if (!soc->hal_soc)
15458 		goto fail1;
15459 
15460 	hif_get_cmem_info(soc->hif_handle,
15461 			  &soc->cmem_base,
15462 			  &soc->cmem_total_size);
15463 	soc->cmem_avail_size = soc->cmem_total_size;
15464 	soc->device_id = device_id;
15465 	soc->cdp_soc.ops =
15466 		(struct cdp_ops *)qdf_mem_malloc(sizeof(struct cdp_ops));
15467 	if (!soc->cdp_soc.ops)
15468 		goto fail1;
15469 
15470 	dp_soc_txrx_ops_attach(soc);
15471 	soc->cdp_soc.ol_ops = ol_ops;
15472 	soc->ctrl_psoc = ctrl_psoc;
15473 	soc->osdev = qdf_osdev;
15474 	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
15475 	hal_rx_get_tlv_size(soc->hal_soc, &soc->rx_pkt_tlv_size,
15476 			    &soc->rx_mon_pkt_tlv_size);
15477 	soc->idle_link_bm_id = hal_get_idle_link_bm_id(soc->hal_soc,
15478 						       params->mlo_chip_id);
15479 	soc->features.dmac_cmn_src_rxbuf_ring_enabled =
15480 		hal_dmac_cmn_src_rxbuf_ring_get(soc->hal_soc);
15481 	soc->arch_id = arch_id;
15482 	soc->link_desc_id_start =
15483 			dp_get_link_desc_id_start(soc->arch_id);
15484 	dp_configure_arch_ops(soc);
15485 
15486 	/* Reset wbm sg list and flags */
15487 	dp_rx_wbm_sg_list_reset(soc);
15488 
15489 	dp_soc_cfg_history_attach(soc);
15490 	dp_soc_tx_hw_desc_history_attach(soc);
15491 	dp_soc_rx_history_attach(soc);
15492 	dp_soc_mon_status_ring_history_attach(soc);
15493 	dp_soc_tx_history_attach(soc);
15494 	wlan_set_srng_cfg(&soc->wlan_srng_cfg);
15495 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
15496 	if (!soc->wlan_cfg_ctx) {
15497 		dp_err("wlan_cfg_ctx failed\n");
15498 		goto fail2;
15499 	}
15500 	dp_soc_cfg_attach(soc);
15501 
15502 	if (dp_hw_link_desc_pool_banks_alloc(soc, WLAN_INVALID_PDEV_ID)) {
15503 		dp_err("failed to allocate link desc pool banks");
15504 		goto fail3;
15505 	}
15506 
15507 	if (dp_hw_link_desc_ring_alloc(soc)) {
15508 		dp_err("failed to allocate link_desc_ring");
15509 		goto fail4;
15510 	}
15511 
15512 	if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_attach(soc,
15513 								 params))) {
15514 		dp_err("unable to do target specific attach");
15515 		goto fail5;
15516 	}
15517 
15518 	if (dp_soc_srng_alloc(soc)) {
15519 		dp_err("failed to allocate soc srng rings");
15520 		goto fail6;
15521 	}
15522 
15523 	if (dp_soc_tx_desc_sw_pools_alloc(soc)) {
15524 		dp_err("dp_soc_tx_desc_sw_pools_alloc failed");
15525 		goto fail7;
15526 	}
15527 
15528 	if (!dp_monitor_modularized_enable()) {
15529 		if (dp_mon_soc_attach_wrapper(soc)) {
15530 			dp_err("failed to attach monitor");
15531 			goto fail8;
15532 		}
15533 	}
15534 
15535 	if (hal_reo_shared_qaddr_setup((hal_soc_handle_t)soc->hal_soc,
15536 				       &soc->reo_qref)
15537 	    != QDF_STATUS_SUCCESS) {
15538 		dp_err("unable to setup reo shared qaddr");
15539 		goto fail9;
15540 	}
15541 
15542 	if (dp_sysfs_initialize_stats(soc) != QDF_STATUS_SUCCESS) {
15543 		dp_err("failed to initialize dp stats sysfs file");
15544 		dp_sysfs_deinitialize_stats(soc);
15545 	}
15546 
15547 	dp_soc_swlm_attach(soc);
15548 	dp_soc_set_interrupt_mode(soc);
15549 	dp_soc_set_def_pdev(soc);
15550 
15551 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
15552 		qdf_dma_mem_stats_read(),
15553 		qdf_heap_mem_stats_read(),
15554 		qdf_skb_total_mem_stats_read());
15555 
15556 	return soc;
15557 fail9:
15558 	if (!dp_monitor_modularized_enable())
15559 		dp_mon_soc_detach_wrapper(soc);
15560 fail8:
15561 	dp_soc_tx_desc_sw_pools_free(soc);
15562 fail7:
15563 	dp_soc_srng_free(soc);
15564 fail6:
15565 	soc->arch_ops.txrx_soc_detach(soc);
15566 fail5:
15567 	dp_hw_link_desc_ring_free(soc);
15568 fail4:
15569 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
15570 fail3:
15571 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
15572 fail2:
15573 	qdf_mem_free(soc->cdp_soc.ops);
15574 fail1:
15575 	qdf_mem_free(soc);
15576 fail0:
15577 	return NULL;
15578 }
15579 
15580 /**
15581  * dp_soc_init() - Initialize txrx SOC
15582  * @soc: Opaque DP SOC handle
15583  * @htc_handle: Opaque HTC handle
15584  * @hif_handle: Opaque HIF handle
15585  *
15586  * Return: DP SOC handle on success, NULL on failure
15587  */
15588 static void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
15589 			 struct hif_opaque_softc *hif_handle)
15590 {
15591 	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
15592 	bool is_monitor_mode = false;
15593 	uint8_t i;
15594 	int num_dp_msi;
15595 
15596 	wlan_minidump_log(soc, sizeof(*soc), soc->ctrl_psoc,
15597 			  WLAN_MD_DP_SOC, "dp_soc");
15598 
15599 	soc->hif_handle = hif_handle;
15600 
15601 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
15602 	if (!soc->hal_soc)
15603 		goto fail0;
15604 
15605 	if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_init(soc))) {
15606 		dp_err("unable to do target specific init");
15607 		goto fail0;
15608 	}
15609 
15610 	htt_soc = htt_soc_attach(soc, htc_handle);
15611 	if (!htt_soc)
15612 		goto fail1;
15613 
15614 	soc->htt_handle = htt_soc;
15615 
15616 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
15617 		goto fail2;
15618 
15619 	htt_set_htc_handle(htt_soc, htc_handle);
15620 
15621 	dp_soc_cfg_init(soc);
15622 
15623 	dp_monitor_soc_cfg_init(soc);
15624 	/* Reset/Initialize wbm sg list and flags */
15625 	dp_rx_wbm_sg_list_reset(soc);
15626 
15627 	/* Note: Any SRNG ring initialization should happen only after
15628 	 * Interrupt mode is set and followed by filling up the
15629 	 * interrupt mask. IT SHOULD ALWAYS BE IN THIS ORDER.
15630 	 */
15631 	dp_soc_set_interrupt_mode(soc);
15632 	if (soc->cdp_soc.ol_ops->get_con_mode &&
15633 	    soc->cdp_soc.ol_ops->get_con_mode() ==
15634 	    QDF_GLOBAL_MONITOR_MODE) {
15635 		is_monitor_mode = true;
15636 		soc->curr_rx_pkt_tlv_size = soc->rx_mon_pkt_tlv_size;
15637 	} else {
15638 		soc->curr_rx_pkt_tlv_size = soc->rx_pkt_tlv_size;
15639 	}
15640 
15641 	num_dp_msi = dp_get_num_msi_available(soc, soc->intr_mode);
15642 	if (num_dp_msi < 0) {
15643 		dp_init_err("%pK: dp_interrupt assignment failed", soc);
15644 		goto fail3;
15645 	}
15646 
15647 	wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, num_dp_msi,
15648 				     soc->intr_mode, is_monitor_mode);
15649 
15650 	/* initialize WBM_IDLE_LINK ring */
15651 	if (dp_hw_link_desc_ring_init(soc)) {
15652 		dp_init_err("%pK: dp_hw_link_desc_ring_init failed", soc);
15653 		goto fail3;
15654 	}
15655 
15656 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
15657 
15658 	if (dp_soc_srng_init(soc)) {
15659 		dp_init_err("%pK: dp_soc_srng_init failed", soc);
15660 		goto fail4;
15661 	}
15662 
15663 	if (htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
15664 			       htt_get_htc_handle(htt_soc),
15665 			       soc->hal_soc, soc->osdev) == NULL)
15666 		goto fail5;
15667 
15668 	/* Initialize descriptors in TCL Rings */
15669 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
15670 		hal_tx_init_data_ring(soc->hal_soc,
15671 				      soc->tcl_data_ring[i].hal_srng);
15672 	}
15673 
15674 	if (dp_soc_tx_desc_sw_pools_init(soc)) {
15675 		dp_init_err("%pK: dp_tx_soc_attach failed", soc);
15676 		goto fail6;
15677 	}
15678 
15679 	if (soc->arch_ops.txrx_soc_ppeds_start) {
15680 		if (soc->arch_ops.txrx_soc_ppeds_start(soc)) {
15681 			dp_init_err("%pK: ppeds start failed", soc);
15682 			goto fail7;
15683 		}
15684 	}
15685 
15686 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
15687 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
15688 	soc->cce_disable = false;
15689 	soc->max_ast_ageout_count = MAX_AST_AGEOUT_COUNT;
15690 
15691 	soc->sta_mode_search_policy = DP_TX_ADDR_SEARCH_ADDR_POLICY;
15692 	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
15693 	qdf_spinlock_create(&soc->vdev_map_lock);
15694 	qdf_atomic_init(&soc->num_tx_outstanding);
15695 	qdf_atomic_init(&soc->num_tx_exception);
15696 	soc->num_tx_allowed =
15697 		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);
15698 	soc->num_tx_spl_allowed =
15699 		wlan_cfg_get_dp_soc_tx_spl_device_limit(soc->wlan_cfg_ctx);
15700 	soc->num_reg_tx_allowed = soc->num_tx_allowed - soc->num_tx_spl_allowed;
15701 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
15702 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
15703 				CDP_CFG_MAX_PEER_ID);
15704 
15705 		if (ret != -EINVAL)
15706 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
15707 
15708 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
15709 				CDP_CFG_CCE_DISABLE);
15710 		if (ret == 1)
15711 			soc->cce_disable = true;
15712 	}
15713 
15714 	/*
15715 	 * Skip registering hw ring interrupts for WMAC2 on IPQ6018
15716 	 * and IPQ5018 WMAC2 is not there in these platforms.
15717 	 */
15718 	if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018 ||
15719 	    soc->disable_mac2_intr)
15720 		dp_soc_disable_unused_mac_intr_mask(soc, 0x2);
15721 
15722 	/*
15723 	 * Skip registering hw ring interrupts for WMAC1 on IPQ5018
15724 	 * WMAC1 is not there in this platform.
15725 	 */
15726 	if (soc->disable_mac1_intr)
15727 		dp_soc_disable_unused_mac_intr_mask(soc, 0x1);
15728 
15729 	/* setup the global rx defrag waitlist */
15730 	TAILQ_INIT(&soc->rx.defrag.waitlist);
15731 	soc->rx.defrag.timeout_ms =
15732 		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
15733 	soc->rx.defrag.next_flush_ms = 0;
15734 	soc->rx.flags.defrag_timeout_check =
15735 		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
15736 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
15737 
15738 	dp_monitor_soc_init(soc);
15739 
15740 	qdf_atomic_set(&soc->cmn_init_done, 1);
15741 
15742 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
15743 
15744 	qdf_spinlock_create(&soc->ast_lock);
15745 	dp_peer_mec_spinlock_create(soc);
15746 
15747 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
15748 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
15749 	INIT_RX_HW_STATS_LOCK(soc);
15750 
15751 	qdf_nbuf_queue_init(&soc->invalid_buf_queue);
15752 	/* fill the tx/rx cpu ring map*/
15753 	dp_soc_set_txrx_ring_map(soc);
15754 
15755 	TAILQ_INIT(&soc->inactive_peer_list);
15756 	qdf_spinlock_create(&soc->inactive_peer_list_lock);
15757 	TAILQ_INIT(&soc->inactive_vdev_list);
15758 	qdf_spinlock_create(&soc->inactive_vdev_list_lock);
15759 	qdf_spinlock_create(&soc->htt_stats.lock);
15760 	/* initialize work queue for stats processing */
15761 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
15762 
15763 	dp_reo_desc_deferred_freelist_create(soc);
15764 
15765 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
15766 		qdf_dma_mem_stats_read(),
15767 		qdf_heap_mem_stats_read(),
15768 		qdf_skb_total_mem_stats_read());
15769 
15770 	soc->vdev_stats_id_map = 0;
15771 
15772 	return soc;
15773 fail7:
15774 	dp_soc_tx_desc_sw_pools_deinit(soc);
15775 fail6:
15776 	htt_soc_htc_dealloc(soc->htt_handle);
15777 fail5:
15778 	dp_soc_srng_deinit(soc);
15779 fail4:
15780 	dp_hw_link_desc_ring_deinit(soc);
15781 fail3:
15782 	htt_htc_pkt_pool_free(htt_soc);
15783 fail2:
15784 	htt_soc_detach(htt_soc);
15785 fail1:
15786 	soc->arch_ops.txrx_soc_deinit(soc);
15787 fail0:
15788 	return NULL;
15789 }
15790 
15791 void *dp_soc_init_wifi3(struct cdp_soc_t *soc,
15792 			struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
15793 			struct hif_opaque_softc *hif_handle,
15794 			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
15795 			struct ol_if_ops *ol_ops, uint16_t device_id)
15796 {
15797 	return dp_soc_init((struct dp_soc *)soc, htc_handle, hif_handle);
15798 }
15799 
15800 #endif
15801 
15802 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
15803 {
15804 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
15805 		return (mac_id < MAX_PDEV_CNT) ? soc->pdev_list[mac_id] : NULL;
15806 
15807 	/* Typically for MCL as there only 1 PDEV*/
15808 	return soc->pdev_list[0];
15809 }
15810 
15811 void dp_update_num_mac_rings_for_dbs(struct dp_soc *soc,
15812 				     int *max_mac_rings)
15813 {
15814 	bool dbs_enable = false;
15815 
15816 	if (soc->cdp_soc.ol_ops->is_hw_dbs_capable)
15817 		dbs_enable = soc->cdp_soc.ol_ops->
15818 				is_hw_dbs_capable((void *)soc->ctrl_psoc);
15819 
15820 	*max_mac_rings = dbs_enable ? (*max_mac_rings) : 1;
15821 	dp_info("dbs_enable %d, max_mac_rings %d",
15822 		dbs_enable, *max_mac_rings);
15823 }
15824 
15825 qdf_export_symbol(dp_update_num_mac_rings_for_dbs);
15826 
15827 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
15828 /**
15829  * dp_get_cfr_rcc() - get cfr rcc config
15830  * @soc_hdl: Datapath soc handle
15831  * @pdev_id: id of objmgr pdev
15832  *
15833  * Return: true/false based on cfr mode setting
15834  */
15835 static
15836 bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
15837 {
15838 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15839 	struct dp_pdev *pdev = NULL;
15840 
15841 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15842 	if (!pdev) {
15843 		dp_err("pdev is NULL");
15844 		return false;
15845 	}
15846 
15847 	return pdev->cfr_rcc_mode;
15848 }
15849 
15850 /**
15851  * dp_set_cfr_rcc() - enable/disable cfr rcc config
15852  * @soc_hdl: Datapath soc handle
15853  * @pdev_id: id of objmgr pdev
15854  * @enable: Enable/Disable cfr rcc mode
15855  *
15856  * Return: none
15857  */
15858 static
15859 void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, bool enable)
15860 {
15861 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15862 	struct dp_pdev *pdev = NULL;
15863 
15864 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15865 	if (!pdev) {
15866 		dp_err("pdev is NULL");
15867 		return;
15868 	}
15869 
15870 	pdev->cfr_rcc_mode = enable;
15871 }
15872 
15873 /**
15874  * dp_get_cfr_dbg_stats - Get the debug statistics for CFR
15875  * @soc_hdl: Datapath soc handle
15876  * @pdev_id: id of data path pdev handle
15877  * @cfr_rcc_stats: CFR RCC debug statistics buffer
15878  *
15879  * Return: none
15880  */
15881 static inline void
15882 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
15883 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats)
15884 {
15885 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15886 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15887 
15888 	if (!pdev) {
15889 		dp_err("Invalid pdev");
15890 		return;
15891 	}
15892 
15893 	qdf_mem_copy(cfr_rcc_stats, &pdev->stats.rcc,
15894 		     sizeof(struct cdp_cfr_rcc_stats));
15895 }
15896 
15897 /**
15898  * dp_clear_cfr_dbg_stats - Clear debug statistics for CFR
15899  * @soc_hdl: Datapath soc handle
15900  * @pdev_id: id of data path pdev handle
15901  *
15902  * Return: none
15903  */
15904 static void dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl,
15905 				   uint8_t pdev_id)
15906 {
15907 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15908 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15909 
15910 	if (!pdev) {
15911 		dp_err("dp pdev is NULL");
15912 		return;
15913 	}
15914 
15915 	qdf_mem_zero(&pdev->stats.rcc, sizeof(pdev->stats.rcc));
15916 }
15917 #endif
15918 
15919 /**
15920  * dp_bucket_index() - Return index from array
15921  *
15922  * @delay: delay measured
15923  * @array: array used to index corresponding delay
15924  * @delay_in_us: flag to indicate whether the delay in ms or us
15925  *
15926  * Return: index
15927  */
15928 static uint8_t
15929 dp_bucket_index(uint32_t delay, uint16_t *array, bool delay_in_us)
15930 {
15931 	uint8_t i = CDP_DELAY_BUCKET_0;
15932 	uint32_t thr_low, thr_high;
15933 
15934 	for (; i < CDP_DELAY_BUCKET_MAX - 1; i++) {
15935 		thr_low = array[i];
15936 		thr_high = array[i + 1];
15937 
15938 		if (delay_in_us) {
15939 			thr_low = thr_low * USEC_PER_MSEC;
15940 			thr_high = thr_high * USEC_PER_MSEC;
15941 		}
15942 		if (delay >= thr_low && delay <= thr_high)
15943 			return i;
15944 	}
15945 	return (CDP_DELAY_BUCKET_MAX - 1);
15946 }
15947 
15948 #ifdef HW_TX_DELAY_STATS_ENABLE
15949 /*
15950  * cdp_fw_to_hw_delay_range
15951  * Fw to hw delay ranges in milliseconds
15952  */
15953 static uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
15954 	0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
15955 #else
15956 static uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
15957 	0, 2, 4, 6, 8, 10, 20, 30, 40, 50, 100, 250, 500};
15958 #endif
15959 
15960 /*
15961  * cdp_sw_enq_delay_range
15962  * Software enqueue delay ranges in milliseconds
15963  */
15964 static uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
15965 	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
15966 
15967 /*
15968  * cdp_intfrm_delay_range
15969  * Interframe delay ranges in milliseconds
15970  */
15971 static uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
15972 	0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};
15973 
15974 /**
15975  * dp_fill_delay_buckets() - Fill delay statistics bucket for each
15976  *				type of delay
15977  * @tstats: tid tx stats
15978  * @rstats: tid rx stats
15979  * @delay: delay in ms
15980  * @tid: tid value
15981  * @mode: type of tx delay mode
15982  * @ring_id: ring number
15983  * @delay_in_us: flag to indicate whether the delay in ms or us
15984  *
15985  * Return: pointer to cdp_delay_stats structure
15986  */
15987 static struct cdp_delay_stats *
15988 dp_fill_delay_buckets(struct cdp_tid_tx_stats *tstats,
15989 		      struct cdp_tid_rx_stats *rstats, uint32_t delay,
15990 		      uint8_t tid, uint8_t mode, uint8_t ring_id,
15991 		      bool delay_in_us)
15992 {
15993 	uint8_t delay_index = 0;
15994 	struct cdp_delay_stats *stats = NULL;
15995 
15996 	/*
15997 	 * Update delay stats in proper bucket
15998 	 */
15999 	switch (mode) {
16000 	/* Software Enqueue delay ranges */
16001 	case CDP_DELAY_STATS_SW_ENQ:
16002 		if (!tstats)
16003 			break;
16004 
16005 		delay_index = dp_bucket_index(delay, cdp_sw_enq_delay,
16006 					      delay_in_us);
16007 		tstats->swq_delay.delay_bucket[delay_index]++;
16008 		stats = &tstats->swq_delay;
16009 		break;
16010 
16011 	/* Tx Completion delay ranges */
16012 	case CDP_DELAY_STATS_FW_HW_TRANSMIT:
16013 		if (!tstats)
16014 			break;
16015 
16016 		delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay,
16017 					      delay_in_us);
16018 		tstats->hwtx_delay.delay_bucket[delay_index]++;
16019 		stats = &tstats->hwtx_delay;
16020 		break;
16021 
16022 	/* Interframe tx delay ranges */
16023 	case CDP_DELAY_STATS_TX_INTERFRAME:
16024 		if (!tstats)
16025 			break;
16026 
16027 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
16028 					      delay_in_us);
16029 		tstats->intfrm_delay.delay_bucket[delay_index]++;
16030 		stats = &tstats->intfrm_delay;
16031 		break;
16032 
16033 	/* Interframe rx delay ranges */
16034 	case CDP_DELAY_STATS_RX_INTERFRAME:
16035 		if (!rstats)
16036 			break;
16037 
16038 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
16039 					      delay_in_us);
16040 		rstats->intfrm_delay.delay_bucket[delay_index]++;
16041 		stats = &rstats->intfrm_delay;
16042 		break;
16043 
16044 	/* Ring reap to indication to network stack */
16045 	case CDP_DELAY_STATS_REAP_STACK:
16046 		if (!rstats)
16047 			break;
16048 
16049 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
16050 					      delay_in_us);
16051 		rstats->to_stack_delay.delay_bucket[delay_index]++;
16052 		stats = &rstats->to_stack_delay;
16053 		break;
16054 	default:
16055 		dp_debug("Incorrect delay mode: %d", mode);
16056 	}
16057 
16058 	return stats;
16059 }
16060 
16061 void dp_update_delay_stats(struct cdp_tid_tx_stats *tstats,
16062 			   struct cdp_tid_rx_stats *rstats, uint32_t delay,
16063 			   uint8_t tid, uint8_t mode, uint8_t ring_id,
16064 			   bool delay_in_us)
16065 {
16066 	struct cdp_delay_stats *dstats = NULL;
16067 
16068 	/*
16069 	 * Delay ranges are different for different delay modes
16070 	 * Get the correct index to update delay bucket
16071 	 */
16072 	dstats = dp_fill_delay_buckets(tstats, rstats, delay, tid, mode,
16073 				       ring_id, delay_in_us);
16074 	if (qdf_unlikely(!dstats))
16075 		return;
16076 
16077 	if (delay != 0) {
16078 		/*
16079 		 * Compute minimum,average and maximum
16080 		 * delay
16081 		 */
16082 		if (delay < dstats->min_delay)
16083 			dstats->min_delay = delay;
16084 
16085 		if (delay > dstats->max_delay)
16086 			dstats->max_delay = delay;
16087 
16088 		/*
16089 		 * Average over delay measured till now
16090 		 */
16091 		if (!dstats->avg_delay)
16092 			dstats->avg_delay = delay;
16093 		else
16094 			dstats->avg_delay = ((delay + dstats->avg_delay) >> 1);
16095 	}
16096 }
16097 
16098 uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
16099 			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
16100 			      u_int16_t mac_cnt, bool limit)
16101 {
16102 	struct dp_soc *dp_soc = (struct dp_soc *)soc;
16103 	struct dp_vdev *vdev =
16104 		dp_vdev_get_ref_by_id(dp_soc, vdev_id, DP_MOD_ID_CDP);
16105 	struct dp_peer *peer;
16106 	uint16_t new_mac_cnt = 0;
16107 
16108 	if (!vdev)
16109 		return new_mac_cnt;
16110 
16111 	if (limit && (vdev->num_peers > mac_cnt))
16112 		return 0;
16113 
16114 	qdf_spin_lock_bh(&vdev->peer_list_lock);
16115 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
16116 		if (peer->bss_peer)
16117 			continue;
16118 		if (new_mac_cnt < mac_cnt) {
16119 			WLAN_ADDR_COPY(newmac[new_mac_cnt], peer->mac_addr.raw);
16120 			new_mac_cnt++;
16121 		}
16122 	}
16123 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
16124 	dp_vdev_unref_delete(dp_soc, vdev, DP_MOD_ID_CDP);
16125 	return new_mac_cnt;
16126 }
16127 
16128 uint16_t dp_get_peer_id(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *mac)
16129 {
16130 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
16131 						       mac, 0, vdev_id,
16132 						       DP_MOD_ID_CDP);
16133 	uint16_t peer_id = HTT_INVALID_PEER;
16134 
16135 	if (!peer) {
16136 		dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
16137 		return peer_id;
16138 	}
16139 
16140 	peer_id = peer->peer_id;
16141 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
16142 	return peer_id;
16143 }
16144 
16145 #ifdef QCA_SUPPORT_WDS_EXTENDED
16146 QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc,
16147 				  uint8_t vdev_id,
16148 				  uint8_t *mac,
16149 				  ol_txrx_rx_fp rx,
16150 				  ol_osif_peer_handle osif_peer)
16151 {
16152 	struct dp_txrx_peer *txrx_peer = NULL;
16153 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
16154 						       mac, 0, vdev_id,
16155 						       DP_MOD_ID_CDP);
16156 	QDF_STATUS status = QDF_STATUS_E_INVAL;
16157 
16158 	if (!peer) {
16159 		dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
16160 		return status;
16161 	}
16162 
16163 	txrx_peer = dp_get_txrx_peer(peer);
16164 	if (!txrx_peer) {
16165 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
16166 		return status;
16167 	}
16168 
16169 	if (rx) {
16170 		if (txrx_peer->osif_rx) {
16171 			status = QDF_STATUS_E_ALREADY;
16172 		} else {
16173 			txrx_peer->osif_rx = rx;
16174 			status = QDF_STATUS_SUCCESS;
16175 		}
16176 	} else {
16177 		if (txrx_peer->osif_rx) {
16178 			txrx_peer->osif_rx = NULL;
16179 			status = QDF_STATUS_SUCCESS;
16180 		} else {
16181 			status = QDF_STATUS_E_ALREADY;
16182 		}
16183 	}
16184 
16185 	txrx_peer->wds_ext.osif_peer = osif_peer;
16186 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
16187 
16188 	return status;
16189 }
16190 #endif /* QCA_SUPPORT_WDS_EXTENDED */
16191 
16192 /**
16193  * dp_pdev_srng_deinit() - de-initialize all pdev srng ring including
16194  *			   monitor rings
16195  * @pdev: Datapath pdev handle
16196  *
16197  */
16198 static void dp_pdev_srng_deinit(struct dp_pdev *pdev)
16199 {
16200 	struct dp_soc *soc = pdev->soc;
16201 	uint8_t i;
16202 
16203 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled)
16204 		dp_srng_deinit(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
16205 			       RXDMA_BUF,
16206 			       pdev->lmac_id);
16207 
16208 	if (!soc->rxdma2sw_rings_not_supported) {
16209 		for (i = 0;
16210 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
16211 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
16212 								 pdev->pdev_id);
16213 
16214 			wlan_minidump_remove(soc->rxdma_err_dst_ring[lmac_id].
16215 							base_vaddr_unaligned,
16216 					     soc->rxdma_err_dst_ring[lmac_id].
16217 								alloc_size,
16218 					     soc->ctrl_psoc,
16219 					     WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
16220 					     "rxdma_err_dst");
16221 			dp_srng_deinit(soc, &soc->rxdma_err_dst_ring[lmac_id],
16222 				       RXDMA_DST, lmac_id);
16223 		}
16224 	}
16225 
16226 
16227 }
16228 
16229 /**
16230  * dp_pdev_srng_init() - initialize all pdev srng rings including
16231  *			   monitor rings
16232  * @pdev: Datapath pdev handle
16233  *
16234  * Return: QDF_STATUS_SUCCESS on success
16235  *	   QDF_STATUS_E_NOMEM on failure
16236  */
16237 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev)
16238 {
16239 	struct dp_soc *soc = pdev->soc;
16240 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
16241 	uint32_t i;
16242 
16243 	soc_cfg_ctx = soc->wlan_cfg_ctx;
16244 
16245 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
16246 		if (dp_srng_init(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
16247 				 RXDMA_BUF, 0, pdev->lmac_id)) {
16248 			dp_init_err("%pK: dp_srng_init failed rx refill ring",
16249 				    soc);
16250 			goto fail1;
16251 		}
16252 	}
16253 
16254 	/* LMAC RxDMA to SW Rings configuration */
16255 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
16256 		/* Only valid for MCL */
16257 		pdev = soc->pdev_list[0];
16258 
16259 	if (!soc->rxdma2sw_rings_not_supported) {
16260 		for (i = 0;
16261 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
16262 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
16263 								 pdev->pdev_id);
16264 			struct dp_srng *srng =
16265 				&soc->rxdma_err_dst_ring[lmac_id];
16266 
16267 			if (srng->hal_srng)
16268 				continue;
16269 
16270 			if (dp_srng_init(soc, srng, RXDMA_DST, 0, lmac_id)) {
16271 				dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring",
16272 					    soc);
16273 				goto fail1;
16274 			}
16275 			wlan_minidump_log(soc->rxdma_err_dst_ring[lmac_id].
16276 						base_vaddr_unaligned,
16277 					  soc->rxdma_err_dst_ring[lmac_id].
16278 						alloc_size,
16279 					  soc->ctrl_psoc,
16280 					  WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
16281 					  "rxdma_err_dst");
16282 		}
16283 	}
16284 	return QDF_STATUS_SUCCESS;
16285 
16286 fail1:
16287 	dp_pdev_srng_deinit(pdev);
16288 	return QDF_STATUS_E_NOMEM;
16289 }
16290 
16291 /**
16292  * dp_pdev_srng_free() - free all pdev srng rings including monitor rings
16293  * @pdev: Datapath pdev handle
16294  *
16295  */
16296 static void dp_pdev_srng_free(struct dp_pdev *pdev)
16297 {
16298 	struct dp_soc *soc = pdev->soc;
16299 	uint8_t i;
16300 
16301 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled)
16302 		dp_srng_free(soc, &soc->rx_refill_buf_ring[pdev->lmac_id]);
16303 
16304 	if (!soc->rxdma2sw_rings_not_supported) {
16305 		for (i = 0;
16306 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
16307 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
16308 								 pdev->pdev_id);
16309 
16310 			dp_srng_free(soc, &soc->rxdma_err_dst_ring[lmac_id]);
16311 		}
16312 	}
16313 }
16314 
16315 /**
16316  * dp_pdev_srng_alloc() - allocate memory for all pdev srng rings including
16317  *			  monitor rings
16318  * @pdev: Datapath pdev handle
16319  *
16320  * Return: QDF_STATUS_SUCCESS on success
16321  *	   QDF_STATUS_E_NOMEM on failure
16322  */
16323 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev)
16324 {
16325 	struct dp_soc *soc = pdev->soc;
16326 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
16327 	uint32_t ring_size;
16328 	uint32_t i;
16329 
16330 	soc_cfg_ctx = soc->wlan_cfg_ctx;
16331 
16332 	ring_size = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
16333 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
16334 		if (dp_srng_alloc(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
16335 				  RXDMA_BUF, ring_size, 0)) {
16336 			dp_init_err("%pK: dp_srng_alloc failed rx refill ring",
16337 				    soc);
16338 			goto fail1;
16339 		}
16340 	}
16341 
16342 	ring_size = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
16343 	/* LMAC RxDMA to SW Rings configuration */
16344 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
16345 		/* Only valid for MCL */
16346 		pdev = soc->pdev_list[0];
16347 
16348 	if (!soc->rxdma2sw_rings_not_supported) {
16349 		for (i = 0;
16350 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
16351 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
16352 								 pdev->pdev_id);
16353 			struct dp_srng *srng =
16354 				&soc->rxdma_err_dst_ring[lmac_id];
16355 
16356 			if (srng->base_vaddr_unaligned)
16357 				continue;
16358 
16359 			if (dp_srng_alloc(soc, srng, RXDMA_DST, ring_size, 0)) {
16360 				dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring",
16361 					    soc);
16362 				goto fail1;
16363 			}
16364 		}
16365 	}
16366 
16367 	return QDF_STATUS_SUCCESS;
16368 fail1:
16369 	dp_pdev_srng_free(pdev);
16370 	return QDF_STATUS_E_NOMEM;
16371 }
16372 
16373 #ifndef WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG
16374 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
16375 {
16376 	QDF_STATUS status;
16377 
16378 	if (soc->init_tcl_cmd_cred_ring) {
16379 		status =  dp_srng_init(soc, &soc->tcl_cmd_credit_ring,
16380 				       TCL_CMD_CREDIT, 0, 0);
16381 		if (QDF_IS_STATUS_ERROR(status))
16382 			return status;
16383 
16384 		wlan_minidump_log(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
16385 				  soc->tcl_cmd_credit_ring.alloc_size,
16386 				  soc->ctrl_psoc,
16387 				  WLAN_MD_DP_SRNG_TCL_CMD,
16388 				  "wbm_desc_rel_ring");
16389 	}
16390 
16391 	return QDF_STATUS_SUCCESS;
16392 }
16393 
16394 static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
16395 {
16396 	if (soc->init_tcl_cmd_cred_ring) {
16397 		wlan_minidump_remove(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
16398 				     soc->tcl_cmd_credit_ring.alloc_size,
16399 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_CMD,
16400 				     "wbm_desc_rel_ring");
16401 		dp_srng_deinit(soc, &soc->tcl_cmd_credit_ring,
16402 			       TCL_CMD_CREDIT, 0);
16403 	}
16404 }
16405 
16406 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
16407 {
16408 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
16409 	uint32_t entries;
16410 	QDF_STATUS status;
16411 
16412 	entries = wlan_cfg_get_dp_soc_tcl_cmd_credit_ring_size(soc_cfg_ctx);
16413 	if (soc->init_tcl_cmd_cred_ring) {
16414 		status = dp_srng_alloc(soc, &soc->tcl_cmd_credit_ring,
16415 				       TCL_CMD_CREDIT, entries, 0);
16416 		if (QDF_IS_STATUS_ERROR(status))
16417 			return status;
16418 	}
16419 
16420 	return QDF_STATUS_SUCCESS;
16421 }
16422 
16423 static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
16424 {
16425 	if (soc->init_tcl_cmd_cred_ring)
16426 		dp_srng_free(soc, &soc->tcl_cmd_credit_ring);
16427 }
16428 
16429 static inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
16430 {
16431 	if (soc->init_tcl_cmd_cred_ring)
16432 		hal_tx_init_cmd_credit_ring(soc->hal_soc,
16433 					    soc->tcl_cmd_credit_ring.hal_srng);
16434 }
16435 #else
16436 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
16437 {
16438 	return QDF_STATUS_SUCCESS;
16439 }
16440 
16441 static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
16442 {
16443 }
16444 
16445 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
16446 {
16447 	return QDF_STATUS_SUCCESS;
16448 }
16449 
16450 static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
16451 {
16452 }
16453 
16454 static inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
16455 {
16456 }
16457 #endif
16458 
16459 #ifndef WLAN_DP_DISABLE_TCL_STATUS_SRNG
16460 static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
16461 {
16462 	QDF_STATUS status;
16463 
16464 	status =  dp_srng_init(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0);
16465 	if (QDF_IS_STATUS_ERROR(status))
16466 		return status;
16467 
16468 	wlan_minidump_log(soc->tcl_status_ring.base_vaddr_unaligned,
16469 			  soc->tcl_status_ring.alloc_size,
16470 			  soc->ctrl_psoc,
16471 			  WLAN_MD_DP_SRNG_TCL_STATUS,
16472 			  "wbm_desc_rel_ring");
16473 
16474 	return QDF_STATUS_SUCCESS;
16475 }
16476 
16477 static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
16478 {
16479 	wlan_minidump_remove(soc->tcl_status_ring.base_vaddr_unaligned,
16480 			     soc->tcl_status_ring.alloc_size,
16481 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_STATUS,
16482 			     "wbm_desc_rel_ring");
16483 	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
16484 }
16485 
16486 static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
16487 {
16488 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
16489 	uint32_t entries;
16490 	QDF_STATUS status = QDF_STATUS_SUCCESS;
16491 
16492 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
16493 	status = dp_srng_alloc(soc, &soc->tcl_status_ring,
16494 			       TCL_STATUS, entries, 0);
16495 
16496 	return status;
16497 }
16498 
16499 static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
16500 {
16501 	dp_srng_free(soc, &soc->tcl_status_ring);
16502 }
16503 #else
16504 static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
16505 {
16506 	return QDF_STATUS_SUCCESS;
16507 }
16508 
16509 static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
16510 {
16511 }
16512 
16513 static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
16514 {
16515 	return QDF_STATUS_SUCCESS;
16516 }
16517 
16518 static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
16519 {
16520 }
16521 #endif
16522 
16523 /**
16524  * dp_soc_srng_deinit() - de-initialize soc srng rings
16525  * @soc: Datapath soc handle
16526  *
16527  */
16528 static void dp_soc_srng_deinit(struct dp_soc *soc)
16529 {
16530 	uint32_t i;
16531 
16532 	if (soc->arch_ops.txrx_soc_srng_deinit)
16533 		soc->arch_ops.txrx_soc_srng_deinit(soc);
16534 
16535 	/* Free the ring memories */
16536 	/* Common rings */
16537 	wlan_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
16538 			     soc->wbm_desc_rel_ring.alloc_size,
16539 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_WBM_DESC_REL,
16540 			     "wbm_desc_rel_ring");
16541 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
16542 
16543 	/* Tx data rings */
16544 	for (i = 0; i < soc->num_tcl_data_rings; i++)
16545 		dp_deinit_tx_pair_by_index(soc, i);
16546 
16547 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
16548 		dp_deinit_tx_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
16549 		dp_ipa_deinit_alt_tx_ring(soc);
16550 	}
16551 
16552 	/* TCL command and status rings */
16553 	dp_soc_tcl_cmd_cred_srng_deinit(soc);
16554 	dp_soc_tcl_status_srng_deinit(soc);
16555 
16556 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
16557 		/* TODO: Get number of rings and ring sizes
16558 		 * from wlan_cfg
16559 		 */
16560 		wlan_minidump_remove(soc->reo_dest_ring[i].base_vaddr_unaligned,
16561 				     soc->reo_dest_ring[i].alloc_size,
16562 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_DEST,
16563 				     "reo_dest_ring");
16564 		dp_srng_deinit(soc, &soc->reo_dest_ring[i], REO_DST, i);
16565 	}
16566 
16567 	/* REO reinjection ring */
16568 	wlan_minidump_remove(soc->reo_reinject_ring.base_vaddr_unaligned,
16569 			     soc->reo_reinject_ring.alloc_size,
16570 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_REINJECT,
16571 			     "reo_reinject_ring");
16572 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
16573 
16574 	/* Rx release ring */
16575 	wlan_minidump_remove(soc->rx_rel_ring.base_vaddr_unaligned,
16576 			     soc->rx_rel_ring.alloc_size,
16577 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_RX_REL,
16578 			     "reo_release_ring");
16579 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
16580 
16581 	/* Rx exception ring */
16582 	/* TODO: Better to store ring_type and ring_num in
16583 	 * dp_srng during setup
16584 	 */
16585 	wlan_minidump_remove(soc->reo_exception_ring.base_vaddr_unaligned,
16586 			     soc->reo_exception_ring.alloc_size,
16587 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_EXCEPTION,
16588 			     "reo_exception_ring");
16589 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
16590 
16591 	/* REO command and status rings */
16592 	wlan_minidump_remove(soc->reo_cmd_ring.base_vaddr_unaligned,
16593 			     soc->reo_cmd_ring.alloc_size,
16594 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_CMD,
16595 			     "reo_cmd_ring");
16596 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
16597 	wlan_minidump_remove(soc->reo_status_ring.base_vaddr_unaligned,
16598 			     soc->reo_status_ring.alloc_size,
16599 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_STATUS,
16600 			     "reo_status_ring");
16601 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
16602 }
16603 
16604 /**
16605  * dp_soc_srng_init() - Initialize soc level srng rings
16606  * @soc: Datapath soc handle
16607  *
16608  * Return: QDF_STATUS_SUCCESS on success
16609  *	   QDF_STATUS_E_FAILURE on failure
16610  */
16611 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc)
16612 {
16613 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
16614 	uint8_t i;
16615 	uint8_t wbm2_sw_rx_rel_ring_id;
16616 
16617 	soc_cfg_ctx = soc->wlan_cfg_ctx;
16618 
16619 	dp_enable_verbose_debug(soc);
16620 
16621 	/* WBM descriptor release ring */
16622 	if (dp_srng_init(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0)) {
16623 		dp_init_err("%pK: dp_srng_init failed for wbm_desc_rel_ring", soc);
16624 		goto fail1;
16625 	}
16626 
16627 	wlan_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
16628 			  soc->wbm_desc_rel_ring.alloc_size,
16629 			  soc->ctrl_psoc,
16630 			  WLAN_MD_DP_SRNG_WBM_DESC_REL,
16631 			  "wbm_desc_rel_ring");
16632 
16633 	/* TCL command and status rings */
16634 	if (dp_soc_tcl_cmd_cred_srng_init(soc)) {
16635 		dp_init_err("%pK: dp_srng_init failed for tcl_cmd_ring", soc);
16636 		goto fail1;
16637 	}
16638 
16639 	if (dp_soc_tcl_status_srng_init(soc)) {
16640 		dp_init_err("%pK: dp_srng_init failed for tcl_status_ring", soc);
16641 		goto fail1;
16642 	}
16643 
16644 	/* REO reinjection ring */
16645 	if (dp_srng_init(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0)) {
16646 		dp_init_err("%pK: dp_srng_init failed for reo_reinject_ring", soc);
16647 		goto fail1;
16648 	}
16649 
16650 	wlan_minidump_log(soc->reo_reinject_ring.base_vaddr_unaligned,
16651 			  soc->reo_reinject_ring.alloc_size,
16652 			  soc->ctrl_psoc,
16653 			  WLAN_MD_DP_SRNG_REO_REINJECT,
16654 			  "reo_reinject_ring");
16655 
16656 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc_cfg_ctx);
16657 	/* Rx release ring */
16658 	if (dp_srng_init(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
16659 			 wbm2_sw_rx_rel_ring_id, 0)) {
16660 		dp_init_err("%pK: dp_srng_init failed for rx_rel_ring", soc);
16661 		goto fail1;
16662 	}
16663 
16664 	wlan_minidump_log(soc->rx_rel_ring.base_vaddr_unaligned,
16665 			  soc->rx_rel_ring.alloc_size,
16666 			  soc->ctrl_psoc,
16667 			  WLAN_MD_DP_SRNG_RX_REL,
16668 			  "reo_release_ring");
16669 
16670 	/* Rx exception ring */
16671 	if (dp_srng_init(soc, &soc->reo_exception_ring,
16672 			 REO_EXCEPTION, 0, MAX_REO_DEST_RINGS)) {
16673 		dp_init_err("%pK: dp_srng_init failed - reo_exception", soc);
16674 		goto fail1;
16675 	}
16676 
16677 	wlan_minidump_log(soc->reo_exception_ring.base_vaddr_unaligned,
16678 			  soc->reo_exception_ring.alloc_size,
16679 			  soc->ctrl_psoc,
16680 			  WLAN_MD_DP_SRNG_REO_EXCEPTION,
16681 			  "reo_exception_ring");
16682 
16683 	/* REO command and status rings */
16684 	if (dp_srng_init(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0)) {
16685 		dp_init_err("%pK: dp_srng_init failed for reo_cmd_ring", soc);
16686 		goto fail1;
16687 	}
16688 
16689 	wlan_minidump_log(soc->reo_cmd_ring.base_vaddr_unaligned,
16690 			  soc->reo_cmd_ring.alloc_size,
16691 			  soc->ctrl_psoc,
16692 			  WLAN_MD_DP_SRNG_REO_CMD,
16693 			  "reo_cmd_ring");
16694 
16695 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
16696 	TAILQ_INIT(&soc->rx.reo_cmd_list);
16697 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
16698 
16699 	if (dp_srng_init(soc, &soc->reo_status_ring, REO_STATUS, 0, 0)) {
16700 		dp_init_err("%pK: dp_srng_init failed for reo_status_ring", soc);
16701 		goto fail1;
16702 	}
16703 
16704 	wlan_minidump_log(soc->reo_status_ring.base_vaddr_unaligned,
16705 			  soc->reo_status_ring.alloc_size,
16706 			  soc->ctrl_psoc,
16707 			  WLAN_MD_DP_SRNG_REO_STATUS,
16708 			  "reo_status_ring");
16709 
16710 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
16711 		if (dp_init_tx_ring_pair_by_index(soc, i))
16712 			goto fail1;
16713 	}
16714 
16715 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
16716 		if (dp_init_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
16717 			goto fail1;
16718 
16719 		if (dp_ipa_init_alt_tx_ring(soc))
16720 			goto fail1;
16721 	}
16722 
16723 	dp_create_ext_stats_event(soc);
16724 
16725 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
16726 		/* Initialize REO destination ring */
16727 		if (dp_srng_init(soc, &soc->reo_dest_ring[i], REO_DST, i, 0)) {
16728 			dp_init_err("%pK: dp_srng_init failed for reo_dest_ringn", soc);
16729 			goto fail1;
16730 		}
16731 
16732 		wlan_minidump_log(soc->reo_dest_ring[i].base_vaddr_unaligned,
16733 				  soc->reo_dest_ring[i].alloc_size,
16734 				  soc->ctrl_psoc,
16735 				  WLAN_MD_DP_SRNG_REO_DEST,
16736 				  "reo_dest_ring");
16737 	}
16738 
16739 	if (soc->arch_ops.txrx_soc_srng_init) {
16740 		if (soc->arch_ops.txrx_soc_srng_init(soc)) {
16741 			dp_init_err("%pK: dp_srng_init failed for arch rings",
16742 				    soc);
16743 			goto fail1;
16744 		}
16745 	}
16746 
16747 	return QDF_STATUS_SUCCESS;
16748 fail1:
16749 	/*
16750 	 * Cleanup will be done as part of soc_detach, which will
16751 	 * be called on pdev attach failure
16752 	 */
16753 	dp_soc_srng_deinit(soc);
16754 	return QDF_STATUS_E_FAILURE;
16755 }
16756 
16757 /**
16758  * dp_soc_srng_free() - free soc level srng rings
16759  * @soc: Datapath soc handle
16760  *
16761  */
16762 static void dp_soc_srng_free(struct dp_soc *soc)
16763 {
16764 	uint32_t i;
16765 
16766 	if (soc->arch_ops.txrx_soc_srng_free)
16767 		soc->arch_ops.txrx_soc_srng_free(soc);
16768 
16769 	dp_srng_free(soc, &soc->wbm_desc_rel_ring);
16770 
16771 	for (i = 0; i < soc->num_tcl_data_rings; i++)
16772 		dp_free_tx_ring_pair_by_index(soc, i);
16773 
16774 	/* Free IPA rings for TCL_TX and TCL_COMPL ring */
16775 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
16776 		dp_free_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
16777 		dp_ipa_free_alt_tx_ring(soc);
16778 	}
16779 
16780 	dp_soc_tcl_cmd_cred_srng_free(soc);
16781 	dp_soc_tcl_status_srng_free(soc);
16782 
16783 	for (i = 0; i < soc->num_reo_dest_rings; i++)
16784 		dp_srng_free(soc, &soc->reo_dest_ring[i]);
16785 
16786 	dp_srng_free(soc, &soc->reo_reinject_ring);
16787 	dp_srng_free(soc, &soc->rx_rel_ring);
16788 
16789 	dp_srng_free(soc, &soc->reo_exception_ring);
16790 
16791 	dp_srng_free(soc, &soc->reo_cmd_ring);
16792 	dp_srng_free(soc, &soc->reo_status_ring);
16793 }
16794 
16795 /**
16796  * dp_soc_srng_alloc() - Allocate memory for soc level srng rings
16797  * @soc: Datapath soc handle
16798  *
16799  * Return: QDF_STATUS_SUCCESS on success
16800  *	   QDF_STATUS_E_NOMEM on failure
16801  */
16802 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc)
16803 {
16804 	uint32_t entries;
16805 	uint32_t i;
16806 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
16807 	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
16808 	uint32_t reo_dst_ring_size;
16809 
16810 	soc_cfg_ctx = soc->wlan_cfg_ctx;
16811 
16812 	/* sw2wbm link descriptor release ring */
16813 	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
16814 	if (dp_srng_alloc(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE,
16815 			  entries, 0)) {
16816 		dp_init_err("%pK: dp_srng_alloc failed for wbm_desc_rel_ring", soc);
16817 		goto fail1;
16818 	}
16819 
16820 	/* TCL command and status rings */
16821 	if (dp_soc_tcl_cmd_cred_srng_alloc(soc)) {
16822 		dp_init_err("%pK: dp_srng_alloc failed for tcl_cmd_ring", soc);
16823 		goto fail1;
16824 	}
16825 
16826 	if (dp_soc_tcl_status_srng_alloc(soc)) {
16827 		dp_init_err("%pK: dp_srng_alloc failed for tcl_status_ring", soc);
16828 		goto fail1;
16829 	}
16830 
16831 	/* REO reinjection ring */
16832 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
16833 	if (dp_srng_alloc(soc, &soc->reo_reinject_ring, REO_REINJECT,
16834 			  entries, 0)) {
16835 		dp_init_err("%pK: dp_srng_alloc failed for reo_reinject_ring", soc);
16836 		goto fail1;
16837 	}
16838 
16839 	/* Rx release ring */
16840 	entries = wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx);
16841 	if (dp_srng_alloc(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
16842 			  entries, 0)) {
16843 		dp_init_err("%pK: dp_srng_alloc failed for rx_rel_ring", soc);
16844 		goto fail1;
16845 	}
16846 
16847 	/* Rx exception ring */
16848 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
16849 	if (dp_srng_alloc(soc, &soc->reo_exception_ring, REO_EXCEPTION,
16850 			  entries, 0)) {
16851 		dp_init_err("%pK: dp_srng_alloc failed - reo_exception", soc);
16852 		goto fail1;
16853 	}
16854 
16855 	/* REO command and status rings */
16856 	entries = wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx);
16857 	if (dp_srng_alloc(soc, &soc->reo_cmd_ring, REO_CMD, entries, 0)) {
16858 		dp_init_err("%pK: dp_srng_alloc failed for reo_cmd_ring", soc);
16859 		goto fail1;
16860 	}
16861 
16862 	entries = wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx);
16863 	if (dp_srng_alloc(soc, &soc->reo_status_ring, REO_STATUS,
16864 			  entries, 0)) {
16865 		dp_init_err("%pK: dp_srng_alloc failed for reo_status_ring", soc);
16866 		goto fail1;
16867 	}
16868 
16869 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc_cfg_ctx);
16870 
16871 	/* Disable cached desc if NSS offload is enabled */
16872 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
16873 		cached = 0;
16874 
16875 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
16876 		if (dp_alloc_tx_ring_pair_by_index(soc, i))
16877 			goto fail1;
16878 	}
16879 
16880 	/* IPA rings for TCL_TX and TX_COMP will be allocated here */
16881 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
16882 		if (dp_alloc_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
16883 			goto fail1;
16884 
16885 		if (dp_ipa_alloc_alt_tx_ring(soc))
16886 			goto fail1;
16887 	}
16888 
16889 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
16890 		/* Setup REO destination ring */
16891 		if (dp_srng_alloc(soc, &soc->reo_dest_ring[i], REO_DST,
16892 				  reo_dst_ring_size, cached)) {
16893 			dp_init_err("%pK: dp_srng_alloc failed for reo_dest_ring", soc);
16894 			goto fail1;
16895 		}
16896 	}
16897 
16898 	if (soc->arch_ops.txrx_soc_srng_alloc) {
16899 		if (soc->arch_ops.txrx_soc_srng_alloc(soc)) {
16900 			dp_init_err("%pK: dp_srng_alloc failed for arch rings",
16901 				    soc);
16902 			goto fail1;
16903 		}
16904 	}
16905 
16906 	return QDF_STATUS_SUCCESS;
16907 
16908 fail1:
16909 	dp_soc_srng_free(soc);
16910 	return QDF_STATUS_E_NOMEM;
16911 }
16912 
16913 static void dp_soc_cfg_dump(struct dp_soc *soc, uint32_t target_type)
16914 {
16915 	dp_init_info("DP soc Dump for Target = %d", target_type);
16916 	dp_init_info("ast_override_support = %d, da_war_enabled = %d,",
16917 		     soc->ast_override_support, soc->da_war_enabled);
16918 
16919 	wlan_cfg_dp_soc_ctx_dump(soc->wlan_cfg_ctx);
16920 }
16921 
16922 /**
16923  * dp_soc_cfg_init() - initialize target specific configuration
16924  *		       during dp_soc_init
16925  * @soc: dp soc handle
16926  */
16927 static void dp_soc_cfg_init(struct dp_soc *soc)
16928 {
16929 	uint32_t target_type;
16930 
16931 	target_type = hal_get_target_type(soc->hal_soc);
16932 	switch (target_type) {
16933 	case TARGET_TYPE_QCA6290:
16934 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
16935 					       REO_DST_RING_SIZE_QCA6290);
16936 		soc->ast_override_support = 1;
16937 		soc->da_war_enabled = false;
16938 		break;
16939 	case TARGET_TYPE_QCA6390:
16940 	case TARGET_TYPE_QCA6490:
16941 	case TARGET_TYPE_QCA6750:
16942 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
16943 					       REO_DST_RING_SIZE_QCA6290);
16944 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
16945 		soc->ast_override_support = 1;
16946 		if (soc->cdp_soc.ol_ops->get_con_mode &&
16947 		    soc->cdp_soc.ol_ops->get_con_mode() ==
16948 		    QDF_GLOBAL_MONITOR_MODE) {
16949 			int int_ctx;
16950 
16951 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
16952 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
16953 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
16954 			}
16955 		}
16956 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
16957 		break;
16958 	case TARGET_TYPE_KIWI:
16959 	case TARGET_TYPE_MANGO:
16960 	case TARGET_TYPE_PEACH:
16961 		soc->ast_override_support = 1;
16962 		soc->per_tid_basize_max_tid = 8;
16963 
16964 		if (soc->cdp_soc.ol_ops->get_con_mode &&
16965 		    soc->cdp_soc.ol_ops->get_con_mode() ==
16966 		    QDF_GLOBAL_MONITOR_MODE) {
16967 			int int_ctx;
16968 
16969 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS;
16970 			     int_ctx++) {
16971 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
16972 				if (dp_is_monitor_mode_using_poll(soc))
16973 					soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
16974 			}
16975 		}
16976 
16977 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
16978 		soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev = 1;
16979 		break;
16980 	case TARGET_TYPE_QCA8074:
16981 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
16982 		soc->da_war_enabled = true;
16983 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
16984 		break;
16985 	case TARGET_TYPE_QCA8074V2:
16986 	case TARGET_TYPE_QCA6018:
16987 	case TARGET_TYPE_QCA9574:
16988 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
16989 		soc->ast_override_support = 1;
16990 		soc->per_tid_basize_max_tid = 8;
16991 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
16992 		soc->da_war_enabled = false;
16993 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
16994 		break;
16995 	case TARGET_TYPE_QCN9000:
16996 		soc->ast_override_support = 1;
16997 		soc->da_war_enabled = false;
16998 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
16999 		soc->per_tid_basize_max_tid = 8;
17000 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
17001 		soc->lmac_polled_mode = 0;
17002 		soc->wbm_release_desc_rx_sg_support = 1;
17003 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
17004 		break;
17005 	case TARGET_TYPE_QCA5018:
17006 	case TARGET_TYPE_QCN6122:
17007 	case TARGET_TYPE_QCN9160:
17008 		soc->ast_override_support = 1;
17009 		soc->da_war_enabled = false;
17010 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
17011 		soc->per_tid_basize_max_tid = 8;
17012 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS_11AX;
17013 		soc->disable_mac1_intr = 1;
17014 		soc->disable_mac2_intr = 1;
17015 		soc->wbm_release_desc_rx_sg_support = 1;
17016 		break;
17017 	case TARGET_TYPE_QCN9224:
17018 		soc->ast_override_support = 1;
17019 		soc->da_war_enabled = false;
17020 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
17021 		soc->per_tid_basize_max_tid = 8;
17022 		soc->wbm_release_desc_rx_sg_support = 1;
17023 		soc->rxdma2sw_rings_not_supported = 1;
17024 		soc->wbm_sg_last_msdu_war = 1;
17025 		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
17026 		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
17027 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
17028 		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
17029 		soc->host_ast_db_enable = cfg_get(soc->ctrl_psoc,
17030 						  CFG_DP_HOST_AST_DB_ENABLE);
17031 		soc->features.wds_ext_ast_override_enable = true;
17032 		break;
17033 	case TARGET_TYPE_QCA5332:
17034 		soc->ast_override_support = 1;
17035 		soc->da_war_enabled = false;
17036 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
17037 		soc->per_tid_basize_max_tid = 8;
17038 		soc->wbm_release_desc_rx_sg_support = 1;
17039 		soc->rxdma2sw_rings_not_supported = 1;
17040 		soc->wbm_sg_last_msdu_war = 1;
17041 		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
17042 		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
17043 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS_5332;
17044 		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
17045 		soc->host_ast_db_enable = cfg_get(soc->ctrl_psoc,
17046 						  CFG_DP_HOST_AST_DB_ENABLE);
17047 		soc->features.wds_ext_ast_override_enable = true;
17048 		break;
17049 	default:
17050 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
17051 		qdf_assert_always(0);
17052 		break;
17053 	}
17054 	dp_soc_cfg_dump(soc, target_type);
17055 }
17056 
17057 /**
17058  * dp_soc_cfg_attach() - set target specific configuration in
17059  *			 dp soc cfg.
17060  * @soc: dp soc handle
17061  */
17062 static void dp_soc_cfg_attach(struct dp_soc *soc)
17063 {
17064 	int target_type;
17065 	int nss_cfg = 0;
17066 
17067 	target_type = hal_get_target_type(soc->hal_soc);
17068 	switch (target_type) {
17069 	case TARGET_TYPE_QCA6290:
17070 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
17071 					       REO_DST_RING_SIZE_QCA6290);
17072 		break;
17073 	case TARGET_TYPE_QCA6390:
17074 	case TARGET_TYPE_QCA6490:
17075 	case TARGET_TYPE_QCA6750:
17076 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
17077 					       REO_DST_RING_SIZE_QCA6290);
17078 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
17079 		break;
17080 	case TARGET_TYPE_KIWI:
17081 	case TARGET_TYPE_MANGO:
17082 	case TARGET_TYPE_PEACH:
17083 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
17084 		break;
17085 	case TARGET_TYPE_QCA8074:
17086 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
17087 		break;
17088 	case TARGET_TYPE_QCA8074V2:
17089 	case TARGET_TYPE_QCA6018:
17090 	case TARGET_TYPE_QCA9574:
17091 	case TARGET_TYPE_QCN6122:
17092 	case TARGET_TYPE_QCN9160:
17093 	case TARGET_TYPE_QCA5018:
17094 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
17095 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
17096 		break;
17097 	case TARGET_TYPE_QCN9000:
17098 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
17099 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
17100 		break;
17101 	case TARGET_TYPE_QCN9224:
17102 	case TARGET_TYPE_QCA5332:
17103 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
17104 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
17105 		break;
17106 	default:
17107 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
17108 		qdf_assert_always(0);
17109 		break;
17110 	}
17111 
17112 	if (soc->cdp_soc.ol_ops->get_soc_nss_cfg)
17113 		nss_cfg = soc->cdp_soc.ol_ops->get_soc_nss_cfg(soc->ctrl_psoc);
17114 
17115 	wlan_cfg_set_dp_soc_nss_cfg(soc->wlan_cfg_ctx, nss_cfg);
17116 
17117 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
17118 		wlan_cfg_set_num_tx_desc_pool(soc->wlan_cfg_ctx, 0);
17119 		wlan_cfg_set_num_tx_ext_desc_pool(soc->wlan_cfg_ctx, 0);
17120 		wlan_cfg_set_num_tx_desc(soc->wlan_cfg_ctx, 0);
17121 		wlan_cfg_set_num_tx_ext_desc(soc->wlan_cfg_ctx, 0);
17122 		soc->init_tcl_cmd_cred_ring = false;
17123 		soc->num_tcl_data_rings =
17124 			wlan_cfg_num_nss_tcl_data_rings(soc->wlan_cfg_ctx);
17125 		soc->num_reo_dest_rings =
17126 			wlan_cfg_num_nss_reo_dest_rings(soc->wlan_cfg_ctx);
17127 
17128 	} else {
17129 		soc->init_tcl_cmd_cred_ring = true;
17130 		soc->num_tx_comp_rings =
17131 			wlan_cfg_num_tx_comp_rings(soc->wlan_cfg_ctx);
17132 		soc->num_tcl_data_rings =
17133 			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
17134 		soc->num_reo_dest_rings =
17135 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
17136 	}
17137 
17138 	soc->arch_ops.soc_cfg_attach(soc);
17139 }
17140 
17141 static inline  void dp_pdev_set_default_reo(struct dp_pdev *pdev)
17142 {
17143 	struct dp_soc *soc = pdev->soc;
17144 
17145 	switch (pdev->pdev_id) {
17146 	case 0:
17147 		pdev->reo_dest =
17148 			wlan_cfg_radio0_default_reo_get(soc->wlan_cfg_ctx);
17149 		break;
17150 
17151 	case 1:
17152 		pdev->reo_dest =
17153 			wlan_cfg_radio1_default_reo_get(soc->wlan_cfg_ctx);
17154 		break;
17155 
17156 	case 2:
17157 		pdev->reo_dest =
17158 			wlan_cfg_radio2_default_reo_get(soc->wlan_cfg_ctx);
17159 		break;
17160 
17161 	default:
17162 		dp_init_err("%pK: Invalid pdev_id %d for reo selection",
17163 			    soc, pdev->pdev_id);
17164 		break;
17165 	}
17166 }
17167 
17168 static QDF_STATUS dp_pdev_init(struct cdp_soc_t *txrx_soc,
17169 				      HTC_HANDLE htc_handle,
17170 				      qdf_device_t qdf_osdev,
17171 				      uint8_t pdev_id)
17172 {
17173 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
17174 	int nss_cfg;
17175 	void *sojourn_buf;
17176 
17177 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
17178 	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
17179 
17180 	soc_cfg_ctx = soc->wlan_cfg_ctx;
17181 	pdev->soc = soc;
17182 	pdev->pdev_id = pdev_id;
17183 
17184 	/*
17185 	 * Variable to prevent double pdev deinitialization during
17186 	 * radio detach execution .i.e. in the absence of any vdev.
17187 	 */
17188 	pdev->pdev_deinit = 0;
17189 
17190 	if (dp_wdi_event_attach(pdev)) {
17191 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
17192 			  "dp_wdi_evet_attach failed");
17193 		goto fail0;
17194 	}
17195 
17196 	if (dp_pdev_srng_init(pdev)) {
17197 		dp_init_err("%pK: Failed to initialize pdev srng rings", soc);
17198 		goto fail1;
17199 	}
17200 
17201 	/* Initialize descriptors in TCL Rings used by IPA */
17202 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
17203 		hal_tx_init_data_ring(soc->hal_soc,
17204 				      soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng);
17205 		dp_ipa_hal_tx_init_alt_data_ring(soc);
17206 	}
17207 
17208 	/*
17209 	 * Initialize command/credit ring descriptor
17210 	 * Command/CREDIT ring also used for sending DATA cmds
17211 	 */
17212 	dp_tx_init_cmd_credit_ring(soc);
17213 
17214 	dp_tx_pdev_init(pdev);
17215 
17216 	/*
17217 	 * set nss pdev config based on soc config
17218 	 */
17219 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
17220 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
17221 					 (nss_cfg & (1 << pdev_id)));
17222 	pdev->target_pdev_id =
17223 		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
17224 
17225 	if (soc->preferred_hw_mode == WMI_HOST_HW_MODE_2G_PHYB &&
17226 	    pdev->lmac_id == PHYB_2G_LMAC_ID) {
17227 		pdev->target_pdev_id = PHYB_2G_TARGET_PDEV_ID;
17228 	}
17229 
17230 	/* Reset the cpu ring map if radio is NSS offloaded */
17231 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
17232 		dp_soc_reset_cpu_ring_map(soc);
17233 		dp_soc_reset_intr_mask(soc);
17234 	}
17235 
17236 	/* Reset the cpu ring map if radio is NSS offloaded */
17237 	dp_soc_reset_ipa_vlan_intr_mask(soc);
17238 
17239 	TAILQ_INIT(&pdev->vdev_list);
17240 	qdf_spinlock_create(&pdev->vdev_list_lock);
17241 	pdev->vdev_count = 0;
17242 	pdev->is_lro_hash_configured = 0;
17243 
17244 	qdf_spinlock_create(&pdev->tx_mutex);
17245 	pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_INVALID_LMAC_ID;
17246 	pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_INVALID_LMAC_ID;
17247 	pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_INVALID_LMAC_ID;
17248 
17249 	DP_STATS_INIT(pdev);
17250 
17251 	dp_local_peer_id_pool_init(pdev);
17252 
17253 	dp_dscp_tid_map_setup(pdev);
17254 	dp_pcp_tid_map_setup(pdev);
17255 
17256 	/* set the reo destination during initialization */
17257 	dp_pdev_set_default_reo(pdev);
17258 
17259 	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
17260 
17261 	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
17262 			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
17263 			      TRUE);
17264 
17265 	if (!pdev->sojourn_buf) {
17266 		dp_init_err("%pK: Failed to allocate sojourn buf", soc);
17267 		goto fail2;
17268 	}
17269 	sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
17270 	qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));
17271 
17272 	qdf_event_create(&pdev->fw_peer_stats_event);
17273 	qdf_event_create(&pdev->fw_stats_event);
17274 	qdf_event_create(&pdev->fw_obss_stats_event);
17275 
17276 	pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
17277 	pdev->num_tx_spl_allowed =
17278 		wlan_cfg_get_num_tx_spl_desc(soc->wlan_cfg_ctx);
17279 	pdev->num_reg_tx_allowed =
17280 		pdev->num_tx_allowed - pdev->num_tx_spl_allowed;
17281 	if (dp_rxdma_ring_setup(soc, pdev)) {
17282 		dp_init_err("%pK: RXDMA ring config failed", soc);
17283 		goto fail3;
17284 	}
17285 
17286 	if (dp_init_ipa_rx_refill_buf_ring(soc, pdev))
17287 		goto fail3;
17288 
17289 	if (dp_ipa_ring_resource_setup(soc, pdev))
17290 		goto fail4;
17291 
17292 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
17293 		dp_init_err("%pK: dp_ipa_uc_attach failed", soc);
17294 		goto fail4;
17295 	}
17296 
17297 	if (dp_pdev_bkp_stats_attach(pdev) != QDF_STATUS_SUCCESS) {
17298 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
17299 			  FL("dp_pdev_bkp_stats_attach failed"));
17300 		goto fail5;
17301 	}
17302 
17303 	if (dp_monitor_pdev_init(pdev)) {
17304 		dp_init_err("%pK: dp_monitor_pdev_init failed\n", soc);
17305 		goto fail6;
17306 	}
17307 
17308 	/* initialize sw rx descriptors */
17309 	dp_rx_pdev_desc_pool_init(pdev);
17310 	/* allocate buffers and replenish the RxDMA ring */
17311 	dp_rx_pdev_buffers_alloc(pdev);
17312 
17313 	dp_init_tso_stats(pdev);
17314 
17315 	pdev->rx_fast_flag = false;
17316 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
17317 		qdf_dma_mem_stats_read(),
17318 		qdf_heap_mem_stats_read(),
17319 		qdf_skb_total_mem_stats_read());
17320 
17321 	return QDF_STATUS_SUCCESS;
17322 fail6:
17323 	dp_pdev_bkp_stats_detach(pdev);
17324 fail5:
17325 	dp_ipa_uc_detach(soc, pdev);
17326 fail4:
17327 	dp_deinit_ipa_rx_refill_buf_ring(soc, pdev);
17328 fail3:
17329 	dp_rxdma_ring_cleanup(soc, pdev);
17330 	qdf_nbuf_free(pdev->sojourn_buf);
17331 fail2:
17332 	qdf_spinlock_destroy(&pdev->tx_mutex);
17333 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
17334 	dp_pdev_srng_deinit(pdev);
17335 fail1:
17336 	dp_wdi_event_detach(pdev);
17337 fail0:
17338 	return QDF_STATUS_E_FAILURE;
17339 }
17340 
17341 /**
17342  * dp_pdev_init_wifi3() - Init txrx pdev
17343  * @txrx_soc:
17344  * @htc_handle: HTC handle for host-target interface
17345  * @qdf_osdev: QDF OS device
17346  * @pdev_id: pdev Id
17347  *
17348  * Return: QDF_STATUS
17349  */
17350 static QDF_STATUS dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
17351 				     HTC_HANDLE htc_handle,
17352 				     qdf_device_t qdf_osdev,
17353 				     uint8_t pdev_id)
17354 {
17355 	return dp_pdev_init(txrx_soc, htc_handle, qdf_osdev, pdev_id);
17356 }
17357 
17358 #ifdef FEATURE_DIRECT_LINK
17359 struct dp_srng *dp_setup_direct_link_refill_ring(struct cdp_soc_t *soc_hdl,
17360 						 uint8_t pdev_id)
17361 {
17362 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
17363 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
17364 
17365 	if (!pdev) {
17366 		dp_err("DP pdev is NULL");
17367 		return NULL;
17368 	}
17369 
17370 	if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring4,
17371 			  RXDMA_BUF, DIRECT_LINK_REFILL_RING_ENTRIES, false)) {
17372 		dp_err("SRNG alloc failed for rx_refill_buf_ring4");
17373 		return NULL;
17374 	}
17375 
17376 	if (dp_srng_init(soc, &pdev->rx_refill_buf_ring4,
17377 			 RXDMA_BUF, DIRECT_LINK_REFILL_RING_IDX, 0)) {
17378 		dp_err("SRNG init failed for rx_refill_buf_ring4");
17379 		dp_srng_free(soc, &pdev->rx_refill_buf_ring4);
17380 		return NULL;
17381 	}
17382 
17383 	if (htt_srng_setup(soc->htt_handle, pdev_id,
17384 			   pdev->rx_refill_buf_ring4.hal_srng, RXDMA_BUF)) {
17385 		dp_srng_deinit(soc, &pdev->rx_refill_buf_ring4, RXDMA_BUF,
17386 			       DIRECT_LINK_REFILL_RING_IDX);
17387 		dp_srng_free(soc, &pdev->rx_refill_buf_ring4);
17388 		return NULL;
17389 	}
17390 
17391 	return &pdev->rx_refill_buf_ring4;
17392 }
17393 
17394 void dp_destroy_direct_link_refill_ring(struct cdp_soc_t *soc_hdl,
17395 					uint8_t pdev_id)
17396 {
17397 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
17398 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
17399 
17400 	if (!pdev) {
17401 		dp_err("DP pdev is NULL");
17402 		return;
17403 	}
17404 
17405 	dp_srng_deinit(soc, &pdev->rx_refill_buf_ring4, RXDMA_BUF, 0);
17406 	dp_srng_free(soc, &pdev->rx_refill_buf_ring4);
17407 }
17408 #endif
17409