xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision 70a19e16789e308182f63b15c75decec7bf0b342)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <wlan_ipa_obj_mgmt_api.h>
21 #include <qdf_types.h>
22 #include <qdf_lock.h>
23 #include <qdf_net_types.h>
24 #include <qdf_lro.h>
25 #include <qdf_module.h>
26 #include <hal_hw_headers.h>
27 #include <hal_api.h>
28 #include <hif.h>
29 #include <htt.h>
30 #include <wdi_event.h>
31 #include <queue.h>
32 #include "dp_types.h"
33 #include "dp_internal.h"
34 #include "dp_tx.h"
35 #include "dp_tx_desc.h"
36 #include "dp_rx.h"
37 #ifdef DP_RATETABLE_SUPPORT
38 #include "dp_ratetable.h"
39 #endif
40 #include <cdp_txrx_handle.h>
41 #include <wlan_cfg.h>
42 #include <wlan_utility.h>
43 #include "cdp_txrx_cmn_struct.h"
44 #include "cdp_txrx_stats_struct.h"
45 #include "cdp_txrx_cmn_reg.h"
46 #include <qdf_util.h>
47 #include "dp_peer.h"
48 #include "htt_stats.h"
49 #include "dp_htt.h"
50 #ifdef WLAN_SUPPORT_RX_FISA
51 #include <wlan_dp_fisa_rx.h>
52 #endif
53 #include "htt_ppdu_stats.h"
54 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
55 #include "cfg_ucfg_api.h"
56 #include <wlan_module_ids.h>
57 
58 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
59 #include "cdp_txrx_flow_ctrl_v2.h"
60 #else
61 
/**
 * cdp_dump_flow_pool_info() - no-op stub when TX flow control v2 is disabled
 * @soc: CDP SoC handle (unused)
 */
static inline void
cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
{
}
67 #endif
68 #ifdef WIFI_MONITOR_SUPPORT
69 #include <dp_mon.h>
70 #endif
71 #include "dp_ipa.h"
72 #ifdef FEATURE_WDS
73 #include "dp_txrx_wds.h"
74 #endif
75 #ifdef WLAN_SUPPORT_MSCS
76 #include "dp_mscs.h"
77 #endif
78 #ifdef WLAN_SUPPORT_MESH_LATENCY
79 #include "dp_mesh_latency.h"
80 #endif
81 #ifdef WLAN_SUPPORT_SCS
82 #include "dp_scs.h"
83 #endif
84 #ifdef ATH_SUPPORT_IQUE
85 #include "dp_txrx_me.h"
86 #endif
87 #if defined(DP_CON_MON)
88 #ifndef REMOVE_PKT_LOG
89 #include <pktlog_ac_api.h>
90 #include <pktlog_ac.h>
91 #endif
92 #endif
93 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
94 #include <wlan_dp_swlm.h>
95 #endif
96 #ifdef CONFIG_SAWF_DEF_QUEUES
97 #include "dp_sawf.h"
98 #endif
99 #ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
100 #include <target_if_dp.h>
101 #endif
102 
103 #ifdef WLAN_FEATURE_STATS_EXT
104 #define INIT_RX_HW_STATS_LOCK(_soc) \
105 	qdf_spinlock_create(&(_soc)->rx_hw_stats_lock)
106 #define DEINIT_RX_HW_STATS_LOCK(_soc) \
107 	qdf_spinlock_destroy(&(_soc)->rx_hw_stats_lock)
108 #else
109 #define INIT_RX_HW_STATS_LOCK(_soc)  /* no op */
110 #define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */
111 #endif
112 
113 #if defined(DP_PEER_EXTENDED_API) || defined(WLAN_DP_PENDING_MEM_FLUSH)
114 #define SET_PEER_REF_CNT_ONE(_peer) \
115 	qdf_atomic_set(&(_peer)->ref_cnt, 1)
116 #else
117 #define SET_PEER_REF_CNT_ONE(_peer)
118 #endif
119 
120 #ifdef WLAN_SYSFS_DP_STATS
121 /* sysfs event wait time for firmware stat request unit milliseconds */
122 #define WLAN_SYSFS_STAT_REQ_WAIT_MS 3000
123 #endif
124 
125 #ifdef QCA_DP_ENABLE_TX_COMP_RING4
126 #define TXCOMP_RING4_NUM 3
127 #else
128 #define TXCOMP_RING4_NUM WBM2SW_TXCOMP_RING4_NUM
129 #endif
130 
131 #ifdef QCA_DP_TX_FW_METADATA_V2
132 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
133 		HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
134 #else
135 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
136 		HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
137 #endif
138 
139 QDF_COMPILE_TIME_ASSERT(max_rx_rings_check,
140 			MAX_REO_DEST_RINGS == CDP_MAX_RX_RINGS);
141 
142 QDF_COMPILE_TIME_ASSERT(max_tx_rings_check,
143 			MAX_TCL_DATA_RINGS == CDP_MAX_TX_COMP_RINGS);
144 
145 #define dp_init_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_INIT, params)
146 #define dp_init_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_INIT, params)
147 #define dp_init_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_INIT, params)
148 #define dp_init_info(params...) \
149 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_INIT, ## params)
150 #define dp_init_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_INIT, params)
151 
152 #define dp_vdev_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_VDEV, params)
153 #define dp_vdev_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_VDEV, params)
154 #define dp_vdev_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_VDEV, params)
155 #define dp_vdev_info(params...) \
156 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_VDEV, ## params)
157 #define dp_vdev_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_VDEV, params)
158 
159 void dp_configure_arch_ops(struct dp_soc *soc);
160 qdf_size_t dp_get_soc_context_size(uint16_t device_id);
161 
162 /*
163  * The max size of cdp_peer_stats_param_t is limited to 16 bytes.
164  * If the buffer size is exceeding this size limit,
165  * dp_txrx_get_peer_stats is to be used instead.
166  */
167 QDF_COMPILE_TIME_ASSERT(cdp_peer_stats_param_t_max_size,
168 			(sizeof(cdp_peer_stats_param_t) <= 16));
169 
170 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
171 /*
172  * If WLAN_CFG_INT_NUM_CONTEXTS is changed, HIF_NUM_INT_CONTEXTS
173  * also should be updated accordingly
174  */
175 QDF_COMPILE_TIME_ASSERT(num_intr_grps,
176 			HIF_NUM_INT_CONTEXTS == WLAN_CFG_INT_NUM_CONTEXTS);
177 
178 /*
179  * HIF_EVENT_HIST_MAX should always be power of 2
180  */
181 QDF_COMPILE_TIME_ASSERT(hif_event_history_size,
182 			(HIF_EVENT_HIST_MAX & (HIF_EVENT_HIST_MAX - 1)) == 0);
183 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
184 
185 /*
186  * If WLAN_CFG_INT_NUM_CONTEXTS is changed,
187  * WLAN_CFG_INT_NUM_CONTEXTS_MAX should also be updated
188  */
189 QDF_COMPILE_TIME_ASSERT(wlan_cfg_num_int_ctxs,
190 			WLAN_CFG_INT_NUM_CONTEXTS_MAX >=
191 			WLAN_CFG_INT_NUM_CONTEXTS);
192 
193 static QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl);
194 static QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl);
195 
196 static void dp_pdev_srng_deinit(struct dp_pdev *pdev);
197 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev);
198 static void dp_pdev_srng_free(struct dp_pdev *pdev);
199 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev);
200 
201 static void dp_soc_srng_deinit(struct dp_soc *soc);
202 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc);
203 static void dp_soc_srng_free(struct dp_soc *soc);
204 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc);
205 
206 static void dp_soc_cfg_init(struct dp_soc *soc);
207 static void dp_soc_cfg_attach(struct dp_soc *soc);
208 
209 static inline
210 QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
211 				struct cdp_pdev_attach_params *params);
212 
213 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id);
214 
215 static QDF_STATUS
216 dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
217 		   HTC_HANDLE htc_handle,
218 		   qdf_device_t qdf_osdev,
219 		   uint8_t pdev_id);
220 
221 static QDF_STATUS
222 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id, int force);
223 
224 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc);
225 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc);
226 
227 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
228 		  struct hif_opaque_softc *hif_handle);
229 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
230 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc,
231 				       uint8_t pdev_id,
232 				       int force);
233 static struct dp_soc *
234 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
235 	      struct cdp_soc_attach_params *params);
236 static inline QDF_STATUS dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl,
237 					      uint8_t vdev_id,
238 					      uint8_t *peer_mac_addr,
239 					      enum cdp_peer_type peer_type);
240 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
241 				       uint8_t vdev_id,
242 				       uint8_t *peer_mac, uint32_t bitmap,
243 				       enum cdp_peer_type peer_type);
244 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
245 				bool unmap_only,
246 				bool mlo_peers_only);
247 #ifdef ENABLE_VERBOSE_DEBUG
248 bool is_dp_verbose_debug_enabled;
249 #endif
250 
251 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
252 static bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
253 static void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
254 			   bool enable);
255 static inline void
256 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
257 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats);
258 static inline void
259 dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
260 #endif
261 
262 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
263 						uint8_t index);
264 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index);
265 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index);
266 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
267 						 uint8_t index);
268 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
269 					    enum hal_ring_type ring_type,
270 					    int ring_num);
271 #ifdef DP_UMAC_HW_RESET_SUPPORT
272 static QDF_STATUS dp_umac_reset_handle_pre_reset(struct dp_soc *soc);
273 static QDF_STATUS dp_umac_reset_handle_post_reset(struct dp_soc *soc);
274 static QDF_STATUS dp_umac_reset_handle_post_reset_complete(struct dp_soc *soc);
275 #endif
276 
277 #define DP_INTR_POLL_TIMER_MS	5
278 
279 #define MON_VDEV_TIMER_INIT 0x1
280 #define MON_VDEV_TIMER_RUNNING 0x2
281 
282 #define DP_MCS_LENGTH (6*MAX_MCS)
283 
284 #define DP_CURR_FW_STATS_AVAIL 19
285 #define DP_HTT_DBG_EXT_STATS_MAX 256
286 #define DP_MAX_SLEEP_TIME 100
287 #ifndef QCA_WIFI_3_0_EMU
288 #define SUSPEND_DRAIN_WAIT 500
289 #else
290 #define SUSPEND_DRAIN_WAIT 3000
291 #endif
292 
293 #ifdef IPA_OFFLOAD
294 /* Exclude IPA rings from the interrupt context */
295 #define TX_RING_MASK_VAL	0xb
296 #define RX_RING_MASK_VAL	0x7
297 #else
298 #define TX_RING_MASK_VAL	0xF
299 #define RX_RING_MASK_VAL	0xF
300 #endif
301 
302 #define STR_MAXLEN	64
303 
304 #define RNG_ERR		"SRNG setup failed for"
305 
/**
 * default_dscp_tid_map - Default DSCP-TID mapping
 *
 * Each 6-bit DSCP value maps to TID = DSCP >> 3 (eight DSCP values per TID):
 *
 * DSCP        TID
 * 000000      0
 * 001000      1
 * 010000      2
 * 011000      3
 * 100000      4
 * 101000      5
 * 110000      6
 * 111000      7
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7,
};
329 
/**
 * default_pcp_tid_map - Default PCP-TID mapping (identity: TID == PCP)
 *
 * PCP     TID
 * 000      0
 * 001      1
 * 010      2
 * 011      3
 * 100      4
 * 101      5
 * 110      6
 * 111      7
 */
static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
	0, 1, 2, 3, 4, 5, 6, 7,
};
346 
/**
 * dp_cpu_ring_map - CPU to TX ring map
 *
 * Row is selected by the NSS offload configuration
 * (DP_NSS_CPU_RING_MAP_MAX rows); column is the interrupt context index
 * (WLAN_CFG_INT_NUM_CONTEXTS_MAX columns).
 */
uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
#endif
};

qdf_export_symbol(dp_cpu_ring_map);
363 
/**
 * enum dp_stats_type - Select the type of statistics
 * @STATS_FW: firmware statistics (column 0 of dp_stats_mapping_table)
 * @STATS_HOST: host statistics (column 1 of dp_stats_mapping_table)
 * @STATS_TYPE_MAX: number of stats types; array-size sentinel
 */
enum dp_stats_type {
	STATS_FW = 0,
	STATS_HOST = 1,
	STATS_TYPE_MAX = 2,
};
372 
/**
 * enum dp_fw_stats - General Firmware statistics options
 * @TXRX_FW_STATS_INVALID: no firmware stat is mapped for a table entry
 */
enum dp_fw_stats {
	TXRX_FW_STATS_INVALID	= -1,
};
380 
/**
 * dp_stats_mapping_table - Firmware and Host statistics
 * currently supported
 *
 * Column 0 (STATS_FW) holds the HTT debug-ext stats id, column 1
 * (STATS_HOST) the host stats id; TXRX_*_STATS_INVALID marks the
 * unsupported column of a row.
 * NOTE(review): row position appears to encode the stats request id -
 * verify against the CDP stats enum before inserting/reordering rows.
 */
const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
	/* Last ENUM for HTT FW STATS */
	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
	{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
	{TXRX_FW_STATS_INVALID, TXRX_NAPI_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_FSE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_HAL_REG_WRITE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_REO_HW_DESC_DUMP},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_WBM_IDLE_HPTP_DUMP},
	{TXRX_FW_STATS_INVALID, TXRX_SRNG_USAGE_WM_STATS},
	{HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_SOUNDING_INFO, TXRX_HOST_STATS_INVALID}
};
428 
429 /* MCL specific functions */
430 #if defined(DP_CON_MON)
431 
432 #ifdef DP_CON_MON_MSI_ENABLED
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * With MSI-based monitor interrupts (DP_CON_MON_MSI_ENABLED), monitor mode
 * rings are serviced in the regular interrupt context, so the configured RX
 * monitor ring mask for this context is returned as-is (unlike the polled
 * variant below, which returns 0).
 *
 * Return: RX monitor ring mask from the wlan cfg context
 */
static inline uint32_t
dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
}
453 #else
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * For MCL, monitor mode rings are being processed in timer contexts (polled).
 * This function returns 0, since in interrupt mode (softirq based RX),
 * we do not want to process monitor mode rings in a softirq.
 *
 * So, in case packet log is enabled for SAP/STA/P2P modes,
 * regular interrupt processing will not process monitor mode rings. It would
 * be done in a separate timer context.
 *
 * Return: 0
 */
static inline uint32_t
dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return 0;
}
474 #endif
475 
476 #ifdef IPA_OFFLOAD
/**
 * dp_get_num_rx_contexts() - get number of RX contexts
 * @soc_hdl: cdp opaque soc handle
 *
 * Counts the REO destination rings available to the host for RX
 * processing, excluding the ring(s) reserved for IPA offload.
 *
 * Return: number of RX contexts
 */
static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
{
	int num_rx_contexts;
	uint32_t reo_ring_map;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	reo_ring_map = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);

	switch (soc->arch_id) {
	case CDP_ARCH_TYPE_BE:
		/* 2 REO rings are used for IPA */
		reo_ring_map &=  ~(BIT(3) | BIT(7));

		break;
	case CDP_ARCH_TYPE_LI:
		/* 1 REO ring is used for IPA */
		reo_ring_map &=  ~BIT(3);
		break;
	default:
		/* Unknown arch: assert, then count the unmodified map */
		dp_err("unknown arch_id 0x%x", soc->arch_id);
		QDF_BUG(0);
	}
	/*
	 * qdf_get_hweight32 prefer over qdf_get_hweight8 in case map is scaled
	 * in future
	 */
	num_rx_contexts = qdf_get_hweight32(reo_ring_map);

	return num_rx_contexts;
}
513 #else
514 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
515 {
516 	int num_rx_contexts;
517 	uint32_t reo_config;
518 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
519 
520 	reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
521 	/*
522 	 * qdf_get_hweight32 prefer over qdf_get_hweight8 in case map is scaled
523 	 * in future
524 	 */
525 	num_rx_contexts = qdf_get_hweight32(reo_config);
526 
527 	return num_rx_contexts;
528 }
529 #endif
530 
531 #else
532 
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * Return: RX monitor ring mask configured for the given interrupt context
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
}
545 
546 /**
547  * dp_soc_reset_mon_intr_mask() - reset mon intr mask
548  * @soc: pointer to dp_soc handle
549  *
550  * Return:
551  */
552 void dp_soc_reset_mon_intr_mask(struct dp_soc *soc)
553 {
554 	int i;
555 
556 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
557 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
558 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
559 	}
560 }
561 
562 qdf_export_symbol(dp_soc_reset_mon_intr_mask);
563 
/*
 * dp_service_lmac_rings()- timer to reap lmac rings
 * @arg: SoC Handle
 *
 * Polled (timer-driven) service routine: for every LMAC ring it runs
 * monitor-mode processing, drains the RXDMA error ring from each interrupt
 * context, and replenishes the RX refill buffer ring when that ring is not
 * offloaded to NSS. Re-arms itself at the end.
 *
 * Return:
 *
 */
static void dp_service_lmac_rings(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;
	int ring = 0, i;
	struct dp_pdev *pdev = NULL;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	/* Process LMAC interrupts */
	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
		int mac_for_pdev = ring;
		struct dp_srng *rx_refill_buf_ring;

		/* Skip LMAC ids that have no pdev attached */
		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
		if (!pdev)
			continue;

		rx_refill_buf_ring = &soc->rx_refill_buf_ring[mac_for_pdev];

		dp_monitor_process(soc, NULL, mac_for_pdev,
				   QCA_NAPI_BUDGET);

		for (i = 0;
		     i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
			dp_rxdma_err_process(&soc->intr_ctx[i], soc,
					     mac_for_pdev,
					     QCA_NAPI_BUDGET);

		/* Replenish only when RXDMA_BUF is owned by the host */
		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF,
						  mac_for_pdev))
			dp_rx_buffers_replenish(soc, mac_for_pdev,
						rx_refill_buf_ring,
						&soc->rx_desc_buf[mac_for_pdev],
						0, &desc_list, &tail, false);
	}

	/* Re-arm the reap timer for the next poll interval */
	qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
}
609 
610 #endif
611 
612 #ifdef FEATURE_MEC
/**
 * dp_peer_mec_flush_entries() - Delete all entries in the MEC table
 * @soc: Datapath soc handle
 *
 * Entries are detached from the hash bins onto a local free list while
 * holding mec_lock, then freed after the lock is dropped.
 *
 * Return: none
 */
void dp_peer_mec_flush_entries(struct dp_soc *soc)
{
	unsigned int index;
	struct dp_mec_entry *mecentry, *mecentry_next;

	TAILQ_HEAD(, dp_mec_entry) free_list;
	TAILQ_INIT(&free_list);

	/* Nothing to do if the hash table was never initialized */
	if (!soc->mec_hash.mask)
		return;

	if (!soc->mec_hash.bins)
		return;

	/* Fast path: table is currently empty */
	if (!qdf_atomic_read(&soc->mec_cnt))
		return;

	qdf_spin_lock_bh(&soc->mec_lock);
	for (index = 0; index <= soc->mec_hash.mask; index++) {
		if (!TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
			TAILQ_FOREACH_SAFE(mecentry, &soc->mec_hash.bins[index],
					   hash_list_elem, mecentry_next) {
			    dp_peer_mec_detach_entry(soc, mecentry, &free_list);
			}
		}
	}
	qdf_spin_unlock_bh(&soc->mec_lock);

	/* Free the detached entries outside the lock */
	dp_peer_mec_free_list(soc, &free_list);
}
643 
/**
 * dp_print_mec_stats() - Dump MEC counters and all MEC entries in table
 * @soc: Datapath soc handle
 *
 * Entries are snapshotted into a temporary buffer under mec_lock
 * (taken per hash bin) and printed after the lock is released.
 *
 * Return: none
 */
static void dp_print_mec_stats(struct dp_soc *soc)
{
	int i;
	uint32_t index;
	struct dp_mec_entry *mecentry = NULL, *mec_list;
	uint32_t num_entries = 0;

	DP_PRINT_STATS("MEC Stats:");
	DP_PRINT_STATS("   Entries Added   = %d", soc->stats.mec.added);
	DP_PRINT_STATS("   Entries Deleted = %d", soc->stats.mec.deleted);

	if (!qdf_atomic_read(&soc->mec_cnt))
		return;

	mec_list = qdf_mem_malloc(sizeof(*mecentry) * DP_PEER_MAX_MEC_ENTRY);
	if (!mec_list) {
		dp_peer_warn("%pK: failed to allocate mec_list", soc);
		return;
	}

	DP_PRINT_STATS("MEC Table:");
	for (index = 0; index <= soc->mec_hash.mask; index++) {
		qdf_spin_lock_bh(&soc->mec_lock);
		if (TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
			qdf_spin_unlock_bh(&soc->mec_lock);
			continue;
		}

		/* Copy entries out so printing happens without the lock */
		TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index],
			      hash_list_elem) {
			qdf_mem_copy(&mec_list[num_entries], mecentry,
				     sizeof(*mecentry));
			num_entries++;
		}
		qdf_spin_unlock_bh(&soc->mec_lock);
	}

	if (!num_entries) {
		qdf_mem_free(mec_list);
		return;
	}

	for (i = 0; i < num_entries; i++) {
		DP_PRINT_STATS("%6d mac_addr = " QDF_MAC_ADDR_FMT
			       " is_active = %d pdev_id = %d vdev_id = %d",
			       i,
			       QDF_MAC_ADDR_REF(mec_list[i].mac_addr.raw),
			       mec_list[i].is_active,
			       mec_list[i].pdev_id,
			       mec_list[i].vdev_id);
	}
	qdf_mem_free(mec_list);
}
703 #else
/* FEATURE_MEC disabled: nothing to print */
static void dp_print_mec_stats(struct dp_soc *soc)
{
}
707 #endif
708 
/**
 * dp_peer_add_ast_wifi3() - CDP wrapper to add an AST entry for a peer
 * @soc_hdl: Datapath SOC handle
 * @vdev_id: id of the vdev the peer belongs to
 * @peer_mac: peer MAC address used to look the peer up
 * @mac_addr: MAC address for the new AST entry
 * @type: AST entry type
 * @flags: flags passed through to dp_peer_add_ast()
 *
 * Return: 0 on success (including "already exists" / "retry" statuses),
 *         -1 if the peer is not found or the add fails
 */
static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
				 uint8_t vdev_id,
				 uint8_t *peer_mac,
				 uint8_t *mac_addr,
				 enum cdp_txrx_ast_entry_type type,
				 uint32_t flags)
{
	int ret = -1;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
						       peer_mac, 0, vdev_id,
						       DP_MOD_ID_CDP);

	if (!peer) {
		dp_peer_debug("Peer is NULL!");
		return ret;
	}

	status = dp_peer_add_ast((struct dp_soc *)soc_hdl,
				 peer,
				 mac_addr,
				 type,
				 flags);
	/* E_ALREADY and E_AGAIN are reported to the caller as success */
	if ((status == QDF_STATUS_SUCCESS) ||
	    (status == QDF_STATUS_E_ALREADY) ||
	    (status == QDF_STATUS_E_AGAIN))
		ret = 0;

	dp_hmwds_ast_add_notify(peer, mac_addr,
				type, status, false);

	/* Drop the reference taken by dp_peer_find_hash_find() */
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return ret;
}
744 
/**
 * dp_peer_update_ast_wifi3() - CDP wrapper to update a WDS AST entry
 * @soc_hdl: Datapath SOC handle
 * @vdev_id: id of the vdev the peer belongs to
 * @peer_mac: peer MAC address used to look the peer up
 * @wds_macaddr: MAC address of the AST entry to update
 * @flags: flags passed through to dp_peer_update_ast()
 *
 * No-op (returns -1) when AST handling is offloaded to firmware.
 *
 * Return: 0 on success, -1 if the peer/entry is not found or update fails
 */
static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
						uint8_t vdev_id,
						uint8_t *peer_mac,
						uint8_t *wds_macaddr,
						uint32_t flags)
{
	int status = -1;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry  *ast_entry = NULL;
	struct dp_peer *peer;

	/* Host-side AST updates do not apply with AST offload */
	if (soc->ast_offload_support)
		return status;

	peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
				      peer_mac, 0, vdev_id,
				      DP_MOD_ID_CDP);

	if (!peer) {
		dp_peer_debug("Peer is NULL!");
		return status;
	}

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
						    peer->vdev->pdev->pdev_id);

	if (ast_entry) {
		status = dp_peer_update_ast(soc,
					    peer,
					    ast_entry, flags);
	}
	qdf_spin_unlock_bh(&soc->ast_lock);

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return status;
}
783 
/*
 * dp_peer_reset_ast_entries() - Deletes all HMWDS entries for a peer
 * @soc:		Datapath SOC handle
 * @peer:		DP peer
 * @arg:		callback argument (unused)
 *
 * Callers hold soc->ast_lock around this function.
 *
 * Return: None
 */
static void
dp_peer_reset_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_ast_entry *tmp_ast_entry;

	/* Safe iteration: dp_peer_del_ast() removes the current entry */
	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) {
		if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
		    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
			dp_peer_del_ast(soc, ast_entry);
	}
}
804 
/*
 * dp_wds_reset_ast_wifi3() - Delete HMWDS AST entry/entries
 * @soc_hdl:		Datapath SOC handle
 * @wds_macaddr:	WDS entry MAC Address
 * @peer_mac_addr:	peer MAC Address
 * @vdev_id:		id of vdev handle
 *
 * If @peer_mac_addr is given, all HMWDS entries of that peer are deleted;
 * otherwise the single HMWDS entry matching @wds_macaddr on this pdev is
 * deleted. Fails when AST handling is offloaded to firmware.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
					 uint8_t *wds_macaddr,
					 uint8_t *peer_mac_addr,
					 uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_peer *peer;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;

	if (soc->ast_offload_support)
		return QDF_STATUS_E_FAILURE;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	pdev = vdev->pdev;

	if (peer_mac_addr) {
		peer = dp_peer_find_hash_find(soc, peer_mac_addr,
					      0, vdev->vdev_id,
					      DP_MOD_ID_CDP);
		if (!peer) {
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
			return QDF_STATUS_E_FAILURE;
		}

		qdf_spin_lock_bh(&soc->ast_lock);
		dp_peer_reset_ast_entries(soc, peer, NULL);
		qdf_spin_unlock_bh(&soc->ast_lock);
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	} else if (wds_macaddr) {
		qdf_spin_lock_bh(&soc->ast_lock);
		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
							    pdev->pdev_id);

		if (ast_entry) {
			/* Only HMWDS entry types are eligible for deletion */
			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
				dp_peer_del_ast(soc, ast_entry);
		}
		qdf_spin_unlock_bh(&soc->ast_lock);
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}
863 
/*
 * dp_wds_reset_ast_table_wifi3() - Delete all HMWDS AST entries on the SOC
 * @soc_hdl:		Datapath SOC handle
 * @vdev_id:		id of vdev object (unused)
 *
 * Iterates over all peers and deletes their HMWDS entries. Reported as
 * success without doing anything when AST handling is offloaded to firmware.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
			     uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *) soc_hdl;

	if (soc->ast_offload_support)
		return QDF_STATUS_SUCCESS;

	qdf_spin_lock_bh(&soc->ast_lock);

	dp_soc_iterate_peer(soc, dp_peer_reset_ast_entries, NULL,
			    DP_MOD_ID_CDP);
	qdf_spin_unlock_bh(&soc->ast_lock);

	return QDF_STATUS_SUCCESS;
}
888 
889 /*
890  * dp_peer_flush_ast_entries() - Delete all wds and hmwds ast entries of a peer
891  * @soc:		Datapath SOC
892  * @peer:		Datapath peer
893  * @arg:		arg to callback
894  *
895  * Return: None
896  */
897 static void
898 dp_peer_flush_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
899 {
900 	struct dp_ast_entry *ase = NULL;
901 	struct dp_ast_entry *temp_ase;
902 
903 	DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
904 		if ((ase->type ==
905 			CDP_TXRX_AST_TYPE_STATIC) ||
906 			(ase->type ==
907 			 CDP_TXRX_AST_TYPE_SELF) ||
908 			(ase->type ==
909 			 CDP_TXRX_AST_TYPE_STA_BSS))
910 			continue;
911 		dp_peer_del_ast(soc, ase);
912 	}
913 }
914 
/*
 * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entry
 * @soc_hdl:		Datapath SOC handle
 *
 * Flushes the deletable AST entries of every peer under ast_lock, then
 * flushes the MEC table.
 *
 * Return: None
 */
static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
{
	struct dp_soc *soc = (struct dp_soc *) soc_hdl;

	qdf_spin_lock_bh(&soc->ast_lock);

	dp_soc_iterate_peer(soc, dp_peer_flush_ast_entries, NULL,
			    DP_MOD_ID_CDP);

	qdf_spin_unlock_bh(&soc->ast_lock);
	dp_peer_mec_flush_entries(soc);
}
933 
934 #if defined(IPA_WDS_EASYMESH_FEATURE) && defined(FEATURE_AST)
/*
 * dp_peer_send_wds_disconnect() - Send Disconnect event to IPA for each peer
 * @soc: Datapath SOC
 * @peer: Datapath peer
 *
 * Notifies the control path for every WDS-type AST entry of the peer.
 * NOTE(review): ol_ops->peer_send_wds_disconnect is invoked without a NULL
 * check - presumably guaranteed to be registered when this feature is
 * enabled; verify against the registration path.
 *
 * Return: None
 */
static void
dp_peer_send_wds_disconnect(struct dp_soc *soc, struct dp_peer *peer)
{
	struct dp_ast_entry *ase = NULL;
	struct dp_ast_entry *temp_ase;

	DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
		if (ase->type == CDP_TXRX_AST_TYPE_WDS) {
			soc->cdp_soc.ol_ops->peer_send_wds_disconnect(soc->ctrl_psoc,
								      ase->mac_addr.raw,
								      ase->vdev_id);
		}
	}
}
956 #elif defined(FEATURE_AST)
/* IPA WDS easymesh not enabled: no disconnect notification needed */
static void
dp_peer_send_wds_disconnect(struct dp_soc *soc, struct dp_peer *peer)
{
}
961 #endif
962 
/**
 * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
 *                                       and return ast entry information
 *                                       of first ast entry found in the
 *                                       table with given mac address
 *
 * @soc_hdl : data path soc handle
 * @ast_mac_addr : AST entry mac address
 * @ast_entry_info : ast entry information
 *
 * Always false when AST handling is offloaded to firmware.
 *
 * return : true if ast entry found with ast_mac_addr
 *          false if ast entry not found
 */
static bool dp_peer_get_ast_info_by_soc_wifi3
	(struct cdp_soc_t *soc_hdl,
	 uint8_t *ast_mac_addr,
	 struct cdp_ast_entry_info *ast_entry_info)
{
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_peer *peer = NULL;

	if (soc->ast_offload_support)
		return false;

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
	/* Skip entries already being torn down (delete pending, no callback) */
	if ((!ast_entry) ||
	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}

	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
				     DP_MOD_ID_AST);
	if (!peer) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}

	/* Snapshot the entry fields while still holding ast_lock */
	ast_entry_info->type = ast_entry->type;
	ast_entry_info->pdev_id = ast_entry->pdev_id;
	ast_entry_info->vdev_id = ast_entry->vdev_id;
	ast_entry_info->peer_id = ast_entry->peer_id;
	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
		     &peer->mac_addr.raw[0],
		     QDF_MAC_ADDR_SIZE);
	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
	qdf_spin_unlock_bh(&soc->ast_lock);
	return true;
}
1015 
1016 /**
1017  * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
1018  *                                          and return ast entry information
1019  *                                          if mac address and pdev_id matches
1020  *
1021  * @soc : data path soc handle
1022  * @ast_mac_addr : AST entry mac address
1023  * @pdev_id : pdev_id
1024  * @ast_entry_info : ast entry information
1025  *
1026  * return : true if ast entry found with ast_mac_addr
1027  *          false if ast entry not found
1028  */
1029 static bool dp_peer_get_ast_info_by_pdevid_wifi3
1030 		(struct cdp_soc_t *soc_hdl,
1031 		 uint8_t *ast_mac_addr,
1032 		 uint8_t pdev_id,
1033 		 struct cdp_ast_entry_info *ast_entry_info)
1034 {
1035 	struct dp_ast_entry *ast_entry;
1036 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
1037 	struct dp_peer *peer = NULL;
1038 
1039 	if (soc->ast_offload_support)
1040 		return false;
1041 
1042 	qdf_spin_lock_bh(&soc->ast_lock);
1043 
1044 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr,
1045 						    pdev_id);
1046 
1047 	if ((!ast_entry) ||
1048 	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
1049 		qdf_spin_unlock_bh(&soc->ast_lock);
1050 		return false;
1051 	}
1052 
1053 	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
1054 				     DP_MOD_ID_AST);
1055 	if (!peer) {
1056 		qdf_spin_unlock_bh(&soc->ast_lock);
1057 		return false;
1058 	}
1059 
1060 	ast_entry_info->type = ast_entry->type;
1061 	ast_entry_info->pdev_id = ast_entry->pdev_id;
1062 	ast_entry_info->vdev_id = ast_entry->vdev_id;
1063 	ast_entry_info->peer_id = ast_entry->peer_id;
1064 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
1065 		     &peer->mac_addr.raw[0],
1066 		     QDF_MAC_ADDR_SIZE);
1067 	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
1068 	qdf_spin_unlock_bh(&soc->ast_lock);
1069 	return true;
1070 }
1071 
1072 /**
1073  * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table
1074  *                            with given mac address
1075  *
1076  * @soc : data path soc handle
1077  * @ast_mac_addr : AST entry mac address
1078  * @callback : callback function to called on ast delete response from FW
1079  * @cookie : argument to be passed to callback
1080  *
1081  * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
1082  *          is sent
1083  *          QDF_STATUS_E_INVAL false if ast entry not found
1084  */
1085 static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
1086 					       uint8_t *mac_addr,
1087 					       txrx_ast_free_cb callback,
1088 					       void *cookie)
1089 
1090 {
1091 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1092 	struct dp_ast_entry *ast_entry = NULL;
1093 	txrx_ast_free_cb cb = NULL;
1094 	void *arg = NULL;
1095 
1096 	if (soc->ast_offload_support)
1097 		return -QDF_STATUS_E_INVAL;
1098 
1099 	qdf_spin_lock_bh(&soc->ast_lock);
1100 	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
1101 	if (!ast_entry) {
1102 		qdf_spin_unlock_bh(&soc->ast_lock);
1103 		return -QDF_STATUS_E_INVAL;
1104 	}
1105 
1106 	if (ast_entry->callback) {
1107 		cb = ast_entry->callback;
1108 		arg = ast_entry->cookie;
1109 	}
1110 
1111 	ast_entry->callback = callback;
1112 	ast_entry->cookie = cookie;
1113 
1114 	/*
1115 	 * if delete_in_progress is set AST delete is sent to target
1116 	 * and host is waiting for response should not send delete
1117 	 * again
1118 	 */
1119 	if (!ast_entry->delete_in_progress)
1120 		dp_peer_del_ast(soc, ast_entry);
1121 
1122 	qdf_spin_unlock_bh(&soc->ast_lock);
1123 	if (cb) {
1124 		cb(soc->ctrl_psoc,
1125 		   dp_soc_to_cdp_soc(soc),
1126 		   arg,
1127 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1128 	}
1129 	return QDF_STATUS_SUCCESS;
1130 }
1131 
1132 /**
1133  * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
1134  *                                   table if mac address and pdev_id matches
1135  *
1136  * @soc : data path soc handle
1137  * @ast_mac_addr : AST entry mac address
1138  * @pdev_id : pdev id
1139  * @callback : callback function to called on ast delete response from FW
1140  * @cookie : argument to be passed to callback
1141  *
1142  * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
1143  *          is sent
1144  *          QDF_STATUS_E_INVAL false if ast entry not found
1145  */
1146 
1147 static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
1148 						uint8_t *mac_addr,
1149 						uint8_t pdev_id,
1150 						txrx_ast_free_cb callback,
1151 						void *cookie)
1152 
1153 {
1154 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1155 	struct dp_ast_entry *ast_entry;
1156 	txrx_ast_free_cb cb = NULL;
1157 	void *arg = NULL;
1158 
1159 	if (soc->ast_offload_support)
1160 		return -QDF_STATUS_E_INVAL;
1161 
1162 	qdf_spin_lock_bh(&soc->ast_lock);
1163 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);
1164 
1165 	if (!ast_entry) {
1166 		qdf_spin_unlock_bh(&soc->ast_lock);
1167 		return -QDF_STATUS_E_INVAL;
1168 	}
1169 
1170 	if (ast_entry->callback) {
1171 		cb = ast_entry->callback;
1172 		arg = ast_entry->cookie;
1173 	}
1174 
1175 	ast_entry->callback = callback;
1176 	ast_entry->cookie = cookie;
1177 
1178 	/*
1179 	 * if delete_in_progress is set AST delete is sent to target
1180 	 * and host is waiting for response should not sent delete
1181 	 * again
1182 	 */
1183 	if (!ast_entry->delete_in_progress)
1184 		dp_peer_del_ast(soc, ast_entry);
1185 
1186 	qdf_spin_unlock_bh(&soc->ast_lock);
1187 
1188 	if (cb) {
1189 		cb(soc->ctrl_psoc,
1190 		   dp_soc_to_cdp_soc(soc),
1191 		   arg,
1192 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1193 	}
1194 	return QDF_STATUS_SUCCESS;
1195 }
1196 
1197 /**
1198  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs
1199  * @ring_num: ring num of the ring being queried
1200  * @grp_mask: the grp_mask array for the ring type in question.
1201  *
1202  * The grp_mask array is indexed by group number and the bit fields correspond
1203  * to ring numbers.  We are finding which interrupt group a ring belongs to.
1204  *
1205  * Return: the index in the grp_mask array with the ring number.
1206  * -QDF_STATUS_E_NOENT if no entry is found
1207  */
1208 static int dp_srng_find_ring_in_mask(int ring_num, uint8_t *grp_mask)
1209 {
1210 	int ext_group_num;
1211 	uint8_t mask = 1 << ring_num;
1212 
1213 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
1214 	     ext_group_num++) {
1215 		if (mask & grp_mask[ext_group_num])
1216 			return ext_group_num;
1217 	}
1218 
1219 	return -QDF_STATUS_E_NOENT;
1220 }
1221 
1222 /**
1223  * dp_is_msi_group_number_invalid() - check msi_group_number valid or not
1224  * @soc: dp_soc
1225  * @msi_group_number: MSI group number.
1226  * @msi_data_count: MSI data count.
1227  *
1228  * Return: true if msi_group_number is invalid.
1229  */
1230 static bool dp_is_msi_group_number_invalid(struct dp_soc *soc,
1231 					   int msi_group_number,
1232 					   int msi_data_count)
1233 {
1234 	if (soc && soc->osdev && soc->osdev->dev &&
1235 	    pld_is_one_msi(soc->osdev->dev))
1236 		return false;
1237 
1238 	return msi_group_number > msi_data_count;
1239 }
1240 
1241 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1242 /**
1243  * dp_is_reo_ring_num_in_nf_grp1() - Check if the current reo ring is part of
1244  *				rx_near_full_grp1 mask
1245  * @soc: Datapath SoC Handle
1246  * @ring_num: REO ring number
1247  *
1248  * Return: 1 if the ring_num belongs to reo_nf_grp1,
1249  *	   0, otherwise.
1250  */
1251 static inline int
1252 dp_is_reo_ring_num_in_nf_grp1(struct dp_soc *soc, int ring_num)
1253 {
1254 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_1 & (1 << ring_num));
1255 }
1256 
1257 /**
1258  * dp_is_reo_ring_num_in_nf_grp2() - Check if the current reo ring is part of
1259  *				rx_near_full_grp2 mask
1260  * @soc: Datapath SoC Handle
1261  * @ring_num: REO ring number
1262  *
1263  * Return: 1 if the ring_num belongs to reo_nf_grp2,
1264  *	   0, otherwise.
1265  */
1266 static inline int
1267 dp_is_reo_ring_num_in_nf_grp2(struct dp_soc *soc, int ring_num)
1268 {
1269 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_2 & (1 << ring_num));
1270 }
1271 
1272 /**
1273  * dp_srng_get_near_full_irq_mask() - Get near-full irq mask for a particular
1274  *				ring type and number
1275  * @soc: Datapath SoC handle
1276  * @ring_type: SRNG type
1277  * @ring_num: ring num
1278  *
1279  * Return: near ful irq mask pointer
1280  */
1281 static inline
1282 uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
1283 					enum hal_ring_type ring_type,
1284 					int ring_num)
1285 {
1286 	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
1287 	uint8_t wbm2_sw_rx_rel_ring_id;
1288 	uint8_t *nf_irq_mask = NULL;
1289 
1290 	switch (ring_type) {
1291 	case WBM2SW_RELEASE:
1292 		wbm2_sw_rx_rel_ring_id =
1293 			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
1294 		if (ring_num != wbm2_sw_rx_rel_ring_id) {
1295 			nf_irq_mask = &soc->wlan_cfg_ctx->
1296 					int_tx_ring_near_full_irq_mask[0];
1297 		}
1298 		break;
1299 	case REO_DST:
1300 		if (dp_is_reo_ring_num_in_nf_grp1(soc, ring_num))
1301 			nf_irq_mask =
1302 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_1_mask[0];
1303 		else if (dp_is_reo_ring_num_in_nf_grp2(soc, ring_num))
1304 			nf_irq_mask =
1305 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_2_mask[0];
1306 		else
1307 			qdf_assert(0);
1308 		break;
1309 	default:
1310 		break;
1311 	}
1312 
1313 	return nf_irq_mask;
1314 }
1315 
1316 /**
1317  * dp_srng_set_msi2_ring_params() - Set the msi2 addr/data in the ring params
1318  * @soc: Datapath SoC handle
1319  * @ring_params: srng params handle
1320  * @msi2_addr: MSI2 addr to be set for the SRNG
1321  * @msi2_data: MSI2 data to be set for the SRNG
1322  *
1323  * Return: None
1324  */
1325 static inline
1326 void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
1327 				  struct hal_srng_params *ring_params,
1328 				  qdf_dma_addr_t msi2_addr,
1329 				  uint32_t msi2_data)
1330 {
1331 	ring_params->msi2_addr = msi2_addr;
1332 	ring_params->msi2_data = msi2_data;
1333 }
1334 
1335 /**
1336  * dp_srng_msi2_setup() - Setup MSI2 details for near full IRQ of an SRNG
1337  * @soc: Datapath SoC handle
1338  * @ring_params: ring_params for SRNG
1339  * @ring_type: SENG type
1340  * @ring_num: ring number for the SRNG
1341  * @nf_msi_grp_num: near full msi group number
1342  *
1343  * Return: None
1344  */
1345 static inline void
1346 dp_srng_msi2_setup(struct dp_soc *soc,
1347 		   struct hal_srng_params *ring_params,
1348 		   int ring_type, int ring_num, int nf_msi_grp_num)
1349 {
1350 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
1351 	int msi_data_count, ret;
1352 
1353 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1354 					  &msi_data_count, &msi_data_start,
1355 					  &msi_irq_start);
1356 	if (ret)
1357 		return;
1358 
1359 	if (nf_msi_grp_num < 0) {
1360 		dp_init_info("%pK: ring near full IRQ not part of an ext_group; ring_type: %d,ring_num %d",
1361 			     soc, ring_type, ring_num);
1362 		ring_params->msi2_addr = 0;
1363 		ring_params->msi2_data = 0;
1364 		return;
1365 	}
1366 
1367 	if (dp_is_msi_group_number_invalid(soc, nf_msi_grp_num,
1368 					   msi_data_count)) {
1369 		dp_init_warn("%pK: 2 msi_groups will share an msi for near full IRQ; msi_group_num %d",
1370 			     soc, nf_msi_grp_num);
1371 		QDF_ASSERT(0);
1372 	}
1373 
1374 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
1375 
1376 	ring_params->nf_irq_support = 1;
1377 	ring_params->msi2_addr = addr_low;
1378 	ring_params->msi2_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
1379 	ring_params->msi2_data = (nf_msi_grp_num % msi_data_count)
1380 		+ msi_data_start;
1381 	ring_params->flags |= HAL_SRNG_MSI_INTR;
1382 }
1383 
1384 /* Percentage of ring entries considered as nearly full */
1385 #define DP_NF_HIGH_THRESH_PERCENTAGE	75
1386 /* Percentage of ring entries considered as critically full */
1387 #define DP_NF_CRIT_THRESH_PERCENTAGE	90
1388 /* Percentage of ring entries considered as safe threshold */
1389 #define DP_NF_SAFE_THRESH_PERCENTAGE	50
1390 
1391 /**
1392  * dp_srng_configure_nf_interrupt_thresholds() - Configure the thresholds for
1393  *			near full irq
1394  * @soc: Datapath SoC handle
1395  * @ring_params: ring params for SRNG
1396  * @ring_type: ring type
1397  */
1398 static inline void
1399 dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
1400 					  struct hal_srng_params *ring_params,
1401 					  int ring_type)
1402 {
1403 	if (ring_params->nf_irq_support) {
1404 		ring_params->high_thresh = (ring_params->num_entries *
1405 					    DP_NF_HIGH_THRESH_PERCENTAGE) / 100;
1406 		ring_params->crit_thresh = (ring_params->num_entries *
1407 					    DP_NF_CRIT_THRESH_PERCENTAGE) / 100;
1408 		ring_params->safe_thresh = (ring_params->num_entries *
1409 					    DP_NF_SAFE_THRESH_PERCENTAGE) /100;
1410 	}
1411 }
1412 
1413 /**
1414  * dp_srng_set_nf_thresholds() - Set the near full thresholds to srng data
1415  *			structure from the ring params
1416  * @soc: Datapath SoC handle
1417  * @srng: SRNG handle
1418  * @ring_params: ring params for a SRNG
1419  *
1420  * Return: None
1421  */
1422 static inline void
1423 dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
1424 			  struct hal_srng_params *ring_params)
1425 {
1426 	srng->crit_thresh = ring_params->crit_thresh;
1427 	srng->safe_thresh = ring_params->safe_thresh;
1428 }
1429 
1430 #else
/* Stubs below: near-full IRQ (WLAN_FEATURE_NEAR_FULL_IRQ) compiled out.
 * No ring carries a near-full mask in this configuration.
 */
static inline
uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
					enum hal_ring_type ring_type,
					int ring_num)
{
	return NULL;
}
1438 
/* No-op: MSI2 params are only used for near-full IRQ support */
static inline
void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
				  struct hal_srng_params *ring_params,
				  qdf_dma_addr_t msi2_addr,
				  uint32_t msi2_data)
{
}
1446 
/* No-op: MSI2 setup only applies with near-full IRQ support */
static inline void
dp_srng_msi2_setup(struct dp_soc *soc,
		   struct hal_srng_params *ring_params,
		   int ring_type, int ring_num, int nf_msi_grp_num)
{
}
1453 
/* No-op: near-full thresholds only apply with near-full IRQ support */
static inline void
dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
					  struct hal_srng_params *ring_params,
					  int ring_type)
{
}
1460 
/* No-op: srng near-full thresholds only apply with near-full IRQ support */
static inline void
dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
			  struct hal_srng_params *ring_params)
{
}
1466 #endif
1467 
/**
 * dp_srng_calculate_msi_group() - compute the interrupt group(s) for a ring
 * @soc: Datapath SoC handle
 * @ring_type: SRNG type of the ring being mapped
 * @ring_num: ring number within the ring type (may be remapped internally
 *	      before the group-mask lookup, see WBM2SW_RELEASE handling)
 * @reg_msi_grp_num: [out] regular interrupt group servicing this ring
 * @nf_irq_support: true if near-full interrupt is supported for the ring
 * @nf_msi_grp_num: [out] near-full interrupt group; written only when the
 *		    ring type has a near-full mask configured
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *	   -QDF_STATUS_E_NOENT for ring types that take no host interrupt
 */
static int dp_srng_calculate_msi_group(struct dp_soc *soc,
				       enum hal_ring_type ring_type,
				       int ring_num,
				       int *reg_msi_grp_num,
				       bool nf_irq_support,
				       int *nf_msi_grp_num)
{
	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
	uint8_t *grp_mask, *nf_irq_mask = NULL;
	bool nf_irq_enabled = false;
	uint8_t wbm2_sw_rx_rel_ring_id;

	switch (ring_type) {
	case WBM2SW_RELEASE:
		wbm2_sw_rx_rel_ring_id =
			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
		if (ring_num == wbm2_sw_rx_rel_ring_id) {
			/* dp_rx_wbm_err_process - soc->rx_rel_ring */
			grp_mask = &cfg_ctx->int_rx_wbm_rel_ring_mask[0];
			ring_num = 0;
		} else if (ring_num == WBM2_SW_PPE_REL_RING_ID) {
			grp_mask = &cfg_ctx->int_ppeds_wbm_release_ring_mask[0];
			ring_num = 0;
		}  else { /* dp_tx_comp_handler - soc->tx_comp_ring */
			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
			nf_irq_mask = dp_srng_get_near_full_irq_mask(soc,
								     ring_type,
								     ring_num);
			if (nf_irq_mask)
				nf_irq_enabled = true;

			/*
			 * Using ring 4 as 4th tx completion ring since ring 3
			 * is Rx error ring
			 */
			if (ring_num == WBM2SW_TXCOMP_RING4_NUM)
				ring_num = TXCOMP_RING4_NUM;
		}
	break;

	case REO_EXCEPTION:
		/* dp_rx_err_process - &soc->reo_exception_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
	break;

	case REO_DST:
		/* dp_rx_process - soc->reo_dest_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
		nf_irq_mask = dp_srng_get_near_full_irq_mask(soc, ring_type,
							     ring_num);
		if (nf_irq_mask)
			nf_irq_enabled = true;
	break;

	case REO_STATUS:
		/* dp_reo_status_ring_handler - soc->reo_status_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
	break;

	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
	case RXDMA_MONITOR_STATUS:
	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
	case RXDMA_MONITOR_DST:
		/* dp_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
	break;
	case TX_MONITOR_DST:
		/* dp_tx_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_tx_mon_ring_mask[0];
	break;
	case RXDMA_DST:
		/* dp_rxdma_err_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
	break;

	case RXDMA_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
	break;

	case RXDMA_MONITOR_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
	break;

	case TX_MONITOR_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2txmon_ring_mask[0];
	break;

	case REO2PPE:
		grp_mask = &soc->wlan_cfg_ctx->int_reo2ppe_ring_mask[0];
	break;

	case PPE2TCL:
		grp_mask = &soc->wlan_cfg_ctx->int_ppe2tcl_ring_mask[0];
	break;

	case TCL_DATA:
	/* CMD_CREDIT_RING is used as command in 8074 and credit in 9000 */
	case TCL_CMD_CREDIT:
	case REO_CMD:
	case SW2WBM_RELEASE:
	case WBM_IDLE_LINK:
		/* normally empty SW_TO_HW rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case TCL_STATUS:
	case REO_REINJECT:
		/* misc unused rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case CE_SRC:
	case CE_DST:
	case CE_DST_STATUS:
		/* CE_rings - currently handled by hif */
	default:
		return -QDF_STATUS_E_NOENT;
	break;
	}

	/* Map the (possibly remapped) ring number to an interrupt group */
	*reg_msi_grp_num = dp_srng_find_ring_in_mask(ring_num, grp_mask);

	if (nf_irq_support && nf_irq_enabled) {
		*nf_msi_grp_num = dp_srng_find_ring_in_mask(ring_num,
							    nf_irq_mask);
	}

	return QDF_STATUS_SUCCESS;
}
1597 
1598 /*
1599  * dp_get_num_msi_available()- API to get number of MSIs available
1600  * @dp_soc: DP soc Handle
1601  * @interrupt_mode: Mode of interrupts
1602  *
1603  * Return: Number of MSIs available or 0 in case of integrated
1604  */
1605 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
1606 static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
1607 {
1608 	return 0;
1609 }
1610 #else
1611 /*
1612  * dp_get_num_msi_available()- API to get number of MSIs available
1613  * @dp_soc: DP soc Handle
1614  * @interrupt_mode: Mode of interrupts
1615  *
1616  * Return: Number of MSIs available or 0 in case of integrated
1617  */
1618 static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
1619 {
1620 	int msi_data_count;
1621 	int msi_data_start;
1622 	int msi_irq_start;
1623 	int ret;
1624 
1625 	if (interrupt_mode == DP_INTR_INTEGRATED) {
1626 		return 0;
1627 	} else if (interrupt_mode == DP_INTR_MSI || interrupt_mode ==
1628 		   DP_INTR_POLL) {
1629 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1630 						  &msi_data_count,
1631 						  &msi_data_start,
1632 						  &msi_irq_start);
1633 		if (ret) {
1634 			qdf_err("Unable to get DP MSI assignment %d",
1635 				interrupt_mode);
1636 			return -EINVAL;
1637 		}
1638 		return msi_data_count;
1639 	}
1640 	qdf_err("Interrupt mode invalid %d", interrupt_mode);
1641 	return -EINVAL;
1642 }
1643 #endif
1644 
/**
 * dp_srng_msi_setup() - populate MSI parameters in the SRNG ring params
 * @soc: Datapath SoC handle
 * @srng: SRNG for which interrupts are being configured
 * @ring_params: [out] ring params to receive msi_addr/msi_data/flags
 * @ring_type: SRNG ring type
 * @ring_num: ring number within the ring type
 *
 * Also registers PPE-DS interrupts via the arch ops when available, and
 * falls through to MSI2 (near-full IRQ) configuration at the end.
 *
 * Return: None
 */
static void dp_srng_msi_setup(struct dp_soc *soc, struct dp_srng *srng,
			      struct hal_srng_params *ring_params,
			      int ring_type, int ring_num)
{
	int reg_msi_grp_num;
	/*
	 * nf_msi_grp_num needs to be initialized with negative value,
	 * to avoid configuring near-full msi for WBM2SW3 ring
	 */
	int nf_msi_grp_num = -1;
	int msi_data_count;
	int ret;
	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
	bool nf_irq_support;
	int vector;

	/* No MSI assignment available: leave ring_params untouched */
	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					    &msi_data_count, &msi_data_start,
					    &msi_irq_start);

	if (ret)
		return;

	nf_irq_support = hal_srng_is_near_full_irq_supported(soc->hal_soc,
							     ring_type,
							     ring_num);
	ret = dp_srng_calculate_msi_group(soc, ring_type, ring_num,
					  &reg_msi_grp_num,
					  nf_irq_support,
					  &nf_msi_grp_num);
	if (ret < 0) {
		/* Ring takes no host interrupt at all: clear MSI and MSI2 */
		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
		return;
	}

	if (reg_msi_grp_num < 0) {
		/* No regular group, but near-full MSI2 may still apply */
		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		goto configure_msi2;
	}

	if (dp_is_msi_group_number_invalid(soc, reg_msi_grp_num,
					   msi_data_count)) {
		dp_init_warn("%pK: 2 msi_groups will share an msi; msi_group_num %d",
			     soc, reg_msi_grp_num);
		QDF_ASSERT(0);
	}

	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = (reg_msi_grp_num % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;

	dp_debug("ring type %u ring_num %u msi->data %u msi_addr %llx",
		 ring_type, ring_num, ring_params->msi_data,
		 (uint64_t)ring_params->msi_addr);

	vector = msi_irq_start + (reg_msi_grp_num % msi_data_count);

	/* PPE-DS rings are serviced by their own interrupt handler; if
	 * registration succeeds, skip the MSI2 configuration below.
	 */
	if (soc->arch_ops.dp_register_ppeds_interrupts)
		if (soc->arch_ops.dp_register_ppeds_interrupts(soc, srng,
							       vector,
							       ring_type,
							       ring_num))
			return;

configure_msi2:
	if (!nf_irq_support) {
		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
		return;
	}

	dp_srng_msi2_setup(soc, ring_params, ring_type, ring_num,
			   nf_msi_grp_num);
}
1728 
1729 #ifdef FEATURE_AST
1730 /**
1731  * dp_print_mlo_ast_stats() - Print AST stats for MLO peers
1732  *
1733  * @soc : core DP soc context
1734  *
1735  * Return: void
1736  */
1737 static void dp_print_mlo_ast_stats(struct dp_soc *soc)
1738 {
1739 	if (soc->arch_ops.print_mlo_ast_stats)
1740 		soc->arch_ops.print_mlo_ast_stats(soc);
1741 }
1742 
1743 /**
1744  * dp_print_peer_ast_entries() - Dump AST entries of peer
1745  * @soc: Datapath soc handle
1746  * @peer: Datapath peer
1747  * @arg: argument to iterate function
1748  *
1749  * return void
1750  */
1751 void
1752 dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
1753 {
1754 	struct dp_ast_entry *ase, *tmp_ase;
1755 	uint32_t num_entries = 0;
1756 	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
1757 			"NONE", "STATIC", "SELF", "WDS", "HMWDS", "BSS",
1758 			"DA", "HMWDS_SEC", "MLD"};
1759 
1760 	DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
1761 	    DP_PRINT_STATS("%6d mac_addr = "QDF_MAC_ADDR_FMT
1762 		    " peer_mac_addr = "QDF_MAC_ADDR_FMT
1763 		    " peer_id = %u"
1764 		    " type = %s"
1765 		    " next_hop = %d"
1766 		    " is_active = %d"
1767 		    " ast_idx = %d"
1768 		    " ast_hash = %d"
1769 		    " delete_in_progress = %d"
1770 		    " pdev_id = %d"
1771 		    " vdev_id = %d",
1772 		    ++num_entries,
1773 		    QDF_MAC_ADDR_REF(ase->mac_addr.raw),
1774 		    QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1775 		    ase->peer_id,
1776 		    type[ase->type],
1777 		    ase->next_hop,
1778 		    ase->is_active,
1779 		    ase->ast_idx,
1780 		    ase->ast_hash_value,
1781 		    ase->delete_in_progress,
1782 		    ase->pdev_id,
1783 		    ase->vdev_id);
1784 	}
1785 }
1786 
1787 /**
1788  * dp_print_ast_stats() - Dump AST table contents
1789  * @soc: Datapath soc handle
1790  *
1791  * return void
1792  */
1793 void dp_print_ast_stats(struct dp_soc *soc)
1794 {
1795 	DP_PRINT_STATS("AST Stats:");
1796 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
1797 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
1798 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
1799 	DP_PRINT_STATS("	Entries MAP ERR  = %d", soc->stats.ast.map_err);
1800 	DP_PRINT_STATS("	Entries Mismatch ERR  = %d",
1801 		       soc->stats.ast.ast_mismatch);
1802 
1803 	DP_PRINT_STATS("AST Table:");
1804 
1805 	qdf_spin_lock_bh(&soc->ast_lock);
1806 
1807 	dp_soc_iterate_peer(soc, dp_print_peer_ast_entries, NULL,
1808 			    DP_MOD_ID_GENERIC_STATS);
1809 
1810 	qdf_spin_unlock_bh(&soc->ast_lock);
1811 
1812 	dp_print_mlo_ast_stats(soc);
1813 }
1814 #else
/* AST bookkeeping is compiled out without FEATURE_AST; only report that */
void dp_print_ast_stats(struct dp_soc *soc)
{
	DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST");
}
1820 #endif
1821 
1822 /**
1823  * dp_print_peer_info() - Dump peer info
1824  * @soc: Datapath soc handle
1825  * @peer: Datapath peer handle
1826  * @arg: argument to iter function
1827  *
1828  * return void
1829  */
1830 static void
1831 dp_print_peer_info(struct dp_soc *soc, struct dp_peer *peer, void *arg)
1832 {
1833 	struct dp_txrx_peer *txrx_peer = NULL;
1834 
1835 	txrx_peer = dp_get_txrx_peer(peer);
1836 	if (!txrx_peer)
1837 		return;
1838 
1839 	DP_PRINT_STATS(" peer id = %d"
1840 		       " peer_mac_addr = "QDF_MAC_ADDR_FMT
1841 		       " nawds_enabled = %d"
1842 		       " bss_peer = %d"
1843 		       " wds_enabled = %d"
1844 		       " tx_cap_enabled = %d"
1845 		       " rx_cap_enabled = %d",
1846 		       peer->peer_id,
1847 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1848 		       txrx_peer->nawds_enabled,
1849 		       txrx_peer->bss_peer,
1850 		       txrx_peer->wds_enabled,
1851 		       dp_monitor_is_tx_cap_enabled(peer),
1852 		       dp_monitor_is_rx_cap_enabled(peer));
1853 }
1854 
1855 /**
1856  * dp_print_peer_table() - Dump all Peer stats
1857  * @vdev: Datapath Vdev handle
1858  *
1859  * return void
1860  */
1861 static void dp_print_peer_table(struct dp_vdev *vdev)
1862 {
1863 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
1864 	dp_vdev_iterate_peer(vdev, dp_print_peer_info, NULL,
1865 			     DP_MOD_ID_GENERIC_STATS);
1866 }
1867 
1868 #ifdef WLAN_DP_PER_RING_TYPE_CONFIG
1869 /**
1870  * dp_srng_configure_interrupt_thresholds() - Retrieve interrupt
1871  * threshold values from the wlan_srng_cfg table for each ring type
1872  * @soc: device handle
1873  * @ring_params: per ring specific parameters
1874  * @ring_type: Ring type
1875  * @ring_num: Ring number for a given ring type
1876  *
1877  * Fill the ring params with the interrupt threshold
1878  * configuration parameters available in the per ring type wlan_srng_cfg
1879  * table.
1880  *
1881  * Return: None
1882  */
1883 static void
1884 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
1885 				       struct hal_srng_params *ring_params,
1886 				       int ring_type, int ring_num,
1887 				       int num_entries)
1888 {
1889 	uint8_t wbm2_sw_rx_rel_ring_id;
1890 
1891 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);
1892 
1893 	if (ring_type == REO_DST) {
1894 		ring_params->intr_timer_thres_us =
1895 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1896 		ring_params->intr_batch_cntr_thres_entries =
1897 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1898 	} else if (ring_type == WBM2SW_RELEASE &&
1899 		   (ring_num == wbm2_sw_rx_rel_ring_id)) {
1900 		ring_params->intr_timer_thres_us =
1901 				wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1902 		ring_params->intr_batch_cntr_thres_entries =
1903 				wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1904 	} else {
1905 		ring_params->intr_timer_thres_us =
1906 				soc->wlan_srng_cfg[ring_type].timer_threshold;
1907 		ring_params->intr_batch_cntr_thres_entries =
1908 				soc->wlan_srng_cfg[ring_type].batch_count_threshold;
1909 	}
1910 	ring_params->low_threshold =
1911 			soc->wlan_srng_cfg[ring_type].low_threshold;
1912 	if (ring_params->low_threshold)
1913 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1914 
1915 	dp_srng_configure_nf_interrupt_thresholds(soc, ring_params, ring_type);
1916 }
1917 #else
1918 static void
1919 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
1920 				       struct hal_srng_params *ring_params,
1921 				       int ring_type, int ring_num,
1922 				       int num_entries)
1923 {
1924 	uint8_t wbm2_sw_rx_rel_ring_id;
1925 	bool rx_refill_lt_disable;
1926 
1927 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);
1928 
1929 	if (ring_type == REO_DST || ring_type == REO2PPE) {
1930 		ring_params->intr_timer_thres_us =
1931 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1932 		ring_params->intr_batch_cntr_thres_entries =
1933 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1934 	} else if (ring_type == WBM2SW_RELEASE &&
1935 		   (ring_num < wbm2_sw_rx_rel_ring_id ||
1936 		   ring_num == WBM2SW_TXCOMP_RING4_NUM ||
1937 		   ring_num == WBM2_SW_PPE_REL_RING_ID)) {
1938 		ring_params->intr_timer_thres_us =
1939 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
1940 		ring_params->intr_batch_cntr_thres_entries =
1941 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
1942 	} else if (ring_type == RXDMA_BUF) {
1943 		rx_refill_lt_disable =
1944 			wlan_cfg_get_dp_soc_rxdma_refill_lt_disable
1945 							(soc->wlan_cfg_ctx);
1946 		ring_params->intr_timer_thres_us =
1947 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1948 
1949 		if (!rx_refill_lt_disable) {
1950 			ring_params->low_threshold = num_entries >> 3;
1951 			ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1952 			ring_params->intr_batch_cntr_thres_entries = 0;
1953 		}
1954 	} else {
1955 		ring_params->intr_timer_thres_us =
1956 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1957 		ring_params->intr_batch_cntr_thres_entries =
1958 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1959 	}
1960 
1961 	/* These rings donot require interrupt to host. Make them zero */
1962 	switch (ring_type) {
1963 	case REO_REINJECT:
1964 	case REO_CMD:
1965 	case TCL_DATA:
1966 	case TCL_CMD_CREDIT:
1967 	case TCL_STATUS:
1968 	case WBM_IDLE_LINK:
1969 	case SW2WBM_RELEASE:
1970 	case SW2RXDMA_NEW:
1971 		ring_params->intr_timer_thres_us = 0;
1972 		ring_params->intr_batch_cntr_thres_entries = 0;
1973 		break;
1974 	case PPE2TCL:
1975 		ring_params->intr_timer_thres_us =
1976 			wlan_cfg_get_int_timer_threshold_ppe2tcl(soc->wlan_cfg_ctx);
1977 		ring_params->intr_batch_cntr_thres_entries =
1978 			wlan_cfg_get_int_batch_threshold_ppe2tcl(soc->wlan_cfg_ctx);
1979 		break;
1980 	}
1981 
	/* Enable low threshold interrupts for rx buffer rings (regular and
	 * monitor buffer rings).
	 * TODO: See if this is required for any other ring
	 */
1986 	if ((ring_type == RXDMA_MONITOR_BUF) ||
1987 	    (ring_type == RXDMA_MONITOR_STATUS ||
1988 	    (ring_type == TX_MONITOR_BUF))) {
1989 		/* TODO: Setting low threshold to 1/8th of ring size
1990 		 * see if this needs to be configurable
1991 		 */
1992 		ring_params->low_threshold = num_entries >> 3;
1993 		ring_params->intr_timer_thres_us =
1994 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1995 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1996 		ring_params->intr_batch_cntr_thres_entries = 0;
1997 	}
1998 
1999 	/* During initialisation monitor rings are only filled with
2000 	 * MON_BUF_MIN_ENTRIES entries. So low threshold needs to be set to
2001 	 * a value less than that. Low threshold value is reconfigured again
2002 	 * to 1/8th of the ring size when monitor vap is created.
2003 	 */
2004 	if (ring_type == RXDMA_MONITOR_BUF)
2005 		ring_params->low_threshold = MON_BUF_MIN_ENTRIES >> 1;
2006 
2007 	/* In case of PCI chipsets, we dont have PPDU end interrupts,
2008 	 * so MONITOR STATUS ring is reaped by receiving MSI from srng.
2009 	 * Keep batch threshold as 8 so that interrupt is received for
2010 	 * every 4 packets in MONITOR_STATUS ring
2011 	 */
2012 	if ((ring_type == RXDMA_MONITOR_STATUS) &&
2013 	    (soc->intr_mode == DP_INTR_MSI))
2014 		ring_params->intr_batch_cntr_thres_entries = 4;
2015 }
2016 #endif
2017 
2018 #ifdef DP_MEM_PRE_ALLOC
2019 
2020 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
2021 			   size_t ctxt_size)
2022 {
2023 	void *ctxt_mem;
2024 
2025 	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_context) {
2026 		dp_warn("dp_prealloc_get_context null!");
2027 		goto dynamic_alloc;
2028 	}
2029 
2030 	ctxt_mem = soc->cdp_soc.ol_ops->dp_prealloc_get_context(ctxt_type,
2031 								ctxt_size);
2032 
2033 	if (ctxt_mem)
2034 		goto end;
2035 
2036 dynamic_alloc:
2037 	dp_info("switch to dynamic-alloc for type %d, size %zu",
2038 		ctxt_type, ctxt_size);
2039 	ctxt_mem = qdf_mem_malloc(ctxt_size);
2040 end:
2041 	return ctxt_mem;
2042 }
2043 
2044 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
2045 			 void *vaddr)
2046 {
2047 	QDF_STATUS status;
2048 
2049 	if (soc->cdp_soc.ol_ops->dp_prealloc_put_context) {
2050 		status = soc->cdp_soc.ol_ops->dp_prealloc_put_context(
2051 								ctxt_type,
2052 								vaddr);
2053 	} else {
2054 		dp_warn("dp_prealloc_put_context null!");
2055 		status = QDF_STATUS_E_NOSUPPORT;
2056 	}
2057 
2058 	if (QDF_IS_STATUS_ERROR(status)) {
2059 		dp_info("Context type %d not pre-allocated", ctxt_type);
2060 		qdf_mem_free(vaddr);
2061 	}
2062 }
2063 
2064 static inline
2065 void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
2066 					   struct dp_srng *srng,
2067 					   uint32_t ring_type)
2068 {
2069 	void *mem;
2070 
2071 	qdf_assert(!srng->is_mem_prealloc);
2072 
2073 	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_consistent) {
2074 		dp_warn("dp_prealloc_get_consistent is null!");
2075 		goto qdf;
2076 	}
2077 
2078 	mem =
2079 		soc->cdp_soc.ol_ops->dp_prealloc_get_consistent
2080 						(&srng->alloc_size,
2081 						 &srng->base_vaddr_unaligned,
2082 						 &srng->base_paddr_unaligned,
2083 						 &srng->base_paddr_aligned,
2084 						 DP_RING_BASE_ALIGN, ring_type);
2085 
2086 	if (mem) {
2087 		srng->is_mem_prealloc = true;
2088 		goto end;
2089 	}
2090 qdf:
2091 	mem =  qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
2092 						&srng->base_vaddr_unaligned,
2093 						&srng->base_paddr_unaligned,
2094 						&srng->base_paddr_aligned,
2095 						DP_RING_BASE_ALIGN);
2096 end:
2097 	dp_info("%s memory %pK dp_srng %pK ring_type %d alloc_size %d num_entries %d",
2098 		srng->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", mem,
2099 		srng, ring_type, srng->alloc_size, srng->num_entries);
2100 	return mem;
2101 }
2102 
2103 static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
2104 					       struct dp_srng *srng)
2105 {
2106 	if (srng->is_mem_prealloc) {
2107 		if (!soc->cdp_soc.ol_ops->dp_prealloc_put_consistent) {
2108 			dp_warn("dp_prealloc_put_consistent is null!");
2109 			QDF_BUG(0);
2110 			return;
2111 		}
2112 		soc->cdp_soc.ol_ops->dp_prealloc_put_consistent
2113 						(srng->alloc_size,
2114 						 srng->base_vaddr_unaligned,
2115 						 srng->base_paddr_unaligned);
2116 
2117 	} else {
2118 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2119 					srng->alloc_size,
2120 					srng->base_vaddr_unaligned,
2121 					srng->base_paddr_unaligned, 0);
2122 	}
2123 }
2124 
2125 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
2126 				   enum dp_desc_type desc_type,
2127 				   struct qdf_mem_multi_page_t *pages,
2128 				   size_t element_size,
2129 				   uint32_t element_num,
2130 				   qdf_dma_context_t memctxt,
2131 				   bool cacheable)
2132 {
2133 	if (!soc->cdp_soc.ol_ops->dp_get_multi_pages) {
2134 		dp_warn("dp_get_multi_pages is null!");
2135 		goto qdf;
2136 	}
2137 
2138 	pages->num_pages = 0;
2139 	pages->is_mem_prealloc = 0;
2140 	soc->cdp_soc.ol_ops->dp_get_multi_pages(desc_type,
2141 						element_size,
2142 						element_num,
2143 						pages,
2144 						cacheable);
2145 	if (pages->num_pages)
2146 		goto end;
2147 
2148 qdf:
2149 	qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size,
2150 				  element_num, memctxt, cacheable);
2151 end:
2152 	dp_info("%s desc_type %d element_size %d element_num %d cacheable %d",
2153 		pages->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc",
2154 		desc_type, (int)element_size, element_num, cacheable);
2155 }
2156 
2157 void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
2158 				  enum dp_desc_type desc_type,
2159 				  struct qdf_mem_multi_page_t *pages,
2160 				  qdf_dma_context_t memctxt,
2161 				  bool cacheable)
2162 {
2163 	if (pages->is_mem_prealloc) {
2164 		if (!soc->cdp_soc.ol_ops->dp_put_multi_pages) {
2165 			dp_warn("dp_put_multi_pages is null!");
2166 			QDF_BUG(0);
2167 			return;
2168 		}
2169 
2170 		soc->cdp_soc.ol_ops->dp_put_multi_pages(desc_type, pages);
2171 		qdf_mem_zero(pages, sizeof(*pages));
2172 	} else {
2173 		qdf_mem_multi_pages_free(soc->osdev, pages,
2174 					 memctxt, cacheable);
2175 	}
2176 }
2177 
2178 #else
2179 
2180 static inline
2181 void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
2182 					   struct dp_srng *srng,
2183 					   uint32_t ring_type)
2184 
2185 {
2186 	void *mem;
2187 
2188 	mem = qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
2189 					       &srng->base_vaddr_unaligned,
2190 					       &srng->base_paddr_unaligned,
2191 					       &srng->base_paddr_aligned,
2192 					       DP_RING_BASE_ALIGN);
2193 	if (mem)
2194 		qdf_mem_set(srng->base_vaddr_unaligned, 0, srng->alloc_size);
2195 
2196 	return mem;
2197 }
2198 
/* Non-prealloc build: coherent ring memory always goes back to the OS */
static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
					       struct dp_srng *srng)
{
	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
				srng->alloc_size,
				srng->base_vaddr_unaligned,
				srng->base_paddr_unaligned, 0);
}
2207 
2208 #endif /* DP_MEM_PRE_ALLOC */
2209 
2210 #ifdef QCA_SUPPORT_WDS_EXTENDED
/* WDS extension support compiled in: report the per-vdev flag */
static bool dp_vdev_is_wds_ext_enabled(struct dp_vdev *vdev)
{
	return vdev->wds_ext_enabled;
}
2215 #else
/* WDS extension support compiled out: never enabled */
static bool dp_vdev_is_wds_ext_enabled(struct dp_vdev *vdev)
{
	return false;
}
2220 #endif
2221 
2222 void dp_pdev_update_fast_rx_flag(struct dp_soc *soc, struct dp_pdev *pdev)
2223 {
2224 	struct dp_vdev *vdev = NULL;
2225 	uint8_t rx_fast_flag = true;
2226 
2227 	if (wlan_cfg_is_rx_flow_tag_enabled(soc->wlan_cfg_ctx)) {
2228 		rx_fast_flag = false;
2229 		goto update_flag;
2230 	}
2231 
2232 	/* Check if protocol tagging enable */
2233 	if (pdev->is_rx_protocol_tagging_enabled) {
2234 		rx_fast_flag = false;
2235 		goto update_flag;
2236 	}
2237 
2238 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
2239 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
2240 		/* Check if any VDEV has NAWDS enabled */
2241 		if (vdev->nawds_enabled) {
2242 			rx_fast_flag = false;
2243 			break;
2244 		}
2245 
2246 		/* Check if any VDEV has multipass enabled */
2247 		if (vdev->multipass_en) {
2248 			rx_fast_flag = false;
2249 			break;
2250 		}
2251 
2252 		/* Check if any VDEV has mesh enabled */
2253 		if (vdev->mesh_vdev) {
2254 			rx_fast_flag = false;
2255 			break;
2256 		}
2257 
2258 		/* Check if any VDEV has WDS ext enabled */
2259 		if (dp_vdev_is_wds_ext_enabled(vdev)) {
2260 			rx_fast_flag = false;
2261 			break;
2262 		}
2263 	}
2264 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
2265 
2266 update_flag:
2267 	dp_init_info("Updated Rx fast flag to %u", rx_fast_flag);
2268 	pdev->rx_fast_flag = rx_fast_flag;
2269 }
2270 
2271 /*
2272  * dp_srng_free() - Free SRNG memory
2273  * @soc  : Data path soc handle
2274  * @srng : SRNG pointer
2275  *
2276  * return: None
2277  */
2278 void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng)
2279 {
2280 	if (srng->alloc_size && srng->base_vaddr_unaligned) {
2281 		if (!srng->cached) {
2282 			dp_srng_mem_free_consistent(soc, srng);
2283 		} else {
2284 			qdf_mem_free(srng->base_vaddr_unaligned);
2285 		}
2286 		srng->alloc_size = 0;
2287 		srng->base_vaddr_unaligned = NULL;
2288 	}
2289 	srng->hal_srng = NULL;
2290 }
2291 
2292 qdf_export_symbol(dp_srng_free);
2293 
2294 #ifdef DISABLE_MON_RING_MSI_CFG
2295 /*
2296  * dp_skip_msi_cfg() - Check if msi cfg has to be skipped for ring_type
2297  * @ring_type: sring type
2298  *
2299  * Return: True if msi cfg should be skipped for srng type else false
2300  */
2301 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
2302 {
2303 	if (ring_type == RXDMA_MONITOR_STATUS)
2304 		return true;
2305 
2306 	return false;
2307 }
2308 #else
2309 #ifdef DP_CON_MON_MSI_ENABLED
2310 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
2311 {
2312 	if (soc->cdp_soc.ol_ops->get_con_mode &&
2313 	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE) {
2314 		if (ring_type == REO_DST || ring_type == RXDMA_DST)
2315 			return true;
2316 	} else if (ring_type == RXDMA_MONITOR_STATUS) {
2317 		return true;
2318 	}
2319 
2320 	return false;
2321 }
2322 #else
static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
{
	/* Default build: MSI configuration is never skipped */
	return false;
}
2327 #endif /* DP_CON_MON_MSI_ENABLED */
2328 #endif /* DISABLE_MON_RING_MSI_CFG */
2329 
2330 #ifdef DP_UMAC_HW_RESET_SUPPORT
2331 static bool dp_check_umac_reset_in_progress(struct dp_soc *soc)
2332 {
2333 	return !!soc->umac_reset_ctx.intr_ctx_bkp;
2334 }
2335 #else
/* UMAC HW reset support compiled out: a reset is never in progress */
static bool dp_check_umac_reset_in_progress(struct dp_soc *soc)
{
	return false;
}
2340 #endif
2341 
2342 /*
2343  * dp_srng_init_idx() - Initialize SRNG
2344  * @soc  : Data path soc handle
2345  * @srng : SRNG pointer
2346  * @ring_type : Ring Type
2347  * @ring_num: Ring number
2348  * @mac_id: mac_id
2349  * @idx: ring index
2350  *
2351  * return: QDF_STATUS
2352  */
QDF_STATUS dp_srng_init_idx(struct dp_soc *soc, struct dp_srng *srng,
			    int ring_type, int ring_num, int mac_id,
			    uint32_t idx)
{
	bool idle_check;

	hal_soc_handle_t hal_soc = soc->hal_soc;
	struct hal_srng_params ring_params;

	/* Idempotent: a ring that is already initialized is left untouched */
	if (srng->hal_srng) {
		dp_init_err("%pK: Ring type: %d, num:%d is already initialized",
			    soc, ring_type, ring_num);
		return QDF_STATUS_SUCCESS;
	}

	/* memset the srng ring to zero */
	qdf_mem_zero(srng->base_vaddr_unaligned, srng->alloc_size);

	qdf_mem_zero(&ring_params, sizeof(struct hal_srng_params));
	ring_params.ring_base_paddr = srng->base_paddr_aligned;
	ring_params.ring_base_vaddr = srng->base_vaddr_aligned;

	ring_params.num_entries = srng->num_entries;

	dp_info("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
		ring_type, ring_num,
		(void *)ring_params.ring_base_vaddr,
		(void *)ring_params.ring_base_paddr,
		ring_params.num_entries);

	/* Program the MSI vector only when MSI mode is active and the ring
	 * type is not excluded from MSI configuration.
	 */
	if (soc->intr_mode == DP_INTR_MSI && !dp_skip_msi_cfg(soc, ring_type)) {
		dp_srng_msi_setup(soc, srng, &ring_params, ring_type, ring_num);
		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
				 ring_type, ring_num);
	} else {
		ring_params.msi_data = 0;
		ring_params.msi_addr = 0;
		dp_srng_set_msi2_ring_params(soc, &ring_params, 0, 0);
		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
				 ring_type, ring_num);
	}

	/* Batch-count/timer interrupt mitigation per ring type */
	dp_srng_configure_interrupt_thresholds(soc, &ring_params,
					       ring_type, ring_num,
					       srng->num_entries);

	/* Near-full interrupt thresholds (no-op when feature is disabled) */
	dp_srng_set_nf_thresholds(soc, srng, &ring_params);

	if (srng->cached)
		ring_params.flags |= HAL_SRNG_CACHED_DESC;

	/* Tell HAL whether a UMAC reset is currently in progress */
	idle_check = dp_check_umac_reset_in_progress(soc);

	srng->hal_srng = hal_srng_setup_idx(hal_soc, ring_type, ring_num,
					    mac_id, &ring_params, idle_check,
					    idx);

	/* HAL setup failed: release the ring memory as well */
	if (!srng->hal_srng) {
		dp_srng_free(soc, srng);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
2417 
2418 qdf_export_symbol(dp_srng_init_idx);
2419 
2420 /*
2421  * dp_srng_init() - Initialize SRNG
2422  * @soc  : Data path soc handle
2423  * @srng : SRNG pointer
2424  * @ring_type : Ring Type
2425  * @ring_num: Ring number
2426  * @mac_id: mac_id
2427  *
2428  * return: QDF_STATUS
2429  */
QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng, int ring_type,
			int ring_num, int mac_id)
{
	/* Plain init: start from ring index 0 */
	return dp_srng_init_idx(soc, srng, ring_type, ring_num, mac_id, 0);
}
2435 
2436 qdf_export_symbol(dp_srng_init);
2437 /*
2438  * dp_srng_alloc() - Allocate memory for SRNG
2439  * @soc  : Data path soc handle
2440  * @srng : SRNG pointer
2441  * @ring_type : Ring Type
2442  * @num_entries: Number of entries
2443  * @cached: cached flag variable
2444  *
2445  * return: QDF_STATUS
2446  */
2447 QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
2448 			 int ring_type, uint32_t num_entries,
2449 			 bool cached)
2450 {
2451 	hal_soc_handle_t hal_soc = soc->hal_soc;
2452 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
2453 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
2454 
2455 	if (srng->base_vaddr_unaligned) {
2456 		dp_init_err("%pK: Ring type: %d, is already allocated",
2457 			    soc, ring_type);
2458 		return QDF_STATUS_SUCCESS;
2459 	}
2460 
2461 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
2462 	srng->hal_srng = NULL;
2463 	srng->alloc_size = num_entries * entry_size;
2464 	srng->num_entries = num_entries;
2465 	srng->cached = cached;
2466 
2467 	if (!cached) {
2468 		srng->base_vaddr_aligned =
2469 		    dp_srng_aligned_mem_alloc_consistent(soc,
2470 							 srng,
2471 							 ring_type);
2472 	} else {
2473 		srng->base_vaddr_aligned = qdf_aligned_malloc(
2474 					&srng->alloc_size,
2475 					&srng->base_vaddr_unaligned,
2476 					&srng->base_paddr_unaligned,
2477 					&srng->base_paddr_aligned,
2478 					DP_RING_BASE_ALIGN);
2479 	}
2480 
2481 	if (!srng->base_vaddr_aligned)
2482 		return QDF_STATUS_E_NOMEM;
2483 
2484 	return QDF_STATUS_SUCCESS;
2485 }
2486 
2487 qdf_export_symbol(dp_srng_alloc);
2488 
2489 /*
2490  * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
2491  * @soc: DP SOC handle
2492  * @srng: source ring structure
2493  * @ring_type: type of ring
2494  * @ring_num: ring number
2495  *
2496  * Return: None
2497  */
2498 void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
2499 		    int ring_type, int ring_num)
2500 {
2501 	if (!srng->hal_srng) {
2502 		dp_init_err("%pK: Ring type: %d, num:%d not setup",
2503 			    soc, ring_type, ring_num);
2504 		return;
2505 	}
2506 
2507 	if (soc->arch_ops.dp_free_ppeds_interrupts)
2508 		soc->arch_ops.dp_free_ppeds_interrupts(soc, srng, ring_type,
2509 						       ring_num);
2510 
2511 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
2512 	srng->hal_srng = NULL;
2513 }
2514 
2515 qdf_export_symbol(dp_srng_deinit);
2516 
2517 /* TODO: Need this interface from HIF */
2518 void *hif_get_hal_handle(struct hif_opaque_softc *hif_handle);
2519 
2520 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
2521 int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
2522 			 hal_ring_handle_t hal_ring_hdl)
2523 {
2524 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2525 	uint32_t hp, tp;
2526 	uint8_t ring_id;
2527 
2528 	if (!int_ctx)
2529 		return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
2530 
2531 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
2532 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2533 
2534 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
2535 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_START);
2536 
2537 	return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
2538 }
2539 
2540 void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
2541 			hal_ring_handle_t hal_ring_hdl)
2542 {
2543 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2544 	uint32_t hp, tp;
2545 	uint8_t ring_id;
2546 
2547 	if (!int_ctx)
2548 		return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
2549 
2550 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
2551 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2552 
2553 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
2554 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_END);
2555 
2556 	return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
2557 }
2558 
/* Log a monitor-timer entry event into the HIF event history */
static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
					      uint8_t hist_group_id)
{
	hif_record_event(dp_soc->hif_handle, hist_group_id,
			 0, 0, 0, HIF_EVENT_TIMER_ENTRY);
}
2565 
/* Log a monitor-timer exit event into the HIF event history */
static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
					     uint8_t hist_group_id)
{
	hif_record_event(dp_soc->hif_handle, hist_group_id,
			 0, 0, 0, HIF_EVENT_TIMER_EXIT);
}
2572 #else
2573 
/* Event history compiled out: no-op */
static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
					      uint8_t hist_group_id)
{
}
2578 
/* Event history compiled out: no-op */
static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
					     uint8_t hist_group_id)
{
}
2583 
2584 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
2585 
2586 /*
2587  * dp_should_timer_irq_yield() - Decide if the bottom half should yield
2588  * @soc: DP soc handle
2589  * @work_done: work done in softirq context
2590  * @start_time: start time for the softirq
2591  *
2592  * Return: enum with yield code
2593  */
2594 enum timer_yield_status
2595 dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
2596 			  uint64_t start_time)
2597 {
2598 	uint64_t cur_time = qdf_get_log_timestamp();
2599 
2600 	if (!work_done)
2601 		return DP_TIMER_WORK_DONE;
2602 
2603 	if (cur_time - start_time > DP_MAX_TIMER_EXEC_TIME_TICKS)
2604 		return DP_TIMER_TIME_EXHAUST;
2605 
2606 	return DP_TIMER_NO_YIELD;
2607 }
2608 
2609 qdf_export_symbol(dp_should_timer_irq_yield);
2610 
/* Thin wrapper: reap the RXDMA error/destination ring for one mac */
static int dp_process_rxdma_dst_ring(struct dp_soc *soc,
				     struct dp_intr *int_ctx,
				     int mac_for_pdev,
				     int total_budget)
{
	return dp_rxdma_err_process(int_ctx, soc, mac_for_pdev,
				    total_budget);
}
2619 
2620 /**
2621  * dp_process_lmac_rings() - Process LMAC rings
2622  * @int_ctx: interrupt context
2623  * @total_budget: budget of work which can be done
2624  *
2625  * Return: work done
2626  */
static int dp_process_lmac_rings(struct dp_intr *int_ctx, int total_budget)
{
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_soc *soc = int_ctx->soc;
	uint32_t remaining_quota = total_budget;
	struct dp_pdev *pdev = NULL;
	uint32_t work_done  = 0;
	int budget = total_budget;
	int ring = 0;

	/* Process LMAC interrupts */
	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
		int mac_for_pdev = ring;

		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
		if (!pdev)
			continue;
		/* Rx monitor ring for this mac */
		if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
			work_done = dp_monitor_process(soc, int_ctx,
						       mac_for_pdev,
						       remaining_quota);
			if (work_done)
				intr_stats->num_rx_mon_ring_masks++;
			budget -= work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}

		/* Tx monitor ring for this mac */
		if (int_ctx->tx_mon_ring_mask & (1 << mac_for_pdev)) {
			work_done = dp_tx_mon_process(soc, int_ctx,
						      mac_for_pdev,
						      remaining_quota);
			if (work_done)
				intr_stats->num_tx_mon_ring_masks++;
			budget -= work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}

		/* RXDMA destination (error) ring for this mac */
		if (int_ctx->rxdma2host_ring_mask &
				(1 << mac_for_pdev)) {
			work_done = dp_process_rxdma_dst_ring(soc, int_ctx,
							      mac_for_pdev,
							      remaining_quota);
			if (work_done)
				intr_stats->num_rxdma2host_ring_masks++;
			budget -=  work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}

		/* Host-to-RXDMA buffer refill ring for this mac */
		if (int_ctx->host2rxdma_ring_mask & (1 << mac_for_pdev)) {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;
			struct dp_srng *rx_refill_buf_ring;
			struct rx_desc_pool *rx_desc_pool;

			rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
			if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
				rx_refill_buf_ring =
					&soc->rx_refill_buf_ring[mac_for_pdev];
			else
				rx_refill_buf_ring =
					&soc->rx_refill_buf_ring[pdev->lmac_id];

			intr_stats->num_host2rxdma_ring_masks++;
			/* Refill does not consume the processing budget */
			dp_rx_buffers_lt_replenish_simple(soc, mac_for_pdev,
							  rx_refill_buf_ring,
							  rx_desc_pool,
							  0,
							  &desc_list,
							  &tail);
		}

	}

	if (int_ctx->host2rxdma_mon_ring_mask)
		dp_rx_mon_buf_refill(int_ctx);

	if (int_ctx->host2txmon_ring_mask)
		dp_tx_mon_buf_refill(int_ctx);

budget_done:
	return total_budget - budget;
}
2715 
2716 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
2717 /**
2718  * dp_service_near_full_srngs() - Bottom half handler to process the near
2719  *				full IRQ on a SRNG
2720  * @dp_ctx: Datapath SoC handle
2721  * @dp_budget: Number of SRNGs which can be processed in a single attempt
2722  *		without rescheduling
2723  * @cpu: cpu id
2724  *
2725  * Return: remaining budget/quota for the soc device
2726  */
2727 static
2728 uint32_t dp_service_near_full_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
2729 {
2730 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2731 	struct dp_soc *soc = int_ctx->soc;
2732 
2733 	/*
2734 	 * dp_service_near_full_srngs arch ops should be initialized always
2735 	 * if the NEAR FULL IRQ feature is enabled.
2736 	 */
2737 	return soc->arch_ops.dp_service_near_full_srngs(soc, int_ctx,
2738 							dp_budget);
2739 }
2740 #endif
2741 
2742 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2743 
2744 /*
2745  * dp_srng_get_cpu() - Get the smp processor id for srng processing
2746  *
2747  * Return: smp processor id
2748  */
static inline int dp_srng_get_cpu(void)
{
	/* Host mode: the real CPU id is used for per-CPU ring bookkeeping */
	return smp_processor_id();
}
2753 
2754 /*
2755  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
2756  * @dp_ctx: DP SOC handle
2757  * @budget: Number of frames/descriptors that can be processed in one shot
2758  * @cpu: CPU on which this instance is running
2759  *
2760  * Return: remaining budget/quota for the soc device
2761  */
static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
{
	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_soc *soc = int_ctx->soc;
	int ring = 0;
	int index;
	uint32_t work_done  = 0;
	int budget = dp_budget;
	uint8_t tx_mask = int_ctx->tx_ring_mask;
	uint8_t rx_mask = int_ctx->rx_ring_mask;
	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
	uint32_t remaining_quota = dp_budget;

	/* Mark this CPU as actively servicing rings for the whole pass */
	qdf_atomic_set_bit(cpu, &soc->service_rings_running);

	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x\n",
			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
			 reo_status_mask,
			 int_ctx->rx_mon_ring_mask,
			 int_ctx->host2rxdma_ring_mask,
			 int_ctx->rxdma2host_ring_mask);

	/* Process Tx completion interrupts first to return back buffers */
	for (index = 0; index < soc->num_tx_comp_rings; index++) {
		if (!(1 << wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) & tx_mask))
			continue;
		work_done = dp_tx_comp_handler(int_ctx,
					       soc,
					       soc->tx_comp_ring[index].hal_srng,
					       index, remaining_quota);
		if (work_done) {
			intr_stats->num_tx_ring_masks[index]++;
			dp_verbose_debug("tx mask 0x%x index %d, budget %d, work_done %d",
					 tx_mask, index, budget,
					 work_done);
		}
		budget -= work_done;
		if (budget <= 0)
			goto budget_done;

		remaining_quota = budget;
	}

	/* Process REO Exception ring interrupt */
	if (rx_err_mask) {
		work_done = dp_rx_err_process(int_ctx, soc,
					      soc->reo_exception_ring.hal_srng,
					      remaining_quota);

		if (work_done) {
			intr_stats->num_rx_err_ring_masks++;
			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -=  work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx WBM release ring interrupt */
	if (rx_wbm_rel_mask) {
		work_done = dp_rx_wbm_err_process(int_ctx, soc,
						  soc->rx_rel_ring.hal_srng,
						  remaining_quota);

		if (work_done) {
			intr_stats->num_rx_wbm_rel_ring_masks++;
			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -=  work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx interrupts */
	if (rx_mask) {
		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
			if (!(rx_mask & (1 << ring)))
				continue;
			work_done = soc->arch_ops.dp_rx_process(int_ctx,
						  soc->reo_dest_ring[ring].hal_srng,
						  ring,
						  remaining_quota);
			if (work_done) {
				intr_stats->num_rx_ring_masks[ring]++;
				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
						 rx_mask, ring,
						 work_done, budget);
				budget -=  work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}
		}
	}

	/* REO status ring (command completions) */
	if (reo_status_mask) {
		if (dp_reo_status_ring_handler(int_ctx, soc))
			int_ctx->intr_stats.num_reo_status_ring_masks++;
	}

	/* LMAC rings are serviced here only when the vdev timer that
	 * otherwise polls them is not running.
	 */
	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
		if (work_done) {
			budget -=  work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}
	}

	qdf_lro_flush(int_ctx->lro_ctx);
	intr_stats->num_masks++;

budget_done:
	qdf_atomic_clear_bit(cpu, &soc->service_rings_running);

	if (soc->notify_fw_callback)
		soc->notify_fw_callback(soc);

	return dp_budget - budget;
}
2894 
2895 #else /* QCA_HOST_MODE_WIFI_DISABLED */
2896 
2897 /*
2898  * dp_srng_get_cpu() - Get the smp processor id for srng processing
2899  *
2900  * Return: smp processor id
2901  */
static inline int dp_srng_get_cpu(void)
{
	/* Host mode disabled: CPU id is not tracked, always report 0 */
	return 0;
}
2906 
2907 /*
2908  * dp_service_srngs() - Top level handler for DP Monitor Ring interrupts
2909  * @dp_ctx: DP SOC handle
2910  * @budget: Number of frames/descriptors that can be processed in one shot
2911  *
2912  * Return: remaining budget/quota for the soc device
2913  */
static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)
{
	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_soc *soc = int_ctx->soc;
	uint32_t remaining_quota = dp_budget;
	uint32_t work_done  = 0;
	int budget = dp_budget;
	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;

	/* REO status ring (command completions) */
	if (reo_status_mask) {
		if (dp_reo_status_ring_handler(int_ctx, soc))
			int_ctx->intr_stats.num_reo_status_ring_masks++;
	}

	/* LMAC rings are serviced here only when the vdev timer that
	 * otherwise polls them is not running.
	 */
	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
		if (work_done) {
			budget -=  work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}
	}

	qdf_lro_flush(int_ctx->lro_ctx);
	intr_stats->num_masks++;

budget_done:
	return dp_budget - budget;
}
2945 
2946 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2947 
2948 /* dp_interrupt_timer()- timer poll for interrupts
2949  *
2950  * @arg: SoC Handle
2951  *
2952  * Return:
2953  *
2954  */
static void dp_interrupt_timer(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *) arg;
	struct dp_pdev *pdev = soc->pdev_list[0];
	enum timer_yield_status yield = DP_TIMER_NO_YIELD;
	uint32_t work_done  = 0, total_work_done = 0;
	int budget = 0xffff, i;
	uint32_t remaining_quota = budget;
	uint64_t start_time;
	uint32_t lmac_id = DP_MON_INVALID_LMAC_ID;
	uint8_t dp_intr_id = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
	uint32_t lmac_iter;
	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
	enum reg_wifi_band mon_band;
	int cpu = dp_srng_get_cpu();

	/*
	 * this logic makes all data path interfacing rings (UMAC/LMAC)
	 * and Monitor rings polling mode when NSS offload is disabled
	 */
	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx) &&
	    !wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
		if (qdf_atomic_read(&soc->cmn_init_done)) {
			/* Service every interrupt context, then re-arm */
			for (i = 0; i < wlan_cfg_get_num_contexts(
						soc->wlan_cfg_ctx); i++)
				dp_service_srngs(&soc->intr_ctx[i], 0xffff,
						 cpu);

			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
		}
		return;
	}

	if (!qdf_atomic_read(&soc->cmn_init_done))
		return;

	/* Resolve which lmac (and interrupt context) carries the monitor
	 * channel; packets on other lmacs will be dropped below.
	 */
	if (dp_monitor_is_chan_band_known(pdev)) {
		mon_band = dp_monitor_get_chan_band(pdev);
		lmac_id = pdev->ch_band_lmac_id_mapping[mon_band];
		if (qdf_likely(lmac_id != DP_MON_INVALID_LMAC_ID)) {
			dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id];
			dp_srng_record_timer_entry(soc, dp_intr_id);
		}
	}

	start_time = qdf_get_log_timestamp();
	dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);

	/* Poll until the rings drain or the time/work budget is spent */
	while (yield == DP_TIMER_NO_YIELD) {
		for (lmac_iter = 0; lmac_iter < max_mac_rings; lmac_iter++) {
			if (lmac_iter == lmac_id)
				work_done = dp_monitor_process(soc,
						&soc->intr_ctx[dp_intr_id],
						lmac_iter, remaining_quota);
			else
				work_done =
					dp_monitor_drop_packets_for_mac(pdev,
							     lmac_iter,
							     remaining_quota);
			if (work_done) {
				budget -=  work_done;
				if (budget <= 0) {
					yield = DP_TIMER_WORK_EXHAUST;
					goto budget_done;
				}
				remaining_quota = budget;
				total_work_done += work_done;
			}
		}

		yield = dp_should_timer_irq_yield(soc, total_work_done,
						  start_time);
		total_work_done = 0;
	}

budget_done:
	/* Re-arm aggressively (1ms) if we yielded with work pending,
	 * otherwise at the normal poll interval.
	 */
	if (yield == DP_TIMER_WORK_EXHAUST ||
	    yield == DP_TIMER_TIME_EXHAUST)
		qdf_timer_mod(&soc->int_timer, 1);
	else
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

	if (lmac_id != DP_MON_INVALID_LMAC_ID)
		dp_srng_record_timer_exit(soc, dp_intr_id);
}
3040 
3041 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
3042 static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
3043 					struct dp_intr *intr_ctx)
3044 {
3045 	if (intr_ctx->rx_mon_ring_mask)
3046 		return true;
3047 
3048 	return false;
3049 }
3050 #else
3051 static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
3052 					struct dp_intr *intr_ctx)
3053 {
3054 	return false;
3055 }
3056 #endif
3057 
3058 /*
3059  * dp_soc_attach_poll() - Register handlers for DP interrupts
3060  * @txrx_soc: DP SOC handle
3061  *
3062  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
3063  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
3064  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
3065  *
3066  * Return: 0 for success, nonzero for failure.
3067  */
3068 static QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc)
3069 {
3070 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3071 	int i;
3072 	int lmac_id = 0;
3073 
3074 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
3075 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
3076 	soc->intr_mode = DP_INTR_POLL;
3077 
3078 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
3079 		soc->intr_ctx[i].dp_intr_id = i;
3080 		soc->intr_ctx[i].tx_ring_mask =
3081 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
3082 		soc->intr_ctx[i].rx_ring_mask =
3083 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
3084 		soc->intr_ctx[i].rx_mon_ring_mask =
3085 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
3086 		soc->intr_ctx[i].rx_err_ring_mask =
3087 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
3088 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
3089 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
3090 		soc->intr_ctx[i].reo_status_ring_mask =
3091 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
3092 		soc->intr_ctx[i].rxdma2host_ring_mask =
3093 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
3094 		soc->intr_ctx[i].soc = soc;
3095 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
3096 
3097 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
3098 			hif_event_history_init(soc->hif_handle, i);
3099 			soc->mon_intr_id_lmac_map[lmac_id] = i;
3100 			lmac_id++;
3101 		}
3102 	}
3103 
3104 	qdf_timer_init(soc->osdev, &soc->int_timer,
3105 			dp_interrupt_timer, (void *)soc,
3106 			QDF_TIMER_TYPE_WAKE_APPS);
3107 
3108 	return QDF_STATUS_SUCCESS;
3109 }
3110 
3111 /**
3112  * dp_soc_set_interrupt_mode() - Set the interrupt mode in soc
3113  * soc: DP soc handle
3114  *
3115  * Set the appropriate interrupt mode flag in the soc
3116  */
3117 static void dp_soc_set_interrupt_mode(struct dp_soc *soc)
3118 {
3119 	uint32_t msi_base_data, msi_vector_start;
3120 	int msi_vector_count, ret;
3121 
3122 	soc->intr_mode = DP_INTR_INTEGRATED;
3123 
3124 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
3125 	    (dp_is_monitor_mode_using_poll(soc) &&
3126 	     soc->cdp_soc.ol_ops->get_con_mode &&
3127 	     soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)) {
3128 		soc->intr_mode = DP_INTR_POLL;
3129 	} else {
3130 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
3131 						  &msi_vector_count,
3132 						  &msi_base_data,
3133 						  &msi_vector_start);
3134 		if (ret)
3135 			return;
3136 
3137 		soc->intr_mode = DP_INTR_MSI;
3138 	}
3139 }
3140 
3141 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc);
#if defined(DP_INTR_POLL_BOTH)
/*
 * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
 * @txrx_soc: DP SOC handle
 *
 * Call the appropriate attach function based on the mode of operation.
 * This is a WAR for enabling monitor mode.
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	/* Poll when NAPI is disabled, or when monitor mode is serviced by
	 * polling — same policy as dp_soc_set_interrupt_mode().
	 */
	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
	    (dp_is_monitor_mode_using_poll(soc) &&
	     soc->cdp_soc.ol_ops->get_con_mode &&
	     soc->cdp_soc.ol_ops->get_con_mode() ==
	     QDF_GLOBAL_MONITOR_MODE)) {
		dp_info("Poll mode");
		return dp_soc_attach_poll(txrx_soc);
	} else {
		dp_info("Interrupt  mode");
		return dp_soc_interrupt_attach(txrx_soc);
	}
}
#else
#if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
/* Build forces poll mode: always attach the poll timer */
static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
{
	return dp_soc_attach_poll(txrx_soc);
}
#else
/* Choose poll vs. interrupt attach from the configured poll-mode flag */
static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx))
		return dp_soc_attach_poll(txrx_soc);
	else
		return dp_soc_interrupt_attach(txrx_soc);
}
#endif
#endif
3186 
#ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
/**
 * dp_soc_interrupt_map_calculate_wifi3_pci_legacy() -
 * Calculate interrupt map for legacy (INTx) interrupts
 * @soc: DP soc handle
 * @intr_ctx_num: Interrupt context number
 * @irq_id_map: IRQ map (output)
 * @num_irq_r: Number of interrupts assigned for this context (output)
 *
 * For every ring bit set in this context's configured masks, appends the
 * corresponding virtual IRQ id to @irq_id_map (base id minus the ring
 * index within the group). Also switches the soc to legacy virtual IRQ
 * interrupt mode.
 *
 * Return: void
 */
static void dp_soc_interrupt_map_calculate_wifi3_pci_legacy(struct dp_soc *soc,
							    int intr_ctx_num,
							    int *irq_id_map,
							    int *num_irq_r)
{
	int j;
	int num_irq = 0;
	int tx_mask = wlan_cfg_get_tx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask = wlan_cfg_get_rx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	soc->intr_mode = DP_INTR_LEGACY_VIRTUAL_IRQ;
	/* Translate each set mask bit j into that ring's virtual IRQ id */
	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
		if (tx_mask & (1 << j))
			irq_id_map[num_irq++] = (wbm2sw0_release - j);
		if (rx_mask & (1 << j))
			irq_id_map[num_irq++] = (reo2sw1_intr - j);
		if (rx_mon_mask & (1 << j))
			irq_id_map[num_irq++] = (rxmon2sw_p0_dest0 - j);
		if (rx_err_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (reo2sw0_intr - j);
		if (rx_wbm_rel_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (wbm2sw5_release - j);
		if (reo_status_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (reo_status - j);
		if (rxdma2host_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (rxdma2sw_dst_ring0 - j);
		if (host2rxdma_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (sw2rxdma_0 - j);
		if (host2rxdma_mon_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (sw2rxmon_src_ring - j);
	}
	*num_irq_r = num_irq;
}
#else
/**
 * dp_soc_interrupt_map_calculate_wifi3_pci_legacy() -
 * Stub when QCA_SUPPORT_LEGACY_INTERRUPTS is not compiled in
 * @soc: DP soc handle
 * @intr_ctx_num: Interrupt context number
 * @irq_id_map: IRQ map (untouched)
 * @num_irq_r: Number of interrupts assigned for this context (untouched)
 *
 * Return: void
 */
static void dp_soc_interrupt_map_calculate_wifi3_pci_legacy(struct dp_soc *soc,
							    int intr_ctx_num,
							    int *irq_id_map,
							    int *num_irq_r)
{
}
#endif
3264 
/**
 * dp_soc_interrupt_map_calculate_integrated() - Calculate the IRQ map for
 * integrated interrupts
 * @soc: DP soc handle
 * @intr_ctx_num: Interrupt context number
 * @irq_id_map: IRQ map (output)
 * @num_irq_r: Number of interrupts assigned for this context (output)
 *
 * For each ring bit set in this context's configured masks, appends the
 * matching integrated IRQ id(s) to @irq_id_map and switches the soc to
 * integrated interrupt mode.
 *
 * Return: void
 */
static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
{
	int j;
	int num_irq = 0;

	int tx_mask =
		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask =
		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask =
		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2txmon_ring_mask = wlan_cfg_get_host2txmon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int txmon2host_mon_ring_mask = wlan_cfg_get_tx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int umac_reset_mask = wlan_cfg_get_umac_reset_intr_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);

	soc->intr_mode = DP_INTR_INTEGRATED;

	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {

		if (tx_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(wbm2host_tx_completions_ring1 - j);
		}

		if (rx_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(reo2host_destination_ring1 - j);
		}

		if (rxdma2host_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				rxdma2host_destination_ring_mac1 - j;
		}

		if (host2rxdma_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				host2rxdma_host_buf_ring_mac1 -	j;
		}

		if (host2rxdma_mon_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				host2rxdma_monitor_ring1 - j;
		}

		/* Monitor mode maps three IRQs per mac: ppdu end,
		 * monitor status and monitor destination rings.
		 */
		if (rx_mon_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				ppdu_end_interrupts_mac1 - j;
			irq_id_map[num_irq++] =
				rxdma2host_monitor_status_ring_mac1 - j;
			irq_id_map[num_irq++] =
				rxdma2host_monitor_destination_mac1 - j;
		}

		if (rx_wbm_rel_ring_mask & (1 << j))
			irq_id_map[num_irq++] = wbm2host_rx_release;

		if (rx_err_ring_mask & (1 << j))
			irq_id_map[num_irq++] = reo2host_exception;

		if (reo_status_ring_mask & (1 << j))
			irq_id_map[num_irq++] = reo2host_status;

		if (host2txmon_ring_mask & (1 << j))
			irq_id_map[num_irq++] = host2tx_monitor_ring1;

		if (txmon2host_mon_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(txmon2host_monitor_destination_mac1 - j);
		}

		if (umac_reset_mask & (1 << j))
			irq_id_map[num_irq++] = (umac_reset - j);

	}
	*num_irq_r = num_irq;
}
3357 
/**
 * dp_soc_interrupt_map_calculate_msi() - Calculate the IRQ map for MSI
 * interrupts
 * @soc: DP soc handle
 * @intr_ctx_num: Interrupt context number
 * @irq_id_map: IRQ map (output)
 * @num_irq_r: Number of interrupts assigned for this context (output)
 * @msi_vector_count: number of MSI vectors assigned to "DP"
 * @msi_vector_start: first MSI vector index assigned to "DP"
 *
 * Each interrupt context is assigned exactly one MSI vector, chosen
 * round-robin (context number modulo vector count) from the platform's
 * assignment, but only if this context services at least one ring.
 * Switches the soc to MSI interrupt mode.
 *
 * Return: void
 */
static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
		int msi_vector_count, int msi_vector_start)
{
	int tx_mask = wlan_cfg_get_tx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask = wlan_cfg_get_rx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int tx_mon_mask = wlan_cfg_get_tx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_near_full_grp_1_mask =
		wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
						     intr_ctx_num);
	int rx_near_full_grp_2_mask =
		wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
						     intr_ctx_num);
	int tx_ring_near_full_mask =
		wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
						    intr_ctx_num);

	int host2txmon_ring_mask =
		wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx,
						  intr_ctx_num);
	unsigned int vector =
		(intr_ctx_num % msi_vector_count) + msi_vector_start;
	int num_irq = 0;

	soc->intr_mode = DP_INTR_MSI;

	/* Map one MSI vector only when some ring mask is set */
	if (tx_mask | rx_mask | rx_mon_mask | tx_mon_mask | rx_err_ring_mask |
	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask |
	    host2rxdma_ring_mask | host2rxdma_mon_ring_mask |
	    rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
	    tx_ring_near_full_mask | host2txmon_ring_mask)
		irq_id_map[num_irq++] =
			pld_get_msi_irq(soc->osdev->dev, vector);

	*num_irq_r = num_irq;
}
3411 
3412 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
3413 				    int *irq_id_map, int *num_irq)
3414 {
3415 	int msi_vector_count, ret;
3416 	uint32_t msi_base_data, msi_vector_start;
3417 
3418 	if (pld_get_enable_intx(soc->osdev->dev)) {
3419 		return dp_soc_interrupt_map_calculate_wifi3_pci_legacy(soc,
3420 				intr_ctx_num, irq_id_map, num_irq);
3421 	}
3422 
3423 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
3424 					    &msi_vector_count,
3425 					    &msi_base_data,
3426 					    &msi_vector_start);
3427 	if (ret)
3428 		return dp_soc_interrupt_map_calculate_integrated(soc,
3429 				intr_ctx_num, irq_id_map, num_irq);
3430 
3431 	else
3432 		dp_soc_interrupt_map_calculate_msi(soc,
3433 				intr_ctx_num, irq_id_map, num_irq,
3434 				msi_vector_count, msi_vector_start);
3435 }
3436 
#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * dp_soc_near_full_interrupt_attach() - Register handler for DP near-full irq
 * @soc: DP soc handle
 * @num_irq: Number of IRQ ids in @irq_id_map
 * @irq_id_map: IRQ map
 * @intr_id: interrupt context ID
 *
 * Registers a dedicated "dp_nf_intr" NAPI execution group that services
 * the near-full rings through dp_service_near_full_srngs().
 *
 * Return: 0 for success. nonzero for failure.
 */
static inline int
dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
				  int irq_id_map[], int intr_id)
{
	return hif_register_ext_group(soc->hif_handle,
				      num_irq, irq_id_map,
				      dp_service_near_full_srngs,
				      &soc->intr_ctx[intr_id], "dp_nf_intr",
				      HIF_EXEC_NAPI_TYPE,
				      QCA_NAPI_DEF_SCALE_BIN_SHIFT);
}
#else
/**
 * dp_soc_near_full_interrupt_attach() - Stub when the near-full IRQ
 * feature is compiled out; nothing to attach
 * @soc: DP soc handle
 * @num_irq: Number of IRQ ids in @irq_id_map
 * @irq_id_map: IRQ map
 * @intr_id: interrupt context ID
 *
 * Return: always 0 (success)
 */
static inline int
dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
				  int *irq_id_map, int intr_id)
{
	return 0;
}
#endif
3466 
3467 #ifdef DP_CON_MON_MSI_SKIP_SET
3468 static inline bool dp_skip_rx_mon_ring_mask_set(struct dp_soc *soc)
3469 {
3470 	return !!(soc->cdp_soc.ol_ops->get_con_mode() !=
3471 			QDF_GLOBAL_MONITOR_MODE);
3472 }
3473 #else
3474 static inline bool dp_skip_rx_mon_ring_mask_set(struct dp_soc *soc)
3475 {
3476 	return false;
3477 }
3478 #endif
3479 
3480 /*
3481  * dp_soc_ppeds_stop() - Stop PPE DS processing
3482  * @txrx_soc: DP SOC handle
3483  *
3484  * Return: none
3485  */
3486 static void dp_soc_ppeds_stop(struct cdp_soc_t *soc_handle)
3487 {
3488 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
3489 
3490 	if (soc->arch_ops.txrx_soc_ppeds_stop)
3491 		soc->arch_ops.txrx_soc_ppeds_stop(soc);
3492 }
3493 
3494 /*
3495  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
3496  * @txrx_soc: DP SOC handle
3497  *
3498  * Return: none
3499  */
3500 void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
3501 {
3502 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3503 	int i;
3504 
3505 	if (soc->intr_mode == DP_INTR_POLL) {
3506 		qdf_timer_free(&soc->int_timer);
3507 	} else {
3508 		hif_deconfigure_ext_group_interrupts(soc->hif_handle);
3509 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
3510 		hif_deregister_exec_group(soc->hif_handle, "dp_nf_intr");
3511 	}
3512 
3513 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
3514 		soc->intr_ctx[i].tx_ring_mask = 0;
3515 		soc->intr_ctx[i].rx_ring_mask = 0;
3516 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
3517 		soc->intr_ctx[i].rx_err_ring_mask = 0;
3518 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
3519 		soc->intr_ctx[i].reo_status_ring_mask = 0;
3520 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
3521 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
3522 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
3523 		soc->intr_ctx[i].rx_near_full_grp_1_mask = 0;
3524 		soc->intr_ctx[i].rx_near_full_grp_2_mask = 0;
3525 		soc->intr_ctx[i].tx_ring_near_full_mask = 0;
3526 		soc->intr_ctx[i].tx_mon_ring_mask = 0;
3527 		soc->intr_ctx[i].host2txmon_ring_mask = 0;
3528 		soc->intr_ctx[i].umac_reset_intr_mask = 0;
3529 
3530 		hif_event_history_deinit(soc->hif_handle, i);
3531 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
3532 	}
3533 
3534 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
3535 		    sizeof(soc->mon_intr_id_lmac_map),
3536 		    DP_MON_INVALID_LMAC_ID);
3537 }
3538 
3539 /*
3540  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
3541  * @txrx_soc: DP SOC handle
3542  *
3543  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
3544  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
3545  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
3546  *
3547  * Return: 0 for success. nonzero for failure.
3548  */
3549 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
3550 {
3551 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3552 
3553 	int i = 0;
3554 	int num_irq = 0;
3555 	int rx_err_ring_intr_ctxt_id = HIF_MAX_GROUP;
3556 	int lmac_id = 0;
3557 	int napi_scale;
3558 
3559 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
3560 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
3561 
3562 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
3563 		int ret = 0;
3564 
3565 		/* Map of IRQ ids registered with one interrupt context */
3566 		int irq_id_map[HIF_MAX_GRP_IRQ];
3567 
3568 		int tx_mask =
3569 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
3570 		int rx_mask =
3571 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
3572 		int rx_mon_mask =
3573 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
3574 		int tx_mon_ring_mask =
3575 			wlan_cfg_get_tx_mon_ring_mask(soc->wlan_cfg_ctx, i);
3576 		int rx_err_ring_mask =
3577 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
3578 		int rx_wbm_rel_ring_mask =
3579 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
3580 		int reo_status_ring_mask =
3581 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
3582 		int rxdma2host_ring_mask =
3583 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
3584 		int host2rxdma_ring_mask =
3585 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
3586 		int host2rxdma_mon_ring_mask =
3587 			wlan_cfg_get_host2rxdma_mon_ring_mask(
3588 				soc->wlan_cfg_ctx, i);
3589 		int rx_near_full_grp_1_mask =
3590 			wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
3591 							     i);
3592 		int rx_near_full_grp_2_mask =
3593 			wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
3594 							     i);
3595 		int tx_ring_near_full_mask =
3596 			wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
3597 							    i);
3598 		int host2txmon_ring_mask =
3599 			wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx, i);
3600 		int umac_reset_intr_mask =
3601 			wlan_cfg_get_umac_reset_intr_mask(soc->wlan_cfg_ctx, i);
3602 
3603 		if (dp_skip_rx_mon_ring_mask_set(soc))
3604 			rx_mon_mask = 0;
3605 
3606 		soc->intr_ctx[i].dp_intr_id = i;
3607 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
3608 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
3609 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
3610 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
3611 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
3612 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
3613 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
3614 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
3615 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
3616 			 host2rxdma_mon_ring_mask;
3617 		soc->intr_ctx[i].rx_near_full_grp_1_mask =
3618 						rx_near_full_grp_1_mask;
3619 		soc->intr_ctx[i].rx_near_full_grp_2_mask =
3620 						rx_near_full_grp_2_mask;
3621 		soc->intr_ctx[i].tx_ring_near_full_mask =
3622 						tx_ring_near_full_mask;
3623 		soc->intr_ctx[i].tx_mon_ring_mask = tx_mon_ring_mask;
3624 		soc->intr_ctx[i].host2txmon_ring_mask = host2txmon_ring_mask;
3625 		soc->intr_ctx[i].umac_reset_intr_mask = umac_reset_intr_mask;
3626 
3627 		soc->intr_ctx[i].soc = soc;
3628 
3629 		num_irq = 0;
3630 
3631 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
3632 					       &num_irq);
3633 
3634 		if (rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
3635 		    tx_ring_near_full_mask) {
3636 			dp_soc_near_full_interrupt_attach(soc, num_irq,
3637 							  irq_id_map, i);
3638 		} else {
3639 			napi_scale = wlan_cfg_get_napi_scale_factor(
3640 							    soc->wlan_cfg_ctx);
3641 			if (!napi_scale)
3642 				napi_scale = QCA_NAPI_DEF_SCALE_BIN_SHIFT;
3643 
3644 			ret = hif_register_ext_group(soc->hif_handle,
3645 				num_irq, irq_id_map, dp_service_srngs,
3646 				&soc->intr_ctx[i], "dp_intr",
3647 				HIF_EXEC_NAPI_TYPE, napi_scale);
3648 		}
3649 
3650 		dp_debug(" int ctx %u num_irq %u irq_id_map %u %u",
3651 			 i, num_irq, irq_id_map[0], irq_id_map[1]);
3652 
3653 		if (ret) {
3654 			dp_init_err("%pK: failed, ret = %d", soc, ret);
3655 			dp_soc_interrupt_detach(txrx_soc);
3656 			return QDF_STATUS_E_FAILURE;
3657 		}
3658 
3659 		hif_event_history_init(soc->hif_handle, i);
3660 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
3661 
3662 		if (rx_err_ring_mask)
3663 			rx_err_ring_intr_ctxt_id = i;
3664 
3665 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
3666 			soc->mon_intr_id_lmac_map[lmac_id] = i;
3667 			lmac_id++;
3668 		}
3669 	}
3670 
3671 	hif_configure_ext_group_interrupts(soc->hif_handle);
3672 	if (rx_err_ring_intr_ctxt_id != HIF_MAX_GROUP)
3673 		hif_config_irq_clear_cpu_affinity(soc->hif_handle,
3674 						  rx_err_ring_intr_ctxt_id, 0);
3675 
3676 	return QDF_STATUS_SUCCESS;
3677 }
3678 
3679 #define AVG_MAX_MPDUS_PER_TID 128
3680 #define AVG_TIDS_PER_CLIENT 2
3681 #define AVG_FLOWS_PER_TID 2
3682 #define AVG_MSDUS_PER_FLOW 128
3683 #define AVG_MSDUS_PER_MPDU 4
3684 
3685 /*
3686  * dp_hw_link_desc_pool_banks_free() - Free h/w link desc pool banks
3687  * @soc: DP SOC handle
3688  * @mac_id: mac id
3689  *
3690  * Return: none
3691  */
3692 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id)
3693 {
3694 	struct qdf_mem_multi_page_t *pages;
3695 
3696 	if (mac_id != WLAN_INVALID_PDEV_ID) {
3697 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3698 	} else {
3699 		pages = &soc->link_desc_pages;
3700 	}
3701 
3702 	if (!pages) {
3703 		dp_err("can not get link desc pages");
3704 		QDF_ASSERT(0);
3705 		return;
3706 	}
3707 
3708 	if (pages->dma_pages) {
3709 		wlan_minidump_remove((void *)
3710 				     pages->dma_pages->page_v_addr_start,
3711 				     pages->num_pages * pages->page_size,
3712 				     soc->ctrl_psoc,
3713 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3714 				     "hw_link_desc_bank");
3715 		dp_desc_multi_pages_mem_free(soc, DP_HW_LINK_DESC_TYPE,
3716 					     pages, 0, false);
3717 	}
3718 }
3719 
3720 qdf_export_symbol(dp_hw_link_desc_pool_banks_free);
3721 
3722 /*
3723  * dp_hw_link_desc_pool_banks_alloc() - Allocate h/w link desc pool banks
3724  * @soc: DP SOC handle
3725  * @mac_id: mac id
3726  *
3727  * Allocates memory pages for link descriptors, the page size is 4K for
3728  * MCL and 2MB for WIN. if the mac_id is invalid link descriptor pages are
3729  * allocated for regular RX/TX and if the there is a proper mac_id link
3730  * descriptors are allocated for RX monitor mode.
3731  *
3732  * Return: QDF_STATUS_SUCCESS: Success
3733  *	   QDF_STATUS_E_FAILURE: Failure
3734  */
3735 QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc, uint32_t mac_id)
3736 {
3737 	hal_soc_handle_t hal_soc = soc->hal_soc;
3738 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
3739 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
3740 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
3741 	uint32_t num_mpdus_per_link_desc = hal_num_mpdus_per_link_desc(hal_soc);
3742 	uint32_t num_msdus_per_link_desc = hal_num_msdus_per_link_desc(hal_soc);
3743 	uint32_t num_mpdu_links_per_queue_desc =
3744 		hal_num_mpdu_links_per_queue_desc(hal_soc);
3745 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
3746 	uint32_t *total_link_descs, total_mem_size;
3747 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
3748 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
3749 	uint32_t num_entries;
3750 	struct qdf_mem_multi_page_t *pages;
3751 	struct dp_srng *dp_srng;
3752 	uint8_t minidump_str[MINIDUMP_STR_SIZE];
3753 
3754 	/* Only Tx queue descriptors are allocated from common link descriptor
3755 	 * pool Rx queue descriptors are not included in this because (REO queue
3756 	 * extension descriptors) they are expected to be allocated contiguously
3757 	 * with REO queue descriptors
3758 	 */
3759 	if (mac_id != WLAN_INVALID_PDEV_ID) {
3760 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3761 		/* dp_monitor_get_link_desc_pages returns NULL only
3762 		 * if monitor SOC is  NULL
3763 		 */
3764 		if (!pages) {
3765 			dp_err("can not get link desc pages");
3766 			QDF_ASSERT(0);
3767 			return QDF_STATUS_E_FAULT;
3768 		}
3769 		dp_srng = &soc->rxdma_mon_desc_ring[mac_id];
3770 		num_entries = dp_srng->alloc_size /
3771 			hal_srng_get_entrysize(soc->hal_soc,
3772 					       RXDMA_MONITOR_DESC);
3773 		total_link_descs = dp_monitor_get_total_link_descs(soc, mac_id);
3774 		qdf_str_lcopy(minidump_str, "mon_link_desc_bank",
3775 			      MINIDUMP_STR_SIZE);
3776 	} else {
3777 		num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3778 			AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
3779 
3780 		num_mpdu_queue_descs = num_mpdu_link_descs /
3781 			num_mpdu_links_per_queue_desc;
3782 
3783 		num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3784 			AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
3785 			num_msdus_per_link_desc;
3786 
3787 		num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3788 			AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
3789 
3790 		num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
3791 			num_tx_msdu_link_descs + num_rx_msdu_link_descs;
3792 
3793 		pages = &soc->link_desc_pages;
3794 		total_link_descs = &soc->total_link_descs;
3795 		qdf_str_lcopy(minidump_str, "link_desc_bank",
3796 			      MINIDUMP_STR_SIZE);
3797 	}
3798 
3799 	/* If link descriptor banks are allocated, return from here */
3800 	if (pages->num_pages)
3801 		return QDF_STATUS_SUCCESS;
3802 
3803 	/* Round up to power of 2 */
3804 	*total_link_descs = 1;
3805 	while (*total_link_descs < num_entries)
3806 		*total_link_descs <<= 1;
3807 
3808 	dp_init_info("%pK: total_link_descs: %u, link_desc_size: %d",
3809 		     soc, *total_link_descs, link_desc_size);
3810 	total_mem_size =  *total_link_descs * link_desc_size;
3811 	total_mem_size += link_desc_align;
3812 
3813 	dp_init_info("%pK: total_mem_size: %d",
3814 		     soc, total_mem_size);
3815 
3816 	dp_set_max_page_size(pages, max_alloc_size);
3817 	dp_desc_multi_pages_mem_alloc(soc, DP_HW_LINK_DESC_TYPE,
3818 				      pages,
3819 				      link_desc_size,
3820 				      *total_link_descs,
3821 				      0, false);
3822 	if (!pages->num_pages) {
3823 		dp_err("Multi page alloc fail for hw link desc pool");
3824 		return QDF_STATUS_E_FAULT;
3825 	}
3826 
3827 	wlan_minidump_log(pages->dma_pages->page_v_addr_start,
3828 			  pages->num_pages * pages->page_size,
3829 			  soc->ctrl_psoc,
3830 			  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3831 			  "hw_link_desc_bank");
3832 
3833 	return QDF_STATUS_SUCCESS;
3834 }
3835 
3836 /*
3837  * dp_hw_link_desc_ring_free() - Free h/w link desc rings
3838  * @soc: DP SOC handle
3839  *
3840  * Return: none
3841  */
3842 static void dp_hw_link_desc_ring_free(struct dp_soc *soc)
3843 {
3844 	uint32_t i;
3845 	uint32_t size = soc->wbm_idle_scatter_buf_size;
3846 	void *vaddr = soc->wbm_idle_link_ring.base_vaddr_unaligned;
3847 	qdf_dma_addr_t paddr;
3848 
3849 	if (soc->wbm_idle_scatter_buf_base_vaddr[0]) {
3850 		for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
3851 			vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
3852 			paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
3853 			if (vaddr) {
3854 				qdf_mem_free_consistent(soc->osdev,
3855 							soc->osdev->dev,
3856 							size,
3857 							vaddr,
3858 							paddr,
3859 							0);
3860 				vaddr = NULL;
3861 			}
3862 		}
3863 	} else {
3864 		wlan_minidump_remove(soc->wbm_idle_link_ring.base_vaddr_unaligned,
3865 				     soc->wbm_idle_link_ring.alloc_size,
3866 				     soc->ctrl_psoc,
3867 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3868 				     "wbm_idle_link_ring");
3869 		dp_srng_free(soc, &soc->wbm_idle_link_ring);
3870 	}
3871 }
3872 
3873 /*
3874  * dp_hw_link_desc_ring_alloc() - Allocate hw link desc rings
3875  * @soc: DP SOC handle
3876  *
3877  * Allocate memory for WBM_IDLE_LINK srng ring if the number of
3878  * link descriptors is less then the max_allocated size. else
3879  * allocate memory for wbm_idle_scatter_buffer.
3880  *
3881  * Return: QDF_STATUS_SUCCESS: success
3882  *         QDF_STATUS_E_NO_MEM: No memory (Failure)
3883  */
3884 static QDF_STATUS dp_hw_link_desc_ring_alloc(struct dp_soc *soc)
3885 {
3886 	uint32_t entry_size, i;
3887 	uint32_t total_mem_size;
3888 	qdf_dma_addr_t *baseaddr = NULL;
3889 	struct dp_srng *dp_srng;
3890 	uint32_t ring_type;
3891 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
3892 	uint32_t tlds;
3893 
3894 	ring_type = WBM_IDLE_LINK;
3895 	dp_srng = &soc->wbm_idle_link_ring;
3896 	tlds = soc->total_link_descs;
3897 
3898 	entry_size = hal_srng_get_entrysize(soc->hal_soc, ring_type);
3899 	total_mem_size = entry_size * tlds;
3900 
3901 	if (total_mem_size <= max_alloc_size) {
3902 		if (dp_srng_alloc(soc, dp_srng, ring_type, tlds, 0)) {
3903 			dp_init_err("%pK: Link desc idle ring setup failed",
3904 				    soc);
3905 			goto fail;
3906 		}
3907 
3908 		wlan_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
3909 				  soc->wbm_idle_link_ring.alloc_size,
3910 				  soc->ctrl_psoc,
3911 				  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3912 				  "wbm_idle_link_ring");
3913 	} else {
3914 		uint32_t num_scatter_bufs;
3915 		uint32_t buf_size = 0;
3916 
3917 		soc->wbm_idle_scatter_buf_size =
3918 			hal_idle_list_scatter_buf_size(soc->hal_soc);
3919 		hal_idle_scatter_buf_num_entries(
3920 					soc->hal_soc,
3921 					soc->wbm_idle_scatter_buf_size);
3922 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
3923 					soc->hal_soc, total_mem_size,
3924 					soc->wbm_idle_scatter_buf_size);
3925 
3926 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
3927 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3928 				  FL("scatter bufs size out of bounds"));
3929 			goto fail;
3930 		}
3931 
3932 		for (i = 0; i < num_scatter_bufs; i++) {
3933 			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
3934 			buf_size = soc->wbm_idle_scatter_buf_size;
3935 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
3936 				qdf_mem_alloc_consistent(soc->osdev,
3937 							 soc->osdev->dev,
3938 							 buf_size,
3939 							 baseaddr);
3940 
3941 			if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
3942 				QDF_TRACE(QDF_MODULE_ID_DP,
3943 					  QDF_TRACE_LEVEL_ERROR,
3944 					  FL("Scatter lst memory alloc fail"));
3945 				goto fail;
3946 			}
3947 		}
3948 		soc->num_scatter_bufs = num_scatter_bufs;
3949 	}
3950 	return QDF_STATUS_SUCCESS;
3951 
3952 fail:
3953 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
3954 		void *vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
3955 		qdf_dma_addr_t paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
3956 
3957 		if (vaddr) {
3958 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
3959 						soc->wbm_idle_scatter_buf_size,
3960 						vaddr,
3961 						paddr, 0);
3962 			vaddr = NULL;
3963 		}
3964 	}
3965 	return QDF_STATUS_E_NOMEM;
3966 }
3967 
/* Export so other driver modules can call this allocator */
qdf_export_symbol(dp_hw_link_desc_pool_banks_alloc);
3969 
3970 /*
3971  * dp_hw_link_desc_ring_init() - Initialize hw link desc rings
3972  * @soc: DP SOC handle
3973  *
3974  * Return: QDF_STATUS_SUCCESS: success
3975  *         QDF_STATUS_E_FAILURE: failure
3976  */
3977 static QDF_STATUS dp_hw_link_desc_ring_init(struct dp_soc *soc)
3978 {
3979 	struct dp_srng *dp_srng = &soc->wbm_idle_link_ring;
3980 
3981 	if (dp_srng->base_vaddr_unaligned) {
3982 		if (dp_srng_init(soc, dp_srng, WBM_IDLE_LINK, 0, 0))
3983 			return QDF_STATUS_E_FAILURE;
3984 	}
3985 	return QDF_STATUS_SUCCESS;
3986 }
3987 
3988 /*
3989  * dp_hw_link_desc_ring_deinit() - Reset hw link desc rings
3990  * @soc: DP SOC handle
3991  *
3992  * Return: None
3993  */
static void dp_hw_link_desc_ring_deinit(struct dp_soc *soc)
{
	/* Counterpart of dp_hw_link_desc_ring_init() */
	dp_srng_deinit(soc, &soc->wbm_idle_link_ring, WBM_IDLE_LINK, 0);
}
3998 
3999 /*
 * dp_link_desc_ring_replenish() - Replenish hw link desc rings
4001  * @soc: DP SOC handle
4002  * @mac_id: mac id
4003  *
4004  * Return: None
4005  */
void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id)
{
	uint32_t cookie = 0;
	uint32_t page_idx = 0;
	struct qdf_mem_multi_page_t *pages;
	struct qdf_mem_dma_page_t *dma_pages;
	uint32_t offset = 0;
	uint32_t count = 0;
	uint32_t desc_id = 0;
	void *desc_srng;
	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
	uint32_t *total_link_descs_addr;
	uint32_t total_link_descs;
	uint32_t scatter_buf_num;
	uint32_t num_entries_per_buf = 0;
	uint32_t rem_entries;
	uint32_t num_descs_per_page;
	uint32_t num_scatter_bufs = 0;
	uint8_t *scatter_buf_ptr;
	void *desc;

	num_scatter_bufs = soc->num_scatter_bufs;

	/* Select the descriptor page pool and destination ring: the SOC-wide
	 * pool for WLAN_INVALID_PDEV_ID, otherwise the monitor pool of the
	 * given mac.
	 */
	if (mac_id == WLAN_INVALID_PDEV_ID) {
		pages = &soc->link_desc_pages;
		total_link_descs = soc->total_link_descs;
		desc_srng = soc->wbm_idle_link_ring.hal_srng;
	} else {
		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
		/* dp_monitor_get_link_desc_pages returns NULL only
		 * if monitor SOC is  NULL
		 */
		if (!pages) {
			dp_err("can not get link desc pages");
			QDF_ASSERT(0);
			return;
		}
		total_link_descs_addr =
				dp_monitor_get_total_link_descs(soc, mac_id);
		total_link_descs = *total_link_descs_addr;
		desc_srng = soc->rxdma_mon_desc_ring[mac_id].hal_srng;
	}

	/* Zero every descriptor page before handing addresses to HW */
	dma_pages = pages->dma_pages;
	do {
		qdf_mem_zero(dma_pages[page_idx].page_v_addr_start,
			     pages->page_size);
		page_idx++;
	} while (page_idx < pages->num_pages);

	if (desc_srng) {
		/* SRNG scheme: write one (cookie, phys addr) entry per link
		 * descriptor into the idle link ring.
		 */
		hal_srng_access_start_unlocked(soc->hal_soc, desc_srng);
		page_idx = 0;
		count = 0;
		offset = 0;
		pages = &soc->link_desc_pages;
		while ((desc = hal_srng_src_get_next(soc->hal_soc,
						     desc_srng)) &&
			(count < total_link_descs)) {
			page_idx = count / pages->num_element_per_page;
			/* desc_id wraps per page so the cookie stays unique
			 * within (page_idx, desc_id)
			 */
			if (desc_id == pages->num_element_per_page)
				desc_id = 0;

			offset = count % pages->num_element_per_page;
			cookie = LINK_DESC_COOKIE(desc_id, page_idx,
						  soc->link_desc_id_start);

			hal_set_link_desc_addr(soc->hal_soc, desc, cookie,
					       dma_pages[page_idx].page_p_addr
					       + (offset * link_desc_size),
					       soc->idle_link_bm_id);
			count++;
			desc_id++;
		}
		hal_srng_access_end_unlocked(soc->hal_soc, desc_srng);
	} else {
		/* Populate idle list scatter buffers with link descriptor
		 * pointers
		 */
		scatter_buf_num = 0;
		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
					soc->hal_soc,
					soc->wbm_idle_scatter_buf_size);

		scatter_buf_ptr = (uint8_t *)(
			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
		rem_entries = num_entries_per_buf;
		pages = &soc->link_desc_pages;
		page_idx = 0; count = 0;
		offset = 0;
		num_descs_per_page = pages->num_element_per_page;

		while (count < total_link_descs) {
			page_idx = count / num_descs_per_page;
			offset = count % num_descs_per_page;
			if (desc_id == pages->num_element_per_page)
				desc_id = 0;

			cookie = LINK_DESC_COOKIE(desc_id, page_idx,
						  soc->link_desc_id_start);
			hal_set_link_desc_addr(soc->hal_soc,
					       (void *)scatter_buf_ptr,
					       cookie,
					       dma_pages[page_idx].page_p_addr +
					       (offset * link_desc_size),
					       soc->idle_link_bm_id);
			rem_entries--;
			if (rem_entries) {
				scatter_buf_ptr += link_desc_size;
			} else {
				/* current scatter buffer is full; move to
				 * the next one, if any
				 */
				rem_entries = num_entries_per_buf;
				scatter_buf_num++;
				if (scatter_buf_num >= num_scatter_bufs)
					break;
				scatter_buf_ptr = (uint8_t *)
					(soc->wbm_idle_scatter_buf_base_vaddr[
					 scatter_buf_num]);
			}
			count++;
			desc_id++;
		}
		/* Setup link descriptor idle list in HW */
		hal_setup_link_idle_list(soc->hal_soc,
			soc->wbm_idle_scatter_buf_base_paddr,
			soc->wbm_idle_scatter_buf_base_vaddr,
			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
			(uint32_t)(scatter_buf_ptr -
			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
			scatter_buf_num-1])), total_link_descs);
	}
}
4137 
/* Export so other driver modules can replenish link descriptor rings */
qdf_export_symbol(dp_link_desc_ring_replenish);
4139 
4140 #ifdef IPA_OFFLOAD
4141 #define USE_1_IPA_RX_REO_RING 1
4142 #define USE_2_IPA_RX_REO_RINGS 2
4143 #define REO_DST_RING_SIZE_QCA6290 1023
4144 #ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
4145 #define REO_DST_RING_SIZE_QCA8074 1023
4146 #define REO_DST_RING_SIZE_QCN9000 2048
4147 #else
4148 #define REO_DST_RING_SIZE_QCA8074 8
4149 #define REO_DST_RING_SIZE_QCN9000 8
4150 #endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
4151 
4152 #ifdef IPA_WDI3_TX_TWO_PIPES
4153 #ifdef DP_MEMORY_OPT
/* IPA alternate TX ring helpers: with DP_MEMORY_OPT these map onto the
 * regular TCL/WBM ring pair at IPA_TX_ALT_RING_IDX.
 */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return dp_init_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
	dp_deinit_tx_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return dp_alloc_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
	dp_free_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}
4173 
4174 #else /* !DP_MEMORY_OPT */
/* IPA alternate TX ring helpers: stubbed out when DP_MEMORY_OPT is not
 * defined; the alternate ring pair is not managed dynamically.
 */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	/* fix: original was missing the semicolon after "return 0",
	 * breaking the build in this configuration
	 */
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}
4192 #endif /* DP_MEMORY_OPT */
4193 
/* Program HAL for the IPA alternate TCL data ring */
static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
	hal_tx_init_data_ring(soc->hal_soc,
			      soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng);
}
4199 
4200 #else /* !IPA_WDI3_TX_TWO_PIPES */
/* Single IPA TX pipe build: alternate TX ring helpers are no-ops */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}

static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
}
4222 
4223 #endif /* IPA_WDI3_TX_TWO_PIPES */
4224 
4225 #else
4226 
4227 #define REO_DST_RING_SIZE_QCA6290 1024
4228 
/* IPA offload disabled: alternate TX ring helpers are no-ops */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}

static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
}
4250 
4251 #endif /* IPA_OFFLOAD */
4252 
4253 /*
 * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
4255  * @soc: Datapath soc handler
4256  *
4257  * This api resets the default cpu ring map
4258  */
4259 
4260 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
4261 {
4262 	uint8_t i;
4263 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4264 
4265 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
4266 		switch (nss_config) {
4267 		case dp_nss_cfg_first_radio:
4268 			/*
4269 			 * Setting Tx ring map for one nss offloaded radio
4270 			 */
4271 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
4272 			break;
4273 
4274 		case dp_nss_cfg_second_radio:
4275 			/*
4276 			 * Setting Tx ring for two nss offloaded radios
4277 			 */
4278 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
4279 			break;
4280 
4281 		case dp_nss_cfg_dbdc:
4282 			/*
4283 			 * Setting Tx ring map for 2 nss offloaded radios
4284 			 */
4285 			soc->tx_ring_map[i] =
4286 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
4287 			break;
4288 
4289 		case dp_nss_cfg_dbtc:
4290 			/*
4291 			 * Setting Tx ring map for 3 nss offloaded radios
4292 			 */
4293 			soc->tx_ring_map[i] =
4294 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
4295 			break;
4296 
4297 		default:
4298 			dp_err("tx_ring_map failed due to invalid nss cfg");
4299 			break;
4300 		}
4301 	}
4302 }
4303 
4304 /*
4305  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
4306  * @dp_soc - DP soc handle
4307  * @ring_type - ring type
4308  * @ring_num - ring_num
4309  *
4310  * return 0 or 1
4311  */
4312 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
4313 {
4314 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4315 	uint8_t status = 0;
4316 
4317 	switch (ring_type) {
4318 	case WBM2SW_RELEASE:
4319 	case REO_DST:
4320 	case RXDMA_BUF:
4321 	case REO_EXCEPTION:
4322 		status = ((nss_config) & (1 << ring_num));
4323 		break;
4324 	default:
4325 		break;
4326 	}
4327 
4328 	return status;
4329 }
4330 
4331 /*
4332  * dp_soc_disable_unused_mac_intr_mask() - reset interrupt mask for
4333  *					  unused WMAC hw rings
4334  * @dp_soc - DP Soc handle
4335  * @mac_num - wmac num
4336  *
4337  * Return: Return void
4338  */
4339 static void dp_soc_disable_unused_mac_intr_mask(struct dp_soc *soc,
4340 						int mac_num)
4341 {
4342 	uint8_t *grp_mask = NULL;
4343 	int group_number;
4344 
4345 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
4346 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4347 	wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
4348 					  group_number, 0x0);
4349 
4350 	grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
4351 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4352 	wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
4353 				      group_number, 0x0);
4354 
4355 	grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
4356 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4357 	wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
4358 					  group_number, 0x0);
4359 
4360 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
4361 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4362 	wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
4363 					      group_number, 0x0);
4364 }
4365 
4366 #ifdef IPA_OFFLOAD
4367 #ifdef IPA_WDI3_VLAN_SUPPORT
4368 /*
4369  * dp_soc_reset_ipa_vlan_intr_mask() - reset interrupt mask for IPA offloaded
4370  * ring for vlan tagged traffic
4371  * @dp_soc - DP Soc handle
4372  *
4373  * Return: Return void
4374  */
4375 static void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
4376 {
4377 	uint8_t *grp_mask = NULL;
4378 	int group_number, mask;
4379 
4380 	if (!wlan_ipa_is_vlan_enabled())
4381 		return;
4382 
4383 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
4384 
4385 	group_number = dp_srng_find_ring_in_mask(IPA_ALT_REO_DEST_RING_IDX, grp_mask);
4386 	if (group_number < 0) {
4387 		dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4388 			      soc, REO_DST, IPA_ALT_REO_DEST_RING_IDX);
4389 		return;
4390 	}
4391 
4392 	mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
4393 
4394 	/* reset the interrupt mask for offloaded ring */
4395 	mask &= (~(1 << IPA_ALT_REO_DEST_RING_IDX));
4396 
4397 	/*
4398 	 * set the interrupt mask to zero for rx offloaded radio.
4399 	 */
4400 	wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
4401 }
4402 #else
/* No-op: IPA vlan support not compiled in */
static inline
void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
{ }
4406 #endif /* IPA_WDI3_VLAN_SUPPORT */
4407 #else
/* No-op: IPA offload not compiled in */
static inline
void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
{ }
4411 #endif /* IPA_OFFLOAD */
4412 
4413 /*
4414  * dp_soc_reset_intr_mask() - reset interrupt mask
4415  * @dp_soc - DP Soc handle
4416  *
4417  * Return: Return void
4418  */
static void dp_soc_reset_intr_mask(struct dp_soc *soc)
{
	uint8_t j;
	uint8_t *grp_mask = NULL;
	int group_number, mask, num_ring;

	/* number of tx ring */
	/* NOTE(review): num_ring is overwritten below before any use; the
	 * tx loop iterates WLAN_CFG_NUM_TCL_DATA_RINGS, not num_ring.
	 */
	num_ring = soc->num_tcl_data_rings;

	/*
	 * group mask for tx completion  ring.
	 */
	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];

	/* loop and reset the mask for only offloaded ring */
	for (j = 0; j < WLAN_CFG_NUM_TCL_DATA_RINGS; j++) {
		/*
		 * Group number corresponding to tx offloaded ring.
		 */
		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
		if (group_number < 0) {
			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
				      soc, WBM2SW_RELEASE, j);
			continue;
		}

		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
		/* skip rings that are neither offloaded nor masked */
		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j) &&
		    (!mask)) {
			continue;
		}

		/* reset the tx mask for offloaded ring */
		mask &= (~(1 << j));

		/*
		 * reset the interrupt mask for offloaded ring.
		 */
		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
	}

	/* number of rx rings */
	num_ring = soc->num_reo_dest_rings;

	/*
	 * group mask for reo destination ring.
	 */
	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];

	/* loop and reset the mask for only offloaded ring */
	for (j = 0; j < WLAN_CFG_NUM_REO_DEST_RING; j++) {
		/*
		 * Group number corresponding to rx offloaded ring.
		 */
		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
		if (group_number < 0) {
			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
				      soc, REO_DST, j);
			continue;
		}

		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j) &&
		    (!mask)) {
			continue;
		}

		/* reset the interrupt mask for offloaded ring */
		mask &= (~(1 << j));

		/*
		 * set the interrupt mask to zero for rx offloaded radio.
		 */
		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
	}

	/*
	 * group mask for Rx buffer refill ring
	 */
	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];

	/* loop and reset the mask for only offloaded ring */
	for (j = 0; j < MAX_PDEV_CNT; j++) {
		/* refill rings are indexed by lmac, not pdev */
		int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);

		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
			continue;
		}

		/*
		 * Group number corresponding to rx offloaded ring.
		 */
		group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask);
		if (group_number < 0) {
			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
				      soc, REO_DST, lmac_id);
			continue;
		}

		/* set the interrupt mask for offloaded ring */
		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
				group_number);
		mask &= (~(1 << lmac_id));

		/*
		 * set the interrupt mask to zero for rx offloaded radio.
		 */
		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
			group_number, mask);
	}

	/* REO exception ring: clear the whole group mask for offloaded
	 * rings (num_ring here is soc->num_reo_dest_rings, set above)
	 */
	grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];

	for (j = 0; j < num_ring; j++) {
		if (!dp_soc_ring_if_nss_offloaded(soc, REO_EXCEPTION, j)) {
			continue;
		}

		/*
		 * Group number corresponding to rx err ring.
		 */
		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
		if (group_number < 0) {
			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
				      soc, REO_EXCEPTION, j);
			continue;
		}

		wlan_cfg_set_rx_err_ring_mask(soc->wlan_cfg_ctx,
					      group_number, 0);
	}
}
4551 
4552 #ifdef IPA_OFFLOAD
/**
 * dp_reo_remap_config() - Compute REO remap registers with IPA offload
 * @soc: DP SOC handle
 * @remap0: output for REO remap IX0 (LI arch only)
 * @remap1: output for REO remap IX2
 * @remap2: output for REO remap IX3
 *
 * Return: true (remap values were computed)
 */
bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap0,
			 uint32_t *remap1, uint32_t *remap2)
{
	/* NOTE(review): ring[] skips REO_REMAP_SW4/SW8 — presumably
	 * reserved for the IPA REO destination ring(s); confirm.
	 */
	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX] = {
				REO_REMAP_SW1, REO_REMAP_SW2, REO_REMAP_SW3,
				REO_REMAP_SW5, REO_REMAP_SW6, REO_REMAP_SW7};

	switch (soc->arch_id) {
	case CDP_ARCH_TYPE_BE:
		/* BE always reserves two REO rings for IPA */
		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
					      soc->num_reo_dest_rings -
					      USE_2_IPA_RX_REO_RINGS, remap1,
					      remap2);
		break;

	case CDP_ARCH_TYPE_LI:
		/* LI reserves a second ring only when IPA vlan is enabled */
		if (wlan_ipa_is_vlan_enabled()) {
			hal_compute_reo_remap_ix2_ix3(
					soc->hal_soc, ring,
					soc->num_reo_dest_rings -
					USE_2_IPA_RX_REO_RINGS, remap1,
					remap2);

		} else {
			hal_compute_reo_remap_ix2_ix3(
					soc->hal_soc, ring,
					soc->num_reo_dest_rings -
					USE_1_IPA_RX_REO_RING, remap1,
					remap2);
		}

		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);
		break;
	default:
		dp_err("unknown arch_id 0x%x", soc->arch_id);
		QDF_BUG(0);

	}

	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);

	return true;
}
4596 
4597 #ifdef IPA_WDI3_TX_TWO_PIPES
/* Identify the IPA alternate TX / TX-completion ring indices
 * (two-pipe IPA builds)
 */
static bool dp_ipa_is_alt_tx_ring(int index)
{
	return index == IPA_TX_ALT_RING_IDX;
}

static bool dp_ipa_is_alt_tx_comp_ring(int index)
{
	return index == IPA_TX_ALT_COMP_RING_IDX;
}
4607 #else /* !IPA_WDI3_TX_TWO_PIPES */
/* Without IPA_WDI3_TX_TWO_PIPES there is no alternate TX ring,
 * so no index ever matches.
 */
static bool dp_ipa_is_alt_tx_ring(int index)
{
	(void)index;
	return false;
}

static bool dp_ipa_is_alt_tx_comp_ring(int index)
{
	(void)index;
	return false;
}
4617 #endif /* IPA_WDI3_TX_TWO_PIPES */
4618 
4619 /**
4620  * dp_ipa_get_tx_ring_size() - Get Tx ring size for IPA
4621  *
4622  * @tx_ring_num: Tx ring number
4623  * @tx_ipa_ring_sz: Return param only updated for IPA.
4624  * @soc_cfg_ctx: dp soc cfg context
4625  *
4626  * Return: None
4627  */
4628 static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz,
4629 				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4630 {
4631 	if (!soc_cfg_ctx->ipa_enabled)
4632 		return;
4633 
4634 	if (tx_ring_num == IPA_TCL_DATA_RING_IDX)
4635 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_ring_size(soc_cfg_ctx);
4636 	else if (dp_ipa_is_alt_tx_ring(tx_ring_num))
4637 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_alt_ring_size(soc_cfg_ctx);
4638 }
4639 
4640 /**
4641  * dp_ipa_get_tx_comp_ring_size() - Get Tx comp ring size for IPA
4642  *
4643  * @tx_comp_ring_num: Tx comp ring number
4644  * @tx_comp_ipa_ring_sz: Return param only updated for IPA.
4645  * @soc_cfg_ctx: dp soc cfg context
4646  *
4647  * Return: None
4648  */
4649 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
4650 					 int *tx_comp_ipa_ring_sz,
4651 				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4652 {
4653 	if (!soc_cfg_ctx->ipa_enabled)
4654 		return;
4655 
4656 	if (tx_comp_ring_num == IPA_TCL_DATA_RING_IDX)
4657 		*tx_comp_ipa_ring_sz =
4658 				wlan_cfg_ipa_tx_comp_ring_size(soc_cfg_ctx);
4659 	else if (dp_ipa_is_alt_tx_comp_ring(tx_comp_ring_num))
4660 		*tx_comp_ipa_ring_sz =
4661 				wlan_cfg_ipa_tx_alt_comp_ring_size(soc_cfg_ctx);
4662 }
4663 #else
4664 static uint8_t dp_reo_ring_selection(uint32_t value, uint32_t *ring)
4665 {
4666 	uint8_t num = 0;
4667 
4668 	switch (value) {
4669 	/* should we have all the different possible ring configs */
4670 	case 0xFF:
4671 		num = 8;
4672 		ring[0] = REO_REMAP_SW1;
4673 		ring[1] = REO_REMAP_SW2;
4674 		ring[2] = REO_REMAP_SW3;
4675 		ring[3] = REO_REMAP_SW4;
4676 		ring[4] = REO_REMAP_SW5;
4677 		ring[5] = REO_REMAP_SW6;
4678 		ring[6] = REO_REMAP_SW7;
4679 		ring[7] = REO_REMAP_SW8;
4680 		break;
4681 
4682 	case 0x3F:
4683 		num = 6;
4684 		ring[0] = REO_REMAP_SW1;
4685 		ring[1] = REO_REMAP_SW2;
4686 		ring[2] = REO_REMAP_SW3;
4687 		ring[3] = REO_REMAP_SW4;
4688 		ring[4] = REO_REMAP_SW5;
4689 		ring[5] = REO_REMAP_SW6;
4690 		break;
4691 
4692 	case 0xF:
4693 		num = 4;
4694 		ring[0] = REO_REMAP_SW1;
4695 		ring[1] = REO_REMAP_SW2;
4696 		ring[2] = REO_REMAP_SW3;
4697 		ring[3] = REO_REMAP_SW4;
4698 		break;
4699 	case 0xE:
4700 		num = 3;
4701 		ring[0] = REO_REMAP_SW2;
4702 		ring[1] = REO_REMAP_SW3;
4703 		ring[2] = REO_REMAP_SW4;
4704 		break;
4705 	case 0xD:
4706 		num = 3;
4707 		ring[0] = REO_REMAP_SW1;
4708 		ring[1] = REO_REMAP_SW3;
4709 		ring[2] = REO_REMAP_SW4;
4710 		break;
4711 	case 0xC:
4712 		num = 2;
4713 		ring[0] = REO_REMAP_SW3;
4714 		ring[1] = REO_REMAP_SW4;
4715 		break;
4716 	case 0xB:
4717 		num = 3;
4718 		ring[0] = REO_REMAP_SW1;
4719 		ring[1] = REO_REMAP_SW2;
4720 		ring[2] = REO_REMAP_SW4;
4721 		break;
4722 	case 0xA:
4723 		num = 2;
4724 		ring[0] = REO_REMAP_SW2;
4725 		ring[1] = REO_REMAP_SW4;
4726 		break;
4727 	case 0x9:
4728 		num = 2;
4729 		ring[0] = REO_REMAP_SW1;
4730 		ring[1] = REO_REMAP_SW4;
4731 		break;
4732 	case 0x8:
4733 		num = 1;
4734 		ring[0] = REO_REMAP_SW4;
4735 		break;
4736 	case 0x7:
4737 		num = 3;
4738 		ring[0] = REO_REMAP_SW1;
4739 		ring[1] = REO_REMAP_SW2;
4740 		ring[2] = REO_REMAP_SW3;
4741 		break;
4742 	case 0x6:
4743 		num = 2;
4744 		ring[0] = REO_REMAP_SW2;
4745 		ring[1] = REO_REMAP_SW3;
4746 		break;
4747 	case 0x5:
4748 		num = 2;
4749 		ring[0] = REO_REMAP_SW1;
4750 		ring[1] = REO_REMAP_SW3;
4751 		break;
4752 	case 0x4:
4753 		num = 1;
4754 		ring[0] = REO_REMAP_SW3;
4755 		break;
4756 	case 0x3:
4757 		num = 2;
4758 		ring[0] = REO_REMAP_SW1;
4759 		ring[1] = REO_REMAP_SW2;
4760 		break;
4761 	case 0x2:
4762 		num = 1;
4763 		ring[0] = REO_REMAP_SW2;
4764 		break;
4765 	case 0x1:
4766 		num = 1;
4767 		ring[0] = REO_REMAP_SW1;
4768 		break;
4769 	default:
4770 		dp_err("unknown reo ring map 0x%x", value);
4771 		QDF_BUG(0);
4772 	}
4773 	return num;
4774 }
4775 
4776 bool dp_reo_remap_config(struct dp_soc *soc,
4777 			 uint32_t *remap0,
4778 			 uint32_t *remap1,
4779 			 uint32_t *remap2)
4780 {
4781 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4782 	uint32_t reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
4783 	uint8_t num;
4784 	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX];
4785 	uint32_t value;
4786 
4787 	switch (offload_radio) {
4788 	case dp_nss_cfg_default:
4789 		value = reo_config & WLAN_CFG_NUM_REO_RINGS_MAP_MAX;
4790 		num = dp_reo_ring_selection(value, ring);
4791 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4792 					      num, remap1, remap2);
4793 		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);
4794 
4795 		break;
4796 	case dp_nss_cfg_first_radio:
4797 		value = reo_config & 0xE;
4798 		num = dp_reo_ring_selection(value, ring);
4799 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4800 					      num, remap1, remap2);
4801 
4802 		break;
4803 	case dp_nss_cfg_second_radio:
4804 		value = reo_config & 0xD;
4805 		num = dp_reo_ring_selection(value, ring);
4806 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4807 					      num, remap1, remap2);
4808 
4809 		break;
4810 	case dp_nss_cfg_dbdc:
4811 	case dp_nss_cfg_dbtc:
4812 		/* return false if both or all are offloaded to NSS */
4813 		return false;
4814 
4815 	}
4816 
4817 	dp_debug("remap1 %x remap2 %x offload_radio %u",
4818 		 *remap1, *remap2, offload_radio);
4819 	return true;
4820 }
4821 
/* IPA disabled: ring-size overrides are no-ops */
static void dp_ipa_get_tx_ring_size(int ring_num, int *tx_ipa_ring_sz,
				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}

static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
					 int *tx_comp_ipa_ring_sz,
				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}
4832 #endif /* IPA_OFFLOAD */
4833 
4834 /*
4835  * dp_reo_frag_dst_set() - configure reo register to set the
4836  *                        fragment destination ring
4837  * @soc : Datapath soc
4838  * @frag_dst_ring : output parameter to set fragment destination ring
4839  *
4840  * Based on offload_radio below fragment destination rings is selected
4841  * 0 - TCL
4842  * 1 - SW1
4843  * 2 - SW2
4844  * 3 - SW3
4845  * 4 - SW4
4846  * 5 - Release
4847  * 6 - FW
4848  * 7 - alternate select
4849  *
4850  * return: void
4851  */
4852 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
4853 {
4854 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4855 
4856 	switch (offload_radio) {
4857 	case dp_nss_cfg_default:
4858 		*frag_dst_ring = REO_REMAP_TCL;
4859 		break;
4860 	case dp_nss_cfg_first_radio:
4861 		/*
4862 		 * This configuration is valid for single band radio which
4863 		 * is also NSS offload.
4864 		 */
4865 	case dp_nss_cfg_dbdc:
4866 	case dp_nss_cfg_dbtc:
4867 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
4868 		break;
4869 	default:
4870 		dp_init_err("%pK: dp_reo_frag_dst_set invalid offload radio config", soc);
4871 		break;
4872 	}
4873 }
4874 
4875 #ifdef ENABLE_VERBOSE_DEBUG
4876 static void dp_enable_verbose_debug(struct dp_soc *soc)
4877 {
4878 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
4879 
4880 	soc_cfg_ctx = soc->wlan_cfg_ctx;
4881 
4882 	if (soc_cfg_ctx->per_pkt_trace & dp_verbose_debug_mask)
4883 		is_dp_verbose_debug_enabled = true;
4884 
4885 	if (soc_cfg_ctx->per_pkt_trace & hal_verbose_debug_mask)
4886 		hal_set_verbose_debug(true);
4887 	else
4888 		hal_set_verbose_debug(false);
4889 }
4890 #else
/* No-op: verbose debug tracing not compiled in */
static void dp_enable_verbose_debug(struct dp_soc *soc)
{
}
4894 #endif
4895 
4896 #ifdef WLAN_FEATURE_STATS_EXT
/* Create the completion event used to wait for HW rx stats */
static inline void dp_create_ext_stats_event(struct dp_soc *soc)
{
	qdf_event_create(&soc->rx_hw_stats_event);
}
4901 #else
/* No-op: extended stats feature not compiled in */
static inline void dp_create_ext_stats_event(struct dp_soc *soc)
{
}
4905 #endif
4906 
/**
 * dp_deinit_tx_pair_by_index() - Deinit a TCL data / WBM completion ring pair
 * @soc: DP soc pointer
 * @index: index into soc->tcl_data_ring / soc->tx_comp_ring
 *
 * Return: none
 */
static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index)
{
	int tcl_ring_num, wbm_ring_num;

	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
						index,
						&tcl_ring_num,
						&wbm_ring_num);

	if (tcl_ring_num == -1) {
		dp_err("incorrect tcl ring num for index %u", index);
		return;
	}

	wlan_minidump_remove(soc->tcl_data_ring[index].base_vaddr_unaligned,
			     soc->tcl_data_ring[index].alloc_size,
			     soc->ctrl_psoc,
			     WLAN_MD_DP_SRNG_TCL_DATA,
			     "tcl_data_ring");
	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
	dp_srng_deinit(soc, &soc->tcl_data_ring[index], TCL_DATA,
		       tcl_ring_num);

	/* a pair may legitimately have no WBM completion ring */
	if (wbm_ring_num == INVALID_WBM_RING_NUM)
		return;

	wlan_minidump_remove(soc->tx_comp_ring[index].base_vaddr_unaligned,
			     soc->tx_comp_ring[index].alloc_size,
			     soc->ctrl_psoc,
			     WLAN_MD_DP_SRNG_TX_COMP,
			     "tcl_comp_ring");
	dp_srng_deinit(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
		       wbm_ring_num);
}
4941 
4942 /**
4943  * dp_init_tx_ring_pair_by_index() - The function inits tcl data/wbm completion
4944  * ring pair
4945  * @soc: DP soc pointer
4946  * @index: index of soc->tcl_data or soc->tx_comp to initialize
4947  *
4948  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
4949  */
static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
						uint8_t index)
{
	int tcl_ring_num, wbm_ring_num;
	uint8_t bm_id;

	if (index >= MAX_TCL_DATA_RINGS) {
		dp_err("unexpected index!");
		QDF_BUG(0);
		goto fail1;
	}

	/* map the logical index onto the HW TCL/WBM ring numbers */
	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
						index,
						&tcl_ring_num,
						&wbm_ring_num);

	if (tcl_ring_num == -1) {
		dp_err("incorrect tcl ring num for index %u", index);
		goto fail1;
	}

	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
	if (dp_srng_init(soc, &soc->tcl_data_ring[index], TCL_DATA,
			 tcl_ring_num, 0)) {
		dp_err("dp_srng_init failed for tcl_data_ring");
		goto fail1;
	}
	wlan_minidump_log(soc->tcl_data_ring[index].base_vaddr_unaligned,
			  soc->tcl_data_ring[index].alloc_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_TCL_DATA,
			  "tcl_data_ring");

	/* some pairs have no WBM completion ring */
	if (wbm_ring_num == INVALID_WBM_RING_NUM)
		goto set_rbm;

	/* NOTE(review): on this failure the tcl_data_ring initialized above
	 * is not deinitialized here; verify the caller tears it down on the
	 * error path to avoid a leak.
	 */
	if (dp_srng_init(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
			 wbm_ring_num, 0)) {
		dp_err("dp_srng_init failed for tx_comp_ring");
		goto fail1;
	}

	wlan_minidump_log(soc->tx_comp_ring[index].base_vaddr_unaligned,
			  soc->tx_comp_ring[index].alloc_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_TX_COMP,
			  "tcl_comp_ring");
set_rbm:
	/* program the return buffer manager for this TCL ring */
	bm_id = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_ring_num);

	soc->arch_ops.tx_implicit_rbm_set(soc, tcl_ring_num, bm_id);

	return QDF_STATUS_SUCCESS;

fail1:
	return QDF_STATUS_E_FAILURE;
}
5008 
5009 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index)
5010 {
5011 	dp_debug("index %u", index);
5012 	dp_srng_free(soc, &soc->tcl_data_ring[index]);
5013 	dp_srng_free(soc, &soc->tx_comp_ring[index]);
5014 }
5015 
5016 /**
5017  * dp_alloc_tx_ring_pair_by_index() - The function allocs tcl data/wbm2sw
5018  * ring pair for the given "index"
5019  * @soc: DP soc pointer
5020  * @index: index of soc->tcl_data or soc->tx_comp to initialize
5021  *
5022  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
5023  */
5024 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
5025 						 uint8_t index)
5026 {
5027 	int tx_ring_size;
5028 	int tx_comp_ring_size;
5029 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
5030 	int cached = 0;
5031 
5032 	if (index >= MAX_TCL_DATA_RINGS) {
5033 		dp_err("unexpected index!");
5034 		QDF_BUG(0);
5035 		goto fail1;
5036 	}
5037 
5038 	dp_debug("index %u", index);
5039 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
5040 	dp_ipa_get_tx_ring_size(index, &tx_ring_size, soc_cfg_ctx);
5041 
5042 	if (dp_srng_alloc(soc, &soc->tcl_data_ring[index], TCL_DATA,
5043 			  tx_ring_size, cached)) {
5044 		dp_err("dp_srng_alloc failed for tcl_data_ring");
5045 		goto fail1;
5046 	}
5047 
5048 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
5049 	dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size, soc_cfg_ctx);
5050 	/* Enable cached TCL desc if NSS offload is disabled */
5051 	if (!wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
5052 		cached = WLAN_CFG_DST_RING_CACHED_DESC;
5053 
5054 	if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) ==
5055 	    INVALID_WBM_RING_NUM)
5056 		return QDF_STATUS_SUCCESS;
5057 
5058 	if (dp_srng_alloc(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
5059 			  tx_comp_ring_size, cached)) {
5060 		dp_err("dp_srng_alloc failed for tx_comp_ring");
5061 		goto fail1;
5062 	}
5063 
5064 	return QDF_STATUS_SUCCESS;
5065 
5066 fail1:
5067 	return QDF_STATUS_E_FAILURE;
5068 }
5069 
/**
 * dp_lro_hash_setup() - Build and send the LRO/RX-hash config to target FW
 * @soc: DP SoC handle
 * @pdev: DP pdev handle; its pdev_id is passed in the FW message
 *
 * Fails fast when LRO, GRO and RX hash are all disabled in cfg. Otherwise
 * fills a cdp_lro_hash_config with the TCP flag filter (when LRO or GRO is
 * enabled) and the arch-specific toeplitz keys, then sends it through the
 * registered ol_ops->lro_hash_config callback.
 *
 * Return: QDF_STATUS_SUCCESS on success, error code otherwise
 */
static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct cdp_lro_hash_config lro_hash;
	QDF_STATUS status;

	/* Nothing to configure if none of the RX hash consumers is enabled */
	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		dp_err("LRO, GRO and RX hash disabled");
		return QDF_STATUS_E_FAILURE;
	}

	qdf_mem_zero(&lro_hash, sizeof(lro_hash));

	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
		lro_hash.lro_enable = 1;
		/* Aggregate only pure ACK-flagged segments: mask all flags,
		 * match ACK.
		 */
		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
	}

	/* Arch layer fills the toeplitz seeds into lro_hash */
	soc->arch_ops.get_rx_hash_key(soc, &lro_hash);

	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);

	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
		QDF_BUG(0);
		dp_err("lro_hash_config not configured");
		return QDF_STATUS_E_FAILURE;
	}

	status = soc->cdp_soc.ol_ops->lro_hash_config(soc->ctrl_psoc,
						      pdev->pdev_id,
						      &lro_hash);
	if (!QDF_IS_STATUS_SUCCESS(status)) {
		dp_err("failed to send lro_hash_config to FW %u", status);
		return status;
	}

	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
		lro_hash.lro_enable, lro_hash.tcp_flag,
		lro_hash.tcp_flag_mask);

	dp_info("toeplitz_hash_ipv4:");
	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			   lro_hash.toeplitz_hash_ipv4,
			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
			   LRO_IPV4_SEED_ARR_SZ));

	dp_info("toeplitz_hash_ipv6:");
	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			   lro_hash.toeplitz_hash_ipv6,
			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
			   LRO_IPV6_SEED_ARR_SZ));

	return status;
}
5129 
5130 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
5131 /*
5132  * dp_reap_timer_init() - initialize the reap timer
5133  * @soc: data path SoC handle
5134  *
5135  * Return: void
5136  */
5137 static void dp_reap_timer_init(struct dp_soc *soc)
5138 {
5139 	/*
5140 	 * Timer to reap rxdma status rings.
5141 	 * Needed until we enable ppdu end interrupts
5142 	 */
5143 	dp_monitor_reap_timer_init(soc);
5144 	dp_monitor_vdev_timer_init(soc);
5145 }
5146 
5147 /*
5148  * dp_reap_timer_deinit() - de-initialize the reap timer
5149  * @soc: data path SoC handle
5150  *
5151  * Return: void
5152  */
5153 static void dp_reap_timer_deinit(struct dp_soc *soc)
5154 {
5155 	dp_monitor_reap_timer_deinit(soc);
5156 }
5157 #else
5158 /* WIN use case */
static void dp_reap_timer_init(struct dp_soc *soc)
{
	/* Configure LMAC rings in Polled mode */
	if (soc->lmac_polled_mode) {
		/*
		 * Timer to reap lmac rings.
		 */
		qdf_timer_init(soc->osdev, &soc->lmac_reap_timer,
			       dp_service_lmac_rings, (void *)soc,
			       QDF_TIMER_TYPE_WAKE_APPS);
		/* Flag checked by dp_reap_timer_deinit() before stop/free */
		soc->lmac_timer_init = 1;
		qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
	}
}
5173 
static void dp_reap_timer_deinit(struct dp_soc *soc)
{
	/* Only tear down a timer that dp_reap_timer_init() actually armed */
	if (soc->lmac_timer_init) {
		/* Stop before free, then clear the flag for re-init */
		qdf_timer_stop(&soc->lmac_reap_timer);
		qdf_timer_free(&soc->lmac_reap_timer);
		soc->lmac_timer_init = 0;
	}
}
5182 #endif
5183 
5184 #ifdef QCA_HOST2FW_RXBUF_RING
5185 /*
5186  * dp_rxdma_ring_alloc() - allocate the RXDMA rings
5187  * @soc: data path SoC handle
5188  * @pdev: Physical device handle
5189  *
5190  * Return: 0 - success, > 0 - failure
5191  */
5192 static int dp_rxdma_ring_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
5193 {
5194 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
5195 	int max_mac_rings;
5196 	int i;
5197 	int ring_size;
5198 
5199 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
5200 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
5201 	ring_size =  wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx);
5202 
5203 	for (i = 0; i < max_mac_rings; i++) {
5204 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
5205 		if (dp_srng_alloc(soc, &pdev->rx_mac_buf_ring[i],
5206 				  RXDMA_BUF, ring_size, 0)) {
5207 			dp_init_err("%pK: failed rx mac ring setup", soc);
5208 			return QDF_STATUS_E_FAILURE;
5209 		}
5210 	}
5211 	return QDF_STATUS_SUCCESS;
5212 }
5213 
5214 /*
5215  * dp_rxdma_ring_setup() - configure the RXDMA rings
5216  * @soc: data path SoC handle
5217  * @pdev: Physical device handle
5218  *
5219  * Return: 0 - success, > 0 - failure
5220  */
5221 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
5222 {
5223 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
5224 	int max_mac_rings;
5225 	int i;
5226 
5227 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
5228 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
5229 
5230 	for (i = 0; i < max_mac_rings; i++) {
5231 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
5232 		if (dp_srng_init(soc, &pdev->rx_mac_buf_ring[i],
5233 				 RXDMA_BUF, 1, i)) {
5234 			dp_init_err("%pK: failed rx mac ring setup", soc);
5235 			return QDF_STATUS_E_FAILURE;
5236 		}
5237 	}
5238 	return QDF_STATUS_SUCCESS;
5239 }
5240 
5241 /*
5242  * dp_rxdma_ring_cleanup() - Deinit the RXDMA rings and reap timer
5243  * @soc: data path SoC handle
5244  * @pdev: Physical device handle
5245  *
5246  * Return: void
5247  */
5248 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
5249 {
5250 	int i;
5251 
5252 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
5253 		dp_srng_deinit(soc, &pdev->rx_mac_buf_ring[i], RXDMA_BUF, 1);
5254 
5255 	dp_reap_timer_deinit(soc);
5256 }
5257 
5258 /*
5259  * dp_rxdma_ring_free() - Free the RXDMA rings
5260  * @pdev: Physical device handle
5261  *
5262  * Return: void
5263  */
5264 static void dp_rxdma_ring_free(struct dp_pdev *pdev)
5265 {
5266 	int i;
5267 
5268 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
5269 		dp_srng_free(pdev->soc, &pdev->rx_mac_buf_ring[i]);
5270 }
5271 
5272 #else
/* Stub: QCA_HOST2FW_RXBUF_RING disabled - no host RX MAC buffer rings */
static int dp_rxdma_ring_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
5277 
/* Stub: QCA_HOST2FW_RXBUF_RING disabled - nothing to configure */
static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
5282 
/* QCA_HOST2FW_RXBUF_RING disabled: no rings, but the reap timer still
 * needs teardown to mirror dp_reap_timer_init().
 */
static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	dp_reap_timer_deinit(soc);
}
5287 
/* Stub: QCA_HOST2FW_RXBUF_RING disabled - nothing was allocated */
static void dp_rxdma_ring_free(struct dp_pdev *pdev)
{
}
5291 #endif
5292 
5293 /**
5294  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
5295  * @pdev - DP_PDEV handle
5296  *
5297  * Return: void
5298  */
5299 static inline void
5300 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
5301 {
5302 	uint8_t map_id;
5303 	struct dp_soc *soc = pdev->soc;
5304 
5305 	if (!soc)
5306 		return;
5307 
5308 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
5309 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
5310 			     default_dscp_tid_map,
5311 			     sizeof(default_dscp_tid_map));
5312 	}
5313 
5314 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
5315 		hal_tx_set_dscp_tid_map(soc->hal_soc,
5316 					default_dscp_tid_map,
5317 					map_id);
5318 	}
5319 }
5320 
5321 /**
5322  * dp_pcp_tid_map_setup(): Initialize the pcp-tid maps
5323  * @pdev - DP_PDEV handle
5324  *
5325  * Return: void
5326  */
5327 static inline void
5328 dp_pcp_tid_map_setup(struct dp_pdev *pdev)
5329 {
5330 	struct dp_soc *soc = pdev->soc;
5331 
5332 	if (!soc)
5333 		return;
5334 
5335 	qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
5336 		     sizeof(default_pcp_tid_map));
5337 	hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
5338 }
5339 
5340 #ifdef IPA_OFFLOAD
5341 /**
5342  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
5343  * @soc: data path instance
5344  * @pdev: core txrx pdev context
5345  *
5346  * Return: QDF_STATUS_SUCCESS: success
5347  *         QDF_STATUS_E_RESOURCES: Error return
5348  */
5349 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5350 					   struct dp_pdev *pdev)
5351 {
5352 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5353 	int entries;
5354 
5355 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5356 		soc_cfg_ctx = soc->wlan_cfg_ctx;
5357 		entries =
5358 			wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
5359 
5360 		/* Setup second Rx refill buffer ring */
5361 		if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
5362 				  entries, 0)) {
5363 			dp_init_err("%pK: dp_srng_alloc failed second"
5364 				    "rx refill ring", soc);
5365 			return QDF_STATUS_E_FAILURE;
5366 		}
5367 	}
5368 
5369 	return QDF_STATUS_SUCCESS;
5370 }
5371 
5372 #ifdef IPA_WDI3_VLAN_SUPPORT
5373 static int dp_setup_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5374 					       struct dp_pdev *pdev)
5375 {
5376 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5377 	int entries;
5378 
5379 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5380 	    wlan_ipa_is_vlan_enabled()) {
5381 		soc_cfg_ctx = soc->wlan_cfg_ctx;
5382 		entries =
5383 			wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
5384 
5385 		/* Setup second Rx refill buffer ring */
5386 		if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF,
5387 				  entries, 0)) {
5388 			dp_init_err("%pK: alloc failed for 3rd rx refill ring",
5389 				    soc);
5390 			return QDF_STATUS_E_FAILURE;
5391 		}
5392 	}
5393 
5394 	return QDF_STATUS_SUCCESS;
5395 }
5396 
5397 static int dp_init_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5398 					      struct dp_pdev *pdev)
5399 {
5400 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5401 	    wlan_ipa_is_vlan_enabled()) {
5402 		if (dp_srng_init(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF,
5403 				 IPA_RX_ALT_REFILL_BUF_RING_IDX,
5404 				 pdev->pdev_id)) {
5405 			dp_init_err("%pK: init failed for 3rd rx refill ring",
5406 				    soc);
5407 			return QDF_STATUS_E_FAILURE;
5408 		}
5409 	}
5410 
5411 	return QDF_STATUS_SUCCESS;
5412 }
5413 
5414 static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5415 						 struct dp_pdev *pdev)
5416 {
5417 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5418 	    wlan_ipa_is_vlan_enabled())
5419 		dp_srng_deinit(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF, 0);
5420 }
5421 
5422 static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5423 					       struct dp_pdev *pdev)
5424 {
5425 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5426 	    wlan_ipa_is_vlan_enabled())
5427 		dp_srng_free(soc, &pdev->rx_refill_buf_ring3);
5428 }
5429 #else
/* Stub: IPA_WDI3_VLAN_SUPPORT disabled - no third refill ring */
static int dp_setup_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
					       struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
5435 
/* Stub: IPA_WDI3_VLAN_SUPPORT disabled - nothing to initialize */
static int dp_init_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
					      struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
5441 
/* Stub: IPA_WDI3_VLAN_SUPPORT disabled - nothing to deinitialize */
static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
						 struct dp_pdev *pdev)
{
}
5446 
/* Stub: IPA_WDI3_VLAN_SUPPORT disabled - nothing to free */
static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
					       struct dp_pdev *pdev)
{
}
5451 #endif
5452 
5453 /**
5454  * dp_deinit_ipa_rx_refill_buf_ring - deinit second Rx refill buffer ring
5455  * @soc: data path instance
5456  * @pdev: core txrx pdev context
5457  *
5458  * Return: void
5459  */
5460 static void dp_deinit_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5461 					     struct dp_pdev *pdev)
5462 {
5463 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
5464 		dp_srng_deinit(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 0);
5465 }
5466 
5467 /**
5468  * dp_init_ipa_rx_refill_buf_ring - Init second Rx refill buffer ring
5469  * @soc: data path instance
5470  * @pdev: core txrx pdev context
5471  *
5472  * Return: QDF_STATUS_SUCCESS: success
5473  *         QDF_STATUS_E_RESOURCES: Error return
5474  */
5475 static int dp_init_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5476 					  struct dp_pdev *pdev)
5477 {
5478 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5479 		if (dp_srng_init(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
5480 				 IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id)) {
5481 			dp_init_err("%pK: dp_srng_init failed second"
5482 				    "rx refill ring", soc);
5483 			return QDF_STATUS_E_FAILURE;
5484 		}
5485 	}
5486 
5487 	if (dp_init_ipa_rx_alt_refill_buf_ring(soc, pdev)) {
5488 		dp_deinit_ipa_rx_refill_buf_ring(soc, pdev);
5489 		return QDF_STATUS_E_FAILURE;
5490 	}
5491 
5492 	return QDF_STATUS_SUCCESS;
5493 }
5494 
5495 /**
5496  * dp_free_ipa_rx_refill_buf_ring - free second Rx refill buffer ring
5497  * @soc: data path instance
5498  * @pdev: core txrx pdev context
5499  *
5500  * Return: void
5501  */
5502 static void dp_free_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5503 					   struct dp_pdev *pdev)
5504 {
5505 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
5506 		dp_srng_free(soc, &pdev->rx_refill_buf_ring2);
5507 }
5508 #else
/* Stub: IPA_OFFLOAD disabled - no second refill ring */
static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					   struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
5514 
/* Stub: IPA_OFFLOAD disabled - nothing to initialize */
static int dp_init_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					  struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
5520 
/* Stub: IPA_OFFLOAD disabled - nothing to deinitialize */
static void dp_deinit_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					     struct dp_pdev *pdev)
{
}
5525 
/* Stub: IPA_OFFLOAD disabled - nothing to free */
static void dp_free_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					   struct dp_pdev *pdev)
{
}
5530 
/* Stub: IPA_OFFLOAD disabled - no third (VLAN) refill ring */
static int dp_setup_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
					       struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
5536 
/* Stub: IPA_OFFLOAD disabled - nothing to deinitialize */
static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
						 struct dp_pdev *pdev)
{
}
5541 
/* Stub: IPA_OFFLOAD disabled - nothing to free */
static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
					       struct dp_pdev *pdev)
{
}
5546 #endif
5547 
5548 #ifdef WLAN_FEATURE_DP_CFG_EVENT_HISTORY
5549 
5550 /**
5551  * dp_soc_cfg_history_attach() - Allocate and attach datapath config events
5552  *				 history
5553  * @soc: DP soc handle
5554  *
5555  * Return: None
5556  */
5557 static void dp_soc_cfg_history_attach(struct dp_soc *soc)
5558 {
5559 	dp_soc_frag_history_attach(soc, &soc->cfg_event_history,
5560 				   DP_CFG_EVT_HIST_MAX_SLOTS,
5561 				   DP_CFG_EVT_HIST_PER_SLOT_MAX,
5562 				   sizeof(struct dp_cfg_event),
5563 				   true, DP_CFG_EVENT_HIST_TYPE);
5564 }
5565 
5566 /**
5567  * dp_soc_cfg_history_detach() - Detach and free DP config events history
5568  * @soc: DP soc handle
5569  *
5570  * Return: none
5571  */
5572 static void dp_soc_cfg_history_detach(struct dp_soc *soc)
5573 {
5574 	dp_soc_frag_history_detach(soc, &soc->cfg_event_history,
5575 				   DP_CFG_EVT_HIST_MAX_SLOTS,
5576 				   true, DP_CFG_EVENT_HIST_TYPE);
5577 }
5578 
5579 #else
/* Stub: WLAN_FEATURE_DP_CFG_EVENT_HISTORY disabled */
static void dp_soc_cfg_history_attach(struct dp_soc *soc)
{
}
5583 
/* Stub: WLAN_FEATURE_DP_CFG_EVENT_HISTORY disabled */
static void dp_soc_cfg_history_detach(struct dp_soc *soc)
{
}
5587 #endif
5588 
5589 #ifdef DP_TX_HW_DESC_HISTORY
5590 /**
5591  * dp_soc_tx_hw_desc_history_attach - Attach TX HW descriptor history
5592  *
5593  * @soc: DP soc handle
5594  *
5595  * Return: None
5596  */
5597 static void dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc)
5598 {
5599 	dp_soc_frag_history_attach(soc, &soc->tx_hw_desc_history,
5600 				   DP_TX_HW_DESC_HIST_MAX_SLOTS,
5601 				   DP_TX_HW_DESC_HIST_PER_SLOT_MAX,
5602 				   sizeof(struct dp_tx_hw_desc_evt),
5603 				   true, DP_TX_HW_DESC_HIST_TYPE);
5604 }
5605 
/* Counterpart of dp_soc_tx_hw_desc_history_attach(); frees all slots */
static void dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc)
{
	dp_soc_frag_history_detach(soc, &soc->tx_hw_desc_history,
				   DP_TX_HW_DESC_HIST_MAX_SLOTS,
				   true, DP_TX_HW_DESC_HIST_TYPE);
}
5612 
5613 #else /* DP_TX_HW_DESC_HISTORY */
/* Stub: DP_TX_HW_DESC_HISTORY disabled */
static inline void
dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc)
{
}
5618 
/* Stub: DP_TX_HW_DESC_HISTORY disabled */
static inline void
dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc)
{
}
5623 #endif /* DP_TX_HW_DESC_HISTORY */
5624 
5625 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
5626 #ifndef RX_DEFRAG_DO_NOT_REINJECT
5627 /**
5628  * dp_soc_rx_reinject_ring_history_attach - Attach the reo reinject ring
5629  *					    history.
5630  * @soc: DP soc handle
5631  *
5632  * Return: None
5633  */
5634 static void dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
5635 {
5636 	soc->rx_reinject_ring_history =
5637 		dp_context_alloc_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
5638 				     sizeof(struct dp_rx_reinject_history));
5639 	if (soc->rx_reinject_ring_history)
5640 		qdf_atomic_init(&soc->rx_reinject_ring_history->index);
5641 }
5642 #else /* RX_DEFRAG_DO_NOT_REINJECT */
/* Stub: RX_DEFRAG_DO_NOT_REINJECT set - no reinject history needed */
static inline void
dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
{
}
5647 #endif /* RX_DEFRAG_DO_NOT_REINJECT */
5648 
5649 /**
5650  * dp_soc_rx_history_attach() - Attach the ring history record buffers
5651  * @soc: DP soc structure
5652  *
5653  * This function allocates the memory for recording the rx ring, rx error
5654  * ring and the reinject ring entries. There is no error returned in case
5655  * of allocation failure since the record function checks if the history is
5656  * initialized or not. We do not want to fail the driver load in case of
5657  * failure to allocate memory for debug history.
5658  *
5659  * Returns: None
5660  */
5661 static void dp_soc_rx_history_attach(struct dp_soc *soc)
5662 {
5663 	int i;
5664 	uint32_t rx_ring_hist_size;
5665 	uint32_t rx_refill_ring_hist_size;
5666 
5667 	rx_ring_hist_size = sizeof(*soc->rx_ring_history[0]);
5668 	rx_refill_ring_hist_size = sizeof(*soc->rx_refill_ring_history[0]);
5669 
5670 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
5671 		soc->rx_ring_history[i] = dp_context_alloc_mem(
5672 				soc, DP_RX_RING_HIST_TYPE, rx_ring_hist_size);
5673 		if (soc->rx_ring_history[i])
5674 			qdf_atomic_init(&soc->rx_ring_history[i]->index);
5675 	}
5676 
5677 	soc->rx_err_ring_history = dp_context_alloc_mem(
5678 			soc, DP_RX_ERR_RING_HIST_TYPE, rx_ring_hist_size);
5679 	if (soc->rx_err_ring_history)
5680 		qdf_atomic_init(&soc->rx_err_ring_history->index);
5681 
5682 	dp_soc_rx_reinject_ring_history_attach(soc);
5683 
5684 	for (i = 0; i < MAX_PDEV_CNT; i++) {
5685 		soc->rx_refill_ring_history[i] = dp_context_alloc_mem(
5686 						soc,
5687 						DP_RX_REFILL_RING_HIST_TYPE,
5688 						rx_refill_ring_hist_size);
5689 
5690 		if (soc->rx_refill_ring_history[i])
5691 			qdf_atomic_init(&soc->rx_refill_ring_history[i]->index);
5692 	}
5693 }
5694 
5695 static void dp_soc_rx_history_detach(struct dp_soc *soc)
5696 {
5697 	int i;
5698 
5699 	for (i = 0; i < MAX_REO_DEST_RINGS; i++)
5700 		dp_context_free_mem(soc, DP_RX_RING_HIST_TYPE,
5701 				    soc->rx_ring_history[i]);
5702 
5703 	dp_context_free_mem(soc, DP_RX_ERR_RING_HIST_TYPE,
5704 			    soc->rx_err_ring_history);
5705 
5706 	/*
5707 	 * No need for a featurized detach since qdf_mem_free takes
5708 	 * care of NULL pointer.
5709 	 */
5710 	dp_context_free_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
5711 			    soc->rx_reinject_ring_history);
5712 
5713 	for (i = 0; i < MAX_PDEV_CNT; i++)
5714 		dp_context_free_mem(soc, DP_RX_REFILL_RING_HIST_TYPE,
5715 				    soc->rx_refill_ring_history[i]);
5716 }
5717 
5718 #else
/* Stub: WLAN_FEATURE_DP_RX_RING_HISTORY disabled */
static inline void dp_soc_rx_history_attach(struct dp_soc *soc)
{
}
5722 
/* Stub: WLAN_FEATURE_DP_RX_RING_HISTORY disabled */
static inline void dp_soc_rx_history_detach(struct dp_soc *soc)
{
}
5726 #endif
5727 
5728 #ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
5729 /**
5730  * dp_soc_mon_status_ring_history_attach() - Attach the monitor status
5731  *					     buffer record history.
5732  * @soc: DP soc handle
5733  *
5734  * This function allocates memory to track the event for a monitor
5735  * status buffer, before its parsed and freed.
5736  *
5737  * Return: None
5738  */
5739 static void dp_soc_mon_status_ring_history_attach(struct dp_soc *soc)
5740 {
5741 	soc->mon_status_ring_history = dp_context_alloc_mem(soc,
5742 				DP_MON_STATUS_BUF_HIST_TYPE,
5743 				sizeof(struct dp_mon_status_ring_history));
5744 	if (!soc->mon_status_ring_history) {
5745 		dp_err("Failed to alloc memory for mon status ring history");
5746 		return;
5747 	}
5748 }
5749 
5750 /**
5751  * dp_soc_mon_status_ring_history_detach() - Detach the monitor status buffer
5752  *					     record history.
5753  * @soc: DP soc handle
5754  *
5755  * Return: None
5756  */
5757 static void dp_soc_mon_status_ring_history_detach(struct dp_soc *soc)
5758 {
5759 	dp_context_free_mem(soc, DP_MON_STATUS_BUF_HIST_TYPE,
5760 			    soc->mon_status_ring_history);
5761 }
5762 #else
/* Stub: WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY disabled */
static void dp_soc_mon_status_ring_history_attach(struct dp_soc *soc)
{
}
5766 
/* Stub: WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY disabled */
static void dp_soc_mon_status_ring_history_detach(struct dp_soc *soc)
{
}
5770 #endif
5771 
5772 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
5773 /**
5774  * dp_soc_tx_history_attach() - Attach the ring history record buffers
5775  * @soc: DP soc structure
5776  *
5777  * This function allocates the memory for recording the tx tcl ring and
5778  * the tx comp ring entries. There is no error returned in case
5779  * of allocation failure since the record function checks if the history is
5780  * initialized or not. We do not want to fail the driver load in case of
5781  * failure to allocate memory for debug history.
5782  *
5783  * Returns: None
5784  */
5785 static void dp_soc_tx_history_attach(struct dp_soc *soc)
5786 {
5787 	dp_soc_frag_history_attach(soc, &soc->tx_tcl_history,
5788 				   DP_TX_TCL_HIST_MAX_SLOTS,
5789 				   DP_TX_TCL_HIST_PER_SLOT_MAX,
5790 				   sizeof(struct dp_tx_desc_event),
5791 				   true, DP_TX_TCL_HIST_TYPE);
5792 	dp_soc_frag_history_attach(soc, &soc->tx_comp_history,
5793 				   DP_TX_COMP_HIST_MAX_SLOTS,
5794 				   DP_TX_COMP_HIST_PER_SLOT_MAX,
5795 				   sizeof(struct dp_tx_desc_event),
5796 				   true, DP_TX_COMP_HIST_TYPE);
5797 }
5798 
5799 /**
5800  * dp_soc_tx_history_detach() - Detach the ring history record buffers
5801  * @soc: DP soc structure
5802  *
5803  * This function frees the memory for recording the tx tcl ring and
5804  * the tx comp ring entries.
5805  *
5806  * Returns: None
5807  */
5808 static void dp_soc_tx_history_detach(struct dp_soc *soc)
5809 {
5810 	dp_soc_frag_history_detach(soc, &soc->tx_tcl_history,
5811 				   DP_TX_TCL_HIST_MAX_SLOTS,
5812 				   true, DP_TX_TCL_HIST_TYPE);
5813 	dp_soc_frag_history_detach(soc, &soc->tx_comp_history,
5814 				   DP_TX_COMP_HIST_MAX_SLOTS,
5815 				   true, DP_TX_COMP_HIST_TYPE);
5816 }
5817 
5818 #else
/* Stub: WLAN_FEATURE_DP_TX_DESC_HISTORY disabled */
static inline void dp_soc_tx_history_attach(struct dp_soc *soc)
{
}
5822 
/* Stub: WLAN_FEATURE_DP_TX_DESC_HISTORY disabled */
static inline void dp_soc_tx_history_detach(struct dp_soc *soc)
{
}
5826 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
5827 
5828 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
5829 /**
5830  * dp_rx_fst_attach_wrapper() - wrapper API for dp_rx_fst_attach
5831  * @soc: SoC handle
5832  * @pdev: Pdev handle
5833  *
5834  * Return: Handle to flow search table entry
5835  */
5836 QDF_STATUS
5837 dp_rx_fst_attach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev)
5838 {
5839 	struct dp_rx_fst *rx_fst = NULL;
5840 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
5841 
5842 	/* for Lithium the below API is not registered
5843 	 * hence fst attach happens for each pdev
5844 	 */
5845 	if (!soc->arch_ops.dp_get_rx_fst)
5846 		return dp_rx_fst_attach(soc, pdev);
5847 
5848 	rx_fst = soc->arch_ops.dp_get_rx_fst(soc);
5849 
5850 	/* for BE the FST attach is called only once per
5851 	 * ML context. if rx_fst is already registered
5852 	 * increase the ref count and return.
5853 	 */
5854 	if (rx_fst) {
5855 		soc->rx_fst = rx_fst;
5856 		pdev->rx_fst = rx_fst;
5857 		soc->arch_ops.dp_rx_fst_ref(soc);
5858 	} else {
5859 		ret = dp_rx_fst_attach(soc, pdev);
5860 		if ((ret != QDF_STATUS_SUCCESS) &&
5861 		    (ret != QDF_STATUS_E_NOSUPPORT))
5862 			return ret;
5863 
5864 		soc->arch_ops.dp_set_rx_fst(soc, soc->rx_fst);
5865 		soc->arch_ops.dp_rx_fst_ref(soc);
5866 	}
5867 	return ret;
5868 }
5869 
5870 /**
5871  * dp_rx_fst_detach_wrapper() - wrapper API for dp_rx_fst_detach
5872  * @soc: SoC handle
5873  * @pdev: Pdev handle
5874  *
5875  * Return: None
5876  */
5877 void
5878 dp_rx_fst_detach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev)
5879 {
5880 	struct dp_rx_fst *rx_fst = NULL;
5881 
5882 	/* for Lithium the below API is not registered
5883 	 * hence fst detach happens for each pdev
5884 	 */
5885 	if (!soc->arch_ops.dp_get_rx_fst) {
5886 		dp_rx_fst_detach(soc, pdev);
5887 		return;
5888 	}
5889 
5890 	rx_fst = soc->arch_ops.dp_get_rx_fst(soc);
5891 
5892 	/* for BE the FST detach is called only when last
5893 	 * ref count reaches 1.
5894 	 */
5895 	if (rx_fst) {
5896 		if (soc->arch_ops.dp_rx_fst_deref(soc) == 1)
5897 			dp_rx_fst_detach(soc, pdev);
5898 	}
5899 	pdev->rx_fst = NULL;
5900 }
5901 #elif defined(WLAN_SUPPORT_RX_FISA)
5902 /**
5903  * dp_rx_fst_attach_wrapper() - wrapper API for dp_rx_fst_attach
5904  * @soc: SoC handle
5905  * @pdev: Pdev handle
5906  *
5907  * Return: Handle to flow search table entry
5908  */
5909 QDF_STATUS
5910 dp_rx_fst_attach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev)
5911 {
5912 	return dp_rx_fst_attach(soc, pdev);
5913 }
5914 
5915 /**
5916  * dp_rx_fst_detach_wrapper() - wrapper API for dp_rx_fst_detach
5917  * @soc: SoC handle
5918  * @pdev: Pdev handle
5919  *
5920  * Return: None
5921  */
5922 void
5923 dp_rx_fst_detach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev)
5924 {
5925 	dp_rx_fst_detach(soc, pdev);
5926 }
5927 #else
5928 /**
5929  * dp_rx_fst_attach_wrapper() - wrapper API for dp_rx_fst_attach
5930  * @soc: SoC handle
5931  * @pdev: Pdev handle
5932  *
5933  * Return: Handle to flow search table entry
5934  */
5935 QDF_STATUS
5936 dp_rx_fst_attach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev)
5937 {
5938 	return QDF_STATUS_SUCCESS;
5939 }
5940 
5941 /**
5942  * dp_rx_fst_detach_wrapper() - wrapper API for dp_rx_fst_detach
5943  * @soc: SoC handle
5944  * @pdev: Pdev handle
5945  *
5946  * Return: None
5947  */
5948 void
5949 dp_rx_fst_detach_wrapper(struct dp_soc *soc, struct dp_pdev *pdev)
5950 {
5951 }
5952 #endif
5953 
5954 /*
5955 * dp_pdev_attach_wifi3() - attach txrx pdev
5956 * @txrx_soc: Datapath SOC handle
5957 * @params: Params for PDEV attach
5958 *
5959 * Return: QDF_STATUS
5960 */
5961 static inline
5962 QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
5963 				struct cdp_pdev_attach_params *params)
5964 {
5965 	qdf_size_t pdev_context_size;
5966 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5967 	struct dp_pdev *pdev = NULL;
5968 	uint8_t pdev_id = params->pdev_id;
5969 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5970 	int nss_cfg;
5971 	QDF_STATUS ret;
5972 
5973 	pdev_context_size =
5974 		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_PDEV);
5975 	if (pdev_context_size)
5976 		pdev = dp_context_alloc_mem(soc, DP_PDEV_TYPE,
5977 					    pdev_context_size);
5978 
5979 	if (!pdev) {
5980 		dp_init_err("%pK: DP PDEV memory allocation failed",
5981 			    soc);
5982 		goto fail0;
5983 	}
5984 	wlan_minidump_log(pdev, sizeof(*pdev), soc->ctrl_psoc,
5985 			  WLAN_MD_DP_PDEV, "dp_pdev");
5986 
5987 	soc_cfg_ctx = soc->wlan_cfg_ctx;
5988 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
5989 
5990 	if (!pdev->wlan_cfg_ctx) {
5991 		dp_init_err("%pK: pdev cfg_attach failed", soc);
5992 		goto fail1;
5993 	}
5994 
5995 	/*
5996 	 * set nss pdev config based on soc config
5997 	 */
5998 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
5999 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
6000 					 (nss_cfg & (1 << pdev_id)));
6001 
6002 	pdev->soc = soc;
6003 	pdev->pdev_id = pdev_id;
6004 	soc->pdev_list[pdev_id] = pdev;
6005 
6006 	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
6007 	soc->pdev_count++;
6008 
6009 	/* Allocate memory for pdev srng rings */
6010 	if (dp_pdev_srng_alloc(pdev)) {
6011 		dp_init_err("%pK: dp_pdev_srng_alloc failed", soc);
6012 		goto fail2;
6013 	}
6014 
6015 	/* Setup second Rx refill buffer ring */
6016 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev)) {
6017 		dp_init_err("%pK: dp_srng_alloc failed rxrefill2 ring",
6018 			    soc);
6019 		goto fail3;
6020 	}
6021 
6022 	/* Allocate memory for pdev rxdma rings */
6023 	if (dp_rxdma_ring_alloc(soc, pdev)) {
6024 		dp_init_err("%pK: dp_rxdma_ring_alloc failed", soc);
6025 		goto fail4;
6026 	}
6027 
6028 	/* Rx specific init */
6029 	if (dp_rx_pdev_desc_pool_alloc(pdev)) {
6030 		dp_init_err("%pK: dp_rx_pdev_attach failed", soc);
6031 		goto fail4;
6032 	}
6033 
6034 	if (dp_monitor_pdev_attach(pdev)) {
6035 		dp_init_err("%pK: dp_monitor_pdev_attach failed", soc);
6036 		goto fail5;
6037 	}
6038 
6039 	soc->arch_ops.txrx_pdev_attach(pdev, params);
6040 
6041 	/* Setup third Rx refill buffer ring */
6042 	if (dp_setup_ipa_rx_alt_refill_buf_ring(soc, pdev)) {
6043 		dp_init_err("%pK: dp_srng_alloc failed rxrefill3 ring",
6044 			    soc);
6045 		goto fail6;
6046 	}
6047 
6048 	ret = dp_rx_fst_attach_wrapper(soc, pdev);
6049 	if ((ret != QDF_STATUS_SUCCESS) && (ret != QDF_STATUS_E_NOSUPPORT)) {
6050 		dp_init_err("%pK: RX FST attach failed: pdev %d err %d",
6051 			    soc, pdev_id, ret);
6052 		goto fail7;
6053 	}
6054 
6055 	return QDF_STATUS_SUCCESS;
6056 
6057 fail7:
6058 	dp_free_ipa_rx_alt_refill_buf_ring(soc, pdev);
6059 fail6:
6060 	dp_monitor_pdev_detach(pdev);
6061 fail5:
6062 	dp_rx_pdev_desc_pool_free(pdev);
6063 fail4:
6064 	dp_rxdma_ring_free(pdev);
6065 	dp_free_ipa_rx_refill_buf_ring(soc, pdev);
6066 fail3:
6067 	dp_pdev_srng_free(pdev);
6068 fail2:
6069 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
6070 fail1:
6071 	soc->pdev_list[pdev_id] = NULL;
6072 	qdf_mem_free(pdev);
6073 fail0:
6074 	return QDF_STATUS_E_FAILURE;
6075 }
6076 
6077 /**
6078  * dp_pdev_flush_pending_vdevs() - Flush all delete pending vdevs in pdev
6079  * @pdev: Datapath PDEV handle
6080  *
6081  * This is the last chance to flush all pending dp vdevs/peers,
6082  * some peer/vdev leak case like Non-SSR + peer unmap missing
6083  * will be covered here.
6084  *
6085  * Return: None
6086  */
6087 static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
6088 {
6089 	struct dp_soc *soc = pdev->soc;
6090 	struct dp_vdev *vdev_arr[MAX_VDEV_CNT] = {0};
6091 	uint32_t i = 0;
6092 	uint32_t num_vdevs = 0;
6093 	struct dp_vdev *vdev = NULL;
6094 
6095 	if (TAILQ_EMPTY(&soc->inactive_vdev_list))
6096 		return;
6097 
6098 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
6099 	TAILQ_FOREACH(vdev, &soc->inactive_vdev_list,
6100 		      inactive_list_elem) {
6101 		if (vdev->pdev != pdev)
6102 			continue;
6103 
6104 		vdev_arr[num_vdevs] = vdev;
6105 		num_vdevs++;
6106 		/* take reference to free */
6107 		dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CDP);
6108 	}
6109 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
6110 
6111 	for (i = 0; i < num_vdevs; i++) {
6112 		dp_vdev_flush_peers((struct cdp_vdev *)vdev_arr[i], 0, 0);
6113 		dp_vdev_unref_delete(soc, vdev_arr[i], DP_MOD_ID_CDP);
6114 	}
6115 }
6116 
#ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
/**
 * dp_vdev_stats_hw_offload_target_config() - Send HTT command to FW
 *                                          for enable/disable of HW vdev stats
 * @soc: Datapath soc handle
 * @pdev_id: INVALID_PDEV_ID for all pdevs or 0,1,2 for individual pdev
 * @enable: flag to represent enable/disable of hw vdev stats
 *
 * Return: none
 */
static void dp_vdev_stats_hw_offload_target_config(struct dp_soc *soc,
						   uint8_t pdev_id,
						   bool enable)
{
	/* Check SOC level config for HW offload vdev stats support */
	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
		dp_debug("%pK: HW vdev offload stats is disabled", soc);
		return;
	}

	/* Send HTT command to FW for enable of stats */
	dp_h2t_hw_vdev_stats_config_send(soc, pdev_id, enable, false, 0);
}

/**
 * dp_vdev_stats_hw_offload_target_clear() - Clear HW vdev stats on target
 * @soc: Datapath soc handle
 * @pdev_id: pdev_id (0,1,2)
 * @vdev_id_bitmask: bitmask with vdev_id(s) for which stats are to be
 *                   cleared on HW
 *
 * Return: none
 */
static
void dp_vdev_stats_hw_offload_target_clear(struct dp_soc *soc, uint8_t pdev_id,
					   uint64_t vdev_id_bitmask)
{
	/* Check SOC level config for HW offload vdev stats support */
	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
		dp_debug("%pK: HW vdev offload stats is disabled", soc);
		return;
	}

	/* Send HTT command to FW for reset of stats */
	dp_h2t_hw_vdev_stats_config_send(soc, pdev_id, true, true,
					 vdev_id_bitmask);
}
#else
/* HW vdev stats offload compiled out: no-op stubs */
static void
dp_vdev_stats_hw_offload_target_config(struct dp_soc *soc, uint8_t pdev_id,
				       bool enable)
{
}

static
void dp_vdev_stats_hw_offload_target_clear(struct dp_soc *soc, uint8_t pdev_id,
					   uint64_t vdev_id_bitmask)
{
}
#endif /*QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT */
6176 
6177 /**
6178  * dp_pdev_deinit() - Deinit txrx pdev
6179  * @txrx_pdev: Datapath PDEV handle
6180  * @force: Force deinit
6181  *
6182  * Return: None
6183  */
6184 static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
6185 {
6186 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
6187 	qdf_nbuf_t curr_nbuf, next_nbuf;
6188 
6189 	if (pdev->pdev_deinit)
6190 		return;
6191 
6192 	dp_tx_me_exit(pdev);
6193 	dp_rx_pdev_buffers_free(pdev);
6194 	dp_rx_pdev_desc_pool_deinit(pdev);
6195 	dp_pdev_bkp_stats_detach(pdev);
6196 	qdf_event_destroy(&pdev->fw_peer_stats_event);
6197 	qdf_event_destroy(&pdev->fw_stats_event);
6198 	qdf_event_destroy(&pdev->fw_obss_stats_event);
6199 	if (pdev->sojourn_buf)
6200 		qdf_nbuf_free(pdev->sojourn_buf);
6201 
6202 	dp_pdev_flush_pending_vdevs(pdev);
6203 	dp_tx_desc_flush(pdev, NULL, true);
6204 
6205 	qdf_spinlock_destroy(&pdev->tx_mutex);
6206 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
6207 
6208 	dp_monitor_pdev_deinit(pdev);
6209 
6210 	dp_pdev_srng_deinit(pdev);
6211 
6212 	dp_ipa_uc_detach(pdev->soc, pdev);
6213 	dp_deinit_ipa_rx_alt_refill_buf_ring(pdev->soc, pdev);
6214 	dp_deinit_ipa_rx_refill_buf_ring(pdev->soc, pdev);
6215 	dp_rxdma_ring_cleanup(pdev->soc, pdev);
6216 
6217 	curr_nbuf = pdev->invalid_peer_head_msdu;
6218 	while (curr_nbuf) {
6219 		next_nbuf = qdf_nbuf_next(curr_nbuf);
6220 		dp_rx_nbuf_free(curr_nbuf);
6221 		curr_nbuf = next_nbuf;
6222 	}
6223 	pdev->invalid_peer_head_msdu = NULL;
6224 	pdev->invalid_peer_tail_msdu = NULL;
6225 
6226 	dp_wdi_event_detach(pdev);
6227 	pdev->pdev_deinit = 1;
6228 }
6229 
6230 /**
6231  * dp_pdev_deinit_wifi3() - Deinit txrx pdev
6232  * @psoc: Datapath psoc handle
6233  * @pdev_id: Id of datapath PDEV handle
6234  * @force: Force deinit
6235  *
6236  * Return: QDF_STATUS
6237  */
6238 static QDF_STATUS
6239 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
6240 		     int force)
6241 {
6242 	struct dp_pdev *txrx_pdev;
6243 
6244 	txrx_pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
6245 						       pdev_id);
6246 
6247 	if (!txrx_pdev)
6248 		return QDF_STATUS_E_FAILURE;
6249 
6250 	dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force);
6251 
6252 	return QDF_STATUS_SUCCESS;
6253 }
6254 
6255 /*
6256  * dp_pdev_post_attach() - Do post pdev attach after dev_alloc_name
6257  * @txrx_pdev: Datapath PDEV handle
6258  *
6259  * Return: None
6260  */
6261 static void dp_pdev_post_attach(struct cdp_pdev *txrx_pdev)
6262 {
6263 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
6264 
6265 	dp_monitor_tx_capture_debugfs_init(pdev);
6266 
6267 	if (dp_pdev_htt_stats_dbgfs_init(pdev)) {
6268 		dp_init_err("%pK: Failed to initialize pdev HTT stats debugfs", pdev->soc);
6269 	}
6270 }
6271 
6272 /*
6273  * dp_pdev_post_attach_wifi3() - attach txrx pdev post
6274  * @psoc: Datapath soc handle
6275  * @pdev_id: pdev id of pdev
6276  *
6277  * Return: QDF_STATUS
6278  */
6279 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *soc,
6280 				     uint8_t pdev_id)
6281 {
6282 	struct dp_pdev *pdev;
6283 
6284 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
6285 						  pdev_id);
6286 
6287 	if (!pdev) {
6288 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
6289 			    (struct dp_soc *)soc, pdev_id);
6290 		return QDF_STATUS_E_FAILURE;
6291 	}
6292 
6293 	dp_pdev_post_attach((struct cdp_pdev *)pdev);
6294 	return QDF_STATUS_SUCCESS;
6295 }
6296 
6297 /*
6298  * dp_pdev_detach() - Complete rest of pdev detach
6299  * @txrx_pdev: Datapath PDEV handle
6300  * @force: Force deinit
6301  *
6302  * Return: None
6303  */
6304 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
6305 {
6306 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
6307 	struct dp_soc *soc = pdev->soc;
6308 
6309 	dp_rx_fst_detach_wrapper(soc, pdev);
6310 	dp_pdev_htt_stats_dbgfs_deinit(pdev);
6311 	dp_rx_pdev_desc_pool_free(pdev);
6312 	dp_monitor_pdev_detach(pdev);
6313 	dp_rxdma_ring_free(pdev);
6314 	dp_free_ipa_rx_refill_buf_ring(soc, pdev);
6315 	dp_free_ipa_rx_alt_refill_buf_ring(soc, pdev);
6316 	dp_pdev_srng_free(pdev);
6317 
6318 	soc->pdev_count--;
6319 	soc->pdev_list[pdev->pdev_id] = NULL;
6320 
6321 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
6322 	wlan_minidump_remove(pdev, sizeof(*pdev), soc->ctrl_psoc,
6323 			     WLAN_MD_DP_PDEV, "dp_pdev");
6324 	dp_context_free_mem(soc, DP_PDEV_TYPE, pdev);
6325 }
6326 
6327 /*
6328  * dp_pdev_detach_wifi3() - detach txrx pdev
6329  * @psoc: Datapath soc handle
6330  * @pdev_id: pdev id of pdev
6331  * @force: Force detach
6332  *
6333  * Return: QDF_STATUS
6334  */
6335 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
6336 				       int force)
6337 {
6338 	struct dp_pdev *pdev;
6339 	struct dp_soc *soc = (struct dp_soc *)psoc;
6340 
6341 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
6342 						  pdev_id);
6343 
6344 	if (!pdev) {
6345 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
6346 			    (struct dp_soc *)psoc, pdev_id);
6347 		return QDF_STATUS_E_FAILURE;
6348 	}
6349 
6350 	soc->arch_ops.txrx_pdev_detach(pdev);
6351 
6352 	dp_pdev_detach((struct cdp_pdev *)pdev, force);
6353 	return QDF_STATUS_SUCCESS;
6354 }
6355 
6356 /*
6357  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
6358  * @soc: DP SOC handle
6359  */
6360 #ifndef DP_UMAC_HW_RESET_SUPPORT
6361 static inline
6362 #endif
6363 void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
6364 {
6365 	struct reo_desc_list_node *desc;
6366 	struct dp_rx_tid *rx_tid;
6367 
6368 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
6369 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
6370 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
6371 		rx_tid = &desc->rx_tid;
6372 		qdf_mem_unmap_nbytes_single(soc->osdev,
6373 			rx_tid->hw_qdesc_paddr,
6374 			QDF_DMA_BIDIRECTIONAL,
6375 			rx_tid->hw_qdesc_alloc_size);
6376 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
6377 		qdf_mem_free(desc);
6378 	}
6379 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
6380 	qdf_list_destroy(&soc->reo_desc_freelist);
6381 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
6382 }
6383 
#ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
/*
 * dp_reo_desc_deferred_freelist_create() - Initialize the resources used
 *                                          for deferred reo desc list
 * @soc: Datapath soc handle
 *
 * Return: void
 */
static void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
{
	qdf_spinlock_create(&soc->reo_desc_deferred_freelist_lock);
	qdf_list_create(&soc->reo_desc_deferred_freelist,
			REO_DESC_DEFERRED_FREELIST_SIZE);
	soc->reo_desc_deferred_freelist_init = true;
}

/*
 * dp_reo_desc_deferred_freelist_destroy() - loop the deferred free list &
 *                                           free the leftover REO QDESCs
 * @soc: Datapath soc handle
 *
 * Return: void
 */
static void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
{
	struct reo_desc_deferred_freelist_node *desc;

	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
	/* Clear the init flag under the lock so no new entries are queued */
	soc->reo_desc_deferred_freelist_init = false;
	while (qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
		/* DMA-unmap the HW queue descriptor before freeing it */
		qdf_mem_unmap_nbytes_single(soc->osdev,
					    desc->hw_qdesc_paddr,
					    QDF_DMA_BIDIRECTIONAL,
					    desc->hw_qdesc_alloc_size);
		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
		qdf_mem_free(desc);
	}
	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);

	qdf_list_destroy(&soc->reo_desc_deferred_freelist);
	qdf_spinlock_destroy(&soc->reo_desc_deferred_freelist_lock);
}
#else
/* Deferred REO qdesc destroy compiled out: no-op stubs */
static inline void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
{
}

static inline void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
{
}
#endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
6436 
6437 /*
6438  * dp_soc_reset_txrx_ring_map() - reset tx ring map
6439  * @soc: DP SOC handle
6440  *
6441  */
6442 static void dp_soc_reset_txrx_ring_map(struct dp_soc *soc)
6443 {
6444 	uint32_t i;
6445 
6446 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
6447 		soc->tx_ring_map[i] = 0;
6448 }
6449 
6450 /*
6451  * dp_soc_print_inactive_objects() - prints inactive peer and vdev list
6452  * @soc: DP SOC handle
6453  *
6454  */
6455 static void dp_soc_print_inactive_objects(struct dp_soc *soc)
6456 {
6457 	struct dp_peer *peer = NULL;
6458 	struct dp_peer *tmp_peer = NULL;
6459 	struct dp_vdev *vdev = NULL;
6460 	struct dp_vdev *tmp_vdev = NULL;
6461 	int i = 0;
6462 	uint32_t count;
6463 
6464 	if (TAILQ_EMPTY(&soc->inactive_peer_list) &&
6465 	    TAILQ_EMPTY(&soc->inactive_vdev_list))
6466 		return;
6467 
6468 	TAILQ_FOREACH_SAFE(peer, &soc->inactive_peer_list,
6469 			   inactive_list_elem, tmp_peer) {
6470 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
6471 			count = qdf_atomic_read(&peer->mod_refs[i]);
6472 			if (count)
6473 				DP_PRINT_STATS("peer %pK Module id %u ==> %u",
6474 					       peer, i, count);
6475 		}
6476 	}
6477 
6478 	TAILQ_FOREACH_SAFE(vdev, &soc->inactive_vdev_list,
6479 			   inactive_list_elem, tmp_vdev) {
6480 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
6481 			count = qdf_atomic_read(&vdev->mod_refs[i]);
6482 			if (count)
6483 				DP_PRINT_STATS("vdev %pK Module id %u ==> %u",
6484 					       vdev, i, count);
6485 		}
6486 	}
6487 	QDF_BUG(0);
6488 }
6489 
6490 /**
6491  * dp_soc_deinit() - Deinitialize txrx SOC
6492  * @txrx_soc: Opaque DP SOC handle
6493  *
6494  * Return: None
6495  */
6496 static void dp_soc_deinit(void *txrx_soc)
6497 {
6498 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6499 	struct htt_soc *htt_soc = soc->htt_handle;
6500 
6501 	qdf_atomic_set(&soc->cmn_init_done, 0);
6502 
6503 	if (soc->arch_ops.txrx_soc_ppeds_stop)
6504 		soc->arch_ops.txrx_soc_ppeds_stop(soc);
6505 
6506 	soc->arch_ops.txrx_soc_deinit(soc);
6507 
6508 	dp_monitor_soc_deinit(soc);
6509 
6510 	/* free peer tables & AST tables allocated during peer_map_attach */
6511 	if (soc->peer_map_attach_success) {
6512 		dp_peer_find_detach(soc);
6513 		soc->arch_ops.txrx_peer_map_detach(soc);
6514 		soc->peer_map_attach_success = FALSE;
6515 	}
6516 
6517 	qdf_flush_work(&soc->htt_stats.work);
6518 	qdf_disable_work(&soc->htt_stats.work);
6519 
6520 	qdf_spinlock_destroy(&soc->htt_stats.lock);
6521 
6522 	dp_soc_reset_txrx_ring_map(soc);
6523 
6524 	dp_reo_desc_freelist_destroy(soc);
6525 	dp_reo_desc_deferred_freelist_destroy(soc);
6526 
6527 	DEINIT_RX_HW_STATS_LOCK(soc);
6528 
6529 	qdf_spinlock_destroy(&soc->ast_lock);
6530 
6531 	dp_peer_mec_spinlock_destroy(soc);
6532 
6533 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
6534 
6535 	qdf_nbuf_queue_free(&soc->invalid_buf_queue);
6536 
6537 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
6538 
6539 	qdf_spinlock_destroy(&soc->vdev_map_lock);
6540 
6541 	dp_reo_cmdlist_destroy(soc);
6542 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
6543 
6544 	dp_soc_tx_desc_sw_pools_deinit(soc);
6545 
6546 	dp_soc_srng_deinit(soc);
6547 
6548 	dp_hw_link_desc_ring_deinit(soc);
6549 
6550 	dp_soc_print_inactive_objects(soc);
6551 	qdf_spinlock_destroy(&soc->inactive_peer_list_lock);
6552 	qdf_spinlock_destroy(&soc->inactive_vdev_list_lock);
6553 
6554 	htt_soc_htc_dealloc(soc->htt_handle);
6555 
6556 	htt_soc_detach(htt_soc);
6557 
6558 	/* Free wbm sg list and reset flags in down path */
6559 	dp_rx_wbm_sg_list_deinit(soc);
6560 
6561 	wlan_minidump_remove(soc, sizeof(*soc), soc->ctrl_psoc,
6562 			     WLAN_MD_DP_SOC, "dp_soc");
6563 }
6564 
6565 /**
6566  * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
6567  * @txrx_soc: Opaque DP SOC handle
6568  *
6569  * Return: None
6570  */
6571 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc)
6572 {
6573 	dp_soc_deinit(txrx_soc);
6574 }
6575 
6576 /*
6577  * dp_soc_detach() - Detach rest of txrx SOC
6578  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
6579  *
6580  * Return: None
6581  */
6582 static void dp_soc_detach(struct cdp_soc_t *txrx_soc)
6583 {
6584 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6585 
6586 	soc->arch_ops.txrx_soc_detach(soc);
6587 
6588 	dp_runtime_deinit();
6589 
6590 	dp_sysfs_deinitialize_stats(soc);
6591 	dp_soc_swlm_detach(soc);
6592 	dp_soc_tx_desc_sw_pools_free(soc);
6593 	dp_soc_srng_free(soc);
6594 	dp_hw_link_desc_ring_free(soc);
6595 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
6596 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
6597 	dp_soc_tx_hw_desc_history_detach(soc);
6598 	dp_soc_tx_history_detach(soc);
6599 	dp_soc_mon_status_ring_history_detach(soc);
6600 	dp_soc_rx_history_detach(soc);
6601 	dp_soc_cfg_history_detach(soc);
6602 
6603 	if (!dp_monitor_modularized_enable()) {
6604 		dp_mon_soc_detach_wrapper(soc);
6605 	}
6606 
6607 	qdf_mem_free(soc->cdp_soc.ops);
6608 	qdf_mem_free(soc);
6609 }
6610 
6611 /*
6612  * dp_soc_detach_wifi3() - Detach txrx SOC
6613  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
6614  *
6615  * Return: None
6616  */
6617 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc)
6618 {
6619 	dp_soc_detach(txrx_soc);
6620 }
6621 
6622 /*
6623  * dp_rxdma_ring_config() - configure the RX DMA rings
6624  *
6625  * This function is used to configure the MAC rings.
6626  * On MCL host provides buffers in Host2FW ring
6627  * FW refills (copies) buffers to the ring and updates
6628  * ring_idx in register
6629  *
6630  * @soc: data path SoC handle
6631  *
6632  * Return: zero on success, non-zero on failure
6633  */
6634 #ifdef QCA_HOST2FW_RXBUF_RING
6635 static inline void
6636 dp_htt_setup_rxdma_err_dst_ring(struct dp_soc *soc, int mac_id,
6637 				int lmac_id)
6638 {
6639 	if (soc->rxdma_err_dst_ring[lmac_id].hal_srng)
6640 		htt_srng_setup(soc->htt_handle, mac_id,
6641 			       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
6642 			       RXDMA_DST);
6643 }
6644 
6645 #ifdef IPA_WDI3_VLAN_SUPPORT
6646 static inline
6647 void dp_rxdma_setup_refill_ring3(struct dp_soc *soc,
6648 				 struct dp_pdev *pdev,
6649 				 uint8_t idx)
6650 {
6651 	if (pdev->rx_refill_buf_ring3.hal_srng)
6652 		htt_srng_setup(soc->htt_handle, idx,
6653 			       pdev->rx_refill_buf_ring3.hal_srng,
6654 			       RXDMA_BUF);
6655 }
6656 #else
6657 static inline
6658 void dp_rxdma_setup_refill_ring3(struct dp_soc *soc,
6659 				 struct dp_pdev *pdev,
6660 				 uint8_t idx)
6661 { }
6662 #endif
6663 
/* MCL variant: host fills the Host2FW rx_mac_buf rings per mac */
static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (pdev) {
			int mac_id;
			int max_mac_rings =
				 wlan_cfg_get_num_mac_rings
				(pdev->wlan_cfg_ctx);
			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);

			/* Primary rx refill ring for this pdev's lmac */
			htt_srng_setup(soc->htt_handle, i,
				       soc->rx_refill_buf_ring[lmac_id]
				       .hal_srng,
				       RXDMA_BUF);

			/* Optional second refill ring (IPA) */
			if (pdev->rx_refill_buf_ring2.hal_srng)
				htt_srng_setup(soc->htt_handle, i,
					       pdev->rx_refill_buf_ring2
					       .hal_srng,
					       RXDMA_BUF);

			/* Optional third refill ring (IPA WDI3 VLAN) */
			dp_rxdma_setup_refill_ring3(soc, pdev, i);

			dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);
			dp_err("pdev_id %d max_mac_rings %d",
			       pdev->pdev_id, max_mac_rings);

			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev =
					dp_get_mac_id_for_pdev(mac_id,
							       pdev->pdev_id);
				/*
				 * Obtain lmac id from pdev to access the LMAC
				 * ring in soc context
				 */
				lmac_id =
				dp_get_lmac_id_for_pdev_id(soc,
							   mac_id,
							   pdev->pdev_id);
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					 QDF_TRACE_LEVEL_ERROR,
					 FL("mac_id %d"), mac_for_pdev);

				htt_srng_setup(soc->htt_handle, mac_for_pdev,
					 pdev->rx_mac_buf_ring[mac_id]
						.hal_srng,
					 RXDMA_BUF);

				if (!soc->rxdma2sw_rings_not_supported)
					dp_htt_setup_rxdma_err_dst_ring(soc,
						mac_for_pdev, lmac_id);

				/* Configure monitor mode rings */
				status = dp_monitor_htt_srng_setup(soc, pdev,
								   lmac_id,
								   mac_for_pdev);
				if (status != QDF_STATUS_SUCCESS) {
					dp_err("Failed to send htt monitor messages to target");
					return status;
				}

			}
		}
	}

	dp_reap_timer_init(soc);
	return status;
}
6737 #else
/* This is only for WIN */
static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	int mac_for_pdev;
	int lmac_id;

	/* Configure monitor mode rings */
	dp_monitor_soc_htt_srng_setup(soc);

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev =  soc->pdev_list[i];

		if (!pdev)
			continue;

		/* On WIN the pdev index maps 1:1 to the target mac id */
		mac_for_pdev = i;
		lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);

		if (soc->rx_refill_buf_ring[lmac_id].hal_srng)
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				       soc->rx_refill_buf_ring[lmac_id].
				       hal_srng, RXDMA_BUF);

		/* Configure monitor mode rings */
		dp_monitor_htt_srng_setup(soc, pdev,
					  lmac_id,
					  mac_for_pdev);
		if (!soc->rxdma2sw_rings_not_supported)
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
				       RXDMA_DST);
	}

	dp_reap_timer_init(soc);
	return status;
}
6776 #endif
6777 
6778 /*
6779  * dp_rx_target_fst_config() - configure the RXOLE Flow Search Engine
6780  *
6781  * This function is used to configure the FSE HW block in RX OLE on a
6782  * per pdev basis. Here, we will be programming parameters related to
6783  * the Flow Search Table.
6784  *
6785  * @soc: data path SoC handle
6786  *
6787  * Return: zero on success, non-zero on failure
6788  */
6789 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
6790 static QDF_STATUS
6791 dp_rx_target_fst_config(struct dp_soc *soc)
6792 {
6793 	int i;
6794 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6795 
6796 	for (i = 0; i < MAX_PDEV_CNT; i++) {
6797 		struct dp_pdev *pdev = soc->pdev_list[i];
6798 
6799 		/* Flow search is not enabled if NSS offload is enabled */
6800 		if (pdev &&
6801 		    !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
6802 			status = dp_rx_flow_send_fst_fw_setup(pdev->soc, pdev);
6803 			if (status != QDF_STATUS_SUCCESS)
6804 				break;
6805 		}
6806 	}
6807 	return status;
6808 }
6809 #elif defined(WLAN_SUPPORT_RX_FISA)
6810 /**
6811  * dp_rx_target_fst_config() - Configure RX OLE FSE engine in HW
6812  * @soc: SoC handle
6813  *
6814  * Return: Success
6815  */
6816 static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
6817 {
6818 	QDF_STATUS status;
6819 	struct dp_rx_fst *fst = soc->rx_fst;
6820 
6821 	/* Check if it is enabled in the INI */
6822 	if (!soc->fisa_enable) {
6823 		dp_err("RX FISA feature is disabled");
6824 		return QDF_STATUS_E_NOSUPPORT;
6825 	}
6826 
6827 	status = dp_rx_flow_send_fst_fw_setup(soc, soc->pdev_list[0]);
6828 	if (QDF_IS_STATUS_ERROR(status)) {
6829 		dp_err("dp_rx_flow_send_fst_fw_setup failed %d",
6830 		       status);
6831 		return status;
6832 	}
6833 
6834 	if (soc->fst_cmem_base) {
6835 		soc->fst_in_cmem = true;
6836 		dp_rx_fst_update_cmem_params(soc, fst->max_entries,
6837 					     soc->fst_cmem_base & 0xffffffff,
6838 					     soc->fst_cmem_base >> 32);
6839 	}
6840 	return status;
6841 }
6842 
6843 #define FISA_MAX_TIMEOUT 0xffffffff
6844 #define FISA_DISABLE_TIMEOUT 0
6845 static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
6846 {
6847 	struct dp_htt_rx_fisa_cfg fisa_config;
6848 
6849 	fisa_config.pdev_id = 0;
6850 	fisa_config.fisa_timeout = FISA_MAX_TIMEOUT;
6851 
6852 	return dp_htt_rx_fisa_config(soc->pdev_list[0], &fisa_config);
6853 }
6854 
6855 #else /* !WLAN_SUPPORT_RX_FISA */
/* Neither RX flow tag nor FISA support: nothing to program in target */
static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
6860 #endif /* !WLAN_SUPPORT_RX_FISA */
6861 
#ifndef WLAN_SUPPORT_RX_FISA
/* FISA compiled out: provide no-op stubs so callers need no ifdefs */
static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_rx_dump_fisa_stats(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_rx_dump_fisa_table(struct dp_soc *soc)
{
}

static void dp_suspend_fse_cache_flush(struct dp_soc *soc)
{
}

static void dp_resume_fse_cache_flush(struct dp_soc *soc)
{
}
#endif /* !WLAN_SUPPORT_RX_FISA */
6885 
#ifndef WLAN_DP_FEATURE_SW_LATENCY_MGR
/* SW latency manager compiled out: stats print is a no-op success */
static inline QDF_STATUS dp_print_swlm_stats(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* !WLAN_DP_FEATURE_SW_LATENCY_MGR */
6892 
6893 #ifdef WLAN_SUPPORT_PPEDS
6894 /*
6895  * dp_soc_target_ppe_rxole_rxdma_cfg() - Configure the RxOLe and RxDMA for PPE
6896  * @soc: DP Tx/Rx handle
6897  *
6898  * Return: QDF_STATUS
6899  */
6900 static
6901 QDF_STATUS dp_soc_target_ppe_rxole_rxdma_cfg(struct dp_soc *soc)
6902 {
6903 	struct dp_htt_rxdma_rxole_ppe_config htt_cfg = {0};
6904 	QDF_STATUS status;
6905 
6906 	/*
6907 	 * Program RxDMA to override the reo destination indication
6908 	 * with REO2PPE_DST_IND, when use_ppe is set to 1 in RX_MSDU_END,
6909 	 * thereby driving the packet to REO2PPE ring.
6910 	 * If the MSDU is spanning more than 1 buffer, then this
6911 	 * override is not done.
6912 	 */
6913 	htt_cfg.override = 1;
6914 	htt_cfg.reo_destination_indication = REO2PPE_DST_IND;
6915 	htt_cfg.multi_buffer_msdu_override_en = 0;
6916 
6917 	/*
6918 	 * Override use_ppe to 0 in RxOLE for the following
6919 	 * cases.
6920 	 */
6921 	htt_cfg.intra_bss_override = 1;
6922 	htt_cfg.decap_raw_override = 1;
6923 	htt_cfg.decap_nwifi_override = 1;
6924 	htt_cfg.ip_frag_override = 1;
6925 
6926 	status = dp_htt_rxdma_rxole_ppe_cfg_set(soc, &htt_cfg);
6927 	if (status != QDF_STATUS_SUCCESS)
6928 		dp_err("RxOLE and RxDMA PPE config failed %d", status);
6929 
6930 	return status;
6931 }
6932 
6933 static inline
6934 void dp_soc_txrx_peer_setup(enum wlan_op_mode vdev_opmode, struct dp_soc *soc,
6935 			    struct dp_peer *peer)
6936 {
6937 	if (((vdev_opmode == wlan_op_mode_ap) ||
6938 	     (vdev_opmode == wlan_op_mode_sta)) &&
6939 	     (soc->arch_ops.txrx_peer_setup)) {
6940 		if (soc->arch_ops.txrx_peer_setup(soc, peer)
6941 				!= QDF_STATUS_SUCCESS) {
6942 			dp_err("unable to setup target peer features");
6943 			qdf_assert_always(0);
6944 		}
6945 	}
6946 }
6947 #else
/* PPE-DS compiled out: no-op stubs */
static inline
QDF_STATUS dp_soc_target_ppe_rxole_rxdma_cfg(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_soc_txrx_peer_setup(enum wlan_op_mode vdev_opmode, struct dp_soc *soc,
			    struct dp_peer *peer)
{
}
6959 #endif /* WLAN_SUPPORT_PPEDS */
6960 
#ifdef DP_UMAC_HW_RESET_SUPPORT
/*
 * dp_register_umac_reset_handlers() - register a rx-action callback for
 * each stage of the UMAC HW reset sequence (pre-reset, post-reset start,
 * post-reset complete)
 * @soc: DP SOC handle
 *
 * Return: None
 */
static void dp_register_umac_reset_handlers(struct dp_soc *soc)
{
	dp_umac_reset_register_rx_action_callback(soc,
		dp_umac_reset_handle_pre_reset, UMAC_RESET_ACTION_DO_PRE_RESET);

	dp_umac_reset_register_rx_action_callback(soc,
					dp_umac_reset_handle_post_reset,
					UMAC_RESET_ACTION_DO_POST_RESET_START);

	dp_umac_reset_register_rx_action_callback(soc,
				dp_umac_reset_handle_post_reset_complete,
				UMAC_RESET_ACTION_DO_POST_RESET_COMPLETE);

}
#else
static void dp_register_umac_reset_handlers(struct dp_soc *soc)
{
}
#endif
6981 /*
6982  * dp_soc_attach_target_wifi3() - SOC initialization in the target
6983  * @cdp_soc: Opaque Datapath SOC handle
6984  *
6985  * Return: zero on success, non-zero on failure
6986  */
6987 static QDF_STATUS
6988 dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
6989 {
6990 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
6991 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6992 	struct hal_reo_params reo_params;
6993 
6994 	htt_soc_attach_target(soc->htt_handle);
6995 
6996 	status = dp_soc_target_ppe_rxole_rxdma_cfg(soc);
6997 	if (status != QDF_STATUS_SUCCESS) {
6998 		dp_err("Failed to send htt RxOLE and RxDMA messages to target");
6999 		return status;
7000 	}
7001 
7002 	status = dp_rxdma_ring_config(soc);
7003 	if (status != QDF_STATUS_SUCCESS) {
7004 		dp_err("Failed to send htt srng setup messages to target");
7005 		return status;
7006 	}
7007 
7008 	status = soc->arch_ops.dp_rxdma_ring_sel_cfg(soc);
7009 	if (status != QDF_STATUS_SUCCESS) {
7010 		dp_err("Failed to send htt ring config message to target");
7011 		return status;
7012 	}
7013 
7014 	status = dp_soc_umac_reset_init(soc);
7015 	if (status != QDF_STATUS_SUCCESS &&
7016 	    status != QDF_STATUS_E_NOSUPPORT) {
7017 		dp_err("Failed to initialize UMAC reset");
7018 		return status;
7019 	}
7020 
7021 	dp_register_umac_reset_handlers(soc);
7022 
7023 	status = dp_rx_target_fst_config(soc);
7024 	if (status != QDF_STATUS_SUCCESS &&
7025 	    status != QDF_STATUS_E_NOSUPPORT) {
7026 		dp_err("Failed to send htt fst setup config message to target");
7027 		return status;
7028 	}
7029 
7030 	if (status == QDF_STATUS_SUCCESS) {
7031 		status = dp_rx_fisa_config(soc);
7032 		if (status != QDF_STATUS_SUCCESS) {
7033 			dp_err("Failed to send htt FISA config message to target");
7034 			return status;
7035 		}
7036 	}
7037 
7038 	DP_STATS_INIT(soc);
7039 
7040 	dp_runtime_init(soc);
7041 
7042 	/* Enable HW vdev offload stats if feature is supported */
7043 	dp_vdev_stats_hw_offload_target_config(soc, INVALID_PDEV_ID, true);
7044 
7045 	/* initialize work queue for stats processing */
7046 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
7047 
7048 	wlan_cfg_soc_update_tgt_params(soc->wlan_cfg_ctx,
7049 				       soc->ctrl_psoc);
7050 	/* Setup HW REO */
7051 	qdf_mem_zero(&reo_params, sizeof(reo_params));
7052 
7053 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
7054 		/*
7055 		 * Reo ring remap is not required if both radios
7056 		 * are offloaded to NSS
7057 		 */
7058 
7059 		if (soc->arch_ops.reo_remap_config(soc, &reo_params.remap0,
7060 						   &reo_params.remap1,
7061 						   &reo_params.remap2))
7062 			reo_params.rx_hash_enabled = true;
7063 		else
7064 			reo_params.rx_hash_enabled = false;
7065 	}
7066 
7067 	/*
7068 	 * set the fragment destination ring
7069 	 */
7070 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
7071 
7072 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
7073 		reo_params.alt_dst_ind_0 = REO_REMAP_RELEASE;
7074 
7075 	hal_reo_setup(soc->hal_soc, &reo_params, 1);
7076 
7077 	hal_reo_set_err_dst_remap(soc->hal_soc);
7078 
7079 	soc->features.pn_in_reo_dest = hal_reo_enable_pn_in_dest(soc->hal_soc);
7080 
7081 	return QDF_STATUS_SUCCESS;
7082 }
7083 
7084 /*
7085  * dp_vdev_id_map_tbl_add() - Add vdev into vdev_id table
7086  * @soc: SoC handle
7087  * @vdev: vdev handle
7088  * @vdev_id: vdev_id
7089  *
7090  * Return: None
7091  */
7092 static void dp_vdev_id_map_tbl_add(struct dp_soc *soc,
7093 				   struct dp_vdev *vdev,
7094 				   uint8_t vdev_id)
7095 {
7096 	QDF_ASSERT(vdev_id <= MAX_VDEV_CNT);
7097 
7098 	qdf_spin_lock_bh(&soc->vdev_map_lock);
7099 
7100 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
7101 			QDF_STATUS_SUCCESS) {
7102 		dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK vdev_id %u",
7103 			     soc, vdev, vdev_id);
7104 		qdf_spin_unlock_bh(&soc->vdev_map_lock);
7105 		return;
7106 	}
7107 
7108 	if (!soc->vdev_id_map[vdev_id])
7109 		soc->vdev_id_map[vdev_id] = vdev;
7110 	else
7111 		QDF_ASSERT(0);
7112 
7113 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
7114 }
7115 
7116 /*
7117  * dp_vdev_id_map_tbl_remove() - remove vdev from vdev_id table
7118  * @soc: SoC handle
7119  * @vdev: vdev handle
7120  *
7121  * Return: None
7122  */
7123 static void dp_vdev_id_map_tbl_remove(struct dp_soc *soc,
7124 				      struct dp_vdev *vdev)
7125 {
7126 	qdf_spin_lock_bh(&soc->vdev_map_lock);
7127 	QDF_ASSERT(soc->vdev_id_map[vdev->vdev_id] == vdev);
7128 
7129 	soc->vdev_id_map[vdev->vdev_id] = NULL;
7130 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
7131 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
7132 }
7133 
7134 /*
7135  * dp_vdev_pdev_list_add() - add vdev into pdev's list
7136  * @soc: soc handle
7137  * @pdev: pdev handle
7138  * @vdev: vdev handle
7139  *
7140  * return: none
7141  */
7142 static void dp_vdev_pdev_list_add(struct dp_soc *soc,
7143 				  struct dp_pdev *pdev,
7144 				  struct dp_vdev *vdev)
7145 {
7146 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
7147 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
7148 			QDF_STATUS_SUCCESS) {
7149 		dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK",
7150 			     soc, vdev);
7151 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
7152 		return;
7153 	}
7154 	/* add this vdev into the pdev's list */
7155 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
7156 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
7157 }
7158 
7159 /*
7160  * dp_vdev_pdev_list_remove() - remove vdev from pdev's list
7161  * @soc: SoC handle
7162  * @pdev: pdev handle
7163  * @vdev: VDEV handle
7164  *
7165  * Return: none
7166  */
7167 static void dp_vdev_pdev_list_remove(struct dp_soc *soc,
7168 				     struct dp_pdev *pdev,
7169 				     struct dp_vdev *vdev)
7170 {
7171 	uint8_t found = 0;
7172 	struct dp_vdev *tmpvdev = NULL;
7173 
7174 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
7175 	TAILQ_FOREACH(tmpvdev, &pdev->vdev_list, vdev_list_elem) {
7176 		if (tmpvdev == vdev) {
7177 			found = 1;
7178 			break;
7179 		}
7180 	}
7181 
7182 	if (found) {
7183 		TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
7184 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
7185 	} else {
7186 		dp_vdev_debug("%pK: vdev:%pK not found in pdev:%pK vdevlist:%pK",
7187 			      soc, vdev, pdev, &pdev->vdev_list);
7188 		QDF_ASSERT(0);
7189 	}
7190 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
7191 }
7192 
#ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
/*
 * dp_vdev_init_rx_eapol() - initializing osif_rx_eapol
 * @vdev: Datapath VDEV handle
 *
 * Return: None
 */
static inline void dp_vdev_init_rx_eapol(struct dp_vdev *vdev)
{
	/* No EAPOL rx callback until the OS-IF layer registers one */
	vdev->osif_rx_eapol = NULL;
}

/*
 * dp_vdev_register_rx_eapol() - Register VDEV operations for rx_eapol
 * @vdev: DP vdev handle
 * @txrx_ops: Tx and Rx operations
 *
 * Return: None
 */
static inline void dp_vdev_register_rx_eapol(struct dp_vdev *vdev,
					     struct ol_txrx_ops *txrx_ops)
{
	vdev->osif_rx_eapol = txrx_ops->rx.rx_eapol;
}
#else
/* Stubs when EAPOL-over-control-port support is not compiled in */
static inline void dp_vdev_init_rx_eapol(struct dp_vdev *vdev)
{
}

static inline void dp_vdev_register_rx_eapol(struct dp_vdev *vdev,
					     struct ol_txrx_ops *txrx_ops)
{
}
#endif
7227 
#ifdef WLAN_FEATURE_11BE_MLO
/*
 * dp_vdev_save_mld_addr() - cache the MLD MAC address in the vdev
 * @vdev: DP vdev handle
 * @vdev_info: parameters used for vdev creation
 *
 * Return: None
 */
static inline void dp_vdev_save_mld_addr(struct dp_vdev *vdev,
					 struct cdp_vdev_info *vdev_info)
{
	/* Nothing to save when the caller did not supply an MLD address */
	if (!vdev_info->mld_mac_addr)
		return;

	qdf_mem_copy(vdev->mld_mac_addr.raw, vdev_info->mld_mac_addr,
		     QDF_MAC_ADDR_SIZE);
}
#else
/* No-op when 11be MLO support is not compiled in */
static inline void dp_vdev_save_mld_addr(struct dp_vdev *vdev,
					 struct cdp_vdev_info *vdev_info)
{
}
#endif
7243 
#ifdef DP_TRAFFIC_END_INDICATION
/*
 * dp_tx_vdev_traffic_end_indication_attach() - Initialize data end
 *                                              indication related members
 *                                              in VDEV
 * @vdev: DP vdev handle
 *
 * Return: None
 */
static inline void
dp_tx_vdev_traffic_end_indication_attach(struct dp_vdev *vdev)
{
	qdf_nbuf_queue_init(&vdev->end_ind_pkt_q);
}

/*
 * dp_tx_vdev_traffic_end_indication_detach() - De-init data end indication
 *                                              related members in VDEV
 * @vdev: DP vdev handle
 *
 * Return: None
 */
static inline void
dp_tx_vdev_traffic_end_indication_detach(struct dp_vdev *vdev)
{
	qdf_nbuf_t pkt;

	/* Drain and free every buffer still queued on the vdev */
	for (;;) {
		pkt = qdf_nbuf_queue_remove(&vdev->end_ind_pkt_q);
		if (!pkt)
			break;
		qdf_nbuf_free(pkt);
	}
}
#else
/* Stubs when traffic-end indication is not compiled in */
static inline void
dp_tx_vdev_traffic_end_indication_attach(struct dp_vdev *vdev)
{}

static inline void
dp_tx_vdev_traffic_end_indication_detach(struct dp_vdev *vdev)
{}
#endif
7282 
7283 /*
7284 * dp_vdev_attach_wifi3() - attach txrx vdev
7285 * @txrx_pdev: Datapath PDEV handle
7286 * @pdev_id: PDEV ID for vdev creation
7287 * @vdev_info: parameters used for vdev creation
7288 *
7289 * Return: status
7290 */
7291 static QDF_STATUS dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc,
7292 				       uint8_t pdev_id,
7293 				       struct cdp_vdev_info *vdev_info)
7294 {
7295 	int i = 0;
7296 	qdf_size_t vdev_context_size;
7297 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
7298 	struct dp_pdev *pdev =
7299 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7300 						   pdev_id);
7301 	struct dp_vdev *vdev;
7302 	uint8_t *vdev_mac_addr = vdev_info->vdev_mac_addr;
7303 	uint8_t vdev_id = vdev_info->vdev_id;
7304 	enum wlan_op_mode op_mode = vdev_info->op_mode;
7305 	enum wlan_op_subtype subtype = vdev_info->subtype;
7306 	uint8_t vdev_stats_id = vdev_info->vdev_stats_id;
7307 
7308 	vdev_context_size =
7309 		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_VDEV);
7310 	vdev = qdf_mem_malloc(vdev_context_size);
7311 
7312 	if (!pdev) {
7313 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
7314 			    cdp_soc, pdev_id);
7315 		qdf_mem_free(vdev);
7316 		goto fail0;
7317 	}
7318 
7319 	if (!vdev) {
7320 		dp_init_err("%pK: DP VDEV memory allocation failed",
7321 			    cdp_soc);
7322 		goto fail0;
7323 	}
7324 
7325 	wlan_minidump_log(vdev, sizeof(*vdev), soc->ctrl_psoc,
7326 			  WLAN_MD_DP_VDEV, "dp_vdev");
7327 
7328 	vdev->pdev = pdev;
7329 	vdev->vdev_id = vdev_id;
7330 	vdev->vdev_stats_id = vdev_stats_id;
7331 	vdev->opmode = op_mode;
7332 	vdev->subtype = subtype;
7333 	vdev->osdev = soc->osdev;
7334 
7335 	vdev->osif_rx = NULL;
7336 	vdev->osif_rsim_rx_decap = NULL;
7337 	vdev->osif_get_key = NULL;
7338 	vdev->osif_tx_free_ext = NULL;
7339 	vdev->osif_vdev = NULL;
7340 
7341 	vdev->delete.pending = 0;
7342 	vdev->safemode = 0;
7343 	vdev->drop_unenc = 1;
7344 	vdev->sec_type = cdp_sec_type_none;
7345 	vdev->multipass_en = false;
7346 	vdev->wrap_vdev = false;
7347 	dp_vdev_init_rx_eapol(vdev);
7348 	qdf_atomic_init(&vdev->ref_cnt);
7349 	for (i = 0; i < DP_MOD_ID_MAX; i++)
7350 		qdf_atomic_init(&vdev->mod_refs[i]);
7351 
7352 	/* Take one reference for create*/
7353 	qdf_atomic_inc(&vdev->ref_cnt);
7354 	qdf_atomic_inc(&vdev->mod_refs[DP_MOD_ID_CONFIG]);
7355 	vdev->num_peers = 0;
7356 #ifdef notyet
7357 	vdev->filters_num = 0;
7358 #endif
7359 	vdev->lmac_id = pdev->lmac_id;
7360 
7361 	qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE);
7362 
7363 	dp_vdev_save_mld_addr(vdev, vdev_info);
7364 
7365 	/* TODO: Initialize default HTT meta data that will be used in
7366 	 * TCL descriptors for packets transmitted from this VDEV
7367 	 */
7368 
7369 	qdf_spinlock_create(&vdev->peer_list_lock);
7370 	TAILQ_INIT(&vdev->peer_list);
7371 	dp_peer_multipass_list_init(vdev);
7372 	if ((soc->intr_mode == DP_INTR_POLL) &&
7373 	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
7374 		if ((pdev->vdev_count == 0) ||
7375 		    (wlan_op_mode_monitor == vdev->opmode))
7376 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
7377 	} else if (dp_soc_get_con_mode(soc) == QDF_GLOBAL_MISSION_MODE &&
7378 		   soc->intr_mode == DP_INTR_MSI &&
7379 		   wlan_op_mode_monitor == vdev->opmode) {
7380 		/* Timer to reap status ring in mission mode */
7381 		dp_monitor_vdev_timer_start(soc);
7382 	}
7383 
7384 	dp_vdev_id_map_tbl_add(soc, vdev, vdev_id);
7385 
7386 	if (wlan_op_mode_monitor == vdev->opmode) {
7387 		if (dp_monitor_vdev_attach(vdev) == QDF_STATUS_SUCCESS) {
7388 			dp_monitor_pdev_set_mon_vdev(vdev);
7389 			return dp_monitor_vdev_set_monitor_mode_buf_rings(pdev);
7390 		}
7391 		return QDF_STATUS_E_FAILURE;
7392 	}
7393 
7394 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
7395 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
7396 	vdev->dscp_tid_map_id = 0;
7397 	vdev->mcast_enhancement_en = 0;
7398 	vdev->igmp_mcast_enhanc_en = 0;
7399 	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
7400 	vdev->prev_tx_enq_tstamp = 0;
7401 	vdev->prev_rx_deliver_tstamp = 0;
7402 	vdev->skip_sw_tid_classification = DP_TX_HW_DSCP_TID_MAP_VALID;
7403 	dp_tx_vdev_traffic_end_indication_attach(vdev);
7404 
7405 	dp_vdev_pdev_list_add(soc, pdev, vdev);
7406 	pdev->vdev_count++;
7407 
7408 	if (wlan_op_mode_sta != vdev->opmode &&
7409 	    wlan_op_mode_ndi != vdev->opmode)
7410 		vdev->ap_bridge_enabled = true;
7411 	else
7412 		vdev->ap_bridge_enabled = false;
7413 	dp_init_info("%pK: wlan_cfg_ap_bridge_enabled %d",
7414 		     cdp_soc, vdev->ap_bridge_enabled);
7415 
7416 	dp_tx_vdev_attach(vdev);
7417 
7418 	dp_monitor_vdev_attach(vdev);
7419 	if (!pdev->is_lro_hash_configured) {
7420 		if (QDF_IS_STATUS_SUCCESS(dp_lro_hash_setup(soc, pdev)))
7421 			pdev->is_lro_hash_configured = true;
7422 		else
7423 			dp_err("LRO hash setup failure!");
7424 	}
7425 
7426 	dp_cfg_event_record_vdev_evt(soc, DP_CFG_EVENT_VDEV_ATTACH, vdev);
7427 	dp_info("Created vdev %pK ("QDF_MAC_ADDR_FMT") vdev_id %d", vdev,
7428 		QDF_MAC_ADDR_REF(vdev->mac_addr.raw), vdev->vdev_id);
7429 	DP_STATS_INIT(vdev);
7430 
7431 	if (QDF_IS_STATUS_ERROR(soc->arch_ops.txrx_vdev_attach(soc, vdev)))
7432 		goto fail0;
7433 
7434 	if (wlan_op_mode_sta == vdev->opmode)
7435 		dp_peer_create_wifi3((struct cdp_soc_t *)soc, vdev_id,
7436 				     vdev->mac_addr.raw, CDP_LINK_PEER_TYPE);
7437 
7438 	dp_pdev_update_fast_rx_flag(soc, pdev);
7439 
7440 	return QDF_STATUS_SUCCESS;
7441 
7442 fail0:
7443 	return QDF_STATUS_E_FAILURE;
7444 }
7445 
#ifndef QCA_HOST_MODE_WIFI_DISABLED
/**
 * dp_vdev_fetch_tx_handler() - Fetch Tx handlers
 * @vdev: struct dp_vdev *
 * @soc: struct dp_soc *
 * @ctx: struct ol_txrx_hardtart_ctxt * - filled with the selected tx,
 *	 tx_fast and tx_exception entry points
 *
 * Return: None
 */
static inline void dp_vdev_fetch_tx_handler(struct dp_vdev *vdev,
					    struct dp_soc *soc,
					    struct ol_txrx_hardtart_ctxt *ctx)
{
	/* Enable vdev_id check only for ap, if flag is enabled */
	if (vdev->mesh_vdev)
		ctx->tx = dp_tx_send_mesh;
	else if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
		 (vdev->opmode == wlan_op_mode_ap)) {
		ctx->tx = dp_tx_send_vdev_id_check;
		ctx->tx_fast = dp_tx_send_vdev_id_check;
	} else {
		ctx->tx = dp_tx_send;
		ctx->tx_fast = soc->arch_ops.dp_tx_send_fast;
	}

	/* Avoid check in regular exception Path */
	if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
	    (vdev->opmode == wlan_op_mode_ap))
		ctx->tx_exception = dp_tx_send_exception_vdev_id_check;
	else
		ctx->tx_exception = dp_tx_send_exception;
}

/**
 * dp_vdev_register_tx_handler() - Register Tx handler
 * @vdev: struct dp_vdev *
 * @soc: struct dp_soc *
 * @txrx_ops: struct ol_txrx_ops * - tx callbacks are published here for
 *	      the OS-IF layer
 *
 * Return: None
 */
static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
					       struct dp_soc *soc,
					       struct ol_txrx_ops *txrx_ops)
{
	struct ol_txrx_hardtart_ctxt ctx = {0};

	dp_vdev_fetch_tx_handler(vdev, soc, &ctx);

	txrx_ops->tx.tx = ctx.tx;
	txrx_ops->tx.tx_fast = ctx.tx_fast;
	txrx_ops->tx.tx_exception = ctx.tx_exception;

	dp_info("Configure tx_vdev_id_chk_handler Feature Flag: %d and mode:%d for vdev_id:%d",
		wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx),
		vdev->opmode, vdev->vdev_id);
}
#else /* QCA_HOST_MODE_WIFI_DISABLED */
/* Stubs when host-mode WiFi datapath is compiled out */
static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
					       struct dp_soc *soc,
					       struct ol_txrx_ops *txrx_ops)
{
}

static inline void dp_vdev_fetch_tx_handler(struct dp_vdev *vdev,
					    struct dp_soc *soc,
					    struct ol_txrx_hardtart_ctxt *ctx)
{
}
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
7512 
7513 /**
7514  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
7515  * @soc: Datapath soc handle
7516  * @vdev_id: id of Datapath VDEV handle
7517  * @osif_vdev: OSIF vdev handle
7518  * @txrx_ops: Tx and Rx operations
7519  *
7520  * Return: DP VDEV handle on success, NULL on failure
7521  */
7522 static QDF_STATUS dp_vdev_register_wifi3(struct cdp_soc_t *soc_hdl,
7523 					 uint8_t vdev_id,
7524 					 ol_osif_vdev_handle osif_vdev,
7525 					 struct ol_txrx_ops *txrx_ops)
7526 {
7527 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7528 	struct dp_vdev *vdev =	dp_vdev_get_ref_by_id(soc, vdev_id,
7529 						      DP_MOD_ID_CDP);
7530 
7531 	if (!vdev)
7532 		return QDF_STATUS_E_FAILURE;
7533 
7534 	vdev->osif_vdev = osif_vdev;
7535 	vdev->osif_rx = txrx_ops->rx.rx;
7536 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
7537 	vdev->osif_rx_flush = txrx_ops->rx.rx_flush;
7538 	vdev->osif_gro_flush = txrx_ops->rx.rx_gro_flush;
7539 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
7540 	vdev->osif_fisa_rx = txrx_ops->rx.osif_fisa_rx;
7541 	vdev->osif_fisa_flush = txrx_ops->rx.osif_fisa_flush;
7542 	vdev->osif_get_key = txrx_ops->get_key;
7543 	dp_monitor_vdev_register_osif(vdev, txrx_ops);
7544 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
7545 	vdev->tx_comp = txrx_ops->tx.tx_comp;
7546 	vdev->stats_cb = txrx_ops->rx.stats_rx;
7547 	vdev->tx_classify_critical_pkt_cb =
7548 		txrx_ops->tx.tx_classify_critical_pkt_cb;
7549 #ifdef notyet
7550 #if ATH_SUPPORT_WAPI
7551 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
7552 #endif
7553 #endif
7554 #ifdef UMAC_SUPPORT_PROXY_ARP
7555 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
7556 #endif
7557 	vdev->me_convert = txrx_ops->me_convert;
7558 	vdev->get_tsf_time = txrx_ops->get_tsf_time;
7559 
7560 	dp_vdev_register_rx_eapol(vdev, txrx_ops);
7561 
7562 	dp_vdev_register_tx_handler(vdev, soc, txrx_ops);
7563 
7564 	dp_init_info("%pK: DP Vdev Register success", soc);
7565 
7566 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7567 	return QDF_STATUS_SUCCESS;
7568 }
7569 
7570 #ifdef WLAN_FEATURE_11BE_MLO
7571 void dp_peer_delete(struct dp_soc *soc,
7572 		    struct dp_peer *peer,
7573 		    void *arg)
7574 {
7575 	if (!peer->valid)
7576 		return;
7577 
7578 	dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
7579 			     peer->vdev->vdev_id,
7580 			     peer->mac_addr.raw, 0,
7581 			     peer->peer_type);
7582 }
7583 #else
7584 void dp_peer_delete(struct dp_soc *soc,
7585 		    struct dp_peer *peer,
7586 		    void *arg)
7587 {
7588 	if (!peer->valid)
7589 		return;
7590 
7591 	dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
7592 			     peer->vdev->vdev_id,
7593 			     peer->mac_addr.raw, 0,
7594 			     CDP_LINK_PEER_TYPE);
7595 }
7596 #endif
7597 
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
/*
 * dp_mlo_peer_delete() - peer-iterator callback deleting MLO link peers
 * @soc: Datapath soc handle
 * @peer: peer to examine
 * @arg: unused iterator argument
 *
 * Return: None
 */
void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
	/* Only valid MLO link peers are deleted; everything else is left */
	if (peer->valid && IS_MLO_DP_LINK_PEER(peer))
		dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
				     peer->vdev->vdev_id,
				     peer->mac_addr.raw, 0,
				     CDP_LINK_PEER_TYPE);
}
#else
/* No-op when multi-chip MLO is not compiled in */
void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
}
#endif
7615 /**
7616  * dp_vdev_flush_peers() - Forcibily Flush peers of vdev
7617  * @vdev: Datapath VDEV handle
7618  * @unmap_only: Flag to indicate "only unmap"
7619  *
7620  * Return: void
7621  */
7622 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
7623 				bool unmap_only,
7624 				bool mlo_peers_only)
7625 {
7626 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7627 	struct dp_pdev *pdev = vdev->pdev;
7628 	struct dp_soc *soc = pdev->soc;
7629 	struct dp_peer *peer;
7630 	uint32_t i = 0;
7631 
7632 
7633 	if (!unmap_only) {
7634 		if (!mlo_peers_only)
7635 			dp_vdev_iterate_peer_lock_safe(vdev,
7636 						       dp_peer_delete,
7637 						       NULL,
7638 						       DP_MOD_ID_CDP);
7639 		else
7640 			dp_vdev_iterate_peer_lock_safe(vdev,
7641 						       dp_mlo_peer_delete,
7642 						       NULL,
7643 						       DP_MOD_ID_CDP);
7644 	}
7645 
7646 	for (i = 0; i < soc->max_peer_id ; i++) {
7647 		peer = __dp_peer_get_ref_by_id(soc, i, DP_MOD_ID_CDP);
7648 
7649 		if (!peer)
7650 			continue;
7651 
7652 		if (peer->vdev != vdev) {
7653 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7654 			continue;
7655 		}
7656 
7657 		if (!mlo_peers_only) {
7658 			dp_info("peer: " QDF_MAC_ADDR_FMT " is getting unmap",
7659 				QDF_MAC_ADDR_REF(peer->mac_addr.raw));
7660 			dp_rx_peer_unmap_handler(soc, i,
7661 						 vdev->vdev_id,
7662 						 peer->mac_addr.raw, 0,
7663 						 DP_PEER_WDS_COUNT_INVALID);
7664 			SET_PEER_REF_CNT_ONE(peer);
7665 		} else if (IS_MLO_DP_LINK_PEER(peer) ||
7666 			   IS_MLO_DP_MLD_PEER(peer)) {
7667 			dp_info("peer: " QDF_MAC_ADDR_FMT " is getting unmap",
7668 				QDF_MAC_ADDR_REF(peer->mac_addr.raw));
7669 			dp_rx_peer_unmap_handler(soc, i,
7670 						 vdev->vdev_id,
7671 						 peer->mac_addr.raw, 0,
7672 						 DP_PEER_WDS_COUNT_INVALID);
7673 			SET_PEER_REF_CNT_ONE(peer);
7674 		}
7675 
7676 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7677 	}
7678 }
7679 
7680 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
7681 /*
7682  * dp_txrx_alloc_vdev_stats_id()- Allocate vdev_stats_id
7683  * @soc_hdl: Datapath soc handle
7684  * @vdev_stats_id: Address of vdev_stats_id
7685  *
7686  * Return: QDF_STATUS
7687  */
7688 static QDF_STATUS dp_txrx_alloc_vdev_stats_id(struct cdp_soc_t *soc_hdl,
7689 					      uint8_t *vdev_stats_id)
7690 {
7691 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7692 	uint8_t id = 0;
7693 
7694 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
7695 		*vdev_stats_id = CDP_INVALID_VDEV_STATS_ID;
7696 		return QDF_STATUS_E_FAILURE;
7697 	}
7698 
7699 	while (id < CDP_MAX_VDEV_STATS_ID) {
7700 		if (!qdf_atomic_test_and_set_bit(id, &soc->vdev_stats_id_map)) {
7701 			*vdev_stats_id = id;
7702 			return QDF_STATUS_SUCCESS;
7703 		}
7704 		id++;
7705 	}
7706 
7707 	*vdev_stats_id = CDP_INVALID_VDEV_STATS_ID;
7708 	return QDF_STATUS_E_FAILURE;
7709 }
7710 
7711 /*
7712  * dp_txrx_reset_vdev_stats_id() - Reset vdev_stats_id in dp_soc
7713  * @soc_hdl: Datapath soc handle
7714  * @vdev_stats_id: vdev_stats_id to reset in dp_soc
7715  *
7716  * Return: none
7717  */
7718 static void dp_txrx_reset_vdev_stats_id(struct cdp_soc_t *soc_hdl,
7719 					uint8_t vdev_stats_id)
7720 {
7721 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7722 
7723 	if ((!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) ||
7724 	    (vdev_stats_id >= CDP_MAX_VDEV_STATS_ID))
7725 		return;
7726 
7727 	qdf_atomic_clear_bit(vdev_stats_id, &soc->vdev_stats_id_map);
7728 }
7729 #else
7730 static void dp_txrx_reset_vdev_stats_id(struct cdp_soc_t *soc,
7731 					uint8_t vdev_stats_id)
7732 {}
7733 #endif
7734 /*
7735  * dp_vdev_detach_wifi3() - Detach txrx vdev
7736  * @cdp_soc: Datapath soc handle
7737  * @vdev_id: VDEV Id
7738  * @callback: Callback OL_IF on completion of detach
7739  * @cb_context:	Callback context
7740  *
7741  */
7742 static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc,
7743 				       uint8_t vdev_id,
7744 				       ol_txrx_vdev_delete_cb callback,
7745 				       void *cb_context)
7746 {
7747 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
7748 	struct dp_pdev *pdev;
7749 	struct dp_neighbour_peer *peer = NULL;
7750 	struct dp_peer *vap_self_peer = NULL;
7751 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
7752 						     DP_MOD_ID_CDP);
7753 
7754 	if (!vdev)
7755 		return QDF_STATUS_E_FAILURE;
7756 
7757 	soc->arch_ops.txrx_vdev_detach(soc, vdev);
7758 
7759 	pdev = vdev->pdev;
7760 
7761 	vap_self_peer = dp_sta_vdev_self_peer_ref_n_get(soc, vdev,
7762 							DP_MOD_ID_CONFIG);
7763 	if (vap_self_peer) {
7764 		qdf_spin_lock_bh(&soc->ast_lock);
7765 		if (vap_self_peer->self_ast_entry) {
7766 			dp_peer_del_ast(soc, vap_self_peer->self_ast_entry);
7767 			vap_self_peer->self_ast_entry = NULL;
7768 		}
7769 		qdf_spin_unlock_bh(&soc->ast_lock);
7770 
7771 		dp_peer_delete_wifi3((struct cdp_soc_t *)soc, vdev->vdev_id,
7772 				     vap_self_peer->mac_addr.raw, 0,
7773 				     CDP_LINK_PEER_TYPE);
7774 		dp_peer_unref_delete(vap_self_peer, DP_MOD_ID_CONFIG);
7775 	}
7776 
7777 	/*
7778 	 * If Target is hung, flush all peers before detaching vdev
7779 	 * this will free all references held due to missing
7780 	 * unmap commands from Target
7781 	 */
7782 	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
7783 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, false, false);
7784 	else if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
7785 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, true, false);
7786 
7787 	/* indicate that the vdev needs to be deleted */
7788 	vdev->delete.pending = 1;
7789 	dp_rx_vdev_detach(vdev);
7790 	/*
7791 	 * move it after dp_rx_vdev_detach(),
7792 	 * as the call back done in dp_rx_vdev_detach()
7793 	 * still need to get vdev pointer by vdev_id.
7794 	 */
7795 	dp_vdev_id_map_tbl_remove(soc, vdev);
7796 
7797 	dp_monitor_neighbour_peer_list_remove(pdev, vdev, peer);
7798 
7799 	dp_txrx_reset_vdev_stats_id(cdp_soc, vdev->vdev_stats_id);
7800 
7801 	dp_tx_vdev_multipass_deinit(vdev);
7802 	dp_tx_vdev_traffic_end_indication_detach(vdev);
7803 
7804 	if (vdev->vdev_dp_ext_handle) {
7805 		qdf_mem_free(vdev->vdev_dp_ext_handle);
7806 		vdev->vdev_dp_ext_handle = NULL;
7807 	}
7808 	vdev->delete.callback = callback;
7809 	vdev->delete.context = cb_context;
7810 
7811 	if (vdev->opmode != wlan_op_mode_monitor)
7812 		dp_vdev_pdev_list_remove(soc, pdev, vdev);
7813 
7814 	pdev->vdev_count--;
7815 	/* release reference taken above for find */
7816 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7817 
7818 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
7819 	TAILQ_INSERT_TAIL(&soc->inactive_vdev_list, vdev, inactive_list_elem);
7820 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
7821 
7822 	dp_cfg_event_record_vdev_evt(soc, DP_CFG_EVENT_VDEV_DETACH, vdev);
7823 	dp_info("detach vdev %pK id %d pending refs %d",
7824 		vdev, vdev->vdev_id, qdf_atomic_read(&vdev->ref_cnt));
7825 
7826 	/* release reference taken at dp_vdev_create */
7827 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
7828 
7829 	return QDF_STATUS_SUCCESS;
7830 }
7831 
7832 #ifdef WLAN_FEATURE_11BE_MLO
7833 /**
7834  * is_dp_peer_can_reuse() - check if the dp_peer match condition to be reused
7835  * @vdev: Target DP vdev handle
7836  * @peer: DP peer handle to be checked
7837  * @peer_mac_addr: Target peer mac address
7838  * @peer_type: Target peer type
7839  *
7840  * Return: true - if match, false - not match
7841  */
7842 static inline
7843 bool is_dp_peer_can_reuse(struct dp_vdev *vdev,
7844 			  struct dp_peer *peer,
7845 			  uint8_t *peer_mac_addr,
7846 			  enum cdp_peer_type peer_type)
7847 {
7848 	if (peer->bss_peer && (peer->vdev == vdev) &&
7849 	    (peer->peer_type == peer_type) &&
7850 	    (qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw,
7851 			 QDF_MAC_ADDR_SIZE) == 0))
7852 		return true;
7853 
7854 	return false;
7855 }
7856 #else
7857 static inline
7858 bool is_dp_peer_can_reuse(struct dp_vdev *vdev,
7859 			  struct dp_peer *peer,
7860 			  uint8_t *peer_mac_addr,
7861 			  enum cdp_peer_type peer_type)
7862 {
7863 	if (peer->bss_peer && (peer->vdev == vdev) &&
7864 	    (qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw,
7865 			 QDF_MAC_ADDR_SIZE) == 0))
7866 		return true;
7867 
7868 	return false;
7869 }
7870 #endif
7871 
/*
 * dp_peer_can_reuse() - find a reusable peer on the inactive list
 * @vdev: Target DP vdev handle
 * @peer_mac_addr: Target peer mac address
 * @peer_type: Target peer type
 *
 * Return: matching peer with a CONFIG-module reference held and removed
 *	   from the inactive list, or NULL if none matches
 */
static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
						uint8_t *peer_mac_addr,
						enum cdp_peer_type peer_type)
{
	struct dp_peer *peer;
	struct dp_soc *soc = vdev->pdev->soc;

	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
	TAILQ_FOREACH(peer, &soc->inactive_peer_list,
		      inactive_list_elem) {

		/* reuse bss peer only when vdev matches*/
		if (is_dp_peer_can_reuse(vdev, peer,
					 peer_mac_addr, peer_type)) {
			/* increment ref count for cdp_peer_create*/
			if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG) ==
						QDF_STATUS_SUCCESS) {
				TAILQ_REMOVE(&soc->inactive_peer_list, peer,
					     inactive_list_elem);
				qdf_spin_unlock_bh
					(&soc->inactive_peer_list_lock);
				return peer;
			}
		}
	}

	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
	return NULL;
}
7901 
7902 #ifdef FEATURE_AST
7903 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
7904 					       struct dp_pdev *pdev,
7905 					       uint8_t *peer_mac_addr)
7906 {
7907 	struct dp_ast_entry *ast_entry;
7908 
7909 	if (soc->ast_offload_support)
7910 		return;
7911 
7912 	qdf_spin_lock_bh(&soc->ast_lock);
7913 	if (soc->ast_override_support)
7914 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
7915 							    pdev->pdev_id);
7916 	else
7917 		ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
7918 
7919 	if (ast_entry && ast_entry->next_hop && !ast_entry->delete_in_progress)
7920 		dp_peer_del_ast(soc, ast_entry);
7921 
7922 	qdf_spin_unlock_bh(&soc->ast_lock);
7923 }
7924 #else
7925 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
7926 					       struct dp_pdev *pdev,
7927 					       uint8_t *peer_mac_addr)
7928 {
7929 }
7930 #endif
7931 
#ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
/*
 * dp_peer_hw_txrx_stats_init() - Initialize hw_txrx_stats_en in dp_peer
 * @soc: Datapath soc handle
 * @txrx_peer: Datapath txrx_peer handle
 *
 * Return: none
 */
static inline
void dp_peer_hw_txrx_stats_init(struct dp_soc *soc,
				struct dp_txrx_peer *txrx_peer)
{
	/* Mirror the SoC-level config flag into the peer so per-peer
	 * paths can check it without going back to wlan_cfg.
	 */
	txrx_peer->hw_txrx_stats_en =
		wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx);
}
#else
static inline
void dp_peer_hw_txrx_stats_init(struct dp_soc *soc,
				struct dp_txrx_peer *txrx_peer)
{
	/* HW vdev stats offload not compiled in */
	txrx_peer->hw_txrx_stats_en = 0;
}
#endif
7955 
7956 static QDF_STATUS dp_txrx_peer_detach(struct dp_soc *soc, struct dp_peer *peer)
7957 {
7958 	struct dp_txrx_peer *txrx_peer;
7959 	struct dp_pdev *pdev;
7960 
7961 	/* dp_txrx_peer exists for mld peer and legacy peer */
7962 	if (peer->txrx_peer) {
7963 		txrx_peer = peer->txrx_peer;
7964 		peer->txrx_peer = NULL;
7965 		pdev = txrx_peer->vdev->pdev;
7966 
7967 		dp_peer_defrag_rx_tids_deinit(txrx_peer);
7968 		/*
7969 		 * Deallocate the extended stats contenxt
7970 		 */
7971 		dp_peer_delay_stats_ctx_dealloc(soc, txrx_peer);
7972 		dp_peer_rx_bufq_resources_deinit(txrx_peer);
7973 		dp_peer_jitter_stats_ctx_dealloc(pdev, txrx_peer);
7974 		dp_peer_sawf_stats_ctx_free(soc, txrx_peer);
7975 
7976 		qdf_mem_free(txrx_peer);
7977 	}
7978 
7979 	return QDF_STATUS_SUCCESS;
7980 }
7981 
/*
 * dp_txrx_peer_attach() - allocate and initialize the dp_txrx_peer context
 * @soc: Datapath soc handle
 * @peer: Datapath peer the txrx context is attached to
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_NOMEM on allocation failure
 */
static QDF_STATUS dp_txrx_peer_attach(struct dp_soc *soc, struct dp_peer *peer)
{
	struct dp_txrx_peer *txrx_peer;
	struct dp_pdev *pdev;

	txrx_peer = (struct dp_txrx_peer *)qdf_mem_malloc(sizeof(*txrx_peer));

	if (!txrx_peer)
		return QDF_STATUS_E_NOMEM; /* failure */

	txrx_peer->peer_id = HTT_INVALID_PEER;
	/* initialize the peer_id */
	txrx_peer->vdev = peer->vdev;
	pdev = peer->vdev->pdev;

	DP_STATS_INIT(txrx_peer);

	dp_wds_ext_peer_init(txrx_peer);
	dp_peer_rx_bufq_resources_init(txrx_peer);
	dp_peer_hw_txrx_stats_init(soc, txrx_peer);
	/*
	 * Allocate peer extended stats context. Fall through in
	 * case of failure as its not an implicit requirement to have
	 * this object for regular statistics updates.
	 */
	if (dp_peer_delay_stats_ctx_alloc(soc, txrx_peer) !=
					  QDF_STATUS_SUCCESS)
		dp_warn("peer delay_stats ctx alloc failed");

	/*
	 * Allocate memory for jitter stats. Fall through in
	 * case of failure as its not an implicit requirement to have
	 * this object for regular statistics updates.
	 */
	if (dp_peer_jitter_stats_ctx_alloc(pdev, txrx_peer) !=
					   QDF_STATUS_SUCCESS)
		dp_warn("peer jitter_stats ctx alloc failed");

	dp_set_peer_isolation(txrx_peer, false);

	dp_peer_defrag_rx_tids_init(txrx_peer);

	if (dp_peer_sawf_stats_ctx_alloc(soc, txrx_peer) != QDF_STATUS_SUCCESS)
		dp_warn("peer sawf stats alloc failed");

	dp_txrx_peer_attach_add(soc, peer, txrx_peer);

	return QDF_STATUS_SUCCESS;
}
8031 
/*
 * dp_txrx_peer_stats_clr() - reset all cumulative txrx counters of a peer
 * @txrx_peer: Datapath txrx_peer handle (may be NULL)
 *
 * Return: none
 */
static inline
void dp_txrx_peer_stats_clr(struct dp_txrx_peer *txrx_peer)
{
	if (!txrx_peer)
		return;

	txrx_peer->tx_failed = 0;
	txrx_peer->comp_pkt.num = 0;
	txrx_peer->comp_pkt.bytes = 0;
	txrx_peer->to_stack.num = 0;
	txrx_peer->to_stack.bytes = 0;

	DP_STATS_CLR(txrx_peer);
	/* Clear the extended delay/jitter stats contexts as well */
	dp_peer_delay_stats_ctx_clr(txrx_peer);
	dp_peer_jitter_stats_ctx_clr(txrx_peer);
}
8048 
8049 /*
8050  * dp_peer_create_wifi3() - attach txrx peer
8051  * @soc_hdl: Datapath soc handle
8052  * @vdev_id: id of vdev
8053  * @peer_mac_addr: Peer MAC address
8054  * @peer_type: link or MLD peer type
8055  *
8056  * Return: 0 on success, -1 on failure
8057  */
8058 static QDF_STATUS
8059 dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8060 		     uint8_t *peer_mac_addr, enum cdp_peer_type peer_type)
8061 {
8062 	struct dp_peer *peer;
8063 	int i;
8064 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8065 	struct dp_pdev *pdev;
8066 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
8067 	struct dp_vdev *vdev = NULL;
8068 
8069 	if (!peer_mac_addr)
8070 		return QDF_STATUS_E_FAILURE;
8071 
8072 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
8073 
8074 	if (!vdev)
8075 		return QDF_STATUS_E_FAILURE;
8076 
8077 	pdev = vdev->pdev;
8078 	soc = pdev->soc;
8079 
8080 	/*
8081 	 * If a peer entry with given MAC address already exists,
8082 	 * reuse the peer and reset the state of peer.
8083 	 */
8084 	peer = dp_peer_can_reuse(vdev, peer_mac_addr, peer_type);
8085 
8086 	if (peer) {
8087 		qdf_atomic_init(&peer->is_default_route_set);
8088 		dp_peer_cleanup(vdev, peer);
8089 
8090 		dp_peer_vdev_list_add(soc, vdev, peer);
8091 		dp_peer_find_hash_add(soc, peer);
8092 
8093 		if (dp_peer_rx_tids_create(peer) != QDF_STATUS_SUCCESS) {
8094 			dp_alert("RX tid alloc fail for peer %pK (" QDF_MAC_ADDR_FMT ")",
8095 				 peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
8096 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8097 			return QDF_STATUS_E_FAILURE;
8098 		}
8099 
8100 		if (IS_MLO_DP_MLD_PEER(peer))
8101 			dp_mld_peer_init_link_peers_info(peer);
8102 
8103 		qdf_spin_lock_bh(&soc->ast_lock);
8104 		dp_peer_delete_ast_entries(soc, peer);
8105 		qdf_spin_unlock_bh(&soc->ast_lock);
8106 
8107 		if ((vdev->opmode == wlan_op_mode_sta) &&
8108 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
8109 		     QDF_MAC_ADDR_SIZE)) {
8110 			ast_type = CDP_TXRX_AST_TYPE_SELF;
8111 		}
8112 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
8113 
8114 		peer->valid = 1;
8115 		peer->is_tdls_peer = false;
8116 		dp_local_peer_id_alloc(pdev, peer);
8117 
8118 		qdf_spinlock_create(&peer->peer_info_lock);
8119 
8120 		DP_STATS_INIT(peer);
8121 
8122 		/*
8123 		 * In tx_monitor mode, filter may be set for unassociated peer
8124 		 * when unassociated peer get associated peer need to
8125 		 * update tx_cap_enabled flag to support peer filter.
8126 		 */
8127 		if (!IS_MLO_DP_MLD_PEER(peer)) {
8128 			dp_monitor_peer_tx_capture_filter_check(pdev, peer);
8129 			dp_monitor_peer_reset_stats(soc, peer);
8130 		}
8131 
8132 		if (peer->txrx_peer) {
8133 			dp_peer_rx_bufq_resources_init(peer->txrx_peer);
8134 			dp_txrx_peer_stats_clr(peer->txrx_peer);
8135 			dp_set_peer_isolation(peer->txrx_peer, false);
8136 			dp_wds_ext_peer_init(peer->txrx_peer);
8137 			dp_peer_hw_txrx_stats_init(soc, peer->txrx_peer);
8138 		}
8139 
8140 		dp_cfg_event_record_peer_evt(soc, DP_CFG_EVENT_PEER_CREATE,
8141 					     peer, vdev, 1);
8142 		dp_info("vdev %pK Reused peer %pK ("QDF_MAC_ADDR_FMT
8143 			") vdev_ref_cnt "
8144 			"%d peer_ref_cnt: %d",
8145 			vdev, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
8146 			qdf_atomic_read(&vdev->ref_cnt),
8147 			qdf_atomic_read(&peer->ref_cnt));
8148 			dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
8149 
8150 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8151 		return QDF_STATUS_SUCCESS;
8152 	} else {
8153 		/*
8154 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
8155 		 * need to remove the AST entry which was earlier added as a WDS
8156 		 * entry.
8157 		 * If an AST entry exists, but no peer entry exists with a given
8158 		 * MAC addresses, we could deduce it as a WDS entry
8159 		 */
8160 		dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
8161 	}
8162 
8163 #ifdef notyet
8164 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
8165 		soc->mempool_ol_ath_peer);
8166 #else
8167 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
8168 #endif
8169 	wlan_minidump_log(peer,
8170 			  sizeof(*peer),
8171 			  soc->ctrl_psoc,
8172 			  WLAN_MD_DP_PEER, "dp_peer");
8173 	if (!peer) {
8174 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8175 		return QDF_STATUS_E_FAILURE; /* failure */
8176 	}
8177 
8178 	qdf_mem_zero(peer, sizeof(struct dp_peer));
8179 
8180 	/* store provided params */
8181 	peer->vdev = vdev;
8182 
8183 	/* initialize the peer_id */
8184 	peer->peer_id = HTT_INVALID_PEER;
8185 
8186 	qdf_mem_copy(
8187 		&peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE);
8188 
8189 	DP_PEER_SET_TYPE(peer, peer_type);
8190 	if (IS_MLO_DP_MLD_PEER(peer)) {
8191 		if (dp_txrx_peer_attach(soc, peer) !=
8192 				QDF_STATUS_SUCCESS)
8193 			goto fail; /* failure */
8194 
8195 		dp_mld_peer_init_link_peers_info(peer);
8196 	} else if (dp_monitor_peer_attach(soc, peer) !=
8197 				QDF_STATUS_SUCCESS)
8198 		dp_warn("peer monitor ctx alloc failed");
8199 
8200 	TAILQ_INIT(&peer->ast_entry_list);
8201 
8202 	/* get the vdev reference for new peer */
8203 	dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CHILD);
8204 
8205 	if ((vdev->opmode == wlan_op_mode_sta) &&
8206 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
8207 			 QDF_MAC_ADDR_SIZE)) {
8208 		ast_type = CDP_TXRX_AST_TYPE_SELF;
8209 	}
8210 	qdf_spinlock_create(&peer->peer_state_lock);
8211 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
8212 	qdf_spinlock_create(&peer->peer_info_lock);
8213 
8214 	/* reset the ast index to flowid table */
8215 	dp_peer_reset_flowq_map(peer);
8216 
8217 	qdf_atomic_init(&peer->ref_cnt);
8218 
8219 	for (i = 0; i < DP_MOD_ID_MAX; i++)
8220 		qdf_atomic_init(&peer->mod_refs[i]);
8221 
8222 	/* keep one reference for attach */
8223 	qdf_atomic_inc(&peer->ref_cnt);
8224 	qdf_atomic_inc(&peer->mod_refs[DP_MOD_ID_CONFIG]);
8225 
8226 	dp_peer_vdev_list_add(soc, vdev, peer);
8227 
8228 	/* TODO: See if hash based search is required */
8229 	dp_peer_find_hash_add(soc, peer);
8230 
8231 	/* Initialize the peer state */
8232 	peer->state = OL_TXRX_PEER_STATE_DISC;
8233 
8234 	dp_cfg_event_record_peer_evt(soc, DP_CFG_EVENT_PEER_CREATE,
8235 				     peer, vdev, 0);
8236 	dp_info("vdev %pK created peer %pK ("QDF_MAC_ADDR_FMT") vdev_ref_cnt "
8237 		"%d peer_ref_cnt: %d",
8238 		vdev, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
8239 		qdf_atomic_read(&vdev->ref_cnt),
8240 		qdf_atomic_read(&peer->ref_cnt));
8241 	/*
8242 	 * For every peer MAp message search and set if bss_peer
8243 	 */
8244 	if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
8245 			QDF_MAC_ADDR_SIZE) == 0 &&
8246 			(wlan_op_mode_sta != vdev->opmode)) {
8247 		dp_info("vdev bss_peer!!");
8248 		peer->bss_peer = 1;
8249 		if (peer->txrx_peer)
8250 			peer->txrx_peer->bss_peer = 1;
8251 	}
8252 
8253 	if (wlan_op_mode_sta == vdev->opmode &&
8254 	    qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
8255 			QDF_MAC_ADDR_SIZE) == 0) {
8256 		peer->sta_self_peer = 1;
8257 	}
8258 
8259 	if (dp_peer_rx_tids_create(peer) != QDF_STATUS_SUCCESS) {
8260 		dp_alert("RX tid alloc fail for peer %pK (" QDF_MAC_ADDR_FMT ")",
8261 			 peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
8262 		goto fail;
8263 	}
8264 
8265 	peer->valid = 1;
8266 	dp_local_peer_id_alloc(pdev, peer);
8267 	DP_STATS_INIT(peer);
8268 
8269 	if (dp_peer_sawf_ctx_alloc(soc, peer) != QDF_STATUS_SUCCESS)
8270 		dp_warn("peer sawf context alloc failed");
8271 
8272 	dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
8273 
8274 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8275 
8276 	return QDF_STATUS_SUCCESS;
8277 fail:
8278 	qdf_mem_free(peer);
8279 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8280 
8281 	return QDF_STATUS_E_FAILURE;
8282 }
8283 
8284 static QDF_STATUS dp_peer_legacy_setup(struct dp_soc *soc, struct dp_peer *peer)
8285 {
8286 	/* txrx_peer might exist already in peer reuse case */
8287 	if (peer->txrx_peer)
8288 		return QDF_STATUS_SUCCESS;
8289 
8290 	if (dp_txrx_peer_attach(soc, peer) !=
8291 				QDF_STATUS_SUCCESS) {
8292 		dp_err("peer txrx ctx alloc failed");
8293 		return QDF_STATUS_E_FAILURE;
8294 	}
8295 
8296 	return QDF_STATUS_SUCCESS;
8297 }
8298 
8299 #ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_peer_mlo_setup() - associate a newly set-up link peer with its MLD peer
 * @soc: datapath soc handle
 * @peer: link peer being set up
 * @vdev_id: id of the vdev this link peer belongs to
 * @setup_info: MLO setup parameters from the control path; NULL (or a NULL
 *              mld_peer_mac) indicates a non-MLO connection
 *
 * For the first link, creates the MLD peer and hands it the link peer's
 * rx_tid array; for subsequent links, the link peer adopts the MLD peer's
 * rx_tid array. If the primary link is not the first link, the MLD peer's
 * vdev reference is switched to the primary link's vdev.
 *
 * Return: QDF_STATUS_SUCCESS on success or for non-MLO connections,
 *	   QDF_STATUS_E_FAILURE if the MLD peer cannot be found
 */
QDF_STATUS dp_peer_mlo_setup(
			struct dp_soc *soc,
			struct dp_peer *peer,
			uint8_t vdev_id,
			struct cdp_peer_setup_info *setup_info)
{
	struct dp_peer *mld_peer = NULL;

	/* Non-MLO connection, do nothing */
	if (!setup_info || !setup_info->mld_peer_mac)
		return QDF_STATUS_SUCCESS;

	dp_cfg_event_record_peer_setup_evt(soc, DP_CFG_EVENT_MLO_SETUP,
					   peer, NULL, vdev_id, setup_info);
	dp_info("link peer: " QDF_MAC_ADDR_FMT "mld peer: " QDF_MAC_ADDR_FMT
		"first_link %d, primary_link %d",
		QDF_MAC_ADDR_REF(peer->mac_addr.raw),
		QDF_MAC_ADDR_REF(setup_info->mld_peer_mac),
		setup_info->is_first_link,
		setup_info->is_primary_link);

	/* if this is the first link peer */
	if (setup_info->is_first_link)
		/* create MLD peer */
		dp_peer_create_wifi3((struct cdp_soc_t *)soc,
				     vdev_id,
				     setup_info->mld_peer_mac,
				     CDP_MLD_PEER_TYPE);

	peer->first_link = setup_info->is_first_link;
	peer->primary_link = setup_info->is_primary_link;
	/* look up the MLD peer (takes a CDP reference, released below) */
	mld_peer = dp_mld_peer_find_hash_find(soc,
					      setup_info->mld_peer_mac,
					      0, vdev_id, DP_MOD_ID_CDP);
	if (mld_peer) {
		if (setup_info->is_first_link) {
			/* assign rx_tid to mld peer */
			mld_peer->rx_tid = peer->rx_tid;
			/* no cdp_peer_setup for MLD peer,
			 * set it for addba processing
			 */
			qdf_atomic_set(&mld_peer->is_default_route_set, 1);
		} else {
			/* free link peer original rx_tids mem */
			dp_peer_rx_tids_destroy(peer);
			/* assign mld peer rx_tid to link peer */
			peer->rx_tid = mld_peer->rx_tid;
		}

		if (setup_info->is_primary_link &&
		    !setup_info->is_first_link) {
			struct dp_vdev *prev_vdev;
			/*
			 * if first link is not the primary link,
			 * then need to change mld_peer->vdev as
			 * primary link dp_vdev is not same one
			 * during mld peer creation.
			 */
			prev_vdev = mld_peer->vdev;
			dp_info("Primary link is not the first link. vdev: %pK,"
				"vdev_id %d vdev_ref_cnt %d",
				mld_peer->vdev, vdev_id,
				qdf_atomic_read(&mld_peer->vdev->ref_cnt));
			/* release the ref to original dp_vdev */
			dp_vdev_unref_delete(soc, mld_peer->vdev,
					     DP_MOD_ID_CHILD);
			/*
			 * get the ref to new dp_vdev,
			 * increase dp_vdev ref_cnt
			 */
			mld_peer->vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
							       DP_MOD_ID_CHILD);
			/* NOTE(review): assumes dp_vdev_get_ref_by_id() cannot
			 * fail here and that mld_peer->txrx_peer is non-NULL
			 * at this point — verify against MLD peer attach
			 */
			mld_peer->txrx_peer->vdev = mld_peer->vdev;

			dp_cfg_event_record_mlo_setup_vdev_update_evt(
					soc, mld_peer, prev_vdev,
					mld_peer->vdev);

		}

		/* associate mld and link peer */
		dp_link_peer_add_mld_peer(peer, mld_peer);
		dp_mld_peer_add_link_peer(mld_peer, peer);

		mld_peer->txrx_peer->mld_peer = 1;
		/* drop the hash-find reference taken above */
		dp_peer_unref_delete(mld_peer, DP_MOD_ID_CDP);
	} else {
		peer->mld_peer = NULL;
		dp_err("mld peer" QDF_MAC_ADDR_FMT "not found!",
		       QDF_MAC_ADDR_REF(setup_info->mld_peer_mac));
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
8395 
8396 /*
8397  * dp_mlo_peer_authorize() - authorize MLO peer
8398  * @soc: soc handle
8399  * @peer: pointer to link peer
8400  *
8401  * return void
8402  */
8403 static void dp_mlo_peer_authorize(struct dp_soc *soc,
8404 				  struct dp_peer *peer)
8405 {
8406 	int i;
8407 	struct dp_peer *link_peer = NULL;
8408 	struct dp_peer *mld_peer = peer->mld_peer;
8409 	struct dp_mld_link_peers link_peers_info;
8410 
8411 	if (!mld_peer)
8412 		return;
8413 
8414 	/* get link peers with reference */
8415 	dp_get_link_peers_ref_from_mld_peer(soc, mld_peer,
8416 					    &link_peers_info,
8417 					    DP_MOD_ID_CDP);
8418 
8419 	for (i = 0; i < link_peers_info.num_links; i++) {
8420 		link_peer = link_peers_info.link_peers[i];
8421 
8422 		if (!link_peer->authorize) {
8423 			dp_release_link_peers_ref(&link_peers_info,
8424 						  DP_MOD_ID_CDP);
8425 			mld_peer->authorize = false;
8426 			return;
8427 		}
8428 	}
8429 
8430 	/* if we are here all link peers are authorized,
8431 	 * authorize ml_peer also
8432 	 */
8433 	mld_peer->authorize = true;
8434 
8435 	/* release link peers reference */
8436 	dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
8437 }
8438 #endif
8439 
8440 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
8441 				  enum cdp_host_reo_dest_ring *reo_dest,
8442 				  bool *hash_based)
8443 {
8444 	struct dp_soc *soc;
8445 	struct dp_pdev *pdev;
8446 
8447 	pdev = vdev->pdev;
8448 	soc = pdev->soc;
8449 	/*
8450 	 * hash based steering is disabled for Radios which are offloaded
8451 	 * to NSS
8452 	 */
8453 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
8454 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
8455 
8456 	/*
8457 	 * Below line of code will ensure the proper reo_dest ring is chosen
8458 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
8459 	 */
8460 	*reo_dest = pdev->reo_dest;
8461 }
8462 
8463 #ifdef IPA_OFFLOAD
8464 /**
8465  * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
8466  * @vdev: Virtual device
8467  *
8468  * Return: true if the vdev is of subtype P2P
8469  *	   false if the vdev is of any other subtype
8470  */
8471 static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
8472 {
8473 	if (vdev->subtype == wlan_op_subtype_p2p_device ||
8474 	    vdev->subtype == wlan_op_subtype_p2p_cli ||
8475 	    vdev->subtype == wlan_op_subtype_p2p_go)
8476 		return true;
8477 
8478 	return false;
8479 }
8480 
8481 /*
8482  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
8483  * @vdev: Datapath VDEV handle
8484  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
8485  * @hash_based: pointer to hash value (enabled/disabled) to be populated
8486  *
8487  * If IPA is enabled in ini, for SAP mode, disable hash based
8488  * steering, use default reo_dst ring for RX. Use config values for other modes.
8489  * Return: None
8490  */
8491 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
8492 				       struct cdp_peer_setup_info *setup_info,
8493 				       enum cdp_host_reo_dest_ring *reo_dest,
8494 				       bool *hash_based,
8495 				       uint8_t *lmac_peer_id_msb)
8496 {
8497 	struct dp_soc *soc;
8498 	struct dp_pdev *pdev;
8499 
8500 	pdev = vdev->pdev;
8501 	soc = pdev->soc;
8502 
8503 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
8504 
8505 	/* For P2P-GO interfaces we do not need to change the REO
8506 	 * configuration even if IPA config is enabled
8507 	 */
8508 	if (dp_is_vdev_subtype_p2p(vdev))
8509 		return;
8510 
8511 	/*
8512 	 * If IPA is enabled, disable hash-based flow steering and set
8513 	 * reo_dest_ring_4 as the REO ring to receive packets on.
8514 	 * IPA is configured to reap reo_dest_ring_4.
8515 	 *
8516 	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
8517 	 * value enum value is from 1 - 4.
8518 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
8519 	 */
8520 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
8521 		if (vdev->opmode == wlan_op_mode_ap) {
8522 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
8523 			*hash_based = 0;
8524 		} else if (vdev->opmode == wlan_op_mode_sta &&
8525 			   dp_ipa_is_mdm_platform()) {
8526 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
8527 		}
8528 	}
8529 }
8530 
8531 #else
8532 
8533 /*
8534  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
8535  * @vdev: Datapath VDEV handle
8536  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
8537  * @hash_based: pointer to hash value (enabled/disabled) to be populated
8538  *
8539  * Use system config values for hash based steering.
8540  * Return: None
8541  */
8542 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
8543 				       struct cdp_peer_setup_info *setup_info,
8544 				       enum cdp_host_reo_dest_ring *reo_dest,
8545 				       bool *hash_based,
8546 				       uint8_t *lmac_peer_id_msb)
8547 {
8548 	struct dp_soc *soc = vdev->pdev->soc;
8549 
8550 	soc->arch_ops.peer_get_reo_hash(vdev, setup_info, reo_dest, hash_based,
8551 					lmac_peer_id_msb);
8552 }
8553 #endif /* IPA_OFFLOAD */
8554 
8555 /*
8556  * dp_peer_setup_wifi3() - initialize the peer
8557  * @soc_hdl: soc handle object
8558  * @vdev_id : vdev_id of vdev object
8559  * @peer_mac: Peer's mac address
8560  * @peer_setup_info: peer setup info for MLO
8561  *
8562  * Return: QDF_STATUS
8563  */
8564 static QDF_STATUS
8565 dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8566 		    uint8_t *peer_mac,
8567 		    struct cdp_peer_setup_info *setup_info)
8568 {
8569 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8570 	struct dp_pdev *pdev;
8571 	bool hash_based = 0;
8572 	enum cdp_host_reo_dest_ring reo_dest;
8573 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8574 	struct dp_vdev *vdev = NULL;
8575 	struct dp_peer *peer =
8576 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
8577 					       DP_MOD_ID_CDP);
8578 	struct dp_peer *mld_peer = NULL;
8579 	enum wlan_op_mode vdev_opmode;
8580 	uint8_t lmac_peer_id_msb = 0;
8581 
8582 	if (!peer)
8583 		return QDF_STATUS_E_FAILURE;
8584 
8585 	vdev = peer->vdev;
8586 	if (!vdev) {
8587 		status = QDF_STATUS_E_FAILURE;
8588 		goto fail;
8589 	}
8590 
8591 	/* save vdev related member in case vdev freed */
8592 	vdev_opmode = vdev->opmode;
8593 	pdev = vdev->pdev;
8594 	dp_peer_setup_get_reo_hash(vdev, setup_info,
8595 				   &reo_dest, &hash_based,
8596 				   &lmac_peer_id_msb);
8597 
8598 	dp_cfg_event_record_peer_setup_evt(soc, DP_CFG_EVENT_PEER_SETUP,
8599 					   peer, vdev, vdev->vdev_id,
8600 					   setup_info);
8601 	dp_info("pdev: %d vdev :%d opmode:%u peer %pK (" QDF_MAC_ADDR_FMT ") "
8602 		"hash-based-steering:%d default-reo_dest:%u",
8603 		pdev->pdev_id, vdev->vdev_id,
8604 		vdev->opmode, peer,
8605 		QDF_MAC_ADDR_REF(peer->mac_addr.raw), hash_based, reo_dest);
8606 
8607 	/*
8608 	 * There are corner cases where the AD1 = AD2 = "VAPs address"
8609 	 * i.e both the devices have same MAC address. In these
8610 	 * cases we want such pkts to be processed in NULL Q handler
8611 	 * which is REO2TCL ring. for this reason we should
8612 	 * not setup reo_queues and default route for bss_peer.
8613 	 */
8614 	if (!IS_MLO_DP_MLD_PEER(peer))
8615 		dp_monitor_peer_tx_init(pdev, peer);
8616 
8617 	if (!setup_info)
8618 		if (dp_peer_legacy_setup(soc, peer) !=
8619 				QDF_STATUS_SUCCESS) {
8620 			status = QDF_STATUS_E_RESOURCES;
8621 			goto fail;
8622 		}
8623 
8624 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
8625 		status = QDF_STATUS_E_FAILURE;
8626 		goto fail;
8627 	}
8628 
8629 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
8630 		/* TODO: Check the destination ring number to be passed to FW */
8631 		soc->cdp_soc.ol_ops->peer_set_default_routing(
8632 				soc->ctrl_psoc,
8633 				peer->vdev->pdev->pdev_id,
8634 				peer->mac_addr.raw,
8635 				peer->vdev->vdev_id, hash_based, reo_dest,
8636 				lmac_peer_id_msb);
8637 	}
8638 
8639 	qdf_atomic_set(&peer->is_default_route_set, 1);
8640 
8641 	status = dp_peer_mlo_setup(soc, peer, vdev->vdev_id, setup_info);
8642 	if (QDF_IS_STATUS_ERROR(status)) {
8643 		dp_peer_err("peer mlo setup failed");
8644 		qdf_assert_always(0);
8645 	}
8646 
8647 	if (vdev_opmode != wlan_op_mode_monitor) {
8648 		/* In case of MLD peer, switch peer to mld peer and
8649 		 * do peer_rx_init.
8650 		 */
8651 		if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
8652 		    IS_MLO_DP_LINK_PEER(peer)) {
8653 			if (setup_info && setup_info->is_first_link) {
8654 				mld_peer = DP_GET_MLD_PEER_FROM_PEER(peer);
8655 				if (mld_peer)
8656 					dp_peer_rx_init(pdev, mld_peer);
8657 				else
8658 					dp_peer_err("MLD peer null. Primary link peer:%pK", peer);
8659 			}
8660 		} else {
8661 			dp_peer_rx_init(pdev, peer);
8662 		}
8663 	}
8664 
8665 	dp_soc_txrx_peer_setup(vdev_opmode, soc, peer);
8666 
8667 	if (!IS_MLO_DP_MLD_PEER(peer))
8668 		dp_peer_ppdu_delayed_ba_init(peer);
8669 
8670 fail:
8671 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8672 	return status;
8673 }
8674 
8675 /*
8676  * dp_cp_peer_del_resp_handler - Handle the peer delete response
8677  * @soc_hdl: Datapath SOC handle
8678  * @vdev_id: id of virtual device object
8679  * @mac_addr: Mac address of the peer
8680  *
8681  * Return: QDF_STATUS
8682  */
8683 static QDF_STATUS dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl,
8684 					      uint8_t vdev_id,
8685 					      uint8_t *mac_addr)
8686 {
8687 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8688 	struct dp_ast_entry  *ast_entry = NULL;
8689 	txrx_ast_free_cb cb = NULL;
8690 	void *cookie;
8691 
8692 	if (soc->ast_offload_support)
8693 		return QDF_STATUS_E_INVAL;
8694 
8695 	qdf_spin_lock_bh(&soc->ast_lock);
8696 
8697 	ast_entry =
8698 		dp_peer_ast_hash_find_by_vdevid(soc, mac_addr,
8699 						vdev_id);
8700 
8701 	/* in case of qwrap we have multiple BSS peers
8702 	 * with same mac address
8703 	 *
8704 	 * AST entry for this mac address will be created
8705 	 * only for one peer hence it will be NULL here
8706 	 */
8707 	if ((!ast_entry || !ast_entry->delete_in_progress) ||
8708 	    (ast_entry->peer_id != HTT_INVALID_PEER)) {
8709 		qdf_spin_unlock_bh(&soc->ast_lock);
8710 		return QDF_STATUS_E_FAILURE;
8711 	}
8712 
8713 	if (ast_entry->is_mapped)
8714 		soc->ast_table[ast_entry->ast_idx] = NULL;
8715 
8716 	DP_STATS_INC(soc, ast.deleted, 1);
8717 	dp_peer_ast_hash_remove(soc, ast_entry);
8718 
8719 	cb = ast_entry->callback;
8720 	cookie = ast_entry->cookie;
8721 	ast_entry->callback = NULL;
8722 	ast_entry->cookie = NULL;
8723 
8724 	soc->num_ast_entries--;
8725 	qdf_spin_unlock_bh(&soc->ast_lock);
8726 
8727 	if (cb) {
8728 		cb(soc->ctrl_psoc,
8729 		   dp_soc_to_cdp_soc(soc),
8730 		   cookie,
8731 		   CDP_TXRX_AST_DELETED);
8732 	}
8733 	qdf_mem_free(ast_entry);
8734 
8735 	return QDF_STATUS_SUCCESS;
8736 }
8737 
8738 /*
8739  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
8740  * @txrx_soc: cdp soc handle
8741  * @ac: Access category
8742  * @value: timeout value in millisec
8743  *
8744  * Return: void
8745  */
8746 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
8747 				    uint8_t ac, uint32_t value)
8748 {
8749 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
8750 
8751 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
8752 }
8753 
8754 /*
8755  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
8756  * @txrx_soc: cdp soc handle
8757  * @ac: access category
8758  * @value: timeout value in millisec
8759  *
8760  * Return: void
8761  */
8762 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
8763 				    uint8_t ac, uint32_t *value)
8764 {
8765 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
8766 
8767 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
8768 }
8769 
8770 /*
8771  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
8772  * @txrx_soc: cdp soc handle
8773  * @pdev_id: id of physical device object
8774  * @val: reo destination ring index (1 - 4)
8775  *
8776  * Return: QDF_STATUS
8777  */
8778 static QDF_STATUS
8779 dp_set_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id,
8780 		     enum cdp_host_reo_dest_ring val)
8781 {
8782 	struct dp_pdev *pdev =
8783 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
8784 						   pdev_id);
8785 
8786 	if (pdev) {
8787 		pdev->reo_dest = val;
8788 		return QDF_STATUS_SUCCESS;
8789 	}
8790 
8791 	return QDF_STATUS_E_FAILURE;
8792 }
8793 
8794 /*
8795  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
8796  * @txrx_soc: cdp soc handle
8797  * @pdev_id: id of physical device object
8798  *
8799  * Return: reo destination ring index
8800  */
8801 static enum cdp_host_reo_dest_ring
8802 dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id)
8803 {
8804 	struct dp_pdev *pdev =
8805 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
8806 						   pdev_id);
8807 
8808 	if (pdev)
8809 		return pdev->reo_dest;
8810 	else
8811 		return cdp_host_reo_dest_ring_unknown;
8812 }
8813 
8814 #ifdef WLAN_SUPPORT_MSCS
8815 /*
8816  * dp_record_mscs_params - MSCS parameters sent by the STA in
8817  * the MSCS Request to the AP. The AP makes a note of these
8818  * parameters while comparing the MSDUs sent by the STA, to
8819  * send the downlink traffic with correct User priority.
8820  * @soc - Datapath soc handle
8821  * @peer_mac - STA Mac address
8822  * @vdev_id - ID of the vdev handle
8823  * @mscs_params - Structure having MSCS parameters obtained
8824  * from handshake
8825  * @active - Flag to set MSCS active/inactive
8826  * return type - QDF_STATUS - Success/Invalid
8827  */
8828 static QDF_STATUS
8829 dp_record_mscs_params(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
8830 		      uint8_t vdev_id, struct cdp_mscs_params *mscs_params,
8831 		      bool active)
8832 {
8833 	struct dp_peer *peer;
8834 	QDF_STATUS status = QDF_STATUS_E_INVAL;
8835 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8836 
8837 	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
8838 				      DP_MOD_ID_CDP);
8839 
8840 	if (!peer) {
8841 		dp_err("Peer is NULL!");
8842 		goto fail;
8843 	}
8844 	if (!active) {
8845 		dp_info("MSCS Procedure is terminated");
8846 		peer->mscs_active = active;
8847 		goto fail;
8848 	}
8849 
8850 	if (mscs_params->classifier_type == IEEE80211_TCLAS_MASK_CLA_TYPE_4) {
8851 		/* Populate entries inside IPV4 database first */
8852 		peer->mscs_ipv4_parameter.user_priority_bitmap =
8853 			mscs_params->user_pri_bitmap;
8854 		peer->mscs_ipv4_parameter.user_priority_limit =
8855 			mscs_params->user_pri_limit;
8856 		peer->mscs_ipv4_parameter.classifier_mask =
8857 			mscs_params->classifier_mask;
8858 
8859 		/* Populate entries inside IPV6 database */
8860 		peer->mscs_ipv6_parameter.user_priority_bitmap =
8861 			mscs_params->user_pri_bitmap;
8862 		peer->mscs_ipv6_parameter.user_priority_limit =
8863 			mscs_params->user_pri_limit;
8864 		peer->mscs_ipv6_parameter.classifier_mask =
8865 			mscs_params->classifier_mask;
8866 		peer->mscs_active = 1;
8867 		dp_info("\n\tMSCS Procedure request based parameters for "QDF_MAC_ADDR_FMT"\n"
8868 			"\tClassifier_type = %d\tUser priority bitmap = %x\n"
8869 			"\tUser priority limit = %x\tClassifier mask = %x",
8870 			QDF_MAC_ADDR_REF(peer_mac),
8871 			mscs_params->classifier_type,
8872 			peer->mscs_ipv4_parameter.user_priority_bitmap,
8873 			peer->mscs_ipv4_parameter.user_priority_limit,
8874 			peer->mscs_ipv4_parameter.classifier_mask);
8875 	}
8876 
8877 	status = QDF_STATUS_SUCCESS;
8878 fail:
8879 	if (peer)
8880 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8881 	return status;
8882 }
8883 #endif
8884 
8885 /*
8886  * dp_get_sec_type() - Get the security type
8887  * @soc: soc handle
8888  * @vdev_id: id of dp handle
8889  * @peer_mac: mac of datapath PEER handle
8890  * @sec_idx:    Security id (mcast, ucast)
8891  *
8892  * return sec_type: Security type
8893  */
8894 static int dp_get_sec_type(struct cdp_soc_t *soc, uint8_t vdev_id,
8895 			   uint8_t *peer_mac, uint8_t sec_idx)
8896 {
8897 	int sec_type = 0;
8898 	struct dp_peer *peer =
8899 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
8900 						       peer_mac, 0, vdev_id,
8901 						       DP_MOD_ID_CDP);
8902 
8903 	if (!peer) {
8904 		dp_cdp_err("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
8905 		return sec_type;
8906 	}
8907 
8908 	if (!peer->txrx_peer) {
8909 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8910 		dp_peer_debug("%pK: txrx peer is NULL!\n", soc);
8911 		return sec_type;
8912 	}
8913 	sec_type = peer->txrx_peer->security[sec_idx].sec_type;
8914 
8915 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8916 	return sec_type;
8917 }
8918 
8919 /*
8920  * dp_peer_authorize() - authorize txrx peer
8921  * @soc: soc handle
8922  * @vdev_id: id of dp handle
8923  * @peer_mac: mac of datapath PEER handle
8924  * @authorize
8925  *
8926  */
8927 static QDF_STATUS
8928 dp_peer_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8929 		  uint8_t *peer_mac, uint32_t authorize)
8930 {
8931 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8932 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8933 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac,
8934 							      0, vdev_id,
8935 							      DP_MOD_ID_CDP);
8936 
8937 	if (!peer) {
8938 		dp_cdp_debug("%pK: Peer is NULL!\n", soc);
8939 		status = QDF_STATUS_E_FAILURE;
8940 	} else {
8941 		peer->authorize = authorize ? 1 : 0;
8942 		if (peer->txrx_peer)
8943 			peer->txrx_peer->authorize = peer->authorize;
8944 
8945 		if (!peer->authorize)
8946 			dp_peer_flush_frags(soc_hdl, vdev_id, peer_mac);
8947 
8948 		dp_mlo_peer_authorize(soc, peer);
8949 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8950 	}
8951 
8952 	return status;
8953 }
8954 
8955 /*
8956  * dp_peer_get_authorize() - get peer authorize status
8957  * @soc: soc handle
8958  * @vdev_id: id of dp handle
8959  * @peer_mac: mac of datapath PEER handle
8960  *
8961  * Retusn: true is peer is authorized, false otherwise
8962  */
8963 static bool
8964 dp_peer_get_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8965 		      uint8_t *peer_mac)
8966 {
8967 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8968 	bool authorize = false;
8969 	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
8970 						      0, vdev_id,
8971 						      DP_MOD_ID_CDP);
8972 
8973 	if (!peer) {
8974 		dp_cdp_debug("%pK: Peer is NULL!\n", soc);
8975 		return authorize;
8976 	}
8977 
8978 	authorize = peer->authorize;
8979 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8980 
8981 	return authorize;
8982 }
8983 
8984 /**
8985  * dp_vdev_unref_delete() - check and process vdev delete
8986  * @soc : DP specific soc pointer
8987  * @vdev: DP specific vdev pointer
8988  * @mod_id: module id
8989  *
8990  */
8991 void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev,
8992 			  enum dp_mod_id mod_id)
8993 {
8994 	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
8995 	void *vdev_delete_context = NULL;
8996 	uint8_t vdev_id = vdev->vdev_id;
8997 	struct dp_pdev *pdev = vdev->pdev;
8998 	struct dp_vdev *tmp_vdev = NULL;
8999 	uint8_t found = 0;
9000 
9001 	QDF_ASSERT(qdf_atomic_dec_return(&vdev->mod_refs[mod_id]) >= 0);
9002 
9003 	/* Return if this is not the last reference*/
9004 	if (!qdf_atomic_dec_and_test(&vdev->ref_cnt))
9005 		return;
9006 
9007 	/*
9008 	 * This should be set as last reference need to released
9009 	 * after cdp_vdev_detach() is called
9010 	 *
9011 	 * if this assert is hit there is a ref count issue
9012 	 */
9013 	QDF_ASSERT(vdev->delete.pending);
9014 
9015 	vdev_delete_cb = vdev->delete.callback;
9016 	vdev_delete_context = vdev->delete.context;
9017 
9018 	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")- its last peer is done",
9019 		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
9020 
9021 	if (wlan_op_mode_monitor == vdev->opmode) {
9022 		dp_monitor_vdev_delete(soc, vdev);
9023 		goto free_vdev;
9024 	}
9025 
9026 	/* all peers are gone, go ahead and delete it */
9027 	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
9028 			FLOW_TYPE_VDEV, vdev_id);
9029 	dp_tx_vdev_detach(vdev);
9030 	dp_monitor_vdev_detach(vdev);
9031 
9032 free_vdev:
9033 	qdf_spinlock_destroy(&vdev->peer_list_lock);
9034 
9035 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
9036 	TAILQ_FOREACH(tmp_vdev, &soc->inactive_vdev_list,
9037 		      inactive_list_elem) {
9038 		if (tmp_vdev == vdev) {
9039 			found = 1;
9040 			break;
9041 		}
9042 	}
9043 	if (found)
9044 		TAILQ_REMOVE(&soc->inactive_vdev_list, vdev,
9045 			     inactive_list_elem);
9046 	/* delete this peer from the list */
9047 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
9048 
9049 	dp_cfg_event_record_vdev_evt(soc, DP_CFG_EVENT_VDEV_UNREF_DEL,
9050 				     vdev);
9051 	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")",
9052 		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
9053 	wlan_minidump_remove(vdev, sizeof(*vdev), soc->ctrl_psoc,
9054 			     WLAN_MD_DP_VDEV, "dp_vdev");
9055 	qdf_mem_free(vdev);
9056 	vdev = NULL;
9057 
9058 	if (vdev_delete_cb)
9059 		vdev_delete_cb(vdev_delete_context);
9060 }
9061 
9062 qdf_export_symbol(dp_vdev_unref_delete);
9063 
9064 /*
9065  * dp_peer_unref_delete() - unref and delete peer
9066  * @peer_handle:    Datapath peer handle
9067  * @mod_id:         ID of module releasing reference
9068  *
9069  */
9070 void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id mod_id)
9071 {
9072 	struct dp_vdev *vdev = peer->vdev;
9073 	struct dp_pdev *pdev = vdev->pdev;
9074 	struct dp_soc *soc = pdev->soc;
9075 	uint16_t peer_id;
9076 	struct dp_peer *tmp_peer;
9077 	bool found = false;
9078 
9079 	if (mod_id > DP_MOD_ID_RX)
9080 		QDF_ASSERT(qdf_atomic_dec_return(&peer->mod_refs[mod_id]) >= 0);
9081 
9082 	/*
9083 	 * Hold the lock all the way from checking if the peer ref count
9084 	 * is zero until the peer references are removed from the hash
9085 	 * table and vdev list (if the peer ref count is zero).
9086 	 * This protects against a new HL tx operation starting to use the
9087 	 * peer object just after this function concludes it's done being used.
9088 	 * Furthermore, the lock needs to be held while checking whether the
9089 	 * vdev's list of peers is empty, to make sure that list is not modified
9090 	 * concurrently with the empty check.
9091 	 */
9092 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
9093 		peer_id = peer->peer_id;
9094 
9095 		/*
9096 		 * Make sure that the reference to the peer in
9097 		 * peer object map is removed
9098 		 */
9099 		QDF_ASSERT(peer_id == HTT_INVALID_PEER);
9100 
9101 		dp_peer_info("Deleting peer %pK ("QDF_MAC_ADDR_FMT")", peer,
9102 			     QDF_MAC_ADDR_REF(peer->mac_addr.raw));
9103 
9104 		dp_peer_sawf_ctx_free(soc, peer);
9105 
9106 		wlan_minidump_remove(peer, sizeof(*peer), soc->ctrl_psoc,
9107 				     WLAN_MD_DP_PEER, "dp_peer");
9108 
9109 		qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
9110 		TAILQ_FOREACH(tmp_peer, &soc->inactive_peer_list,
9111 			      inactive_list_elem) {
9112 			if (tmp_peer == peer) {
9113 				found = 1;
9114 				break;
9115 			}
9116 		}
9117 		if (found)
9118 			TAILQ_REMOVE(&soc->inactive_peer_list, peer,
9119 				     inactive_list_elem);
9120 		/* delete this peer from the list */
9121 		qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
9122 		DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
9123 		dp_peer_update_state(soc, peer, DP_PEER_STATE_FREED);
9124 
9125 		/* cleanup the peer data */
9126 		dp_peer_cleanup(vdev, peer);
9127 
9128 		if (!IS_MLO_DP_MLD_PEER(peer))
9129 			dp_monitor_peer_detach(soc, peer);
9130 
9131 		qdf_spinlock_destroy(&peer->peer_state_lock);
9132 
9133 		dp_txrx_peer_detach(soc, peer);
9134 		dp_cfg_event_record_peer_evt(soc, DP_CFG_EVENT_PEER_UNREF_DEL,
9135 					     peer, vdev, 0);
9136 		qdf_mem_free(peer);
9137 
9138 		/*
9139 		 * Decrement ref count taken at peer create
9140 		 */
9141 		dp_peer_info("Deleted peer. Unref vdev %pK, vdev_ref_cnt %d",
9142 			     vdev, qdf_atomic_read(&vdev->ref_cnt));
9143 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CHILD);
9144 	}
9145 }
9146 
9147 qdf_export_symbol(dp_peer_unref_delete);
9148 
9149 /*
9150  * dp_txrx_peer_unref_delete() - unref and delete peer
9151  * @handle: Datapath txrx ref handle
9152  * @mod_id: Module ID of the caller
9153  *
9154  */
9155 void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle,
9156 			       enum dp_mod_id mod_id)
9157 {
9158 	dp_peer_unref_delete((struct dp_peer *)handle, mod_id);
9159 }
9160 
9161 qdf_export_symbol(dp_txrx_peer_unref_delete);
9162 
9163 /*
9164  * dp_peer_delete_wifi3() – Delete txrx peer
9165  * @soc_hdl: soc handle
9166  * @vdev_id: id of dp handle
9167  * @peer_mac: mac of datapath PEER handle
9168  * @bitmap: bitmap indicating special handling of request.
9169  * @peer_type: peer type (link or MLD)
9170  *
9171  */
9172 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
9173 				       uint8_t vdev_id,
9174 				       uint8_t *peer_mac, uint32_t bitmap,
9175 				       enum cdp_peer_type peer_type)
9176 {
9177 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9178 	struct dp_peer *peer;
9179 	struct cdp_peer_info peer_info = { 0 };
9180 	struct dp_vdev *vdev = NULL;
9181 
9182 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac,
9183 				 false, peer_type);
9184 	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
9185 
9186 	/* Peer can be null for monitor vap mac address */
9187 	if (!peer) {
9188 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
9189 			  "%s: Invalid peer\n", __func__);
9190 		return QDF_STATUS_E_FAILURE;
9191 	}
9192 
9193 	if (!peer->valid) {
9194 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9195 		dp_err("Invalid peer: "QDF_MAC_ADDR_FMT,
9196 			QDF_MAC_ADDR_REF(peer_mac));
9197 		return QDF_STATUS_E_ALREADY;
9198 	}
9199 
9200 	vdev = peer->vdev;
9201 
9202 	if (!vdev) {
9203 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9204 		return QDF_STATUS_E_FAILURE;
9205 	}
9206 
9207 	peer->valid = 0;
9208 
9209 	dp_cfg_event_record_peer_evt(soc, DP_CFG_EVENT_PEER_DELETE, peer,
9210 				     vdev, 0);
9211 	dp_init_info("%pK: peer %pK (" QDF_MAC_ADDR_FMT ") pending-refs %d",
9212 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
9213 		     qdf_atomic_read(&peer->ref_cnt));
9214 
9215 	dp_peer_rx_reo_shared_qaddr_delete(soc, peer);
9216 
9217 	dp_local_peer_id_free(peer->vdev->pdev, peer);
9218 
9219 	/* Drop all rx packets before deleting peer */
9220 	dp_clear_peer_internal(soc, peer);
9221 
9222 	qdf_spinlock_destroy(&peer->peer_info_lock);
9223 	dp_peer_multipass_list_remove(peer);
9224 
9225 	/* remove the reference to the peer from the hash table */
9226 	dp_peer_find_hash_remove(soc, peer);
9227 
9228 	dp_peer_vdev_list_remove(soc, vdev, peer);
9229 
9230 	dp_peer_mlo_delete(peer);
9231 
9232 	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
9233 	TAILQ_INSERT_TAIL(&soc->inactive_peer_list, peer,
9234 			  inactive_list_elem);
9235 	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
9236 
9237 	/*
9238 	 * Remove the reference added during peer_attach.
9239 	 * The peer will still be left allocated until the
9240 	 * PEER_UNMAP message arrives to remove the other
9241 	 * reference, added by the PEER_MAP message.
9242 	 */
9243 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
9244 	/*
9245 	 * Remove the reference taken above
9246 	 */
9247 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9248 
9249 	return QDF_STATUS_SUCCESS;
9250 }
9251 
#ifdef DP_RX_UDP_OVER_PEER_ROAM
/**
 * dp_update_roaming_peer_wifi3() - record roaming peer info on the vdev
 * @soc_hdl: soc handle
 * @vdev_id: id of the vdev the peer roamed on
 * @peer_mac: mac address of the roaming peer
 * @auth_status: authentication status reported for the peer
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE if the vdev id
 * cannot be resolved.
 */
static QDF_STATUS dp_update_roaming_peer_wifi3(struct cdp_soc_t *soc_hdl,
					       uint8_t vdev_id,
					       uint8_t *peer_mac,
					       uint32_t auth_status)
{
	struct dp_soc *dp_soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev;

	vdev = dp_vdev_get_ref_by_id(dp_soc, vdev_id, DP_MOD_ID_CDP);
	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	qdf_mem_copy(vdev->roaming_peer_mac.raw, peer_mac,
		     QDF_MAC_ADDR_SIZE);
	vdev->roaming_peer_status = auth_status;

	dp_vdev_unref_delete(dp_soc, vdev, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}
#endif
9272 /*
9273  * dp_get_vdev_mac_addr_wifi3() – Detach txrx peer
9274  * @soc_hdl: Datapath soc handle
9275  * @vdev_id: virtual interface id
9276  *
9277  * Return: MAC address on success, NULL on failure.
9278  *
9279  */
9280 static uint8_t *dp_get_vdev_mac_addr_wifi3(struct cdp_soc_t *soc_hdl,
9281 					   uint8_t vdev_id)
9282 {
9283 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9284 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9285 						     DP_MOD_ID_CDP);
9286 	uint8_t *mac = NULL;
9287 
9288 	if (!vdev)
9289 		return NULL;
9290 
9291 	mac = vdev->mac_addr.raw;
9292 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9293 
9294 	return mac;
9295 }
9296 
9297 /*
9298  * dp_vdev_set_wds() - Enable per packet stats
9299  * @soc: DP soc handle
9300  * @vdev_id: id of DP VDEV handle
9301  * @val: value
9302  *
9303  * Return: none
9304  */
9305 static int dp_vdev_set_wds(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
9306 			   uint32_t val)
9307 {
9308 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9309 	struct dp_vdev *vdev =
9310 		dp_vdev_get_ref_by_id((struct dp_soc *)soc, vdev_id,
9311 				      DP_MOD_ID_CDP);
9312 
9313 	if (!vdev)
9314 		return QDF_STATUS_E_FAILURE;
9315 
9316 	vdev->wds_enabled = val;
9317 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9318 
9319 	return QDF_STATUS_SUCCESS;
9320 }
9321 
9322 static int dp_get_opmode(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
9323 {
9324 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9325 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9326 						     DP_MOD_ID_CDP);
9327 	int opmode;
9328 
9329 	if (!vdev) {
9330 		dp_err_rl("vdev for id %d is NULL", vdev_id);
9331 		return -EINVAL;
9332 	}
9333 	opmode = vdev->opmode;
9334 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9335 
9336 	return opmode;
9337 }
9338 
9339 /**
9340  * dp_get_os_rx_handles_from_vdev_wifi3() - Get os rx handles for a vdev
9341  * @soc_hdl: ol_txrx_soc_handle handle
9342  * @vdev_id: vdev id for which os rx handles are needed
9343  * @stack_fn_p: pointer to stack function pointer
9344  * @osif_handle_p: pointer to ol_osif_vdev_handle
9345  *
9346  * Return: void
9347  */
9348 static
9349 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_soc_t *soc_hdl,
9350 					  uint8_t vdev_id,
9351 					  ol_txrx_rx_fp *stack_fn_p,
9352 					  ol_osif_vdev_handle *osif_vdev_p)
9353 {
9354 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9355 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9356 						     DP_MOD_ID_CDP);
9357 
9358 	if (qdf_unlikely(!vdev)) {
9359 		*stack_fn_p = NULL;
9360 		*osif_vdev_p = NULL;
9361 		return;
9362 	}
9363 	*stack_fn_p = vdev->osif_rx_stack;
9364 	*osif_vdev_p = vdev->osif_vdev;
9365 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9366 }
9367 
9368 /**
9369  * dp_get_ctrl_pdev_from_vdev() - Get control pdev of vdev
9370  * @soc_hdl: datapath soc handle
9371  * @vdev_id: virtual device/interface id
9372  *
9373  * Return: Handle to control pdev
9374  */
9375 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(
9376 						struct cdp_soc_t *soc_hdl,
9377 						uint8_t vdev_id)
9378 {
9379 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9380 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9381 						     DP_MOD_ID_CDP);
9382 	struct dp_pdev *pdev;
9383 
9384 	if (!vdev)
9385 		return NULL;
9386 
9387 	pdev = vdev->pdev;
9388 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9389 	return pdev ? (struct cdp_cfg *)pdev->wlan_cfg_ctx : NULL;
9390 }
9391 
9392 /**
9393  * dp_get_tx_pending() - read pending tx
9394  * @pdev_handle: Datapath PDEV handle
9395  *
9396  * Return: outstanding tx
9397  */
9398 static int32_t dp_get_tx_pending(struct cdp_pdev *pdev_handle)
9399 {
9400 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
9401 
9402 	return qdf_atomic_read(&pdev->num_tx_outstanding);
9403 }
9404 
9405 /**
9406  * dp_get_peer_mac_from_peer_id() - get peer mac
9407  * @pdev_handle: Datapath PDEV handle
9408  * @peer_id: Peer ID
9409  * @peer_mac: MAC addr of PEER
9410  *
9411  * Return: QDF_STATUS
9412  */
9413 static QDF_STATUS dp_get_peer_mac_from_peer_id(struct cdp_soc_t *soc,
9414 					       uint32_t peer_id,
9415 					       uint8_t *peer_mac)
9416 {
9417 	struct dp_peer *peer;
9418 
9419 	if (soc && peer_mac) {
9420 		peer = dp_peer_get_ref_by_id((struct dp_soc *)soc,
9421 					     (uint16_t)peer_id,
9422 					     DP_MOD_ID_CDP);
9423 		if (peer) {
9424 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
9425 				     QDF_MAC_ADDR_SIZE);
9426 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9427 			return QDF_STATUS_SUCCESS;
9428 		}
9429 	}
9430 
9431 	return QDF_STATUS_E_FAILURE;
9432 }
9433 
9434 #ifdef MESH_MODE_SUPPORT
9435 static
9436 void dp_vdev_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
9437 {
9438 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
9439 
9440 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
9441 	vdev->mesh_vdev = val;
9442 	if (val)
9443 		vdev->skip_sw_tid_classification |=
9444 			DP_TX_MESH_ENABLED;
9445 	else
9446 		vdev->skip_sw_tid_classification &=
9447 			~DP_TX_MESH_ENABLED;
9448 }
9449 
9450 /*
9451  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
9452  * @vdev_hdl: virtual device object
9453  * @val: value to be set
9454  *
9455  * Return: void
9456  */
9457 static
9458 void dp_vdev_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
9459 {
9460 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
9461 
9462 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
9463 	vdev->mesh_rx_filter = val;
9464 }
9465 #endif
9466 
9467 /*
9468  * dp_vdev_set_hlos_tid_override() - to set hlos tid override
9469  * @vdev_hdl: virtual device object
9470  * @val: value to be set
9471  *
9472  * Return: void
9473  */
9474 static
9475 void dp_vdev_set_hlos_tid_override(struct dp_vdev *vdev, uint32_t val)
9476 {
9477 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
9478 	if (val)
9479 		vdev->skip_sw_tid_classification |=
9480 			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
9481 	else
9482 		vdev->skip_sw_tid_classification &=
9483 			~DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
9484 }
9485 
9486 /*
9487  * dp_vdev_get_hlos_tid_override() - to get hlos tid override flag
9488  * @vdev_hdl: virtual device object
9489  * @val: value to be set
9490  *
9491  * Return: 1 if this flag is set
9492  */
9493 static
9494 uint8_t dp_vdev_get_hlos_tid_override(struct cdp_vdev *vdev_hdl)
9495 {
9496 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
9497 
9498 	return !!(vdev->skip_sw_tid_classification &
9499 			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED);
9500 }
9501 
9502 #ifdef VDEV_PEER_PROTOCOL_COUNT
9503 static void dp_enable_vdev_peer_protocol_count(struct cdp_soc_t *soc_hdl,
9504 					       int8_t vdev_id,
9505 					       bool enable)
9506 {
9507 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9508 	struct dp_vdev *vdev;
9509 
9510 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
9511 	if (!vdev)
9512 		return;
9513 
9514 	dp_info("enable %d vdev_id %d", enable, vdev_id);
9515 	vdev->peer_protocol_count_track = enable;
9516 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9517 }
9518 
9519 static void dp_enable_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
9520 						   int8_t vdev_id,
9521 						   int drop_mask)
9522 {
9523 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9524 	struct dp_vdev *vdev;
9525 
9526 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
9527 	if (!vdev)
9528 		return;
9529 
9530 	dp_info("drop_mask %d vdev_id %d", drop_mask, vdev_id);
9531 	vdev->peer_protocol_count_dropmask = drop_mask;
9532 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9533 }
9534 
9535 static int dp_is_vdev_peer_protocol_count_enabled(struct cdp_soc_t *soc_hdl,
9536 						  int8_t vdev_id)
9537 {
9538 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9539 	struct dp_vdev *vdev;
9540 	int peer_protocol_count_track;
9541 
9542 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
9543 	if (!vdev)
9544 		return 0;
9545 
9546 	dp_info("enable %d vdev_id %d", vdev->peer_protocol_count_track,
9547 		vdev_id);
9548 	peer_protocol_count_track =
9549 		vdev->peer_protocol_count_track;
9550 
9551 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9552 	return peer_protocol_count_track;
9553 }
9554 
9555 static int dp_get_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
9556 					       int8_t vdev_id)
9557 {
9558 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9559 	struct dp_vdev *vdev;
9560 	int peer_protocol_count_dropmask;
9561 
9562 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
9563 	if (!vdev)
9564 		return 0;
9565 
9566 	dp_info("drop_mask %d vdev_id %d", vdev->peer_protocol_count_dropmask,
9567 		vdev_id);
9568 	peer_protocol_count_dropmask =
9569 		vdev->peer_protocol_count_dropmask;
9570 
9571 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9572 	return peer_protocol_count_dropmask;
9573 }
9574 
9575 #endif
9576 
9577 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data)
9578 {
9579 	uint8_t pdev_count;
9580 
9581 	for (pdev_count = 0; pdev_count < MAX_PDEV_CNT; pdev_count++) {
9582 		if (soc->pdev_list[pdev_count] &&
9583 		    soc->pdev_list[pdev_count] == data)
9584 			return true;
9585 	}
9586 	return false;
9587 }
9588 
9589 /**
9590  * dp_rx_bar_stats_cb(): BAR received stats callback
9591  * @soc: SOC handle
9592  * @cb_ctxt: Call back context
9593  * @reo_status: Reo status
9594  *
9595  * return: void
9596  */
9597 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
9598 	union hal_reo_status *reo_status)
9599 {
9600 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
9601 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
9602 
9603 	if (!dp_check_pdev_exists(soc, pdev)) {
9604 		dp_err_rl("pdev doesn't exist");
9605 		return;
9606 	}
9607 
9608 	if (!qdf_atomic_read(&soc->cmn_init_done))
9609 		return;
9610 
9611 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
9612 		DP_PRINT_STATS("REO stats failure %d",
9613 			       queue_status->header.status);
9614 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
9615 		return;
9616 	}
9617 
9618 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
9619 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
9620 
9621 }
9622 
9623 /**
9624  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
9625  * @vdev: DP VDEV handle
9626  *
9627  * return: void
9628  */
9629 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
9630 			     struct cdp_vdev_stats *vdev_stats)
9631 {
9632 
9633 	if (!vdev || !vdev->pdev)
9634 		return;
9635 
9636 
9637 	dp_update_vdev_ingress_stats(vdev);
9638 
9639 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
9640 
9641 	dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, vdev_stats,
9642 			     DP_MOD_ID_GENERIC_STATS);
9643 
9644 	dp_update_vdev_rate_stats(vdev_stats, &vdev->stats);
9645 
9646 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9647 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
9648 			     vdev_stats, vdev->vdev_id,
9649 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
9650 #endif
9651 }
9652 
/**
 * dp_aggregate_pdev_stats() - Consolidate stats at PDEV level
 * @pdev: DP PDEV handle
 *
 * Zeroes the pdev tx/rx accumulators, then rebuilds them from the
 * per-vdev aggregates of every vdev on the pdev while holding the
 * vdev list lock.  A heap scratch buffer (atomic allocation) holds
 * the intermediate per-vdev aggregate.
 *
 * Return: void
 */
void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
{
	struct dp_vdev *vdev = NULL;
	struct dp_soc *soc;
	struct cdp_vdev_stats *vdev_stats =
			qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));

	if (!vdev_stats) {
		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
			   pdev->soc);
		return;
	}

	soc = pdev->soc;

	/* reset the accumulators before re-aggregating */
	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
	qdf_mem_zero(&pdev->stats.rx_i, sizeof(pdev->stats.rx_i));

	if (dp_monitor_is_enable_mcopy_mode(pdev))
		dp_monitor_invalid_peer_update_pdev_stats(soc, pdev);

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {

		dp_aggregate_vdev_stats(vdev, vdev_stats);
		dp_update_pdev_stats(pdev, vdev_stats);
		dp_update_pdev_ingress_stats(pdev, vdev);
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	qdf_mem_free(vdev_stats);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
#endif
}
9691 
9692 /**
9693  * dp_vdev_getstats() - get vdev packet level stats
9694  * @vdev_handle: Datapath VDEV handle
9695  * @stats: cdp network device stats structure
9696  *
9697  * Return: QDF_STATUS
9698  */
9699 static QDF_STATUS dp_vdev_getstats(struct cdp_vdev *vdev_handle,
9700 				   struct cdp_dev_stats *stats)
9701 {
9702 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
9703 	struct dp_pdev *pdev;
9704 	struct dp_soc *soc;
9705 	struct cdp_vdev_stats *vdev_stats;
9706 
9707 	if (!vdev)
9708 		return QDF_STATUS_E_FAILURE;
9709 
9710 	pdev = vdev->pdev;
9711 	if (!pdev)
9712 		return QDF_STATUS_E_FAILURE;
9713 
9714 	soc = pdev->soc;
9715 
9716 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
9717 
9718 	if (!vdev_stats) {
9719 		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
9720 			   soc);
9721 		return QDF_STATUS_E_FAILURE;
9722 	}
9723 
9724 	dp_aggregate_vdev_stats(vdev, vdev_stats);
9725 
9726 	stats->tx_packets = vdev_stats->tx.comp_pkt.num;
9727 	stats->tx_bytes = vdev_stats->tx.comp_pkt.bytes;
9728 
9729 	stats->tx_errors = vdev_stats->tx.tx_failed;
9730 	stats->tx_dropped = vdev_stats->tx_i.dropped.dropped_pkt.num +
9731 			    vdev_stats->tx_i.sg.dropped_host.num +
9732 			    vdev_stats->tx_i.mcast_en.dropped_map_error +
9733 			    vdev_stats->tx_i.mcast_en.dropped_self_mac +
9734 			    vdev_stats->tx_i.mcast_en.dropped_send_fail +
9735 			    vdev_stats->tx.nawds_mcast_drop;
9736 
9737 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
9738 		stats->rx_packets = vdev_stats->rx.to_stack.num;
9739 		stats->rx_bytes = vdev_stats->rx.to_stack.bytes;
9740 	} else {
9741 		stats->rx_packets = vdev_stats->rx_i.reo_rcvd_pkt.num +
9742 				    vdev_stats->rx_i.null_q_desc_pkt.num +
9743 				    vdev_stats->rx_i.routed_eapol_pkt.num;
9744 		stats->rx_bytes = vdev_stats->rx_i.reo_rcvd_pkt.bytes +
9745 				  vdev_stats->rx_i.null_q_desc_pkt.bytes +
9746 				  vdev_stats->rx_i.routed_eapol_pkt.bytes;
9747 	}
9748 
9749 	stats->rx_errors = vdev_stats->rx.err.mic_err +
9750 			   vdev_stats->rx.err.decrypt_err +
9751 			   vdev_stats->rx.err.fcserr +
9752 			   vdev_stats->rx.err.pn_err +
9753 			   vdev_stats->rx.err.oor_err +
9754 			   vdev_stats->rx.err.jump_2k_err +
9755 			   vdev_stats->rx.err.rxdma_wifi_parse_err;
9756 
9757 	stats->rx_dropped = vdev_stats->rx.mec_drop.num +
9758 			    vdev_stats->rx.multipass_rx_pkt_drop +
9759 			    vdev_stats->rx.peer_unauth_rx_pkt_drop +
9760 			    vdev_stats->rx.policy_check_drop +
9761 			    vdev_stats->rx.nawds_mcast_drop +
9762 			    vdev_stats->rx.mcast_3addr_drop;
9763 
9764 	qdf_mem_free(vdev_stats);
9765 
9766 	return QDF_STATUS_SUCCESS;
9767 }
9768 
9769 /**
9770  * dp_pdev_getstats() - get pdev packet level stats
9771  * @pdev_handle: Datapath PDEV handle
9772  * @stats: cdp network device stats structure
9773  *
9774  * Return: QDF_STATUS
9775  */
9776 static void dp_pdev_getstats(struct cdp_pdev *pdev_handle,
9777 			     struct cdp_dev_stats *stats)
9778 {
9779 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
9780 
9781 	dp_aggregate_pdev_stats(pdev);
9782 
9783 	stats->tx_packets = pdev->stats.tx.comp_pkt.num;
9784 	stats->tx_bytes = pdev->stats.tx.comp_pkt.bytes;
9785 
9786 	stats->tx_errors = pdev->stats.tx.tx_failed;
9787 	stats->tx_dropped = pdev->stats.tx_i.dropped.dropped_pkt.num +
9788 			    pdev->stats.tx_i.sg.dropped_host.num +
9789 			    pdev->stats.tx_i.mcast_en.dropped_map_error +
9790 			    pdev->stats.tx_i.mcast_en.dropped_self_mac +
9791 			    pdev->stats.tx_i.mcast_en.dropped_send_fail +
9792 			    pdev->stats.tx.nawds_mcast_drop +
9793 			    pdev->stats.tso_stats.dropped_host.num;
9794 
9795 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(pdev->soc->wlan_cfg_ctx)) {
9796 		stats->rx_packets = pdev->stats.rx.to_stack.num;
9797 		stats->rx_bytes = pdev->stats.rx.to_stack.bytes;
9798 	} else {
9799 		stats->rx_packets = pdev->stats.rx_i.reo_rcvd_pkt.num +
9800 				    pdev->stats.rx_i.null_q_desc_pkt.num +
9801 				    pdev->stats.rx_i.routed_eapol_pkt.num;
9802 		stats->rx_bytes = pdev->stats.rx_i.reo_rcvd_pkt.bytes +
9803 				  pdev->stats.rx_i.null_q_desc_pkt.bytes +
9804 				  pdev->stats.rx_i.routed_eapol_pkt.bytes;
9805 	}
9806 
9807 	stats->rx_errors = pdev->stats.err.ip_csum_err +
9808 		pdev->stats.err.tcp_udp_csum_err +
9809 		pdev->stats.rx.err.mic_err +
9810 		pdev->stats.rx.err.decrypt_err +
9811 		pdev->stats.rx.err.fcserr +
9812 		pdev->stats.rx.err.pn_err +
9813 		pdev->stats.rx.err.oor_err +
9814 		pdev->stats.rx.err.jump_2k_err +
9815 		pdev->stats.rx.err.rxdma_wifi_parse_err;
9816 	stats->rx_dropped = pdev->stats.dropped.msdu_not_done +
9817 		pdev->stats.dropped.mec +
9818 		pdev->stats.dropped.mesh_filter +
9819 		pdev->stats.dropped.wifi_parse +
9820 		pdev->stats.dropped.mon_rx_drop +
9821 		pdev->stats.dropped.mon_radiotap_update_err +
9822 		pdev->stats.rx.mec_drop.num +
9823 		pdev->stats.rx.multipass_rx_pkt_drop +
9824 		pdev->stats.rx.peer_unauth_rx_pkt_drop +
9825 		pdev->stats.rx.policy_check_drop +
9826 		pdev->stats.rx.nawds_mcast_drop +
9827 		pdev->stats.rx.mcast_3addr_drop;
9828 }
9829 
9830 /**
9831  * dp_get_device_stats() - get interface level packet stats
9832  * @soc: soc handle
9833  * @id : vdev_id or pdev_id based on type
9834  * @stats: cdp network device stats structure
9835  * @type: device type pdev/vdev
9836  *
9837  * Return: QDF_STATUS
9838  */
9839 static QDF_STATUS dp_get_device_stats(struct cdp_soc_t *soc_hdl, uint8_t id,
9840 				      struct cdp_dev_stats *stats,
9841 				      uint8_t type)
9842 {
9843 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9844 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
9845 	struct dp_vdev *vdev;
9846 
9847 	switch (type) {
9848 	case UPDATE_VDEV_STATS:
9849 		vdev = dp_vdev_get_ref_by_id(soc, id, DP_MOD_ID_CDP);
9850 
9851 		if (vdev) {
9852 			status = dp_vdev_getstats((struct cdp_vdev *)vdev,
9853 						  stats);
9854 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9855 		}
9856 		return status;
9857 	case UPDATE_PDEV_STATS:
9858 		{
9859 			struct dp_pdev *pdev =
9860 				dp_get_pdev_from_soc_pdev_id_wifi3(
9861 						(struct dp_soc *)soc,
9862 						 id);
9863 			if (pdev) {
9864 				dp_pdev_getstats((struct cdp_pdev *)pdev,
9865 						 stats);
9866 				return QDF_STATUS_SUCCESS;
9867 			}
9868 		}
9869 		break;
9870 	default:
9871 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9872 			"apstats cannot be updated for this input "
9873 			"type %d", type);
9874 		break;
9875 	}
9876 
9877 	return QDF_STATUS_E_FAILURE;
9878 }
9879 
/**
 * dp_srng_get_str_from_hal_ring_type() - human-readable name of a ring type
 * @ring_type: HAL ring type enum value
 *
 * Return: constant string name; "Invalid" (with an error log) for
 * unknown types
 */
const
char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
{
	switch (ring_type) {
	case REO_DST:
		return "Reo_dst";
	case REO_EXCEPTION:
		return "Reo_exception";
	case REO_CMD:
		return "Reo_cmd";
	case REO_REINJECT:
		return "Reo_reinject";
	case REO_STATUS:
		return "Reo_status";
	case WBM2SW_RELEASE:
		return "wbm2sw_release";
	case TCL_DATA:
		return "tcl_data";
	case TCL_CMD_CREDIT:
		return "tcl_cmd_credit";
	case TCL_STATUS:
		return "tcl_status";
	case SW2WBM_RELEASE:
		return "sw2wbm_release";
	case RXDMA_BUF:
		return "Rxdma_buf";
	case RXDMA_DST:
		return "Rxdma_dst";
	case RXDMA_MONITOR_BUF:
		return "Rxdma_monitor_buf";
	case RXDMA_MONITOR_DESC:
		return "Rxdma_monitor_desc";
	case RXDMA_MONITOR_STATUS:
		return "Rxdma_monitor_status";
	case RXDMA_MONITOR_DST:
		return "Rxdma_monitor_destination";
	case WBM_IDLE_LINK:
		return "WBM_hw_idle_link";
	case PPE2TCL:
		return "PPE2TCL";
	case REO2PPE:
		return "REO2PPE";
	case TX_MONITOR_DST:
		return "tx_monitor_destination";
	case TX_MONITOR_BUF:
		return "tx_monitor_buf";
	default:
		dp_err("Invalid ring type");
		break;
	}
	return "Invalid";
}
9932 
9933 /*
9934  * dp_print_napi_stats(): NAPI stats
9935  * @soc - soc handle
9936  */
9937 void dp_print_napi_stats(struct dp_soc *soc)
9938 {
9939 	hif_print_napi_stats(soc->hif_handle);
9940 }
9941 
9942 /**
9943  * dp_txrx_host_peer_stats_clr): Reinitialize the txrx peer stats
9944  * @soc: Datapath soc
9945  * @peer: Datatpath peer
9946  * @arg: argument to iter function
9947  *
9948  * Return: QDF_STATUS
9949  */
9950 static inline void
9951 dp_txrx_host_peer_stats_clr(struct dp_soc *soc,
9952 			    struct dp_peer *peer,
9953 			    void *arg)
9954 {
9955 	struct dp_txrx_peer *txrx_peer = NULL;
9956 	struct dp_peer *tgt_peer = NULL;
9957 	struct cdp_interface_peer_stats peer_stats_intf;
9958 
9959 	qdf_mem_zero(&peer_stats_intf, sizeof(struct cdp_interface_peer_stats));
9960 
9961 	DP_STATS_CLR(peer);
9962 	/* Clear monitor peer stats */
9963 	dp_monitor_peer_reset_stats(soc, peer);
9964 
9965 	/* Clear MLD peer stats only when link peer is primary */
9966 	if (dp_peer_is_primary_link_peer(peer)) {
9967 		tgt_peer = dp_get_tgt_peer_from_peer(peer);
9968 		if (tgt_peer) {
9969 			DP_STATS_CLR(tgt_peer);
9970 			txrx_peer = tgt_peer->txrx_peer;
9971 			dp_txrx_peer_stats_clr(txrx_peer);
9972 		}
9973 	}
9974 
9975 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9976 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, peer->vdev->pdev->soc,
9977 			     &peer_stats_intf,  peer->peer_id,
9978 			     UPDATE_PEER_STATS, peer->vdev->pdev->pdev_id);
9979 #endif
9980 }
9981 
#ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING
/* Clear the ring-usage watermark stats on every REO destination ring. */
static inline void dp_srng_clear_ring_usage_wm_stats(struct dp_soc *soc)
{
	int ring;

	for (ring = 0; ring < soc->num_reo_dest_rings; ring++)
		hal_srng_clear_ring_usage_wm_locked(soc->hal_soc,
					    soc->reo_dest_ring[ring].hal_srng);
}
#else
/* Watermark tracking compiled out: no-op stub. */
static inline void dp_srng_clear_ring_usage_wm_stats(struct dp_soc *soc)
{
}
#endif
9996 
9997 /**
9998  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
9999  * @vdev: DP_VDEV handle
10000  * @dp_soc: DP_SOC handle
10001  *
10002  * Return: QDF_STATUS
10003  */
10004 static inline QDF_STATUS
10005 dp_txrx_host_stats_clr(struct dp_vdev *vdev, struct dp_soc *soc)
10006 {
10007 	if (!vdev || !vdev->pdev)
10008 		return QDF_STATUS_E_FAILURE;
10009 
10010 	/*
10011 	 * if NSS offload is enabled, then send message
10012 	 * to NSS FW to clear the stats. Once NSS FW clears the statistics
10013 	 * then clear host statistics.
10014 	 */
10015 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
10016 		if (soc->cdp_soc.ol_ops->nss_stats_clr)
10017 			soc->cdp_soc.ol_ops->nss_stats_clr(soc->ctrl_psoc,
10018 							   vdev->vdev_id);
10019 	}
10020 
10021 	dp_vdev_stats_hw_offload_target_clear(soc, vdev->pdev->pdev_id,
10022 					      (1 << vdev->vdev_id));
10023 
10024 	DP_STATS_CLR(vdev->pdev);
10025 	DP_STATS_CLR(vdev->pdev->soc);
10026 	DP_STATS_CLR(vdev);
10027 
10028 	hif_clear_napi_stats(vdev->pdev->soc->hif_handle);
10029 
10030 	dp_vdev_iterate_peer(vdev, dp_txrx_host_peer_stats_clr, NULL,
10031 			     DP_MOD_ID_GENERIC_STATS);
10032 
10033 	dp_srng_clear_ring_usage_wm_stats(soc);
10034 
10035 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
10036 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
10037 			     &vdev->stats,  vdev->vdev_id,
10038 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
10039 #endif
10040 	return QDF_STATUS_SUCCESS;
10041 }
10042 
10043 /**
10044  * dp_get_peer_calibr_stats()- Get peer calibrated stats
10045  * @peer: Datapath peer
10046  * @peer_stats: buffer for peer stats
10047  *
10048  * Return: none
10049  */
10050 static inline
10051 void dp_get_peer_calibr_stats(struct dp_peer *peer,
10052 			      struct cdp_peer_stats *peer_stats)
10053 {
10054 	struct dp_peer *tgt_peer;
10055 
10056 	tgt_peer = dp_get_tgt_peer_from_peer(peer);
10057 	if (!tgt_peer)
10058 		return;
10059 
10060 	peer_stats->tx.last_per = tgt_peer->stats.tx.last_per;
10061 	peer_stats->tx.tx_bytes_success_last =
10062 				tgt_peer->stats.tx.tx_bytes_success_last;
10063 	peer_stats->tx.tx_data_success_last =
10064 					tgt_peer->stats.tx.tx_data_success_last;
10065 	peer_stats->tx.tx_byte_rate = tgt_peer->stats.tx.tx_byte_rate;
10066 	peer_stats->tx.tx_data_rate = tgt_peer->stats.tx.tx_data_rate;
10067 	peer_stats->tx.tx_data_ucast_last =
10068 					tgt_peer->stats.tx.tx_data_ucast_last;
10069 	peer_stats->tx.tx_data_ucast_rate =
10070 					tgt_peer->stats.tx.tx_data_ucast_rate;
10071 	peer_stats->tx.inactive_time = tgt_peer->stats.tx.inactive_time;
10072 	peer_stats->rx.rx_bytes_success_last =
10073 				tgt_peer->stats.rx.rx_bytes_success_last;
10074 	peer_stats->rx.rx_data_success_last =
10075 				tgt_peer->stats.rx.rx_data_success_last;
10076 	peer_stats->rx.rx_byte_rate = tgt_peer->stats.rx.rx_byte_rate;
10077 	peer_stats->rx.rx_data_rate = tgt_peer->stats.rx.rx_data_rate;
10078 }
10079 
10080 /**
10081  * dp_get_peer_basic_stats()- Get peer basic stats
10082  * @peer: Datapath peer
10083  * @peer_stats: buffer for peer stats
10084  *
10085  * Return: none
10086  */
10087 static inline
10088 void dp_get_peer_basic_stats(struct dp_peer *peer,
10089 			     struct cdp_peer_stats *peer_stats)
10090 {
10091 	struct dp_txrx_peer *txrx_peer;
10092 
10093 	txrx_peer = dp_get_txrx_peer(peer);
10094 	if (!txrx_peer)
10095 		return;
10096 
10097 	peer_stats->tx.comp_pkt.num += txrx_peer->comp_pkt.num;
10098 	peer_stats->tx.comp_pkt.bytes += txrx_peer->comp_pkt.bytes;
10099 	peer_stats->tx.tx_failed += txrx_peer->tx_failed;
10100 	peer_stats->rx.to_stack.num += txrx_peer->to_stack.num;
10101 	peer_stats->rx.to_stack.bytes += txrx_peer->to_stack.bytes;
10102 }
10103 
10104 /**
10105  * dp_get_peer_per_pkt_stats()- Get peer per pkt stats
10106  * @peer: Datapath peer
10107  * @peer_stats: buffer for peer stats
10108  *
10109  * Return: none
10110  */
10111 static inline
10112 void dp_get_peer_per_pkt_stats(struct dp_peer *peer,
10113 			       struct cdp_peer_stats *peer_stats)
10114 {
10115 	struct dp_txrx_peer *txrx_peer;
10116 	struct dp_peer_per_pkt_stats *per_pkt_stats;
10117 
10118 	txrx_peer = dp_get_txrx_peer(peer);
10119 	if (!txrx_peer)
10120 		return;
10121 
10122 	per_pkt_stats = &txrx_peer->stats.per_pkt_stats;
10123 	DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats);
10124 }
10125 
/**
 * dp_get_peer_extd_stats()- Get peer extd stats
 * @peer: Datapath peer
 * @peer_stats: buffer for peer stats
 *
 * Three compile-time variants:
 * - enhanced stats + MLO: for an MLD peer, aggregate monitor stats from
 *   every link peer; otherwise query the monitor layer for this peer.
 * - enhanced stats, no MLO: query the monitor layer directly.
 * - no enhanced stats: copy the extended counters cached in the
 *   txrx_peer via DP_UPDATE_EXTD_STATS().
 *
 * Return: none
 */
#ifdef QCA_ENHANCED_STATS_SUPPORT
#ifdef WLAN_FEATURE_11BE_MLO
static inline
void dp_get_peer_extd_stats(struct dp_peer *peer,
			    struct cdp_peer_stats *peer_stats)
{
	struct dp_soc *soc = peer->vdev->pdev->soc;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		uint8_t i;
		struct dp_peer *link_peer;
		struct dp_soc *link_peer_soc;
		struct dp_mld_link_peers link_peers_info;

		/* Take references on all link peers; must be released
		 * below via dp_release_link_peers_ref().
		 */
		dp_get_link_peers_ref_from_mld_peer(soc, peer,
						    &link_peers_info,
						    DP_MOD_ID_CDP);
		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			/* Each link peer may live on a different soc */
			link_peer_soc = link_peer->vdev->pdev->soc;
			dp_monitor_peer_get_stats(link_peer_soc, link_peer,
						  peer_stats,
						  UPDATE_PEER_STATS);
		}
		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
	} else {
		dp_monitor_peer_get_stats(soc, peer, peer_stats,
					  UPDATE_PEER_STATS);
	}
}
#else
static inline
void dp_get_peer_extd_stats(struct dp_peer *peer,
			    struct cdp_peer_stats *peer_stats)
{
	struct dp_soc *soc = peer->vdev->pdev->soc;

	dp_monitor_peer_get_stats(soc, peer, peer_stats, UPDATE_PEER_STATS);
}
#endif
#else
static inline
void dp_get_peer_extd_stats(struct dp_peer *peer,
			    struct cdp_peer_stats *peer_stats)
{
	struct dp_txrx_peer *txrx_peer;
	struct dp_peer_extd_stats *extd_stats;

	txrx_peer = dp_get_txrx_peer(peer);
	if (qdf_unlikely(!txrx_peer)) {
		dp_err_rl("txrx_peer NULL");
		return;
	}

	extd_stats = &txrx_peer->stats.extd_stats;
	DP_UPDATE_EXTD_STATS(peer_stats, extd_stats);
}
#endif
10191 
10192 /**
10193  * dp_get_peer_tx_per()- Get peer packet error ratio
10194  * @peer_stats: buffer for peer stats
10195  *
10196  * Return: none
10197  */
10198 static inline
10199 void dp_get_peer_tx_per(struct cdp_peer_stats *peer_stats)
10200 {
10201 	if (peer_stats->tx.tx_success.num + peer_stats->tx.retries > 0)
10202 		peer_stats->tx.per = (peer_stats->tx.retries * 100) /
10203 				  (peer_stats->tx.tx_success.num +
10204 				   peer_stats->tx.retries);
10205 	else
10206 		peer_stats->tx.per = 0;
10207 }
10208 
/**
 * dp_get_peer_stats() - gather every category of peer stats
 * @peer: Datapath peer
 * @peer_stats: buffer for peer stats
 *
 * Aggregates calibrated, basic, per-packet and extended counters into
 * @peer_stats, then derives the tx packet error ratio from the
 * aggregated counters.
 *
 * Return: none
 */

void dp_get_peer_stats(struct dp_peer *peer, struct cdp_peer_stats *peer_stats)
{
	/* Raw counters first ... */
	dp_get_peer_calibr_stats(peer, peer_stats);
	dp_get_peer_basic_stats(peer, peer_stats);
	dp_get_peer_per_pkt_stats(peer, peer_stats);
	dp_get_peer_extd_stats(peer, peer_stats);
	/* ... then the PER, which depends on the aggregated tx counters */
	dp_get_peer_tx_per(peer_stats);
}
10229 
10230 /*
10231  * dp_get_host_peer_stats()- function to print peer stats
10232  * @soc: dp_soc handle
10233  * @mac_addr: mac address of the peer
10234  *
10235  * Return: QDF_STATUS
10236  */
10237 static QDF_STATUS
10238 dp_get_host_peer_stats(struct cdp_soc_t *soc, uint8_t *mac_addr)
10239 {
10240 	struct dp_peer *peer = NULL;
10241 	struct cdp_peer_stats *peer_stats = NULL;
10242 	struct cdp_peer_info peer_info = { 0 };
10243 
10244 	if (!mac_addr) {
10245 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
10246 			  "%s: NULL peer mac addr\n", __func__);
10247 		return QDF_STATUS_E_FAILURE;
10248 	}
10249 
10250 	DP_PEER_INFO_PARAMS_INIT(&peer_info, DP_VDEV_ALL, mac_addr, false,
10251 				 CDP_WILD_PEER_TYPE);
10252 
10253 	peer = dp_peer_hash_find_wrapper((struct dp_soc *)soc, &peer_info,
10254 					 DP_MOD_ID_CDP);
10255 	if (!peer) {
10256 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
10257 			  "%s: Invalid peer\n", __func__);
10258 		return QDF_STATUS_E_FAILURE;
10259 	}
10260 
10261 	peer_stats = qdf_mem_malloc(sizeof(struct cdp_peer_stats));
10262 	if (!peer_stats) {
10263 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
10264 			  "%s: Memory allocation failed for cdp_peer_stats\n",
10265 			  __func__);
10266 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10267 		return QDF_STATUS_E_NOMEM;
10268 	}
10269 
10270 	qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats));
10271 
10272 	dp_get_peer_stats(peer, peer_stats);
10273 	dp_print_peer_stats(peer, peer_stats);
10274 
10275 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
10276 
10277 	qdf_mem_free(peer_stats);
10278 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10279 
10280 	return QDF_STATUS_SUCCESS;
10281 }
10282 
10283 /* *
10284  * dp_dump_wbm_idle_hptp() -dump wbm idle ring, hw hp tp info.
10285  * @soc: dp soc.
10286  * @pdev: dp pdev.
10287  *
10288  * Return: None.
10289  */
10290 static void
10291 dp_dump_wbm_idle_hptp(struct dp_soc *soc, struct dp_pdev *pdev)
10292 {
10293 	uint32_t hw_head;
10294 	uint32_t hw_tail;
10295 	struct dp_srng *srng;
10296 
10297 	if (!soc) {
10298 		dp_err("soc is NULL");
10299 		return;
10300 	}
10301 
10302 	if (!pdev) {
10303 		dp_err("pdev is NULL");
10304 		return;
10305 	}
10306 
10307 	srng = &pdev->soc->wbm_idle_link_ring;
10308 	if (!srng) {
10309 		dp_err("wbm_idle_link_ring srng is NULL");
10310 		return;
10311 	}
10312 
10313 	hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &hw_head,
10314 			&hw_tail, WBM_IDLE_LINK);
10315 
10316 	dp_debug("WBM_IDLE_LINK: HW hp: %d, HW tp: %d",
10317 			hw_head, hw_tail);
10318 }
10319 
10320 
/**
 * dp_txrx_stats_help() - Helper function for Txrx_Stats
 *
 * Prints the list of supported txrx_stats options. Option 19 is
 * intentionally absent, matching the supported set.
 *
 * Return: None
 */
static void dp_txrx_stats_help(void)
{
	static const char * const stats_help[] = {
		"Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>",
		"stats_option:",
		"  1 -- HTT Tx Statistics",
		"  2 -- HTT Rx Statistics",
		"  3 -- HTT Tx HW Queue Statistics",
		"  4 -- HTT Tx HW Sched Statistics",
		"  5 -- HTT Error Statistics",
		"  6 -- HTT TQM Statistics",
		"  7 -- HTT TQM CMDQ Statistics",
		"  8 -- HTT TX_DE_CMN Statistics",
		"  9 -- HTT Tx Rate Statistics",
		" 10 -- HTT Rx Rate Statistics",
		" 11 -- HTT Peer Statistics",
		" 12 -- HTT Tx SelfGen Statistics",
		" 13 -- HTT Tx MU HWQ Statistics",
		" 14 -- HTT RING_IF_INFO Statistics",
		" 15 -- HTT SRNG Statistics",
		" 16 -- HTT SFM Info Statistics",
		" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics",
		" 18 -- HTT Peer List Details",
		" 20 -- Clear Host Statistics",
		" 21 -- Host Rx Rate Statistics",
		" 22 -- Host Tx Rate Statistics",
		" 23 -- Host Tx Statistics",
		" 24 -- Host Rx Statistics",
		" 25 -- Host AST Statistics",
		" 26 -- Host SRNG PTR Statistics",
		" 27 -- Host Mon Statistics",
		" 28 -- Host REO Queue Statistics",
		" 29 -- Host Soc cfg param Statistics",
		" 30 -- Host pdev cfg param Statistics",
		" 31 -- Host NAPI stats",
		" 32 -- Host Interrupt stats",
		" 33 -- Host FISA stats",
		" 34 -- Host Register Work stats",
		" 35 -- HW REO Queue stats",
		" 36 -- Host WBM IDLE link desc ring HP/TP",
		" 37 -- Host SRNG usage watermark stats",
	};
	unsigned int i;

	for (i = 0; i < sizeof(stats_help) / sizeof(stats_help[0]); i++)
		dp_info("%s", stats_help[i]);
}
10367 
#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_umac_rst_skel_enable_update(): Update skel dbg flag for umac reset
 * @soc: dp soc handle
 * @en: enable/disable
 *
 * Return: void
 */
static void dp_umac_rst_skel_enable_update(struct dp_soc *soc, bool en)
{
	/* Record the setting and log it for supportability */
	soc->umac_reset_ctx.skel_enable = en;
	dp_cdp_debug("UMAC HW reset debug skeleton code enabled :%u",
		     soc->umac_reset_ctx.skel_enable);
}

/**
 * dp_umac_rst_skel_enable_get(): Get skel dbg flag for umac reset
 * @soc: dp soc handle
 *
 * Return: enable/disable flag
 */
static bool dp_umac_rst_skel_enable_get(struct dp_soc *soc)
{
	return soc->umac_reset_ctx.skel_enable;
}
#else
/* Stubs used when DP_UMAC_HW_RESET_SUPPORT is not compiled in:
 * the update is a no-op and the flag always reads back as disabled.
 */
static void dp_umac_rst_skel_enable_update(struct dp_soc *soc, bool en)
{
}

static bool dp_umac_rst_skel_enable_get(struct dp_soc *soc)
{
	return false;
}
#endif
10403 
/**
 * dp_print_host_stats()- Function to print the stats aggregated at host
 * @vdev: DP_VDEV handle
 * @req: host stats type
 * @soc: dp soc handler
 *
 * Maps the requested stats id to a host stats category and dispatches
 * to the matching print (or clear) helper. Unknown categories print
 * the txrx_stats usage help instead.
 *
 * Return: 0 on success, print error message in case of failure
 */
static int
dp_print_host_stats(struct dp_vdev *vdev,
		    struct cdp_txrx_stats_req *req,
		    struct dp_soc *soc)
{
	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
	/* Translate the generic request id into a host stats category */
	enum cdp_host_txrx_stats type =
			dp_stats_mapping_table[req->stats][STATS_HOST];

	/* Fold current vdev/peer counters into pdev totals first */
	dp_aggregate_pdev_stats(pdev);

	switch (type) {
	case TXRX_CLEAR_STATS:
		dp_txrx_host_stats_clr(vdev, soc);
		break;
	case TXRX_RX_RATE_STATS:
		dp_print_rx_rates(vdev);
		break;
	case TXRX_TX_RATE_STATS:
		dp_print_tx_rates(vdev);
		break;
	case TXRX_TX_HOST_STATS:
		dp_print_pdev_tx_stats(pdev);
		dp_print_soc_tx_stats(pdev->soc);
		break;
	case TXRX_RX_HOST_STATS:
		dp_print_pdev_rx_stats(pdev);
		dp_print_soc_rx_stats(pdev->soc);
		break;
	case TXRX_AST_STATS:
		dp_print_ast_stats(pdev->soc);
		dp_print_mec_stats(pdev->soc);
		dp_print_peer_table(vdev);
		break;
	case TXRX_SRNG_PTR_STATS:
		dp_print_ring_stats(pdev);
		break;
	case TXRX_RX_MON_STATS:
		dp_monitor_print_pdev_rx_mon_stats(pdev);
		break;
	case TXRX_REO_QUEUE_STATS:
		/* NOTE(review): this prints host peer stats keyed by
		 * req->peer_addr — confirm this is the intended handler
		 * for the REO queue stats option.
		 */
		dp_get_host_peer_stats((struct cdp_soc_t *)pdev->soc,
				       req->peer_addr);
		break;
	case TXRX_SOC_CFG_PARAMS:
		dp_print_soc_cfg_params(pdev->soc);
		break;
	case TXRX_PDEV_CFG_PARAMS:
		dp_print_pdev_cfg_params(pdev);
		break;
	case TXRX_NAPI_STATS:
		dp_print_napi_stats(pdev->soc);
		break;
	case TXRX_SOC_INTERRUPT_STATS:
		dp_print_soc_interrupt_stats(pdev->soc);
		break;
	case TXRX_SOC_FSE_STATS:
		dp_rx_dump_fisa_table(pdev->soc);
		break;
	case TXRX_HAL_REG_WRITE_STATS:
		hal_dump_reg_write_stats(pdev->soc->hal_soc);
		hal_dump_reg_write_srng_stats(pdev->soc->hal_soc);
		break;
	case TXRX_SOC_REO_HW_DESC_DUMP:
		dp_get_rx_reo_queue_info((struct cdp_soc_t *)pdev->soc,
					 vdev->vdev_id);
		break;
	case TXRX_SOC_WBM_IDLE_HPTP_DUMP:
		dp_dump_wbm_idle_hptp(pdev->soc, pdev);
		break;
	case TXRX_SRNG_USAGE_WM_STATS:
		/* Dump usage watermark stats for all SRNGs */
		dp_dump_srng_high_wm_stats(soc, 0xFF);
		break;
	default:
		dp_info("Wrong Input For TxRx Host Stats");
		dp_txrx_stats_help();
		break;
	}
	return 0;
}
10493 
/*
 * dp_pdev_tid_stats_ingress_inc
 * @pdev: pdev handle
 * @val: increase in value
 *
 * Adds @val to the pdev's tid_stats.ingress_stack counter.
 *
 * Return: void
 */
static void
dp_pdev_tid_stats_ingress_inc(struct dp_pdev *pdev, uint32_t val)
{
	pdev->stats.tid_stats.ingress_stack += val;
}
10506 
/*
 * dp_pdev_tid_stats_osif_drop
 * @pdev: pdev handle
 * @val: increase in value
 *
 * Adds @val to the pdev's tid_stats.osif_drop counter.
 *
 * Return: void
 */
static void
dp_pdev_tid_stats_osif_drop(struct dp_pdev *pdev, uint32_t val)
{
	pdev->stats.tid_stats.osif_drop += val;
}
10519 
10520 /*
10521  * dp_get_fw_peer_stats()- function to print peer stats
10522  * @soc: soc handle
10523  * @pdev_id : id of the pdev handle
10524  * @mac_addr: mac address of the peer
10525  * @cap: Type of htt stats requested
10526  * @is_wait: if set, wait on completion from firmware response
10527  *
10528  * Currently Supporting only MAC ID based requests Only
10529  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
10530  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
10531  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
10532  *
10533  * Return: QDF_STATUS
10534  */
10535 static QDF_STATUS
10536 dp_get_fw_peer_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
10537 		     uint8_t *mac_addr,
10538 		     uint32_t cap, uint32_t is_wait)
10539 {
10540 	int i;
10541 	uint32_t config_param0 = 0;
10542 	uint32_t config_param1 = 0;
10543 	uint32_t config_param2 = 0;
10544 	uint32_t config_param3 = 0;
10545 	struct dp_pdev *pdev =
10546 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10547 						   pdev_id);
10548 
10549 	if (!pdev)
10550 		return QDF_STATUS_E_FAILURE;
10551 
10552 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
10553 	config_param0 |= (1 << (cap + 1));
10554 
10555 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
10556 		config_param1 |= (1 << i);
10557 	}
10558 
10559 	config_param2 |= (mac_addr[0] & 0x000000ff);
10560 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
10561 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
10562 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
10563 
10564 	config_param3 |= (mac_addr[4] & 0x000000ff);
10565 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
10566 
10567 	if (is_wait) {
10568 		qdf_event_reset(&pdev->fw_peer_stats_event);
10569 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
10570 					  config_param0, config_param1,
10571 					  config_param2, config_param3,
10572 					  0, DBG_STATS_COOKIE_DP_STATS, 0);
10573 		qdf_wait_single_event(&pdev->fw_peer_stats_event,
10574 				      DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
10575 	} else {
10576 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
10577 					  config_param0, config_param1,
10578 					  config_param2, config_param3,
10579 					  0, DBG_STATS_COOKIE_DEFAULT, 0);
10580 	}
10581 
10582 	return QDF_STATUS_SUCCESS;
10583 
10584 }
10585 
/* This struct definition will be removed from here
 * once it get added in FW headers*/
struct httstats_cmd_req {
    uint32_t    config_param0;   /* forwarded to dp_h2t_ext_stats_msg_send */
    uint32_t    config_param1;   /* forwarded to dp_h2t_ext_stats_msg_send */
    uint32_t    config_param2;   /* forwarded to dp_h2t_ext_stats_msg_send */
    uint32_t    config_param3;   /* forwarded to dp_h2t_ext_stats_msg_send */
    int cookie;                  /* opaque cookie echoed back with the stats */
    u_int8_t    stats_id;        /* stats type passed as the request id */
};
10596 
10597 /*
10598  * dp_get_htt_stats: function to process the httstas request
10599  * @soc: DP soc handle
10600  * @pdev_id: id of pdev handle
10601  * @data: pointer to request data
10602  * @data_len: length for request data
10603  *
10604  * return: QDF_STATUS
10605  */
10606 static QDF_STATUS
10607 dp_get_htt_stats(struct cdp_soc_t *soc, uint8_t pdev_id, void *data,
10608 		 uint32_t data_len)
10609 {
10610 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
10611 	struct dp_pdev *pdev =
10612 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10613 						   pdev_id);
10614 
10615 	if (!pdev)
10616 		return QDF_STATUS_E_FAILURE;
10617 
10618 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
10619 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
10620 				req->config_param0, req->config_param1,
10621 				req->config_param2, req->config_param3,
10622 				req->cookie, DBG_STATS_COOKIE_DEFAULT, 0);
10623 
10624 	return QDF_STATUS_SUCCESS;
10625 }
10626 
10627 /**
10628  * dp_set_pdev_tidmap_prty_wifi3(): update tidmap priority in pdev
10629  * @pdev: DP_PDEV handle
10630  * @prio: tidmap priority value passed by the user
10631  *
10632  * Return: QDF_STATUS_SUCCESS on success
10633  */
10634 static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct dp_pdev *pdev,
10635 						uint8_t prio)
10636 {
10637 	struct dp_soc *soc = pdev->soc;
10638 
10639 	soc->tidmap_prty = prio;
10640 
10641 	hal_tx_set_tidmap_prty(soc->hal_soc, prio);
10642 	return QDF_STATUS_SUCCESS;
10643 }
10644 
/*
 * dp_get_peer_param: function to get parameters in peer
 * @cdp_soc: DP soc handle
 * @vdev_id: id of vdev handle
 * @peer_mac: peer mac address
 * @param: parameter type to be set
 * @val : address of buffer
 *
 * Currently a stub: no peer parameter is read back, *val is left
 * untouched, and callers always receive QDF_STATUS_SUCCESS.
 *
 * Return: val
 */
static QDF_STATUS dp_get_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
				    uint8_t *peer_mac,
				    enum cdp_peer_param_type param,
				    cdp_config_param_type *val)
{
	return QDF_STATUS_SUCCESS;
}
10662 
10663 /*
10664  * dp_set_peer_param: function to set parameters in peer
10665  * @cdp_soc: DP soc handle
10666  * @vdev_id: id of vdev handle
10667  * @peer_mac: peer mac address
10668  * @param: parameter type to be set
10669  * @val: value of parameter to be set
10670  *
10671  * Return: 0 for success. nonzero for failure.
10672  */
10673 static QDF_STATUS dp_set_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
10674 				    uint8_t *peer_mac,
10675 				    enum cdp_peer_param_type param,
10676 				    cdp_config_param_type val)
10677 {
10678 	struct dp_peer *peer =
10679 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
10680 						       peer_mac, 0, vdev_id,
10681 						       DP_MOD_ID_CDP);
10682 	struct dp_txrx_peer *txrx_peer;
10683 
10684 	if (!peer)
10685 		return QDF_STATUS_E_FAILURE;
10686 
10687 	txrx_peer = peer->txrx_peer;
10688 	if (!txrx_peer) {
10689 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10690 		return QDF_STATUS_E_FAILURE;
10691 	}
10692 
10693 	switch (param) {
10694 	case CDP_CONFIG_NAWDS:
10695 		txrx_peer->nawds_enabled = val.cdp_peer_param_nawds;
10696 		break;
10697 	case CDP_CONFIG_ISOLATION:
10698 		dp_set_peer_isolation(txrx_peer, val.cdp_peer_param_isolation);
10699 		break;
10700 	case CDP_CONFIG_IN_TWT:
10701 		txrx_peer->in_twt = !!(val.cdp_peer_param_in_twt);
10702 		break;
10703 	default:
10704 		break;
10705 	}
10706 
10707 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10708 
10709 	return QDF_STATUS_SUCCESS;
10710 }
10711 
/*
 * dp_get_pdev_param: function to get parameters from pdev
 * @cdp_soc: DP soc handle
 * @pdev_id: id of pdev handle
 * @param: parameter type to be get
 * @val : buffer for value
 *
 * Looks the pdev up by id and copies the requested parameter into the
 * caller-supplied union member. Unsupported parameter types fail.
 *
 * Return: status
 */
static QDF_STATUS dp_get_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
				    enum cdp_pdev_param_type param,
				    cdp_config_param_type *val)
{
	/* Held as the opaque cdp handle; cast to struct dp_pdev only
	 * where a concrete field is read directly.
	 */
	struct cdp_pdev *pdev = (struct cdp_pdev *)
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
						   pdev_id);
	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	switch (param) {
	case CDP_CONFIG_VOW:
		val->cdp_pdev_param_cfg_vow =
				((struct dp_pdev *)pdev)->delay_stats_flag;
		break;
	case CDP_TX_PENDING:
		val->cdp_pdev_param_tx_pending = dp_get_tx_pending(pdev);
		break;
	case CDP_FILTER_MCAST_DATA:
		val->cdp_pdev_param_fltr_mcast =
				dp_monitor_pdev_get_filter_mcast_data(pdev);
		break;
	case CDP_FILTER_NO_DATA:
		val->cdp_pdev_param_fltr_none =
				dp_monitor_pdev_get_filter_non_data(pdev);
		break;
	case CDP_FILTER_UCAST_DATA:
		val->cdp_pdev_param_fltr_ucast =
				dp_monitor_pdev_get_filter_ucast_data(pdev);
		break;
	case CDP_MONITOR_CHANNEL:
		val->cdp_pdev_param_monitor_chan =
			dp_monitor_get_chan_num((struct dp_pdev *)pdev);
		break;
	case CDP_MONITOR_FREQUENCY:
		val->cdp_pdev_param_mon_freq =
			dp_monitor_get_chan_freq((struct dp_pdev *)pdev);
		break;
	default:
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
10765 
10766 /*
10767  * dp_set_pdev_param: function to set parameters in pdev
10768  * @cdp_soc: DP soc handle
10769  * @pdev_id: id of pdev handle
10770  * @param: parameter type to be set
10771  * @val: value of parameter to be set
10772  *
10773  * Return: 0 for success. nonzero for failure.
10774  */
10775 static QDF_STATUS dp_set_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
10776 				    enum cdp_pdev_param_type param,
10777 				    cdp_config_param_type val)
10778 {
10779 	int target_type;
10780 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
10781 	struct dp_pdev *pdev =
10782 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
10783 						   pdev_id);
10784 	enum reg_wifi_band chan_band;
10785 
10786 	if (!pdev)
10787 		return QDF_STATUS_E_FAILURE;
10788 
10789 	target_type = hal_get_target_type(soc->hal_soc);
10790 	switch (target_type) {
10791 	case TARGET_TYPE_QCA6750:
10792 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC0_LMAC_ID;
10793 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
10794 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
10795 		break;
10796 	case TARGET_TYPE_KIWI:
10797 	case TARGET_TYPE_MANGO:
10798 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC0_LMAC_ID;
10799 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
10800 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
10801 		break;
10802 	default:
10803 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC1_LMAC_ID;
10804 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
10805 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
10806 		break;
10807 	}
10808 
10809 	switch (param) {
10810 	case CDP_CONFIG_TX_CAPTURE:
10811 		return dp_monitor_config_debug_sniffer(pdev,
10812 						val.cdp_pdev_param_tx_capture);
10813 	case CDP_CONFIG_DEBUG_SNIFFER:
10814 		return dp_monitor_config_debug_sniffer(pdev,
10815 						val.cdp_pdev_param_dbg_snf);
10816 	case CDP_CONFIG_BPR_ENABLE:
10817 		return dp_monitor_set_bpr_enable(pdev,
10818 						 val.cdp_pdev_param_bpr_enable);
10819 	case CDP_CONFIG_PRIMARY_RADIO:
10820 		pdev->is_primary = val.cdp_pdev_param_primary_radio;
10821 		break;
10822 	case CDP_CONFIG_CAPTURE_LATENCY:
10823 		pdev->latency_capture_enable = val.cdp_pdev_param_cptr_latcy;
10824 		break;
10825 	case CDP_INGRESS_STATS:
10826 		dp_pdev_tid_stats_ingress_inc(pdev,
10827 					      val.cdp_pdev_param_ingrs_stats);
10828 		break;
10829 	case CDP_OSIF_DROP:
10830 		dp_pdev_tid_stats_osif_drop(pdev,
10831 					    val.cdp_pdev_param_osif_drop);
10832 		break;
10833 	case CDP_CONFIG_ENH_RX_CAPTURE:
10834 		return dp_monitor_config_enh_rx_capture(pdev,
10835 						val.cdp_pdev_param_en_rx_cap);
10836 	case CDP_CONFIG_ENH_TX_CAPTURE:
10837 		return dp_monitor_config_enh_tx_capture(pdev,
10838 						val.cdp_pdev_param_en_tx_cap);
10839 	case CDP_CONFIG_HMMC_TID_OVERRIDE:
10840 		pdev->hmmc_tid_override_en = val.cdp_pdev_param_hmmc_tid_ovrd;
10841 		break;
10842 	case CDP_CONFIG_HMMC_TID_VALUE:
10843 		pdev->hmmc_tid = val.cdp_pdev_param_hmmc_tid;
10844 		break;
10845 	case CDP_CHAN_NOISE_FLOOR:
10846 		pdev->chan_noise_floor = val.cdp_pdev_param_chn_noise_flr;
10847 		break;
10848 	case CDP_TIDMAP_PRTY:
10849 		dp_set_pdev_tidmap_prty_wifi3(pdev,
10850 					      val.cdp_pdev_param_tidmap_prty);
10851 		break;
10852 	case CDP_FILTER_NEIGH_PEERS:
10853 		dp_monitor_set_filter_neigh_peers(pdev,
10854 					val.cdp_pdev_param_fltr_neigh_peers);
10855 		break;
10856 	case CDP_MONITOR_CHANNEL:
10857 		dp_monitor_set_chan_num(pdev, val.cdp_pdev_param_monitor_chan);
10858 		break;
10859 	case CDP_MONITOR_FREQUENCY:
10860 		chan_band = wlan_reg_freq_to_band(val.cdp_pdev_param_mon_freq);
10861 		dp_monitor_set_chan_freq(pdev, val.cdp_pdev_param_mon_freq);
10862 		dp_monitor_set_chan_band(pdev, chan_band);
10863 		break;
10864 	case CDP_CONFIG_BSS_COLOR:
10865 		dp_monitor_set_bsscolor(pdev, val.cdp_pdev_param_bss_color);
10866 		break;
10867 	case CDP_SET_ATF_STATS_ENABLE:
10868 		dp_monitor_set_atf_stats_enable(pdev,
10869 					val.cdp_pdev_param_atf_stats_enable);
10870 		break;
10871 	case CDP_CONFIG_SPECIAL_VAP:
10872 		dp_monitor_pdev_config_scan_spcl_vap(pdev,
10873 					val.cdp_pdev_param_config_special_vap);
10874 		dp_monitor_vdev_set_monitor_mode_buf_rings(pdev);
10875 		break;
10876 	case CDP_RESET_SCAN_SPCL_VAP_STATS_ENABLE:
10877 		dp_monitor_pdev_reset_scan_spcl_vap_stats_enable(pdev,
10878 				val.cdp_pdev_param_reset_scan_spcl_vap_stats_enable);
10879 		break;
10880 	case CDP_CONFIG_ENHANCED_STATS_ENABLE:
10881 		pdev->enhanced_stats_en = val.cdp_pdev_param_enhanced_stats_enable;
10882 		break;
10883 	case CDP_ISOLATION:
10884 		pdev->isolation = val.cdp_pdev_param_isolation;
10885 		break;
10886 	case CDP_CONFIG_UNDECODED_METADATA_CAPTURE_ENABLE:
10887 		return dp_monitor_config_undecoded_metadata_capture(pdev,
10888 				val.cdp_pdev_param_undecoded_metadata_enable);
10889 		break;
10890 	default:
10891 		return QDF_STATUS_E_INVAL;
10892 	}
10893 	return QDF_STATUS_SUCCESS;
10894 }
10895 
#ifdef QCA_UNDECODED_METADATA_SUPPORT
/**
 * dp_set_pdev_phyrx_error_mask() - set the phyrx error mask used for
 * undecoded metadata capture on a pdev
 * @cdp_soc: DP soc handle
 * @pdev_id: id of pdev handle
 * @mask: first error mask word
 * @mask_cont: continuation error mask word
 *
 * Thin wrapper: resolves the pdev and delegates to the monitor layer.
 *
 * Return: status from the monitor layer, or QDF_STATUS_E_FAILURE when
 * the pdev id is invalid
 */
static
QDF_STATUS dp_set_pdev_phyrx_error_mask(struct cdp_soc_t *cdp_soc,
					uint8_t pdev_id, uint32_t mask,
					uint32_t mask_cont)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
						   pdev_id);

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	return dp_monitor_config_undecoded_metadata_phyrx_error_mask(pdev,
				mask, mask_cont);
}

/**
 * dp_get_pdev_phyrx_error_mask() - read back the phyrx error mask of a pdev
 * @cdp_soc: DP soc handle
 * @pdev_id: id of pdev handle
 * @mask: buffer for the first error mask word
 * @mask_cont: buffer for the continuation error mask word
 *
 * Thin wrapper: resolves the pdev and delegates to the monitor layer.
 *
 * Return: status from the monitor layer, or QDF_STATUS_E_FAILURE when
 * the pdev id is invalid
 */
static
QDF_STATUS dp_get_pdev_phyrx_error_mask(struct cdp_soc_t *cdp_soc,
					uint8_t pdev_id, uint32_t *mask,
					uint32_t *mask_cont)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
						   pdev_id);

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	return dp_monitor_get_undecoded_metadata_phyrx_error_mask(pdev,
				mask, mask_cont);
}
#endif
10929 
#ifdef QCA_PEER_EXT_STATS
/**
 * dp_rx_update_peer_delay_stats() - update per-peer rx delay stats
 * @soc: DP soc handle
 * @nbuf: rx buffer carrying peer id, tid and ring context in its CB
 *
 * Looks up the peer recorded in the nbuf control block and, when the
 * peer has a delay-stats context allocated, folds this frame's delay
 * into the per-tid, per-ring bucket.
 *
 * Return: none
 */
static void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
					  qdf_nbuf_t nbuf)
{
	struct dp_peer *peer = NULL;
	uint16_t peer_id, ring_id;
	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
	struct dp_peer_delay_stats *delay_stats = NULL;

	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
	if (peer_id > soc->max_peer_id)
		return;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
	if (qdf_unlikely(!peer))
		return;

	/* A peer without a txrx_peer cannot hold delay stats */
	if (qdf_unlikely(!peer->txrx_peer)) {
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
		return;
	}

	if (qdf_likely(peer->txrx_peer->delay_stats)) {
		delay_stats = peer->txrx_peer->delay_stats;
		ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
		/* assumes tid from the nbuf is within the
		 * delay_tid_stats array bounds — TODO confirm callers
		 * sanitize the tid value
		 */
		dp_rx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
					nbuf);
	}
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
}
#else
/* Stub when QCA_PEER_EXT_STATS is not compiled in */
static inline void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
						 qdf_nbuf_t nbuf)
{
}
#endif
10966 
10967 /*
10968  * dp_calculate_delay_stats: function to get rx delay stats
10969  * @cdp_soc: DP soc handle
10970  * @vdev_id: id of DP vdev handle
10971  * @nbuf: skb
10972  *
10973  * Return: QDF_STATUS
10974  */
10975 static QDF_STATUS
10976 dp_calculate_delay_stats(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
10977 			 qdf_nbuf_t nbuf)
10978 {
10979 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
10980 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10981 						     DP_MOD_ID_CDP);
10982 
10983 	if (!vdev)
10984 		return QDF_STATUS_SUCCESS;
10985 
10986 	if (vdev->pdev->delay_stats_flag)
10987 		dp_rx_compute_delay(vdev, nbuf);
10988 	else
10989 		dp_rx_update_peer_delay_stats(soc, nbuf);
10990 
10991 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10992 	return QDF_STATUS_SUCCESS;
10993 }
10994 
/**
 * dp_get_vdev_param() - function to get parameters from vdev
 * @cdp_soc: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @param: parameter type to get value
 * @val: buffer address
 *
 * Takes a reference on the vdev, copies the requested parameter into
 * the caller-supplied union member, and drops the reference. Unknown
 * parameter types are logged and fail.
 *
 * Return: status
 */
static QDF_STATUS dp_get_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
				    enum cdp_vdev_param_type param,
				    cdp_config_param_type *val)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	switch (param) {
	case CDP_ENABLE_WDS:
		val->cdp_vdev_param_wds = vdev->wds_enabled;
		break;
	case CDP_ENABLE_MEC:
		val->cdp_vdev_param_mec = vdev->mec_enabled;
		break;
	case CDP_ENABLE_DA_WAR:
		/* DA war flag lives on the soc, not the vdev */
		val->cdp_vdev_param_da_war = vdev->pdev->soc->da_war_enabled;
		break;
	case CDP_ENABLE_IGMP_MCAST_EN:
		val->cdp_vdev_param_igmp_mcast_en = vdev->igmp_mcast_enhanc_en;
		break;
	case CDP_ENABLE_MCAST_EN:
		val->cdp_vdev_param_mcast_en = vdev->mcast_enhancement_en;
		break;
	case CDP_ENABLE_HLOS_TID_OVERRIDE:
		val->cdp_vdev_param_hlos_tid_override =
			    dp_vdev_get_hlos_tid_override((struct cdp_vdev *)vdev);
		break;
	case CDP_ENABLE_PEER_AUTHORIZE:
		val->cdp_vdev_param_peer_authorize =
			    vdev->peer_authorize;
		break;
	case CDP_TX_ENCAP_TYPE:
		val->cdp_vdev_param_tx_encap = vdev->tx_encap_type;
		break;
	case CDP_ENABLE_CIPHER:
		val->cdp_vdev_param_cipher_en = vdev->sec_type;
		break;
#ifdef WLAN_SUPPORT_MESH_LATENCY
	case CDP_ENABLE_PEER_TID_LATENCY:
		val->cdp_vdev_param_peer_tid_latency_enable =
			vdev->peer_tid_latency_enabled;
		break;
	case CDP_SET_VAP_MESH_TID:
		val->cdp_vdev_param_mesh_tid =
				vdev->mesh_tid_latency_config.latency_tid;
		break;
#endif
	case CDP_DROP_3ADDR_MCAST:
		val->cdp_drop_3addr_mcast = vdev->drop_3addr_mcast;
		break;
	case CDP_SET_MCAST_VDEV:
		/* Arch-specific: delegated to the per-target ops table */
		soc->arch_ops.txrx_get_vdev_mcast_param(soc, vdev, val);
		break;
#ifdef QCA_SUPPORT_WDS_EXTENDED
	case CDP_DROP_TX_MCAST:
		val->cdp_drop_tx_mcast = vdev->drop_tx_mcast;
		break;
#endif

#ifdef MESH_MODE_SUPPORT
	case CDP_MESH_RX_FILTER:
		val->cdp_vdev_param_mesh_rx_filter = vdev->mesh_rx_filter;
		break;
	case CDP_MESH_MODE:
		val->cdp_vdev_param_mesh_mode = vdev->mesh_vdev;
		break;
#endif
	case CDP_ENABLE_NAWDS:
		val->cdp_vdev_param_nawds = vdev->nawds_enabled;
		break;

	case CDP_ENABLE_WRAP:
		val->cdp_vdev_param_wrap = vdev->wrap_vdev;
		break;

#ifdef DP_TRAFFIC_END_INDICATION
	case CDP_ENABLE_TRAFFIC_END_INDICATION:
		val->cdp_vdev_param_traffic_end_ind = vdev->traffic_end_ind_en;
		break;
#endif

	default:
		dp_cdp_err("%pK: param value %d is wrong",
			   soc, param);
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_E_FAILURE;
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}
11099 
11100 /**
11101  * dp_set_vdev_param() - function to set parameters in vdev
11102  * @cdp_soc: DP soc handle
11103  * @vdev_id: id of DP vdev handle
11104  * @param: parameter type to get value
11105  * @val: value
11106  *
11107  * Return: QDF_STATUS
11108  */
11109 static QDF_STATUS
11110 dp_set_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
11111 		  enum cdp_vdev_param_type param, cdp_config_param_type val)
11112 {
11113 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
11114 	struct dp_vdev *vdev =
11115 		dp_vdev_get_ref_by_id(dsoc, vdev_id, DP_MOD_ID_CDP);
11116 	uint32_t var = 0;
11117 
11118 	if (!vdev)
11119 		return QDF_STATUS_E_FAILURE;
11120 
11121 	switch (param) {
11122 	case CDP_ENABLE_WDS:
11123 		dp_cdp_err("%pK: wds_enable %d for vdev(%pK) id(%d)\n",
11124 			   dsoc, val.cdp_vdev_param_wds, vdev, vdev->vdev_id);
11125 		vdev->wds_enabled = val.cdp_vdev_param_wds;
11126 		break;
11127 	case CDP_ENABLE_MEC:
11128 		dp_cdp_err("%pK: mec_enable %d for vdev(%pK) id(%d)\n",
11129 			   dsoc, val.cdp_vdev_param_mec, vdev, vdev->vdev_id);
11130 		vdev->mec_enabled = val.cdp_vdev_param_mec;
11131 		break;
11132 	case CDP_ENABLE_DA_WAR:
11133 		dp_cdp_err("%pK: da_war_enable %d for vdev(%pK) id(%d)\n",
11134 			   dsoc, val.cdp_vdev_param_da_war, vdev, vdev->vdev_id);
11135 		vdev->pdev->soc->da_war_enabled = val.cdp_vdev_param_da_war;
11136 		dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
11137 					     vdev->pdev->soc));
11138 		break;
11139 	case CDP_ENABLE_NAWDS:
11140 		vdev->nawds_enabled = val.cdp_vdev_param_nawds;
11141 		break;
11142 	case CDP_ENABLE_MCAST_EN:
11143 		vdev->mcast_enhancement_en = val.cdp_vdev_param_mcast_en;
11144 		break;
11145 	case CDP_ENABLE_IGMP_MCAST_EN:
11146 		vdev->igmp_mcast_enhanc_en = val.cdp_vdev_param_igmp_mcast_en;
11147 		break;
11148 	case CDP_ENABLE_PROXYSTA:
11149 		vdev->proxysta_vdev = val.cdp_vdev_param_proxysta;
11150 		break;
11151 	case CDP_UPDATE_TDLS_FLAGS:
11152 		vdev->tdls_link_connected = val.cdp_vdev_param_tdls_flags;
11153 		break;
11154 	case CDP_CFG_WDS_AGING_TIMER:
11155 		var = val.cdp_vdev_param_aging_tmr;
11156 		if (!var)
11157 			qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
11158 		else if (var != vdev->wds_aging_timer_val)
11159 			qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, var);
11160 
11161 		vdev->wds_aging_timer_val = var;
11162 		break;
11163 	case CDP_ENABLE_AP_BRIDGE:
11164 		if (wlan_op_mode_sta != vdev->opmode)
11165 			vdev->ap_bridge_enabled = val.cdp_vdev_param_ap_brdg_en;
11166 		else
11167 			vdev->ap_bridge_enabled = false;
11168 		break;
11169 	case CDP_ENABLE_CIPHER:
11170 		vdev->sec_type = val.cdp_vdev_param_cipher_en;
11171 		break;
11172 	case CDP_ENABLE_QWRAP_ISOLATION:
11173 		vdev->isolation_vdev = val.cdp_vdev_param_qwrap_isolation;
11174 		break;
11175 	case CDP_UPDATE_MULTIPASS:
11176 		vdev->multipass_en = val.cdp_vdev_param_update_multipass;
11177 		break;
11178 	case CDP_TX_ENCAP_TYPE:
11179 		vdev->tx_encap_type = val.cdp_vdev_param_tx_encap;
11180 		break;
11181 	case CDP_RX_DECAP_TYPE:
11182 		vdev->rx_decap_type = val.cdp_vdev_param_rx_decap;
11183 		break;
11184 	case CDP_TID_VDEV_PRTY:
11185 		vdev->tidmap_prty = val.cdp_vdev_param_tidmap_prty;
11186 		break;
11187 	case CDP_TIDMAP_TBL_ID:
11188 		vdev->tidmap_tbl_id = val.cdp_vdev_param_tidmap_tbl_id;
11189 		break;
11190 #ifdef MESH_MODE_SUPPORT
11191 	case CDP_MESH_RX_FILTER:
11192 		dp_vdev_set_mesh_rx_filter((struct cdp_vdev *)vdev,
11193 					   val.cdp_vdev_param_mesh_rx_filter);
11194 		break;
11195 	case CDP_MESH_MODE:
11196 		dp_vdev_set_mesh_mode((struct cdp_vdev *)vdev,
11197 				      val.cdp_vdev_param_mesh_mode);
11198 		break;
11199 #endif
11200 	case CDP_ENABLE_HLOS_TID_OVERRIDE:
11201 		dp_info("vdev_id %d enable hlod tid override %d", vdev_id,
11202 			val.cdp_vdev_param_hlos_tid_override);
11203 		dp_vdev_set_hlos_tid_override(vdev,
11204 				val.cdp_vdev_param_hlos_tid_override);
11205 		break;
11206 #ifdef QCA_SUPPORT_WDS_EXTENDED
11207 	case CDP_CFG_WDS_EXT:
11208 		if (vdev->opmode == wlan_op_mode_ap)
11209 			vdev->wds_ext_enabled = val.cdp_vdev_param_wds_ext;
11210 		break;
11211 	case CDP_DROP_TX_MCAST:
11212 		dp_info("vdev_id %d drop tx mcast :%d", vdev_id,
11213 			val.cdp_drop_tx_mcast);
11214 		vdev->drop_tx_mcast = val.cdp_drop_tx_mcast;
11215 		break;
11216 #endif
11217 	case CDP_ENABLE_PEER_AUTHORIZE:
11218 		vdev->peer_authorize = val.cdp_vdev_param_peer_authorize;
11219 		break;
11220 #ifdef WLAN_SUPPORT_MESH_LATENCY
11221 	case CDP_ENABLE_PEER_TID_LATENCY:
11222 		dp_info("vdev_id %d enable peer tid latency %d", vdev_id,
11223 			val.cdp_vdev_param_peer_tid_latency_enable);
11224 		vdev->peer_tid_latency_enabled =
11225 			val.cdp_vdev_param_peer_tid_latency_enable;
11226 		break;
11227 	case CDP_SET_VAP_MESH_TID:
11228 		dp_info("vdev_id %d enable peer tid latency %d", vdev_id,
11229 			val.cdp_vdev_param_mesh_tid);
11230 		vdev->mesh_tid_latency_config.latency_tid
11231 				= val.cdp_vdev_param_mesh_tid;
11232 		break;
11233 #endif
11234 #ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE
11235 	case CDP_SKIP_BAR_UPDATE_AP:
11236 		dp_info("vdev_id %d skip BAR update: %u", vdev_id,
11237 			val.cdp_skip_bar_update);
11238 		vdev->skip_bar_update = val.cdp_skip_bar_update;
11239 		vdev->skip_bar_update_last_ts = 0;
11240 		break;
11241 #endif
11242 	case CDP_DROP_3ADDR_MCAST:
11243 		dp_info("vdev_id %d drop 3 addr mcast :%d", vdev_id,
11244 			val.cdp_drop_3addr_mcast);
11245 		vdev->drop_3addr_mcast = val.cdp_drop_3addr_mcast;
11246 		break;
11247 	case CDP_ENABLE_WRAP:
11248 		vdev->wrap_vdev = val.cdp_vdev_param_wrap;
11249 		break;
11250 #ifdef DP_TRAFFIC_END_INDICATION
11251 	case CDP_ENABLE_TRAFFIC_END_INDICATION:
11252 		vdev->traffic_end_ind_en = val.cdp_vdev_param_traffic_end_ind;
11253 		break;
11254 #endif
11255 #ifdef FEATURE_DIRECT_LINK
11256 	case CDP_VDEV_TX_TO_FW:
11257 		dp_info("vdev_id %d to_fw :%d", vdev_id, val.cdp_vdev_tx_to_fw);
11258 		vdev->to_fw = val.cdp_vdev_tx_to_fw;
11259 		break;
11260 #endif
11261 	default:
11262 		break;
11263 	}
11264 
11265 	dp_tx_vdev_update_search_flags((struct dp_vdev *)vdev);
11266 	dsoc->arch_ops.txrx_set_vdev_param(dsoc, vdev, param, val);
11267 
11268 	/* Update PDEV flags as VDEV flags are updated */
11269 	dp_pdev_update_fast_rx_flag(dsoc, vdev->pdev);
11270 	dp_vdev_unref_delete(dsoc, vdev, DP_MOD_ID_CDP);
11271 
11272 	return QDF_STATUS_SUCCESS;
11273 }
11274 
11275 /*
11276  * dp_set_psoc_param: function to set parameters in psoc
11277  * @cdp_soc : DP soc handle
11278  * @param: parameter type to be set
11279  * @val: value of parameter to be set
11280  *
11281  * return: QDF_STATUS
11282  */
11283 static QDF_STATUS
11284 dp_set_psoc_param(struct cdp_soc_t *cdp_soc,
11285 		  enum cdp_psoc_param_type param, cdp_config_param_type val)
11286 {
11287 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
11288 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->wlan_cfg_ctx;
11289 
11290 	switch (param) {
11291 	case CDP_ENABLE_RATE_STATS:
11292 		soc->peerstats_enabled = val.cdp_psoc_param_en_rate_stats;
11293 		break;
11294 	case CDP_SET_NSS_CFG:
11295 		wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx,
11296 					    val.cdp_psoc_param_en_nss_cfg);
11297 		/*
11298 		 * TODO: masked out based on the per offloaded radio
11299 		 */
11300 		switch (val.cdp_psoc_param_en_nss_cfg) {
11301 		case dp_nss_cfg_default:
11302 			break;
11303 		case dp_nss_cfg_first_radio:
11304 		/*
11305 		 * This configuration is valid for single band radio which
11306 		 * is also NSS offload.
11307 		 */
11308 		case dp_nss_cfg_dbdc:
11309 		case dp_nss_cfg_dbtc:
11310 			wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
11311 			wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
11312 			wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
11313 			wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
11314 			break;
11315 		default:
11316 			dp_cdp_err("%pK: Invalid offload config %d",
11317 				   soc, val.cdp_psoc_param_en_nss_cfg);
11318 		}
11319 
11320 			dp_cdp_err("%pK: nss-wifi<0> nss config is enabled"
11321 				   , soc);
11322 		break;
11323 	case CDP_SET_PREFERRED_HW_MODE:
11324 		soc->preferred_hw_mode = val.cdp_psoc_param_preferred_hw_mode;
11325 		break;
11326 	case CDP_IPA_ENABLE:
11327 		soc->wlan_cfg_ctx->ipa_enabled = val.cdp_ipa_enabled;
11328 		break;
11329 	case CDP_CFG_VDEV_STATS_HW_OFFLOAD:
11330 		wlan_cfg_set_vdev_stats_hw_offload_config(wlan_cfg_ctx,
11331 				val.cdp_psoc_param_vdev_stats_hw_offload);
11332 		break;
11333 	case CDP_SAWF_ENABLE:
11334 		wlan_cfg_set_sawf_config(wlan_cfg_ctx, val.cdp_sawf_enabled);
11335 		break;
11336 	case CDP_UMAC_RST_SKEL_ENABLE:
11337 		dp_umac_rst_skel_enable_update(soc, val.cdp_umac_rst_skel);
11338 		break;
11339 	case CDP_SAWF_STATS:
11340 		wlan_cfg_set_sawf_stats_config(wlan_cfg_ctx,
11341 					       val.cdp_sawf_stats);
11342 		break;
11343 	default:
11344 		break;
11345 	}
11346 
11347 	return QDF_STATUS_SUCCESS;
11348 }
11349 
11350 /*
11351  * dp_get_psoc_param: function to get parameters in soc
11352  * @cdp_soc : DP soc handle
11353  * @param: parameter type to be set
11354  * @val: address of buffer
11355  *
11356  * return: status
11357  */
11358 static QDF_STATUS dp_get_psoc_param(struct cdp_soc_t *cdp_soc,
11359 				    enum cdp_psoc_param_type param,
11360 				    cdp_config_param_type *val)
11361 {
11362 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
11363 
11364 	if (!soc)
11365 		return QDF_STATUS_E_FAILURE;
11366 
11367 	switch (param) {
11368 	case CDP_CFG_PEER_EXT_STATS:
11369 		val->cdp_psoc_param_pext_stats =
11370 			wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
11371 		break;
11372 	case CDP_CFG_VDEV_STATS_HW_OFFLOAD:
11373 		val->cdp_psoc_param_vdev_stats_hw_offload =
11374 			wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx);
11375 		break;
11376 	case CDP_UMAC_RST_SKEL_ENABLE:
11377 		val->cdp_umac_rst_skel = dp_umac_rst_skel_enable_get(soc);
11378 		break;
11379 	case CDP_PPEDS_ENABLE:
11380 		val->cdp_psoc_param_ppeds_enabled =
11381 			wlan_cfg_get_dp_soc_is_ppeds_enabled(soc->wlan_cfg_ctx);
11382 		break;
11383 	default:
11384 		dp_warn("Invalid param");
11385 		break;
11386 	}
11387 
11388 	return QDF_STATUS_SUCCESS;
11389 }
11390 
11391 /*
11392  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
11393  * @soc: DP_SOC handle
11394  * @vdev_id: id of DP_VDEV handle
11395  * @map_id:ID of map that needs to be updated
11396  *
11397  * Return: QDF_STATUS
11398  */
11399 static QDF_STATUS dp_set_vdev_dscp_tid_map_wifi3(ol_txrx_soc_handle cdp_soc,
11400 						 uint8_t vdev_id,
11401 						 uint8_t map_id)
11402 {
11403 	cdp_config_param_type val;
11404 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
11405 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11406 						     DP_MOD_ID_CDP);
11407 	if (vdev) {
11408 		vdev->dscp_tid_map_id = map_id;
11409 		val.cdp_vdev_param_dscp_tid_map_id = map_id;
11410 		soc->arch_ops.txrx_set_vdev_param(soc,
11411 						  vdev,
11412 						  CDP_UPDATE_DSCP_TO_TID_MAP,
11413 						  val);
11414 		/* Updatr flag for transmit tid classification */
11415 		if (vdev->dscp_tid_map_id < soc->num_hw_dscp_tid_map)
11416 			vdev->skip_sw_tid_classification |=
11417 				DP_TX_HW_DSCP_TID_MAP_VALID;
11418 		else
11419 			vdev->skip_sw_tid_classification &=
11420 				~DP_TX_HW_DSCP_TID_MAP_VALID;
11421 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11422 		return QDF_STATUS_SUCCESS;
11423 	}
11424 
11425 	return QDF_STATUS_E_FAILURE;
11426 }
11427 
#ifdef DP_RATETABLE_SUPPORT
/* dp_txrx_get_ratekbps() - look up the rate for the given rate parameters
 * @preamb: preamble type
 * @mcs: MCS index
 * @htflag: HT flag (currently unused by this wrapper)
 * @gintval: guard interval value
 *
 * Thin wrapper over dp_getrateindex(); the rate index and rate code
 * out-parameters are discarded, only the return value is used.
 * NOTE(review): the literal 1 arguments and the kbps semantics of the
 * return value follow from dp_getrateindex()'s signature -- confirm
 * against dp_ratetable.h.
 */
static int dp_txrx_get_ratekbps(int preamb, int mcs,
				int htflag, int gintval)
{
	uint32_t rix;
	uint16_t ratecode;
	enum cdp_punctured_modes punc_mode = NO_PUNCTURE;

	return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1,
			       (uint8_t)preamb, 1, punc_mode,
			       &rix, &ratecode);
}
#else
/* Rate table support compiled out: always report 0 */
static int dp_txrx_get_ratekbps(int preamb, int mcs,
				int htflag, int gintval)
{
	return 0;
}
#endif
11447 
11448 /* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
11449  * @soc: DP soc handle
11450  * @pdev_id: id of DP pdev handle
11451  * @pdev_stats: buffer to copy to
11452  *
11453  * return : status success/failure
11454  */
11455 static QDF_STATUS
11456 dp_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
11457 		       struct cdp_pdev_stats *pdev_stats)
11458 {
11459 	struct dp_pdev *pdev =
11460 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11461 						   pdev_id);
11462 	if (!pdev)
11463 		return QDF_STATUS_E_FAILURE;
11464 
11465 	dp_aggregate_pdev_stats(pdev);
11466 
11467 	qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats));
11468 	return QDF_STATUS_SUCCESS;
11469 }
11470 
/* dp_txrx_update_vdev_me_stats(): Update vdev ME stats sent from CDP
 * @vdev: DP vdev handle
 * @buf: buffer containing specific stats structure; must point to a
 *	 struct cdp_tx_ingress_stats
 *
 * Accumulates the caller-supplied multicast-enhancement counters into
 * the vdev tx ingress stats.
 *
 * Returns: void
 */
static void dp_txrx_update_vdev_me_stats(struct dp_vdev *vdev,
					 void *buf)
{
	struct cdp_tx_ingress_stats *host_stats = NULL;

	if (!buf) {
		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
		return;
	}
	host_stats = (struct cdp_tx_ingress_stats *)buf;

	/* packet+byte counter first, then the individual event counters */
	DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt,
			 host_stats->mcast_en.mcast_pkt.num,
			 host_stats->mcast_en.mcast_pkt.bytes);
	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error,
		     host_stats->mcast_en.dropped_map_error);
	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_self_mac,
		     host_stats->mcast_en.dropped_self_mac);
	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_send_fail,
		     host_stats->mcast_en.dropped_send_fail);
	DP_STATS_INC(vdev, tx_i.mcast_en.ucast,
		     host_stats->mcast_en.ucast);
	DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc,
		     host_stats->mcast_en.fail_seg_alloc);
	DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail,
		     host_stats->mcast_en.clone_fail);
}
11504 
11505 /* dp_txrx_update_vdev_igmp_me_stats(): Update vdev IGMP ME stats sent from CDP
11506  * @vdev: DP vdev handle
11507  * @buf: buffer containing specific stats structure
11508  *
11509  * Returns: void
11510  */
11511 static void dp_txrx_update_vdev_igmp_me_stats(struct dp_vdev *vdev,
11512 					      void *buf)
11513 {
11514 	struct cdp_tx_ingress_stats *host_stats = NULL;
11515 
11516 	if (!buf) {
11517 		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
11518 		return;
11519 	}
11520 	host_stats = (struct cdp_tx_ingress_stats *)buf;
11521 
11522 	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_rcvd,
11523 		     host_stats->igmp_mcast_en.igmp_rcvd);
11524 	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_ucast_converted,
11525 		     host_stats->igmp_mcast_en.igmp_ucast_converted);
11526 }
11527 
11528 /* dp_txrx_update_vdev_host_stats(): Update stats sent through CDP
11529  * @soc: DP soc handle
11530  * @vdev_id: id of DP vdev handle
11531  * @buf: buffer containing specific stats structure
11532  * @stats_id: stats type
11533  *
11534  * Returns: QDF_STATUS
11535  */
11536 static QDF_STATUS dp_txrx_update_vdev_host_stats(struct cdp_soc_t *soc_hdl,
11537 						 uint8_t vdev_id,
11538 						 void *buf,
11539 						 uint16_t stats_id)
11540 {
11541 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11542 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11543 						     DP_MOD_ID_CDP);
11544 
11545 	if (!vdev) {
11546 		dp_cdp_err("%pK: Invalid vdev handle", soc);
11547 		return QDF_STATUS_E_FAILURE;
11548 	}
11549 
11550 	switch (stats_id) {
11551 	case DP_VDEV_STATS_PKT_CNT_ONLY:
11552 		break;
11553 	case DP_VDEV_STATS_TX_ME:
11554 		dp_txrx_update_vdev_me_stats(vdev, buf);
11555 		dp_txrx_update_vdev_igmp_me_stats(vdev, buf);
11556 		break;
11557 	default:
11558 		qdf_info("Invalid stats_id %d", stats_id);
11559 		break;
11560 	}
11561 
11562 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11563 	return QDF_STATUS_SUCCESS;
11564 }
11565 
11566 /* dp_txrx_get_peer_stats - will return cdp_peer_stats
11567  * @soc: soc handle
11568  * @vdev_id: id of vdev handle
11569  * @peer_mac: mac of DP_PEER handle
11570  * @peer_stats: buffer to copy to
11571  * return : status success/failure
11572  */
11573 static QDF_STATUS
11574 dp_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
11575 		       uint8_t *peer_mac, struct cdp_peer_stats *peer_stats)
11576 {
11577 	struct dp_peer *peer = NULL;
11578 	struct cdp_peer_info peer_info = { 0 };
11579 
11580 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac, false,
11581 				 CDP_WILD_PEER_TYPE);
11582 
11583 	peer = dp_peer_hash_find_wrapper((struct dp_soc *)soc, &peer_info,
11584 					 DP_MOD_ID_CDP);
11585 
11586 	qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats));
11587 
11588 	if (!peer)
11589 		return QDF_STATUS_E_FAILURE;
11590 
11591 	dp_get_peer_stats(peer, peer_stats);
11592 
11593 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11594 
11595 	return QDF_STATUS_SUCCESS;
11596 }
11597 
11598 /* dp_txrx_get_peer_stats_param - will return specified cdp_peer_stats
11599  * @param soc - soc handle
11600  * @param vdev_id - vdev_id of vdev object
11601  * @param peer_mac - mac address of the peer
11602  * @param type - enum of required stats
11603  * @param buf - buffer to hold the value
11604  * return : status success/failure
11605  */
11606 static QDF_STATUS
11607 dp_txrx_get_peer_stats_param(struct cdp_soc_t *soc, uint8_t vdev_id,
11608 			     uint8_t *peer_mac, enum cdp_peer_stats_type type,
11609 			     cdp_peer_stats_param_t *buf)
11610 {
11611 	QDF_STATUS ret;
11612 	struct dp_peer *peer = NULL;
11613 	struct cdp_peer_info peer_info = { 0 };
11614 
11615 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac, false,
11616 				 CDP_WILD_PEER_TYPE);
11617 
11618 	peer = dp_peer_hash_find_wrapper((struct dp_soc *)soc, &peer_info,
11619 				         DP_MOD_ID_CDP);
11620 
11621 	if (!peer) {
11622 		dp_peer_err("%pK: Invalid Peer for Mac " QDF_MAC_ADDR_FMT,
11623 			    soc, QDF_MAC_ADDR_REF(peer_mac));
11624 		return QDF_STATUS_E_FAILURE;
11625 	}
11626 
11627 	if (type >= cdp_peer_per_pkt_stats_min &&
11628 	    type < cdp_peer_per_pkt_stats_max) {
11629 		ret = dp_txrx_get_peer_per_pkt_stats_param(peer, type, buf);
11630 	} else if (type >= cdp_peer_extd_stats_min &&
11631 		   type < cdp_peer_extd_stats_max) {
11632 		ret = dp_txrx_get_peer_extd_stats_param(peer, type, buf);
11633 	} else {
11634 		dp_err("%pK: Invalid stat type requested", soc);
11635 		ret = QDF_STATUS_E_FAILURE;
11636 	}
11637 
11638 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11639 
11640 	return ret;
11641 }
11642 
/* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
 * @soc_hdl: soc handle
 * @vdev_id: id of vdev handle
 * @peer_mac: mac of DP_PEER handle
 *
 * Clears the peer stats and the associated txrx_peer stats.  For an MLO
 * MLD peer every link peer is walked and its stats and monitor stats are
 * cleared too, each against its own link soc (which can differ from
 * @soc_hdl in multi-chip MLO).
 *
 * return : QDF_STATUS
 */
#ifdef WLAN_FEATURE_11BE_MLO
static QDF_STATUS
dp_txrx_reset_peer_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			 uint8_t *peer_mac)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_peer *peer =
			dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
						       vdev_id, DP_MOD_ID_CDP);

	if (!peer)
		return QDF_STATUS_E_FAILURE;

	DP_STATS_CLR(peer);
	dp_txrx_peer_stats_clr(peer->txrx_peer);

	if (IS_MLO_DP_MLD_PEER(peer)) {
		uint8_t i;
		struct dp_peer *link_peer;
		struct dp_soc *link_peer_soc;
		struct dp_mld_link_peers link_peers_info;

		/* take references on all link peers, clear each one,
		 * then release the references in a single call
		 */
		dp_get_link_peers_ref_from_mld_peer(soc, peer,
						    &link_peers_info,
						    DP_MOD_ID_CDP);
		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			/* monitor stats live on the link peer's own soc */
			link_peer_soc = link_peer->vdev->pdev->soc;

			DP_STATS_CLR(link_peer);
			dp_monitor_peer_reset_stats(link_peer_soc, link_peer);
		}

		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
	} else {
		dp_monitor_peer_reset_stats(soc, peer);
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return status;
}
11693 #else
11694 static QDF_STATUS
11695 dp_txrx_reset_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
11696 			 uint8_t *peer_mac)
11697 {
11698 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11699 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
11700 						      peer_mac, 0, vdev_id,
11701 						      DP_MOD_ID_CDP);
11702 
11703 	if (!peer)
11704 		return QDF_STATUS_E_FAILURE;
11705 
11706 	DP_STATS_CLR(peer);
11707 	dp_txrx_peer_stats_clr(peer->txrx_peer);
11708 	dp_monitor_peer_reset_stats((struct dp_soc *)soc, peer);
11709 
11710 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11711 
11712 	return status;
11713 }
11714 #endif
11715 
11716 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
11717  * @vdev_handle: DP_VDEV handle
11718  * @buf: buffer for vdev stats
11719  *
11720  * return : int
11721  */
11722 static int dp_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
11723 				  void *buf, bool is_aggregate)
11724 {
11725 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11726 	struct cdp_vdev_stats *vdev_stats;
11727 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11728 						     DP_MOD_ID_CDP);
11729 
11730 	if (!vdev)
11731 		return 1;
11732 
11733 	vdev_stats = (struct cdp_vdev_stats *)buf;
11734 
11735 	if (is_aggregate) {
11736 		dp_aggregate_vdev_stats(vdev, buf);
11737 	} else {
11738 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
11739 	}
11740 
11741 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11742 	return 0;
11743 }
11744 
11745 /*
11746  * dp_get_total_per(): get total per
11747  * @soc: DP soc handle
11748  * @pdev_id: id of DP_PDEV handle
11749  *
11750  * Return: % error rate using retries per packet and success packets
11751  */
11752 static int dp_get_total_per(struct cdp_soc_t *soc, uint8_t pdev_id)
11753 {
11754 	struct dp_pdev *pdev =
11755 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11756 						   pdev_id);
11757 
11758 	if (!pdev)
11759 		return 0;
11760 
11761 	dp_aggregate_pdev_stats(pdev);
11762 	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
11763 		return 0;
11764 	return ((pdev->stats.tx.retries * 100) /
11765 		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
11766 }
11767 
11768 /*
11769  * dp_txrx_stats_publish(): publish pdev stats into a buffer
11770  * @soc: DP soc handle
11771  * @pdev_id: id of DP_PDEV handle
11772  * @buf: to hold pdev_stats
11773  *
11774  * Return: int
11775  */
11776 static int
11777 dp_txrx_stats_publish(struct cdp_soc_t *soc, uint8_t pdev_id,
11778 		      struct cdp_stats_extd *buf)
11779 {
11780 	struct cdp_txrx_stats_req req = {0,};
11781 	QDF_STATUS status;
11782 	struct dp_pdev *pdev =
11783 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11784 						   pdev_id);
11785 
11786 	if (!pdev)
11787 		return TXRX_STATS_LEVEL_OFF;
11788 
11789 	if (pdev->pending_fw_stats_response)
11790 		return TXRX_STATS_LEVEL_OFF;
11791 
11792 	dp_aggregate_pdev_stats(pdev);
11793 
11794 	pdev->pending_fw_stats_response = true;
11795 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
11796 	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
11797 	pdev->fw_stats_tlv_bitmap_rcvd = 0;
11798 	qdf_event_reset(&pdev->fw_stats_event);
11799 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
11800 				req.param1, req.param2, req.param3, 0,
11801 				req.cookie_val, 0);
11802 
11803 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
11804 	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
11805 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
11806 				req.param1, req.param2, req.param3, 0,
11807 				req.cookie_val, 0);
11808 
11809 	status =
11810 		qdf_wait_single_event(&pdev->fw_stats_event, DP_MAX_SLEEP_TIME);
11811 
11812 	if (status != QDF_STATUS_SUCCESS) {
11813 		if (status == QDF_STATUS_E_TIMEOUT)
11814 			qdf_debug("TIMEOUT_OCCURS");
11815 		pdev->pending_fw_stats_response = false;
11816 		return TXRX_STATS_LEVEL_OFF;
11817 	}
11818 	qdf_mem_copy(buf, &pdev->stats, sizeof(struct cdp_pdev_stats));
11819 	pdev->pending_fw_stats_response = false;
11820 
11821 	return TXRX_STATS_LEVEL;
11822 }
11823 
11824 /*
11825  * dp_get_obss_stats(): Get Pdev OBSS stats from Fw
11826  * @soc: DP soc handle
11827  * @pdev_id: id of DP_PDEV handle
11828  * @buf: to hold pdev obss stats
11829  * @req: Pointer to CDP TxRx stats
11830  *
11831  * Return: status
11832  */
11833 static QDF_STATUS
11834 dp_get_obss_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
11835 		  struct cdp_pdev_obss_pd_stats_tlv *buf,
11836 		  struct cdp_txrx_stats_req *req)
11837 {
11838 	QDF_STATUS status;
11839 	struct dp_pdev *pdev =
11840 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11841 						   pdev_id);
11842 
11843 	if (!pdev)
11844 		return QDF_STATUS_E_INVAL;
11845 
11846 	if (pdev->pending_fw_obss_stats_response)
11847 		return QDF_STATUS_E_AGAIN;
11848 
11849 	pdev->pending_fw_obss_stats_response = true;
11850 	req->stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_OBSS_PD_STATS;
11851 	req->cookie_val = DBG_STATS_COOKIE_HTT_OBSS;
11852 	qdf_event_reset(&pdev->fw_obss_stats_event);
11853 	status = dp_h2t_ext_stats_msg_send(pdev, req->stats, req->param0,
11854 					   req->param1, req->param2,
11855 					   req->param3, 0, req->cookie_val,
11856 					   req->mac_id);
11857 	if (QDF_IS_STATUS_ERROR(status)) {
11858 		pdev->pending_fw_obss_stats_response = false;
11859 		return status;
11860 	}
11861 	status =
11862 		qdf_wait_single_event(&pdev->fw_obss_stats_event,
11863 				      DP_MAX_SLEEP_TIME);
11864 
11865 	if (status != QDF_STATUS_SUCCESS) {
11866 		if (status == QDF_STATUS_E_TIMEOUT)
11867 			qdf_debug("TIMEOUT_OCCURS");
11868 		pdev->pending_fw_obss_stats_response = false;
11869 		return QDF_STATUS_E_TIMEOUT;
11870 	}
11871 	qdf_mem_copy(buf, &pdev->stats.htt_tx_pdev_stats.obss_pd_stats_tlv,
11872 		     sizeof(struct cdp_pdev_obss_pd_stats_tlv));
11873 	pdev->pending_fw_obss_stats_response = false;
11874 	return status;
11875 }
11876 
11877 /*
11878  * dp_clear_pdev_obss_pd_stats(): Clear pdev obss stats
11879  * @soc: DP soc handle
11880  * @pdev_id: id of DP_PDEV handle
11881  * @req: Pointer to CDP TxRx stats request mac_id will be
11882  *	 pre-filled and should not be overwritten
11883  *
11884  * Return: status
11885  */
11886 static QDF_STATUS
11887 dp_clear_pdev_obss_pd_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
11888 			    struct cdp_txrx_stats_req *req)
11889 {
11890 	struct dp_pdev *pdev =
11891 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11892 						   pdev_id);
11893 	uint32_t cookie_val = DBG_STATS_COOKIE_DEFAULT;
11894 
11895 	if (!pdev)
11896 		return QDF_STATUS_E_INVAL;
11897 
11898 	/*
11899 	 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
11900 	 * from param0 to param3 according to below rule:
11901 	 *
11902 	 * PARAM:
11903 	 *   - config_param0 : start_offset (stats type)
11904 	 *   - config_param1 : stats bmask from start offset
11905 	 *   - config_param2 : stats bmask from start offset + 32
11906 	 *   - config_param3 : stats bmask from start offset + 64
11907 	 */
11908 	req->stats = (enum cdp_stats)HTT_DBG_EXT_STATS_RESET;
11909 	req->param0 = HTT_DBG_EXT_STATS_PDEV_OBSS_PD_STATS;
11910 	req->param1 = 0x00000001;
11911 
11912 	return dp_h2t_ext_stats_msg_send(pdev, req->stats, req->param0,
11913 				  req->param1, req->param2, req->param3, 0,
11914 				cookie_val, req->mac_id);
11915 }
11916 
11917 /**
11918  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
11919  * @soc: soc handle
11920  * @pdev_id: id of DP_PDEV handle
11921  * @map_id: ID of map that needs to be updated
11922  * @tos: index value in map
11923  * @tid: tid value passed by the user
11924  *
11925  * Return: QDF_STATUS
11926  */
11927 static QDF_STATUS
11928 dp_set_pdev_dscp_tid_map_wifi3(struct cdp_soc_t *soc_handle,
11929 			       uint8_t pdev_id,
11930 			       uint8_t map_id,
11931 			       uint8_t tos, uint8_t tid)
11932 {
11933 	uint8_t dscp;
11934 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
11935 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11936 
11937 	if (!pdev)
11938 		return QDF_STATUS_E_FAILURE;
11939 
11940 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
11941 	pdev->dscp_tid_map[map_id][dscp] = tid;
11942 
11943 	if (map_id < soc->num_hw_dscp_tid_map)
11944 		hal_tx_update_dscp_tid(soc->hal_soc, tid,
11945 				       map_id, dscp);
11946 	else
11947 		return QDF_STATUS_E_FAILURE;
11948 
11949 	return QDF_STATUS_SUCCESS;
11950 }
11951 
11952 #ifdef WLAN_SYSFS_DP_STATS
11953 /*
11954  * dp_sysfs_event_trigger(): Trigger event to wait for firmware
11955  * stats request response.
11956  * @soc: soc handle
11957  * @cookie_val: cookie value
11958  *
11959  * @Return: QDF_STATUS
11960  */
11961 static QDF_STATUS
11962 dp_sysfs_event_trigger(struct dp_soc *soc, uint32_t cookie_val)
11963 {
11964 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11965 	/* wait for firmware response for sysfs stats request */
11966 	if (cookie_val == DBG_SYSFS_STATS_COOKIE) {
11967 		if (!soc) {
11968 			dp_cdp_err("soc is NULL");
11969 			return QDF_STATUS_E_FAILURE;
11970 		}
11971 		/* wait for event completion */
11972 		status = qdf_wait_single_event(&soc->sysfs_config->sysfs_txrx_fw_request_done,
11973 					       WLAN_SYSFS_STAT_REQ_WAIT_MS);
11974 		if (status == QDF_STATUS_SUCCESS)
11975 			dp_cdp_info("sysfs_txrx_fw_request_done event completed");
11976 		else if (status == QDF_STATUS_E_TIMEOUT)
11977 			dp_cdp_warn("sysfs_txrx_fw_request_done event expired");
11978 		else
11979 			dp_cdp_warn("sysfs_txrx_fw_request_done event error code %d", status);
11980 	}
11981 
11982 	return status;
11983 }
11984 #else /* WLAN_SYSFS_DP_STATS */
11985 /*
11986  * dp_sysfs_event_trigger(): Trigger event to wait for firmware
11987  * stats request response.
11988  * @soc: soc handle
11989  * @cookie_val: cookie value
11990  *
11991  * @Return: QDF_STATUS
11992  */
11993 static QDF_STATUS
11994 dp_sysfs_event_trigger(struct dp_soc *soc, uint32_t cookie_val)
11995 {
11996 	return QDF_STATUS_SUCCESS;
11997 }
11998 #endif /* WLAN_SYSFS_DP_STATS */
11999 
12000 /**
12001  * dp_fw_stats_process(): Process TXRX FW stats request.
12002  * @vdev_handle: DP VDEV handle
12003  * @req: stats request
12004  *
12005  * return: QDF_STATUS
12006  */
12007 static QDF_STATUS
12008 dp_fw_stats_process(struct dp_vdev *vdev,
12009 		    struct cdp_txrx_stats_req *req)
12010 {
12011 	struct dp_pdev *pdev = NULL;
12012 	struct dp_soc *soc = NULL;
12013 	uint32_t stats = req->stats;
12014 	uint8_t mac_id = req->mac_id;
12015 	uint32_t cookie_val = DBG_STATS_COOKIE_DEFAULT;
12016 
12017 	if (!vdev) {
12018 		DP_TRACE(NONE, "VDEV not found");
12019 		return QDF_STATUS_E_FAILURE;
12020 	}
12021 
12022 	pdev = vdev->pdev;
12023 	if (!pdev) {
12024 		DP_TRACE(NONE, "PDEV not found");
12025 		return QDF_STATUS_E_FAILURE;
12026 	}
12027 
12028 	soc = pdev->soc;
12029 	if (!soc) {
12030 		DP_TRACE(NONE, "soc not found");
12031 		return QDF_STATUS_E_FAILURE;
12032 	}
12033 
12034 	/* In case request is from host sysfs for displaying stats on console */
12035 	if (req->cookie_val == DBG_SYSFS_STATS_COOKIE)
12036 		cookie_val = DBG_SYSFS_STATS_COOKIE;
12037 
12038 	/*
12039 	 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
12040 	 * from param0 to param3 according to below rule:
12041 	 *
12042 	 * PARAM:
12043 	 *   - config_param0 : start_offset (stats type)
12044 	 *   - config_param1 : stats bmask from start offset
12045 	 *   - config_param2 : stats bmask from start offset + 32
12046 	 *   - config_param3 : stats bmask from start offset + 64
12047 	 */
12048 	if (req->stats == CDP_TXRX_STATS_0) {
12049 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
12050 		req->param1 = 0xFFFFFFFF;
12051 		req->param2 = 0xFFFFFFFF;
12052 		req->param3 = 0xFFFFFFFF;
12053 	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
12054 		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
12055 	}
12056 
12057 	if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT) {
12058 		dp_h2t_ext_stats_msg_send(pdev,
12059 					  HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT,
12060 					  req->param0, req->param1, req->param2,
12061 					  req->param3, 0, cookie_val,
12062 					  mac_id);
12063 	} else {
12064 		dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
12065 					  req->param1, req->param2, req->param3,
12066 					  0, cookie_val, mac_id);
12067 	}
12068 
12069 	dp_sysfs_event_trigger(soc, cookie_val);
12070 
12071 	return QDF_STATUS_SUCCESS;
12072 }
12073 
12074 /**
12075  * dp_txrx_stats_request - function to map to firmware and host stats
12076  * @soc: soc handle
12077  * @vdev_id: virtual device ID
12078  * @req: stats request
12079  *
12080  * Return: QDF_STATUS
12081  */
12082 static
12083 QDF_STATUS dp_txrx_stats_request(struct cdp_soc_t *soc_handle,
12084 				 uint8_t vdev_id,
12085 				 struct cdp_txrx_stats_req *req)
12086 {
12087 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_handle);
12088 	int host_stats;
12089 	int fw_stats;
12090 	enum cdp_stats stats;
12091 	int num_stats;
12092 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12093 						     DP_MOD_ID_CDP);
12094 	QDF_STATUS status = QDF_STATUS_E_INVAL;
12095 
12096 	if (!vdev || !req) {
12097 		dp_cdp_err("%pK: Invalid vdev/req instance", soc);
12098 		status = QDF_STATUS_E_INVAL;
12099 		goto fail0;
12100 	}
12101 
12102 	if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) {
12103 		dp_err("Invalid mac id request");
12104 		status = QDF_STATUS_E_INVAL;
12105 		goto fail0;
12106 	}
12107 
12108 	stats = req->stats;
12109 	if (stats >= CDP_TXRX_MAX_STATS) {
12110 		status = QDF_STATUS_E_INVAL;
12111 		goto fail0;
12112 	}
12113 
12114 	/*
12115 	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
12116 	 *			has to be updated if new FW HTT stats added
12117 	 */
12118 	if (stats > CDP_TXRX_STATS_HTT_MAX)
12119 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
12120 
12121 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
12122 
12123 	if (stats >= num_stats) {
12124 		dp_cdp_err("%pK : Invalid stats option: %d", soc, stats);
12125 		status = QDF_STATUS_E_INVAL;
12126 		goto fail0;
12127 	}
12128 
12129 	req->stats = stats;
12130 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
12131 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
12132 
12133 	dp_info("stats: %u fw_stats_type: %d host_stats: %d",
12134 		stats, fw_stats, host_stats);
12135 
12136 	if (fw_stats != TXRX_FW_STATS_INVALID) {
12137 		/* update request with FW stats type */
12138 		req->stats = fw_stats;
12139 		status = dp_fw_stats_process(vdev, req);
12140 	} else if ((host_stats != TXRX_HOST_STATS_INVALID) &&
12141 			(host_stats <= TXRX_HOST_STATS_MAX))
12142 		status = dp_print_host_stats(vdev, req, soc);
12143 	else
12144 		dp_cdp_info("%pK: Wrong Input for TxRx Stats", soc);
12145 fail0:
12146 	if (vdev)
12147 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12148 	return status;
12149 }
12150 
12151 /*
12152  * dp_txrx_dump_stats() -  Dump statistics
12153  * @value - Statistics option
12154  */
12155 static QDF_STATUS dp_txrx_dump_stats(struct cdp_soc_t *psoc, uint16_t value,
12156 				     enum qdf_stats_verbosity_level level)
12157 {
12158 	struct dp_soc *soc =
12159 		(struct dp_soc *)psoc;
12160 	QDF_STATUS status = QDF_STATUS_SUCCESS;
12161 
12162 	if (!soc) {
12163 		dp_cdp_err("%pK: soc is NULL", soc);
12164 		return QDF_STATUS_E_INVAL;
12165 	}
12166 
12167 	switch (value) {
12168 	case CDP_TXRX_PATH_STATS:
12169 		dp_txrx_path_stats(soc);
12170 		dp_print_soc_interrupt_stats(soc);
12171 		hal_dump_reg_write_stats(soc->hal_soc);
12172 		dp_pdev_print_tx_delay_stats(soc);
12173 		/* Dump usage watermark stats for core TX/RX SRNGs */
12174 		dp_dump_srng_high_wm_stats(soc, (1 << REO_DST));
12175 		dp_print_fisa_stats(soc);
12176 		break;
12177 
12178 	case CDP_RX_RING_STATS:
12179 		dp_print_per_ring_stats(soc);
12180 		break;
12181 
12182 	case CDP_TXRX_TSO_STATS:
12183 		dp_print_tso_stats(soc, level);
12184 		break;
12185 
12186 	case CDP_DUMP_TX_FLOW_POOL_INFO:
12187 		if (level == QDF_STATS_VERBOSITY_LEVEL_HIGH)
12188 			cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
12189 		else
12190 			dp_tx_dump_flow_pool_info_compact(soc);
12191 		break;
12192 
12193 	case CDP_DP_NAPI_STATS:
12194 		dp_print_napi_stats(soc);
12195 		break;
12196 
12197 	case CDP_TXRX_DESC_STATS:
12198 		/* TODO: NOT IMPLEMENTED */
12199 		break;
12200 
12201 	case CDP_DP_RX_FISA_STATS:
12202 		dp_rx_dump_fisa_stats(soc);
12203 		break;
12204 
12205 	case CDP_DP_SWLM_STATS:
12206 		dp_print_swlm_stats(soc);
12207 		break;
12208 
12209 	case CDP_DP_TX_HW_LATENCY_STATS:
12210 		dp_pdev_print_tx_delay_stats(soc);
12211 		break;
12212 
12213 	default:
12214 		status = QDF_STATUS_E_INVAL;
12215 		break;
12216 	}
12217 
12218 	return status;
12219 
12220 }
12221 
12222 #ifdef WLAN_SYSFS_DP_STATS
12223 static
12224 void dp_sysfs_get_stat_type(struct dp_soc *soc, uint32_t *mac_id,
12225 			    uint32_t *stat_type)
12226 {
12227 	qdf_spinlock_acquire(&soc->sysfs_config->rw_stats_lock);
12228 	*stat_type = soc->sysfs_config->stat_type_requested;
12229 	*mac_id   = soc->sysfs_config->mac_id;
12230 
12231 	qdf_spinlock_release(&soc->sysfs_config->rw_stats_lock);
12232 }
12233 
12234 static
12235 void dp_sysfs_update_config_buf_params(struct dp_soc *soc,
12236 				       uint32_t curr_len,
12237 				       uint32_t max_buf_len,
12238 				       char *buf)
12239 {
12240 	qdf_spinlock_acquire(&soc->sysfs_config->sysfs_write_user_buffer);
12241 	/* set sysfs_config parameters */
12242 	soc->sysfs_config->buf = buf;
12243 	soc->sysfs_config->curr_buffer_length = curr_len;
12244 	soc->sysfs_config->max_buffer_length = max_buf_len;
12245 	qdf_spinlock_release(&soc->sysfs_config->sysfs_write_user_buffer);
12246 }
12247 
12248 static
12249 QDF_STATUS dp_sysfs_fill_stats(ol_txrx_soc_handle soc_hdl,
12250 			       char *buf, uint32_t buf_size)
12251 {
12252 	uint32_t mac_id = 0;
12253 	uint32_t stat_type = 0;
12254 	uint32_t fw_stats = 0;
12255 	uint32_t host_stats = 0;
12256 	enum cdp_stats stats;
12257 	struct cdp_txrx_stats_req req;
12258 	uint32_t num_stats;
12259 	struct dp_soc *soc = NULL;
12260 
12261 	if (!soc_hdl) {
12262 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
12263 		return QDF_STATUS_E_INVAL;
12264 	}
12265 
12266 	soc = cdp_soc_t_to_dp_soc(soc_hdl);
12267 
12268 	if (!soc) {
12269 		dp_cdp_err("%pK: soc is NULL", soc);
12270 		return QDF_STATUS_E_INVAL;
12271 	}
12272 
12273 	dp_sysfs_get_stat_type(soc, &mac_id, &stat_type);
12274 
12275 	stats = stat_type;
12276 	if (stats >= CDP_TXRX_MAX_STATS) {
12277 		dp_cdp_info("sysfs stat type requested is invalid");
12278 		return QDF_STATUS_E_INVAL;
12279 	}
12280 	/*
12281 	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
12282 	 *			has to be updated if new FW HTT stats added
12283 	 */
12284 	if (stats > CDP_TXRX_MAX_STATS)
12285 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
12286 
12287 	num_stats = QDF_ARRAY_SIZE(dp_stats_mapping_table);
12288 
12289 	if (stats >= num_stats) {
12290 		dp_cdp_err("%pK : Invalid stats option: %d, max num stats: %d",
12291 				soc, stats, num_stats);
12292 		return QDF_STATUS_E_INVAL;
12293 	}
12294 
12295 	/* build request */
12296 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
12297 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
12298 
12299 	req.stats = stat_type;
12300 	req.mac_id = mac_id;
12301 	/* request stats to be printed */
12302 	qdf_mutex_acquire(&soc->sysfs_config->sysfs_read_lock);
12303 
12304 	if (fw_stats != TXRX_FW_STATS_INVALID) {
12305 		/* update request with FW stats type */
12306 		req.cookie_val = DBG_SYSFS_STATS_COOKIE;
12307 	} else if ((host_stats != TXRX_HOST_STATS_INVALID) &&
12308 			(host_stats <= TXRX_HOST_STATS_MAX)) {
12309 		req.cookie_val = DBG_STATS_COOKIE_DEFAULT;
12310 		soc->sysfs_config->process_id = qdf_get_current_pid();
12311 		soc->sysfs_config->printing_mode = PRINTING_MODE_ENABLED;
12312 	}
12313 
12314 	dp_sysfs_update_config_buf_params(soc, 0, buf_size, buf);
12315 
12316 	dp_txrx_stats_request(soc_hdl, mac_id, &req);
12317 	soc->sysfs_config->process_id = 0;
12318 	soc->sysfs_config->printing_mode = PRINTING_MODE_DISABLED;
12319 
12320 	dp_sysfs_update_config_buf_params(soc, 0, 0, NULL);
12321 
12322 	qdf_mutex_release(&soc->sysfs_config->sysfs_read_lock);
12323 	return QDF_STATUS_SUCCESS;
12324 }
12325 
12326 static
12327 QDF_STATUS dp_sysfs_set_stat_type(ol_txrx_soc_handle soc_hdl,
12328 				  uint32_t stat_type, uint32_t mac_id)
12329 {
12330 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12331 
12332 	if (!soc_hdl) {
12333 		dp_cdp_err("%pK: soc is NULL", soc);
12334 		return QDF_STATUS_E_INVAL;
12335 	}
12336 
12337 	qdf_spinlock_acquire(&soc->sysfs_config->rw_stats_lock);
12338 
12339 	soc->sysfs_config->stat_type_requested = stat_type;
12340 	soc->sysfs_config->mac_id = mac_id;
12341 
12342 	qdf_spinlock_release(&soc->sysfs_config->rw_stats_lock);
12343 
12344 	return QDF_STATUS_SUCCESS;
12345 }
12346 
12347 static
12348 QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl)
12349 {
12350 	struct dp_soc *soc;
12351 	QDF_STATUS status;
12352 
12353 	if (!soc_hdl) {
12354 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
12355 		return QDF_STATUS_E_INVAL;
12356 	}
12357 
12358 	soc = soc_hdl;
12359 
12360 	soc->sysfs_config = qdf_mem_malloc(sizeof(struct sysfs_stats_config));
12361 	if (!soc->sysfs_config) {
12362 		dp_cdp_err("failed to allocate memory for sysfs_config no memory");
12363 		return QDF_STATUS_E_NOMEM;
12364 	}
12365 
12366 	status = qdf_event_create(&soc->sysfs_config->sysfs_txrx_fw_request_done);
12367 	/* create event for fw stats request from sysfs */
12368 	if (status != QDF_STATUS_SUCCESS) {
12369 		dp_cdp_err("failed to create event sysfs_txrx_fw_request_done");
12370 		qdf_mem_free(soc->sysfs_config);
12371 		soc->sysfs_config = NULL;
12372 		return QDF_STATUS_E_FAILURE;
12373 	}
12374 
12375 	qdf_spinlock_create(&soc->sysfs_config->rw_stats_lock);
12376 	qdf_mutex_create(&soc->sysfs_config->sysfs_read_lock);
12377 	qdf_spinlock_create(&soc->sysfs_config->sysfs_write_user_buffer);
12378 
12379 	return QDF_STATUS_SUCCESS;
12380 }
12381 
12382 static
12383 QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl)
12384 {
12385 	struct dp_soc *soc;
12386 	QDF_STATUS status;
12387 
12388 	if (!soc_hdl) {
12389 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
12390 		return QDF_STATUS_E_INVAL;
12391 	}
12392 
12393 	soc = soc_hdl;
12394 	if (!soc->sysfs_config) {
12395 		dp_cdp_err("soc->sysfs_config is NULL");
12396 		return QDF_STATUS_E_FAILURE;
12397 	}
12398 
12399 	status = qdf_event_destroy(&soc->sysfs_config->sysfs_txrx_fw_request_done);
12400 	if (status != QDF_STATUS_SUCCESS)
12401 		dp_cdp_err("Failed to destroy event sysfs_txrx_fw_request_done");
12402 
12403 	qdf_mutex_destroy(&soc->sysfs_config->sysfs_read_lock);
12404 	qdf_spinlock_destroy(&soc->sysfs_config->rw_stats_lock);
12405 	qdf_spinlock_destroy(&soc->sysfs_config->sysfs_write_user_buffer);
12406 
12407 	qdf_mem_free(soc->sysfs_config);
12408 
12409 	return QDF_STATUS_SUCCESS;
12410 }
12411 
12412 #else /* WLAN_SYSFS_DP_STATS */
12413 
static
QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl)
{
	/* WLAN_SYSFS_DP_STATS disabled: nothing to tear down */
	return QDF_STATUS_SUCCESS;
}
12419 
static
QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl)
{
	/* WLAN_SYSFS_DP_STATS disabled: nothing to set up */
	return QDF_STATUS_SUCCESS;
}
12425 #endif /* WLAN_SYSFS_DP_STATS */
12426 
12427 /**
12428  * dp_txrx_clear_dump_stats() - clear dumpStats
12429  * @soc- soc handle
12430  * @value - stats option
12431  *
12432  * Return: 0 - Success, non-zero - failure
12433  */
12434 static
12435 QDF_STATUS dp_txrx_clear_dump_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12436 				    uint8_t value)
12437 {
12438 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12439 	QDF_STATUS status = QDF_STATUS_SUCCESS;
12440 
12441 	if (!soc) {
12442 		dp_err("soc is NULL");
12443 		return QDF_STATUS_E_INVAL;
12444 	}
12445 
12446 	switch (value) {
12447 	case CDP_TXRX_TSO_STATS:
12448 		dp_txrx_clear_tso_stats(soc);
12449 		break;
12450 
12451 	case CDP_DP_TX_HW_LATENCY_STATS:
12452 		dp_pdev_clear_tx_delay_stats(soc);
12453 		break;
12454 
12455 	default:
12456 		status = QDF_STATUS_E_INVAL;
12457 		break;
12458 	}
12459 
12460 	return status;
12461 }
12462 
12463 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
12464 /**
12465  * dp_update_flow_control_parameters() - API to store datapath
12466  *                            config parameters
12467  * @soc: soc handle
12468  * @cfg: ini parameter handle
12469  *
12470  * Return: void
12471  */
12472 static inline
12473 void dp_update_flow_control_parameters(struct dp_soc *soc,
12474 				struct cdp_config_params *params)
12475 {
12476 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
12477 					params->tx_flow_stop_queue_threshold;
12478 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
12479 					params->tx_flow_start_queue_offset;
12480 }
12481 #else
static inline
void dp_update_flow_control_parameters(struct dp_soc *soc,
				struct cdp_config_params *params)
{
	/* QCA_LL_TX_FLOW_CONTROL_V2 disabled: nothing to store */
}
12487 #endif
12488 
12489 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
12490 /* Max packet limit for TX Comp packet loop (dp_tx_comp_handler) */
12491 #define DP_TX_COMP_LOOP_PKT_LIMIT_MAX 1024
12492 
12493 /* Max packet limit for RX REAP Loop (dp_rx_process) */
12494 #define DP_RX_REAP_LOOP_PKT_LIMIT_MAX 1024
12495 
12496 static
12497 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
12498 					struct cdp_config_params *params)
12499 {
12500 	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit =
12501 				params->tx_comp_loop_pkt_limit;
12502 
12503 	if (params->tx_comp_loop_pkt_limit < DP_TX_COMP_LOOP_PKT_LIMIT_MAX)
12504 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = true;
12505 	else
12506 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = false;
12507 
12508 	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit =
12509 				params->rx_reap_loop_pkt_limit;
12510 
12511 	if (params->rx_reap_loop_pkt_limit < DP_RX_REAP_LOOP_PKT_LIMIT_MAX)
12512 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = true;
12513 	else
12514 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = false;
12515 
12516 	soc->wlan_cfg_ctx->rx_hp_oos_update_limit =
12517 				params->rx_hp_oos_update_limit;
12518 
12519 	dp_info("tx_comp_loop_pkt_limit %u tx_comp_enable_eol_data_check %u rx_reap_loop_pkt_limit %u rx_enable_eol_data_check %u rx_hp_oos_update_limit %u",
12520 		soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit,
12521 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check,
12522 		soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit,
12523 		soc->wlan_cfg_ctx->rx_enable_eol_data_check,
12524 		soc->wlan_cfg_ctx->rx_hp_oos_update_limit);
12525 }
12526 
/**
 * dp_update_soft_irq_limits() - Override TX-comp/RX-reap loop packet limits
 * @soc: DP soc handle
 * @tx_limit: max packets processed per TX completion loop
 * @rx_limit: max packets processed per RX reap loop
 */
static void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
				      uint32_t rx_limit)
{
	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit = tx_limit;
	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit = rx_limit;
}
12533 
12534 #else
/* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT disabled: limits are not configurable */
static inline
void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
					struct cdp_config_params *params)
{ }
12539 
static inline
void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
			       uint32_t rx_limit)
{
	/* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT disabled: no-op */
}
12545 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
12546 
12547 /**
12548  * dp_update_config_parameters() - API to store datapath
12549  *                            config parameters
12550  * @soc: soc handle
12551  * @cfg: ini parameter handle
12552  *
12553  * Return: status
12554  */
12555 static
12556 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
12557 				struct cdp_config_params *params)
12558 {
12559 	struct dp_soc *soc = (struct dp_soc *)psoc;
12560 
12561 	if (!(soc)) {
12562 		dp_cdp_err("%pK: Invalid handle", soc);
12563 		return QDF_STATUS_E_INVAL;
12564 	}
12565 
12566 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
12567 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
12568 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
12569 	soc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload =
12570 				params->p2p_tcp_udp_checksumoffload;
12571 	soc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload =
12572 				params->nan_tcp_udp_checksumoffload;
12573 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
12574 				params->tcp_udp_checksumoffload;
12575 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
12576 	soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
12577 	soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
12578 
12579 	dp_update_rx_soft_irq_limit_params(soc, params);
12580 	dp_update_flow_control_parameters(soc, params);
12581 
12582 	return QDF_STATUS_SUCCESS;
12583 }
12584 
/* WDS (wireless distribution system) control ops exported through CDP */
static struct cdp_wds_ops dp_ops_wds = {
	.vdev_set_wds = dp_vdev_set_wds,
#ifdef WDS_VENDOR_EXTENSION
	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
#endif
};
12592 
12593 /*
12594  * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
12595  * @soc_hdl - datapath soc handle
12596  * @vdev_id - virtual interface id
12597  * @callback - callback function
12598  * @ctxt: callback context
12599  *
12600  */
12601 static void
12602 dp_txrx_data_tx_cb_set(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
12603 		       ol_txrx_data_tx_cb callback, void *ctxt)
12604 {
12605 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12606 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12607 						     DP_MOD_ID_CDP);
12608 
12609 	if (!vdev)
12610 		return;
12611 
12612 	vdev->tx_non_std_data_callback.func = callback;
12613 	vdev->tx_non_std_data_callback.ctxt = ctxt;
12614 
12615 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12616 }
12617 
12618 /**
12619  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
12620  * @soc: datapath soc handle
12621  * @pdev_id: id of datapath pdev handle
12622  *
12623  * Return: opaque pointer to dp txrx handle
12624  */
12625 static void *dp_pdev_get_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id)
12626 {
12627 	struct dp_pdev *pdev =
12628 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
12629 						   pdev_id);
12630 	if (qdf_unlikely(!pdev))
12631 		return NULL;
12632 
12633 	return pdev->dp_txrx_handle;
12634 }
12635 
12636 /**
12637  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
12638  * @soc: datapath soc handle
12639  * @pdev_id: id of datapath pdev handle
12640  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
12641  *
12642  * Return: void
12643  */
12644 static void
12645 dp_pdev_set_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id,
12646 			   void *dp_txrx_hdl)
12647 {
12648 	struct dp_pdev *pdev =
12649 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
12650 						   pdev_id);
12651 
12652 	if (!pdev)
12653 		return;
12654 
12655 	pdev->dp_txrx_handle = dp_txrx_hdl;
12656 }
12657 
12658 /**
12659  * dp_vdev_get_dp_ext_handle() - get dp handle from vdev
12660  * @soc: datapath soc handle
12661  * @vdev_id: vdev id
12662  *
12663  * Return: opaque pointer to dp txrx handle
12664  */
12665 static void *dp_vdev_get_dp_ext_handle(ol_txrx_soc_handle soc_hdl,
12666 				       uint8_t vdev_id)
12667 {
12668 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12669 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12670 						     DP_MOD_ID_CDP);
12671 	void *dp_ext_handle;
12672 
12673 	if (!vdev)
12674 		return NULL;
12675 	dp_ext_handle = vdev->vdev_dp_ext_handle;
12676 
12677 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12678 	return dp_ext_handle;
12679 }
12680 
12681 /**
12682  * dp_vdev_set_dp_ext_handle() - set dp handle in vdev
12683  * @soc: datapath soc handle
12684  * @vdev_id: vdev id
12685  * @size: size of advance dp handle
12686  *
12687  * Return: QDF_STATUS
12688  */
12689 static QDF_STATUS
12690 dp_vdev_set_dp_ext_handle(ol_txrx_soc_handle soc_hdl, uint8_t vdev_id,
12691 			  uint16_t size)
12692 {
12693 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12694 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12695 						     DP_MOD_ID_CDP);
12696 	void *dp_ext_handle;
12697 
12698 	if (!vdev)
12699 		return QDF_STATUS_E_FAILURE;
12700 
12701 	dp_ext_handle = qdf_mem_malloc(size);
12702 
12703 	if (!dp_ext_handle) {
12704 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12705 		return QDF_STATUS_E_FAILURE;
12706 	}
12707 
12708 	vdev->vdev_dp_ext_handle = dp_ext_handle;
12709 
12710 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12711 	return QDF_STATUS_SUCCESS;
12712 }
12713 
12714 /**
12715  * dp_vdev_inform_ll_conn() - Inform vdev to add/delete a latency critical
12716  *			      connection for this vdev
12717  * @soc_hdl: CDP soc handle
12718  * @vdev_id: vdev ID
12719  * @action: Add/Delete action
12720  *
12721  * Returns: QDF_STATUS.
12722  */
12723 static QDF_STATUS
12724 dp_vdev_inform_ll_conn(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
12725 		       enum vdev_ll_conn_actions action)
12726 {
12727 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12728 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12729 						     DP_MOD_ID_CDP);
12730 
12731 	if (!vdev) {
12732 		dp_err("LL connection action for invalid vdev %d", vdev_id);
12733 		return QDF_STATUS_E_FAILURE;
12734 	}
12735 
12736 	switch (action) {
12737 	case CDP_VDEV_LL_CONN_ADD:
12738 		vdev->num_latency_critical_conn++;
12739 		break;
12740 
12741 	case CDP_VDEV_LL_CONN_DEL:
12742 		vdev->num_latency_critical_conn--;
12743 		break;
12744 
12745 	default:
12746 		dp_err("LL connection action invalid %d", action);
12747 		break;
12748 	}
12749 
12750 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12751 	return QDF_STATUS_SUCCESS;
12752 }
12753 
12754 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
12755 /**
12756  * dp_soc_set_swlm_enable() - Enable/Disable SWLM if initialized.
12757  * @soc_hdl: CDP Soc handle
12758  * @value: Enable/Disable value
12759  *
12760  * Returns: QDF_STATUS
12761  */
12762 static QDF_STATUS dp_soc_set_swlm_enable(struct cdp_soc_t *soc_hdl,
12763 					 uint8_t value)
12764 {
12765 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12766 
12767 	if (!soc->swlm.is_init) {
12768 		dp_err("SWLM is not initialized");
12769 		return QDF_STATUS_E_FAILURE;
12770 	}
12771 
12772 	soc->swlm.is_enabled = !!value;
12773 
12774 	return QDF_STATUS_SUCCESS;
12775 }
12776 
12777 /**
12778  * dp_soc_is_swlm_enabled() - Check if SWLM is enabled.
12779  * @soc_hdl: CDP Soc handle
12780  *
12781  * Returns: QDF_STATUS
12782  */
12783 static uint8_t dp_soc_is_swlm_enabled(struct cdp_soc_t *soc_hdl)
12784 {
12785 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12786 
12787 	return soc->swlm.is_enabled;
12788 }
12789 #endif
12790 
12791 /**
12792  * dp_display_srng_info() - Dump the srng HP TP info
12793  * @soc_hdl: CDP Soc handle
12794  *
12795  * This function dumps the SW hp/tp values for the important rings.
12796  * HW hp/tp values are not being dumped, since it can lead to
12797  * READ NOC error when UMAC is in low power state. MCC does not have
12798  * device force wake working yet.
12799  *
12800  * Return: none
12801  */
12802 static void dp_display_srng_info(struct cdp_soc_t *soc_hdl)
12803 {
12804 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12805 	hal_soc_handle_t hal_soc = soc->hal_soc;
12806 	uint32_t hp, tp, i;
12807 
12808 	dp_info("SRNG HP-TP data:");
12809 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
12810 		hal_get_sw_hptp(hal_soc, soc->tcl_data_ring[i].hal_srng,
12811 				&tp, &hp);
12812 		dp_info("TCL DATA ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
12813 
12814 		if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, i) ==
12815 		    INVALID_WBM_RING_NUM)
12816 			continue;
12817 
12818 		hal_get_sw_hptp(hal_soc, soc->tx_comp_ring[i].hal_srng,
12819 				&tp, &hp);
12820 		dp_info("TX comp ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
12821 	}
12822 
12823 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
12824 		hal_get_sw_hptp(hal_soc, soc->reo_dest_ring[i].hal_srng,
12825 				&tp, &hp);
12826 		dp_info("REO DST ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
12827 	}
12828 
12829 	hal_get_sw_hptp(hal_soc, soc->reo_exception_ring.hal_srng, &tp, &hp);
12830 	dp_info("REO exception ring: hp=0x%x, tp=0x%x", hp, tp);
12831 
12832 	hal_get_sw_hptp(hal_soc, soc->rx_rel_ring.hal_srng, &tp, &hp);
12833 	dp_info("WBM RX release ring: hp=0x%x, tp=0x%x", hp, tp);
12834 
12835 	hal_get_sw_hptp(hal_soc, soc->wbm_desc_rel_ring.hal_srng, &tp, &hp);
12836 	dp_info("WBM desc release ring: hp=0x%x, tp=0x%x", hp, tp);
12837 }
12838 
12839 /**
12840  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
12841  * @soc_handle: datapath soc handle
12842  *
12843  * Return: opaque pointer to external dp (non-core DP)
12844  */
12845 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
12846 {
12847 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12848 
12849 	return soc->external_txrx_handle;
12850 }
12851 
12852 /**
12853  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
12854  * @soc_handle: datapath soc handle
12855  * @txrx_handle: opaque pointer to external dp (non-core DP)
12856  *
12857  * Return: void
12858  */
12859 static void
12860 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
12861 {
12862 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12863 
12864 	soc->external_txrx_handle = txrx_handle;
12865 }
12866 
12867 /**
12868  * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping
12869  * @soc_hdl: datapath soc handle
12870  * @pdev_id: id of the datapath pdev handle
12871  * @lmac_id: lmac id
12872  *
12873  * Return: QDF_STATUS
12874  */
12875 static QDF_STATUS
12876 dp_soc_map_pdev_to_lmac
12877 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12878 	 uint32_t lmac_id)
12879 {
12880 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12881 
12882 	wlan_cfg_set_hw_mac_idx(soc->wlan_cfg_ctx,
12883 				pdev_id,
12884 				lmac_id);
12885 
12886 	/*Set host PDEV ID for lmac_id*/
12887 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
12888 			      pdev_id,
12889 			      lmac_id);
12890 
12891 	return QDF_STATUS_SUCCESS;
12892 }
12893 
12894 /**
12895  * dp_soc_handle_pdev_mode_change() - Update pdev to lmac mapping
12896  * @soc_hdl: datapath soc handle
12897  * @pdev_id: id of the datapath pdev handle
12898  * @lmac_id: lmac id
12899  *
12900  * In the event of a dynamic mode change, update the pdev to lmac mapping
12901  *
12902  * Return: QDF_STATUS
12903  */
12904 static QDF_STATUS
12905 dp_soc_handle_pdev_mode_change
12906 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12907 	 uint32_t lmac_id)
12908 {
12909 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12910 	struct dp_vdev *vdev = NULL;
12911 	uint8_t hw_pdev_id, mac_id;
12912 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc,
12913 								  pdev_id);
12914 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
12915 
12916 	if (qdf_unlikely(!pdev))
12917 		return QDF_STATUS_E_FAILURE;
12918 
12919 	pdev->lmac_id = lmac_id;
12920 	pdev->target_pdev_id =
12921 		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
12922 	dp_info(" mode change %d %d\n", pdev->pdev_id, pdev->lmac_id);
12923 
12924 	/*Set host PDEV ID for lmac_id*/
12925 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
12926 			      pdev->pdev_id,
12927 			      lmac_id);
12928 
12929 	hw_pdev_id =
12930 		dp_get_target_pdev_id_for_host_pdev_id(soc,
12931 						       pdev->pdev_id);
12932 
12933 	/*
12934 	 * When NSS offload is enabled, send pdev_id->lmac_id
12935 	 * and pdev_id to hw_pdev_id to NSS FW
12936 	 */
12937 	if (nss_config) {
12938 		mac_id = pdev->lmac_id;
12939 		if (soc->cdp_soc.ol_ops->pdev_update_lmac_n_target_pdev_id)
12940 			soc->cdp_soc.ol_ops->
12941 				pdev_update_lmac_n_target_pdev_id(
12942 				soc->ctrl_psoc,
12943 				&pdev_id, &mac_id, &hw_pdev_id);
12944 	}
12945 
12946 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
12947 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
12948 		DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
12949 					       hw_pdev_id);
12950 		vdev->lmac_id = pdev->lmac_id;
12951 	}
12952 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
12953 
12954 	return QDF_STATUS_SUCCESS;
12955 }
12956 
12957 /**
12958  * dp_soc_set_pdev_status_down() - set pdev down/up status
12959  * @soc: datapath soc handle
12960  * @pdev_id: id of datapath pdev handle
12961  * @is_pdev_down: pdev down/up status
12962  *
12963  * Return: QDF_STATUS
12964  */
12965 static QDF_STATUS
12966 dp_soc_set_pdev_status_down(struct cdp_soc_t *soc, uint8_t pdev_id,
12967 			    bool is_pdev_down)
12968 {
12969 	struct dp_pdev *pdev =
12970 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
12971 						   pdev_id);
12972 	if (!pdev)
12973 		return QDF_STATUS_E_FAILURE;
12974 
12975 	pdev->is_pdev_down = is_pdev_down;
12976 	return QDF_STATUS_SUCCESS;
12977 }
12978 
12979 /**
12980  * dp_get_cfg_capabilities() - get dp capabilities
12981  * @soc_handle: datapath soc handle
12982  * @dp_caps: enum for dp capabilities
12983  *
12984  * Return: bool to determine if dp caps is enabled
12985  */
12986 static bool
12987 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
12988 			enum cdp_capabilities dp_caps)
12989 {
12990 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12991 
12992 	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
12993 }
12994 
12995 #ifdef FEATURE_AST
/**
 * dp_peer_teardown_wifi3() - Tear down AST state for a peer
 * @soc_hdl: CDP soc handle
 * @vdev_id: id of the vdev the peer belongs to
 * @peer_mac: peer MAC address
 *
 * Moves the peer to the logical-delete state and flushes its AST
 * entries under ast_lock.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_peer_teardown_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		       uint8_t *peer_mac)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	/* reference taken here is released before return */
	struct dp_peer *peer =
			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
					       DP_MOD_ID_CDP);

	/* Peer can be null for monitor vap mac address */
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid peer\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	dp_peer_update_state(soc, peer, DP_PEER_STATE_LOGICAL_DELETE);

	/* WDS disconnect and AST flush must happen under ast_lock */
	qdf_spin_lock_bh(&soc->ast_lock);
	dp_peer_send_wds_disconnect(soc, peer);
	dp_peer_delete_ast_entries(soc, peer);
	qdf_spin_unlock_bh(&soc->ast_lock);

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return status;
}
13023 #endif
13024 
#ifndef WLAN_SUPPORT_RX_TAG_STATISTICS
/**
 * dp_dump_pdev_rx_protocol_tag_stats() - dump the number of packets tagged
 * for the given protocol type (RX_PROTOCOL_TAG_ALL indicates all protocols)
 * @soc: cdp_soc handle
 * @pdev_id: id of cdp_pdev handle
 * @protocol_type: protocol type for which stats should be displayed
 *
 * No-op stub used when RX tag statistics support is compiled out.
 *
 * Return: none
 */
static inline void
dp_dump_pdev_rx_protocol_tag_stats(struct cdp_soc_t  *soc, uint8_t pdev_id,
				   uint16_t protocol_type)
{
}
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
13041 
#ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
/**
 * dp_update_pdev_rx_protocol_tag() - Add/remove a protocol tag that should be
 * applied to the desired protocol type packets
 * @soc: soc handle
 * @pdev_id: id of cdp_pdev handle
 * @enable_rx_protocol_tag: bitmask that indicates what protocol types
 * are enabled for tagging. zero indicates disable feature, non-zero indicates
 * enable feature
 * @protocol_type: new protocol type for which the tag is being added
 * @tag: user configured tag for the new protocol
 *
 * No-op stub used when protocol-type tagging support is compiled out.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static inline QDF_STATUS
dp_update_pdev_rx_protocol_tag(struct cdp_soc_t  *soc, uint8_t pdev_id,
			       uint32_t enable_rx_protocol_tag,
			       uint16_t protocol_type,
			       uint16_t tag)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
13065 
#ifndef WLAN_SUPPORT_RX_FLOW_TAG
/**
 * dp_set_rx_flow_tag() - add/delete a flow
 * @cdp_soc: soc handle
 * @pdev_id: id of cdp_pdev handle
 * @flow_info: flow tuple that is to be added to/deleted from flow search table
 *
 * No-op stub used when RX flow tagging support is compiled out.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static inline QDF_STATUS
dp_set_rx_flow_tag(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
		   struct cdp_rx_flow_info *flow_info)
{
	return QDF_STATUS_SUCCESS;
}
13081 /**
13082  * dp_dump_rx_flow_tag_stats - dump the number of packets tagged for
13083  * given flow 5-tuple
13084  * @cdp_soc: soc handle
13085  * @pdev_id: id of cdp_pdev handle
13086  * @flow_info: flow 5-tuple for which stats should be displayed
13087  *
13088  * Return: Success
13089  */
13090 static inline QDF_STATUS
13091 dp_dump_rx_flow_tag_stats(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
13092 			  struct cdp_rx_flow_info *flow_info)
13093 {
13094 	return QDF_STATUS_SUCCESS;
13095 }
13096 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
13097 
/**
 * dp_peer_map_attach_wifi3() - allocate peer map / peer-find tables
 * @soc_hdl: cdp soc handle
 * @max_peers: maximum number of peers supported by FW
 * @max_ast_index: maximum number of AST indexes supported by FW
 * @peer_map_unmap_versions: version of the HTT peer map/unmap messages in use
 *
 * Return: QDF_STATUS_SUCCESS on success, error status on any allocation
 *	   failure (arch-specific tables are detached again on the
 *	   peer-find failure path)
 */
static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
					   uint32_t max_peers,
					   uint32_t max_ast_index,
					   uint8_t peer_map_unmap_versions)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	QDF_STATUS status;

	soc->max_peers = max_peers;

	wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);

	/* Architecture-specific peer table allocation */
	status = soc->arch_ops.txrx_peer_map_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status)) {
		dp_err("failure in allocating peer tables");
		return QDF_STATUS_E_FAILURE;
	}

	dp_info("max_peers %u, calculated max_peers %u max_ast_index: %u\n",
		max_peers, soc->max_peer_id, max_ast_index);

	status = dp_peer_find_attach(soc);
	if (!QDF_IS_STATUS_SUCCESS(status)) {
		dp_err("Peer find attach failure");
		goto fail;
	}

	soc->peer_map_unmap_versions = peer_map_unmap_versions;
	soc->peer_map_attach_success = TRUE;

	return QDF_STATUS_SUCCESS;
fail:
	/* Undo the arch-specific allocation done above */
	soc->arch_ops.txrx_peer_map_detach(soc);

	return status;
}
13134 
/**
 * dp_soc_set_param() - set a soc-level datapath parameter
 * @soc_hdl: cdp soc handle
 * @param: enum identifying which parameter to set
 * @value: value to apply
 *
 * Return: QDF_STATUS_SUCCESS always; unknown parameters are logged
 *	   and ignored
 */
static QDF_STATUS dp_soc_set_param(struct cdp_soc_t  *soc_hdl,
				   enum cdp_soc_param_t param,
				   uint32_t value)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	switch (param) {
	case DP_SOC_PARAM_MSDU_EXCEPTION_DESC:
		soc->num_msdu_exception_desc = value;
		dp_info("num_msdu exception_desc %u",
			value);
		break;
	case DP_SOC_PARAM_CMEM_FSE_SUPPORT:
		/* FW capability is honoured only when the ini also
		 * enables FST in CMEM
		 */
		if (wlan_cfg_is_fst_in_cmem_enabled(soc->wlan_cfg_ctx))
			soc->fst_in_cmem = !!value;
		dp_info("FW supports CMEM FSE %u", value);
		break;
	case DP_SOC_PARAM_MAX_AST_AGEOUT:
		soc->max_ast_ageout_count = value;
		dp_info("Max ast ageout count %u", soc->max_ast_ageout_count);
		break;
	case DP_SOC_PARAM_EAPOL_OVER_CONTROL_PORT:
		soc->eapol_over_control_port = value;
		dp_info("Eapol over control_port:%d",
			soc->eapol_over_control_port);
		break;
	case DP_SOC_PARAM_MULTI_PEER_GRP_CMD_SUPPORT:
		soc->multi_peer_grp_cmd_supported = value;
		dp_info("Multi Peer group command support:%d",
			soc->multi_peer_grp_cmd_supported);
		break;
	case DP_SOC_PARAM_RSSI_DBM_CONV_SUPPORT:
		soc->features.rssi_dbm_conv_support = value;
		dp_info("Rssi dbm conversion support:%u",
			soc->features.rssi_dbm_conv_support);
		break;
	case DP_SOC_PARAM_UMAC_HW_RESET_SUPPORT:
		soc->features.umac_hw_reset_support = value;
		dp_info("UMAC HW reset support :%u",
			soc->features.umac_hw_reset_support);
		break;
	default:
		dp_info("not handled param %d ", param);
		break;
	}

	return QDF_STATUS_SUCCESS;
}
13183 
/**
 * dp_soc_set_rate_stats_ctx() - store the rate stats context handle in soc
 * @soc_handle: cdp soc handle
 * @stats_ctx: opaque rate stats context supplied by the control path
 *
 * Return: none
 */
static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
				      void *stats_ctx)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	soc->rate_stats_ctx = (struct cdp_soc_rate_stats_ctx *)stats_ctx;
}
13191 
#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
/**
 * dp_peer_flush_rate_stats_req(): Flush peer rate stats
 * @soc: Datapath SOC handle
 * @peer: Datapath peer
 * @arg: argument to iter function (unused)
 *
 * Peer-iterator callback: raises a WDI flush-rate-stats event for the
 * peer, skipping bss peers.
 *
 * Return: void
 */
static void
dp_peer_flush_rate_stats_req(struct dp_soc *soc, struct dp_peer *peer,
			     void *arg)
{
	/* bss peer has no per-peer rate stats to flush */
	if (peer->bss_peer)
		return;

	dp_wdi_event_handler(
		WDI_EVENT_FLUSH_RATE_STATS_REQ,
		soc, dp_monitor_peer_get_peerstats_ctx(soc, peer),
		peer->peer_id,
		WDI_NO_VAL, peer->vdev->pdev->pdev_id);
}
13214 
13215 /**
13216  * dp_flush_rate_stats_req(): Flush peer rate stats in pdev
13217  * @soc_hdl: Datapath SOC handle
13218  * @pdev_id: pdev_id
13219  *
13220  * Return: QDF_STATUS
13221  */
13222 static QDF_STATUS dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
13223 					  uint8_t pdev_id)
13224 {
13225 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
13226 	struct dp_pdev *pdev =
13227 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
13228 						   pdev_id);
13229 	if (!pdev)
13230 		return QDF_STATUS_E_FAILURE;
13231 
13232 	dp_pdev_iterate_peer(pdev, dp_peer_flush_rate_stats_req, NULL,
13233 			     DP_MOD_ID_CDP);
13234 
13235 	return QDF_STATUS_SUCCESS;
13236 }
#else
/* Stub used when per-packet info / WDI events are compiled out */
static inline QDF_STATUS
dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
			uint8_t pdev_id)
{
	return QDF_STATUS_SUCCESS;
}
#endif
13245 
#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_get_peer_extd_rate_link_stats(): function to get peer
 *				extended rate and link stats
 * @soc_hdl: dp soc handler
 * @mac_addr: mac address of peer
 *
 * Looks the peer up across all vdevs; for an MLD peer the flush event
 * is raised per link peer against that link's own soc/pdev, otherwise
 * a single event is raised for the peer itself.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_get_peer_extd_rate_link_stats(struct cdp_soc_t *soc_hdl, uint8_t *mac_addr)
{
	uint8_t i;
	struct dp_peer *link_peer;
	struct dp_soc *link_peer_soc;
	struct dp_mld_link_peers link_peers_info;
	struct dp_peer *peer = NULL;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct cdp_peer_info peer_info = { 0 };

	if (!mac_addr) {
		dp_err("NULL peer mac addr\n");
		return QDF_STATUS_E_FAILURE;
	}

	/* Wildcard type so both legacy and MLO peers match */
	DP_PEER_INFO_PARAMS_INIT(&peer_info, DP_VDEV_ALL, mac_addr, false,
				 CDP_WILD_PEER_TYPE);

	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
	if (!peer) {
		dp_err("Invalid peer\n");
		return QDF_STATUS_E_FAILURE;
	}

	if (IS_MLO_DP_MLD_PEER(peer)) {
		/* Each link peer may live on a different soc/pdev */
		dp_get_link_peers_ref_from_mld_peer(soc, peer,
						    &link_peers_info,
						    DP_MOD_ID_CDP);
		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			link_peer_soc = link_peer->vdev->pdev->soc;
			dp_wdi_event_handler(WDI_EVENT_FLUSH_RATE_STATS_REQ,
					     link_peer_soc,
					     dp_monitor_peer_get_peerstats_ctx
					     (link_peer_soc, link_peer),
					     link_peer->peer_id,
					     WDI_NO_VAL,
					     link_peer->vdev->pdev->pdev_id);
		}
		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
	} else {
		dp_wdi_event_handler(
				WDI_EVENT_FLUSH_RATE_STATS_REQ, soc,
				dp_monitor_peer_get_peerstats_ctx(soc, peer),
				peer->peer_id,
				WDI_NO_VAL, peer->vdev->pdev->pdev_id);
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}
13308 #else
13309 static QDF_STATUS
13310 dp_get_peer_extd_rate_link_stats(struct cdp_soc_t *soc_hdl, uint8_t *mac_addr)
13311 {
13312 	struct dp_peer *peer = NULL;
13313 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
13314 
13315 	if (!mac_addr) {
13316 		dp_err("NULL peer mac addr\n");
13317 		return QDF_STATUS_E_FAILURE;
13318 	}
13319 
13320 	peer = dp_peer_find_hash_find(soc, mac_addr, 0,
13321 				      DP_VDEV_ALL, DP_MOD_ID_CDP);
13322 	if (!peer) {
13323 		dp_err("Invalid peer\n");
13324 		return QDF_STATUS_E_FAILURE;
13325 	}
13326 
13327 	dp_wdi_event_handler(
13328 			WDI_EVENT_FLUSH_RATE_STATS_REQ, soc,
13329 			dp_monitor_peer_get_peerstats_ctx(soc, peer),
13330 			peer->peer_id,
13331 			WDI_NO_VAL, peer->vdev->pdev->pdev_id);
13332 
13333 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
13334 	return QDF_STATUS_SUCCESS;
13335 }
13336 #endif
#else
/* Stub used when per-packet info / WDI events are compiled out */
static inline QDF_STATUS
dp_get_peer_extd_rate_link_stats(struct cdp_soc_t *soc_hdl, uint8_t *mac_addr)
{
	return QDF_STATUS_SUCCESS;
}
#endif
13344 
13345 static void *dp_peer_get_peerstats_ctx(struct cdp_soc_t *soc_hdl,
13346 				       uint8_t vdev_id,
13347 				       uint8_t *mac_addr)
13348 {
13349 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
13350 	struct dp_peer *peer;
13351 	void *peerstats_ctx = NULL;
13352 
13353 	if (mac_addr) {
13354 		peer = dp_peer_find_hash_find(soc, mac_addr,
13355 					      0, vdev_id,
13356 					      DP_MOD_ID_CDP);
13357 		if (!peer)
13358 			return NULL;
13359 
13360 		if (!IS_MLO_DP_MLD_PEER(peer))
13361 			peerstats_ctx = dp_monitor_peer_get_peerstats_ctx(soc,
13362 									  peer);
13363 
13364 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
13365 	}
13366 
13367 	return peerstats_ctx;
13368 }
13369 
13370 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
13371 static QDF_STATUS dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
13372 					   uint8_t pdev_id,
13373 					   void *buf)
13374 {
13375 	 dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS,
13376 			      (struct dp_soc *)soc, buf, HTT_INVALID_PEER,
13377 			      WDI_NO_VAL, pdev_id);
13378 	return QDF_STATUS_SUCCESS;
13379 }
13380 #else
13381 static inline QDF_STATUS
13382 dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
13383 			 uint8_t pdev_id,
13384 			 void *buf)
13385 {
13386 	return QDF_STATUS_SUCCESS;
13387 }
13388 #endif
13389 
/**
 * dp_soc_get_rate_stats_ctx() - fetch the rate stats context from soc
 * @soc_handle: cdp soc handle
 *
 * Return: opaque rate stats context previously stored by
 *	   dp_soc_set_rate_stats_ctx()
 */
static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	return soc->rate_stats_ctx;
}
13396 
13397 /*
13398  * dp_get_cfg() - get dp cfg
13399  * @soc: cdp soc handle
13400  * @cfg: cfg enum
13401  *
13402  * Return: cfg value
13403  */
13404 static uint32_t dp_get_cfg(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg)
13405 {
13406 	struct dp_soc *dpsoc = (struct dp_soc *)soc;
13407 	uint32_t value = 0;
13408 
13409 	switch (cfg) {
13410 	case cfg_dp_enable_data_stall:
13411 		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
13412 		break;
13413 	case cfg_dp_enable_p2p_ip_tcp_udp_checksum_offload:
13414 		value = dpsoc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload;
13415 		break;
13416 	case cfg_dp_enable_nan_ip_tcp_udp_checksum_offload:
13417 		value = dpsoc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload;
13418 		break;
13419 	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
13420 		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
13421 		break;
13422 	case cfg_dp_disable_legacy_mode_csum_offload:
13423 		value = dpsoc->wlan_cfg_ctx->
13424 					legacy_mode_checksumoffload_disable;
13425 		break;
13426 	case cfg_dp_tso_enable:
13427 		value = dpsoc->wlan_cfg_ctx->tso_enabled;
13428 		break;
13429 	case cfg_dp_lro_enable:
13430 		value = dpsoc->wlan_cfg_ctx->lro_enabled;
13431 		break;
13432 	case cfg_dp_gro_enable:
13433 		value = dpsoc->wlan_cfg_ctx->gro_enabled;
13434 		break;
13435 	case cfg_dp_tc_based_dyn_gro_enable:
13436 		value = dpsoc->wlan_cfg_ctx->tc_based_dynamic_gro;
13437 		break;
13438 	case cfg_dp_tc_ingress_prio:
13439 		value = dpsoc->wlan_cfg_ctx->tc_ingress_prio;
13440 		break;
13441 	case cfg_dp_sg_enable:
13442 		value = dpsoc->wlan_cfg_ctx->sg_enabled;
13443 		break;
13444 	case cfg_dp_tx_flow_start_queue_offset:
13445 		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
13446 		break;
13447 	case cfg_dp_tx_flow_stop_queue_threshold:
13448 		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
13449 		break;
13450 	case cfg_dp_disable_intra_bss_fwd:
13451 		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
13452 		break;
13453 	case cfg_dp_pktlog_buffer_size:
13454 		value = dpsoc->wlan_cfg_ctx->pktlog_buffer_size;
13455 		break;
13456 	case cfg_dp_wow_check_rx_pending:
13457 		value = dpsoc->wlan_cfg_ctx->wow_check_rx_pending_enable;
13458 		break;
13459 	default:
13460 		value =  0;
13461 	}
13462 
13463 	return value;
13464 }
13465 
#ifdef PEER_FLOW_CONTROL
/**
 * dp_tx_flow_ctrl_configure_pdev() - Configure flow control params
 * @soc_handle: datapath soc handle
 * @pdev_id: id of datapath pdev handle
 * @param: ol ath params
 * @value: value of the flag
 * @buff: Buffer to be passed
 *
 * Implemented this function same as legacy function. In legacy code, single
 * function is used to display stats and update pdev params.
 *
 * Return: 0 for success. nonzero for failure.
 */
static uint32_t dp_tx_flow_ctrl_configure_pdev(struct cdp_soc_t *soc_handle,
					       uint8_t pdev_id,
					       enum _dp_param_t param,
					       uint32_t value, void *buff)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (qdf_unlikely(!pdev))
		return 1;

	soc = pdev->soc;
	if (!soc)
		return 1;

	switch (param) {
#ifdef QCA_ENH_V3_STATS_SUPPORT
	case DP_PARAM_VIDEO_DELAY_STATS_FC:
		/* toggle per-pdev delay stats capture */
		if (value)
			pdev->delay_stats_flag = true;
		else
			pdev->delay_stats_flag = false;
		break;
	case DP_PARAM_VIDEO_STATS_FC:
		/* display-only path: dumps TID/delay/rx-error stats */
		qdf_print("------- TID Stats ------\n");
		dp_pdev_print_tid_stats(pdev);
		qdf_print("------ Delay Stats ------\n");
		dp_pdev_print_delay_stats(pdev);
		qdf_print("------ Rx Error Stats ------\n");
		dp_pdev_print_rx_error_stats(pdev);
		break;
#endif
	case DP_PARAM_TOTAL_Q_SIZE:
		{
			uint32_t tx_min, tx_max;

			tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx);
			tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);

			/* buff == NULL means "set"; non-NULL means "get" */
			if (!buff) {
				if ((value >= tx_min) && (value <= tx_max)) {
					pdev->num_tx_allowed = value;
				} else {
					dp_tx_info("%pK: Failed to update num_tx_allowed, Q_min = %d Q_max = %d",
						   soc, tx_min, tx_max);
					break;
				}
			} else {
				*(int *)buff = pdev->num_tx_allowed;
			}
		}
		break;
	default:
		dp_tx_info("%pK: not handled param %d ", soc, param);
		break;
	}

	return 0;
}
#endif
13542 
13543 /**
13544  * dp_set_pdev_pcp_tid_map_wifi3(): update pcp tid map in pdev
13545  * @psoc: dp soc handle
13546  * @pdev_id: id of DP_PDEV handle
13547  * @pcp: pcp value
13548  * @tid: tid value passed by the user
13549  *
13550  * Return: QDF_STATUS_SUCCESS on success
13551  */
13552 static QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
13553 						uint8_t pdev_id,
13554 						uint8_t pcp, uint8_t tid)
13555 {
13556 	struct dp_soc *soc = (struct dp_soc *)psoc;
13557 
13558 	soc->pcp_tid_map[pcp] = tid;
13559 
13560 	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
13561 	return QDF_STATUS_SUCCESS;
13562 }
13563 
13564 /**
13565  * dp_set_vdev_pcp_tid_map_wifi3(): update pcp tid map in vdev
13566  * @soc: DP soc handle
13567  * @vdev_id: id of DP_VDEV handle
13568  * @pcp: pcp value
13569  * @tid: tid value passed by the user
13570  *
13571  * Return: QDF_STATUS_SUCCESS on success
13572  */
13573 static QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc_hdl,
13574 						uint8_t vdev_id,
13575 						uint8_t pcp, uint8_t tid)
13576 {
13577 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13578 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
13579 						     DP_MOD_ID_CDP);
13580 
13581 	if (!vdev)
13582 		return QDF_STATUS_E_FAILURE;
13583 
13584 	vdev->pcp_tid_map[pcp] = tid;
13585 
13586 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
13587 	return QDF_STATUS_SUCCESS;
13588 }
13589 
#if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
/**
 * dp_drain_txrx() - drain pending SRNG processing on all interrupt contexts
 * @soc_handle: cdp soc handle
 *
 * Services every interrupt context once with a large budget so any
 * outstanding TX completions / RX packets are reaped, then issues a
 * dummy register read to flush pending HP/TP writes.
 *
 * Return: none
 */
static void dp_drain_txrx(struct cdp_soc_t *soc_handle)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	uint32_t cur_tx_limit, cur_rx_limit;
	uint32_t budget = 0xffff;
	uint32_t val;
	int i;
	int cpu = dp_srng_get_cpu();

	cur_tx_limit = soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit;
	cur_rx_limit = soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit;

	/* Temporarily increase soft irq limits when going to drain
	 * the UMAC/LMAC SRNGs and restore them after polling.
	 * Though the budget is on higher side, the TX/RX reaping loops
	 * will not execute longer as both TX and RX would be suspended
	 * by the time this API is called.
	 */
	dp_update_soft_irq_limits(soc, budget, budget);

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
		dp_service_srngs(&soc->intr_ctx[i], budget, cpu);

	dp_update_soft_irq_limits(soc, cur_tx_limit, cur_rx_limit);

	/* Do a dummy read at offset 0; this will ensure all
	 * pendings writes(HP/TP) are flushed before read returns.
	 */
	val = HAL_REG_READ((struct hal_soc *)soc->hal_soc, 0);
	dp_debug("Register value at offset 0: %u\n", val);
}
#endif
13623 
#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_reset_interrupt_ring_masks(): Back up and clear all interrupt ring masks
 * @soc: dp soc handle
 *
 * Saves the per-context ring masks into a freshly allocated backup area
 * (soc->umac_reset_ctx.intr_ctx_bkp) and zeroes the live masks so no
 * further ring servicing is scheduled while Umac reset is in progress.
 * The backup is consumed and freed by dp_restore_interrupt_ring_masks().
 *
 * Return: void
 */
static void dp_reset_interrupt_ring_masks(struct dp_soc *soc)
{
	struct dp_intr_bkp *intr_bkp;
	struct dp_intr *intr_ctx;
	int num_ctxt = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
	int i;

	intr_bkp =
	(struct dp_intr_bkp *)qdf_mem_malloc_atomic(sizeof(struct dp_intr_bkp) *
			num_ctxt);

	/* Allocation failure is fatal: the masks could never be restored */
	qdf_assert_always(intr_bkp);

	soc->umac_reset_ctx.intr_ctx_bkp = intr_bkp;
	for (i = 0; i < num_ctxt; i++) {
		intr_ctx = &soc->intr_ctx[i];

		/* snapshot current masks */
		intr_bkp->tx_ring_mask = intr_ctx->tx_ring_mask;
		intr_bkp->rx_ring_mask = intr_ctx->rx_ring_mask;
		intr_bkp->rx_mon_ring_mask = intr_ctx->rx_mon_ring_mask;
		intr_bkp->rx_err_ring_mask = intr_ctx->rx_err_ring_mask;
		intr_bkp->rx_wbm_rel_ring_mask = intr_ctx->rx_wbm_rel_ring_mask;
		intr_bkp->reo_status_ring_mask = intr_ctx->reo_status_ring_mask;
		intr_bkp->rxdma2host_ring_mask = intr_ctx->rxdma2host_ring_mask;
		intr_bkp->host2rxdma_ring_mask = intr_ctx->host2rxdma_ring_mask;
		intr_bkp->host2rxdma_mon_ring_mask =
					intr_ctx->host2rxdma_mon_ring_mask;
		intr_bkp->tx_mon_ring_mask = intr_ctx->tx_mon_ring_mask;

		/* then disable everything on the live context */
		intr_ctx->tx_ring_mask = 0;
		intr_ctx->rx_ring_mask = 0;
		intr_ctx->rx_mon_ring_mask = 0;
		intr_ctx->rx_err_ring_mask = 0;
		intr_ctx->rx_wbm_rel_ring_mask = 0;
		intr_ctx->reo_status_ring_mask = 0;
		intr_ctx->rxdma2host_ring_mask = 0;
		intr_ctx->host2rxdma_ring_mask = 0;
		intr_ctx->host2rxdma_mon_ring_mask = 0;
		intr_ctx->tx_mon_ring_mask = 0;

		intr_bkp++;
	}
}
13674 
13675 /**
13676  * dp_restore_interrupt_ring_masks(): Restore rx interrupt masks
13677  * @soc: dp soc handle
13678  *
13679  * Return: void
13680  */
13681 static void dp_restore_interrupt_ring_masks(struct dp_soc *soc)
13682 {
13683 	struct dp_intr_bkp *intr_bkp = soc->umac_reset_ctx.intr_ctx_bkp;
13684 	struct dp_intr_bkp *intr_bkp_base = intr_bkp;
13685 	struct dp_intr *intr_ctx;
13686 	int num_ctxt = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
13687 	int i;
13688 
13689 	qdf_assert_always(intr_bkp);
13690 
13691 	for (i = 0; i < num_ctxt; i++) {
13692 		intr_ctx = &soc->intr_ctx[i];
13693 
13694 		intr_ctx->tx_ring_mask = intr_bkp->tx_ring_mask;
13695 		intr_ctx->rx_ring_mask = intr_bkp->rx_ring_mask;
13696 		intr_ctx->rx_mon_ring_mask = intr_bkp->rx_mon_ring_mask;
13697 		intr_ctx->rx_err_ring_mask = intr_bkp->rx_err_ring_mask;
13698 		intr_ctx->rx_wbm_rel_ring_mask = intr_bkp->rx_wbm_rel_ring_mask;
13699 		intr_ctx->reo_status_ring_mask = intr_bkp->reo_status_ring_mask;
13700 		intr_ctx->rxdma2host_ring_mask = intr_bkp->rxdma2host_ring_mask;
13701 		intr_ctx->host2rxdma_ring_mask = intr_bkp->host2rxdma_ring_mask;
13702 		intr_ctx->host2rxdma_mon_ring_mask =
13703 			intr_bkp->host2rxdma_mon_ring_mask;
13704 		intr_ctx->tx_mon_ring_mask = intr_bkp->tx_mon_ring_mask;
13705 
13706 		intr_bkp++;
13707 	}
13708 
13709 	qdf_mem_free(intr_bkp_base);
13710 	soc->umac_reset_ctx.intr_ctx_bkp = NULL;
13711 }
13712 
13713 /**
13714  * dp_resume_tx_hardstart(): Restore the old Tx hardstart functions
13715  * @soc: dp soc handle
13716  *
13717  * Return: void
13718  */
13719 static void dp_resume_tx_hardstart(struct dp_soc *soc)
13720 {
13721 	struct dp_vdev *vdev;
13722 	struct ol_txrx_hardtart_ctxt ctxt = {0};
13723 	struct cdp_ctrl_objmgr_psoc *psoc = soc->ctrl_psoc;
13724 	int i;
13725 
13726 	for (i = 0; i < MAX_PDEV_CNT; i++) {
13727 		struct dp_pdev *pdev = soc->pdev_list[i];
13728 
13729 		if (!pdev)
13730 			continue;
13731 
13732 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
13733 			uint8_t vdev_id = vdev->vdev_id;
13734 
13735 			dp_vdev_fetch_tx_handler(vdev, soc, &ctxt);
13736 			soc->cdp_soc.ol_ops->dp_update_tx_hardstart(psoc,
13737 								    vdev_id,
13738 								    &ctxt);
13739 		}
13740 	}
13741 }
13742 
13743 /**
13744  * dp_pause_tx_hardstart(): Register Tx hardstart functions to drop packets
13745  * @soc: dp soc handle
13746  *
13747  * Return: void
13748  */
13749 static void dp_pause_tx_hardstart(struct dp_soc *soc)
13750 {
13751 	struct dp_vdev *vdev;
13752 	struct ol_txrx_hardtart_ctxt ctxt;
13753 	struct cdp_ctrl_objmgr_psoc *psoc = soc->ctrl_psoc;
13754 	int i;
13755 
13756 	ctxt.tx = &dp_tx_drop;
13757 	ctxt.tx_fast = &dp_tx_drop;
13758 	ctxt.tx_exception = &dp_tx_exc_drop;
13759 
13760 	for (i = 0; i < MAX_PDEV_CNT; i++) {
13761 		struct dp_pdev *pdev = soc->pdev_list[i];
13762 
13763 		if (!pdev)
13764 			continue;
13765 
13766 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
13767 			uint8_t vdev_id = vdev->vdev_id;
13768 
13769 			soc->cdp_soc.ol_ops->dp_update_tx_hardstart(psoc,
13770 								    vdev_id,
13771 								    &ctxt);
13772 		}
13773 	}
13774 }
13775 
13776 /**
13777  * dp_unregister_notify_umac_pre_reset_fw_callback(): unregister notify_fw_cb
13778  * @soc: dp soc handle
13779  *
13780  * Return: void
13781  */
13782 static inline
13783 void dp_unregister_notify_umac_pre_reset_fw_callback(struct dp_soc *soc)
13784 {
13785 	soc->notify_fw_callback = NULL;
13786 }
13787 
13788 /**
13789  * dp_check_n_notify_umac_prereset_done(): Send pre reset done to firmware
13790  * @soc: dp soc handle
13791  *
13792  * Return: void
13793  */
13794 static inline
13795 void dp_check_n_notify_umac_prereset_done(struct dp_soc *soc)
13796 {
13797 	/* Some Cpu(s) is processing the umac rings*/
13798 	if (soc->service_rings_running)
13799 		return;
13800 
13801 	/* Notify the firmware that Umac pre reset is complete */
13802 	dp_umac_reset_notify_action_completion(soc,
13803 					       UMAC_RESET_ACTION_DO_PRE_RESET);
13804 
13805 	/* Unregister the callback */
13806 	dp_unregister_notify_umac_pre_reset_fw_callback(soc);
13807 }
13808 
13809 /**
13810  * dp_register_notify_umac_pre_reset_fw_callback(): register notify_fw_cb
13811  * @soc: dp soc handle
13812  *
13813  * Return: void
13814  */
13815 static inline
13816 void dp_register_notify_umac_pre_reset_fw_callback(struct dp_soc *soc)
13817 {
13818 	soc->notify_fw_callback = dp_check_n_notify_umac_prereset_done;
13819 }
13820 
13821 #ifdef DP_UMAC_HW_HARD_RESET
13822 /**
13823  * dp_set_umac_regs(): Reinitialize host umac registers
13824  * @soc: dp soc handle
13825  *
13826  * Return: void
13827  */
13828 static void dp_set_umac_regs(struct dp_soc *soc)
13829 {
13830 	int i;
13831 	struct hal_reo_params reo_params;
13832 
13833 	qdf_mem_zero(&reo_params, sizeof(reo_params));
13834 
13835 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
13836 		if (soc->arch_ops.reo_remap_config(soc, &reo_params.remap0,
13837 						   &reo_params.remap1,
13838 						   &reo_params.remap2))
13839 			reo_params.rx_hash_enabled = true;
13840 		else
13841 			reo_params.rx_hash_enabled = false;
13842 	}
13843 
13844 	hal_reo_setup(soc->hal_soc, &reo_params, 0);
13845 
13846 	soc->arch_ops.dp_cc_reg_cfg_init(soc, true);
13847 
13848 	for (i = 0; i < PCP_TID_MAP_MAX; i++)
13849 		hal_tx_update_pcp_tid_map(soc->hal_soc, soc->pcp_tid_map[i], i);
13850 
13851 	for (i = 0; i < MAX_PDEV_CNT; i++) {
13852 		struct dp_vdev *vdev = NULL;
13853 		struct dp_pdev *pdev = soc->pdev_list[i];
13854 
13855 		if (!pdev)
13856 			continue;
13857 
13858 		for (i = 0; i < soc->num_hw_dscp_tid_map; i++)
13859 			hal_tx_set_dscp_tid_map(soc->hal_soc,
13860 						pdev->dscp_tid_map[i], i);
13861 
13862 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
13863 			soc->arch_ops.dp_bank_reconfig(soc, vdev);
13864 			soc->arch_ops.dp_reconfig_tx_vdev_mcast_ctrl(soc,
13865 								      vdev);
13866 		}
13867 	}
13868 }
#else
/* Hard reset not supported: nothing to reprogram */
static void dp_set_umac_regs(struct dp_soc *soc)
{
}
#endif
13874 
13875 /**
13876  * dp_reinit_rings(): Reinitialize host managed rings
13877  * @soc: dp soc handle
13878  *
13879  * Return: QDF_STATUS
13880  */
13881 static void dp_reinit_rings(struct dp_soc *soc)
13882 {
13883 	unsigned long end;
13884 
13885 	dp_soc_srng_deinit(soc);
13886 	dp_hw_link_desc_ring_deinit(soc);
13887 
13888 	/* Busy wait for 2 ms to make sure the rings are in idle state
13889 	 * before we enable them again
13890 	 */
13891 	end = jiffies + msecs_to_jiffies(2);
13892 	while (time_before(jiffies, end))
13893 		;
13894 
13895 	dp_hw_link_desc_ring_init(soc);
13896 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
13897 	dp_soc_srng_init(soc);
13898 }
13899 
13900 /**
13901  * dp_umac_reset_handle_pre_reset(): Handle Umac prereset interrupt from FW
13902  * @soc: dp soc handle
13903  *
13904  * Return: QDF_STATUS
13905  */
13906 static QDF_STATUS dp_umac_reset_handle_pre_reset(struct dp_soc *soc)
13907 {
13908 	dp_reset_interrupt_ring_masks(soc);
13909 
13910 	dp_pause_tx_hardstart(soc);
13911 	dp_pause_reo_send_cmd(soc);
13912 
13913 	dp_check_n_notify_umac_prereset_done(soc);
13914 
13915 	soc->umac_reset_ctx.nbuf_list = NULL;
13916 
13917 	return QDF_STATUS_SUCCESS;
13918 }
13919 
13920 /**
13921  * dp_umac_reset_handle_post_reset(): Handle Umac postreset interrupt from FW
13922  * @soc: dp soc handle
13923  *
13924  * Return: QDF_STATUS
13925  */
13926 static QDF_STATUS dp_umac_reset_handle_post_reset(struct dp_soc *soc)
13927 {
13928 	if (!soc->umac_reset_ctx.skel_enable) {
13929 		qdf_nbuf_t *nbuf_list = &soc->umac_reset_ctx.nbuf_list;
13930 
13931 		dp_set_umac_regs(soc);
13932 
13933 		dp_reinit_rings(soc);
13934 
13935 		dp_rx_desc_reuse(soc, nbuf_list);
13936 
13937 		dp_cleanup_reo_cmd_module(soc);
13938 
13939 		dp_tx_desc_pool_cleanup(soc, nbuf_list);
13940 
13941 		dp_reset_tid_q_setup(soc);
13942 	}
13943 
13944 	return dp_umac_reset_notify_action_completion(soc,
13945 					UMAC_RESET_ACTION_DO_POST_RESET_START);
13946 }
13947 
13948 /**
13949  * dp_umac_reset_handle_post_reset_complete(): Handle Umac postreset_complete
13950  *						interrupt from FW
13951  * @soc: dp soc handle
13952  *
13953  * Return: QDF_STATUS
13954  */
13955 static QDF_STATUS dp_umac_reset_handle_post_reset_complete(struct dp_soc *soc)
13956 {
13957 	QDF_STATUS status;
13958 	qdf_nbuf_t nbuf_list = soc->umac_reset_ctx.nbuf_list;
13959 
13960 	soc->umac_reset_ctx.nbuf_list = NULL;
13961 
13962 	dp_resume_reo_send_cmd(soc);
13963 
13964 	dp_restore_interrupt_ring_masks(soc);
13965 
13966 	dp_resume_tx_hardstart(soc);
13967 
13968 	status = dp_umac_reset_notify_action_completion(soc,
13969 				UMAC_RESET_ACTION_DO_POST_RESET_COMPLETE);
13970 
13971 	while (nbuf_list) {
13972 		qdf_nbuf_t nbuf = nbuf_list->next;
13973 
13974 		qdf_nbuf_free(nbuf_list);
13975 		nbuf_list = nbuf;
13976 	}
13977 
13978 	dp_umac_reset_info("Umac reset done on soc %pK\n prereset : %u us\n"
13979 			   "postreset : %u us \n postreset complete: %u us \n",
13980 			   soc,
13981 			   soc->umac_reset_ctx.ts.pre_reset_done -
13982 			   soc->umac_reset_ctx.ts.pre_reset_start,
13983 			   soc->umac_reset_ctx.ts.post_reset_done -
13984 			   soc->umac_reset_ctx.ts.post_reset_start,
13985 			   soc->umac_reset_ctx.ts.post_reset_complete_done -
13986 			   soc->umac_reset_ctx.ts.post_reset_complete_start);
13987 
13988 	return status;
13989 }
13990 #endif
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
/**
 * dp_set_pkt_capture_mode() - store packet capture mode in wlan cfg context
 * @soc_handle: cdp soc handle
 * @val: packet capture mode to set
 *
 * Return: none
 */
static void
dp_set_pkt_capture_mode(struct cdp_soc_t *soc_handle, bool val)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	soc->wlan_cfg_ctx->pkt_capture_mode = val;
}
#endif
14000 
14001 #ifdef HW_TX_DELAY_STATS_ENABLE
14002 /**
14003  * dp_enable_disable_vdev_tx_delay_stats(): Start/Stop tx delay stats capture
14004  * @soc: DP soc handle
14005  * @vdev_id: vdev id
14006  * @value: value
14007  *
14008  * Return: None
14009  */
14010 static void
14011 dp_enable_disable_vdev_tx_delay_stats(struct cdp_soc_t *soc_hdl,
14012 				      uint8_t vdev_id,
14013 				      uint8_t value)
14014 {
14015 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14016 	struct dp_vdev *vdev = NULL;
14017 
14018 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
14019 	if (!vdev)
14020 		return;
14021 
14022 	vdev->hw_tx_delay_stats_enabled = value;
14023 
14024 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
14025 }
14026 
14027 /**
14028  * dp_check_vdev_tx_delay_stats_enabled() - check the feature is enabled or not
14029  * @soc: DP soc handle
14030  * @vdev_id: vdev id
14031  *
14032  * Returns: 1 if enabled, 0 if disabled
14033  */
14034 static uint8_t
14035 dp_check_vdev_tx_delay_stats_enabled(struct cdp_soc_t *soc_hdl,
14036 				     uint8_t vdev_id)
14037 {
14038 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14039 	struct dp_vdev *vdev;
14040 	uint8_t ret_val = 0;
14041 
14042 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
14043 	if (!vdev)
14044 		return ret_val;
14045 
14046 	ret_val = vdev->hw_tx_delay_stats_enabled;
14047 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
14048 
14049 	return ret_val;
14050 }
14051 #endif
14052 
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
/**
 * dp_recovery_vdev_flush_peers() - flush peers of a vdev during recovery
 * @cdp_soc: CDP soc handle
 * @vdev_id: vdev id
 * @mlo_peers_only: true to flush only MLO peers, false for all peers
 *
 * Return: None
 */
static void
dp_recovery_vdev_flush_peers(struct cdp_soc_t *cdp_soc,
			     uint8_t vdev_id,
			     bool mlo_peers_only)
{
	struct dp_soc *dpsoc = (struct dp_soc *)cdp_soc;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(dpsoc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev)
		return;

	/* unmap_only=false: perform a full peer flush */
	dp_vdev_flush_peers((struct cdp_vdev *)vdev, false, mlo_peers_only);
	dp_vdev_unref_delete(dpsoc, vdev, DP_MOD_ID_CDP);
}
#endif
#ifdef QCA_GET_TSF_VIA_REG
/**
 * dp_get_tsf_time() - get tsf time
 * @soc_hdl: Datapath soc handle
 * @tsf_id: TSF identifier
 * @mac_id: mac_id
 * @tsf: pointer to update tsf value
 * @tsf_sync_soc_time: pointer to update tsf sync time
 *
 * Return: None.
 */
static inline void
dp_get_tsf_time(struct cdp_soc_t *soc_hdl, uint32_t tsf_id, uint32_t mac_id,
		uint64_t *tsf, uint64_t *tsf_sync_soc_time)
{
	hal_get_tsf_time(((struct dp_soc *)soc_hdl)->hal_soc, tsf_id, mac_id,
			 tsf, tsf_sync_soc_time);
}
#else
/* Stub when TSF is not readable via register; the output parameters are
 * deliberately left untouched.
 */
static inline void
dp_get_tsf_time(struct cdp_soc_t *soc_hdl, uint32_t tsf_id, uint32_t mac_id,
		uint64_t *tsf, uint64_t *tsf_sync_soc_time)
{
}
#endif
14095 
/**
 * dp_get_tsf2_scratch_reg() - get tsf2 offset from the scratch register
 * @soc_hdl: Datapath soc handle
 * @mac_id: mac_id
 * @value: pointer to update tsf2 offset value
 *
 * Return: None.
 */
static inline void
dp_get_tsf2_scratch_reg(struct cdp_soc_t *soc_hdl, uint8_t mac_id,
			uint64_t *value)
{
	hal_get_tsf2_offset(((struct dp_soc *)soc_hdl)->hal_soc, mac_id, value);
}
14110 
/**
 * dp_get_tqm_scratch_reg() - get tqm offset from the scratch register
 * @soc_hdl: Datapath soc handle
 * @value: pointer to update tqm offset value
 *
 * Return: None.
 */
static inline void
dp_get_tqm_scratch_reg(struct cdp_soc_t *soc_hdl, uint64_t *value)
{
	hal_get_tqm_offset(((struct dp_soc *)soc_hdl)->hal_soc, value);
}
14123 
14124 /**
14125  * dp_set_tx_pause() - Pause or resume tx path
14126  * @soc_hdl: Datapath soc handle
14127  * @flag: set or clear is_tx_pause
14128  *
14129  * Return: None.
14130  */
14131 static inline
14132 void dp_set_tx_pause(struct cdp_soc_t *soc_hdl, bool flag)
14133 {
14134 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14135 
14136 	soc->is_tx_pause = flag;
14137 }
14138 
/* Common CDP ops table registered with the converged driver model; each
 * member implements the corresponding struct cdp_cmn_ops callback.
 */
static struct cdp_cmn_ops dp_ops_cmn = {
	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
	.txrx_vdev_attach = dp_vdev_attach_wifi3,
	.txrx_vdev_detach = dp_vdev_detach_wifi3,
	.txrx_pdev_attach = dp_pdev_attach_wifi3,
	.txrx_pdev_post_attach = dp_pdev_post_attach_wifi3,
	.txrx_pdev_detach = dp_pdev_detach_wifi3,
	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
	.txrx_peer_create = dp_peer_create_wifi3,
	.txrx_peer_setup = dp_peer_setup_wifi3,
#ifdef FEATURE_AST
	.txrx_peer_teardown = dp_peer_teardown_wifi3,
#else
	.txrx_peer_teardown = NULL,
#endif
	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
	.txrx_peer_get_ast_info_by_pdev =
		dp_peer_get_ast_info_by_pdevid_wifi3,
	.txrx_peer_ast_delete_by_soc =
		dp_peer_ast_entry_del_by_soc,
	.txrx_peer_ast_delete_by_pdev =
		dp_peer_ast_entry_del_by_pdev,
	.txrx_peer_delete = dp_peer_delete_wifi3,
#ifdef DP_RX_UDP_OVER_PEER_ROAM
	.txrx_update_roaming_peer = dp_update_roaming_peer_wifi3,
#endif
	.txrx_vdev_register = dp_vdev_register_wifi3,
	.txrx_soc_detach = dp_soc_detach_wifi3,
	.txrx_soc_deinit = dp_soc_deinit_wifi3,
	.txrx_soc_init = dp_soc_init_wifi3,
#ifndef QCA_HOST_MODE_WIFI_DISABLED
	.txrx_tso_soc_attach = dp_tso_soc_attach,
	.txrx_tso_soc_detach = dp_tso_soc_detach,
	.tx_send = dp_tx_send,
	.tx_send_exc = dp_tx_send_exception,
#endif
	.set_tx_pause = dp_set_tx_pause,
	.txrx_pdev_init = dp_pdev_init_wifi3,
	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
	.txrx_ath_getstats = dp_get_device_stats,
	.addba_requestprocess = dp_addba_requestprocess_wifi3,
	.addba_responsesetup = dp_addba_responsesetup_wifi3,
	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
	.delba_process = dp_delba_process_wifi3,
	.set_addba_response = dp_set_addba_response,
	.flush_cache_rx_queue = NULL,
	.tid_update_ba_win_size = dp_rx_tid_update_ba_win_size,
	/* TODO: get API's for dscp-tid need to be added*/
	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
	.txrx_get_total_per = dp_get_total_per,
	.txrx_stats_request = dp_txrx_stats_request,
	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
	.display_stats = dp_txrx_dump_stats,
	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
	.txrx_intr_detach = dp_soc_interrupt_detach,
	.txrx_ppeds_stop = dp_soc_ppeds_stop,
	.set_pn_check = dp_set_pn_check_wifi3,
	.set_key_sec_type = dp_set_key_sec_type_wifi3,
	.update_config_parameters = dp_update_config_parameters,
	/* TODO: Add other functions */
	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
	.get_vdev_dp_ext_txrx_handle = dp_vdev_get_dp_ext_handle,
	.set_vdev_dp_ext_txrx_handle = dp_vdev_set_dp_ext_handle,
	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
	.map_pdev_to_lmac = dp_soc_map_pdev_to_lmac,
	.handle_mode_change = dp_soc_handle_pdev_mode_change,
	.set_pdev_status_down = dp_soc_set_pdev_status_down,
	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
	.set_soc_param = dp_soc_set_param,
	.txrx_get_os_rx_handles_from_vdev =
					dp_get_os_rx_handles_from_vdev_wifi3,
	.delba_tx_completion = dp_delba_tx_completion_wifi3,
	.get_dp_capabilities = dp_get_cfg_capabilities,
	.txrx_get_cfg = dp_get_cfg,
	.set_rate_stats_ctx = dp_soc_set_rate_stats_ctx,
	.get_rate_stats_ctx = dp_soc_get_rate_stats_ctx,
	.txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats,
	.txrx_flush_rate_stats_request = dp_flush_rate_stats_req,
	.txrx_peer_get_peerstats_ctx = dp_peer_get_peerstats_ctx,

	.set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3,
	.set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3,

	.txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler,
#ifdef QCA_MULTIPASS_SUPPORT
	.set_vlan_groupkey = dp_set_vlan_groupkey,
#endif
	.get_peer_mac_list = dp_get_peer_mac_list,
	.get_peer_id = dp_get_peer_id,
#ifdef QCA_SUPPORT_WDS_EXTENDED
	.set_wds_ext_peer_rx = dp_wds_ext_set_peer_rx,
#endif /* QCA_SUPPORT_WDS_EXTENDED */

#if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
	.txrx_drain = dp_drain_txrx,
#endif
#if defined(FEATURE_RUNTIME_PM)
	.set_rtpm_tput_policy = dp_set_rtpm_tput_policy_requirement,
#endif
#ifdef WLAN_SYSFS_DP_STATS
	.txrx_sysfs_fill_stats = dp_sysfs_fill_stats,
	.txrx_sysfs_set_stat_type = dp_sysfs_set_stat_type,
#endif /* WLAN_SYSFS_DP_STATS */
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
	.set_pkt_capture_mode = dp_set_pkt_capture_mode,
#endif
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
	.txrx_recovery_vdev_flush_peers = dp_recovery_vdev_flush_peers,
#endif
	.txrx_umac_reset_deinit = dp_soc_umac_reset_deinit,
	.txrx_get_tsf_time = dp_get_tsf_time,
	.txrx_get_tsf2_offset = dp_get_tsf2_scratch_reg,
	.txrx_get_tqm_offset = dp_get_tqm_scratch_reg,
};
14265 
/* Control-plane CDP ops table (peer auth, per-object params, WDI events,
 * protocol tagging and related configuration hooks).
 */
static struct cdp_ctrl_ops dp_ops_ctrl = {
	.txrx_peer_authorize = dp_peer_authorize,
	.txrx_peer_get_authorize = dp_peer_get_authorize,
#ifdef VDEV_PEER_PROTOCOL_COUNT
	.txrx_enable_peer_protocol_count = dp_enable_vdev_peer_protocol_count,
	.txrx_set_peer_protocol_drop_mask =
		dp_enable_vdev_peer_protocol_drop_mask,
	.txrx_is_peer_protocol_count_enabled =
		dp_is_vdev_peer_protocol_count_enabled,
	.txrx_get_peer_protocol_drop_mask = dp_get_vdev_peer_protocol_drop_mask,
#endif
	.txrx_set_vdev_param = dp_set_vdev_param,
	.txrx_set_psoc_param = dp_set_psoc_param,
	.txrx_get_psoc_param = dp_get_psoc_param,
	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
	.txrx_get_sec_type = dp_get_sec_type,
	.txrx_wdi_event_sub = dp_wdi_event_sub,
	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
	.txrx_set_pdev_param = dp_set_pdev_param,
	.txrx_get_pdev_param = dp_get_pdev_param,
	.txrx_set_peer_param = dp_set_peer_param,
	.txrx_get_peer_param = dp_get_peer_param,
#ifdef VDEV_PEER_PROTOCOL_COUNT
	.txrx_peer_protocol_cnt = dp_peer_stats_update_protocol_cnt,
#endif
#ifdef WLAN_SUPPORT_MSCS
	.txrx_record_mscs_params = dp_record_mscs_params,
#endif
	.set_key = dp_set_michael_key,
	.txrx_get_vdev_param = dp_get_vdev_param,
	.calculate_delay_stats = dp_calculate_delay_stats,
#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
	.txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag,
#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
	.txrx_dump_pdev_rx_protocol_tag_stats =
				dp_dump_pdev_rx_protocol_tag_stats,
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
	.txrx_set_rx_flow_tag = dp_set_rx_flow_tag,
	.txrx_dump_rx_flow_tag_stats = dp_dump_rx_flow_tag_stats,
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */
#ifdef QCA_MULTIPASS_SUPPORT
	.txrx_peer_set_vlan_id = dp_peer_set_vlan_id,
#endif /*QCA_MULTIPASS_SUPPORT*/
#if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(WLAN_CONFIG_TX_DELAY)
	.txrx_set_delta_tsf = dp_set_delta_tsf,
#endif
#ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
	.txrx_set_tsf_ul_delay_report = dp_set_tsf_ul_delay_report,
	.txrx_get_uplink_delay = dp_get_uplink_delay,
#endif
#ifdef QCA_UNDECODED_METADATA_SUPPORT
	.txrx_set_pdev_phyrx_error_mask = dp_set_pdev_phyrx_error_mask,
	.txrx_get_pdev_phyrx_error_mask = dp_get_pdev_phyrx_error_mask,
#endif
	.txrx_peer_flush_frags = dp_peer_flush_frags,
};
14325 
/* Mcast-enhancement ops; populated only for host-mode builds with IQUE */
static struct cdp_me_ops dp_ops_me = {
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef ATH_SUPPORT_IQUE
	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
#endif
#endif
};
14335 
/* Host stats ops table. Note: when IPA_OFFLOAD and
 * QCA_ENHANCED_STATS_SUPPORT are both defined, the later designated
 * initializers intentionally override the peer/vdev/pdev stats handlers
 * with the IPA-aware variants (C guarantees the last initializer for a
 * member wins).
 */
static struct cdp_host_stats_ops dp_ops_host_stats = {
	.txrx_per_peer_stats = dp_get_host_peer_stats,
	.get_fw_peer_stats = dp_get_fw_peer_stats,
	.get_htt_stats = dp_get_htt_stats,
	.txrx_stats_publish = dp_txrx_stats_publish,
	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
	.txrx_get_soc_stats = dp_txrx_get_soc_stats,
	.txrx_get_peer_stats_param = dp_txrx_get_peer_stats_param,
	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
#if defined(IPA_OFFLOAD) && defined(QCA_ENHANCED_STATS_SUPPORT)
	.txrx_get_peer_stats = dp_ipa_txrx_get_peer_stats,
	.txrx_get_vdev_stats  = dp_ipa_txrx_get_vdev_stats,
	.txrx_get_pdev_stats = dp_ipa_txrx_get_pdev_stats,
#endif
	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
	.txrx_update_vdev_stats = dp_txrx_update_vdev_host_stats,
	.txrx_get_peer_delay_stats = dp_txrx_get_peer_delay_stats,
	.txrx_get_peer_jitter_stats = dp_txrx_get_peer_jitter_stats,
#ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
	.txrx_alloc_vdev_stats_id = dp_txrx_alloc_vdev_stats_id,
	.txrx_reset_vdev_stats_id = dp_txrx_reset_vdev_stats_id,
#endif
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	.get_peer_tx_capture_stats = dp_peer_get_tx_capture_stats,
	.get_pdev_tx_capture_stats = dp_pdev_get_tx_capture_stats,
#endif /* WLAN_TX_PKT_CAPTURE_ENH */
#ifdef HW_TX_DELAY_STATS_ENABLE
	.enable_disable_vdev_tx_delay_stats =
				dp_enable_disable_vdev_tx_delay_stats,
	.is_tx_delay_stats_enabled = dp_check_vdev_tx_delay_stats_enabled,
#endif
	.txrx_get_pdev_tid_stats = dp_pdev_get_tid_stats,
#ifdef WLAN_TELEMETRY_STATS_SUPPORT
	.txrx_pdev_telemetry_stats = dp_get_pdev_telemetry_stats,
	.txrx_peer_telemetry_stats = dp_get_peer_telemetry_stats,
#endif
	.txrx_get_peer_extd_rate_link_stats =
					dp_get_peer_extd_rate_link_stats,
	.get_pdev_obss_stats = dp_get_obss_stats,
	.clear_pdev_obss_pd_stats = dp_clear_pdev_obss_pd_stats,
	/* TODO */
};
14380 
/* Raw-mode ops table; no handlers are implemented yet */
static struct cdp_raw_ops dp_ops_raw = {
	/* TODO */
};
14384 
#ifdef PEER_FLOW_CONTROL
/* Peer flow-control ops; positional init of the single configure hook */
static struct cdp_pflow_ops dp_ops_pflow = {
	dp_tx_flow_ctrl_configure_pdev,
};
#endif /* PEER_FLOW_CONTROL */
14390 
#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
/* CFR (channel frequency response) ops; the legacy filter hook is unused */
static struct cdp_cfr_ops dp_ops_cfr = {
	.txrx_cfr_filter = NULL,
	.txrx_get_cfr_rcc = dp_get_cfr_rcc,
	.txrx_set_cfr_rcc = dp_set_cfr_rcc,
	.txrx_get_cfr_dbg_stats = dp_get_cfr_dbg_stats,
	.txrx_clear_cfr_dbg_stats = dp_clear_cfr_dbg_stats,
};
#endif
14400 
#ifdef WLAN_SUPPORT_MSCS
/* MSCS (mirrored stream classification service) ops */
static struct cdp_mscs_ops dp_ops_mscs = {
	.mscs_peer_lookup_n_get_priority = dp_mscs_peer_lookup_n_get_priority,
};
#endif
14406 
#ifdef WLAN_SUPPORT_MESH_LATENCY
/* Mesh latency ops */
static struct cdp_mesh_latency_ops dp_ops_mesh_latency = {
	.mesh_latency_update_peer_parameter =
		dp_mesh_latency_update_peer_parameter,
};
#endif
14413 
#ifdef WLAN_SUPPORT_SCS
/* SCS (stream classification service) ops */
static struct cdp_scs_ops dp_ops_scs = {
	.scs_peer_lookup_n_rule_match = dp_scs_peer_lookup_n_rule_match,
};
#endif
14419 
#ifdef CONFIG_SAWF_DEF_QUEUES
/* SAWF (service-aware wifi) default-queue and telemetry ops */
static struct cdp_sawf_ops dp_ops_sawf = {
	.sawf_def_queues_map_req = dp_sawf_def_queues_map_req,
	.sawf_def_queues_unmap_req = dp_sawf_def_queues_unmap_req,
	.sawf_def_queues_get_map_report =
		dp_sawf_def_queues_get_map_report,
#ifdef CONFIG_SAWF_STATS
	.txrx_get_peer_sawf_delay_stats = dp_sawf_get_peer_delay_stats,
	.txrx_get_peer_sawf_tx_stats = dp_sawf_get_peer_tx_stats,
	.sawf_mpdu_stats_req = dp_sawf_mpdu_stats_req,
	.sawf_mpdu_details_stats_req = dp_sawf_mpdu_details_stats_req,
	.txrx_sawf_set_mov_avg_params = dp_sawf_set_mov_avg_params,
	.txrx_sawf_set_sla_params = dp_sawf_set_sla_params,
	.txrx_sawf_init_telemtery_params = dp_sawf_init_telemetry_params,
	.telemetry_get_throughput_stats = dp_sawf_get_tx_stats,
	.telemetry_get_mpdu_stats = dp_sawf_get_mpdu_sched_stats,
	.telemetry_get_drop_stats = dp_sawf_get_drop_stats,
	.peer_config_ul = dp_sawf_peer_config_ul,
	/* "swaf" spelling comes from the cdp ops struct field name */
	.swaf_peer_is_sla_configured = dp_swaf_peer_is_sla_configured,
#endif
};
#endif
14442 
#if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
/**
 * dp_flush_ring_hptp() - Update ring shadow
 *			  register HP/TP address when runtime
 *                        resume
 * @soc: DP soc context
 * @hal_srng: srng to flush (may be NULL, in which case this is a no-op)
 *
 * If a HAL_SRNG_FLUSH_EVENT is pending on @hal_srng, run an empty
 * access start/end pair — presumably the access-end path syncs the HP/TP
 * shadow registers to HW (TODO confirm against hal_srng_access_end) —
 * then record the flush timestamp.
 *
 * Return: None
 */
static
void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
{
	/* hal_srng_get_clear_event() both tests and clears the event */
	if (hal_srng && hal_srng_get_clear_event(hal_srng,
						 HAL_SRNG_FLUSH_EVENT)) {
		/* Acquire the lock */
		hal_srng_access_start(soc->hal_soc, hal_srng);

		hal_srng_access_end(soc->hal_soc, hal_srng);

		hal_srng_set_flush_last_ts(hal_srng);

		dp_debug("flushed");
	}
}
#endif
14468 
14469 #ifdef DP_TX_TRACKING
14470 
14471 #define DP_TX_COMP_MAX_LATENCY_MS 60000
14472 /**
14473  * dp_tx_comp_delay_check() - calculate time latency for tx completion per pkt
14474  * @tx_desc: tx descriptor
14475  *
14476  * Calculate time latency for tx completion per pkt and trigger self recovery
14477  * when the delay is more than threshold value.
14478  *
14479  * Return: True if delay is more than threshold
14480  */
14481 static bool dp_tx_comp_delay_check(struct dp_tx_desc_s *tx_desc)
14482 {
14483 	uint64_t time_latency, timestamp_tick = tx_desc->timestamp_tick;
14484 	qdf_ktime_t current_time = qdf_ktime_real_get();
14485 	qdf_ktime_t timestamp = tx_desc->timestamp;
14486 
14487 	if (dp_tx_pkt_tracepoints_enabled()) {
14488 		if (!timestamp)
14489 			return false;
14490 
14491 		time_latency = qdf_ktime_to_ms(current_time) -
14492 				qdf_ktime_to_ms(timestamp);
14493 		if (time_latency >= DP_TX_COMP_MAX_LATENCY_MS) {
14494 			dp_err_rl("enqueued: %llu ms, current : %llu ms",
14495 				  timestamp, current_time);
14496 			return true;
14497 		}
14498 	} else {
14499 		if (!timestamp_tick)
14500 			return false;
14501 
14502 		current_time = qdf_system_ticks();
14503 		time_latency = qdf_system_ticks_to_msecs(current_time -
14504 							 timestamp_tick);
14505 		if (time_latency >= DP_TX_COMP_MAX_LATENCY_MS) {
14506 			dp_err_rl("enqueued: %u ms, current : %u ms",
14507 				  qdf_system_ticks_to_msecs(timestamp_tick),
14508 				  qdf_system_ticks_to_msecs(current_time));
14509 			return true;
14510 		}
14511 	}
14512 
14513 	return false;
14514 }
14515 
/**
 * dp_find_missing_tx_comp() - check for leaked descriptor in tx path
 * @soc - DP SOC context
 *
 * Parse through descriptors in all pools and validate magic number and
 * completion time. Trigger self recovery if magic value is corrupted.
 *
 * Return: None.
 */
static void dp_find_missing_tx_comp(struct dp_soc *soc)
{
	uint8_t i;
	uint32_t j;
	uint32_t num_desc, page_id, offset;
	uint16_t num_desc_per_page;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		tx_desc_pool = &soc->tx_desc[i];
		/* Skip pools that are empty, inactive, or whose backing
		 * pages have been freed.
		 */
		if (!(tx_desc_pool->pool_size) ||
		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
		    !(tx_desc_pool->desc_pages.cacheable_pages))
			continue;

		num_desc = tx_desc_pool->pool_size;
		num_desc_per_page =
			tx_desc_pool->desc_pages.num_element_per_page;
		for (j = 0; j < num_desc; j++) {
			page_id = j / num_desc_per_page;
			offset = j % num_desc_per_page;

			/* Re-checked each iteration — presumably guards
			 * against pool teardown racing this scan (TODO
			 * confirm locking model).
			 */
			if (qdf_unlikely(!(tx_desc_pool->
					 desc_pages.cacheable_pages)))
				break;

			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
			if (tx_desc->magic == DP_TX_MAGIC_PATTERN_FREE) {
				continue;
			} else if (tx_desc->magic ==
				   DP_TX_MAGIC_PATTERN_INUSE) {
				if (dp_tx_comp_delay_check(tx_desc)) {
					dp_err_rl("Tx completion not rcvd for id: %u",
						  tx_desc->id);
					/* Force-free only descriptors whose
					 * owning vdev is already gone.
					 */
					if (tx_desc->vdev_id == DP_INVALID_VDEV_ID) {
						tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
						dp_err_rl("Freed tx_desc %u",
							  tx_desc->id);
						dp_tx_comp_free_buf(soc,
								    tx_desc,
								    false);
						dp_tx_desc_release(tx_desc, i);
						DP_STATS_INC(soc,
							     tx.tx_comp_force_freed, 1);
					}
				}
			} else {
				/* Magic is neither FREE nor INUSE:
				 * descriptor memory corruption.
				 */
				dp_err_rl("tx desc %u corrupted, flags: 0x%x",
					  tx_desc->id, tx_desc->flags);
			}
		}
	}
}
#else
/* DP_TX_TRACKING disabled: leaked-descriptor scan compiles out to a no-op */
static inline void dp_find_missing_tx_comp(struct dp_soc *soc)
{
}
#endif
14584 
14585 #ifdef FEATURE_RUNTIME_PM
/**
 * dp_runtime_suspend() - ensure DP is ready to runtime suspend
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * DP is ready to runtime suspend if there are no pending TX packets.
 *
 * Return: QDF_STATUS_SUCCESS when suspend may proceed;
 *	   QDF_STATUS_E_AGAIN when TX is pending or DP is still referenced;
 *	   QDF_STATUS_E_INVAL for an invalid pdev id.
 */
static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;
	uint8_t i;
	int32_t tx_pending;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev is NULL");
		return QDF_STATUS_E_INVAL;
	}

	/* Abort if there are any pending TX packets */
	tx_pending = dp_get_tx_pending(dp_pdev_to_cdp_pdev(pdev));
	if (tx_pending) {
		dp_info_rl("%pK: Abort suspend due to pending TX packets %d",
			   soc, tx_pending);
		/* Scan for descriptors whose completion never arrived */
		dp_find_missing_tx_comp(soc);
		/* perform a force flush if tx is pending */
		for (i = 0; i < soc->num_tcl_data_rings; i++) {
			hal_srng_set_event(soc->tcl_data_ring[i].hal_srng,
					   HAL_SRNG_FLUSH_EVENT);
			dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
		}
		qdf_atomic_set(&soc->tx_pending_rtpm, 0);

		return QDF_STATUS_E_AGAIN;
	}

	/* Defer suspend while other DP users still hold a runtime ref */
	if (dp_runtime_get_refcount(soc)) {
		dp_init_info("refcount: %d", dp_runtime_get_refcount(soc));

		return QDF_STATUS_E_AGAIN;
	}

	/* Stop the poll timer so it does not fire while suspended */
	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_stop(&soc->int_timer);

	dp_rx_fst_update_pm_suspend_status(soc, true);

	return QDF_STATUS_SUCCESS;
}
14638 
/* Max attempts waiting for the DP runtime refcount to reach zero */
#define DP_FLUSH_WAIT_CNT 10
/* Sleep per wait attempt, in ms */
#define DP_RUNTIME_SUSPEND_WAIT_MS 10
/**
 * dp_runtime_resume() - ensure DP is ready to runtime resume
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * Resume DP for runtime PM.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	int i, suspend_wait = 0;

	/* Re-arm the poll timer when interrupts are emulated by polling */
	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

	/*
	 * Wait until dp runtime refcount becomes zero or time out, then flush
	 * pending tx for runtime suspend.
	 */
	while (dp_runtime_get_refcount(soc) &&
	       suspend_wait < DP_FLUSH_WAIT_CNT) {
		qdf_sleep(DP_RUNTIME_SUSPEND_WAIT_MS);
		suspend_wait++;
	}

	/* Service any pending flush events on the TCL data rings */
	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
	}
	qdf_atomic_set(&soc->tx_pending_rtpm, 0);

	/* REO command ring may also have a pending flush event */
	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
	dp_rx_fst_update_pm_suspend_status(soc, false);

	return QDF_STATUS_SUCCESS;
}
14678 #endif /* FEATURE_RUNTIME_PM */
14679 
14680 /**
14681  * dp_tx_get_success_ack_stats() - get tx success completion count
14682  * @soc_hdl: Datapath soc handle
14683  * @vdevid: vdev identifier
14684  *
14685  * Return: tx success ack count
14686  */
14687 static uint32_t dp_tx_get_success_ack_stats(struct cdp_soc_t *soc_hdl,
14688 					    uint8_t vdev_id)
14689 {
14690 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14691 	struct cdp_vdev_stats *vdev_stats = NULL;
14692 	uint32_t tx_success;
14693 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
14694 						     DP_MOD_ID_CDP);
14695 
14696 	if (!vdev) {
14697 		dp_cdp_err("%pK: Invalid vdev id %d", soc, vdev_id);
14698 		return 0;
14699 	}
14700 
14701 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
14702 	if (!vdev_stats) {
14703 		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats", soc);
14704 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
14705 		return 0;
14706 	}
14707 
14708 	dp_aggregate_vdev_stats(vdev, vdev_stats);
14709 
14710 	tx_success = vdev_stats->tx.tx_success.num;
14711 	qdf_mem_free(vdev_stats);
14712 
14713 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
14714 	return tx_success;
14715 }
14716 
14717 #ifdef WLAN_SUPPORT_DATA_STALL
14718 /**
14719  * dp_register_data_stall_detect_cb() - register data stall callback
14720  * @soc_hdl: Datapath soc handle
14721  * @pdev_id: id of data path pdev handle
14722  * @data_stall_detect_callback: data stall callback function
14723  *
14724  * Return: QDF_STATUS Enumeration
14725  */
14726 static
14727 QDF_STATUS dp_register_data_stall_detect_cb(
14728 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
14729 			data_stall_detect_cb data_stall_detect_callback)
14730 {
14731 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14732 	struct dp_pdev *pdev;
14733 
14734 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14735 	if (!pdev) {
14736 		dp_err("pdev NULL!");
14737 		return QDF_STATUS_E_INVAL;
14738 	}
14739 
14740 	pdev->data_stall_detect_callback = data_stall_detect_callback;
14741 	return QDF_STATUS_SUCCESS;
14742 }
14743 
14744 /**
14745  * dp_deregister_data_stall_detect_cb() - de-register data stall callback
14746  * @soc_hdl: Datapath soc handle
14747  * @pdev_id: id of data path pdev handle
14748  * @data_stall_detect_callback: data stall callback function
14749  *
14750  * Return: QDF_STATUS Enumeration
14751  */
14752 static
14753 QDF_STATUS dp_deregister_data_stall_detect_cb(
14754 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
14755 			data_stall_detect_cb data_stall_detect_callback)
14756 {
14757 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14758 	struct dp_pdev *pdev;
14759 
14760 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14761 	if (!pdev) {
14762 		dp_err("pdev NULL!");
14763 		return QDF_STATUS_E_INVAL;
14764 	}
14765 
14766 	pdev->data_stall_detect_callback = NULL;
14767 	return QDF_STATUS_SUCCESS;
14768 }
14769 
14770 /**
14771  * dp_txrx_post_data_stall_event() - post data stall event
14772  * @soc_hdl: Datapath soc handle
14773  * @indicator: Module triggering data stall
14774  * @data_stall_type: data stall event type
14775  * @pdev_id: pdev id
14776  * @vdev_id_bitmap: vdev id bitmap
14777  * @recovery_type: data stall recovery type
14778  *
14779  * Return: None
14780  */
14781 static void
14782 dp_txrx_post_data_stall_event(struct cdp_soc_t *soc_hdl,
14783 			      enum data_stall_log_event_indicator indicator,
14784 			      enum data_stall_log_event_type data_stall_type,
14785 			      uint32_t pdev_id, uint32_t vdev_id_bitmap,
14786 			      enum data_stall_log_recovery_type recovery_type)
14787 {
14788 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14789 	struct data_stall_event_info data_stall_info;
14790 	struct dp_pdev *pdev;
14791 
14792 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14793 	if (!pdev) {
14794 		dp_err("pdev NULL!");
14795 		return;
14796 	}
14797 
14798 	if (!pdev->data_stall_detect_callback) {
14799 		dp_err("data stall cb not registered!");
14800 		return;
14801 	}
14802 
14803 	dp_info("data_stall_type: %x pdev_id: %d",
14804 		data_stall_type, pdev_id);
14805 
14806 	data_stall_info.indicator = indicator;
14807 	data_stall_info.data_stall_type = data_stall_type;
14808 	data_stall_info.vdev_id_bitmap = vdev_id_bitmap;
14809 	data_stall_info.pdev_id = pdev_id;
14810 	data_stall_info.recovery_type = recovery_type;
14811 
14812 	pdev->data_stall_detect_callback(&data_stall_info);
14813 }
14814 #endif /* WLAN_SUPPORT_DATA_STALL */
14815 
14816 #ifdef WLAN_FEATURE_STATS_EXT
14817 /* rx hw stats event wait timeout in ms */
14818 #define DP_REO_STATUS_STATS_TIMEOUT 850
14819 /**
14820  * dp_txrx_ext_stats_request - request dp txrx extended stats request
14821  * @soc_hdl: soc handle
14822  * @pdev_id: pdev id
14823  * @req: stats request
14824  *
14825  * Return: QDF_STATUS
14826  */
14827 static QDF_STATUS
14828 dp_txrx_ext_stats_request(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
14829 			  struct cdp_txrx_ext_stats *req)
14830 {
14831 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
14832 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14833 	int i = 0;
14834 	int tcl_ring_full = 0;
14835 
14836 	if (!pdev) {
14837 		dp_err("pdev is null");
14838 		return QDF_STATUS_E_INVAL;
14839 	}
14840 
14841 	dp_aggregate_pdev_stats(pdev);
14842 
14843 	for(i = 0 ; i < MAX_TCL_DATA_RINGS; i++)
14844 		tcl_ring_full += soc->stats.tx.tcl_ring_full[i];
14845 
14846 	req->tx_msdu_enqueue = pdev->stats.tx_i.processed.num;
14847 	req->tx_msdu_overflow = tcl_ring_full;
14848 	/* Error rate at LMAC */
14849 	req->rx_mpdu_received = soc->ext_stats.rx_mpdu_received +
14850 				pdev->stats.err.fw_reported_rxdma_error;
14851 	/* only count error source from RXDMA */
14852 	req->rx_mpdu_error = pdev->stats.err.fw_reported_rxdma_error;
14853 
14854 	/* Error rate at above the MAC */
14855 	req->rx_mpdu_delivered = soc->ext_stats.rx_mpdu_received;
14856 	req->rx_mpdu_missed = pdev->stats.err.reo_error;
14857 
14858 	dp_info("ext stats: tx_msdu_enq = %u, tx_msdu_overflow = %u, "
14859 		"rx_mpdu_receive = %u, rx_mpdu_delivered = %u, "
14860 		"rx_mpdu_missed = %u, rx_mpdu_error = %u",
14861 		req->tx_msdu_enqueue,
14862 		req->tx_msdu_overflow,
14863 		req->rx_mpdu_received,
14864 		req->rx_mpdu_delivered,
14865 		req->rx_mpdu_missed,
14866 		req->rx_mpdu_error);
14867 
14868 	return QDF_STATUS_SUCCESS;
14869 }
14870 
14871 /**
14872  * dp_rx_hw_stats_cb - request rx hw stats response callback
14873  * @soc: soc handle
14874  * @cb_ctxt: callback context
14875  * @reo_status: reo command response status
14876  *
14877  * Return: None
14878  */
14879 static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
14880 			      union hal_reo_status *reo_status)
14881 {
14882 	struct dp_req_rx_hw_stats_t *rx_hw_stats = cb_ctxt;
14883 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
14884 	bool is_query_timeout;
14885 
14886 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
14887 	is_query_timeout = rx_hw_stats->is_query_timeout;
14888 	/* free the cb_ctxt if all pending tid stats query is received */
14889 	if (qdf_atomic_dec_and_test(&rx_hw_stats->pending_tid_stats_cnt)) {
14890 		if (!is_query_timeout) {
14891 			qdf_event_set(&soc->rx_hw_stats_event);
14892 			soc->is_last_stats_ctx_init = false;
14893 		}
14894 
14895 		qdf_mem_free(rx_hw_stats);
14896 	}
14897 
14898 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
14899 		dp_info("REO stats failure %d",
14900 			queue_status->header.status);
14901 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14902 		return;
14903 	}
14904 
14905 	if (!is_query_timeout) {
14906 		soc->ext_stats.rx_mpdu_received +=
14907 					queue_status->mpdu_frms_cnt;
14908 		soc->ext_stats.rx_mpdu_missed +=
14909 					queue_status->hole_cnt;
14910 	}
14911 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14912 }
14913 
14914 /**
14915  * dp_request_rx_hw_stats - request rx hardware stats
14916  * @soc_hdl: soc handle
14917  * @vdev_id: vdev id
14918  *
14919  * Return: None
14920  */
14921 static QDF_STATUS
14922 dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
14923 {
14924 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
14925 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
14926 						     DP_MOD_ID_CDP);
14927 	struct dp_peer *peer = NULL;
14928 	QDF_STATUS status;
14929 	struct dp_req_rx_hw_stats_t *rx_hw_stats;
14930 	int rx_stats_sent_cnt = 0;
14931 	uint32_t last_rx_mpdu_received;
14932 	uint32_t last_rx_mpdu_missed;
14933 
14934 	if (!vdev) {
14935 		dp_err("vdev is null for vdev_id: %u", vdev_id);
14936 		status = QDF_STATUS_E_INVAL;
14937 		goto out;
14938 	}
14939 
14940 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_CDP);
14941 
14942 	if (!peer) {
14943 		dp_err("Peer is NULL");
14944 		status = QDF_STATUS_E_INVAL;
14945 		goto out;
14946 	}
14947 
14948 	rx_hw_stats = qdf_mem_malloc(sizeof(*rx_hw_stats));
14949 
14950 	if (!rx_hw_stats) {
14951 		dp_err("malloc failed for hw stats structure");
14952 		status = QDF_STATUS_E_INVAL;
14953 		goto out;
14954 	}
14955 
14956 	qdf_event_reset(&soc->rx_hw_stats_event);
14957 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
14958 	/* save the last soc cumulative stats and reset it to 0 */
14959 	last_rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
14960 	last_rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
14961 	soc->ext_stats.rx_mpdu_received = 0;
14962 	soc->ext_stats.rx_mpdu_missed = 0;
14963 
14964 	dp_debug("HW stats query start");
14965 	rx_stats_sent_cnt =
14966 		dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, rx_hw_stats);
14967 	if (!rx_stats_sent_cnt) {
14968 		dp_err("no tid stats sent successfully");
14969 		qdf_mem_free(rx_hw_stats);
14970 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14971 		status = QDF_STATUS_E_INVAL;
14972 		goto out;
14973 	}
14974 	qdf_atomic_set(&rx_hw_stats->pending_tid_stats_cnt,
14975 		       rx_stats_sent_cnt);
14976 	rx_hw_stats->is_query_timeout = false;
14977 	soc->is_last_stats_ctx_init = true;
14978 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
14979 
14980 	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
14981 				       DP_REO_STATUS_STATS_TIMEOUT);
14982 	dp_debug("HW stats query end with %d", rx_stats_sent_cnt);
14983 
14984 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
14985 	if (status != QDF_STATUS_SUCCESS) {
14986 		dp_info("partial rx hw stats event collected with %d",
14987 			qdf_atomic_read(
14988 				&rx_hw_stats->pending_tid_stats_cnt));
14989 		if (soc->is_last_stats_ctx_init)
14990 			rx_hw_stats->is_query_timeout = true;
14991 		/**
14992 		 * If query timeout happened, use the last saved stats
14993 		 * for this time query.
14994 		 */
14995 		soc->ext_stats.rx_mpdu_received = last_rx_mpdu_received;
14996 		soc->ext_stats.rx_mpdu_missed = last_rx_mpdu_missed;
14997 		DP_STATS_INC(soc, rx.rx_hw_stats_timeout, 1);
14998 
14999 	}
15000 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
15001 
15002 out:
15003 	if (peer)
15004 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
15005 	if (vdev)
15006 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
15007 	DP_STATS_INC(soc, rx.rx_hw_stats_requested, 1);
15008 
15009 	return status;
15010 }
15011 
15012 /**
15013  * dp_reset_rx_hw_ext_stats - Reset rx hardware ext stats
15014  * @soc_hdl: soc handle
15015  *
15016  * Return: None
15017  */
15018 static
15019 void dp_reset_rx_hw_ext_stats(struct cdp_soc_t *soc_hdl)
15020 {
15021 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
15022 
15023 	soc->ext_stats.rx_mpdu_received = 0;
15024 	soc->ext_stats.rx_mpdu_missed = 0;
15025 }
15026 #endif /* WLAN_FEATURE_STATS_EXT */
15027 
/**
 * dp_get_tx_rings_grp_bitmap() - get tx rings group bitmap from soc cfg
 * @soc_hdl: Datapath soc handle
 *
 * Return: tx rings group bitmap from the wlan cfg context
 */
static
uint32_t dp_get_tx_rings_grp_bitmap(struct cdp_soc_t *soc_hdl)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	return soc->wlan_cfg_ctx->tx_rings_grp_bitmap;
}
15035 
15036 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
15037 /**
15038  * dp_mark_first_wakeup_packet() - set flag to indicate that
15039  *    fw is compatible for marking first packet after wow wakeup
15040  * @soc_hdl: Datapath soc handle
15041  * @pdev_id: id of data path pdev handle
15042  * @value: 1 for enabled/ 0 for disabled
15043  *
15044  * Return: None
15045  */
15046 static void dp_mark_first_wakeup_packet(struct cdp_soc_t *soc_hdl,
15047 					uint8_t pdev_id, uint8_t value)
15048 {
15049 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15050 	struct dp_pdev *pdev;
15051 
15052 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15053 	if (!pdev) {
15054 		dp_err("pdev is NULL");
15055 		return;
15056 	}
15057 
15058 	pdev->is_first_wakeup_packet = value;
15059 }
15060 #endif
15061 
15062 #ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
15063 /**
15064  * dp_set_peer_txq_flush_config() - Set the peer txq flush configuration
15065  * @soc_hdl: Opaque handle to the DP soc object
15066  * @vdev_id: VDEV identifier
15067  * @mac: MAC address of the peer
15068  * @ac: access category mask
15069  * @tid: TID mask
15070  * @policy: Flush policy
15071  *
15072  * Return: 0 on success, errno on failure
15073  */
15074 static int dp_set_peer_txq_flush_config(struct cdp_soc_t *soc_hdl,
15075 					uint8_t vdev_id, uint8_t *mac,
15076 					uint8_t ac, uint32_t tid,
15077 					enum cdp_peer_txq_flush_policy policy)
15078 {
15079 	struct dp_soc *soc;
15080 
15081 	if (!soc_hdl) {
15082 		dp_err("soc is null");
15083 		return -EINVAL;
15084 	}
15085 	soc = cdp_soc_t_to_dp_soc(soc_hdl);
15086 	return target_if_peer_txq_flush_config(soc->ctrl_psoc, vdev_id,
15087 					       mac, ac, tid, policy);
15088 }
15089 #endif
15090 
15091 #ifdef CONNECTIVITY_PKTLOG
15092 /**
15093  * dp_register_packetdump_callback() - registers
15094  *  tx data packet, tx mgmt. packet and rx data packet
15095  *  dump callback handler.
15096  *
15097  * @soc_hdl: Datapath soc handle
15098  * @pdev_id: id of data path pdev handle
15099  * @dp_tx_packetdump_cb: tx packetdump cb
15100  * @dp_rx_packetdump_cb: rx packetdump cb
15101  *
15102  * This function is used to register tx data pkt, tx mgmt.
15103  * pkt and rx data pkt dump callback
15104  *
15105  * Return: None
15106  *
15107  */
15108 static inline
15109 void dp_register_packetdump_callback(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
15110 				     ol_txrx_pktdump_cb dp_tx_packetdump_cb,
15111 				     ol_txrx_pktdump_cb dp_rx_packetdump_cb)
15112 {
15113 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15114 	struct dp_pdev *pdev;
15115 
15116 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15117 	if (!pdev) {
15118 		dp_err("pdev is NULL!");
15119 		return;
15120 	}
15121 
15122 	pdev->dp_tx_packetdump_cb = dp_tx_packetdump_cb;
15123 	pdev->dp_rx_packetdump_cb = dp_rx_packetdump_cb;
15124 }
15125 
15126 /**
15127  * dp_deregister_packetdump_callback() - deregidters
15128  *  tx data packet, tx mgmt. packet and rx data packet
15129  *  dump callback handler
15130  * @soc_hdl: Datapath soc handle
15131  * @pdev_id: id of data path pdev handle
15132  *
15133  * This function is used to deregidter tx data pkt.,
15134  * tx mgmt. pkt and rx data pkt. dump callback
15135  *
15136  * Return: None
15137  *
15138  */
15139 static inline
15140 void dp_deregister_packetdump_callback(struct cdp_soc_t *soc_hdl,
15141 				       uint8_t pdev_id)
15142 {
15143 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15144 	struct dp_pdev *pdev;
15145 
15146 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15147 	if (!pdev) {
15148 		dp_err("pdev is NULL!");
15149 		return;
15150 	}
15151 
15152 	pdev->dp_tx_packetdump_cb = NULL;
15153 	pdev->dp_rx_packetdump_cb = NULL;
15154 }
15155 #endif
15156 
15157 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
15158 /**
15159  * dp_set_bus_vote_lvl_high() - Take a vote on bus bandwidth from dp
15160  * @soc_hdl: Datapath soc handle
15161  * @high: whether the bus bw is high or not
15162  *
15163  * Return: void
15164  */
15165 static void
15166 dp_set_bus_vote_lvl_high(ol_txrx_soc_handle soc_hdl, bool high)
15167 {
15168 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15169 
15170 	soc->high_throughput = high;
15171 }
15172 
15173 /**
15174  * dp_get_bus_vote_lvl_high() - get bus bandwidth vote to dp
15175  * @soc_hdl: Datapath soc handle
15176  *
15177  * Return: bool
15178  */
15179 static bool
15180 dp_get_bus_vote_lvl_high(ol_txrx_soc_handle soc_hdl)
15181 {
15182 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15183 
15184 	return soc->high_throughput;
15185 }
15186 #endif
15187 
15188 #ifdef DP_PEER_EXTENDED_API
/* miscellaneous cdp ops table; entries are feature-gated at compile time */
static struct cdp_misc_ops dp_ops_misc = {
#ifdef FEATURE_WLAN_TDLS
	.tx_non_std = dp_tx_non_std,
#endif /* FEATURE_WLAN_TDLS */
	.get_opmode = dp_get_opmode,
#ifdef FEATURE_RUNTIME_PM
	.runtime_suspend = dp_runtime_suspend,
	.runtime_resume = dp_runtime_resume,
#endif /* FEATURE_RUNTIME_PM */
	.get_num_rx_contexts = dp_get_num_rx_contexts,
	.get_tx_ack_stats = dp_tx_get_success_ack_stats,
#ifdef WLAN_SUPPORT_DATA_STALL
	.txrx_data_stall_cb_register = dp_register_data_stall_detect_cb,
	.txrx_data_stall_cb_deregister = dp_deregister_data_stall_detect_cb,
	.txrx_post_data_stall_event = dp_txrx_post_data_stall_event,
#endif

#ifdef WLAN_FEATURE_STATS_EXT
	.txrx_ext_stats_request = dp_txrx_ext_stats_request,
	.request_rx_hw_stats = dp_request_rx_hw_stats,
	.reset_rx_hw_ext_stats = dp_reset_rx_hw_ext_stats,
#endif /* WLAN_FEATURE_STATS_EXT */
	.vdev_inform_ll_conn = dp_vdev_inform_ll_conn,
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
	.set_swlm_enable = dp_soc_set_swlm_enable,
	.is_swlm_enabled = dp_soc_is_swlm_enabled,
#endif
	.display_txrx_hw_info = dp_display_srng_info,
	.get_tx_rings_grp_bitmap = dp_get_tx_rings_grp_bitmap,
#ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
	.mark_first_wakeup_packet = dp_mark_first_wakeup_packet,
#endif
#ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
	.set_peer_txq_flush_config = dp_set_peer_txq_flush_config,
#endif
#ifdef CONNECTIVITY_PKTLOG
	.register_pktdump_cb = dp_register_packetdump_callback,
	.unregister_pktdump_cb = dp_deregister_packetdump_callback,
#endif
#ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
	.set_bus_vote_lvl_high = dp_set_bus_vote_lvl_high,
	.get_bus_vote_lvl_high = dp_get_bus_vote_lvl_high,
#endif
};
15233 #endif
15234 
15235 #ifdef DP_FLOW_CTL
/* tx flow control ops; populated only with flow control v2 */
static struct cdp_flowctl_ops dp_ops_flowctl = {
	/* WIFI 3.0 DP implement as required. */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	.flow_pool_map_handler = dp_tx_flow_pool_map,
	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
	.register_pause_cb = dp_txrx_register_pause_cb,
	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
};

/* legacy (lmac-level) flow control ops - empty for WIFI 3.0 DP */
static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
15250 #endif
15251 
15252 #ifdef IPA_OFFLOAD
/* IPA offload ops: resource setup, pipe control and stats handlers */
static struct cdp_ipa_ops dp_ops_ipa = {
	.ipa_get_resource = dp_ipa_get_resource,
	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
	.ipa_iounmap_doorbell_vaddr = dp_ipa_iounmap_doorbell_vaddr,
	.ipa_op_response = dp_ipa_op_response,
	.ipa_register_op_cb = dp_ipa_register_op_cb,
	.ipa_deregister_op_cb = dp_ipa_deregister_op_cb,
	.ipa_get_stat = dp_ipa_get_stat,
	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
	.ipa_setup = dp_ipa_setup,
	.ipa_cleanup = dp_ipa_cleanup,
	.ipa_setup_iface = dp_ipa_setup_iface,
	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
	.ipa_enable_pipes = dp_ipa_enable_pipes,
	.ipa_disable_pipes = dp_ipa_disable_pipes,
	.ipa_set_perf_level = dp_ipa_set_perf_level,
	.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd,
	.ipa_tx_buf_smmu_mapping = dp_ipa_tx_buf_smmu_mapping,
	.ipa_tx_buf_smmu_unmapping = dp_ipa_tx_buf_smmu_unmapping,
#ifdef QCA_ENHANCED_STATS_SUPPORT
	.ipa_update_peer_rx_stats = dp_ipa_update_peer_rx_stats,
#endif
#ifdef IPA_WDS_EASYMESH_FEATURE
	.ipa_ast_create = dp_ipa_ast_create,
#endif
};
15281 #endif
15282 
15283 #ifdef DP_POWER_SAVE
/**
 * dp_bus_suspend() - handle bus suspend request
 * @soc_hdl: datapath soc handle
 * @pdev_id: data path pdev handle id
 *
 * Drains pending TX (up to SUSPEND_DRAIN_WAIT ms), then stops the poll
 * and monitor-reap timers and marks the FST as suspended.
 *
 * Return: QDF_STATUS_SUCCESS, QDF_STATUS_E_INVAL on bad pdev_id,
 *         QDF_STATUS_E_TIMEOUT if TX could not be drained
 */
static QDF_STATUS dp_bus_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	int timeout = SUSPEND_DRAIN_WAIT;
	int drain_wait_delay = 50; /* 50 ms */
	int32_t tx_pending;

	if (qdf_unlikely(!pdev)) {
		dp_err("pdev is NULL");
		return QDF_STATUS_E_INVAL;
	}

	/* Abort if there are any pending TX packets */
	while ((tx_pending = dp_get_tx_pending((struct cdp_pdev *)pdev))) {
		qdf_sleep(drain_wait_delay);
		if (timeout <= 0) {
			/* drain budget exhausted: refuse the suspend */
			dp_info("TX frames are pending %d, abort suspend",
				tx_pending);
			dp_find_missing_tx_comp(soc);
			return QDF_STATUS_E_TIMEOUT;
		}
		timeout = timeout - drain_wait_delay;
	}

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_stop(&soc->int_timer);

	/* Stop monitor reap timer and reap any pending frames in ring */
	dp_monitor_reap_timer_suspend(soc);

	dp_suspend_fse_cache_flush(soc);
	dp_rx_fst_update_pm_suspend_status(soc, true);

	return QDF_STATUS_SUCCESS;
}
15320 
15321 static QDF_STATUS dp_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
15322 {
15323 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15324 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15325 	uint8_t i;
15326 
15327 	if (qdf_unlikely(!pdev)) {
15328 		dp_err("pdev is NULL");
15329 		return QDF_STATUS_E_INVAL;
15330 	}
15331 
15332 	if (soc->intr_mode == DP_INTR_POLL)
15333 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
15334 
15335 	/* Start monitor reap timer */
15336 	dp_monitor_reap_timer_start(soc, CDP_MON_REAP_SOURCE_ANY);
15337 
15338 	dp_resume_fse_cache_flush(soc);
15339 
15340 	for (i = 0; i < soc->num_tcl_data_rings; i++)
15341 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
15342 
15343 	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
15344 	dp_rx_fst_update_pm_suspend_status(soc, false);
15345 
15346 	dp_rx_fst_requeue_wq(soc);
15347 
15348 	return QDF_STATUS_SUCCESS;
15349 }
15350 
15351 /**
15352  * dp_process_wow_ack_rsp() - process wow ack response
15353  * @soc_hdl: datapath soc handle
15354  * @pdev_id: data path pdev handle id
15355  *
15356  * Return: none
15357  */
15358 static void dp_process_wow_ack_rsp(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
15359 {
15360 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15361 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15362 
15363 	if (qdf_unlikely(!pdev)) {
15364 		dp_err("pdev is NULL");
15365 		return;
15366 	}
15367 
15368 	/*
15369 	 * As part of wow enable FW disables the mon status ring and in wow ack
15370 	 * response from FW reap mon status ring to make sure no packets pending
15371 	 * in the ring.
15372 	 */
15373 	dp_monitor_reap_timer_suspend(soc);
15374 }
15375 
15376 /**
15377  * dp_process_target_suspend_req() - process target suspend request
15378  * @soc_hdl: datapath soc handle
15379  * @pdev_id: data path pdev handle id
15380  *
15381  * Return: none
15382  */
15383 static void dp_process_target_suspend_req(struct cdp_soc_t *soc_hdl,
15384 					  uint8_t pdev_id)
15385 {
15386 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15387 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15388 
15389 	if (qdf_unlikely(!pdev)) {
15390 		dp_err("pdev is NULL");
15391 		return;
15392 	}
15393 
15394 	/* Stop monitor reap timer and reap any pending frames in ring */
15395 	dp_monitor_reap_timer_suspend(soc);
15396 }
15397 
/* bus suspend/resume and target-suspend/wow handlers exported via cdp */
static struct cdp_bus_ops dp_ops_bus = {
	.bus_suspend = dp_bus_suspend,
	.bus_resume = dp_bus_resume,
	.process_wow_ack_rsp = dp_process_wow_ack_rsp,
	.process_target_suspend_req = dp_process_target_suspend_req
};
15404 #endif
15405 
15406 #ifdef DP_FLOW_CTL
/* tx throttle ops - empty for WIFI 3.0 DP */
static struct cdp_throttle_ops dp_ops_throttle = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

/* cfg ops - empty for WIFI 3.0 DP */
static struct cdp_cfg_ops dp_ops_cfg = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
15414 #endif
15415 
15416 #ifdef DP_PEER_EXTENDED_API
/* OCB ops - empty for WIFI 3.0 DP */
static struct cdp_ocb_ops dp_ops_ocb = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

/* mobile stats ops: only stats clearing is provided */
static struct cdp_mob_stats_ops dp_ops_mob_stats = {
	.clear_stats = dp_txrx_clear_dump_stats,
};

/* peer ops exposed through the peer extended API */
static struct cdp_peer_ops dp_ops_peer = {
	.register_peer = dp_register_peer,
	.clear_peer = dp_clear_peer,
	.find_peer_exist = dp_find_peer_exist,
	.find_peer_exist_on_vdev = dp_find_peer_exist_on_vdev,
	.find_peer_exist_on_other_vdev = dp_find_peer_exist_on_other_vdev,
	.peer_state_update = dp_peer_state_update,
	.get_vdevid = dp_get_vdevid,
	.get_vdev_by_peer_addr = dp_get_vdev_by_peer_addr,
	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
	.get_peer_state = dp_get_peer_state,
	.peer_flush_frags = dp_peer_flush_frags,
	.set_peer_as_tdls_peer = dp_set_peer_as_tdls_peer,
};
15439 #endif
15440 
15441 static void dp_soc_txrx_ops_attach(struct dp_soc *soc)
15442 {
15443 	soc->cdp_soc.ops->cmn_drv_ops = &dp_ops_cmn;
15444 	soc->cdp_soc.ops->ctrl_ops = &dp_ops_ctrl;
15445 	soc->cdp_soc.ops->me_ops = &dp_ops_me;
15446 	soc->cdp_soc.ops->host_stats_ops = &dp_ops_host_stats;
15447 	soc->cdp_soc.ops->wds_ops = &dp_ops_wds;
15448 	soc->cdp_soc.ops->raw_ops = &dp_ops_raw;
15449 #ifdef PEER_FLOW_CONTROL
15450 	soc->cdp_soc.ops->pflow_ops = &dp_ops_pflow;
15451 #endif /* PEER_FLOW_CONTROL */
15452 #ifdef DP_PEER_EXTENDED_API
15453 	soc->cdp_soc.ops->misc_ops = &dp_ops_misc;
15454 	soc->cdp_soc.ops->ocb_ops = &dp_ops_ocb;
15455 	soc->cdp_soc.ops->peer_ops = &dp_ops_peer;
15456 	soc->cdp_soc.ops->mob_stats_ops = &dp_ops_mob_stats;
15457 #endif
15458 #ifdef DP_FLOW_CTL
15459 	soc->cdp_soc.ops->cfg_ops = &dp_ops_cfg;
15460 	soc->cdp_soc.ops->flowctl_ops = &dp_ops_flowctl;
15461 	soc->cdp_soc.ops->l_flowctl_ops = &dp_ops_l_flowctl;
15462 	soc->cdp_soc.ops->throttle_ops = &dp_ops_throttle;
15463 #endif
15464 #ifdef IPA_OFFLOAD
15465 	soc->cdp_soc.ops->ipa_ops = &dp_ops_ipa;
15466 #endif
15467 #ifdef DP_POWER_SAVE
15468 	soc->cdp_soc.ops->bus_ops = &dp_ops_bus;
15469 #endif
15470 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
15471 	soc->cdp_soc.ops->cfr_ops = &dp_ops_cfr;
15472 #endif
15473 #ifdef WLAN_SUPPORT_MSCS
15474 	soc->cdp_soc.ops->mscs_ops = &dp_ops_mscs;
15475 #endif
15476 #ifdef WLAN_SUPPORT_MESH_LATENCY
15477 	soc->cdp_soc.ops->mesh_latency_ops = &dp_ops_mesh_latency;
15478 #endif
15479 #ifdef CONFIG_SAWF_DEF_QUEUES
15480 	soc->cdp_soc.ops->sawf_ops = &dp_ops_sawf;
15481 #endif
15482 #ifdef WLAN_SUPPORT_SCS
15483 	soc->cdp_soc.ops->scs_ops = &dp_ops_scs;
15484 #endif
15485 };
15486 
15487 /*
15488  * dp_soc_set_txrx_ring_map()
15489  * @dp_soc: DP handler for soc
15490  *
15491  * Return: Void
15492  */
15493 void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
15494 {
15495 	uint32_t i;
15496 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
15497 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
15498 	}
15499 }
15500 
15501 qdf_export_symbol(dp_soc_set_txrx_ring_map);
15502 
15503 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
15504 	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574) || \
15505 	defined(QCA_WIFI_QCA5332)
15506 /**
15507  * dp_soc_attach_wifi3() - Attach txrx SOC
15508  * @ctrl_psoc: Opaque SOC handle from control plane
15509  * @params: SOC attach params
15510  *
15511  * Return: DP SOC handle on success, NULL on failure
15512  */
15513 struct cdp_soc_t *
15514 dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
15515 		    struct cdp_soc_attach_params *params)
15516 {
15517 	struct dp_soc *dp_soc = NULL;
15518 
15519 	dp_soc = dp_soc_attach(ctrl_psoc, params);
15520 
15521 	return dp_soc_to_cdp_soc_t(dp_soc);
15522 }
15523 
15524 static inline void dp_soc_set_def_pdev(struct dp_soc *soc)
15525 {
15526 	int lmac_id;
15527 
15528 	for (lmac_id = 0; lmac_id < MAX_NUM_LMAC_HW; lmac_id++) {
15529 		/*Set default host PDEV ID for lmac_id*/
15530 		wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
15531 				      INVALID_PDEV_ID, lmac_id);
15532 	}
15533 }
15534 
15535 static uint32_t
15536 dp_get_link_desc_id_start(uint16_t arch_id)
15537 {
15538 	switch (arch_id) {
15539 	case CDP_ARCH_TYPE_LI:
15540 		return LINK_DESC_ID_START_21_BITS_COOKIE;
15541 	case CDP_ARCH_TYPE_BE:
15542 		return LINK_DESC_ID_START_20_BITS_COOKIE;
15543 	default:
15544 		dp_err("unknown arch_id 0x%x", arch_id);
15545 		QDF_BUG(0);
15546 		return LINK_DESC_ID_START_21_BITS_COOKIE;
15547 	}
15548 }
15549 
15550 /**
15551  * dp_soc_attach() - Attach txrx SOC
15552  * @ctrl_psoc: Opaque SOC handle from control plane
15553  * @params: SOC attach params
15554  *
15555  * Return: DP SOC handle on success, NULL on failure
15556  */
15557 static struct dp_soc *
15558 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
15559 	      struct cdp_soc_attach_params *params)
15560 {
15561 	struct dp_soc *soc =  NULL;
15562 	uint16_t arch_id;
15563 	struct hif_opaque_softc *hif_handle = params->hif_handle;
15564 	qdf_device_t qdf_osdev = params->qdf_osdev;
15565 	struct ol_if_ops *ol_ops = params->ol_ops;
15566 	uint16_t device_id = params->device_id;
15567 
15568 	if (!hif_handle) {
15569 		dp_err("HIF handle is NULL");
15570 		goto fail0;
15571 	}
15572 	arch_id = cdp_get_arch_type_from_devid(device_id);
15573 	soc = qdf_mem_malloc(dp_get_soc_context_size(device_id));
15574 	if (!soc) {
15575 		dp_err("DP SOC memory allocation failed");
15576 		goto fail0;
15577 	}
15578 
15579 	dp_info("soc memory allocated %pK", soc);
15580 	soc->hif_handle = hif_handle;
15581 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
15582 	if (!soc->hal_soc)
15583 		goto fail1;
15584 
15585 	hif_get_cmem_info(soc->hif_handle,
15586 			  &soc->cmem_base,
15587 			  &soc->cmem_total_size);
15588 	soc->cmem_avail_size = soc->cmem_total_size;
15589 	soc->device_id = device_id;
15590 	soc->cdp_soc.ops =
15591 		(struct cdp_ops *)qdf_mem_malloc(sizeof(struct cdp_ops));
15592 	if (!soc->cdp_soc.ops)
15593 		goto fail1;
15594 
15595 	dp_soc_txrx_ops_attach(soc);
15596 	soc->cdp_soc.ol_ops = ol_ops;
15597 	soc->ctrl_psoc = ctrl_psoc;
15598 	soc->osdev = qdf_osdev;
15599 	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
15600 	hal_rx_get_tlv_size(soc->hal_soc, &soc->rx_pkt_tlv_size,
15601 			    &soc->rx_mon_pkt_tlv_size);
15602 	soc->idle_link_bm_id = hal_get_idle_link_bm_id(soc->hal_soc,
15603 						       params->mlo_chip_id);
15604 	soc->features.dmac_cmn_src_rxbuf_ring_enabled =
15605 		hal_dmac_cmn_src_rxbuf_ring_get(soc->hal_soc);
15606 	soc->arch_id = arch_id;
15607 	soc->link_desc_id_start =
15608 			dp_get_link_desc_id_start(soc->arch_id);
15609 	dp_configure_arch_ops(soc);
15610 
15611 	/* Reset wbm sg list and flags */
15612 	dp_rx_wbm_sg_list_reset(soc);
15613 
15614 	dp_soc_cfg_history_attach(soc);
15615 	dp_soc_tx_hw_desc_history_attach(soc);
15616 	dp_soc_rx_history_attach(soc);
15617 	dp_soc_mon_status_ring_history_attach(soc);
15618 	dp_soc_tx_history_attach(soc);
15619 	wlan_set_srng_cfg(&soc->wlan_srng_cfg);
15620 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
15621 	if (!soc->wlan_cfg_ctx) {
15622 		dp_err("wlan_cfg_ctx failed\n");
15623 		goto fail2;
15624 	}
15625 	dp_soc_cfg_attach(soc);
15626 
15627 	if (dp_hw_link_desc_pool_banks_alloc(soc, WLAN_INVALID_PDEV_ID)) {
15628 		dp_err("failed to allocate link desc pool banks");
15629 		goto fail3;
15630 	}
15631 
15632 	if (dp_hw_link_desc_ring_alloc(soc)) {
15633 		dp_err("failed to allocate link_desc_ring");
15634 		goto fail4;
15635 	}
15636 
15637 	if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_attach(soc,
15638 								 params))) {
15639 		dp_err("unable to do target specific attach");
15640 		goto fail5;
15641 	}
15642 
15643 	if (dp_soc_srng_alloc(soc)) {
15644 		dp_err("failed to allocate soc srng rings");
15645 		goto fail6;
15646 	}
15647 
15648 	if (dp_soc_tx_desc_sw_pools_alloc(soc)) {
15649 		dp_err("dp_soc_tx_desc_sw_pools_alloc failed");
15650 		goto fail7;
15651 	}
15652 
15653 	if (!dp_monitor_modularized_enable()) {
15654 		if (dp_mon_soc_attach_wrapper(soc)) {
15655 			dp_err("failed to attach monitor");
15656 			goto fail8;
15657 		}
15658 	}
15659 
15660 	if (dp_sysfs_initialize_stats(soc) != QDF_STATUS_SUCCESS) {
15661 		dp_err("failed to initialize dp stats sysfs file");
15662 		dp_sysfs_deinitialize_stats(soc);
15663 	}
15664 
15665 	dp_soc_swlm_attach(soc);
15666 	dp_soc_set_interrupt_mode(soc);
15667 	dp_soc_set_def_pdev(soc);
15668 
15669 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
15670 		qdf_dma_mem_stats_read(),
15671 		qdf_heap_mem_stats_read(),
15672 		qdf_skb_total_mem_stats_read());
15673 
15674 	return soc;
15675 fail8:
15676 	dp_soc_tx_desc_sw_pools_free(soc);
15677 fail7:
15678 	dp_soc_srng_free(soc);
15679 fail6:
15680 	soc->arch_ops.txrx_soc_detach(soc);
15681 fail5:
15682 	dp_hw_link_desc_ring_free(soc);
15683 fail4:
15684 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
15685 fail3:
15686 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
15687 fail2:
15688 	qdf_mem_free(soc->cdp_soc.ops);
15689 fail1:
15690 	qdf_mem_free(soc);
15691 fail0:
15692 	return NULL;
15693 }
15694 
15695 /**
15696  * dp_soc_init() - Initialize txrx SOC
15697  * @dp_soc: Opaque DP SOC handle
15698  * @htc_handle: Opaque HTC handle
15699  * @hif_handle: Opaque HIF handle
15700  *
15701  * Return: DP SOC handle on success, NULL on failure
15702  */
15703 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
15704 		  struct hif_opaque_softc *hif_handle)
15705 {
15706 	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
15707 	bool is_monitor_mode = false;
15708 	uint8_t i;
15709 	int num_dp_msi;
15710 
15711 	wlan_minidump_log(soc, sizeof(*soc), soc->ctrl_psoc,
15712 			  WLAN_MD_DP_SOC, "dp_soc");
15713 
15714 	soc->hif_handle = hif_handle;
15715 
15716 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
15717 	if (!soc->hal_soc)
15718 		goto fail0;
15719 
15720 	if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_init(soc))) {
15721 		dp_err("unable to do target specific init");
15722 		goto fail0;
15723 	}
15724 
15725 	htt_soc = htt_soc_attach(soc, htc_handle);
15726 	if (!htt_soc)
15727 		goto fail1;
15728 
15729 	soc->htt_handle = htt_soc;
15730 
15731 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
15732 		goto fail2;
15733 
15734 	htt_set_htc_handle(htt_soc, htc_handle);
15735 
15736 	dp_soc_cfg_init(soc);
15737 
15738 	dp_monitor_soc_cfg_init(soc);
15739 	/* Reset/Initialize wbm sg list and flags */
15740 	dp_rx_wbm_sg_list_reset(soc);
15741 
15742 	/* Note: Any SRNG ring initialization should happen only after
15743 	 * Interrupt mode is set and followed by filling up the
15744 	 * interrupt mask. IT SHOULD ALWAYS BE IN THIS ORDER.
15745 	 */
15746 	dp_soc_set_interrupt_mode(soc);
15747 	if (soc->cdp_soc.ol_ops->get_con_mode &&
15748 	    soc->cdp_soc.ol_ops->get_con_mode() ==
15749 	    QDF_GLOBAL_MONITOR_MODE) {
15750 		is_monitor_mode = true;
15751 		soc->curr_rx_pkt_tlv_size = soc->rx_mon_pkt_tlv_size;
15752 	} else {
15753 		soc->curr_rx_pkt_tlv_size = soc->rx_pkt_tlv_size;
15754 	}
15755 
15756 	num_dp_msi = dp_get_num_msi_available(soc, soc->intr_mode);
15757 	if (num_dp_msi < 0) {
15758 		dp_init_err("%pK: dp_interrupt assignment failed", soc);
15759 		goto fail3;
15760 	}
15761 
15762 	wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, num_dp_msi,
15763 				     soc->intr_mode, is_monitor_mode);
15764 
15765 	/* initialize WBM_IDLE_LINK ring */
15766 	if (dp_hw_link_desc_ring_init(soc)) {
15767 		dp_init_err("%pK: dp_hw_link_desc_ring_init failed", soc);
15768 		goto fail3;
15769 	}
15770 
15771 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
15772 
15773 	if (dp_soc_srng_init(soc)) {
15774 		dp_init_err("%pK: dp_soc_srng_init failed", soc);
15775 		goto fail4;
15776 	}
15777 
15778 	if (htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
15779 			       htt_get_htc_handle(htt_soc),
15780 			       soc->hal_soc, soc->osdev) == NULL)
15781 		goto fail5;
15782 
15783 	/* Initialize descriptors in TCL Rings */
15784 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
15785 		hal_tx_init_data_ring(soc->hal_soc,
15786 				      soc->tcl_data_ring[i].hal_srng);
15787 	}
15788 
15789 	if (dp_soc_tx_desc_sw_pools_init(soc)) {
15790 		dp_init_err("%pK: dp_tx_soc_attach failed", soc);
15791 		goto fail6;
15792 	}
15793 
15794 	if (soc->arch_ops.txrx_soc_ppeds_start) {
15795 		if (soc->arch_ops.txrx_soc_ppeds_start(soc)) {
15796 			dp_init_err("%pK: ppeds start failed", soc);
15797 			goto fail7;
15798 		}
15799 	}
15800 
15801 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
15802 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
15803 	soc->cce_disable = false;
15804 	soc->max_ast_ageout_count = MAX_AST_AGEOUT_COUNT;
15805 
15806 	soc->sta_mode_search_policy = DP_TX_ADDR_SEARCH_ADDR_POLICY;
15807 	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
15808 	qdf_spinlock_create(&soc->vdev_map_lock);
15809 	qdf_atomic_init(&soc->num_tx_outstanding);
15810 	qdf_atomic_init(&soc->num_tx_exception);
15811 	soc->num_tx_allowed =
15812 		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);
15813 	soc->num_tx_spl_allowed =
15814 		wlan_cfg_get_dp_soc_tx_spl_device_limit(soc->wlan_cfg_ctx);
15815 	soc->num_reg_tx_allowed = soc->num_tx_allowed - soc->num_tx_spl_allowed;
15816 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
15817 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
15818 				CDP_CFG_MAX_PEER_ID);
15819 
15820 		if (ret != -EINVAL)
15821 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
15822 
15823 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
15824 				CDP_CFG_CCE_DISABLE);
15825 		if (ret == 1)
15826 			soc->cce_disable = true;
15827 	}
15828 
15829 	/*
15830 	 * Skip registering hw ring interrupts for WMAC2 on IPQ6018
15831 	 * and IPQ5018 WMAC2 is not there in these platforms.
15832 	 */
15833 	if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018 ||
15834 	    soc->disable_mac2_intr)
15835 		dp_soc_disable_unused_mac_intr_mask(soc, 0x2);
15836 
15837 	/*
15838 	 * Skip registering hw ring interrupts for WMAC1 on IPQ5018
15839 	 * WMAC1 is not there in this platform.
15840 	 */
15841 	if (soc->disable_mac1_intr)
15842 		dp_soc_disable_unused_mac_intr_mask(soc, 0x1);
15843 
15844 	/* setup the global rx defrag waitlist */
15845 	TAILQ_INIT(&soc->rx.defrag.waitlist);
15846 	soc->rx.defrag.timeout_ms =
15847 		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
15848 	soc->rx.defrag.next_flush_ms = 0;
15849 	soc->rx.flags.defrag_timeout_check =
15850 		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
15851 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
15852 
15853 	dp_monitor_soc_init(soc);
15854 
15855 	qdf_atomic_set(&soc->cmn_init_done, 1);
15856 
15857 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
15858 
15859 	qdf_spinlock_create(&soc->ast_lock);
15860 	dp_peer_mec_spinlock_create(soc);
15861 
15862 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
15863 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
15864 	INIT_RX_HW_STATS_LOCK(soc);
15865 
15866 	qdf_nbuf_queue_init(&soc->invalid_buf_queue);
15867 	/* fill the tx/rx cpu ring map*/
15868 	dp_soc_set_txrx_ring_map(soc);
15869 
15870 	TAILQ_INIT(&soc->inactive_peer_list);
15871 	qdf_spinlock_create(&soc->inactive_peer_list_lock);
15872 	TAILQ_INIT(&soc->inactive_vdev_list);
15873 	qdf_spinlock_create(&soc->inactive_vdev_list_lock);
15874 	qdf_spinlock_create(&soc->htt_stats.lock);
15875 	/* initialize work queue for stats processing */
15876 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
15877 
15878 	dp_reo_desc_deferred_freelist_create(soc);
15879 
15880 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
15881 		qdf_dma_mem_stats_read(),
15882 		qdf_heap_mem_stats_read(),
15883 		qdf_skb_total_mem_stats_read());
15884 
15885 	soc->vdev_stats_id_map = 0;
15886 
15887 	return soc;
15888 fail7:
15889 	dp_soc_tx_desc_sw_pools_deinit(soc);
15890 fail6:
15891 	htt_soc_htc_dealloc(soc->htt_handle);
15892 fail5:
15893 	dp_soc_srng_deinit(soc);
15894 fail4:
15895 	dp_hw_link_desc_ring_deinit(soc);
15896 fail3:
15897 	htt_htc_pkt_pool_free(htt_soc);
15898 fail2:
15899 	htt_soc_detach(htt_soc);
15900 fail1:
15901 	soc->arch_ops.txrx_soc_deinit(soc);
15902 fail0:
15903 	return NULL;
15904 }
15905 
15906 /**
15907  * dp_soc_init_wifi3() - Initialize txrx SOC
15908  * @soc: Opaque DP SOC handle
15909  * @ctrl_psoc: Opaque SOC handle from control plane(Unused)
15910  * @hif_handle: Opaque HIF handle
15911  * @htc_handle: Opaque HTC handle
15912  * @qdf_osdev: QDF device (Unused)
15913  * @ol_ops: Offload Operations (Unused)
15914  * @device_id: Device ID (Unused)
15915  *
15916  * Return: DP SOC handle on success, NULL on failure
15917  */
15918 void *dp_soc_init_wifi3(struct cdp_soc_t *soc,
15919 			struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
15920 			struct hif_opaque_softc *hif_handle,
15921 			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
15922 			struct ol_if_ops *ol_ops, uint16_t device_id)
15923 {
15924 	return dp_soc_init((struct dp_soc *)soc, htc_handle, hif_handle);
15925 }
15926 
15927 #endif
15928 
15929 /*
15930  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
15931  *
15932  * @soc: handle to DP soc
15933  * @mac_id: MAC id
15934  *
15935  * Return: Return pdev corresponding to MAC
15936  */
15937 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
15938 {
15939 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
15940 		return (mac_id < MAX_PDEV_CNT) ? soc->pdev_list[mac_id] : NULL;
15941 
15942 	/* Typically for MCL as there only 1 PDEV*/
15943 	return soc->pdev_list[0];
15944 }
15945 
15946 void dp_update_num_mac_rings_for_dbs(struct dp_soc *soc,
15947 				     int *max_mac_rings)
15948 {
15949 	bool dbs_enable = false;
15950 
15951 	if (soc->cdp_soc.ol_ops->is_hw_dbs_capable)
15952 		dbs_enable = soc->cdp_soc.ol_ops->
15953 				is_hw_dbs_capable((void *)soc->ctrl_psoc);
15954 
15955 	*max_mac_rings = dbs_enable ? (*max_mac_rings) : 1;
15956 	dp_info("dbs_enable %d, max_mac_rings %d",
15957 		dbs_enable, *max_mac_rings);
15958 }
15959 
15960 qdf_export_symbol(dp_update_num_mac_rings_for_dbs);
15961 
15962 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
15963 /**
15964  * dp_get_cfr_rcc() - get cfr rcc config
15965  * @soc_hdl: Datapath soc handle
15966  * @pdev_id: id of objmgr pdev
15967  *
15968  * Return: true/false based on cfr mode setting
15969  */
15970 static
15971 bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
15972 {
15973 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15974 	struct dp_pdev *pdev = NULL;
15975 
15976 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
15977 	if (!pdev) {
15978 		dp_err("pdev is NULL");
15979 		return false;
15980 	}
15981 
15982 	return pdev->cfr_rcc_mode;
15983 }
15984 
15985 /**
15986  * dp_set_cfr_rcc() - enable/disable cfr rcc config
15987  * @soc_hdl: Datapath soc handle
15988  * @pdev_id: id of objmgr pdev
15989  * @enable: Enable/Disable cfr rcc mode
15990  *
15991  * Return: none
15992  */
15993 static
15994 void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, bool enable)
15995 {
15996 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
15997 	struct dp_pdev *pdev = NULL;
15998 
15999 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
16000 	if (!pdev) {
16001 		dp_err("pdev is NULL");
16002 		return;
16003 	}
16004 
16005 	pdev->cfr_rcc_mode = enable;
16006 }
16007 
16008 /*
16009  * dp_get_cfr_dbg_stats - Get the debug statistics for CFR
16010  * @soc_hdl: Datapath soc handle
16011  * @pdev_id: id of data path pdev handle
16012  * @cfr_rcc_stats: CFR RCC debug statistics buffer
16013  *
16014  * Return: none
16015  */
16016 static inline void
16017 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
16018 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats)
16019 {
16020 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
16021 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
16022 
16023 	if (!pdev) {
16024 		dp_err("Invalid pdev");
16025 		return;
16026 	}
16027 
16028 	qdf_mem_copy(cfr_rcc_stats, &pdev->stats.rcc,
16029 		     sizeof(struct cdp_cfr_rcc_stats));
16030 }
16031 
16032 /*
16033  * dp_clear_cfr_dbg_stats - Clear debug statistics for CFR
16034  * @soc_hdl: Datapath soc handle
16035  * @pdev_id: id of data path pdev handle
16036  *
16037  * Return: none
16038  */
16039 static void dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl,
16040 				   uint8_t pdev_id)
16041 {
16042 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
16043 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
16044 
16045 	if (!pdev) {
16046 		dp_err("dp pdev is NULL");
16047 		return;
16048 	}
16049 
16050 	qdf_mem_zero(&pdev->stats.rcc, sizeof(pdev->stats.rcc));
16051 }
16052 #endif
16053 
16054 /**
16055  * dp_bucket_index() - Return index from array
16056  *
16057  * @delay: delay measured
16058  * @array: array used to index corresponding delay
16059  * @delay_in_us: flag to indicate whether the delay in ms or us
16060  *
16061  * Return: index
16062  */
16063 static uint8_t
16064 dp_bucket_index(uint32_t delay, uint16_t *array, bool delay_in_us)
16065 {
16066 	uint8_t i = CDP_DELAY_BUCKET_0;
16067 	uint32_t thr_low, thr_high;
16068 
16069 	for (; i < CDP_DELAY_BUCKET_MAX - 1; i++) {
16070 		thr_low = array[i];
16071 		thr_high = array[i + 1];
16072 
16073 		if (delay_in_us) {
16074 			thr_low = thr_low * USEC_PER_MSEC;
16075 			thr_high = thr_high * USEC_PER_MSEC;
16076 		}
16077 		if (delay >= thr_low && delay <= thr_high)
16078 			return i;
16079 	}
16080 	return (CDP_DELAY_BUCKET_MAX - 1);
16081 }
16082 
/* Bucket threshold tables consumed by dp_bucket_index(); values are in
 * milliseconds and are scaled to microseconds there when delay_in_us is set.
 */
#ifdef HW_TX_DELAY_STATS_ENABLE
/*
 * cdp_fw_to_hw_delay_range
 * Fw to hw delay ranges in milliseconds
 */
static uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
	0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
#else
/* Finer-grained low-end buckets when HW tx delay stats are not enabled */
static uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
	0, 2, 4, 6, 8, 10, 20, 30, 40, 50, 100, 250, 500};
#endif

/*
 * cdp_sw_enq_delay_range
 * Software enqueue delay ranges in milliseconds
 */
static uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};

/*
 * cdp_intfrm_delay_range
 * Interframe delay ranges in milliseconds
 */
static uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
	0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};
16108 
16109 /**
16110  * dp_fill_delay_buckets() - Fill delay statistics bucket for each
16111  *				type of delay
16112  * @tstats: tid tx stats
16113  * @rstats: tid rx stats
16114  * @delay: delay in ms
16115  * @tid: tid value
16116  * @mode: type of tx delay mode
16117  * @ring_id: ring number
16118  * @delay_in_us: flag to indicate whether the delay in ms or us
16119  *
16120  * Return: pointer to cdp_delay_stats structure
16121  */
16122 static struct cdp_delay_stats *
16123 dp_fill_delay_buckets(struct cdp_tid_tx_stats *tstats,
16124 		      struct cdp_tid_rx_stats *rstats, uint32_t delay,
16125 		      uint8_t tid, uint8_t mode, uint8_t ring_id,
16126 		      bool delay_in_us)
16127 {
16128 	uint8_t delay_index = 0;
16129 	struct cdp_delay_stats *stats = NULL;
16130 
16131 	/*
16132 	 * Update delay stats in proper bucket
16133 	 */
16134 	switch (mode) {
16135 	/* Software Enqueue delay ranges */
16136 	case CDP_DELAY_STATS_SW_ENQ:
16137 		if (!tstats)
16138 			break;
16139 
16140 		delay_index = dp_bucket_index(delay, cdp_sw_enq_delay,
16141 					      delay_in_us);
16142 		tstats->swq_delay.delay_bucket[delay_index]++;
16143 		stats = &tstats->swq_delay;
16144 		break;
16145 
16146 	/* Tx Completion delay ranges */
16147 	case CDP_DELAY_STATS_FW_HW_TRANSMIT:
16148 		if (!tstats)
16149 			break;
16150 
16151 		delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay,
16152 					      delay_in_us);
16153 		tstats->hwtx_delay.delay_bucket[delay_index]++;
16154 		stats = &tstats->hwtx_delay;
16155 		break;
16156 
16157 	/* Interframe tx delay ranges */
16158 	case CDP_DELAY_STATS_TX_INTERFRAME:
16159 		if (!tstats)
16160 			break;
16161 
16162 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
16163 					      delay_in_us);
16164 		tstats->intfrm_delay.delay_bucket[delay_index]++;
16165 		stats = &tstats->intfrm_delay;
16166 		break;
16167 
16168 	/* Interframe rx delay ranges */
16169 	case CDP_DELAY_STATS_RX_INTERFRAME:
16170 		if (!rstats)
16171 			break;
16172 
16173 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
16174 					      delay_in_us);
16175 		rstats->intfrm_delay.delay_bucket[delay_index]++;
16176 		stats = &rstats->intfrm_delay;
16177 		break;
16178 
16179 	/* Ring reap to indication to network stack */
16180 	case CDP_DELAY_STATS_REAP_STACK:
16181 		if (!rstats)
16182 			break;
16183 
16184 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
16185 					      delay_in_us);
16186 		rstats->to_stack_delay.delay_bucket[delay_index]++;
16187 		stats = &rstats->to_stack_delay;
16188 		break;
16189 	default:
16190 		dp_debug("Incorrect delay mode: %d", mode);
16191 	}
16192 
16193 	return stats;
16194 }
16195 
16196 void dp_update_delay_stats(struct cdp_tid_tx_stats *tstats,
16197 			   struct cdp_tid_rx_stats *rstats, uint32_t delay,
16198 			   uint8_t tid, uint8_t mode, uint8_t ring_id,
16199 			   bool delay_in_us)
16200 {
16201 	struct cdp_delay_stats *dstats = NULL;
16202 
16203 	/*
16204 	 * Delay ranges are different for different delay modes
16205 	 * Get the correct index to update delay bucket
16206 	 */
16207 	dstats = dp_fill_delay_buckets(tstats, rstats, delay, tid, mode,
16208 				       ring_id, delay_in_us);
16209 	if (qdf_unlikely(!dstats))
16210 		return;
16211 
16212 	if (delay != 0) {
16213 		/*
16214 		 * Compute minimum,average and maximum
16215 		 * delay
16216 		 */
16217 		if (delay < dstats->min_delay)
16218 			dstats->min_delay = delay;
16219 
16220 		if (delay > dstats->max_delay)
16221 			dstats->max_delay = delay;
16222 
16223 		/*
16224 		 * Average over delay measured till now
16225 		 */
16226 		if (!dstats->avg_delay)
16227 			dstats->avg_delay = delay;
16228 		else
16229 			dstats->avg_delay = ((delay + dstats->avg_delay) >> 1);
16230 	}
16231 }
16232 
16233 /**
16234  * dp_get_peer_mac_list(): function to get peer mac list of vdev
16235  * @soc: Datapath soc handle
16236  * @vdev_id: vdev id
16237  * @newmac: Table of the clients mac
16238  * @mac_cnt: No. of MACs required
16239  * @limit: Limit the number of clients
16240  *
16241  * return: no of clients
16242  */
16243 uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
16244 			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
16245 			      u_int16_t mac_cnt, bool limit)
16246 {
16247 	struct dp_soc *dp_soc = (struct dp_soc *)soc;
16248 	struct dp_vdev *vdev =
16249 		dp_vdev_get_ref_by_id(dp_soc, vdev_id, DP_MOD_ID_CDP);
16250 	struct dp_peer *peer;
16251 	uint16_t new_mac_cnt = 0;
16252 
16253 	if (!vdev)
16254 		return new_mac_cnt;
16255 
16256 	if (limit && (vdev->num_peers > mac_cnt))
16257 		return 0;
16258 
16259 	qdf_spin_lock_bh(&vdev->peer_list_lock);
16260 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
16261 		if (peer->bss_peer)
16262 			continue;
16263 		if (new_mac_cnt < mac_cnt) {
16264 			WLAN_ADDR_COPY(newmac[new_mac_cnt], peer->mac_addr.raw);
16265 			new_mac_cnt++;
16266 		}
16267 	}
16268 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
16269 	dp_vdev_unref_delete(dp_soc, vdev, DP_MOD_ID_CDP);
16270 	return new_mac_cnt;
16271 }
16272 
16273 uint16_t dp_get_peer_id(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *mac)
16274 {
16275 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
16276 						       mac, 0, vdev_id,
16277 						       DP_MOD_ID_CDP);
16278 	uint16_t peer_id = HTT_INVALID_PEER;
16279 
16280 	if (!peer) {
16281 		dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
16282 		return peer_id;
16283 	}
16284 
16285 	peer_id = peer->peer_id;
16286 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
16287 	return peer_id;
16288 }
16289 
16290 #ifdef QCA_SUPPORT_WDS_EXTENDED
16291 QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc,
16292 				  uint8_t vdev_id,
16293 				  uint8_t *mac,
16294 				  ol_txrx_rx_fp rx,
16295 				  ol_osif_peer_handle osif_peer)
16296 {
16297 	struct dp_txrx_peer *txrx_peer = NULL;
16298 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
16299 						       mac, 0, vdev_id,
16300 						       DP_MOD_ID_CDP);
16301 	QDF_STATUS status = QDF_STATUS_E_INVAL;
16302 
16303 	if (!peer) {
16304 		dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
16305 		return status;
16306 	}
16307 
16308 	txrx_peer = dp_get_txrx_peer(peer);
16309 	if (!txrx_peer) {
16310 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
16311 		return status;
16312 	}
16313 
16314 	if (rx) {
16315 		if (txrx_peer->osif_rx) {
16316 			status = QDF_STATUS_E_ALREADY;
16317 		} else {
16318 			txrx_peer->osif_rx = rx;
16319 			status = QDF_STATUS_SUCCESS;
16320 		}
16321 	} else {
16322 		if (txrx_peer->osif_rx) {
16323 			txrx_peer->osif_rx = NULL;
16324 			status = QDF_STATUS_SUCCESS;
16325 		} else {
16326 			status = QDF_STATUS_E_ALREADY;
16327 		}
16328 	}
16329 
16330 	txrx_peer->wds_ext.osif_peer = osif_peer;
16331 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
16332 
16333 	return status;
16334 }
16335 #endif /* QCA_SUPPORT_WDS_EXTENDED */
16336 
16337 /**
16338  * dp_pdev_srng_deinit() - de-initialize all pdev srng ring including
16339  *			   monitor rings
16340  * @pdev: Datapath pdev handle
16341  *
16342  */
16343 static void dp_pdev_srng_deinit(struct dp_pdev *pdev)
16344 {
16345 	struct dp_soc *soc = pdev->soc;
16346 	uint8_t i;
16347 
16348 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled)
16349 		dp_srng_deinit(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
16350 			       RXDMA_BUF,
16351 			       pdev->lmac_id);
16352 
16353 	if (!soc->rxdma2sw_rings_not_supported) {
16354 		for (i = 0;
16355 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
16356 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
16357 								 pdev->pdev_id);
16358 
16359 			wlan_minidump_remove(soc->rxdma_err_dst_ring[lmac_id].
16360 							base_vaddr_unaligned,
16361 					     soc->rxdma_err_dst_ring[lmac_id].
16362 								alloc_size,
16363 					     soc->ctrl_psoc,
16364 					     WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
16365 					     "rxdma_err_dst");
16366 			dp_srng_deinit(soc, &soc->rxdma_err_dst_ring[lmac_id],
16367 				       RXDMA_DST, lmac_id);
16368 		}
16369 	}
16370 
16371 
16372 }
16373 
16374 /**
16375  * dp_pdev_srng_init() - initialize all pdev srng rings including
16376  *			   monitor rings
16377  * @pdev: Datapath pdev handle
16378  *
16379  * return: QDF_STATUS_SUCCESS on success
16380  *	   QDF_STATUS_E_NOMEM on failure
16381  */
static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	uint32_t i;

	soc_cfg_ctx = soc->wlan_cfg_ctx;

	/* Per-pdev RX refill ring is only initialized when a common DMAC
	 * source rx buffer ring is not in use.
	 */
	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
		if (dp_srng_init(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
				 RXDMA_BUF, 0, pdev->lmac_id)) {
			dp_init_err("%pK: dp_srng_init failed rx refill ring",
				    soc);
			goto fail1;
		}
	}

	/* LMAC RxDMA to SW Rings configuration */
	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
		/* Only valid for MCL */
		pdev = soc->pdev_list[0];

	if (!soc->rxdma2sw_rings_not_supported) {
		for (i = 0;
		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
								 pdev->pdev_id);
			struct dp_srng *srng =
				&soc->rxdma_err_dst_ring[lmac_id];

			/* NOTE(review): an already-initialized ring is
			 * skipped — presumably shared with another pdev on
			 * the same lmac; confirm against the attach flow.
			 */
			if (srng->hal_srng)
				continue;

			if (dp_srng_init(soc, srng, RXDMA_DST, 0, lmac_id)) {
				dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring",
					    soc);
				goto fail1;
			}
			wlan_minidump_log(soc->rxdma_err_dst_ring[lmac_id].
						base_vaddr_unaligned,
					  soc->rxdma_err_dst_ring[lmac_id].
						alloc_size,
					  soc->ctrl_psoc,
					  WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
					  "rxdma_err_dst");
		}
	}
	return QDF_STATUS_SUCCESS;

	/* Unwind any rings initialized so far */
fail1:
	dp_pdev_srng_deinit(pdev);
	return QDF_STATUS_E_NOMEM;
}
16435 
16436 /**
16437  * dp_pdev_srng_free() - free all pdev srng rings including monitor rings
16438  * pdev: Datapath pdev handle
16439  *
16440  */
16441 static void dp_pdev_srng_free(struct dp_pdev *pdev)
16442 {
16443 	struct dp_soc *soc = pdev->soc;
16444 	uint8_t i;
16445 
16446 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled)
16447 		dp_srng_free(soc, &soc->rx_refill_buf_ring[pdev->lmac_id]);
16448 
16449 	if (!soc->rxdma2sw_rings_not_supported) {
16450 		for (i = 0;
16451 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
16452 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
16453 								 pdev->pdev_id);
16454 
16455 			dp_srng_free(soc, &soc->rxdma_err_dst_ring[lmac_id]);
16456 		}
16457 	}
16458 }
16459 
16460 /**
16461  * dp_pdev_srng_alloc() - allocate memory for all pdev srng rings including
16462  *			  monitor rings
16463  * pdev: Datapath pdev handle
16464  *
16465  * return: QDF_STATUS_SUCCESS on success
16466  *	   QDF_STATUS_E_NOMEM on failure
16467  */
static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	uint32_t ring_size;
	uint32_t i;

	soc_cfg_ctx = soc->wlan_cfg_ctx;

	ring_size = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
	/* Per-pdev RX refill ring is only allocated when a common DMAC
	 * source rx buffer ring is not in use.
	 */
	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
		if (dp_srng_alloc(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
				  RXDMA_BUF, ring_size, 0)) {
			dp_init_err("%pK: dp_srng_alloc failed rx refill ring",
				    soc);
			goto fail1;
		}
	}

	/* ring_size is reused for the error-destination rings below */
	ring_size = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
	/* LMAC RxDMA to SW Rings configuration */
	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
		/* Only valid for MCL */
		pdev = soc->pdev_list[0];

	if (!soc->rxdma2sw_rings_not_supported) {
		for (i = 0;
		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
								 pdev->pdev_id);
			struct dp_srng *srng =
				&soc->rxdma_err_dst_ring[lmac_id];

			/* NOTE(review): a ring already backed by memory is
			 * skipped — presumably shared with another pdev on
			 * the same lmac; confirm against the attach flow.
			 */
			if (srng->base_vaddr_unaligned)
				continue;

			if (dp_srng_alloc(soc, srng, RXDMA_DST, ring_size, 0)) {
				dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring",
					    soc);
				goto fail1;
			}
		}
	}

	return QDF_STATUS_SUCCESS;
	/* Unwind any ring memory allocated so far */
fail1:
	dp_pdev_srng_free(pdev);
	return QDF_STATUS_E_NOMEM;
}
16517 
16518 #ifndef WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG
16519 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
16520 {
16521 	QDF_STATUS status;
16522 
16523 	if (soc->init_tcl_cmd_cred_ring) {
16524 		status =  dp_srng_init(soc, &soc->tcl_cmd_credit_ring,
16525 				       TCL_CMD_CREDIT, 0, 0);
16526 		if (QDF_IS_STATUS_ERROR(status))
16527 			return status;
16528 
16529 		wlan_minidump_log(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
16530 				  soc->tcl_cmd_credit_ring.alloc_size,
16531 				  soc->ctrl_psoc,
16532 				  WLAN_MD_DP_SRNG_TCL_CMD,
16533 				  "wbm_desc_rel_ring");
16534 	}
16535 
16536 	return QDF_STATUS_SUCCESS;
16537 }
16538 
16539 static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
16540 {
16541 	if (soc->init_tcl_cmd_cred_ring) {
16542 		wlan_minidump_remove(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
16543 				     soc->tcl_cmd_credit_ring.alloc_size,
16544 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_CMD,
16545 				     "wbm_desc_rel_ring");
16546 		dp_srng_deinit(soc, &soc->tcl_cmd_credit_ring,
16547 			       TCL_CMD_CREDIT, 0);
16548 	}
16549 }
16550 
16551 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
16552 {
16553 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
16554 	uint32_t entries;
16555 	QDF_STATUS status;
16556 
16557 	entries = wlan_cfg_get_dp_soc_tcl_cmd_credit_ring_size(soc_cfg_ctx);
16558 	if (soc->init_tcl_cmd_cred_ring) {
16559 		status = dp_srng_alloc(soc, &soc->tcl_cmd_credit_ring,
16560 				       TCL_CMD_CREDIT, entries, 0);
16561 		if (QDF_IS_STATUS_ERROR(status))
16562 			return status;
16563 	}
16564 
16565 	return QDF_STATUS_SUCCESS;
16566 }
16567 
/* Free TCL command/credit ring memory (no-op when the ring is unused) */
static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
{
	if (soc->init_tcl_cmd_cred_ring)
		dp_srng_free(soc, &soc->tcl_cmd_credit_ring);
}
16573 
/* Program initial HW state for the TCL command/credit ring, if enabled */
static inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
{
	if (soc->init_tcl_cmd_cred_ring)
		hal_tx_init_cmd_credit_ring(soc->hal_soc,
					    soc->tcl_cmd_credit_ring.hal_srng);
}
16580 #else
/* WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG: the TCL command/credit ring is
 * compiled out; all operations degenerate to no-ops reporting success.
 */
static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
{
}

static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
{
}

static inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
{
}
16602 #endif
16603 
16604 #ifndef WLAN_DP_DISABLE_TCL_STATUS_SRNG
16605 static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
16606 {
16607 	QDF_STATUS status;
16608 
16609 	status =  dp_srng_init(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0);
16610 	if (QDF_IS_STATUS_ERROR(status))
16611 		return status;
16612 
16613 	wlan_minidump_log(soc->tcl_status_ring.base_vaddr_unaligned,
16614 			  soc->tcl_status_ring.alloc_size,
16615 			  soc->ctrl_psoc,
16616 			  WLAN_MD_DP_SRNG_TCL_STATUS,
16617 			  "wbm_desc_rel_ring");
16618 
16619 	return QDF_STATUS_SUCCESS;
16620 }
16621 
16622 static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
16623 {
16624 	wlan_minidump_remove(soc->tcl_status_ring.base_vaddr_unaligned,
16625 			     soc->tcl_status_ring.alloc_size,
16626 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_STATUS,
16627 			     "wbm_desc_rel_ring");
16628 	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
16629 }
16630 
16631 static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
16632 {
16633 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
16634 	uint32_t entries;
16635 	QDF_STATUS status = QDF_STATUS_SUCCESS;
16636 
16637 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
16638 	status = dp_srng_alloc(soc, &soc->tcl_status_ring,
16639 			       TCL_STATUS, entries, 0);
16640 
16641 	return status;
16642 }
16643 
/* Free TCL status ring memory */
static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
{
	dp_srng_free(soc, &soc->tcl_status_ring);
}
16648 #else
/* WLAN_DP_DISABLE_TCL_STATUS_SRNG: the TCL status ring is compiled out;
 * all operations degenerate to no-ops reporting success.
 */
static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
{
}

static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
{
}
16666 #endif
16667 
16668 /**
16669  * dp_soc_srng_deinit() - de-initialize soc srng rings
16670  * @soc: Datapath soc handle
16671  *
16672  */
static void dp_soc_srng_deinit(struct dp_soc *soc)
{
	uint32_t i;

	/* Arch-specific rings (if any) are torn down first */
	if (soc->arch_ops.txrx_soc_srng_deinit)
		soc->arch_ops.txrx_soc_srng_deinit(soc);

	/* Free the ring memories */
	/* Common rings */
	wlan_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
			     soc->wbm_desc_rel_ring.alloc_size,
			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_WBM_DESC_REL,
			     "wbm_desc_rel_ring");
	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);

	/* Tx data rings */
	for (i = 0; i < soc->num_tcl_data_rings; i++)
		dp_deinit_tx_pair_by_index(soc, i);

	/* IPA owns an extra TCL/WBM pair plus an alternate tx ring */
	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
		dp_deinit_tx_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
		dp_ipa_deinit_alt_tx_ring(soc);
	}

	/* TCL command and status rings */
	dp_soc_tcl_cmd_cred_srng_deinit(soc);
	dp_soc_tcl_status_srng_deinit(soc);

	for (i = 0; i < soc->num_reo_dest_rings; i++) {
		/* TODO: Get number of rings and ring sizes
		 * from wlan_cfg
		 */
		wlan_minidump_remove(soc->reo_dest_ring[i].base_vaddr_unaligned,
				     soc->reo_dest_ring[i].alloc_size,
				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_DEST,
				     "reo_dest_ring");
		dp_srng_deinit(soc, &soc->reo_dest_ring[i], REO_DST, i);
	}

	/* REO reinjection ring */
	wlan_minidump_remove(soc->reo_reinject_ring.base_vaddr_unaligned,
			     soc->reo_reinject_ring.alloc_size,
			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_REINJECT,
			     "reo_reinject_ring");
	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);

	/* Rx release ring */
	wlan_minidump_remove(soc->rx_rel_ring.base_vaddr_unaligned,
			     soc->rx_rel_ring.alloc_size,
			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_RX_REL,
			     "reo_release_ring");
	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);

	/* Rx exception ring */
	/* TODO: Better to store ring_type and ring_num in
	 * dp_srng during setup
	 */
	wlan_minidump_remove(soc->reo_exception_ring.base_vaddr_unaligned,
			     soc->reo_exception_ring.alloc_size,
			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_EXCEPTION,
			     "reo_exception_ring");
	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);

	/* REO command and status rings */
	wlan_minidump_remove(soc->reo_cmd_ring.base_vaddr_unaligned,
			     soc->reo_cmd_ring.alloc_size,
			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_CMD,
			     "reo_cmd_ring");
	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
	wlan_minidump_remove(soc->reo_status_ring.base_vaddr_unaligned,
			     soc->reo_status_ring.alloc_size,
			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_STATUS,
			     "reo_status_ring");
	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
}
16748 
16749 /**
16750  * dp_soc_srng_init() - Initialize soc level srng rings
16751  * @soc: Datapath soc handle
16752  *
16753  * return: QDF_STATUS_SUCCESS on success
16754  *	   QDF_STATUS_E_FAILURE on failure
16755  */
16756 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc)
16757 {
16758 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
16759 	uint8_t i;
16760 	uint8_t wbm2_sw_rx_rel_ring_id;
16761 
16762 	soc_cfg_ctx = soc->wlan_cfg_ctx;
16763 
16764 	dp_enable_verbose_debug(soc);
16765 
16766 	/* WBM descriptor release ring */
16767 	if (dp_srng_init(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0)) {
16768 		dp_init_err("%pK: dp_srng_init failed for wbm_desc_rel_ring", soc);
16769 		goto fail1;
16770 	}
16771 
16772 	wlan_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
16773 			  soc->wbm_desc_rel_ring.alloc_size,
16774 			  soc->ctrl_psoc,
16775 			  WLAN_MD_DP_SRNG_WBM_DESC_REL,
16776 			  "wbm_desc_rel_ring");
16777 
16778 	/* TCL command and status rings */
16779 	if (dp_soc_tcl_cmd_cred_srng_init(soc)) {
16780 		dp_init_err("%pK: dp_srng_init failed for tcl_cmd_ring", soc);
16781 		goto fail1;
16782 	}
16783 
16784 	if (dp_soc_tcl_status_srng_init(soc)) {
16785 		dp_init_err("%pK: dp_srng_init failed for tcl_status_ring", soc);
16786 		goto fail1;
16787 	}
16788 
16789 	/* REO reinjection ring */
16790 	if (dp_srng_init(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0)) {
16791 		dp_init_err("%pK: dp_srng_init failed for reo_reinject_ring", soc);
16792 		goto fail1;
16793 	}
16794 
16795 	wlan_minidump_log(soc->reo_reinject_ring.base_vaddr_unaligned,
16796 			  soc->reo_reinject_ring.alloc_size,
16797 			  soc->ctrl_psoc,
16798 			  WLAN_MD_DP_SRNG_REO_REINJECT,
16799 			  "reo_reinject_ring");
16800 
16801 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc_cfg_ctx);
16802 	/* Rx release ring */
16803 	if (dp_srng_init(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
16804 			 wbm2_sw_rx_rel_ring_id, 0)) {
16805 		dp_init_err("%pK: dp_srng_init failed for rx_rel_ring", soc);
16806 		goto fail1;
16807 	}
16808 
16809 	wlan_minidump_log(soc->rx_rel_ring.base_vaddr_unaligned,
16810 			  soc->rx_rel_ring.alloc_size,
16811 			  soc->ctrl_psoc,
16812 			  WLAN_MD_DP_SRNG_RX_REL,
16813 			  "reo_release_ring");
16814 
16815 	/* Rx exception ring */
16816 	if (dp_srng_init(soc, &soc->reo_exception_ring,
16817 			 REO_EXCEPTION, 0, MAX_REO_DEST_RINGS)) {
16818 		dp_init_err("%pK: dp_srng_init failed - reo_exception", soc);
16819 		goto fail1;
16820 	}
16821 
16822 	wlan_minidump_log(soc->reo_exception_ring.base_vaddr_unaligned,
16823 			  soc->reo_exception_ring.alloc_size,
16824 			  soc->ctrl_psoc,
16825 			  WLAN_MD_DP_SRNG_REO_EXCEPTION,
16826 			  "reo_exception_ring");
16827 
16828 	/* REO command and status rings */
16829 	if (dp_srng_init(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0)) {
16830 		dp_init_err("%pK: dp_srng_init failed for reo_cmd_ring", soc);
16831 		goto fail1;
16832 	}
16833 
16834 	wlan_minidump_log(soc->reo_cmd_ring.base_vaddr_unaligned,
16835 			  soc->reo_cmd_ring.alloc_size,
16836 			  soc->ctrl_psoc,
16837 			  WLAN_MD_DP_SRNG_REO_CMD,
16838 			  "reo_cmd_ring");
16839 
16840 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
16841 	TAILQ_INIT(&soc->rx.reo_cmd_list);
16842 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
16843 
16844 	if (dp_srng_init(soc, &soc->reo_status_ring, REO_STATUS, 0, 0)) {
16845 		dp_init_err("%pK: dp_srng_init failed for reo_status_ring", soc);
16846 		goto fail1;
16847 	}
16848 
16849 	wlan_minidump_log(soc->reo_status_ring.base_vaddr_unaligned,
16850 			  soc->reo_status_ring.alloc_size,
16851 			  soc->ctrl_psoc,
16852 			  WLAN_MD_DP_SRNG_REO_STATUS,
16853 			  "reo_status_ring");
16854 
16855 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
16856 		if (dp_init_tx_ring_pair_by_index(soc, i))
16857 			goto fail1;
16858 	}
16859 
16860 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
16861 		if (dp_init_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
16862 			goto fail1;
16863 
16864 		if (dp_ipa_init_alt_tx_ring(soc))
16865 			goto fail1;
16866 	}
16867 
16868 	dp_create_ext_stats_event(soc);
16869 
16870 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
16871 		/* Initialize REO destination ring */
16872 		if (dp_srng_init(soc, &soc->reo_dest_ring[i], REO_DST, i, 0)) {
16873 			dp_init_err("%pK: dp_srng_init failed for reo_dest_ringn", soc);
16874 			goto fail1;
16875 		}
16876 
16877 		wlan_minidump_log(soc->reo_dest_ring[i].base_vaddr_unaligned,
16878 				  soc->reo_dest_ring[i].alloc_size,
16879 				  soc->ctrl_psoc,
16880 				  WLAN_MD_DP_SRNG_REO_DEST,
16881 				  "reo_dest_ring");
16882 	}
16883 
16884 	if (soc->arch_ops.txrx_soc_srng_init) {
16885 		if (soc->arch_ops.txrx_soc_srng_init(soc)) {
16886 			dp_init_err("%pK: dp_srng_init failed for arch rings",
16887 				    soc);
16888 			goto fail1;
16889 		}
16890 	}
16891 
16892 	return QDF_STATUS_SUCCESS;
16893 fail1:
16894 	/*
16895 	 * Cleanup will be done as part of soc_detach, which will
16896 	 * be called on pdev attach failure
16897 	 */
16898 	dp_soc_srng_deinit(soc);
16899 	return QDF_STATUS_E_FAILURE;
16900 }
16901 
16902 /**
16903  * dp_soc_srng_free() - free soc level srng rings
16904  * @soc: Datapath soc handle
16905  *
16906  */
16907 static void dp_soc_srng_free(struct dp_soc *soc)
16908 {
16909 	uint32_t i;
16910 
16911 	if (soc->arch_ops.txrx_soc_srng_free)
16912 		soc->arch_ops.txrx_soc_srng_free(soc);
16913 
16914 	dp_srng_free(soc, &soc->wbm_desc_rel_ring);
16915 
16916 	for (i = 0; i < soc->num_tcl_data_rings; i++)
16917 		dp_free_tx_ring_pair_by_index(soc, i);
16918 
16919 	/* Free IPA rings for TCL_TX and TCL_COMPL ring */
16920 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
16921 		dp_free_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
16922 		dp_ipa_free_alt_tx_ring(soc);
16923 	}
16924 
16925 	dp_soc_tcl_cmd_cred_srng_free(soc);
16926 	dp_soc_tcl_status_srng_free(soc);
16927 
16928 	for (i = 0; i < soc->num_reo_dest_rings; i++)
16929 		dp_srng_free(soc, &soc->reo_dest_ring[i]);
16930 
16931 	dp_srng_free(soc, &soc->reo_reinject_ring);
16932 	dp_srng_free(soc, &soc->rx_rel_ring);
16933 
16934 	dp_srng_free(soc, &soc->reo_exception_ring);
16935 
16936 	dp_srng_free(soc, &soc->reo_cmd_ring);
16937 	dp_srng_free(soc, &soc->reo_status_ring);
16938 }
16939 
16940 /**
16941  * dp_soc_srng_alloc() - Allocate memory for soc level srng rings
16942  * @soc: Datapath soc handle
16943  *
16944  * return: QDF_STATUS_SUCCESS on success
16945  *	   QDF_STATUS_E_NOMEM on failure
16946  */
16947 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc)
16948 {
16949 	uint32_t entries;
16950 	uint32_t i;
16951 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
16952 	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
16953 	uint32_t reo_dst_ring_size;
16954 
16955 	soc_cfg_ctx = soc->wlan_cfg_ctx;
16956 
16957 	/* sw2wbm link descriptor release ring */
16958 	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
16959 	if (dp_srng_alloc(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE,
16960 			  entries, 0)) {
16961 		dp_init_err("%pK: dp_srng_alloc failed for wbm_desc_rel_ring", soc);
16962 		goto fail1;
16963 	}
16964 
16965 	/* TCL command and status rings */
16966 	if (dp_soc_tcl_cmd_cred_srng_alloc(soc)) {
16967 		dp_init_err("%pK: dp_srng_alloc failed for tcl_cmd_ring", soc);
16968 		goto fail1;
16969 	}
16970 
16971 	if (dp_soc_tcl_status_srng_alloc(soc)) {
16972 		dp_init_err("%pK: dp_srng_alloc failed for tcl_status_ring", soc);
16973 		goto fail1;
16974 	}
16975 
16976 	/* REO reinjection ring */
16977 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
16978 	if (dp_srng_alloc(soc, &soc->reo_reinject_ring, REO_REINJECT,
16979 			  entries, 0)) {
16980 		dp_init_err("%pK: dp_srng_alloc failed for reo_reinject_ring", soc);
16981 		goto fail1;
16982 	}
16983 
16984 	/* Rx release ring */
16985 	entries = wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx);
16986 	if (dp_srng_alloc(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
16987 			  entries, 0)) {
16988 		dp_init_err("%pK: dp_srng_alloc failed for rx_rel_ring", soc);
16989 		goto fail1;
16990 	}
16991 
16992 	/* Rx exception ring */
16993 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
16994 	if (dp_srng_alloc(soc, &soc->reo_exception_ring, REO_EXCEPTION,
16995 			  entries, 0)) {
16996 		dp_init_err("%pK: dp_srng_alloc failed - reo_exception", soc);
16997 		goto fail1;
16998 	}
16999 
17000 	/* REO command and status rings */
17001 	entries = wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx);
17002 	if (dp_srng_alloc(soc, &soc->reo_cmd_ring, REO_CMD, entries, 0)) {
17003 		dp_init_err("%pK: dp_srng_alloc failed for reo_cmd_ring", soc);
17004 		goto fail1;
17005 	}
17006 
17007 	entries = wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx);
17008 	if (dp_srng_alloc(soc, &soc->reo_status_ring, REO_STATUS,
17009 			  entries, 0)) {
17010 		dp_init_err("%pK: dp_srng_alloc failed for reo_status_ring", soc);
17011 		goto fail1;
17012 	}
17013 
17014 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc_cfg_ctx);
17015 
17016 	/* Disable cached desc if NSS offload is enabled */
17017 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
17018 		cached = 0;
17019 
17020 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
17021 		if (dp_alloc_tx_ring_pair_by_index(soc, i))
17022 			goto fail1;
17023 	}
17024 
17025 	/* IPA rings for TCL_TX and TX_COMP will be allocated here */
17026 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
17027 		if (dp_alloc_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
17028 			goto fail1;
17029 
17030 		if (dp_ipa_alloc_alt_tx_ring(soc))
17031 			goto fail1;
17032 	}
17033 
17034 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
17035 		/* Setup REO destination ring */
17036 		if (dp_srng_alloc(soc, &soc->reo_dest_ring[i], REO_DST,
17037 				  reo_dst_ring_size, cached)) {
17038 			dp_init_err("%pK: dp_srng_alloc failed for reo_dest_ring", soc);
17039 			goto fail1;
17040 		}
17041 	}
17042 
17043 	if (soc->arch_ops.txrx_soc_srng_alloc) {
17044 		if (soc->arch_ops.txrx_soc_srng_alloc(soc)) {
17045 			dp_init_err("%pK: dp_srng_alloc failed for arch rings",
17046 				    soc);
17047 			goto fail1;
17048 		}
17049 	}
17050 
17051 	return QDF_STATUS_SUCCESS;
17052 
17053 fail1:
17054 	dp_soc_srng_free(soc);
17055 	return QDF_STATUS_E_NOMEM;
17056 }
17057 
17058 static void dp_soc_cfg_dump(struct dp_soc *soc, uint32_t target_type)
17059 {
17060 	dp_init_info("DP soc Dump for Target = %d", target_type);
17061 	dp_init_info("ast_override_support = %d, da_war_enabled = %d,",
17062 		     soc->ast_override_support, soc->da_war_enabled);
17063 
17064 	wlan_cfg_dp_soc_ctx_dump(soc->wlan_cfg_ctx);
17065 }
17066 
17067 /**
17068  * dp_soc_cfg_init() - initialize target specific configuration
17069  *		       during dp_soc_init
17070  * @soc: dp soc handle
17071  */
17072 static void dp_soc_cfg_init(struct dp_soc *soc)
17073 {
17074 	uint32_t target_type;
17075 
17076 	target_type = hal_get_target_type(soc->hal_soc);
17077 	switch (target_type) {
17078 	case TARGET_TYPE_QCA6290:
17079 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
17080 					       REO_DST_RING_SIZE_QCA6290);
17081 		soc->ast_override_support = 1;
17082 		soc->da_war_enabled = false;
17083 		break;
17084 	case TARGET_TYPE_QCA6390:
17085 	case TARGET_TYPE_QCA6490:
17086 	case TARGET_TYPE_QCA6750:
17087 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
17088 					       REO_DST_RING_SIZE_QCA6290);
17089 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
17090 		soc->ast_override_support = 1;
17091 		if (soc->cdp_soc.ol_ops->get_con_mode &&
17092 		    soc->cdp_soc.ol_ops->get_con_mode() ==
17093 		    QDF_GLOBAL_MONITOR_MODE) {
17094 			int int_ctx;
17095 
17096 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
17097 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
17098 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
17099 			}
17100 		}
17101 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
17102 		break;
17103 	case TARGET_TYPE_KIWI:
17104 	case TARGET_TYPE_MANGO:
17105 		soc->ast_override_support = 1;
17106 		soc->per_tid_basize_max_tid = 8;
17107 
17108 		if (soc->cdp_soc.ol_ops->get_con_mode &&
17109 		    soc->cdp_soc.ol_ops->get_con_mode() ==
17110 		    QDF_GLOBAL_MONITOR_MODE) {
17111 			int int_ctx;
17112 
17113 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS;
17114 			     int_ctx++) {
17115 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
17116 				if (dp_is_monitor_mode_using_poll(soc))
17117 					soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
17118 			}
17119 		}
17120 
17121 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
17122 		soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev = 1;
17123 		break;
17124 	case TARGET_TYPE_QCA8074:
17125 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
17126 		soc->da_war_enabled = true;
17127 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
17128 		break;
17129 	case TARGET_TYPE_QCA8074V2:
17130 	case TARGET_TYPE_QCA6018:
17131 	case TARGET_TYPE_QCA9574:
17132 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
17133 		soc->ast_override_support = 1;
17134 		soc->per_tid_basize_max_tid = 8;
17135 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
17136 		soc->da_war_enabled = false;
17137 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
17138 		break;
17139 	case TARGET_TYPE_QCN9000:
17140 		soc->ast_override_support = 1;
17141 		soc->da_war_enabled = false;
17142 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
17143 		soc->per_tid_basize_max_tid = 8;
17144 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
17145 		soc->lmac_polled_mode = 0;
17146 		soc->wbm_release_desc_rx_sg_support = 1;
17147 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
17148 		break;
17149 	case TARGET_TYPE_QCA5018:
17150 	case TARGET_TYPE_QCN6122:
17151 	case TARGET_TYPE_QCN9160:
17152 		soc->ast_override_support = 1;
17153 		soc->da_war_enabled = false;
17154 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
17155 		soc->per_tid_basize_max_tid = 8;
17156 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS_11AX;
17157 		soc->disable_mac1_intr = 1;
17158 		soc->disable_mac2_intr = 1;
17159 		soc->wbm_release_desc_rx_sg_support = 1;
17160 		break;
17161 	case TARGET_TYPE_QCN9224:
17162 		soc->ast_override_support = 1;
17163 		soc->da_war_enabled = false;
17164 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
17165 		soc->per_tid_basize_max_tid = 8;
17166 		soc->wbm_release_desc_rx_sg_support = 1;
17167 		soc->rxdma2sw_rings_not_supported = 1;
17168 		soc->wbm_sg_last_msdu_war = 1;
17169 		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
17170 		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
17171 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
17172 		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
17173 		soc->host_ast_db_enable = cfg_get(soc->ctrl_psoc,
17174 						  CFG_DP_HOST_AST_DB_ENABLE);
17175 		soc->features.wds_ext_ast_override_enable = true;
17176 		break;
17177 	case TARGET_TYPE_QCA5332:
17178 		soc->ast_override_support = 1;
17179 		soc->da_war_enabled = false;
17180 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
17181 		soc->per_tid_basize_max_tid = 8;
17182 		soc->wbm_release_desc_rx_sg_support = 1;
17183 		soc->rxdma2sw_rings_not_supported = 1;
17184 		soc->wbm_sg_last_msdu_war = 1;
17185 		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
17186 		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
17187 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS_5332;
17188 		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
17189 		soc->host_ast_db_enable = cfg_get(soc->ctrl_psoc,
17190 						  CFG_DP_HOST_AST_DB_ENABLE);
17191 		soc->features.wds_ext_ast_override_enable = true;
17192 		break;
17193 	default:
17194 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
17195 		qdf_assert_always(0);
17196 		break;
17197 	}
17198 	dp_soc_cfg_dump(soc, target_type);
17199 }
17200 
17201 /**
17202  * dp_soc_cfg_attach() - set target specific configuration in
17203  *			 dp soc cfg.
17204  * @soc: dp soc handle
17205  */
17206 static void dp_soc_cfg_attach(struct dp_soc *soc)
17207 {
17208 	int target_type;
17209 	int nss_cfg = 0;
17210 
17211 	target_type = hal_get_target_type(soc->hal_soc);
17212 	switch (target_type) {
17213 	case TARGET_TYPE_QCA6290:
17214 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
17215 					       REO_DST_RING_SIZE_QCA6290);
17216 		break;
17217 	case TARGET_TYPE_QCA6390:
17218 	case TARGET_TYPE_QCA6490:
17219 	case TARGET_TYPE_QCA6750:
17220 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
17221 					       REO_DST_RING_SIZE_QCA6290);
17222 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
17223 		break;
17224 	case TARGET_TYPE_KIWI:
17225 	case TARGET_TYPE_MANGO:
17226 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
17227 		break;
17228 	case TARGET_TYPE_QCA8074:
17229 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
17230 		break;
17231 	case TARGET_TYPE_QCA8074V2:
17232 	case TARGET_TYPE_QCA6018:
17233 	case TARGET_TYPE_QCA9574:
17234 	case TARGET_TYPE_QCN6122:
17235 	case TARGET_TYPE_QCN9160:
17236 	case TARGET_TYPE_QCA5018:
17237 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
17238 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
17239 		break;
17240 	case TARGET_TYPE_QCN9000:
17241 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
17242 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
17243 		break;
17244 	case TARGET_TYPE_QCN9224:
17245 	case TARGET_TYPE_QCA5332:
17246 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
17247 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
17248 		break;
17249 	default:
17250 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
17251 		qdf_assert_always(0);
17252 		break;
17253 	}
17254 
17255 	if (soc->cdp_soc.ol_ops->get_soc_nss_cfg)
17256 		nss_cfg = soc->cdp_soc.ol_ops->get_soc_nss_cfg(soc->ctrl_psoc);
17257 
17258 	wlan_cfg_set_dp_soc_nss_cfg(soc->wlan_cfg_ctx, nss_cfg);
17259 
17260 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
17261 		wlan_cfg_set_num_tx_desc_pool(soc->wlan_cfg_ctx, 0);
17262 		wlan_cfg_set_num_tx_ext_desc_pool(soc->wlan_cfg_ctx, 0);
17263 		wlan_cfg_set_num_tx_desc(soc->wlan_cfg_ctx, 0);
17264 		wlan_cfg_set_num_tx_ext_desc(soc->wlan_cfg_ctx, 0);
17265 		soc->init_tcl_cmd_cred_ring = false;
17266 		soc->num_tcl_data_rings =
17267 			wlan_cfg_num_nss_tcl_data_rings(soc->wlan_cfg_ctx);
17268 		soc->num_reo_dest_rings =
17269 			wlan_cfg_num_nss_reo_dest_rings(soc->wlan_cfg_ctx);
17270 
17271 	} else {
17272 		soc->init_tcl_cmd_cred_ring = true;
17273 		soc->num_tx_comp_rings =
17274 			wlan_cfg_num_tx_comp_rings(soc->wlan_cfg_ctx);
17275 		soc->num_tcl_data_rings =
17276 			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
17277 		soc->num_reo_dest_rings =
17278 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
17279 	}
17280 
17281 	soc->arch_ops.soc_cfg_attach(soc);
17282 }
17283 
17284 static inline  void dp_pdev_set_default_reo(struct dp_pdev *pdev)
17285 {
17286 	struct dp_soc *soc = pdev->soc;
17287 
17288 	switch (pdev->pdev_id) {
17289 	case 0:
17290 		pdev->reo_dest =
17291 			wlan_cfg_radio0_default_reo_get(soc->wlan_cfg_ctx);
17292 		break;
17293 
17294 	case 1:
17295 		pdev->reo_dest =
17296 			wlan_cfg_radio1_default_reo_get(soc->wlan_cfg_ctx);
17297 		break;
17298 
17299 	case 2:
17300 		pdev->reo_dest =
17301 			wlan_cfg_radio2_default_reo_get(soc->wlan_cfg_ctx);
17302 		break;
17303 
17304 	default:
17305 		dp_init_err("%pK: Invalid pdev_id %d for reo selection",
17306 			    soc, pdev->pdev_id);
17307 		break;
17308 	}
17309 }
17310 
17311 static QDF_STATUS dp_pdev_init(struct cdp_soc_t *txrx_soc,
17312 				      HTC_HANDLE htc_handle,
17313 				      qdf_device_t qdf_osdev,
17314 				      uint8_t pdev_id)
17315 {
17316 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
17317 	int nss_cfg;
17318 	void *sojourn_buf;
17319 
17320 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
17321 	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
17322 
17323 	soc_cfg_ctx = soc->wlan_cfg_ctx;
17324 	pdev->soc = soc;
17325 	pdev->pdev_id = pdev_id;
17326 
17327 	/*
17328 	 * Variable to prevent double pdev deinitialization during
17329 	 * radio detach execution .i.e. in the absence of any vdev.
17330 	 */
17331 	pdev->pdev_deinit = 0;
17332 
17333 	if (dp_wdi_event_attach(pdev)) {
17334 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
17335 			  "dp_wdi_evet_attach failed");
17336 		goto fail0;
17337 	}
17338 
17339 	if (dp_pdev_srng_init(pdev)) {
17340 		dp_init_err("%pK: Failed to initialize pdev srng rings", soc);
17341 		goto fail1;
17342 	}
17343 
17344 	/* Initialize descriptors in TCL Rings used by IPA */
17345 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
17346 		hal_tx_init_data_ring(soc->hal_soc,
17347 				      soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng);
17348 		dp_ipa_hal_tx_init_alt_data_ring(soc);
17349 	}
17350 
17351 	/*
17352 	 * Initialize command/credit ring descriptor
17353 	 * Command/CREDIT ring also used for sending DATA cmds
17354 	 */
17355 	dp_tx_init_cmd_credit_ring(soc);
17356 
17357 	dp_tx_pdev_init(pdev);
17358 
17359 	/*
17360 	 * set nss pdev config based on soc config
17361 	 */
17362 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
17363 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
17364 					 (nss_cfg & (1 << pdev_id)));
17365 	pdev->target_pdev_id =
17366 		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
17367 
17368 	if (soc->preferred_hw_mode == WMI_HOST_HW_MODE_2G_PHYB &&
17369 	    pdev->lmac_id == PHYB_2G_LMAC_ID) {
17370 		pdev->target_pdev_id = PHYB_2G_TARGET_PDEV_ID;
17371 	}
17372 
17373 	/* Reset the cpu ring map if radio is NSS offloaded */
17374 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
17375 		dp_soc_reset_cpu_ring_map(soc);
17376 		dp_soc_reset_intr_mask(soc);
17377 	}
17378 
17379 	/* Reset the cpu ring map if radio is NSS offloaded */
17380 	dp_soc_reset_ipa_vlan_intr_mask(soc);
17381 
17382 	TAILQ_INIT(&pdev->vdev_list);
17383 	qdf_spinlock_create(&pdev->vdev_list_lock);
17384 	pdev->vdev_count = 0;
17385 	pdev->is_lro_hash_configured = 0;
17386 
17387 	qdf_spinlock_create(&pdev->tx_mutex);
17388 	pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_INVALID_LMAC_ID;
17389 	pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_INVALID_LMAC_ID;
17390 	pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_INVALID_LMAC_ID;
17391 
17392 	DP_STATS_INIT(pdev);
17393 
17394 	dp_local_peer_id_pool_init(pdev);
17395 
17396 	dp_dscp_tid_map_setup(pdev);
17397 	dp_pcp_tid_map_setup(pdev);
17398 
17399 	/* set the reo destination during initialization */
17400 	dp_pdev_set_default_reo(pdev);
17401 
17402 	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
17403 
17404 	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
17405 			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
17406 			      TRUE);
17407 
17408 	if (!pdev->sojourn_buf) {
17409 		dp_init_err("%pK: Failed to allocate sojourn buf", soc);
17410 		goto fail2;
17411 	}
17412 	sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
17413 	qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));
17414 
17415 	qdf_event_create(&pdev->fw_peer_stats_event);
17416 	qdf_event_create(&pdev->fw_stats_event);
17417 	qdf_event_create(&pdev->fw_obss_stats_event);
17418 
17419 	pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
17420 	pdev->num_tx_spl_allowed =
17421 		wlan_cfg_get_num_tx_spl_desc(soc->wlan_cfg_ctx);
17422 	pdev->num_reg_tx_allowed =
17423 		pdev->num_tx_allowed - pdev->num_tx_spl_allowed;
17424 	if (dp_rxdma_ring_setup(soc, pdev)) {
17425 		dp_init_err("%pK: RXDMA ring config failed", soc);
17426 		goto fail3;
17427 	}
17428 
17429 	if (dp_init_ipa_rx_refill_buf_ring(soc, pdev))
17430 		goto fail3;
17431 
17432 	if (dp_ipa_ring_resource_setup(soc, pdev))
17433 		goto fail4;
17434 
17435 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
17436 		dp_init_err("%pK: dp_ipa_uc_attach failed", soc);
17437 		goto fail4;
17438 	}
17439 
17440 	if (dp_pdev_bkp_stats_attach(pdev) != QDF_STATUS_SUCCESS) {
17441 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
17442 			  FL("dp_pdev_bkp_stats_attach failed"));
17443 		goto fail5;
17444 	}
17445 
17446 	if (dp_monitor_pdev_init(pdev)) {
17447 		dp_init_err("%pK: dp_monitor_pdev_init failed\n", soc);
17448 		goto fail6;
17449 	}
17450 
17451 	/* initialize sw rx descriptors */
17452 	dp_rx_pdev_desc_pool_init(pdev);
17453 	/* allocate buffers and replenish the RxDMA ring */
17454 	dp_rx_pdev_buffers_alloc(pdev);
17455 
17456 	dp_init_tso_stats(pdev);
17457 
17458 	pdev->rx_fast_flag = false;
17459 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
17460 		qdf_dma_mem_stats_read(),
17461 		qdf_heap_mem_stats_read(),
17462 		qdf_skb_total_mem_stats_read());
17463 
17464 	return QDF_STATUS_SUCCESS;
17465 fail6:
17466 	dp_pdev_bkp_stats_detach(pdev);
17467 fail5:
17468 	dp_ipa_uc_detach(soc, pdev);
17469 fail4:
17470 	dp_deinit_ipa_rx_refill_buf_ring(soc, pdev);
17471 fail3:
17472 	dp_rxdma_ring_cleanup(soc, pdev);
17473 	qdf_nbuf_free(pdev->sojourn_buf);
17474 fail2:
17475 	qdf_spinlock_destroy(&pdev->tx_mutex);
17476 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
17477 	dp_pdev_srng_deinit(pdev);
17478 fail1:
17479 	dp_wdi_event_detach(pdev);
17480 fail0:
17481 	return QDF_STATUS_E_FAILURE;
17482 }
17483 
17484 /*
17485  * dp_pdev_init_wifi3() - Init txrx pdev
17486  * @htc_handle: HTC handle for host-target interface
17487  * @qdf_osdev: QDF OS device
17488  * @force: Force deinit
17489  *
17490  * Return: QDF_STATUS
17491  */
17492 static QDF_STATUS dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
17493 				     HTC_HANDLE htc_handle,
17494 				     qdf_device_t qdf_osdev,
17495 				     uint8_t pdev_id)
17496 {
17497 	return dp_pdev_init(txrx_soc, htc_handle, qdf_osdev, pdev_id);
17498 }
17499 
17500 #ifdef FEATURE_DIRECT_LINK
17501 struct dp_srng *dp_setup_direct_link_refill_ring(struct cdp_soc_t *soc_hdl,
17502 						 uint8_t pdev_id)
17503 {
17504 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
17505 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
17506 
17507 	if (!pdev) {
17508 		dp_err("DP pdev is NULL");
17509 		return NULL;
17510 	}
17511 
17512 	if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring4,
17513 			  RXDMA_BUF, DIRECT_LINK_REFILL_RING_ENTRIES, false)) {
17514 		dp_err("SRNG alloc failed for rx_refill_buf_ring4");
17515 		return NULL;
17516 	}
17517 
17518 	if (dp_srng_init(soc, &pdev->rx_refill_buf_ring4,
17519 			 RXDMA_BUF, DIRECT_LINK_REFILL_RING_IDX, 0)) {
17520 		dp_err("SRNG init failed for rx_refill_buf_ring4");
17521 		dp_srng_free(soc, &pdev->rx_refill_buf_ring4);
17522 		return NULL;
17523 	}
17524 
17525 	if (htt_srng_setup(soc->htt_handle, pdev_id,
17526 			   pdev->rx_refill_buf_ring4.hal_srng, RXDMA_BUF)) {
17527 		dp_srng_deinit(soc, &pdev->rx_refill_buf_ring4, RXDMA_BUF,
17528 			       DIRECT_LINK_REFILL_RING_IDX);
17529 		dp_srng_free(soc, &pdev->rx_refill_buf_ring4);
17530 		return NULL;
17531 	}
17532 
17533 	return &pdev->rx_refill_buf_ring4;
17534 }
17535 
17536 void dp_destroy_direct_link_refill_ring(struct cdp_soc_t *soc_hdl,
17537 					uint8_t pdev_id)
17538 {
17539 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
17540 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
17541 
17542 	if (!pdev) {
17543 		dp_err("DP pdev is NULL");
17544 		return;
17545 	}
17546 
17547 	dp_srng_deinit(soc, &pdev->rx_refill_buf_ring4, RXDMA_BUF, 0);
17548 	dp_srng_free(soc, &pdev->rx_refill_buf_ring4);
17549 }
17550 #endif
17551