xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision d0c05845839e5f2ba5a8dcebe0cd3e4cd4e8dfcf)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <wlan_ipa_obj_mgmt_api.h>
21 #include <qdf_types.h>
22 #include <qdf_lock.h>
23 #include <qdf_net_types.h>
24 #include <qdf_lro.h>
25 #include <qdf_module.h>
26 #include <hal_hw_headers.h>
27 #include <hal_api.h>
28 #include <hif.h>
29 #include <htt.h>
30 #include <wdi_event.h>
31 #include <queue.h>
32 #include "dp_types.h"
33 #include "dp_internal.h"
34 #include "dp_tx.h"
35 #include "dp_tx_desc.h"
36 #include "dp_rx.h"
37 #ifdef DP_RATETABLE_SUPPORT
38 #include "dp_ratetable.h"
39 #endif
40 #include <cdp_txrx_handle.h>
41 #include <wlan_cfg.h>
42 #include <wlan_utility.h>
43 #include "cdp_txrx_cmn_struct.h"
44 #include "cdp_txrx_stats_struct.h"
45 #include "cdp_txrx_cmn_reg.h"
46 #include <qdf_util.h>
47 #include "dp_peer.h"
48 #include "htt_stats.h"
49 #include "dp_htt.h"
50 #ifdef WLAN_SUPPORT_RX_FISA
51 #include <dp_fisa_rx.h>
52 #endif
53 #include "htt_ppdu_stats.h"
54 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
55 #include "cfg_ucfg_api.h"
56 #include <wlan_module_ids.h>
57 
58 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
59 #include "cdp_txrx_flow_ctrl_v2.h"
60 #else
61 
/**
 * cdp_dump_flow_pool_info() - dump flow pool info (no-op stub)
 * @soc: CDP SoC handle
 *
 * Stub used when QCA_LL_TX_FLOW_CONTROL_V2 is disabled; the real
 * implementation comes from cdp_txrx_flow_ctrl_v2.h otherwise.
 */
static inline void
cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
{
}
67 #endif
68 #ifdef WIFI_MONITOR_SUPPORT
69 #include <dp_mon.h>
70 #endif
71 #include "dp_ipa.h"
72 #ifdef FEATURE_WDS
73 #include "dp_txrx_wds.h"
74 #endif
75 #ifdef WLAN_SUPPORT_MSCS
76 #include "dp_mscs.h"
77 #endif
78 #ifdef WLAN_SUPPORT_MESH_LATENCY
79 #include "dp_mesh_latency.h"
80 #endif
81 #ifdef WLAN_SUPPORT_SCS
82 #include "dp_scs.h"
83 #endif
84 #ifdef ATH_SUPPORT_IQUE
85 #include "dp_txrx_me.h"
86 #endif
87 #if defined(DP_CON_MON)
88 #ifndef REMOVE_PKT_LOG
89 #include <pktlog_ac_api.h>
90 #include <pktlog_ac.h>
91 #endif
92 #endif
93 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
94 #include <wlan_dp_swlm.h>
95 #endif
96 #ifdef CONFIG_SAWF_DEF_QUEUES
97 #include "dp_sawf.h"
98 #endif
99 #ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
100 #include <target_if_dp.h>
101 #endif
102 
103 #ifdef WLAN_FEATURE_STATS_EXT
104 #define INIT_RX_HW_STATS_LOCK(_soc) \
105 	qdf_spinlock_create(&(_soc)->rx_hw_stats_lock)
106 #define DEINIT_RX_HW_STATS_LOCK(_soc) \
107 	qdf_spinlock_destroy(&(_soc)->rx_hw_stats_lock)
108 #else
109 #define INIT_RX_HW_STATS_LOCK(_soc)  /* no op */
110 #define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */
111 #endif
112 
113 #if defined(DP_PEER_EXTENDED_API) || defined(WLAN_DP_PENDING_MEM_FLUSH)
114 #define SET_PEER_REF_CNT_ONE(_peer) \
115 	qdf_atomic_set(&(_peer)->ref_cnt, 1)
116 #else
117 #define SET_PEER_REF_CNT_ONE(_peer)
118 #endif
119 
120 #ifdef WLAN_SYSFS_DP_STATS
/* sysfs event wait time for firmware stat request in milliseconds */
122 #define WLAN_SYSFS_STAT_REQ_WAIT_MS 3000
123 #endif
124 
125 #ifdef QCA_DP_ENABLE_TX_COMP_RING4
126 #define TXCOMP_RING4_NUM 3
127 #else
128 #define TXCOMP_RING4_NUM WBM2SW_TXCOMP_RING4_NUM
129 #endif
130 
131 #ifdef QCA_DP_TX_FW_METADATA_V2
132 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
133 		HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
134 #else
135 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
136 		HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
137 #endif
138 
139 QDF_COMPILE_TIME_ASSERT(max_rx_rings_check,
140 			MAX_REO_DEST_RINGS == CDP_MAX_RX_RINGS);
141 
142 QDF_COMPILE_TIME_ASSERT(max_tx_rings_check,
143 			MAX_TCL_DATA_RINGS == CDP_MAX_TX_COMP_RINGS);
144 
145 #define dp_init_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_INIT, params)
146 #define dp_init_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_INIT, params)
147 #define dp_init_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_INIT, params)
148 #define dp_init_info(params...) \
149 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_INIT, ## params)
150 #define dp_init_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_INIT, params)
151 
152 #define dp_vdev_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_VDEV, params)
153 #define dp_vdev_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_VDEV, params)
154 #define dp_vdev_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_VDEV, params)
155 #define dp_vdev_info(params...) \
156 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_VDEV, ## params)
157 #define dp_vdev_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_VDEV, params)
158 
159 void dp_configure_arch_ops(struct dp_soc *soc);
160 qdf_size_t dp_get_soc_context_size(uint16_t device_id);
161 
162 /*
163  * The max size of cdp_peer_stats_param_t is limited to 16 bytes.
164  * If the buffer size is exceeding this size limit,
165  * dp_txrx_get_peer_stats is to be used instead.
166  */
167 QDF_COMPILE_TIME_ASSERT(cdp_peer_stats_param_t_max_size,
168 			(sizeof(cdp_peer_stats_param_t) <= 16));
169 
170 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
171 /*
172  * If WLAN_CFG_INT_NUM_CONTEXTS is changed, HIF_NUM_INT_CONTEXTS
173  * also should be updated accordingly
174  */
175 QDF_COMPILE_TIME_ASSERT(num_intr_grps,
176 			HIF_NUM_INT_CONTEXTS == WLAN_CFG_INT_NUM_CONTEXTS);
177 
178 /*
179  * HIF_EVENT_HIST_MAX should always be power of 2
180  */
181 QDF_COMPILE_TIME_ASSERT(hif_event_history_size,
182 			(HIF_EVENT_HIST_MAX & (HIF_EVENT_HIST_MAX - 1)) == 0);
183 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
184 
185 /*
186  * If WLAN_CFG_INT_NUM_CONTEXTS is changed,
187  * WLAN_CFG_INT_NUM_CONTEXTS_MAX should also be updated
188  */
189 QDF_COMPILE_TIME_ASSERT(wlan_cfg_num_int_ctxs,
190 			WLAN_CFG_INT_NUM_CONTEXTS_MAX >=
191 			WLAN_CFG_INT_NUM_CONTEXTS);
192 
193 static QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl);
194 static QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl);
195 
196 static void dp_pdev_srng_deinit(struct dp_pdev *pdev);
197 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev);
198 static void dp_pdev_srng_free(struct dp_pdev *pdev);
199 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev);
200 
201 static void dp_soc_srng_deinit(struct dp_soc *soc);
202 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc);
203 static void dp_soc_srng_free(struct dp_soc *soc);
204 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc);
205 
206 static void dp_soc_cfg_init(struct dp_soc *soc);
207 static void dp_soc_cfg_attach(struct dp_soc *soc);
208 
209 static inline
210 QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
211 				struct cdp_pdev_attach_params *params);
212 
213 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id);
214 
215 static QDF_STATUS
216 dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
217 		   HTC_HANDLE htc_handle,
218 		   qdf_device_t qdf_osdev,
219 		   uint8_t pdev_id);
220 
221 static QDF_STATUS
222 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id, int force);
223 
224 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc);
225 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc);
226 
227 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
228 		  struct hif_opaque_softc *hif_handle);
229 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
230 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc,
231 				       uint8_t pdev_id,
232 				       int force);
233 static struct dp_soc *
234 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
235 	      struct cdp_soc_attach_params *params);
236 static inline QDF_STATUS dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl,
237 					      uint8_t vdev_id,
238 					      uint8_t *peer_mac_addr,
239 					      enum cdp_peer_type peer_type);
240 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
241 				       uint8_t vdev_id,
242 				       uint8_t *peer_mac, uint32_t bitmap,
243 				       enum cdp_peer_type peer_type);
244 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
245 				bool unmap_only,
246 				bool mlo_peers_only);
247 #ifdef ENABLE_VERBOSE_DEBUG
248 bool is_dp_verbose_debug_enabled;
249 #endif
250 
251 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
252 static bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
253 static void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
254 			   bool enable);
255 static inline void
256 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
257 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats);
258 static inline void
259 dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
260 #endif
261 
262 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
263 						uint8_t index);
264 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index);
265 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index);
266 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
267 						 uint8_t index);
268 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
269 					    enum hal_ring_type ring_type,
270 					    int ring_num);
271 
272 #ifdef FEATURE_AST
273 void dp_print_mlo_ast_stats(struct dp_soc *soc);
274 #endif
275 
276 #define DP_INTR_POLL_TIMER_MS	5
277 
278 #define MON_VDEV_TIMER_INIT 0x1
279 #define MON_VDEV_TIMER_RUNNING 0x2
280 
281 #define DP_MCS_LENGTH (6*MAX_MCS)
282 
283 #define DP_CURR_FW_STATS_AVAIL 19
284 #define DP_HTT_DBG_EXT_STATS_MAX 256
285 #define DP_MAX_SLEEP_TIME 100
286 #ifndef QCA_WIFI_3_0_EMU
287 #define SUSPEND_DRAIN_WAIT 500
288 #else
289 #define SUSPEND_DRAIN_WAIT 3000
290 #endif
291 
292 #ifdef IPA_OFFLOAD
293 /* Exclude IPA rings from the interrupt context */
294 #define TX_RING_MASK_VAL	0xb
295 #define RX_RING_MASK_VAL	0x7
296 #else
297 #define TX_RING_MASK_VAL	0xF
298 #define RX_RING_MASK_VAL	0xF
299 #endif
300 
301 #define STR_MAXLEN	64
302 
303 #define RNG_ERR		"SRNG setup failed for"
304 
/**
 * default_dscp_tid_map - Default DSCP-TID mapping
 *
 * One TID per 8 consecutive DSCP values (effectively TID = DSCP >> 3):
 *
 * DSCP        TID
 * 000000      0
 * 001000      1
 * 010000      2
 * 011000      3
 * 100000      4
 * 101000      5
 * 110000      6
 * 111000      7
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7,
};
328 
/**
 * default_pcp_tid_map - Default PCP-TID mapping
 *
 * Identity mapping (TID = PCP):
 *
 * PCP     TID
 * 000      0
 * 001      1
 * 010      2
 * 011      3
 * 100      4
 * 101      5
 * 110      6
 * 111      7
 */
static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
	0, 1, 2, 3, 4, 5, 6, 7,
};
345 
/**
 * dp_cpu_ring_map - CPU to TX ring map
 *
 * Indexed [DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX]; each
 * row gives the TCL ring used per interrupt context for one map id.
 * NOTE(review): row meaning per map id is defined elsewhere — confirm
 * against DP_NSS_CPU_RING_MAP users before editing rows.
 */
uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
#endif
};

qdf_export_symbol(dp_cpu_ring_map);
362 
/**
 * enum dp_stats_type - Select the type of statistics
 * @STATS_FW: firmware-provided statistics
 * @STATS_HOST: host (driver) statistics
 * @STATS_TYPE_MAX: number of columns in dp_stats_mapping_table
 */
enum dp_stats_type {
	STATS_FW = 0,
	STATS_HOST = 1,
	STATS_TYPE_MAX = 2,
};
371 
/**
 * enum dp_fw_stats - General Firmware statistics options
 * @TXRX_FW_STATS_INVALID: marks a dp_stats_mapping_table row with no
 *                         corresponding FW stats request
 */
enum dp_fw_stats {
	TXRX_FW_STATS_INVALID	= -1,
};
379 
/**
 * dp_stats_mapping_table - Firmware and Host statistics
 * currently supported
 *
 * Each row pairs an HTT FW stats id (or TXRX_FW_STATS_INVALID) with a
 * host stats id (or TXRX_HOST_STATS_INVALID).
 * NOTE(review): rows appear to be indexed positionally by the requested
 * stats value — confirm with callers before reordering or inserting rows.
 */
const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
	/* Last ENUM for HTT FW STATS */
	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
	{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
	{TXRX_FW_STATS_INVALID, TXRX_NAPI_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_FSE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_HAL_REG_WRITE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_REO_HW_DESC_DUMP},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_WBM_IDLE_HPTP_DUMP},
	{TXRX_FW_STATS_INVALID, TXRX_SRNG_USAGE_WM_STATS},
	{HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_SOUNDING_INFO, TXRX_HOST_STATS_INVALID}
};
427 
428 /* MCL specific functions */
429 #if defined(DP_CON_MON)
430 
431 #ifdef DP_CON_MON_MSI_ENABLED
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * With DP_CON_MON_MSI_ENABLED, monitor mode rings are serviced from the
 * regular (MSI) interrupt contexts, so the configured rx mon ring mask
 * for this context is returned (unlike the non-MSI variant below, which
 * returns 0 and defers monitor processing to a timer context).
 *
 * Return: rx mon ring mask configured for this interrupt context
 */
static inline uint32_t
dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
}
452 #else
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * For MCL, monitor mode rings are being processed in timer contexts (polled).
 * This function is returning 0, since in interrupt mode (softirq based RX),
 * we do not want to process monitor mode rings in a softirq.
 *
 * So, in case packet log is enabled for SAP/STA/P2P modes,
 * regular interrupt processing will not process monitor mode rings. It would
 * be done in a separate timer context.
 *
 * Return: 0
 */
static inline uint32_t
dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return 0;
}
473 #endif
474 
475 #ifdef IPA_OFFLOAD
476 /**
477  * dp_get_num_rx_contexts() - get number of RX contexts
478  * @soc_hdl: cdp opaque soc handle
479  *
480  * Return: number of RX contexts
481  */
482 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
483 {
484 	int num_rx_contexts;
485 	uint32_t reo_ring_map;
486 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
487 
488 	reo_ring_map = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
489 
490 	switch (soc->arch_id) {
491 	case CDP_ARCH_TYPE_BE:
492 		/* 2 REO rings are used for IPA */
493 		reo_ring_map &=  ~(BIT(3) | BIT(7));
494 
495 		break;
496 	case CDP_ARCH_TYPE_LI:
497 		/* 1 REO ring is used for IPA */
498 		reo_ring_map &=  ~BIT(3);
499 		break;
500 	default:
501 		dp_err("unkonwn arch_id 0x%x", soc->arch_id);
502 		QDF_BUG(0);
503 	}
504 	/*
505 	 * qdf_get_hweight32 prefer over qdf_get_hweight8 in case map is scaled
506 	 * in future
507 	 */
508 	num_rx_contexts = qdf_get_hweight32(reo_ring_map);
509 
510 	return num_rx_contexts;
511 }
512 #else
513 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
514 {
515 	int num_rx_contexts;
516 	uint32_t reo_config;
517 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
518 
519 	reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
520 	/*
521 	 * qdf_get_hweight32 prefer over qdf_get_hweight8 in case map is scaled
522 	 * in future
523 	 */
524 	num_rx_contexts = qdf_get_hweight32(reo_config);
525 
526 	return num_rx_contexts;
527 }
528 #endif
529 
530 #else
531 
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * Non-DP_CON_MON build: monitor rings are serviced from the regular
 * interrupt contexts, so the configured mask is returned as-is.
 *
 * Return: mon mask value
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
}
544 
/**
 * dp_soc_reset_mon_intr_mask() - reset mon intr mask
 * @soc: pointer to dp_soc handle
 *
 * Clears rx_mon_ring_mask and host2rxdma_mon_ring_mask in every
 * configured interrupt context.
 *
 * Return: none
 */
void dp_soc_reset_mon_intr_mask(struct dp_soc *soc)
{
	int i;

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].rx_mon_ring_mask = 0;
		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
	}
}
560 
561 qdf_export_symbol(dp_soc_reset_mon_intr_mask);
562 
/*
 * dp_service_lmac_rings() - timer handler to reap lmac rings
 * @arg: SoC Handle (struct dp_soc *)
 *
 * For every LMAC with a pdev mapped: runs dp_monitor_process, runs
 * dp_rxdma_err_process for each interrupt context, and replenishes the
 * RX refill ring when RXDMA_BUF is not NSS-offloaded. Re-arms itself
 * via lmac_reap_timer at the end.
 *
 * Return: none
 */
static void dp_service_lmac_rings(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;
	int ring = 0, i;
	struct dp_pdev *pdev = NULL;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	/* Process LMAC interrupts */
	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
		int mac_for_pdev = ring;
		struct dp_srng *rx_refill_buf_ring;

		/* skip lmac ids that have no pdev attached */
		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
		if (!pdev)
			continue;

		rx_refill_buf_ring = &soc->rx_refill_buf_ring[mac_for_pdev];

		dp_monitor_process(soc, NULL, mac_for_pdev,
				   QCA_NAPI_BUDGET);

		for (i = 0;
		     i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
			dp_rxdma_err_process(&soc->intr_ctx[i], soc,
					     mac_for_pdev,
					     QCA_NAPI_BUDGET);

		/* replenish only if the ring is host-owned (not NSS) */
		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF,
						  mac_for_pdev))
			dp_rx_buffers_replenish(soc, mac_for_pdev,
						rx_refill_buf_ring,
						&soc->rx_desc_buf[mac_for_pdev],
						0, &desc_list, &tail);
	}

	/* re-arm the reap timer for the next poll interval */
	qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
}
608 
609 #endif
610 
611 #ifdef FEATURE_MEC
/**
 * dp_peer_mec_flush_entries() - detach and free all MEC hash table entries
 * @soc: Datapath SOC handle
 *
 * Entries are detached into a local list under mec_lock, then freed
 * after the lock is dropped.
 *
 * Return: none
 */
void dp_peer_mec_flush_entries(struct dp_soc *soc)
{
	unsigned int index;
	struct dp_mec_entry *mecentry, *mecentry_next;

	/* local list collecting detached entries for freeing outside lock */
	TAILQ_HEAD(, dp_mec_entry) free_list;
	TAILQ_INIT(&free_list);

	/* nothing to do if the hash table was never created or is empty */
	if (!soc->mec_hash.mask)
		return;

	if (!soc->mec_hash.bins)
		return;

	if (!qdf_atomic_read(&soc->mec_cnt))
		return;

	qdf_spin_lock_bh(&soc->mec_lock);
	for (index = 0; index <= soc->mec_hash.mask; index++) {
		if (!TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
			TAILQ_FOREACH_SAFE(mecentry, &soc->mec_hash.bins[index],
					   hash_list_elem, mecentry_next) {
			    dp_peer_mec_detach_entry(soc, mecentry, &free_list);
			}
		}
	}
	qdf_spin_unlock_bh(&soc->mec_lock);

	/* free the collected entries now that mec_lock is released */
	dp_peer_mec_free_list(soc, &free_list);
}
642 
643 /**
644  * dp_print_mec_entries() - Dump MEC entries in table
645  * @soc: Datapath soc handle
646  *
647  * Return: none
648  */
649 static void dp_print_mec_stats(struct dp_soc *soc)
650 {
651 	int i;
652 	uint32_t index;
653 	struct dp_mec_entry *mecentry = NULL, *mec_list;
654 	uint32_t num_entries = 0;
655 
656 	DP_PRINT_STATS("MEC Stats:");
657 	DP_PRINT_STATS("   Entries Added   = %d", soc->stats.mec.added);
658 	DP_PRINT_STATS("   Entries Deleted = %d", soc->stats.mec.deleted);
659 
660 	if (!qdf_atomic_read(&soc->mec_cnt))
661 		return;
662 
663 	mec_list = qdf_mem_malloc(sizeof(*mecentry) * DP_PEER_MAX_MEC_ENTRY);
664 	if (!mec_list) {
665 		dp_peer_warn("%pK: failed to allocate mec_list", soc);
666 		return;
667 	}
668 
669 	DP_PRINT_STATS("MEC Table:");
670 	for (index = 0; index <= soc->mec_hash.mask; index++) {
671 		qdf_spin_lock_bh(&soc->mec_lock);
672 		if (TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
673 			qdf_spin_unlock_bh(&soc->mec_lock);
674 			continue;
675 		}
676 
677 		TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index],
678 			      hash_list_elem) {
679 			qdf_mem_copy(&mec_list[num_entries], mecentry,
680 				     sizeof(*mecentry));
681 			num_entries++;
682 		}
683 		qdf_spin_unlock_bh(&soc->mec_lock);
684 	}
685 
686 	if (!num_entries) {
687 		qdf_mem_free(mec_list);
688 		return;
689 	}
690 
691 	for (i = 0; i < num_entries; i++) {
692 		DP_PRINT_STATS("%6d mac_addr = " QDF_MAC_ADDR_FMT
693 			       " is_active = %d pdev_id = %d vdev_id = %d",
694 			       i,
695 			       QDF_MAC_ADDR_REF(mec_list[i].mac_addr.raw),
696 			       mec_list[i].is_active,
697 			       mec_list[i].pdev_id,
698 			       mec_list[i].vdev_id);
699 	}
700 	qdf_mem_free(mec_list);
701 }
702 #else
/* No-op stub when FEATURE_MEC is disabled */
static void dp_print_mec_stats(struct dp_soc *soc)
{
}
706 #endif
707 
708 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
709 				 uint8_t vdev_id,
710 				 uint8_t *peer_mac,
711 				 uint8_t *mac_addr,
712 				 enum cdp_txrx_ast_entry_type type,
713 				 uint32_t flags)
714 {
715 	int ret = -1;
716 	QDF_STATUS status = QDF_STATUS_SUCCESS;
717 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
718 						       peer_mac, 0, vdev_id,
719 						       DP_MOD_ID_CDP);
720 
721 	if (!peer) {
722 		dp_peer_debug("Peer is NULL!");
723 		return ret;
724 	}
725 
726 	status = dp_peer_add_ast((struct dp_soc *)soc_hdl,
727 				 peer,
728 				 mac_addr,
729 				 type,
730 				 flags);
731 	if ((status == QDF_STATUS_SUCCESS) ||
732 	    (status == QDF_STATUS_E_ALREADY) ||
733 	    (status == QDF_STATUS_E_AGAIN))
734 		ret = 0;
735 
736 	dp_hmwds_ast_add_notify(peer, mac_addr,
737 				type, status, false);
738 
739 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
740 
741 	return ret;
742 }
743 
744 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
745 						uint8_t vdev_id,
746 						uint8_t *peer_mac,
747 						uint8_t *wds_macaddr,
748 						uint32_t flags)
749 {
750 	int status = -1;
751 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
752 	struct dp_ast_entry  *ast_entry = NULL;
753 	struct dp_peer *peer;
754 
755 	if (soc->ast_offload_support)
756 		return status;
757 
758 	peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
759 				      peer_mac, 0, vdev_id,
760 				      DP_MOD_ID_CDP);
761 
762 	if (!peer) {
763 		dp_peer_debug("Peer is NULL!");
764 		return status;
765 	}
766 
767 	qdf_spin_lock_bh(&soc->ast_lock);
768 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
769 						    peer->vdev->pdev->pdev_id);
770 
771 	if (ast_entry) {
772 		status = dp_peer_update_ast(soc,
773 					    peer,
774 					    ast_entry, flags);
775 	}
776 	qdf_spin_unlock_bh(&soc->ast_lock);
777 
778 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
779 
780 	return status;
781 }
782 
783 /*
784  * dp_peer_reset_ast_entries() - Deletes all HMWDS entries for a peer
785  * @soc_handle:		Datapath SOC handle
786  * @peer:		DP peer
787  * @arg:		callback argument
788  *
789  * Return: None
790  */
791 static void
792 dp_peer_reset_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
793 {
794 	struct dp_ast_entry *ast_entry = NULL;
795 	struct dp_ast_entry *tmp_ast_entry;
796 
797 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) {
798 		if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
799 		    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
800 			dp_peer_del_ast(soc, ast_entry);
801 	}
802 }
803 
/*
 * dp_wds_reset_ast_wifi3() - Delete HMWDS AST entries for a peer or wds mac
 * @soc_hdl:		Datapath SOC handle
 * @wds_macaddr:	WDS entry MAC Address
 * @peer_mac_addr:	peer MAC Address
 * @vdev_id:		id of vdev handle
 *
 * When peer_mac_addr is set, all WDS_HM/WDS_HM_SEC entries of that peer
 * are deleted; otherwise only the entry matching wds_macaddr (if it is
 * a WDS_HM/WDS_HM_SEC type) on this pdev.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
					 uint8_t *wds_macaddr,
					 uint8_t *peer_mac_addr,
					 uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_peer *peer;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;

	/* host AST table is not maintained when AST is offloaded */
	if (soc->ast_offload_support)
		return QDF_STATUS_E_FAILURE;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	pdev = vdev->pdev;

	if (peer_mac_addr) {
		peer = dp_peer_find_hash_find(soc, peer_mac_addr,
					      0, vdev->vdev_id,
					      DP_MOD_ID_CDP);
		if (!peer) {
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
			return QDF_STATUS_E_FAILURE;
		}

		qdf_spin_lock_bh(&soc->ast_lock);
		dp_peer_reset_ast_entries(soc, peer, NULL);
		qdf_spin_unlock_bh(&soc->ast_lock);
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	} else if (wds_macaddr) {
		qdf_spin_lock_bh(&soc->ast_lock);
		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
							    pdev->pdev_id);

		if (ast_entry) {
			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
				dp_peer_del_ast(soc, ast_entry);
		}
		qdf_spin_unlock_bh(&soc->ast_lock);
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}
862 
863 /*
864  * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entry
865  * @soc:		Datapath SOC handle
866  * @vdev_id:		id of vdev object
867  *
868  * Return: QDF_STATUS
869  */
870 static QDF_STATUS
871 dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
872 			     uint8_t vdev_id)
873 {
874 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
875 
876 	if (soc->ast_offload_support)
877 		return QDF_STATUS_SUCCESS;
878 
879 	qdf_spin_lock_bh(&soc->ast_lock);
880 
881 	dp_soc_iterate_peer(soc, dp_peer_reset_ast_entries, NULL,
882 			    DP_MOD_ID_CDP);
883 	qdf_spin_unlock_bh(&soc->ast_lock);
884 
885 	return QDF_STATUS_SUCCESS;
886 }
887 
888 /*
889  * dp_peer_flush_ast_entries() - Delete all wds and hmwds ast entries of a peer
890  * @soc:		Datapath SOC
891  * @peer:		Datapath peer
892  * @arg:		arg to callback
893  *
894  * Return: None
895  */
896 static void
897 dp_peer_flush_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
898 {
899 	struct dp_ast_entry *ase = NULL;
900 	struct dp_ast_entry *temp_ase;
901 
902 	DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
903 		if ((ase->type ==
904 			CDP_TXRX_AST_TYPE_STATIC) ||
905 			(ase->type ==
906 			 CDP_TXRX_AST_TYPE_SELF) ||
907 			(ase->type ==
908 			 CDP_TXRX_AST_TYPE_STA_BSS))
909 			continue;
910 		dp_peer_del_ast(soc, ase);
911 	}
912 }
913 
/*
 * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entry
 * @soc_hdl:		Datapath SOC handle
 *
 * Iterates all peers under ast_lock and also flushes the MEC table
 * afterwards.
 *
 * Return: None
 */
static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
{
	struct dp_soc *soc = (struct dp_soc *) soc_hdl;

	qdf_spin_lock_bh(&soc->ast_lock);

	dp_soc_iterate_peer(soc, dp_peer_flush_ast_entries, NULL,
			    DP_MOD_ID_CDP);

	qdf_spin_unlock_bh(&soc->ast_lock);
	dp_peer_mec_flush_entries(soc);
}
932 
/**
 * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
 *                                       and return ast entry information
 *                                       of first ast entry found in the
 *                                       table with given mac address
 *
 * @soc_hdl: data path soc handle
 * @ast_mac_addr: AST entry mac address
 * @ast_entry_info: ast entry information
 *
 * Return: true if ast entry found with ast_mac_addr
 *         false if ast entry not found
 */
static bool dp_peer_get_ast_info_by_soc_wifi3
	(struct cdp_soc_t *soc_hdl,
	 uint8_t *ast_mac_addr,
	 struct cdp_ast_entry_info *ast_entry_info)
{
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_peer *peer = NULL;

	/* host AST lookup is skipped when AST handling is offloaded */
	if (soc->ast_offload_support)
		return false;

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
	/* ignore entries whose delete is in flight with no pending callback */
	if ((!ast_entry) ||
	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}

	/* take a peer reference so the peer mac can be read safely */
	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
				     DP_MOD_ID_AST);
	if (!peer) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}

	ast_entry_info->type = ast_entry->type;
	ast_entry_info->pdev_id = ast_entry->pdev_id;
	ast_entry_info->vdev_id = ast_entry->vdev_id;
	ast_entry_info->peer_id = ast_entry->peer_id;
	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
		     &peer->mac_addr.raw[0],
		     QDF_MAC_ADDR_SIZE);
	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
	qdf_spin_unlock_bh(&soc->ast_lock);
	return true;
}
985 
986 /**
987  * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
988  *                                          and return ast entry information
989  *                                          if mac address and pdev_id matches
990  *
991  * @soc : data path soc handle
992  * @ast_mac_addr : AST entry mac address
993  * @pdev_id : pdev_id
994  * @ast_entry_info : ast entry information
995  *
996  * return : true if ast entry found with ast_mac_addr
997  *          false if ast entry not found
998  */
999 static bool dp_peer_get_ast_info_by_pdevid_wifi3
1000 		(struct cdp_soc_t *soc_hdl,
1001 		 uint8_t *ast_mac_addr,
1002 		 uint8_t pdev_id,
1003 		 struct cdp_ast_entry_info *ast_entry_info)
1004 {
1005 	struct dp_ast_entry *ast_entry;
1006 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
1007 	struct dp_peer *peer = NULL;
1008 
1009 	if (soc->ast_offload_support)
1010 		return false;
1011 
1012 	qdf_spin_lock_bh(&soc->ast_lock);
1013 
1014 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr,
1015 						    pdev_id);
1016 
1017 	if ((!ast_entry) ||
1018 	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
1019 		qdf_spin_unlock_bh(&soc->ast_lock);
1020 		return false;
1021 	}
1022 
1023 	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
1024 				     DP_MOD_ID_AST);
1025 	if (!peer) {
1026 		qdf_spin_unlock_bh(&soc->ast_lock);
1027 		return false;
1028 	}
1029 
1030 	ast_entry_info->type = ast_entry->type;
1031 	ast_entry_info->pdev_id = ast_entry->pdev_id;
1032 	ast_entry_info->vdev_id = ast_entry->vdev_id;
1033 	ast_entry_info->peer_id = ast_entry->peer_id;
1034 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
1035 		     &peer->mac_addr.raw[0],
1036 		     QDF_MAC_ADDR_SIZE);
1037 	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
1038 	qdf_spin_unlock_bh(&soc->ast_lock);
1039 	return true;
1040 }
1041 
1042 /**
1043  * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table
1044  *                            with given mac address
1045  *
1046  * @soc : data path soc handle
1047  * @ast_mac_addr : AST entry mac address
1048  * @callback : callback function to called on ast delete response from FW
1049  * @cookie : argument to be passed to callback
1050  *
1051  * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
1052  *          is sent
1053  *          QDF_STATUS_E_INVAL false if ast entry not found
1054  */
1055 static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
1056 					       uint8_t *mac_addr,
1057 					       txrx_ast_free_cb callback,
1058 					       void *cookie)
1059 
1060 {
1061 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1062 	struct dp_ast_entry *ast_entry = NULL;
1063 	txrx_ast_free_cb cb = NULL;
1064 	void *arg = NULL;
1065 
1066 	if (soc->ast_offload_support)
1067 		return -QDF_STATUS_E_INVAL;
1068 
1069 	qdf_spin_lock_bh(&soc->ast_lock);
1070 	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
1071 	if (!ast_entry) {
1072 		qdf_spin_unlock_bh(&soc->ast_lock);
1073 		return -QDF_STATUS_E_INVAL;
1074 	}
1075 
1076 	if (ast_entry->callback) {
1077 		cb = ast_entry->callback;
1078 		arg = ast_entry->cookie;
1079 	}
1080 
1081 	ast_entry->callback = callback;
1082 	ast_entry->cookie = cookie;
1083 
1084 	/*
1085 	 * if delete_in_progress is set AST delete is sent to target
1086 	 * and host is waiting for response should not send delete
1087 	 * again
1088 	 */
1089 	if (!ast_entry->delete_in_progress)
1090 		dp_peer_del_ast(soc, ast_entry);
1091 
1092 	qdf_spin_unlock_bh(&soc->ast_lock);
1093 	if (cb) {
1094 		cb(soc->ctrl_psoc,
1095 		   dp_soc_to_cdp_soc(soc),
1096 		   arg,
1097 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1098 	}
1099 	return QDF_STATUS_SUCCESS;
1100 }
1101 
1102 /**
1103  * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
1104  *                                   table if mac address and pdev_id matches
1105  *
1106  * @soc : data path soc handle
1107  * @ast_mac_addr : AST entry mac address
1108  * @pdev_id : pdev id
1109  * @callback : callback function to called on ast delete response from FW
1110  * @cookie : argument to be passed to callback
1111  *
1112  * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
1113  *          is sent
1114  *          QDF_STATUS_E_INVAL false if ast entry not found
1115  */
1116 
1117 static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
1118 						uint8_t *mac_addr,
1119 						uint8_t pdev_id,
1120 						txrx_ast_free_cb callback,
1121 						void *cookie)
1122 
1123 {
1124 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1125 	struct dp_ast_entry *ast_entry;
1126 	txrx_ast_free_cb cb = NULL;
1127 	void *arg = NULL;
1128 
1129 	if (soc->ast_offload_support)
1130 		return -QDF_STATUS_E_INVAL;
1131 
1132 	qdf_spin_lock_bh(&soc->ast_lock);
1133 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);
1134 
1135 	if (!ast_entry) {
1136 		qdf_spin_unlock_bh(&soc->ast_lock);
1137 		return -QDF_STATUS_E_INVAL;
1138 	}
1139 
1140 	if (ast_entry->callback) {
1141 		cb = ast_entry->callback;
1142 		arg = ast_entry->cookie;
1143 	}
1144 
1145 	ast_entry->callback = callback;
1146 	ast_entry->cookie = cookie;
1147 
1148 	/*
1149 	 * if delete_in_progress is set AST delete is sent to target
1150 	 * and host is waiting for response should not sent delete
1151 	 * again
1152 	 */
1153 	if (!ast_entry->delete_in_progress)
1154 		dp_peer_del_ast(soc, ast_entry);
1155 
1156 	qdf_spin_unlock_bh(&soc->ast_lock);
1157 
1158 	if (cb) {
1159 		cb(soc->ctrl_psoc,
1160 		   dp_soc_to_cdp_soc(soc),
1161 		   arg,
1162 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1163 	}
1164 	return QDF_STATUS_SUCCESS;
1165 }
1166 
1167 /**
1168  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs
1169  * @ring_num: ring num of the ring being queried
1170  * @grp_mask: the grp_mask array for the ring type in question.
1171  *
1172  * The grp_mask array is indexed by group number and the bit fields correspond
1173  * to ring numbers.  We are finding which interrupt group a ring belongs to.
1174  *
1175  * Return: the index in the grp_mask array with the ring number.
1176  * -QDF_STATUS_E_NOENT if no entry is found
1177  */
1178 static int dp_srng_find_ring_in_mask(int ring_num, uint8_t *grp_mask)
1179 {
1180 	int ext_group_num;
1181 	uint8_t mask = 1 << ring_num;
1182 
1183 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
1184 	     ext_group_num++) {
1185 		if (mask & grp_mask[ext_group_num])
1186 			return ext_group_num;
1187 	}
1188 
1189 	return -QDF_STATUS_E_NOENT;
1190 }
1191 
1192 /**
1193  * dp_is_msi_group_number_invalid() - check msi_group_number valid or not
1194  * @msi_group_number: MSI group number.
1195  * @msi_data_count: MSI data count.
1196  *
1197  * Return: true if msi_group_number is invalid.
1198  */
1199 #ifdef WLAN_ONE_MSI_VECTOR
1200 static bool dp_is_msi_group_number_invalid(int msi_group_number,
1201 					   int msi_data_count)
1202 {
1203 	return false;
1204 }
1205 #else
1206 static bool dp_is_msi_group_number_invalid(int msi_group_number,
1207 					   int msi_data_count)
1208 {
1209 	return msi_group_number > msi_data_count;
1210 }
1211 #endif
1212 
1213 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1214 /**
1215  * dp_is_reo_ring_num_in_nf_grp1() - Check if the current reo ring is part of
1216  *				rx_near_full_grp1 mask
1217  * @soc: Datapath SoC Handle
1218  * @ring_num: REO ring number
1219  *
1220  * Return: 1 if the ring_num belongs to reo_nf_grp1,
1221  *	   0, otherwise.
1222  */
1223 static inline int
1224 dp_is_reo_ring_num_in_nf_grp1(struct dp_soc *soc, int ring_num)
1225 {
1226 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_1 & (1 << ring_num));
1227 }
1228 
1229 /**
1230  * dp_is_reo_ring_num_in_nf_grp2() - Check if the current reo ring is part of
1231  *				rx_near_full_grp2 mask
1232  * @soc: Datapath SoC Handle
1233  * @ring_num: REO ring number
1234  *
1235  * Return: 1 if the ring_num belongs to reo_nf_grp2,
1236  *	   0, otherwise.
1237  */
1238 static inline int
1239 dp_is_reo_ring_num_in_nf_grp2(struct dp_soc *soc, int ring_num)
1240 {
1241 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_2 & (1 << ring_num));
1242 }
1243 
1244 /**
1245  * dp_srng_get_near_full_irq_mask() - Get near-full irq mask for a particular
1246  *				ring type and number
1247  * @soc: Datapath SoC handle
1248  * @ring_type: SRNG type
1249  * @ring_num: ring num
1250  *
1251  * Return: near ful irq mask pointer
1252  */
1253 static inline
1254 uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
1255 					enum hal_ring_type ring_type,
1256 					int ring_num)
1257 {
1258 	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
1259 	uint8_t wbm2_sw_rx_rel_ring_id;
1260 	uint8_t *nf_irq_mask = NULL;
1261 
1262 	switch (ring_type) {
1263 	case WBM2SW_RELEASE:
1264 		wbm2_sw_rx_rel_ring_id =
1265 			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
1266 		if (ring_num != wbm2_sw_rx_rel_ring_id) {
1267 			nf_irq_mask = &soc->wlan_cfg_ctx->
1268 					int_tx_ring_near_full_irq_mask[0];
1269 		}
1270 		break;
1271 	case REO_DST:
1272 		if (dp_is_reo_ring_num_in_nf_grp1(soc, ring_num))
1273 			nf_irq_mask =
1274 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_1_mask[0];
1275 		else if (dp_is_reo_ring_num_in_nf_grp2(soc, ring_num))
1276 			nf_irq_mask =
1277 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_2_mask[0];
1278 		else
1279 			qdf_assert(0);
1280 		break;
1281 	default:
1282 		break;
1283 	}
1284 
1285 	return nf_irq_mask;
1286 }
1287 
1288 /**
1289  * dp_srng_set_msi2_ring_params() - Set the msi2 addr/data in the ring params
1290  * @soc: Datapath SoC handle
1291  * @ring_params: srng params handle
1292  * @msi2_addr: MSI2 addr to be set for the SRNG
1293  * @msi2_data: MSI2 data to be set for the SRNG
1294  *
1295  * Return: None
1296  */
1297 static inline
1298 void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
1299 				  struct hal_srng_params *ring_params,
1300 				  qdf_dma_addr_t msi2_addr,
1301 				  uint32_t msi2_data)
1302 {
1303 	ring_params->msi2_addr = msi2_addr;
1304 	ring_params->msi2_data = msi2_data;
1305 }
1306 
1307 /**
1308  * dp_srng_msi2_setup() - Setup MSI2 details for near full IRQ of an SRNG
1309  * @soc: Datapath SoC handle
1310  * @ring_params: ring_params for SRNG
1311  * @ring_type: SENG type
1312  * @ring_num: ring number for the SRNG
1313  * @nf_msi_grp_num: near full msi group number
1314  *
1315  * Return: None
1316  */
1317 static inline void
1318 dp_srng_msi2_setup(struct dp_soc *soc,
1319 		   struct hal_srng_params *ring_params,
1320 		   int ring_type, int ring_num, int nf_msi_grp_num)
1321 {
1322 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
1323 	int msi_data_count, ret;
1324 
1325 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1326 					  &msi_data_count, &msi_data_start,
1327 					  &msi_irq_start);
1328 	if (ret)
1329 		return;
1330 
1331 	if (nf_msi_grp_num < 0) {
1332 		dp_init_info("%pK: ring near full IRQ not part of an ext_group; ring_type: %d,ring_num %d",
1333 			     soc, ring_type, ring_num);
1334 		ring_params->msi2_addr = 0;
1335 		ring_params->msi2_data = 0;
1336 		return;
1337 	}
1338 
1339 	if (dp_is_msi_group_number_invalid(nf_msi_grp_num, msi_data_count)) {
1340 		dp_init_warn("%pK: 2 msi_groups will share an msi for near full IRQ; msi_group_num %d",
1341 			     soc, nf_msi_grp_num);
1342 		QDF_ASSERT(0);
1343 	}
1344 
1345 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
1346 
1347 	ring_params->nf_irq_support = 1;
1348 	ring_params->msi2_addr = addr_low;
1349 	ring_params->msi2_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
1350 	ring_params->msi2_data = (nf_msi_grp_num % msi_data_count)
1351 		+ msi_data_start;
1352 	ring_params->flags |= HAL_SRNG_MSI_INTR;
1353 }
1354 
1355 /* Percentage of ring entries considered as nearly full */
1356 #define DP_NF_HIGH_THRESH_PERCENTAGE	75
1357 /* Percentage of ring entries considered as critically full */
1358 #define DP_NF_CRIT_THRESH_PERCENTAGE	90
1359 /* Percentage of ring entries considered as safe threshold */
1360 #define DP_NF_SAFE_THRESH_PERCENTAGE	50
1361 
1362 /**
1363  * dp_srng_configure_nf_interrupt_thresholds() - Configure the thresholds for
1364  *			near full irq
1365  * @soc: Datapath SoC handle
1366  * @ring_params: ring params for SRNG
1367  * @ring_type: ring type
1368  */
1369 static inline void
1370 dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
1371 					  struct hal_srng_params *ring_params,
1372 					  int ring_type)
1373 {
1374 	if (ring_params->nf_irq_support) {
1375 		ring_params->high_thresh = (ring_params->num_entries *
1376 					    DP_NF_HIGH_THRESH_PERCENTAGE) / 100;
1377 		ring_params->crit_thresh = (ring_params->num_entries *
1378 					    DP_NF_CRIT_THRESH_PERCENTAGE) / 100;
1379 		ring_params->safe_thresh = (ring_params->num_entries *
1380 					    DP_NF_SAFE_THRESH_PERCENTAGE) /100;
1381 	}
1382 }
1383 
1384 /**
1385  * dp_srng_set_nf_thresholds() - Set the near full thresholds to srng data
1386  *			structure from the ring params
1387  * @soc: Datapath SoC handle
1388  * @srng: SRNG handle
1389  * @ring_params: ring params for a SRNG
1390  *
1391  * Return: None
1392  */
1393 static inline void
1394 dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
1395 			  struct hal_srng_params *ring_params)
1396 {
1397 	srng->crit_thresh = ring_params->crit_thresh;
1398 	srng->safe_thresh = ring_params->safe_thresh;
1399 }
1400 
#else
/* WLAN_FEATURE_NEAR_FULL_IRQ disabled: near-full interrupt handling is
 * compiled out, so the helpers below are no-op stubs that report "no mask"
 * and leave ring params untouched.
 */
static inline
uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
					enum hal_ring_type ring_type,
					int ring_num)
{
	return NULL;
}

static inline
void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
				  struct hal_srng_params *ring_params,
				  qdf_dma_addr_t msi2_addr,
				  uint32_t msi2_data)
{
}

static inline void
dp_srng_msi2_setup(struct dp_soc *soc,
		   struct hal_srng_params *ring_params,
		   int ring_type, int ring_num, int nf_msi_grp_num)
{
}

static inline void
dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
					  struct hal_srng_params *ring_params,
					  int ring_type)
{
}

static inline void
dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
			  struct hal_srng_params *ring_params)
{
}
#endif
1438 
/**
 * dp_srng_calculate_msi_group() - Map a ring to its regular (and optionally
 *	near-full) interrupt ext_group
 * @soc: Datapath SoC handle
 * @ring_type: SRNG type of the ring
 * @ring_num: ring number within the ring type
 * @reg_msi_grp_num: out-param for the regular interrupt group index (may be
 *	set negative by dp_srng_find_ring_in_mask when no group claims it)
 * @nf_irq_support: true if near-full interrupt is supported for this ring
 * @nf_msi_grp_num: out-param for the near-full interrupt group index; only
 *	written when the ring has a near-full mask
 *
 * Return: QDF_STATUS_SUCCESS when the ring type participates in ext_groups,
 *	   -QDF_STATUS_E_NOENT for ring types that raise no host interrupt
 */
static int dp_srng_calculate_msi_group(struct dp_soc *soc,
				       enum hal_ring_type ring_type,
				       int ring_num,
				       int *reg_msi_grp_num,
				       bool nf_irq_support,
				       int *nf_msi_grp_num)
{
	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
	uint8_t *grp_mask, *nf_irq_mask = NULL;
	bool nf_irq_enabled = false;
	uint8_t wbm2_sw_rx_rel_ring_id;

	switch (ring_type) {
	case WBM2SW_RELEASE:
		wbm2_sw_rx_rel_ring_id =
			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
		if (ring_num == wbm2_sw_rx_rel_ring_id) {
			/* dp_rx_wbm_err_process - soc->rx_rel_ring */
			grp_mask = &cfg_ctx->int_rx_wbm_rel_ring_mask[0];
			ring_num = 0;
		} else { /* dp_tx_comp_handler - soc->tx_comp_ring */
			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
			nf_irq_mask = dp_srng_get_near_full_irq_mask(soc,
								     ring_type,
								     ring_num);
			if (nf_irq_mask)
				nf_irq_enabled = true;

			/*
			 * Using ring 4 as 4th tx completion ring since ring 3
			 * is Rx error ring
			 */
			if (ring_num == WBM2SW_TXCOMP_RING4_NUM)
				ring_num = TXCOMP_RING4_NUM;
		}
	break;

	case REO_EXCEPTION:
		/* dp_rx_err_process - &soc->reo_exception_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
	break;

	case REO_DST:
		/* dp_rx_process - soc->reo_dest_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
		nf_irq_mask = dp_srng_get_near_full_irq_mask(soc, ring_type,
							     ring_num);
		if (nf_irq_mask)
			nf_irq_enabled = true;
	break;

	case REO_STATUS:
		/* dp_reo_status_ring_handler - soc->reo_status_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
	break;

	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
	case RXDMA_MONITOR_STATUS:
	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
	case RXDMA_MONITOR_DST:
		/* dp_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
	break;
	case TX_MONITOR_DST:
		/* dp_tx_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_tx_mon_ring_mask[0];
	break;
	case RXDMA_DST:
		/* dp_rxdma_err_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
	break;

	case RXDMA_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
	break;

	case RXDMA_MONITOR_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
	break;

	case TX_MONITOR_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2txmon_ring_mask[0];
	break;

	case TCL_DATA:
	/* CMD_CREDIT_RING is used as command in 8074 and credit in 9000 */
	case TCL_CMD_CREDIT:
	case REO_CMD:
	case SW2WBM_RELEASE:
	case WBM_IDLE_LINK:
		/* normally empty SW_TO_HW rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case TCL_STATUS:
	case REO_REINJECT:
		/* misc unused rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case CE_SRC:
	case CE_DST:
	case CE_DST_STATUS:
		/* CE_rings - currently handled by hif */
	default:
		return -QDF_STATUS_E_NOENT;
	break;
	}

	*reg_msi_grp_num = dp_srng_find_ring_in_mask(ring_num, grp_mask);

	if (nf_irq_support && nf_irq_enabled) {
		*nf_msi_grp_num = dp_srng_find_ring_in_mask(ring_num,
							    nf_irq_mask);
	}

	return QDF_STATUS_SUCCESS;
}
1557 
1558 /*
1559  * dp_get_num_msi_available()- API to get number of MSIs available
1560  * @dp_soc: DP soc Handle
1561  * @interrupt_mode: Mode of interrupts
1562  *
1563  * Return: Number of MSIs available or 0 in case of integrated
1564  */
1565 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
1566 static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
1567 {
1568 	return 0;
1569 }
1570 #else
1571 /*
1572  * dp_get_num_msi_available()- API to get number of MSIs available
1573  * @dp_soc: DP soc Handle
1574  * @interrupt_mode: Mode of interrupts
1575  *
1576  * Return: Number of MSIs available or 0 in case of integrated
1577  */
1578 static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
1579 {
1580 	int msi_data_count;
1581 	int msi_data_start;
1582 	int msi_irq_start;
1583 	int ret;
1584 
1585 	if (interrupt_mode == DP_INTR_INTEGRATED) {
1586 		return 0;
1587 	} else if (interrupt_mode == DP_INTR_MSI || interrupt_mode ==
1588 		   DP_INTR_POLL) {
1589 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1590 						  &msi_data_count,
1591 						  &msi_data_start,
1592 						  &msi_irq_start);
1593 		if (ret) {
1594 			qdf_err("Unable to get DP MSI assignment %d",
1595 				interrupt_mode);
1596 			return -EINVAL;
1597 		}
1598 		return msi_data_count;
1599 	}
1600 	qdf_err("Interrupt mode invalid %d", interrupt_mode);
1601 	return -EINVAL;
1602 }
1603 #endif
1604 
/**
 * dp_srng_msi_setup() - Program MSI (and MSI2/near-full) address and data
 *	into the ring params for a given ring
 * @soc: Datapath SoC handle
 * @ring_params: ring params to be filled in
 * @ring_type: SRNG type
 * @ring_num: ring number within the ring type
 *
 * Leaves msi_addr/msi_data zero when the ring is not part of any ext_group
 * or when no platform MSI assignment is available.
 */
static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
			      *ring_params, int ring_type, int ring_num)
{
	int reg_msi_grp_num;
	/*
	 * nf_msi_grp_num needs to be initialized with negative value,
	 * to avoid configuring near-full msi for WBM2SW3 ring
	 */
	int nf_msi_grp_num = -1;
	int msi_data_count;
	int ret;
	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
	bool nf_irq_support;

	/* No platform MSI assignment: leave ring params untouched */
	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					    &msi_data_count, &msi_data_start,
					    &msi_irq_start);

	if (ret)
		return;

	nf_irq_support = hal_srng_is_near_full_irq_supported(soc->hal_soc,
							     ring_type,
							     ring_num);
	ret = dp_srng_calculate_msi_group(soc, ring_type, ring_num,
					  &reg_msi_grp_num,
					  nf_irq_support,
					  &nf_msi_grp_num);
	/* Ring type raises no host interrupt: clear both MSI and MSI2 */
	if (ret < 0) {
		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
		return;
	}

	/* No group claims this ring: clear MSI but still consider MSI2 */
	if (reg_msi_grp_num < 0) {
		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		goto configure_msi2;
	}

	/* Group beyond MSI count means two groups share one MSI; warn but
	 * continue with the modulo mapping below
	 */
	if (dp_is_msi_group_number_invalid(reg_msi_grp_num, msi_data_count)) {
		dp_init_warn("%pK: 2 msi_groups will share an msi; msi_group_num %d",
			     soc, reg_msi_grp_num);
		QDF_ASSERT(0);
	}

	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = (reg_msi_grp_num % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;

	dp_debug("ring type %u ring_num %u msi->data %u msi_addr %llx",
		 ring_type, ring_num, ring_params->msi_data,
		 (uint64_t)ring_params->msi_addr);

configure_msi2:
	if (!nf_irq_support) {
		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
		return;
	}

	dp_srng_msi2_setup(soc, ring_params, ring_type, ring_num,
			   nf_msi_grp_num);
}
1677 
1678 #ifdef FEATURE_AST
1679 /**
1680  * dp_print_mlo_ast_stats() - Print AST stats for MLO peers
1681  *
1682  * @soc : core DP soc context
1683  *
1684  * Return: void
1685  */
1686 void dp_print_mlo_ast_stats(struct dp_soc *soc)
1687 {
1688 	if (soc->arch_ops.print_mlo_ast_stats)
1689 		soc->arch_ops.print_mlo_ast_stats(soc);
1690 }
1691 
1692 /**
1693  * dp_print_peer_ast_entries() - Dump AST entries of peer
1694  * @soc: Datapath soc handle
1695  * @peer: Datapath peer
1696  * @arg: argument to iterate function
1697  *
1698  * return void
1699  */
1700 void
1701 dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
1702 {
1703 	struct dp_ast_entry *ase, *tmp_ase;
1704 	uint32_t num_entries = 0;
1705 	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
1706 			"NONE", "STATIC", "SELF", "WDS", "HMWDS", "BSS",
1707 			"DA", "HMWDS_SEC", "MLD"};
1708 
1709 	DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
1710 	    DP_PRINT_STATS("%6d mac_addr = "QDF_MAC_ADDR_FMT
1711 		    " peer_mac_addr = "QDF_MAC_ADDR_FMT
1712 		    " peer_id = %u"
1713 		    " type = %s"
1714 		    " next_hop = %d"
1715 		    " is_active = %d"
1716 		    " ast_idx = %d"
1717 		    " ast_hash = %d"
1718 		    " delete_in_progress = %d"
1719 		    " pdev_id = %d"
1720 		    " vdev_id = %d",
1721 		    ++num_entries,
1722 		    QDF_MAC_ADDR_REF(ase->mac_addr.raw),
1723 		    QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1724 		    ase->peer_id,
1725 		    type[ase->type],
1726 		    ase->next_hop,
1727 		    ase->is_active,
1728 		    ase->ast_idx,
1729 		    ase->ast_hash_value,
1730 		    ase->delete_in_progress,
1731 		    ase->pdev_id,
1732 		    ase->vdev_id);
1733 	}
1734 }
1735 
1736 /**
1737  * dp_print_ast_stats() - Dump AST table contents
1738  * @soc: Datapath soc handle
1739  *
1740  * return void
1741  */
1742 void dp_print_ast_stats(struct dp_soc *soc)
1743 {
1744 	DP_PRINT_STATS("AST Stats:");
1745 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
1746 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
1747 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
1748 	DP_PRINT_STATS("	Entries MAP ERR  = %d", soc->stats.ast.map_err);
1749 	DP_PRINT_STATS("	Entries Mismatch ERR  = %d",
1750 		       soc->stats.ast.ast_mismatch);
1751 
1752 	DP_PRINT_STATS("AST Table:");
1753 
1754 	qdf_spin_lock_bh(&soc->ast_lock);
1755 
1756 	dp_soc_iterate_peer(soc, dp_print_peer_ast_entries, NULL,
1757 			    DP_MOD_ID_GENERIC_STATS);
1758 
1759 	qdf_spin_unlock_bh(&soc->ast_lock);
1760 
1761 	dp_print_mlo_ast_stats(soc);
1762 }
1763 #else
void dp_print_ast_stats(struct dp_soc *soc)
{
	/* Stub: AST bookkeeping is compiled out without FEATURE_AST */
	DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST");
}
1769 #endif
1770 
1771 /**
1772  * dp_print_peer_info() - Dump peer info
1773  * @soc: Datapath soc handle
1774  * @peer: Datapath peer handle
1775  * @arg: argument to iter function
1776  *
1777  * return void
1778  */
1779 static void
1780 dp_print_peer_info(struct dp_soc *soc, struct dp_peer *peer, void *arg)
1781 {
1782 	struct dp_txrx_peer *txrx_peer = NULL;
1783 
1784 	txrx_peer = dp_get_txrx_peer(peer);
1785 	if (!txrx_peer)
1786 		return;
1787 
1788 	DP_PRINT_STATS(" peer id = %d"
1789 		       " peer_mac_addr = "QDF_MAC_ADDR_FMT
1790 		       " nawds_enabled = %d"
1791 		       " bss_peer = %d"
1792 		       " wds_enabled = %d"
1793 		       " tx_cap_enabled = %d"
1794 		       " rx_cap_enabled = %d",
1795 		       peer->peer_id,
1796 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1797 		       txrx_peer->nawds_enabled,
1798 		       txrx_peer->bss_peer,
1799 		       txrx_peer->wds_enabled,
1800 		       peer->monitor_peer ?
1801 					peer->monitor_peer->tx_cap_enabled : 0,
1802 		       peer->monitor_peer ?
1803 					peer->monitor_peer->rx_cap_enabled : 0);
1804 }
1805 
1806 /**
1807  * dp_print_peer_table() - Dump all Peer stats
1808  * @vdev: Datapath Vdev handle
1809  *
1810  * return void
1811  */
1812 static void dp_print_peer_table(struct dp_vdev *vdev)
1813 {
1814 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
1815 	dp_vdev_iterate_peer(vdev, dp_print_peer_info, NULL,
1816 			     DP_MOD_ID_GENERIC_STATS);
1817 }
1818 
#ifdef WLAN_DP_PER_RING_TYPE_CONFIG
/**
 * dp_srng_configure_interrupt_thresholds() - Retrieve interrupt
 * threshold values from the wlan_srng_cfg table for each ring type
 * @soc: device handle
 * @ring_params: per ring specific parameters
 * @ring_type: Ring type
 * @ring_num: Ring number for a given ring type
 * @num_entries: number of entries in the ring (unused in this variant)
 *
 * Fill the ring params with the interrupt threshold
 * configuration parameters available in the per ring type wlan_srng_cfg
 * table.
 *
 * Return: None
 */
static void
dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
				       struct hal_srng_params *ring_params,
				       int ring_type, int ring_num,
				       int num_entries)
{
	uint8_t wbm2_sw_rx_rel_ring_id;

	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);

	if (ring_type == REO_DST) {
		/* RX destination rings use the RX-specific thresholds */
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
	} else if (ring_type == WBM2SW_RELEASE &&
		   (ring_num == wbm2_sw_rx_rel_ring_id)) {
		/* RX release ring falls back to the "other" thresholds */
		ring_params->intr_timer_thres_us =
				wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
				wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
	} else {
		/* Everything else comes from the per-ring-type srng table */
		ring_params->intr_timer_thres_us =
				soc->wlan_srng_cfg[ring_type].timer_threshold;
		ring_params->intr_batch_cntr_thres_entries =
				soc->wlan_srng_cfg[ring_type].batch_count_threshold;
	}
	ring_params->low_threshold =
			soc->wlan_srng_cfg[ring_type].low_threshold;
	if (ring_params->low_threshold)
		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;

	dp_srng_configure_nf_interrupt_thresholds(soc, ring_params, ring_type);
}
#else
/**
 * dp_srng_configure_interrupt_thresholds() - Default per-ring interrupt
 * threshold configuration (no per-ring-type wlan_srng_cfg table)
 * @soc: device handle
 * @ring_params: per ring specific parameters
 * @ring_type: Ring type
 * @ring_num: Ring number for a given ring type
 * @num_entries: number of entries in the ring
 *
 * Return: None
 */
static void
dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
				       struct hal_srng_params *ring_params,
				       int ring_type, int ring_num,
				       int num_entries)
{
	uint8_t wbm2_sw_rx_rel_ring_id;

	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);

	if (ring_type == REO_DST) {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
	} else if (ring_type == WBM2SW_RELEASE &&
		   (ring_num < wbm2_sw_rx_rel_ring_id ||
		   ring_num == WBM2SW_TXCOMP_RING4_NUM)) {
		/* TX completion rings (all WBM2SW below the RX release ring,
		 * plus the 4th TX completion ring) use TX thresholds
		 */
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
	} else {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
	}

	/* These rings donot require interrupt to host. Make them zero */
	switch (ring_type) {
	case REO_REINJECT:
	case REO_CMD:
	case TCL_DATA:
	case TCL_CMD_CREDIT:
	case TCL_STATUS:
	case WBM_IDLE_LINK:
	case SW2WBM_RELEASE:
	case PPE2TCL:
	case SW2RXDMA_NEW:
		ring_params->intr_timer_thres_us = 0;
		ring_params->intr_batch_cntr_thres_entries = 0;
		break;
	}

	/* Enable low threshold interrupts for rx buffer rings (regular and
	 * monitor buffer rings.
	 * TODO: See if this is required for any other ring
	 */
	/* NOTE(review): the parenthesization below groups oddly but is
	 * logically a plain OR of the four ring types
	 */
	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
	    (ring_type == RXDMA_MONITOR_STATUS ||
	    (ring_type == TX_MONITOR_BUF))) {
		/* TODO: Setting low threshold to 1/8th of ring size
		 * see if this needs to be configurable
		 */
		ring_params->low_threshold = num_entries >> 3;
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
		ring_params->intr_batch_cntr_thres_entries = 0;
	}

	/* During initialisation monitor rings are only filled with
	 * MON_BUF_MIN_ENTRIES entries. So low threshold needs to be set to
	 * a value less than that. Low threshold value is reconfigured again
	 * to 1/8th of the ring size when monitor vap is created.
	 */
	if (ring_type == RXDMA_MONITOR_BUF)
		ring_params->low_threshold = MON_BUF_MIN_ENTRIES >> 1;

	/* In case of PCI chipsets, we dont have PPDU end interrupts,
	 * so MONITOR STATUS ring is reaped by receiving MSI from srng.
	 * Keep batch threshold as 8 so that interrupt is received for
	 * every 4 packets in MONITOR_STATUS ring
	 */
	if ((ring_type == RXDMA_MONITOR_STATUS) &&
	    (soc->intr_mode == DP_INTR_MSI))
		ring_params->intr_batch_cntr_thres_entries = 4;
}
#endif
1949 
1950 #ifdef DP_MEM_PRE_ALLOC
1951 
1952 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
1953 			   size_t ctxt_size)
1954 {
1955 	void *ctxt_mem;
1956 
1957 	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_context) {
1958 		dp_warn("dp_prealloc_get_context null!");
1959 		goto dynamic_alloc;
1960 	}
1961 
1962 	ctxt_mem = soc->cdp_soc.ol_ops->dp_prealloc_get_context(ctxt_type,
1963 								ctxt_size);
1964 
1965 	if (ctxt_mem)
1966 		goto end;
1967 
1968 dynamic_alloc:
1969 	dp_info("Pre-alloc type %d, size %zu failed, need dynamic-alloc",
1970 		ctxt_type, ctxt_size);
1971 	ctxt_mem = qdf_mem_malloc(ctxt_size);
1972 end:
1973 	return ctxt_mem;
1974 }
1975 
1976 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
1977 			 void *vaddr)
1978 {
1979 	QDF_STATUS status;
1980 
1981 	if (soc->cdp_soc.ol_ops->dp_prealloc_put_context) {
1982 		status = soc->cdp_soc.ol_ops->dp_prealloc_put_context(
1983 								ctxt_type,
1984 								vaddr);
1985 	} else {
1986 		dp_warn("dp_prealloc_put_context null!");
1987 		status = QDF_STATUS_E_NOSUPPORT;
1988 	}
1989 
1990 	if (QDF_IS_STATUS_ERROR(status)) {
1991 		dp_info("Context type %d not pre-allocated", ctxt_type);
1992 		qdf_mem_free(vaddr);
1993 	}
1994 }
1995 
1996 static inline
1997 void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
1998 					   struct dp_srng *srng,
1999 					   uint32_t ring_type)
2000 {
2001 	void *mem;
2002 
2003 	qdf_assert(!srng->is_mem_prealloc);
2004 
2005 	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_consistent) {
2006 		dp_warn("dp_prealloc_get_consistent is null!");
2007 		goto qdf;
2008 	}
2009 
2010 	mem =
2011 		soc->cdp_soc.ol_ops->dp_prealloc_get_consistent
2012 						(&srng->alloc_size,
2013 						 &srng->base_vaddr_unaligned,
2014 						 &srng->base_paddr_unaligned,
2015 						 &srng->base_paddr_aligned,
2016 						 DP_RING_BASE_ALIGN, ring_type);
2017 
2018 	if (mem) {
2019 		srng->is_mem_prealloc = true;
2020 		goto end;
2021 	}
2022 qdf:
2023 	mem =  qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
2024 						&srng->base_vaddr_unaligned,
2025 						&srng->base_paddr_unaligned,
2026 						&srng->base_paddr_aligned,
2027 						DP_RING_BASE_ALIGN);
2028 end:
2029 	dp_info("%s memory %pK dp_srng %pK ring_type %d alloc_size %d num_entries %d",
2030 		srng->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", mem,
2031 		srng, ring_type, srng->alloc_size, srng->num_entries);
2032 	return mem;
2033 }
2034 
2035 static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
2036 					       struct dp_srng *srng)
2037 {
2038 	if (srng->is_mem_prealloc) {
2039 		if (!soc->cdp_soc.ol_ops->dp_prealloc_put_consistent) {
2040 			dp_warn("dp_prealloc_put_consistent is null!");
2041 			QDF_BUG(0);
2042 			return;
2043 		}
2044 		soc->cdp_soc.ol_ops->dp_prealloc_put_consistent
2045 						(srng->alloc_size,
2046 						 srng->base_vaddr_unaligned,
2047 						 srng->base_paddr_unaligned);
2048 
2049 	} else {
2050 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2051 					srng->alloc_size,
2052 					srng->base_vaddr_unaligned,
2053 					srng->base_paddr_unaligned, 0);
2054 	}
2055 }
2056 
2057 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
2058 				   enum dp_desc_type desc_type,
2059 				   struct qdf_mem_multi_page_t *pages,
2060 				   size_t element_size,
2061 				   uint32_t element_num,
2062 				   qdf_dma_context_t memctxt,
2063 				   bool cacheable)
2064 {
2065 	if (!soc->cdp_soc.ol_ops->dp_get_multi_pages) {
2066 		dp_warn("dp_get_multi_pages is null!");
2067 		goto qdf;
2068 	}
2069 
2070 	pages->num_pages = 0;
2071 	pages->is_mem_prealloc = 0;
2072 	soc->cdp_soc.ol_ops->dp_get_multi_pages(desc_type,
2073 						element_size,
2074 						element_num,
2075 						pages,
2076 						cacheable);
2077 	if (pages->num_pages)
2078 		goto end;
2079 
2080 qdf:
2081 	qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size,
2082 				  element_num, memctxt, cacheable);
2083 end:
2084 	dp_info("%s desc_type %d element_size %d element_num %d cacheable %d",
2085 		pages->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc",
2086 		desc_type, (int)element_size, element_num, cacheable);
2087 }
2088 
2089 void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
2090 				  enum dp_desc_type desc_type,
2091 				  struct qdf_mem_multi_page_t *pages,
2092 				  qdf_dma_context_t memctxt,
2093 				  bool cacheable)
2094 {
2095 	if (pages->is_mem_prealloc) {
2096 		if (!soc->cdp_soc.ol_ops->dp_put_multi_pages) {
2097 			dp_warn("dp_put_multi_pages is null!");
2098 			QDF_BUG(0);
2099 			return;
2100 		}
2101 
2102 		soc->cdp_soc.ol_ops->dp_put_multi_pages(desc_type, pages);
2103 		qdf_mem_zero(pages, sizeof(*pages));
2104 	} else {
2105 		qdf_mem_multi_pages_free(soc->osdev, pages,
2106 					 memctxt, cacheable);
2107 	}
2108 }
2109 
2110 #else
2111 
2112 static inline
2113 void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
2114 					   struct dp_srng *srng,
2115 					   uint32_t ring_type)
2116 
2117 {
2118 	void *mem;
2119 
2120 	mem = qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
2121 					       &srng->base_vaddr_unaligned,
2122 					       &srng->base_paddr_unaligned,
2123 					       &srng->base_paddr_aligned,
2124 					       DP_RING_BASE_ALIGN);
2125 	if (mem)
2126 		qdf_mem_set(srng->base_vaddr_unaligned, 0, srng->alloc_size);
2127 
2128 	return mem;
2129 }
2130 
/* Non-prealloc variant: SRNG backing memory always came from the DMA
 * allocator, so it is simply handed back to QDF.
 */
static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
					       struct dp_srng *srng)
{
	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
				srng->alloc_size,
				srng->base_vaddr_unaligned,
				srng->base_paddr_unaligned, 0);
}
2139 
2140 #endif /* DP_MEM_PRE_ALLOC */
2141 
2142 /*
2143  * dp_srng_free() - Free SRNG memory
2144  * @soc  : Data path soc handle
2145  * @srng : SRNG pointer
2146  *
2147  * return: None
2148  */
2149 void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng)
2150 {
2151 	if (srng->alloc_size && srng->base_vaddr_unaligned) {
2152 		if (!srng->cached) {
2153 			dp_srng_mem_free_consistent(soc, srng);
2154 		} else {
2155 			qdf_mem_free(srng->base_vaddr_unaligned);
2156 		}
2157 		srng->alloc_size = 0;
2158 		srng->base_vaddr_unaligned = NULL;
2159 	}
2160 	srng->hal_srng = NULL;
2161 }
2162 
2163 qdf_export_symbol(dp_srng_free);
2164 
2165 #ifdef DISABLE_MON_RING_MSI_CFG
2166 /*
2167  * dp_skip_msi_cfg() - Check if msi cfg has to be skipped for ring_type
2168  * @ring_type: sring type
2169  *
2170  * Return: True if msi cfg should be skipped for srng type else false
2171  */
2172 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
2173 {
2174 	if (ring_type == RXDMA_MONITOR_STATUS)
2175 		return true;
2176 
2177 	return false;
2178 }
2179 #else
2180 #ifdef DP_CON_MON_MSI_ENABLED
2181 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
2182 {
2183 	if (soc->cdp_soc.ol_ops->get_con_mode &&
2184 	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE) {
2185 		if (ring_type == REO_DST || ring_type == RXDMA_DST)
2186 			return true;
2187 	} else if (ring_type == RXDMA_MONITOR_STATUS) {
2188 		return true;
2189 	}
2190 
2191 	return false;
2192 }
2193 #else
/* Default build: no ring type ever skips MSI configuration */
static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
{
	return false;
}
2198 #endif /* DP_CON_MON_MSI_ENABLED */
2199 #endif /* DISABLE_MON_RING_MSI_CFG */
2200 
2201 /*
2202  * dp_srng_init() - Initialize SRNG
2203  * @soc  : Data path soc handle
2204  * @srng : SRNG pointer
2205  * @ring_type : Ring Type
2206  * @ring_num: Ring number
2207  * @mac_id: mac_id
2208  *
2209  * return: QDF_STATUS
2210  */
QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng,
			int ring_type, int ring_num, int mac_id)
{
	hal_soc_handle_t hal_soc = soc->hal_soc;
	struct hal_srng_params ring_params;

	/* Double-init is benign: report it and keep the existing setup */
	if (srng->hal_srng) {
		dp_init_err("%pK: Ring type: %d, num:%d is already initialized",
			    soc, ring_type, ring_num);
		return QDF_STATUS_SUCCESS;
	}

	/* memset the srng ring to zero */
	qdf_mem_zero(srng->base_vaddr_unaligned, srng->alloc_size);

	qdf_mem_zero(&ring_params, sizeof(struct hal_srng_params));
	ring_params.ring_base_paddr = srng->base_paddr_aligned;
	ring_params.ring_base_vaddr = srng->base_vaddr_aligned;

	ring_params.num_entries = srng->num_entries;

	dp_info("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
		ring_type, ring_num,
		(void *)ring_params.ring_base_vaddr,
		(void *)ring_params.ring_base_paddr,
		ring_params.num_entries);

	/* Program the MSI vector only when the SoC is in MSI interrupt
	 * mode and this ring type is not excluded by dp_skip_msi_cfg().
	 */
	if (soc->intr_mode == DP_INTR_MSI && !dp_skip_msi_cfg(soc, ring_type)) {
		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
				 ring_type, ring_num);
	} else {
		/* No MSI: clear MSI fields so HAL does not route one */
		ring_params.msi_data = 0;
		ring_params.msi_addr = 0;
		dp_srng_set_msi2_ring_params(soc, &ring_params, 0, 0);
		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
				 ring_type, ring_num);
	}

	/* Batch-count / timer interrupt mitigation thresholds */
	dp_srng_configure_interrupt_thresholds(soc, &ring_params,
					       ring_type, ring_num,
					       srng->num_entries);

	/* Near-full interrupt thresholds (feature-dependent) */
	dp_srng_set_nf_thresholds(soc, srng, &ring_params);

	if (srng->cached)
		ring_params.flags |= HAL_SRNG_CACHED_DESC;

	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
					mac_id, &ring_params);

	/* HAL setup failed: release the ring's backing memory too */
	if (!srng->hal_srng) {
		dp_srng_free(soc, srng);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
2269 
2270 qdf_export_symbol(dp_srng_init);
2271 
2272 /*
2273  * dp_srng_alloc() - Allocate memory for SRNG
2274  * @soc  : Data path soc handle
2275  * @srng : SRNG pointer
2276  * @ring_type : Ring Type
2277  * @num_entries: Number of entries
2278  * @cached: cached flag variable
2279  *
2280  * return: QDF_STATUS
2281  */
2282 QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
2283 			 int ring_type, uint32_t num_entries,
2284 			 bool cached)
2285 {
2286 	hal_soc_handle_t hal_soc = soc->hal_soc;
2287 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
2288 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
2289 
2290 	if (srng->base_vaddr_unaligned) {
2291 		dp_init_err("%pK: Ring type: %d, is already allocated",
2292 			    soc, ring_type);
2293 		return QDF_STATUS_SUCCESS;
2294 	}
2295 
2296 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
2297 	srng->hal_srng = NULL;
2298 	srng->alloc_size = num_entries * entry_size;
2299 	srng->num_entries = num_entries;
2300 	srng->cached = cached;
2301 
2302 	if (!cached) {
2303 		srng->base_vaddr_aligned =
2304 		    dp_srng_aligned_mem_alloc_consistent(soc,
2305 							 srng,
2306 							 ring_type);
2307 	} else {
2308 		srng->base_vaddr_aligned = qdf_aligned_malloc(
2309 					&srng->alloc_size,
2310 					&srng->base_vaddr_unaligned,
2311 					&srng->base_paddr_unaligned,
2312 					&srng->base_paddr_aligned,
2313 					DP_RING_BASE_ALIGN);
2314 	}
2315 
2316 	if (!srng->base_vaddr_aligned)
2317 		return QDF_STATUS_E_NOMEM;
2318 
2319 	return QDF_STATUS_SUCCESS;
2320 }
2321 
2322 qdf_export_symbol(dp_srng_alloc);
2323 
2324 /*
2325  * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
2326  * @soc: DP SOC handle
2327  * @srng: source ring structure
2328  * @ring_type: type of ring
2329  * @ring_num: ring number
2330  *
2331  * Return: None
2332  */
2333 void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
2334 		    int ring_type, int ring_num)
2335 {
2336 	if (!srng->hal_srng) {
2337 		dp_init_err("%pK: Ring type: %d, num:%d not setup",
2338 			    soc, ring_type, ring_num);
2339 		return;
2340 	}
2341 
2342 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
2343 	srng->hal_srng = NULL;
2344 }
2345 
2346 qdf_export_symbol(dp_srng_deinit);
2347 
2348 /* TODO: Need this interface from HIF */
2349 void *hif_get_hal_handle(struct hif_opaque_softc *hif_handle);
2350 
2351 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
2352 int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
2353 			 hal_ring_handle_t hal_ring_hdl)
2354 {
2355 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2356 	uint32_t hp, tp;
2357 	uint8_t ring_id;
2358 
2359 	if (!int_ctx)
2360 		return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
2361 
2362 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
2363 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2364 
2365 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
2366 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_START);
2367 
2368 	return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
2369 }
2370 
2371 void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
2372 			hal_ring_handle_t hal_ring_hdl)
2373 {
2374 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2375 	uint32_t hp, tp;
2376 	uint8_t ring_id;
2377 
2378 	if (!int_ctx)
2379 		return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
2380 
2381 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
2382 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2383 
2384 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
2385 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_END);
2386 
2387 	return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
2388 }
2389 
/* Record a timer-context entry marker in the HIF event history for the
 * given history group.
 */
static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
					      uint8_t hist_group_id)
{
	hif_record_event(dp_soc->hif_handle, hist_group_id,
			 0, 0, 0, HIF_EVENT_TIMER_ENTRY);
}
2396 
/* Record a timer-context exit marker in the HIF event history for the
 * given history group.
 */
static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
					     uint8_t hist_group_id)
{
	hif_record_event(dp_soc->hif_handle, hist_group_id,
			 0, 0, 0, HIF_EVENT_TIMER_EXIT);
}
2403 #else
2404 
/* Event history disabled: timer-entry recording is a no-op */
static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
					      uint8_t hist_group_id)
{
}
2409 
/* Event history disabled: timer-exit recording is a no-op */
static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
					     uint8_t hist_group_id)
{
}
2414 
2415 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
2416 
2417 /*
2418  * dp_should_timer_irq_yield() - Decide if the bottom half should yield
2419  * @soc: DP soc handle
2420  * @work_done: work done in softirq context
2421  * @start_time: start time for the softirq
2422  *
2423  * Return: enum with yield code
2424  */
2425 enum timer_yield_status
2426 dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
2427 			  uint64_t start_time)
2428 {
2429 	uint64_t cur_time = qdf_get_log_timestamp();
2430 
2431 	if (!work_done)
2432 		return DP_TIMER_WORK_DONE;
2433 
2434 	if (cur_time - start_time > DP_MAX_TIMER_EXEC_TIME_TICKS)
2435 		return DP_TIMER_TIME_EXHAUST;
2436 
2437 	return DP_TIMER_NO_YIELD;
2438 }
2439 
2440 qdf_export_symbol(dp_should_timer_irq_yield);
2441 
/* Thin wrapper giving the LMAC ring loop a uniform call shape for
 * reaping the RXDMA destination (error) ring via dp_rxdma_err_process().
 */
static int dp_process_rxdma_dst_ring(struct dp_soc *soc,
				     struct dp_intr *int_ctx,
				     int mac_for_pdev,
				     int total_budget)
{
	return dp_rxdma_err_process(int_ctx, soc, mac_for_pdev,
				    total_budget);
}
2450 
2451 /**
2452  * dp_process_lmac_rings() - Process LMAC rings
2453  * @int_ctx: interrupt context
2454  * @total_budget: budget of work which can be done
2455  *
2456  * Return: work done
2457  */
static int dp_process_lmac_rings(struct dp_intr *int_ctx, int total_budget)
{
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_soc *soc = int_ctx->soc;
	uint32_t remaining_quota = total_budget;
	struct dp_pdev *pdev = NULL;
	uint32_t work_done  = 0;
	int budget = total_budget;
	int ring = 0;

	/* Process LMAC interrupts */
	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
		int mac_for_pdev = ring;

		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
		if (!pdev)
			continue;
		/* Rx monitor destination ring for this mac */
		if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
			work_done = dp_monitor_process(soc, int_ctx,
						       mac_for_pdev,
						       remaining_quota);
			if (work_done)
				intr_stats->num_rx_mon_ring_masks++;
			budget -= work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}

		/* Tx monitor destination ring for this mac */
		if (int_ctx->tx_mon_ring_mask & (1 << mac_for_pdev)) {
			work_done = dp_tx_mon_process(soc, int_ctx,
						      mac_for_pdev,
						      remaining_quota);
			if (work_done)
				intr_stats->num_tx_mon_ring_masks++;
			budget -= work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}

		/* RXDMA-to-host destination (error) ring for this mac */
		if (int_ctx->rxdma2host_ring_mask &
				(1 << mac_for_pdev)) {
			work_done = dp_process_rxdma_dst_ring(soc, int_ctx,
							      mac_for_pdev,
							      remaining_quota);
			if (work_done)
				intr_stats->num_rxdma2host_ring_masks++;
			budget -=  work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}

		/* Host-to-RXDMA refill: replenish Rx buffers (low-threshold
		 * style, no budget accounting)
		 */
		if (int_ctx->host2rxdma_ring_mask & (1 << mac_for_pdev)) {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;
			struct dp_srng *rx_refill_buf_ring;
			struct rx_desc_pool *rx_desc_pool;

			rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
			if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
				rx_refill_buf_ring =
					&soc->rx_refill_buf_ring[mac_for_pdev];
			else
				rx_refill_buf_ring =
					&soc->rx_refill_buf_ring[pdev->lmac_id];

			intr_stats->num_host2rxdma_ring_masks++;
			dp_rx_buffers_lt_replenish_simple(soc, mac_for_pdev,
							  rx_refill_buf_ring,
							  rx_desc_pool,
							  0,
							  &desc_list,
							  &tail);
		}

	}

	/* Monitor buffer refills are not budgeted */
	if (int_ctx->host2rxdma_mon_ring_mask)
		dp_rx_mon_buf_refill(int_ctx);

	if (int_ctx->host2txmon_ring_mask)
		dp_tx_mon_buf_refill(int_ctx);

budget_done:
	/* Return the amount of budget actually consumed */
	return total_budget - budget;
}
2546 
2547 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
2548 /**
2549  * dp_service_near_full_srngs() - Bottom half handler to process the near
2550  *				full IRQ on a SRNG
2551  * @dp_ctx: Datapath SoC handle
2552  * @dp_budget: Number of SRNGs which can be processed in a single attempt
2553  *		without rescheduling
2554  *
2555  * Return: remaining budget/quota for the soc device
2556  */
2557 static uint32_t dp_service_near_full_srngs(void *dp_ctx, uint32_t dp_budget)
2558 {
2559 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2560 	struct dp_soc *soc = int_ctx->soc;
2561 
2562 	/*
2563 	 * dp_service_near_full_srngs arch ops should be initialized always
2564 	 * if the NEAR FULL IRQ feature is enabled.
2565 	 */
2566 	return soc->arch_ops.dp_service_near_full_srngs(soc, int_ctx,
2567 							dp_budget);
2568 }
2569 #endif
2570 
2571 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2572 
2573 /*
2574  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
2575  * @dp_ctx: DP SOC handle
2576  * @budget: Number of frames/descriptors that can be processed in one shot
2577  *
2578  * Return: remaining budget/quota for the soc device
2579  */
static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
{
	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_soc *soc = int_ctx->soc;
	int ring = 0;
	int index;
	uint32_t work_done  = 0;
	int budget = dp_budget;
	uint8_t tx_mask = int_ctx->tx_ring_mask;
	uint8_t rx_mask = int_ctx->rx_ring_mask;
	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
	uint32_t remaining_quota = dp_budget;

	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x\n",
			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
			 reo_status_mask,
			 int_ctx->rx_mon_ring_mask,
			 int_ctx->host2rxdma_ring_mask,
			 int_ctx->rxdma2host_ring_mask);

	/* Process Tx completion interrupts first to return back buffers */
	for (index = 0; index < soc->num_tx_comp_rings; index++) {
		if (!(1 << wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) & tx_mask))
			continue;
		work_done = dp_tx_comp_handler(int_ctx,
					       soc,
					       soc->tx_comp_ring[index].hal_srng,
					       index, remaining_quota);
		if (work_done) {
			intr_stats->num_tx_ring_masks[index]++;
			dp_verbose_debug("tx mask 0x%x index %d, budget %d, work_done %d",
					 tx_mask, index, budget,
					 work_done);
		}
		budget -= work_done;
		if (budget <= 0)
			goto budget_done;

		remaining_quota = budget;
	}

	/* Process REO Exception ring interrupt */
	if (rx_err_mask) {
		work_done = dp_rx_err_process(int_ctx, soc,
					      soc->reo_exception_ring.hal_srng,
					      remaining_quota);

		if (work_done) {
			intr_stats->num_rx_err_ring_masks++;
			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -=  work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx WBM release ring interrupt */
	if (rx_wbm_rel_mask) {
		work_done = dp_rx_wbm_err_process(int_ctx, soc,
						  soc->rx_rel_ring.hal_srng,
						  remaining_quota);

		if (work_done) {
			intr_stats->num_rx_wbm_rel_ring_masks++;
			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -=  work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx interrupts */
	if (rx_mask) {
		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
			/* Only REO destination rings flagged for this
			 * interrupt context are reaped
			 */
			if (!(rx_mask & (1 << ring)))
				continue;
			work_done = soc->arch_ops.dp_rx_process(int_ctx,
						  soc->reo_dest_ring[ring].hal_srng,
						  ring,
						  remaining_quota);
			if (work_done) {
				intr_stats->num_rx_ring_masks[ring]++;
				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
						 rx_mask, ring,
						 work_done, budget);
				budget -=  work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}
		}
	}

	/* REO status ring is command completion only; not budgeted */
	if (reo_status_mask) {
		if (dp_reo_status_ring_handler(int_ctx, soc))
			int_ctx->intr_stats.num_reo_status_ring_masks++;
	}

	/* LMAC rings are polled here unless the vdev timer owns them */
	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
		if (work_done) {
			budget -=  work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}
	}

	qdf_lro_flush(int_ctx->lro_ctx);
	intr_stats->num_masks++;

budget_done:
	/* Return the amount of budget actually consumed */
	return dp_budget - budget;
}
2705 
2706 #else /* QCA_HOST_MODE_WIFI_DISABLED */
2707 
2708 /*
2709  * dp_service_srngs() - Top level handler for DP Monitor Ring interrupts
2710  * @dp_ctx: DP SOC handle
2711  * @budget: Number of frames/descriptors that can be processed in one shot
2712  *
2713  * Return: remaining budget/quota for the soc device
2714  */
2715 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
2716 {
2717 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2718 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2719 	struct dp_soc *soc = int_ctx->soc;
2720 	uint32_t remaining_quota = dp_budget;
2721 	uint32_t work_done  = 0;
2722 	int budget = dp_budget;
2723 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
2724 
2725 	if (reo_status_mask) {
2726 		if (dp_reo_status_ring_handler(int_ctx, soc))
2727 			int_ctx->intr_stats.num_reo_status_ring_masks++;
2728 	}
2729 
2730 	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
2731 		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
2732 		if (work_done) {
2733 			budget -=  work_done;
2734 			if (budget <= 0)
2735 				goto budget_done;
2736 			remaining_quota = budget;
2737 		}
2738 	}
2739 
2740 	qdf_lro_flush(int_ctx->lro_ctx);
2741 	intr_stats->num_masks++;
2742 
2743 budget_done:
2744 	return dp_budget - budget;
2745 }
2746 
2747 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2748 
2749 /* dp_interrupt_timer()- timer poll for interrupts
2750  *
2751  * @arg: SoC Handle
2752  *
2753  * Return:
2754  *
2755  */
static void dp_interrupt_timer(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *) arg;
	struct dp_pdev *pdev = soc->pdev_list[0];
	enum timer_yield_status yield = DP_TIMER_NO_YIELD;
	uint32_t work_done  = 0, total_work_done = 0;
	int budget = 0xffff, i;
	uint32_t remaining_quota = budget;
	uint64_t start_time;
	uint32_t lmac_id = DP_MON_INVALID_LMAC_ID;
	/* defaults to one past the last valid context; overwritten below
	 * when the monitor lmac is known
	 */
	uint8_t dp_intr_id = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
	uint32_t lmac_iter;
	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
	enum reg_wifi_band mon_band;

	/*
	 * this logic makes all data path interfacing rings (UMAC/LMAC)
	 * and Monitor rings polling mode when NSS offload is disabled
	 */
	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx) &&
	    !wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
		if (qdf_atomic_read(&soc->cmn_init_done)) {
			for (i = 0; i < wlan_cfg_get_num_contexts(
						soc->wlan_cfg_ctx); i++)
				dp_service_srngs(&soc->intr_ctx[i], 0xffff);

			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
		}
		return;
	}

	/* SoC not fully initialized yet: do nothing, do not re-arm */
	if (!qdf_atomic_read(&soc->cmn_init_done))
		return;

	/* Map the monitored channel band to its lmac/interrupt context */
	if (dp_monitor_is_chan_band_known(pdev)) {
		mon_band = dp_monitor_get_chan_band(pdev);
		lmac_id = pdev->ch_band_lmac_id_mapping[mon_band];
		if (qdf_likely(lmac_id != DP_MON_INVALID_LMAC_ID)) {
			dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id];
			dp_srng_record_timer_entry(soc, dp_intr_id);
		}
	}

	start_time = qdf_get_log_timestamp();
	dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);

	/* Poll until the yield policy says stop: process the monitored
	 * mac's ring, drop packets on the other macs
	 */
	while (yield == DP_TIMER_NO_YIELD) {
		for (lmac_iter = 0; lmac_iter < max_mac_rings; lmac_iter++) {
			if (lmac_iter == lmac_id)
				work_done = dp_monitor_process(soc,
						&soc->intr_ctx[dp_intr_id],
						lmac_iter, remaining_quota);
			else
				work_done =
					dp_monitor_drop_packets_for_mac(pdev,
							     lmac_iter,
							     remaining_quota);
			if (work_done) {
				budget -=  work_done;
				if (budget <= 0) {
					yield = DP_TIMER_WORK_EXHAUST;
					goto budget_done;
				}
				remaining_quota = budget;
				total_work_done += work_done;
			}
		}

		yield = dp_should_timer_irq_yield(soc, total_work_done,
						  start_time);
		total_work_done = 0;
	}

budget_done:
	/* Re-arm quickly when work remains, otherwise at the normal rate */
	if (yield == DP_TIMER_WORK_EXHAUST ||
	    yield == DP_TIMER_TIME_EXHAUST)
		qdf_timer_mod(&soc->int_timer, 1);
	else
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

	if (lmac_id != DP_MON_INVALID_LMAC_ID)
		dp_srng_record_timer_exit(soc, dp_intr_id);
}
2839 
2840 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
2841 static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
2842 					struct dp_intr *intr_ctx)
2843 {
2844 	if (intr_ctx->rx_mon_ring_mask)
2845 		return true;
2846 
2847 	return false;
2848 }
2849 #else
/* Event history disabled: no context is treated as a monitor context */
static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
					struct dp_intr *intr_ctx)
{
	return false;
}
2855 #endif
2856 
2857 /*
2858  * dp_soc_attach_poll() - Register handlers for DP interrupts
2859  * @txrx_soc: DP SOC handle
2860  *
 * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
2862  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
2863  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
2864  *
2865  * Return: 0 for success, nonzero for failure.
2866  */
static QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;
	int lmac_id = 0;

	/* Start with an invalid lmac map; filled in below for monitor
	 * contexts
	 */
	qdf_mem_set(&soc->mon_intr_id_lmac_map,
		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
	soc->intr_mode = DP_INTR_POLL;

	/* Mirror each context's ring masks from the cfg into the soc */
	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].dp_intr_id = i;
		soc->intr_ctx[i].tx_ring_mask =
			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_ring_mask =
			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_mon_ring_mask =
			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_err_ring_mask =
			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].reo_status_ring_mask =
			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rxdma2host_ring_mask =
			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].soc = soc;
		soc->intr_ctx[i].lro_ctx = qdf_lro_init();

		/* Monitor contexts get event history and an lmac mapping */
		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
			hif_event_history_init(soc->hif_handle, i);
			soc->mon_intr_id_lmac_map[lmac_id] = i;
			lmac_id++;
		}
	}

	/* Poll mode: all ring processing is driven by this timer */
	qdf_timer_init(soc->osdev, &soc->int_timer,
			dp_interrupt_timer, (void *)soc,
			QDF_TIMER_TYPE_WAKE_APPS);

	return QDF_STATUS_SUCCESS;
}
2909 
2910 /**
2911  * dp_soc_set_interrupt_mode() - Set the interrupt mode in soc
 * @soc: DP soc handle
2913  *
2914  * Set the appropriate interrupt mode flag in the soc
2915  */
2916 static void dp_soc_set_interrupt_mode(struct dp_soc *soc)
2917 {
2918 	uint32_t msi_base_data, msi_vector_start;
2919 	int msi_vector_count, ret;
2920 
2921 	soc->intr_mode = DP_INTR_INTEGRATED;
2922 
2923 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
2924 	    (dp_is_monitor_mode_using_poll(soc) &&
2925 	     soc->cdp_soc.ol_ops->get_con_mode &&
2926 	     soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)) {
2927 		soc->intr_mode = DP_INTR_POLL;
2928 	} else {
2929 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
2930 						  &msi_vector_count,
2931 						  &msi_base_data,
2932 						  &msi_vector_start);
2933 		if (ret)
2934 			return;
2935 
2936 		soc->intr_mode = DP_INTR_MSI;
2937 	}
2938 }
2939 
2940 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc);
2941 #if defined(DP_INTR_POLL_BOTH)
2942 /*
2943  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
2944  * @txrx_soc: DP SOC handle
2945  *
2946  * Call the appropriate attach function based on the mode of operation.
2947  * This is a WAR for enabling monitor mode.
2948  *
2949  * Return: 0 for success. nonzero for failure.
2950  */
2951 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
2952 {
2953 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2954 
2955 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
2956 	    (dp_is_monitor_mode_using_poll(soc) &&
2957 	     soc->cdp_soc.ol_ops->get_con_mode &&
2958 	     soc->cdp_soc.ol_ops->get_con_mode() ==
2959 	     QDF_GLOBAL_MONITOR_MODE)) {
2960 		dp_info("Poll mode");
2961 		return dp_soc_attach_poll(txrx_soc);
2962 	} else {
2963 		dp_info("Interrupt  mode");
2964 		return dp_soc_interrupt_attach(txrx_soc);
2965 	}
2966 }
2967 #else
2968 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
/* DP_INTR_POLL_BASED build: ring processing is always timer-polled */
static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
{
	return dp_soc_attach_poll(txrx_soc);
}
2973 #else
2974 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
2975 {
2976 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2977 
2978 	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx))
2979 		return dp_soc_attach_poll(txrx_soc);
2980 	else
2981 		return dp_soc_interrupt_attach(txrx_soc);
2982 }
2983 #endif
2984 #endif
2985 
2986 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
/**
 * dp_soc_interrupt_map_calculate_wifi3_pci_legacy()
 * Calculate interrupt map for legacy interrupts
 * @soc: DP soc handle
 * @intr_ctx_num: Interrupt context number
 * @irq_id_map: IRQ map
 * @num_irq_r: Number of interrupts assigned for this context
 *
 * Return: void
 */
static void dp_soc_interrupt_map_calculate_wifi3_pci_legacy(struct dp_soc *soc,
							    int intr_ctx_num,
							    int *irq_id_map,
							    int *num_irq_r)
{
	int j;
	int num_irq = 0;
	/* Per-ring-type masks configured for this interrupt context; bit j
	 * of a mask selects ring instance j of that ring type.
	 */
	int tx_mask = wlan_cfg_get_tx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask = wlan_cfg_get_rx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	soc->intr_mode = DP_INTR_LEGACY_VIRTUAL_IRQ;
	/* Translate every set mask bit into a legacy virtual IRQ id; the
	 * id for instance j is computed as (enum base id - j).
	 */
	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
		if (tx_mask & (1 << j))
			irq_id_map[num_irq++] = (wbm2sw0_release - j);
		if (rx_mask & (1 << j))
			irq_id_map[num_irq++] = (reo2sw1_intr - j);
		if (rx_mon_mask & (1 << j))
			irq_id_map[num_irq++] = (rxmon2sw_p0_dest0 - j);
		if (rx_err_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (reo2sw0_intr - j);
		if (rx_wbm_rel_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (wbm2sw5_release - j);
		if (reo_status_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (reo_status - j);
		if (rxdma2host_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (rxdma2sw_dst_ring0 - j);
		if (host2rxdma_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (sw2rxdma_0 - j);
		if (host2rxdma_mon_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (sw2rxmon_src_ring - j);
	}
	*num_irq_r = num_irq;
}
3045 #else
/**
 * dp_soc_interrupt_map_calculate_wifi3_pci_legacy()
 * Calculate interrupt map for legacy interrupts
 * @soc: DP soc handle
 * @intr_ctx_num: Interrupt context number
 * @irq_id_map: IRQ map
 * @num_irq_r: Number of interrupts assigned for this context
 *
 * Stub used when QCA_SUPPORT_LEGACY_INTERRUPTS is not defined; leaves both
 * @irq_id_map and @num_irq_r untouched.
 * NOTE(review): the caller only reaches this path when the platform enables
 * INTx; presumably that cannot happen in builds without legacy interrupt
 * support - confirm.
 *
 * Return: void
 */
static void dp_soc_interrupt_map_calculate_wifi3_pci_legacy(struct dp_soc *soc,
							    int intr_ctx_num,
							    int *irq_id_map,
							    int *num_irq_r)
{
}
3062 #endif
3063 
/**
 * dp_soc_interrupt_map_calculate_integrated() - Calculate interrupt map for
 * integrated interrupt mode
 * @soc: DP soc handle
 * @intr_ctx_num: Interrupt context number
 * @irq_id_map: IRQ map to fill
 * @num_irq_r: Number of interrupts assigned to this context
 *
 * Return: void
 */
static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
{
	int j;
	int num_irq = 0;

	/* Per-ring-type masks configured for this interrupt context; bit j
	 * of a mask selects ring instance j of that ring type.
	 */
	int tx_mask =
		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask =
		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask =
		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);

	soc->intr_mode = DP_INTR_INTEGRATED;

	/* Translate each set mask bit into an IRQ id; the id for ring
	 * instance j of a type is (enum base id - j).
	 */
	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {

		if (tx_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(wbm2host_tx_completions_ring1 - j);
		}

		if (rx_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(reo2host_destination_ring1 - j);
		}

		if (rxdma2host_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				rxdma2host_destination_ring_mac1 - j;
		}

		if (host2rxdma_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				host2rxdma_host_buf_ring_mac1 -	j;
		}

		if (host2rxdma_mon_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				host2rxdma_monitor_ring1 - j;
		}

		/* RX monitor needs three IRQ ids for every set mask bit */
		if (rx_mon_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				ppdu_end_interrupts_mac1 - j;
			irq_id_map[num_irq++] =
				rxdma2host_monitor_status_ring_mac1 - j;
			irq_id_map[num_irq++] =
				rxdma2host_monitor_destination_mac1 - j;
		}

		if (rx_wbm_rel_ring_mask & (1 << j))
			irq_id_map[num_irq++] = wbm2host_rx_release;

		if (rx_err_ring_mask & (1 << j))
			irq_id_map[num_irq++] = reo2host_exception;

		if (reo_status_ring_mask & (1 << j))
			irq_id_map[num_irq++] = reo2host_status;

	}
	*num_irq_r = num_irq;
}
3139 
/**
 * dp_soc_interrupt_map_calculate_msi() - Calculate interrupt map for MSI mode
 * @soc: DP soc handle
 * @intr_ctx_num: Interrupt context number
 * @irq_id_map: IRQ map to fill
 * @num_irq_r: Number of interrupts assigned to this context
 * @msi_vector_count: Number of MSI vectors granted to DP
 * @msi_vector_start: First MSI vector index granted to DP
 *
 * All rings serviced by one interrupt context share a single MSI vector;
 * contexts are spread round-robin across the granted vectors.
 *
 * Return: void
 */
static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
		int msi_vector_count, int msi_vector_start)
{
	int tx_mask = wlan_cfg_get_tx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask = wlan_cfg_get_rx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int tx_mon_mask = wlan_cfg_get_tx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_near_full_grp_1_mask =
		wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
						     intr_ctx_num);
	int rx_near_full_grp_2_mask =
		wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
						     intr_ctx_num);
	int tx_ring_near_full_mask =
		wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
						    intr_ctx_num);

	int host2txmon_ring_mask =
		wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx,
						  intr_ctx_num);
	/* Round-robin assignment of contexts over the granted vectors */
	unsigned int vector =
		(intr_ctx_num % msi_vector_count) + msi_vector_start;
	int num_irq = 0;

	soc->intr_mode = DP_INTR_MSI;

	/* Emit exactly one IRQ id for this context if any ring mask is set */
	if (tx_mask | rx_mask | rx_mon_mask | tx_mon_mask | rx_err_ring_mask |
	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask |
	    host2rxdma_ring_mask | host2rxdma_mon_ring_mask |
	    rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
	    tx_ring_near_full_mask | host2txmon_ring_mask)
		irq_id_map[num_irq++] =
			pld_get_msi_irq(soc->osdev->dev, vector);

	*num_irq_r = num_irq;
}
3193 
3194 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
3195 				    int *irq_id_map, int *num_irq)
3196 {
3197 	int msi_vector_count, ret;
3198 	uint32_t msi_base_data, msi_vector_start;
3199 
3200 	if (pld_get_enable_intx(soc->osdev->dev)) {
3201 		return dp_soc_interrupt_map_calculate_wifi3_pci_legacy(soc,
3202 				intr_ctx_num, irq_id_map, num_irq);
3203 	}
3204 
3205 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
3206 					    &msi_vector_count,
3207 					    &msi_base_data,
3208 					    &msi_vector_start);
3209 	if (ret)
3210 		return dp_soc_interrupt_map_calculate_integrated(soc,
3211 				intr_ctx_num, irq_id_map, num_irq);
3212 
3213 	else
3214 		dp_soc_interrupt_map_calculate_msi(soc,
3215 				intr_ctx_num, irq_id_map, num_irq,
3216 				msi_vector_count, msi_vector_start);
3217 }
3218 
#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * dp_soc_near_full_interrupt_attach() - Register handler for DP near fill irq
 * @soc: DP soc handle
 * @num_irq: IRQ number
 * @irq_id_map: IRQ map
 * @intr_id: interrupt context ID
 *
 * Return: 0 for success. nonzero for failure.
 */
static inline int
dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
				  int irq_id_map[], int intr_id)
{
	return hif_register_ext_group(soc->hif_handle,
				      num_irq, irq_id_map,
				      dp_service_near_full_srngs,
				      &soc->intr_ctx[intr_id], "dp_nf_intr",
				      HIF_EXEC_NAPI_TYPE,
				      QCA_NAPI_DEF_SCALE_BIN_SHIFT);
}
#else
/* Near-full IRQ feature disabled: attach is a no-op reporting success. */
static inline int
dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
				  int *irq_id_map, int intr_id)
{
	return 0;
}
#endif
3248 
/*
 * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
 * @txrx_soc: DP SOC handle
 *
 * Return: none
 */
static void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;

	if (soc->intr_mode == DP_INTR_POLL) {
		/* Poll mode services rings from a timer, not IRQ groups */
		qdf_timer_free(&soc->int_timer);
	} else {
		hif_deconfigure_ext_group_interrupts(soc->hif_handle);
		/* Remove both the regular and the near-full exec groups */
		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
		hif_deregister_exec_group(soc->hif_handle, "dp_nf_intr");
	}

	/* Clear all per-context ring masks and release per-context state */
	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].tx_ring_mask = 0;
		soc->intr_ctx[i].rx_ring_mask = 0;
		soc->intr_ctx[i].rx_mon_ring_mask = 0;
		soc->intr_ctx[i].rx_err_ring_mask = 0;
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
		soc->intr_ctx[i].reo_status_ring_mask = 0;
		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
		soc->intr_ctx[i].rx_near_full_grp_1_mask = 0;
		soc->intr_ctx[i].rx_near_full_grp_2_mask = 0;
		soc->intr_ctx[i].tx_ring_near_full_mask = 0;
		soc->intr_ctx[i].tx_mon_ring_mask = 0;
		soc->intr_ctx[i].host2txmon_ring_mask = 0;
		soc->intr_ctx[i].umac_reset_intr_mask = 0;

		hif_event_history_deinit(soc->hif_handle, i);
		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
	}

	/* Invalidate the monitor-interrupt to lmac id mapping */
	qdf_mem_set(&soc->mon_intr_id_lmac_map,
		    sizeof(soc->mon_intr_id_lmac_map),
		    DP_MON_INVALID_LMAC_ID);
}
3293 
3294 /*
3295  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
3296  * @txrx_soc: DP SOC handle
3297  *
3298  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
3299  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
3300  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
3301  *
3302  * Return: 0 for success. nonzero for failure.
3303  */
3304 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
3305 {
3306 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3307 
3308 	int i = 0;
3309 	int num_irq = 0;
3310 	int rx_err_ring_intr_ctxt_id = HIF_MAX_GROUP;
3311 	int lmac_id = 0;
3312 	int napi_scale;
3313 
3314 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
3315 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
3316 
3317 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
3318 		int ret = 0;
3319 
3320 		/* Map of IRQ ids registered with one interrupt context */
3321 		int irq_id_map[HIF_MAX_GRP_IRQ];
3322 
3323 		int tx_mask =
3324 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
3325 		int rx_mask =
3326 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
3327 		int rx_mon_mask =
3328 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
3329 		int tx_mon_ring_mask =
3330 			wlan_cfg_get_tx_mon_ring_mask(soc->wlan_cfg_ctx, i);
3331 		int rx_err_ring_mask =
3332 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
3333 		int rx_wbm_rel_ring_mask =
3334 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
3335 		int reo_status_ring_mask =
3336 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
3337 		int rxdma2host_ring_mask =
3338 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
3339 		int host2rxdma_ring_mask =
3340 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
3341 		int host2rxdma_mon_ring_mask =
3342 			wlan_cfg_get_host2rxdma_mon_ring_mask(
3343 				soc->wlan_cfg_ctx, i);
3344 		int rx_near_full_grp_1_mask =
3345 			wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
3346 							     i);
3347 		int rx_near_full_grp_2_mask =
3348 			wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
3349 							     i);
3350 		int tx_ring_near_full_mask =
3351 			wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
3352 							    i);
3353 		int host2txmon_ring_mask =
3354 			wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx, i);
3355 		int umac_reset_intr_mask =
3356 			wlan_cfg_get_umac_reset_intr_mask(soc->wlan_cfg_ctx, i);
3357 
3358 		soc->intr_ctx[i].dp_intr_id = i;
3359 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
3360 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
3361 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
3362 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
3363 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
3364 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
3365 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
3366 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
3367 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
3368 			 host2rxdma_mon_ring_mask;
3369 		soc->intr_ctx[i].rx_near_full_grp_1_mask =
3370 						rx_near_full_grp_1_mask;
3371 		soc->intr_ctx[i].rx_near_full_grp_2_mask =
3372 						rx_near_full_grp_2_mask;
3373 		soc->intr_ctx[i].tx_ring_near_full_mask =
3374 						tx_ring_near_full_mask;
3375 		soc->intr_ctx[i].tx_mon_ring_mask = tx_mon_ring_mask;
3376 		soc->intr_ctx[i].host2txmon_ring_mask = host2txmon_ring_mask;
3377 		soc->intr_ctx[i].umac_reset_intr_mask = umac_reset_intr_mask;
3378 
3379 		soc->intr_ctx[i].soc = soc;
3380 
3381 		num_irq = 0;
3382 
3383 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
3384 					       &num_irq);
3385 
3386 		if (rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
3387 		    tx_ring_near_full_mask) {
3388 			dp_soc_near_full_interrupt_attach(soc, num_irq,
3389 							  irq_id_map, i);
3390 		} else {
3391 			napi_scale = wlan_cfg_get_napi_scale_factor(
3392 							    soc->wlan_cfg_ctx);
3393 			if (!napi_scale)
3394 				napi_scale = QCA_NAPI_DEF_SCALE_BIN_SHIFT;
3395 
3396 			ret = hif_register_ext_group(soc->hif_handle,
3397 				num_irq, irq_id_map, dp_service_srngs,
3398 				&soc->intr_ctx[i], "dp_intr",
3399 				HIF_EXEC_NAPI_TYPE, napi_scale);
3400 		}
3401 
3402 		dp_debug(" int ctx %u num_irq %u irq_id_map %u %u",
3403 			 i, num_irq, irq_id_map[0], irq_id_map[1]);
3404 
3405 		if (ret) {
3406 			dp_init_err("%pK: failed, ret = %d", soc, ret);
3407 			dp_soc_interrupt_detach(txrx_soc);
3408 			return QDF_STATUS_E_FAILURE;
3409 		}
3410 
3411 		hif_event_history_init(soc->hif_handle, i);
3412 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
3413 
3414 		if (rx_err_ring_mask)
3415 			rx_err_ring_intr_ctxt_id = i;
3416 
3417 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
3418 			soc->mon_intr_id_lmac_map[lmac_id] = i;
3419 			lmac_id++;
3420 		}
3421 	}
3422 
3423 	hif_configure_ext_group_interrupts(soc->hif_handle);
3424 	if (rx_err_ring_intr_ctxt_id != HIF_MAX_GROUP)
3425 		hif_config_irq_clear_cpu_affinity(soc->hif_handle,
3426 						  rx_err_ring_intr_ctxt_id, 0);
3427 
3428 	return QDF_STATUS_SUCCESS;
3429 }
3430 
3431 #define AVG_MAX_MPDUS_PER_TID 128
3432 #define AVG_TIDS_PER_CLIENT 2
3433 #define AVG_FLOWS_PER_TID 2
3434 #define AVG_MSDUS_PER_FLOW 128
3435 #define AVG_MSDUS_PER_MPDU 4
3436 
3437 /*
3438  * dp_hw_link_desc_pool_banks_free() - Free h/w link desc pool banks
3439  * @soc: DP SOC handle
3440  * @mac_id: mac id
3441  *
3442  * Return: none
3443  */
3444 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id)
3445 {
3446 	struct qdf_mem_multi_page_t *pages;
3447 
3448 	if (mac_id != WLAN_INVALID_PDEV_ID) {
3449 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3450 	} else {
3451 		pages = &soc->link_desc_pages;
3452 	}
3453 
3454 	if (!pages) {
3455 		dp_err("can not get link desc pages");
3456 		QDF_ASSERT(0);
3457 		return;
3458 	}
3459 
3460 	if (pages->dma_pages) {
3461 		wlan_minidump_remove((void *)
3462 				     pages->dma_pages->page_v_addr_start,
3463 				     pages->num_pages * pages->page_size,
3464 				     soc->ctrl_psoc,
3465 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3466 				     "hw_link_desc_bank");
3467 		dp_desc_multi_pages_mem_free(soc, DP_HW_LINK_DESC_TYPE,
3468 					     pages, 0, false);
3469 	}
3470 }
3471 
3472 qdf_export_symbol(dp_hw_link_desc_pool_banks_free);
3473 
/*
 * dp_hw_link_desc_pool_banks_alloc() - Allocate h/w link desc pool banks
 * @soc: DP SOC handle
 * @mac_id: mac id
 *
 * Allocates memory pages for link descriptors, the page size is 4K for
 * MCL and 2MB for WIN. if the mac_id is invalid link descriptor pages are
 * allocated for regular RX/TX and if the there is a proper mac_id link
 * descriptors are allocated for RX monitor mode.
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *	   QDF_STATUS_E_FAILURE: Failure
 */
QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc, uint32_t mac_id)
{
	hal_soc_handle_t hal_soc = soc->hal_soc;
	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
	uint32_t num_mpdus_per_link_desc = hal_num_mpdus_per_link_desc(hal_soc);
	uint32_t num_msdus_per_link_desc = hal_num_msdus_per_link_desc(hal_soc);
	uint32_t num_mpdu_links_per_queue_desc =
		hal_num_mpdu_links_per_queue_desc(hal_soc);
	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
	uint32_t *total_link_descs, total_mem_size;
	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
	uint32_t num_entries;
	struct qdf_mem_multi_page_t *pages;
	struct dp_srng *dp_srng;
	uint8_t minidump_str[MINIDUMP_STR_SIZE];

	/* Only Tx queue descriptors are allocated from common link descriptor
	 * pool Rx queue descriptors are not included in this because (REO queue
	 * extension descriptors) they are expected to be allocated contiguously
	 * with REO queue descriptors
	 */
	if (mac_id != WLAN_INVALID_PDEV_ID) {
		/* Monitor path: pool sized from the monitor desc ring */
		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
		/* dp_monitor_get_link_desc_pages returns NULL only
		 * if monitor SOC is  NULL
		 */
		if (!pages) {
			dp_err("can not get link desc pages");
			QDF_ASSERT(0);
			return QDF_STATUS_E_FAULT;
		}
		dp_srng = &soc->rxdma_mon_desc_ring[mac_id];
		num_entries = dp_srng->alloc_size /
			hal_srng_get_entrysize(soc->hal_soc,
					       RXDMA_MONITOR_DESC);
		total_link_descs = dp_monitor_get_total_link_descs(soc, mac_id);
		qdf_str_lcopy(minidump_str, "mon_link_desc_bank",
			      MINIDUMP_STR_SIZE);
	} else {
		/* Regular path: entry count derived from per-client traffic
		 * averages (AVG_* macros above) and HAL packing factors.
		 */
		num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
			AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;

		num_mpdu_queue_descs = num_mpdu_link_descs /
			num_mpdu_links_per_queue_desc;

		num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
			AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
			num_msdus_per_link_desc;

		/* NOTE(review): the divisor 6 looks like an empirical
		 * scaling factor for RX MSDU link descriptors - confirm.
		 */
		num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
			AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;

		num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
			num_tx_msdu_link_descs + num_rx_msdu_link_descs;

		pages = &soc->link_desc_pages;
		total_link_descs = &soc->total_link_descs;
		qdf_str_lcopy(minidump_str, "link_desc_bank",
			      MINIDUMP_STR_SIZE);
	}

	/* If link descriptor banks are allocated, return from here */
	if (pages->num_pages)
		return QDF_STATUS_SUCCESS;

	/* Round up to power of 2 */
	*total_link_descs = 1;
	while (*total_link_descs < num_entries)
		*total_link_descs <<= 1;

	dp_init_info("%pK: total_link_descs: %u, link_desc_size: %d",
		     soc, *total_link_descs, link_desc_size);
	total_mem_size =  *total_link_descs * link_desc_size;
	total_mem_size += link_desc_align;

	dp_init_info("%pK: total_mem_size: %d",
		     soc, total_mem_size);

	dp_set_max_page_size(pages, max_alloc_size);
	dp_desc_multi_pages_mem_alloc(soc, DP_HW_LINK_DESC_TYPE,
				      pages,
				      link_desc_size,
				      *total_link_descs,
				      0, false);
	if (!pages->num_pages) {
		dp_err("Multi page alloc fail for hw link desc pool");
		return QDF_STATUS_E_FAULT;
	}

	/* NOTE(review): minidump_str is populated above but the literal
	 * "hw_link_desc_bank" is passed here - possibly minidump_str was
	 * intended; confirm against wlan_minidump_log usage elsewhere.
	 */
	wlan_minidump_log(pages->dma_pages->page_v_addr_start,
			  pages->num_pages * pages->page_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
			  "hw_link_desc_bank");

	return QDF_STATUS_SUCCESS;
}
3587 
3588 /*
3589  * dp_hw_link_desc_ring_free() - Free h/w link desc rings
3590  * @soc: DP SOC handle
3591  *
3592  * Return: none
3593  */
3594 static void dp_hw_link_desc_ring_free(struct dp_soc *soc)
3595 {
3596 	uint32_t i;
3597 	uint32_t size = soc->wbm_idle_scatter_buf_size;
3598 	void *vaddr = soc->wbm_idle_link_ring.base_vaddr_unaligned;
3599 	qdf_dma_addr_t paddr;
3600 
3601 	if (soc->wbm_idle_scatter_buf_base_vaddr[0]) {
3602 		for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
3603 			vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
3604 			paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
3605 			if (vaddr) {
3606 				qdf_mem_free_consistent(soc->osdev,
3607 							soc->osdev->dev,
3608 							size,
3609 							vaddr,
3610 							paddr,
3611 							0);
3612 				vaddr = NULL;
3613 			}
3614 		}
3615 	} else {
3616 		wlan_minidump_remove(soc->wbm_idle_link_ring.base_vaddr_unaligned,
3617 				     soc->wbm_idle_link_ring.alloc_size,
3618 				     soc->ctrl_psoc,
3619 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3620 				     "wbm_idle_link_ring");
3621 		dp_srng_free(soc, &soc->wbm_idle_link_ring);
3622 	}
3623 }
3624 
/*
 * dp_hw_link_desc_ring_alloc() - Allocate hw link desc rings
 * @soc: DP SOC handle
 *
 * Allocate memory for WBM_IDLE_LINK srng ring if the number of
 * link descriptors is less then the max_allocated size. else
 * allocate memory for wbm_idle_scatter_buffer.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_NO_MEM: No memory (Failure)
 */
static QDF_STATUS dp_hw_link_desc_ring_alloc(struct dp_soc *soc)
{
	uint32_t entry_size, i;
	uint32_t total_mem_size;
	qdf_dma_addr_t *baseaddr = NULL;
	struct dp_srng *dp_srng;
	uint32_t ring_type;
	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
	uint32_t tlds;

	ring_type = WBM_IDLE_LINK;
	dp_srng = &soc->wbm_idle_link_ring;
	tlds = soc->total_link_descs;

	entry_size = hal_srng_get_entrysize(soc->hal_soc, ring_type);
	total_mem_size = entry_size * tlds;

	if (total_mem_size <= max_alloc_size) {
		/* Small enough for one contiguous SRNG allocation */
		if (dp_srng_alloc(soc, dp_srng, ring_type, tlds, 0)) {
			dp_init_err("%pK: Link desc idle ring setup failed",
				    soc);
			goto fail;
		}

		wlan_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
				  soc->wbm_idle_link_ring.alloc_size,
				  soc->ctrl_psoc,
				  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
				  "wbm_idle_link_ring");
	} else {
		/* Too large for one ring: split the idle list across
		 * multiple scatter buffers sized by the HAL.
		 */
		uint32_t num_scatter_bufs;
		uint32_t num_entries_per_buf;
		uint32_t buf_size = 0;

		soc->wbm_idle_scatter_buf_size =
			hal_idle_list_scatter_buf_size(soc->hal_soc);
		/* NOTE(review): num_entries_per_buf is computed but not
		 * read in this function - possibly vestigial.
		 */
		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
					soc->hal_soc, total_mem_size,
					soc->wbm_idle_scatter_buf_size);

		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("scatter bufs size out of bounds"));
			goto fail;
		}

		for (i = 0; i < num_scatter_bufs; i++) {
			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
			buf_size = soc->wbm_idle_scatter_buf_size;
			soc->wbm_idle_scatter_buf_base_vaddr[i] =
				qdf_mem_alloc_consistent(soc->osdev,
							 soc->osdev->dev,
							 buf_size,
							 baseaddr);

			if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("Scatter lst memory alloc fail"));
				goto fail;
			}
		}
		soc->num_scatter_bufs = num_scatter_bufs;
	}
	return QDF_STATUS_SUCCESS;

fail:
	/* Release any scatter buffers allocated before the failure */
	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
		void *vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
		qdf_dma_addr_t paddr = soc->wbm_idle_scatter_buf_base_paddr[i];

		if (vaddr) {
			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
						soc->wbm_idle_scatter_buf_size,
						vaddr,
						paddr, 0);
			vaddr = NULL;
		}
	}
	return QDF_STATUS_E_NOMEM;
}
3719 
3720 qdf_export_symbol(dp_hw_link_desc_pool_banks_alloc);
3721 
3722 /*
3723  * dp_hw_link_desc_ring_init() - Initialize hw link desc rings
3724  * @soc: DP SOC handle
3725  *
3726  * Return: QDF_STATUS_SUCCESS: success
3727  *         QDF_STATUS_E_FAILURE: failure
3728  */
3729 static QDF_STATUS dp_hw_link_desc_ring_init(struct dp_soc *soc)
3730 {
3731 	struct dp_srng *dp_srng = &soc->wbm_idle_link_ring;
3732 
3733 	if (dp_srng->base_vaddr_unaligned) {
3734 		if (dp_srng_init(soc, dp_srng, WBM_IDLE_LINK, 0, 0))
3735 			return QDF_STATUS_E_FAILURE;
3736 	}
3737 	return QDF_STATUS_SUCCESS;
3738 }
3739 
/*
 * dp_hw_link_desc_ring_deinit() - Reset hw link desc rings
 * @soc: DP SOC handle
 *
 * Return: None
 */
static void dp_hw_link_desc_ring_deinit(struct dp_soc *soc)
{
	/* NOTE(review): called unconditionally; presumably dp_srng_deinit()
	 * tolerates a ring that was never initialized (scatter-buffer mode)
	 * - confirm.
	 */
	dp_srng_deinit(soc, &soc->wbm_idle_link_ring, WBM_IDLE_LINK, 0);
}
3750 
/*
 * dp_link_desc_ring_replenish() - Replenish hw link desc rings
 * @soc: DP SOC handle
 * @mac_id: mac id
 *
 * Return: None
 */
void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id)
{
	uint32_t cookie = 0;
	uint32_t page_idx = 0;
	struct qdf_mem_multi_page_t *pages;
	struct qdf_mem_dma_page_t *dma_pages;
	uint32_t offset = 0;
	uint32_t count = 0;
	uint32_t desc_id = 0;
	void *desc_srng;
	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
	uint32_t *total_link_descs_addr;
	uint32_t total_link_descs;
	uint32_t scatter_buf_num;
	uint32_t num_entries_per_buf = 0;
	uint32_t rem_entries;
	uint32_t num_descs_per_page;
	uint32_t num_scatter_bufs = 0;
	uint8_t *scatter_buf_ptr;
	void *desc;

	num_scatter_bufs = soc->num_scatter_bufs;

	/* Select the common pool or the per-mac monitor pool */
	if (mac_id == WLAN_INVALID_PDEV_ID) {
		pages = &soc->link_desc_pages;
		total_link_descs = soc->total_link_descs;
		desc_srng = soc->wbm_idle_link_ring.hal_srng;
	} else {
		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
		/* dp_monitor_get_link_desc_pages returns NULL only
		 * if monitor SOC is  NULL
		 */
		if (!pages) {
			dp_err("can not get link desc pages");
			QDF_ASSERT(0);
			return;
		}
		total_link_descs_addr =
				dp_monitor_get_total_link_descs(soc, mac_id);
		total_link_descs = *total_link_descs_addr;
		desc_srng = soc->rxdma_mon_desc_ring[mac_id].hal_srng;
	}

	/* Zero every descriptor page before writing link descriptors */
	dma_pages = pages->dma_pages;
	do {
		qdf_mem_zero(dma_pages[page_idx].page_v_addr_start,
			     pages->page_size);
		page_idx++;
	} while (page_idx < pages->num_pages);

	if (desc_srng) {
		/* Ring-based idle list: write one link descriptor address
		 * (with its cookie) into each SRNG entry.
		 */
		hal_srng_access_start_unlocked(soc->hal_soc, desc_srng);
		page_idx = 0;
		count = 0;
		offset = 0;
		pages = &soc->link_desc_pages;
		while ((desc = hal_srng_src_get_next(soc->hal_soc,
						     desc_srng)) &&
			(count < total_link_descs)) {
			page_idx = count / pages->num_element_per_page;
			/* desc_id wraps per page so the cookie encodes a
			 * (page, index-within-page) pair.
			 */
			if (desc_id == pages->num_element_per_page)
				desc_id = 0;

			offset = count % pages->num_element_per_page;
			cookie = LINK_DESC_COOKIE(desc_id, page_idx,
						  soc->link_desc_id_start);

			hal_set_link_desc_addr(soc->hal_soc, desc, cookie,
					       dma_pages[page_idx].page_p_addr
					       + (offset * link_desc_size),
					       soc->idle_link_bm_id);
			count++;
			desc_id++;
		}
		hal_srng_access_end_unlocked(soc->hal_soc, desc_srng);
	} else {
		/* Populate idle list scatter buffers with link descriptor
		 * pointers
		 */
		scatter_buf_num = 0;
		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
					soc->hal_soc,
					soc->wbm_idle_scatter_buf_size);

		scatter_buf_ptr = (uint8_t *)(
			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
		rem_entries = num_entries_per_buf;
		pages = &soc->link_desc_pages;
		page_idx = 0; count = 0;
		offset = 0;
		num_descs_per_page = pages->num_element_per_page;

		while (count < total_link_descs) {
			page_idx = count / num_descs_per_page;
			offset = count % num_descs_per_page;
			if (desc_id == pages->num_element_per_page)
				desc_id = 0;

			cookie = LINK_DESC_COOKIE(desc_id, page_idx,
						  soc->link_desc_id_start);
			hal_set_link_desc_addr(soc->hal_soc,
					       (void *)scatter_buf_ptr,
					       cookie,
					       dma_pages[page_idx].page_p_addr +
					       (offset * link_desc_size),
					       soc->idle_link_bm_id);
			rem_entries--;
			if (rem_entries) {
				scatter_buf_ptr += link_desc_size;
			} else {
				/* Current buffer exhausted: move on to the
				 * next scatter buffer, if any remain.
				 */
				rem_entries = num_entries_per_buf;
				scatter_buf_num++;
				if (scatter_buf_num >= num_scatter_bufs)
					break;
				scatter_buf_ptr = (uint8_t *)
					(soc->wbm_idle_scatter_buf_base_vaddr[
					 scatter_buf_num]);
			}
			count++;
			desc_id++;
		}
		/* Setup link descriptor idle list in HW */
		hal_setup_link_idle_list(soc->hal_soc,
			soc->wbm_idle_scatter_buf_base_paddr,
			soc->wbm_idle_scatter_buf_base_vaddr,
			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
			(uint32_t)(scatter_buf_ptr -
			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
			scatter_buf_num-1])), total_link_descs);
	}
}
3889 
3890 qdf_export_symbol(dp_link_desc_ring_replenish);
3891 
3892 #ifdef IPA_OFFLOAD
3893 #define USE_1_IPA_RX_REO_RING 1
3894 #define USE_2_IPA_RX_REO_RINGS 2
3895 #define REO_DST_RING_SIZE_QCA6290 1023
3896 #ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
3897 #define REO_DST_RING_SIZE_QCA8074 1023
3898 #define REO_DST_RING_SIZE_QCN9000 2048
3899 #else
3900 #define REO_DST_RING_SIZE_QCA8074 8
3901 #define REO_DST_RING_SIZE_QCN9000 8
3902 #endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
3903 
3904 #ifdef IPA_WDI3_TX_TWO_PIPES
3905 #ifdef DP_MEMORY_OPT
/* DP_MEMORY_OPT: initialize the IPA alternate TX ring pair on demand */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return dp_init_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}
3910 
/* DP_MEMORY_OPT: deinitialize the IPA alternate TX ring pair */
static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
	dp_deinit_tx_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}
3915 
/* DP_MEMORY_OPT: allocate SRNG memory for the IPA alternate TX ring pair */
static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return dp_alloc_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}
3920 
/* DP_MEMORY_OPT: free SRNG memory of the IPA alternate TX ring pair */
static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
	dp_free_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}
3925 
3926 #else /* !DP_MEMORY_OPT */
/* !DP_MEMORY_OPT: no on-demand alt TX ring init here; report success */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}
3931 
/* !DP_MEMORY_OPT: no-op */
static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}
3935 
/* !DP_MEMORY_OPT: no on-demand alt TX ring allocation here; report success.
 * Fix: the original body read "return 0" without a terminating semicolon,
 * which is a compile error whenever this #else branch is built.
 */
static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}
3940 
/* !DP_MEMORY_OPT: no-op */
static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}
3944 #endif /* DP_MEMORY_OPT */
3945 
/* Program the HAL TX init sequence for the IPA alternate TCL data ring */
static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
	hal_tx_init_data_ring(soc->hal_soc,
			      soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng);
}
3951 
3952 #else /* !IPA_WDI3_TX_TWO_PIPES */
/* !IPA_WDI3_TX_TWO_PIPES: no alternate TX pipe; report success */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}
3957 
/* !IPA_WDI3_TX_TWO_PIPES: no-op */
static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}
3961 
/* !IPA_WDI3_TX_TWO_PIPES: no alternate TX pipe; report success */
static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}
3966 
/* !IPA_WDI3_TX_TWO_PIPES: no-op */
static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}
3970 
/* !IPA_WDI3_TX_TWO_PIPES: no-op */
static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
}
3974 
3975 #endif /* IPA_WDI3_TX_TWO_PIPES */
3976 
3977 #else
3978 
3979 #define REO_DST_RING_SIZE_QCA6290 1024
3980 
/* IPA offload disabled: no alternate TX ring; report success */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}
3985 
/* IPA offload disabled: no-op */
static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}
3989 
/* IPA offload disabled: no alternate TX ring; report success */
static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}
3994 
/* IPA offload disabled: no-op */
static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}
3998 
/* IPA offload disabled: no-op */
static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
}
4002 
4003 #endif /* IPA_OFFLOAD */
4004 
/*
 * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
 * @soc: Datapath soc handler
 *
 * This api resets the default cpu ring map
 */
4011 
4012 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
4013 {
4014 	uint8_t i;
4015 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4016 
4017 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
4018 		switch (nss_config) {
4019 		case dp_nss_cfg_first_radio:
4020 			/*
4021 			 * Setting Tx ring map for one nss offloaded radio
4022 			 */
4023 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
4024 			break;
4025 
4026 		case dp_nss_cfg_second_radio:
4027 			/*
4028 			 * Setting Tx ring for two nss offloaded radios
4029 			 */
4030 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
4031 			break;
4032 
4033 		case dp_nss_cfg_dbdc:
4034 			/*
4035 			 * Setting Tx ring map for 2 nss offloaded radios
4036 			 */
4037 			soc->tx_ring_map[i] =
4038 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
4039 			break;
4040 
4041 		case dp_nss_cfg_dbtc:
4042 			/*
4043 			 * Setting Tx ring map for 3 nss offloaded radios
4044 			 */
4045 			soc->tx_ring_map[i] =
4046 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
4047 			break;
4048 
4049 		default:
4050 			dp_err("tx_ring_map failed due to invalid nss cfg");
4051 			break;
4052 		}
4053 	}
4054 }
4055 
/*
 * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
 * @soc: DP soc handle
 * @ring_type: ring type
 * @ring_num: ring number
 *
 * Return: 0 if the ring is not NSS offloaded, non-zero otherwise
 */
4064 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
4065 {
4066 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4067 	uint8_t status = 0;
4068 
4069 	switch (ring_type) {
4070 	case WBM2SW_RELEASE:
4071 	case REO_DST:
4072 	case RXDMA_BUF:
4073 	case REO_EXCEPTION:
4074 		status = ((nss_config) & (1 << ring_num));
4075 		break;
4076 	default:
4077 		break;
4078 	}
4079 
4080 	return status;
4081 }
4082 
4083 /*
4084  * dp_soc_disable_unused_mac_intr_mask() - reset interrupt mask for
4085  *					  unused WMAC hw rings
4086  * @dp_soc - DP Soc handle
4087  * @mac_num - wmac num
4088  *
4089  * Return: Return void
4090  */
4091 static void dp_soc_disable_unused_mac_intr_mask(struct dp_soc *soc,
4092 						int mac_num)
4093 {
4094 	uint8_t *grp_mask = NULL;
4095 	int group_number;
4096 
4097 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
4098 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4099 	wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
4100 					  group_number, 0x0);
4101 
4102 	grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
4103 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4104 	wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
4105 				      group_number, 0x0);
4106 
4107 	grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
4108 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4109 	wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
4110 					  group_number, 0x0);
4111 
4112 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
4113 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4114 	wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
4115 					      group_number, 0x0);
4116 }
4117 
4118 #ifdef IPA_OFFLOAD
4119 #ifdef IPA_WDI3_VLAN_SUPPORT
4120 /*
4121  * dp_soc_reset_ipa_vlan_intr_mask() - reset interrupt mask for IPA offloaded
4122  * ring for vlan tagged traffic
4123  * @dp_soc - DP Soc handle
4124  *
4125  * Return: Return void
4126  */
4127 static void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
4128 {
4129 	uint8_t *grp_mask = NULL;
4130 	int group_number, mask;
4131 
4132 	if (!wlan_ipa_is_vlan_enabled())
4133 		return;
4134 
4135 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
4136 
4137 	group_number = dp_srng_find_ring_in_mask(IPA_ALT_REO_DEST_RING_IDX, grp_mask);
4138 	if (group_number < 0) {
4139 		dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4140 			      soc, REO_DST, IPA_ALT_REO_DEST_RING_IDX);
4141 		return;
4142 	}
4143 
4144 	mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
4145 
4146 	/* reset the interrupt mask for offloaded ring */
4147 	mask &= (~(1 << IPA_ALT_REO_DEST_RING_IDX));
4148 
4149 	/*
4150 	 * set the interrupt mask to zero for rx offloaded radio.
4151 	 */
4152 	wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
4153 }
4154 #else
/* IPA_WDI3_VLAN_SUPPORT not defined: no-op */
static inline
void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
{ }
4158 #endif /* IPA_WDI3_VLAN_SUPPORT */
4159 #else
/* IPA offload disabled: no-op */
static inline
void dp_soc_reset_ipa_vlan_intr_mask(struct dp_soc *soc)
{ }
4163 #endif /* IPA_OFFLOAD */
4164 
4165 /*
4166  * dp_soc_reset_intr_mask() - reset interrupt mask
4167  * @dp_soc - DP Soc handle
4168  *
4169  * Return: Return void
4170  */
static void dp_soc_reset_intr_mask(struct dp_soc *soc)
{
	uint8_t j;
	uint8_t *grp_mask = NULL;
	int group_number, mask, num_ring;

	/* number of tx ring */
	num_ring = soc->num_tcl_data_rings;

	/*
	 * group mask for tx completion  ring.
	 */
	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];

	/* loop and reset the mask for only offloaded ring */
	/* NOTE(review): iterates over the compile-time max
	 * WLAN_CFG_NUM_TCL_DATA_RINGS rather than num_ring — confirm this
	 * is intended for partially-populated configurations.
	 */
	for (j = 0; j < WLAN_CFG_NUM_TCL_DATA_RINGS; j++) {
		/*
		 * Group number corresponding to tx offloaded ring.
		 */
		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
		if (group_number < 0) {
			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
				      soc, WBM2SW_RELEASE, j);
			continue;
		}

		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
		/* Skip rings that are neither NSS offloaded nor masked */
		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j) &&
		    (!mask)) {
			continue;
		}

		/* reset the tx mask for offloaded ring */
		mask &= (~(1 << j));

		/*
		 * reset the interrupt mask for offloaded ring.
		 */
		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
	}

	/* number of rx rings */
	num_ring = soc->num_reo_dest_rings;

	/*
	 * group mask for reo destination ring.
	 */
	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];

	/* loop and reset the mask for only offloaded ring */
	for (j = 0; j < WLAN_CFG_NUM_REO_DEST_RING; j++) {
		/*
		 * Group number corresponding to rx offloaded ring.
		 */
		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
		if (group_number < 0) {
			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
				      soc, REO_DST, j);
			continue;
		}

		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j) &&
		    (!mask)) {
			continue;
		}

		/* reset the interrupt mask for offloaded ring */
		mask &= (~(1 << j));

		/*
		 * set the interrupt mask to zero for rx offloaded radio.
		 */
		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
	}

	/*
	 * group mask for Rx buffer refill ring
	 */
	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];

	/* loop and reset the mask for only offloaded ring */
	for (j = 0; j < MAX_PDEV_CNT; j++) {
		int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);

		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
			continue;
		}

		/*
		 * Group number corresponding to rx offloaded ring.
		 */
		group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask);
		if (group_number < 0) {
			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
				      soc, REO_DST, lmac_id);
			continue;
		}

		/* set the interrupt mask for offloaded ring */
		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
				group_number);
		mask &= (~(1 << lmac_id));

		/*
		 * set the interrupt mask to zero for rx offloaded radio.
		 */
		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
			group_number, mask);
	}

	/* REO exception ring: num_ring still holds soc->num_reo_dest_rings
	 * from the assignment above.
	 */
	grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];

	for (j = 0; j < num_ring; j++) {
		if (!dp_soc_ring_if_nss_offloaded(soc, REO_EXCEPTION, j)) {
			continue;
		}

		/*
		 * Group number corresponding to rx err ring.
		 */
		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
		if (group_number < 0) {
			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
				      soc, REO_EXCEPTION, j);
			continue;
		}

		wlan_cfg_set_rx_err_ring_mask(soc->wlan_cfg_ctx,
					      group_number, 0);
	}
}
4303 
4304 #ifdef IPA_OFFLOAD
4305 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap0,
4306 			 uint32_t *remap1, uint32_t *remap2)
4307 {
4308 	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX] = {
4309 				REO_REMAP_SW1, REO_REMAP_SW2, REO_REMAP_SW3,
4310 				REO_REMAP_SW5, REO_REMAP_SW6, REO_REMAP_SW7};
4311 
4312 	switch (soc->arch_id) {
4313 	case CDP_ARCH_TYPE_BE:
4314 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4315 					      soc->num_reo_dest_rings -
4316 					      USE_2_IPA_RX_REO_RINGS, remap1,
4317 					      remap2);
4318 		break;
4319 
4320 	case CDP_ARCH_TYPE_LI:
4321 		if (wlan_ipa_is_vlan_enabled()) {
4322 			hal_compute_reo_remap_ix2_ix3(
4323 					soc->hal_soc, ring,
4324 					soc->num_reo_dest_rings -
4325 					USE_2_IPA_RX_REO_RINGS, remap1,
4326 					remap2);
4327 
4328 		} else {
4329 			hal_compute_reo_remap_ix2_ix3(
4330 					soc->hal_soc, ring,
4331 					soc->num_reo_dest_rings -
4332 					USE_1_IPA_RX_REO_RING, remap1,
4333 					remap2);
4334 		}
4335 
4336 		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);
4337 		break;
4338 	default:
4339 		dp_err("unkonwn arch_id 0x%x", soc->arch_id);
4340 		QDF_BUG(0);
4341 
4342 	}
4343 
4344 	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
4345 
4346 	return true;
4347 }
4348 
4349 #ifdef IPA_WDI3_TX_TWO_PIPES
/* True when @index is the IPA alternate TCL data ring index */
static bool dp_ipa_is_alt_tx_ring(int index)
{
	return index == IPA_TX_ALT_RING_IDX;
}
4354 
/* True when @index is the IPA alternate TX completion ring index */
static bool dp_ipa_is_alt_tx_comp_ring(int index)
{
	return index == IPA_TX_ALT_COMP_RING_IDX;
}
4359 #else /* !IPA_WDI3_TX_TWO_PIPES */
/* !IPA_WDI3_TX_TWO_PIPES: no alternate TX ring exists */
static bool dp_ipa_is_alt_tx_ring(int index)
{
	return false;
}
4364 
/* !IPA_WDI3_TX_TWO_PIPES: no alternate TX completion ring exists */
static bool dp_ipa_is_alt_tx_comp_ring(int index)
{
	return false;
}
4369 #endif /* IPA_WDI3_TX_TWO_PIPES */
4370 
4371 /**
4372  * dp_ipa_get_tx_ring_size() - Get Tx ring size for IPA
4373  *
4374  * @tx_ring_num: Tx ring number
4375  * @tx_ipa_ring_sz: Return param only updated for IPA.
4376  * @soc_cfg_ctx: dp soc cfg context
4377  *
4378  * Return: None
4379  */
4380 static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz,
4381 				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4382 {
4383 	if (!soc_cfg_ctx->ipa_enabled)
4384 		return;
4385 
4386 	if (tx_ring_num == IPA_TCL_DATA_RING_IDX)
4387 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_ring_size(soc_cfg_ctx);
4388 	else if (dp_ipa_is_alt_tx_ring(tx_ring_num))
4389 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_alt_ring_size(soc_cfg_ctx);
4390 }
4391 
4392 /**
4393  * dp_ipa_get_tx_comp_ring_size() - Get Tx comp ring size for IPA
4394  *
4395  * @tx_comp_ring_num: Tx comp ring number
4396  * @tx_comp_ipa_ring_sz: Return param only updated for IPA.
4397  * @soc_cfg_ctx: dp soc cfg context
4398  *
4399  * Return: None
4400  */
4401 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
4402 					 int *tx_comp_ipa_ring_sz,
4403 				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4404 {
4405 	if (!soc_cfg_ctx->ipa_enabled)
4406 		return;
4407 
4408 	if (tx_comp_ring_num == IPA_TCL_DATA_RING_IDX)
4409 		*tx_comp_ipa_ring_sz =
4410 				wlan_cfg_ipa_tx_comp_ring_size(soc_cfg_ctx);
4411 	else if (dp_ipa_is_alt_tx_comp_ring(tx_comp_ring_num))
4412 		*tx_comp_ipa_ring_sz =
4413 				wlan_cfg_ipa_tx_alt_comp_ring_size(soc_cfg_ctx);
4414 }
4415 #else
4416 static uint8_t dp_reo_ring_selection(uint32_t value, uint32_t *ring)
4417 {
4418 	uint8_t num = 0;
4419 
4420 	switch (value) {
4421 	/* should we have all the different possible ring configs */
4422 	case 0xFF:
4423 		num = 8;
4424 		ring[0] = REO_REMAP_SW1;
4425 		ring[1] = REO_REMAP_SW2;
4426 		ring[2] = REO_REMAP_SW3;
4427 		ring[3] = REO_REMAP_SW4;
4428 		ring[4] = REO_REMAP_SW5;
4429 		ring[5] = REO_REMAP_SW6;
4430 		ring[6] = REO_REMAP_SW7;
4431 		ring[7] = REO_REMAP_SW8;
4432 		break;
4433 
4434 	case 0x3F:
4435 		num = 6;
4436 		ring[0] = REO_REMAP_SW1;
4437 		ring[1] = REO_REMAP_SW2;
4438 		ring[2] = REO_REMAP_SW3;
4439 		ring[3] = REO_REMAP_SW4;
4440 		ring[4] = REO_REMAP_SW5;
4441 		ring[5] = REO_REMAP_SW6;
4442 		break;
4443 
4444 	case 0xF:
4445 		num = 4;
4446 		ring[0] = REO_REMAP_SW1;
4447 		ring[1] = REO_REMAP_SW2;
4448 		ring[2] = REO_REMAP_SW3;
4449 		ring[3] = REO_REMAP_SW4;
4450 		break;
4451 	case 0xE:
4452 		num = 3;
4453 		ring[0] = REO_REMAP_SW2;
4454 		ring[1] = REO_REMAP_SW3;
4455 		ring[2] = REO_REMAP_SW4;
4456 		break;
4457 	case 0xD:
4458 		num = 3;
4459 		ring[0] = REO_REMAP_SW1;
4460 		ring[1] = REO_REMAP_SW3;
4461 		ring[2] = REO_REMAP_SW4;
4462 		break;
4463 	case 0xC:
4464 		num = 2;
4465 		ring[0] = REO_REMAP_SW3;
4466 		ring[1] = REO_REMAP_SW4;
4467 		break;
4468 	case 0xB:
4469 		num = 3;
4470 		ring[0] = REO_REMAP_SW1;
4471 		ring[1] = REO_REMAP_SW2;
4472 		ring[2] = REO_REMAP_SW4;
4473 		break;
4474 	case 0xA:
4475 		num = 2;
4476 		ring[0] = REO_REMAP_SW2;
4477 		ring[1] = REO_REMAP_SW4;
4478 		break;
4479 	case 0x9:
4480 		num = 2;
4481 		ring[0] = REO_REMAP_SW1;
4482 		ring[1] = REO_REMAP_SW4;
4483 		break;
4484 	case 0x8:
4485 		num = 1;
4486 		ring[0] = REO_REMAP_SW4;
4487 		break;
4488 	case 0x7:
4489 		num = 3;
4490 		ring[0] = REO_REMAP_SW1;
4491 		ring[1] = REO_REMAP_SW2;
4492 		ring[2] = REO_REMAP_SW3;
4493 		break;
4494 	case 0x6:
4495 		num = 2;
4496 		ring[0] = REO_REMAP_SW2;
4497 		ring[1] = REO_REMAP_SW3;
4498 		break;
4499 	case 0x5:
4500 		num = 2;
4501 		ring[0] = REO_REMAP_SW1;
4502 		ring[1] = REO_REMAP_SW3;
4503 		break;
4504 	case 0x4:
4505 		num = 1;
4506 		ring[0] = REO_REMAP_SW3;
4507 		break;
4508 	case 0x3:
4509 		num = 2;
4510 		ring[0] = REO_REMAP_SW1;
4511 		ring[1] = REO_REMAP_SW2;
4512 		break;
4513 	case 0x2:
4514 		num = 1;
4515 		ring[0] = REO_REMAP_SW2;
4516 		break;
4517 	case 0x1:
4518 		num = 1;
4519 		ring[0] = REO_REMAP_SW1;
4520 		break;
4521 	default:
4522 		dp_err("unkonwn reo ring map 0x%x", value);
4523 		QDF_BUG(0);
4524 	}
4525 	return num;
4526 }
4527 
/**
 * dp_reo_remap_config() - compute REO remap register values based on the
 *			   NSS offload configuration (non-IPA variant)
 * @soc: DP soc handle
 * @remap0: output for REO remap IX0 value
 * @remap1: output for REO remap IX2 value
 * @remap2: output for REO remap IX3 value
 *
 * Return: false when all radios are NSS offloaded (dbdc/dbtc), true
 * otherwise. NOTE(review): no default case — all dp_nss_cfg values appear
 * to be covered above; confirm if the enum ever grows.
 */
bool dp_reo_remap_config(struct dp_soc *soc,
			 uint32_t *remap0,
			 uint32_t *remap1,
			 uint32_t *remap2)
{
	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
	uint32_t reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
	uint8_t target_type, num;
	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX];
	uint32_t value;

	target_type = hal_get_target_type(soc->hal_soc);

	switch (offload_radio) {
	case dp_nss_cfg_default:
		value = reo_config & WLAN_CFG_NUM_REO_RINGS_MAP_MAX;
		num = dp_reo_ring_selection(value, ring);
		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
					      num, remap1, remap2);
		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);

		break;
	case dp_nss_cfg_first_radio:
		/* ring 1 (bit 0) is owned by NSS; mask it out */
		value = reo_config & 0xE;
		num = dp_reo_ring_selection(value, ring);
		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
					      num, remap1, remap2);

		break;
	case dp_nss_cfg_second_radio:
		/* ring 2 (bit 1) is owned by NSS; mask it out */
		value = reo_config & 0xD;
		num = dp_reo_ring_selection(value, ring);
		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
					      num, remap1, remap2);

		break;
	case dp_nss_cfg_dbdc:
	case dp_nss_cfg_dbtc:
		/* return false if both or all are offloaded to NSS */
		return false;

	}

	dp_debug("remap1 %x remap2 %x offload_radio %u",
		 *remap1, *remap2, offload_radio);
	return true;
}
4575 
/* IPA offload disabled: Tx ring size stays at the caller's default */
static void dp_ipa_get_tx_ring_size(int ring_num, int *tx_ipa_ring_sz,
				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}
4580 
/* IPA offload disabled: Tx comp ring size stays at the caller's default */
static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
					 int *tx_comp_ipa_ring_sz,
				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}
4586 #endif /* IPA_OFFLOAD */
4587 
/*
 * dp_reo_frag_dst_set() - configure reo register to set the
 *                        fragment destination ring
 * @soc: Datapath soc
 * @frag_dst_ring: output parameter to set fragment destination ring
 *
 * Based on offload_radio below, the fragment destination ring is selected
 * 0 - TCL
 * 1 - SW1
 * 2 - SW2
 * 3 - SW3
 * 4 - SW4
 * 5 - Release
 * 6 - FW
 * 7 - alternate select
 *
 * Return: void
 */
4606 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
4607 {
4608 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4609 
4610 	switch (offload_radio) {
4611 	case dp_nss_cfg_default:
4612 		*frag_dst_ring = REO_REMAP_TCL;
4613 		break;
4614 	case dp_nss_cfg_first_radio:
4615 		/*
4616 		 * This configuration is valid for single band radio which
4617 		 * is also NSS offload.
4618 		 */
4619 	case dp_nss_cfg_dbdc:
4620 	case dp_nss_cfg_dbtc:
4621 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
4622 		break;
4623 	default:
4624 		dp_init_err("%pK: dp_reo_frag_dst_set invalid offload radio config", soc);
4625 		break;
4626 	}
4627 }
4628 
4629 #ifdef ENABLE_VERBOSE_DEBUG
4630 static void dp_enable_verbose_debug(struct dp_soc *soc)
4631 {
4632 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
4633 
4634 	soc_cfg_ctx = soc->wlan_cfg_ctx;
4635 
4636 	if (soc_cfg_ctx->per_pkt_trace & dp_verbose_debug_mask)
4637 		is_dp_verbose_debug_enabled = true;
4638 
4639 	if (soc_cfg_ctx->per_pkt_trace & hal_verbose_debug_mask)
4640 		hal_set_verbose_debug(true);
4641 	else
4642 		hal_set_verbose_debug(false);
4643 }
4644 #else
/* ENABLE_VERBOSE_DEBUG not defined: no-op */
static void dp_enable_verbose_debug(struct dp_soc *soc)
{
}
4648 #endif
4649 
4650 #ifdef WLAN_FEATURE_STATS_EXT
/* Create the event used to wait for rx_hw_stats completion */
static inline void dp_create_ext_stats_event(struct dp_soc *soc)
{
	qdf_event_create(&soc->rx_hw_stats_event);
}
4655 #else
/* WLAN_FEATURE_STATS_EXT not defined: no-op */
static inline void dp_create_ext_stats_event(struct dp_soc *soc)
{
}
4659 #endif
4660 
/**
 * dp_deinit_tx_pair_by_index() - deinit TCL data / WBM completion ring pair
 * @soc: DP soc pointer
 * @index: index into soc->tcl_data_ring / soc->tx_comp_ring
 *
 * Removes minidump entries and deinitializes both SRNGs; the completion
 * ring is skipped when the index has no WBM ring mapped.
 */
static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index)
{
	int tcl_ring_num, wbm_ring_num;

	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
						index,
						&tcl_ring_num,
						&wbm_ring_num);

	if (tcl_ring_num == -1) {
		dp_err("incorrect tcl ring num for index %u", index);
		return;
	}

	wlan_minidump_remove(soc->tcl_data_ring[index].base_vaddr_unaligned,
			     soc->tcl_data_ring[index].alloc_size,
			     soc->ctrl_psoc,
			     WLAN_MD_DP_SRNG_TCL_DATA,
			     "tcl_data_ring");
	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
	dp_srng_deinit(soc, &soc->tcl_data_ring[index], TCL_DATA,
		       tcl_ring_num);

	/* Nothing more to do when this index has no completion ring */
	if (wbm_ring_num == INVALID_WBM_RING_NUM)
		return;

	wlan_minidump_remove(soc->tx_comp_ring[index].base_vaddr_unaligned,
			     soc->tx_comp_ring[index].alloc_size,
			     soc->ctrl_psoc,
			     WLAN_MD_DP_SRNG_TX_COMP,
			     "tcl_comp_ring");
	dp_srng_deinit(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
		       wbm_ring_num);
}
4695 
4696 /**
4697  * dp_init_tx_ring_pair_by_index() - The function inits tcl data/wbm completion
4698  * ring pair
4699  * @soc: DP soc pointer
4700  * @index: index of soc->tcl_data or soc->tx_comp to initialize
4701  *
4702  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
4703  */
static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
						uint8_t index)
{
	int tcl_ring_num, wbm_ring_num;
	uint8_t bm_id;

	/* Guard against out-of-range index before touching the ring arrays */
	if (index >= MAX_TCL_DATA_RINGS) {
		dp_err("unexpected index!");
		QDF_BUG(0);
		goto fail1;
	}

	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
						index,
						&tcl_ring_num,
						&wbm_ring_num);

	if (tcl_ring_num == -1) {
		dp_err("incorrect tcl ring num for index %u", index);
		goto fail1;
	}

	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
	if (dp_srng_init(soc, &soc->tcl_data_ring[index], TCL_DATA,
			 tcl_ring_num, 0)) {
		dp_err("dp_srng_init failed for tcl_data_ring");
		goto fail1;
	}
	wlan_minidump_log(soc->tcl_data_ring[index].base_vaddr_unaligned,
			  soc->tcl_data_ring[index].alloc_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_TCL_DATA,
			  "tcl_data_ring");

	/* Some indices have no WBM completion ring; still program the RBM */
	if (wbm_ring_num == INVALID_WBM_RING_NUM)
		goto set_rbm;

	if (dp_srng_init(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
			 wbm_ring_num, 0)) {
		dp_err("dp_srng_init failed for tx_comp_ring");
		goto fail1;
	}

	wlan_minidump_log(soc->tx_comp_ring[index].base_vaddr_unaligned,
			  soc->tx_comp_ring[index].alloc_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_TX_COMP,
			  "tcl_comp_ring");
set_rbm:
	/* Tell the target which return buffer manager backs this TCL ring */
	bm_id = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_ring_num);

	soc->arch_ops.tx_implicit_rbm_set(soc, tcl_ring_num, bm_id);

	return QDF_STATUS_SUCCESS;

fail1:
	/* NOTE(review): a tcl_data_ring initialized above is not deinited on
	 * the tx_comp_ring failure path — confirm callers handle cleanup.
	 */
	return QDF_STATUS_E_FAILURE;
}
4762 
/* Free SRNG memory of both halves of the Tx ring pair at @index */
static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index)
{
	dp_debug("index %u", index);
	dp_srng_free(soc, &soc->tcl_data_ring[index]);
	dp_srng_free(soc, &soc->tx_comp_ring[index]);
}
4769 
4770 /**
4771  * dp_alloc_tx_ring_pair_by_index() - The function allocs tcl data/wbm2sw
4772  * ring pair for the given "index"
4773  * @soc: DP soc pointer
4774  * @index: index of soc->tcl_data or soc->tx_comp to initialize
4775  *
4776  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
4777  */
static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
						 uint8_t index)
{
	int tx_ring_size;
	int tx_comp_ring_size;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
	/* cached == 0 for the TCL data ring; the cached-descriptor option
	 * computed below applies only to the WBM completion ring.
	 */
	int cached = 0;

	/* Guard against out-of-range index before touching the ring arrays */
	if (index >= MAX_TCL_DATA_RINGS) {
		dp_err("unexpected index!");
		QDF_BUG(0);
		goto fail1;
	}

	dp_debug("index %u", index);
	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
	/* IPA may override the size for rings it owns */
	dp_ipa_get_tx_ring_size(index, &tx_ring_size, soc_cfg_ctx);

	if (dp_srng_alloc(soc, &soc->tcl_data_ring[index], TCL_DATA,
			  tx_ring_size, cached)) {
		dp_err("dp_srng_alloc failed for tcl_data_ring");
		goto fail1;
	}

	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
	dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size, soc_cfg_ctx);
	/* Enable cached TCL desc if NSS offload is disabled */
	if (!wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
		cached = WLAN_CFG_DST_RING_CACHED_DESC;

	/* No completion ring mapped for this index — done */
	if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) ==
	    INVALID_WBM_RING_NUM)
		return QDF_STATUS_SUCCESS;

	if (dp_srng_alloc(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
			  tx_comp_ring_size, cached)) {
		dp_err("dp_srng_alloc failed for tx_comp_ring");
		goto fail1;
	}

	return QDF_STATUS_SUCCESS;

fail1:
	/* NOTE(review): tcl_data_ring allocated above is not freed on the
	 * tx_comp_ring failure path — confirm callers invoke
	 * dp_free_tx_ring_pair_by_index on failure.
	 */
	return QDF_STATUS_E_FAILURE;
}
4823 
/**
 * dp_lro_hash_setup() - send the LRO/GRO flow-hash configuration to FW
 * @soc: DP soc handle
 * @pdev: DP pdev handle (pdev_id is forwarded to the config call)
 *
 * Builds a cdp_lro_hash_config from the soc cfg (LRO/GRO enable bits,
 * TCP flag masks, arch-specific toeplitz keys) and pushes it to the
 * target through the ol_ops->lro_hash_config callback.
 *
 * Return: QDF_STATUS_SUCCESS or the failure status of the config call;
 * QDF_STATUS_E_FAILURE when LRO, GRO and RX hash are all disabled or the
 * callback is missing.
 */
static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct cdp_lro_hash_config lro_hash;
	QDF_STATUS status;

	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		dp_err("LRO, GRO and RX hash disabled");
		return QDF_STATUS_E_FAILURE;
	}

	qdf_mem_zero(&lro_hash, sizeof(lro_hash));

	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
		lro_hash.lro_enable = 1;
		/* Only pure ACKs are eligible; any other flag disqualifies */
		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
	}

	soc->arch_ops.get_rx_hash_key(soc, &lro_hash);

	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);

	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
		QDF_BUG(0);
		dp_err("lro_hash_config not configured");
		return QDF_STATUS_E_FAILURE;
	}

	status = soc->cdp_soc.ol_ops->lro_hash_config(soc->ctrl_psoc,
						      pdev->pdev_id,
						      &lro_hash);
	if (!QDF_IS_STATUS_SUCCESS(status)) {
		dp_err("failed to send lro_hash_config to FW %u", status);
		return status;
	}

	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
		lro_hash.lro_enable, lro_hash.tcp_flag,
		lro_hash.tcp_flag_mask);

	dp_info("toeplitz_hash_ipv4:");
	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			   lro_hash.toeplitz_hash_ipv4,
			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
			   LRO_IPV4_SEED_ARR_SZ));

	dp_info("toeplitz_hash_ipv6:");
	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			   lro_hash.toeplitz_hash_ipv6,
			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
			   LRO_IPV6_SEED_ARR_SZ));

	return status;
}
4883 
4884 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
4885 /*
4886  * dp_reap_timer_init() - initialize the reap timer
4887  * @soc: data path SoC handle
4888  *
4889  * Return: void
4890  */
static void dp_reap_timer_init(struct dp_soc *soc)
{
	/*
	 * Timer to reap rxdma status rings.
	 * Needed until we enable ppdu end interrupts
	 */
	dp_monitor_reap_timer_init(soc);
	/* Also initializes the monitor vdev timer, despite the fn name */
	dp_monitor_vdev_timer_init(soc);
}
4900 
4901 /*
4902  * dp_reap_timer_deinit() - de-initialize the reap timer
4903  * @soc: data path SoC handle
4904  *
4905  * Return: void
4906  */
static void dp_reap_timer_deinit(struct dp_soc *soc)
{
	/* NOTE(review): only the reap timer is torn down here; the vdev
	 * timer created in dp_reap_timer_init has no matching deinit in
	 * this function — confirm it is released elsewhere.
	 */
	dp_monitor_reap_timer_deinit(soc);
}
4911 #else
4912 /* WIN use case */
4913 static void dp_reap_timer_init(struct dp_soc *soc)
4914 {
4915 	/* Configure LMAC rings in Polled mode */
4916 	if (soc->lmac_polled_mode) {
4917 		/*
4918 		 * Timer to reap lmac rings.
4919 		 */
4920 		qdf_timer_init(soc->osdev, &soc->lmac_reap_timer,
4921 			       dp_service_lmac_rings, (void *)soc,
4922 			       QDF_TIMER_TYPE_WAKE_APPS);
4923 		soc->lmac_timer_init = 1;
4924 		qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
4925 	}
4926 }
4927 
4928 static void dp_reap_timer_deinit(struct dp_soc *soc)
4929 {
4930 	if (soc->lmac_timer_init) {
4931 		qdf_timer_stop(&soc->lmac_reap_timer);
4932 		qdf_timer_free(&soc->lmac_reap_timer);
4933 		soc->lmac_timer_init = 0;
4934 	}
4935 }
4936 #endif
4937 
4938 #ifdef QCA_HOST2FW_RXBUF_RING
4939 /*
4940  * dp_rxdma_ring_alloc() - allocate the RXDMA rings
4941  * @soc: data path SoC handle
4942  * @pdev: Physical device handle
4943  *
4944  * Return: 0 - success, > 0 - failure
4945  */
4946 static int dp_rxdma_ring_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
4947 {
4948 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
4949 	int max_mac_rings;
4950 	int i;
4951 	int ring_size;
4952 
4953 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
4954 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
4955 	ring_size =  wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx);
4956 
4957 	for (i = 0; i < max_mac_rings; i++) {
4958 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
4959 		if (dp_srng_alloc(soc, &pdev->rx_mac_buf_ring[i],
4960 				  RXDMA_BUF, ring_size, 0)) {
4961 			dp_init_err("%pK: failed rx mac ring setup", soc);
4962 			return QDF_STATUS_E_FAILURE;
4963 		}
4964 	}
4965 	return QDF_STATUS_SUCCESS;
4966 }
4967 
4968 /*
4969  * dp_rxdma_ring_setup() - configure the RXDMA rings
4970  * @soc: data path SoC handle
4971  * @pdev: Physical device handle
4972  *
4973  * Return: 0 - success, > 0 - failure
4974  */
4975 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
4976 {
4977 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
4978 	int max_mac_rings;
4979 	int i;
4980 
4981 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
4982 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
4983 
4984 	for (i = 0; i < max_mac_rings; i++) {
4985 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
4986 		if (dp_srng_init(soc, &pdev->rx_mac_buf_ring[i],
4987 				 RXDMA_BUF, 1, i)) {
4988 			dp_init_err("%pK: failed rx mac ring setup", soc);
4989 			return QDF_STATUS_E_FAILURE;
4990 		}
4991 	}
4992 	return QDF_STATUS_SUCCESS;
4993 }
4994 
4995 /*
4996  * dp_rxdma_ring_cleanup() - Deinit the RXDMA rings and reap timer
4997  * @soc: data path SoC handle
4998  * @pdev: Physical device handle
4999  *
5000  * Return: void
5001  */
5002 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
5003 {
5004 	int i;
5005 
5006 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
5007 		dp_srng_deinit(soc, &pdev->rx_mac_buf_ring[i], RXDMA_BUF, 1);
5008 
5009 	dp_reap_timer_deinit(soc);
5010 }
5011 
5012 /*
5013  * dp_rxdma_ring_free() - Free the RXDMA rings
5014  * @pdev: Physical device handle
5015  *
5016  * Return: void
5017  */
5018 static void dp_rxdma_ring_free(struct dp_pdev *pdev)
5019 {
5020 	int i;
5021 
5022 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
5023 		dp_srng_free(pdev->soc, &pdev->rx_mac_buf_ring[i]);
5024 }
5025 
5026 #else
/*
 * QCA_HOST2FW_RXBUF_RING not set: per-MAC host-to-FW RX buffer rings are
 * not used in this configuration, so alloc/setup/free are no-op stubs.
 * Only the reap timer still needs teardown in the cleanup stub.
 */
static int dp_rxdma_ring_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	dp_reap_timer_deinit(soc);
}

static void dp_rxdma_ring_free(struct dp_pdev *pdev)
{
}
5045 #endif
5046 
5047 /**
5048  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
5049  * @pdev - DP_PDEV handle
5050  *
5051  * Return: void
5052  */
5053 static inline void
5054 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
5055 {
5056 	uint8_t map_id;
5057 	struct dp_soc *soc = pdev->soc;
5058 
5059 	if (!soc)
5060 		return;
5061 
5062 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
5063 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
5064 			     default_dscp_tid_map,
5065 			     sizeof(default_dscp_tid_map));
5066 	}
5067 
5068 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
5069 		hal_tx_set_dscp_tid_map(soc->hal_soc,
5070 					default_dscp_tid_map,
5071 					map_id);
5072 	}
5073 }
5074 
5075 /**
5076  * dp_pcp_tid_map_setup(): Initialize the pcp-tid maps
5077  * @pdev - DP_PDEV handle
5078  *
5079  * Return: void
5080  */
5081 static inline void
5082 dp_pcp_tid_map_setup(struct dp_pdev *pdev)
5083 {
5084 	struct dp_soc *soc = pdev->soc;
5085 
5086 	if (!soc)
5087 		return;
5088 
5089 	qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
5090 		     sizeof(default_pcp_tid_map));
5091 	hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
5092 }
5093 
5094 #ifdef IPA_OFFLOAD
5095 /**
5096  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
5097  * @soc: data path instance
5098  * @pdev: core txrx pdev context
5099  *
5100  * Return: QDF_STATUS_SUCCESS: success
5101  *         QDF_STATUS_E_RESOURCES: Error return
5102  */
5103 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5104 					   struct dp_pdev *pdev)
5105 {
5106 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5107 	int entries;
5108 
5109 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5110 		soc_cfg_ctx = soc->wlan_cfg_ctx;
5111 		entries =
5112 			wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
5113 
5114 		/* Setup second Rx refill buffer ring */
5115 		if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
5116 				  entries, 0)) {
5117 			dp_init_err("%pK: dp_srng_alloc failed second"
5118 				    "rx refill ring", soc);
5119 			return QDF_STATUS_E_FAILURE;
5120 		}
5121 	}
5122 
5123 	return QDF_STATUS_SUCCESS;
5124 }
5125 
5126 #ifdef IPA_WDI3_VLAN_SUPPORT
5127 static int dp_setup_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5128 					       struct dp_pdev *pdev)
5129 {
5130 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5131 	int entries;
5132 
5133 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5134 	    wlan_ipa_is_vlan_enabled()) {
5135 		soc_cfg_ctx = soc->wlan_cfg_ctx;
5136 		entries =
5137 			wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
5138 
5139 		/* Setup second Rx refill buffer ring */
5140 		if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF,
5141 				  entries, 0)) {
5142 			dp_init_err("%pK: alloc failed for 3rd rx refill ring",
5143 				    soc);
5144 			return QDF_STATUS_E_FAILURE;
5145 		}
5146 	}
5147 
5148 	return QDF_STATUS_SUCCESS;
5149 }
5150 
5151 static int dp_init_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
5152 					      struct dp_pdev *pdev)
5153 {
5154 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
5155 	    wlan_ipa_is_vlan_enabled()) {
5156 		if (dp_srng_init(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF,
5157 				 IPA_RX_ALT_REFILL_BUF_RING_IDX,
5158 				 pdev->pdev_id)) {
5159 			dp_init_err("%pK: init failed for 3rd rx refill ring",
5160 				    soc);
5161 			return QDF_STATUS_E_FAILURE;
5162 		}
5163 	}
5164 
5165 	return QDF_STATUS_SUCCESS;
5166 }
5167 
/* De-initialize the third (alt) Rx refill ring; only done when the same
 * IPA + VLAN conditions under which it was initialized still hold.
 */
static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
						 struct dp_pdev *pdev)
{
	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
	    wlan_ipa_is_vlan_enabled())
		dp_srng_deinit(soc, &pdev->rx_refill_buf_ring3, RXDMA_BUF, 0);
}
5175 
/* Free the third (alt) Rx refill ring memory allocated in
 * dp_setup_ipa_rx_alt_refill_buf_ring(), under the same IPA + VLAN gate.
 */
static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
					       struct dp_pdev *pdev)
{
	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) &&
	    wlan_ipa_is_vlan_enabled())
		dp_srng_free(soc, &pdev->rx_refill_buf_ring3);
}
5183 #else
/* IPA_WDI3_VLAN_SUPPORT not set: the third (VLAN) refill ring does not
 * exist, so all alt-ring operations are no-op stubs.
 */
static int dp_setup_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
					       struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static int dp_init_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
					      struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
						 struct dp_pdev *pdev)
{
}

static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
					       struct dp_pdev *pdev)
{
}
5205 #endif
5206 
5207 /**
5208  * dp_deinit_ipa_rx_refill_buf_ring - deinit second Rx refill buffer ring
5209  * @soc: data path instance
5210  * @pdev: core txrx pdev context
5211  *
5212  * Return: void
5213  */
5214 static void dp_deinit_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5215 					     struct dp_pdev *pdev)
5216 {
5217 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
5218 		dp_srng_deinit(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 0);
5219 }
5220 
5221 /**
5222  * dp_init_ipa_rx_refill_buf_ring - Init second Rx refill buffer ring
5223  * @soc: data path instance
5224  * @pdev: core txrx pdev context
5225  *
5226  * Return: QDF_STATUS_SUCCESS: success
5227  *         QDF_STATUS_E_RESOURCES: Error return
5228  */
5229 static int dp_init_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5230 					  struct dp_pdev *pdev)
5231 {
5232 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5233 		if (dp_srng_init(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
5234 				 IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id)) {
5235 			dp_init_err("%pK: dp_srng_init failed second"
5236 				    "rx refill ring", soc);
5237 			return QDF_STATUS_E_FAILURE;
5238 		}
5239 	}
5240 
5241 	if (dp_init_ipa_rx_alt_refill_buf_ring(soc, pdev)) {
5242 		dp_deinit_ipa_rx_refill_buf_ring(soc, pdev);
5243 		return QDF_STATUS_E_FAILURE;
5244 	}
5245 
5246 	return QDF_STATUS_SUCCESS;
5247 }
5248 
5249 /**
5250  * dp_free_ipa_rx_refill_buf_ring - free second Rx refill buffer ring
5251  * @soc: data path instance
5252  * @pdev: core txrx pdev context
5253  *
5254  * Return: void
5255  */
5256 static void dp_free_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5257 					   struct dp_pdev *pdev)
5258 {
5259 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
5260 		dp_srng_free(soc, &pdev->rx_refill_buf_ring2);
5261 }
5262 #else
/* IPA_OFFLOAD not set: none of the extra IPA Rx refill rings exist, so
 * all setup/init/deinit/free operations are no-op stubs.
 */
static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					   struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static int dp_init_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					  struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_deinit_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					     struct dp_pdev *pdev)
{
}

static void dp_free_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					   struct dp_pdev *pdev)
{
}

static int dp_setup_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
					       struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_deinit_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
						 struct dp_pdev *pdev)
{
}

static void dp_free_ipa_rx_alt_refill_buf_ring(struct dp_soc *soc,
					       struct dp_pdev *pdev)
{
}
5300 #endif
5301 
5302 #ifdef DP_TX_HW_DESC_HISTORY
5303 /**
5304  * dp_soc_tx_hw_desc_history_attach - Attach TX HW descriptor history
5305  *
5306  * @soc: DP soc handle
5307  *
5308  * Return: None
5309  */
5310 static void dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc)
5311 {
5312 	soc->tx_hw_desc_history = dp_context_alloc_mem(
5313 			soc, DP_TX_HW_DESC_HIST_TYPE,
5314 			sizeof(*soc->tx_hw_desc_history));
5315 	if (soc->tx_hw_desc_history)
5316 		soc->tx_hw_desc_history->index = 0;
5317 }
5318 
/* Free the TX HW descriptor history; dp_context_free_mem is expected to
 * tolerate a NULL pointer from a failed attach.
 */
static void dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc)
{
	dp_context_free_mem(soc, DP_TX_HW_DESC_HIST_TYPE,
			    soc->tx_hw_desc_history);
}
5324 
5325 #else /* DP_TX_HW_DESC_HISTORY */
/* TX HW descriptor history disabled: attach/detach are no-op stubs */
static inline void
dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc)
{
}

static inline void
dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc)
{
}
5335 #endif /* DP_TX_HW_DESC_HISTORY */
5336 
5337 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
5338 #ifndef RX_DEFRAG_DO_NOT_REINJECT
5339 /**
5340  * dp_soc_rx_reinject_ring_history_attach - Attach the reo reinject ring
5341  *					    history.
5342  * @soc: DP soc handle
5343  *
5344  * Return: None
5345  */
5346 static void dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
5347 {
5348 	soc->rx_reinject_ring_history =
5349 		dp_context_alloc_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
5350 				     sizeof(struct dp_rx_reinject_history));
5351 	if (soc->rx_reinject_ring_history)
5352 		qdf_atomic_init(&soc->rx_reinject_ring_history->index);
5353 }
5354 #else /* RX_DEFRAG_DO_NOT_REINJECT */
/* Reinject is disabled: no reinject ring history to attach */
static inline void
dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
{
}
5359 #endif /* RX_DEFRAG_DO_NOT_REINJECT */
5360 
5361 /**
5362  * dp_soc_rx_history_attach() - Attach the ring history record buffers
5363  * @soc: DP soc structure
5364  *
5365  * This function allocates the memory for recording the rx ring, rx error
5366  * ring and the reinject ring entries. There is no error returned in case
5367  * of allocation failure since the record function checks if the history is
5368  * initialized or not. We do not want to fail the driver load in case of
5369  * failure to allocate memory for debug history.
5370  *
5371  * Returns: None
5372  */
5373 static void dp_soc_rx_history_attach(struct dp_soc *soc)
5374 {
5375 	int i;
5376 	uint32_t rx_ring_hist_size;
5377 	uint32_t rx_refill_ring_hist_size;
5378 
5379 	rx_ring_hist_size = sizeof(*soc->rx_ring_history[0]);
5380 	rx_refill_ring_hist_size = sizeof(*soc->rx_refill_ring_history[0]);
5381 
5382 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
5383 		soc->rx_ring_history[i] = dp_context_alloc_mem(
5384 				soc, DP_RX_RING_HIST_TYPE, rx_ring_hist_size);
5385 		if (soc->rx_ring_history[i])
5386 			qdf_atomic_init(&soc->rx_ring_history[i]->index);
5387 	}
5388 
5389 	soc->rx_err_ring_history = dp_context_alloc_mem(
5390 			soc, DP_RX_ERR_RING_HIST_TYPE, rx_ring_hist_size);
5391 	if (soc->rx_err_ring_history)
5392 		qdf_atomic_init(&soc->rx_err_ring_history->index);
5393 
5394 	dp_soc_rx_reinject_ring_history_attach(soc);
5395 
5396 	for (i = 0; i < MAX_PDEV_CNT; i++) {
5397 		soc->rx_refill_ring_history[i] = dp_context_alloc_mem(
5398 						soc,
5399 						DP_RX_REFILL_RING_HIST_TYPE,
5400 						rx_refill_ring_hist_size);
5401 
5402 		if (soc->rx_refill_ring_history[i])
5403 			qdf_atomic_init(&soc->rx_refill_ring_history[i]->index);
5404 	}
5405 }
5406 
/* Free all rx history buffers attached in dp_soc_rx_history_attach();
 * each free tolerates a NULL pointer from a failed allocation.
 */
static void dp_soc_rx_history_detach(struct dp_soc *soc)
{
	int i;

	for (i = 0; i < MAX_REO_DEST_RINGS; i++)
		dp_context_free_mem(soc, DP_RX_RING_HIST_TYPE,
				    soc->rx_ring_history[i]);

	dp_context_free_mem(soc, DP_RX_ERR_RING_HIST_TYPE,
			    soc->rx_err_ring_history);

	/*
	 * No need for a featurized detach since qdf_mem_free takes
	 * care of NULL pointer.
	 */
	dp_context_free_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
			    soc->rx_reinject_ring_history);

	for (i = 0; i < MAX_PDEV_CNT; i++)
		dp_context_free_mem(soc, DP_RX_REFILL_RING_HIST_TYPE,
				    soc->rx_refill_ring_history[i]);
}
5429 
5430 #else
/* RX ring history feature disabled: attach/detach are no-op stubs */
static inline void dp_soc_rx_history_attach(struct dp_soc *soc)
{
}

static inline void dp_soc_rx_history_detach(struct dp_soc *soc)
{
}
5438 #endif
5439 
5440 #ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
5441 /**
5442  * dp_soc_mon_status_ring_history_attach() - Attach the monitor status
5443  *					     buffer record history.
5444  * @soc: DP soc handle
5445  *
5446  * This function allocates memory to track the event for a monitor
5447  * status buffer, before its parsed and freed.
5448  *
5449  * Return: None
5450  */
5451 static void dp_soc_mon_status_ring_history_attach(struct dp_soc *soc)
5452 {
5453 	soc->mon_status_ring_history = dp_context_alloc_mem(soc,
5454 				DP_MON_STATUS_BUF_HIST_TYPE,
5455 				sizeof(struct dp_mon_status_ring_history));
5456 	if (!soc->mon_status_ring_history) {
5457 		dp_err("Failed to alloc memory for mon status ring history");
5458 		return;
5459 	}
5460 }
5461 
5462 /**
5463  * dp_soc_mon_status_ring_history_detach() - Detach the monitor status buffer
5464  *					     record history.
5465  * @soc: DP soc handle
5466  *
5467  * Return: None
5468  */
5469 static void dp_soc_mon_status_ring_history_detach(struct dp_soc *soc)
5470 {
5471 	dp_context_free_mem(soc, DP_MON_STATUS_BUF_HIST_TYPE,
5472 			    soc->mon_status_ring_history);
5473 }
5474 #else
/* Monitor status ring history disabled: attach/detach are no-op stubs */
static void dp_soc_mon_status_ring_history_attach(struct dp_soc *soc)
{
}

static void dp_soc_mon_status_ring_history_detach(struct dp_soc *soc)
{
}
5482 #endif
5483 
5484 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
5485 /**
5486  * dp_soc_tx_history_attach() - Attach the ring history record buffers
5487  * @soc: DP soc structure
5488  *
5489  * This function allocates the memory for recording the tx tcl ring and
5490  * the tx comp ring entries. There is no error returned in case
5491  * of allocation failure since the record function checks if the history is
5492  * initialized or not. We do not want to fail the driver load in case of
5493  * failure to allocate memory for debug history.
5494  *
5495  * Returns: None
5496  */
5497 static void dp_soc_tx_history_attach(struct dp_soc *soc)
5498 {
5499 	uint32_t tx_tcl_hist_size;
5500 	uint32_t tx_comp_hist_size;
5501 
5502 	tx_tcl_hist_size = sizeof(*soc->tx_tcl_history);
5503 	soc->tx_tcl_history = dp_context_alloc_mem(soc, DP_TX_TCL_HIST_TYPE,
5504 						   tx_tcl_hist_size);
5505 	if (soc->tx_tcl_history)
5506 		qdf_atomic_init(&soc->tx_tcl_history->index);
5507 
5508 	tx_comp_hist_size = sizeof(*soc->tx_comp_history);
5509 	soc->tx_comp_history = dp_context_alloc_mem(soc, DP_TX_COMP_HIST_TYPE,
5510 						    tx_comp_hist_size);
5511 	if (soc->tx_comp_history)
5512 		qdf_atomic_init(&soc->tx_comp_history->index);
5513 }
5514 
5515 /**
5516  * dp_soc_tx_history_detach() - Detach the ring history record buffers
5517  * @soc: DP soc structure
5518  *
5519  * This function frees the memory for recording the tx tcl ring and
5520  * the tx comp ring entries.
5521  *
5522  * Returns: None
5523  */
5524 static void dp_soc_tx_history_detach(struct dp_soc *soc)
5525 {
5526 	dp_context_free_mem(soc, DP_TX_TCL_HIST_TYPE, soc->tx_tcl_history);
5527 	dp_context_free_mem(soc, DP_TX_COMP_HIST_TYPE, soc->tx_comp_history);
5528 }
5529 
5530 #else
/* TX descriptor history feature disabled: attach/detach are no-op stubs */
static inline void dp_soc_tx_history_attach(struct dp_soc *soc)
{
}

static inline void dp_soc_tx_history_detach(struct dp_soc *soc)
{
}
5538 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
5539 
5540 /*
5541 * dp_pdev_attach_wifi3() - attach txrx pdev
5542 * @txrx_soc: Datapath SOC handle
5543 * @params: Params for PDEV attach
5544 *
5545 * Return: QDF_STATUS
5546 */
5547 static inline
5548 QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
5549 				struct cdp_pdev_attach_params *params)
5550 {
5551 	qdf_size_t pdev_context_size;
5552 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5553 	struct dp_pdev *pdev = NULL;
5554 	uint8_t pdev_id = params->pdev_id;
5555 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5556 	int nss_cfg;
5557 
5558 	pdev_context_size =
5559 		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_PDEV);
5560 	if (pdev_context_size)
5561 		pdev = dp_context_alloc_mem(soc, DP_PDEV_TYPE, pdev_context_size);
5562 
5563 	if (!pdev) {
5564 		dp_init_err("%pK: DP PDEV memory allocation failed",
5565 			    soc);
5566 		goto fail0;
5567 	}
5568 	wlan_minidump_log(pdev, sizeof(*pdev), soc->ctrl_psoc,
5569 			  WLAN_MD_DP_PDEV, "dp_pdev");
5570 
5571 	soc_cfg_ctx = soc->wlan_cfg_ctx;
5572 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
5573 
5574 	if (!pdev->wlan_cfg_ctx) {
5575 		dp_init_err("%pK: pdev cfg_attach failed", soc);
5576 		goto fail1;
5577 	}
5578 
5579 	/*
5580 	 * set nss pdev config based on soc config
5581 	 */
5582 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
5583 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
5584 					 (nss_cfg & (1 << pdev_id)));
5585 
5586 	pdev->soc = soc;
5587 	pdev->pdev_id = pdev_id;
5588 	soc->pdev_list[pdev_id] = pdev;
5589 
5590 	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
5591 	soc->pdev_count++;
5592 
5593 	/* Allocate memory for pdev srng rings */
5594 	if (dp_pdev_srng_alloc(pdev)) {
5595 		dp_init_err("%pK: dp_pdev_srng_alloc failed", soc);
5596 		goto fail2;
5597 	}
5598 
5599 	/* Setup second Rx refill buffer ring */
5600 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev)) {
5601 		dp_init_err("%pK: dp_srng_alloc failed rxrefill2 ring",
5602 			    soc);
5603 		goto fail3;
5604 	}
5605 
5606 	/* Allocate memory for pdev rxdma rings */
5607 	if (dp_rxdma_ring_alloc(soc, pdev)) {
5608 		dp_init_err("%pK: dp_rxdma_ring_alloc failed", soc);
5609 		goto fail4;
5610 	}
5611 
5612 	/* Rx specific init */
5613 	if (dp_rx_pdev_desc_pool_alloc(pdev)) {
5614 		dp_init_err("%pK: dp_rx_pdev_attach failed", soc);
5615 		goto fail4;
5616 	}
5617 
5618 	if (dp_monitor_pdev_attach(pdev)) {
5619 		dp_init_err("%pK: dp_monitor_pdev_attach failed", soc);
5620 		goto fail5;
5621 	}
5622 
5623 	soc->arch_ops.txrx_pdev_attach(pdev, params);
5624 
5625 	/* Setup third Rx refill buffer ring */
5626 	if (dp_setup_ipa_rx_alt_refill_buf_ring(soc, pdev)) {
5627 		dp_init_err("%pK: dp_srng_alloc failed rxrefill3 ring",
5628 			    soc);
5629 		goto fail6;
5630 	}
5631 
5632 	return QDF_STATUS_SUCCESS;
5633 
5634 fail6:
5635 	dp_monitor_pdev_detach(pdev);
5636 fail5:
5637 	dp_rx_pdev_desc_pool_free(pdev);
5638 fail4:
5639 	dp_rxdma_ring_free(pdev);
5640 	dp_free_ipa_rx_refill_buf_ring(soc, pdev);
5641 fail3:
5642 	dp_pdev_srng_free(pdev);
5643 fail2:
5644 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
5645 fail1:
5646 	soc->pdev_list[pdev_id] = NULL;
5647 	qdf_mem_free(pdev);
5648 fail0:
5649 	return QDF_STATUS_E_FAILURE;
5650 }
5651 
5652 /**
5653  * dp_pdev_flush_pending_vdevs() - Flush all delete pending vdevs in pdev
5654  * @pdev: Datapath PDEV handle
5655  *
5656  * This is the last chance to flush all pending dp vdevs/peers,
5657  * some peer/vdev leak case like Non-SSR + peer unmap missing
5658  * will be covered here.
5659  *
5660  * Return: None
5661  */
5662 static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
5663 {
5664 	struct dp_soc *soc = pdev->soc;
5665 	struct dp_vdev *vdev_arr[MAX_VDEV_CNT] = {0};
5666 	uint32_t i = 0;
5667 	uint32_t num_vdevs = 0;
5668 	struct dp_vdev *vdev = NULL;
5669 
5670 	if (TAILQ_EMPTY(&soc->inactive_vdev_list))
5671 		return;
5672 
5673 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
5674 	TAILQ_FOREACH(vdev, &soc->inactive_vdev_list,
5675 		      inactive_list_elem) {
5676 		if (vdev->pdev != pdev)
5677 			continue;
5678 
5679 		vdev_arr[num_vdevs] = vdev;
5680 		num_vdevs++;
5681 		/* take reference to free */
5682 		dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CDP);
5683 	}
5684 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
5685 
5686 	for (i = 0; i < num_vdevs; i++) {
5687 		dp_vdev_flush_peers((struct cdp_vdev *)vdev_arr[i], 0, 0);
5688 		dp_vdev_unref_delete(soc, vdev_arr[i], DP_MOD_ID_CDP);
5689 	}
5690 }
5691 
5692 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
5693 /**
5694  * dp_vdev_stats_hw_offload_target_config() - Send HTT command to FW
5695  *                                          for enable/disable of HW vdev stats
5696  * @soc: Datapath soc handle
5697  * @pdev_id: INVALID_PDEV_ID for all pdevs or 0,1,2 for individual pdev
5698  * @enable: flag to reprsent enable/disable of hw vdev stats
5699  *
5700  * Return: none
5701  */
5702 static void dp_vdev_stats_hw_offload_target_config(struct dp_soc *soc,
5703 						   uint8_t pdev_id,
5704 						   bool enable)
5705 {
5706 	/* Check SOC level config for HW offload vdev stats support */
5707 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
5708 		dp_debug("%pK: HW vdev offload stats is disabled", soc);
5709 		return;
5710 	}
5711 
5712 	/* Send HTT command to FW for enable of stats */
5713 	dp_h2t_hw_vdev_stats_config_send(soc, pdev_id, enable, false, 0);
5714 }
5715 
5716 /**
5717  * dp_vdev_stats_hw_offload_target_clear() - Clear HW vdev stats on target
5718  * @soc: Datapath soc handle
5719  * @pdev_id: pdev_id (0,1,2)
5720  * @bitmask: bitmask with vdev_id(s) for which stats are to be cleared on HW
5721  *
5722  * Return: none
5723  */
5724 static
5725 void dp_vdev_stats_hw_offload_target_clear(struct dp_soc *soc, uint8_t pdev_id,
5726 					   uint64_t vdev_id_bitmask)
5727 {
5728 	/* Check SOC level config for HW offload vdev stats support */
5729 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
5730 		dp_debug("%pK: HW vdev offload stats is disabled", soc);
5731 		return;
5732 	}
5733 
5734 	/* Send HTT command to FW for reset of stats */
5735 	dp_h2t_hw_vdev_stats_config_send(soc, pdev_id, true, true,
5736 					 vdev_id_bitmask);
5737 }
5738 #else
/* HW vdev stats offload not supported: config/clear are no-op stubs */
static void
dp_vdev_stats_hw_offload_target_config(struct dp_soc *soc, uint8_t pdev_id,
				       bool enable)
{
}

static
void dp_vdev_stats_hw_offload_target_clear(struct dp_soc *soc, uint8_t pdev_id,
					   uint64_t vdev_id_bitmask)
{
}
5750 #endif /*QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT */
5751 
5752 /**
5753  * dp_pdev_deinit() - Deinit txrx pdev
5754  * @txrx_pdev: Datapath PDEV handle
5755  * @force: Force deinit
5756  *
5757  * Return: None
5758  */
5759 static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
5760 {
5761 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
5762 	qdf_nbuf_t curr_nbuf, next_nbuf;
5763 
5764 	if (pdev->pdev_deinit)
5765 		return;
5766 
5767 	dp_tx_me_exit(pdev);
5768 	dp_rx_fst_detach(pdev->soc, pdev);
5769 	dp_rx_pdev_buffers_free(pdev);
5770 	dp_rx_pdev_desc_pool_deinit(pdev);
5771 	dp_pdev_bkp_stats_detach(pdev);
5772 	qdf_event_destroy(&pdev->fw_peer_stats_event);
5773 	qdf_event_destroy(&pdev->fw_stats_event);
5774 	if (pdev->sojourn_buf)
5775 		qdf_nbuf_free(pdev->sojourn_buf);
5776 
5777 	dp_pdev_flush_pending_vdevs(pdev);
5778 	dp_tx_desc_flush(pdev, NULL, true);
5779 
5780 	qdf_spinlock_destroy(&pdev->tx_mutex);
5781 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
5782 
5783 	dp_monitor_pdev_deinit(pdev);
5784 
5785 	dp_pdev_srng_deinit(pdev);
5786 
5787 	dp_ipa_uc_detach(pdev->soc, pdev);
5788 	dp_deinit_ipa_rx_alt_refill_buf_ring(pdev->soc, pdev);
5789 	dp_deinit_ipa_rx_refill_buf_ring(pdev->soc, pdev);
5790 	dp_rxdma_ring_cleanup(pdev->soc, pdev);
5791 
5792 	curr_nbuf = pdev->invalid_peer_head_msdu;
5793 	while (curr_nbuf) {
5794 		next_nbuf = qdf_nbuf_next(curr_nbuf);
5795 		dp_rx_nbuf_free(curr_nbuf);
5796 		curr_nbuf = next_nbuf;
5797 	}
5798 	pdev->invalid_peer_head_msdu = NULL;
5799 	pdev->invalid_peer_tail_msdu = NULL;
5800 
5801 	dp_wdi_event_detach(pdev);
5802 	pdev->pdev_deinit = 1;
5803 }
5804 
5805 /**
5806  * dp_pdev_deinit_wifi3() - Deinit txrx pdev
5807  * @psoc: Datapath psoc handle
5808  * @pdev_id: Id of datapath PDEV handle
5809  * @force: Force deinit
5810  *
5811  * Return: QDF_STATUS
5812  */
5813 static QDF_STATUS
5814 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
5815 		     int force)
5816 {
5817 	struct dp_pdev *txrx_pdev;
5818 
5819 	txrx_pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
5820 						       pdev_id);
5821 
5822 	if (!txrx_pdev)
5823 		return QDF_STATUS_E_FAILURE;
5824 
5825 	dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force);
5826 
5827 	return QDF_STATUS_SUCCESS;
5828 }
5829 
5830 /*
5831  * dp_pdev_post_attach() - Do post pdev attach after dev_alloc_name
5832  * @txrx_pdev: Datapath PDEV handle
5833  *
5834  * Return: None
5835  */
5836 static void dp_pdev_post_attach(struct cdp_pdev *txrx_pdev)
5837 {
5838 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
5839 
5840 	dp_monitor_tx_capture_debugfs_init(pdev);
5841 
5842 	if (dp_pdev_htt_stats_dbgfs_init(pdev)) {
5843 		dp_init_err("%pK: Failed to initialize pdev HTT stats debugfs", pdev->soc);
5844 	}
5845 }
5846 
5847 /*
5848  * dp_pdev_post_attach_wifi3() - attach txrx pdev post
5849  * @psoc: Datapath soc handle
5850  * @pdev_id: pdev id of pdev
5851  *
5852  * Return: QDF_STATUS
5853  */
5854 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *soc,
5855 				     uint8_t pdev_id)
5856 {
5857 	struct dp_pdev *pdev;
5858 
5859 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
5860 						  pdev_id);
5861 
5862 	if (!pdev) {
5863 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
5864 			    (struct dp_soc *)soc, pdev_id);
5865 		return QDF_STATUS_E_FAILURE;
5866 	}
5867 
5868 	dp_pdev_post_attach((struct cdp_pdev *)pdev);
5869 	return QDF_STATUS_SUCCESS;
5870 }
5871 
5872 /*
5873  * dp_pdev_detach() - Complete rest of pdev detach
5874  * @txrx_pdev: Datapath PDEV handle
5875  * @force: Force deinit
5876  *
5877  * Return: None
5878  */
5879 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
5880 {
5881 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
5882 	struct dp_soc *soc = pdev->soc;
5883 
5884 	dp_pdev_htt_stats_dbgfs_deinit(pdev);
5885 	dp_rx_pdev_desc_pool_free(pdev);
5886 	dp_monitor_pdev_detach(pdev);
5887 	dp_rxdma_ring_free(pdev);
5888 	dp_free_ipa_rx_refill_buf_ring(soc, pdev);
5889 	dp_free_ipa_rx_alt_refill_buf_ring(soc, pdev);
5890 	dp_pdev_srng_free(pdev);
5891 
5892 	soc->pdev_count--;
5893 	soc->pdev_list[pdev->pdev_id] = NULL;
5894 
5895 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
5896 	wlan_minidump_remove(pdev, sizeof(*pdev), soc->ctrl_psoc,
5897 			     WLAN_MD_DP_PDEV, "dp_pdev");
5898 	dp_context_free_mem(soc, DP_PDEV_TYPE, pdev);
5899 }
5900 
5901 /*
5902  * dp_pdev_detach_wifi3() - detach txrx pdev
5903  * @psoc: Datapath soc handle
5904  * @pdev_id: pdev id of pdev
5905  * @force: Force detach
5906  *
5907  * Return: QDF_STATUS
5908  */
5909 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
5910 				       int force)
5911 {
5912 	struct dp_pdev *pdev;
5913 	struct dp_soc *soc = (struct dp_soc *)psoc;
5914 
5915 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
5916 						  pdev_id);
5917 
5918 	if (!pdev) {
5919 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
5920 			    (struct dp_soc *)psoc, pdev_id);
5921 		return QDF_STATUS_E_FAILURE;
5922 	}
5923 
5924 	soc->arch_ops.txrx_pdev_detach(pdev);
5925 
5926 	dp_pdev_detach((struct cdp_pdev *)pdev, force);
5927 	return QDF_STATUS_SUCCESS;
5928 }
5929 
5930 /*
5931  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
5932  * @soc: DP SOC handle
5933  */
5934 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
5935 {
5936 	struct reo_desc_list_node *desc;
5937 	struct dp_rx_tid *rx_tid;
5938 
5939 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
5940 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
5941 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
5942 		rx_tid = &desc->rx_tid;
5943 		qdf_mem_unmap_nbytes_single(soc->osdev,
5944 			rx_tid->hw_qdesc_paddr,
5945 			QDF_DMA_BIDIRECTIONAL,
5946 			rx_tid->hw_qdesc_alloc_size);
5947 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
5948 		qdf_mem_free(desc);
5949 	}
5950 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
5951 	qdf_list_destroy(&soc->reo_desc_freelist);
5952 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
5953 }
5954 
#ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
/*
 * dp_reo_desc_deferred_freelist_create() - Initialize the resources used
 *                                          for deferred reo desc list
 * @soc: Datapath soc handle
 *
 * Return: void
 */
static void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
{
	qdf_spinlock_create(&soc->reo_desc_deferred_freelist_lock);
	qdf_list_create(&soc->reo_desc_deferred_freelist,
			REO_DESC_DEFERRED_FREELIST_SIZE);
	/* Mark the list usable only after lock and list exist */
	soc->reo_desc_deferred_freelist_init = true;
}
5970 
5971 /*
5972  * dp_reo_desc_deferred_freelist_destroy() - loop the deferred free list &
5973  *                                           free the leftover REO QDESCs
5974  * @psoc: Datapath soc handle
5975  *
5976  * Return: void
5977  */
5978 static void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
5979 {
5980 	struct reo_desc_deferred_freelist_node *desc;
5981 
5982 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
5983 	soc->reo_desc_deferred_freelist_init = false;
5984 	while (qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
5985 	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
5986 		qdf_mem_unmap_nbytes_single(soc->osdev,
5987 					    desc->hw_qdesc_paddr,
5988 					    QDF_DMA_BIDIRECTIONAL,
5989 					    desc->hw_qdesc_alloc_size);
5990 		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
5991 		qdf_mem_free(desc);
5992 	}
5993 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
5994 
5995 	qdf_list_destroy(&soc->reo_desc_deferred_freelist);
5996 	qdf_spinlock_destroy(&soc->reo_desc_deferred_freelist_lock);
5997 }
5998 #else
/* Stubs when WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY is not enabled */
static inline void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
{
}

static inline void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
{
}
6006 #endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
6007 
6008 /*
6009  * dp_soc_reset_txrx_ring_map() - reset tx ring map
6010  * @soc: DP SOC handle
6011  *
6012  */
6013 static void dp_soc_reset_txrx_ring_map(struct dp_soc *soc)
6014 {
6015 	uint32_t i;
6016 
6017 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
6018 		soc->tx_ring_map[i] = 0;
6019 }
6020 
6021 /*
6022  * dp_soc_print_inactive_objects() - prints inactive peer and vdev list
6023  * @soc: DP SOC handle
6024  *
6025  */
6026 static void dp_soc_print_inactive_objects(struct dp_soc *soc)
6027 {
6028 	struct dp_peer *peer = NULL;
6029 	struct dp_peer *tmp_peer = NULL;
6030 	struct dp_vdev *vdev = NULL;
6031 	struct dp_vdev *tmp_vdev = NULL;
6032 	int i = 0;
6033 	uint32_t count;
6034 
6035 	if (TAILQ_EMPTY(&soc->inactive_peer_list) &&
6036 	    TAILQ_EMPTY(&soc->inactive_vdev_list))
6037 		return;
6038 
6039 	TAILQ_FOREACH_SAFE(peer, &soc->inactive_peer_list,
6040 			   inactive_list_elem, tmp_peer) {
6041 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
6042 			count = qdf_atomic_read(&peer->mod_refs[i]);
6043 			if (count)
6044 				DP_PRINT_STATS("peer %pK Module id %u ==> %u",
6045 					       peer, i, count);
6046 		}
6047 	}
6048 
6049 	TAILQ_FOREACH_SAFE(vdev, &soc->inactive_vdev_list,
6050 			   inactive_list_elem, tmp_vdev) {
6051 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
6052 			count = qdf_atomic_read(&vdev->mod_refs[i]);
6053 			if (count)
6054 				DP_PRINT_STATS("vdev %pK Module id %u ==> %u",
6055 					       vdev, i, count);
6056 		}
6057 	}
6058 	QDF_BUG(0);
6059 }
6060 
/**
 * dp_soc_deinit() - Deinitialize txrx SOC
 * @txrx_soc: Opaque DP SOC handle
 *
 * Reverses dp_soc_init(): arch/monitor deinit first, then peer tables,
 * stats work, descriptor freelists, locks, SW pools, SRNGs and finally
 * the HTT/HTC handles. The ordering is deliberate — do not reorder.
 *
 * Return: None
 */
static void dp_soc_deinit(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	struct htt_soc *htt_soc = soc->htt_handle;
	struct dp_mon_ops *mon_ops;

	/* Gate off common-path users before tearing anything down */
	qdf_atomic_set(&soc->cmn_init_done, 0);

	soc->arch_ops.txrx_soc_deinit(soc);

	mon_ops = dp_mon_ops_get(soc);
	if (mon_ops && mon_ops->mon_soc_deinit)
		mon_ops->mon_soc_deinit(soc);

	/* free peer tables & AST tables allocated during peer_map_attach */
	if (soc->peer_map_attach_success) {
		dp_peer_find_detach(soc);
		soc->arch_ops.txrx_peer_map_detach(soc);
		soc->peer_map_attach_success = FALSE;
	}

	/* Let any in-flight stats work finish, then block re-queueing */
	qdf_flush_work(&soc->htt_stats.work);
	qdf_disable_work(&soc->htt_stats.work);

	qdf_spinlock_destroy(&soc->htt_stats.lock);

	dp_soc_reset_txrx_ring_map(soc);

	dp_reo_desc_freelist_destroy(soc);
	dp_reo_desc_deferred_freelist_destroy(soc);

	DEINIT_RX_HW_STATS_LOCK(soc);

	qdf_spinlock_destroy(&soc->ast_lock);

	dp_peer_mec_spinlock_destroy(soc);

	qdf_nbuf_queue_free(&soc->htt_stats.msg);

	qdf_nbuf_queue_free(&soc->invalid_buf_queue);

	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);

	qdf_spinlock_destroy(&soc->vdev_map_lock);

	/* Flush pending REO commands before the cmd lock goes away */
	dp_reo_cmdlist_destroy(soc);
	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);

	dp_soc_tx_desc_sw_pools_deinit(soc);

	dp_soc_srng_deinit(soc);

	dp_hw_link_desc_ring_deinit(soc);

	/* Report (and assert on) any leaked peer/vdev references */
	dp_soc_print_inactive_objects(soc);
	qdf_spinlock_destroy(&soc->inactive_peer_list_lock);
	qdf_spinlock_destroy(&soc->inactive_vdev_list_lock);

	htt_soc_htc_dealloc(soc->htt_handle);

	htt_soc_detach(htt_soc);

	/* Free wbm sg list and reset flags in down path */
	dp_rx_wbm_sg_list_deinit(soc);

	wlan_minidump_remove(soc, sizeof(*soc), soc->ctrl_psoc,
			     WLAN_MD_DP_SOC, "dp_soc");
}
6135 
/**
 * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
 * @txrx_soc: Opaque DP SOC handle
 *
 * CDP-facing wrapper that forwards to the common dp_soc_deinit() path.
 *
 * Return: None
 */
static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc)
{
	void *soc_handle = txrx_soc;

	dp_soc_deinit(soc_handle);
}
6146 
/*
 * dp_soc_detach() - Detach rest of txrx SOC
 * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
 *
 * Frees the remaining memory owned by the SOC after dp_soc_deinit() has
 * run; the soc object itself is released last.
 *
 * Return: None
 */
static void dp_soc_detach(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	soc->arch_ops.txrx_soc_detach(soc);

	dp_runtime_deinit();

	dp_sysfs_deinitialize_stats(soc);
	dp_soc_swlm_detach(soc);
	dp_soc_tx_desc_sw_pools_free(soc);
	dp_soc_srng_free(soc);
	dp_hw_link_desc_ring_free(soc);
	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
	dp_soc_tx_hw_desc_history_detach(soc);
	dp_soc_tx_history_detach(soc);
	dp_soc_mon_status_ring_history_detach(soc);
	dp_soc_rx_history_detach(soc);

	/* A modularized monitor detaches on its own module unload;
	 * only a built-in monitor is detached here.
	 */
	if (!dp_monitor_modularized_enable()) {
		dp_mon_soc_detach_wrapper(soc);
	}

	qdf_mem_free(soc->cdp_soc.ops);
	qdf_mem_free(soc);
}
6180 
/*
 * dp_soc_detach_wifi3() - Detach txrx SOC
 * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
 *
 * CDP-facing wrapper that forwards to the common dp_soc_detach() path.
 *
 * Return: None
 */
static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc)
{
	struct cdp_soc_t *soc_handle = txrx_soc;

	dp_soc_detach(soc_handle);
}
6191 
6192 /*
6193  * dp_rxdma_ring_config() - configure the RX DMA rings
6194  *
6195  * This function is used to configure the MAC rings.
6196  * On MCL host provides buffers in Host2FW ring
6197  * FW refills (copies) buffers to the ring and updates
6198  * ring_idx in register
6199  *
6200  * @soc: data path SoC handle
6201  *
6202  * Return: zero on success, non-zero on failure
6203  */
6204 #ifdef QCA_HOST2FW_RXBUF_RING
6205 static inline void
6206 dp_htt_setup_rxdma_err_dst_ring(struct dp_soc *soc, int mac_id,
6207 				int lmac_id)
6208 {
6209 	if (soc->rxdma_err_dst_ring[lmac_id].hal_srng)
6210 		htt_srng_setup(soc->htt_handle, mac_id,
6211 			       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
6212 			       RXDMA_DST);
6213 }
6214 
6215 #ifdef IPA_WDI3_VLAN_SUPPORT
6216 static inline
6217 void dp_rxdma_setup_refill_ring3(struct dp_soc *soc,
6218 				 struct dp_pdev *pdev,
6219 				 uint8_t idx)
6220 {
6221 	if (pdev->rx_refill_buf_ring3.hal_srng)
6222 		htt_srng_setup(soc->htt_handle, idx,
6223 			       pdev->rx_refill_buf_ring3.hal_srng,
6224 			       RXDMA_BUF);
6225 }
6226 #else
6227 static inline
6228 void dp_rxdma_setup_refill_ring3(struct dp_soc *soc,
6229 				 struct dp_pdev *pdev,
6230 				 uint8_t idx)
6231 { }
6232 #endif
6233 
/* MCL (QCA_HOST2FW_RXBUF_RING) variant: host provides buffers in the
 * Host2FW ring; per-MAC rings and monitor rings are programmed via HTT.
 */
static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (pdev) {
			int mac_id;
			int max_mac_rings =
				 wlan_cfg_get_num_mac_rings
				(pdev->wlan_cfg_ctx);
			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);

			/* Primary host refill ring for this pdev */
			htt_srng_setup(soc->htt_handle, i,
				       soc->rx_refill_buf_ring[lmac_id]
				       .hal_srng,
				       RXDMA_BUF);

			/* Optional second refill ring (e.g. for IPA) */
			if (pdev->rx_refill_buf_ring2.hal_srng)
				htt_srng_setup(soc->htt_handle, i,
					       pdev->rx_refill_buf_ring2
					       .hal_srng,
					       RXDMA_BUF);

			dp_rxdma_setup_refill_ring3(soc, pdev, i);

			dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);
			dp_err("pdev_id %d max_mac_rings %d",
			       pdev->pdev_id, max_mac_rings);

			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev =
					dp_get_mac_id_for_pdev(mac_id,
							       pdev->pdev_id);
				/*
				 * Obtain lmac id from pdev to access the LMAC
				 * ring in soc context
				 */
				lmac_id =
				dp_get_lmac_id_for_pdev_id(soc,
							   mac_id,
							   pdev->pdev_id);
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					 QDF_TRACE_LEVEL_ERROR,
					 FL("mac_id %d"), mac_for_pdev);

				htt_srng_setup(soc->htt_handle, mac_for_pdev,
					 pdev->rx_mac_buf_ring[mac_id]
						.hal_srng,
					 RXDMA_BUF);

				if (!soc->rxdma2sw_rings_not_supported)
					dp_htt_setup_rxdma_err_dst_ring(soc,
						mac_for_pdev, lmac_id);

				/* Configure monitor mode rings */
				status = dp_monitor_htt_srng_setup(soc, pdev,
								   lmac_id,
								   mac_for_pdev);
				if (status != QDF_STATUS_SUCCESS) {
					dp_err("Failed to send htt monitor messages to target");
					return status;
				}

			}
		}
	}

	dp_reap_timer_init(soc);
	return status;
}
6307 #else
/* This is only for WIN */
static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	int mac_for_pdev;
	int lmac_id;

	/* Configure monitor mode rings */
	dp_monitor_soc_htt_srng_setup(soc);

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev =  soc->pdev_list[i];

		if (!pdev)
			continue;

		/* On WIN the pdev index doubles as the target mac id */
		mac_for_pdev = i;
		lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);

		if (soc->rx_refill_buf_ring[lmac_id].hal_srng)
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				       soc->rx_refill_buf_ring[lmac_id].
				       hal_srng, RXDMA_BUF);

		/* Configure monitor mode rings */
		dp_monitor_htt_srng_setup(soc, pdev,
					  lmac_id,
					  mac_for_pdev);
		if (!soc->rxdma2sw_rings_not_supported)
			htt_srng_setup(soc->htt_handle, mac_for_pdev,
				       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
				       RXDMA_DST);
	}

	dp_reap_timer_init(soc);
	return status;
}
6346 #endif
6347 
6348 /*
6349  * dp_rx_target_fst_config() - configure the RXOLE Flow Search Engine
6350  *
6351  * This function is used to configure the FSE HW block in RX OLE on a
6352  * per pdev basis. Here, we will be programming parameters related to
6353  * the Flow Search Table.
6354  *
6355  * @soc: data path SoC handle
6356  *
6357  * Return: zero on success, non-zero on failure
6358  */
6359 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
6360 static QDF_STATUS
6361 dp_rx_target_fst_config(struct dp_soc *soc)
6362 {
6363 	int i;
6364 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6365 
6366 	for (i = 0; i < MAX_PDEV_CNT; i++) {
6367 		struct dp_pdev *pdev = soc->pdev_list[i];
6368 
6369 		/* Flow search is not enabled if NSS offload is enabled */
6370 		if (pdev &&
6371 		    !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
6372 			status = dp_rx_flow_send_fst_fw_setup(pdev->soc, pdev);
6373 			if (status != QDF_STATUS_SUCCESS)
6374 				break;
6375 		}
6376 	}
6377 	return status;
6378 }
6379 #elif defined(WLAN_SUPPORT_RX_FISA)
6380 /**
6381  * dp_rx_target_fst_config() - Configure RX OLE FSE engine in HW
6382  * @soc: SoC handle
6383  *
6384  * Return: Success
6385  */
6386 static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
6387 {
6388 	QDF_STATUS status;
6389 	struct dp_rx_fst *fst = soc->rx_fst;
6390 
6391 	/* Check if it is enabled in the INI */
6392 	if (!soc->fisa_enable) {
6393 		dp_err("RX FISA feature is disabled");
6394 		return QDF_STATUS_E_NOSUPPORT;
6395 	}
6396 
6397 	status = dp_rx_flow_send_fst_fw_setup(soc, soc->pdev_list[0]);
6398 	if (QDF_IS_STATUS_ERROR(status)) {
6399 		dp_err("dp_rx_flow_send_fst_fw_setup failed %d",
6400 		       status);
6401 		return status;
6402 	}
6403 
6404 	if (soc->fst_cmem_base) {
6405 		soc->fst_in_cmem = true;
6406 		dp_rx_fst_update_cmem_params(soc, fst->max_entries,
6407 					     soc->fst_cmem_base & 0xffffffff,
6408 					     soc->fst_cmem_base >> 32);
6409 	}
6410 	return status;
6411 }
6412 
6413 #define FISA_MAX_TIMEOUT 0xffffffff
6414 #define FISA_DISABLE_TIMEOUT 0
6415 static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
6416 {
6417 	struct dp_htt_rx_fisa_cfg fisa_config;
6418 
6419 	fisa_config.pdev_id = 0;
6420 	fisa_config.fisa_timeout = FISA_MAX_TIMEOUT;
6421 
6422 	return dp_htt_rx_fisa_config(soc->pdev_list[0], &fisa_config);
6423 }
6424 
6425 #else /* !WLAN_SUPPORT_RX_FISA */
/* No FST/FISA support compiled in: report success so init proceeds */
static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
6430 #endif /* !WLAN_SUPPORT_RX_FISA */
6431 
6432 #ifndef WLAN_SUPPORT_RX_FISA
/* Stubs used when WLAN_SUPPORT_RX_FISA is not enabled */
static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_rx_dump_fisa_stats(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_rx_dump_fisa_table(struct dp_soc *soc)
{
}

static void dp_suspend_fse_cache_flush(struct dp_soc *soc)
{
}

static void dp_resume_fse_cache_flush(struct dp_soc *soc)
{
}
6454 #endif /* !WLAN_SUPPORT_RX_FISA */
6455 
6456 #ifndef WLAN_DP_FEATURE_SW_LATENCY_MGR
/* SW latency manager not compiled in: nothing to print */
static inline QDF_STATUS dp_print_swlm_stats(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
6461 #endif /* !WLAN_DP_FEATURE_SW_LATENCY_MGR */
6462 
6463 #ifdef WLAN_SUPPORT_PPEDS
6464 /*
6465  * dp_soc_target_ppe_rxole_rxdma_cfg() - Configure the RxOLe and RxDMA for PPE
6466  * @soc: DP Tx/Rx handle
6467  *
6468  * Return: QDF_STATUS
6469  */
6470 static
6471 QDF_STATUS dp_soc_target_ppe_rxole_rxdma_cfg(struct dp_soc *soc)
6472 {
6473 	struct dp_htt_rxdma_rxole_ppe_config htt_cfg = {0};
6474 	QDF_STATUS status;
6475 
6476 	/*
6477 	 * Program RxDMA to override the reo destination indication
6478 	 * with REO2PPE_DST_IND, when use_ppe is set to 1 in RX_MSDU_END,
6479 	 * thereby driving the packet to REO2PPE ring.
6480 	 * If the MSDU is spanning more than 1 buffer, then this
6481 	 * override is not done.
6482 	 */
6483 	htt_cfg.override = 1;
6484 	htt_cfg.reo_destination_indication = REO2PPE_DST_IND;
6485 	htt_cfg.multi_buffer_msdu_override_en = 0;
6486 
6487 	/*
6488 	 * Override use_ppe to 0 in RxOLE for the following
6489 	 * cases.
6490 	 */
6491 	htt_cfg.intra_bss_override = 1;
6492 	htt_cfg.decap_raw_override = 1;
6493 	htt_cfg.decap_nwifi_override = 1;
6494 	htt_cfg.ip_frag_override = 1;
6495 
6496 	status = dp_htt_rxdma_rxole_ppe_cfg_set(soc, &htt_cfg);
6497 	if (status != QDF_STATUS_SUCCESS)
6498 		dp_err("RxOLE and RxDMA PPE config failed %d", status);
6499 
6500 	return status;
6501 }
6502 #else
/* PPE-DS not supported: nothing to program, report success */
static inline
QDF_STATUS dp_soc_target_ppe_rxole_rxdma_cfg(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
6508 #endif /* WLAN_SUPPORT_PPEDS */
6509 
/*
 * dp_soc_attach_target_wifi3() - SOC initialization in the target
 * @cdp_soc: Opaque Datapath SOC handle
 *
 * Sends the sequence of HTT configuration messages that bring up the
 * target-side datapath; aborts on the first hard failure.
 *
 * Return: zero on success, non-zero on failure
 */
static QDF_STATUS
dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	htt_soc_attach_target(soc->htt_handle);

	status = dp_soc_target_ppe_rxole_rxdma_cfg(soc);
	if (status != QDF_STATUS_SUCCESS) {
		dp_err("Failed to send htt RxOLE and RxDMA messages to target");
		return status;
	}

	status = dp_rxdma_ring_config(soc);
	if (status != QDF_STATUS_SUCCESS) {
		dp_err("Failed to send htt srng setup messages to target");
		return status;
	}

	status = soc->arch_ops.dp_rxdma_ring_sel_cfg(soc);
	if (status != QDF_STATUS_SUCCESS) {
		dp_err("Failed to send htt ring config message to target");
		return status;
	}

	/* E_NOSUPPORT is tolerated: the feature may simply be absent */
	status = dp_soc_umac_reset_init(soc);
	if (status != QDF_STATUS_SUCCESS &&
	    status != QDF_STATUS_E_NOSUPPORT) {
		dp_err("Failed to initialize UMAC reset");
		return status;
	}

	status = dp_rx_target_fst_config(soc);
	if (status != QDF_STATUS_SUCCESS &&
	    status != QDF_STATUS_E_NOSUPPORT) {
		dp_err("Failed to send htt fst setup config message to target");
		return status;
	}

	/* FISA config only makes sense when the FST setup succeeded
	 * (status may be E_NOSUPPORT here, which skips this step)
	 */
	if (status == QDF_STATUS_SUCCESS) {
		status = dp_rx_fisa_config(soc);
		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt FISA config message to target");
			return status;
		}
	}

	DP_STATS_INIT(soc);

	dp_runtime_init(soc);

	/* Enable HW vdev offload stats if feature is supported */
	dp_vdev_stats_hw_offload_target_config(soc, INVALID_PDEV_ID, true);

	/* initialize work queue for stats processing */
	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);

	return QDF_STATUS_SUCCESS;
}
6576 
/*
 * dp_vdev_id_map_tbl_add() - Add vdev into vdev_id table
 * @soc: SoC handle
 * @vdev: vdev handle
 * @vdev_id: vdev_id
 *
 * Takes a DP_MOD_ID_CONFIG reference on the vdev before publishing it in
 * the id map; the reference is dropped in dp_vdev_id_map_tbl_remove().
 *
 * Return: None
 */
static void dp_vdev_id_map_tbl_add(struct dp_soc *soc,
				   struct dp_vdev *vdev,
				   uint8_t vdev_id)
{
	/* NOTE(review): if vdev_id_map[] has exactly MAX_VDEV_CNT slots,
	 * this bound looks off by one ('<' expected) — confirm array size.
	 */
	QDF_ASSERT(vdev_id <= MAX_VDEV_CNT);

	qdf_spin_lock_bh(&soc->vdev_map_lock);

	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
			QDF_STATUS_SUCCESS) {
		dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK vdev_id %u",
			     soc, vdev, vdev_id);
		qdf_spin_unlock_bh(&soc->vdev_map_lock);
		return;
	}

	/* Slot must be free; a stale entry indicates an id collision.
	 * NOTE(review): the CONFIG reference taken above is not released
	 * on the collision path — verify whether that is intentional.
	 */
	if (!soc->vdev_id_map[vdev_id])
		soc->vdev_id_map[vdev_id] = vdev;
	else
		QDF_ASSERT(0);

	qdf_spin_unlock_bh(&soc->vdev_map_lock);
}
6608 
/*
 * dp_vdev_id_map_tbl_remove() - remove vdev from vdev_id table
 * @soc: SoC handle
 * @vdev: vdev handle
 *
 * Clears the id-map slot and drops the DP_MOD_ID_CONFIG reference taken
 * by dp_vdev_id_map_tbl_add().
 *
 * Return: None
 */
static void dp_vdev_id_map_tbl_remove(struct dp_soc *soc,
				      struct dp_vdev *vdev)
{
	qdf_spin_lock_bh(&soc->vdev_map_lock);
	QDF_ASSERT(soc->vdev_id_map[vdev->vdev_id] == vdev);

	soc->vdev_id_map[vdev->vdev_id] = NULL;
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
	qdf_spin_unlock_bh(&soc->vdev_map_lock);
}
6626 
/*
 * dp_vdev_pdev_list_add() - add vdev into pdev's list
 * @soc: soc handle
 * @pdev: pdev handle
 * @vdev: vdev handle
 *
 * Takes a DP_MOD_ID_CONFIG reference on the vdev for the lifetime of its
 * pdev-list membership (dropped in dp_vdev_pdev_list_remove()).
 *
 * return: none
 */
static void dp_vdev_pdev_list_add(struct dp_soc *soc,
				  struct dp_pdev *pdev,
				  struct dp_vdev *vdev)
{
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
			QDF_STATUS_SUCCESS) {
		dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK",
			     soc, vdev);
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
		return;
	}
	/* add this vdev into the pdev's list */
	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
}
6651 
6652 /*
6653  * dp_vdev_pdev_list_remove() - remove vdev from pdev's list
6654  * @soc: SoC handle
6655  * @pdev: pdev handle
6656  * @vdev: VDEV handle
6657  *
6658  * Return: none
6659  */
6660 static void dp_vdev_pdev_list_remove(struct dp_soc *soc,
6661 				     struct dp_pdev *pdev,
6662 				     struct dp_vdev *vdev)
6663 {
6664 	uint8_t found = 0;
6665 	struct dp_vdev *tmpvdev = NULL;
6666 
6667 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
6668 	TAILQ_FOREACH(tmpvdev, &pdev->vdev_list, vdev_list_elem) {
6669 		if (tmpvdev == vdev) {
6670 			found = 1;
6671 			break;
6672 		}
6673 	}
6674 
6675 	if (found) {
6676 		TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
6677 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
6678 	} else {
6679 		dp_vdev_debug("%pK: vdev:%pK not found in pdev:%pK vdevlist:%pK",
6680 			      soc, vdev, pdev, &pdev->vdev_list);
6681 		QDF_ASSERT(0);
6682 	}
6683 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6684 }
6685 
#ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
/*
 * dp_vdev_init_rx_eapol() - initializing osif_rx_eapol
 * @vdev: Datapath VDEV handle
 *
 * Return: None
 */
static inline void dp_vdev_init_rx_eapol(struct dp_vdev *vdev)
{
	vdev->osif_rx_eapol = NULL;
}

/*
 * dp_vdev_register_rx_eapol() - Register VDEV operations for rx_eapol
 * @vdev: DP vdev handle
 * @txrx_ops: Tx and Rx operations
 *
 * Return: None
 */
static inline void dp_vdev_register_rx_eapol(struct dp_vdev *vdev,
					     struct ol_txrx_ops *txrx_ops)
{
	vdev->osif_rx_eapol = txrx_ops->rx.rx_eapol;
}
#else
/* Stubs when EAPOL-over-control-port is not enabled */
static inline void dp_vdev_init_rx_eapol(struct dp_vdev *vdev)
{
}

static inline void dp_vdev_register_rx_eapol(struct dp_vdev *vdev,
					     struct ol_txrx_ops *txrx_ops)
{
}
#endif
6720 
6721 #ifdef WLAN_FEATURE_11BE_MLO
#if defined(WLAN_MLO_MULTI_CHIP) && defined(WLAN_MCAST_MLO)
/* Mark the vdev as an MLO vdev iff a non-zero MLD address was supplied */
static inline void dp_vdev_save_mld_info(struct dp_vdev *vdev,
					 struct cdp_vdev_info *vdev_info)
{
	vdev->mlo_vdev =
		!qdf_is_macaddr_zero((struct qdf_mac_addr *)vdev_info->mld_mac_addr);
}
#else
static inline void dp_vdev_save_mld_info(struct dp_vdev *vdev,
					 struct cdp_vdev_info *vdev_info)
{
}
#endif
6737 static inline void dp_vdev_save_mld_addr(struct dp_vdev *vdev,
6738 					 struct cdp_vdev_info *vdev_info)
6739 {
6740 	if (vdev_info->mld_mac_addr)
6741 		qdf_mem_copy(&vdev->mld_mac_addr.raw[0],
6742 			     vdev_info->mld_mac_addr, QDF_MAC_ADDR_SIZE);
6743 
6744 	dp_vdev_save_mld_info(vdev, vdev_info);
6745 
6746 }
6747 #else
/* Non-11be builds: no MLD address to record */
static inline void dp_vdev_save_mld_addr(struct dp_vdev *vdev,
					 struct cdp_vdev_info *vdev_info)
{

}
6753 #endif
6754 
/*
 * dp_vdev_attach_wifi3() - attach txrx vdev
 * @cdp_soc: Datapath SOC handle
 * @pdev_id: PDEV ID for vdev creation
 * @vdev_info: parameters used for vdev creation
 *
 * Allocates and initializes a DP vdev, publishes it in the vdev_id map
 * and the pdev's vdev list, and (monitor mode aside) attaches Tx and
 * monitor state. For STA vdevs a self link-peer is also created.
 *
 * Return: status
 */
static QDF_STATUS dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc,
				       uint8_t pdev_id,
				       struct cdp_vdev_info *vdev_info)
{
	int i = 0;
	qdf_size_t vdev_context_size;
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	struct dp_vdev *vdev;
	uint8_t *vdev_mac_addr = vdev_info->vdev_mac_addr;
	uint8_t vdev_id = vdev_info->vdev_id;
	enum wlan_op_mode op_mode = vdev_info->op_mode;
	enum wlan_op_subtype subtype = vdev_info->subtype;
	uint8_t vdev_stats_id = vdev_info->vdev_stats_id;

	/* Arch-specific vdev context is larger than the common dp_vdev */
	vdev_context_size =
		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_VDEV);
	vdev = qdf_mem_malloc(vdev_context_size);

	if (!pdev) {
		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
			    cdp_soc, pdev_id);
		qdf_mem_free(vdev);
		goto fail0;
	}

	if (!vdev) {
		dp_init_err("%pK: DP VDEV memory allocation failed",
			    cdp_soc);
		goto fail0;
	}

	wlan_minidump_log(vdev, sizeof(*vdev), soc->ctrl_psoc,
			  WLAN_MD_DP_VDEV, "dp_vdev");

	vdev->pdev = pdev;
	vdev->vdev_id = vdev_id;
	vdev->vdev_stats_id = vdev_stats_id;
	vdev->opmode = op_mode;
	vdev->subtype = subtype;
	vdev->osdev = soc->osdev;

	/* OS-interface callbacks are filled in later by
	 * dp_vdev_register_wifi3()
	 */
	vdev->osif_rx = NULL;
	vdev->osif_rsim_rx_decap = NULL;
	vdev->osif_get_key = NULL;
	vdev->osif_tx_free_ext = NULL;
	vdev->osif_vdev = NULL;

	vdev->delete.pending = 0;
	vdev->safemode = 0;
	vdev->drop_unenc = 1;
	vdev->sec_type = cdp_sec_type_none;
	vdev->multipass_en = false;
	vdev->wrap_vdev = false;
	dp_vdev_init_rx_eapol(vdev);
	qdf_atomic_init(&vdev->ref_cnt);
	for (i = 0; i < DP_MOD_ID_MAX; i++)
		qdf_atomic_init(&vdev->mod_refs[i]);

	/* Take one reference for create */
	qdf_atomic_inc(&vdev->ref_cnt);
	qdf_atomic_inc(&vdev->mod_refs[DP_MOD_ID_CONFIG]);
	vdev->num_peers = 0;
#ifdef notyet
	vdev->filters_num = 0;
#endif
	vdev->lmac_id = pdev->lmac_id;

	qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE);

	dp_vdev_save_mld_addr(vdev, vdev_info);

	/* TODO: Initialize default HTT meta data that will be used in
	 * TCL descriptors for packets transmitted from this VDEV
	 */

	qdf_spinlock_create(&vdev->peer_list_lock);
	TAILQ_INIT(&vdev->peer_list);
	dp_peer_multipass_list_init(vdev);
	if ((soc->intr_mode == DP_INTR_POLL) &&
	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
		if ((pdev->vdev_count == 0) ||
		    (wlan_op_mode_monitor == vdev->opmode))
			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
	} else if (dp_soc_get_con_mode(soc) == QDF_GLOBAL_MISSION_MODE &&
		   soc->intr_mode == DP_INTR_MSI &&
		   wlan_op_mode_monitor == vdev->opmode) {
		/* Timer to reap status ring in mission mode */
		dp_monitor_vdev_timer_start(soc);
	}

	dp_vdev_id_map_tbl_add(soc, vdev, vdev_id);

	/* Monitor vdevs take a shorter path: no Tx attach, no pdev list */
	if (wlan_op_mode_monitor == vdev->opmode) {
		if (dp_monitor_vdev_attach(vdev) == QDF_STATUS_SUCCESS) {
			dp_monitor_pdev_set_mon_vdev(vdev);
			return dp_monitor_vdev_set_monitor_mode_buf_rings(pdev);
		}
		return QDF_STATUS_E_FAILURE;
	}

	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
	vdev->dscp_tid_map_id = 0;
	vdev->mcast_enhancement_en = 0;
	vdev->igmp_mcast_enhanc_en = 0;
	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
	vdev->prev_tx_enq_tstamp = 0;
	vdev->prev_rx_deliver_tstamp = 0;
	vdev->skip_sw_tid_classification = DP_TX_HW_DSCP_TID_MAP_VALID;

	dp_vdev_pdev_list_add(soc, pdev, vdev);
	pdev->vdev_count++;

	if (wlan_op_mode_sta != vdev->opmode &&
	    wlan_op_mode_ndi != vdev->opmode)
		vdev->ap_bridge_enabled = true;
	else
		vdev->ap_bridge_enabled = false;
	dp_init_info("%pK: wlan_cfg_ap_bridge_enabled %d",
		     cdp_soc, vdev->ap_bridge_enabled);

	dp_tx_vdev_attach(vdev);

	dp_monitor_vdev_attach(vdev);
	if (!pdev->is_lro_hash_configured) {
		if (QDF_IS_STATUS_SUCCESS(dp_lro_hash_setup(soc, pdev)))
			pdev->is_lro_hash_configured = true;
		else
			dp_err("LRO hash setup failure!");
	}

	dp_info("Created vdev %pK ("QDF_MAC_ADDR_FMT")", vdev,
		QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
	DP_STATS_INIT(vdev);

	/* NOTE(review): on this failure path the vdev stays in the id map
	 * and pdev list and its memory is not freed — confirm whether the
	 * caller is expected to tear it down via vdev_detach.
	 */
	if (QDF_IS_STATUS_ERROR(soc->arch_ops.txrx_vdev_attach(soc, vdev)))
		goto fail0;

	if (wlan_op_mode_sta == vdev->opmode)
		dp_peer_create_wifi3((struct cdp_soc_t *)soc, vdev_id,
				     vdev->mac_addr.raw, CDP_LINK_PEER_TYPE);
	return QDF_STATUS_SUCCESS;

fail0:
	return QDF_STATUS_E_FAILURE;
}
6912 
6913 #ifndef QCA_HOST_MODE_WIFI_DISABLED
6914 /**
6915  * dp_vdev_register_tx_handler() - Register Tx handler
6916  * @vdev: struct dp_vdev *
6917  * @soc: struct dp_soc *
6918  * @txrx_ops: struct ol_txrx_ops *
6919  */
6920 static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
6921 					       struct dp_soc *soc,
6922 					       struct ol_txrx_ops *txrx_ops)
6923 {
6924 	/* Enable vdev_id check only for ap, if flag is enabled */
6925 	if (vdev->mesh_vdev)
6926 		txrx_ops->tx.tx = dp_tx_send_mesh;
6927 	else if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
6928 		 (vdev->opmode == wlan_op_mode_ap))
6929 		txrx_ops->tx.tx = dp_tx_send_vdev_id_check;
6930 	else
6931 		txrx_ops->tx.tx = dp_tx_send;
6932 
6933 	/* Avoid check in regular exception Path */
6934 	if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
6935 	    (vdev->opmode == wlan_op_mode_ap))
6936 		txrx_ops->tx.tx_exception = dp_tx_send_exception_vdev_id_check;
6937 	else
6938 		txrx_ops->tx.tx_exception = dp_tx_send_exception;
6939 
6940 	dp_info("Configure tx_vdev_id_chk_handler Feature Flag: %d and mode:%d for vdev_id:%d",
6941 		wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx),
6942 		vdev->opmode, vdev->vdev_id);
6943 }
6944 #else /* QCA_HOST_MODE_WIFI_DISABLED */
/* Host-mode WiFi disabled: no Tx handlers to install */
static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
					       struct dp_soc *soc,
					       struct ol_txrx_ops *txrx_ops)
{
}
6950 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
6951 
/**
 * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
 * @soc_hdl: Datapath soc handle
 * @vdev_id: id of Datapath VDEV handle
 * @osif_vdev: OSIF vdev handle
 * @txrx_ops: Tx and Rx operations
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE if the
 *         vdev_id cannot be resolved
 */
static QDF_STATUS dp_vdev_register_wifi3(struct cdp_soc_t *soc_hdl,
					 uint8_t vdev_id,
					 ol_osif_vdev_handle osif_vdev,
					 struct ol_txrx_ops *txrx_ops)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev =	dp_vdev_get_ref_by_id(soc, vdev_id,
						      DP_MOD_ID_CDP);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	/* Cache the OS-interface callbacks on the vdev for the datapath */
	vdev->osif_vdev = osif_vdev;
	vdev->osif_rx = txrx_ops->rx.rx;
	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
	vdev->osif_rx_flush = txrx_ops->rx.rx_flush;
	vdev->osif_gro_flush = txrx_ops->rx.rx_gro_flush;
	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
	vdev->osif_fisa_rx = txrx_ops->rx.osif_fisa_rx;
	vdev->osif_fisa_flush = txrx_ops->rx.osif_fisa_flush;
	vdev->osif_get_key = txrx_ops->get_key;
	dp_monitor_vdev_register_osif(vdev, txrx_ops);
	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
	vdev->tx_comp = txrx_ops->tx.tx_comp;
	vdev->stats_cb = txrx_ops->rx.stats_rx;
	vdev->tx_classify_critical_pkt_cb =
		txrx_ops->tx.tx_classify_critical_pkt_cb;
#ifdef notyet
#if ATH_SUPPORT_WAPI
	vdev->osif_check_wai = txrx_ops->rx.wai_check;
#endif
#endif
#ifdef UMAC_SUPPORT_PROXY_ARP
	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
#endif
	vdev->me_convert = txrx_ops->me_convert;
	vdev->get_tsf_time = txrx_ops->get_tsf_time;

	dp_vdev_register_rx_eapol(vdev, txrx_ops);

	dp_vdev_register_tx_handler(vdev, soc, txrx_ops);

	dp_init_info("%pK: DP Vdev Register success", soc);

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}
7008 
#ifdef WLAN_FEATURE_11BE_MLO
/* Peer-iterator callback: delete a still-valid peer, preserving its
 * actual peer_type (link vs MLD) on MLO-capable builds.
 */
void dp_peer_delete(struct dp_soc *soc,
		    struct dp_peer *peer,
		    void *arg)
{
	if (!peer->valid)
		return;

	dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
			     peer->vdev->vdev_id,
			     peer->mac_addr.raw, 0,
			     peer->peer_type);
}
#else
/* Peer-iterator callback: delete a still-valid peer; without MLO every
 * peer is treated as a link peer.
 */
void dp_peer_delete(struct dp_soc *soc,
		    struct dp_peer *peer,
		    void *arg)
{
	if (!peer->valid)
		return;

	dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
			     peer->vdev->vdev_id,
			     peer->mac_addr.raw, 0,
			     CDP_LINK_PEER_TYPE);
}
#endif
7036 
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
/* Peer-iterator callback: delete only MLO link peers of a vdev; other
 * peer types are left untouched by this flush variant.
 */
void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
	if (!peer->valid)
		return;

	if (IS_MLO_DP_LINK_PEER(peer))
		dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
				     peer->vdev->vdev_id,
				     peer->mac_addr.raw, 0,
				     CDP_LINK_PEER_TYPE);
}
#else
/* No-op without multi-chip MLO support */
void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
}
#endif
7054 /**
7055  * dp_vdev_flush_peers() - Forcibily Flush peers of vdev
7056  * @vdev: Datapath VDEV handle
7057  * @unmap_only: Flag to indicate "only unmap"
7058  *
7059  * Return: void
7060  */
7061 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
7062 				bool unmap_only,
7063 				bool mlo_peers_only)
7064 {
7065 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7066 	struct dp_pdev *pdev = vdev->pdev;
7067 	struct dp_soc *soc = pdev->soc;
7068 	struct dp_peer *peer;
7069 	uint32_t i = 0;
7070 
7071 
7072 	if (!unmap_only) {
7073 		if (!mlo_peers_only)
7074 			dp_vdev_iterate_peer_lock_safe(vdev,
7075 						       dp_peer_delete,
7076 						       NULL,
7077 						       DP_MOD_ID_CDP);
7078 		else
7079 			dp_vdev_iterate_peer_lock_safe(vdev,
7080 						       dp_mlo_peer_delete,
7081 						       NULL,
7082 						       DP_MOD_ID_CDP);
7083 	}
7084 
7085 	for (i = 0; i < soc->max_peer_id ; i++) {
7086 		peer = __dp_peer_get_ref_by_id(soc, i, DP_MOD_ID_CDP);
7087 
7088 		if (!peer)
7089 			continue;
7090 
7091 		if (peer->vdev != vdev) {
7092 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7093 			continue;
7094 		}
7095 
7096 		if (!mlo_peers_only) {
7097 			dp_info("peer: " QDF_MAC_ADDR_FMT " is getting unmap",
7098 				QDF_MAC_ADDR_REF(peer->mac_addr.raw));
7099 			dp_rx_peer_unmap_handler(soc, i,
7100 						 vdev->vdev_id,
7101 						 peer->mac_addr.raw, 0,
7102 						 DP_PEER_WDS_COUNT_INVALID);
7103 			SET_PEER_REF_CNT_ONE(peer);
7104 		} else if (IS_MLO_DP_LINK_PEER(peer) ||
7105 			   IS_MLO_DP_MLD_PEER(peer)) {
7106 			dp_info("peer: " QDF_MAC_ADDR_FMT " is getting unmap",
7107 				QDF_MAC_ADDR_REF(peer->mac_addr.raw));
7108 			dp_rx_peer_unmap_handler(soc, i,
7109 						 vdev->vdev_id,
7110 						 peer->mac_addr.raw, 0,
7111 						 DP_PEER_WDS_COUNT_INVALID);
7112 			SET_PEER_REF_CNT_ONE(peer);
7113 		}
7114 
7115 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7116 	}
7117 }
7118 
7119 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
7120 /*
7121  * dp_txrx_alloc_vdev_stats_id()- Allocate vdev_stats_id
7122  * @soc_hdl: Datapath soc handle
7123  * @vdev_stats_id: Address of vdev_stats_id
7124  *
7125  * Return: QDF_STATUS
7126  */
7127 static QDF_STATUS dp_txrx_alloc_vdev_stats_id(struct cdp_soc_t *soc_hdl,
7128 					      uint8_t *vdev_stats_id)
7129 {
7130 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7131 	uint8_t id = 0;
7132 
7133 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
7134 		*vdev_stats_id = CDP_INVALID_VDEV_STATS_ID;
7135 		return QDF_STATUS_E_FAILURE;
7136 	}
7137 
7138 	while (id < CDP_MAX_VDEV_STATS_ID) {
7139 		if (!qdf_atomic_test_and_set_bit(id, &soc->vdev_stats_id_map)) {
7140 			*vdev_stats_id = id;
7141 			return QDF_STATUS_SUCCESS;
7142 		}
7143 		id++;
7144 	}
7145 
7146 	*vdev_stats_id = CDP_INVALID_VDEV_STATS_ID;
7147 	return QDF_STATUS_E_FAILURE;
7148 }
7149 
7150 /*
7151  * dp_txrx_reset_vdev_stats_id() - Reset vdev_stats_id in dp_soc
7152  * @soc_hdl: Datapath soc handle
7153  * @vdev_stats_id: vdev_stats_id to reset in dp_soc
7154  *
7155  * Return: none
7156  */
7157 static void dp_txrx_reset_vdev_stats_id(struct cdp_soc_t *soc_hdl,
7158 					uint8_t vdev_stats_id)
7159 {
7160 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7161 
7162 	if ((!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) ||
7163 	    (vdev_stats_id >= CDP_MAX_VDEV_STATS_ID))
7164 		return;
7165 
7166 	qdf_atomic_clear_bit(vdev_stats_id, &soc->vdev_stats_id_map);
7167 }
7168 #else
7169 static void dp_txrx_reset_vdev_stats_id(struct cdp_soc_t *soc,
7170 					uint8_t vdev_stats_id)
7171 {}
7172 #endif
7173 /*
7174  * dp_vdev_detach_wifi3() - Detach txrx vdev
7175  * @cdp_soc: Datapath soc handle
7176  * @vdev_id: VDEV Id
7177  * @callback: Callback OL_IF on completion of detach
7178  * @cb_context:	Callback context
7179  *
7180  */
7181 static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc,
7182 				       uint8_t vdev_id,
7183 				       ol_txrx_vdev_delete_cb callback,
7184 				       void *cb_context)
7185 {
7186 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
7187 	struct dp_pdev *pdev;
7188 	struct dp_neighbour_peer *peer = NULL;
7189 	struct dp_peer *vap_self_peer = NULL;
7190 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
7191 						     DP_MOD_ID_CDP);
7192 
7193 	if (!vdev)
7194 		return QDF_STATUS_E_FAILURE;
7195 
7196 	soc->arch_ops.txrx_vdev_detach(soc, vdev);
7197 
7198 	pdev = vdev->pdev;
7199 
7200 	vap_self_peer = dp_sta_vdev_self_peer_ref_n_get(soc, vdev,
7201 							DP_MOD_ID_CONFIG);
7202 	if (vap_self_peer) {
7203 		qdf_spin_lock_bh(&soc->ast_lock);
7204 		if (vap_self_peer->self_ast_entry) {
7205 			dp_peer_del_ast(soc, vap_self_peer->self_ast_entry);
7206 			vap_self_peer->self_ast_entry = NULL;
7207 		}
7208 		qdf_spin_unlock_bh(&soc->ast_lock);
7209 
7210 		dp_peer_delete_wifi3((struct cdp_soc_t *)soc, vdev->vdev_id,
7211 				     vap_self_peer->mac_addr.raw, 0,
7212 				     CDP_LINK_PEER_TYPE);
7213 		dp_peer_unref_delete(vap_self_peer, DP_MOD_ID_CONFIG);
7214 	}
7215 
7216 	/*
7217 	 * If Target is hung, flush all peers before detaching vdev
7218 	 * this will free all references held due to missing
7219 	 * unmap commands from Target
7220 	 */
7221 	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
7222 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, false, false);
7223 	else if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
7224 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, true, false);
7225 
7226 	/* indicate that the vdev needs to be deleted */
7227 	vdev->delete.pending = 1;
7228 	dp_rx_vdev_detach(vdev);
7229 	/*
7230 	 * move it after dp_rx_vdev_detach(),
7231 	 * as the call back done in dp_rx_vdev_detach()
7232 	 * still need to get vdev pointer by vdev_id.
7233 	 */
7234 	dp_vdev_id_map_tbl_remove(soc, vdev);
7235 
7236 	dp_monitor_neighbour_peer_list_remove(pdev, vdev, peer);
7237 
7238 	dp_txrx_reset_vdev_stats_id(cdp_soc, vdev->vdev_stats_id);
7239 
7240 	dp_tx_vdev_multipass_deinit(vdev);
7241 
7242 	if (vdev->vdev_dp_ext_handle) {
7243 		qdf_mem_free(vdev->vdev_dp_ext_handle);
7244 		vdev->vdev_dp_ext_handle = NULL;
7245 	}
7246 	vdev->delete.callback = callback;
7247 	vdev->delete.context = cb_context;
7248 
7249 	if (vdev->opmode != wlan_op_mode_monitor)
7250 		dp_vdev_pdev_list_remove(soc, pdev, vdev);
7251 
7252 	pdev->vdev_count--;
7253 	/* release reference taken above for find */
7254 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7255 
7256 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
7257 	TAILQ_INSERT_TAIL(&soc->inactive_vdev_list, vdev, inactive_list_elem);
7258 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
7259 
7260 	/* release reference taken at dp_vdev_create */
7261 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
7262 
7263 	return QDF_STATUS_SUCCESS;
7264 }
7265 
7266 #ifdef WLAN_FEATURE_11BE_MLO
7267 /**
7268  * is_dp_peer_can_reuse() - check if the dp_peer match condition to be reused
7269  * @vdev: Target DP vdev handle
7270  * @peer: DP peer handle to be checked
7271  * @peer_mac_addr: Target peer mac address
7272  * @peer_type: Target peer type
7273  *
7274  * Return: true - if match, false - not match
7275  */
7276 static inline
7277 bool is_dp_peer_can_reuse(struct dp_vdev *vdev,
7278 			  struct dp_peer *peer,
7279 			  uint8_t *peer_mac_addr,
7280 			  enum cdp_peer_type peer_type)
7281 {
7282 	if (peer->bss_peer && (peer->vdev == vdev) &&
7283 	    (peer->peer_type == peer_type) &&
7284 	    (qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw,
7285 			 QDF_MAC_ADDR_SIZE) == 0))
7286 		return true;
7287 
7288 	return false;
7289 }
7290 #else
7291 static inline
7292 bool is_dp_peer_can_reuse(struct dp_vdev *vdev,
7293 			  struct dp_peer *peer,
7294 			  uint8_t *peer_mac_addr,
7295 			  enum cdp_peer_type peer_type)
7296 {
7297 	if (peer->bss_peer && (peer->vdev == vdev) &&
7298 	    (qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw,
7299 			 QDF_MAC_ADDR_SIZE) == 0))
7300 		return true;
7301 
7302 	return false;
7303 }
7304 #endif
7305 
7306 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
7307 						uint8_t *peer_mac_addr,
7308 						enum cdp_peer_type peer_type)
7309 {
7310 	struct dp_peer *peer;
7311 	struct dp_soc *soc = vdev->pdev->soc;
7312 
7313 	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
7314 	TAILQ_FOREACH(peer, &soc->inactive_peer_list,
7315 		      inactive_list_elem) {
7316 
7317 		/* reuse bss peer only when vdev matches*/
7318 		if (is_dp_peer_can_reuse(vdev, peer,
7319 					 peer_mac_addr, peer_type)) {
7320 			/* increment ref count for cdp_peer_create*/
7321 			if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG) ==
7322 						QDF_STATUS_SUCCESS) {
7323 				TAILQ_REMOVE(&soc->inactive_peer_list, peer,
7324 					     inactive_list_elem);
7325 				qdf_spin_unlock_bh
7326 					(&soc->inactive_peer_list_lock);
7327 				return peer;
7328 			}
7329 		}
7330 	}
7331 
7332 	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
7333 	return NULL;
7334 }
7335 
7336 #ifdef FEATURE_AST
7337 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
7338 					       struct dp_pdev *pdev,
7339 					       uint8_t *peer_mac_addr)
7340 {
7341 	struct dp_ast_entry *ast_entry;
7342 
7343 	if (soc->ast_offload_support)
7344 		return;
7345 
7346 	qdf_spin_lock_bh(&soc->ast_lock);
7347 	if (soc->ast_override_support)
7348 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
7349 							    pdev->pdev_id);
7350 	else
7351 		ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
7352 
7353 	if (ast_entry && ast_entry->next_hop && !ast_entry->delete_in_progress)
7354 		dp_peer_del_ast(soc, ast_entry);
7355 
7356 	qdf_spin_unlock_bh(&soc->ast_lock);
7357 }
7358 #else
7359 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
7360 					       struct dp_pdev *pdev,
7361 					       uint8_t *peer_mac_addr)
7362 {
7363 }
7364 #endif
7365 
#ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
/*
 * dp_peer_hw_txrx_stats_init() - Initialize hw_txrx_stats_en in dp_txrx_peer
 * @soc: Datapath soc handle
 * @txrx_peer: Datapath txrx peer handle
 *
 * Return: none
 */
static inline
void dp_peer_hw_txrx_stats_init(struct dp_soc *soc,
				struct dp_txrx_peer *txrx_peer)
{
	/* Mirror the ini-configured vdev stats HW offload setting */
	txrx_peer->hw_txrx_stats_en =
		wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx);
}
#else
static inline
void dp_peer_hw_txrx_stats_init(struct dp_soc *soc,
				struct dp_txrx_peer *txrx_peer)
{
	/* HW offload not compiled in: stats always maintained by host */
	txrx_peer->hw_txrx_stats_en = 0;
}
#endif
7389 
7390 static QDF_STATUS dp_txrx_peer_detach(struct dp_soc *soc, struct dp_peer *peer)
7391 {
7392 	struct dp_txrx_peer *txrx_peer;
7393 	struct dp_pdev *pdev;
7394 
7395 	/* dp_txrx_peer exists for mld peer and legacy peer */
7396 	if (peer->txrx_peer) {
7397 		txrx_peer = peer->txrx_peer;
7398 		peer->txrx_peer = NULL;
7399 		pdev = txrx_peer->vdev->pdev;
7400 
7401 		dp_peer_defrag_rx_tids_deinit(txrx_peer);
7402 		/*
7403 		 * Deallocate the extended stats contenxt
7404 		 */
7405 		dp_peer_delay_stats_ctx_dealloc(soc, txrx_peer);
7406 		dp_peer_rx_bufq_resources_deinit(txrx_peer);
7407 		dp_peer_jitter_stats_ctx_dealloc(pdev, txrx_peer);
7408 		dp_peer_sawf_stats_ctx_free(soc, txrx_peer);
7409 
7410 		qdf_mem_free(txrx_peer);
7411 	}
7412 
7413 	return QDF_STATUS_SUCCESS;
7414 }
7415 
7416 static QDF_STATUS dp_txrx_peer_attach(struct dp_soc *soc, struct dp_peer *peer)
7417 {
7418 	struct dp_txrx_peer *txrx_peer;
7419 	struct dp_pdev *pdev;
7420 
7421 	txrx_peer = (struct dp_txrx_peer *)qdf_mem_malloc(sizeof(*txrx_peer));
7422 
7423 	if (!txrx_peer)
7424 		return QDF_STATUS_E_NOMEM; /* failure */
7425 
7426 	txrx_peer->peer_id = HTT_INVALID_PEER;
7427 	/* initialize the peer_id */
7428 	txrx_peer->vdev = peer->vdev;
7429 	pdev = peer->vdev->pdev;
7430 
7431 	DP_STATS_INIT(txrx_peer);
7432 
7433 	dp_wds_ext_peer_init(txrx_peer);
7434 	dp_peer_rx_bufq_resources_init(txrx_peer);
7435 	dp_peer_hw_txrx_stats_init(soc, txrx_peer);
7436 	/*
7437 	 * Allocate peer extended stats context. Fall through in
7438 	 * case of failure as its not an implicit requirement to have
7439 	 * this object for regular statistics updates.
7440 	 */
7441 	if (dp_peer_delay_stats_ctx_alloc(soc, txrx_peer) !=
7442 					  QDF_STATUS_SUCCESS)
7443 		dp_warn("peer delay_stats ctx alloc failed");
7444 
7445 	/*
7446 	 * Alloctate memory for jitter stats. Fall through in
7447 	 * case of failure as its not an implicit requirement to have
7448 	 * this object for regular statistics updates.
7449 	 */
7450 	if (dp_peer_jitter_stats_ctx_alloc(pdev, txrx_peer) !=
7451 					   QDF_STATUS_SUCCESS)
7452 		dp_warn("peer jitter_stats ctx alloc failed");
7453 
7454 	dp_set_peer_isolation(txrx_peer, false);
7455 
7456 	dp_peer_defrag_rx_tids_init(txrx_peer);
7457 
7458 	if (dp_peer_sawf_stats_ctx_alloc(soc, txrx_peer) != QDF_STATUS_SUCCESS)
7459 		dp_warn("peer sawf stats alloc failed");
7460 
7461 	dp_txrx_peer_attach_add(soc, peer, txrx_peer);
7462 
7463 	return QDF_STATUS_SUCCESS;
7464 }
7465 
7466 static inline
7467 void dp_txrx_peer_stats_clr(struct dp_txrx_peer *txrx_peer)
7468 {
7469 	if (!txrx_peer)
7470 		return;
7471 
7472 	txrx_peer->tx_failed = 0;
7473 	txrx_peer->comp_pkt.num = 0;
7474 	txrx_peer->comp_pkt.bytes = 0;
7475 	txrx_peer->to_stack.num = 0;
7476 	txrx_peer->to_stack.bytes = 0;
7477 
7478 	DP_STATS_CLR(txrx_peer);
7479 	dp_peer_delay_stats_ctx_clr(txrx_peer);
7480 	dp_peer_jitter_stats_ctx_clr(txrx_peer);
7481 }
7482 
7483 /*
7484  * dp_peer_create_wifi3() - attach txrx peer
7485  * @soc_hdl: Datapath soc handle
7486  * @vdev_id: id of vdev
7487  * @peer_mac_addr: Peer MAC address
7488  * @peer_type: link or MLD peer type
7489  *
7490  * Return: 0 on success, -1 on failure
7491  */
7492 static QDF_STATUS
7493 dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
7494 		     uint8_t *peer_mac_addr, enum cdp_peer_type peer_type)
7495 {
7496 	struct dp_peer *peer;
7497 	int i;
7498 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7499 	struct dp_pdev *pdev;
7500 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
7501 	struct dp_vdev *vdev = NULL;
7502 
7503 	if (!peer_mac_addr)
7504 		return QDF_STATUS_E_FAILURE;
7505 
7506 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
7507 
7508 	if (!vdev)
7509 		return QDF_STATUS_E_FAILURE;
7510 
7511 	pdev = vdev->pdev;
7512 	soc = pdev->soc;
7513 
7514 	/*
7515 	 * If a peer entry with given MAC address already exists,
7516 	 * reuse the peer and reset the state of peer.
7517 	 */
7518 	peer = dp_peer_can_reuse(vdev, peer_mac_addr, peer_type);
7519 
7520 	if (peer) {
7521 		qdf_atomic_init(&peer->is_default_route_set);
7522 		dp_peer_cleanup(vdev, peer);
7523 
7524 		dp_peer_vdev_list_add(soc, vdev, peer);
7525 		dp_peer_find_hash_add(soc, peer);
7526 
7527 		dp_peer_rx_tids_create(peer);
7528 		if (IS_MLO_DP_MLD_PEER(peer))
7529 			dp_mld_peer_init_link_peers_info(peer);
7530 
7531 		qdf_spin_lock_bh(&soc->ast_lock);
7532 		dp_peer_delete_ast_entries(soc, peer);
7533 		qdf_spin_unlock_bh(&soc->ast_lock);
7534 
7535 		if ((vdev->opmode == wlan_op_mode_sta) &&
7536 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
7537 		     QDF_MAC_ADDR_SIZE)) {
7538 			ast_type = CDP_TXRX_AST_TYPE_SELF;
7539 		}
7540 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
7541 
7542 		peer->valid = 1;
7543 		peer->is_tdls_peer = false;
7544 		dp_local_peer_id_alloc(pdev, peer);
7545 
7546 		qdf_spinlock_create(&peer->peer_info_lock);
7547 
7548 		DP_STATS_INIT(peer);
7549 
7550 		/*
7551 		 * In tx_monitor mode, filter may be set for unassociated peer
7552 		 * when unassociated peer get associated peer need to
7553 		 * update tx_cap_enabled flag to support peer filter.
7554 		 */
7555 		if (!IS_MLO_DP_MLD_PEER(peer)) {
7556 			dp_monitor_peer_tx_capture_filter_check(pdev, peer);
7557 			dp_monitor_peer_reset_stats(soc, peer);
7558 		}
7559 
7560 		if (peer->txrx_peer) {
7561 			dp_peer_rx_bufq_resources_init(peer->txrx_peer);
7562 			dp_txrx_peer_stats_clr(peer->txrx_peer);
7563 			dp_set_peer_isolation(peer->txrx_peer, false);
7564 			dp_wds_ext_peer_init(peer->txrx_peer);
7565 			dp_peer_hw_txrx_stats_init(soc, peer->txrx_peer);
7566 		}
7567 
7568 		dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
7569 
7570 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7571 		return QDF_STATUS_SUCCESS;
7572 	} else {
7573 		/*
7574 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
7575 		 * need to remove the AST entry which was earlier added as a WDS
7576 		 * entry.
7577 		 * If an AST entry exists, but no peer entry exists with a given
7578 		 * MAC addresses, we could deduce it as a WDS entry
7579 		 */
7580 		dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
7581 	}
7582 
7583 #ifdef notyet
7584 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
7585 		soc->mempool_ol_ath_peer);
7586 #else
7587 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
7588 #endif
7589 	wlan_minidump_log(peer,
7590 			  sizeof(*peer),
7591 			  soc->ctrl_psoc,
7592 			  WLAN_MD_DP_PEER, "dp_peer");
7593 	if (!peer) {
7594 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7595 		return QDF_STATUS_E_FAILURE; /* failure */
7596 	}
7597 
7598 	qdf_mem_zero(peer, sizeof(struct dp_peer));
7599 
7600 	/* store provided params */
7601 	peer->vdev = vdev;
7602 
7603 	/* initialize the peer_id */
7604 	peer->peer_id = HTT_INVALID_PEER;
7605 
7606 	qdf_mem_copy(
7607 		&peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE);
7608 
7609 	DP_PEER_SET_TYPE(peer, peer_type);
7610 	if (IS_MLO_DP_MLD_PEER(peer)) {
7611 		if (dp_txrx_peer_attach(soc, peer) !=
7612 				QDF_STATUS_SUCCESS)
7613 			goto fail; /* failure */
7614 
7615 		dp_mld_peer_init_link_peers_info(peer);
7616 	} else if (dp_monitor_peer_attach(soc, peer) !=
7617 				QDF_STATUS_SUCCESS)
7618 		dp_warn("peer monitor ctx alloc failed");
7619 
7620 	TAILQ_INIT(&peer->ast_entry_list);
7621 
7622 	/* get the vdev reference for new peer */
7623 	dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CHILD);
7624 
7625 	if ((vdev->opmode == wlan_op_mode_sta) &&
7626 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
7627 			 QDF_MAC_ADDR_SIZE)) {
7628 		ast_type = CDP_TXRX_AST_TYPE_SELF;
7629 	}
7630 	qdf_spinlock_create(&peer->peer_state_lock);
7631 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
7632 	qdf_spinlock_create(&peer->peer_info_lock);
7633 
7634 	/* reset the ast index to flowid table */
7635 	dp_peer_reset_flowq_map(peer);
7636 
7637 	qdf_atomic_init(&peer->ref_cnt);
7638 
7639 	for (i = 0; i < DP_MOD_ID_MAX; i++)
7640 		qdf_atomic_init(&peer->mod_refs[i]);
7641 
7642 	/* keep one reference for attach */
7643 	qdf_atomic_inc(&peer->ref_cnt);
7644 	qdf_atomic_inc(&peer->mod_refs[DP_MOD_ID_CONFIG]);
7645 
7646 	dp_peer_vdev_list_add(soc, vdev, peer);
7647 
7648 	/* TODO: See if hash based search is required */
7649 	dp_peer_find_hash_add(soc, peer);
7650 
7651 	/* Initialize the peer state */
7652 	peer->state = OL_TXRX_PEER_STATE_DISC;
7653 
7654 	dp_info("vdev %pK created peer %pK ("QDF_MAC_ADDR_FMT") ref_cnt: %d",
7655 		vdev, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
7656 		qdf_atomic_read(&peer->ref_cnt));
7657 	/*
7658 	 * For every peer MAp message search and set if bss_peer
7659 	 */
7660 	if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
7661 			QDF_MAC_ADDR_SIZE) == 0 &&
7662 			(wlan_op_mode_sta != vdev->opmode)) {
7663 		dp_info("vdev bss_peer!!");
7664 		peer->bss_peer = 1;
7665 		if (peer->txrx_peer)
7666 			peer->txrx_peer->bss_peer = 1;
7667 	}
7668 
7669 	if (wlan_op_mode_sta == vdev->opmode &&
7670 	    qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
7671 			QDF_MAC_ADDR_SIZE) == 0) {
7672 		peer->sta_self_peer = 1;
7673 	}
7674 
7675 	dp_peer_rx_tids_create(peer);
7676 
7677 	peer->valid = 1;
7678 	dp_local_peer_id_alloc(pdev, peer);
7679 	DP_STATS_INIT(peer);
7680 
7681 	if (dp_peer_sawf_ctx_alloc(soc, peer) != QDF_STATUS_SUCCESS)
7682 		dp_warn("peer sawf context alloc failed");
7683 
7684 	dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
7685 
7686 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7687 
7688 	return QDF_STATUS_SUCCESS;
7689 fail:
7690 	qdf_mem_free(peer);
7691 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7692 
7693 	return QDF_STATUS_E_FAILURE;
7694 }
7695 
7696 static QDF_STATUS dp_peer_legacy_setup(struct dp_soc *soc, struct dp_peer *peer)
7697 {
7698 	/* txrx_peer might exist already in peer reuse case */
7699 	if (peer->txrx_peer)
7700 		return QDF_STATUS_SUCCESS;
7701 
7702 	if (dp_txrx_peer_attach(soc, peer) !=
7703 				QDF_STATUS_SUCCESS) {
7704 		dp_err("peer txrx ctx alloc failed");
7705 		return QDF_STATUS_E_FAILURE;
7706 	}
7707 
7708 	return QDF_STATUS_SUCCESS;
7709 }
7710 
7711 #ifdef WLAN_FEATURE_11BE_MLO
/*
 * dp_peer_mlo_setup() - associate a link peer with its MLD peer
 * @soc: Datapath soc handle
 * @peer: link peer being set up
 * @vdev_id: id of the vdev the link peer belongs to
 * @setup_info: MLO setup info from the control path; NULL (or a NULL
 *	mld_peer_mac) means a non-MLO connection and the call is a no-op
 *
 * Creates the MLD peer on the first link, shares the MLD peer's rx_tid
 * with subsequent links, and re-homes the MLD peer's vdev when the
 * primary link differs from the first link.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE if the MLD peer
 *	   cannot be found after creation
 */
QDF_STATUS dp_peer_mlo_setup(
			struct dp_soc *soc,
			struct dp_peer *peer,
			uint8_t vdev_id,
			struct cdp_peer_setup_info *setup_info)
{
	struct dp_peer *mld_peer = NULL;

	/* Non-MLO connection, do nothing */
	if (!setup_info || !setup_info->mld_peer_mac)
		return QDF_STATUS_SUCCESS;

	dp_info("link peer:" QDF_MAC_ADDR_FMT "mld peer:" QDF_MAC_ADDR_FMT
		"assoc_link %d, primary_link %d",
		QDF_MAC_ADDR_REF(peer->mac_addr.raw),
		QDF_MAC_ADDR_REF(setup_info->mld_peer_mac),
		setup_info->is_first_link,
		setup_info->is_primary_link);

	/* if this is the first link peer */
	if (setup_info->is_first_link)
		/* create MLD peer; NOTE(review): return value is not
		 * checked — failure is caught by the hash-find below
		 */
		dp_peer_create_wifi3((struct cdp_soc_t *)soc,
				     vdev_id,
				     setup_info->mld_peer_mac,
				     CDP_MLD_PEER_TYPE);

	peer->first_link = setup_info->is_first_link;
	peer->primary_link = setup_info->is_primary_link;
	mld_peer = dp_mld_peer_find_hash_find(soc,
					      setup_info->mld_peer_mac,
					      0, vdev_id, DP_MOD_ID_CDP);
	if (mld_peer) {
		if (setup_info->is_first_link) {
			/* assign rx_tid to mld peer */
			mld_peer->rx_tid = peer->rx_tid;
			/* no cdp_peer_setup for MLD peer,
			 * set it for addba processing
			 */
			qdf_atomic_set(&mld_peer->is_default_route_set, 1);
		} else {
			/* free link peer original rx_tids mem */
			dp_peer_rx_tids_destroy(peer);
			/* assign mld peer rx_tid to link peer */
			peer->rx_tid = mld_peer->rx_tid;
		}

		if (setup_info->is_primary_link &&
		    !setup_info->is_first_link) {
			/*
			 * if first link is not the primary link,
			 * then need to change mld_peer->vdev as
			 * primary link dp_vdev is not same one
			 * during mld peer creation.
			 */

			/* release the ref to original dp_vdev */
			dp_vdev_unref_delete(soc, mld_peer->vdev,
					     DP_MOD_ID_CHILD);
			/*
			 * get the ref to new dp_vdev,
			 * increase dp_vdev ref_cnt
			 */
			mld_peer->vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
							       DP_MOD_ID_CHILD);
			/* NOTE(review): mld_peer->txrx_peer is dereferenced
			 * without a NULL check — confirm it always exists
			 * for an MLD peer at this point.
			 */
			mld_peer->txrx_peer->vdev = mld_peer->vdev;
		}

		/* associate mld and link peer */
		dp_link_peer_add_mld_peer(peer, mld_peer);
		dp_mld_peer_add_link_peer(mld_peer, peer);

		mld_peer->txrx_peer->mld_peer = 1;
		dp_peer_unref_delete(mld_peer, DP_MOD_ID_CDP);
	} else {
		peer->mld_peer = NULL;
		dp_err("mld peer" QDF_MAC_ADDR_FMT "not found!",
		       QDF_MAC_ADDR_REF(setup_info->mld_peer_mac));
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
7795 
7796 /*
7797  * dp_mlo_peer_authorize() - authorize MLO peer
7798  * @soc: soc handle
7799  * @peer: pointer to link peer
7800  *
7801  * return void
7802  */
7803 static void dp_mlo_peer_authorize(struct dp_soc *soc,
7804 				  struct dp_peer *peer)
7805 {
7806 	int i;
7807 	struct dp_peer *link_peer = NULL;
7808 	struct dp_peer *mld_peer = peer->mld_peer;
7809 	struct dp_mld_link_peers link_peers_info;
7810 
7811 	if (!mld_peer)
7812 		return;
7813 
7814 	/* get link peers with reference */
7815 	dp_get_link_peers_ref_from_mld_peer(soc, mld_peer,
7816 					    &link_peers_info,
7817 					    DP_MOD_ID_CDP);
7818 
7819 	for (i = 0; i < link_peers_info.num_links; i++) {
7820 		link_peer = link_peers_info.link_peers[i];
7821 
7822 		if (!link_peer->authorize) {
7823 			dp_release_link_peers_ref(&link_peers_info,
7824 						  DP_MOD_ID_CDP);
7825 			mld_peer->authorize = false;
7826 			return;
7827 		}
7828 	}
7829 
7830 	/* if we are here all link peers are authorized,
7831 	 * authorize ml_peer also
7832 	 */
7833 	mld_peer->authorize = true;
7834 
7835 	/* release link peers reference */
7836 	dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
7837 }
7838 #endif
7839 
7840 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
7841 				  enum cdp_host_reo_dest_ring *reo_dest,
7842 				  bool *hash_based)
7843 {
7844 	struct dp_soc *soc;
7845 	struct dp_pdev *pdev;
7846 
7847 	pdev = vdev->pdev;
7848 	soc = pdev->soc;
7849 	/*
7850 	 * hash based steering is disabled for Radios which are offloaded
7851 	 * to NSS
7852 	 */
7853 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
7854 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
7855 
7856 	/*
7857 	 * Below line of code will ensure the proper reo_dest ring is chosen
7858 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
7859 	 */
7860 	*reo_dest = pdev->reo_dest;
7861 }
7862 
7863 #ifdef IPA_OFFLOAD
7864 /**
7865  * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
7866  * @vdev: Virtual device
7867  *
7868  * Return: true if the vdev is of subtype P2P
7869  *	   false if the vdev is of any other subtype
7870  */
7871 static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
7872 {
7873 	if (vdev->subtype == wlan_op_subtype_p2p_device ||
7874 	    vdev->subtype == wlan_op_subtype_p2p_cli ||
7875 	    vdev->subtype == wlan_op_subtype_p2p_go)
7876 		return true;
7877 
7878 	return false;
7879 }
7880 
7881 /*
7882  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
7883  * @vdev: Datapath VDEV handle
7884  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
7885  * @hash_based: pointer to hash value (enabled/disabled) to be populated
7886  *
7887  * If IPA is enabled in ini, for SAP mode, disable hash based
7888  * steering, use default reo_dst ring for RX. Use config values for other modes.
7889  * Return: None
7890  */
7891 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
7892 				       enum cdp_host_reo_dest_ring *reo_dest,
7893 				       bool *hash_based)
7894 {
7895 	struct dp_soc *soc;
7896 	struct dp_pdev *pdev;
7897 
7898 	pdev = vdev->pdev;
7899 	soc = pdev->soc;
7900 
7901 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
7902 
7903 	/* For P2P-GO interfaces we do not need to change the REO
7904 	 * configuration even if IPA config is enabled
7905 	 */
7906 	if (dp_is_vdev_subtype_p2p(vdev))
7907 		return;
7908 
7909 	/*
7910 	 * If IPA is enabled, disable hash-based flow steering and set
7911 	 * reo_dest_ring_4 as the REO ring to receive packets on.
7912 	 * IPA is configured to reap reo_dest_ring_4.
7913 	 *
7914 	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
7915 	 * value enum value is from 1 - 4.
7916 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
7917 	 */
7918 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
7919 		if (vdev->opmode == wlan_op_mode_ap) {
7920 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
7921 			*hash_based = 0;
7922 		} else if (vdev->opmode == wlan_op_mode_sta &&
7923 			   dp_ipa_is_mdm_platform()) {
7924 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
7925 		}
7926 	}
7927 }
7928 
7929 #else
7930 
7931 /*
7932  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
7933  * @vdev: Datapath VDEV handle
7934  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
7935  * @hash_based: pointer to hash value (enabled/disabled) to be populated
7936  *
7937  * Use system config values for hash based steering.
7938  * Return: None
7939  */
7940 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
7941 				       enum cdp_host_reo_dest_ring *reo_dest,
7942 				       bool *hash_based)
7943 {
7944 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
7945 }
7946 #endif /* IPA_OFFLOAD */
7947 
7948 /*
7949  * dp_peer_setup_wifi3() - initialize the peer
7950  * @soc_hdl: soc handle object
7951  * @vdev_id : vdev_id of vdev object
7952  * @peer_mac: Peer's mac address
7953  * @peer_setup_info: peer setup info for MLO
7954  *
7955  * Return: QDF_STATUS
7956  */
7957 static QDF_STATUS
7958 dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
7959 		    uint8_t *peer_mac,
7960 		    struct cdp_peer_setup_info *setup_info)
7961 {
7962 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7963 	struct dp_pdev *pdev;
7964 	bool hash_based = 0;
7965 	enum cdp_host_reo_dest_ring reo_dest;
7966 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7967 	struct dp_vdev *vdev = NULL;
7968 	struct dp_peer *peer =
7969 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
7970 					       DP_MOD_ID_CDP);
7971 	struct dp_peer *mld_peer = NULL;
7972 	enum wlan_op_mode vdev_opmode;
7973 	uint8_t lmac_peer_id_msb = 0;
7974 
7975 	if (!peer)
7976 		return QDF_STATUS_E_FAILURE;
7977 
7978 	vdev = peer->vdev;
7979 	if (!vdev) {
7980 		status = QDF_STATUS_E_FAILURE;
7981 		goto fail;
7982 	}
7983 
7984 	/* save vdev related member in case vdev freed */
7985 	vdev_opmode = vdev->opmode;
7986 	pdev = vdev->pdev;
7987 	dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based);
7988 
7989 	dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u",
7990 		pdev->pdev_id, vdev->vdev_id,
7991 		vdev->opmode, hash_based, reo_dest);
7992 
7993 	/*
7994 	 * There are corner cases where the AD1 = AD2 = "VAPs address"
7995 	 * i.e both the devices have same MAC address. In these
7996 	 * cases we want such pkts to be processed in NULL Q handler
7997 	 * which is REO2TCL ring. for this reason we should
7998 	 * not setup reo_queues and default route for bss_peer.
7999 	 */
8000 	if (!IS_MLO_DP_MLD_PEER(peer))
8001 		dp_monitor_peer_tx_init(pdev, peer);
8002 
8003 	if (!setup_info)
8004 		if (dp_peer_legacy_setup(soc, peer) !=
8005 				QDF_STATUS_SUCCESS) {
8006 			status = QDF_STATUS_E_RESOURCES;
8007 			goto fail;
8008 		}
8009 
8010 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
8011 		status = QDF_STATUS_E_FAILURE;
8012 		goto fail;
8013 	}
8014 
8015 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
8016 		/* TODO: Check the destination ring number to be passed to FW */
8017 		soc->cdp_soc.ol_ops->peer_set_default_routing(
8018 				soc->ctrl_psoc,
8019 				peer->vdev->pdev->pdev_id,
8020 				peer->mac_addr.raw,
8021 				peer->vdev->vdev_id, hash_based, reo_dest,
8022 				lmac_peer_id_msb);
8023 	}
8024 
8025 	qdf_atomic_set(&peer->is_default_route_set, 1);
8026 
8027 	status = dp_peer_mlo_setup(soc, peer, vdev->vdev_id, setup_info);
8028 	if (QDF_IS_STATUS_ERROR(status)) {
8029 		dp_peer_err("peer mlo setup failed");
8030 		qdf_assert_always(0);
8031 	}
8032 
8033 	if (vdev_opmode != wlan_op_mode_monitor) {
8034 		/* In case of MLD peer, switch peer to mld peer and
8035 		 * do peer_rx_init.
8036 		 */
8037 		if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
8038 		    IS_MLO_DP_LINK_PEER(peer)) {
8039 			if (setup_info && setup_info->is_first_link) {
8040 				mld_peer = DP_GET_MLD_PEER_FROM_PEER(peer);
8041 				if (mld_peer)
8042 					dp_peer_rx_init(pdev, mld_peer);
8043 				else
8044 					dp_peer_err("MLD peer null. Primary link peer:%pK", peer);
8045 			}
8046 		} else {
8047 			dp_peer_rx_init(pdev, peer);
8048 		}
8049 	}
8050 
8051 	if (!IS_MLO_DP_MLD_PEER(peer))
8052 		dp_peer_ppdu_delayed_ba_init(peer);
8053 
8054 fail:
8055 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8056 	return status;
8057 }
8058 
8059 /*
8060  * dp_cp_peer_del_resp_handler - Handle the peer delete response
8061  * @soc_hdl: Datapath SOC handle
8062  * @vdev_id: id of virtual device object
8063  * @mac_addr: Mac address of the peer
8064  *
8065  * Return: QDF_STATUS
8066  */
8067 static QDF_STATUS dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl,
8068 					      uint8_t vdev_id,
8069 					      uint8_t *mac_addr)
8070 {
8071 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8072 	struct dp_ast_entry  *ast_entry = NULL;
8073 	txrx_ast_free_cb cb = NULL;
8074 	void *cookie;
8075 
8076 	if (soc->ast_offload_support)
8077 		return QDF_STATUS_E_INVAL;
8078 
8079 	qdf_spin_lock_bh(&soc->ast_lock);
8080 
8081 	ast_entry =
8082 		dp_peer_ast_hash_find_by_vdevid(soc, mac_addr,
8083 						vdev_id);
8084 
8085 	/* in case of qwrap we have multiple BSS peers
8086 	 * with same mac address
8087 	 *
8088 	 * AST entry for this mac address will be created
8089 	 * only for one peer hence it will be NULL here
8090 	 */
8091 	if ((!ast_entry || !ast_entry->delete_in_progress) ||
8092 	    (ast_entry->peer_id != HTT_INVALID_PEER)) {
8093 		qdf_spin_unlock_bh(&soc->ast_lock);
8094 		return QDF_STATUS_E_FAILURE;
8095 	}
8096 
8097 	if (ast_entry->is_mapped)
8098 		soc->ast_table[ast_entry->ast_idx] = NULL;
8099 
8100 	DP_STATS_INC(soc, ast.deleted, 1);
8101 	dp_peer_ast_hash_remove(soc, ast_entry);
8102 
8103 	cb = ast_entry->callback;
8104 	cookie = ast_entry->cookie;
8105 	ast_entry->callback = NULL;
8106 	ast_entry->cookie = NULL;
8107 
8108 	soc->num_ast_entries--;
8109 	qdf_spin_unlock_bh(&soc->ast_lock);
8110 
8111 	if (cb) {
8112 		cb(soc->ctrl_psoc,
8113 		   dp_soc_to_cdp_soc(soc),
8114 		   cookie,
8115 		   CDP_TXRX_AST_DELETED);
8116 	}
8117 	qdf_mem_free(ast_entry);
8118 
8119 	return QDF_STATUS_SUCCESS;
8120 }
8121 
8122 /*
8123  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
8124  * @txrx_soc: cdp soc handle
8125  * @ac: Access category
8126  * @value: timeout value in millisec
8127  *
8128  * Return: void
8129  */
8130 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
8131 				    uint8_t ac, uint32_t value)
8132 {
8133 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
8134 
8135 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
8136 }
8137 
8138 /*
8139  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
8140  * @txrx_soc: cdp soc handle
8141  * @ac: access category
8142  * @value: timeout value in millisec
8143  *
8144  * Return: void
8145  */
8146 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
8147 				    uint8_t ac, uint32_t *value)
8148 {
8149 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
8150 
8151 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
8152 }
8153 
8154 /*
8155  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
8156  * @txrx_soc: cdp soc handle
8157  * @pdev_id: id of physical device object
8158  * @val: reo destination ring index (1 - 4)
8159  *
8160  * Return: QDF_STATUS
8161  */
8162 static QDF_STATUS
8163 dp_set_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id,
8164 		     enum cdp_host_reo_dest_ring val)
8165 {
8166 	struct dp_pdev *pdev =
8167 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
8168 						   pdev_id);
8169 
8170 	if (pdev) {
8171 		pdev->reo_dest = val;
8172 		return QDF_STATUS_SUCCESS;
8173 	}
8174 
8175 	return QDF_STATUS_E_FAILURE;
8176 }
8177 
8178 /*
8179  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
8180  * @txrx_soc: cdp soc handle
8181  * @pdev_id: id of physical device object
8182  *
8183  * Return: reo destination ring index
8184  */
8185 static enum cdp_host_reo_dest_ring
8186 dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id)
8187 {
8188 	struct dp_pdev *pdev =
8189 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
8190 						   pdev_id);
8191 
8192 	if (pdev)
8193 		return pdev->reo_dest;
8194 	else
8195 		return cdp_host_reo_dest_ring_unknown;
8196 }
8197 
#ifdef WLAN_SUPPORT_MSCS
/*
 * dp_record_mscs_params - Record MSCS parameters sent by the STA in
 * the MSCS Request to the AP. The AP uses these parameters when
 * classifying downlink MSDUs so they are sent with the correct
 * User priority.
 * @soc_hdl: Datapath soc handle
 * @peer_mac: STA Mac address
 * @vdev_id: ID of the vdev handle
 * @mscs_params: MSCS parameters obtained from the handshake
 * @active: Flag to set MSCS active/inactive
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_INVAL otherwise
 */
static QDF_STATUS
dp_record_mscs_params(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
		      uint8_t vdev_id, struct cdp_mscs_params *mscs_params,
		      bool active)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_peer *peer;
	QDF_STATUS status = QDF_STATUS_E_INVAL;

	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
				      DP_MOD_ID_CDP);
	if (!peer) {
		dp_err("Peer is NULL!");
		return QDF_STATUS_E_INVAL;
	}

	/* Termination request: just mark the procedure inactive */
	if (!active) {
		dp_info("MSCS Procedure is terminated");
		peer->mscs_active = active;
		goto done;
	}

	if (mscs_params->classifier_type == IEEE80211_TCLAS_MASK_CLA_TYPE_4) {
		/* The same parameter set is mirrored into both the IPV4
		 * and IPV6 databases.
		 */
		peer->mscs_ipv4_parameter.user_priority_bitmap =
			mscs_params->user_pri_bitmap;
		peer->mscs_ipv4_parameter.user_priority_limit =
			mscs_params->user_pri_limit;
		peer->mscs_ipv4_parameter.classifier_mask =
			mscs_params->classifier_mask;

		peer->mscs_ipv6_parameter.user_priority_bitmap =
			mscs_params->user_pri_bitmap;
		peer->mscs_ipv6_parameter.user_priority_limit =
			mscs_params->user_pri_limit;
		peer->mscs_ipv6_parameter.classifier_mask =
			mscs_params->classifier_mask;

		peer->mscs_active = 1;
		dp_info("\n\tMSCS Procedure request based parameters for "QDF_MAC_ADDR_FMT"\n"
			"\tClassifier_type = %d\tUser priority bitmap = %x\n"
			"\tUser priority limit = %x\tClassifier mask = %x",
			QDF_MAC_ADDR_REF(peer_mac),
			mscs_params->classifier_type,
			peer->mscs_ipv4_parameter.user_priority_bitmap,
			peer->mscs_ipv4_parameter.user_priority_limit,
			peer->mscs_ipv4_parameter.classifier_mask);
	}

	status = QDF_STATUS_SUCCESS;
done:
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return status;
}
#endif
8268 
8269 /*
8270  * dp_get_sec_type() - Get the security type
8271  * @soc: soc handle
8272  * @vdev_id: id of dp handle
8273  * @peer_mac: mac of datapath PEER handle
8274  * @sec_idx:    Security id (mcast, ucast)
8275  *
8276  * return sec_type: Security type
8277  */
8278 static int dp_get_sec_type(struct cdp_soc_t *soc, uint8_t vdev_id,
8279 			   uint8_t *peer_mac, uint8_t sec_idx)
8280 {
8281 	int sec_type = 0;
8282 	struct dp_peer *peer =
8283 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
8284 						       peer_mac, 0, vdev_id,
8285 						       DP_MOD_ID_CDP);
8286 
8287 	if (!peer) {
8288 		dp_cdp_err("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
8289 		return sec_type;
8290 	}
8291 
8292 	if (!peer->txrx_peer) {
8293 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8294 		dp_peer_debug("%pK: txrx peer is NULL!\n", soc);
8295 		return sec_type;
8296 	}
8297 	sec_type = peer->txrx_peer->security[sec_idx].sec_type;
8298 
8299 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8300 	return sec_type;
8301 }
8302 
8303 /*
8304  * dp_peer_authorize() - authorize txrx peer
8305  * @soc: soc handle
8306  * @vdev_id: id of dp handle
8307  * @peer_mac: mac of datapath PEER handle
8308  * @authorize
8309  *
8310  */
8311 static QDF_STATUS
8312 dp_peer_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8313 		  uint8_t *peer_mac, uint32_t authorize)
8314 {
8315 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8316 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8317 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac,
8318 							      0, vdev_id,
8319 							      DP_MOD_ID_CDP);
8320 
8321 	if (!peer) {
8322 		dp_cdp_debug("%pK: Peer is NULL!\n", soc);
8323 		status = QDF_STATUS_E_FAILURE;
8324 	} else {
8325 		peer->authorize = authorize ? 1 : 0;
8326 		if (peer->txrx_peer)
8327 			peer->txrx_peer->authorize = peer->authorize;
8328 
8329 		if (!peer->authorize)
8330 			dp_peer_flush_frags(soc_hdl, vdev_id, peer_mac);
8331 
8332 		dp_mlo_peer_authorize(soc, peer);
8333 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8334 	}
8335 
8336 	return status;
8337 }
8338 
8339 /*
8340  * dp_peer_get_authorize() - get peer authorize status
8341  * @soc: soc handle
8342  * @vdev_id: id of dp handle
8343  * @peer_mac: mac of datapath PEER handle
8344  *
8345  * Retusn: true is peer is authorized, false otherwise
8346  */
8347 static bool
8348 dp_peer_get_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8349 		      uint8_t *peer_mac)
8350 {
8351 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8352 	bool authorize = false;
8353 	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
8354 						      0, vdev_id,
8355 						      DP_MOD_ID_CDP);
8356 
8357 	if (!peer) {
8358 		dp_cdp_debug("%pK: Peer is NULL!\n", soc);
8359 		return authorize;
8360 	}
8361 
8362 	authorize = peer->authorize;
8363 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8364 
8365 	return authorize;
8366 }
8367 
8368 /**
8369  * dp_vdev_unref_delete() - check and process vdev delete
8370  * @soc : DP specific soc pointer
8371  * @vdev: DP specific vdev pointer
8372  * @mod_id: module id
8373  *
8374  */
8375 void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev,
8376 			  enum dp_mod_id mod_id)
8377 {
8378 	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
8379 	void *vdev_delete_context = NULL;
8380 	uint8_t vdev_id = vdev->vdev_id;
8381 	struct dp_pdev *pdev = vdev->pdev;
8382 	struct dp_vdev *tmp_vdev = NULL;
8383 	uint8_t found = 0;
8384 
8385 	QDF_ASSERT(qdf_atomic_dec_return(&vdev->mod_refs[mod_id]) >= 0);
8386 
8387 	/* Return if this is not the last reference*/
8388 	if (!qdf_atomic_dec_and_test(&vdev->ref_cnt))
8389 		return;
8390 
8391 	/*
8392 	 * This should be set as last reference need to released
8393 	 * after cdp_vdev_detach() is called
8394 	 *
8395 	 * if this assert is hit there is a ref count issue
8396 	 */
8397 	QDF_ASSERT(vdev->delete.pending);
8398 
8399 	vdev_delete_cb = vdev->delete.callback;
8400 	vdev_delete_context = vdev->delete.context;
8401 
8402 	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")- its last peer is done",
8403 		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
8404 
8405 	if (wlan_op_mode_monitor == vdev->opmode) {
8406 		dp_monitor_vdev_delete(soc, vdev);
8407 		goto free_vdev;
8408 	}
8409 
8410 	/* all peers are gone, go ahead and delete it */
8411 	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
8412 			FLOW_TYPE_VDEV, vdev_id);
8413 	dp_tx_vdev_detach(vdev);
8414 	dp_monitor_vdev_detach(vdev);
8415 
8416 free_vdev:
8417 	qdf_spinlock_destroy(&vdev->peer_list_lock);
8418 
8419 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
8420 	TAILQ_FOREACH(tmp_vdev, &soc->inactive_vdev_list,
8421 		      inactive_list_elem) {
8422 		if (tmp_vdev == vdev) {
8423 			found = 1;
8424 			break;
8425 		}
8426 	}
8427 	if (found)
8428 		TAILQ_REMOVE(&soc->inactive_vdev_list, vdev,
8429 			     inactive_list_elem);
8430 	/* delete this peer from the list */
8431 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
8432 
8433 	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")",
8434 		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
8435 	wlan_minidump_remove(vdev, sizeof(*vdev), soc->ctrl_psoc,
8436 			     WLAN_MD_DP_VDEV, "dp_vdev");
8437 	qdf_mem_free(vdev);
8438 	vdev = NULL;
8439 
8440 	if (vdev_delete_cb)
8441 		vdev_delete_cb(vdev_delete_context);
8442 }
8443 
8444 qdf_export_symbol(dp_vdev_unref_delete);
8445 
8446 /*
8447  * dp_peer_unref_delete() - unref and delete peer
8448  * @peer_handle:    Datapath peer handle
8449  * @mod_id:         ID of module releasing reference
8450  *
8451  */
8452 void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id mod_id)
8453 {
8454 	struct dp_vdev *vdev = peer->vdev;
8455 	struct dp_pdev *pdev = vdev->pdev;
8456 	struct dp_soc *soc = pdev->soc;
8457 	uint16_t peer_id;
8458 	struct dp_peer *tmp_peer;
8459 	bool found = false;
8460 
8461 	if (mod_id > DP_MOD_ID_RX)
8462 		QDF_ASSERT(qdf_atomic_dec_return(&peer->mod_refs[mod_id]) >= 0);
8463 
8464 	/*
8465 	 * Hold the lock all the way from checking if the peer ref count
8466 	 * is zero until the peer references are removed from the hash
8467 	 * table and vdev list (if the peer ref count is zero).
8468 	 * This protects against a new HL tx operation starting to use the
8469 	 * peer object just after this function concludes it's done being used.
8470 	 * Furthermore, the lock needs to be held while checking whether the
8471 	 * vdev's list of peers is empty, to make sure that list is not modified
8472 	 * concurrently with the empty check.
8473 	 */
8474 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
8475 		peer_id = peer->peer_id;
8476 
8477 		/*
8478 		 * Make sure that the reference to the peer in
8479 		 * peer object map is removed
8480 		 */
8481 		QDF_ASSERT(peer_id == HTT_INVALID_PEER);
8482 
8483 		dp_peer_debug("Deleting peer %pK ("QDF_MAC_ADDR_FMT")", peer,
8484 			      QDF_MAC_ADDR_REF(peer->mac_addr.raw));
8485 
8486 		dp_peer_sawf_ctx_free(soc, peer);
8487 
8488 		wlan_minidump_remove(peer, sizeof(*peer), soc->ctrl_psoc,
8489 				     WLAN_MD_DP_PEER, "dp_peer");
8490 
8491 		qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
8492 		TAILQ_FOREACH(tmp_peer, &soc->inactive_peer_list,
8493 			      inactive_list_elem) {
8494 			if (tmp_peer == peer) {
8495 				found = 1;
8496 				break;
8497 			}
8498 		}
8499 		if (found)
8500 			TAILQ_REMOVE(&soc->inactive_peer_list, peer,
8501 				     inactive_list_elem);
8502 		/* delete this peer from the list */
8503 		qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
8504 		DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
8505 		dp_peer_update_state(soc, peer, DP_PEER_STATE_FREED);
8506 
8507 		/* cleanup the peer data */
8508 		dp_peer_cleanup(vdev, peer);
8509 
8510 		if (!IS_MLO_DP_MLD_PEER(peer))
8511 			dp_monitor_peer_detach(soc, peer);
8512 
8513 		qdf_spinlock_destroy(&peer->peer_state_lock);
8514 
8515 		dp_txrx_peer_detach(soc, peer);
8516 		qdf_mem_free(peer);
8517 
8518 		/*
8519 		 * Decrement ref count taken at peer create
8520 		 */
8521 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CHILD);
8522 	}
8523 }
8524 
8525 qdf_export_symbol(dp_peer_unref_delete);
8526 
8527 /*
8528  * dp_txrx_peer_unref_delete() - unref and delete peer
8529  * @handle: Datapath txrx ref handle
8530  * @mod_id: Module ID of the caller
8531  *
8532  */
8533 void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle,
8534 			       enum dp_mod_id mod_id)
8535 {
8536 	dp_peer_unref_delete((struct dp_peer *)handle, mod_id);
8537 }
8538 
8539 qdf_export_symbol(dp_txrx_peer_unref_delete);
8540 
8541 /*
8542  * dp_peer_delete_wifi3() – Delete txrx peer
8543  * @soc_hdl: soc handle
8544  * @vdev_id: id of dp handle
8545  * @peer_mac: mac of datapath PEER handle
8546  * @bitmap: bitmap indicating special handling of request.
8547  * @peer_type: peer type (link or MLD)
8548  *
8549  */
8550 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
8551 				       uint8_t vdev_id,
8552 				       uint8_t *peer_mac, uint32_t bitmap,
8553 				       enum cdp_peer_type peer_type)
8554 {
8555 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8556 	struct dp_peer *peer;
8557 	struct cdp_peer_info peer_info = { 0 };
8558 	struct dp_vdev *vdev = NULL;
8559 
8560 	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac,
8561 				 false, peer_type);
8562 	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
8563 
8564 	/* Peer can be null for monitor vap mac address */
8565 	if (!peer) {
8566 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
8567 			  "%s: Invalid peer\n", __func__);
8568 		return QDF_STATUS_E_FAILURE;
8569 	}
8570 
8571 	if (!peer->valid) {
8572 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8573 		dp_err("Invalid peer: "QDF_MAC_ADDR_FMT,
8574 			QDF_MAC_ADDR_REF(peer_mac));
8575 		return QDF_STATUS_E_ALREADY;
8576 	}
8577 
8578 	vdev = peer->vdev;
8579 
8580 	if (!vdev) {
8581 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8582 		return QDF_STATUS_E_FAILURE;
8583 	}
8584 
8585 	peer->valid = 0;
8586 
8587 	dp_init_info("%pK: peer %pK (" QDF_MAC_ADDR_FMT ")",
8588 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
8589 
8590 	dp_local_peer_id_free(peer->vdev->pdev, peer);
8591 
8592 	/* Drop all rx packets before deleting peer */
8593 	dp_clear_peer_internal(soc, peer);
8594 
8595 	qdf_spinlock_destroy(&peer->peer_info_lock);
8596 	dp_peer_multipass_list_remove(peer);
8597 
8598 	/* remove the reference to the peer from the hash table */
8599 	dp_peer_find_hash_remove(soc, peer);
8600 
8601 	dp_peer_vdev_list_remove(soc, vdev, peer);
8602 
8603 	dp_peer_mlo_delete(peer);
8604 
8605 	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
8606 	TAILQ_INSERT_TAIL(&soc->inactive_peer_list, peer,
8607 			  inactive_list_elem);
8608 	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
8609 
8610 	/*
8611 	 * Remove the reference added during peer_attach.
8612 	 * The peer will still be left allocated until the
8613 	 * PEER_UNMAP message arrives to remove the other
8614 	 * reference, added by the PEER_MAP message.
8615 	 */
8616 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
8617 	/*
8618 	 * Remove the reference taken above
8619 	 */
8620 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8621 
8622 	return QDF_STATUS_SUCCESS;
8623 }
8624 
#ifdef DP_RX_UDP_OVER_PEER_ROAM
/*
 * dp_update_roaming_peer_wifi3() - record the roaming peer's auth status
 * @soc_hdl: Datapath soc handle
 * @vdev_id: id of virtual device object
 * @peer_mac: Mac address of the roaming peer
 * @auth_status: authentication status to record on the vdev
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE if vdev not found
 */
static QDF_STATUS dp_update_roaming_peer_wifi3(struct cdp_soc_t *soc_hdl,
					       uint8_t vdev_id,
					       uint8_t *peer_mac,
					       uint32_t auth_status)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	qdf_mem_copy(vdev->roaming_peer_mac.raw, peer_mac,
		     QDF_MAC_ADDR_SIZE);
	vdev->roaming_peer_status = auth_status;
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}
#endif
8645 /*
8646  * dp_get_vdev_mac_addr_wifi3() – Detach txrx peer
8647  * @soc_hdl: Datapath soc handle
8648  * @vdev_id: virtual interface id
8649  *
8650  * Return: MAC address on success, NULL on failure.
8651  *
8652  */
8653 static uint8_t *dp_get_vdev_mac_addr_wifi3(struct cdp_soc_t *soc_hdl,
8654 					   uint8_t vdev_id)
8655 {
8656 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8657 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8658 						     DP_MOD_ID_CDP);
8659 	uint8_t *mac = NULL;
8660 
8661 	if (!vdev)
8662 		return NULL;
8663 
8664 	mac = vdev->mac_addr.raw;
8665 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8666 
8667 	return mac;
8668 }
8669 
8670 /*
8671  * dp_vdev_set_wds() - Enable per packet stats
8672  * @soc: DP soc handle
8673  * @vdev_id: id of DP VDEV handle
8674  * @val: value
8675  *
8676  * Return: none
8677  */
8678 static int dp_vdev_set_wds(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8679 			   uint32_t val)
8680 {
8681 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8682 	struct dp_vdev *vdev =
8683 		dp_vdev_get_ref_by_id((struct dp_soc *)soc, vdev_id,
8684 				      DP_MOD_ID_CDP);
8685 
8686 	if (!vdev)
8687 		return QDF_STATUS_E_FAILURE;
8688 
8689 	vdev->wds_enabled = val;
8690 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8691 
8692 	return QDF_STATUS_SUCCESS;
8693 }
8694 
8695 static int dp_get_opmode(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
8696 {
8697 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8698 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8699 						     DP_MOD_ID_CDP);
8700 	int opmode;
8701 
8702 	if (!vdev) {
8703 		dp_err("vdev for id %d is NULL", vdev_id);
8704 		return -EINVAL;
8705 	}
8706 	opmode = vdev->opmode;
8707 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8708 
8709 	return opmode;
8710 }
8711 
8712 /**
8713  * dp_get_os_rx_handles_from_vdev_wifi3() - Get os rx handles for a vdev
8714  * @soc_hdl: ol_txrx_soc_handle handle
8715  * @vdev_id: vdev id for which os rx handles are needed
8716  * @stack_fn_p: pointer to stack function pointer
8717  * @osif_handle_p: pointer to ol_osif_vdev_handle
8718  *
8719  * Return: void
8720  */
8721 static
8722 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_soc_t *soc_hdl,
8723 					  uint8_t vdev_id,
8724 					  ol_txrx_rx_fp *stack_fn_p,
8725 					  ol_osif_vdev_handle *osif_vdev_p)
8726 {
8727 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8728 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8729 						     DP_MOD_ID_CDP);
8730 
8731 	if (qdf_unlikely(!vdev)) {
8732 		*stack_fn_p = NULL;
8733 		*osif_vdev_p = NULL;
8734 		return;
8735 	}
8736 	*stack_fn_p = vdev->osif_rx_stack;
8737 	*osif_vdev_p = vdev->osif_vdev;
8738 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8739 }
8740 
8741 /**
8742  * dp_get_ctrl_pdev_from_vdev() - Get control pdev of vdev
8743  * @soc_hdl: datapath soc handle
8744  * @vdev_id: virtual device/interface id
8745  *
8746  * Return: Handle to control pdev
8747  */
8748 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(
8749 						struct cdp_soc_t *soc_hdl,
8750 						uint8_t vdev_id)
8751 {
8752 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8753 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8754 						     DP_MOD_ID_CDP);
8755 	struct dp_pdev *pdev;
8756 
8757 	if (!vdev)
8758 		return NULL;
8759 
8760 	pdev = vdev->pdev;
8761 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8762 	return pdev ? (struct cdp_cfg *)pdev->wlan_cfg_ctx : NULL;
8763 }
8764 
8765 /**
8766  * dp_get_tx_pending() - read pending tx
8767  * @pdev_handle: Datapath PDEV handle
8768  *
8769  * Return: outstanding tx
8770  */
8771 static int32_t dp_get_tx_pending(struct cdp_pdev *pdev_handle)
8772 {
8773 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8774 
8775 	return qdf_atomic_read(&pdev->num_tx_outstanding);
8776 }
8777 
8778 /**
8779  * dp_get_peer_mac_from_peer_id() - get peer mac
8780  * @pdev_handle: Datapath PDEV handle
8781  * @peer_id: Peer ID
8782  * @peer_mac: MAC addr of PEER
8783  *
8784  * Return: QDF_STATUS
8785  */
8786 static QDF_STATUS dp_get_peer_mac_from_peer_id(struct cdp_soc_t *soc,
8787 					       uint32_t peer_id,
8788 					       uint8_t *peer_mac)
8789 {
8790 	struct dp_peer *peer;
8791 
8792 	if (soc && peer_mac) {
8793 		peer = dp_peer_get_ref_by_id((struct dp_soc *)soc,
8794 					     (uint16_t)peer_id,
8795 					     DP_MOD_ID_CDP);
8796 		if (peer) {
8797 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
8798 				     QDF_MAC_ADDR_SIZE);
8799 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8800 			return QDF_STATUS_SUCCESS;
8801 		}
8802 	}
8803 
8804 	return QDF_STATUS_E_FAILURE;
8805 }
8806 
#ifdef MESH_MODE_SUPPORT
/*
 * dp_vdev_set_mesh_mode() - enable/disable mesh mode on a vdev
 * @vdev_hdl: virtual device object
 * @val: non-zero enables mesh mode
 *
 * Also updates the SW TID-classification skip mask accordingly.
 *
 * Return: void
 */
static
void dp_vdev_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;

	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
	vdev->mesh_vdev = val;
	if (val)
		vdev->skip_sw_tid_classification |= DP_TX_MESH_ENABLED;
	else
		vdev->skip_sw_tid_classification &= ~DP_TX_MESH_ENABLED;
}

/*
 * dp_vdev_set_mesh_rx_filter() - to set the mesh rx filter
 * @vdev_hdl: virtual device object
 * @val: value to be set
 *
 * Return: void
 */
static
void dp_vdev_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;

	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
	vdev->mesh_rx_filter = val;
}
#endif
8839 
8840 /*
8841  * dp_vdev_set_hlos_tid_override() - to set hlos tid override
8842  * @vdev_hdl: virtual device object
8843  * @val: value to be set
8844  *
8845  * Return: void
8846  */
8847 static
8848 void dp_vdev_set_hlos_tid_override(struct dp_vdev *vdev, uint32_t val)
8849 {
8850 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
8851 	if (val)
8852 		vdev->skip_sw_tid_classification |=
8853 			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
8854 	else
8855 		vdev->skip_sw_tid_classification &=
8856 			~DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
8857 }
8858 
8859 /*
8860  * dp_vdev_get_hlos_tid_override() - to get hlos tid override flag
8861  * @vdev_hdl: virtual device object
8862  * @val: value to be set
8863  *
8864  * Return: 1 if this flag is set
8865  */
8866 static
8867 uint8_t dp_vdev_get_hlos_tid_override(struct cdp_vdev *vdev_hdl)
8868 {
8869 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
8870 
8871 	return !!(vdev->skip_sw_tid_classification &
8872 			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED);
8873 }
8874 
8875 #ifdef VDEV_PEER_PROTOCOL_COUNT
8876 static void dp_enable_vdev_peer_protocol_count(struct cdp_soc_t *soc_hdl,
8877 					       int8_t vdev_id,
8878 					       bool enable)
8879 {
8880 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8881 	struct dp_vdev *vdev;
8882 
8883 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
8884 	if (!vdev)
8885 		return;
8886 
8887 	dp_info("enable %d vdev_id %d", enable, vdev_id);
8888 	vdev->peer_protocol_count_track = enable;
8889 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8890 }
8891 
8892 static void dp_enable_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
8893 						   int8_t vdev_id,
8894 						   int drop_mask)
8895 {
8896 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8897 	struct dp_vdev *vdev;
8898 
8899 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
8900 	if (!vdev)
8901 		return;
8902 
8903 	dp_info("drop_mask %d vdev_id %d", drop_mask, vdev_id);
8904 	vdev->peer_protocol_count_dropmask = drop_mask;
8905 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8906 }
8907 
8908 static int dp_is_vdev_peer_protocol_count_enabled(struct cdp_soc_t *soc_hdl,
8909 						  int8_t vdev_id)
8910 {
8911 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8912 	struct dp_vdev *vdev;
8913 	int peer_protocol_count_track;
8914 
8915 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
8916 	if (!vdev)
8917 		return 0;
8918 
8919 	dp_info("enable %d vdev_id %d", vdev->peer_protocol_count_track,
8920 		vdev_id);
8921 	peer_protocol_count_track =
8922 		vdev->peer_protocol_count_track;
8923 
8924 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8925 	return peer_protocol_count_track;
8926 }
8927 
8928 static int dp_get_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
8929 					       int8_t vdev_id)
8930 {
8931 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8932 	struct dp_vdev *vdev;
8933 	int peer_protocol_count_dropmask;
8934 
8935 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
8936 	if (!vdev)
8937 		return 0;
8938 
8939 	dp_info("drop_mask %d vdev_id %d", vdev->peer_protocol_count_dropmask,
8940 		vdev_id);
8941 	peer_protocol_count_dropmask =
8942 		vdev->peer_protocol_count_dropmask;
8943 
8944 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8945 	return peer_protocol_count_dropmask;
8946 }
8947 
8948 #endif
8949 
8950 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data)
8951 {
8952 	uint8_t pdev_count;
8953 
8954 	for (pdev_count = 0; pdev_count < MAX_PDEV_CNT; pdev_count++) {
8955 		if (soc->pdev_list[pdev_count] &&
8956 		    soc->pdev_list[pdev_count] == data)
8957 			return true;
8958 	}
8959 	return false;
8960 }
8961 
8962 /**
8963  * dp_rx_bar_stats_cb(): BAR received stats callback
8964  * @soc: SOC handle
8965  * @cb_ctxt: Call back context
8966  * @reo_status: Reo status
8967  *
8968  * return: void
8969  */
8970 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
8971 	union hal_reo_status *reo_status)
8972 {
8973 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
8974 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
8975 
8976 	if (!dp_check_pdev_exists(soc, pdev)) {
8977 		dp_err_rl("pdev doesn't exist");
8978 		return;
8979 	}
8980 
8981 	if (!qdf_atomic_read(&soc->cmn_init_done))
8982 		return;
8983 
8984 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
8985 		DP_PRINT_STATS("REO stats failure %d",
8986 			       queue_status->header.status);
8987 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
8988 		return;
8989 	}
8990 
8991 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
8992 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
8993 
8994 }
8995 
8996 /**
8997  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
8998  * @vdev: DP VDEV handle
8999  *
9000  * return: void
9001  */
9002 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
9003 			     struct cdp_vdev_stats *vdev_stats)
9004 {
9005 	struct dp_soc *soc = NULL;
9006 
9007 	if (!vdev || !vdev->pdev)
9008 		return;
9009 
9010 	soc = vdev->pdev->soc;
9011 
9012 	dp_update_vdev_ingress_stats(vdev);
9013 
9014 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
9015 
9016 	dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, vdev_stats,
9017 			     DP_MOD_ID_GENERIC_STATS);
9018 
9019 	dp_update_vdev_rate_stats(vdev_stats, &vdev->stats);
9020 
9021 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9022 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
9023 			     vdev_stats, vdev->vdev_id,
9024 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
9025 #endif
9026 }
9027 
9028 void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
9029 {
9030 	struct dp_vdev *vdev = NULL;
9031 	struct dp_soc *soc;
9032 	struct cdp_vdev_stats *vdev_stats =
9033 			qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
9034 
9035 	if (!vdev_stats) {
9036 		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
9037 			   pdev->soc);
9038 		return;
9039 	}
9040 
9041 	soc = pdev->soc;
9042 
9043 	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
9044 	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
9045 	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
9046 	qdf_mem_zero(&pdev->stats.rx_i, sizeof(pdev->stats.rx_i));
9047 
9048 	if (dp_monitor_is_enable_mcopy_mode(pdev))
9049 		dp_monitor_invalid_peer_update_pdev_stats(soc, pdev);
9050 
9051 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
9052 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
9053 
9054 		dp_aggregate_vdev_stats(vdev, vdev_stats);
9055 		dp_update_pdev_stats(pdev, vdev_stats);
9056 		dp_update_pdev_ingress_stats(pdev, vdev);
9057 	}
9058 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
9059 	qdf_mem_free(vdev_stats);
9060 
9061 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9062 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
9063 			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
9064 #endif
9065 }
9066 
9067 /**
9068  * dp_vdev_getstats() - get vdev packet level stats
9069  * @vdev_handle: Datapath VDEV handle
9070  * @stats: cdp network device stats structure
9071  *
9072  * Return: QDF_STATUS
9073  */
9074 static QDF_STATUS dp_vdev_getstats(struct cdp_vdev *vdev_handle,
9075 				   struct cdp_dev_stats *stats)
9076 {
9077 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
9078 	struct dp_pdev *pdev;
9079 	struct dp_soc *soc;
9080 	struct cdp_vdev_stats *vdev_stats;
9081 
9082 	if (!vdev)
9083 		return QDF_STATUS_E_FAILURE;
9084 
9085 	pdev = vdev->pdev;
9086 	if (!pdev)
9087 		return QDF_STATUS_E_FAILURE;
9088 
9089 	soc = pdev->soc;
9090 
9091 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
9092 
9093 	if (!vdev_stats) {
9094 		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
9095 			   soc);
9096 		return QDF_STATUS_E_FAILURE;
9097 	}
9098 
9099 	dp_aggregate_vdev_stats(vdev, vdev_stats);
9100 
9101 	stats->tx_packets = vdev_stats->tx.comp_pkt.num;
9102 	stats->tx_bytes = vdev_stats->tx.comp_pkt.bytes;
9103 
9104 	stats->tx_errors = vdev_stats->tx.tx_failed;
9105 	stats->tx_dropped = vdev_stats->tx_i.dropped.dropped_pkt.num +
9106 			    vdev_stats->tx_i.sg.dropped_host.num +
9107 			    vdev_stats->tx_i.mcast_en.dropped_map_error +
9108 			    vdev_stats->tx_i.mcast_en.dropped_self_mac +
9109 			    vdev_stats->tx_i.mcast_en.dropped_send_fail +
9110 			    vdev_stats->tx.nawds_mcast_drop;
9111 
9112 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
9113 		stats->rx_packets = vdev_stats->rx.to_stack.num;
9114 		stats->rx_bytes = vdev_stats->rx.to_stack.bytes;
9115 	} else {
9116 		stats->rx_packets = vdev_stats->rx_i.reo_rcvd_pkt.num +
9117 				    vdev_stats->rx_i.null_q_desc_pkt.num +
9118 				    vdev_stats->rx_i.routed_eapol_pkt.num;
9119 		stats->rx_bytes = vdev_stats->rx_i.reo_rcvd_pkt.bytes +
9120 				  vdev_stats->rx_i.null_q_desc_pkt.bytes +
9121 				  vdev_stats->rx_i.routed_eapol_pkt.bytes;
9122 	}
9123 
9124 	stats->rx_errors = vdev_stats->rx.err.mic_err +
9125 			   vdev_stats->rx.err.decrypt_err +
9126 			   vdev_stats->rx.err.fcserr +
9127 			   vdev_stats->rx.err.pn_err +
9128 			   vdev_stats->rx.err.oor_err +
9129 			   vdev_stats->rx.err.jump_2k_err +
9130 			   vdev_stats->rx.err.rxdma_wifi_parse_err;
9131 
9132 	stats->rx_dropped = vdev_stats->rx.mec_drop.num +
9133 			    vdev_stats->rx.multipass_rx_pkt_drop +
9134 			    vdev_stats->rx.peer_unauth_rx_pkt_drop +
9135 			    vdev_stats->rx.policy_check_drop +
9136 			    vdev_stats->rx.nawds_mcast_drop +
9137 			    vdev_stats->rx.mcast_3addr_drop;
9138 
9139 	qdf_mem_free(vdev_stats);
9140 
9141 	return QDF_STATUS_SUCCESS;
9142 }
9143 
9144 /**
9145  * dp_pdev_getstats() - get pdev packet level stats
9146  * @pdev_handle: Datapath PDEV handle
9147  * @stats: cdp network device stats structure
9148  *
9149  * Return: QDF_STATUS
9150  */
9151 static void dp_pdev_getstats(struct cdp_pdev *pdev_handle,
9152 			     struct cdp_dev_stats *stats)
9153 {
9154 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
9155 
9156 	dp_aggregate_pdev_stats(pdev);
9157 
9158 	stats->tx_packets = pdev->stats.tx.comp_pkt.num;
9159 	stats->tx_bytes = pdev->stats.tx.comp_pkt.bytes;
9160 
9161 	stats->tx_errors = pdev->stats.tx.tx_failed;
9162 	stats->tx_dropped = pdev->stats.tx_i.dropped.dropped_pkt.num +
9163 			    pdev->stats.tx_i.sg.dropped_host.num +
9164 			    pdev->stats.tx_i.mcast_en.dropped_map_error +
9165 			    pdev->stats.tx_i.mcast_en.dropped_self_mac +
9166 			    pdev->stats.tx_i.mcast_en.dropped_send_fail +
9167 			    pdev->stats.tx.nawds_mcast_drop +
9168 			    pdev->stats.tso_stats.dropped_host.num;
9169 
9170 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(pdev->soc->wlan_cfg_ctx)) {
9171 		stats->rx_packets = pdev->stats.rx.to_stack.num;
9172 		stats->rx_bytes = pdev->stats.rx.to_stack.bytes;
9173 	} else {
9174 		stats->rx_packets = pdev->stats.rx_i.reo_rcvd_pkt.num +
9175 				    pdev->stats.rx_i.null_q_desc_pkt.num +
9176 				    pdev->stats.rx_i.routed_eapol_pkt.num;
9177 		stats->rx_bytes = pdev->stats.rx_i.reo_rcvd_pkt.bytes +
9178 				  pdev->stats.rx_i.null_q_desc_pkt.bytes +
9179 				  pdev->stats.rx_i.routed_eapol_pkt.bytes;
9180 	}
9181 
9182 	stats->rx_errors = pdev->stats.err.ip_csum_err +
9183 		pdev->stats.err.tcp_udp_csum_err +
9184 		pdev->stats.rx.err.mic_err +
9185 		pdev->stats.rx.err.decrypt_err +
9186 		pdev->stats.rx.err.fcserr +
9187 		pdev->stats.rx.err.pn_err +
9188 		pdev->stats.rx.err.oor_err +
9189 		pdev->stats.rx.err.jump_2k_err +
9190 		pdev->stats.rx.err.rxdma_wifi_parse_err;
9191 	stats->rx_dropped = pdev->stats.dropped.msdu_not_done +
9192 		pdev->stats.dropped.mec +
9193 		pdev->stats.dropped.mesh_filter +
9194 		pdev->stats.dropped.wifi_parse +
9195 		pdev->stats.dropped.mon_rx_drop +
9196 		pdev->stats.dropped.mon_radiotap_update_err +
9197 		pdev->stats.rx.mec_drop.num +
9198 		pdev->stats.rx.multipass_rx_pkt_drop +
9199 		pdev->stats.rx.peer_unauth_rx_pkt_drop +
9200 		pdev->stats.rx.policy_check_drop +
9201 		pdev->stats.rx.nawds_mcast_drop +
9202 		pdev->stats.rx.mcast_3addr_drop;
9203 }
9204 
9205 /**
9206  * dp_get_device_stats() - get interface level packet stats
9207  * @soc: soc handle
9208  * @id : vdev_id or pdev_id based on type
9209  * @stats: cdp network device stats structure
9210  * @type: device type pdev/vdev
9211  *
9212  * Return: QDF_STATUS
9213  */
9214 static QDF_STATUS dp_get_device_stats(struct cdp_soc_t *soc_hdl, uint8_t id,
9215 				      struct cdp_dev_stats *stats,
9216 				      uint8_t type)
9217 {
9218 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9219 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
9220 	struct dp_vdev *vdev;
9221 
9222 	switch (type) {
9223 	case UPDATE_VDEV_STATS:
9224 		vdev = dp_vdev_get_ref_by_id(soc, id, DP_MOD_ID_CDP);
9225 
9226 		if (vdev) {
9227 			status = dp_vdev_getstats((struct cdp_vdev *)vdev,
9228 						  stats);
9229 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9230 		}
9231 		return status;
9232 	case UPDATE_PDEV_STATS:
9233 		{
9234 			struct dp_pdev *pdev =
9235 				dp_get_pdev_from_soc_pdev_id_wifi3(
9236 						(struct dp_soc *)soc,
9237 						 id);
9238 			if (pdev) {
9239 				dp_pdev_getstats((struct cdp_pdev *)pdev,
9240 						 stats);
9241 				return QDF_STATUS_SUCCESS;
9242 			}
9243 		}
9244 		break;
9245 	default:
9246 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9247 			"apstats cannot be updated for this input "
9248 			"type %d", type);
9249 		break;
9250 	}
9251 
9252 	return QDF_STATUS_E_FAILURE;
9253 }
9254 
9255 const
9256 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
9257 {
9258 	switch (ring_type) {
9259 	case REO_DST:
9260 		return "Reo_dst";
9261 	case REO_EXCEPTION:
9262 		return "Reo_exception";
9263 	case REO_CMD:
9264 		return "Reo_cmd";
9265 	case REO_REINJECT:
9266 		return "Reo_reinject";
9267 	case REO_STATUS:
9268 		return "Reo_status";
9269 	case WBM2SW_RELEASE:
9270 		return "wbm2sw_release";
9271 	case TCL_DATA:
9272 		return "tcl_data";
9273 	case TCL_CMD_CREDIT:
9274 		return "tcl_cmd_credit";
9275 	case TCL_STATUS:
9276 		return "tcl_status";
9277 	case SW2WBM_RELEASE:
9278 		return "sw2wbm_release";
9279 	case RXDMA_BUF:
9280 		return "Rxdma_buf";
9281 	case RXDMA_DST:
9282 		return "Rxdma_dst";
9283 	case RXDMA_MONITOR_BUF:
9284 		return "Rxdma_monitor_buf";
9285 	case RXDMA_MONITOR_DESC:
9286 		return "Rxdma_monitor_desc";
9287 	case RXDMA_MONITOR_STATUS:
9288 		return "Rxdma_monitor_status";
9289 	case RXDMA_MONITOR_DST:
9290 		return "Rxdma_monitor_destination";
9291 	case WBM_IDLE_LINK:
9292 		return "WBM_hw_idle_link";
9293 	default:
9294 		dp_err("Invalid ring type");
9295 		break;
9296 	}
9297 	return "Invalid";
9298 }
9299 
9300 /*
9301  * dp_print_napi_stats(): NAPI stats
9302  * @soc - soc handle
9303  */
9304 void dp_print_napi_stats(struct dp_soc *soc)
9305 {
9306 	hif_print_napi_stats(soc->hif_handle);
9307 }
9308 
9309 /**
9310  * dp_txrx_host_peer_stats_clr): Reinitialize the txrx peer stats
9311  * @soc: Datapath soc
9312  * @peer: Datatpath peer
9313  * @arg: argument to iter function
9314  *
9315  * Return: QDF_STATUS
9316  */
9317 static inline void
9318 dp_txrx_host_peer_stats_clr(struct dp_soc *soc,
9319 			    struct dp_peer *peer,
9320 			    void *arg)
9321 {
9322 	struct dp_txrx_peer *txrx_peer = NULL;
9323 	struct dp_peer *tgt_peer = NULL;
9324 	struct cdp_interface_peer_stats peer_stats_intf;
9325 
9326 	qdf_mem_zero(&peer_stats_intf, sizeof(struct cdp_interface_peer_stats));
9327 
9328 	DP_STATS_CLR(peer);
9329 	/* Clear monitor peer stats */
9330 	dp_monitor_peer_reset_stats(soc, peer);
9331 
9332 	/* Clear MLD peer stats only when link peer is primary */
9333 	if (dp_peer_is_primary_link_peer(peer)) {
9334 		tgt_peer = dp_get_tgt_peer_from_peer(peer);
9335 		if (tgt_peer) {
9336 			DP_STATS_CLR(tgt_peer);
9337 			txrx_peer = tgt_peer->txrx_peer;
9338 			dp_txrx_peer_stats_clr(txrx_peer);
9339 		}
9340 	}
9341 
9342 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9343 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, peer->vdev->pdev->soc,
9344 			     &peer_stats_intf,  peer->peer_id,
9345 			     UPDATE_PEER_STATS, peer->vdev->pdev->pdev_id);
9346 #endif
9347 }
9348 
#ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING
/**
 * dp_srng_clear_ring_usage_wm_stats() - reset usage watermark stats for
 *					 all REO destination rings
 * @soc: DP soc handle
 *
 * Return: none
 */
static inline void dp_srng_clear_ring_usage_wm_stats(struct dp_soc *soc)
{
	int ring;

	for (ring = 0; ring < soc->num_reo_dest_rings; ring++)
		hal_srng_clear_ring_usage_wm_locked(soc->hal_soc,
					    soc->reo_dest_ring[ring].hal_srng);
}
#else
/* Watermark tracking disabled at build time: nothing to clear */
static inline void dp_srng_clear_ring_usage_wm_stats(struct dp_soc *soc)
{
}
#endif
9363 
9364 /**
9365  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
9366  * @vdev: DP_VDEV handle
9367  * @dp_soc: DP_SOC handle
9368  *
9369  * Return: QDF_STATUS
9370  */
9371 static inline QDF_STATUS
9372 dp_txrx_host_stats_clr(struct dp_vdev *vdev, struct dp_soc *soc)
9373 {
9374 	if (!vdev || !vdev->pdev)
9375 		return QDF_STATUS_E_FAILURE;
9376 
9377 	/*
9378 	 * if NSS offload is enabled, then send message
9379 	 * to NSS FW to clear the stats. Once NSS FW clears the statistics
9380 	 * then clear host statistics.
9381 	 */
9382 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
9383 		if (soc->cdp_soc.ol_ops->nss_stats_clr)
9384 			soc->cdp_soc.ol_ops->nss_stats_clr(soc->ctrl_psoc,
9385 							   vdev->vdev_id);
9386 	}
9387 
9388 	dp_vdev_stats_hw_offload_target_clear(soc, vdev->pdev->pdev_id,
9389 					      (1 << vdev->vdev_id));
9390 
9391 	DP_STATS_CLR(vdev->pdev);
9392 	DP_STATS_CLR(vdev->pdev->soc);
9393 	DP_STATS_CLR(vdev);
9394 
9395 	hif_clear_napi_stats(vdev->pdev->soc->hif_handle);
9396 
9397 	dp_vdev_iterate_peer(vdev, dp_txrx_host_peer_stats_clr, NULL,
9398 			     DP_MOD_ID_GENERIC_STATS);
9399 
9400 	dp_srng_clear_ring_usage_wm_stats(soc);
9401 
9402 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9403 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
9404 			     &vdev->stats,  vdev->vdev_id,
9405 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
9406 #endif
9407 	return QDF_STATUS_SUCCESS;
9408 }
9409 
9410 /**
9411  * dp_get_peer_calibr_stats()- Get peer calibrated stats
9412  * @peer: Datapath peer
9413  * @peer_stats: buffer for peer stats
9414  *
9415  * Return: none
9416  */
9417 static inline
9418 void dp_get_peer_calibr_stats(struct dp_peer *peer,
9419 			      struct cdp_peer_stats *peer_stats)
9420 {
9421 	struct dp_peer *tgt_peer;
9422 
9423 	tgt_peer = dp_get_tgt_peer_from_peer(peer);
9424 	if (!tgt_peer)
9425 		return;
9426 
9427 	peer_stats->tx.last_per = tgt_peer->stats.tx.last_per;
9428 	peer_stats->tx.tx_bytes_success_last =
9429 				tgt_peer->stats.tx.tx_bytes_success_last;
9430 	peer_stats->tx.tx_data_success_last =
9431 					tgt_peer->stats.tx.tx_data_success_last;
9432 	peer_stats->tx.tx_byte_rate = tgt_peer->stats.tx.tx_byte_rate;
9433 	peer_stats->tx.tx_data_rate = tgt_peer->stats.tx.tx_data_rate;
9434 	peer_stats->tx.tx_data_ucast_last =
9435 					tgt_peer->stats.tx.tx_data_ucast_last;
9436 	peer_stats->tx.tx_data_ucast_rate =
9437 					tgt_peer->stats.tx.tx_data_ucast_rate;
9438 	peer_stats->tx.inactive_time = tgt_peer->stats.tx.inactive_time;
9439 	peer_stats->rx.rx_bytes_success_last =
9440 				tgt_peer->stats.rx.rx_bytes_success_last;
9441 	peer_stats->rx.rx_data_success_last =
9442 				tgt_peer->stats.rx.rx_data_success_last;
9443 	peer_stats->rx.rx_byte_rate = tgt_peer->stats.rx.rx_byte_rate;
9444 	peer_stats->rx.rx_data_rate = tgt_peer->stats.rx.rx_data_rate;
9445 }
9446 
9447 /**
9448  * dp_get_peer_basic_stats()- Get peer basic stats
9449  * @peer: Datapath peer
9450  * @peer_stats: buffer for peer stats
9451  *
9452  * Return: none
9453  */
9454 #ifdef QCA_ENHANCED_STATS_SUPPORT
9455 static inline
9456 void dp_get_peer_basic_stats(struct dp_peer *peer,
9457 			     struct cdp_peer_stats *peer_stats)
9458 {
9459 	struct dp_txrx_peer *txrx_peer;
9460 
9461 	txrx_peer = dp_get_txrx_peer(peer);
9462 	if (!txrx_peer)
9463 		return;
9464 
9465 	peer_stats->tx.comp_pkt.num += txrx_peer->comp_pkt.num;
9466 	peer_stats->tx.comp_pkt.bytes += txrx_peer->comp_pkt.bytes;
9467 	peer_stats->tx.tx_failed += txrx_peer->tx_failed;
9468 	peer_stats->rx.to_stack.num += txrx_peer->to_stack.num;
9469 	peer_stats->rx.to_stack.bytes += txrx_peer->to_stack.bytes;
9470 }
9471 #else
9472 static inline
9473 void dp_get_peer_basic_stats(struct dp_peer *peer,
9474 			     struct cdp_peer_stats *peer_stats)
9475 {
9476 	struct dp_txrx_peer *txrx_peer;
9477 
9478 	txrx_peer = peer->txrx_peer;
9479 	if (!txrx_peer)
9480 		return;
9481 
9482 	peer_stats->tx.comp_pkt.num += txrx_peer->comp_pkt.num;
9483 	peer_stats->tx.comp_pkt.bytes += txrx_peer->comp_pkt.bytes;
9484 	peer_stats->tx.tx_failed += txrx_peer->tx_failed;
9485 	peer_stats->rx.to_stack.num += txrx_peer->to_stack.num;
9486 	peer_stats->rx.to_stack.bytes += txrx_peer->to_stack.bytes;
9487 }
9488 #endif
9489 
9490 /**
9491  * dp_get_peer_per_pkt_stats()- Get peer per pkt stats
9492  * @peer: Datapath peer
9493  * @peer_stats: buffer for peer stats
9494  *
9495  * Return: none
9496  */
9497 #ifdef QCA_ENHANCED_STATS_SUPPORT
9498 static inline
9499 void dp_get_peer_per_pkt_stats(struct dp_peer *peer,
9500 			       struct cdp_peer_stats *peer_stats)
9501 {
9502 	struct dp_txrx_peer *txrx_peer;
9503 	struct dp_peer_per_pkt_stats *per_pkt_stats;
9504 
9505 	txrx_peer = dp_get_txrx_peer(peer);
9506 	if (!txrx_peer)
9507 		return;
9508 
9509 	per_pkt_stats = &txrx_peer->stats.per_pkt_stats;
9510 	DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats);
9511 }
9512 #else
9513 static inline
9514 void dp_get_peer_per_pkt_stats(struct dp_peer *peer,
9515 			       struct cdp_peer_stats *peer_stats)
9516 {
9517 	struct dp_txrx_peer *txrx_peer;
9518 	struct dp_peer_per_pkt_stats *per_pkt_stats;
9519 
9520 	txrx_peer = peer->txrx_peer;
9521 	if (!txrx_peer)
9522 		return;
9523 
9524 	per_pkt_stats = &txrx_peer->stats.per_pkt_stats;
9525 	DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats);
9526 }
9527 #endif
9528 
9529 /**
9530  * dp_get_peer_extd_stats()- Get peer extd stats
9531  * @peer: Datapath peer
9532  * @peer_stats: buffer for peer stats
9533  *
9534  * Return: none
9535  */
9536 #ifdef QCA_ENHANCED_STATS_SUPPORT
9537 #ifdef WLAN_FEATURE_11BE_MLO
9538 static inline
9539 void dp_get_peer_extd_stats(struct dp_peer *peer,
9540 			    struct cdp_peer_stats *peer_stats)
9541 {
9542 	struct dp_soc *soc = peer->vdev->pdev->soc;
9543 
9544 	if (IS_MLO_DP_MLD_PEER(peer)) {
9545 		uint8_t i;
9546 		struct dp_peer *link_peer;
9547 		struct dp_soc *link_peer_soc;
9548 		struct dp_mld_link_peers link_peers_info;
9549 
9550 		dp_get_link_peers_ref_from_mld_peer(soc, peer,
9551 						    &link_peers_info,
9552 						    DP_MOD_ID_CDP);
9553 		for (i = 0; i < link_peers_info.num_links; i++) {
9554 			link_peer = link_peers_info.link_peers[i];
9555 			link_peer_soc = link_peer->vdev->pdev->soc;
9556 			dp_monitor_peer_get_stats(link_peer_soc, link_peer,
9557 						  peer_stats,
9558 						  UPDATE_PEER_STATS);
9559 		}
9560 		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
9561 	} else {
9562 		dp_monitor_peer_get_stats(soc, peer, peer_stats,
9563 					  UPDATE_PEER_STATS);
9564 	}
9565 }
9566 #else
9567 static inline
9568 void dp_get_peer_extd_stats(struct dp_peer *peer,
9569 			    struct cdp_peer_stats *peer_stats)
9570 {
9571 	struct dp_soc *soc = peer->vdev->pdev->soc;
9572 
9573 	dp_monitor_peer_get_stats(soc, peer, peer_stats, UPDATE_PEER_STATS);
9574 }
9575 #endif
9576 #else
9577 static inline
9578 void dp_get_peer_extd_stats(struct dp_peer *peer,
9579 			    struct cdp_peer_stats *peer_stats)
9580 {
9581 	struct dp_txrx_peer *txrx_peer;
9582 	struct dp_peer_extd_stats *extd_stats;
9583 
9584 	txrx_peer = peer->txrx_peer;
9585 	if (!txrx_peer)
9586 		return;
9587 
9588 	extd_stats = &txrx_peer->stats.extd_stats;
9589 	DP_UPDATE_EXTD_STATS(peer_stats, extd_stats);
9590 }
9591 #endif
9592 
9593 /**
9594  * dp_get_peer_stats()- Get peer stats
9595  * @peer: Datapath peer
9596  * @peer_stats: buffer for peer stats
9597  *
9598  * Return: none
9599  */
9600 static inline
9601 void dp_get_peer_stats(struct dp_peer *peer, struct cdp_peer_stats *peer_stats)
9602 {
9603 	dp_get_peer_calibr_stats(peer, peer_stats);
9604 
9605 	dp_get_peer_basic_stats(peer, peer_stats);
9606 
9607 	dp_get_peer_per_pkt_stats(peer, peer_stats);
9608 
9609 	dp_get_peer_extd_stats(peer, peer_stats);
9610 }
9611 
9612 /*
9613  * dp_get_host_peer_stats()- function to print peer stats
9614  * @soc: dp_soc handle
9615  * @mac_addr: mac address of the peer
9616  *
9617  * Return: QDF_STATUS
9618  */
9619 static QDF_STATUS
9620 dp_get_host_peer_stats(struct cdp_soc_t *soc, uint8_t *mac_addr)
9621 {
9622 	struct dp_peer *peer = NULL;
9623 	struct cdp_peer_stats *peer_stats = NULL;
9624 
9625 	if (!mac_addr) {
9626 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9627 			  "%s: NULL peer mac addr\n", __func__);
9628 		return QDF_STATUS_E_FAILURE;
9629 	}
9630 
9631 	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
9632 				      mac_addr, 0,
9633 				      DP_VDEV_ALL,
9634 				      DP_MOD_ID_CDP);
9635 	if (!peer) {
9636 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9637 			  "%s: Invalid peer\n", __func__);
9638 		return QDF_STATUS_E_FAILURE;
9639 	}
9640 
9641 	peer_stats = qdf_mem_malloc(sizeof(struct cdp_peer_stats));
9642 	if (!peer_stats) {
9643 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9644 			  "%s: Memory allocation failed for cdp_peer_stats\n",
9645 			  __func__);
9646 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9647 		return QDF_STATUS_E_NOMEM;
9648 	}
9649 
9650 	qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats));
9651 
9652 	dp_get_peer_stats(peer, peer_stats);
9653 	dp_print_peer_stats(peer, peer_stats);
9654 
9655 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
9656 
9657 	qdf_mem_free(peer_stats);
9658 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9659 
9660 	return QDF_STATUS_SUCCESS;
9661 }
9662 
9663 /* *
9664  * dp_dump_wbm_idle_hptp() -dump wbm idle ring, hw hp tp info.
9665  * @soc: dp soc.
9666  * @pdev: dp pdev.
9667  *
9668  * Return: None.
9669  */
9670 static void
9671 dp_dump_wbm_idle_hptp(struct dp_soc *soc, struct dp_pdev *pdev)
9672 {
9673 	uint32_t hw_head;
9674 	uint32_t hw_tail;
9675 	struct dp_srng *srng;
9676 
9677 	if (!soc) {
9678 		dp_err("soc is NULL");
9679 		return;
9680 	}
9681 
9682 	if (!pdev) {
9683 		dp_err("pdev is NULL");
9684 		return;
9685 	}
9686 
9687 	srng = &pdev->soc->wbm_idle_link_ring;
9688 	if (!srng) {
9689 		dp_err("wbm_idle_link_ring srng is NULL");
9690 		return;
9691 	}
9692 
9693 	hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &hw_head,
9694 			&hw_tail, WBM_IDLE_LINK);
9695 
9696 	dp_debug("WBM_IDLE_LINK: HW hp: %d, HW tp: %d",
9697 			hw_head, hw_tail);
9698 }
9699 
9700 
9701 /**
9702  * dp_txrx_stats_help() - Helper function for Txrx_Stats
9703  *
9704  * Return: None
9705  */
9706 static void dp_txrx_stats_help(void)
9707 {
9708 	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
9709 	dp_info("stats_option:");
9710 	dp_info("  1 -- HTT Tx Statistics");
9711 	dp_info("  2 -- HTT Rx Statistics");
9712 	dp_info("  3 -- HTT Tx HW Queue Statistics");
9713 	dp_info("  4 -- HTT Tx HW Sched Statistics");
9714 	dp_info("  5 -- HTT Error Statistics");
9715 	dp_info("  6 -- HTT TQM Statistics");
9716 	dp_info("  7 -- HTT TQM CMDQ Statistics");
9717 	dp_info("  8 -- HTT TX_DE_CMN Statistics");
9718 	dp_info("  9 -- HTT Tx Rate Statistics");
9719 	dp_info(" 10 -- HTT Rx Rate Statistics");
9720 	dp_info(" 11 -- HTT Peer Statistics");
9721 	dp_info(" 12 -- HTT Tx SelfGen Statistics");
9722 	dp_info(" 13 -- HTT Tx MU HWQ Statistics");
9723 	dp_info(" 14 -- HTT RING_IF_INFO Statistics");
9724 	dp_info(" 15 -- HTT SRNG Statistics");
9725 	dp_info(" 16 -- HTT SFM Info Statistics");
9726 	dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
9727 	dp_info(" 18 -- HTT Peer List Details");
9728 	dp_info(" 20 -- Clear Host Statistics");
9729 	dp_info(" 21 -- Host Rx Rate Statistics");
9730 	dp_info(" 22 -- Host Tx Rate Statistics");
9731 	dp_info(" 23 -- Host Tx Statistics");
9732 	dp_info(" 24 -- Host Rx Statistics");
9733 	dp_info(" 25 -- Host AST Statistics");
9734 	dp_info(" 26 -- Host SRNG PTR Statistics");
9735 	dp_info(" 27 -- Host Mon Statistics");
9736 	dp_info(" 28 -- Host REO Queue Statistics");
9737 	dp_info(" 29 -- Host Soc cfg param Statistics");
9738 	dp_info(" 30 -- Host pdev cfg param Statistics");
9739 	dp_info(" 31 -- Host NAPI stats");
9740 	dp_info(" 32 -- Host Interrupt stats");
9741 	dp_info(" 33 -- Host FISA stats");
9742 	dp_info(" 34 -- Host Register Work stats");
9743 	dp_info(" 35 -- HW REO Queue stats");
9744 	dp_info(" 36 -- Host WBM IDLE link desc ring HP/TP");
9745 	dp_info(" 37 -- Host SRNG usage watermark stats");
9746 }
9747 
9748 /**
9749  * dp_print_host_stats()- Function to print the stats aggregated at host
9750  * @vdev_handle: DP_VDEV handle
9751  * @req: host stats type
9752  * @soc: dp soc handler
9753  *
9754  * Return: 0 on success, print error message in case of failure
9755  */
9756 static int
9757 dp_print_host_stats(struct dp_vdev *vdev,
9758 		    struct cdp_txrx_stats_req *req,
9759 		    struct dp_soc *soc)
9760 {
9761 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
9762 	enum cdp_host_txrx_stats type =
9763 			dp_stats_mapping_table[req->stats][STATS_HOST];
9764 
9765 	dp_aggregate_pdev_stats(pdev);
9766 
9767 	switch (type) {
9768 	case TXRX_CLEAR_STATS:
9769 		dp_txrx_host_stats_clr(vdev, soc);
9770 		break;
9771 	case TXRX_RX_RATE_STATS:
9772 		dp_print_rx_rates(vdev);
9773 		break;
9774 	case TXRX_TX_RATE_STATS:
9775 		dp_print_tx_rates(vdev);
9776 		break;
9777 	case TXRX_TX_HOST_STATS:
9778 		dp_print_pdev_tx_stats(pdev);
9779 		dp_print_soc_tx_stats(pdev->soc);
9780 		break;
9781 	case TXRX_RX_HOST_STATS:
9782 		dp_print_pdev_rx_stats(pdev);
9783 		dp_print_soc_rx_stats(pdev->soc);
9784 		break;
9785 	case TXRX_AST_STATS:
9786 		dp_print_ast_stats(pdev->soc);
9787 		dp_print_mec_stats(pdev->soc);
9788 		dp_print_peer_table(vdev);
9789 		break;
9790 	case TXRX_SRNG_PTR_STATS:
9791 		dp_print_ring_stats(pdev);
9792 		break;
9793 	case TXRX_RX_MON_STATS:
9794 		dp_monitor_print_pdev_rx_mon_stats(pdev);
9795 		break;
9796 	case TXRX_REO_QUEUE_STATS:
9797 		dp_get_host_peer_stats((struct cdp_soc_t *)pdev->soc,
9798 				       req->peer_addr);
9799 		break;
9800 	case TXRX_SOC_CFG_PARAMS:
9801 		dp_print_soc_cfg_params(pdev->soc);
9802 		break;
9803 	case TXRX_PDEV_CFG_PARAMS:
9804 		dp_print_pdev_cfg_params(pdev);
9805 		break;
9806 	case TXRX_NAPI_STATS:
9807 		dp_print_napi_stats(pdev->soc);
9808 		break;
9809 	case TXRX_SOC_INTERRUPT_STATS:
9810 		dp_print_soc_interrupt_stats(pdev->soc);
9811 		break;
9812 	case TXRX_SOC_FSE_STATS:
9813 		dp_rx_dump_fisa_table(pdev->soc);
9814 		break;
9815 	case TXRX_HAL_REG_WRITE_STATS:
9816 		hal_dump_reg_write_stats(pdev->soc->hal_soc);
9817 		hal_dump_reg_write_srng_stats(pdev->soc->hal_soc);
9818 		break;
9819 	case TXRX_SOC_REO_HW_DESC_DUMP:
9820 		dp_get_rx_reo_queue_info((struct cdp_soc_t *)pdev->soc,
9821 					 vdev->vdev_id);
9822 		break;
9823 	case TXRX_SOC_WBM_IDLE_HPTP_DUMP:
9824 		dp_dump_wbm_idle_hptp(pdev->soc, pdev);
9825 		break;
9826 	case TXRX_SRNG_USAGE_WM_STATS:
9827 		/* Dump usage watermark stats for all SRNGs */
9828 		dp_dump_srng_high_wm_stats(soc, 0xFF);
9829 		break;
9830 	default:
9831 		dp_info("Wrong Input For TxRx Host Stats");
9832 		dp_txrx_stats_help();
9833 		break;
9834 	}
9835 	return 0;
9836 }
9837 
9838 /*
9839  * dp_pdev_tid_stats_ingress_inc
9840  * @pdev: pdev handle
9841  * @val: increase in value
9842  *
9843  * Return: void
9844  */
9845 static void
9846 dp_pdev_tid_stats_ingress_inc(struct dp_pdev *pdev, uint32_t val)
9847 {
9848 	pdev->stats.tid_stats.ingress_stack += val;
9849 }
9850 
9851 /*
9852  * dp_pdev_tid_stats_osif_drop
9853  * @pdev: pdev handle
9854  * @val: increase in value
9855  *
9856  * Return: void
9857  */
9858 static void
9859 dp_pdev_tid_stats_osif_drop(struct dp_pdev *pdev, uint32_t val)
9860 {
9861 	pdev->stats.tid_stats.osif_drop += val;
9862 }
9863 
9864 /*
9865  * dp_get_fw_peer_stats()- function to print peer stats
9866  * @soc: soc handle
9867  * @pdev_id : id of the pdev handle
9868  * @mac_addr: mac address of the peer
9869  * @cap: Type of htt stats requested
9870  * @is_wait: if set, wait on completion from firmware response
9871  *
9872  * Currently Supporting only MAC ID based requests Only
9873  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
9874  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
9875  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
9876  *
9877  * Return: QDF_STATUS
9878  */
9879 static QDF_STATUS
9880 dp_get_fw_peer_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
9881 		     uint8_t *mac_addr,
9882 		     uint32_t cap, uint32_t is_wait)
9883 {
9884 	int i;
9885 	uint32_t config_param0 = 0;
9886 	uint32_t config_param1 = 0;
9887 	uint32_t config_param2 = 0;
9888 	uint32_t config_param3 = 0;
9889 	struct dp_pdev *pdev =
9890 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9891 						   pdev_id);
9892 
9893 	if (!pdev)
9894 		return QDF_STATUS_E_FAILURE;
9895 
9896 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
9897 	config_param0 |= (1 << (cap + 1));
9898 
9899 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
9900 		config_param1 |= (1 << i);
9901 	}
9902 
9903 	config_param2 |= (mac_addr[0] & 0x000000ff);
9904 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
9905 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
9906 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
9907 
9908 	config_param3 |= (mac_addr[4] & 0x000000ff);
9909 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
9910 
9911 	if (is_wait) {
9912 		qdf_event_reset(&pdev->fw_peer_stats_event);
9913 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
9914 					  config_param0, config_param1,
9915 					  config_param2, config_param3,
9916 					  0, DBG_STATS_COOKIE_DP_STATS, 0);
9917 		qdf_wait_single_event(&pdev->fw_peer_stats_event,
9918 				      DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
9919 	} else {
9920 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
9921 					  config_param0, config_param1,
9922 					  config_param2, config_param3,
9923 					  0, DBG_STATS_COOKIE_DEFAULT, 0);
9924 	}
9925 
9926 	return QDF_STATUS_SUCCESS;
9927 
9928 }
9929 
/* This struct definition will be removed from here
 * once it get added in FW headers*/
struct httstats_cmd_req {
    uint32_t    config_param0;	/* forwarded verbatim to the HTT stats msg */
    uint32_t    config_param1;	/* forwarded verbatim to the HTT stats msg */
    uint32_t    config_param2;	/* forwarded verbatim to the HTT stats msg */
    uint32_t    config_param3;	/* forwarded verbatim to the HTT stats msg */
    int cookie;			/* caller cookie passed through to the msg */
    u_int8_t    stats_id;	/* HTT stats type id for the request */
};
9940 
9941 /*
9942  * dp_get_htt_stats: function to process the httstas request
9943  * @soc: DP soc handle
9944  * @pdev_id: id of pdev handle
9945  * @data: pointer to request data
9946  * @data_len: length for request data
9947  *
9948  * return: QDF_STATUS
9949  */
9950 static QDF_STATUS
9951 dp_get_htt_stats(struct cdp_soc_t *soc, uint8_t pdev_id, void *data,
9952 		 uint32_t data_len)
9953 {
9954 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
9955 	struct dp_pdev *pdev =
9956 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9957 						   pdev_id);
9958 
9959 	if (!pdev)
9960 		return QDF_STATUS_E_FAILURE;
9961 
9962 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
9963 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
9964 				req->config_param0, req->config_param1,
9965 				req->config_param2, req->config_param3,
9966 				req->cookie, DBG_STATS_COOKIE_DEFAULT, 0);
9967 
9968 	return QDF_STATUS_SUCCESS;
9969 }
9970 
9971 /**
9972  * dp_set_pdev_tidmap_prty_wifi3(): update tidmap priority in pdev
9973  * @pdev: DP_PDEV handle
9974  * @prio: tidmap priority value passed by the user
9975  *
9976  * Return: QDF_STATUS_SUCCESS on success
9977  */
9978 static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct dp_pdev *pdev,
9979 						uint8_t prio)
9980 {
9981 	struct dp_soc *soc = pdev->soc;
9982 
9983 	soc->tidmap_prty = prio;
9984 
9985 	hal_tx_set_tidmap_prty(soc->hal_soc, prio);
9986 	return QDF_STATUS_SUCCESS;
9987 }
9988 
9989 /*
9990  * dp_get_peer_param: function to get parameters in peer
9991  * @cdp_soc: DP soc handle
9992  * @vdev_id: id of vdev handle
9993  * @peer_mac: peer mac address
9994  * @param: parameter type to be set
9995  * @val : address of buffer
9996  *
9997  * Return: val
9998  */
9999 static QDF_STATUS dp_get_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
10000 				    uint8_t *peer_mac,
10001 				    enum cdp_peer_param_type param,
10002 				    cdp_config_param_type *val)
10003 {
10004 	return QDF_STATUS_SUCCESS;
10005 }
10006 
10007 /*
10008  * dp_set_peer_param: function to set parameters in peer
10009  * @cdp_soc: DP soc handle
10010  * @vdev_id: id of vdev handle
10011  * @peer_mac: peer mac address
10012  * @param: parameter type to be set
10013  * @val: value of parameter to be set
10014  *
10015  * Return: 0 for success. nonzero for failure.
10016  */
10017 static QDF_STATUS dp_set_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
10018 				    uint8_t *peer_mac,
10019 				    enum cdp_peer_param_type param,
10020 				    cdp_config_param_type val)
10021 {
10022 	struct dp_peer *peer =
10023 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
10024 						       peer_mac, 0, vdev_id,
10025 						       DP_MOD_ID_CDP);
10026 	struct dp_txrx_peer *txrx_peer;
10027 
10028 	if (!peer)
10029 		return QDF_STATUS_E_FAILURE;
10030 
10031 	txrx_peer = peer->txrx_peer;
10032 	if (!txrx_peer) {
10033 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10034 		return QDF_STATUS_E_FAILURE;
10035 	}
10036 
10037 	switch (param) {
10038 	case CDP_CONFIG_NAWDS:
10039 		txrx_peer->nawds_enabled = val.cdp_peer_param_nawds;
10040 		break;
10041 	case CDP_CONFIG_ISOLATION:
10042 		dp_set_peer_isolation(txrx_peer, val.cdp_peer_param_isolation);
10043 		break;
10044 	case CDP_CONFIG_IN_TWT:
10045 		txrx_peer->in_twt = !!(val.cdp_peer_param_in_twt);
10046 		break;
10047 	default:
10048 		break;
10049 	}
10050 
10051 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10052 
10053 	return QDF_STATUS_SUCCESS;
10054 }
10055 
10056 /*
10057  * dp_get_pdev_param: function to get parameters from pdev
10058  * @cdp_soc: DP soc handle
10059  * @pdev_id: id of pdev handle
10060  * @param: parameter type to be get
10061  * @value : buffer for value
10062  *
10063  * Return: status
10064  */
10065 static QDF_STATUS dp_get_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
10066 				    enum cdp_pdev_param_type param,
10067 				    cdp_config_param_type *val)
10068 {
10069 	struct cdp_pdev *pdev = (struct cdp_pdev *)
10070 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
10071 						   pdev_id);
10072 	if (!pdev)
10073 		return QDF_STATUS_E_FAILURE;
10074 
10075 	switch (param) {
10076 	case CDP_CONFIG_VOW:
10077 		val->cdp_pdev_param_cfg_vow =
10078 				((struct dp_pdev *)pdev)->delay_stats_flag;
10079 		break;
10080 	case CDP_TX_PENDING:
10081 		val->cdp_pdev_param_tx_pending = dp_get_tx_pending(pdev);
10082 		break;
10083 	case CDP_FILTER_MCAST_DATA:
10084 		val->cdp_pdev_param_fltr_mcast =
10085 				dp_monitor_pdev_get_filter_mcast_data(pdev);
10086 		break;
10087 	case CDP_FILTER_NO_DATA:
10088 		val->cdp_pdev_param_fltr_none =
10089 				dp_monitor_pdev_get_filter_non_data(pdev);
10090 		break;
10091 	case CDP_FILTER_UCAST_DATA:
10092 		val->cdp_pdev_param_fltr_ucast =
10093 				dp_monitor_pdev_get_filter_ucast_data(pdev);
10094 		break;
10095 	case CDP_MONITOR_CHANNEL:
10096 		val->cdp_pdev_param_monitor_chan =
10097 			((struct dp_pdev *)pdev)->monitor_pdev->mon_chan_num;
10098 		break;
10099 	case CDP_MONITOR_FREQUENCY:
10100 		val->cdp_pdev_param_mon_freq =
10101 			((struct dp_pdev *)pdev)->monitor_pdev->mon_chan_freq;
10102 		break;
10103 	default:
10104 		return QDF_STATUS_E_FAILURE;
10105 	}
10106 
10107 	return QDF_STATUS_SUCCESS;
10108 }
10109 
10110 /*
10111  * dp_set_pdev_param: function to set parameters in pdev
10112  * @cdp_soc: DP soc handle
10113  * @pdev_id: id of pdev handle
10114  * @param: parameter type to be set
10115  * @val: value of parameter to be set
10116  *
10117  * Return: 0 for success. nonzero for failure.
10118  */
10119 static QDF_STATUS dp_set_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
10120 				    enum cdp_pdev_param_type param,
10121 				    cdp_config_param_type val)
10122 {
10123 	int target_type;
10124 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
10125 	struct dp_pdev *pdev =
10126 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
10127 						   pdev_id);
10128 	enum reg_wifi_band chan_band;
10129 
10130 	if (!pdev)
10131 		return QDF_STATUS_E_FAILURE;
10132 
10133 	target_type = hal_get_target_type(soc->hal_soc);
10134 	switch (target_type) {
10135 	case TARGET_TYPE_QCA6750:
10136 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC0_LMAC_ID;
10137 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
10138 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
10139 		break;
10140 	case TARGET_TYPE_KIWI:
10141 	case TARGET_TYPE_MANGO:
10142 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC0_LMAC_ID;
10143 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
10144 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
10145 		break;
10146 	default:
10147 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC1_LMAC_ID;
10148 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
10149 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
10150 		break;
10151 	}
10152 
10153 	switch (param) {
10154 	case CDP_CONFIG_TX_CAPTURE:
10155 		return dp_monitor_config_debug_sniffer(pdev,
10156 						val.cdp_pdev_param_tx_capture);
10157 	case CDP_CONFIG_DEBUG_SNIFFER:
10158 		return dp_monitor_config_debug_sniffer(pdev,
10159 						val.cdp_pdev_param_dbg_snf);
10160 	case CDP_CONFIG_BPR_ENABLE:
10161 		return dp_monitor_set_bpr_enable(pdev,
10162 						 val.cdp_pdev_param_bpr_enable);
10163 	case CDP_CONFIG_PRIMARY_RADIO:
10164 		pdev->is_primary = val.cdp_pdev_param_primary_radio;
10165 		break;
10166 	case CDP_CONFIG_CAPTURE_LATENCY:
10167 		pdev->latency_capture_enable = val.cdp_pdev_param_cptr_latcy;
10168 		break;
10169 	case CDP_INGRESS_STATS:
10170 		dp_pdev_tid_stats_ingress_inc(pdev,
10171 					      val.cdp_pdev_param_ingrs_stats);
10172 		break;
10173 	case CDP_OSIF_DROP:
10174 		dp_pdev_tid_stats_osif_drop(pdev,
10175 					    val.cdp_pdev_param_osif_drop);
10176 		break;
10177 	case CDP_CONFIG_ENH_RX_CAPTURE:
10178 		return dp_monitor_config_enh_rx_capture(pdev,
10179 						val.cdp_pdev_param_en_rx_cap);
10180 	case CDP_CONFIG_ENH_TX_CAPTURE:
10181 		return dp_monitor_config_enh_tx_capture(pdev,
10182 						val.cdp_pdev_param_en_tx_cap);
10183 	case CDP_CONFIG_HMMC_TID_OVERRIDE:
10184 		pdev->hmmc_tid_override_en = val.cdp_pdev_param_hmmc_tid_ovrd;
10185 		break;
10186 	case CDP_CONFIG_HMMC_TID_VALUE:
10187 		pdev->hmmc_tid = val.cdp_pdev_param_hmmc_tid;
10188 		break;
10189 	case CDP_CHAN_NOISE_FLOOR:
10190 		pdev->chan_noise_floor = val.cdp_pdev_param_chn_noise_flr;
10191 		break;
10192 	case CDP_TIDMAP_PRTY:
10193 		dp_set_pdev_tidmap_prty_wifi3(pdev,
10194 					      val.cdp_pdev_param_tidmap_prty);
10195 		break;
10196 	case CDP_FILTER_NEIGH_PEERS:
10197 		dp_monitor_set_filter_neigh_peers(pdev,
10198 					val.cdp_pdev_param_fltr_neigh_peers);
10199 		break;
10200 	case CDP_MONITOR_CHANNEL:
10201 		dp_monitor_set_chan_num(pdev, val.cdp_pdev_param_monitor_chan);
10202 		break;
10203 	case CDP_MONITOR_FREQUENCY:
10204 		chan_band = wlan_reg_freq_to_band(val.cdp_pdev_param_mon_freq);
10205 		dp_monitor_set_chan_freq(pdev, val.cdp_pdev_param_mon_freq);
10206 		dp_monitor_set_chan_band(pdev, chan_band);
10207 		break;
10208 	case CDP_CONFIG_BSS_COLOR:
10209 		dp_monitor_set_bsscolor(pdev, val.cdp_pdev_param_bss_color);
10210 		break;
10211 	case CDP_SET_ATF_STATS_ENABLE:
10212 		dp_monitor_set_atf_stats_enable(pdev,
10213 					val.cdp_pdev_param_atf_stats_enable);
10214 		break;
10215 	case CDP_CONFIG_SPECIAL_VAP:
10216 		dp_monitor_pdev_config_scan_spcl_vap(pdev,
10217 					val.cdp_pdev_param_config_special_vap);
10218 		dp_monitor_vdev_set_monitor_mode_buf_rings(pdev);
10219 		break;
10220 	case CDP_RESET_SCAN_SPCL_VAP_STATS_ENABLE:
10221 		dp_monitor_pdev_reset_scan_spcl_vap_stats_enable(pdev,
10222 				val.cdp_pdev_param_reset_scan_spcl_vap_stats_enable);
10223 		break;
10224 	case CDP_CONFIG_ENHANCED_STATS_ENABLE:
10225 		pdev->enhanced_stats_en = val.cdp_pdev_param_enhanced_stats_enable;
10226 		break;
10227 	case CDP_ISOLATION:
10228 		pdev->isolation = val.cdp_pdev_param_isolation;
10229 		break;
10230 	case CDP_CONFIG_UNDECODED_METADATA_CAPTURE_ENABLE:
10231 		return dp_monitor_config_undecoded_metadata_capture(pdev,
10232 				val.cdp_pdev_param_undecoded_metadata_enable);
10233 		break;
10234 	default:
10235 		return QDF_STATUS_E_INVAL;
10236 	}
10237 	return QDF_STATUS_SUCCESS;
10238 }
10239 
10240 #ifdef QCA_UNDECODED_METADATA_SUPPORT
10241 static
10242 QDF_STATUS dp_set_pdev_phyrx_error_mask(struct cdp_soc_t *cdp_soc,
10243 					uint8_t pdev_id, uint32_t mask,
10244 					uint32_t mask_cont)
10245 {
10246 	struct dp_pdev *pdev =
10247 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
10248 						   pdev_id);
10249 
10250 	if (!pdev)
10251 		return QDF_STATUS_E_FAILURE;
10252 
10253 	return dp_monitor_config_undecoded_metadata_phyrx_error_mask(pdev,
10254 				mask, mask_cont);
10255 }
10256 
10257 static
10258 QDF_STATUS dp_get_pdev_phyrx_error_mask(struct cdp_soc_t *cdp_soc,
10259 					uint8_t pdev_id, uint32_t *mask,
10260 					uint32_t *mask_cont)
10261 {
10262 	struct dp_pdev *pdev =
10263 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
10264 						   pdev_id);
10265 
10266 	if (!pdev)
10267 		return QDF_STATUS_E_FAILURE;
10268 
10269 	return dp_monitor_get_undecoded_metadata_phyrx_error_mask(pdev,
10270 				mask, mask_cont);
10271 }
10272 #endif
10273 
10274 #ifdef QCA_PEER_EXT_STATS
/*
 * dp_rx_update_peer_delay_stats() - fold the rx delay of one frame into the
 * per-peer, per-tid, per-ring delay statistics
 * @soc: DP soc handle
 * @nbuf: received frame; peer id, ring id and tid are read from its cb area
 *
 * Silently returns when the peer id is out of range, the peer cannot be
 * referenced, or the peer has no txrx/delay-stats context.
 */
static void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
					  qdf_nbuf_t nbuf)
{
	struct dp_peer *peer = NULL;
	uint16_t peer_id, ring_id;
	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
	struct dp_peer_delay_stats *delay_stats = NULL;

	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
	if (peer_id > soc->max_peer_id)
		return;

	/* Reference is held only for the duration of the update */
	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
	if (qdf_unlikely(!peer))
		return;

	if (qdf_unlikely(!peer->txrx_peer)) {
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
		return;
	}

	if (qdf_likely(peer->txrx_peer->delay_stats)) {
		delay_stats = peer->txrx_peer->delay_stats;
		ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
		/* NOTE(review): tid/ring_id index delay_tid_stats without an
		 * explicit bounds check here — presumably guaranteed valid by
		 * the rx path; confirm against callers.
		 */
		dp_rx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
					nbuf);
	}
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
}
10304 #else
static inline void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
						 qdf_nbuf_t nbuf)
{
	/* no-op: per-peer delay stats require QCA_PEER_EXT_STATS */
}
10309 #endif
10310 
10311 /*
10312  * dp_calculate_delay_stats: function to get rx delay stats
10313  * @cdp_soc: DP soc handle
10314  * @vdev_id: id of DP vdev handle
10315  * @nbuf: skb
10316  *
10317  * Return: QDF_STATUS
10318  */
10319 static QDF_STATUS
10320 dp_calculate_delay_stats(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
10321 			 qdf_nbuf_t nbuf)
10322 {
10323 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
10324 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10325 						     DP_MOD_ID_CDP);
10326 
10327 	if (!vdev)
10328 		return QDF_STATUS_SUCCESS;
10329 
10330 	if (vdev->pdev->delay_stats_flag)
10331 		dp_rx_compute_delay(vdev, nbuf);
10332 	else
10333 		dp_rx_update_peer_delay_stats(soc, nbuf);
10334 
10335 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10336 	return QDF_STATUS_SUCCESS;
10337 }
10338 
10339 /*
10340  * dp_get_vdev_param: function to get parameters from vdev
10341  * @cdp_soc : DP soc handle
10342  * @vdev_id: id of DP vdev handle
10343  * @param: parameter type to get value
10344  * @val: buffer address
10345  *
10346  * return: status
10347  */
10348 static QDF_STATUS dp_get_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
10349 				    enum cdp_vdev_param_type param,
10350 				    cdp_config_param_type *val)
10351 {
10352 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
10353 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10354 						     DP_MOD_ID_CDP);
10355 
10356 	if (!vdev)
10357 		return QDF_STATUS_E_FAILURE;
10358 
10359 	switch (param) {
10360 	case CDP_ENABLE_WDS:
10361 		val->cdp_vdev_param_wds = vdev->wds_enabled;
10362 		break;
10363 	case CDP_ENABLE_MEC:
10364 		val->cdp_vdev_param_mec = vdev->mec_enabled;
10365 		break;
10366 	case CDP_ENABLE_DA_WAR:
10367 		val->cdp_vdev_param_da_war = vdev->pdev->soc->da_war_enabled;
10368 		break;
10369 	case CDP_ENABLE_IGMP_MCAST_EN:
10370 		val->cdp_vdev_param_igmp_mcast_en = vdev->igmp_mcast_enhanc_en;
10371 		break;
10372 	case CDP_ENABLE_MCAST_EN:
10373 		val->cdp_vdev_param_mcast_en = vdev->mcast_enhancement_en;
10374 		break;
10375 	case CDP_ENABLE_HLOS_TID_OVERRIDE:
10376 		val->cdp_vdev_param_hlos_tid_override =
10377 			    dp_vdev_get_hlos_tid_override((struct cdp_vdev *)vdev);
10378 		break;
10379 	case CDP_ENABLE_PEER_AUTHORIZE:
10380 		val->cdp_vdev_param_peer_authorize =
10381 			    vdev->peer_authorize;
10382 		break;
10383 	case CDP_TX_ENCAP_TYPE:
10384 		val->cdp_vdev_param_tx_encap = vdev->tx_encap_type;
10385 		break;
10386 	case CDP_ENABLE_CIPHER:
10387 		val->cdp_vdev_param_cipher_en = vdev->sec_type;
10388 		break;
10389 #ifdef WLAN_SUPPORT_MESH_LATENCY
10390 	case CDP_ENABLE_PEER_TID_LATENCY:
10391 		val->cdp_vdev_param_peer_tid_latency_enable =
10392 			vdev->peer_tid_latency_enabled;
10393 		break;
10394 	case CDP_SET_VAP_MESH_TID:
10395 		val->cdp_vdev_param_mesh_tid =
10396 				vdev->mesh_tid_latency_config.latency_tid;
10397 		break;
10398 #endif
10399 	default:
10400 		dp_cdp_err("%pK: param value %d is wrong",
10401 			   soc, param);
10402 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10403 		return QDF_STATUS_E_FAILURE;
10404 	}
10405 
10406 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10407 	return QDF_STATUS_SUCCESS;
10408 }
10409 
10410 /*
10411  * dp_set_vdev_param: function to set parameters in vdev
10412  * @cdp_soc : DP soc handle
10413  * @vdev_id: id of DP vdev handle
10414  * @param: parameter type to get value
10415  * @val: value
10416  *
10417  * return: QDF_STATUS
10418  */
10419 static QDF_STATUS
10420 dp_set_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
10421 		  enum cdp_vdev_param_type param, cdp_config_param_type val)
10422 {
10423 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
10424 	struct dp_vdev *vdev =
10425 		dp_vdev_get_ref_by_id(dsoc, vdev_id, DP_MOD_ID_CDP);
10426 	uint32_t var = 0;
10427 
10428 	if (!vdev)
10429 		return QDF_STATUS_E_FAILURE;
10430 
10431 	switch (param) {
10432 	case CDP_ENABLE_WDS:
10433 		dp_cdp_err("%pK: wds_enable %d for vdev(%pK) id(%d)\n",
10434 			   dsoc, val.cdp_vdev_param_wds, vdev, vdev->vdev_id);
10435 		vdev->wds_enabled = val.cdp_vdev_param_wds;
10436 		break;
10437 	case CDP_ENABLE_MEC:
10438 		dp_cdp_err("%pK: mec_enable %d for vdev(%pK) id(%d)\n",
10439 			   dsoc, val.cdp_vdev_param_mec, vdev, vdev->vdev_id);
10440 		vdev->mec_enabled = val.cdp_vdev_param_mec;
10441 		break;
10442 	case CDP_ENABLE_DA_WAR:
10443 		dp_cdp_err("%pK: da_war_enable %d for vdev(%pK) id(%d)\n",
10444 			   dsoc, val.cdp_vdev_param_da_war, vdev, vdev->vdev_id);
10445 		vdev->pdev->soc->da_war_enabled = val.cdp_vdev_param_da_war;
10446 		dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
10447 					     vdev->pdev->soc));
10448 		break;
10449 	case CDP_ENABLE_NAWDS:
10450 		vdev->nawds_enabled = val.cdp_vdev_param_nawds;
10451 		break;
10452 	case CDP_ENABLE_MCAST_EN:
10453 		vdev->mcast_enhancement_en = val.cdp_vdev_param_mcast_en;
10454 		break;
10455 	case CDP_ENABLE_IGMP_MCAST_EN:
10456 		vdev->igmp_mcast_enhanc_en = val.cdp_vdev_param_igmp_mcast_en;
10457 		break;
10458 	case CDP_ENABLE_PROXYSTA:
10459 		vdev->proxysta_vdev = val.cdp_vdev_param_proxysta;
10460 		break;
10461 	case CDP_UPDATE_TDLS_FLAGS:
10462 		vdev->tdls_link_connected = val.cdp_vdev_param_tdls_flags;
10463 		break;
10464 	case CDP_CFG_WDS_AGING_TIMER:
10465 		var = val.cdp_vdev_param_aging_tmr;
10466 		if (!var)
10467 			qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
10468 		else if (var != vdev->wds_aging_timer_val)
10469 			qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, var);
10470 
10471 		vdev->wds_aging_timer_val = var;
10472 		break;
10473 	case CDP_ENABLE_AP_BRIDGE:
10474 		if (wlan_op_mode_sta != vdev->opmode)
10475 			vdev->ap_bridge_enabled = val.cdp_vdev_param_ap_brdg_en;
10476 		else
10477 			vdev->ap_bridge_enabled = false;
10478 		break;
10479 	case CDP_ENABLE_CIPHER:
10480 		vdev->sec_type = val.cdp_vdev_param_cipher_en;
10481 		break;
10482 	case CDP_ENABLE_QWRAP_ISOLATION:
10483 		vdev->isolation_vdev = val.cdp_vdev_param_qwrap_isolation;
10484 		break;
10485 	case CDP_UPDATE_MULTIPASS:
10486 		vdev->multipass_en = val.cdp_vdev_param_update_multipass;
10487 		break;
10488 	case CDP_TX_ENCAP_TYPE:
10489 		vdev->tx_encap_type = val.cdp_vdev_param_tx_encap;
10490 		break;
10491 	case CDP_RX_DECAP_TYPE:
10492 		vdev->rx_decap_type = val.cdp_vdev_param_rx_decap;
10493 		break;
10494 	case CDP_TID_VDEV_PRTY:
10495 		vdev->tidmap_prty = val.cdp_vdev_param_tidmap_prty;
10496 		break;
10497 	case CDP_TIDMAP_TBL_ID:
10498 		vdev->tidmap_tbl_id = val.cdp_vdev_param_tidmap_tbl_id;
10499 		break;
10500 #ifdef MESH_MODE_SUPPORT
10501 	case CDP_MESH_RX_FILTER:
10502 		dp_vdev_set_mesh_rx_filter((struct cdp_vdev *)vdev,
10503 					   val.cdp_vdev_param_mesh_rx_filter);
10504 		break;
10505 	case CDP_MESH_MODE:
10506 		dp_vdev_set_mesh_mode((struct cdp_vdev *)vdev,
10507 				      val.cdp_vdev_param_mesh_mode);
10508 		break;
10509 #endif
10510 	case CDP_ENABLE_HLOS_TID_OVERRIDE:
10511 		dp_info("vdev_id %d enable hlod tid override %d", vdev_id,
10512 			val.cdp_vdev_param_hlos_tid_override);
10513 		dp_vdev_set_hlos_tid_override(vdev,
10514 				val.cdp_vdev_param_hlos_tid_override);
10515 		break;
10516 #ifdef QCA_SUPPORT_WDS_EXTENDED
10517 	case CDP_CFG_WDS_EXT:
10518 		if (vdev->opmode == wlan_op_mode_ap)
10519 			vdev->wds_ext_enabled = val.cdp_vdev_param_wds_ext;
10520 		break;
10521 #endif
10522 	case CDP_ENABLE_PEER_AUTHORIZE:
10523 		vdev->peer_authorize = val.cdp_vdev_param_peer_authorize;
10524 		break;
10525 #ifdef WLAN_SUPPORT_MESH_LATENCY
10526 	case CDP_ENABLE_PEER_TID_LATENCY:
10527 		dp_info("vdev_id %d enable peer tid latency %d", vdev_id,
10528 			val.cdp_vdev_param_peer_tid_latency_enable);
10529 		vdev->peer_tid_latency_enabled =
10530 			val.cdp_vdev_param_peer_tid_latency_enable;
10531 		break;
10532 	case CDP_SET_VAP_MESH_TID:
10533 		dp_info("vdev_id %d enable peer tid latency %d", vdev_id,
10534 			val.cdp_vdev_param_mesh_tid);
10535 		vdev->mesh_tid_latency_config.latency_tid
10536 				= val.cdp_vdev_param_mesh_tid;
10537 		break;
10538 #endif
10539 #ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE
10540 	case CDP_SKIP_BAR_UPDATE_AP:
10541 		dp_info("vdev_id %d skip BAR update: %u", vdev_id,
10542 			val.cdp_skip_bar_update);
10543 		vdev->skip_bar_update = val.cdp_skip_bar_update;
10544 		vdev->skip_bar_update_last_ts = 0;
10545 		break;
10546 #endif
10547 	case CDP_DROP_3ADDR_MCAST:
10548 		dp_info("vdev_id %d drop 3 addr mcast :%d", vdev_id,
10549 			val.cdp_drop_3addr_mcast);
10550 		vdev->drop_3addr_mcast = val.cdp_drop_3addr_mcast;
10551 		break;
10552 	case CDP_ENABLE_WRAP:
10553 		vdev->wrap_vdev = val.cdp_vdev_param_wrap;
10554 		break;
10555 	default:
10556 		break;
10557 	}
10558 
10559 	dp_tx_vdev_update_search_flags((struct dp_vdev *)vdev);
10560 	dsoc->arch_ops.txrx_set_vdev_param(dsoc, vdev, param, val);
10561 	dp_vdev_unref_delete(dsoc, vdev, DP_MOD_ID_CDP);
10562 
10563 	return QDF_STATUS_SUCCESS;
10564 }
10565 
10566 /*
10567  * dp_set_psoc_param: function to set parameters in psoc
10568  * @cdp_soc : DP soc handle
10569  * @param: parameter type to be set
10570  * @val: value of parameter to be set
10571  *
10572  * return: QDF_STATUS
10573  */
10574 static QDF_STATUS
10575 dp_set_psoc_param(struct cdp_soc_t *cdp_soc,
10576 		  enum cdp_psoc_param_type param, cdp_config_param_type val)
10577 {
10578 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
10579 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->wlan_cfg_ctx;
10580 
10581 	switch (param) {
10582 	case CDP_ENABLE_RATE_STATS:
10583 		soc->peerstats_enabled = val.cdp_psoc_param_en_rate_stats;
10584 		break;
10585 	case CDP_SET_NSS_CFG:
10586 		wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx,
10587 					    val.cdp_psoc_param_en_nss_cfg);
10588 		/*
10589 		 * TODO: masked out based on the per offloaded radio
10590 		 */
10591 		switch (val.cdp_psoc_param_en_nss_cfg) {
10592 		case dp_nss_cfg_default:
10593 			break;
10594 		case dp_nss_cfg_first_radio:
10595 		/*
10596 		 * This configuration is valid for single band radio which
10597 		 * is also NSS offload.
10598 		 */
10599 		case dp_nss_cfg_dbdc:
10600 		case dp_nss_cfg_dbtc:
10601 			wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
10602 			wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
10603 			wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
10604 			wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
10605 			break;
10606 		default:
10607 			dp_cdp_err("%pK: Invalid offload config %d",
10608 				   soc, val.cdp_psoc_param_en_nss_cfg);
10609 		}
10610 
10611 			dp_cdp_err("%pK: nss-wifi<0> nss config is enabled"
10612 				   , soc);
10613 		break;
10614 	case CDP_SET_PREFERRED_HW_MODE:
10615 		soc->preferred_hw_mode = val.cdp_psoc_param_preferred_hw_mode;
10616 		break;
10617 	case CDP_IPA_ENABLE:
10618 		soc->wlan_cfg_ctx->ipa_enabled = val.cdp_ipa_enabled;
10619 		break;
10620 	case CDP_CFG_VDEV_STATS_HW_OFFLOAD:
10621 		wlan_cfg_set_vdev_stats_hw_offload_config(wlan_cfg_ctx,
10622 				val.cdp_psoc_param_vdev_stats_hw_offload);
10623 		break;
10624 	case CDP_SAWF_ENABLE:
10625 		wlan_cfg_set_sawf_config(wlan_cfg_ctx, val.cdp_sawf_enabled);
10626 		break;
10627 	default:
10628 		break;
10629 	}
10630 
10631 	return QDF_STATUS_SUCCESS;
10632 }
10633 
10634 /*
10635  * dp_get_psoc_param: function to get parameters in soc
10636  * @cdp_soc : DP soc handle
10637  * @param: parameter type to be set
10638  * @val: address of buffer
10639  *
10640  * return: status
10641  */
10642 static QDF_STATUS dp_get_psoc_param(struct cdp_soc_t *cdp_soc,
10643 				    enum cdp_psoc_param_type param,
10644 				    cdp_config_param_type *val)
10645 {
10646 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
10647 
10648 	if (!soc)
10649 		return QDF_STATUS_E_FAILURE;
10650 
10651 	switch (param) {
10652 	case CDP_CFG_PEER_EXT_STATS:
10653 		val->cdp_psoc_param_pext_stats =
10654 			wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
10655 		break;
10656 	case CDP_CFG_VDEV_STATS_HW_OFFLOAD:
10657 		val->cdp_psoc_param_vdev_stats_hw_offload =
10658 			wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx);
10659 		break;
10660 	default:
10661 		dp_warn("Invalid param");
10662 		break;
10663 	}
10664 
10665 	return QDF_STATUS_SUCCESS;
10666 }
10667 
10668 /*
10669  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
10670  * @soc: DP_SOC handle
10671  * @vdev_id: id of DP_VDEV handle
10672  * @map_id:ID of map that needs to be updated
10673  *
10674  * Return: QDF_STATUS
10675  */
10676 static QDF_STATUS dp_set_vdev_dscp_tid_map_wifi3(ol_txrx_soc_handle cdp_soc,
10677 						 uint8_t vdev_id,
10678 						 uint8_t map_id)
10679 {
10680 	cdp_config_param_type val;
10681 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
10682 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10683 						     DP_MOD_ID_CDP);
10684 	if (vdev) {
10685 		vdev->dscp_tid_map_id = map_id;
10686 		val.cdp_vdev_param_dscp_tid_map_id = map_id;
10687 		soc->arch_ops.txrx_set_vdev_param(soc,
10688 						  vdev,
10689 						  CDP_UPDATE_DSCP_TO_TID_MAP,
10690 						  val);
10691 		/* Updatr flag for transmit tid classification */
10692 		if (vdev->dscp_tid_map_id < soc->num_hw_dscp_tid_map)
10693 			vdev->skip_sw_tid_classification |=
10694 				DP_TX_HW_DSCP_TID_MAP_VALID;
10695 		else
10696 			vdev->skip_sw_tid_classification &=
10697 				~DP_TX_HW_DSCP_TID_MAP_VALID;
10698 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10699 		return QDF_STATUS_SUCCESS;
10700 	}
10701 
10702 	return QDF_STATUS_E_FAILURE;
10703 }
10704 
#ifdef DP_RATETABLE_SUPPORT
/* dp_txrx_get_ratekbps() - resolve a rate (kbps) from preamble/mcs/gi via the
 * rate table.
 * @preamb: preamble type
 * @mcs: MCS index
 * @htflag: HT flags (currently unused in this implementation)
 * @gintval: guard interval value
 *
 * NOTE(review): nss and bw arguments to dp_getrateindex are hard-coded to 1
 * here — presumably a single-stream default; confirm against dp_getrateindex.
 */
static int dp_txrx_get_ratekbps(int preamb, int mcs,
				int htflag, int gintval)
{
	uint32_t rix;
	uint16_t ratecode;
	enum cdp_punctured_modes punc_mode = NO_PUNCTURE;

	/* rix/ratecode outputs are discarded; only the return value is used */
	return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1,
			       (uint8_t)preamb, 1, punc_mode,
			       &rix, &ratecode);
}
#else
/* Stub when the rate table is not compiled in: always reports 0 kbps */
static int dp_txrx_get_ratekbps(int preamb, int mcs,
				int htflag, int gintval)
{
	return 0;
}
#endif
10724 
10725 /* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
10726  * @soc: DP soc handle
10727  * @pdev_id: id of DP pdev handle
10728  * @pdev_stats: buffer to copy to
10729  *
10730  * return : status success/failure
10731  */
10732 static QDF_STATUS
10733 dp_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
10734 		       struct cdp_pdev_stats *pdev_stats)
10735 {
10736 	struct dp_pdev *pdev =
10737 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10738 						   pdev_id);
10739 	if (!pdev)
10740 		return QDF_STATUS_E_FAILURE;
10741 
10742 	dp_aggregate_pdev_stats(pdev);
10743 
10744 	qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats));
10745 	return QDF_STATUS_SUCCESS;
10746 }
10747 
/* dp_txrx_update_vdev_me_stats(): Update vdev ME stats sent from CDP
 * @vdev: DP vdev handle
 * @buf: buffer containing a struct cdp_tx_ingress_stats; the mcast_en
 *	 members are accumulated into the vdev's tx ingress counters
 *
 * Returns: void
 */
static void dp_txrx_update_vdev_me_stats(struct dp_vdev *vdev,
					 void *buf)
{
	struct cdp_tx_ingress_stats *host_stats = NULL;

	if (!buf) {
		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
		return;
	}
	host_stats = (struct cdp_tx_ingress_stats *)buf;

	/* Increment (not overwrite) each mcast-enhancement counter by the
	 * deltas supplied by the caller.
	 */
	DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt,
			 host_stats->mcast_en.mcast_pkt.num,
			 host_stats->mcast_en.mcast_pkt.bytes);
	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error,
		     host_stats->mcast_en.dropped_map_error);
	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_self_mac,
		     host_stats->mcast_en.dropped_self_mac);
	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_send_fail,
		     host_stats->mcast_en.dropped_send_fail);
	DP_STATS_INC(vdev, tx_i.mcast_en.ucast,
		     host_stats->mcast_en.ucast);
	DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc,
		     host_stats->mcast_en.fail_seg_alloc);
	DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail,
		     host_stats->mcast_en.clone_fail);
}
10781 
/* dp_txrx_update_vdev_igmp_me_stats(): Update vdev IGMP ME stats sent from CDP
 * @vdev: DP vdev handle
 * @buf: buffer containing a struct cdp_tx_ingress_stats; the igmp_mcast_en
 *	 members are accumulated into the vdev's tx ingress counters
 *
 * Returns: void
 */
static void dp_txrx_update_vdev_igmp_me_stats(struct dp_vdev *vdev,
					      void *buf)
{
	struct cdp_tx_ingress_stats *host_stats = NULL;

	if (!buf) {
		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
		return;
	}
	host_stats = (struct cdp_tx_ingress_stats *)buf;

	/* Accumulate IGMP mcast-enhancement deltas from the caller */
	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_rcvd,
		     host_stats->igmp_mcast_en.igmp_rcvd);
	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_ucast_converted,
		     host_stats->igmp_mcast_en.igmp_ucast_converted);
}
10804 
10805 /* dp_txrx_update_vdev_host_stats(): Update stats sent through CDP
10806  * @soc: DP soc handle
10807  * @vdev_id: id of DP vdev handle
10808  * @buf: buffer containing specific stats structure
10809  * @stats_id: stats type
10810  *
10811  * Returns: QDF_STATUS
10812  */
10813 static QDF_STATUS dp_txrx_update_vdev_host_stats(struct cdp_soc_t *soc_hdl,
10814 						 uint8_t vdev_id,
10815 						 void *buf,
10816 						 uint16_t stats_id)
10817 {
10818 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10819 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10820 						     DP_MOD_ID_CDP);
10821 
10822 	if (!vdev) {
10823 		dp_cdp_err("%pK: Invalid vdev handle", soc);
10824 		return QDF_STATUS_E_FAILURE;
10825 	}
10826 
10827 	switch (stats_id) {
10828 	case DP_VDEV_STATS_PKT_CNT_ONLY:
10829 		break;
10830 	case DP_VDEV_STATS_TX_ME:
10831 		dp_txrx_update_vdev_me_stats(vdev, buf);
10832 		dp_txrx_update_vdev_igmp_me_stats(vdev, buf);
10833 		break;
10834 	default:
10835 		qdf_info("Invalid stats_id %d", stats_id);
10836 		break;
10837 	}
10838 
10839 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10840 	return QDF_STATUS_SUCCESS;
10841 }
10842 
10843 /* dp_txrx_get_peer_stats - will return cdp_peer_stats
10844  * @soc: soc handle
10845  * @vdev_id: id of vdev handle
10846  * @peer_mac: mac of DP_PEER handle
10847  * @peer_stats: buffer to copy to
10848  * return : status success/failure
10849  */
10850 static QDF_STATUS
10851 dp_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
10852 		       uint8_t *peer_mac, struct cdp_peer_stats *peer_stats)
10853 {
10854 	QDF_STATUS status = QDF_STATUS_SUCCESS;
10855 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
10856 						       peer_mac, 0, vdev_id,
10857 						       DP_MOD_ID_CDP);
10858 
10859 	qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats));
10860 
10861 	if (!peer)
10862 		return QDF_STATUS_E_FAILURE;
10863 
10864 	dp_get_peer_stats(peer, peer_stats);
10865 
10866 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10867 
10868 	return status;
10869 }
10870 
10871 /* dp_txrx_get_peer_stats_param - will return specified cdp_peer_stats
10872  * @param soc - soc handle
10873  * @param vdev_id - vdev_id of vdev object
10874  * @param peer_mac - mac address of the peer
10875  * @param type - enum of required stats
10876  * @param buf - buffer to hold the value
10877  * return : status success/failure
10878  */
10879 static QDF_STATUS
10880 dp_txrx_get_peer_stats_param(struct cdp_soc_t *soc, uint8_t vdev_id,
10881 			     uint8_t *peer_mac, enum cdp_peer_stats_type type,
10882 			     cdp_peer_stats_param_t *buf)
10883 {
10884 	QDF_STATUS ret;
10885 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
10886 						      peer_mac, 0, vdev_id,
10887 						      DP_MOD_ID_CDP);
10888 
10889 	if (!peer) {
10890 		dp_peer_err("%pK: Invalid Peer for Mac " QDF_MAC_ADDR_FMT,
10891 			    soc, QDF_MAC_ADDR_REF(peer_mac));
10892 		return QDF_STATUS_E_FAILURE;
10893 	}
10894 
10895 	if (type >= cdp_peer_per_pkt_stats_min &&
10896 	    type < cdp_peer_per_pkt_stats_max) {
10897 		ret = dp_txrx_get_peer_per_pkt_stats_param(peer, type, buf);
10898 	} else if (type >= cdp_peer_extd_stats_min &&
10899 		   type < cdp_peer_extd_stats_max) {
10900 		ret = dp_txrx_get_peer_extd_stats_param(peer, type, buf);
10901 	} else {
10902 		dp_err("%pK: Invalid stat type requested", soc);
10903 		ret = QDF_STATUS_E_FAILURE;
10904 	}
10905 
10906 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10907 
10908 	return ret;
10909 }
10910 
/* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
 * @soc_hdl: soc handle
 * @vdev_id: id of vdev handle
 * @peer_mac: mac of DP_PEER handle
 *
 * For an MLO MLD peer, the stats of every associated link peer are also
 * cleared on that link peer's own soc.
 *
 * return : QDF_STATUS
 */
#ifdef WLAN_FEATURE_11BE_MLO
static QDF_STATUS
dp_txrx_reset_peer_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			 uint8_t *peer_mac)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_peer *peer =
			dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
						       vdev_id, DP_MOD_ID_CDP);

	if (!peer)
		return QDF_STATUS_E_FAILURE;

	DP_STATS_CLR(peer);
	dp_txrx_peer_stats_clr(peer->txrx_peer);

	if (IS_MLO_DP_MLD_PEER(peer)) {
		uint8_t i;
		struct dp_peer *link_peer;
		struct dp_soc *link_peer_soc;
		struct dp_mld_link_peers link_peers_info;

		/* Takes references on all link peers; released below */
		dp_get_link_peers_ref_from_mld_peer(soc, peer,
						    &link_peers_info,
						    DP_MOD_ID_CDP);
		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			/* Each link peer may live on a different soc */
			link_peer_soc = link_peer->vdev->pdev->soc;

			/* NOTE(review): link peers get DP_STATS_CLR and a
			 * monitor reset only — presumably the txrx_peer stats
			 * are shared with the MLD peer cleared above; confirm.
			 */
			DP_STATS_CLR(link_peer);
			dp_monitor_peer_reset_stats(link_peer_soc, link_peer);
		}

		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
	} else {
		dp_monitor_peer_reset_stats(soc, peer);
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return status;
}
10961 #else
10962 static QDF_STATUS
10963 dp_txrx_reset_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
10964 			 uint8_t *peer_mac)
10965 {
10966 	QDF_STATUS status = QDF_STATUS_SUCCESS;
10967 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
10968 						      peer_mac, 0, vdev_id,
10969 						      DP_MOD_ID_CDP);
10970 
10971 	if (!peer)
10972 		return QDF_STATUS_E_FAILURE;
10973 
10974 	DP_STATS_CLR(peer);
10975 	dp_txrx_peer_stats_clr(peer->txrx_peer);
10976 	dp_monitor_peer_reset_stats((struct dp_soc *)soc, peer);
10977 
10978 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10979 
10980 	return status;
10981 }
10982 #endif
10983 
10984 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
10985  * @vdev_handle: DP_VDEV handle
10986  * @buf: buffer for vdev stats
10987  *
10988  * return : int
10989  */
10990 static int dp_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
10991 				  void *buf, bool is_aggregate)
10992 {
10993 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10994 	struct cdp_vdev_stats *vdev_stats;
10995 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10996 						     DP_MOD_ID_CDP);
10997 
10998 	if (!vdev)
10999 		return 1;
11000 
11001 	vdev_stats = (struct cdp_vdev_stats *)buf;
11002 
11003 	if (is_aggregate) {
11004 		dp_aggregate_vdev_stats(vdev, buf);
11005 	} else {
11006 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
11007 	}
11008 
11009 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11010 	return 0;
11011 }
11012 
/*
 * dp_get_total_per(): get total per
 * @soc: DP soc handle
 * @pdev_id: id of DP_PDEV handle
 *
 * PER (packet error rate) is derived from the aggregated pdev TX stats
 * as retries / (successes + retries), expressed as a percentage.
 *
 * Return: % error rate using retries per packet and success packets;
 *	   0 if the pdev is not found or nothing has been transmitted
 */
static int dp_get_total_per(struct cdp_soc_t *soc, uint8_t pdev_id)
{
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (!pdev)
		return 0;

	/* fold per-vdev/per-peer counters into pdev->stats first */
	dp_aggregate_pdev_stats(pdev);
	/* guard against divide-by-zero when nothing was transmitted */
	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
		return 0;
	return ((pdev->stats.tx.retries * 100) /
		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
}
11035 
/*
 * dp_txrx_stats_publish(): publish pdev stats into a buffer
 * @soc: DP soc handle
 * @pdev_id: id of DP_PDEV handle
 * @buf: to hold pdev_stats
 *
 * Aggregates host-side pdev stats, requests PDEV_TX and PDEV_RX ext
 * stats from firmware, waits (bounded) on fw_stats_event, and copies
 * the resulting pdev stats into @buf.
 *
 * Return: int (TXRX_STATS_LEVEL on success, TXRX_STATS_LEVEL_OFF if the
 *	   pdev is missing or a previous FW exchange is still pending)
 */
static int
dp_txrx_stats_publish(struct cdp_soc_t *soc, uint8_t pdev_id,
		      struct cdp_stats_extd *buf)
{
	struct cdp_txrx_stats_req req = {0,};
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (!pdev)
		return TXRX_STATS_LEVEL_OFF;

	/* only one FW stats exchange may be in flight per pdev */
	if (pdev->pending_fw_response)
		return TXRX_STATS_LEVEL_OFF;

	/* fold per-vdev/per-peer counters into pdev->stats */
	dp_aggregate_pdev_stats(pdev);

	pdev->pending_fw_response = true;
	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
	pdev->fw_stats_tlv_bitmap_rcvd = 0;
	/* request TX pdev ext stats from firmware */
	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
				req.param1, req.param2, req.param3, 0,
				req.cookie_val, 0);

	/* request RX pdev ext stats from firmware */
	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
				req.param1, req.param2, req.param3, 0,
				req.cookie_val, 0);

	/* bounded wait for the FW responses; on timeout the copy below
	 * still publishes whatever was aggregated on the host side
	 */
	qdf_event_reset(&pdev->fw_stats_event);
	qdf_wait_single_event(&pdev->fw_stats_event, DP_MAX_SLEEP_TIME);
	/* NOTE(review): @buf is cdp_stats_extd but the copy size is
	 * sizeof(struct cdp_pdev_stats) — confirm the destination is at
	 * least that large
	 */
	qdf_mem_copy(buf, &pdev->stats, sizeof(struct cdp_pdev_stats));
	pdev->pending_fw_response = false;

	return TXRX_STATS_LEVEL;
}
11082 
/**
 * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
 * @soc_handle: soc handle
 * @pdev_id: id of DP_PDEV handle
 * @map_id: ID of map that needs to be updated
 * @tos: index value in map (IP TOS byte; DSCP is extracted from it)
 * @tid: tid value passed by the user
 *
 * Return: QDF_STATUS_SUCCESS if the HW map was updated,
 *	   QDF_STATUS_E_FAILURE if the pdev is missing or @map_id is not
 *	   a HW-backed map
 */
static QDF_STATUS
dp_set_pdev_dscp_tid_map_wifi3(struct cdp_soc_t *soc_handle,
			       uint8_t pdev_id,
			       uint8_t map_id,
			       uint8_t tos, uint8_t tid)
{
	uint8_t dscp;
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	/* extract the DSCP field from the TOS byte */
	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
	/* NOTE(review): the SW map is written before @map_id is checked
	 * against num_hw_dscp_tid_map — confirm callers range-check
	 * map_id, or that SW-only maps beyond the HW count are intended
	 */
	pdev->dscp_tid_map[map_id][dscp] = tid;

	/* mirror the mapping to HW only for HW-backed map ids */
	if (map_id < soc->num_hw_dscp_tid_map)
		hal_tx_update_dscp_tid(soc->hal_soc, tid,
				       map_id, dscp);
	else
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}
11117 
11118 #ifdef WLAN_SYSFS_DP_STATS
11119 /*
11120  * dp_sysfs_event_trigger(): Trigger event to wait for firmware
11121  * stats request response.
11122  * @soc: soc handle
11123  * @cookie_val: cookie value
11124  *
11125  * @Return: QDF_STATUS
11126  */
11127 static QDF_STATUS
11128 dp_sysfs_event_trigger(struct dp_soc *soc, uint32_t cookie_val)
11129 {
11130 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11131 	/* wait for firmware response for sysfs stats request */
11132 	if (cookie_val == DBG_SYSFS_STATS_COOKIE) {
11133 		if (!soc) {
11134 			dp_cdp_err("soc is NULL");
11135 			return QDF_STATUS_E_FAILURE;
11136 		}
11137 		/* wait for event completion */
11138 		status = qdf_wait_single_event(&soc->sysfs_config->sysfs_txrx_fw_request_done,
11139 					       WLAN_SYSFS_STAT_REQ_WAIT_MS);
11140 		if (status == QDF_STATUS_SUCCESS)
11141 			dp_cdp_info("sysfs_txrx_fw_request_done event completed");
11142 		else if (status == QDF_STATUS_E_TIMEOUT)
11143 			dp_cdp_warn("sysfs_txrx_fw_request_done event expired");
11144 		else
11145 			dp_cdp_warn("sysfs_txrx_fw_request_done event erro code %d", status);
11146 	}
11147 
11148 	return status;
11149 }
11150 #else /* WLAN_SYSFS_DP_STATS */
/*
 * dp_sysfs_event_trigger(): stub when WLAN_SYSFS_DP_STATS is disabled —
 * there is no sysfs stats request to wait on.
 * @soc: soc handle (unused)
 * @cookie_val: cookie value (unused)
 *
 * @Return: QDF_STATUS_SUCCESS always
 */
static QDF_STATUS
dp_sysfs_event_trigger(struct dp_soc *soc, uint32_t cookie_val)
{
	return QDF_STATUS_SUCCESS;
}
11164 #endif /* WLAN_SYSFS_DP_STATS */
11165 
/**
 * dp_fw_stats_process(): Process TXRX FW stats request.
 * @vdev: DP VDEV handle
 * @req: stats request; req->stats carries the HTT ext-stats type and
 *	 req->param0..param3 the per-type config words
 *
 * Validates the vdev/pdev/soc chain, fixes up the request parameters
 * for a few special stats types, sends the HTT ext-stats message to
 * firmware, and for sysfs-originated requests waits for the response.
 *
 * return: QDF_STATUS
 */
static QDF_STATUS
dp_fw_stats_process(struct dp_vdev *vdev,
		    struct cdp_txrx_stats_req *req)
{
	struct dp_pdev *pdev = NULL;
	struct dp_soc *soc = NULL;
	uint32_t stats = req->stats;
	uint8_t mac_id = req->mac_id;
	uint32_t cookie_val = DBG_STATS_COOKIE_DEFAULT;

	if (!vdev) {
		DP_TRACE(NONE, "VDEV not found");
		return QDF_STATUS_E_FAILURE;
	}

	pdev = vdev->pdev;
	if (!pdev) {
		DP_TRACE(NONE, "PDEV not found");
		return QDF_STATUS_E_FAILURE;
	}

	soc = pdev->soc;
	if (!soc) {
		DP_TRACE(NONE, "soc not found");
		return QDF_STATUS_E_FAILURE;
	}

	/* In case request is from host sysfs for displaying stats on console */
	if (req->cookie_val == DBG_SYSFS_STATS_COOKIE)
		cookie_val = DBG_SYSFS_STATS_COOKIE;

	/*
	 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
	 * from param0 to param3 according to below rule:
	 *
	 * PARAM:
	 *   - config_param0 : start_offset (stats type)
	 *   - config_param1 : stats bmask from start offset
	 *   - config_param2 : stats bmask from start offset + 32
	 *   - config_param3 : stats bmask from start offset + 64
	 */
	if (req->stats == CDP_TXRX_STATS_0) {
		/* reset request: clear every stats word of PDEV_TX */
		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
		req->param1 = 0xFFFFFFFF;
		req->param2 = 0xFFFFFFFF;
		req->param3 = 0xFFFFFFFF;
	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
		/* MU stats are requested per-vdev via a vdev-id mask */
		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
	}

	if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT) {
		dp_h2t_ext_stats_msg_send(pdev,
					  HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT,
					  req->param0, req->param1, req->param2,
					  req->param3, 0, cookie_val,
					  mac_id);
	} else {
		dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
					  req->param1, req->param2, req->param3,
					  0, cookie_val, mac_id);
	}

	/* blocks until the FW response event fires for sysfs requests */
	dp_sysfs_event_trigger(soc, cookie_val);

	return QDF_STATUS_SUCCESS;
}
11239 
/**
 * dp_txrx_stats_request - function to map to firmware and host stats
 * @soc_handle: soc handle
 * @vdev_id: virtual device ID
 * @req: stats request
 *
 * Maps the CDP stats enum in req->stats to a (FW stats, host stats)
 * pair through dp_stats_mapping_table and dispatches to either the
 * FW stats path (dp_fw_stats_process) or the host stats printer.
 *
 * Return: QDF_STATUS
 */
static
QDF_STATUS dp_txrx_stats_request(struct cdp_soc_t *soc_handle,
				 uint8_t vdev_id,
				 struct cdp_txrx_stats_req *req)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_handle);
	int host_stats;
	int fw_stats;
	enum cdp_stats stats;
	int num_stats;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	QDF_STATUS status = QDF_STATUS_E_INVAL;

	if (!vdev || !req) {
		dp_cdp_err("%pK: Invalid vdev/req instance", soc);
		status = QDF_STATUS_E_INVAL;
		goto fail0;
	}

	if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) {
		dp_err("Invalid mac id request");
		status = QDF_STATUS_E_INVAL;
		goto fail0;
	}

	stats = req->stats;
	if (stats >= CDP_TXRX_MAX_STATS) {
		status = QDF_STATUS_E_INVAL;
		goto fail0;
	}

	/*
	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
	 *			has to be updated if new FW HTT stats added
	 */
	if (stats > CDP_TXRX_STATS_HTT_MAX)
		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;

	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);

	/* adjusted index must still land inside the mapping table */
	if (stats >= num_stats) {
		dp_cdp_err("%pK : Invalid stats option: %d", soc, stats);
		status = QDF_STATUS_E_INVAL;
		goto fail0;
	}

	req->stats = stats;
	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
	host_stats = dp_stats_mapping_table[stats][STATS_HOST];

	dp_info("stats: %u fw_stats_type: %d host_stats: %d",
		stats, fw_stats, host_stats);

	if (fw_stats != TXRX_FW_STATS_INVALID) {
		/* update request with FW stats type */
		req->stats = fw_stats;
		status = dp_fw_stats_process(vdev, req);
	} else if ((host_stats != TXRX_HOST_STATS_INVALID) &&
			(host_stats <= TXRX_HOST_STATS_MAX))
		status = dp_print_host_stats(vdev, req, soc);
	else
		dp_cdp_info("%pK: Wrong Input for TxRx Stats", soc);
fail0:
	/* release the vdev reference taken above on every exit path */
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return status;
}
11316 
/*
 * dp_txrx_dump_stats() -  Dump statistics
 * @psoc: soc handle
 * @value: Statistics option (CDP_* dump selector)
 * @level: verbosity level for options that support it
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_INVAL for a NULL soc or
 *	   an unknown option
 */
static QDF_STATUS dp_txrx_dump_stats(struct cdp_soc_t *psoc, uint16_t value,
				     enum qdf_stats_verbosity_level level)
{
	struct dp_soc *soc =
		(struct dp_soc *)psoc;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (!soc) {
		dp_cdp_err("%pK: soc is NULL", soc);
		return QDF_STATUS_E_INVAL;
	}

	switch (value) {
	case CDP_TXRX_PATH_STATS:
		dp_txrx_path_stats(soc);
		dp_print_soc_interrupt_stats(soc);
		hal_dump_reg_write_stats(soc->hal_soc);
		dp_pdev_print_tx_delay_stats(soc);
		/* Dump usage watermark stats for core TX/RX SRNGs */
		dp_dump_srng_high_wm_stats(soc, (1 << REO_DST));
		break;

	case CDP_RX_RING_STATS:
		dp_print_per_ring_stats(soc);
		break;

	case CDP_TXRX_TSO_STATS:
		dp_print_tso_stats(soc, level);
		break;

	case CDP_DUMP_TX_FLOW_POOL_INFO:
		/* full dump only at the highest verbosity level */
		if (level == QDF_STATS_VERBOSITY_LEVEL_HIGH)
			cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
		else
			dp_tx_dump_flow_pool_info_compact(soc);
		break;

	case CDP_DP_NAPI_STATS:
		dp_print_napi_stats(soc);
		break;

	case CDP_TXRX_DESC_STATS:
		/* TODO: NOT IMPLEMENTED */
		break;

	case CDP_DP_RX_FISA_STATS:
		dp_rx_dump_fisa_stats(soc);
		break;

	case CDP_DP_SWLM_STATS:
		dp_print_swlm_stats(soc);
		break;

	case CDP_DP_TX_HW_LATENCY_STATS:
		dp_pdev_print_tx_delay_stats(soc);
		break;

	default:
		status = QDF_STATUS_E_INVAL;
		break;
	}

	return status;

}
11386 
11387 #ifdef WLAN_SYSFS_DP_STATS
11388 static
11389 void dp_sysfs_get_stat_type(struct dp_soc *soc, uint32_t *mac_id,
11390 			    uint32_t *stat_type)
11391 {
11392 	qdf_spinlock_acquire(&soc->sysfs_config->rw_stats_lock);
11393 	*stat_type = soc->sysfs_config->stat_type_requested;
11394 	*mac_id   = soc->sysfs_config->mac_id;
11395 
11396 	qdf_spinlock_release(&soc->sysfs_config->rw_stats_lock);
11397 }
11398 
11399 static
11400 void dp_sysfs_update_config_buf_params(struct dp_soc *soc,
11401 				       uint32_t curr_len,
11402 				       uint32_t max_buf_len,
11403 				       char *buf)
11404 {
11405 	qdf_spinlock_acquire(&soc->sysfs_config->sysfs_write_user_buffer);
11406 	/* set sysfs_config parameters */
11407 	soc->sysfs_config->buf = buf;
11408 	soc->sysfs_config->curr_buffer_length = curr_len;
11409 	soc->sysfs_config->max_buffer_length = max_buf_len;
11410 	qdf_spinlock_release(&soc->sysfs_config->sysfs_write_user_buffer);
11411 }
11412 
11413 static
11414 QDF_STATUS dp_sysfs_fill_stats(ol_txrx_soc_handle soc_hdl,
11415 			       char *buf, uint32_t buf_size)
11416 {
11417 	uint32_t mac_id = 0;
11418 	uint32_t stat_type = 0;
11419 	uint32_t fw_stats = 0;
11420 	uint32_t host_stats = 0;
11421 	enum cdp_stats stats;
11422 	struct cdp_txrx_stats_req req;
11423 	uint32_t num_stats;
11424 	struct dp_soc *soc = NULL;
11425 
11426 	if (!soc_hdl) {
11427 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
11428 		return QDF_STATUS_E_INVAL;
11429 	}
11430 
11431 	soc = cdp_soc_t_to_dp_soc(soc_hdl);
11432 
11433 	if (!soc) {
11434 		dp_cdp_err("%pK: soc is NULL", soc);
11435 		return QDF_STATUS_E_INVAL;
11436 	}
11437 
11438 	dp_sysfs_get_stat_type(soc, &mac_id, &stat_type);
11439 
11440 	stats = stat_type;
11441 	if (stats >= CDP_TXRX_MAX_STATS) {
11442 		dp_cdp_info("sysfs stat type requested is invalid");
11443 		return QDF_STATUS_E_INVAL;
11444 	}
11445 	/*
11446 	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
11447 	 *			has to be updated if new FW HTT stats added
11448 	 */
11449 	if (stats > CDP_TXRX_MAX_STATS)
11450 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
11451 
11452 	num_stats = QDF_ARRAY_SIZE(dp_stats_mapping_table);
11453 
11454 	if (stats >= num_stats) {
11455 		dp_cdp_err("%pK : Invalid stats option: %d, max num stats: %d",
11456 				soc, stats, num_stats);
11457 		return QDF_STATUS_E_INVAL;
11458 	}
11459 
11460 	/* build request */
11461 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
11462 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
11463 
11464 	req.stats = stat_type;
11465 	req.mac_id = mac_id;
11466 	/* request stats to be printed */
11467 	qdf_mutex_acquire(&soc->sysfs_config->sysfs_read_lock);
11468 
11469 	if (fw_stats != TXRX_FW_STATS_INVALID) {
11470 		/* update request with FW stats type */
11471 		req.cookie_val = DBG_SYSFS_STATS_COOKIE;
11472 	} else if ((host_stats != TXRX_HOST_STATS_INVALID) &&
11473 			(host_stats <= TXRX_HOST_STATS_MAX)) {
11474 		req.cookie_val = DBG_STATS_COOKIE_DEFAULT;
11475 		soc->sysfs_config->process_id = qdf_get_current_pid();
11476 		soc->sysfs_config->printing_mode = PRINTING_MODE_ENABLED;
11477 	}
11478 
11479 	dp_sysfs_update_config_buf_params(soc, 0, buf_size, buf);
11480 
11481 	dp_txrx_stats_request(soc_hdl, mac_id, &req);
11482 	soc->sysfs_config->process_id = 0;
11483 	soc->sysfs_config->printing_mode = PRINTING_MODE_DISABLED;
11484 
11485 	dp_sysfs_update_config_buf_params(soc, 0, 0, NULL);
11486 
11487 	qdf_mutex_release(&soc->sysfs_config->sysfs_read_lock);
11488 	return QDF_STATUS_SUCCESS;
11489 }
11490 
11491 static
11492 QDF_STATUS dp_sysfs_set_stat_type(ol_txrx_soc_handle soc_hdl,
11493 				  uint32_t stat_type, uint32_t mac_id)
11494 {
11495 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11496 
11497 	if (!soc_hdl) {
11498 		dp_cdp_err("%pK: soc is NULL", soc);
11499 		return QDF_STATUS_E_INVAL;
11500 	}
11501 
11502 	qdf_spinlock_acquire(&soc->sysfs_config->rw_stats_lock);
11503 
11504 	soc->sysfs_config->stat_type_requested = stat_type;
11505 	soc->sysfs_config->mac_id = mac_id;
11506 
11507 	qdf_spinlock_release(&soc->sysfs_config->rw_stats_lock);
11508 
11509 	return QDF_STATUS_SUCCESS;
11510 }
11511 
11512 static
11513 QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl)
11514 {
11515 	struct dp_soc *soc;
11516 	QDF_STATUS status;
11517 
11518 	if (!soc_hdl) {
11519 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
11520 		return QDF_STATUS_E_INVAL;
11521 	}
11522 
11523 	soc = soc_hdl;
11524 
11525 	soc->sysfs_config = qdf_mem_malloc(sizeof(struct sysfs_stats_config));
11526 	if (!soc->sysfs_config) {
11527 		dp_cdp_err("failed to allocate memory for sysfs_config no memory");
11528 		return QDF_STATUS_E_NOMEM;
11529 	}
11530 
11531 	status = qdf_event_create(&soc->sysfs_config->sysfs_txrx_fw_request_done);
11532 	/* create event for fw stats request from sysfs */
11533 	if (status != QDF_STATUS_SUCCESS) {
11534 		dp_cdp_err("failed to create event sysfs_txrx_fw_request_done");
11535 		qdf_mem_free(soc->sysfs_config);
11536 		soc->sysfs_config = NULL;
11537 		return QDF_STATUS_E_FAILURE;
11538 	}
11539 
11540 	qdf_spinlock_create(&soc->sysfs_config->rw_stats_lock);
11541 	qdf_mutex_create(&soc->sysfs_config->sysfs_read_lock);
11542 	qdf_spinlock_create(&soc->sysfs_config->sysfs_write_user_buffer);
11543 
11544 	return QDF_STATUS_SUCCESS;
11545 }
11546 
11547 static
11548 QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl)
11549 {
11550 	struct dp_soc *soc;
11551 	QDF_STATUS status;
11552 
11553 	if (!soc_hdl) {
11554 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
11555 		return QDF_STATUS_E_INVAL;
11556 	}
11557 
11558 	soc = soc_hdl;
11559 	if (!soc->sysfs_config) {
11560 		dp_cdp_err("soc->sysfs_config is NULL");
11561 		return QDF_STATUS_E_FAILURE;
11562 	}
11563 
11564 	status = qdf_event_destroy(&soc->sysfs_config->sysfs_txrx_fw_request_done);
11565 	if (status != QDF_STATUS_SUCCESS)
11566 		dp_cdp_err("Failed to detroy event sysfs_txrx_fw_request_done ");
11567 
11568 	qdf_mutex_destroy(&soc->sysfs_config->sysfs_read_lock);
11569 	qdf_spinlock_destroy(&soc->sysfs_config->rw_stats_lock);
11570 	qdf_spinlock_destroy(&soc->sysfs_config->sysfs_write_user_buffer);
11571 
11572 	qdf_mem_free(soc->sysfs_config);
11573 
11574 	return QDF_STATUS_SUCCESS;
11575 }
11576 
11577 #else /* WLAN_SYSFS_DP_STATS */
11578 
/* Stub: WLAN_SYSFS_DP_STATS disabled, nothing to tear down */
static
QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl)
{
	return QDF_STATUS_SUCCESS;
}
11584 
/* Stub: WLAN_SYSFS_DP_STATS disabled, nothing to set up */
static
QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl)
{
	return QDF_STATUS_SUCCESS;
}
11590 #endif /* WLAN_SYSFS_DP_STATS */
11591 
11592 /**
11593  * dp_txrx_clear_dump_stats() - clear dumpStats
11594  * @soc- soc handle
11595  * @value - stats option
11596  *
11597  * Return: 0 - Success, non-zero - failure
11598  */
11599 static
11600 QDF_STATUS dp_txrx_clear_dump_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
11601 				    uint8_t value)
11602 {
11603 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11604 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11605 
11606 	if (!soc) {
11607 		dp_err("soc is NULL");
11608 		return QDF_STATUS_E_INVAL;
11609 	}
11610 
11611 	switch (value) {
11612 	case CDP_TXRX_TSO_STATS:
11613 		dp_txrx_clear_tso_stats(soc);
11614 		break;
11615 
11616 	case CDP_DP_TX_HW_LATENCY_STATS:
11617 		dp_pdev_clear_tx_delay_stats(soc);
11618 		break;
11619 
11620 	default:
11621 		status = QDF_STATUS_E_INVAL;
11622 		break;
11623 	}
11624 
11625 	return status;
11626 }
11627 
11628 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
11629 /**
11630  * dp_update_flow_control_parameters() - API to store datapath
11631  *                            config parameters
11632  * @soc: soc handle
11633  * @cfg: ini parameter handle
11634  *
11635  * Return: void
11636  */
11637 static inline
11638 void dp_update_flow_control_parameters(struct dp_soc *soc,
11639 				struct cdp_config_params *params)
11640 {
11641 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
11642 					params->tx_flow_stop_queue_threshold;
11643 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
11644 					params->tx_flow_start_queue_offset;
11645 }
11646 #else
/* Stub: QCA_LL_TX_FLOW_CONTROL_V2 disabled, no parameters to store */
static inline
void dp_update_flow_control_parameters(struct dp_soc *soc,
				struct cdp_config_params *params)
{
}
11652 #endif
11653 
11654 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
11655 /* Max packet limit for TX Comp packet loop (dp_tx_comp_handler) */
11656 #define DP_TX_COMP_LOOP_PKT_LIMIT_MAX 1024
11657 
11658 /* Max packet limit for RX REAP Loop (dp_rx_process) */
11659 #define DP_RX_REAP_LOOP_PKT_LIMIT_MAX 1024
11660 
11661 static
11662 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
11663 					struct cdp_config_params *params)
11664 {
11665 	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit =
11666 				params->tx_comp_loop_pkt_limit;
11667 
11668 	if (params->tx_comp_loop_pkt_limit < DP_TX_COMP_LOOP_PKT_LIMIT_MAX)
11669 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = true;
11670 	else
11671 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = false;
11672 
11673 	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit =
11674 				params->rx_reap_loop_pkt_limit;
11675 
11676 	if (params->rx_reap_loop_pkt_limit < DP_RX_REAP_LOOP_PKT_LIMIT_MAX)
11677 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = true;
11678 	else
11679 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = false;
11680 
11681 	soc->wlan_cfg_ctx->rx_hp_oos_update_limit =
11682 				params->rx_hp_oos_update_limit;
11683 
11684 	dp_info("tx_comp_loop_pkt_limit %u tx_comp_enable_eol_data_check %u rx_reap_loop_pkt_limit %u rx_enable_eol_data_check %u rx_hp_oos_update_limit %u",
11685 		soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit,
11686 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check,
11687 		soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit,
11688 		soc->wlan_cfg_ctx->rx_enable_eol_data_check,
11689 		soc->wlan_cfg_ctx->rx_hp_oos_update_limit);
11690 }
11691 
11692 static void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
11693 				      uint32_t rx_limit)
11694 {
11695 	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit = tx_limit;
11696 	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit = rx_limit;
11697 }
11698 
11699 #else
/* Stub: WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT disabled, nothing to store */
static inline
void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
					struct cdp_config_params *params)
{ }
11704 
/* Stub: WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT disabled, limits fixed */
static inline
void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
			       uint32_t rx_limit)
{
}
11710 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
11711 
11712 /**
11713  * dp_update_config_parameters() - API to store datapath
11714  *                            config parameters
11715  * @soc: soc handle
11716  * @cfg: ini parameter handle
11717  *
11718  * Return: status
11719  */
11720 static
11721 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
11722 				struct cdp_config_params *params)
11723 {
11724 	struct dp_soc *soc = (struct dp_soc *)psoc;
11725 
11726 	if (!(soc)) {
11727 		dp_cdp_err("%pK: Invalid handle", soc);
11728 		return QDF_STATUS_E_INVAL;
11729 	}
11730 
11731 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
11732 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
11733 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
11734 	soc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload =
11735 				params->p2p_tcp_udp_checksumoffload;
11736 	soc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload =
11737 				params->nan_tcp_udp_checksumoffload;
11738 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
11739 				params->tcp_udp_checksumoffload;
11740 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
11741 	soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
11742 	soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
11743 
11744 	dp_update_rx_soft_irq_limit_params(soc, params);
11745 	dp_update_flow_control_parameters(soc, params);
11746 
11747 	return QDF_STATUS_SUCCESS;
11748 }
11749 
/* WDS (wireless distribution system) ops table exported via CDP */
static struct cdp_wds_ops dp_ops_wds = {
	.vdev_set_wds = dp_vdev_set_wds,
#ifdef WDS_VENDOR_EXTENSION
	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
#endif
};
11757 
11758 /*
11759  * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
11760  * @soc_hdl - datapath soc handle
11761  * @vdev_id - virtual interface id
11762  * @callback - callback function
11763  * @ctxt: callback context
11764  *
11765  */
11766 static void
11767 dp_txrx_data_tx_cb_set(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
11768 		       ol_txrx_data_tx_cb callback, void *ctxt)
11769 {
11770 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11771 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11772 						     DP_MOD_ID_CDP);
11773 
11774 	if (!vdev)
11775 		return;
11776 
11777 	vdev->tx_non_std_data_callback.func = callback;
11778 	vdev->tx_non_std_data_callback.ctxt = ctxt;
11779 
11780 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11781 }
11782 
11783 /**
11784  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
11785  * @soc: datapath soc handle
11786  * @pdev_id: id of datapath pdev handle
11787  *
11788  * Return: opaque pointer to dp txrx handle
11789  */
11790 static void *dp_pdev_get_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id)
11791 {
11792 	struct dp_pdev *pdev =
11793 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11794 						   pdev_id);
11795 	if (qdf_unlikely(!pdev))
11796 		return NULL;
11797 
11798 	return pdev->dp_txrx_handle;
11799 }
11800 
11801 /**
11802  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
11803  * @soc: datapath soc handle
11804  * @pdev_id: id of datapath pdev handle
11805  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
11806  *
11807  * Return: void
11808  */
11809 static void
11810 dp_pdev_set_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id,
11811 			   void *dp_txrx_hdl)
11812 {
11813 	struct dp_pdev *pdev =
11814 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11815 						   pdev_id);
11816 
11817 	if (!pdev)
11818 		return;
11819 
11820 	pdev->dp_txrx_handle = dp_txrx_hdl;
11821 }
11822 
11823 /**
11824  * dp_vdev_get_dp_ext_handle() - get dp handle from vdev
11825  * @soc: datapath soc handle
11826  * @vdev_id: vdev id
11827  *
11828  * Return: opaque pointer to dp txrx handle
11829  */
11830 static void *dp_vdev_get_dp_ext_handle(ol_txrx_soc_handle soc_hdl,
11831 				       uint8_t vdev_id)
11832 {
11833 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11834 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11835 						     DP_MOD_ID_CDP);
11836 	void *dp_ext_handle;
11837 
11838 	if (!vdev)
11839 		return NULL;
11840 	dp_ext_handle = vdev->vdev_dp_ext_handle;
11841 
11842 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11843 	return dp_ext_handle;
11844 }
11845 
/**
 * dp_vdev_set_dp_ext_handle() - set dp handle in vdev
 * @soc_hdl: datapath soc handle
 * @vdev_id: vdev id
 * @size: size of advance dp handle
 *
 * Allocates a buffer of @size bytes and stores it as the vdev's
 * extension handle.
 *
 * NOTE(review): an existing vdev_dp_ext_handle is overwritten without
 * being freed — confirm callers set this at most once per vdev.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_vdev_set_dp_ext_handle(ol_txrx_soc_handle soc_hdl, uint8_t vdev_id,
			  uint16_t size)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);
	void *dp_ext_handle;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	dp_ext_handle = qdf_mem_malloc(size);

	if (!dp_ext_handle) {
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_E_FAILURE;
	}

	vdev->vdev_dp_ext_handle = dp_ext_handle;

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}
11878 
11879 /**
11880  * dp_vdev_inform_ll_conn() - Inform vdev to add/delete a latency critical
11881  *			      connection for this vdev
11882  * @soc_hdl: CDP soc handle
11883  * @vdev_id: vdev ID
11884  * @action: Add/Delete action
11885  *
11886  * Returns: QDF_STATUS.
11887  */
11888 static QDF_STATUS
11889 dp_vdev_inform_ll_conn(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
11890 		       enum vdev_ll_conn_actions action)
11891 {
11892 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11893 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11894 						     DP_MOD_ID_CDP);
11895 
11896 	if (!vdev) {
11897 		dp_err("LL connection action for invalid vdev %d", vdev_id);
11898 		return QDF_STATUS_E_FAILURE;
11899 	}
11900 
11901 	switch (action) {
11902 	case CDP_VDEV_LL_CONN_ADD:
11903 		vdev->num_latency_critical_conn++;
11904 		break;
11905 
11906 	case CDP_VDEV_LL_CONN_DEL:
11907 		vdev->num_latency_critical_conn--;
11908 		break;
11909 
11910 	default:
11911 		dp_err("LL connection action invalid %d", action);
11912 		break;
11913 	}
11914 
11915 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11916 	return QDF_STATUS_SUCCESS;
11917 }
11918 
11919 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
11920 /**
11921  * dp_soc_set_swlm_enable() - Enable/Disable SWLM if initialized.
11922  * @soc_hdl: CDP Soc handle
11923  * @value: Enable/Disable value
11924  *
11925  * Returns: QDF_STATUS
11926  */
11927 static QDF_STATUS dp_soc_set_swlm_enable(struct cdp_soc_t *soc_hdl,
11928 					 uint8_t value)
11929 {
11930 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11931 
11932 	if (!soc->swlm.is_init) {
11933 		dp_err("SWLM is not initialized");
11934 		return QDF_STATUS_E_FAILURE;
11935 	}
11936 
11937 	soc->swlm.is_enabled = !!value;
11938 
11939 	return QDF_STATUS_SUCCESS;
11940 }
11941 
11942 /**
11943  * dp_soc_is_swlm_enabled() - Check if SWLM is enabled.
11944  * @soc_hdl: CDP Soc handle
11945  *
11946  * Returns: QDF_STATUS
11947  */
11948 static uint8_t dp_soc_is_swlm_enabled(struct cdp_soc_t *soc_hdl)
11949 {
11950 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11951 
11952 	return soc->swlm.is_enabled;
11953 }
11954 #endif
11955 
/**
 * dp_display_srng_info() - Dump the srng HP TP info
 * @soc_hdl: CDP Soc handle
 *
 * This function dumps the SW hp/tp values for the important rings.
 * HW hp/tp values are not being dumped, since it can lead to
 * READ NOC error when UMAC is in low power state. MCC does not have
 * device force wake working yet.
 *
 * Return: none
 */
static void dp_display_srng_info(struct cdp_soc_t *soc_hdl)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	hal_soc_handle_t hal_soc = soc->hal_soc;
	uint32_t hp, tp, i;

	dp_info("SRNG HP-TP data:");
	/* TCL data rings and their paired WBM TX completion rings */
	for (i = 0; i < soc->num_tcl_data_rings; i++) {
		hal_get_sw_hptp(hal_soc, soc->tcl_data_ring[i].hal_srng,
				&tp, &hp);
		dp_info("TCL DATA ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);

		/* skip indices with no valid WBM companion ring */
		if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, i) ==
		    INVALID_WBM_RING_NUM)
			continue;

		hal_get_sw_hptp(hal_soc, soc->tx_comp_ring[i].hal_srng,
				&tp, &hp);
		dp_info("TX comp ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
	}

	/* REO destination (RX) rings */
	for (i = 0; i < soc->num_reo_dest_rings; i++) {
		hal_get_sw_hptp(hal_soc, soc->reo_dest_ring[i].hal_srng,
				&tp, &hp);
		dp_info("REO DST ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
	}

	hal_get_sw_hptp(hal_soc, soc->reo_exception_ring.hal_srng, &tp, &hp);
	dp_info("REO exception ring: hp=0x%x, tp=0x%x", hp, tp);

	hal_get_sw_hptp(hal_soc, soc->rx_rel_ring.hal_srng, &tp, &hp);
	dp_info("WBM RX release ring: hp=0x%x, tp=0x%x", hp, tp);

	hal_get_sw_hptp(hal_soc, soc->wbm_desc_rel_ring.hal_srng, &tp, &hp);
	dp_info("WBM desc release ring: hp=0x%x, tp=0x%x", hp, tp);
}
12003 
12004 /**
12005  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
12006  * @soc_handle: datapath soc handle
12007  *
12008  * Return: opaque pointer to external dp (non-core DP)
12009  */
12010 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
12011 {
12012 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12013 
12014 	return soc->external_txrx_handle;
12015 }
12016 
12017 /**
12018  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
12019  * @soc_handle: datapath soc handle
12020  * @txrx_handle: opaque pointer to external dp (non-core DP)
12021  *
12022  * Return: void
12023  */
12024 static void
12025 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
12026 {
12027 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12028 
12029 	soc->external_txrx_handle = txrx_handle;
12030 }
12031 
12032 /**
12033  * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping
12034  * @soc_hdl: datapath soc handle
12035  * @pdev_id: id of the datapath pdev handle
12036  * @lmac_id: lmac id
12037  *
12038  * Return: QDF_STATUS
12039  */
12040 static QDF_STATUS
12041 dp_soc_map_pdev_to_lmac
12042 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12043 	 uint32_t lmac_id)
12044 {
12045 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12046 
12047 	wlan_cfg_set_hw_mac_idx(soc->wlan_cfg_ctx,
12048 				pdev_id,
12049 				lmac_id);
12050 
12051 	/*Set host PDEV ID for lmac_id*/
12052 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
12053 			      pdev_id,
12054 			      lmac_id);
12055 
12056 	return QDF_STATUS_SUCCESS;
12057 }
12058 
/**
 * dp_soc_handle_pdev_mode_change() - Update pdev to lmac mapping
 * @soc_hdl: datapath soc handle
 * @pdev_id: id of the datapath pdev handle
 * @lmac_id: lmac id
 *
 * In the event of a dynamic mode change, update the pdev to lmac mapping
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_soc_handle_pdev_mode_change
	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
	 uint32_t lmac_id)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_vdev *vdev = NULL;
	uint8_t hw_pdev_id, mac_id;
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc,
								  pdev_id);
	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);

	if (qdf_unlikely(!pdev))
		return QDF_STATUS_E_FAILURE;

	/* Refresh the pdev's lmac id and derived target pdev id */
	pdev->lmac_id = lmac_id;
	pdev->target_pdev_id =
		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
	dp_info(" mode change %d %d\n", pdev->pdev_id, pdev->lmac_id);

	/*Set host PDEV ID for lmac_id*/
	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
			      pdev->pdev_id,
			      lmac_id);

	hw_pdev_id =
		dp_get_target_pdev_id_for_host_pdev_id(soc,
						       pdev->pdev_id);

	/*
	 * When NSS offload is enabled, send pdev_id->lmac_id
	 * and pdev_id to hw_pdev_id to NSS FW
	 */
	if (nss_config) {
		mac_id = pdev->lmac_id;
		if (soc->cdp_soc.ol_ops->pdev_update_lmac_n_target_pdev_id)
			soc->cdp_soc.ol_ops->
				pdev_update_lmac_n_target_pdev_id(
				soc->ctrl_psoc,
				&pdev_id, &mac_id, &hw_pdev_id);
	}

	/* Propagate the new hw pdev id and lmac id to every vdev on this
	 * pdev; vdev_list_lock protects the vdev list walk.
	 */
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
		DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
					       hw_pdev_id);
		vdev->lmac_id = pdev->lmac_id;
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);

	return QDF_STATUS_SUCCESS;
}
12121 
12122 /**
12123  * dp_soc_set_pdev_status_down() - set pdev down/up status
12124  * @soc: datapath soc handle
12125  * @pdev_id: id of datapath pdev handle
12126  * @is_pdev_down: pdev down/up status
12127  *
12128  * Return: QDF_STATUS
12129  */
12130 static QDF_STATUS
12131 dp_soc_set_pdev_status_down(struct cdp_soc_t *soc, uint8_t pdev_id,
12132 			    bool is_pdev_down)
12133 {
12134 	struct dp_pdev *pdev =
12135 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
12136 						   pdev_id);
12137 	if (!pdev)
12138 		return QDF_STATUS_E_FAILURE;
12139 
12140 	pdev->is_pdev_down = is_pdev_down;
12141 	return QDF_STATUS_SUCCESS;
12142 }
12143 
12144 /**
12145  * dp_get_cfg_capabilities() - get dp capabilities
12146  * @soc_handle: datapath soc handle
12147  * @dp_caps: enum for dp capabilities
12148  *
12149  * Return: bool to determine if dp caps is enabled
12150  */
12151 static bool
12152 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
12153 			enum cdp_capabilities dp_caps)
12154 {
12155 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12156 
12157 	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
12158 }
12159 
12160 #ifdef FEATURE_AST
12161 static QDF_STATUS
12162 dp_peer_teardown_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
12163 		       uint8_t *peer_mac)
12164 {
12165 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12166 	QDF_STATUS status = QDF_STATUS_SUCCESS;
12167 	struct dp_peer *peer =
12168 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
12169 					       DP_MOD_ID_CDP);
12170 
12171 	/* Peer can be null for monitor vap mac address */
12172 	if (!peer) {
12173 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
12174 			  "%s: Invalid peer\n", __func__);
12175 		return QDF_STATUS_E_FAILURE;
12176 	}
12177 
12178 	dp_peer_update_state(soc, peer, DP_PEER_STATE_LOGICAL_DELETE);
12179 
12180 	qdf_spin_lock_bh(&soc->ast_lock);
12181 	dp_peer_delete_ast_entries(soc, peer);
12182 	qdf_spin_unlock_bh(&soc->ast_lock);
12183 
12184 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
12185 	return status;
12186 }
12187 #endif
12188 
12189 #ifndef WLAN_SUPPORT_RX_TAG_STATISTICS
/**
 * dp_dump_pdev_rx_protocol_tag_stats() - dump the number of packets tagged for
 * given protocol type (RX_PROTOCOL_TAG_ALL indicates for all protocol)
 * @soc: cdp_soc handle
 * @pdev_id: id of cdp_pdev handle
 * @protocol_type: protocol type for which stats should be displayed
 *
 * Return: none
 */
static inline void
dp_dump_pdev_rx_protocol_tag_stats(struct cdp_soc_t  *soc, uint8_t pdev_id,
				   uint16_t protocol_type)
{
	/* no-op stub: WLAN_SUPPORT_RX_TAG_STATISTICS is not compiled in */
}
12204 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
12205 
12206 #ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
/**
 * dp_update_pdev_rx_protocol_tag() - Add/remove a protocol tag that should be
 * applied to the desired protocol type packets
 * @soc: soc handle
 * @pdev_id: id of cdp_pdev handle
 * @enable_rx_protocol_tag: bitmask that indicates what protocol types
 * are enabled for tagging. zero indicates disable feature, non-zero indicates
 * enable feature
 * @protocol_type: new protocol type for which the tag is being added
 * @tag: user configured tag for the new protocol
 *
 * Return: Success
 */
static inline QDF_STATUS
dp_update_pdev_rx_protocol_tag(struct cdp_soc_t  *soc, uint8_t pdev_id,
			       uint32_t enable_rx_protocol_tag,
			       uint16_t protocol_type,
			       uint16_t tag)
{
	/* no-op stub: WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG is not compiled in */
	return QDF_STATUS_SUCCESS;
}
12228 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
12229 
12230 #ifndef WLAN_SUPPORT_RX_FLOW_TAG
/**
 * dp_set_rx_flow_tag() - add/delete a flow
 * @cdp_soc: soc handle
 * @pdev_id: id of cdp_pdev handle
 * @flow_info: flow tuple that is to be added to/deleted from flow search table
 *
 * Return: Success
 */
static inline QDF_STATUS
dp_set_rx_flow_tag(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
		   struct cdp_rx_flow_info *flow_info)
{
	/* no-op stub: WLAN_SUPPORT_RX_FLOW_TAG is not compiled in */
	return QDF_STATUS_SUCCESS;
}
/**
 * dp_dump_rx_flow_tag_stats() - dump the number of packets tagged for
 * given flow 5-tuple
 * @cdp_soc: soc handle
 * @pdev_id: id of cdp_pdev handle
 * @flow_info: flow 5-tuple for which stats should be displayed
 *
 * Return: Success
 */
static inline QDF_STATUS
dp_dump_rx_flow_tag_stats(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
			  struct cdp_rx_flow_info *flow_info)
{
	/* no-op stub: WLAN_SUPPORT_RX_FLOW_TAG is not compiled in */
	return QDF_STATUS_SUCCESS;
}
12260 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
12261 
12262 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
12263 					   uint32_t max_peers,
12264 					   uint32_t max_ast_index,
12265 					   uint8_t peer_map_unmap_versions)
12266 {
12267 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12268 	QDF_STATUS status;
12269 
12270 	soc->max_peers = max_peers;
12271 
12272 	wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);
12273 
12274 	status = soc->arch_ops.txrx_peer_map_attach(soc);
12275 	if (!QDF_IS_STATUS_SUCCESS(status)) {
12276 		dp_err("failure in allocating peer tables");
12277 		return QDF_STATUS_E_FAILURE;
12278 	}
12279 
12280 	dp_info("max_peers %u, calculated max_peers %u max_ast_index: %u\n",
12281 		max_peers, soc->max_peer_id, max_ast_index);
12282 
12283 	status = dp_peer_find_attach(soc);
12284 	if (!QDF_IS_STATUS_SUCCESS(status)) {
12285 		dp_err("Peer find attach failure");
12286 		goto fail;
12287 	}
12288 
12289 	soc->peer_map_unmap_versions = peer_map_unmap_versions;
12290 	soc->peer_map_attach_success = TRUE;
12291 
12292 	return QDF_STATUS_SUCCESS;
12293 fail:
12294 	soc->arch_ops.txrx_peer_map_detach(soc);
12295 
12296 	return status;
12297 }
12298 
/**
 * dp_soc_set_param() - set a soc-level datapath parameter
 * @soc_hdl: datapath soc handle
 * @param: identifier of the parameter to update
 * @value: value to apply
 *
 * Return: QDF_STATUS_SUCCESS always; unrecognized params are only logged
 */
static QDF_STATUS dp_soc_set_param(struct cdp_soc_t  *soc_hdl,
				   enum cdp_soc_param_t param,
				   uint32_t value)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	switch (param) {
	case DP_SOC_PARAM_MSDU_EXCEPTION_DESC:
		soc->num_msdu_exception_desc = value;
		dp_info("num_msdu exception_desc %u",
			value);
		break;
	case DP_SOC_PARAM_CMEM_FSE_SUPPORT:
		/* Honored only when the cfg allows FST in CMEM */
		if (wlan_cfg_is_fst_in_cmem_enabled(soc->wlan_cfg_ctx))
			soc->fst_in_cmem = !!value;
		dp_info("FW supports CMEM FSE %u", value);
		break;
	case DP_SOC_PARAM_MAX_AST_AGEOUT:
		soc->max_ast_ageout_count = value;
		dp_info("Max ast ageout count %u", soc->max_ast_ageout_count);
		break;
	case DP_SOC_PARAM_EAPOL_OVER_CONTROL_PORT:
		soc->eapol_over_control_port = value;
		dp_info("Eapol over control_port:%d",
			soc->eapol_over_control_port);
		break;
	case DP_SOC_PARAM_MULTI_PEER_GRP_CMD_SUPPORT:
		soc->multi_peer_grp_cmd_supported = value;
		dp_info("Multi Peer group command support:%d",
			soc->multi_peer_grp_cmd_supported);
		break;
	case DP_SOC_PARAM_RSSI_DBM_CONV_SUPPORT:
		soc->features.rssi_dbm_conv_support = value;
		dp_info("Rssi dbm converstion support:%u",
			soc->features.rssi_dbm_conv_support);
		break;
	case DP_SOC_PARAM_UMAC_HW_RESET_SUPPORT:
		soc->features.umac_hw_reset_support = value;
		dp_info("UMAC HW reset support :%u",
			soc->features.umac_hw_reset_support);
		break;
	default:
		dp_info("not handled param %d ", param);
		break;
	}

	return QDF_STATUS_SUCCESS;
}
12347 
12348 static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
12349 				      void *stats_ctx)
12350 {
12351 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12352 
12353 	soc->rate_stats_ctx = (struct cdp_soc_rate_stats_ctx *)stats_ctx;
12354 }
12355 
12356 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
12357 /**
12358  * dp_peer_flush_rate_stats_req(): Flush peer rate stats
12359  * @soc: Datapath SOC handle
12360  * @peer: Datapath peer
12361  * @arg: argument to iter function
12362  *
12363  * Return: QDF_STATUS
12364  */
12365 static void
12366 dp_peer_flush_rate_stats_req(struct dp_soc *soc, struct dp_peer *peer,
12367 			     void *arg)
12368 {
12369 	if (peer->bss_peer)
12370 		return;
12371 
12372 	dp_wdi_event_handler(
12373 		WDI_EVENT_FLUSH_RATE_STATS_REQ,
12374 		soc, dp_monitor_peer_get_peerstats_ctx(soc, peer),
12375 		peer->peer_id,
12376 		WDI_NO_VAL, peer->vdev->pdev->pdev_id);
12377 }
12378 
12379 /**
12380  * dp_flush_rate_stats_req(): Flush peer rate stats in pdev
12381  * @soc_hdl: Datapath SOC handle
12382  * @pdev_id: pdev_id
12383  *
12384  * Return: QDF_STATUS
12385  */
12386 static QDF_STATUS dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
12387 					  uint8_t pdev_id)
12388 {
12389 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12390 	struct dp_pdev *pdev =
12391 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
12392 						   pdev_id);
12393 	if (!pdev)
12394 		return QDF_STATUS_E_FAILURE;
12395 
12396 	dp_pdev_iterate_peer(pdev, dp_peer_flush_rate_stats_req, NULL,
12397 			     DP_MOD_ID_CDP);
12398 
12399 	return QDF_STATUS_SUCCESS;
12400 }
12401 #else
/* Stub when FEATURE_PERPKT_INFO/WDI_EVENT_ENABLE is off: nothing to flush */
static inline QDF_STATUS
dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
			uint8_t pdev_id)
{
	return QDF_STATUS_SUCCESS;
}
12408 #endif
12409 
12410 static void *dp_peer_get_peerstats_ctx(struct cdp_soc_t *soc_hdl,
12411 				       uint8_t vdev_id,
12412 				       uint8_t *mac_addr)
12413 {
12414 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12415 	struct dp_peer *peer;
12416 	void *peerstats_ctx = NULL;
12417 
12418 	if (mac_addr) {
12419 		peer = dp_peer_find_hash_find(soc, mac_addr,
12420 					      0, vdev_id,
12421 					      DP_MOD_ID_CDP);
12422 		if (!peer)
12423 			return NULL;
12424 
12425 		if (!IS_MLO_DP_MLD_PEER(peer))
12426 			peerstats_ctx = dp_monitor_peer_get_peerstats_ctx(soc,
12427 									  peer);
12428 
12429 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
12430 	}
12431 
12432 	return peerstats_ctx;
12433 }
12434 
12435 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
12436 static QDF_STATUS dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
12437 					   uint8_t pdev_id,
12438 					   void *buf)
12439 {
12440 	 dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS,
12441 			      (struct dp_soc *)soc, buf, HTT_INVALID_PEER,
12442 			      WDI_NO_VAL, pdev_id);
12443 	return QDF_STATUS_SUCCESS;
12444 }
12445 #else
/* Stub when FEATURE_PERPKT_INFO/WDI_EVENT_ENABLE is off: event not raised */
static inline QDF_STATUS
dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
			 uint8_t pdev_id,
			 void *buf)
{
	return QDF_STATUS_SUCCESS;
}
12453 #endif
12454 
12455 static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle)
12456 {
12457 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12458 
12459 	return soc->rate_stats_ctx;
12460 }
12461 
/**
 * dp_get_cfg() - get dp cfg
 * @soc: cdp soc handle
 * @cfg: cfg enum
 *
 * Return: cfg value read from the soc wlan_cfg context; 0 for unknown enums
 */
static uint32_t dp_get_cfg(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg)
{
	struct dp_soc *dpsoc = (struct dp_soc *)soc;
	uint32_t value = 0;

	switch (cfg) {
	case cfg_dp_enable_data_stall:
		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
		break;
	case cfg_dp_enable_p2p_ip_tcp_udp_checksum_offload:
		value = dpsoc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload;
		break;
	case cfg_dp_enable_nan_ip_tcp_udp_checksum_offload:
		value = dpsoc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload;
		break;
	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
		break;
	case cfg_dp_disable_legacy_mode_csum_offload:
		value = dpsoc->wlan_cfg_ctx->
					legacy_mode_checksumoffload_disable;
		break;
	case cfg_dp_tso_enable:
		value = dpsoc->wlan_cfg_ctx->tso_enabled;
		break;
	case cfg_dp_lro_enable:
		value = dpsoc->wlan_cfg_ctx->lro_enabled;
		break;
	case cfg_dp_gro_enable:
		value = dpsoc->wlan_cfg_ctx->gro_enabled;
		break;
	case cfg_dp_tc_based_dyn_gro_enable:
		value = dpsoc->wlan_cfg_ctx->tc_based_dynamic_gro;
		break;
	case cfg_dp_tc_ingress_prio:
		value = dpsoc->wlan_cfg_ctx->tc_ingress_prio;
		break;
	case cfg_dp_sg_enable:
		value = dpsoc->wlan_cfg_ctx->sg_enabled;
		break;
	case cfg_dp_tx_flow_start_queue_offset:
		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
		break;
	case cfg_dp_tx_flow_stop_queue_threshold:
		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
		break;
	case cfg_dp_disable_intra_bss_fwd:
		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
		break;
	case cfg_dp_pktlog_buffer_size:
		value = dpsoc->wlan_cfg_ctx->pktlog_buffer_size;
		break;
	case cfg_dp_wow_check_rx_pending:
		value = dpsoc->wlan_cfg_ctx->wow_check_rx_pending_enable;
		break;
	default:
		value =  0;
	}

	return value;
}
12530 
12531 #ifdef PEER_FLOW_CONTROL
/**
 * dp_tx_flow_ctrl_configure_pdev() - Configure flow control params
 * @soc_handle: datapath soc handle
 * @pdev_id: id of datapath pdev handle
 * @param: ol ath params
 * @value: value of the flag
 * @buff: Buffer to be passed
 *
 * Implemented this function same as legacy function. In legacy code, single
 * function is used to display stats and update pdev params.
 *
 * Return: 0 for success. nonzero for failure.
 */
static uint32_t dp_tx_flow_ctrl_configure_pdev(struct cdp_soc_t *soc_handle,
					       uint8_t pdev_id,
					       enum _dp_param_t param,
					       uint32_t value, void *buff)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (qdf_unlikely(!pdev))
		return 1;

	soc = pdev->soc;
	if (!soc)
		return 1;

	switch (param) {
#ifdef QCA_ENH_V3_STATS_SUPPORT
	case DP_PARAM_VIDEO_DELAY_STATS_FC:
		if (value)
			pdev->delay_stats_flag = true;
		else
			pdev->delay_stats_flag = false;
		break;
	case DP_PARAM_VIDEO_STATS_FC:
		qdf_print("------- TID Stats ------\n");
		dp_pdev_print_tid_stats(pdev);
		qdf_print("------ Delay Stats ------\n");
		dp_pdev_print_delay_stats(pdev);
		qdf_print("------ Rx Error Stats ------\n");
		dp_pdev_print_rx_error_stats(pdev);
		break;
#endif
	case DP_PARAM_TOTAL_Q_SIZE:
		{
			uint32_t tx_min, tx_max;

			tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx);
			tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);

			/* NULL buff means "set" (range-checked); non-NULL
			 * buff means "get": current value is written back.
			 */
			if (!buff) {
				if ((value >= tx_min) && (value <= tx_max)) {
					pdev->num_tx_allowed = value;
				} else {
					dp_tx_info("%pK: Failed to update num_tx_allowed, Q_min = %d Q_max = %d",
						   soc, tx_min, tx_max);
					break;
				}
			} else {
				*(int *)buff = pdev->num_tx_allowed;
			}
		}
		break;
	default:
		dp_tx_info("%pK: not handled param %d ", soc, param);
		break;
	}

	return 0;
}
12606 #endif
12607 
12608 /**
12609  * dp_set_pdev_pcp_tid_map_wifi3(): update pcp tid map in pdev
12610  * @psoc: dp soc handle
12611  * @pdev_id: id of DP_PDEV handle
12612  * @pcp: pcp value
12613  * @tid: tid value passed by the user
12614  *
12615  * Return: QDF_STATUS_SUCCESS on success
12616  */
12617 static QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
12618 						uint8_t pdev_id,
12619 						uint8_t pcp, uint8_t tid)
12620 {
12621 	struct dp_soc *soc = (struct dp_soc *)psoc;
12622 
12623 	soc->pcp_tid_map[pcp] = tid;
12624 
12625 	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
12626 	return QDF_STATUS_SUCCESS;
12627 }
12628 
12629 /**
12630  * dp_set_vdev_pcp_tid_map_wifi3(): update pcp tid map in vdev
12631  * @soc: DP soc handle
12632  * @vdev_id: id of DP_VDEV handle
12633  * @pcp: pcp value
12634  * @tid: tid value passed by the user
12635  *
12636  * Return: QDF_STATUS_SUCCESS on success
12637  */
12638 static QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc_hdl,
12639 						uint8_t vdev_id,
12640 						uint8_t pcp, uint8_t tid)
12641 {
12642 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12643 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12644 						     DP_MOD_ID_CDP);
12645 
12646 	if (!vdev)
12647 		return QDF_STATUS_E_FAILURE;
12648 
12649 	vdev->pcp_tid_map[pcp] = tid;
12650 
12651 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12652 	return QDF_STATUS_SUCCESS;
12653 }
12654 
12655 #if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
/**
 * dp_drain_txrx() - service all interrupt contexts once to drain pending
 * TX completions and RX packets from the SRNGs
 * @soc_handle: datapath soc handle
 *
 * Return: none
 */
static void dp_drain_txrx(struct cdp_soc_t *soc_handle)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	uint32_t cur_tx_limit, cur_rx_limit;
	uint32_t budget = 0xffff;
	uint32_t val;
	int i;

	/* Save current soft-irq reap limits so they can be restored */
	cur_tx_limit = soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit;
	cur_rx_limit = soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit;

	/* Temporarily increase soft irq limits when going to drain
	 * the UMAC/LMAC SRNGs and restore them after polling.
	 * Though the budget is on higher side, the TX/RX reaping loops
	 * will not execute longer as both TX and RX would be suspended
	 * by the time this API is called.
	 */
	dp_update_soft_irq_limits(soc, budget, budget);

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
		dp_service_srngs(&soc->intr_ctx[i], budget);

	dp_update_soft_irq_limits(soc, cur_tx_limit, cur_rx_limit);

	/* Do a dummy read at offset 0; this will ensure all
	 * pendings writes(HP/TP) are flushed before read returns.
	 */
	val = HAL_REG_READ((struct hal_soc *)soc->hal_soc, 0);
	dp_debug("Register value at offset 0: %u\n", val);
}
12686 #endif
12687 
12688 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
12689 static void
12690 dp_set_pkt_capture_mode(struct cdp_soc_t *soc_handle, bool val)
12691 {
12692 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12693 
12694 	soc->wlan_cfg_ctx->pkt_capture_mode = val;
12695 }
12696 #endif
12697 
12698 #ifdef HW_TX_DELAY_STATS_ENABLE
12699 /**
12700  * dp_enable_disable_vdev_tx_delay_stats(): Start/Stop tx delay stats capture
12701  * @soc: DP soc handle
12702  * @vdev_id: vdev id
12703  * @value: value
12704  *
12705  * Return: None
12706  */
12707 static void
12708 dp_enable_disable_vdev_tx_delay_stats(struct cdp_soc_t *soc_hdl,
12709 				      uint8_t vdev_id,
12710 				      uint8_t value)
12711 {
12712 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12713 	struct dp_vdev *vdev = NULL;
12714 
12715 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
12716 	if (!vdev)
12717 		return;
12718 
12719 	vdev->hw_tx_delay_stats_enabled = value;
12720 
12721 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12722 }
12723 
12724 /**
12725  * dp_check_vdev_tx_delay_stats_enabled() - check the feature is enabled or not
12726  * @soc: DP soc handle
12727  * @vdev_id: vdev id
12728  *
12729  * Returns: 1 if enabled, 0 if disabled
12730  */
12731 static uint8_t
12732 dp_check_vdev_tx_delay_stats_enabled(struct cdp_soc_t *soc_hdl,
12733 				     uint8_t vdev_id)
12734 {
12735 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12736 	struct dp_vdev *vdev;
12737 	uint8_t ret_val = 0;
12738 
12739 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
12740 	if (!vdev)
12741 		return ret_val;
12742 
12743 	ret_val = vdev->hw_tx_delay_stats_enabled;
12744 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12745 
12746 	return ret_val;
12747 }
12748 #endif
12749 
12750 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
12751 static void
12752 dp_recovery_vdev_flush_peers(struct cdp_soc_t *cdp_soc,
12753 			     uint8_t vdev_id,
12754 			     bool mlo_peers_only)
12755 {
12756 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
12757 	struct dp_vdev *vdev;
12758 
12759 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
12760 
12761 	if (!vdev)
12762 		return;
12763 
12764 	dp_vdev_flush_peers((struct cdp_vdev *)vdev, false, mlo_peers_only);
12765 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12766 }
12767 #endif
12768 
/* Common CDP ops table: entry points exported to the converged driver
 * interface for soc/pdev/vdev/peer lifecycle, BA session handling, stats
 * and misc soc configuration.
 */
static struct cdp_cmn_ops dp_ops_cmn = {
	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
	.txrx_vdev_attach = dp_vdev_attach_wifi3,
	.txrx_vdev_detach = dp_vdev_detach_wifi3,
	.txrx_pdev_attach = dp_pdev_attach_wifi3,
	.txrx_pdev_post_attach = dp_pdev_post_attach_wifi3,
	.txrx_pdev_detach = dp_pdev_detach_wifi3,
	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
	.txrx_peer_create = dp_peer_create_wifi3,
	.txrx_peer_setup = dp_peer_setup_wifi3,
#ifdef FEATURE_AST
	.txrx_peer_teardown = dp_peer_teardown_wifi3,
#else
	.txrx_peer_teardown = NULL,
#endif
	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
	.txrx_peer_get_ast_info_by_pdev =
		dp_peer_get_ast_info_by_pdevid_wifi3,
	.txrx_peer_ast_delete_by_soc =
		dp_peer_ast_entry_del_by_soc,
	.txrx_peer_ast_delete_by_pdev =
		dp_peer_ast_entry_del_by_pdev,
	.txrx_peer_delete = dp_peer_delete_wifi3,
#ifdef DP_RX_UDP_OVER_PEER_ROAM
	.txrx_update_roaming_peer = dp_update_roaming_peer_wifi3,
#endif
	.txrx_vdev_register = dp_vdev_register_wifi3,
	.txrx_soc_detach = dp_soc_detach_wifi3,
	.txrx_soc_deinit = dp_soc_deinit_wifi3,
	.txrx_soc_init = dp_soc_init_wifi3,
#ifndef QCA_HOST_MODE_WIFI_DISABLED
	.txrx_tso_soc_attach = dp_tso_soc_attach,
	.txrx_tso_soc_detach = dp_tso_soc_detach,
	.tx_send = dp_tx_send,
	.tx_send_exc = dp_tx_send_exception,
#endif
	.txrx_pdev_init = dp_pdev_init_wifi3,
	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
	.txrx_ath_getstats = dp_get_device_stats,
	.addba_requestprocess = dp_addba_requestprocess_wifi3,
	.addba_responsesetup = dp_addba_responsesetup_wifi3,
	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
	.delba_process = dp_delba_process_wifi3,
	.set_addba_response = dp_set_addba_response,
	.flush_cache_rx_queue = NULL,
	.tid_update_ba_win_size = dp_rx_tid_update_ba_win_size,
	/* TODO: get API's for dscp-tid need to be added*/
	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
	.txrx_get_total_per = dp_get_total_per,
	.txrx_stats_request = dp_txrx_stats_request,
	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
	.display_stats = dp_txrx_dump_stats,
	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
	.txrx_intr_detach = dp_soc_interrupt_detach,
	.set_pn_check = dp_set_pn_check_wifi3,
	.set_key_sec_type = dp_set_key_sec_type_wifi3,
	.update_config_parameters = dp_update_config_parameters,
	/* TODO: Add other functions */
	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
	.get_vdev_dp_ext_txrx_handle = dp_vdev_get_dp_ext_handle,
	.set_vdev_dp_ext_txrx_handle = dp_vdev_set_dp_ext_handle,
	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
	.map_pdev_to_lmac = dp_soc_map_pdev_to_lmac,
	.handle_mode_change = dp_soc_handle_pdev_mode_change,
	.set_pdev_status_down = dp_soc_set_pdev_status_down,
	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
	.set_soc_param = dp_soc_set_param,
	.txrx_get_os_rx_handles_from_vdev =
					dp_get_os_rx_handles_from_vdev_wifi3,
	.delba_tx_completion = dp_delba_tx_completion_wifi3,
	.get_dp_capabilities = dp_get_cfg_capabilities,
	.txrx_get_cfg = dp_get_cfg,
	.set_rate_stats_ctx = dp_soc_set_rate_stats_ctx,
	.get_rate_stats_ctx = dp_soc_get_rate_stats_ctx,
	.txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats,
	.txrx_flush_rate_stats_request = dp_flush_rate_stats_req,
	.txrx_peer_get_peerstats_ctx = dp_peer_get_peerstats_ctx,

	.set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3,
	.set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3,

	.txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler,
#ifdef QCA_MULTIPASS_SUPPORT
	.set_vlan_groupkey = dp_set_vlan_groupkey,
#endif
	.get_peer_mac_list = dp_get_peer_mac_list,
	.get_peer_id = dp_get_peer_id,
#ifdef QCA_SUPPORT_WDS_EXTENDED
	.set_wds_ext_peer_rx = dp_wds_ext_set_peer_rx,
#endif /* QCA_SUPPORT_WDS_EXTENDED */

#if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
	.txrx_drain = dp_drain_txrx,
#endif
#if defined(FEATURE_RUNTIME_PM)
	.set_rtpm_tput_policy = dp_set_rtpm_tput_policy_requirement,
#endif
#ifdef WLAN_SYSFS_DP_STATS
	.txrx_sysfs_fill_stats = dp_sysfs_fill_stats,
	.txrx_sysfs_set_stat_type = dp_sysfs_set_stat_type,
#endif /* WLAN_SYSFS_DP_STATS */
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
	.set_pkt_capture_mode = dp_set_pkt_capture_mode,
#endif
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
	.txrx_recovery_vdev_flush_peers = dp_recovery_vdev_flush_peers,
#endif
	.txrx_umac_reset_deinit = dp_soc_umac_reset_deinit,
};
12890 
/* Control-path CDP ops table: parameter get/set for psoc/pdev/vdev/peer,
 * WDI event subscription, key/security configuration and tagging hooks.
 */
static struct cdp_ctrl_ops dp_ops_ctrl = {
	.txrx_peer_authorize = dp_peer_authorize,
	.txrx_peer_get_authorize = dp_peer_get_authorize,
#ifdef VDEV_PEER_PROTOCOL_COUNT
	.txrx_enable_peer_protocol_count = dp_enable_vdev_peer_protocol_count,
	.txrx_set_peer_protocol_drop_mask =
		dp_enable_vdev_peer_protocol_drop_mask,
	.txrx_is_peer_protocol_count_enabled =
		dp_is_vdev_peer_protocol_count_enabled,
	.txrx_get_peer_protocol_drop_mask = dp_get_vdev_peer_protocol_drop_mask,
#endif
	.txrx_set_vdev_param = dp_set_vdev_param,
	.txrx_set_psoc_param = dp_set_psoc_param,
	.txrx_get_psoc_param = dp_get_psoc_param,
	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
	.txrx_get_sec_type = dp_get_sec_type,
	.txrx_wdi_event_sub = dp_wdi_event_sub,
	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
	.txrx_set_pdev_param = dp_set_pdev_param,
	.txrx_get_pdev_param = dp_get_pdev_param,
	.txrx_set_peer_param = dp_set_peer_param,
	.txrx_get_peer_param = dp_get_peer_param,
#ifdef VDEV_PEER_PROTOCOL_COUNT
	.txrx_peer_protocol_cnt = dp_peer_stats_update_protocol_cnt,
#endif
#ifdef WLAN_SUPPORT_MSCS
	.txrx_record_mscs_params = dp_record_mscs_params,
#endif
	.set_key = dp_set_michael_key,
	.txrx_get_vdev_param = dp_get_vdev_param,
	.calculate_delay_stats = dp_calculate_delay_stats,
#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
	.txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag,
#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
	.txrx_dump_pdev_rx_protocol_tag_stats =
				dp_dump_pdev_rx_protocol_tag_stats,
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
	.txrx_set_rx_flow_tag = dp_set_rx_flow_tag,
	.txrx_dump_rx_flow_tag_stats = dp_dump_rx_flow_tag_stats,
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */
#ifdef QCA_MULTIPASS_SUPPORT
	.txrx_peer_set_vlan_id = dp_peer_set_vlan_id,
#endif /*QCA_MULTIPASS_SUPPORT*/
#if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(CONFIG_SAWF)
	.txrx_set_delta_tsf = dp_set_delta_tsf,
#endif
#ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
	.txrx_set_tsf_ul_delay_report = dp_set_tsf_ul_delay_report,
	.txrx_get_uplink_delay = dp_get_uplink_delay,
#endif
#ifdef QCA_UNDECODED_METADATA_SUPPORT
	.txrx_set_pdev_phyrx_error_mask = dp_set_pdev_phyrx_error_mask,
	.txrx_get_pdev_phyrx_error_mask = dp_get_pdev_phyrx_error_mask,
#endif
	.txrx_peer_flush_frags = dp_peer_flush_frags,
};
12950 
/*
 * Mcast-enhancement ops table (cdp_me_ops): mcast-to-ucast conversion
 * descriptor management; populated only for host-mode WiFi with IQUE.
 */
static struct cdp_me_ops dp_ops_me = {
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef ATH_SUPPORT_IQUE
	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
#endif
#endif
};
12960 
/*
 * Host statistics ops table (cdp_host_stats_ops): per-peer/vdev/pdev/soc
 * stats retrieval, publication and reset hooks used by the upper layers.
 */
static struct cdp_host_stats_ops dp_ops_host_stats = {
	.txrx_per_peer_stats = dp_get_host_peer_stats,
	.get_fw_peer_stats = dp_get_fw_peer_stats,
	.get_htt_stats = dp_get_htt_stats,
	.txrx_stats_publish = dp_txrx_stats_publish,
	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
	.txrx_get_soc_stats = dp_txrx_get_soc_stats,
	.txrx_get_peer_stats_param = dp_txrx_get_peer_stats_param,
	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
	.txrx_update_vdev_stats = dp_txrx_update_vdev_host_stats,
	.txrx_get_peer_delay_stats = dp_txrx_get_peer_delay_stats,
	.txrx_get_peer_jitter_stats = dp_txrx_get_peer_jitter_stats,
#ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
	.txrx_alloc_vdev_stats_id = dp_txrx_alloc_vdev_stats_id,
	.txrx_reset_vdev_stats_id = dp_txrx_reset_vdev_stats_id,
#endif
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	.get_peer_tx_capture_stats = dp_peer_get_tx_capture_stats,
	.get_pdev_tx_capture_stats = dp_pdev_get_tx_capture_stats,
#endif /* WLAN_TX_PKT_CAPTURE_ENH */
#ifdef HW_TX_DELAY_STATS_ENABLE
	.enable_disable_vdev_tx_delay_stats =
				dp_enable_disable_vdev_tx_delay_stats,
	.is_tx_delay_stats_enabled = dp_check_vdev_tx_delay_stats_enabled,
#endif
	.txrx_get_pdev_tid_stats = dp_pdev_get_tid_stats,
#ifdef WLAN_TELEMETRY_STATS_SUPPORT
	.txrx_pdev_telemetry_stats = dp_get_pdev_telemetry_stats,
	.txrx_peer_telemetry_stats = dp_get_peer_telemetry_stats,
#endif
	/* TODO */
};
12996 
/* Raw-mode ops table (cdp_raw_ops): intentionally empty for now */
static struct cdp_raw_ops dp_ops_raw = {
	/* TODO */
};
13000 
#ifdef PEER_FLOW_CONTROL
/* Peer flow-control ops; sole (positional) member is the pdev config hook */
static struct cdp_pflow_ops dp_ops_pflow = {
	dp_tx_flow_ctrl_configure_pdev,
};
#endif /* PEER_FLOW_CONTROL */
13006 
#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
/* Channel Frequency Response (CFR) ops for enhanced CFR capture */
static struct cdp_cfr_ops dp_ops_cfr = {
	/* filter hook intentionally unimplemented in this build */
	.txrx_cfr_filter = NULL,
	.txrx_get_cfr_rcc = dp_get_cfr_rcc,
	.txrx_set_cfr_rcc = dp_set_cfr_rcc,
	.txrx_get_cfr_dbg_stats = dp_get_cfr_dbg_stats,
	.txrx_clear_cfr_dbg_stats = dp_clear_cfr_dbg_stats,
};
#endif
13016 
#ifdef WLAN_SUPPORT_MSCS
/* Mirrored Stream Classification Service (MSCS) ops */
static struct cdp_mscs_ops dp_ops_mscs = {
	.mscs_peer_lookup_n_get_priority = dp_mscs_peer_lookup_n_get_priority,
};
#endif
13022 
#ifdef WLAN_SUPPORT_MESH_LATENCY
/* Mesh latency ops: per-peer latency parameter updates */
static struct cdp_mesh_latency_ops dp_ops_mesh_latency = {
	.mesh_latency_update_peer_parameter =
		dp_mesh_latency_update_peer_parameter,
};
#endif
13029 
#ifdef WLAN_SUPPORT_SCS
/* Stream Classification Service (SCS) ops */
static struct cdp_scs_ops dp_ops_scs = {
	.scs_peer_lookup_n_rule_match = dp_scs_peer_lookup_n_rule_match,
};
#endif
13035 
#ifdef CONFIG_SAWF_DEF_QUEUES
/*
 * Service-Aware WiFi (SAWF) ops: default-queue mapping plus, with
 * CONFIG_SAWF, per-peer delay/tx stats and telemetry hooks.
 */
static struct cdp_sawf_ops dp_ops_sawf = {
	.sawf_def_queues_map_req = dp_sawf_def_queues_map_req,
	.sawf_def_queues_unmap_req = dp_sawf_def_queues_unmap_req,
	.sawf_def_queues_get_map_report =
		dp_sawf_def_queues_get_map_report,
#ifdef CONFIG_SAWF
	.txrx_get_peer_sawf_delay_stats = dp_sawf_get_peer_delay_stats,
	.txrx_get_peer_sawf_tx_stats = dp_sawf_get_peer_tx_stats,
	.sawf_mpdu_stats_req = dp_sawf_mpdu_stats_req,
	.sawf_mpdu_details_stats_req = dp_sawf_mpdu_details_stats_req,
	.txrx_sawf_set_mov_avg_params = dp_sawf_set_mov_avg_params,
	.txrx_sawf_set_sla_params = dp_sawf_set_sla_params,
	/* NOTE: "telemtery" spelling comes from the cdp interface */
	.txrx_sawf_init_telemtery_params = dp_sawf_init_telemetry_params,
	.telemetry_get_throughput_stats = dp_sawf_get_tx_stats,
	.telemetry_get_mpdu_stats = dp_sawf_get_mpdu_sched_stats,
	.telemetry_get_drop_stats = dp_sawf_get_drop_stats,
#endif
};
#endif
13056 
#if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
/**
 * dp_flush_ring_hptp() - Update ring shadow
 *			  register HP/TP address when runtime
 *                        resume
 * @soc: DP soc context
 * @hal_srng: srng to flush; may be NULL, in which case nothing is done
 *
 * Acts only when the ring has HAL_SRNG_FLUSH_EVENT pending; the event is
 * consumed (cleared) as part of the check.
 *
 * Return: None
 */
static
void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
{
	if (!hal_srng)
		return;

	if (!hal_srng_get_clear_event(hal_srng, HAL_SRNG_FLUSH_EVENT))
		return;

	/* Acquire and immediately release ring access; the start/end
	 * sequence is what pushes the cached HP/TP out to hardware.
	 */
	hal_srng_access_start(soc->hal_soc, hal_srng);
	hal_srng_access_end(soc->hal_soc, hal_srng);

	hal_srng_set_flush_last_ts(hal_srng);

	dp_debug("flushed");
}
#endif
13082 
13083 #ifdef DP_TX_TRACKING
13084 
13085 #define DP_TX_COMP_MAX_LATENCY_MS 30000
13086 /**
13087  * dp_tx_comp_delay_check() - calculate time latency for tx completion per pkt
13088  * @tx_desc: tx descriptor
13089  *
13090  * Calculate time latency for tx completion per pkt and trigger self recovery
13091  * when the delay is more than threshold value.
13092  *
13093  * Return: True if delay is more than threshold
13094  */
13095 static bool dp_tx_comp_delay_check(struct dp_tx_desc_s *tx_desc)
13096 {
13097 	uint64_t time_latency, timestamp_tick = tx_desc->timestamp_tick;
13098 	qdf_ktime_t current_time = qdf_ktime_real_get();
13099 	qdf_ktime_t timestamp = tx_desc->timestamp;
13100 
13101 	if (!timestamp)
13102 		return false;
13103 
13104 	if (dp_tx_pkt_tracepoints_enabled()) {
13105 		time_latency = qdf_ktime_to_ms(current_time) -
13106 				qdf_ktime_to_ms(timestamp);
13107 		if (time_latency >= DP_TX_COMP_MAX_LATENCY_MS) {
13108 			dp_err_rl("enqueued: %llu ms, current : %llu ms",
13109 				  timestamp, current_time);
13110 			return true;
13111 		}
13112 	} else {
13113 		current_time = qdf_system_ticks();
13114 		time_latency = qdf_system_ticks_to_msecs(current_time -
13115 							 timestamp_tick);
13116 		if (time_latency >= DP_TX_COMP_MAX_LATENCY_MS) {
13117 			dp_err_rl("enqueued: %u ms, current : %u ms",
13118 				  qdf_system_ticks_to_msecs(timestamp),
13119 				  qdf_system_ticks_to_msecs(current_time));
13120 			return true;
13121 		}
13122 	}
13123 
13124 	return false;
13125 }
13126 
#if defined(CONFIG_SLUB_DEBUG_ON)
/**
 * dp_find_missing_tx_comp() - check for leaked descriptor in tx path
 * @soc: DP SOC context
 *
 * Parse through descriptors in all pools and validate magic number and
 * completion time. Trigger self recovery if magic value is corrupted.
 *
 * Return: None.
 */
static void dp_find_missing_tx_comp(struct dp_soc *soc)
{
	uint8_t i;
	uint32_t j;
	uint32_t num_desc, page_id, offset;
	uint16_t num_desc_per_page;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
	bool send_fw_stats_cmd = false;
	uint8_t vdev_id;

	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		tx_desc_pool = &soc->tx_desc[i];
		/* Skip unused, inactive, or already-freed pools */
		if (!(tx_desc_pool->pool_size) ||
		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
		    !(tx_desc_pool->desc_pages.cacheable_pages))
			continue;

		num_desc = tx_desc_pool->pool_size;
		num_desc_per_page =
			tx_desc_pool->desc_pages.num_element_per_page;
		for (j = 0; j < num_desc; j++) {
			page_id = j / num_desc_per_page;
			offset = j % num_desc_per_page;

			/* Pool pages may be freed concurrently; re-check */
			if (qdf_unlikely(!(tx_desc_pool->
					 desc_pages.cacheable_pages)))
				break;

			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
			if (tx_desc->magic == DP_TX_MAGIC_PATTERN_FREE) {
				continue;
			} else if (tx_desc->magic ==
				   DP_TX_MAGIC_PATTERN_INUSE) {
				if (dp_tx_comp_delay_check(tx_desc)) {
					dp_err_rl("Tx completion not rcvd for id: %u",
						  tx_desc->id);

					/* Remember the first stuck pool so a
					 * single FW stats dump is issued below.
					 * NOTE(review): 'i' is the pool index,
					 * passed as vdev_id — confirm intended.
					 */
					if (!send_fw_stats_cmd) {
						send_fw_stats_cmd = true;
						vdev_id = i;
					}
				}
			} else {
				/* Neither FREE nor INUSE: memory corruption */
				dp_err_rl("tx desc %u corrupted, flags: 0x%x",
				       tx_desc->id, tx_desc->flags);
			}
		}
	}

	/*
	 * The unit test command to dump FW stats is required only once as the
	 * stats are dumped at pdev level and not vdev level.
	 */
	if (send_fw_stats_cmd && soc->cdp_soc.ol_ops->dp_send_unit_test_cmd) {
		/* {533, 1}: FW unit-test cmd arguments — presumably selects
		 * the TX stats dump; verify against the FW interface spec.
		 */
		uint32_t fw_stats_args[2] = {533, 1};

		soc->cdp_soc.ol_ops->dp_send_unit_test_cmd(vdev_id,
							   WLAN_MODULE_TX, 2,
							   fw_stats_args);
	}
}
#else
/**
 * dp_find_missing_tx_comp() - check for leaked descriptor in tx path
 * @soc: DP SOC context
 *
 * Non-SLUB-debug variant: walks every tx descriptor pool looking for
 * descriptors stuck in-use past the completion deadline, and force-frees
 * those whose vdev is already gone (vdev_id == DP_INVALID_VDEV_ID).
 *
 * Return: None.
 */
static void dp_find_missing_tx_comp(struct dp_soc *soc)
{
	uint8_t i;
	uint32_t j;
	uint32_t num_desc, page_id, offset;
	uint16_t num_desc_per_page;
	struct dp_tx_desc_s *tx_desc = NULL;
	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;

	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
		tx_desc_pool = &soc->tx_desc[i];
		/* Skip unused, inactive, or already-freed pools */
		if (!(tx_desc_pool->pool_size) ||
		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
		    !(tx_desc_pool->desc_pages.cacheable_pages))
			continue;

		num_desc = tx_desc_pool->pool_size;
		num_desc_per_page =
			tx_desc_pool->desc_pages.num_element_per_page;
		for (j = 0; j < num_desc; j++) {
			page_id = j / num_desc_per_page;
			offset = j % num_desc_per_page;

			/* Pool pages may be freed concurrently; re-check */
			if (qdf_unlikely(!(tx_desc_pool->
					 desc_pages.cacheable_pages)))
				break;

			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
			if (tx_desc->magic == DP_TX_MAGIC_PATTERN_FREE) {
				continue;
			} else if (tx_desc->magic ==
				   DP_TX_MAGIC_PATTERN_INUSE) {
				if (dp_tx_comp_delay_check(tx_desc)) {
					dp_err_rl("Tx completion not rcvd for id: %u",
						  tx_desc->id);
					/* Orphaned descriptor: reclaim it */
					if (tx_desc->vdev_id == DP_INVALID_VDEV_ID) {
						tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
						dp_tx_comp_free_buf(soc, tx_desc);
						dp_tx_desc_release(tx_desc, i);
						DP_STATS_INC(soc,
							     tx.tx_comp_force_freed, 1);
						dp_err_rl("Tx completion force freed");
					}
				}
			} else {
				/* Neither FREE nor INUSE: memory corruption */
				dp_err_rl("tx desc %u corrupted, flags: 0x%x",
					  tx_desc->id, tx_desc->flags);
			}
		}
	}
}
#endif /* CONFIG_SLUB_DEBUG_ON */
#else
/* DP_TX_TRACKING disabled: missing-completion scan compiles to a no-op */
static inline void dp_find_missing_tx_comp(struct dp_soc *soc)
{
}
#endif
13257 
#ifdef FEATURE_RUNTIME_PM
/**
 * dp_runtime_suspend() - ensure DP is ready to runtime suspend
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * DP is ready to runtime suspend if there are no pending TX packets.
 *
 * Return: QDF_STATUS_SUCCESS when suspend may proceed, QDF_STATUS_E_AGAIN
 *	   when TX is pending or DP is still referenced, QDF_STATUS_E_INVAL
 *	   on a bad pdev_id.
 */
static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;
	uint8_t i;
	int32_t tx_pending;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev is NULL");
		return QDF_STATUS_E_INVAL;
	}

	/* Abort if there are any pending TX packets */
	tx_pending = dp_get_tx_pending(dp_pdev_to_cdp_pdev(pdev));
	if (tx_pending) {
		dp_info_rl("%pK: Abort suspend due to pending TX packets %d",
			   soc, tx_pending);
		/* Diagnose possibly-leaked descriptors before bailing out */
		dp_find_missing_tx_comp(soc);
		/* perform a force flush if tx is pending */
		for (i = 0; i < soc->num_tcl_data_rings; i++) {
			hal_srng_set_event(soc->tcl_data_ring[i].hal_srng,
					   HAL_SRNG_FLUSH_EVENT);
			dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
		}
		qdf_atomic_set(&soc->tx_pending_rtpm, 0);

		return QDF_STATUS_E_AGAIN;
	}

	/* Outstanding DP runtime references also block suspend */
	if (dp_runtime_get_refcount(soc)) {
		dp_init_info("refcount: %d", dp_runtime_get_refcount(soc));

		return QDF_STATUS_E_AGAIN;
	}

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_stop(&soc->int_timer);

	dp_rx_fst_update_pm_suspend_status(soc, true);

	return QDF_STATUS_SUCCESS;
}
13311 
13312 #define DP_FLUSH_WAIT_CNT 10
13313 #define DP_RUNTIME_SUSPEND_WAIT_MS 10
13314 /**
13315  * dp_runtime_resume() - ensure DP is ready to runtime resume
13316  * @soc_hdl: Datapath soc handle
13317  * @pdev_id: id of data path pdev handle
13318  *
13319  * Resume DP for runtime PM.
13320  *
13321  * Return: QDF_STATUS
13322  */
13323 static QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
13324 {
13325 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13326 	int i, suspend_wait = 0;
13327 
13328 	if (soc->intr_mode == DP_INTR_POLL)
13329 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
13330 
13331 	/*
13332 	 * Wait until dp runtime refcount becomes zero or time out, then flush
13333 	 * pending tx for runtime suspend.
13334 	 */
13335 	while (dp_runtime_get_refcount(soc) &&
13336 	       suspend_wait < DP_FLUSH_WAIT_CNT) {
13337 		qdf_sleep(DP_RUNTIME_SUSPEND_WAIT_MS);
13338 		suspend_wait++;
13339 	}
13340 
13341 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
13342 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
13343 	}
13344 	qdf_atomic_set(&soc->tx_pending_rtpm, 0);
13345 
13346 	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
13347 	dp_rx_fst_update_pm_suspend_status(soc, false);
13348 
13349 	return QDF_STATUS_SUCCESS;
13350 }
13351 #endif /* FEATURE_RUNTIME_PM */
13352 
13353 /**
13354  * dp_tx_get_success_ack_stats() - get tx success completion count
13355  * @soc_hdl: Datapath soc handle
13356  * @vdevid: vdev identifier
13357  *
13358  * Return: tx success ack count
13359  */
13360 static uint32_t dp_tx_get_success_ack_stats(struct cdp_soc_t *soc_hdl,
13361 					    uint8_t vdev_id)
13362 {
13363 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13364 	struct cdp_vdev_stats *vdev_stats = NULL;
13365 	uint32_t tx_success;
13366 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
13367 						     DP_MOD_ID_CDP);
13368 
13369 	if (!vdev) {
13370 		dp_cdp_err("%pK: Invalid vdev id %d", soc, vdev_id);
13371 		return 0;
13372 	}
13373 
13374 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
13375 	if (!vdev_stats) {
13376 		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats", soc);
13377 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
13378 		return 0;
13379 	}
13380 
13381 	dp_aggregate_vdev_stats(vdev, vdev_stats);
13382 
13383 	tx_success = vdev_stats->tx.tx_success.num;
13384 	qdf_mem_free(vdev_stats);
13385 
13386 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
13387 	return tx_success;
13388 }
13389 
13390 #ifdef WLAN_SUPPORT_DATA_STALL
13391 /**
13392  * dp_register_data_stall_detect_cb() - register data stall callback
13393  * @soc_hdl: Datapath soc handle
13394  * @pdev_id: id of data path pdev handle
13395  * @data_stall_detect_callback: data stall callback function
13396  *
13397  * Return: QDF_STATUS Enumeration
13398  */
13399 static
13400 QDF_STATUS dp_register_data_stall_detect_cb(
13401 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
13402 			data_stall_detect_cb data_stall_detect_callback)
13403 {
13404 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13405 	struct dp_pdev *pdev;
13406 
13407 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13408 	if (!pdev) {
13409 		dp_err("pdev NULL!");
13410 		return QDF_STATUS_E_INVAL;
13411 	}
13412 
13413 	pdev->data_stall_detect_callback = data_stall_detect_callback;
13414 	return QDF_STATUS_SUCCESS;
13415 }
13416 
13417 /**
13418  * dp_deregister_data_stall_detect_cb() - de-register data stall callback
13419  * @soc_hdl: Datapath soc handle
13420  * @pdev_id: id of data path pdev handle
13421  * @data_stall_detect_callback: data stall callback function
13422  *
13423  * Return: QDF_STATUS Enumeration
13424  */
13425 static
13426 QDF_STATUS dp_deregister_data_stall_detect_cb(
13427 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
13428 			data_stall_detect_cb data_stall_detect_callback)
13429 {
13430 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13431 	struct dp_pdev *pdev;
13432 
13433 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13434 	if (!pdev) {
13435 		dp_err("pdev NULL!");
13436 		return QDF_STATUS_E_INVAL;
13437 	}
13438 
13439 	pdev->data_stall_detect_callback = NULL;
13440 	return QDF_STATUS_SUCCESS;
13441 }
13442 
13443 /**
13444  * dp_txrx_post_data_stall_event() - post data stall event
13445  * @soc_hdl: Datapath soc handle
13446  * @indicator: Module triggering data stall
13447  * @data_stall_type: data stall event type
13448  * @pdev_id: pdev id
13449  * @vdev_id_bitmap: vdev id bitmap
13450  * @recovery_type: data stall recovery type
13451  *
13452  * Return: None
13453  */
13454 static void
13455 dp_txrx_post_data_stall_event(struct cdp_soc_t *soc_hdl,
13456 			      enum data_stall_log_event_indicator indicator,
13457 			      enum data_stall_log_event_type data_stall_type,
13458 			      uint32_t pdev_id, uint32_t vdev_id_bitmap,
13459 			      enum data_stall_log_recovery_type recovery_type)
13460 {
13461 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13462 	struct data_stall_event_info data_stall_info;
13463 	struct dp_pdev *pdev;
13464 
13465 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13466 	if (!pdev) {
13467 		dp_err("pdev NULL!");
13468 		return;
13469 	}
13470 
13471 	if (!pdev->data_stall_detect_callback) {
13472 		dp_err("data stall cb not registered!");
13473 		return;
13474 	}
13475 
13476 	dp_info("data_stall_type: %x pdev_id: %d",
13477 		data_stall_type, pdev_id);
13478 
13479 	data_stall_info.indicator = indicator;
13480 	data_stall_info.data_stall_type = data_stall_type;
13481 	data_stall_info.vdev_id_bitmap = vdev_id_bitmap;
13482 	data_stall_info.pdev_id = pdev_id;
13483 	data_stall_info.recovery_type = recovery_type;
13484 
13485 	pdev->data_stall_detect_callback(&data_stall_info);
13486 }
13487 #endif /* WLAN_SUPPORT_DATA_STALL */
13488 
13489 #ifdef WLAN_FEATURE_STATS_EXT
13490 /* rx hw stats event wait timeout in ms */
13491 #define DP_REO_STATUS_STATS_TIMEOUT 1500
13492 /**
13493  * dp_txrx_ext_stats_request - request dp txrx extended stats request
13494  * @soc_hdl: soc handle
13495  * @pdev_id: pdev id
13496  * @req: stats request
13497  *
13498  * Return: QDF_STATUS
13499  */
13500 static QDF_STATUS
13501 dp_txrx_ext_stats_request(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
13502 			  struct cdp_txrx_ext_stats *req)
13503 {
13504 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
13505 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13506 	int i = 0;
13507 	int tcl_ring_full = 0;
13508 
13509 	if (!pdev) {
13510 		dp_err("pdev is null");
13511 		return QDF_STATUS_E_INVAL;
13512 	}
13513 
13514 	dp_aggregate_pdev_stats(pdev);
13515 
13516 	for(i = 0 ; i < MAX_TCL_DATA_RINGS; i++)
13517 		tcl_ring_full += soc->stats.tx.tcl_ring_full[i];
13518 
13519 	req->tx_msdu_enqueue = pdev->stats.tx_i.processed.num;
13520 	req->tx_msdu_overflow = tcl_ring_full;
13521 	req->rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
13522 	req->rx_mpdu_delivered = soc->ext_stats.rx_mpdu_received;
13523 	req->rx_mpdu_missed = pdev->stats.err.reo_error;
13524 	/* only count error source from RXDMA */
13525 	req->rx_mpdu_error = pdev->stats.err.rxdma_error;
13526 
13527 	dp_info("ext stats: tx_msdu_enq = %u, tx_msdu_overflow = %u, "
13528 		"tx_mpdu_recieve = %u, rx_mpdu_delivered = %u, "
13529 		"rx_mpdu_missed = %u, rx_mpdu_error = %u",
13530 		req->tx_msdu_enqueue,
13531 		req->tx_msdu_overflow,
13532 		req->rx_mpdu_received,
13533 		req->rx_mpdu_delivered,
13534 		req->rx_mpdu_missed,
13535 		req->rx_mpdu_error);
13536 
13537 	return QDF_STATUS_SUCCESS;
13538 }
13539 
13540 /**
13541  * dp_rx_hw_stats_cb - request rx hw stats response callback
13542  * @soc: soc handle
13543  * @cb_ctxt: callback context
13544  * @reo_status: reo command response status
13545  *
13546  * Return: None
13547  */
13548 static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
13549 			      union hal_reo_status *reo_status)
13550 {
13551 	struct dp_req_rx_hw_stats_t *rx_hw_stats = cb_ctxt;
13552 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
13553 	bool is_query_timeout;
13554 
13555 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
13556 	is_query_timeout = rx_hw_stats->is_query_timeout;
13557 	/* free the cb_ctxt if all pending tid stats query is received */
13558 	if (qdf_atomic_dec_and_test(&rx_hw_stats->pending_tid_stats_cnt)) {
13559 		if (!is_query_timeout) {
13560 			qdf_event_set(&soc->rx_hw_stats_event);
13561 			soc->is_last_stats_ctx_init = false;
13562 		}
13563 
13564 		qdf_mem_free(rx_hw_stats);
13565 	}
13566 
13567 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
13568 		dp_info("REO stats failure %d",
13569 			queue_status->header.status);
13570 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
13571 		return;
13572 	}
13573 
13574 	if (!is_query_timeout) {
13575 		soc->ext_stats.rx_mpdu_received +=
13576 					queue_status->mpdu_frms_cnt;
13577 		soc->ext_stats.rx_mpdu_missed +=
13578 					queue_status->hole_cnt;
13579 	}
13580 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
13581 }
13582 
13583 /**
13584  * dp_request_rx_hw_stats - request rx hardware stats
13585  * @soc_hdl: soc handle
13586  * @vdev_id: vdev id
13587  *
13588  * Return: None
13589  */
13590 static QDF_STATUS
13591 dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
13592 {
13593 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
13594 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
13595 						     DP_MOD_ID_CDP);
13596 	struct dp_peer *peer = NULL;
13597 	QDF_STATUS status;
13598 	struct dp_req_rx_hw_stats_t *rx_hw_stats;
13599 	int rx_stats_sent_cnt = 0;
13600 	uint32_t last_rx_mpdu_received;
13601 	uint32_t last_rx_mpdu_missed;
13602 
13603 	if (!vdev) {
13604 		dp_err("vdev is null for vdev_id: %u", vdev_id);
13605 		status = QDF_STATUS_E_INVAL;
13606 		goto out;
13607 	}
13608 
13609 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_CDP);
13610 
13611 	if (!peer) {
13612 		dp_err("Peer is NULL");
13613 		status = QDF_STATUS_E_INVAL;
13614 		goto out;
13615 	}
13616 
13617 	rx_hw_stats = qdf_mem_malloc(sizeof(*rx_hw_stats));
13618 
13619 	if (!rx_hw_stats) {
13620 		dp_err("malloc failed for hw stats structure");
13621 		status = QDF_STATUS_E_INVAL;
13622 		goto out;
13623 	}
13624 
13625 	qdf_event_reset(&soc->rx_hw_stats_event);
13626 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
13627 	/* save the last soc cumulative stats and reset it to 0 */
13628 	last_rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
13629 	last_rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
13630 	soc->ext_stats.rx_mpdu_received = 0;
13631 
13632 	rx_stats_sent_cnt =
13633 		dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, rx_hw_stats);
13634 	if (!rx_stats_sent_cnt) {
13635 		dp_err("no tid stats sent successfully");
13636 		qdf_mem_free(rx_hw_stats);
13637 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
13638 		status = QDF_STATUS_E_INVAL;
13639 		goto out;
13640 	}
13641 	qdf_atomic_set(&rx_hw_stats->pending_tid_stats_cnt,
13642 		       rx_stats_sent_cnt);
13643 	rx_hw_stats->is_query_timeout = false;
13644 	soc->is_last_stats_ctx_init = true;
13645 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
13646 
13647 	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
13648 				       DP_REO_STATUS_STATS_TIMEOUT);
13649 
13650 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
13651 	if (status != QDF_STATUS_SUCCESS) {
13652 		dp_info("rx hw stats event timeout");
13653 		if (soc->is_last_stats_ctx_init)
13654 			rx_hw_stats->is_query_timeout = true;
13655 		/**
13656 		 * If query timeout happened, use the last saved stats
13657 		 * for this time query.
13658 		 */
13659 		soc->ext_stats.rx_mpdu_received = last_rx_mpdu_received;
13660 		soc->ext_stats.rx_mpdu_missed = last_rx_mpdu_missed;
13661 	}
13662 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
13663 
13664 out:
13665 	if (peer)
13666 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
13667 	if (vdev)
13668 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
13669 
13670 	return status;
13671 }
13672 
13673 /**
13674  * dp_reset_rx_hw_ext_stats - Reset rx hardware ext stats
13675  * @soc_hdl: soc handle
13676  *
13677  * Return: None
13678  */
13679 static
13680 void dp_reset_rx_hw_ext_stats(struct cdp_soc_t *soc_hdl)
13681 {
13682 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
13683 
13684 	soc->ext_stats.rx_mpdu_received = 0;
13685 	soc->ext_stats.rx_mpdu_missed = 0;
13686 }
13687 #endif /* WLAN_FEATURE_STATS_EXT */
13688 
/**
 * dp_get_tx_rings_grp_bitmap() - get tx rings group bitmap
 * @soc_hdl: Datapath soc handle
 *
 * Return: tx_rings_grp_bitmap from the soc wlan cfg context
 */
static
uint32_t dp_get_tx_rings_grp_bitmap(struct cdp_soc_t *soc_hdl)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	return soc->wlan_cfg_ctx->tx_rings_grp_bitmap;
}
13696 
#ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
/**
 * dp_mark_first_wakeup_packet() - set flag to indicate that
 *    fw is compatible for marking first packet after wow wakeup
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @value: 1 for enabled/ 0 for disabled
 *
 * Return: None
 */
static void dp_mark_first_wakeup_packet(struct cdp_soc_t *soc_hdl,
					uint8_t pdev_id, uint8_t value)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc,
								  pdev_id);

	if (!pdev) {
		dp_err("pdev is NULL");
		return;
	}

	pdev->is_first_wakeup_packet = value;
}
#endif
13722 
#ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
/**
 * dp_set_peer_txq_flush_config() - Set the peer txq flush configuration
 * @soc_hdl: Opaque handle to the DP soc object
 * @vdev_id: VDEV identifier
 * @mac: MAC address of the peer
 * @ac: access category mask
 * @tid: TID mask
 * @policy: Flush policy
 *
 * Thin wrapper that forwards the request to the target_if layer.
 *
 * Return: 0 on success, errno on failure
 */
static int dp_set_peer_txq_flush_config(struct cdp_soc_t *soc_hdl,
					uint8_t vdev_id, uint8_t *mac,
					uint8_t ac, uint32_t tid,
					enum cdp_peer_txq_flush_policy policy)
{
	struct dp_soc *soc;

	if (!soc_hdl) {
		dp_err("soc is null");
		return -EINVAL;
	}
	soc = cdp_soc_t_to_dp_soc(soc_hdl);
	return target_if_peer_txq_flush_config(soc->ctrl_psoc, vdev_id,
					       mac, ac, tid, policy);
}
#endif
13751 
#ifdef CONNECTIVITY_PKTLOG
/**
 * dp_register_packetdump_callback() - registers
 *  tx data packet, tx mgmt. packet and rx data packet
 *  dump callback handler.
 *
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @dp_tx_packetdump_cb: tx packetdump cb
 * @dp_rx_packetdump_cb: rx packetdump cb
 *
 * This function is used to register tx data pkt, tx mgmt.
 * pkt and rx data pkt dump callback
 *
 * Return: None
 *
 */
static inline
void dp_register_packetdump_callback(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				     ol_txrx_pktdump_cb dp_tx_packetdump_cb,
				     ol_txrx_pktdump_cb dp_rx_packetdump_cb)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev) {
		dp_err("pdev is NULL!");
		return;
	}

	/* Callbacks are stored on the pdev; no locking here — callers are
	 * expected to serialize registration (presumably init/deinit paths).
	 */
	pdev->dp_tx_packetdump_cb = dp_tx_packetdump_cb;
	pdev->dp_rx_packetdump_cb = dp_rx_packetdump_cb;
}
13786 
13787 /**
13788  * dp_deregister_packetdump_callback() - deregidters
13789  *  tx data packet, tx mgmt. packet and rx data packet
13790  *  dump callback handler
13791  * @soc_hdl: Datapath soc handle
13792  * @pdev_id: id of data path pdev handle
13793  *
13794  * This function is used to deregidter tx data pkt.,
13795  * tx mgmt. pkt and rx data pkt. dump callback
13796  *
13797  * Return: None
13798  *
13799  */
13800 static inline
13801 void dp_deregister_packetdump_callback(struct cdp_soc_t *soc_hdl,
13802 				       uint8_t pdev_id)
13803 {
13804 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13805 	struct dp_pdev *pdev;
13806 
13807 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13808 	if (!pdev) {
13809 		dp_err("pdev is NULL!");
13810 		return;
13811 	}
13812 
13813 	pdev->dp_tx_packetdump_cb = NULL;
13814 	pdev->dp_rx_packetdump_cb = NULL;
13815 }
13816 #endif
13817 
#ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
/**
 * dp_set_bus_vote_lvl_high() - Take a vote on bus bandwidth from dp
 * @soc_hdl: Datapath soc handle
 * @high: whether the bus bw is high or not
 *
 * Records the current bus-bandwidth vote level on the soc.
 *
 * Return: void
 */
static void
dp_set_bus_vote_lvl_high(ol_txrx_soc_handle soc_hdl, bool high)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	soc->high_throughput = high;
}
13833 
13834 /**
13835  * dp_get_bus_vote_lvl_high() - get bus bandwidth vote to dp
13836  * @soc_hdl: Datapath soc handle
13837  *
13838  * Return: bool
13839  */
13840 static bool
13841 dp_get_bus_vote_lvl_high(ol_txrx_soc_handle soc_hdl)
13842 {
13843 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13844 
13845 	return soc->high_throughput;
13846 }
13847 #endif
13848 
#ifdef DP_PEER_EXTENDED_API
/*
 * Miscellaneous ops table (cdp_misc_ops): runtime PM, data-stall
 * reporting, extended stats, packet-dump callbacks and other
 * feature-gated utility hooks.
 */
static struct cdp_misc_ops dp_ops_misc = {
#ifdef FEATURE_WLAN_TDLS
	.tx_non_std = dp_tx_non_std,
#endif /* FEATURE_WLAN_TDLS */
	.get_opmode = dp_get_opmode,
#ifdef FEATURE_RUNTIME_PM
	.runtime_suspend = dp_runtime_suspend,
	.runtime_resume = dp_runtime_resume,
#endif /* FEATURE_RUNTIME_PM */
	.get_num_rx_contexts = dp_get_num_rx_contexts,
	.get_tx_ack_stats = dp_tx_get_success_ack_stats,
#ifdef WLAN_SUPPORT_DATA_STALL
	.txrx_data_stall_cb_register = dp_register_data_stall_detect_cb,
	.txrx_data_stall_cb_deregister = dp_deregister_data_stall_detect_cb,
	.txrx_post_data_stall_event = dp_txrx_post_data_stall_event,
#endif

#ifdef WLAN_FEATURE_STATS_EXT
	.txrx_ext_stats_request = dp_txrx_ext_stats_request,
	.request_rx_hw_stats = dp_request_rx_hw_stats,
	.reset_rx_hw_ext_stats = dp_reset_rx_hw_ext_stats,
#endif /* WLAN_FEATURE_STATS_EXT */
	.vdev_inform_ll_conn = dp_vdev_inform_ll_conn,
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
	.set_swlm_enable = dp_soc_set_swlm_enable,
	.is_swlm_enabled = dp_soc_is_swlm_enabled,
#endif
	.display_txrx_hw_info = dp_display_srng_info,
	.get_tx_rings_grp_bitmap = dp_get_tx_rings_grp_bitmap,
#ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
	.mark_first_wakeup_packet = dp_mark_first_wakeup_packet,
#endif
#ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
	.set_peer_txq_flush_config = dp_set_peer_txq_flush_config,
#endif
#ifdef CONNECTIVITY_PKTLOG
	.register_pktdump_cb = dp_register_packetdump_callback,
	.unregister_pktdump_cb = dp_deregister_packetdump_callback,
#endif
#ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
	.set_bus_vote_lvl_high = dp_set_bus_vote_lvl_high,
	.get_bus_vote_lvl_high = dp_get_bus_vote_lvl_high,
#endif
};
#endif
13895 
13896 #ifdef DP_FLOW_CTL
/* TX flow control callbacks; populated only when the v2 flow control
 * feature (QCA_LL_TX_FLOW_CONTROL_V2) is compiled in.
 */
static struct cdp_flowctl_ops dp_ops_flowctl = {
	/* WIFI 3.0 DP implement as required. */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	.flow_pool_map_handler = dp_tx_flow_pool_map,
	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
	.register_pause_cb = dp_txrx_register_pause_cb,
	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
};
13907 
/* Legacy flow control ops: intentionally empty placeholder for WIFI 3.0 */
static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
13911 #endif
13912 
13913 #ifdef IPA_OFFLOAD
/* IPA offload callbacks registered with the converged driver when
 * IPA_OFFLOAD is enabled (resource/doorbell setup, pipe and autonomy
 * control, SMMU buffer mapping, intra-BSS forwarding).
 */
static struct cdp_ipa_ops dp_ops_ipa = {
	.ipa_get_resource = dp_ipa_get_resource,
	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
	.ipa_iounmap_doorbell_vaddr = dp_ipa_iounmap_doorbell_vaddr,
	.ipa_op_response = dp_ipa_op_response,
	.ipa_register_op_cb = dp_ipa_register_op_cb,
	.ipa_deregister_op_cb = dp_ipa_deregister_op_cb,
	.ipa_get_stat = dp_ipa_get_stat,
	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
	.ipa_setup = dp_ipa_setup,
	.ipa_cleanup = dp_ipa_cleanup,
	.ipa_setup_iface = dp_ipa_setup_iface,
	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
	.ipa_enable_pipes = dp_ipa_enable_pipes,
	.ipa_disable_pipes = dp_ipa_disable_pipes,
	.ipa_set_perf_level = dp_ipa_set_perf_level,
	.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd,
	.ipa_tx_buf_smmu_mapping = dp_ipa_tx_buf_smmu_mapping,
	.ipa_tx_buf_smmu_unmapping = dp_ipa_tx_buf_smmu_unmapping,
#ifdef IPA_WDS_EASYMESH_FEATURE
	.ipa_ast_create = dp_ipa_ast_create,
#endif
};
13939 #endif
13940 
13941 #ifdef DP_POWER_SAVE
/**
 * dp_bus_suspend() - Prepare the datapath for a bus suspend
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of the pdev being suspended
 *
 * Waits for pending TX completions to drain (up to SUSPEND_DRAIN_WAIT,
 * presumably in ms — polled every 50 ms), then quiesces the poll timer
 * and monitor reap timer and suspends the FSE cache flush work.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_INVAL on an
 *	   invalid pdev_id, QDF_STATUS_E_TIMEOUT if TX frames are still
 *	   pending after the drain wait (suspend must be aborted)
 */
static QDF_STATUS dp_bus_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	int timeout = SUSPEND_DRAIN_WAIT;
	int drain_wait_delay = 50; /* 50 ms */
	int32_t tx_pending;

	if (qdf_unlikely(!pdev)) {
		dp_err("pdev is NULL");
		return QDF_STATUS_E_INVAL;
	}

	/* Abort if there are any pending TX packets */
	while ((tx_pending = dp_get_tx_pending((struct cdp_pdev *)pdev))) {
		qdf_sleep(drain_wait_delay);
		if (timeout <= 0) {
			dp_info("TX frames are pending %d, abort suspend",
				tx_pending);
			/* Diagnose which descriptors never completed */
			dp_find_missing_tx_comp(soc);
			return QDF_STATUS_E_TIMEOUT;
		}
		timeout = timeout - drain_wait_delay;
	}

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_stop(&soc->int_timer);

	/* Stop monitor reap timer and reap any pending frames in ring */
	dp_monitor_reap_timer_suspend(soc);

	dp_suspend_fse_cache_flush(soc);

	return QDF_STATUS_SUCCESS;
}
13977 
/**
 * dp_bus_resume() - Restore datapath activity after a bus resume
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of the pdev being resumed
 *
 * Restarts the poll timer (if in poll mode), the monitor reap timer and
 * the FSE cache flush work, then kicks the head/tail pointers of all TCL
 * data rings so any entries queued during suspend reach hardware.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_INVAL on invalid pdev_id
 */
static QDF_STATUS dp_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	uint8_t i;

	if (qdf_unlikely(!pdev)) {
		dp_err("pdev is NULL");
		return QDF_STATUS_E_INVAL;
	}

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

	/* Start monitor reap timer */
	dp_monitor_reap_timer_start(soc, CDP_MON_REAP_SOURCE_ANY);

	dp_resume_fse_cache_flush(soc);

	for (i = 0; i < soc->num_tcl_data_rings; i++)
		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);

	return QDF_STATUS_SUCCESS;
}
14002 
14003 /**
14004  * dp_process_wow_ack_rsp() - process wow ack response
14005  * @soc_hdl: datapath soc handle
14006  * @pdev_id: data path pdev handle id
14007  *
14008  * Return: none
14009  */
14010 static void dp_process_wow_ack_rsp(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
14011 {
14012 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14013 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14014 
14015 	if (qdf_unlikely(!pdev)) {
14016 		dp_err("pdev is NULL");
14017 		return;
14018 	}
14019 
14020 	/*
14021 	 * As part of wow enable FW disables the mon status ring and in wow ack
14022 	 * response from FW reap mon status ring to make sure no packets pending
14023 	 * in the ring.
14024 	 */
14025 	dp_monitor_reap_timer_suspend(soc);
14026 }
14027 
14028 /**
14029  * dp_process_target_suspend_req() - process target suspend request
14030  * @soc_hdl: datapath soc handle
14031  * @pdev_id: data path pdev handle id
14032  *
14033  * Return: none
14034  */
14035 static void dp_process_target_suspend_req(struct cdp_soc_t *soc_hdl,
14036 					  uint8_t pdev_id)
14037 {
14038 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14039 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14040 
14041 	if (qdf_unlikely(!pdev)) {
14042 		dp_err("pdev is NULL");
14043 		return;
14044 	}
14045 
14046 	/* Stop monitor reap timer and reap any pending frames in ring */
14047 	dp_monitor_reap_timer_suspend(soc);
14048 }
14049 
/* Bus suspend/resume callbacks registered under DP_POWER_SAVE */
static struct cdp_bus_ops dp_ops_bus = {
	.bus_suspend = dp_bus_suspend,
	.bus_resume = dp_bus_resume,
	.process_wow_ack_rsp = dp_process_wow_ack_rsp,
	.process_target_suspend_req = dp_process_target_suspend_req
};
14056 #endif
14057 
14058 #ifdef DP_FLOW_CTL
/* Throttle ops: intentionally empty placeholder for WIFI 3.0 */
static struct cdp_throttle_ops dp_ops_throttle = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
14062 
/* Config ops: intentionally empty placeholder for WIFI 3.0 */
static struct cdp_cfg_ops dp_ops_cfg = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
14066 #endif
14067 
14068 #ifdef DP_PEER_EXTENDED_API
/* OCB ops: intentionally empty placeholder for WIFI 3.0 */
static struct cdp_ocb_ops dp_ops_ocb = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
14072 
/* Mobile stats ops: only stats-clear is implemented for WIFI 3.0 */
static struct cdp_mob_stats_ops dp_ops_mob_stats = {
	.clear_stats = dp_txrx_clear_dump_stats,
};
14076 
/* Peer management callbacks registered under DP_PEER_EXTENDED_API:
 * registration/teardown, existence lookups, state transitions and
 * vdev/mac-address queries.
 */
static struct cdp_peer_ops dp_ops_peer = {
	.register_peer = dp_register_peer,
	.clear_peer = dp_clear_peer,
	.find_peer_exist = dp_find_peer_exist,
	.find_peer_exist_on_vdev = dp_find_peer_exist_on_vdev,
	.find_peer_exist_on_other_vdev = dp_find_peer_exist_on_other_vdev,
	.peer_state_update = dp_peer_state_update,
	.get_vdevid = dp_get_vdevid,
	.get_vdev_by_peer_addr = dp_get_vdev_by_peer_addr,
	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
	.get_peer_state = dp_get_peer_state,
	.peer_flush_frags = dp_peer_flush_frags,
	.set_peer_as_tdls_peer = dp_set_peer_as_tdls_peer,
};
14091 #endif
14092 
14093 static void dp_soc_txrx_ops_attach(struct dp_soc *soc)
14094 {
14095 	soc->cdp_soc.ops->cmn_drv_ops = &dp_ops_cmn;
14096 	soc->cdp_soc.ops->ctrl_ops = &dp_ops_ctrl;
14097 	soc->cdp_soc.ops->me_ops = &dp_ops_me;
14098 	soc->cdp_soc.ops->host_stats_ops = &dp_ops_host_stats;
14099 	soc->cdp_soc.ops->wds_ops = &dp_ops_wds;
14100 	soc->cdp_soc.ops->raw_ops = &dp_ops_raw;
14101 #ifdef PEER_FLOW_CONTROL
14102 	soc->cdp_soc.ops->pflow_ops = &dp_ops_pflow;
14103 #endif /* PEER_FLOW_CONTROL */
14104 #ifdef DP_PEER_EXTENDED_API
14105 	soc->cdp_soc.ops->misc_ops = &dp_ops_misc;
14106 	soc->cdp_soc.ops->ocb_ops = &dp_ops_ocb;
14107 	soc->cdp_soc.ops->peer_ops = &dp_ops_peer;
14108 	soc->cdp_soc.ops->mob_stats_ops = &dp_ops_mob_stats;
14109 #endif
14110 #ifdef DP_FLOW_CTL
14111 	soc->cdp_soc.ops->cfg_ops = &dp_ops_cfg;
14112 	soc->cdp_soc.ops->flowctl_ops = &dp_ops_flowctl;
14113 	soc->cdp_soc.ops->l_flowctl_ops = &dp_ops_l_flowctl;
14114 	soc->cdp_soc.ops->throttle_ops = &dp_ops_throttle;
14115 #endif
14116 #ifdef IPA_OFFLOAD
14117 	soc->cdp_soc.ops->ipa_ops = &dp_ops_ipa;
14118 #endif
14119 #ifdef DP_POWER_SAVE
14120 	soc->cdp_soc.ops->bus_ops = &dp_ops_bus;
14121 #endif
14122 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
14123 	soc->cdp_soc.ops->cfr_ops = &dp_ops_cfr;
14124 #endif
14125 #ifdef WLAN_SUPPORT_MSCS
14126 	soc->cdp_soc.ops->mscs_ops = &dp_ops_mscs;
14127 #endif
14128 #ifdef WLAN_SUPPORT_MESH_LATENCY
14129 	soc->cdp_soc.ops->mesh_latency_ops = &dp_ops_mesh_latency;
14130 #endif
14131 #ifdef CONFIG_SAWF_DEF_QUEUES
14132 	soc->cdp_soc.ops->sawf_ops = &dp_ops_sawf;
14133 #endif
14134 #ifdef WLAN_SUPPORT_SCS
14135 	soc->cdp_soc.ops->scs_ops = &dp_ops_scs;
14136 #endif
14137 };
14138 
14139 /*
14140  * dp_soc_set_txrx_ring_map()
14141  * @dp_soc: DP handler for soc
14142  *
14143  * Return: Void
14144  */
14145 void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
14146 {
14147 	uint32_t i;
14148 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
14149 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
14150 	}
14151 }
14152 
14153 qdf_export_symbol(dp_soc_set_txrx_ring_map);
14154 
14155 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
14156 	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574)
14157 /**
14158  * dp_soc_attach_wifi3() - Attach txrx SOC
14159  * @ctrl_psoc: Opaque SOC handle from control plane
14160  * @params: SOC attach params
14161  *
14162  * Return: DP SOC handle on success, NULL on failure
14163  */
14164 struct cdp_soc_t *
14165 dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
14166 		    struct cdp_soc_attach_params *params)
14167 {
14168 	struct dp_soc *dp_soc = NULL;
14169 
14170 	dp_soc = dp_soc_attach(ctrl_psoc, params);
14171 
14172 	return dp_soc_to_cdp_soc_t(dp_soc);
14173 }
14174 
14175 static inline void dp_soc_set_def_pdev(struct dp_soc *soc)
14176 {
14177 	int lmac_id;
14178 
14179 	for (lmac_id = 0; lmac_id < MAX_NUM_LMAC_HW; lmac_id++) {
14180 		/*Set default host PDEV ID for lmac_id*/
14181 		wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
14182 				      INVALID_PDEV_ID, lmac_id);
14183 	}
14184 }
14185 
14186 static uint32_t
14187 dp_get_link_desc_id_start(uint16_t arch_id)
14188 {
14189 	switch (arch_id) {
14190 	case CDP_ARCH_TYPE_LI:
14191 		return LINK_DESC_ID_START_21_BITS_COOKIE;
14192 	case CDP_ARCH_TYPE_BE:
14193 		return LINK_DESC_ID_START_20_BITS_COOKIE;
14194 	default:
14195 		dp_err("unkonwn arch_id 0x%x", arch_id);
14196 		QDF_BUG(0);
14197 		return LINK_DESC_ID_START_21_BITS_COOKIE;
14198 	}
14199 }
14200 
14201 /**
14202  * dp_soc_attach() - Attach txrx SOC
14203  * @ctrl_psoc: Opaque SOC handle from control plane
14204  * @params: SOC attach params
14205  *
14206  * Return: DP SOC handle on success, NULL on failure
14207  */
14208 static struct dp_soc *
14209 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
14210 	      struct cdp_soc_attach_params *params)
14211 {
14212 	int int_ctx;
14213 	struct dp_soc *soc =  NULL;
14214 	uint16_t arch_id;
14215 	struct hif_opaque_softc *hif_handle = params->hif_handle;
14216 	qdf_device_t qdf_osdev = params->qdf_osdev;
14217 	struct ol_if_ops *ol_ops = params->ol_ops;
14218 	uint16_t device_id = params->device_id;
14219 
14220 	if (!hif_handle) {
14221 		dp_err("HIF handle is NULL");
14222 		goto fail0;
14223 	}
14224 	arch_id = cdp_get_arch_type_from_devid(device_id);
14225 	soc = qdf_mem_malloc(dp_get_soc_context_size(device_id));
14226 	if (!soc) {
14227 		dp_err("DP SOC memory allocation failed");
14228 		goto fail0;
14229 	}
14230 
14231 	dp_info("soc memory allocated %pK", soc);
14232 	soc->hif_handle = hif_handle;
14233 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
14234 	if (!soc->hal_soc)
14235 		goto fail1;
14236 
14237 	hif_get_cmem_info(soc->hif_handle,
14238 			  &soc->cmem_base,
14239 			  &soc->cmem_total_size);
14240 	soc->cmem_avail_size = soc->cmem_total_size;
14241 	int_ctx = 0;
14242 	soc->device_id = device_id;
14243 	soc->cdp_soc.ops =
14244 		(struct cdp_ops *)qdf_mem_malloc(sizeof(struct cdp_ops));
14245 	if (!soc->cdp_soc.ops)
14246 		goto fail1;
14247 
14248 	dp_soc_txrx_ops_attach(soc);
14249 	soc->cdp_soc.ol_ops = ol_ops;
14250 	soc->ctrl_psoc = ctrl_psoc;
14251 	soc->osdev = qdf_osdev;
14252 	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
14253 	hal_rx_get_tlv_size(soc->hal_soc, &soc->rx_pkt_tlv_size,
14254 			    &soc->rx_mon_pkt_tlv_size);
14255 	soc->idle_link_bm_id = hal_get_idle_link_bm_id(soc->hal_soc,
14256 						       params->mlo_chip_id);
14257 	soc->features.dmac_cmn_src_rxbuf_ring_enabled =
14258 		hal_dmac_cmn_src_rxbuf_ring_get(soc->hal_soc);
14259 	soc->arch_id = arch_id;
14260 	soc->link_desc_id_start =
14261 			dp_get_link_desc_id_start(soc->arch_id);
14262 	dp_configure_arch_ops(soc);
14263 
14264 	/* Reset wbm sg list and flags */
14265 	dp_rx_wbm_sg_list_reset(soc);
14266 
14267 	dp_soc_tx_hw_desc_history_attach(soc);
14268 	dp_soc_rx_history_attach(soc);
14269 	dp_soc_mon_status_ring_history_attach(soc);
14270 	dp_soc_tx_history_attach(soc);
14271 	wlan_set_srng_cfg(&soc->wlan_srng_cfg);
14272 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
14273 	if (!soc->wlan_cfg_ctx) {
14274 		dp_err("wlan_cfg_ctx failed\n");
14275 		goto fail2;
14276 	}
14277 	dp_soc_cfg_attach(soc);
14278 
14279 	if (dp_hw_link_desc_pool_banks_alloc(soc, WLAN_INVALID_PDEV_ID)) {
14280 		dp_err("failed to allocate link desc pool banks");
14281 		goto fail3;
14282 	}
14283 
14284 	if (dp_hw_link_desc_ring_alloc(soc)) {
14285 		dp_err("failed to allocate link_desc_ring");
14286 		goto fail4;
14287 	}
14288 
14289 	if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_attach(soc,
14290 								 params))) {
14291 		dp_err("unable to do target specific attach");
14292 		goto fail5;
14293 	}
14294 
14295 	if (dp_soc_srng_alloc(soc)) {
14296 		dp_err("failed to allocate soc srng rings");
14297 		goto fail6;
14298 	}
14299 
14300 	if (dp_soc_tx_desc_sw_pools_alloc(soc)) {
14301 		dp_err("dp_soc_tx_desc_sw_pools_alloc failed");
14302 		goto fail7;
14303 	}
14304 
14305 	if (!dp_monitor_modularized_enable()) {
14306 		if (dp_mon_soc_attach_wrapper(soc)) {
14307 			dp_err("failed to attach monitor");
14308 			goto fail8;
14309 		}
14310 	}
14311 
14312 	if (dp_sysfs_initialize_stats(soc) != QDF_STATUS_SUCCESS) {
14313 		dp_err("failed to initialize dp stats sysfs file");
14314 		dp_sysfs_deinitialize_stats(soc);
14315 	}
14316 
14317 	dp_soc_swlm_attach(soc);
14318 	dp_soc_set_interrupt_mode(soc);
14319 	dp_soc_set_def_pdev(soc);
14320 
14321 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
14322 		qdf_dma_mem_stats_read(),
14323 		qdf_heap_mem_stats_read(),
14324 		qdf_skb_total_mem_stats_read());
14325 
14326 	return soc;
14327 fail8:
14328 	dp_soc_tx_desc_sw_pools_free(soc);
14329 fail7:
14330 	dp_soc_srng_free(soc);
14331 fail6:
14332 	soc->arch_ops.txrx_soc_detach(soc);
14333 fail5:
14334 	dp_hw_link_desc_ring_free(soc);
14335 fail4:
14336 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
14337 fail3:
14338 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
14339 fail2:
14340 	qdf_mem_free(soc->cdp_soc.ops);
14341 fail1:
14342 	qdf_mem_free(soc);
14343 fail0:
14344 	return NULL;
14345 }
14346 
14347 /**
14348  * dp_soc_init() - Initialize txrx SOC
14349  * @dp_soc: Opaque DP SOC handle
14350  * @htc_handle: Opaque HTC handle
14351  * @hif_handle: Opaque HIF handle
14352  *
14353  * Return: DP SOC handle on success, NULL on failure
14354  */
14355 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
14356 		  struct hif_opaque_softc *hif_handle)
14357 {
14358 	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
14359 	bool is_monitor_mode = false;
14360 	struct hal_reo_params reo_params;
14361 	uint8_t i;
14362 	int num_dp_msi;
14363 	struct dp_mon_ops *mon_ops;
14364 
14365 	wlan_minidump_log(soc, sizeof(*soc), soc->ctrl_psoc,
14366 			  WLAN_MD_DP_SOC, "dp_soc");
14367 
14368 	soc->hif_handle = hif_handle;
14369 
14370 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
14371 	if (!soc->hal_soc)
14372 		goto fail0;
14373 
14374 	if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_init(soc))) {
14375 		dp_err("unable to do target specific init");
14376 		goto fail0;
14377 	}
14378 
14379 	htt_soc = htt_soc_attach(soc, htc_handle);
14380 	if (!htt_soc)
14381 		goto fail1;
14382 
14383 	soc->htt_handle = htt_soc;
14384 
14385 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
14386 		goto fail2;
14387 
14388 	htt_set_htc_handle(htt_soc, htc_handle);
14389 
14390 	dp_soc_cfg_init(soc);
14391 
14392 	dp_monitor_soc_cfg_init(soc);
14393 	/* Reset/Initialize wbm sg list and flags */
14394 	dp_rx_wbm_sg_list_reset(soc);
14395 
14396 	/* Note: Any SRNG ring initialization should happen only after
14397 	 * Interrupt mode is set and followed by filling up the
14398 	 * interrupt mask. IT SHOULD ALWAYS BE IN THIS ORDER.
14399 	 */
14400 	dp_soc_set_interrupt_mode(soc);
14401 	if (soc->cdp_soc.ol_ops->get_con_mode &&
14402 	    soc->cdp_soc.ol_ops->get_con_mode() ==
14403 	    QDF_GLOBAL_MONITOR_MODE)
14404 		is_monitor_mode = true;
14405 
14406 	num_dp_msi = dp_get_num_msi_available(soc, soc->intr_mode);
14407 	if (num_dp_msi < 0) {
14408 		dp_init_err("%pK: dp_interrupt assignment failed", soc);
14409 		goto fail3;
14410 	}
14411 
14412 	wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, num_dp_msi,
14413 				     soc->intr_mode, is_monitor_mode);
14414 
14415 	/* initialize WBM_IDLE_LINK ring */
14416 	if (dp_hw_link_desc_ring_init(soc)) {
14417 		dp_init_err("%pK: dp_hw_link_desc_ring_init failed", soc);
14418 		goto fail3;
14419 	}
14420 
14421 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
14422 
14423 	if (dp_soc_srng_init(soc)) {
14424 		dp_init_err("%pK: dp_soc_srng_init failed", soc);
14425 		goto fail4;
14426 	}
14427 
14428 	if (htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
14429 			       htt_get_htc_handle(htt_soc),
14430 			       soc->hal_soc, soc->osdev) == NULL)
14431 		goto fail5;
14432 
14433 	/* Initialize descriptors in TCL Rings */
14434 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
14435 		hal_tx_init_data_ring(soc->hal_soc,
14436 				      soc->tcl_data_ring[i].hal_srng);
14437 	}
14438 
14439 	if (dp_soc_tx_desc_sw_pools_init(soc)) {
14440 		dp_init_err("%pK: dp_tx_soc_attach failed", soc);
14441 		goto fail6;
14442 	}
14443 
14444 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
14445 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
14446 	soc->cce_disable = false;
14447 	soc->max_ast_ageout_count = MAX_AST_AGEOUT_COUNT;
14448 
14449 	soc->sta_mode_search_policy = DP_TX_ADDR_SEARCH_ADDR_POLICY;
14450 	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
14451 	qdf_spinlock_create(&soc->vdev_map_lock);
14452 	qdf_atomic_init(&soc->num_tx_outstanding);
14453 	qdf_atomic_init(&soc->num_tx_exception);
14454 	soc->num_tx_allowed =
14455 		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);
14456 
14457 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
14458 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
14459 				CDP_CFG_MAX_PEER_ID);
14460 
14461 		if (ret != -EINVAL)
14462 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
14463 
14464 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
14465 				CDP_CFG_CCE_DISABLE);
14466 		if (ret == 1)
14467 			soc->cce_disable = true;
14468 	}
14469 
14470 	/*
14471 	 * Skip registering hw ring interrupts for WMAC2 on IPQ6018
14472 	 * and IPQ5018 WMAC2 is not there in these platforms.
14473 	 */
14474 	if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018 ||
14475 	    soc->disable_mac2_intr)
14476 		dp_soc_disable_unused_mac_intr_mask(soc, 0x2);
14477 
14478 	/*
14479 	 * Skip registering hw ring interrupts for WMAC1 on IPQ5018
14480 	 * WMAC1 is not there in this platform.
14481 	 */
14482 	if (soc->disable_mac1_intr)
14483 		dp_soc_disable_unused_mac_intr_mask(soc, 0x1);
14484 
14485 	/* Setup HW REO */
14486 	qdf_mem_zero(&reo_params, sizeof(reo_params));
14487 
14488 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
14489 		/*
14490 		 * Reo ring remap is not required if both radios
14491 		 * are offloaded to NSS
14492 		 */
14493 
14494 		if (dp_reo_remap_config(soc, &reo_params.remap0,
14495 					&reo_params.remap1,
14496 					&reo_params.remap2))
14497 			reo_params.rx_hash_enabled = true;
14498 		else
14499 			reo_params.rx_hash_enabled = false;
14500 	}
14501 
14502 	/* setup the global rx defrag waitlist */
14503 	TAILQ_INIT(&soc->rx.defrag.waitlist);
14504 	soc->rx.defrag.timeout_ms =
14505 		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
14506 	soc->rx.defrag.next_flush_ms = 0;
14507 	soc->rx.flags.defrag_timeout_check =
14508 		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
14509 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
14510 
14511 	/*
14512 	 * set the fragment destination ring
14513 	 */
14514 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
14515 
14516 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
14517 		reo_params.alt_dst_ind_0 = REO_REMAP_RELEASE;
14518 
14519 	hal_reo_setup(soc->hal_soc, &reo_params);
14520 
14521 	hal_reo_set_err_dst_remap(soc->hal_soc);
14522 
14523 	soc->features.pn_in_reo_dest = hal_reo_enable_pn_in_dest(soc->hal_soc);
14524 
14525 	mon_ops = dp_mon_ops_get(soc);
14526 	if (mon_ops && mon_ops->mon_soc_init)
14527 		mon_ops->mon_soc_init(soc);
14528 
14529 	qdf_atomic_set(&soc->cmn_init_done, 1);
14530 
14531 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
14532 
14533 	qdf_spinlock_create(&soc->ast_lock);
14534 	dp_peer_mec_spinlock_create(soc);
14535 
14536 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
14537 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
14538 	INIT_RX_HW_STATS_LOCK(soc);
14539 
14540 	qdf_nbuf_queue_init(&soc->invalid_buf_queue);
14541 	/* fill the tx/rx cpu ring map*/
14542 	dp_soc_set_txrx_ring_map(soc);
14543 
14544 	TAILQ_INIT(&soc->inactive_peer_list);
14545 	qdf_spinlock_create(&soc->inactive_peer_list_lock);
14546 	TAILQ_INIT(&soc->inactive_vdev_list);
14547 	qdf_spinlock_create(&soc->inactive_vdev_list_lock);
14548 	qdf_spinlock_create(&soc->htt_stats.lock);
14549 	/* initialize work queue for stats processing */
14550 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
14551 
14552 	dp_reo_desc_deferred_freelist_create(soc);
14553 
14554 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
14555 		qdf_dma_mem_stats_read(),
14556 		qdf_heap_mem_stats_read(),
14557 		qdf_skb_total_mem_stats_read());
14558 
14559 	soc->vdev_stats_id_map = 0;
14560 
14561 	return soc;
14562 fail6:
14563 	htt_soc_htc_dealloc(soc->htt_handle);
14564 fail5:
14565 	dp_soc_srng_deinit(soc);
14566 fail4:
14567 	dp_hw_link_desc_ring_deinit(soc);
14568 fail3:
14569 	htt_htc_pkt_pool_free(htt_soc);
14570 fail2:
14571 	htt_soc_detach(htt_soc);
14572 fail1:
14573 	soc->arch_ops.txrx_soc_deinit(soc);
14574 fail0:
14575 	return NULL;
14576 }
14577 
14578 /**
14579  * dp_soc_init_wifi3() - Initialize txrx SOC
14580  * @soc: Opaque DP SOC handle
14581  * @ctrl_psoc: Opaque SOC handle from control plane(Unused)
14582  * @hif_handle: Opaque HIF handle
14583  * @htc_handle: Opaque HTC handle
14584  * @qdf_osdev: QDF device (Unused)
14585  * @ol_ops: Offload Operations (Unused)
14586  * @device_id: Device ID (Unused)
14587  *
14588  * Return: DP SOC handle on success, NULL on failure
14589  */
14590 void *dp_soc_init_wifi3(struct cdp_soc_t *soc,
14591 			struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
14592 			struct hif_opaque_softc *hif_handle,
14593 			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
14594 			struct ol_if_ops *ol_ops, uint16_t device_id)
14595 {
14596 	return dp_soc_init((struct dp_soc *)soc, htc_handle, hif_handle);
14597 }
14598 
14599 #endif
14600 
14601 /*
14602  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
14603  *
14604  * @soc: handle to DP soc
14605  * @mac_id: MAC id
14606  *
14607  * Return: Return pdev corresponding to MAC
14608  */
14609 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
14610 {
14611 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
14612 		return (mac_id < MAX_PDEV_CNT) ? soc->pdev_list[mac_id] : NULL;
14613 
14614 	/* Typically for MCL as there only 1 PDEV*/
14615 	return soc->pdev_list[0];
14616 }
14617 
14618 void dp_update_num_mac_rings_for_dbs(struct dp_soc *soc,
14619 				     int *max_mac_rings)
14620 {
14621 	bool dbs_enable = false;
14622 
14623 	if (soc->cdp_soc.ol_ops->is_hw_dbs_capable)
14624 		dbs_enable = soc->cdp_soc.ol_ops->
14625 				is_hw_dbs_capable((void *)soc->ctrl_psoc);
14626 
14627 	*max_mac_rings = dbs_enable ? (*max_mac_rings) : 1;
14628 	dp_info("dbs_enable %d, max_mac_rings %d",
14629 		dbs_enable, *max_mac_rings);
14630 }
14631 
14632 qdf_export_symbol(dp_update_num_mac_rings_for_dbs);
14633 
14634 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
14635 /**
14636  * dp_get_cfr_rcc() - get cfr rcc config
14637  * @soc_hdl: Datapath soc handle
14638  * @pdev_id: id of objmgr pdev
14639  *
14640  * Return: true/false based on cfr mode setting
14641  */
14642 static
14643 bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
14644 {
14645 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14646 	struct dp_pdev *pdev = NULL;
14647 
14648 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14649 	if (!pdev) {
14650 		dp_err("pdev is NULL");
14651 		return false;
14652 	}
14653 
14654 	return pdev->cfr_rcc_mode;
14655 }
14656 
14657 /**
14658  * dp_set_cfr_rcc() - enable/disable cfr rcc config
14659  * @soc_hdl: Datapath soc handle
14660  * @pdev_id: id of objmgr pdev
14661  * @enable: Enable/Disable cfr rcc mode
14662  *
14663  * Return: none
14664  */
14665 static
14666 void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, bool enable)
14667 {
14668 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14669 	struct dp_pdev *pdev = NULL;
14670 
14671 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14672 	if (!pdev) {
14673 		dp_err("pdev is NULL");
14674 		return;
14675 	}
14676 
14677 	pdev->cfr_rcc_mode = enable;
14678 }
14679 
14680 /*
14681  * dp_get_cfr_dbg_stats - Get the debug statistics for CFR
14682  * @soc_hdl: Datapath soc handle
14683  * @pdev_id: id of data path pdev handle
14684  * @cfr_rcc_stats: CFR RCC debug statistics buffer
14685  *
14686  * Return: none
14687  */
14688 static inline void
14689 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
14690 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats)
14691 {
14692 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14693 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14694 
14695 	if (!pdev) {
14696 		dp_err("Invalid pdev");
14697 		return;
14698 	}
14699 
14700 	qdf_mem_copy(cfr_rcc_stats, &pdev->stats.rcc,
14701 		     sizeof(struct cdp_cfr_rcc_stats));
14702 }
14703 
14704 /*
14705  * dp_clear_cfr_dbg_stats - Clear debug statistics for CFR
14706  * @soc_hdl: Datapath soc handle
14707  * @pdev_id: id of data path pdev handle
14708  *
14709  * Return: none
14710  */
14711 static void dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl,
14712 				   uint8_t pdev_id)
14713 {
14714 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14715 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14716 
14717 	if (!pdev) {
14718 		dp_err("dp pdev is NULL");
14719 		return;
14720 	}
14721 
14722 	qdf_mem_zero(&pdev->stats.rcc, sizeof(pdev->stats.rcc));
14723 }
14724 #endif
14725 
14726 /**
14727  * dp_bucket_index() - Return index from array
14728  *
14729  * @delay: delay measured
14730  * @array: array used to index corresponding delay
14731  * @delay_in_us: flag to indicate whether the delay in ms or us
14732  *
14733  * Return: index
14734  */
14735 static uint8_t
14736 dp_bucket_index(uint32_t delay, uint16_t *array, bool delay_in_us)
14737 {
14738 	uint8_t i = CDP_DELAY_BUCKET_0;
14739 	uint32_t thr_low, thr_high;
14740 
14741 	for (; i < CDP_DELAY_BUCKET_MAX - 1; i++) {
14742 		thr_low = array[i];
14743 		thr_high = array[i + 1];
14744 
14745 		if (delay_in_us) {
14746 			thr_low = thr_low * USEC_PER_MSEC;
14747 			thr_high = thr_high * USEC_PER_MSEC;
14748 		}
14749 		if (delay >= thr_low && delay <= thr_high)
14750 			return i;
14751 	}
14752 	return (CDP_DELAY_BUCKET_MAX - 1);
14753 }
14754 
#ifdef HW_TX_DELAY_STATS_ENABLE
/*
 * cdp_fw_to_hw_delay_range
 * Fw to hw delay ranges in milliseconds
 * (coarser buckets when the HW TX delay stats feature is enabled)
 */
static uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
	0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
#else
/* finer low-end buckets in the default build */
static uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
	0, 2, 4, 6, 8, 10, 20, 30, 40, 50, 100, 250, 500};
#endif
14766 
14767 /*
14768  * cdp_sw_enq_delay_range
14769  * Software enqueue delay ranges in milliseconds
14770  */
14771 static uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
14772 	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
14773 
14774 /*
14775  * cdp_intfrm_delay_range
14776  * Interframe delay ranges in milliseconds
14777  */
14778 static uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
14779 	0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};
14780 
14781 /**
14782  * dp_fill_delay_buckets() - Fill delay statistics bucket for each
14783  *				type of delay
14784  * @tstats: tid tx stats
14785  * @rstats: tid rx stats
14786  * @delay: delay in ms
14787  * @tid: tid value
14788  * @mode: type of tx delay mode
14789  * @ring_id: ring number
14790  * @delay_in_us: flag to indicate whether the delay in ms or us
14791  *
14792  * Return: pointer to cdp_delay_stats structure
14793  */
14794 static struct cdp_delay_stats *
14795 dp_fill_delay_buckets(struct cdp_tid_tx_stats *tstats,
14796 		      struct cdp_tid_rx_stats *rstats, uint32_t delay,
14797 		      uint8_t tid, uint8_t mode, uint8_t ring_id,
14798 		      bool delay_in_us)
14799 {
14800 	uint8_t delay_index = 0;
14801 	struct cdp_delay_stats *stats = NULL;
14802 
14803 	/*
14804 	 * Update delay stats in proper bucket
14805 	 */
14806 	switch (mode) {
14807 	/* Software Enqueue delay ranges */
14808 	case CDP_DELAY_STATS_SW_ENQ:
14809 		if (!tstats)
14810 			break;
14811 
14812 		delay_index = dp_bucket_index(delay, cdp_sw_enq_delay,
14813 					      delay_in_us);
14814 		tstats->swq_delay.delay_bucket[delay_index]++;
14815 		stats = &tstats->swq_delay;
14816 		break;
14817 
14818 	/* Tx Completion delay ranges */
14819 	case CDP_DELAY_STATS_FW_HW_TRANSMIT:
14820 		if (!tstats)
14821 			break;
14822 
14823 		delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay,
14824 					      delay_in_us);
14825 		tstats->hwtx_delay.delay_bucket[delay_index]++;
14826 		stats = &tstats->hwtx_delay;
14827 		break;
14828 
14829 	/* Interframe tx delay ranges */
14830 	case CDP_DELAY_STATS_TX_INTERFRAME:
14831 		if (!tstats)
14832 			break;
14833 
14834 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
14835 					      delay_in_us);
14836 		tstats->intfrm_delay.delay_bucket[delay_index]++;
14837 		stats = &tstats->intfrm_delay;
14838 		break;
14839 
14840 	/* Interframe rx delay ranges */
14841 	case CDP_DELAY_STATS_RX_INTERFRAME:
14842 		if (!rstats)
14843 			break;
14844 
14845 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
14846 					      delay_in_us);
14847 		rstats->intfrm_delay.delay_bucket[delay_index]++;
14848 		stats = &rstats->intfrm_delay;
14849 		break;
14850 
14851 	/* Ring reap to indication to network stack */
14852 	case CDP_DELAY_STATS_REAP_STACK:
14853 		if (!rstats)
14854 			break;
14855 
14856 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
14857 					      delay_in_us);
14858 		rstats->to_stack_delay.delay_bucket[delay_index]++;
14859 		stats = &rstats->to_stack_delay;
14860 		break;
14861 	default:
14862 		dp_debug("Incorrect delay mode: %d", mode);
14863 	}
14864 
14865 	return stats;
14866 }
14867 
14868 void dp_update_delay_stats(struct cdp_tid_tx_stats *tstats,
14869 			   struct cdp_tid_rx_stats *rstats, uint32_t delay,
14870 			   uint8_t tid, uint8_t mode, uint8_t ring_id,
14871 			   bool delay_in_us)
14872 {
14873 	struct cdp_delay_stats *dstats = NULL;
14874 
14875 	/*
14876 	 * Delay ranges are different for different delay modes
14877 	 * Get the correct index to update delay bucket
14878 	 */
14879 	dstats = dp_fill_delay_buckets(tstats, rstats, delay, tid, mode,
14880 				       ring_id, delay_in_us);
14881 	if (qdf_unlikely(!dstats))
14882 		return;
14883 
14884 	if (delay != 0) {
14885 		/*
14886 		 * Compute minimum,average and maximum
14887 		 * delay
14888 		 */
14889 		if (delay < dstats->min_delay)
14890 			dstats->min_delay = delay;
14891 
14892 		if (delay > dstats->max_delay)
14893 			dstats->max_delay = delay;
14894 
14895 		/*
14896 		 * Average over delay measured till now
14897 		 */
14898 		if (!dstats->avg_delay)
14899 			dstats->avg_delay = delay;
14900 		else
14901 			dstats->avg_delay = ((delay + dstats->avg_delay) >> 1);
14902 	}
14903 }
14904 
14905 /**
14906  * dp_get_peer_mac_list(): function to get peer mac list of vdev
14907  * @soc: Datapath soc handle
14908  * @vdev_id: vdev id
14909  * @newmac: Table of the clients mac
14910  * @mac_cnt: No. of MACs required
14911  * @limit: Limit the number of clients
14912  *
14913  * return: no of clients
14914  */
14915 uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
14916 			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
14917 			      u_int16_t mac_cnt, bool limit)
14918 {
14919 	struct dp_soc *dp_soc = (struct dp_soc *)soc;
14920 	struct dp_vdev *vdev =
14921 		dp_vdev_get_ref_by_id(dp_soc, vdev_id, DP_MOD_ID_CDP);
14922 	struct dp_peer *peer;
14923 	uint16_t new_mac_cnt = 0;
14924 
14925 	if (!vdev)
14926 		return new_mac_cnt;
14927 
14928 	if (limit && (vdev->num_peers > mac_cnt))
14929 		return 0;
14930 
14931 	qdf_spin_lock_bh(&vdev->peer_list_lock);
14932 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
14933 		if (peer->bss_peer)
14934 			continue;
14935 		if (new_mac_cnt < mac_cnt) {
14936 			WLAN_ADDR_COPY(newmac[new_mac_cnt], peer->mac_addr.raw);
14937 			new_mac_cnt++;
14938 		}
14939 	}
14940 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
14941 	dp_vdev_unref_delete(dp_soc, vdev, DP_MOD_ID_CDP);
14942 	return new_mac_cnt;
14943 }
14944 
14945 uint16_t dp_get_peer_id(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *mac)
14946 {
14947 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
14948 						       mac, 0, vdev_id,
14949 						       DP_MOD_ID_CDP);
14950 	uint16_t peer_id = HTT_INVALID_PEER;
14951 
14952 	if (!peer) {
14953 		dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
14954 		return peer_id;
14955 	}
14956 
14957 	peer_id = peer->peer_id;
14958 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
14959 	return peer_id;
14960 }
14961 
14962 #ifdef QCA_SUPPORT_WDS_EXTENDED
14963 QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc,
14964 				  uint8_t vdev_id,
14965 				  uint8_t *mac,
14966 				  ol_txrx_rx_fp rx,
14967 				  ol_osif_peer_handle osif_peer)
14968 {
14969 	struct dp_txrx_peer *txrx_peer = NULL;
14970 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
14971 						       mac, 0, vdev_id,
14972 						       DP_MOD_ID_CDP);
14973 	QDF_STATUS status = QDF_STATUS_E_INVAL;
14974 
14975 	if (!peer) {
14976 		dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
14977 		return status;
14978 	}
14979 
14980 	txrx_peer = dp_get_txrx_peer(peer);
14981 	if (!txrx_peer) {
14982 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
14983 		return status;
14984 	}
14985 
14986 	if (rx) {
14987 		if (txrx_peer->osif_rx) {
14988 			status = QDF_STATUS_E_ALREADY;
14989 		} else {
14990 			txrx_peer->osif_rx = rx;
14991 			status = QDF_STATUS_SUCCESS;
14992 		}
14993 	} else {
14994 		if (txrx_peer->osif_rx) {
14995 			txrx_peer->osif_rx = NULL;
14996 			status = QDF_STATUS_SUCCESS;
14997 		} else {
14998 			status = QDF_STATUS_E_ALREADY;
14999 		}
15000 	}
15001 
15002 	txrx_peer->wds_ext.osif_peer = osif_peer;
15003 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
15004 
15005 	return status;
15006 }
15007 #endif /* QCA_SUPPORT_WDS_EXTENDED */
15008 
15009 /**
15010  * dp_pdev_srng_deinit() - de-initialize all pdev srng ring including
15011  *			   monitor rings
15012  * @pdev: Datapath pdev handle
15013  *
15014  */
15015 static void dp_pdev_srng_deinit(struct dp_pdev *pdev)
15016 {
15017 	struct dp_soc *soc = pdev->soc;
15018 	uint8_t i;
15019 
15020 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled)
15021 		dp_srng_deinit(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
15022 			       RXDMA_BUF,
15023 			       pdev->lmac_id);
15024 
15025 	if (!soc->rxdma2sw_rings_not_supported) {
15026 		for (i = 0;
15027 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
15028 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
15029 								 pdev->pdev_id);
15030 
15031 			wlan_minidump_remove(soc->rxdma_err_dst_ring[lmac_id].
15032 							base_vaddr_unaligned,
15033 					     soc->rxdma_err_dst_ring[lmac_id].
15034 								alloc_size,
15035 					     soc->ctrl_psoc,
15036 					     WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
15037 					     "rxdma_err_dst");
15038 			dp_srng_deinit(soc, &soc->rxdma_err_dst_ring[lmac_id],
15039 				       RXDMA_DST, lmac_id);
15040 		}
15041 	}
15042 
15043 
15044 }
15045 
/**
 * dp_pdev_srng_init() - initialize all pdev srng rings including
 *			   monitor rings
 * @pdev: Datapath pdev handle
 *
 * return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_NOMEM on failure
 */
static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	uint32_t i;

	soc_cfg_ctx = soc->wlan_cfg_ctx;

	/* Per-pdev RX refill ring; skipped when the common DMAC source
	 * rxbuf ring is used instead.
	 */
	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
		if (dp_srng_init(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
				 RXDMA_BUF, 0, pdev->lmac_id)) {
			dp_init_err("%pK: dp_srng_init failed rx refill ring",
				    soc);
			goto fail1;
		}
	}

	/* LMAC RxDMA to SW Rings configuration */
	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
		/* Only valid for MCL */
		pdev = soc->pdev_list[0];

	if (!soc->rxdma2sw_rings_not_supported) {
		for (i = 0;
		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
								 pdev->pdev_id);
			struct dp_srng *srng =
				&soc->rxdma_err_dst_ring[lmac_id];

			/* Already initialized (hal handle set) - skip */
			if (srng->hal_srng)
				continue;

			if (dp_srng_init(soc, srng, RXDMA_DST, 0, lmac_id)) {
				dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring",
					    soc);
				goto fail1;
			}
			wlan_minidump_log(soc->rxdma_err_dst_ring[lmac_id].
						base_vaddr_unaligned,
					  soc->rxdma_err_dst_ring[lmac_id].
						alloc_size,
					  soc->ctrl_psoc,
					  WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
					  "rxdma_err_dst");
		}
	}
	return QDF_STATUS_SUCCESS;

fail1:
	/* Undo any rings initialized so far */
	dp_pdev_srng_deinit(pdev);
	return QDF_STATUS_E_NOMEM;
}
15107 
15108 /**
15109  * dp_pdev_srng_free() - free all pdev srng rings including monitor rings
15110  * pdev: Datapath pdev handle
15111  *
15112  */
15113 static void dp_pdev_srng_free(struct dp_pdev *pdev)
15114 {
15115 	struct dp_soc *soc = pdev->soc;
15116 	uint8_t i;
15117 
15118 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled)
15119 		dp_srng_free(soc, &soc->rx_refill_buf_ring[pdev->lmac_id]);
15120 
15121 	if (!soc->rxdma2sw_rings_not_supported) {
15122 		for (i = 0;
15123 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
15124 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
15125 								 pdev->pdev_id);
15126 
15127 			dp_srng_free(soc, &soc->rxdma_err_dst_ring[lmac_id]);
15128 		}
15129 	}
15130 }
15131 
/**
 * dp_pdev_srng_alloc() - allocate memory for all pdev srng rings including
 *			  monitor rings
 * @pdev: Datapath pdev handle
 *
 * return: QDF_STATUS_SUCCESS on success
 *	   QDF_STATUS_E_NOMEM on failure
 */
static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	uint32_t ring_size;
	uint32_t i;

	soc_cfg_ctx = soc->wlan_cfg_ctx;

	/* Per-pdev RX refill ring; skipped when the common DMAC source
	 * rxbuf ring is used instead.
	 */
	ring_size = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
		if (dp_srng_alloc(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
				  RXDMA_BUF, ring_size, 0)) {
			dp_init_err("%pK: dp_srng_alloc failed rx refill ring",
				    soc);
			goto fail1;
		}
	}

	ring_size = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
	/* LMAC RxDMA to SW Rings configuration */
	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
		/* Only valid for MCL */
		pdev = soc->pdev_list[0];

	if (!soc->rxdma2sw_rings_not_supported) {
		for (i = 0;
		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
								 pdev->pdev_id);
			struct dp_srng *srng =
				&soc->rxdma_err_dst_ring[lmac_id];

			/* Already allocated - skip */
			if (srng->base_vaddr_unaligned)
				continue;

			if (dp_srng_alloc(soc, srng, RXDMA_DST, ring_size, 0)) {
				dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring",
					    soc);
				goto fail1;
			}
		}
	}

	return QDF_STATUS_SUCCESS;
fail1:
	/* Release any rings allocated so far */
	dp_pdev_srng_free(pdev);
	return QDF_STATUS_E_NOMEM;
}
15189 
15190 #ifndef WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG
/**
 * dp_soc_tcl_cmd_cred_srng_init() - initialize the TCL command/credit SRNG
 * @soc: Datapath soc handle
 *
 * Return: QDF_STATUS_SUCCESS on success or when the ring is not in use,
 *	   error status from dp_srng_init() otherwise
 */
static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
{
	QDF_STATUS status;

	/* Ring is optional; only initialize it when the soc requested it */
	if (soc->init_tcl_cmd_cred_ring) {
		status =  dp_srng_init(soc, &soc->tcl_cmd_credit_ring,
				       TCL_CMD_CREDIT, 0, 0);
		if (QDF_IS_STATUS_ERROR(status))
			return status;

		/* NOTE(review): minidump label below looks copy-pasted from
		 * the WBM desc-release ring; the deinit path uses the same
		 * label, so any rename must update both places together.
		 */
		wlan_minidump_log(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
				  soc->tcl_cmd_credit_ring.alloc_size,
				  soc->ctrl_psoc,
				  WLAN_MD_DP_SRNG_TCL_CMD,
				  "wbm_desc_rel_ring");
	}

	return QDF_STATUS_SUCCESS;
}
15210 
/**
 * dp_soc_tcl_cmd_cred_srng_deinit() - de-initialize the TCL command/credit
 *				       SRNG, if it was in use
 * @soc: Datapath soc handle
 */
static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
{
	if (soc->init_tcl_cmd_cred_ring) {
		/* Label must match the one passed to wlan_minidump_log()
		 * at init time.
		 */
		wlan_minidump_remove(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
				     soc->tcl_cmd_credit_ring.alloc_size,
				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_CMD,
				     "wbm_desc_rel_ring");
		dp_srng_deinit(soc, &soc->tcl_cmd_credit_ring,
			       TCL_CMD_CREDIT, 0);
	}
}
15222 
15223 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
15224 {
15225 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
15226 	uint32_t entries;
15227 	QDF_STATUS status;
15228 
15229 	entries = wlan_cfg_get_dp_soc_tcl_cmd_credit_ring_size(soc_cfg_ctx);
15230 	if (soc->init_tcl_cmd_cred_ring) {
15231 		status = dp_srng_alloc(soc, &soc->tcl_cmd_credit_ring,
15232 				       TCL_CMD_CREDIT, entries, 0);
15233 		if (QDF_IS_STATUS_ERROR(status))
15234 			return status;
15235 	}
15236 
15237 	return QDF_STATUS_SUCCESS;
15238 }
15239 
/**
 * dp_soc_tcl_cmd_cred_srng_free() - free TCL command/credit ring memory,
 *				     if the ring was in use
 * @soc: Datapath soc handle
 */
static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
{
	if (soc->init_tcl_cmd_cred_ring)
		dp_srng_free(soc, &soc->tcl_cmd_credit_ring);
}
15245 
/**
 * dp_tx_init_cmd_credit_ring() - program HAL state for the TCL
 *				  command/credit ring, if in use
 * @soc: Datapath soc handle
 */
static inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
{
	if (soc->init_tcl_cmd_cred_ring)
		hal_tx_init_cmd_credit_ring(soc->hal_soc,
					    soc->tcl_cmd_credit_ring.hal_srng);
}
15252 #else
/* Stubs for builds with WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG defined:
 * the TCL command/credit ring is compiled out, so alloc/init report
 * success and free/deinit/hw-init do nothing.
 */
static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
{
}

static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
{
}

static inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
{
}
15274 #endif
15275 
15276 #ifndef WLAN_DP_DISABLE_TCL_STATUS_SRNG
/**
 * dp_soc_tcl_status_srng_init() - initialize the TCL status SRNG
 * @soc: Datapath soc handle
 *
 * Return: QDF_STATUS_SUCCESS on success, dp_srng_init() error otherwise
 */
static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
{
	QDF_STATUS status;

	status =  dp_srng_init(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	/* NOTE(review): minidump label below looks copy-pasted from the
	 * WBM desc-release ring; the deinit path uses the same label, so
	 * any rename must update both places together.
	 */
	wlan_minidump_log(soc->tcl_status_ring.base_vaddr_unaligned,
			  soc->tcl_status_ring.alloc_size,
			  soc->ctrl_psoc,
			  WLAN_MD_DP_SRNG_TCL_STATUS,
			  "wbm_desc_rel_ring");

	return QDF_STATUS_SUCCESS;
}
15293 
/**
 * dp_soc_tcl_status_srng_deinit() - de-initialize the TCL status SRNG
 * @soc: Datapath soc handle
 */
static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
{
	/* Label must match the one passed to wlan_minidump_log() at init */
	wlan_minidump_remove(soc->tcl_status_ring.base_vaddr_unaligned,
			     soc->tcl_status_ring.alloc_size,
			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_STATUS,
			     "wbm_desc_rel_ring");
	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
}
15302 
15303 static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
15304 {
15305 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
15306 	uint32_t entries;
15307 	QDF_STATUS status = QDF_STATUS_SUCCESS;
15308 
15309 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
15310 	status = dp_srng_alloc(soc, &soc->tcl_status_ring,
15311 			       TCL_STATUS, entries, 0);
15312 
15313 	return status;
15314 }
15315 
/**
 * dp_soc_tcl_status_srng_free() - free TCL status ring memory
 * @soc: Datapath soc handle
 */
static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
{
	dp_srng_free(soc, &soc->tcl_status_ring);
}
15320 #else
/* Stubs for builds with WLAN_DP_DISABLE_TCL_STATUS_SRNG defined:
 * the TCL status ring is compiled out, so alloc/init report success
 * and free/deinit do nothing.
 */
static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
{
}

static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
{
}
15338 #endif
15339 
/**
 * dp_soc_srng_deinit() - de-initialize soc srng rings
 * @soc: Datapath soc handle
 *
 * Tears down all soc-level rings in the reverse sense of
 * dp_soc_srng_init(); minidump entries are removed before each
 * corresponding ring is de-initialized.
 */
static void dp_soc_srng_deinit(struct dp_soc *soc)
{
	uint32_t i;

	/* Let the arch layer tear down its own rings first */
	if (soc->arch_ops.txrx_soc_srng_deinit)
		soc->arch_ops.txrx_soc_srng_deinit(soc);

	/* Free the ring memories */
	/* Common rings */
	wlan_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
			     soc->wbm_desc_rel_ring.alloc_size,
			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_WBM_DESC_REL,
			     "wbm_desc_rel_ring");
	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);

	/* Tx data rings */
	for (i = 0; i < soc->num_tcl_data_rings; i++)
		dp_deinit_tx_pair_by_index(soc, i);

	/* IPA TCL_TX/completion pair and IPA alternate tx ring */
	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
		dp_deinit_tx_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
		dp_ipa_deinit_alt_tx_ring(soc);
	}

	/* TCL command and status rings */
	dp_soc_tcl_cmd_cred_srng_deinit(soc);
	dp_soc_tcl_status_srng_deinit(soc);

	for (i = 0; i < soc->num_reo_dest_rings; i++) {
		/* TODO: Get number of rings and ring sizes
		 * from wlan_cfg
		 */
		wlan_minidump_remove(soc->reo_dest_ring[i].base_vaddr_unaligned,
				     soc->reo_dest_ring[i].alloc_size,
				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_DEST,
				     "reo_dest_ring");
		dp_srng_deinit(soc, &soc->reo_dest_ring[i], REO_DST, i);
	}

	/* REO reinjection ring */
	wlan_minidump_remove(soc->reo_reinject_ring.base_vaddr_unaligned,
			     soc->reo_reinject_ring.alloc_size,
			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_REINJECT,
			     "reo_reinject_ring");
	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);

	/* Rx release ring */
	wlan_minidump_remove(soc->rx_rel_ring.base_vaddr_unaligned,
			     soc->rx_rel_ring.alloc_size,
			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_RX_REL,
			     "reo_release_ring");
	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);

	/* Rx exception ring */
	/* TODO: Better to store ring_type and ring_num in
	 * dp_srng during setup
	 */
	wlan_minidump_remove(soc->reo_exception_ring.base_vaddr_unaligned,
			     soc->reo_exception_ring.alloc_size,
			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_EXCEPTION,
			     "reo_exception_ring");
	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);

	/* REO command and status rings */
	wlan_minidump_remove(soc->reo_cmd_ring.base_vaddr_unaligned,
			     soc->reo_cmd_ring.alloc_size,
			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_CMD,
			     "reo_cmd_ring");
	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
	wlan_minidump_remove(soc->reo_status_ring.base_vaddr_unaligned,
			     soc->reo_status_ring.alloc_size,
			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_STATUS,
			     "reo_status_ring");
	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
}
15420 
15421 /**
15422  * dp_soc_srng_init() - Initialize soc level srng rings
15423  * @soc: Datapath soc handle
15424  *
15425  * return: QDF_STATUS_SUCCESS on success
15426  *	   QDF_STATUS_E_FAILURE on failure
15427  */
15428 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc)
15429 {
15430 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
15431 	uint8_t i;
15432 	uint8_t wbm2_sw_rx_rel_ring_id;
15433 
15434 	soc_cfg_ctx = soc->wlan_cfg_ctx;
15435 
15436 	dp_enable_verbose_debug(soc);
15437 
15438 	/* WBM descriptor release ring */
15439 	if (dp_srng_init(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0)) {
15440 		dp_init_err("%pK: dp_srng_init failed for wbm_desc_rel_ring", soc);
15441 		goto fail1;
15442 	}
15443 
15444 	wlan_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
15445 			  soc->wbm_desc_rel_ring.alloc_size,
15446 			  soc->ctrl_psoc,
15447 			  WLAN_MD_DP_SRNG_WBM_DESC_REL,
15448 			  "wbm_desc_rel_ring");
15449 
15450 	/* TCL command and status rings */
15451 	if (dp_soc_tcl_cmd_cred_srng_init(soc)) {
15452 		dp_init_err("%pK: dp_srng_init failed for tcl_cmd_ring", soc);
15453 		goto fail1;
15454 	}
15455 
15456 	if (dp_soc_tcl_status_srng_init(soc)) {
15457 		dp_init_err("%pK: dp_srng_init failed for tcl_status_ring", soc);
15458 		goto fail1;
15459 	}
15460 
15461 	/* REO reinjection ring */
15462 	if (dp_srng_init(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0)) {
15463 		dp_init_err("%pK: dp_srng_init failed for reo_reinject_ring", soc);
15464 		goto fail1;
15465 	}
15466 
15467 	wlan_minidump_log(soc->reo_reinject_ring.base_vaddr_unaligned,
15468 			  soc->reo_reinject_ring.alloc_size,
15469 			  soc->ctrl_psoc,
15470 			  WLAN_MD_DP_SRNG_REO_REINJECT,
15471 			  "reo_reinject_ring");
15472 
15473 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc_cfg_ctx);
15474 	/* Rx release ring */
15475 	if (dp_srng_init(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
15476 			 wbm2_sw_rx_rel_ring_id, 0)) {
15477 		dp_init_err("%pK: dp_srng_init failed for rx_rel_ring", soc);
15478 		goto fail1;
15479 	}
15480 
15481 	wlan_minidump_log(soc->rx_rel_ring.base_vaddr_unaligned,
15482 			  soc->rx_rel_ring.alloc_size,
15483 			  soc->ctrl_psoc,
15484 			  WLAN_MD_DP_SRNG_RX_REL,
15485 			  "reo_release_ring");
15486 
15487 	/* Rx exception ring */
15488 	if (dp_srng_init(soc, &soc->reo_exception_ring,
15489 			 REO_EXCEPTION, 0, MAX_REO_DEST_RINGS)) {
15490 		dp_init_err("%pK: dp_srng_init failed - reo_exception", soc);
15491 		goto fail1;
15492 	}
15493 
15494 	wlan_minidump_log(soc->reo_exception_ring.base_vaddr_unaligned,
15495 			  soc->reo_exception_ring.alloc_size,
15496 			  soc->ctrl_psoc,
15497 			  WLAN_MD_DP_SRNG_REO_EXCEPTION,
15498 			  "reo_exception_ring");
15499 
15500 	/* REO command and status rings */
15501 	if (dp_srng_init(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0)) {
15502 		dp_init_err("%pK: dp_srng_init failed for reo_cmd_ring", soc);
15503 		goto fail1;
15504 	}
15505 
15506 	wlan_minidump_log(soc->reo_cmd_ring.base_vaddr_unaligned,
15507 			  soc->reo_cmd_ring.alloc_size,
15508 			  soc->ctrl_psoc,
15509 			  WLAN_MD_DP_SRNG_REO_CMD,
15510 			  "reo_cmd_ring");
15511 
15512 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
15513 	TAILQ_INIT(&soc->rx.reo_cmd_list);
15514 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
15515 
15516 	if (dp_srng_init(soc, &soc->reo_status_ring, REO_STATUS, 0, 0)) {
15517 		dp_init_err("%pK: dp_srng_init failed for reo_status_ring", soc);
15518 		goto fail1;
15519 	}
15520 
15521 	wlan_minidump_log(soc->reo_status_ring.base_vaddr_unaligned,
15522 			  soc->reo_status_ring.alloc_size,
15523 			  soc->ctrl_psoc,
15524 			  WLAN_MD_DP_SRNG_REO_STATUS,
15525 			  "reo_status_ring");
15526 
15527 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
15528 		if (dp_init_tx_ring_pair_by_index(soc, i))
15529 			goto fail1;
15530 	}
15531 
15532 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
15533 		if (dp_init_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
15534 			goto fail1;
15535 
15536 		if (dp_ipa_init_alt_tx_ring(soc))
15537 			goto fail1;
15538 	}
15539 
15540 	dp_create_ext_stats_event(soc);
15541 
15542 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
15543 		/* Initialize REO destination ring */
15544 		if (dp_srng_init(soc, &soc->reo_dest_ring[i], REO_DST, i, 0)) {
15545 			dp_init_err("%pK: dp_srng_init failed for reo_dest_ringn", soc);
15546 			goto fail1;
15547 		}
15548 
15549 		wlan_minidump_log(soc->reo_dest_ring[i].base_vaddr_unaligned,
15550 				  soc->reo_dest_ring[i].alloc_size,
15551 				  soc->ctrl_psoc,
15552 				  WLAN_MD_DP_SRNG_REO_DEST,
15553 				  "reo_dest_ring");
15554 	}
15555 
15556 	if (soc->arch_ops.txrx_soc_srng_init) {
15557 		if (soc->arch_ops.txrx_soc_srng_init(soc)) {
15558 			dp_init_err("%pK: dp_srng_init failed for arch rings",
15559 				    soc);
15560 			goto fail1;
15561 		}
15562 	}
15563 
15564 	return QDF_STATUS_SUCCESS;
15565 fail1:
15566 	/*
15567 	 * Cleanup will be done as part of soc_detach, which will
15568 	 * be called on pdev attach failure
15569 	 */
15570 	dp_soc_srng_deinit(soc);
15571 	return QDF_STATUS_E_FAILURE;
15572 }
15573 
15574 /**
15575  * dp_soc_srng_free() - free soc level srng rings
15576  * @soc: Datapath soc handle
15577  *
15578  */
15579 static void dp_soc_srng_free(struct dp_soc *soc)
15580 {
15581 	uint32_t i;
15582 
15583 	if (soc->arch_ops.txrx_soc_srng_free)
15584 		soc->arch_ops.txrx_soc_srng_free(soc);
15585 
15586 	dp_srng_free(soc, &soc->wbm_desc_rel_ring);
15587 
15588 	for (i = 0; i < soc->num_tcl_data_rings; i++)
15589 		dp_free_tx_ring_pair_by_index(soc, i);
15590 
15591 	/* Free IPA rings for TCL_TX and TCL_COMPL ring */
15592 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
15593 		dp_free_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
15594 		dp_ipa_free_alt_tx_ring(soc);
15595 	}
15596 
15597 	dp_soc_tcl_cmd_cred_srng_free(soc);
15598 	dp_soc_tcl_status_srng_free(soc);
15599 
15600 	for (i = 0; i < soc->num_reo_dest_rings; i++)
15601 		dp_srng_free(soc, &soc->reo_dest_ring[i]);
15602 
15603 	dp_srng_free(soc, &soc->reo_reinject_ring);
15604 	dp_srng_free(soc, &soc->rx_rel_ring);
15605 
15606 	dp_srng_free(soc, &soc->reo_exception_ring);
15607 
15608 	dp_srng_free(soc, &soc->reo_cmd_ring);
15609 	dp_srng_free(soc, &soc->reo_status_ring);
15610 }
15611 
15612 /**
15613  * dp_soc_srng_alloc() - Allocate memory for soc level srng rings
15614  * @soc: Datapath soc handle
15615  *
15616  * return: QDF_STATUS_SUCCESS on success
15617  *	   QDF_STATUS_E_NOMEM on failure
15618  */
15619 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc)
15620 {
15621 	uint32_t entries;
15622 	uint32_t i;
15623 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
15624 	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
15625 	uint32_t tx_comp_ring_size, tx_ring_size, reo_dst_ring_size;
15626 
15627 	soc_cfg_ctx = soc->wlan_cfg_ctx;
15628 
15629 	/* sw2wbm link descriptor release ring */
15630 	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
15631 	if (dp_srng_alloc(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE,
15632 			  entries, 0)) {
15633 		dp_init_err("%pK: dp_srng_alloc failed for wbm_desc_rel_ring", soc);
15634 		goto fail1;
15635 	}
15636 
15637 	/* TCL command and status rings */
15638 	if (dp_soc_tcl_cmd_cred_srng_alloc(soc)) {
15639 		dp_init_err("%pK: dp_srng_alloc failed for tcl_cmd_ring", soc);
15640 		goto fail1;
15641 	}
15642 
15643 	if (dp_soc_tcl_status_srng_alloc(soc)) {
15644 		dp_init_err("%pK: dp_srng_alloc failed for tcl_status_ring", soc);
15645 		goto fail1;
15646 	}
15647 
15648 	/* REO reinjection ring */
15649 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
15650 	if (dp_srng_alloc(soc, &soc->reo_reinject_ring, REO_REINJECT,
15651 			  entries, 0)) {
15652 		dp_init_err("%pK: dp_srng_alloc failed for reo_reinject_ring", soc);
15653 		goto fail1;
15654 	}
15655 
15656 	/* Rx release ring */
15657 	entries = wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx);
15658 	if (dp_srng_alloc(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
15659 			  entries, 0)) {
15660 		dp_init_err("%pK: dp_srng_alloc failed for rx_rel_ring", soc);
15661 		goto fail1;
15662 	}
15663 
15664 	/* Rx exception ring */
15665 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
15666 	if (dp_srng_alloc(soc, &soc->reo_exception_ring, REO_EXCEPTION,
15667 			  entries, 0)) {
15668 		dp_init_err("%pK: dp_srng_alloc failed - reo_exception", soc);
15669 		goto fail1;
15670 	}
15671 
15672 	/* REO command and status rings */
15673 	entries = wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx);
15674 	if (dp_srng_alloc(soc, &soc->reo_cmd_ring, REO_CMD, entries, 0)) {
15675 		dp_init_err("%pK: dp_srng_alloc failed for reo_cmd_ring", soc);
15676 		goto fail1;
15677 	}
15678 
15679 	entries = wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx);
15680 	if (dp_srng_alloc(soc, &soc->reo_status_ring, REO_STATUS,
15681 			  entries, 0)) {
15682 		dp_init_err("%pK: dp_srng_alloc failed for reo_status_ring", soc);
15683 		goto fail1;
15684 	}
15685 
15686 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
15687 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
15688 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc_cfg_ctx);
15689 
15690 	/* Disable cached desc if NSS offload is enabled */
15691 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
15692 		cached = 0;
15693 
15694 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
15695 		if (dp_alloc_tx_ring_pair_by_index(soc, i))
15696 			goto fail1;
15697 	}
15698 
15699 	/* IPA rings for TCL_TX and TX_COMP will be allocated here */
15700 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
15701 		if (dp_alloc_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
15702 			goto fail1;
15703 
15704 		if (dp_ipa_alloc_alt_tx_ring(soc))
15705 			goto fail1;
15706 	}
15707 
15708 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
15709 		/* Setup REO destination ring */
15710 		if (dp_srng_alloc(soc, &soc->reo_dest_ring[i], REO_DST,
15711 				  reo_dst_ring_size, cached)) {
15712 			dp_init_err("%pK: dp_srng_alloc failed for reo_dest_ring", soc);
15713 			goto fail1;
15714 		}
15715 	}
15716 
15717 	if (soc->arch_ops.txrx_soc_srng_alloc) {
15718 		if (soc->arch_ops.txrx_soc_srng_alloc(soc)) {
15719 			dp_init_err("%pK: dp_srng_alloc failed for arch rings",
15720 				    soc);
15721 			goto fail1;
15722 		}
15723 	}
15724 
15725 	return QDF_STATUS_SUCCESS;
15726 
15727 fail1:
15728 	dp_soc_srng_free(soc);
15729 	return QDF_STATUS_E_NOMEM;
15730 }
15731 
/**
 * dp_soc_cfg_dump() - log target type, key per-soc flags and the soc
 *		       cfg context for debug
 * @soc: Datapath soc handle
 * @target_type: target type id from hal_get_target_type()
 */
static void dp_soc_cfg_dump(struct dp_soc *soc, uint32_t target_type)
{
	dp_init_info("DP soc Dump for Target = %d", target_type);
	dp_init_info("ast_override_support = %d, da_war_enabled = %d,",
		     soc->ast_override_support, soc->da_war_enabled);

	wlan_cfg_dp_soc_ctx_dump(soc->wlan_cfg_ctx);
}
15740 
15741 /**
15742  * dp_soc_cfg_init() - initialize target specific configuration
15743  *		       during dp_soc_init
15744  * @soc: dp soc handle
15745  */
15746 static void dp_soc_cfg_init(struct dp_soc *soc)
15747 {
15748 	uint32_t target_type;
15749 
15750 	target_type = hal_get_target_type(soc->hal_soc);
15751 	switch (target_type) {
15752 	case TARGET_TYPE_QCA6290:
15753 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
15754 					       REO_DST_RING_SIZE_QCA6290);
15755 		soc->ast_override_support = 1;
15756 		soc->da_war_enabled = false;
15757 		break;
15758 	case TARGET_TYPE_QCA6390:
15759 	case TARGET_TYPE_QCA6490:
15760 	case TARGET_TYPE_QCA6750:
15761 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
15762 					       REO_DST_RING_SIZE_QCA6290);
15763 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
15764 		soc->ast_override_support = 1;
15765 		if (soc->cdp_soc.ol_ops->get_con_mode &&
15766 		    soc->cdp_soc.ol_ops->get_con_mode() ==
15767 		    QDF_GLOBAL_MONITOR_MODE) {
15768 			int int_ctx;
15769 
15770 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
15771 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
15772 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
15773 			}
15774 		}
15775 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
15776 		break;
15777 	case TARGET_TYPE_KIWI:
15778 	case TARGET_TYPE_MANGO:
15779 		soc->ast_override_support = 1;
15780 		soc->per_tid_basize_max_tid = 8;
15781 
15782 		if (soc->cdp_soc.ol_ops->get_con_mode &&
15783 		    soc->cdp_soc.ol_ops->get_con_mode() ==
15784 		    QDF_GLOBAL_MONITOR_MODE) {
15785 			int int_ctx;
15786 
15787 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS;
15788 			     int_ctx++) {
15789 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
15790 				if (dp_is_monitor_mode_using_poll(soc))
15791 					soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
15792 			}
15793 		}
15794 
15795 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
15796 		soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev = 1;
15797 		/* use only MAC0 status ring */
15798 		soc->wlan_cfg_ctx->num_rxdma_status_rings_per_pdev = 1;
15799 		break;
15800 	case TARGET_TYPE_QCA8074:
15801 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
15802 		soc->da_war_enabled = true;
15803 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
15804 		break;
15805 	case TARGET_TYPE_QCA8074V2:
15806 	case TARGET_TYPE_QCA6018:
15807 	case TARGET_TYPE_QCA9574:
15808 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
15809 		soc->ast_override_support = 1;
15810 		soc->per_tid_basize_max_tid = 8;
15811 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
15812 		soc->da_war_enabled = false;
15813 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
15814 		break;
15815 	case TARGET_TYPE_QCN9000:
15816 		soc->ast_override_support = 1;
15817 		soc->da_war_enabled = false;
15818 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
15819 		soc->per_tid_basize_max_tid = 8;
15820 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
15821 		soc->lmac_polled_mode = 0;
15822 		soc->wbm_release_desc_rx_sg_support = 1;
15823 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
15824 		break;
15825 	case TARGET_TYPE_QCA5018:
15826 	case TARGET_TYPE_QCN6122:
15827 		soc->ast_override_support = 1;
15828 		soc->da_war_enabled = false;
15829 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
15830 		soc->per_tid_basize_max_tid = 8;
15831 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS_11AX;
15832 		soc->disable_mac1_intr = 1;
15833 		soc->disable_mac2_intr = 1;
15834 		soc->wbm_release_desc_rx_sg_support = 1;
15835 		break;
15836 	case TARGET_TYPE_QCN9224:
15837 		soc->ast_override_support = 1;
15838 		soc->da_war_enabled = false;
15839 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
15840 		soc->per_tid_basize_max_tid = 8;
15841 		soc->wbm_release_desc_rx_sg_support = 1;
15842 		soc->rxdma2sw_rings_not_supported = 1;
15843 		soc->wbm_sg_last_msdu_war = 1;
15844 		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
15845 		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
15846 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
15847 		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
15848 		soc->host_ast_db_enable = cfg_get(soc->ctrl_psoc,
15849 						  CFG_DP_HOST_AST_DB_ENABLE);
15850 		break;
15851 	default:
15852 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
15853 		qdf_assert_always(0);
15854 		break;
15855 	}
15856 	dp_soc_cfg_dump(soc, target_type);
15857 }
15858 
15859 /**
15860  * dp_soc_cfg_attach() - set target specific configuration in
15861  *			 dp soc cfg.
15862  * @soc: dp soc handle
15863  */
15864 static void dp_soc_cfg_attach(struct dp_soc *soc)
15865 {
15866 	int target_type;
15867 	int nss_cfg = 0;
15868 
15869 	target_type = hal_get_target_type(soc->hal_soc);
15870 	switch (target_type) {
15871 	case TARGET_TYPE_QCA6290:
15872 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
15873 					       REO_DST_RING_SIZE_QCA6290);
15874 		break;
15875 	case TARGET_TYPE_QCA6390:
15876 	case TARGET_TYPE_QCA6490:
15877 	case TARGET_TYPE_QCA6750:
15878 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
15879 					       REO_DST_RING_SIZE_QCA6290);
15880 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
15881 		break;
15882 	case TARGET_TYPE_KIWI:
15883 	case TARGET_TYPE_MANGO:
15884 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
15885 		break;
15886 	case TARGET_TYPE_QCA8074:
15887 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
15888 		break;
15889 	case TARGET_TYPE_QCA8074V2:
15890 	case TARGET_TYPE_QCA6018:
15891 	case TARGET_TYPE_QCA9574:
15892 	case TARGET_TYPE_QCN6122:
15893 	case TARGET_TYPE_QCA5018:
15894 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
15895 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
15896 		break;
15897 	case TARGET_TYPE_QCN9000:
15898 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
15899 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
15900 		break;
15901 	case TARGET_TYPE_QCN9224:
15902 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
15903 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
15904 		break;
15905 	default:
15906 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
15907 		qdf_assert_always(0);
15908 		break;
15909 	}
15910 
15911 	if (soc->cdp_soc.ol_ops->get_soc_nss_cfg)
15912 		nss_cfg = soc->cdp_soc.ol_ops->get_soc_nss_cfg(soc->ctrl_psoc);
15913 
15914 	wlan_cfg_set_dp_soc_nss_cfg(soc->wlan_cfg_ctx, nss_cfg);
15915 
15916 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
15917 		wlan_cfg_set_num_tx_desc_pool(soc->wlan_cfg_ctx, 0);
15918 		wlan_cfg_set_num_tx_ext_desc_pool(soc->wlan_cfg_ctx, 0);
15919 		wlan_cfg_set_num_tx_desc(soc->wlan_cfg_ctx, 0);
15920 		wlan_cfg_set_num_tx_ext_desc(soc->wlan_cfg_ctx, 0);
15921 		soc->init_tcl_cmd_cred_ring = false;
15922 		soc->num_tcl_data_rings =
15923 			wlan_cfg_num_nss_tcl_data_rings(soc->wlan_cfg_ctx);
15924 		soc->num_reo_dest_rings =
15925 			wlan_cfg_num_nss_reo_dest_rings(soc->wlan_cfg_ctx);
15926 
15927 	} else {
15928 		soc->init_tcl_cmd_cred_ring = true;
15929 		soc->num_tx_comp_rings =
15930 			wlan_cfg_num_tx_comp_rings(soc->wlan_cfg_ctx);
15931 		soc->num_tcl_data_rings =
15932 			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
15933 		soc->num_reo_dest_rings =
15934 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
15935 	}
15936 
15937 	soc->arch_ops.soc_cfg_attach(soc);
15938 }
15939 
15940 static inline  void dp_pdev_set_default_reo(struct dp_pdev *pdev)
15941 {
15942 	struct dp_soc *soc = pdev->soc;
15943 
15944 	switch (pdev->pdev_id) {
15945 	case 0:
15946 		pdev->reo_dest =
15947 			wlan_cfg_radio0_default_reo_get(soc->wlan_cfg_ctx);
15948 		break;
15949 
15950 	case 1:
15951 		pdev->reo_dest =
15952 			wlan_cfg_radio1_default_reo_get(soc->wlan_cfg_ctx);
15953 		break;
15954 
15955 	case 2:
15956 		pdev->reo_dest =
15957 			wlan_cfg_radio2_default_reo_get(soc->wlan_cfg_ctx);
15958 		break;
15959 
15960 	default:
15961 		dp_init_err("%pK: Invalid pdev_id %d for reo selection",
15962 			    soc, pdev->pdev_id);
15963 		break;
15964 	}
15965 }
15966 
/**
 * dp_pdev_init() - initialize an already-allocated txrx pdev
 * @txrx_soc: dp soc handle
 * @htc_handle: HTC handle for host-target interface (not referenced in
 *		this function body)
 * @qdf_osdev: QDF OS device (not referenced in this function body;
 *	       pdev->soc->osdev is used instead)
 * @pdev_id: id of the pdev to initialize within soc->pdev_list
 *
 * Brings up the per-radio data path: WDI event machinery, pdev SRNG
 * rings, TX/RX state, IPA sub-components, RX flow search table,
 * backpressure stats and the monitor pdev.  On any failure the goto
 * ladder below unwinds exactly the steps that completed, in reverse
 * order of initialization.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */
static QDF_STATUS dp_pdev_init(struct cdp_soc_t *txrx_soc,
				      HTC_HANDLE htc_handle,
				      qdf_device_t qdf_osdev,
				      uint8_t pdev_id)
{
	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
	int nss_cfg;
	void *sojourn_buf;
	QDF_STATUS ret;

	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	struct dp_pdev *pdev = soc->pdev_list[pdev_id];

	soc_cfg_ctx = soc->wlan_cfg_ctx;
	pdev->soc = soc;
	pdev->pdev_id = pdev_id;

	/*
	 * Variable to prevent double pdev deinitialization during
	 * radio detach execution .i.e. in the absence of any vdev.
	 */
	pdev->pdev_deinit = 0;

	if (dp_wdi_event_attach(pdev)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "dp_wdi_evet_attach failed");
		goto fail0;
	}

	if (dp_pdev_srng_init(pdev)) {
		dp_init_err("%pK: Failed to initialize pdev srng rings", soc);
		goto fail1;
	}

	/* Initialize descriptors in TCL Rings used by IPA */
	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
		hal_tx_init_data_ring(soc->hal_soc,
				      soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng);
		dp_ipa_hal_tx_init_alt_data_ring(soc);
	}

	/*
	 * Initialize command/credit ring descriptor
	 * Command/CREDIT ring also used for sending DATA cmds
	 */
	dp_tx_init_cmd_credit_ring(soc);

	dp_tx_pdev_init(pdev);

	/*
	 * set nss pdev config based on soc config
	 */
	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
					 (nss_cfg & (1 << pdev_id)));
	pdev->target_pdev_id =
		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);

	/* PHYB 2G radios use a fixed target pdev id regardless of the
	 * host-to-target mapping computed above.
	 */
	if (soc->preferred_hw_mode == WMI_HOST_HW_MODE_2G_PHYB &&
	    pdev->lmac_id == PHYB_2G_LMAC_ID) {
		pdev->target_pdev_id = PHYB_2G_TARGET_PDEV_ID;
	}

	/* Reset the cpu ring map if radio is NSS offloaded */
	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
		dp_soc_reset_cpu_ring_map(soc);
		dp_soc_reset_intr_mask(soc);
	}

	/* Reset the IPA vlan ring interrupt mask (unconditional) */
	dp_soc_reset_ipa_vlan_intr_mask(soc);

	TAILQ_INIT(&pdev->vdev_list);
	qdf_spinlock_create(&pdev->vdev_list_lock);
	pdev->vdev_count = 0;
	pdev->is_lro_hash_configured = 0;

	qdf_spinlock_create(&pdev->tx_mutex);
	/* lmac mapping per band is learned later; mark all invalid */
	pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_INVALID_LMAC_ID;
	pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_INVALID_LMAC_ID;
	pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_INVALID_LMAC_ID;

	DP_STATS_INIT(pdev);

	dp_local_peer_id_pool_init(pdev);

	dp_dscp_tid_map_setup(pdev);
	dp_pcp_tid_map_setup(pdev);

	/* set the reo destination during initialization */
	dp_pdev_set_default_reo(pdev);

	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));

	/* nbuf used to carry per-TID sojourn stats to WDI consumers */
	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
			      TRUE);

	if (!pdev->sojourn_buf) {
		dp_init_err("%pK: Failed to allocate sojourn buf", soc);
		goto fail2;
	}
	sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
	qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));

	qdf_event_create(&pdev->fw_peer_stats_event);
	qdf_event_create(&pdev->fw_stats_event);

	pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);

	if (dp_rxdma_ring_setup(soc, pdev)) {
		dp_init_err("%pK: RXDMA ring config failed", soc);
		goto fail3;
	}

	if (dp_init_ipa_rx_refill_buf_ring(soc, pdev))
		goto fail3;

	if (dp_ipa_ring_resource_setup(soc, pdev))
		goto fail4;

	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
		dp_init_err("%pK: dp_ipa_uc_attach failed", soc);
		goto fail4;
	}

	/* E_NOSUPPORT means FST is not enabled for this target and is
	 * not treated as a failure.
	 */
	ret = dp_rx_fst_attach(soc, pdev);
	if ((ret != QDF_STATUS_SUCCESS) &&
	    (ret != QDF_STATUS_E_NOSUPPORT)) {
		dp_init_err("%pK: RX Flow Search Table attach failed: pdev %d err %d",
			    soc, pdev_id, ret);
		goto fail5;
	}

	if (dp_pdev_bkp_stats_attach(pdev) != QDF_STATUS_SUCCESS) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("dp_pdev_bkp_stats_attach failed"));
		goto fail6;
	}

	if (dp_monitor_pdev_init(pdev)) {
		dp_init_err("%pK: dp_monitor_pdev_init failed\n", soc);
		goto fail7;
	}

	/* initialize sw rx descriptors */
	dp_rx_pdev_desc_pool_init(pdev);
	/* allocate buffers and replenish the RxDMA ring */
	dp_rx_pdev_buffers_alloc(pdev);

	dp_init_tso_stats(pdev);

	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
		qdf_dma_mem_stats_read(),
		qdf_heap_mem_stats_read(),
		qdf_skb_total_mem_stats_read());

	return QDF_STATUS_SUCCESS;
	/* Error unwind: each label tears down the step whose failure
	 * jumps to the label above it, in reverse order of init.
	 */
fail7:
	dp_pdev_bkp_stats_detach(pdev);
fail6:
	dp_rx_fst_detach(soc, pdev);
fail5:
	dp_ipa_uc_detach(soc, pdev);
fail4:
	dp_deinit_ipa_rx_refill_buf_ring(soc, pdev);
fail3:
	dp_rxdma_ring_cleanup(soc, pdev);
	qdf_nbuf_free(pdev->sojourn_buf);
fail2:
	qdf_spinlock_destroy(&pdev->tx_mutex);
	qdf_spinlock_destroy(&pdev->vdev_list_lock);
	dp_pdev_srng_deinit(pdev);
fail1:
	dp_wdi_event_detach(pdev);
fail0:
	return QDF_STATUS_E_FAILURE;
}
16145 
16146 /*
16147  * dp_pdev_init_wifi3() - Init txrx pdev
16148  * @htc_handle: HTC handle for host-target interface
16149  * @qdf_osdev: QDF OS device
16150  * @force: Force deinit
16151  *
16152  * Return: QDF_STATUS
16153  */
16154 static QDF_STATUS dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
16155 				     HTC_HANDLE htc_handle,
16156 				     qdf_device_t qdf_osdev,
16157 				     uint8_t pdev_id)
16158 {
16159 	return dp_pdev_init(txrx_soc, htc_handle, qdf_osdev, pdev_id);
16160 }
16161 
16162