xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision 93bf7e1fb1dea8a6aa71b99d27aa38419711f42e)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <qdf_types.h>
21 #include <qdf_lock.h>
22 #include <qdf_net_types.h>
23 #include <qdf_lro.h>
24 #include <qdf_module.h>
25 #include <hal_hw_headers.h>
26 #include <hal_api.h>
27 #include <hif.h>
28 #include <htt.h>
29 #include <wdi_event.h>
30 #include <queue.h>
31 #include "dp_types.h"
32 #include "dp_internal.h"
33 #include "dp_tx.h"
34 #include "dp_tx_desc.h"
35 #include "dp_rx.h"
36 #ifdef DP_RATETABLE_SUPPORT
37 #include "dp_ratetable.h"
38 #endif
39 #include <cdp_txrx_handle.h>
40 #include <wlan_cfg.h>
41 #include <wlan_utility.h>
42 #include "cdp_txrx_cmn_struct.h"
43 #include "cdp_txrx_stats_struct.h"
44 #include "cdp_txrx_cmn_reg.h"
45 #include <qdf_util.h>
46 #include "dp_peer.h"
47 #include "htt_stats.h"
48 #include "dp_htt.h"
49 #ifdef WLAN_SUPPORT_RX_FISA
50 #include <dp_fisa_rx.h>
51 #endif
52 #include "htt_ppdu_stats.h"
53 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
54 #include "cfg_ucfg_api.h"
55 #include <wlan_module_ids.h>
56 
57 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
58 #include "cdp_txrx_flow_ctrl_v2.h"
59 #else
60 
/**
 * cdp_dump_flow_pool_info() - no-op stub when TX flow control v2 is disabled
 * @soc: CDP SoC handle (unused)
 */
static inline void
cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
{
}
66 #endif
67 #ifdef WIFI_MONITOR_SUPPORT
68 #include <dp_mon.h>
69 #endif
70 #include "dp_ipa.h"
71 #ifdef FEATURE_WDS
72 #include "dp_txrx_wds.h"
73 #endif
74 #ifdef WLAN_SUPPORT_MSCS
75 #include "dp_mscs.h"
76 #endif
77 #ifdef WLAN_SUPPORT_MESH_LATENCY
78 #include "dp_mesh_latency.h"
79 #endif
80 #ifdef ATH_SUPPORT_IQUE
81 #include "dp_txrx_me.h"
82 #endif
83 #if defined(DP_CON_MON)
84 #ifndef REMOVE_PKT_LOG
85 #include <pktlog_ac_api.h>
86 #include <pktlog_ac.h>
87 #endif
88 #endif
89 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
90 #include <dp_swlm.h>
91 #endif
92 #ifdef CONFIG_SAWF_DEF_QUEUES
93 #include "dp_sawf.h"
94 #endif
95 #ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
96 #include <target_if_dp.h>
97 #endif
98 
99 #ifdef WLAN_FEATURE_STATS_EXT
100 #define INIT_RX_HW_STATS_LOCK(_soc) \
101 	qdf_spinlock_create(&(_soc)->rx_hw_stats_lock)
102 #define DEINIT_RX_HW_STATS_LOCK(_soc) \
103 	qdf_spinlock_destroy(&(_soc)->rx_hw_stats_lock)
104 #else
105 #define INIT_RX_HW_STATS_LOCK(_soc)  /* no op */
106 #define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */
107 #endif
108 
109 #if defined(DP_PEER_EXTENDED_API) || defined(WLAN_DP_PENDING_MEM_FLUSH)
110 #define SET_PEER_REF_CNT_ONE(_peer) \
111 	qdf_atomic_set(&(_peer)->ref_cnt, 1)
112 #else
113 #define SET_PEER_REF_CNT_ONE(_peer)
114 #endif
115 
116 #ifdef WLAN_SYSFS_DP_STATS
/* sysfs event wait time for firmware stat request, in milliseconds */
118 #define WLAN_SYSFS_STAT_REQ_WAIT_MS 3000
119 #endif
120 
121 #ifdef QCA_DP_ENABLE_TX_COMP_RING4
122 #define TXCOMP_RING4_NUM 3
123 #else
124 #define TXCOMP_RING4_NUM WBM2SW_TXCOMP_RING4_NUM
125 #endif
126 
127 #ifdef QCA_DP_TX_FW_METADATA_V2
128 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
129 		HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
130 #else
131 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
132 		HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
133 #endif
134 
135 QDF_COMPILE_TIME_ASSERT(max_rx_rings_check,
136 			MAX_REO_DEST_RINGS == CDP_MAX_RX_RINGS);
137 
138 QDF_COMPILE_TIME_ASSERT(max_tx_rings_check,
139 			MAX_TCL_DATA_RINGS == CDP_MAX_TX_COMP_RINGS);
140 
141 #define dp_init_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_INIT, params)
142 #define dp_init_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_INIT, params)
143 #define dp_init_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_INIT, params)
144 #define dp_init_info(params...) \
145 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_INIT, ## params)
146 #define dp_init_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_INIT, params)
147 
148 #define dp_vdev_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_VDEV, params)
149 #define dp_vdev_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_VDEV, params)
150 #define dp_vdev_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_VDEV, params)
151 #define dp_vdev_info(params...) \
152 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_VDEV, ## params)
153 #define dp_vdev_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_VDEV, params)
154 
155 void dp_configure_arch_ops(struct dp_soc *soc);
156 qdf_size_t dp_get_soc_context_size(uint16_t device_id);
157 
158 /*
159  * The max size of cdp_peer_stats_param_t is limited to 16 bytes.
160  * If the buffer size is exceeding this size limit,
161  * dp_txrx_get_peer_stats is to be used instead.
162  */
163 QDF_COMPILE_TIME_ASSERT(cdp_peer_stats_param_t_max_size,
164 			(sizeof(cdp_peer_stats_param_t) <= 16));
165 
166 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
167 /*
168  * If WLAN_CFG_INT_NUM_CONTEXTS is changed, HIF_NUM_INT_CONTEXTS
169  * also should be updated accordingly
170  */
171 QDF_COMPILE_TIME_ASSERT(num_intr_grps,
172 			HIF_NUM_INT_CONTEXTS == WLAN_CFG_INT_NUM_CONTEXTS);
173 
174 /*
175  * HIF_EVENT_HIST_MAX should always be power of 2
176  */
177 QDF_COMPILE_TIME_ASSERT(hif_event_history_size,
178 			(HIF_EVENT_HIST_MAX & (HIF_EVENT_HIST_MAX - 1)) == 0);
179 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
180 
181 /*
182  * If WLAN_CFG_INT_NUM_CONTEXTS is changed,
183  * WLAN_CFG_INT_NUM_CONTEXTS_MAX should also be updated
184  */
185 QDF_COMPILE_TIME_ASSERT(wlan_cfg_num_int_ctxs,
186 			WLAN_CFG_INT_NUM_CONTEXTS_MAX >=
187 			WLAN_CFG_INT_NUM_CONTEXTS);
188 
189 static QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl);
190 static QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl);
191 
192 static void dp_pdev_srng_deinit(struct dp_pdev *pdev);
193 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev);
194 static void dp_pdev_srng_free(struct dp_pdev *pdev);
195 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev);
196 
197 static void dp_soc_srng_deinit(struct dp_soc *soc);
198 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc);
199 static void dp_soc_srng_free(struct dp_soc *soc);
200 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc);
201 
202 static void dp_soc_cfg_init(struct dp_soc *soc);
203 static void dp_soc_cfg_attach(struct dp_soc *soc);
204 
205 static inline
206 QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
207 				struct cdp_pdev_attach_params *params);
208 
209 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id);
210 
211 static QDF_STATUS
212 dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
213 		   HTC_HANDLE htc_handle,
214 		   qdf_device_t qdf_osdev,
215 		   uint8_t pdev_id);
216 
217 static QDF_STATUS
218 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id, int force);
219 
220 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc);
221 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc);
222 
223 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
224 		  struct hif_opaque_softc *hif_handle);
225 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
226 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc,
227 				       uint8_t pdev_id,
228 				       int force);
229 static struct dp_soc *
230 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
231 	      struct cdp_soc_attach_params *params);
232 static inline QDF_STATUS dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl,
233 					      uint8_t vdev_id,
234 					      uint8_t *peer_mac_addr,
235 					      enum cdp_peer_type peer_type);
236 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
237 				       uint8_t vdev_id,
238 				       uint8_t *peer_mac, uint32_t bitmap);
239 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
240 				bool unmap_only);
241 #ifdef ENABLE_VERBOSE_DEBUG
242 bool is_dp_verbose_debug_enabled;
243 #endif
244 
245 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
246 static bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
247 static void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
248 			   bool enable);
249 static inline void
250 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
251 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats);
252 static inline void
253 dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
254 #endif
255 
256 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
257 						uint8_t index);
258 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index);
259 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index);
260 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
261 						 uint8_t index);
262 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
263 					    enum hal_ring_type ring_type,
264 					    int ring_num);
265 
266 #define DP_INTR_POLL_TIMER_MS	5
267 
268 #define MON_VDEV_TIMER_INIT 0x1
269 #define MON_VDEV_TIMER_RUNNING 0x2
270 
271 #define DP_MCS_LENGTH (6*MAX_MCS)
272 
273 #define DP_CURR_FW_STATS_AVAIL 19
274 #define DP_HTT_DBG_EXT_STATS_MAX 256
275 #define DP_MAX_SLEEP_TIME 100
276 #ifndef QCA_WIFI_3_0_EMU
277 #define SUSPEND_DRAIN_WAIT 500
278 #else
279 #define SUSPEND_DRAIN_WAIT 3000
280 #endif
281 
282 #ifdef IPA_OFFLOAD
283 /* Exclude IPA rings from the interrupt context */
284 #define TX_RING_MASK_VAL	0xb
285 #define RX_RING_MASK_VAL	0x7
286 #else
287 #define TX_RING_MASK_VAL	0xF
288 #define RX_RING_MASK_VAL	0xF
289 #endif
290 
291 #define STR_MAXLEN	64
292 
293 #define RNG_ERR		"SRNG setup failed for"
294 
/**
 * default_dscp_tid_map - Default DSCP-TID mapping
 *
 * DSCP        TID
 * 000000      0
 * 001000      1
 * 010000      2
 * 011000      3
 * 100000      4
 * 101000      5
 * 110000      6
 * 111000      7
 *
 * i.e. TID = DSCP >> 3: the upper 3 bits of the 6-bit DSCP select the TID,
 * as shown by the eight runs of eight identical values below.
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7,
};
318 
/**
 * default_pcp_tid_map - Default PCP-TID mapping
 *
 * PCP     TID
 * 000      0
 * 001      1
 * 010      2
 * 011      3
 * 100      4
 * 101      5
 * 110      6
 * 111      7
 *
 * i.e. an identity mapping: TID = PCP.
 */
static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
	0, 1, 2, 3, 4, 5, 6, 7,
};
335 
/**
 * dp_cpu_ring_map - CPU to TX (TCL) ring map
 *
 * Each row holds one TCL ring number per interrupt context
 * (WLAN_CFG_INT_NUM_CONTEXTS_MAX columns). The row is presumably selected
 * by the NSS offload configuration (DP_NSS_CPU_RING_MAP_MAX rows) — the
 * index names suggest this; confirm against the users of this table.
 * The extra row under WLAN_TX_PKT_CAPTURE_ENH pins all contexts to ring 1.
 */
uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
#endif
};
350 
351 qdf_export_symbol(dp_cpu_ring_map);
352 
/**
 * enum dp_stats_type - Select the type of statistics
 * @STATS_FW: firmware statistics column
 * @STATS_HOST: host statistics column
 * @STATS_TYPE_MAX: number of columns in dp_stats_mapping_table
 */
enum dp_stats_type {
	STATS_FW = 0,
	STATS_HOST = 1,
	STATS_TYPE_MAX = 2,
};
361 
/**
 * enum dp_fw_stats - General Firmware statistics options
 * @TXRX_FW_STATS_INVALID: marks table rows with no firmware statistic
 */
enum dp_fw_stats {
	TXRX_FW_STATS_INVALID	= -1,
};
369 
/**
 * dp_stats_mapping_table - Firmware and Host statistics
 * currently supported
 *
 * Each row pairs a firmware stats id (column STATS_FW) with a host stats id
 * (column STATS_HOST); TXRX_FW_STATS_INVALID / TXRX_HOST_STATS_INVALID mark
 * the side that does not apply to that row.
 */
const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
	/* Last ENUM for HTT FW STATS */
	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
	{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
	{TXRX_FW_STATS_INVALID, TXRX_NAPI_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_FSE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_HAL_REG_WRITE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_REO_HW_DESC_DUMP},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_WBM_IDLE_HPTP_DUMP},
	{TXRX_FW_STATS_INVALID, TXRX_SRNG_USAGE_WM_STATS},
	{HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_SOUNDING_INFO, TXRX_HOST_STATS_INVALID}
};
417 
418 /* MCL specific functions */
419 #if defined(DP_CON_MON)
420 
421 #ifdef DP_CON_MON_MSI_ENABLED
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * When DP_CON_MON_MSI_ENABLED is defined, this variant returns the RX
 * monitor ring mask configured for the given interrupt context (it does
 * NOT return 0 — the monitor rings are serviced from the interrupt path
 * in this configuration, unlike the polled #else variant below).
 *
 * Return: RX monitor ring mask configured for @intr_ctx_num
 */
static inline uint32_t
dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
}
442 #else
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle (unused in this variant)
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *                (unused in this variant)
 *
 * For MCL, monitor mode rings are processed in timer (polled) contexts.
 * This function returns 0 since, in interrupt mode (softirq based RX),
 * we do not want to process monitor mode rings in a softirq.
 *
 * So, in case packet log is enabled for SAP/STA/P2P modes,
 * regular interrupt processing will not process monitor mode rings. It is
 * done in a separate timer context.
 *
 * Return: 0
 */
static inline uint32_t
dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return 0;
}
463 #endif
464 
465 #ifdef IPA_OFFLOAD
466 /**
467  * dp_get_num_rx_contexts() - get number of RX contexts
468  * @soc_hdl: cdp opaque soc handle
469  *
470  * Return: number of RX contexts
471  */
472 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
473 {
474 	int num_rx_contexts;
475 	uint32_t reo_ring_map;
476 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
477 
478 	reo_ring_map = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
479 
480 	switch (soc->arch_id) {
481 	case CDP_ARCH_TYPE_BE:
482 		/* 2 REO rings are used for IPA */
483 		reo_ring_map &=  ~(BIT(3) | BIT(7));
484 
485 		break;
486 	case CDP_ARCH_TYPE_LI:
487 		/* 1 REO ring is used for IPA */
488 		reo_ring_map &=  ~BIT(3);
489 		break;
490 	default:
491 		dp_err("unkonwn arch_id 0x%x", soc->arch_id);
492 		QDF_BUG(0);
493 	}
494 	/*
495 	 * qdf_get_hweight32 prefer over qdf_get_hweight8 in case map is scaled
496 	 * in future
497 	 */
498 	num_rx_contexts = qdf_get_hweight32(reo_ring_map);
499 
500 	return num_rx_contexts;
501 }
502 #else
503 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
504 {
505 	int num_rx_contexts;
506 	uint32_t reo_config;
507 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
508 
509 	reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
510 	/*
511 	 * qdf_get_hweight32 prefer over qdf_get_hweight8 in case map is scaled
512 	 * in future
513 	 */
514 	num_rx_contexts = qdf_get_hweight32(reo_config);
515 
516 	return num_rx_contexts;
517 }
518 #endif
519 
520 #else
521 
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * Non-DP_CON_MON variant: monitor rings are serviced from the interrupt
 * context, so the configured mask is returned directly.
 *
 * Return: RX monitor ring mask configured for @intr_ctx_num
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
}
534 
535 /**
536  * dp_soc_reset_mon_intr_mask() - reset mon intr mask
537  * @soc: pointer to dp_soc handle
538  *
539  * Return:
540  */
541 void dp_soc_reset_mon_intr_mask(struct dp_soc *soc)
542 {
543 	int i;
544 
545 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
546 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
547 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
548 	}
549 }
550 
551 qdf_export_symbol(dp_soc_reset_mon_intr_mask);
552 
/*
 * dp_service_lmac_rings() - timer handler to reap lmac rings
 * @arg: SoC Handle (opaque, cast to struct dp_soc *)
 *
 * For each LMAC hw ring: runs monitor processing, drains the RXDMA error
 * ring for every interrupt context, and replenishes the RX refill buffer
 * ring when RXDMA_BUF is not NSS-offloaded. Re-arms itself via
 * lmac_reap_timer at the end.
 *
 * Return: none
 */
static void dp_service_lmac_rings(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;
	int ring = 0, i;
	struct dp_pdev *pdev = NULL;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	/* Process LMAC interrupts */
	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
		int mac_for_pdev = ring;
		struct dp_srng *rx_refill_buf_ring;

		/* skip lmac ids that have no pdev mapped */
		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
		if (!pdev)
			continue;

		rx_refill_buf_ring = &soc->rx_refill_buf_ring[mac_for_pdev];

		dp_monitor_process(soc, NULL, mac_for_pdev,
				   QCA_NAPI_BUDGET);

		for (i = 0;
		     i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
			dp_rxdma_err_process(&soc->intr_ctx[i], soc,
					     mac_for_pdev,
					     QCA_NAPI_BUDGET);

		/* replenish only when the host owns the RXDMA_BUF ring */
		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF,
						  mac_for_pdev))
			dp_rx_buffers_replenish(soc, mac_for_pdev,
						rx_refill_buf_ring,
						&soc->rx_desc_buf[mac_for_pdev],
						0, &desc_list, &tail);
	}

	/* re-arm the reap timer for the next poll interval */
	qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
}
598 
599 #endif
600 
601 #ifdef FEATURE_MEC
/**
 * dp_peer_mec_flush_entries() - flush all MEC entries from the hash table
 * @soc: Datapath SOC handle
 *
 * Detaches every MEC entry from every hash bin onto a local free list
 * while holding mec_lock, then frees the list after dropping the lock.
 *
 * Return: none
 */
void dp_peer_mec_flush_entries(struct dp_soc *soc)
{
	unsigned int index;
	struct dp_mec_entry *mecentry, *mecentry_next;

	TAILQ_HEAD(, dp_mec_entry) free_list;
	TAILQ_INIT(&free_list);

	/* nothing to do if the hash table was never set up or is empty */
	if (!soc->mec_hash.mask)
		return;

	if (!soc->mec_hash.bins)
		return;

	if (!qdf_atomic_read(&soc->mec_cnt))
		return;

	qdf_spin_lock_bh(&soc->mec_lock);
	for (index = 0; index <= soc->mec_hash.mask; index++) {
		if (!TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
			/* _SAFE variant: detach unlinks the current entry */
			TAILQ_FOREACH_SAFE(mecentry, &soc->mec_hash.bins[index],
					   hash_list_elem, mecentry_next) {
			    dp_peer_mec_detach_entry(soc, mecentry, &free_list);
			}
		}
	}
	qdf_spin_unlock_bh(&soc->mec_lock);

	/* free the detached entries outside the lock */
	dp_peer_mec_free_list(soc, &free_list);
}
632 
633 /**
634  * dp_print_mec_entries() - Dump MEC entries in table
635  * @soc: Datapath soc handle
636  *
637  * Return: none
638  */
639 static void dp_print_mec_stats(struct dp_soc *soc)
640 {
641 	int i;
642 	uint32_t index;
643 	struct dp_mec_entry *mecentry = NULL, *mec_list;
644 	uint32_t num_entries = 0;
645 
646 	DP_PRINT_STATS("MEC Stats:");
647 	DP_PRINT_STATS("   Entries Added   = %d", soc->stats.mec.added);
648 	DP_PRINT_STATS("   Entries Deleted = %d", soc->stats.mec.deleted);
649 
650 	if (!qdf_atomic_read(&soc->mec_cnt))
651 		return;
652 
653 	mec_list = qdf_mem_malloc(sizeof(*mecentry) * DP_PEER_MAX_MEC_ENTRY);
654 	if (!mec_list) {
655 		dp_peer_warn("%pK: failed to allocate mec_list", soc);
656 		return;
657 	}
658 
659 	DP_PRINT_STATS("MEC Table:");
660 	for (index = 0; index <= soc->mec_hash.mask; index++) {
661 		qdf_spin_lock_bh(&soc->mec_lock);
662 		if (TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
663 			qdf_spin_unlock_bh(&soc->mec_lock);
664 			continue;
665 		}
666 
667 		TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index],
668 			      hash_list_elem) {
669 			qdf_mem_copy(&mec_list[num_entries], mecentry,
670 				     sizeof(*mecentry));
671 			num_entries++;
672 		}
673 		qdf_spin_unlock_bh(&soc->mec_lock);
674 	}
675 
676 	if (!num_entries) {
677 		qdf_mem_free(mec_list);
678 		return;
679 	}
680 
681 	for (i = 0; i < num_entries; i++) {
682 		DP_PRINT_STATS("%6d mac_addr = " QDF_MAC_ADDR_FMT
683 			       " is_active = %d pdev_id = %d vdev_id = %d",
684 			       i,
685 			       QDF_MAC_ADDR_REF(mec_list[i].mac_addr.raw),
686 			       mec_list[i].is_active,
687 			       mec_list[i].pdev_id,
688 			       mec_list[i].vdev_id);
689 	}
690 	qdf_mem_free(mec_list);
691 }
692 #else
/* stub: MEC statistics exist only when FEATURE_MEC is enabled */
static void dp_print_mec_stats(struct dp_soc *soc)
{
}
696 #endif
697 
698 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
699 				 uint8_t vdev_id,
700 				 uint8_t *peer_mac,
701 				 uint8_t *mac_addr,
702 				 enum cdp_txrx_ast_entry_type type,
703 				 uint32_t flags)
704 {
705 	int ret = -1;
706 	QDF_STATUS status = QDF_STATUS_SUCCESS;
707 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
708 						       peer_mac, 0, vdev_id,
709 						       DP_MOD_ID_CDP);
710 
711 	if (!peer) {
712 		dp_peer_debug("Peer is NULL!");
713 		return ret;
714 	}
715 
716 	status = dp_peer_add_ast((struct dp_soc *)soc_hdl,
717 				 peer,
718 				 mac_addr,
719 				 type,
720 				 flags);
721 	if ((status == QDF_STATUS_SUCCESS) ||
722 	    (status == QDF_STATUS_E_ALREADY) ||
723 	    (status == QDF_STATUS_E_AGAIN))
724 		ret = 0;
725 
726 	dp_hmwds_ast_add_notify(peer, mac_addr,
727 				type, status, false);
728 
729 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
730 
731 	return ret;
732 }
733 
734 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
735 						uint8_t vdev_id,
736 						uint8_t *peer_mac,
737 						uint8_t *wds_macaddr,
738 						uint32_t flags)
739 {
740 	int status = -1;
741 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
742 	struct dp_ast_entry  *ast_entry = NULL;
743 	struct dp_peer *peer;
744 
745 	if (soc->ast_offload_support)
746 		return status;
747 
748 	peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
749 				      peer_mac, 0, vdev_id,
750 				      DP_MOD_ID_CDP);
751 
752 	if (!peer) {
753 		dp_peer_debug("Peer is NULL!");
754 		return status;
755 	}
756 
757 	qdf_spin_lock_bh(&soc->ast_lock);
758 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
759 						    peer->vdev->pdev->pdev_id);
760 
761 	if (ast_entry) {
762 		status = dp_peer_update_ast(soc,
763 					    peer,
764 					    ast_entry, flags);
765 	}
766 	qdf_spin_unlock_bh(&soc->ast_lock);
767 
768 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
769 
770 	return status;
771 }
772 
/*
 * dp_peer_reset_ast_entries() - Deletes all HMWDS entries for a peer
 * @soc:		Datapath SOC handle
 * @peer:		DP peer
 * @arg:		callback argument (unused)
 *
 * Callers take soc->ast_lock around this function (see
 * dp_wds_reset_ast_wifi3 / dp_wds_reset_ast_table_wifi3).
 *
 * Return: None
 */
static void
dp_peer_reset_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_ast_entry *tmp_ast_entry;

	/* safe iteration: dp_peer_del_ast may unlink the current entry */
	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) {
		if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
		    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
			dp_peer_del_ast(soc, ast_entry);
	}
}
793 
/*
 * dp_wds_reset_ast_wifi3() - Delete HMWDS AST entries
 * @soc_hdl:		Datapath SOC handle
 * @wds_macaddr:	WDS entry MAC Address
 * @peer_mac_addr:	MAC address of the peer whose HMWDS entries to delete
 * @vdev_id:		id of vdev handle
 *
 * When @peer_mac_addr is set, all HMWDS AST entries of that peer are
 * deleted; otherwise only the HMWDS entry matching @wds_macaddr on this
 * pdev is deleted. No-op failure when AST is offloaded to the target.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
					 uint8_t *wds_macaddr,
					 uint8_t *peer_mac_addr,
					 uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_peer *peer;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;

	if (soc->ast_offload_support)
		return QDF_STATUS_E_FAILURE;

	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	pdev = vdev->pdev;

	if (peer_mac_addr) {
		peer = dp_peer_find_hash_find(soc, peer_mac_addr,
					      0, vdev->vdev_id,
					      DP_MOD_ID_CDP);
		if (!peer) {
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
			return QDF_STATUS_E_FAILURE;
		}

		qdf_spin_lock_bh(&soc->ast_lock);
		dp_peer_reset_ast_entries(soc, peer, NULL);
		qdf_spin_unlock_bh(&soc->ast_lock);
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	} else if (wds_macaddr) {
		qdf_spin_lock_bh(&soc->ast_lock);
		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
							    pdev->pdev_id);

		if (ast_entry) {
			/* only HMWDS entries may be deleted this way */
			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
				dp_peer_del_ast(soc, ast_entry);
		}
		qdf_spin_unlock_bh(&soc->ast_lock);
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}
852 
/*
 * dp_wds_reset_ast_table_wifi3() - Delete all HMWDS AST entries of all peers
 * @soc_hdl:		Datapath SOC handle
 * @vdev_id:		id of vdev object (unused; every peer is iterated)
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
			     uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *) soc_hdl;

	/* nothing to reset on the host when AST is offloaded to target */
	if (soc->ast_offload_support)
		return QDF_STATUS_SUCCESS;

	qdf_spin_lock_bh(&soc->ast_lock);

	dp_soc_iterate_peer(soc, dp_peer_reset_ast_entries, NULL,
			    DP_MOD_ID_CDP);
	qdf_spin_unlock_bh(&soc->ast_lock);

	return QDF_STATUS_SUCCESS;
}
877 
878 /*
879  * dp_peer_flush_ast_entries() - Delete all wds and hmwds ast entries of a peer
880  * @soc:		Datapath SOC
881  * @peer:		Datapath peer
882  * @arg:		arg to callback
883  *
884  * Return: None
885  */
886 static void
887 dp_peer_flush_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
888 {
889 	struct dp_ast_entry *ase = NULL;
890 	struct dp_ast_entry *temp_ase;
891 
892 	DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
893 		if ((ase->type ==
894 			CDP_TXRX_AST_TYPE_STATIC) ||
895 			(ase->type ==
896 			 CDP_TXRX_AST_TYPE_SELF) ||
897 			(ase->type ==
898 			 CDP_TXRX_AST_TYPE_STA_BSS))
899 			continue;
900 		dp_peer_del_ast(soc, ase);
901 	}
902 }
903 
/*
 * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entry
 * @soc_hdl:		Datapath SOC handle
 *
 * Flushes the non-static AST entries of every peer under ast_lock, then
 * flushes the MEC table (which takes its own lock) afterwards.
 *
 * Return: None
 */
static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
{
	struct dp_soc *soc = (struct dp_soc *) soc_hdl;

	qdf_spin_lock_bh(&soc->ast_lock);

	dp_soc_iterate_peer(soc, dp_peer_flush_ast_entries, NULL,
			    DP_MOD_ID_CDP);

	qdf_spin_unlock_bh(&soc->ast_lock);
	dp_peer_mec_flush_entries(soc);
}
922 
/**
 * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
 *                                       and return ast entry information
 *                                       of first ast entry found in the
 *                                       table with given mac address
 * @soc_hdl: data path soc handle
 * @ast_mac_addr: AST entry mac address
 * @ast_entry_info: ast entry information (filled on success)
 *
 * Return: true if ast entry found with ast_mac_addr
 *         false if ast entry not found
 */
static bool dp_peer_get_ast_info_by_soc_wifi3
	(struct cdp_soc_t *soc_hdl,
	 uint8_t *ast_mac_addr,
	 struct cdp_ast_entry_info *ast_entry_info)
{
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_peer *peer = NULL;

	/* host AST table is not maintained when offloaded to target */
	if (soc->ast_offload_support)
		return false;

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
	/* skip entries being deleted with no pending callback */
	if ((!ast_entry) ||
	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}

	/* take a peer reference so its mac stays valid while copying */
	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
				     DP_MOD_ID_AST);
	if (!peer) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}

	ast_entry_info->type = ast_entry->type;
	ast_entry_info->pdev_id = ast_entry->pdev_id;
	ast_entry_info->vdev_id = ast_entry->vdev_id;
	ast_entry_info->peer_id = ast_entry->peer_id;
	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
		     &peer->mac_addr.raw[0],
		     QDF_MAC_ADDR_SIZE);
	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
	qdf_spin_unlock_bh(&soc->ast_lock);
	return true;
}
975 
976 /**
977  * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
978  *                                          and return ast entry information
979  *                                          if mac address and pdev_id matches
980  *
981  * @soc : data path soc handle
982  * @ast_mac_addr : AST entry mac address
983  * @pdev_id : pdev_id
984  * @ast_entry_info : ast entry information
985  *
986  * return : true if ast entry found with ast_mac_addr
987  *          false if ast entry not found
988  */
989 static bool dp_peer_get_ast_info_by_pdevid_wifi3
990 		(struct cdp_soc_t *soc_hdl,
991 		 uint8_t *ast_mac_addr,
992 		 uint8_t pdev_id,
993 		 struct cdp_ast_entry_info *ast_entry_info)
994 {
995 	struct dp_ast_entry *ast_entry;
996 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
997 	struct dp_peer *peer = NULL;
998 
999 	if (soc->ast_offload_support)
1000 		return false;
1001 
1002 	qdf_spin_lock_bh(&soc->ast_lock);
1003 
1004 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr,
1005 						    pdev_id);
1006 
1007 	if ((!ast_entry) ||
1008 	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
1009 		qdf_spin_unlock_bh(&soc->ast_lock);
1010 		return false;
1011 	}
1012 
1013 	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
1014 				     DP_MOD_ID_AST);
1015 	if (!peer) {
1016 		qdf_spin_unlock_bh(&soc->ast_lock);
1017 		return false;
1018 	}
1019 
1020 	ast_entry_info->type = ast_entry->type;
1021 	ast_entry_info->pdev_id = ast_entry->pdev_id;
1022 	ast_entry_info->vdev_id = ast_entry->vdev_id;
1023 	ast_entry_info->peer_id = ast_entry->peer_id;
1024 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
1025 		     &peer->mac_addr.raw[0],
1026 		     QDF_MAC_ADDR_SIZE);
1027 	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
1028 	qdf_spin_unlock_bh(&soc->ast_lock);
1029 	return true;
1030 }
1031 
1032 /**
1033  * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table
1034  *                            with given mac address
1035  *
1036  * @soc : data path soc handle
1037  * @ast_mac_addr : AST entry mac address
 * @callback : callback function to be called on AST delete response from FW
1039  * @cookie : argument to be passed to callback
1040  *
1041  * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
1042  *          is sent
 *          QDF_STATUS_E_INVAL if ast entry not found
1044  */
static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
					       uint8_t *mac_addr,
					       txrx_ast_free_cb callback,
					       void *cookie)

{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_ast_entry *ast_entry = NULL;
	/* Previous owner's callback/cookie, invoked after dropping the lock */
	txrx_ast_free_cb cb = NULL;
	void *arg = NULL;

	/* AST table is owned by FW when AST offload is enabled */
	if (soc->ast_offload_support)
		/*
		 * NOTE(review): negated QDF_STATUS enum value is returned
		 * here and below; callers treat anything != SUCCESS as
		 * failure, so this works, but confirm before relying on the
		 * exact value.
		 */
		return -QDF_STATUS_E_INVAL;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return -QDF_STATUS_E_INVAL;
	}

	/* Save the currently registered callback so the new one can take
	 * ownership of the entry; the old one is notified below.
	 */
	if (ast_entry->callback) {
		cb = ast_entry->callback;
		arg = ast_entry->cookie;
	}

	ast_entry->callback = callback;
	ast_entry->cookie = cookie;

	/*
	 * if delete_in_progress is set AST delete is sent to target
	 * and host is waiting for response should not send delete
	 * again
	 */
	if (!ast_entry->delete_in_progress)
		dp_peer_del_ast(soc, ast_entry);

	qdf_spin_unlock_bh(&soc->ast_lock);
	/* Notify the displaced callback outside the AST lock */
	if (cb) {
		cb(soc->ctrl_psoc,
		   dp_soc_to_cdp_soc(soc),
		   arg,
		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
	}
	return QDF_STATUS_SUCCESS;
}
1091 
1092 /**
1093  * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
1094  *                                   table if mac address and pdev_id matches
1095  *
1096  * @soc : data path soc handle
1097  * @ast_mac_addr : AST entry mac address
1098  * @pdev_id : pdev id
 * @callback : callback function to be called on AST delete response from FW
1100  * @cookie : argument to be passed to callback
1101  *
1102  * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
1103  *          is sent
 *          QDF_STATUS_E_INVAL if ast entry not found
1105  */
1106 
static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
						uint8_t *mac_addr,
						uint8_t pdev_id,
						txrx_ast_free_cb callback,
						void *cookie)

{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_ast_entry *ast_entry;
	/* Previous owner's callback/cookie, invoked after dropping the lock */
	txrx_ast_free_cb cb = NULL;
	void *arg = NULL;

	/* AST table is owned by FW when AST offload is enabled */
	if (soc->ast_offload_support)
		return -QDF_STATUS_E_INVAL;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);

	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return -QDF_STATUS_E_INVAL;
	}

	/* Save the currently registered callback so the new one can take
	 * ownership of the entry; the old one is notified below.
	 */
	if (ast_entry->callback) {
		cb = ast_entry->callback;
		arg = ast_entry->cookie;
	}

	ast_entry->callback = callback;
	ast_entry->cookie = cookie;

	/*
	 * if delete_in_progress is set AST delete is sent to target
	 * and host is waiting for response should not send delete
	 * again
	 */
	if (!ast_entry->delete_in_progress)
		dp_peer_del_ast(soc, ast_entry);

	qdf_spin_unlock_bh(&soc->ast_lock);

	/* Notify the displaced callback outside the AST lock */
	if (cb) {
		cb(soc->ctrl_psoc,
		   dp_soc_to_cdp_soc(soc),
		   arg,
		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
	}
	return QDF_STATUS_SUCCESS;
}
1156 
1157 /**
1158  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs
1159  * @ring_num: ring num of the ring being queried
1160  * @grp_mask: the grp_mask array for the ring type in question.
1161  *
1162  * The grp_mask array is indexed by group number and the bit fields correspond
1163  * to ring numbers.  We are finding which interrupt group a ring belongs to.
1164  *
1165  * Return: the index in the grp_mask array with the ring number.
1166  * -QDF_STATUS_E_NOENT if no entry is found
1167  */
1168 static int dp_srng_find_ring_in_mask(int ring_num, uint8_t *grp_mask)
1169 {
1170 	int ext_group_num;
1171 	uint8_t mask = 1 << ring_num;
1172 
1173 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
1174 	     ext_group_num++) {
1175 		if (mask & grp_mask[ext_group_num])
1176 			return ext_group_num;
1177 	}
1178 
1179 	return -QDF_STATUS_E_NOENT;
1180 }
1181 
1182 /**
1183  * dp_is_msi_group_number_invalid() - check msi_group_number valid or not
1184  * @msi_group_number: MSI group number.
1185  * @msi_data_count: MSI data count.
1186  *
1187  * Return: true if msi_group_number is invalid.
1188  */
#ifdef WLAN_ONE_MSI_VECTOR
/* Single shared MSI vector: a group number can never be out of range */
static bool dp_is_msi_group_number_invalid(int msi_group_number,
					   int msi_data_count)
{
	return false;
}
#else
/* A group number beyond the available MSI data count is invalid */
static bool dp_is_msi_group_number_invalid(int msi_group_number,
					   int msi_data_count)
{
	if (msi_group_number > msi_data_count)
		return true;

	return false;
}
#endif
1202 
1203 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1204 /**
1205  * dp_is_reo_ring_num_in_nf_grp1() - Check if the current reo ring is part of
1206  *				rx_near_full_grp1 mask
1207  * @soc: Datapath SoC Handle
1208  * @ring_num: REO ring number
1209  *
1210  * Return: 1 if the ring_num belongs to reo_nf_grp1,
1211  *	   0, otherwise.
1212  */
1213 static inline int
1214 dp_is_reo_ring_num_in_nf_grp1(struct dp_soc *soc, int ring_num)
1215 {
1216 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_1 & (1 << ring_num));
1217 }
1218 
1219 /**
1220  * dp_is_reo_ring_num_in_nf_grp2() - Check if the current reo ring is part of
1221  *				rx_near_full_grp2 mask
1222  * @soc: Datapath SoC Handle
1223  * @ring_num: REO ring number
1224  *
1225  * Return: 1 if the ring_num belongs to reo_nf_grp2,
1226  *	   0, otherwise.
1227  */
1228 static inline int
1229 dp_is_reo_ring_num_in_nf_grp2(struct dp_soc *soc, int ring_num)
1230 {
1231 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_2 & (1 << ring_num));
1232 }
1233 
1234 /**
1235  * dp_srng_get_near_full_irq_mask() - Get near-full irq mask for a particular
1236  *				ring type and number
1237  * @soc: Datapath SoC handle
1238  * @ring_type: SRNG type
1239  * @ring_num: ring num
1240  *
1241  * Return: near ful irq mask pointer
1242  */
1243 static inline
1244 uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
1245 					enum hal_ring_type ring_type,
1246 					int ring_num)
1247 {
1248 	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
1249 	uint8_t wbm2_sw_rx_rel_ring_id;
1250 	uint8_t *nf_irq_mask = NULL;
1251 
1252 	switch (ring_type) {
1253 	case WBM2SW_RELEASE:
1254 		wbm2_sw_rx_rel_ring_id =
1255 			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
1256 		if (ring_num != wbm2_sw_rx_rel_ring_id) {
1257 			nf_irq_mask = &soc->wlan_cfg_ctx->
1258 					int_tx_ring_near_full_irq_mask[0];
1259 		}
1260 		break;
1261 	case REO_DST:
1262 		if (dp_is_reo_ring_num_in_nf_grp1(soc, ring_num))
1263 			nf_irq_mask =
1264 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_1_mask[0];
1265 		else if (dp_is_reo_ring_num_in_nf_grp2(soc, ring_num))
1266 			nf_irq_mask =
1267 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_2_mask[0];
1268 		else
1269 			qdf_assert(0);
1270 		break;
1271 	default:
1272 		break;
1273 	}
1274 
1275 	return nf_irq_mask;
1276 }
1277 
1278 /**
1279  * dp_srng_set_msi2_ring_params() - Set the msi2 addr/data in the ring params
1280  * @soc: Datapath SoC handle
1281  * @ring_params: srng params handle
1282  * @msi2_addr: MSI2 addr to be set for the SRNG
1283  * @msi2_data: MSI2 data to be set for the SRNG
1284  *
1285  * Return: None
1286  */
1287 static inline
1288 void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
1289 				  struct hal_srng_params *ring_params,
1290 				  qdf_dma_addr_t msi2_addr,
1291 				  uint32_t msi2_data)
1292 {
1293 	ring_params->msi2_addr = msi2_addr;
1294 	ring_params->msi2_data = msi2_data;
1295 }
1296 
1297 /**
1298  * dp_srng_msi2_setup() - Setup MSI2 details for near full IRQ of an SRNG
1299  * @soc: Datapath SoC handle
1300  * @ring_params: ring_params for SRNG
1301  * @ring_type: SENG type
1302  * @ring_num: ring number for the SRNG
1303  * @nf_msi_grp_num: near full msi group number
1304  *
1305  * Return: None
1306  */
static inline void
dp_srng_msi2_setup(struct dp_soc *soc,
		   struct hal_srng_params *ring_params,
		   int ring_type, int ring_num, int nf_msi_grp_num)
{
	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
	int msi_data_count, ret;

	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);
	/* No MSI assignment for DP: leave msi2 params untouched */
	if (ret)
		return;

	/* Negative group: ring has no near-full ext_group; disable MSI2 */
	if (nf_msi_grp_num < 0) {
		dp_init_info("%pK: ring near full IRQ not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi2_addr = 0;
		ring_params->msi2_data = 0;
		return;
	}

	/* Sharing an MSI between groups is flagged but still configured
	 * below (modulo wrap in the msi2_data computation).
	 */
	if (dp_is_msi_group_number_invalid(nf_msi_grp_num, msi_data_count)) {
		dp_init_warn("%pK: 2 msi_groups will share an msi for near full IRQ; msi_group_num %d",
			     soc, nf_msi_grp_num);
		QDF_ASSERT(0);
	}

	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

	ring_params->nf_irq_support = 1;
	/* 64-bit MSI address assembled from the two 32-bit halves */
	ring_params->msi2_addr = addr_low;
	ring_params->msi2_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi2_data = (nf_msi_grp_num % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;
}
1344 
1345 /* Percentage of ring entries considered as nearly full */
1346 #define DP_NF_HIGH_THRESH_PERCENTAGE	75
1347 /* Percentage of ring entries considered as critically full */
1348 #define DP_NF_CRIT_THRESH_PERCENTAGE	90
1349 /* Percentage of ring entries considered as safe threshold */
1350 #define DP_NF_SAFE_THRESH_PERCENTAGE	50
1351 
1352 /**
1353  * dp_srng_configure_nf_interrupt_thresholds() - Configure the thresholds for
1354  *			near full irq
1355  * @soc: Datapath SoC handle
1356  * @ring_params: ring params for SRNG
1357  * @ring_type: ring type
1358  */
1359 static inline void
1360 dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
1361 					  struct hal_srng_params *ring_params,
1362 					  int ring_type)
1363 {
1364 	if (ring_params->nf_irq_support) {
1365 		ring_params->high_thresh = (ring_params->num_entries *
1366 					    DP_NF_HIGH_THRESH_PERCENTAGE) / 100;
1367 		ring_params->crit_thresh = (ring_params->num_entries *
1368 					    DP_NF_CRIT_THRESH_PERCENTAGE) / 100;
1369 		ring_params->safe_thresh = (ring_params->num_entries *
1370 					    DP_NF_SAFE_THRESH_PERCENTAGE) /100;
1371 	}
1372 }
1373 
1374 /**
1375  * dp_srng_set_nf_thresholds() - Set the near full thresholds to srng data
1376  *			structure from the ring params
1377  * @soc: Datapath SoC handle
1378  * @srng: SRNG handle
1379  * @ring_params: ring params for a SRNG
1380  *
1381  * Return: None
1382  */
1383 static inline void
1384 dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
1385 			  struct hal_srng_params *ring_params)
1386 {
1387 	srng->crit_thresh = ring_params->crit_thresh;
1388 	srng->safe_thresh = ring_params->safe_thresh;
1389 }
1390 
#else
/*
 * WLAN_FEATURE_NEAR_FULL_IRQ disabled: no-op stubs so callers need no
 * conditional compilation. No ring gets a near-full mask or MSI2 setup.
 */
static inline
uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
					enum hal_ring_type ring_type,
					int ring_num)
{
	return NULL;
}

static inline
void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
				  struct hal_srng_params *ring_params,
				  qdf_dma_addr_t msi2_addr,
				  uint32_t msi2_data)
{
}

static inline void
dp_srng_msi2_setup(struct dp_soc *soc,
		   struct hal_srng_params *ring_params,
		   int ring_type, int ring_num, int nf_msi_grp_num)
{
}

static inline void
dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
					  struct hal_srng_params *ring_params,
					  int ring_type)
{
}

static inline void
dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
			  struct hal_srng_params *ring_params)
{
}
#endif
1428 
/**
 * dp_srng_calculate_msi_group() - Find the interrupt ext_group(s) for a ring
 * @soc: Datapath SoC handle
 * @ring_type: SRNG type
 * @ring_num: ring number within the type (may be remapped below before the
 *	      mask lookup)
 * @reg_msi_grp_num: [out] regular interrupt group index (may be negative if
 *		     the ring is in no group's mask)
 * @nf_irq_support: whether this ring supports a near-full interrupt
 * @nf_msi_grp_num: [out] near-full interrupt group index; only written when
 *		    nf_irq_support is set and a near-full mask exists
 *
 * Return: QDF_STATUS_SUCCESS when the ring type is interrupt-driven,
 *	   -QDF_STATUS_E_NOENT for ring types with no host interrupt.
 */
static int dp_srng_calculate_msi_group(struct dp_soc *soc,
				       enum hal_ring_type ring_type,
				       int ring_num,
				       int *reg_msi_grp_num,
				       bool nf_irq_support,
				       int *nf_msi_grp_num)
{
	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
	uint8_t *grp_mask, *nf_irq_mask = NULL;
	bool nf_irq_enabled = false;
	uint8_t wbm2_sw_rx_rel_ring_id;

	switch (ring_type) {
	case WBM2SW_RELEASE:
		wbm2_sw_rx_rel_ring_id =
			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
		if (ring_num == wbm2_sw_rx_rel_ring_id) {
			/* dp_rx_wbm_err_process - soc->rx_rel_ring */
			grp_mask = &cfg_ctx->int_rx_wbm_rel_ring_mask[0];
			/* rx release ring is bit 0 of its mask */
			ring_num = 0;
		} else { /* dp_tx_comp_handler - soc->tx_comp_ring */
			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
			nf_irq_mask = dp_srng_get_near_full_irq_mask(soc,
								     ring_type,
								     ring_num);
			if (nf_irq_mask)
				nf_irq_enabled = true;

			/*
			 * Using ring 4 as 4th tx completion ring since ring 3
			 * is Rx error ring
			 */
			if (ring_num == WBM2SW_TXCOMP_RING4_NUM)
				ring_num = TXCOMP_RING4_NUM;
		}
	break;

	case REO_EXCEPTION:
		/* dp_rx_err_process - &soc->reo_exception_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
	break;

	case REO_DST:
		/* dp_rx_process - soc->reo_dest_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
		nf_irq_mask = dp_srng_get_near_full_irq_mask(soc, ring_type,
							     ring_num);
		if (nf_irq_mask)
			nf_irq_enabled = true;
	break;

	case REO_STATUS:
		/* dp_reo_status_ring_handler - soc->reo_status_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
	break;

	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
	case RXDMA_MONITOR_STATUS:
	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
	case RXDMA_MONITOR_DST:
		/* dp_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
	break;
	case TX_MONITOR_DST:
		/* dp_tx_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_tx_mon_ring_mask[0];
	break;
	case RXDMA_DST:
		/* dp_rxdma_err_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
	break;

	case RXDMA_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
	break;

	case RXDMA_MONITOR_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
	break;

	case TX_MONITOR_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2txmon_ring_mask[0];
	break;

	case TCL_DATA:
	/* CMD_CREDIT_RING is used as command in 8074 and credit in 9000 */
	case TCL_CMD_CREDIT:
	case REO_CMD:
	case SW2WBM_RELEASE:
	case WBM_IDLE_LINK:
		/* normally empty SW_TO_HW rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case TCL_STATUS:
	case REO_REINJECT:
		/* misc unused rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case CE_SRC:
	case CE_DST:
	case CE_DST_STATUS:
		/* CE_rings - currently handled by hif */
	default:
		return -QDF_STATUS_E_NOENT;
	break;
	}

	/* May store a negative index; the caller handles that case */
	*reg_msi_grp_num = dp_srng_find_ring_in_mask(ring_num, grp_mask);

	if (nf_irq_support && nf_irq_enabled) {
		*nf_msi_grp_num = dp_srng_find_ring_in_mask(ring_num,
							    nf_irq_mask);
	}

	return QDF_STATUS_SUCCESS;
}
1547 
1548 /*
1549  * dp_get_num_msi_available()- API to get number of MSIs available
1550  * @dp_soc: DP soc Handle
1551  * @interrupt_mode: Mode of interrupts
1552  *
1553  * Return: Number of MSIs available or 0 in case of integrated
1554  */
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/* Single-pdev (integrated) targets: no dedicated DP MSIs */
static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
{
	return 0;
}
#else
/*
 * dp_get_num_msi_available()- API to get number of MSIs available
 * @dp_soc: DP soc Handle
 * @interrupt_mode: Mode of interrupts
 *
 * Return: Number of MSIs available or 0 in case of integrated
 *
 * NOTE(review): despite the header above, this variant returns -EINVAL on
 * failure/invalid mode — callers must treat negative values as errors.
 */
static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
{
	int msi_data_count;
	int msi_data_start;
	int msi_irq_start;
	int ret;

	if (interrupt_mode == DP_INTR_INTEGRATED) {
		return 0;
	} else if (interrupt_mode == DP_INTR_MSI || interrupt_mode ==
		   DP_INTR_POLL) {
		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
						  &msi_data_count,
						  &msi_data_start,
						  &msi_irq_start);
		if (ret) {
			qdf_err("Unable to get DP MSI assignment %d",
				interrupt_mode);
			return -EINVAL;
		}
		return msi_data_count;
	}
	qdf_err("Interrupt mode invalid %d", interrupt_mode);
	return -EINVAL;
}
#endif
1594 
/**
 * dp_srng_msi_setup() - Program regular and near-full MSI params for a ring
 * @soc: Datapath SoC handle
 * @ring_params: [out] SRNG params to be filled with MSI addr/data
 * @ring_type: SRNG type
 * @ring_num: ring number within the type
 *
 * Resolves the ring's interrupt ext_group and fills msi_addr/msi_data;
 * msi2 (near-full) params are set up when the ring supports them.
 */
static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
			      *ring_params, int ring_type, int ring_num)
{
	int reg_msi_grp_num;
	/*
	 * nf_msi_grp_num needs to be initialized with negative value,
	 * to avoid configuring near-full msi for WBM2SW3 ring
	 */
	int nf_msi_grp_num = -1;
	int msi_data_count;
	int ret;
	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
	bool nf_irq_support;

	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					    &msi_data_count, &msi_data_start,
					    &msi_irq_start);

	/* No MSI assignment for DP: nothing to program */
	if (ret)
		return;

	nf_irq_support = hal_srng_is_near_full_irq_supported(soc->hal_soc,
							     ring_type,
							     ring_num);
	ret = dp_srng_calculate_msi_group(soc, ring_type, ring_num,
					  &reg_msi_grp_num,
					  nf_irq_support,
					  &nf_msi_grp_num);
	/* Ring type takes no host interrupt at all: clear both MSIs */
	if (ret < 0) {
		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
		return;
	}

	/* Ring not in any regular group mask, but near-full may still apply */
	if (reg_msi_grp_num < 0) {
		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		goto configure_msi2;
	}

	if (dp_is_msi_group_number_invalid(reg_msi_grp_num, msi_data_count)) {
		dp_init_warn("%pK: 2 msi_groups will share an msi; msi_group_num %d",
			     soc, reg_msi_grp_num);
		QDF_ASSERT(0);
	}

	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

	/* 64-bit MSI address assembled from the two 32-bit halves */
	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = (reg_msi_grp_num % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;

	dp_debug("ring type %u ring_num %u msi->data %u msi_addr %llx",
		 ring_type, ring_num, ring_params->msi_data,
		 (uint64_t)ring_params->msi_addr);

configure_msi2:
	if (!nf_irq_support) {
		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
		return;
	}

	dp_srng_msi2_setup(soc, ring_params, ring_type, ring_num,
			   nf_msi_grp_num);
}
1667 
1668 #ifdef FEATURE_AST
1669 /**
1670  * dp_print_peer_ast_entries() - Dump AST entries of peer
1671  * @soc: Datapath soc handle
1672  * @peer: Datapath peer
1673  * @arg: argument to iterate function
1674  *
1675  * return void
1676  */
1677 static void
1678 dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
1679 {
1680 	struct dp_ast_entry *ase, *tmp_ase;
1681 	uint32_t num_entries = 0;
1682 	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
1683 			"NONE", "STATIC", "SELF", "WDS", "HMWDS", "BSS",
1684 			"DA", "HMWDS_SEC"};
1685 
1686 	DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
1687 	    DP_PRINT_STATS("%6d mac_addr = "QDF_MAC_ADDR_FMT
1688 		    " peer_mac_addr = "QDF_MAC_ADDR_FMT
1689 		    " peer_id = %u"
1690 		    " type = %s"
1691 		    " next_hop = %d"
1692 		    " is_active = %d"
1693 		    " ast_idx = %d"
1694 		    " ast_hash = %d"
1695 		    " delete_in_progress = %d"
1696 		    " pdev_id = %d"
1697 		    " vdev_id = %d",
1698 		    ++num_entries,
1699 		    QDF_MAC_ADDR_REF(ase->mac_addr.raw),
1700 		    QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1701 		    ase->peer_id,
1702 		    type[ase->type],
1703 		    ase->next_hop,
1704 		    ase->is_active,
1705 		    ase->ast_idx,
1706 		    ase->ast_hash_value,
1707 		    ase->delete_in_progress,
1708 		    ase->pdev_id,
1709 		    ase->vdev_id);
1710 	}
1711 }
1712 
1713 /**
1714  * dp_print_ast_stats() - Dump AST table contents
1715  * @soc: Datapath soc handle
1716  *
1717  * return void
1718  */
void dp_print_ast_stats(struct dp_soc *soc)
{
	DP_PRINT_STATS("AST Stats:");
	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
	DP_PRINT_STATS("	Entries MAP ERR  = %d", soc->stats.ast.map_err);
	DP_PRINT_STATS("	Entries Mismatch ERR  = %d",
		       soc->stats.ast.ast_mismatch);

	DP_PRINT_STATS("AST Table:");

	/* Hold the AST lock so entries cannot be freed mid-dump */
	qdf_spin_lock_bh(&soc->ast_lock);

	dp_soc_iterate_peer(soc, dp_print_peer_ast_entries, NULL,
			    DP_MOD_ID_GENERIC_STATS);

	qdf_spin_unlock_bh(&soc->ast_lock);
}
1738 #else
/* FEATURE_AST disabled: the AST table dump is unavailable.
 * (Removed the redundant trailing `return;` from this void function.)
 */
void dp_print_ast_stats(struct dp_soc *soc)
{
	DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST");
}
1744 #endif
1745 
1746 /**
1747  * dp_print_peer_info() - Dump peer info
1748  * @soc: Datapath soc handle
1749  * @peer: Datapath peer handle
1750  * @arg: argument to iter function
1751  *
1752  * return void
1753  */
static void
dp_print_peer_info(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
	struct dp_txrx_peer *txrx_peer = NULL;

	/* Peer may not have a txrx context yet; skip it silently */
	txrx_peer = dp_get_txrx_peer(peer);
	if (!txrx_peer)
		return;

	/* Capture flags come from the optional monitor peer (may be NULL) */
	DP_PRINT_STATS(" peer id = %d"
		       " peer_mac_addr = "QDF_MAC_ADDR_FMT
		       " nawds_enabled = %d"
		       " bss_peer = %d"
		       " wds_enabled = %d"
		       " tx_cap_enabled = %d"
		       " rx_cap_enabled = %d",
		       peer->peer_id,
		       QDF_MAC_ADDR_REF(peer->mac_addr.raw),
		       txrx_peer->nawds_enabled,
		       txrx_peer->bss_peer,
		       txrx_peer->wds_enabled,
		       peer->monitor_peer ?
					peer->monitor_peer->tx_cap_enabled : 0,
		       peer->monitor_peer ?
					peer->monitor_peer->rx_cap_enabled : 0);
}
1780 
1781 /**
1782  * dp_print_peer_table() - Dump all Peer stats
1783  * @vdev: Datapath Vdev handle
1784  *
1785  * return void
1786  */
static void dp_print_peer_table(struct dp_vdev *vdev)
{
	DP_PRINT_STATS("Dumping Peer Table  Stats:");
	/* Walk every peer on this vdev and print per-peer info */
	dp_vdev_iterate_peer(vdev, dp_print_peer_info, NULL,
			     DP_MOD_ID_GENERIC_STATS);
}
1793 
1794 #ifdef WLAN_DP_PER_RING_TYPE_CONFIG
1795 /**
1796  * dp_srng_configure_interrupt_thresholds() - Retrieve interrupt
1797  * threshold values from the wlan_srng_cfg table for each ring type
1798  * @soc: device handle
1799  * @ring_params: per ring specific parameters
1800  * @ring_type: Ring type
1801  * @ring_num: Ring number for a given ring type
1802  *
1803  * Fill the ring params with the interrupt threshold
1804  * configuration parameters available in the per ring type wlan_srng_cfg
1805  * table.
1806  *
1807  * Return: None
1808  */
static void
dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
				       struct hal_srng_params *ring_params,
				       int ring_type, int ring_num,
				       int num_entries)
{
	uint8_t wbm2_sw_rx_rel_ring_id;

	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);

	if (ring_type == REO_DST) {
		/* Rx path rings use the rx-specific thresholds */
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
	} else if (ring_type == WBM2SW_RELEASE &&
		   (ring_num == wbm2_sw_rx_rel_ring_id)) {
		/* Rx release ring falls back to the "other" thresholds */
		ring_params->intr_timer_thres_us =
				wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
				wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
	} else {
		/* Everything else: per-ring-type wlan_srng_cfg table */
		ring_params->intr_timer_thres_us =
				soc->wlan_srng_cfg[ring_type].timer_threshold;
		ring_params->intr_batch_cntr_thres_entries =
				soc->wlan_srng_cfg[ring_type].batch_count_threshold;
	}
	/* A non-zero low threshold additionally enables low-thres IRQs */
	ring_params->low_threshold =
			soc->wlan_srng_cfg[ring_type].low_threshold;
	if (ring_params->low_threshold)
		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;

	dp_srng_configure_nf_interrupt_thresholds(soc, ring_params, ring_type);
}
1843 #else
static void
dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
				       struct hal_srng_params *ring_params,
				       int ring_type, int ring_num,
				       int num_entries)
{
	uint8_t wbm2_sw_rx_rel_ring_id;

	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);

	if (ring_type == REO_DST) {
		/* Rx path rings use the rx-specific thresholds */
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
	} else if (ring_type == WBM2SW_RELEASE &&
		   (ring_num < wbm2_sw_rx_rel_ring_id ||
		   ring_num == WBM2SW_TXCOMP_RING4_NUM)) {
		/* Tx completion rings (all WBM2SW below the rx release ring,
		 * plus the 4th completion ring) use tx thresholds
		 */
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
	} else {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
	}

	/* These rings donot require interrupt to host. Make them zero */
	switch (ring_type) {
	case REO_REINJECT:
	case REO_CMD:
	case TCL_DATA:
	case TCL_CMD_CREDIT:
	case TCL_STATUS:
	case WBM_IDLE_LINK:
	case SW2WBM_RELEASE:
	case PPE2TCL:
	case SW2RXDMA_NEW:
		ring_params->intr_timer_thres_us = 0;
		ring_params->intr_batch_cntr_thres_entries = 0;
		break;
	}

	/* Enable low threshold interrupts for rx buffer rings (regular and
	 * monitor buffer rings.
	 * TODO: See if this is required for any other ring
	 */
	/* NOTE(review): the parenthesization below groups the last two
	 * comparisons together, but since || is associative the condition is
	 * equivalent to a flat OR of the four ring types.
	 */
	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
	    (ring_type == RXDMA_MONITOR_STATUS ||
	    (ring_type == TX_MONITOR_BUF))) {
		/* TODO: Setting low threshold to 1/8th of ring size
		 * see if this needs to be configurable
		 */
		ring_params->low_threshold = num_entries >> 3;
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
		ring_params->intr_batch_cntr_thres_entries = 0;
	}

	/* During initialisation monitor rings are only filled with
	 * MON_BUF_MIN_ENTRIES entries. So low threshold needs to be set to
	 * a value less than that. Low threshold value is reconfigured again
	 * to 1/8th of the ring size when monitor vap is created.
	 */
	if (ring_type == RXDMA_MONITOR_BUF)
		ring_params->low_threshold = MON_BUF_MIN_ENTRIES >> 1;

	/* In case of PCI chipsets, we dont have PPDU end interrupts,
	 * so MONITOR STATUS ring is reaped by receiving MSI from srng.
	 * Keep batch threshold as 8 so that interrupt is received for
	 * every 4 packets in MONITOR_STATUS ring
	 */
	if ((ring_type == RXDMA_MONITOR_STATUS) &&
	    (soc->intr_mode == DP_INTR_MSI))
		ring_params->intr_batch_cntr_thres_entries = 4;
}
1923 #endif
1924 
1925 #ifdef DP_MEM_PRE_ALLOC
1926 
1927 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
1928 			   size_t ctxt_size)
1929 {
1930 	void *ctxt_mem;
1931 
1932 	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_context) {
1933 		dp_warn("dp_prealloc_get_context null!");
1934 		goto dynamic_alloc;
1935 	}
1936 
1937 	ctxt_mem = soc->cdp_soc.ol_ops->dp_prealloc_get_context(ctxt_type);
1938 
1939 	if (ctxt_mem)
1940 		goto end;
1941 
1942 dynamic_alloc:
1943 	dp_info("Pre-alloc of ctxt failed. Dynamic allocation");
1944 	ctxt_mem = qdf_mem_malloc(ctxt_size);
1945 end:
1946 	return ctxt_mem;
1947 }
1948 
1949 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
1950 			 void *vaddr)
1951 {
1952 	QDF_STATUS status;
1953 
1954 	if (soc->cdp_soc.ol_ops->dp_prealloc_put_context) {
1955 		status = soc->cdp_soc.ol_ops->dp_prealloc_put_context(
1956 								ctxt_type,
1957 								vaddr);
1958 	} else {
1959 		dp_warn("dp_prealloc_get_context null!");
1960 		status = QDF_STATUS_E_NOSUPPORT;
1961 	}
1962 
1963 	if (QDF_IS_STATUS_ERROR(status)) {
1964 		dp_info("Context not pre-allocated");
1965 		qdf_mem_free(vaddr);
1966 	}
1967 }
1968 
/* Allocate DMA-coherent, DP_RING_BASE_ALIGN-aligned SRNG memory, preferring
 * the platform pre-allocated pool and falling back to qdf allocation.
 * Fills srng alloc_size and base vaddr/paddr fields as a side effect.
 */
static inline
void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
					   struct dp_srng *srng,
					   uint32_t ring_type)
{
	void *mem;

	/* Must not be called on an srng that already holds pool memory */
	qdf_assert(!srng->is_mem_prealloc);

	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_consistent) {
		dp_warn("dp_prealloc_get_consistent is null!");
		goto qdf;
	}

	mem =
		soc->cdp_soc.ol_ops->dp_prealloc_get_consistent
						(&srng->alloc_size,
						 &srng->base_vaddr_unaligned,
						 &srng->base_paddr_unaligned,
						 &srng->base_paddr_aligned,
						 DP_RING_BASE_ALIGN, ring_type);

	/* is_mem_prealloc tells the free path to return memory to the pool */
	if (mem) {
		srng->is_mem_prealloc = true;
		goto end;
	}
qdf:
	mem =  qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
						&srng->base_vaddr_unaligned,
						&srng->base_paddr_unaligned,
						&srng->base_paddr_aligned,
						DP_RING_BASE_ALIGN);
end:
	dp_info("%s memory %pK dp_srng %pK ring_type %d alloc_size %d num_entries %d",
		srng->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", mem,
		srng, ring_type, srng->alloc_size, srng->num_entries);
	return mem;
}
2007 
2008 static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
2009 					       struct dp_srng *srng)
2010 {
2011 	if (srng->is_mem_prealloc) {
2012 		if (!soc->cdp_soc.ol_ops->dp_prealloc_put_consistent) {
2013 			dp_warn("dp_prealloc_put_consistent is null!");
2014 			QDF_BUG(0);
2015 			return;
2016 		}
2017 		soc->cdp_soc.ol_ops->dp_prealloc_put_consistent
2018 						(srng->alloc_size,
2019 						 srng->base_vaddr_unaligned,
2020 						 srng->base_paddr_unaligned);
2021 
2022 	} else {
2023 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2024 					srng->alloc_size,
2025 					srng->base_vaddr_unaligned,
2026 					srng->base_paddr_unaligned, 0);
2027 	}
2028 }
2029 
2030 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
2031 				   enum dp_desc_type desc_type,
2032 				   struct qdf_mem_multi_page_t *pages,
2033 				   size_t element_size,
2034 				   uint32_t element_num,
2035 				   qdf_dma_context_t memctxt,
2036 				   bool cacheable)
2037 {
2038 	if (!soc->cdp_soc.ol_ops->dp_get_multi_pages) {
2039 		dp_warn("dp_get_multi_pages is null!");
2040 		goto qdf;
2041 	}
2042 
2043 	pages->num_pages = 0;
2044 	pages->is_mem_prealloc = 0;
2045 	soc->cdp_soc.ol_ops->dp_get_multi_pages(desc_type,
2046 						element_size,
2047 						element_num,
2048 						pages,
2049 						cacheable);
2050 	if (pages->num_pages)
2051 		goto end;
2052 
2053 qdf:
2054 	qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size,
2055 				  element_num, memctxt, cacheable);
2056 end:
2057 	dp_info("%s desc_type %d element_size %d element_num %d cacheable %d",
2058 		pages->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc",
2059 		desc_type, (int)element_size, element_num, cacheable);
2060 }
2061 
2062 void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
2063 				  enum dp_desc_type desc_type,
2064 				  struct qdf_mem_multi_page_t *pages,
2065 				  qdf_dma_context_t memctxt,
2066 				  bool cacheable)
2067 {
2068 	if (pages->is_mem_prealloc) {
2069 		if (!soc->cdp_soc.ol_ops->dp_put_multi_pages) {
2070 			dp_warn("dp_put_multi_pages is null!");
2071 			QDF_BUG(0);
2072 			return;
2073 		}
2074 
2075 		soc->cdp_soc.ol_ops->dp_put_multi_pages(desc_type, pages);
2076 		qdf_mem_zero(pages, sizeof(*pages));
2077 	} else {
2078 		qdf_mem_multi_pages_free(soc->osdev, pages,
2079 					 memctxt, cacheable);
2080 	}
2081 }
2082 
2083 #else
2084 
2085 static inline
2086 void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
2087 					   struct dp_srng *srng,
2088 					   uint32_t ring_type)
2089 
2090 {
2091 	void *mem;
2092 
2093 	mem = qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
2094 					       &srng->base_vaddr_unaligned,
2095 					       &srng->base_paddr_unaligned,
2096 					       &srng->base_paddr_aligned,
2097 					       DP_RING_BASE_ALIGN);
2098 	if (mem)
2099 		qdf_mem_set(srng->base_vaddr_unaligned, 0, srng->alloc_size);
2100 
2101 	return mem;
2102 }
2103 
static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
					       struct dp_srng *srng)
{
	/* No pre-alloc pool in this build: ring memory always came from
	 * the DMA coherent allocator, so release it there.
	 */
	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
				srng->alloc_size,
				srng->base_vaddr_unaligned,
				srng->base_paddr_unaligned, 0);
}
2112 
2113 #endif /* DP_MEM_PRE_ALLOC */
2114 
2115 /*
2116  * dp_srng_free() - Free SRNG memory
2117  * @soc  : Data path soc handle
2118  * @srng : SRNG pointer
2119  *
2120  * return: None
2121  */
2122 void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng)
2123 {
2124 	if (srng->alloc_size && srng->base_vaddr_unaligned) {
2125 		if (!srng->cached) {
2126 			dp_srng_mem_free_consistent(soc, srng);
2127 		} else {
2128 			qdf_mem_free(srng->base_vaddr_unaligned);
2129 		}
2130 		srng->alloc_size = 0;
2131 		srng->base_vaddr_unaligned = NULL;
2132 	}
2133 	srng->hal_srng = NULL;
2134 }
2135 
2136 qdf_export_symbol(dp_srng_free);
2137 
2138 #ifdef DISABLE_MON_RING_MSI_CFG
2139 /*
2140  * dp_skip_msi_cfg() - Check if msi cfg has to be skipped for ring_type
2141  * @ring_type: sring type
2142  *
2143  * Return: True if msi cfg should be skipped for srng type else false
2144  */
2145 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
2146 {
2147 	if (ring_type == RXDMA_MONITOR_STATUS)
2148 		return true;
2149 
2150 	return false;
2151 }
2152 #else
2153 #ifdef DP_CON_MON_MSI_ENABLED
2154 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
2155 {
2156 	if (soc->cdp_soc.ol_ops->get_con_mode &&
2157 	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE) {
2158 		if (ring_type == REO_DST)
2159 			return true;
2160 	} else if (ring_type == RXDMA_MONITOR_STATUS) {
2161 		return true;
2162 	}
2163 
2164 	return false;
2165 }
2166 #else
static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
{
	/* Default build: MSI configuration is never skipped for any ring */
	return false;
}
2171 #endif /* DP_CON_MON_MSI_ENABLED */
2172 #endif /* DISABLE_MON_RING_MSI_CFG */
2173 
2174 /*
2175  * dp_srng_init() - Initialize SRNG
2176  * @soc  : Data path soc handle
2177  * @srng : SRNG pointer
2178  * @ring_type : Ring Type
2179  * @ring_num: Ring number
2180  * @mac_id: mac_id
2181  *
2182  * return: QDF_STATUS
2183  */
QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng,
			int ring_type, int ring_num, int mac_id)
{
	hal_soc_handle_t hal_soc = soc->hal_soc;
	struct hal_srng_params ring_params;

	/* Re-initializing an already set-up ring is treated as success,
	 * not an error, so repeated init calls are harmless.
	 */
	if (srng->hal_srng) {
		dp_init_err("%pK: Ring type: %d, num:%d is already initialized",
			    soc, ring_type, ring_num);
		return QDF_STATUS_SUCCESS;
	}

	/* memset the srng ring to zero */
	qdf_mem_zero(srng->base_vaddr_unaligned, srng->alloc_size);

	qdf_mem_zero(&ring_params, sizeof(struct hal_srng_params));
	ring_params.ring_base_paddr = srng->base_paddr_aligned;
	ring_params.ring_base_vaddr = srng->base_vaddr_aligned;

	ring_params.num_entries = srng->num_entries;

	dp_info("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
		ring_type, ring_num,
		(void *)ring_params.ring_base_vaddr,
		(void *)ring_params.ring_base_paddr,
		ring_params.num_entries);

	/* Program MSI only in MSI interrupt mode and when this ring type
	 * is not excluded by the build/mode specific skip policy.
	 */
	if (soc->intr_mode == DP_INTR_MSI && !dp_skip_msi_cfg(soc, ring_type)) {
		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
				 ring_type, ring_num);
	} else {
		ring_params.msi_data = 0;
		ring_params.msi_addr = 0;
		dp_srng_set_msi2_ring_params(soc, &ring_params, 0, 0);
		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
				 ring_type, ring_num);
	}

	dp_srng_configure_interrupt_thresholds(soc, &ring_params,
					       ring_type, ring_num,
					       srng->num_entries);

	dp_srng_set_nf_thresholds(soc, srng, &ring_params);

	if (srng->cached)
		ring_params.flags |= HAL_SRNG_CACHED_DESC;

	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
					mac_id, &ring_params);

	/* HAL setup failure: release the backing memory so the caller
	 * does not leak a half-initialized ring.
	 */
	if (!srng->hal_srng) {
		dp_srng_free(soc, srng);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
2242 
2243 qdf_export_symbol(dp_srng_init);
2244 
2245 /*
2246  * dp_srng_alloc() - Allocate memory for SRNG
2247  * @soc  : Data path soc handle
2248  * @srng : SRNG pointer
2249  * @ring_type : Ring Type
2250  * @num_entries: Number of entries
2251  * @cached: cached flag variable
2252  *
2253  * return: QDF_STATUS
2254  */
2255 QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
2256 			 int ring_type, uint32_t num_entries,
2257 			 bool cached)
2258 {
2259 	hal_soc_handle_t hal_soc = soc->hal_soc;
2260 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
2261 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
2262 
2263 	if (srng->base_vaddr_unaligned) {
2264 		dp_init_err("%pK: Ring type: %d, is already allocated",
2265 			    soc, ring_type);
2266 		return QDF_STATUS_SUCCESS;
2267 	}
2268 
2269 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
2270 	srng->hal_srng = NULL;
2271 	srng->alloc_size = num_entries * entry_size;
2272 	srng->num_entries = num_entries;
2273 	srng->cached = cached;
2274 
2275 	if (!cached) {
2276 		srng->base_vaddr_aligned =
2277 		    dp_srng_aligned_mem_alloc_consistent(soc,
2278 							 srng,
2279 							 ring_type);
2280 	} else {
2281 		srng->base_vaddr_aligned = qdf_aligned_malloc(
2282 					&srng->alloc_size,
2283 					&srng->base_vaddr_unaligned,
2284 					&srng->base_paddr_unaligned,
2285 					&srng->base_paddr_aligned,
2286 					DP_RING_BASE_ALIGN);
2287 	}
2288 
2289 	if (!srng->base_vaddr_aligned)
2290 		return QDF_STATUS_E_NOMEM;
2291 
2292 	return QDF_STATUS_SUCCESS;
2293 }
2294 
2295 qdf_export_symbol(dp_srng_alloc);
2296 
2297 /*
2298  * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
2299  * @soc: DP SOC handle
2300  * @srng: source ring structure
2301  * @ring_type: type of ring
2302  * @ring_num: ring number
2303  *
2304  * Return: None
2305  */
2306 void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
2307 		    int ring_type, int ring_num)
2308 {
2309 	if (!srng->hal_srng) {
2310 		dp_init_err("%pK: Ring type: %d, num:%d not setup",
2311 			    soc, ring_type, ring_num);
2312 		return;
2313 	}
2314 
2315 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
2316 	srng->hal_srng = NULL;
2317 }
2318 
2319 qdf_export_symbol(dp_srng_deinit);
2320 
2321 /* TODO: Need this interface from HIF */
2322 void *hif_get_hal_handle(struct hif_opaque_softc *hif_handle);
2323 
2324 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
2325 int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
2326 			 hal_ring_handle_t hal_ring_hdl)
2327 {
2328 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2329 	uint32_t hp, tp;
2330 	uint8_t ring_id;
2331 
2332 	if (!int_ctx)
2333 		return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
2334 
2335 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
2336 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2337 
2338 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
2339 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_START);
2340 
2341 	return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
2342 }
2343 
2344 void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
2345 			hal_ring_handle_t hal_ring_hdl)
2346 {
2347 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2348 	uint32_t hp, tp;
2349 	uint8_t ring_id;
2350 
2351 	if (!int_ctx)
2352 		return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
2353 
2354 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
2355 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2356 
2357 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
2358 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_END);
2359 
2360 	return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
2361 }
2362 
static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
					      uint8_t hist_group_id)
{
	/* Record a timer-entry marker in the HIF event history group */
	hif_record_event(dp_soc->hif_handle, hist_group_id,
			 0, 0, 0, HIF_EVENT_TIMER_ENTRY);
}
2369 
static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
					     uint8_t hist_group_id)
{
	/* Record a timer-exit marker in the HIF event history group */
	hif_record_event(dp_soc->hif_handle, hist_group_id,
			 0, 0, 0, HIF_EVENT_TIMER_EXIT);
}
2376 #else
2377 
static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
					      uint8_t hist_group_id)
{
	/* No-op: DP event history is not compiled in */
}
2382 
static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
					     uint8_t hist_group_id)
{
	/* No-op: DP event history is not compiled in */
}
2387 
2388 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
2389 
2390 /*
2391  * dp_should_timer_irq_yield() - Decide if the bottom half should yield
2392  * @soc: DP soc handle
2393  * @work_done: work done in softirq context
2394  * @start_time: start time for the softirq
2395  *
2396  * Return: enum with yield code
2397  */
2398 enum timer_yield_status
2399 dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
2400 			  uint64_t start_time)
2401 {
2402 	uint64_t cur_time = qdf_get_log_timestamp();
2403 
2404 	if (!work_done)
2405 		return DP_TIMER_WORK_DONE;
2406 
2407 	if (cur_time - start_time > DP_MAX_TIMER_EXEC_TIME_TICKS)
2408 		return DP_TIMER_TIME_EXHAUST;
2409 
2410 	return DP_TIMER_NO_YIELD;
2411 }
2412 
2413 qdf_export_symbol(dp_should_timer_irq_yield);
2414 
2415 #ifdef DP_CON_MON_MSI_ENABLED
2416 static int dp_process_rxdma_dst_ring(struct dp_soc *soc,
2417 				     struct dp_intr *int_ctx,
2418 				     int mac_for_pdev,
2419 				     int total_budget)
2420 {
2421 	if (dp_soc_get_con_mode(soc) == QDF_GLOBAL_MONITOR_MODE)
2422 		return dp_monitor_process(soc, int_ctx, mac_for_pdev,
2423 					  total_budget);
2424 	else
2425 		return dp_rxdma_err_process(int_ctx, soc, mac_for_pdev,
2426 					    total_budget);
2427 }
2428 #else
static int dp_process_rxdma_dst_ring(struct dp_soc *soc,
				     struct dp_intr *int_ctx,
				     int mac_for_pdev,
				     int total_budget)
{
	/* Without DP_CON_MON_MSI_ENABLED the rxdma destination ring only
	 * carries error frames; process them within the given budget.
	 */
	return dp_rxdma_err_process(int_ctx, soc, mac_for_pdev,
				    total_budget);
}
2437 #endif
2438 
2439 /**
2440  * dp_process_lmac_rings() - Process LMAC rings
2441  * @int_ctx: interrupt context
2442  * @total_budget: budget of work which can be done
2443  *
2444  * Return: work done
2445  */
static int dp_process_lmac_rings(struct dp_intr *int_ctx, int total_budget)
{
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_soc *soc = int_ctx->soc;
	uint32_t remaining_quota = total_budget;
	struct dp_pdev *pdev = NULL;
	uint32_t work_done  = 0;
	int budget = total_budget;
	int ring = 0;

	/* Process LMAC interrupts */
	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
		int mac_for_pdev = ring;

		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
		if (!pdev)
			continue;
		/* Rx monitor destination/status ring for this LMAC */
		if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
			work_done = dp_monitor_process(soc, int_ctx,
						       mac_for_pdev,
						       remaining_quota);
			if (work_done)
				intr_stats->num_rx_mon_ring_masks++;
			budget -= work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}

		/* Tx monitor ring for this LMAC */
		if (int_ctx->tx_mon_ring_mask & (1 << mac_for_pdev)) {
			work_done = dp_tx_mon_process(soc, int_ctx,
						      mac_for_pdev,
						      remaining_quota);
			if (work_done)
				intr_stats->num_tx_mon_ring_masks++;
			budget -= work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}

		/* rxdma-to-host destination ring (error/monitor frames
		 * depending on build/mode, see dp_process_rxdma_dst_ring)
		 */
		if (int_ctx->rxdma2host_ring_mask &
				(1 << mac_for_pdev)) {
			work_done = dp_process_rxdma_dst_ring(soc, int_ctx,
							      mac_for_pdev,
							      remaining_quota);
			if (work_done)
				intr_stats->num_rxdma2host_ring_masks++;
			budget -=  work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}

		/* Host-to-rxdma buffer refill: low-threshold replenish of
		 * the Rx refill ring; does not consume the work budget.
		 */
		if (int_ctx->host2rxdma_ring_mask & (1 << mac_for_pdev)) {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;
			struct dp_srng *rx_refill_buf_ring;
			struct rx_desc_pool *rx_desc_pool;

			rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
			if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
				rx_refill_buf_ring =
					&soc->rx_refill_buf_ring[mac_for_pdev];
			else
				rx_refill_buf_ring =
					&soc->rx_refill_buf_ring[pdev->lmac_id];

			intr_stats->num_host2rxdma_ring_masks++;
			dp_rx_buffers_lt_replenish_simple(soc, mac_for_pdev,
							  rx_refill_buf_ring,
							  rx_desc_pool,
							  0,
							  &desc_list,
							  &tail);
		}

	}

	if (int_ctx->host2rxdma_mon_ring_mask)
		dp_rx_mon_buf_refill(int_ctx);

	if (int_ctx->host2txmon_ring_mask)
		dp_tx_mon_buf_refill(int_ctx);

budget_done:
	/* Report how much of the budget was actually consumed */
	return total_budget - budget;
}
2534 
2535 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
2536 /**
2537  * dp_service_near_full_srngs() - Bottom half handler to process the near
2538  *				full IRQ on a SRNG
2539  * @dp_ctx: Datapath SoC handle
2540  * @dp_budget: Number of SRNGs which can be processed in a single attempt
2541  *		without rescheduling
2542  *
2543  * Return: remaining budget/quota for the soc device
2544  */
2545 static uint32_t dp_service_near_full_srngs(void *dp_ctx, uint32_t dp_budget)
2546 {
2547 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2548 	struct dp_soc *soc = int_ctx->soc;
2549 
2550 	/*
2551 	 * dp_service_near_full_srngs arch ops should be initialized always
2552 	 * if the NEAR FULL IRQ feature is enabled.
2553 	 */
2554 	return soc->arch_ops.dp_service_near_full_srngs(soc, int_ctx,
2555 							dp_budget);
2556 }
2557 #endif
2558 
2559 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2560 
2561 /*
2562  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
2563  * @dp_ctx: DP SOC handle
2564  * @budget: Number of frames/descriptors that can be processed in one shot
2565  *
2566  * Return: remaining budget/quota for the soc device
2567  */
static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
{
	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_soc *soc = int_ctx->soc;
	int ring = 0;
	int index;
	uint32_t work_done  = 0;
	int budget = dp_budget;
	uint8_t tx_mask = int_ctx->tx_ring_mask;
	uint8_t rx_mask = int_ctx->rx_ring_mask;
	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
	uint32_t remaining_quota = dp_budget;

	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x\n",
			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
			 reo_status_mask,
			 int_ctx->rx_mon_ring_mask,
			 int_ctx->host2rxdma_ring_mask,
			 int_ctx->rxdma2host_ring_mask);

	/* Process Tx completion interrupts first to return back buffers */
	for (index = 0; index < soc->num_tx_comp_rings; index++) {
		if (!(1 << wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) & tx_mask))
			continue;
		work_done = dp_tx_comp_handler(int_ctx,
					       soc,
					       soc->tx_comp_ring[index].hal_srng,
					       index, remaining_quota);
		if (work_done) {
			intr_stats->num_tx_ring_masks[index]++;
			dp_verbose_debug("tx mask 0x%x index %d, budget %d, work_done %d",
					 tx_mask, index, budget,
					 work_done);
		}
		budget -= work_done;
		if (budget <= 0)
			goto budget_done;

		remaining_quota = budget;
	}

	/* Process REO Exception ring interrupt */
	if (rx_err_mask) {
		work_done = dp_rx_err_process(int_ctx, soc,
					      soc->reo_exception_ring.hal_srng,
					      remaining_quota);

		if (work_done) {
			intr_stats->num_rx_err_ring_masks++;
			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -=  work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx WBM release ring interrupt */
	if (rx_wbm_rel_mask) {
		work_done = dp_rx_wbm_err_process(int_ctx, soc,
						  soc->rx_rel_ring.hal_srng,
						  remaining_quota);

		if (work_done) {
			intr_stats->num_rx_wbm_rel_ring_masks++;
			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -=  work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx interrupts */
	if (rx_mask) {
		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
			if (!(rx_mask & (1 << ring)))
				continue;
			work_done = soc->arch_ops.dp_rx_process(int_ctx,
						  soc->reo_dest_ring[ring].hal_srng,
						  ring,
						  remaining_quota);
			if (work_done) {
				intr_stats->num_rx_ring_masks[ring]++;
				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
						 rx_mask, ring,
						 work_done, budget);
				budget -=  work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}
		}
	}

	/* REO status ring: command completions, not budget-accounted */
	if (reo_status_mask) {
		if (dp_reo_status_ring_handler(int_ctx, soc))
			int_ctx->intr_stats.num_reo_status_ring_masks++;
	}

	/* Service LMAC rings only when the monitor vdev timer is not
	 * already handling them.
	 */
	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
		if (work_done) {
			budget -=  work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}
	}

	qdf_lro_flush(int_ctx->lro_ctx);
	intr_stats->num_masks++;

budget_done:
	/* Report the amount of budget consumed in this invocation */
	return dp_budget - budget;
}
2693 
2694 #else /* QCA_HOST_MODE_WIFI_DISABLED */
2695 
2696 /*
2697  * dp_service_srngs() - Top level handler for DP Monitor Ring interrupts
2698  * @dp_ctx: DP SOC handle
2699  * @budget: Number of frames/descriptors that can be processed in one shot
2700  *
2701  * Return: remaining budget/quota for the soc device
2702  */
2703 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
2704 {
2705 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2706 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2707 	struct dp_soc *soc = int_ctx->soc;
2708 	uint32_t remaining_quota = dp_budget;
2709 	uint32_t work_done  = 0;
2710 	int budget = dp_budget;
2711 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
2712 
2713 	if (reo_status_mask) {
2714 		if (dp_reo_status_ring_handler(int_ctx, soc))
2715 			int_ctx->intr_stats.num_reo_status_ring_masks++;
2716 	}
2717 
2718 	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
2719 		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
2720 		if (work_done) {
2721 			budget -=  work_done;
2722 			if (budget <= 0)
2723 				goto budget_done;
2724 			remaining_quota = budget;
2725 		}
2726 	}
2727 
2728 	qdf_lro_flush(int_ctx->lro_ctx);
2729 	intr_stats->num_masks++;
2730 
2731 budget_done:
2732 	return dp_budget - budget;
2733 }
2734 
2735 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2736 
2737 /* dp_interrupt_timer()- timer poll for interrupts
2738  *
2739  * @arg: SoC Handle
2740  *
2741  * Return:
2742  *
2743  */
static void dp_interrupt_timer(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *) arg;
	/* NOTE(review): pdev_list[0] is dereferenced below before the
	 * cmn_init_done check — presumably pdev 0 always exists once this
	 * timer is armed; confirm against timer start path.
	 */
	struct dp_pdev *pdev = soc->pdev_list[0];
	enum timer_yield_status yield = DP_TIMER_NO_YIELD;
	uint32_t work_done  = 0, total_work_done = 0;
	int budget = 0xffff, i;
	uint32_t remaining_quota = budget;
	uint64_t start_time;
	uint32_t lmac_id = DP_MON_INVALID_LMAC_ID;
	uint8_t dp_intr_id = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
	uint32_t lmac_iter;
	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
	enum reg_wifi_band mon_band;

	/*
	 * this logic makes all data path interfacing rings (UMAC/LMAC)
	 * and Monitor rings polling mode when NSS offload is disabled
	 */
	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx) &&
	    !wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
		if (qdf_atomic_read(&soc->cmn_init_done)) {
			for (i = 0; i < wlan_cfg_get_num_contexts(
						soc->wlan_cfg_ctx); i++)
				dp_service_srngs(&soc->intr_ctx[i], 0xffff);

			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
		}
		return;
	}

	if (!qdf_atomic_read(&soc->cmn_init_done))
		return;

	/* Resolve which LMAC (and interrupt context) serves the current
	 * monitor channel band, if known.
	 */
	if (dp_monitor_is_chan_band_known(pdev)) {
		mon_band = dp_monitor_get_chan_band(pdev);
		lmac_id = pdev->ch_band_lmac_id_mapping[mon_band];
		if (qdf_likely(lmac_id != DP_MON_INVALID_LMAC_ID)) {
			dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id];
			dp_srng_record_timer_entry(soc, dp_intr_id);
		}
	}

	start_time = qdf_get_log_timestamp();
	dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);

	/* Keep servicing until the work or time budget is exhausted */
	while (yield == DP_TIMER_NO_YIELD) {
		for (lmac_iter = 0; lmac_iter < max_mac_rings; lmac_iter++) {
			if (lmac_iter == lmac_id)
				work_done = dp_monitor_process(soc,
						&soc->intr_ctx[dp_intr_id],
						lmac_iter, remaining_quota);
			else
				/* Non-monitor LMACs: drain and drop */
				work_done =
					dp_monitor_drop_packets_for_mac(pdev,
							     lmac_iter,
							     remaining_quota);
			if (work_done) {
				budget -=  work_done;
				if (budget <= 0) {
					yield = DP_TIMER_WORK_EXHAUST;
					goto budget_done;
				}
				remaining_quota = budget;
				total_work_done += work_done;
			}
		}

		yield = dp_should_timer_irq_yield(soc, total_work_done,
						  start_time);
		total_work_done = 0;
	}

budget_done:
	/* Re-arm quickly when there is clearly more work pending */
	if (yield == DP_TIMER_WORK_EXHAUST ||
	    yield == DP_TIMER_TIME_EXHAUST)
		qdf_timer_mod(&soc->int_timer, 1);
	else
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

	if (lmac_id != DP_MON_INVALID_LMAC_ID)
		dp_srng_record_timer_exit(soc, dp_intr_id);
}
2827 
2828 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
2829 static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
2830 					struct dp_intr *intr_ctx)
2831 {
2832 	if (intr_ctx->rx_mon_ring_mask)
2833 		return true;
2834 
2835 	return false;
2836 }
2837 #else
static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
					struct dp_intr *intr_ctx)
{
	/* No DP event history: monitor mask tracking is not needed */
	return false;
}
2843 #endif
2844 
2845 /*
2846  * dp_soc_attach_poll() - Register handlers for DP interrupts
2847  * @txrx_soc: DP SOC handle
2848  *
2849  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
2850  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
2851  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
2852  *
2853  * Return: 0 for success, nonzero for failure.
2854  */
2855 static QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc)
2856 {
2857 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2858 	int i;
2859 	int lmac_id = 0;
2860 
2861 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
2862 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
2863 	soc->intr_mode = DP_INTR_POLL;
2864 
2865 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
2866 		soc->intr_ctx[i].dp_intr_id = i;
2867 		soc->intr_ctx[i].tx_ring_mask =
2868 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
2869 		soc->intr_ctx[i].rx_ring_mask =
2870 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
2871 		soc->intr_ctx[i].rx_mon_ring_mask =
2872 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
2873 		soc->intr_ctx[i].rx_err_ring_mask =
2874 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
2875 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
2876 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
2877 		soc->intr_ctx[i].reo_status_ring_mask =
2878 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
2879 		soc->intr_ctx[i].rxdma2host_ring_mask =
2880 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
2881 		soc->intr_ctx[i].soc = soc;
2882 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
2883 
2884 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
2885 			hif_event_history_init(soc->hif_handle, i);
2886 			soc->mon_intr_id_lmac_map[lmac_id] = i;
2887 			lmac_id++;
2888 		}
2889 	}
2890 
2891 	qdf_timer_init(soc->osdev, &soc->int_timer,
2892 			dp_interrupt_timer, (void *)soc,
2893 			QDF_TIMER_TYPE_WAKE_APPS);
2894 
2895 	return QDF_STATUS_SUCCESS;
2896 }
2897 
2898 /**
2899  * dp_soc_set_interrupt_mode() - Set the interrupt mode in soc
2900  * soc: DP soc handle
2901  *
2902  * Set the appropriate interrupt mode flag in the soc
2903  */
2904 static void dp_soc_set_interrupt_mode(struct dp_soc *soc)
2905 {
2906 	uint32_t msi_base_data, msi_vector_start;
2907 	int msi_vector_count, ret;
2908 
2909 	soc->intr_mode = DP_INTR_INTEGRATED;
2910 
2911 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
2912 	    (dp_is_monitor_mode_using_poll(soc) &&
2913 	     soc->cdp_soc.ol_ops->get_con_mode &&
2914 	     soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)) {
2915 		soc->intr_mode = DP_INTR_POLL;
2916 	} else {
2917 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
2918 						  &msi_vector_count,
2919 						  &msi_base_data,
2920 						  &msi_vector_start);
2921 		if (ret)
2922 			return;
2923 
2924 		soc->intr_mode = DP_INTR_MSI;
2925 	}
2926 }
2927 
2928 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc);
2929 #if defined(DP_INTR_POLL_BOTH)
2930 /*
2931  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
2932  * @txrx_soc: DP SOC handle
2933  *
2934  * Call the appropriate attach function based on the mode of operation.
2935  * This is a WAR for enabling monitor mode.
2936  *
2937  * Return: 0 for success. nonzero for failure.
2938  */
2939 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
2940 {
2941 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2942 
2943 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
2944 	    (dp_is_monitor_mode_using_poll(soc) &&
2945 	     soc->cdp_soc.ol_ops->get_con_mode &&
2946 	     soc->cdp_soc.ol_ops->get_con_mode() ==
2947 	     QDF_GLOBAL_MONITOR_MODE)) {
2948 		dp_info("Poll mode");
2949 		return dp_soc_attach_poll(txrx_soc);
2950 	} else {
2951 		dp_info("Interrupt  mode");
2952 		return dp_soc_interrupt_attach(txrx_soc);
2953 	}
2954 }
2955 #else
2956 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
{
	/* Build configured for poll-based ring servicing only */
	return dp_soc_attach_poll(txrx_soc);
}
2961 #else
2962 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
2963 {
2964 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2965 
2966 	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx))
2967 		return dp_soc_attach_poll(txrx_soc);
2968 	else
2969 		return dp_soc_interrupt_attach(txrx_soc);
2970 }
2971 #endif
2972 #endif
2973 
#ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
/**
 * dp_soc_interrupt_map_calculate_wifi3_pci_legacy() -
 * Calculate interrupt map for legacy interrupts
 * @soc: DP soc handle
 * @intr_ctx_num: Interrupt context number
 * @irq_id_map: IRQ map
 * @num_irq_r: Number of interrupts assigned for this context
 *
 * Return: void
 */
static void dp_soc_interrupt_map_calculate_wifi3_pci_legacy(struct dp_soc *soc,
							    int intr_ctx_num,
							    int *irq_id_map,
							    int *num_irq_r)
{
	int j;
	int num_irq = 0;
	int tx_mask = wlan_cfg_get_tx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask = wlan_cfg_get_rx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	soc->intr_mode = DP_INTR_LEGACY_VIRTUAL_IRQ;
	/* For each ring instance enabled in a mask, the virtual IRQ id is
	 * derived by offsetting the group's base interrupt id by the ring
	 * instance number j.
	 */
	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
		if (tx_mask & (1 << j))
			irq_id_map[num_irq++] = (wbm2sw0_release - j);
		if (rx_mask & (1 << j))
			irq_id_map[num_irq++] = (reo2sw1_intr - j);
		if (rx_mon_mask & (1 << j))
			irq_id_map[num_irq++] = (rxmon2sw_p0_dest0 - j);
		if (rx_err_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (reo2sw0_intr - j);
		if (rx_wbm_rel_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (wbm2sw5_release - j);
		if (reo_status_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (reo_status - j);
		if (rxdma2host_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (rxdma2sw_dst_ring0 - j);
		if (host2rxdma_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (sw2rxdma_0 - j);
		if (host2rxdma_mon_ring_mask & (1 << j))
			irq_id_map[num_irq++] = (sw2rxmon_src_ring - j);
	}
	*num_irq_r = num_irq;
}
#else
/**
 * dp_soc_interrupt_map_calculate_wifi3_pci_legacy() -
 * Stub when legacy interrupt support is compiled out; assigns no IRQs.
 * @soc: DP soc handle
 * @intr_ctx_num: Interrupt context number
 * @irq_id_map: IRQ map
 * @num_irq_r: Number of interrupts assigned for this context (not written)
 *
 * Return: void
 */
static void dp_soc_interrupt_map_calculate_wifi3_pci_legacy(struct dp_soc *soc,
							    int intr_ctx_num,
							    int *irq_id_map,
							    int *num_irq_r)
{
}
#endif
3051 
3052 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
3053 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
3054 {
3055 	int j;
3056 	int num_irq = 0;
3057 
3058 	int tx_mask =
3059 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
3060 	int rx_mask =
3061 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
3062 	int rx_mon_mask =
3063 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
3064 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
3065 					soc->wlan_cfg_ctx, intr_ctx_num);
3066 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
3067 					soc->wlan_cfg_ctx, intr_ctx_num);
3068 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
3069 					soc->wlan_cfg_ctx, intr_ctx_num);
3070 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
3071 					soc->wlan_cfg_ctx, intr_ctx_num);
3072 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
3073 					soc->wlan_cfg_ctx, intr_ctx_num);
3074 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
3075 					soc->wlan_cfg_ctx, intr_ctx_num);
3076 
3077 	soc->intr_mode = DP_INTR_INTEGRATED;
3078 
3079 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
3080 
3081 		if (tx_mask & (1 << j)) {
3082 			irq_id_map[num_irq++] =
3083 				(wbm2host_tx_completions_ring1 - j);
3084 		}
3085 
3086 		if (rx_mask & (1 << j)) {
3087 			irq_id_map[num_irq++] =
3088 				(reo2host_destination_ring1 - j);
3089 		}
3090 
3091 		if (rxdma2host_ring_mask & (1 << j)) {
3092 			irq_id_map[num_irq++] =
3093 				rxdma2host_destination_ring_mac1 - j;
3094 		}
3095 
3096 		if (host2rxdma_ring_mask & (1 << j)) {
3097 			irq_id_map[num_irq++] =
3098 				host2rxdma_host_buf_ring_mac1 -	j;
3099 		}
3100 
3101 		if (host2rxdma_mon_ring_mask & (1 << j)) {
3102 			irq_id_map[num_irq++] =
3103 				host2rxdma_monitor_ring1 - j;
3104 		}
3105 
3106 		if (rx_mon_mask & (1 << j)) {
3107 			irq_id_map[num_irq++] =
3108 				ppdu_end_interrupts_mac1 - j;
3109 			irq_id_map[num_irq++] =
3110 				rxdma2host_monitor_status_ring_mac1 - j;
3111 			irq_id_map[num_irq++] =
3112 				rxdma2host_monitor_destination_mac1 - j;
3113 		}
3114 
3115 		if (rx_wbm_rel_ring_mask & (1 << j))
3116 			irq_id_map[num_irq++] = wbm2host_rx_release;
3117 
3118 		if (rx_err_ring_mask & (1 << j))
3119 			irq_id_map[num_irq++] = reo2host_exception;
3120 
3121 		if (reo_status_ring_mask & (1 << j))
3122 			irq_id_map[num_irq++] = reo2host_status;
3123 
3124 	}
3125 	*num_irq_r = num_irq;
3126 }
3127 
/**
 * dp_soc_interrupt_map_calculate_msi() - Calculate MSI interrupt map
 * @soc: DP soc handle
 * @intr_ctx_num: Interrupt context number
 * @irq_id_map: output array of IRQ ids serviced by this context
 * @num_irq_r: output count of entries written to @irq_id_map
 * @msi_vector_count: number of MSI vectors assigned to DP
 * @msi_vector_start: first MSI vector assigned to DP
 *
 * All rings serviced by one interrupt context share a single MSI vector;
 * contexts are distributed round-robin over the available vectors
 * (intr_ctx_num % msi_vector_count).
 *
 * Return: void
 */
static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
		int msi_vector_count, int msi_vector_start)
{
	int tx_mask = wlan_cfg_get_tx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask = wlan_cfg_get_rx_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int tx_mon_mask = wlan_cfg_get_tx_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_near_full_grp_1_mask =
		wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
						     intr_ctx_num);
	int rx_near_full_grp_2_mask =
		wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
						     intr_ctx_num);
	int tx_ring_near_full_mask =
		wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
						    intr_ctx_num);

	int host2txmon_ring_mask =
		wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx,
						  intr_ctx_num);
	unsigned int vector =
		(intr_ctx_num % msi_vector_count) + msi_vector_start;
	int num_irq = 0;

	soc->intr_mode = DP_INTR_MSI;

	/* Request the vector's IRQ only if at least one ring is serviced
	 * by this interrupt context.
	 */
	if (tx_mask | rx_mask | rx_mon_mask | tx_mon_mask | rx_err_ring_mask |
	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask |
	    host2rxdma_ring_mask | host2rxdma_mon_ring_mask |
	    rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
	    tx_ring_near_full_mask | host2txmon_ring_mask)
		irq_id_map[num_irq++] =
			pld_get_msi_irq(soc->osdev->dev, vector);

	*num_irq_r = num_irq;
}
3181 
3182 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
3183 				    int *irq_id_map, int *num_irq)
3184 {
3185 	int msi_vector_count, ret;
3186 	uint32_t msi_base_data, msi_vector_start;
3187 
3188 	if (pld_get_enable_intx(soc->osdev->dev)) {
3189 		return dp_soc_interrupt_map_calculate_wifi3_pci_legacy(soc,
3190 				intr_ctx_num, irq_id_map, num_irq);
3191 	}
3192 
3193 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
3194 					    &msi_vector_count,
3195 					    &msi_base_data,
3196 					    &msi_vector_start);
3197 	if (ret)
3198 		return dp_soc_interrupt_map_calculate_integrated(soc,
3199 				intr_ctx_num, irq_id_map, num_irq);
3200 
3201 	else
3202 		dp_soc_interrupt_map_calculate_msi(soc,
3203 				intr_ctx_num, irq_id_map, num_irq,
3204 				msi_vector_count, msi_vector_start);
3205 }
3206 
#ifdef WLAN_FEATURE_NEAR_FULL_IRQ
/**
 * dp_soc_near_full_interrupt_attach() - Register handler for DP near full irq
 * @soc: DP soc handle
 * @num_irq: number of IRQ ids in @irq_id_map
 * @irq_id_map: IRQ map
 * @intr_id: interrupt context ID
 *
 * Return: 0 for success. nonzero for failure.
 */
static inline int
dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
				  int irq_id_map[], int intr_id)
{
	return hif_register_ext_group(soc->hif_handle,
				      num_irq, irq_id_map,
				      dp_service_near_full_srngs,
				      &soc->intr_ctx[intr_id], "dp_nf_intr",
				      HIF_EXEC_NAPI_TYPE,
				      QCA_NAPI_DEF_SCALE_BIN_SHIFT);
}
#else
/* Stub when near-full interrupt support is compiled out; reports success */
static inline int
dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
				  int *irq_id_map, int intr_id)
{
	return 0;
}
#endif
3236 
3237 /*
3238  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
3239  * @txrx_soc: DP SOC handle
3240  *
3241  * Return: none
3242  */
3243 static void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
3244 {
3245 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3246 	int i;
3247 
3248 	if (soc->intr_mode == DP_INTR_POLL) {
3249 		qdf_timer_free(&soc->int_timer);
3250 	} else {
3251 		hif_deconfigure_ext_group_interrupts(soc->hif_handle);
3252 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
3253 		hif_deregister_exec_group(soc->hif_handle, "dp_nf_intr");
3254 	}
3255 
3256 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
3257 		soc->intr_ctx[i].tx_ring_mask = 0;
3258 		soc->intr_ctx[i].rx_ring_mask = 0;
3259 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
3260 		soc->intr_ctx[i].rx_err_ring_mask = 0;
3261 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
3262 		soc->intr_ctx[i].reo_status_ring_mask = 0;
3263 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
3264 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
3265 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
3266 		soc->intr_ctx[i].rx_near_full_grp_1_mask = 0;
3267 		soc->intr_ctx[i].rx_near_full_grp_2_mask = 0;
3268 		soc->intr_ctx[i].tx_ring_near_full_mask = 0;
3269 		soc->intr_ctx[i].tx_mon_ring_mask = 0;
3270 		soc->intr_ctx[i].host2txmon_ring_mask = 0;
3271 		soc->intr_ctx[i].umac_reset_intr_mask = 0;
3272 
3273 		hif_event_history_deinit(soc->hif_handle, i);
3274 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
3275 	}
3276 
3277 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
3278 		    sizeof(soc->mon_intr_id_lmac_map),
3279 		    DP_MON_INVALID_LMAC_ID);
3280 }
3281 
3282 /*
3283  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
3284  * @txrx_soc: DP SOC handle
3285  *
3286  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
3287  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
3288  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
3289  *
3290  * Return: 0 for success. nonzero for failure.
3291  */
3292 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
3293 {
3294 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3295 
3296 	int i = 0;
3297 	int num_irq = 0;
3298 	int rx_err_ring_intr_ctxt_id = HIF_MAX_GROUP;
3299 	int lmac_id = 0;
3300 	int napi_scale;
3301 
3302 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
3303 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
3304 
3305 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
3306 		int ret = 0;
3307 
3308 		/* Map of IRQ ids registered with one interrupt context */
3309 		int irq_id_map[HIF_MAX_GRP_IRQ];
3310 
3311 		int tx_mask =
3312 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
3313 		int rx_mask =
3314 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
3315 		int rx_mon_mask =
3316 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
3317 		int tx_mon_ring_mask =
3318 			wlan_cfg_get_tx_mon_ring_mask(soc->wlan_cfg_ctx, i);
3319 		int rx_err_ring_mask =
3320 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
3321 		int rx_wbm_rel_ring_mask =
3322 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
3323 		int reo_status_ring_mask =
3324 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
3325 		int rxdma2host_ring_mask =
3326 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
3327 		int host2rxdma_ring_mask =
3328 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
3329 		int host2rxdma_mon_ring_mask =
3330 			wlan_cfg_get_host2rxdma_mon_ring_mask(
3331 				soc->wlan_cfg_ctx, i);
3332 		int rx_near_full_grp_1_mask =
3333 			wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
3334 							     i);
3335 		int rx_near_full_grp_2_mask =
3336 			wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
3337 							     i);
3338 		int tx_ring_near_full_mask =
3339 			wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
3340 							    i);
3341 		int host2txmon_ring_mask =
3342 			wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx, i);
3343 		int umac_reset_intr_mask =
3344 			wlan_cfg_get_umac_reset_intr_mask(soc->wlan_cfg_ctx, i);
3345 
3346 		soc->intr_ctx[i].dp_intr_id = i;
3347 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
3348 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
3349 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
3350 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
3351 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
3352 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
3353 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
3354 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
3355 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
3356 			 host2rxdma_mon_ring_mask;
3357 		soc->intr_ctx[i].rx_near_full_grp_1_mask =
3358 						rx_near_full_grp_1_mask;
3359 		soc->intr_ctx[i].rx_near_full_grp_2_mask =
3360 						rx_near_full_grp_2_mask;
3361 		soc->intr_ctx[i].tx_ring_near_full_mask =
3362 						tx_ring_near_full_mask;
3363 		soc->intr_ctx[i].tx_mon_ring_mask = tx_mon_ring_mask;
3364 		soc->intr_ctx[i].host2txmon_ring_mask = host2txmon_ring_mask;
3365 		soc->intr_ctx[i].umac_reset_intr_mask = umac_reset_intr_mask;
3366 
3367 		soc->intr_ctx[i].soc = soc;
3368 
3369 		num_irq = 0;
3370 
3371 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
3372 					       &num_irq);
3373 
3374 		if (rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
3375 		    tx_ring_near_full_mask) {
3376 			dp_soc_near_full_interrupt_attach(soc, num_irq,
3377 							  irq_id_map, i);
3378 		} else {
3379 			napi_scale = wlan_cfg_get_napi_scale_factor(
3380 							    soc->wlan_cfg_ctx);
3381 			if (!napi_scale)
3382 				napi_scale = QCA_NAPI_DEF_SCALE_BIN_SHIFT;
3383 
3384 			ret = hif_register_ext_group(soc->hif_handle,
3385 				num_irq, irq_id_map, dp_service_srngs,
3386 				&soc->intr_ctx[i], "dp_intr",
3387 				HIF_EXEC_NAPI_TYPE, napi_scale);
3388 		}
3389 
3390 		dp_debug(" int ctx %u num_irq %u irq_id_map %u %u",
3391 			 i, num_irq, irq_id_map[0], irq_id_map[1]);
3392 
3393 		if (ret) {
3394 			dp_init_err("%pK: failed, ret = %d", soc, ret);
3395 			dp_soc_interrupt_detach(txrx_soc);
3396 			return QDF_STATUS_E_FAILURE;
3397 		}
3398 
3399 		hif_event_history_init(soc->hif_handle, i);
3400 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
3401 
3402 		if (rx_err_ring_mask)
3403 			rx_err_ring_intr_ctxt_id = i;
3404 
3405 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
3406 			soc->mon_intr_id_lmac_map[lmac_id] = i;
3407 			lmac_id++;
3408 		}
3409 	}
3410 
3411 	hif_configure_ext_group_interrupts(soc->hif_handle);
3412 	if (rx_err_ring_intr_ctxt_id != HIF_MAX_GROUP)
3413 		hif_config_irq_clear_cpu_affinity(soc->hif_handle,
3414 						  rx_err_ring_intr_ctxt_id, 0);
3415 
3416 	return QDF_STATUS_SUCCESS;
3417 }
3418 
3419 #define AVG_MAX_MPDUS_PER_TID 128
3420 #define AVG_TIDS_PER_CLIENT 2
3421 #define AVG_FLOWS_PER_TID 2
3422 #define AVG_MSDUS_PER_FLOW 128
3423 #define AVG_MSDUS_PER_MPDU 4
3424 
3425 /*
3426  * dp_hw_link_desc_pool_banks_free() - Free h/w link desc pool banks
3427  * @soc: DP SOC handle
3428  * @mac_id: mac id
3429  *
3430  * Return: none
3431  */
3432 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id)
3433 {
3434 	struct qdf_mem_multi_page_t *pages;
3435 
3436 	if (mac_id != WLAN_INVALID_PDEV_ID) {
3437 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3438 	} else {
3439 		pages = &soc->link_desc_pages;
3440 	}
3441 
3442 	if (!pages) {
3443 		dp_err("can not get link desc pages");
3444 		QDF_ASSERT(0);
3445 		return;
3446 	}
3447 
3448 	if (pages->dma_pages) {
3449 		wlan_minidump_remove((void *)
3450 				     pages->dma_pages->page_v_addr_start,
3451 				     pages->num_pages * pages->page_size,
3452 				     soc->ctrl_psoc,
3453 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3454 				     "hw_link_desc_bank");
3455 		dp_desc_multi_pages_mem_free(soc, DP_HW_LINK_DESC_TYPE,
3456 					     pages, 0, false);
3457 	}
3458 }
3459 
3460 qdf_export_symbol(dp_hw_link_desc_pool_banks_free);
3461 
3462 /*
3463  * dp_hw_link_desc_pool_banks_alloc() - Allocate h/w link desc pool banks
3464  * @soc: DP SOC handle
3465  * @mac_id: mac id
3466  *
3467  * Allocates memory pages for link descriptors, the page size is 4K for
3468  * MCL and 2MB for WIN. if the mac_id is invalid link descriptor pages are
3469  * allocated for regular RX/TX and if the there is a proper mac_id link
3470  * descriptors are allocated for RX monitor mode.
3471  *
3472  * Return: QDF_STATUS_SUCCESS: Success
3473  *	   QDF_STATUS_E_FAILURE: Failure
3474  */
3475 QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc, uint32_t mac_id)
3476 {
3477 	hal_soc_handle_t hal_soc = soc->hal_soc;
3478 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
3479 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
3480 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
3481 	uint32_t num_mpdus_per_link_desc = hal_num_mpdus_per_link_desc(hal_soc);
3482 	uint32_t num_msdus_per_link_desc = hal_num_msdus_per_link_desc(hal_soc);
3483 	uint32_t num_mpdu_links_per_queue_desc =
3484 		hal_num_mpdu_links_per_queue_desc(hal_soc);
3485 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
3486 	uint32_t *total_link_descs, total_mem_size;
3487 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
3488 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
3489 	uint32_t num_entries;
3490 	struct qdf_mem_multi_page_t *pages;
3491 	struct dp_srng *dp_srng;
3492 	uint8_t minidump_str[MINIDUMP_STR_SIZE];
3493 
3494 	/* Only Tx queue descriptors are allocated from common link descriptor
3495 	 * pool Rx queue descriptors are not included in this because (REO queue
3496 	 * extension descriptors) they are expected to be allocated contiguously
3497 	 * with REO queue descriptors
3498 	 */
3499 	if (mac_id != WLAN_INVALID_PDEV_ID) {
3500 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3501 		/* dp_monitor_get_link_desc_pages returns NULL only
3502 		 * if monitor SOC is  NULL
3503 		 */
3504 		if (!pages) {
3505 			dp_err("can not get link desc pages");
3506 			QDF_ASSERT(0);
3507 			return QDF_STATUS_E_FAULT;
3508 		}
3509 		dp_srng = &soc->rxdma_mon_desc_ring[mac_id];
3510 		num_entries = dp_srng->alloc_size /
3511 			hal_srng_get_entrysize(soc->hal_soc,
3512 					       RXDMA_MONITOR_DESC);
3513 		total_link_descs = dp_monitor_get_total_link_descs(soc, mac_id);
3514 		qdf_str_lcopy(minidump_str, "mon_link_desc_bank",
3515 			      MINIDUMP_STR_SIZE);
3516 	} else {
3517 		num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3518 			AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
3519 
3520 		num_mpdu_queue_descs = num_mpdu_link_descs /
3521 			num_mpdu_links_per_queue_desc;
3522 
3523 		num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3524 			AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
3525 			num_msdus_per_link_desc;
3526 
3527 		num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3528 			AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
3529 
3530 		num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
3531 			num_tx_msdu_link_descs + num_rx_msdu_link_descs;
3532 
3533 		pages = &soc->link_desc_pages;
3534 		total_link_descs = &soc->total_link_descs;
3535 		qdf_str_lcopy(minidump_str, "link_desc_bank",
3536 			      MINIDUMP_STR_SIZE);
3537 	}
3538 
3539 	/* If link descriptor banks are allocated, return from here */
3540 	if (pages->num_pages)
3541 		return QDF_STATUS_SUCCESS;
3542 
3543 	/* Round up to power of 2 */
3544 	*total_link_descs = 1;
3545 	while (*total_link_descs < num_entries)
3546 		*total_link_descs <<= 1;
3547 
3548 	dp_init_info("%pK: total_link_descs: %u, link_desc_size: %d",
3549 		     soc, *total_link_descs, link_desc_size);
3550 	total_mem_size =  *total_link_descs * link_desc_size;
3551 	total_mem_size += link_desc_align;
3552 
3553 	dp_init_info("%pK: total_mem_size: %d",
3554 		     soc, total_mem_size);
3555 
3556 	dp_set_max_page_size(pages, max_alloc_size);
3557 	dp_desc_multi_pages_mem_alloc(soc, DP_HW_LINK_DESC_TYPE,
3558 				      pages,
3559 				      link_desc_size,
3560 				      *total_link_descs,
3561 				      0, false);
3562 	if (!pages->num_pages) {
3563 		dp_err("Multi page alloc fail for hw link desc pool");
3564 		return QDF_STATUS_E_FAULT;
3565 	}
3566 
3567 	wlan_minidump_log(pages->dma_pages->page_v_addr_start,
3568 			  pages->num_pages * pages->page_size,
3569 			  soc->ctrl_psoc,
3570 			  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3571 			  "hw_link_desc_bank");
3572 
3573 	return QDF_STATUS_SUCCESS;
3574 }
3575 
3576 /*
3577  * dp_hw_link_desc_ring_free() - Free h/w link desc rings
3578  * @soc: DP SOC handle
3579  *
3580  * Return: none
3581  */
3582 static void dp_hw_link_desc_ring_free(struct dp_soc *soc)
3583 {
3584 	uint32_t i;
3585 	uint32_t size = soc->wbm_idle_scatter_buf_size;
3586 	void *vaddr = soc->wbm_idle_link_ring.base_vaddr_unaligned;
3587 	qdf_dma_addr_t paddr;
3588 
3589 	if (soc->wbm_idle_scatter_buf_base_vaddr[0]) {
3590 		for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
3591 			vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
3592 			paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
3593 			if (vaddr) {
3594 				qdf_mem_free_consistent(soc->osdev,
3595 							soc->osdev->dev,
3596 							size,
3597 							vaddr,
3598 							paddr,
3599 							0);
3600 				vaddr = NULL;
3601 			}
3602 		}
3603 	} else {
3604 		wlan_minidump_remove(soc->wbm_idle_link_ring.base_vaddr_unaligned,
3605 				     soc->wbm_idle_link_ring.alloc_size,
3606 				     soc->ctrl_psoc,
3607 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3608 				     "wbm_idle_link_ring");
3609 		dp_srng_free(soc, &soc->wbm_idle_link_ring);
3610 	}
3611 }
3612 
3613 /*
3614  * dp_hw_link_desc_ring_alloc() - Allocate hw link desc rings
3615  * @soc: DP SOC handle
3616  *
3617  * Allocate memory for WBM_IDLE_LINK srng ring if the number of
3618  * link descriptors is less then the max_allocated size. else
3619  * allocate memory for wbm_idle_scatter_buffer.
3620  *
3621  * Return: QDF_STATUS_SUCCESS: success
3622  *         QDF_STATUS_E_NO_MEM: No memory (Failure)
3623  */
3624 static QDF_STATUS dp_hw_link_desc_ring_alloc(struct dp_soc *soc)
3625 {
3626 	uint32_t entry_size, i;
3627 	uint32_t total_mem_size;
3628 	qdf_dma_addr_t *baseaddr = NULL;
3629 	struct dp_srng *dp_srng;
3630 	uint32_t ring_type;
3631 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
3632 	uint32_t tlds;
3633 
3634 	ring_type = WBM_IDLE_LINK;
3635 	dp_srng = &soc->wbm_idle_link_ring;
3636 	tlds = soc->total_link_descs;
3637 
3638 	entry_size = hal_srng_get_entrysize(soc->hal_soc, ring_type);
3639 	total_mem_size = entry_size * tlds;
3640 
3641 	if (total_mem_size <= max_alloc_size) {
3642 		if (dp_srng_alloc(soc, dp_srng, ring_type, tlds, 0)) {
3643 			dp_init_err("%pK: Link desc idle ring setup failed",
3644 				    soc);
3645 			goto fail;
3646 		}
3647 
3648 		wlan_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
3649 				  soc->wbm_idle_link_ring.alloc_size,
3650 				  soc->ctrl_psoc,
3651 				  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3652 				  "wbm_idle_link_ring");
3653 	} else {
3654 		uint32_t num_scatter_bufs;
3655 		uint32_t num_entries_per_buf;
3656 		uint32_t buf_size = 0;
3657 
3658 		soc->wbm_idle_scatter_buf_size =
3659 			hal_idle_list_scatter_buf_size(soc->hal_soc);
3660 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
3661 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
3662 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
3663 					soc->hal_soc, total_mem_size,
3664 					soc->wbm_idle_scatter_buf_size);
3665 
3666 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
3667 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3668 				  FL("scatter bufs size out of bounds"));
3669 			goto fail;
3670 		}
3671 
3672 		for (i = 0; i < num_scatter_bufs; i++) {
3673 			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
3674 			buf_size = soc->wbm_idle_scatter_buf_size;
3675 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
3676 				qdf_mem_alloc_consistent(soc->osdev,
3677 							 soc->osdev->dev,
3678 							 buf_size,
3679 							 baseaddr);
3680 
3681 			if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
3682 				QDF_TRACE(QDF_MODULE_ID_DP,
3683 					  QDF_TRACE_LEVEL_ERROR,
3684 					  FL("Scatter lst memory alloc fail"));
3685 				goto fail;
3686 			}
3687 		}
3688 		soc->num_scatter_bufs = num_scatter_bufs;
3689 	}
3690 	return QDF_STATUS_SUCCESS;
3691 
3692 fail:
3693 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
3694 		void *vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
3695 		qdf_dma_addr_t paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
3696 
3697 		if (vaddr) {
3698 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
3699 						soc->wbm_idle_scatter_buf_size,
3700 						vaddr,
3701 						paddr, 0);
3702 			vaddr = NULL;
3703 		}
3704 	}
3705 	return QDF_STATUS_E_NOMEM;
3706 }
3707 
3708 qdf_export_symbol(dp_hw_link_desc_pool_banks_alloc);
3709 
3710 /*
3711  * dp_hw_link_desc_ring_init() - Initialize hw link desc rings
3712  * @soc: DP SOC handle
3713  *
3714  * Return: QDF_STATUS_SUCCESS: success
3715  *         QDF_STATUS_E_FAILURE: failure
3716  */
3717 static QDF_STATUS dp_hw_link_desc_ring_init(struct dp_soc *soc)
3718 {
3719 	struct dp_srng *dp_srng = &soc->wbm_idle_link_ring;
3720 
3721 	if (dp_srng->base_vaddr_unaligned) {
3722 		if (dp_srng_init(soc, dp_srng, WBM_IDLE_LINK, 0, 0))
3723 			return QDF_STATUS_E_FAILURE;
3724 	}
3725 	return QDF_STATUS_SUCCESS;
3726 }
3727 
3728 /*
3729  * dp_hw_link_desc_ring_deinit() - Reset hw link desc rings
3730  * @soc: DP SOC handle
3731  *
3732  * Return: None
3733  */
3734 static void dp_hw_link_desc_ring_deinit(struct dp_soc *soc)
3735 {
3736 	dp_srng_deinit(soc, &soc->wbm_idle_link_ring, WBM_IDLE_LINK, 0);
3737 }
3738 
3739 /*
3740  * dp_hw_link_desc_ring_replenish() - Replenish hw link desc rings
3741  * @soc: DP SOC handle
3742  * @mac_id: mac id
3743  *
3744  * Return: None
3745  */
3746 void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id)
3747 {
3748 	uint32_t cookie = 0;
3749 	uint32_t page_idx = 0;
3750 	struct qdf_mem_multi_page_t *pages;
3751 	struct qdf_mem_dma_page_t *dma_pages;
3752 	uint32_t offset = 0;
3753 	uint32_t count = 0;
3754 	uint32_t desc_id = 0;
3755 	void *desc_srng;
3756 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
3757 	uint32_t *total_link_descs_addr;
3758 	uint32_t total_link_descs;
3759 	uint32_t scatter_buf_num;
3760 	uint32_t num_entries_per_buf = 0;
3761 	uint32_t rem_entries;
3762 	uint32_t num_descs_per_page;
3763 	uint32_t num_scatter_bufs = 0;
3764 	uint8_t *scatter_buf_ptr;
3765 	void *desc;
3766 
3767 	num_scatter_bufs = soc->num_scatter_bufs;
3768 
3769 	if (mac_id == WLAN_INVALID_PDEV_ID) {
3770 		pages = &soc->link_desc_pages;
3771 		total_link_descs = soc->total_link_descs;
3772 		desc_srng = soc->wbm_idle_link_ring.hal_srng;
3773 	} else {
3774 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3775 		/* dp_monitor_get_link_desc_pages returns NULL only
3776 		 * if monitor SOC is  NULL
3777 		 */
3778 		if (!pages) {
3779 			dp_err("can not get link desc pages");
3780 			QDF_ASSERT(0);
3781 			return;
3782 		}
3783 		total_link_descs_addr =
3784 				dp_monitor_get_total_link_descs(soc, mac_id);
3785 		total_link_descs = *total_link_descs_addr;
3786 		desc_srng = soc->rxdma_mon_desc_ring[mac_id].hal_srng;
3787 	}
3788 
3789 	dma_pages = pages->dma_pages;
3790 	do {
3791 		qdf_mem_zero(dma_pages[page_idx].page_v_addr_start,
3792 			     pages->page_size);
3793 		page_idx++;
3794 	} while (page_idx < pages->num_pages);
3795 
3796 	if (desc_srng) {
3797 		hal_srng_access_start_unlocked(soc->hal_soc, desc_srng);
3798 		page_idx = 0;
3799 		count = 0;
3800 		offset = 0;
3801 		pages = &soc->link_desc_pages;
3802 		while ((desc = hal_srng_src_get_next(soc->hal_soc,
3803 						     desc_srng)) &&
3804 			(count < total_link_descs)) {
3805 			page_idx = count / pages->num_element_per_page;
3806 			if (desc_id == pages->num_element_per_page)
3807 				desc_id = 0;
3808 
3809 			offset = count % pages->num_element_per_page;
3810 			cookie = LINK_DESC_COOKIE(desc_id, page_idx,
3811 						  soc->link_desc_id_start);
3812 
3813 			hal_set_link_desc_addr(soc->hal_soc, desc, cookie,
3814 					       dma_pages[page_idx].page_p_addr
3815 					       + (offset * link_desc_size),
3816 					       soc->idle_link_bm_id);
3817 			count++;
3818 			desc_id++;
3819 		}
3820 		hal_srng_access_end_unlocked(soc->hal_soc, desc_srng);
3821 	} else {
3822 		/* Populate idle list scatter buffers with link descriptor
3823 		 * pointers
3824 		 */
3825 		scatter_buf_num = 0;
3826 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
3827 					soc->hal_soc,
3828 					soc->wbm_idle_scatter_buf_size);
3829 
3830 		scatter_buf_ptr = (uint8_t *)(
3831 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
3832 		rem_entries = num_entries_per_buf;
3833 		pages = &soc->link_desc_pages;
3834 		page_idx = 0; count = 0;
3835 		offset = 0;
3836 		num_descs_per_page = pages->num_element_per_page;
3837 
3838 		while (count < total_link_descs) {
3839 			page_idx = count / num_descs_per_page;
3840 			offset = count % num_descs_per_page;
3841 			if (desc_id == pages->num_element_per_page)
3842 				desc_id = 0;
3843 
3844 			cookie = LINK_DESC_COOKIE(desc_id, page_idx,
3845 						  soc->link_desc_id_start);
3846 			hal_set_link_desc_addr(soc->hal_soc,
3847 					       (void *)scatter_buf_ptr,
3848 					       cookie,
3849 					       dma_pages[page_idx].page_p_addr +
3850 					       (offset * link_desc_size),
3851 					       soc->idle_link_bm_id);
3852 			rem_entries--;
3853 			if (rem_entries) {
3854 				scatter_buf_ptr += link_desc_size;
3855 			} else {
3856 				rem_entries = num_entries_per_buf;
3857 				scatter_buf_num++;
3858 				if (scatter_buf_num >= num_scatter_bufs)
3859 					break;
3860 				scatter_buf_ptr = (uint8_t *)
3861 					(soc->wbm_idle_scatter_buf_base_vaddr[
3862 					 scatter_buf_num]);
3863 			}
3864 			count++;
3865 			desc_id++;
3866 		}
3867 		/* Setup link descriptor idle list in HW */
3868 		hal_setup_link_idle_list(soc->hal_soc,
3869 			soc->wbm_idle_scatter_buf_base_paddr,
3870 			soc->wbm_idle_scatter_buf_base_vaddr,
3871 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
3872 			(uint32_t)(scatter_buf_ptr -
3873 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
3874 			scatter_buf_num-1])), total_link_descs);
3875 	}
3876 }
3877 
3878 qdf_export_symbol(dp_link_desc_ring_replenish);
3879 
3880 #ifdef IPA_OFFLOAD
3881 #define USE_1_IPA_RX_REO_RING 1
3882 #define USE_2_IPA_RX_REO_RINGS 2
3883 #define REO_DST_RING_SIZE_QCA6290 1023
3884 #ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
3885 #define REO_DST_RING_SIZE_QCA8074 1023
3886 #define REO_DST_RING_SIZE_QCN9000 2048
3887 #else
3888 #define REO_DST_RING_SIZE_QCA8074 8
3889 #define REO_DST_RING_SIZE_QCN9000 8
3890 #endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
3891 
3892 #ifdef IPA_WDI3_TX_TWO_PIPES
3893 #ifdef DP_MEMORY_OPT
/* dp_ipa_init_alt_tx_ring() - init the IPA alternate TX TCL/WBM ring pair */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return dp_init_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

/* dp_ipa_deinit_alt_tx_ring() - deinit the IPA alternate TX TCL/WBM ring pair */
static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
	dp_deinit_tx_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

/* dp_ipa_alloc_alt_tx_ring() - allocate the IPA alternate TX TCL/WBM ring pair */
static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return dp_alloc_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}

/* dp_ipa_free_alt_tx_ring() - free the IPA alternate TX TCL/WBM ring pair */
static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
	dp_free_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
}
3913 
3914 #else /* !DP_MEMORY_OPT */
/*
 * DP_MEMORY_OPT is not defined: the IPA alternate TX ring pair is not
 * managed here, so these hooks are no-ops that report success.
 */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	/* Fix: the original "return 0" was missing its terminating
	 * semicolon, breaking the build whenever IPA_WDI3_TX_TWO_PIPES
	 * is enabled without DP_MEMORY_OPT.
	 */
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}
3932 #endif /* DP_MEMORY_OPT */
3933 
/* Initialize the HAL/HW state of the IPA alternate TCL data ring */
static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
	hal_tx_init_data_ring(soc->hal_soc,
			      soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng);
}
3939 
3940 #else /* !IPA_WDI3_TX_TWO_PIPES */
/*
 * IPA_WDI3_TX_TWO_PIPES is not defined: there is no alternate IPA TX
 * ring, so all alternate-ring hooks collapse to successful no-ops.
 */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}

static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
}
3962 
3963 #endif /* IPA_WDI3_TX_TWO_PIPES */
3964 
3965 #else
3966 
3967 #define REO_DST_RING_SIZE_QCA6290 1024
3968 
/*
 * IPA_OFFLOAD is not defined: no IPA rings exist at all, so every
 * alternate-ring hook is a successful no-op.
 */
static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
{
}

static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
{
	return 0;
}

static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
{
}

static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
{
}
3990 
3991 #endif /* IPA_OFFLOAD */
3992 
3993 /*
3994  * dp_soc_reset_ring_map() - Reset cpu ring map
3995  * @soc: Datapath soc handler
3996  *
3997  * This api resets the default cpu ring map
3998  */
3999 
4000 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
4001 {
4002 	uint8_t i;
4003 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4004 
4005 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
4006 		switch (nss_config) {
4007 		case dp_nss_cfg_first_radio:
4008 			/*
4009 			 * Setting Tx ring map for one nss offloaded radio
4010 			 */
4011 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
4012 			break;
4013 
4014 		case dp_nss_cfg_second_radio:
4015 			/*
4016 			 * Setting Tx ring for two nss offloaded radios
4017 			 */
4018 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
4019 			break;
4020 
4021 		case dp_nss_cfg_dbdc:
4022 			/*
4023 			 * Setting Tx ring map for 2 nss offloaded radios
4024 			 */
4025 			soc->tx_ring_map[i] =
4026 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
4027 			break;
4028 
4029 		case dp_nss_cfg_dbtc:
4030 			/*
4031 			 * Setting Tx ring map for 3 nss offloaded radios
4032 			 */
4033 			soc->tx_ring_map[i] =
4034 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
4035 			break;
4036 
4037 		default:
4038 			dp_err("tx_ring_map failed due to invalid nss cfg");
4039 			break;
4040 		}
4041 	}
4042 }
4043 
4044 /*
4045  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
4046  * @dp_soc - DP soc handle
4047  * @ring_type - ring type
4048  * @ring_num - ring_num
4049  *
4050  * return 0 or 1
4051  */
4052 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
4053 {
4054 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4055 	uint8_t status = 0;
4056 
4057 	switch (ring_type) {
4058 	case WBM2SW_RELEASE:
4059 	case REO_DST:
4060 	case RXDMA_BUF:
4061 	case REO_EXCEPTION:
4062 		status = ((nss_config) & (1 << ring_num));
4063 		break;
4064 	default:
4065 		break;
4066 	}
4067 
4068 	return status;
4069 }
4070 
4071 /*
4072  * dp_soc_disable_unused_mac_intr_mask() - reset interrupt mask for
4073  *					  unused WMAC hw rings
4074  * @dp_soc - DP Soc handle
4075  * @mac_num - wmac num
4076  *
4077  * Return: Return void
4078  */
4079 static void dp_soc_disable_unused_mac_intr_mask(struct dp_soc *soc,
4080 						int mac_num)
4081 {
4082 	uint8_t *grp_mask = NULL;
4083 	int group_number;
4084 
4085 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
4086 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4087 	wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
4088 					  group_number, 0x0);
4089 
4090 	grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
4091 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4092 	wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
4093 				      group_number, 0x0);
4094 
4095 	grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
4096 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4097 	wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
4098 					  group_number, 0x0);
4099 
4100 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
4101 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
4102 	wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
4103 					      group_number, 0x0);
4104 }
4105 
4106 /*
4107  * dp_soc_reset_intr_mask() - reset interrupt mask
4108  * @dp_soc - DP Soc handle
4109  *
4110  * Return: Return void
4111  */
4112 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
4113 {
4114 	uint8_t j;
4115 	uint8_t *grp_mask = NULL;
4116 	int group_number, mask, num_ring;
4117 
4118 	/* number of tx ring */
4119 	num_ring = soc->num_tcl_data_rings;
4120 
4121 	/*
4122 	 * group mask for tx completion  ring.
4123 	 */
4124 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
4125 
4126 	/* loop and reset the mask for only offloaded ring */
4127 	for (j = 0; j < WLAN_CFG_NUM_TCL_DATA_RINGS; j++) {
4128 		/*
4129 		 * Group number corresponding to tx offloaded ring.
4130 		 */
4131 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
4132 		if (group_number < 0) {
4133 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4134 				      soc, WBM2SW_RELEASE, j);
4135 			continue;
4136 		}
4137 
4138 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
4139 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j) &&
4140 		    (!mask)) {
4141 			continue;
4142 		}
4143 
4144 		/* reset the tx mask for offloaded ring */
4145 		mask &= (~(1 << j));
4146 
4147 		/*
4148 		 * reset the interrupt mask for offloaded ring.
4149 		 */
4150 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
4151 	}
4152 
4153 	/* number of rx rings */
4154 	num_ring = soc->num_reo_dest_rings;
4155 
4156 	/*
4157 	 * group mask for reo destination ring.
4158 	 */
4159 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
4160 
4161 	/* loop and reset the mask for only offloaded ring */
4162 	for (j = 0; j < WLAN_CFG_NUM_REO_DEST_RING; j++) {
4163 		/*
4164 		 * Group number corresponding to rx offloaded ring.
4165 		 */
4166 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
4167 		if (group_number < 0) {
4168 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4169 				      soc, REO_DST, j);
4170 			continue;
4171 		}
4172 
4173 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
4174 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j) &&
4175 		    (!mask)) {
4176 			continue;
4177 		}
4178 
4179 		/* reset the interrupt mask for offloaded ring */
4180 		mask &= (~(1 << j));
4181 
4182 		/*
4183 		 * set the interrupt mask to zero for rx offloaded radio.
4184 		 */
4185 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
4186 	}
4187 
4188 	/*
4189 	 * group mask for Rx buffer refill ring
4190 	 */
4191 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
4192 
4193 	/* loop and reset the mask for only offloaded ring */
4194 	for (j = 0; j < MAX_PDEV_CNT; j++) {
4195 		int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
4196 
4197 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
4198 			continue;
4199 		}
4200 
4201 		/*
4202 		 * Group number corresponding to rx offloaded ring.
4203 		 */
4204 		group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask);
4205 		if (group_number < 0) {
4206 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4207 				      soc, REO_DST, lmac_id);
4208 			continue;
4209 		}
4210 
4211 		/* set the interrupt mask for offloaded ring */
4212 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
4213 				group_number);
4214 		mask &= (~(1 << lmac_id));
4215 
4216 		/*
4217 		 * set the interrupt mask to zero for rx offloaded radio.
4218 		 */
4219 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
4220 			group_number, mask);
4221 	}
4222 
4223 	grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
4224 
4225 	for (j = 0; j < num_ring; j++) {
4226 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_EXCEPTION, j)) {
4227 			continue;
4228 		}
4229 
4230 		/*
4231 		 * Group number corresponding to rx err ring.
4232 		 */
4233 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
4234 		if (group_number < 0) {
4235 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4236 				      soc, REO_EXCEPTION, j);
4237 			continue;
4238 		}
4239 
4240 		wlan_cfg_set_rx_err_ring_mask(soc->wlan_cfg_ctx,
4241 					      group_number, 0);
4242 	}
4243 }
4244 
4245 #ifdef IPA_OFFLOAD
4246 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap0,
4247 			 uint32_t *remap1, uint32_t *remap2)
4248 {
4249 	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX] = {
4250 				REO_REMAP_SW1, REO_REMAP_SW2, REO_REMAP_SW3,
4251 				REO_REMAP_SW5, REO_REMAP_SW6, REO_REMAP_SW7};
4252 
4253 	switch (soc->arch_id) {
4254 	case CDP_ARCH_TYPE_BE:
4255 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4256 					      soc->num_reo_dest_rings -
4257 					      USE_2_IPA_RX_REO_RINGS, remap1,
4258 					      remap2);
4259 		break;
4260 
4261 	case CDP_ARCH_TYPE_LI:
4262 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4263 					      soc->num_reo_dest_rings -
4264 					      USE_1_IPA_RX_REO_RING, remap1,
4265 					      remap2);
4266 		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);
4267 		break;
4268 	default:
4269 		dp_err("unkonwn arch_id 0x%x", soc->arch_id);
4270 		QDF_BUG(0);
4271 
4272 	}
4273 
4274 	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
4275 
4276 	return true;
4277 }
4278 
4279 #ifdef IPA_WDI3_TX_TWO_PIPES
/* True when @index is the IPA alternate TX (TCL data) ring index */
static bool dp_ipa_is_alt_tx_ring(int index)
{
	return index == IPA_TX_ALT_RING_IDX;
}

/* True when @index is the IPA alternate TX completion (WBM) ring index */
static bool dp_ipa_is_alt_tx_comp_ring(int index)
{
	return index == IPA_TX_ALT_COMP_RING_IDX;
}
4289 #else /* !IPA_WDI3_TX_TWO_PIPES */
/*
 * Without IPA_WDI3_TX_TWO_PIPES there is no alternate IPA TX ring,
 * so no index can ever match one.
 */
static bool dp_ipa_is_alt_tx_ring(int index)
{
	(void)index;

	return false;
}

static bool dp_ipa_is_alt_tx_comp_ring(int index)
{
	(void)index;

	return false;
}
4299 #endif /* IPA_WDI3_TX_TWO_PIPES */
4300 
4301 /**
4302  * dp_ipa_get_tx_ring_size() - Get Tx ring size for IPA
4303  *
4304  * @tx_ring_num: Tx ring number
4305  * @tx_ipa_ring_sz: Return param only updated for IPA.
4306  * @soc_cfg_ctx: dp soc cfg context
4307  *
4308  * Return: None
4309  */
4310 static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz,
4311 				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4312 {
4313 	if (!soc_cfg_ctx->ipa_enabled)
4314 		return;
4315 
4316 	if (tx_ring_num == IPA_TCL_DATA_RING_IDX)
4317 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_ring_size(soc_cfg_ctx);
4318 	else if (dp_ipa_is_alt_tx_ring(tx_ring_num))
4319 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_alt_ring_size(soc_cfg_ctx);
4320 }
4321 
4322 /**
4323  * dp_ipa_get_tx_comp_ring_size() - Get Tx comp ring size for IPA
4324  *
4325  * @tx_comp_ring_num: Tx comp ring number
4326  * @tx_comp_ipa_ring_sz: Return param only updated for IPA.
4327  * @soc_cfg_ctx: dp soc cfg context
4328  *
4329  * Return: None
4330  */
4331 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
4332 					 int *tx_comp_ipa_ring_sz,
4333 				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4334 {
4335 	if (!soc_cfg_ctx->ipa_enabled)
4336 		return;
4337 
4338 	if (tx_comp_ring_num == IPA_TCL_DATA_RING_IDX)
4339 		*tx_comp_ipa_ring_sz =
4340 				wlan_cfg_ipa_tx_comp_ring_size(soc_cfg_ctx);
4341 	else if (dp_ipa_is_alt_tx_comp_ring(tx_comp_ring_num))
4342 		*tx_comp_ipa_ring_sz =
4343 				wlan_cfg_ipa_tx_alt_comp_ring_size(soc_cfg_ctx);
4344 }
4345 #else
4346 static uint8_t dp_reo_ring_selection(uint32_t value, uint32_t *ring)
4347 {
4348 	uint8_t num = 0;
4349 
4350 	switch (value) {
4351 	/* should we have all the different possible ring configs */
4352 	case 0xFF:
4353 		num = 8;
4354 		ring[0] = REO_REMAP_SW1;
4355 		ring[1] = REO_REMAP_SW2;
4356 		ring[2] = REO_REMAP_SW3;
4357 		ring[3] = REO_REMAP_SW4;
4358 		ring[4] = REO_REMAP_SW5;
4359 		ring[5] = REO_REMAP_SW6;
4360 		ring[6] = REO_REMAP_SW7;
4361 		ring[7] = REO_REMAP_SW8;
4362 		break;
4363 
4364 	case 0x3F:
4365 		num = 6;
4366 		ring[0] = REO_REMAP_SW1;
4367 		ring[1] = REO_REMAP_SW2;
4368 		ring[2] = REO_REMAP_SW3;
4369 		ring[3] = REO_REMAP_SW4;
4370 		ring[4] = REO_REMAP_SW5;
4371 		ring[5] = REO_REMAP_SW6;
4372 		break;
4373 
4374 	case 0xF:
4375 		num = 4;
4376 		ring[0] = REO_REMAP_SW1;
4377 		ring[1] = REO_REMAP_SW2;
4378 		ring[2] = REO_REMAP_SW3;
4379 		ring[3] = REO_REMAP_SW4;
4380 		break;
4381 	case 0xE:
4382 		num = 3;
4383 		ring[0] = REO_REMAP_SW2;
4384 		ring[1] = REO_REMAP_SW3;
4385 		ring[2] = REO_REMAP_SW4;
4386 		break;
4387 	case 0xD:
4388 		num = 3;
4389 		ring[0] = REO_REMAP_SW1;
4390 		ring[1] = REO_REMAP_SW3;
4391 		ring[2] = REO_REMAP_SW4;
4392 		break;
4393 	case 0xC:
4394 		num = 2;
4395 		ring[0] = REO_REMAP_SW3;
4396 		ring[1] = REO_REMAP_SW4;
4397 		break;
4398 	case 0xB:
4399 		num = 3;
4400 		ring[0] = REO_REMAP_SW1;
4401 		ring[1] = REO_REMAP_SW2;
4402 		ring[2] = REO_REMAP_SW4;
4403 		break;
4404 	case 0xA:
4405 		num = 2;
4406 		ring[0] = REO_REMAP_SW2;
4407 		ring[1] = REO_REMAP_SW4;
4408 		break;
4409 	case 0x9:
4410 		num = 2;
4411 		ring[0] = REO_REMAP_SW1;
4412 		ring[1] = REO_REMAP_SW4;
4413 		break;
4414 	case 0x8:
4415 		num = 1;
4416 		ring[0] = REO_REMAP_SW4;
4417 		break;
4418 	case 0x7:
4419 		num = 3;
4420 		ring[0] = REO_REMAP_SW1;
4421 		ring[1] = REO_REMAP_SW2;
4422 		ring[2] = REO_REMAP_SW3;
4423 		break;
4424 	case 0x6:
4425 		num = 2;
4426 		ring[0] = REO_REMAP_SW2;
4427 		ring[1] = REO_REMAP_SW3;
4428 		break;
4429 	case 0x5:
4430 		num = 2;
4431 		ring[0] = REO_REMAP_SW1;
4432 		ring[1] = REO_REMAP_SW3;
4433 		break;
4434 	case 0x4:
4435 		num = 1;
4436 		ring[0] = REO_REMAP_SW3;
4437 		break;
4438 	case 0x3:
4439 		num = 2;
4440 		ring[0] = REO_REMAP_SW1;
4441 		ring[1] = REO_REMAP_SW2;
4442 		break;
4443 	case 0x2:
4444 		num = 1;
4445 		ring[0] = REO_REMAP_SW2;
4446 		break;
4447 	case 0x1:
4448 		num = 1;
4449 		ring[0] = REO_REMAP_SW1;
4450 		break;
4451 	default:
4452 		dp_err("unkonwn reo ring map 0x%x", value);
4453 		QDF_BUG(0);
4454 	}
4455 	return num;
4456 }
4457 
/**
 * dp_reo_remap_config() - compute REO remap register values (non-IPA)
 * @soc: DP SoC handle
 * @remap0: output for the IX0 remap value
 * @remap1: output for the IX2 remap value
 * @remap2: output for the IX3 remap value
 *
 * Masks the configured REO ring bitmap according to the NSS offload
 * mode before computing the remap values.
 *
 * Return: true when remap values were computed, false when both/all
 *	   radios are NSS offloaded (host REO remap not applicable)
 */
bool dp_reo_remap_config(struct dp_soc *soc,
			 uint32_t *remap0,
			 uint32_t *remap1,
			 uint32_t *remap2)
{
	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
	uint32_t reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
	uint8_t target_type, num;
	uint32_t ring[WLAN_CFG_NUM_REO_DEST_RING_MAX];
	uint32_t value;

	/* NOTE(review): target_type is computed but never used here */
	target_type = hal_get_target_type(soc->hal_soc);

	/*
	 * NOTE(review): there is no default case — an offload_radio value
	 * outside the enum falls through with *remap0/1/2 untouched and
	 * still returns true; confirm callers initialize the outputs.
	 */
	switch (offload_radio) {
	case dp_nss_cfg_default:
		value = reo_config & WLAN_CFG_NUM_REO_RINGS_MAP_MAX;
		num = dp_reo_ring_selection(value, ring);
		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
					      num, remap1, remap2);
		hal_compute_reo_remap_ix0(soc->hal_soc, remap0);

		break;
	case dp_nss_cfg_first_radio:
		/* Exclude SW1 (bit 0), owned by the offloaded first radio */
		value = reo_config & 0xE;
		num = dp_reo_ring_selection(value, ring);
		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
					      num, remap1, remap2);

		break;
	case dp_nss_cfg_second_radio:
		/* Exclude SW2 (bit 1), owned by the offloaded second radio */
		value = reo_config & 0xD;
		num = dp_reo_ring_selection(value, ring);
		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
					      num, remap1, remap2);

		break;
	case dp_nss_cfg_dbdc:
	case dp_nss_cfg_dbtc:
		/* return false if both or all are offloaded to NSS */
		return false;

	}

	dp_debug("remap1 %x remap2 %x offload_radio %u",
		 *remap1, *remap2, offload_radio);
	return true;
}
4505 
/* IPA_OFFLOAD disabled: ring sizes are never overridden for IPA */
static void dp_ipa_get_tx_ring_size(int ring_num, int *tx_ipa_ring_sz,
				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}

static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
					 int *tx_comp_ipa_ring_sz,
				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
{
}
4516 #endif /* IPA_OFFLOAD */
4517 
4518 /*
4519  * dp_reo_frag_dst_set() - configure reo register to set the
4520  *                        fragment destination ring
4521  * @soc : Datapath soc
4522  * @frag_dst_ring : output parameter to set fragment destination ring
4523  *
4524  * Based on offload_radio below fragment destination rings is selected
4525  * 0 - TCL
4526  * 1 - SW1
4527  * 2 - SW2
4528  * 3 - SW3
4529  * 4 - SW4
4530  * 5 - Release
4531  * 6 - FW
4532  * 7 - alternate select
4533  *
4534  * return: void
4535  */
4536 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
4537 {
4538 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4539 
4540 	switch (offload_radio) {
4541 	case dp_nss_cfg_default:
4542 		*frag_dst_ring = REO_REMAP_TCL;
4543 		break;
4544 	case dp_nss_cfg_first_radio:
4545 		/*
4546 		 * This configuration is valid for single band radio which
4547 		 * is also NSS offload.
4548 		 */
4549 	case dp_nss_cfg_dbdc:
4550 	case dp_nss_cfg_dbtc:
4551 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
4552 		break;
4553 	default:
4554 		dp_init_err("%pK: dp_reo_frag_dst_set invalid offload radio config", soc);
4555 		break;
4556 	}
4557 }
4558 
4559 #ifdef ENABLE_VERBOSE_DEBUG
4560 static void dp_enable_verbose_debug(struct dp_soc *soc)
4561 {
4562 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
4563 
4564 	soc_cfg_ctx = soc->wlan_cfg_ctx;
4565 
4566 	if (soc_cfg_ctx->per_pkt_trace & dp_verbose_debug_mask)
4567 		is_dp_verbose_debug_enabled = true;
4568 
4569 	if (soc_cfg_ctx->per_pkt_trace & hal_verbose_debug_mask)
4570 		hal_set_verbose_debug(true);
4571 	else
4572 		hal_set_verbose_debug(false);
4573 }
4574 #else
/* Verbose per-packet debug compiled out (ENABLE_VERBOSE_DEBUG unset) */
static void dp_enable_verbose_debug(struct dp_soc *soc)
{
}
4578 #endif
4579 
4580 #ifdef WLAN_FEATURE_STATS_EXT
/* Create the event used to wait for the rx_hw_stats response */
static inline void dp_create_ext_stats_event(struct dp_soc *soc)
{
	qdf_event_create(&soc->rx_hw_stats_event);
}
4585 #else
/* WLAN_FEATURE_STATS_EXT disabled: no rx_hw_stats event is needed */
static inline void dp_create_ext_stats_event(struct dp_soc *soc)
{
}
4589 #endif
4590 
/**
 * dp_deinit_tx_pair_by_index() - deinit the tcl data / wbm completion
 * ring pair for the given index, removing their minidump entries first
 * @soc: DP soc pointer
 * @index: index into soc->tcl_data_ring / soc->tx_comp_ring
 *
 * Return: void
 */
static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index)
{
	int tcl_ring_num, wbm_ring_num;

	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
						index,
						&tcl_ring_num,
						&wbm_ring_num);

	if (tcl_ring_num == -1) {
		dp_err("incorrect tcl ring num for index %u", index);
		return;
	}

	wlan_minidump_remove(soc->tcl_data_ring[index].base_vaddr_unaligned,
			     soc->tcl_data_ring[index].alloc_size,
			     soc->ctrl_psoc,
			     WLAN_MD_DP_SRNG_TCL_DATA,
			     "tcl_data_ring");
	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
	dp_srng_deinit(soc, &soc->tcl_data_ring[index], TCL_DATA,
		       tcl_ring_num);

	/* No completion ring exists for this index (e.g. FW-owned) */
	if (wbm_ring_num == INVALID_WBM_RING_NUM)
		return;

	wlan_minidump_remove(soc->tx_comp_ring[index].base_vaddr_unaligned,
			     soc->tx_comp_ring[index].alloc_size,
			     soc->ctrl_psoc,
			     WLAN_MD_DP_SRNG_TX_COMP,
			     "tcl_comp_ring");
	dp_srng_deinit(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
		       wbm_ring_num);
}
4625 
4626 /**
4627  * dp_init_tx_ring_pair_by_index() - The function inits tcl data/wbm completion
4628  * ring pair
4629  * @soc: DP soc pointer
4630  * @index: index of soc->tcl_data or soc->tx_comp to initialize
4631  *
4632  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
4633  */
4634 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
4635 						uint8_t index)
4636 {
4637 	int tcl_ring_num, wbm_ring_num;
4638 	uint8_t bm_id;
4639 
4640 	if (index >= MAX_TCL_DATA_RINGS) {
4641 		dp_err("unexpected index!");
4642 		QDF_BUG(0);
4643 		goto fail1;
4644 	}
4645 
4646 	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
4647 						index,
4648 						&tcl_ring_num,
4649 						&wbm_ring_num);
4650 
4651 	if (tcl_ring_num == -1) {
4652 		dp_err("incorrect tcl ring num for index %u", index);
4653 		goto fail1;
4654 	}
4655 
4656 	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
4657 	if (dp_srng_init(soc, &soc->tcl_data_ring[index], TCL_DATA,
4658 			 tcl_ring_num, 0)) {
4659 		dp_err("dp_srng_init failed for tcl_data_ring");
4660 		goto fail1;
4661 	}
4662 	wlan_minidump_log(soc->tcl_data_ring[index].base_vaddr_unaligned,
4663 			  soc->tcl_data_ring[index].alloc_size,
4664 			  soc->ctrl_psoc,
4665 			  WLAN_MD_DP_SRNG_TCL_DATA,
4666 			  "tcl_data_ring");
4667 
4668 	if (wbm_ring_num == INVALID_WBM_RING_NUM)
4669 		goto set_rbm;
4670 
4671 	if (dp_srng_init(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4672 			 wbm_ring_num, 0)) {
4673 		dp_err("dp_srng_init failed for tx_comp_ring");
4674 		goto fail1;
4675 	}
4676 
4677 	wlan_minidump_log(soc->tx_comp_ring[index].base_vaddr_unaligned,
4678 			  soc->tx_comp_ring[index].alloc_size,
4679 			  soc->ctrl_psoc,
4680 			  WLAN_MD_DP_SRNG_TX_COMP,
4681 			  "tcl_comp_ring");
4682 set_rbm:
4683 	bm_id = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_ring_num);
4684 
4685 	soc->arch_ops.tx_implicit_rbm_set(soc, tcl_ring_num, bm_id);
4686 
4687 	return QDF_STATUS_SUCCESS;
4688 
4689 fail1:
4690 	return QDF_STATUS_E_FAILURE;
4691 }
4692 
/* dp_free_tx_ring_pair_by_index() - free the memory of the tcl data /
 * wbm completion ring pair for the given index
 */
static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index)
{
	dp_debug("index %u", index);
	dp_srng_free(soc, &soc->tcl_data_ring[index]);
	dp_srng_free(soc, &soc->tx_comp_ring[index]);
}
4699 
4700 /**
4701  * dp_alloc_tx_ring_pair_by_index() - The function allocs tcl data/wbm2sw
4702  * ring pair for the given "index"
4703  * @soc: DP soc pointer
4704  * @index: index of soc->tcl_data or soc->tx_comp to initialize
4705  *
4706  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
4707  */
4708 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
4709 						 uint8_t index)
4710 {
4711 	int tx_ring_size;
4712 	int tx_comp_ring_size;
4713 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
4714 	int cached = 0;
4715 
4716 	if (index >= MAX_TCL_DATA_RINGS) {
4717 		dp_err("unexpected index!");
4718 		QDF_BUG(0);
4719 		goto fail1;
4720 	}
4721 
4722 	dp_debug("index %u", index);
4723 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
4724 	dp_ipa_get_tx_ring_size(index, &tx_ring_size, soc_cfg_ctx);
4725 
4726 	if (dp_srng_alloc(soc, &soc->tcl_data_ring[index], TCL_DATA,
4727 			  tx_ring_size, cached)) {
4728 		dp_err("dp_srng_alloc failed for tcl_data_ring");
4729 		goto fail1;
4730 	}
4731 
4732 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
4733 	dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size, soc_cfg_ctx);
4734 	/* Enable cached TCL desc if NSS offload is disabled */
4735 	if (!wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
4736 		cached = WLAN_CFG_DST_RING_CACHED_DESC;
4737 
4738 	if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) ==
4739 	    INVALID_WBM_RING_NUM)
4740 		return QDF_STATUS_SUCCESS;
4741 
4742 	if (dp_srng_alloc(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4743 			  tx_comp_ring_size, cached)) {
4744 		dp_err("dp_srng_alloc failed for tx_comp_ring");
4745 		goto fail1;
4746 	}
4747 
4748 	return QDF_STATUS_SUCCESS;
4749 
4750 fail1:
4751 	return QDF_STATUS_E_FAILURE;
4752 }
4753 
/**
 * dp_lro_hash_setup() - build the LRO/RX-hash configuration and send it
 * to the target via the ol_ops lro_hash_config callback
 * @soc: DP soc handle
 * @pdev: DP pdev handle (supplies pdev_id for the FW message)
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE when all
 * of LRO/GRO/RX-hash are disabled or the callback is missing, otherwise
 * the status returned by the callback.
 */
static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct cdp_lro_hash_config lro_hash;
	QDF_STATUS status;

	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		dp_err("LRO, GRO and RX hash disabled");
		return QDF_STATUS_E_FAILURE;
	}

	qdf_mem_zero(&lro_hash, sizeof(lro_hash));

	/* LRO/GRO: flow-match on ACK-only TCP flags */
	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
		lro_hash.lro_enable = 1;
		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
	}

	/* Arch-specific toeplitz hash keys (ipv4/ipv6 seeds) */
	soc->arch_ops.get_rx_hash_key(soc, &lro_hash);

	/* Assert in debug builds, but also fail gracefully in production */
	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);

	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
		QDF_BUG(0);
		dp_err("lro_hash_config not configured");
		return QDF_STATUS_E_FAILURE;
	}

	status = soc->cdp_soc.ol_ops->lro_hash_config(soc->ctrl_psoc,
						      pdev->pdev_id,
						      &lro_hash);
	if (!QDF_IS_STATUS_SUCCESS(status)) {
		dp_err("failed to send lro_hash_config to FW %u", status);
		return status;
	}

	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
		lro_hash.lro_enable, lro_hash.tcp_flag,
		lro_hash.tcp_flag_mask);

	dp_info("toeplitz_hash_ipv4:");
	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			   lro_hash.toeplitz_hash_ipv4,
			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
			   LRO_IPV4_SEED_ARR_SZ));

	dp_info("toeplitz_hash_ipv6:");
	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			   lro_hash.toeplitz_hash_ipv6,
			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
			   LRO_IPV6_SEED_ARR_SZ));

	return status;
}
4813 
4814 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
4815 /*
4816  * dp_reap_timer_init() - initialize the reap timer
4817  * @soc: data path SoC handle
4818  *
4819  * Return: void
4820  */
4821 static void dp_reap_timer_init(struct dp_soc *soc)
4822 {
4823 	/*
4824 	 * Timer to reap rxdma status rings.
4825 	 * Needed until we enable ppdu end interrupts
4826 	 */
4827 	dp_monitor_reap_timer_init(soc);
4828 	dp_monitor_vdev_timer_init(soc);
4829 }
4830 
4831 /*
4832  * dp_reap_timer_deinit() - de-initialize the reap timer
4833  * @soc: data path SoC handle
4834  *
4835  * Return: void
4836  */
4837 static void dp_reap_timer_deinit(struct dp_soc *soc)
4838 {
4839 	dp_monitor_reap_timer_deinit(soc);
4840 }
4841 #else
4842 /* WIN use case */
4843 static void dp_reap_timer_init(struct dp_soc *soc)
4844 {
4845 	/* Configure LMAC rings in Polled mode */
4846 	if (soc->lmac_polled_mode) {
4847 		/*
4848 		 * Timer to reap lmac rings.
4849 		 */
4850 		qdf_timer_init(soc->osdev, &soc->lmac_reap_timer,
4851 			       dp_service_lmac_rings, (void *)soc,
4852 			       QDF_TIMER_TYPE_WAKE_APPS);
4853 		soc->lmac_timer_init = 1;
4854 		qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
4855 	}
4856 }
4857 
4858 static void dp_reap_timer_deinit(struct dp_soc *soc)
4859 {
4860 	if (soc->lmac_timer_init) {
4861 		qdf_timer_stop(&soc->lmac_reap_timer);
4862 		qdf_timer_free(&soc->lmac_reap_timer);
4863 		soc->lmac_timer_init = 0;
4864 	}
4865 }
4866 #endif
4867 
4868 #ifdef QCA_HOST2FW_RXBUF_RING
4869 /*
4870  * dp_rxdma_ring_alloc() - allocate the RXDMA rings
4871  * @soc: data path SoC handle
4872  * @pdev: Physical device handle
4873  *
4874  * Return: 0 - success, > 0 - failure
4875  */
4876 static int dp_rxdma_ring_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
4877 {
4878 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
4879 	int max_mac_rings;
4880 	int i;
4881 	int ring_size;
4882 
4883 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
4884 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
4885 	ring_size =  wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx);
4886 
4887 	for (i = 0; i < max_mac_rings; i++) {
4888 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
4889 		if (dp_srng_alloc(soc, &pdev->rx_mac_buf_ring[i],
4890 				  RXDMA_BUF, ring_size, 0)) {
4891 			dp_init_err("%pK: failed rx mac ring setup", soc);
4892 			return QDF_STATUS_E_FAILURE;
4893 		}
4894 	}
4895 	return QDF_STATUS_SUCCESS;
4896 }
4897 
4898 /*
4899  * dp_rxdma_ring_setup() - configure the RXDMA rings
4900  * @soc: data path SoC handle
4901  * @pdev: Physical device handle
4902  *
4903  * Return: 0 - success, > 0 - failure
4904  */
4905 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
4906 {
4907 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
4908 	int max_mac_rings;
4909 	int i;
4910 
4911 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
4912 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
4913 
4914 	for (i = 0; i < max_mac_rings; i++) {
4915 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
4916 		if (dp_srng_init(soc, &pdev->rx_mac_buf_ring[i],
4917 				 RXDMA_BUF, 1, i)) {
4918 			dp_init_err("%pK: failed rx mac ring setup", soc);
4919 			return QDF_STATUS_E_FAILURE;
4920 		}
4921 	}
4922 	return QDF_STATUS_SUCCESS;
4923 }
4924 
4925 /*
4926  * dp_rxdma_ring_cleanup() - Deinit the RXDMA rings and reap timer
4927  * @soc: data path SoC handle
4928  * @pdev: Physical device handle
4929  *
4930  * Return: void
4931  */
4932 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
4933 {
4934 	int i;
4935 
4936 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
4937 		dp_srng_deinit(soc, &pdev->rx_mac_buf_ring[i], RXDMA_BUF, 1);
4938 
4939 	dp_reap_timer_deinit(soc);
4940 }
4941 
4942 /*
4943  * dp_rxdma_ring_free() - Free the RXDMA rings
4944  * @pdev: Physical device handle
4945  *
4946  * Return: void
4947  */
4948 static void dp_rxdma_ring_free(struct dp_pdev *pdev)
4949 {
4950 	int i;
4951 
4952 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
4953 		dp_srng_free(pdev->soc, &pdev->rx_mac_buf_ring[i]);
4954 }
4955 
4956 #else
/* QCA_HOST2FW_RXBUF_RING disabled: per-mac rx buffer rings are unused */
static int dp_rxdma_ring_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

/* Still stops the reap/vdev timers started by dp_reap_timer_init */
static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	dp_reap_timer_deinit(soc);
}

static void dp_rxdma_ring_free(struct dp_pdev *pdev)
{
}
4975 #endif
4976 
4977 /**
4978  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
4979  * @pdev - DP_PDEV handle
4980  *
4981  * Return: void
4982  */
4983 static inline void
4984 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
4985 {
4986 	uint8_t map_id;
4987 	struct dp_soc *soc = pdev->soc;
4988 
4989 	if (!soc)
4990 		return;
4991 
4992 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
4993 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
4994 			     default_dscp_tid_map,
4995 			     sizeof(default_dscp_tid_map));
4996 	}
4997 
4998 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
4999 		hal_tx_set_dscp_tid_map(soc->hal_soc,
5000 					default_dscp_tid_map,
5001 					map_id);
5002 	}
5003 }
5004 
5005 /**
5006  * dp_pcp_tid_map_setup(): Initialize the pcp-tid maps
5007  * @pdev - DP_PDEV handle
5008  *
5009  * Return: void
5010  */
5011 static inline void
5012 dp_pcp_tid_map_setup(struct dp_pdev *pdev)
5013 {
5014 	struct dp_soc *soc = pdev->soc;
5015 
5016 	if (!soc)
5017 		return;
5018 
5019 	qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
5020 		     sizeof(default_pcp_tid_map));
5021 	hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
5022 }
5023 
5024 #ifdef IPA_OFFLOAD
5025 /**
5026  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
5027  * @soc: data path instance
5028  * @pdev: core txrx pdev context
5029  *
5030  * Return: QDF_STATUS_SUCCESS: success
5031  *         QDF_STATUS_E_RESOURCES: Error return
5032  */
5033 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5034 					   struct dp_pdev *pdev)
5035 {
5036 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5037 	int entries;
5038 
5039 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5040 		soc_cfg_ctx = soc->wlan_cfg_ctx;
5041 		entries =
5042 			wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
5043 
5044 		/* Setup second Rx refill buffer ring */
5045 		if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
5046 				  entries, 0)) {
5047 			dp_init_err("%pK: dp_srng_alloc failed second"
5048 				    "rx refill ring", soc);
5049 			return QDF_STATUS_E_FAILURE;
5050 		}
5051 	}
5052 
5053 	return QDF_STATUS_SUCCESS;
5054 }
5055 
5056 /**
5057  * dp_init_ipa_rx_refill_buf_ring - Init second Rx refill buffer ring
5058  * @soc: data path instance
5059  * @pdev: core txrx pdev context
5060  *
5061  * Return: QDF_STATUS_SUCCESS: success
5062  *         QDF_STATUS_E_RESOURCES: Error return
5063  */
5064 static int dp_init_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5065 					  struct dp_pdev *pdev)
5066 {
5067 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5068 		if (dp_srng_init(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
5069 				 IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id)) {
5070 			dp_init_err("%pK: dp_srng_init failed second"
5071 				    "rx refill ring", soc);
5072 			return QDF_STATUS_E_FAILURE;
5073 		}
5074 	}
5075 	return QDF_STATUS_SUCCESS;
5076 }
5077 
5078 /**
5079  * dp_deinit_ipa_rx_refill_buf_ring - deinit second Rx refill buffer ring
5080  * @soc: data path instance
5081  * @pdev: core txrx pdev context
5082  *
5083  * Return: void
5084  */
5085 static void dp_deinit_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5086 					     struct dp_pdev *pdev)
5087 {
5088 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
5089 		dp_srng_deinit(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 0);
5090 }
5091 
5092 /**
5093  * dp_free_ipa_rx_refill_buf_ring - free second Rx refill buffer ring
5094  * @soc: data path instance
5095  * @pdev: core txrx pdev context
5096  *
5097  * Return: void
5098  */
5099 static void dp_free_ipa_rx_refill_buf_ring(struct dp_soc *soc,
5100 					   struct dp_pdev *pdev)
5101 {
5102 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
5103 		dp_srng_free(soc, &pdev->rx_refill_buf_ring2);
5104 }
#else
/* IPA_OFFLOAD disabled: the second Rx refill ring is never created. */
static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					   struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static int dp_init_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					  struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_deinit_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					     struct dp_pdev *pdev)
{
}

static void dp_free_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					   struct dp_pdev *pdev)
{
}
#endif
5128 
5129 #ifdef DP_TX_HW_DESC_HISTORY
5130 /**
5131  * dp_soc_tx_hw_desc_history_attach - Attach TX HW descriptor history
5132  *
5133  * @soc: DP soc handle
5134  *
5135  * Return: None
5136  */
5137 static void dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc)
5138 {
5139 	soc->tx_hw_desc_history = dp_context_alloc_mem(
5140 			soc, DP_TX_HW_DESC_HIST_TYPE,
5141 			sizeof(*soc->tx_hw_desc_history));
5142 	if (soc->tx_hw_desc_history)
5143 		soc->tx_hw_desc_history->index = 0;
5144 }
5145 
/* Free the TX HW descriptor history allocated by the attach routine. */
static void dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc)
{
	dp_context_free_mem(soc, DP_TX_HW_DESC_HIST_TYPE,
			    soc->tx_hw_desc_history);
}
5151 
#else /* DP_TX_HW_DESC_HISTORY */
/* History not compiled in: attach/detach are no-ops. */
static inline void
dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc)
{
}

static inline void
dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc)
{
}
#endif /* DP_TX_HW_DESC_HISTORY */
5163 
5164 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
5165 #ifndef RX_DEFRAG_DO_NOT_REINJECT
5166 /**
5167  * dp_soc_rx_reinject_ring_history_attach - Attach the reo reinject ring
5168  *					    history.
5169  * @soc: DP soc handle
5170  *
5171  * Return: None
5172  */
5173 static void dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
5174 {
5175 	soc->rx_reinject_ring_history =
5176 		dp_context_alloc_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
5177 				     sizeof(struct dp_rx_reinject_history));
5178 	if (soc->rx_reinject_ring_history)
5179 		qdf_atomic_init(&soc->rx_reinject_ring_history->index);
5180 }
#else /* RX_DEFRAG_DO_NOT_REINJECT */
/* Reinjection disabled at build time: no reinject history is kept. */
static inline void
dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
{
}
#endif /* RX_DEFRAG_DO_NOT_REINJECT */
5187 
5188 /**
5189  * dp_soc_rx_history_attach() - Attach the ring history record buffers
5190  * @soc: DP soc structure
5191  *
5192  * This function allocates the memory for recording the rx ring, rx error
5193  * ring and the reinject ring entries. There is no error returned in case
5194  * of allocation failure since the record function checks if the history is
5195  * initialized or not. We do not want to fail the driver load in case of
5196  * failure to allocate memory for debug history.
5197  *
5198  * Returns: None
5199  */
5200 static void dp_soc_rx_history_attach(struct dp_soc *soc)
5201 {
5202 	int i;
5203 	uint32_t rx_ring_hist_size;
5204 	uint32_t rx_refill_ring_hist_size;
5205 
5206 	rx_ring_hist_size = sizeof(*soc->rx_ring_history[0]);
5207 	rx_refill_ring_hist_size = sizeof(*soc->rx_refill_ring_history[0]);
5208 
5209 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
5210 		soc->rx_ring_history[i] = dp_context_alloc_mem(
5211 				soc, DP_RX_RING_HIST_TYPE, rx_ring_hist_size);
5212 		if (soc->rx_ring_history[i])
5213 			qdf_atomic_init(&soc->rx_ring_history[i]->index);
5214 	}
5215 
5216 	soc->rx_err_ring_history = dp_context_alloc_mem(
5217 			soc, DP_RX_ERR_RING_HIST_TYPE, rx_ring_hist_size);
5218 	if (soc->rx_err_ring_history)
5219 		qdf_atomic_init(&soc->rx_err_ring_history->index);
5220 
5221 	dp_soc_rx_reinject_ring_history_attach(soc);
5222 
5223 	for (i = 0; i < MAX_PDEV_CNT; i++) {
5224 		soc->rx_refill_ring_history[i] = dp_context_alloc_mem(
5225 						soc,
5226 						DP_RX_REFILL_RING_HIST_TYPE,
5227 						rx_refill_ring_hist_size);
5228 
5229 		if (soc->rx_refill_ring_history[i])
5230 			qdf_atomic_init(&soc->rx_refill_ring_history[i]->index);
5231 	}
5232 }
5233 
5234 static void dp_soc_rx_history_detach(struct dp_soc *soc)
5235 {
5236 	int i;
5237 
5238 	for (i = 0; i < MAX_REO_DEST_RINGS; i++)
5239 		dp_context_free_mem(soc, DP_RX_RING_HIST_TYPE,
5240 				    soc->rx_ring_history[i]);
5241 
5242 	dp_context_free_mem(soc, DP_RX_ERR_RING_HIST_TYPE,
5243 			    soc->rx_err_ring_history);
5244 
5245 	/*
5246 	 * No need for a featurized detach since qdf_mem_free takes
5247 	 * care of NULL pointer.
5248 	 */
5249 	dp_context_free_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
5250 			    soc->rx_reinject_ring_history);
5251 
5252 	for (i = 0; i < MAX_PDEV_CNT; i++)
5253 		dp_context_free_mem(soc, DP_RX_REFILL_RING_HIST_TYPE,
5254 				    soc->rx_refill_ring_history[i]);
5255 }
5256 
#else
/* WLAN_FEATURE_DP_RX_RING_HISTORY disabled: no rx histories are kept. */
static inline void dp_soc_rx_history_attach(struct dp_soc *soc)
{
}

static inline void dp_soc_rx_history_detach(struct dp_soc *soc)
{
}
#endif
5266 
5267 #ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
5268 /**
5269  * dp_soc_mon_status_ring_history_attach() - Attach the monitor status
5270  *					     buffer record history.
5271  * @soc: DP soc handle
5272  *
5273  * This function allocates memory to track the event for a monitor
5274  * status buffer, before its parsed and freed.
5275  *
5276  * Return: None
5277  */
5278 static void dp_soc_mon_status_ring_history_attach(struct dp_soc *soc)
5279 {
5280 	soc->mon_status_ring_history = dp_context_alloc_mem(soc,
5281 				DP_MON_STATUS_BUF_HIST_TYPE,
5282 				sizeof(struct dp_mon_status_ring_history));
5283 	if (!soc->mon_status_ring_history) {
5284 		dp_err("Failed to alloc memory for mon status ring history");
5285 		return;
5286 	}
5287 }
5288 
5289 /**
5290  * dp_soc_mon_status_ring_history_detach() - Detach the monitor status buffer
5291  *					     record history.
5292  * @soc: DP soc handle
5293  *
5294  * Return: None
5295  */
5296 static void dp_soc_mon_status_ring_history_detach(struct dp_soc *soc)
5297 {
5298 	dp_context_free_mem(soc, DP_MON_STATUS_BUF_HIST_TYPE,
5299 			    soc->mon_status_ring_history);
5300 }
#else
/* Monitor status ring history not compiled in: no-op stubs. */
static void dp_soc_mon_status_ring_history_attach(struct dp_soc *soc)
{
}

static void dp_soc_mon_status_ring_history_detach(struct dp_soc *soc)
{
}
#endif
5310 
5311 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
5312 /**
5313  * dp_soc_tx_history_attach() - Attach the ring history record buffers
5314  * @soc: DP soc structure
5315  *
5316  * This function allocates the memory for recording the tx tcl ring and
5317  * the tx comp ring entries. There is no error returned in case
5318  * of allocation failure since the record function checks if the history is
5319  * initialized or not. We do not want to fail the driver load in case of
5320  * failure to allocate memory for debug history.
5321  *
5322  * Returns: None
5323  */
5324 static void dp_soc_tx_history_attach(struct dp_soc *soc)
5325 {
5326 	uint32_t tx_tcl_hist_size;
5327 	uint32_t tx_comp_hist_size;
5328 
5329 	tx_tcl_hist_size = sizeof(*soc->tx_tcl_history);
5330 	soc->tx_tcl_history = dp_context_alloc_mem(soc, DP_TX_TCL_HIST_TYPE,
5331 						   tx_tcl_hist_size);
5332 	if (soc->tx_tcl_history)
5333 		qdf_atomic_init(&soc->tx_tcl_history->index);
5334 
5335 	tx_comp_hist_size = sizeof(*soc->tx_comp_history);
5336 	soc->tx_comp_history = dp_context_alloc_mem(soc, DP_TX_COMP_HIST_TYPE,
5337 						    tx_comp_hist_size);
5338 	if (soc->tx_comp_history)
5339 		qdf_atomic_init(&soc->tx_comp_history->index);
5340 }
5341 
5342 /**
5343  * dp_soc_tx_history_detach() - Detach the ring history record buffers
5344  * @soc: DP soc structure
5345  *
5346  * This function frees the memory for recording the tx tcl ring and
5347  * the tx comp ring entries.
5348  *
5349  * Returns: None
5350  */
5351 static void dp_soc_tx_history_detach(struct dp_soc *soc)
5352 {
5353 	dp_context_free_mem(soc, DP_TX_TCL_HIST_TYPE, soc->tx_tcl_history);
5354 	dp_context_free_mem(soc, DP_TX_COMP_HIST_TYPE, soc->tx_comp_history);
5355 }
5356 
#else
/* TX descriptor history not compiled in: no-op stubs. */
static inline void dp_soc_tx_history_attach(struct dp_soc *soc)
{
}

static inline void dp_soc_tx_history_detach(struct dp_soc *soc)
{
}
#endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
5366 
5367 /*
5368 * dp_pdev_attach_wifi3() - attach txrx pdev
5369 * @txrx_soc: Datapath SOC handle
5370 * @params: Params for PDEV attach
5371 *
5372 * Return: QDF_STATUS
5373 */
5374 static inline
5375 QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
5376 				struct cdp_pdev_attach_params *params)
5377 {
5378 	qdf_size_t pdev_context_size;
5379 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5380 	struct dp_pdev *pdev = NULL;
5381 	uint8_t pdev_id = params->pdev_id;
5382 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5383 	int nss_cfg;
5384 
5385 	pdev_context_size =
5386 		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_PDEV);
5387 	if (pdev_context_size)
5388 		pdev = dp_context_alloc_mem(soc, DP_PDEV_TYPE, pdev_context_size);
5389 
5390 	if (!pdev) {
5391 		dp_init_err("%pK: DP PDEV memory allocation failed",
5392 			    soc);
5393 		goto fail0;
5394 	}
5395 	wlan_minidump_log(pdev, sizeof(*pdev), soc->ctrl_psoc,
5396 			  WLAN_MD_DP_PDEV, "dp_pdev");
5397 
5398 	soc_cfg_ctx = soc->wlan_cfg_ctx;
5399 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
5400 
5401 	if (!pdev->wlan_cfg_ctx) {
5402 		dp_init_err("%pK: pdev cfg_attach failed", soc);
5403 		goto fail1;
5404 	}
5405 
5406 	/*
5407 	 * set nss pdev config based on soc config
5408 	 */
5409 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
5410 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
5411 					 (nss_cfg & (1 << pdev_id)));
5412 
5413 	pdev->soc = soc;
5414 	pdev->pdev_id = pdev_id;
5415 	soc->pdev_list[pdev_id] = pdev;
5416 
5417 	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
5418 	soc->pdev_count++;
5419 
5420 	/* Allocate memory for pdev srng rings */
5421 	if (dp_pdev_srng_alloc(pdev)) {
5422 		dp_init_err("%pK: dp_pdev_srng_alloc failed", soc);
5423 		goto fail2;
5424 	}
5425 
5426 	/* Setup second Rx refill buffer ring */
5427 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev)) {
5428 		dp_init_err("%pK: dp_srng_alloc failed rxrefill2 ring",
5429 			    soc);
5430 		goto fail3;
5431 	}
5432 
5433 	/* Allocate memory for pdev rxdma rings */
5434 	if (dp_rxdma_ring_alloc(soc, pdev)) {
5435 		dp_init_err("%pK: dp_rxdma_ring_alloc failed", soc);
5436 		goto fail4;
5437 	}
5438 
5439 	/* Rx specific init */
5440 	if (dp_rx_pdev_desc_pool_alloc(pdev)) {
5441 		dp_init_err("%pK: dp_rx_pdev_attach failed", soc);
5442 		goto fail4;
5443 	}
5444 
5445 	if (dp_monitor_pdev_attach(pdev)) {
5446 		dp_init_err("%pK: dp_monitor_pdev_attach failed", soc);
5447 		goto fail5;
5448 	}
5449 
5450 	soc->arch_ops.txrx_pdev_attach(pdev, params);
5451 
5452 	return QDF_STATUS_SUCCESS;
5453 fail5:
5454 	dp_rx_pdev_desc_pool_free(pdev);
5455 fail4:
5456 	dp_rxdma_ring_free(pdev);
5457 	dp_free_ipa_rx_refill_buf_ring(soc, pdev);
5458 fail3:
5459 	dp_pdev_srng_free(pdev);
5460 fail2:
5461 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
5462 fail1:
5463 	soc->pdev_list[pdev_id] = NULL;
5464 	qdf_mem_free(pdev);
5465 fail0:
5466 	return QDF_STATUS_E_FAILURE;
5467 }
5468 
5469 /**
5470  * dp_pdev_flush_pending_vdevs() - Flush all delete pending vdevs in pdev
5471  * @pdev: Datapath PDEV handle
5472  *
5473  * This is the last chance to flush all pending dp vdevs/peers,
5474  * some peer/vdev leak case like Non-SSR + peer unmap missing
5475  * will be covered here.
5476  *
5477  * Return: None
5478  */
5479 static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
5480 {
5481 	struct dp_soc *soc = pdev->soc;
5482 	struct dp_vdev *vdev_arr[MAX_VDEV_CNT] = {0};
5483 	uint32_t i = 0;
5484 	uint32_t num_vdevs = 0;
5485 	struct dp_vdev *vdev = NULL;
5486 
5487 	if (TAILQ_EMPTY(&soc->inactive_vdev_list))
5488 		return;
5489 
5490 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
5491 	TAILQ_FOREACH(vdev, &soc->inactive_vdev_list,
5492 		      inactive_list_elem) {
5493 		if (vdev->pdev != pdev)
5494 			continue;
5495 
5496 		vdev_arr[num_vdevs] = vdev;
5497 		num_vdevs++;
5498 		/* take reference to free */
5499 		dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CDP);
5500 	}
5501 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
5502 
5503 	for (i = 0; i < num_vdevs; i++) {
5504 		dp_vdev_flush_peers((struct cdp_vdev *)vdev_arr[i], 0);
5505 		dp_vdev_unref_delete(soc, vdev_arr[i], DP_MOD_ID_CDP);
5506 	}
5507 }
5508 
5509 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
5510 /**
5511  * dp_vdev_stats_hw_offload_target_config() - Send HTT command to FW
5512  *                                          for enable/disable of HW vdev stats
5513  * @soc: Datapath soc handle
5514  * @pdev_id: INVALID_PDEV_ID for all pdevs or 0,1,2 for individual pdev
5515  * @enable: flag to reprsent enable/disable of hw vdev stats
5516  *
5517  * Return: none
5518  */
5519 static void dp_vdev_stats_hw_offload_target_config(struct dp_soc *soc,
5520 						   uint8_t pdev_id,
5521 						   bool enable)
5522 {
5523 	/* Check SOC level config for HW offload vdev stats support */
5524 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
5525 		dp_debug("%pK: HW vdev offload stats is disabled", soc);
5526 		return;
5527 	}
5528 
5529 	/* Send HTT command to FW for enable of stats */
5530 	dp_h2t_hw_vdev_stats_config_send(soc, pdev_id, enable, false, 0);
5531 }
5532 
5533 /**
5534  * dp_vdev_stats_hw_offload_target_clear() - Clear HW vdev stats on target
5535  * @soc: Datapath soc handle
5536  * @pdev_id: pdev_id (0,1,2)
5537  * @bitmask: bitmask with vdev_id(s) for which stats are to be cleared on HW
5538  *
5539  * Return: none
5540  */
5541 static
5542 void dp_vdev_stats_hw_offload_target_clear(struct dp_soc *soc, uint8_t pdev_id,
5543 					   uint64_t vdev_id_bitmask)
5544 {
5545 	/* Check SOC level config for HW offload vdev stats support */
5546 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
5547 		dp_debug("%pK: HW vdev offload stats is disabled", soc);
5548 		return;
5549 	}
5550 
5551 	/* Send HTT command to FW for reset of stats */
5552 	dp_h2t_hw_vdev_stats_config_send(soc, pdev_id, true, true,
5553 					 vdev_id_bitmask);
5554 }
#else
/* HW vdev stats offload not compiled in: both config calls are no-ops. */
static void
dp_vdev_stats_hw_offload_target_config(struct dp_soc *soc, uint8_t pdev_id,
				       bool enable)
{
}

static
void dp_vdev_stats_hw_offload_target_clear(struct dp_soc *soc, uint8_t pdev_id,
					   uint64_t vdev_id_bitmask)
{
}
#endif /*QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT */
5568 
5569 /**
5570  * dp_pdev_deinit() - Deinit txrx pdev
5571  * @txrx_pdev: Datapath PDEV handle
5572  * @force: Force deinit
5573  *
5574  * Return: None
5575  */
5576 static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
5577 {
5578 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
5579 	qdf_nbuf_t curr_nbuf, next_nbuf;
5580 
5581 	if (pdev->pdev_deinit)
5582 		return;
5583 
5584 	dp_tx_me_exit(pdev);
5585 	dp_rx_fst_detach(pdev->soc, pdev);
5586 	dp_rx_pdev_buffers_free(pdev);
5587 	dp_rx_pdev_desc_pool_deinit(pdev);
5588 	dp_pdev_bkp_stats_detach(pdev);
5589 	qdf_event_destroy(&pdev->fw_peer_stats_event);
5590 	if (pdev->sojourn_buf)
5591 		qdf_nbuf_free(pdev->sojourn_buf);
5592 
5593 	dp_pdev_flush_pending_vdevs(pdev);
5594 	dp_tx_desc_flush(pdev, NULL, true);
5595 
5596 	qdf_spinlock_destroy(&pdev->tx_mutex);
5597 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
5598 
5599 	dp_monitor_pdev_deinit(pdev);
5600 
5601 	dp_pdev_srng_deinit(pdev);
5602 
5603 	dp_ipa_uc_detach(pdev->soc, pdev);
5604 	dp_deinit_ipa_rx_refill_buf_ring(pdev->soc, pdev);
5605 	dp_rxdma_ring_cleanup(pdev->soc, pdev);
5606 
5607 	curr_nbuf = pdev->invalid_peer_head_msdu;
5608 	while (curr_nbuf) {
5609 		next_nbuf = qdf_nbuf_next(curr_nbuf);
5610 		dp_rx_nbuf_free(curr_nbuf);
5611 		curr_nbuf = next_nbuf;
5612 	}
5613 	pdev->invalid_peer_head_msdu = NULL;
5614 	pdev->invalid_peer_tail_msdu = NULL;
5615 
5616 	dp_wdi_event_detach(pdev);
5617 	pdev->pdev_deinit = 1;
5618 }
5619 
5620 /**
5621  * dp_pdev_deinit_wifi3() - Deinit txrx pdev
5622  * @psoc: Datapath psoc handle
5623  * @pdev_id: Id of datapath PDEV handle
5624  * @force: Force deinit
5625  *
5626  * Return: QDF_STATUS
5627  */
5628 static QDF_STATUS
5629 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
5630 		     int force)
5631 {
5632 	struct dp_pdev *txrx_pdev;
5633 
5634 	txrx_pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
5635 						       pdev_id);
5636 
5637 	if (!txrx_pdev)
5638 		return QDF_STATUS_E_FAILURE;
5639 
5640 	dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force);
5641 
5642 	return QDF_STATUS_SUCCESS;
5643 }
5644 
5645 /*
5646  * dp_pdev_post_attach() - Do post pdev attach after dev_alloc_name
5647  * @txrx_pdev: Datapath PDEV handle
5648  *
5649  * Return: None
5650  */
5651 static void dp_pdev_post_attach(struct cdp_pdev *txrx_pdev)
5652 {
5653 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
5654 
5655 	dp_monitor_tx_capture_debugfs_init(pdev);
5656 
5657 	if (dp_pdev_htt_stats_dbgfs_init(pdev)) {
5658 		dp_init_err("%pK: Failed to initialize pdev HTT stats debugfs", pdev->soc);
5659 	}
5660 }
5661 
5662 /*
5663  * dp_pdev_post_attach_wifi3() - attach txrx pdev post
5664  * @psoc: Datapath soc handle
5665  * @pdev_id: pdev id of pdev
5666  *
5667  * Return: QDF_STATUS
5668  */
5669 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *soc,
5670 				     uint8_t pdev_id)
5671 {
5672 	struct dp_pdev *pdev;
5673 
5674 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
5675 						  pdev_id);
5676 
5677 	if (!pdev) {
5678 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
5679 			    (struct dp_soc *)soc, pdev_id);
5680 		return QDF_STATUS_E_FAILURE;
5681 	}
5682 
5683 	dp_pdev_post_attach((struct cdp_pdev *)pdev);
5684 	return QDF_STATUS_SUCCESS;
5685 }
5686 
5687 /*
5688  * dp_pdev_detach() - Complete rest of pdev detach
5689  * @txrx_pdev: Datapath PDEV handle
5690  * @force: Force deinit
5691  *
5692  * Return: None
5693  */
5694 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
5695 {
5696 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
5697 	struct dp_soc *soc = pdev->soc;
5698 
5699 	dp_pdev_htt_stats_dbgfs_deinit(pdev);
5700 	dp_rx_pdev_desc_pool_free(pdev);
5701 	dp_monitor_pdev_detach(pdev);
5702 	dp_rxdma_ring_free(pdev);
5703 	dp_free_ipa_rx_refill_buf_ring(soc, pdev);
5704 	dp_pdev_srng_free(pdev);
5705 
5706 	soc->pdev_count--;
5707 	soc->pdev_list[pdev->pdev_id] = NULL;
5708 
5709 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
5710 	wlan_minidump_remove(pdev, sizeof(*pdev), soc->ctrl_psoc,
5711 			     WLAN_MD_DP_PDEV, "dp_pdev");
5712 	dp_context_free_mem(soc, DP_PDEV_TYPE, pdev);
5713 }
5714 
5715 /*
5716  * dp_pdev_detach_wifi3() - detach txrx pdev
5717  * @psoc: Datapath soc handle
5718  * @pdev_id: pdev id of pdev
5719  * @force: Force detach
5720  *
5721  * Return: QDF_STATUS
5722  */
5723 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
5724 				       int force)
5725 {
5726 	struct dp_pdev *pdev;
5727 	struct dp_soc *soc = (struct dp_soc *)psoc;
5728 
5729 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
5730 						  pdev_id);
5731 
5732 	if (!pdev) {
5733 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
5734 			    (struct dp_soc *)psoc, pdev_id);
5735 		return QDF_STATUS_E_FAILURE;
5736 	}
5737 
5738 	soc->arch_ops.txrx_pdev_detach(pdev);
5739 
5740 	dp_pdev_detach((struct cdp_pdev *)pdev, force);
5741 	return QDF_STATUS_SUCCESS;
5742 }
5743 
5744 /*
5745  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
5746  * @soc: DP SOC handle
5747  */
5748 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
5749 {
5750 	struct reo_desc_list_node *desc;
5751 	struct dp_rx_tid *rx_tid;
5752 
5753 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
5754 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
5755 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
5756 		rx_tid = &desc->rx_tid;
5757 		qdf_mem_unmap_nbytes_single(soc->osdev,
5758 			rx_tid->hw_qdesc_paddr,
5759 			QDF_DMA_BIDIRECTIONAL,
5760 			rx_tid->hw_qdesc_alloc_size);
5761 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
5762 		qdf_mem_free(desc);
5763 	}
5764 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
5765 	qdf_list_destroy(&soc->reo_desc_freelist);
5766 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
5767 }
5768 
5769 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
5770 /*
5771  * dp_reo_desc_deferred_freelist_create() - Initialize the resources used
5772  *                                          for deferred reo desc list
5773  * @psoc: Datapath soc handle
5774  *
5775  * Return: void
5776  */
5777 static void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
5778 {
5779 	qdf_spinlock_create(&soc->reo_desc_deferred_freelist_lock);
5780 	qdf_list_create(&soc->reo_desc_deferred_freelist,
5781 			REO_DESC_DEFERRED_FREELIST_SIZE);
5782 	soc->reo_desc_deferred_freelist_init = true;
5783 }
5784 
5785 /*
5786  * dp_reo_desc_deferred_freelist_destroy() - loop the deferred free list &
5787  *                                           free the leftover REO QDESCs
5788  * @psoc: Datapath soc handle
5789  *
5790  * Return: void
5791  */
5792 static void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
5793 {
5794 	struct reo_desc_deferred_freelist_node *desc;
5795 
5796 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
5797 	soc->reo_desc_deferred_freelist_init = false;
5798 	while (qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
5799 	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
5800 		qdf_mem_unmap_nbytes_single(soc->osdev,
5801 					    desc->hw_qdesc_paddr,
5802 					    QDF_DMA_BIDIRECTIONAL,
5803 					    desc->hw_qdesc_alloc_size);
5804 		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
5805 		qdf_mem_free(desc);
5806 	}
5807 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
5808 
5809 	qdf_list_destroy(&soc->reo_desc_deferred_freelist);
5810 	qdf_spinlock_destroy(&soc->reo_desc_deferred_freelist_lock);
5811 }
#else
/* Deferred REO qdesc destroy not compiled in: no-op stubs. */
static inline void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
{
}

static inline void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
{
}
#endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
5821 
5822 /*
5823  * dp_soc_reset_txrx_ring_map() - reset tx ring map
5824  * @soc: DP SOC handle
5825  *
5826  */
5827 static void dp_soc_reset_txrx_ring_map(struct dp_soc *soc)
5828 {
5829 	uint32_t i;
5830 
5831 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
5832 		soc->tx_ring_map[i] = 0;
5833 }
5834 
5835 /*
5836  * dp_soc_print_inactive_objects() - prints inactive peer and vdev list
5837  * @soc: DP SOC handle
5838  *
5839  */
5840 static void dp_soc_print_inactive_objects(struct dp_soc *soc)
5841 {
5842 	struct dp_peer *peer = NULL;
5843 	struct dp_peer *tmp_peer = NULL;
5844 	struct dp_vdev *vdev = NULL;
5845 	struct dp_vdev *tmp_vdev = NULL;
5846 	int i = 0;
5847 	uint32_t count;
5848 
5849 	if (TAILQ_EMPTY(&soc->inactive_peer_list) &&
5850 	    TAILQ_EMPTY(&soc->inactive_vdev_list))
5851 		return;
5852 
5853 	TAILQ_FOREACH_SAFE(peer, &soc->inactive_peer_list,
5854 			   inactive_list_elem, tmp_peer) {
5855 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
5856 			count = qdf_atomic_read(&peer->mod_refs[i]);
5857 			if (count)
5858 				DP_PRINT_STATS("peer %pK Module id %u ==> %u",
5859 					       peer, i, count);
5860 		}
5861 	}
5862 
5863 	TAILQ_FOREACH_SAFE(vdev, &soc->inactive_vdev_list,
5864 			   inactive_list_elem, tmp_vdev) {
5865 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
5866 			count = qdf_atomic_read(&vdev->mod_refs[i]);
5867 			if (count)
5868 				DP_PRINT_STATS("vdev %pK Module id %u ==> %u",
5869 					       vdev, i, count);
5870 		}
5871 	}
5872 	QDF_BUG(0);
5873 }
5874 
5875 /**
5876  * dp_soc_deinit() - Deinitialize txrx SOC
5877  * @txrx_soc: Opaque DP SOC handle
5878  *
5879  * Return: None
5880  */
5881 static void dp_soc_deinit(void *txrx_soc)
5882 {
5883 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5884 	struct htt_soc *htt_soc = soc->htt_handle;
5885 	struct dp_mon_ops *mon_ops;
5886 
5887 	qdf_atomic_set(&soc->cmn_init_done, 0);
5888 
5889 	soc->arch_ops.txrx_soc_deinit(soc);
5890 
5891 	mon_ops = dp_mon_ops_get(soc);
5892 	if (mon_ops && mon_ops->mon_soc_deinit)
5893 		mon_ops->mon_soc_deinit(soc);
5894 
5895 	/* free peer tables & AST tables allocated during peer_map_attach */
5896 	if (soc->peer_map_attach_success) {
5897 		dp_peer_find_detach(soc);
5898 		soc->arch_ops.txrx_peer_map_detach(soc);
5899 		soc->peer_map_attach_success = FALSE;
5900 	}
5901 
5902 	qdf_flush_work(&soc->htt_stats.work);
5903 	qdf_disable_work(&soc->htt_stats.work);
5904 
5905 	qdf_spinlock_destroy(&soc->htt_stats.lock);
5906 
5907 	dp_soc_reset_txrx_ring_map(soc);
5908 
5909 	dp_reo_desc_freelist_destroy(soc);
5910 	dp_reo_desc_deferred_freelist_destroy(soc);
5911 
5912 	DEINIT_RX_HW_STATS_LOCK(soc);
5913 
5914 	qdf_spinlock_destroy(&soc->ast_lock);
5915 
5916 	dp_peer_mec_spinlock_destroy(soc);
5917 
5918 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
5919 
5920 	qdf_nbuf_queue_free(&soc->invalid_buf_queue);
5921 
5922 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
5923 
5924 	qdf_spinlock_destroy(&soc->vdev_map_lock);
5925 
5926 	dp_reo_cmdlist_destroy(soc);
5927 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
5928 
5929 	dp_soc_tx_desc_sw_pools_deinit(soc);
5930 
5931 	dp_soc_srng_deinit(soc);
5932 
5933 	dp_hw_link_desc_ring_deinit(soc);
5934 
5935 	dp_soc_print_inactive_objects(soc);
5936 	qdf_spinlock_destroy(&soc->inactive_peer_list_lock);
5937 	qdf_spinlock_destroy(&soc->inactive_vdev_list_lock);
5938 
5939 	htt_soc_htc_dealloc(soc->htt_handle);
5940 
5941 	htt_soc_detach(htt_soc);
5942 
5943 	/* Free wbm sg list and reset flags in down path */
5944 	dp_rx_wbm_sg_list_deinit(soc);
5945 
5946 	wlan_minidump_remove(soc, sizeof(*soc), soc->ctrl_psoc,
5947 			     WLAN_MD_DP_SOC, "dp_soc");
5948 }
5949 
5950 /**
5951  * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
5952  * @txrx_soc: Opaque DP SOC handle
5953  *
5954  * Return: None
5955  */
5956 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc)
5957 {
5958 	dp_soc_deinit(txrx_soc);
5959 }
5960 
5961 /*
5962  * dp_soc_detach() - Detach rest of txrx SOC
5963  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
5964  *
5965  * Return: None
5966  */
5967 static void dp_soc_detach(struct cdp_soc_t *txrx_soc)
5968 {
5969 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5970 
5971 	soc->arch_ops.txrx_soc_detach(soc);
5972 
5973 	dp_runtime_deinit();
5974 
5975 	dp_sysfs_deinitialize_stats(soc);
5976 	dp_soc_swlm_detach(soc);
5977 	dp_soc_tx_desc_sw_pools_free(soc);
5978 	dp_soc_srng_free(soc);
5979 	dp_hw_link_desc_ring_free(soc);
5980 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
5981 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
5982 	dp_soc_tx_hw_desc_history_detach(soc);
5983 	dp_soc_tx_history_detach(soc);
5984 	dp_soc_mon_status_ring_history_detach(soc);
5985 	dp_soc_rx_history_detach(soc);
5986 
5987 	if (!dp_monitor_modularized_enable()) {
5988 		dp_mon_soc_detach_wrapper(soc);
5989 	}
5990 
5991 	qdf_mem_free(soc->cdp_soc.ops);
5992 	qdf_mem_free(soc);
5993 }
5994 
5995 /*
5996  * dp_soc_detach_wifi3() - Detach txrx SOC
5997  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
5998  *
5999  * Return: None
6000  */
6001 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc)
6002 {
6003 	dp_soc_detach(txrx_soc);
6004 }
6005 
6006 /*
6007  * dp_rxdma_ring_config() - configure the RX DMA rings
6008  *
6009  * This function is used to configure the MAC rings.
6010  * On MCL host provides buffers in Host2FW ring
6011  * FW refills (copies) buffers to the ring and updates
6012  * ring_idx in register
6013  *
6014  * @soc: data path SoC handle
6015  *
6016  * Return: zero on success, non-zero on failure
6017  */
6018 #ifdef QCA_HOST2FW_RXBUF_RING
6019 static inline void
6020 dp_htt_setup_rxdma_err_dst_ring(struct dp_soc *soc, int mac_id,
6021 				int lmac_id)
6022 {
6023 	if (soc->rxdma_err_dst_ring[lmac_id].hal_srng)
6024 		htt_srng_setup(soc->htt_handle, mac_id,
6025 			       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
6026 			       RXDMA_DST);
6027 }
6028 
static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
{
	int i;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];

		if (pdev) {
			int mac_id;
			int max_mac_rings =
				 wlan_cfg_get_num_mac_rings
				(pdev->wlan_cfg_ctx);
			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);

			/* Host2FW refill ring: host provides RX buffers to
			 * FW through this ring
			 */
			htt_srng_setup(soc->htt_handle, i,
				       soc->rx_refill_buf_ring[lmac_id]
				       .hal_srng,
				       RXDMA_BUF);

			/* second refill ring, only if it was initialized */
			if (pdev->rx_refill_buf_ring2.hal_srng)
				htt_srng_setup(soc->htt_handle, i,
					       pdev->rx_refill_buf_ring2
					       .hal_srng,
					       RXDMA_BUF);

			dp_update_num_mac_rings_for_dbs(soc, &max_mac_rings);
			dp_err("pdev_id %d max_mac_rings %d",
			       pdev->pdev_id, max_mac_rings);

			/* program per-mac buffer/err-dst/monitor rings */
			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
				int mac_for_pdev =
					dp_get_mac_id_for_pdev(mac_id,
							       pdev->pdev_id);
				/*
				 * Obtain lmac id from pdev to access the LMAC
				 * ring in soc context
				 */
				lmac_id =
				dp_get_lmac_id_for_pdev_id(soc,
							   mac_id,
							   pdev->pdev_id);
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					 QDF_TRACE_LEVEL_ERROR,
					 FL("mac_id %d"), mac_for_pdev);

				htt_srng_setup(soc->htt_handle, mac_for_pdev,
					 pdev->rx_mac_buf_ring[mac_id]
						.hal_srng,
					 RXDMA_BUF);

				if (!soc->rxdma2sw_rings_not_supported)
					dp_htt_setup_rxdma_err_dst_ring(soc,
						mac_for_pdev, lmac_id);

				/* Configure monitor mode rings */
				status = dp_monitor_htt_srng_setup(soc, pdev,
								   lmac_id,
								   mac_for_pdev);
				if (status != QDF_STATUS_SUCCESS) {
					dp_err("Failed to send htt monitor messages to target");
					return status;
				}

			}
		}
	}

	dp_reap_timer_init(soc);
	return status;
}
6100 #else
6101 /* This is only for WIN */
6102 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
6103 {
6104 	int i;
6105 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6106 	int mac_for_pdev;
6107 	int lmac_id;
6108 
6109 	/* Configure monitor mode rings */
6110 	dp_monitor_soc_htt_srng_setup(soc);
6111 
6112 	for (i = 0; i < MAX_PDEV_CNT; i++) {
6113 		struct dp_pdev *pdev =  soc->pdev_list[i];
6114 
6115 		if (!pdev)
6116 			continue;
6117 
6118 		mac_for_pdev = i;
6119 		lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);
6120 
6121 		if (soc->rx_refill_buf_ring[lmac_id].hal_srng)
6122 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
6123 				       soc->rx_refill_buf_ring[lmac_id].
6124 				       hal_srng, RXDMA_BUF);
6125 
6126 		/* Configure monitor mode rings */
6127 		dp_monitor_htt_srng_setup(soc, pdev,
6128 					  lmac_id,
6129 					  mac_for_pdev);
6130 		if (!soc->rxdma2sw_rings_not_supported)
6131 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
6132 				       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
6133 				       RXDMA_DST);
6134 	}
6135 
6136 	dp_reap_timer_init(soc);
6137 	return status;
6138 }
6139 #endif
6140 
6141 /*
6142  * dp_rx_target_fst_config() - configure the RXOLE Flow Search Engine
6143  *
6144  * This function is used to configure the FSE HW block in RX OLE on a
6145  * per pdev basis. Here, we will be programming parameters related to
6146  * the Flow Search Table.
6147  *
6148  * @soc: data path SoC handle
6149  *
6150  * Return: zero on success, non-zero on failure
6151  */
6152 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
6153 static QDF_STATUS
6154 dp_rx_target_fst_config(struct dp_soc *soc)
6155 {
6156 	int i;
6157 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6158 
6159 	for (i = 0; i < MAX_PDEV_CNT; i++) {
6160 		struct dp_pdev *pdev = soc->pdev_list[i];
6161 
6162 		/* Flow search is not enabled if NSS offload is enabled */
6163 		if (pdev &&
6164 		    !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
6165 			status = dp_rx_flow_send_fst_fw_setup(pdev->soc, pdev);
6166 			if (status != QDF_STATUS_SUCCESS)
6167 				break;
6168 		}
6169 	}
6170 	return status;
6171 }
6172 #elif defined(WLAN_SUPPORT_RX_FISA)
6173 /**
6174  * dp_rx_target_fst_config() - Configure RX OLE FSE engine in HW
6175  * @soc: SoC handle
6176  *
6177  * Return: Success
6178  */
6179 static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
6180 {
6181 	QDF_STATUS status;
6182 	struct dp_rx_fst *fst = soc->rx_fst;
6183 
6184 	/* Check if it is enabled in the INI */
6185 	if (!soc->fisa_enable) {
6186 		dp_err("RX FISA feature is disabled");
6187 		return QDF_STATUS_E_NOSUPPORT;
6188 	}
6189 
6190 	status = dp_rx_flow_send_fst_fw_setup(soc, soc->pdev_list[0]);
6191 	if (QDF_IS_STATUS_ERROR(status)) {
6192 		dp_err("dp_rx_flow_send_fst_fw_setup failed %d",
6193 		       status);
6194 		return status;
6195 	}
6196 
6197 	if (soc->fst_cmem_base) {
6198 		soc->fst_in_cmem = true;
6199 		dp_rx_fst_update_cmem_params(soc, fst->max_entries,
6200 					     soc->fst_cmem_base & 0xffffffff,
6201 					     soc->fst_cmem_base >> 32);
6202 	}
6203 	return status;
6204 }
6205 
6206 #define FISA_MAX_TIMEOUT 0xffffffff
6207 #define FISA_DISABLE_TIMEOUT 0
6208 static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
6209 {
6210 	struct dp_htt_rx_fisa_cfg fisa_config;
6211 
6212 	fisa_config.pdev_id = 0;
6213 	fisa_config.fisa_timeout = FISA_MAX_TIMEOUT;
6214 
6215 	return dp_htt_rx_fisa_config(soc->pdev_list[0], &fisa_config);
6216 }
6217 
6218 #else /* !WLAN_SUPPORT_RX_FISA */
/* RX flow tag / FISA not compiled in: FSE target config is a no-op */
static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
6223 #endif /* !WLAN_SUPPORT_RX_FISA */
6224 
6225 #ifndef WLAN_SUPPORT_RX_FISA
/* FISA not compiled in: sending the FISA HTT config is a no-op */
static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
6230 
/* FISA not compiled in: no FISA stats to dump */
static QDF_STATUS dp_rx_dump_fisa_stats(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
6235 
/* FISA not compiled in: no FISA flow table to dump */
static void dp_rx_dump_fisa_table(struct dp_soc *soc)
{
}
6239 
/* FISA not compiled in: no FSE cache flush work to suspend */
static void dp_suspend_fse_cache_flush(struct dp_soc *soc)
{
}
6243 
/* FISA not compiled in: no FSE cache flush work to resume */
static void dp_resume_fse_cache_flush(struct dp_soc *soc)
{
}
6247 #endif /* !WLAN_SUPPORT_RX_FISA */
6248 
6249 #ifndef WLAN_DP_FEATURE_SW_LATENCY_MGR
/* SW latency manager not compiled in: nothing to print */
static inline QDF_STATUS dp_print_swlm_stats(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}
6254 #endif /* !WLAN_DP_FEATURE_SW_LATENCY_MGR */
6255 
6256 #ifdef WLAN_SUPPORT_PPEDS
6257 /*
6258  * dp_soc_target_ppe_rxole_rxdma_cfg() - Configure the RxOLe and RxDMA for PPE
6259  * @soc: DP Tx/Rx handle
6260  *
6261  * Return: QDF_STATUS
6262  */
6263 static
6264 QDF_STATUS dp_soc_target_ppe_rxole_rxdma_cfg(struct dp_soc *soc)
6265 {
6266 	struct dp_htt_rxdma_rxole_ppe_config htt_cfg = {0};
6267 	QDF_STATUS status;
6268 
6269 	/*
6270 	 * Program RxDMA to override the reo destination indication
6271 	 * with REO2PPE_DST_IND, when use_ppe is set to 1 in RX_MSDU_END,
6272 	 * thereby driving the packet to REO2PPE ring.
6273 	 * If the MSDU is spanning more than 1 buffer, then this
6274 	 * override is not done.
6275 	 */
6276 	htt_cfg.override = 1;
6277 	htt_cfg.reo_destination_indication = REO2PPE_DST_IND;
6278 	htt_cfg.multi_buffer_msdu_override_en = 0;
6279 
6280 	/*
6281 	 * Override use_ppe to 0 in RxOLE for the following
6282 	 * cases.
6283 	 */
6284 	htt_cfg.intra_bss_override = 1;
6285 	htt_cfg.decap_raw_override = 1;
6286 	htt_cfg.decap_nwifi_override = 1;
6287 	htt_cfg.ip_frag_override = 1;
6288 
6289 	status = dp_htt_rxdma_rxole_ppe_cfg_set(soc, &htt_cfg);
6290 	if (status != QDF_STATUS_SUCCESS)
6291 		dp_err("RxOLE and RxDMA PPE config failed %d", status);
6292 
6293 	return status;
6294 }
6295 #else
static inline
QDF_STATUS dp_soc_target_ppe_rxole_rxdma_cfg(struct dp_soc *soc)
{
	/* PPE-DS not compiled in: no RxOLE/RxDMA PPE config needed */
	return QDF_STATUS_SUCCESS;
}
6301 #endif /* WLAN_SUPPORT_PPEDS */
6302 
6303 /*
6304  * dp_soc_attach_target_wifi3() - SOC initialization in the target
6305  * @cdp_soc: Opaque Datapath SOC handle
6306  *
6307  * Return: zero on success, non-zero on failure
6308  */
6309 static QDF_STATUS
6310 dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
6311 {
6312 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
6313 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6314 
6315 	htt_soc_attach_target(soc->htt_handle);
6316 
6317 	status = dp_soc_target_ppe_rxole_rxdma_cfg(soc);
6318 	if (status != QDF_STATUS_SUCCESS) {
6319 		dp_err("Failed to send htt RxOLE and RxDMA messages to target");
6320 		return status;
6321 	}
6322 
6323 	status = dp_rxdma_ring_config(soc);
6324 	if (status != QDF_STATUS_SUCCESS) {
6325 		dp_err("Failed to send htt srng setup messages to target");
6326 		return status;
6327 	}
6328 
6329 	status = soc->arch_ops.dp_rxdma_ring_sel_cfg(soc);
6330 	if (status != QDF_STATUS_SUCCESS) {
6331 		dp_err("Failed to send htt ring config message to target");
6332 		return status;
6333 	}
6334 
6335 	status = dp_rx_target_fst_config(soc);
6336 	if (status != QDF_STATUS_SUCCESS &&
6337 	    status != QDF_STATUS_E_NOSUPPORT) {
6338 		dp_err("Failed to send htt fst setup config message to target");
6339 		return status;
6340 	}
6341 
6342 	if (status == QDF_STATUS_SUCCESS) {
6343 		status = dp_rx_fisa_config(soc);
6344 		if (status != QDF_STATUS_SUCCESS) {
6345 			dp_err("Failed to send htt FISA config message to target");
6346 			return status;
6347 		}
6348 	}
6349 
6350 	DP_STATS_INIT(soc);
6351 
6352 	dp_runtime_init(soc);
6353 
6354 	/* Enable HW vdev offload stats if feature is supported */
6355 	dp_vdev_stats_hw_offload_target_config(soc, INVALID_PDEV_ID, true);
6356 
6357 	/* initialize work queue for stats processing */
6358 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
6359 
6360 	return QDF_STATUS_SUCCESS;
6361 }
6362 
6363 /*
6364  * dp_vdev_id_map_tbl_add() - Add vdev into vdev_id table
6365  * @soc: SoC handle
6366  * @vdev: vdev handle
6367  * @vdev_id: vdev_id
6368  *
6369  * Return: None
6370  */
6371 static void dp_vdev_id_map_tbl_add(struct dp_soc *soc,
6372 				   struct dp_vdev *vdev,
6373 				   uint8_t vdev_id)
6374 {
6375 	QDF_ASSERT(vdev_id <= MAX_VDEV_CNT);
6376 
6377 	qdf_spin_lock_bh(&soc->vdev_map_lock);
6378 
6379 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
6380 			QDF_STATUS_SUCCESS) {
6381 		dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK vdev_id %u",
6382 			     soc, vdev, vdev_id);
6383 		qdf_spin_unlock_bh(&soc->vdev_map_lock);
6384 		return;
6385 	}
6386 
6387 	if (!soc->vdev_id_map[vdev_id])
6388 		soc->vdev_id_map[vdev_id] = vdev;
6389 	else
6390 		QDF_ASSERT(0);
6391 
6392 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
6393 }
6394 
6395 /*
6396  * dp_vdev_id_map_tbl_remove() - remove vdev from vdev_id table
6397  * @soc: SoC handle
6398  * @vdev: vdev handle
6399  *
6400  * Return: None
6401  */
6402 static void dp_vdev_id_map_tbl_remove(struct dp_soc *soc,
6403 				      struct dp_vdev *vdev)
6404 {
6405 	qdf_spin_lock_bh(&soc->vdev_map_lock);
6406 	QDF_ASSERT(soc->vdev_id_map[vdev->vdev_id] == vdev);
6407 
6408 	soc->vdev_id_map[vdev->vdev_id] = NULL;
6409 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
6410 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
6411 }
6412 
6413 /*
6414  * dp_vdev_pdev_list_add() - add vdev into pdev's list
6415  * @soc: soc handle
6416  * @pdev: pdev handle
6417  * @vdev: vdev handle
6418  *
6419  * return: none
6420  */
6421 static void dp_vdev_pdev_list_add(struct dp_soc *soc,
6422 				  struct dp_pdev *pdev,
6423 				  struct dp_vdev *vdev)
6424 {
6425 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
6426 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
6427 			QDF_STATUS_SUCCESS) {
6428 		dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK",
6429 			     soc, vdev);
6430 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6431 		return;
6432 	}
6433 	/* add this vdev into the pdev's list */
6434 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
6435 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6436 }
6437 
6438 /*
6439  * dp_vdev_pdev_list_remove() - remove vdev from pdev's list
6440  * @soc: SoC handle
6441  * @pdev: pdev handle
6442  * @vdev: VDEV handle
6443  *
6444  * Return: none
6445  */
6446 static void dp_vdev_pdev_list_remove(struct dp_soc *soc,
6447 				     struct dp_pdev *pdev,
6448 				     struct dp_vdev *vdev)
6449 {
6450 	uint8_t found = 0;
6451 	struct dp_vdev *tmpvdev = NULL;
6452 
6453 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
6454 	TAILQ_FOREACH(tmpvdev, &pdev->vdev_list, vdev_list_elem) {
6455 		if (tmpvdev == vdev) {
6456 			found = 1;
6457 			break;
6458 		}
6459 	}
6460 
6461 	if (found) {
6462 		TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
6463 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
6464 	} else {
6465 		dp_vdev_debug("%pK: vdev:%pK not found in pdev:%pK vdevlist:%pK",
6466 			      soc, vdev, pdev, &pdev->vdev_list);
6467 		QDF_ASSERT(0);
6468 	}
6469 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6470 }
6471 
6472 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
6473 /*
6474  * dp_vdev_init_rx_eapol() - initializing osif_rx_eapol
6475  * @vdev: Datapath VDEV handle
6476  *
6477  * Return: None
6478  */
6479 static inline void dp_vdev_init_rx_eapol(struct dp_vdev *vdev)
6480 {
6481 	vdev->osif_rx_eapol = NULL;
6482 }
6483 
6484 /*
6485  * dp_vdev_register_rx_eapol() - Register VDEV operations for rx_eapol
6486  * @vdev: DP vdev handle
6487  * @txrx_ops: Tx and Rx operations
6488  *
6489  * Return: None
6490  */
6491 static inline void dp_vdev_register_rx_eapol(struct dp_vdev *vdev,
6492 					     struct ol_txrx_ops *txrx_ops)
6493 {
6494 	vdev->osif_rx_eapol = txrx_ops->rx.rx_eapol;
6495 }
6496 #else
/* EAPOL-over-control-port disabled: no rx_eapol callback to initialize */
static inline void dp_vdev_init_rx_eapol(struct dp_vdev *vdev)
{
}
6500 
/* EAPOL-over-control-port disabled: no rx_eapol callback to register */
static inline void dp_vdev_register_rx_eapol(struct dp_vdev *vdev,
					     struct ol_txrx_ops *txrx_ops)
{
}
6505 #endif
6506 
6507 #ifdef WLAN_FEATURE_11BE_MLO
6508 #if defined(WLAN_MLO_MULTI_CHIP) && defined(WLAN_MCAST_MLO)
6509 static inline void dp_vdev_save_mld_info(struct dp_vdev *vdev,
6510 					 struct cdp_vdev_info *vdev_info)
6511 {
6512 	if (qdf_is_macaddr_zero((struct qdf_mac_addr *)vdev_info->mld_mac_addr))
6513 		vdev->mlo_vdev = false;
6514 	else
6515 		vdev->mlo_vdev = true;
6516 }
6517 #else
/* MLO multi-chip mcast not compiled in: nothing to record */
static inline void dp_vdev_save_mld_info(struct dp_vdev *vdev,
					 struct cdp_vdev_info *vdev_info)
{
}
6522 #endif
/* dp_vdev_save_mld_addr() - copy the MLD MAC address (when provided) into
 * the vdev and record whether this vdev is an MLO vdev.
 * @vdev: Datapath VDEV handle
 * @vdev_info: vdev creation parameters
 */
static inline void dp_vdev_save_mld_addr(struct dp_vdev *vdev,
					 struct cdp_vdev_info *vdev_info)
{
	if (vdev_info->mld_mac_addr)
		qdf_mem_copy(&vdev->mld_mac_addr.raw[0],
			     vdev_info->mld_mac_addr, QDF_MAC_ADDR_SIZE);

	dp_vdev_save_mld_info(vdev, vdev_info);

}
6533 #else
/* 11be MLO not compiled in: no MLD address to save */
static inline void dp_vdev_save_mld_addr(struct dp_vdev *vdev,
					 struct cdp_vdev_info *vdev_info)
{

}
6539 #endif
6540 
6541 /*
6542 * dp_vdev_attach_wifi3() - attach txrx vdev
6543 * @txrx_pdev: Datapath PDEV handle
6544 * @pdev_id: PDEV ID for vdev creation
6545 * @vdev_info: parameters used for vdev creation
6546 *
6547 * Return: status
6548 */
static QDF_STATUS dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc,
				       uint8_t pdev_id,
				       struct cdp_vdev_info *vdev_info)
{
	int i = 0;
	qdf_size_t vdev_context_size;
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	struct dp_vdev *vdev;
	uint8_t *vdev_mac_addr = vdev_info->vdev_mac_addr;
	uint8_t vdev_id = vdev_info->vdev_id;
	enum wlan_op_mode op_mode = vdev_info->op_mode;
	enum wlan_op_subtype subtype = vdev_info->subtype;
	uint8_t vdev_stats_id = vdev_info->vdev_stats_id;

	/* vdev context size is supplied by the arch (li/be) layer */
	vdev_context_size =
		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_VDEV);
	vdev = qdf_mem_malloc(vdev_context_size);

	if (!pdev) {
		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
			    cdp_soc, pdev_id);
		qdf_mem_free(vdev);
		goto fail0;
	}

	if (!vdev) {
		dp_init_err("%pK: DP VDEV memory allocation failed",
			    cdp_soc);
		goto fail0;
	}

	wlan_minidump_log(vdev, sizeof(*vdev), soc->ctrl_psoc,
			  WLAN_MD_DP_VDEV, "dp_vdev");

	vdev->pdev = pdev;
	vdev->vdev_id = vdev_id;
	vdev->vdev_stats_id = vdev_stats_id;
	vdev->opmode = op_mode;
	vdev->subtype = subtype;
	vdev->osdev = soc->osdev;

	/* osif callbacks are filled in later by dp_vdev_register_wifi3() */
	vdev->osif_rx = NULL;
	vdev->osif_rsim_rx_decap = NULL;
	vdev->osif_get_key = NULL;
	vdev->osif_tx_free_ext = NULL;
	vdev->osif_vdev = NULL;

	vdev->delete.pending = 0;
	vdev->safemode = 0;
	vdev->drop_unenc = 1;
	vdev->sec_type = cdp_sec_type_none;
	vdev->multipass_en = false;
	vdev->wrap_vdev = false;
	dp_vdev_init_rx_eapol(vdev);
	qdf_atomic_init(&vdev->ref_cnt);
	for (i = 0; i < DP_MOD_ID_MAX; i++)
		qdf_atomic_init(&vdev->mod_refs[i]);

	/* Take one reference for create*/
	qdf_atomic_inc(&vdev->ref_cnt);
	qdf_atomic_inc(&vdev->mod_refs[DP_MOD_ID_CONFIG]);
	vdev->num_peers = 0;
#ifdef notyet
	vdev->filters_num = 0;
#endif
	vdev->lmac_id = pdev->lmac_id;

	qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE);

	dp_vdev_save_mld_addr(vdev, vdev_info);

	/* TODO: Initialize default HTT meta data that will be used in
	 * TCL descriptors for packets transmitted from this VDEV
	 */

	qdf_spinlock_create(&vdev->peer_list_lock);
	TAILQ_INIT(&vdev->peer_list);
	dp_peer_multipass_list_init(vdev);
	if ((soc->intr_mode == DP_INTR_POLL) &&
	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
		if ((pdev->vdev_count == 0) ||
		    (wlan_op_mode_monitor == vdev->opmode))
			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
	} else if (dp_soc_get_con_mode(soc) == QDF_GLOBAL_MISSION_MODE &&
		   soc->intr_mode == DP_INTR_MSI &&
		   wlan_op_mode_monitor == vdev->opmode) {
		/* Timer to reap status ring in mission mode */
		dp_monitor_vdev_timer_start(soc);
	}

	dp_vdev_id_map_tbl_add(soc, vdev, vdev_id);

	/* monitor vdevs are handed off entirely to the monitor module */
	if (wlan_op_mode_monitor == vdev->opmode) {
		if (dp_monitor_vdev_attach(vdev) == QDF_STATUS_SUCCESS) {
			dp_monitor_pdev_set_mon_vdev(vdev);
			return dp_monitor_vdev_set_monitor_mode_buf_rings(pdev);
		}
		return QDF_STATUS_E_FAILURE;
	}

	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
	vdev->dscp_tid_map_id = 0;
	vdev->mcast_enhancement_en = 0;
	vdev->igmp_mcast_enhanc_en = 0;
	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
	vdev->prev_tx_enq_tstamp = 0;
	vdev->prev_rx_deliver_tstamp = 0;
	vdev->skip_sw_tid_classification = DP_TX_HW_DSCP_TID_MAP_VALID;

	dp_vdev_pdev_list_add(soc, pdev, vdev);
	pdev->vdev_count++;

	if (wlan_op_mode_sta != vdev->opmode &&
	    wlan_op_mode_ndi != vdev->opmode)
		vdev->ap_bridge_enabled = true;
	else
		vdev->ap_bridge_enabled = false;
	dp_init_info("%pK: wlan_cfg_ap_bridge_enabled %d",
		     cdp_soc, vdev->ap_bridge_enabled);

	dp_tx_vdev_attach(vdev);

	dp_monitor_vdev_attach(vdev);
	if (!pdev->is_lro_hash_configured) {
		if (QDF_IS_STATUS_SUCCESS(dp_lro_hash_setup(soc, pdev)))
			pdev->is_lro_hash_configured = true;
		else
			dp_err("LRO hash setup failure!");
	}

	dp_info("Created vdev %pK ("QDF_MAC_ADDR_FMT")", vdev,
		QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
	DP_STATS_INIT(vdev);

	/* NOTE(review): on this failure path the vdev has already been
	 * added to the vdev_id map and the pdev list, and is not freed or
	 * unlinked here — TODO confirm teardown/leak handling.
	 */
	if (QDF_IS_STATUS_ERROR(soc->arch_ops.txrx_vdev_attach(soc, vdev)))
		goto fail0;

	/* for an STA vdev, pre-create its self link peer */
	if (wlan_op_mode_sta == vdev->opmode)
		dp_peer_create_wifi3((struct cdp_soc_t *)soc, vdev_id,
				     vdev->mac_addr.raw, CDP_LINK_PEER_TYPE);
	return QDF_STATUS_SUCCESS;

fail0:
	return QDF_STATUS_E_FAILURE;
}
6698 
6699 #ifndef QCA_HOST_MODE_WIFI_DISABLED
6700 /**
6701  * dp_vdev_register_tx_handler() - Register Tx handler
6702  * @vdev: struct dp_vdev *
6703  * @soc: struct dp_soc *
6704  * @txrx_ops: struct ol_txrx_ops *
6705  */
6706 static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
6707 					       struct dp_soc *soc,
6708 					       struct ol_txrx_ops *txrx_ops)
6709 {
6710 	/* Enable vdev_id check only for ap, if flag is enabled */
6711 	if (vdev->mesh_vdev)
6712 		txrx_ops->tx.tx = dp_tx_send_mesh;
6713 	else if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
6714 		 (vdev->opmode == wlan_op_mode_ap))
6715 		txrx_ops->tx.tx = dp_tx_send_vdev_id_check;
6716 	else
6717 		txrx_ops->tx.tx = dp_tx_send;
6718 
6719 	/* Avoid check in regular exception Path */
6720 	if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
6721 	    (vdev->opmode == wlan_op_mode_ap))
6722 		txrx_ops->tx.tx_exception = dp_tx_send_exception_vdev_id_check;
6723 	else
6724 		txrx_ops->tx.tx_exception = dp_tx_send_exception;
6725 
6726 	dp_info("Configure tx_vdev_id_chk_handler Feature Flag: %d and mode:%d for vdev_id:%d",
6727 		wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx),
6728 		vdev->opmode, vdev->vdev_id);
6729 }
6730 #else /* QCA_HOST_MODE_WIFI_DISABLED */
/* host-mode wifi disabled: no datapath Tx entry points to register */
static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
					       struct dp_soc *soc,
					       struct ol_txrx_ops *txrx_ops)
{
}
6736 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
6737 
6738 /**
6739  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
6740  * @soc: Datapath soc handle
6741  * @vdev_id: id of Datapath VDEV handle
6742  * @osif_vdev: OSIF vdev handle
6743  * @txrx_ops: Tx and Rx operations
6744  *
6745  * Return: DP VDEV handle on success, NULL on failure
6746  */
static QDF_STATUS dp_vdev_register_wifi3(struct cdp_soc_t *soc_hdl,
					 uint8_t vdev_id,
					 ol_osif_vdev_handle osif_vdev,
					 struct ol_txrx_ops *txrx_ops)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev =	dp_vdev_get_ref_by_id(soc, vdev_id,
						      DP_MOD_ID_CDP);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	/* cache the osif-provided Rx/Tx callbacks in the vdev */
	vdev->osif_vdev = osif_vdev;
	vdev->osif_rx = txrx_ops->rx.rx;
	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
	vdev->osif_rx_flush = txrx_ops->rx.rx_flush;
	vdev->osif_gro_flush = txrx_ops->rx.rx_gro_flush;
	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
	vdev->osif_fisa_rx = txrx_ops->rx.osif_fisa_rx;
	vdev->osif_fisa_flush = txrx_ops->rx.osif_fisa_flush;
	vdev->osif_get_key = txrx_ops->get_key;
	dp_monitor_vdev_register_osif(vdev, txrx_ops);
	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
	vdev->tx_comp = txrx_ops->tx.tx_comp;
	vdev->stats_cb = txrx_ops->rx.stats_rx;
	vdev->tx_classify_critical_pkt_cb =
		txrx_ops->tx.tx_classify_critical_pkt_cb;
#ifdef notyet
#if ATH_SUPPORT_WAPI
	vdev->osif_check_wai = txrx_ops->rx.wai_check;
#endif
#endif
#ifdef UMAC_SUPPORT_PROXY_ARP
	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
#endif
	vdev->me_convert = txrx_ops->me_convert;
	vdev->get_tsf_time = txrx_ops->get_tsf_time;

	dp_vdev_register_rx_eapol(vdev, txrx_ops);

	/* fill in the osif-facing Tx entry points */
	dp_vdev_register_tx_handler(vdev, soc, txrx_ops);

	dp_init_info("%pK: DP Vdev Register success", soc);

	/* release the lookup reference taken above */
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}
6794 
6795 void dp_peer_delete(struct dp_soc *soc,
6796 		    struct dp_peer *peer,
6797 		    void *arg)
6798 {
6799 	if (!peer->valid)
6800 		return;
6801 
6802 	dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
6803 			     peer->vdev->vdev_id,
6804 			     peer->mac_addr.raw, 0);
6805 }
6806 
6807 /**
6808  * dp_vdev_flush_peers() - Forcibily Flush peers of vdev
6809  * @vdev: Datapath VDEV handle
6810  * @unmap_only: Flag to indicate "only unmap"
6811  *
6812  * Return: void
6813  */
6814 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, bool unmap_only)
6815 {
6816 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6817 	struct dp_pdev *pdev = vdev->pdev;
6818 	struct dp_soc *soc = pdev->soc;
6819 	struct dp_peer *peer;
6820 	uint32_t i = 0;
6821 
6822 
6823 	if (!unmap_only)
6824 		dp_vdev_iterate_peer_lock_safe(vdev, dp_peer_delete, NULL,
6825 					       DP_MOD_ID_CDP);
6826 
6827 	for (i = 0; i < soc->max_peer_id ; i++) {
6828 		peer = __dp_peer_get_ref_by_id(soc, i, DP_MOD_ID_CDP);
6829 
6830 		if (!peer)
6831 			continue;
6832 
6833 		if (peer->vdev != vdev) {
6834 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6835 			continue;
6836 		}
6837 
6838 		dp_info("peer: "QDF_MAC_ADDR_FMT" is getting unmap",
6839 			QDF_MAC_ADDR_REF(peer->mac_addr.raw));
6840 
6841 		dp_rx_peer_unmap_handler(soc, i,
6842 					 vdev->vdev_id,
6843 					 peer->mac_addr.raw, 0,
6844 					 DP_PEER_WDS_COUNT_INVALID);
6845 		SET_PEER_REF_CNT_ONE(peer);
6846 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6847 	}
6848 
6849 }
6850 
6851 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
6852 /*
6853  * dp_txrx_alloc_vdev_stats_id()- Allocate vdev_stats_id
6854  * @soc_hdl: Datapath soc handle
6855  * @vdev_stats_id: Address of vdev_stats_id
6856  *
6857  * Return: QDF_STATUS
6858  */
6859 static QDF_STATUS dp_txrx_alloc_vdev_stats_id(struct cdp_soc_t *soc_hdl,
6860 					      uint8_t *vdev_stats_id)
6861 {
6862 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6863 	uint8_t id = 0;
6864 
6865 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
6866 		*vdev_stats_id = CDP_INVALID_VDEV_STATS_ID;
6867 		return QDF_STATUS_E_FAILURE;
6868 	}
6869 
6870 	while (id < CDP_MAX_VDEV_STATS_ID) {
6871 		if (!qdf_atomic_test_and_set_bit(id, &soc->vdev_stats_id_map)) {
6872 			*vdev_stats_id = id;
6873 			return QDF_STATUS_SUCCESS;
6874 		}
6875 		id++;
6876 	}
6877 
6878 	*vdev_stats_id = CDP_INVALID_VDEV_STATS_ID;
6879 	return QDF_STATUS_E_FAILURE;
6880 }
6881 
6882 /*
6883  * dp_txrx_reset_vdev_stats_id() - Reset vdev_stats_id in dp_soc
6884  * @soc_hdl: Datapath soc handle
6885  * @vdev_stats_id: vdev_stats_id to reset in dp_soc
6886  *
6887  * Return: none
6888  */
6889 static void dp_txrx_reset_vdev_stats_id(struct cdp_soc_t *soc_hdl,
6890 					uint8_t vdev_stats_id)
6891 {
6892 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6893 
6894 	if ((!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) ||
6895 	    (vdev_stats_id >= CDP_MAX_VDEV_STATS_ID))
6896 		return;
6897 
6898 	qdf_atomic_clear_bit(vdev_stats_id, &soc->vdev_stats_id_map);
6899 }
6900 #else
/* vdev stats HW offload not compiled in: nothing to reset */
static void dp_txrx_reset_vdev_stats_id(struct cdp_soc_t *soc,
					uint8_t vdev_stats_id)
{}
6904 #endif
6905 /*
6906  * dp_vdev_detach_wifi3() - Detach txrx vdev
6907  * @cdp_soc: Datapath soc handle
6908  * @vdev_id: VDEV Id
6909  * @callback: Callback OL_IF on completion of detach
6910  * @cb_context:	Callback context
6911  *
6912  */
6913 static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc,
6914 				       uint8_t vdev_id,
6915 				       ol_txrx_vdev_delete_cb callback,
6916 				       void *cb_context)
6917 {
6918 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
6919 	struct dp_pdev *pdev;
6920 	struct dp_neighbour_peer *peer = NULL;
6921 	struct dp_peer *vap_self_peer = NULL;
6922 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
6923 						     DP_MOD_ID_CDP);
6924 
6925 	if (!vdev)
6926 		return QDF_STATUS_E_FAILURE;
6927 
6928 	soc->arch_ops.txrx_vdev_detach(soc, vdev);
6929 
6930 	pdev = vdev->pdev;
6931 
6932 	vap_self_peer = dp_sta_vdev_self_peer_ref_n_get(soc, vdev,
6933 							DP_MOD_ID_CONFIG);
6934 	if (vap_self_peer) {
6935 		qdf_spin_lock_bh(&soc->ast_lock);
6936 		if (vap_self_peer->self_ast_entry) {
6937 			dp_peer_del_ast(soc, vap_self_peer->self_ast_entry);
6938 			vap_self_peer->self_ast_entry = NULL;
6939 		}
6940 		qdf_spin_unlock_bh(&soc->ast_lock);
6941 
6942 		dp_peer_delete_wifi3((struct cdp_soc_t *)soc, vdev->vdev_id,
6943 				     vap_self_peer->mac_addr.raw, 0);
6944 		dp_peer_unref_delete(vap_self_peer, DP_MOD_ID_CONFIG);
6945 	}
6946 
6947 	/*
6948 	 * If Target is hung, flush all peers before detaching vdev
6949 	 * this will free all references held due to missing
6950 	 * unmap commands from Target
6951 	 */
6952 	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
6953 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, false);
6954 	else if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
6955 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, true);
6956 
6957 	/* indicate that the vdev needs to be deleted */
6958 	vdev->delete.pending = 1;
6959 	dp_rx_vdev_detach(vdev);
6960 	/*
6961 	 * move it after dp_rx_vdev_detach(),
6962 	 * as the call back done in dp_rx_vdev_detach()
6963 	 * still need to get vdev pointer by vdev_id.
6964 	 */
6965 	dp_vdev_id_map_tbl_remove(soc, vdev);
6966 
6967 	dp_monitor_neighbour_peer_list_remove(pdev, vdev, peer);
6968 
6969 	dp_txrx_reset_vdev_stats_id(cdp_soc, vdev->vdev_stats_id);
6970 
6971 	dp_tx_vdev_multipass_deinit(vdev);
6972 
6973 	if (vdev->vdev_dp_ext_handle) {
6974 		qdf_mem_free(vdev->vdev_dp_ext_handle);
6975 		vdev->vdev_dp_ext_handle = NULL;
6976 	}
6977 	vdev->delete.callback = callback;
6978 	vdev->delete.context = cb_context;
6979 
6980 	if (vdev->opmode != wlan_op_mode_monitor)
6981 		dp_vdev_pdev_list_remove(soc, pdev, vdev);
6982 
6983 	pdev->vdev_count--;
6984 	/* release reference taken above for find */
6985 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6986 
6987 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
6988 	TAILQ_INSERT_TAIL(&soc->inactive_vdev_list, vdev, inactive_list_elem);
6989 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
6990 
6991 	/* release reference taken at dp_vdev_create */
6992 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
6993 
6994 	return QDF_STATUS_SUCCESS;
6995 }
6996 
6997 #ifdef WLAN_FEATURE_11BE_MLO
6998 /**
6999  * is_dp_peer_can_reuse() - check if the dp_peer match condition to be reused
7000  * @vdev: Target DP vdev handle
7001  * @peer: DP peer handle to be checked
7002  * @peer_mac_addr: Target peer mac address
7003  * @peer_type: Target peer type
7004  *
7005  * Return: true - if match, false - not match
7006  */
7007 static inline
7008 bool is_dp_peer_can_reuse(struct dp_vdev *vdev,
7009 			  struct dp_peer *peer,
7010 			  uint8_t *peer_mac_addr,
7011 			  enum cdp_peer_type peer_type)
7012 {
7013 	if (peer->bss_peer && (peer->vdev == vdev) &&
7014 	    (peer->peer_type == peer_type) &&
7015 	    (qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw,
7016 			 QDF_MAC_ADDR_SIZE) == 0))
7017 		return true;
7018 
7019 	return false;
7020 }
7021 #else
7022 static inline
7023 bool is_dp_peer_can_reuse(struct dp_vdev *vdev,
7024 			  struct dp_peer *peer,
7025 			  uint8_t *peer_mac_addr,
7026 			  enum cdp_peer_type peer_type)
7027 {
7028 	if (peer->bss_peer && (peer->vdev == vdev) &&
7029 	    (qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw,
7030 			 QDF_MAC_ADDR_SIZE) == 0))
7031 		return true;
7032 
7033 	return false;
7034 }
7035 #endif
7036 
/**
 * dp_peer_can_reuse() - find a reusable inactive peer for this vdev/mac
 * @vdev: Datapath vdev the new peer is being created on
 * @peer_mac_addr: MAC address of the peer being created
 * @peer_type: link or MLD peer type
 *
 * Scans the soc inactive peer list under inactive_peer_list_lock for a
 * bss peer matching vdev/mac/type (see is_dp_peer_can_reuse()). On a
 * match the peer is unlinked from the inactive list and returned with a
 * DP_MOD_ID_CONFIG reference held on behalf of cdp_peer_create.
 *
 * Return: matching peer with a CONFIG reference, or NULL if none found
 */
static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
						uint8_t *peer_mac_addr,
						enum cdp_peer_type peer_type)
{
	struct dp_peer *peer;
	struct dp_soc *soc = vdev->pdev->soc;

	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
	TAILQ_FOREACH(peer, &soc->inactive_peer_list,
		      inactive_list_elem) {

		/* reuse bss peer only when vdev matches */
		if (is_dp_peer_can_reuse(vdev, peer,
					 peer_mac_addr, peer_type)) {
			/* increment ref count for cdp_peer_create */
			if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG) ==
						QDF_STATUS_SUCCESS) {
				/* unlink before dropping the list lock */
				TAILQ_REMOVE(&soc->inactive_peer_list, peer,
					     inactive_list_elem);
				qdf_spin_unlock_bh
					(&soc->inactive_peer_list_lock);
				return peer;
			}
		}
	}

	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
	return NULL;
}
7066 
#ifdef FEATURE_AST
/**
 * dp_peer_ast_handle_roam_del() - delete a stale WDS AST entry on roam
 * @soc: Datapath soc handle
 * @pdev: Datapath pdev the peer is being created on
 * @peer_mac_addr: MAC address of the roaming peer
 *
 * When a STA roams, an AST entry previously learned as a WDS (next_hop)
 * entry may still exist for the MAC although no peer does; remove such
 * an entry under ast_lock so the new peer can add its own entries.
 *
 * Return: none
 */
static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
					       struct dp_pdev *pdev,
					       uint8_t *peer_mac_addr)
{
	struct dp_ast_entry *ast_entry;

	/* AST entries are firmware managed when AST offload is enabled */
	if (soc->ast_offload_support)
		return;

	qdf_spin_lock_bh(&soc->ast_lock);
	/* with AST override, entries are hashed per pdev */
	if (soc->ast_override_support)
		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
							    pdev->pdev_id);
	else
		ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);

	/* only WDS (next_hop) entries not already queued for deletion */
	if (ast_entry && ast_entry->next_hop && !ast_entry->delete_in_progress)
		dp_peer_del_ast(soc, ast_entry);

	qdf_spin_unlock_bh(&soc->ast_lock);
}
#endif
7090 
#ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
/*
 * dp_peer_hw_txrx_stats_init() - Initialize hw_txrx_stats_en in dp_txrx_peer
 * @soc: Datapath soc handle
 * @txrx_peer: Datapath txrx_peer handle
 *
 * Return: none
 */
static inline
void dp_peer_hw_txrx_stats_init(struct dp_soc *soc,
				struct dp_txrx_peer *txrx_peer)
{
	/* mirror the cfg-driven vdev-stats HW offload setting per peer */
	txrx_peer->hw_txrx_stats_en =
		wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx);
}
#else
static inline
void dp_peer_hw_txrx_stats_init(struct dp_soc *soc,
				struct dp_txrx_peer *txrx_peer)
{
	/* HW offload of vdev stats not compiled in: always disabled */
	txrx_peer->hw_txrx_stats_en = 0;
}
#endif
7114 
/**
 * dp_txrx_peer_detach() - free the dp_txrx_peer backing a dp_peer
 * @soc: Datapath soc handle
 * @peer: Datapath peer whose txrx context is to be released
 *
 * Detaches peer->txrx_peer and frees it together with the defrag,
 * delay/jitter/sawf stats contexts hanging off it. A peer without a
 * txrx_peer is a no-op.
 *
 * Return: QDF_STATUS_SUCCESS always
 */
static QDF_STATUS dp_txrx_peer_detach(struct dp_soc *soc, struct dp_peer *peer)
{
	struct dp_txrx_peer *txrx_peer;
	struct dp_pdev *pdev;

	/* dp_txrx_peer exists for mld peer and legacy peer */
	if (peer->txrx_peer) {
		/* detach from the peer first so no one finds it via peer */
		txrx_peer = peer->txrx_peer;
		peer->txrx_peer = NULL;
		pdev = txrx_peer->vdev->pdev;

		dp_peer_defrag_rx_tids_deinit(txrx_peer);
		/*
		 * Deallocate the extended stats context
		 */
		dp_peer_delay_stats_ctx_dealloc(soc, txrx_peer);
		dp_peer_rx_bufq_resources_deinit(txrx_peer);
		dp_peer_jitter_stats_ctx_dealloc(pdev, txrx_peer);
		dp_peer_sawf_stats_ctx_free(soc, txrx_peer);

		qdf_mem_free(txrx_peer);
	}

	return QDF_STATUS_SUCCESS;
}
7140 
7141 static QDF_STATUS dp_txrx_peer_attach(struct dp_soc *soc, struct dp_peer *peer)
7142 {
7143 	struct dp_txrx_peer *txrx_peer;
7144 	struct dp_pdev *pdev;
7145 
7146 	txrx_peer = (struct dp_txrx_peer *)qdf_mem_malloc(sizeof(*txrx_peer));
7147 
7148 	if (!txrx_peer)
7149 		return QDF_STATUS_E_NOMEM; /* failure */
7150 
7151 	txrx_peer->peer_id = HTT_INVALID_PEER;
7152 	/* initialize the peer_id */
7153 	txrx_peer->vdev = peer->vdev;
7154 	pdev = peer->vdev->pdev;
7155 
7156 	DP_STATS_INIT(txrx_peer);
7157 
7158 	dp_wds_ext_peer_init(txrx_peer);
7159 	dp_peer_rx_bufq_resources_init(txrx_peer);
7160 	dp_peer_hw_txrx_stats_init(soc, txrx_peer);
7161 	/*
7162 	 * Allocate peer extended stats context. Fall through in
7163 	 * case of failure as its not an implicit requirement to have
7164 	 * this object for regular statistics updates.
7165 	 */
7166 	if (dp_peer_delay_stats_ctx_alloc(soc, txrx_peer) !=
7167 					  QDF_STATUS_SUCCESS)
7168 		dp_warn("peer delay_stats ctx alloc failed");
7169 
7170 	/*
7171 	 * Alloctate memory for jitter stats. Fall through in
7172 	 * case of failure as its not an implicit requirement to have
7173 	 * this object for regular statistics updates.
7174 	 */
7175 	if (dp_peer_jitter_stats_ctx_alloc(pdev, txrx_peer) !=
7176 					   QDF_STATUS_SUCCESS)
7177 		dp_warn("peer jitter_stats ctx alloc failed");
7178 
7179 	dp_set_peer_isolation(txrx_peer, false);
7180 
7181 	dp_peer_defrag_rx_tids_init(txrx_peer);
7182 
7183 	if (dp_peer_sawf_stats_ctx_alloc(soc, txrx_peer) != QDF_STATUS_SUCCESS)
7184 		dp_warn("peer sawf stats alloc failed");
7185 
7186 	dp_txrx_peer_attach_add(soc, peer, txrx_peer);
7187 
7188 	return QDF_STATUS_SUCCESS;
7189 }
7190 
7191 static inline
7192 void dp_txrx_peer_stats_clr(struct dp_txrx_peer *txrx_peer)
7193 {
7194 	if (!txrx_peer)
7195 		return;
7196 
7197 	txrx_peer->tx_failed = 0;
7198 	txrx_peer->comp_pkt.num = 0;
7199 	txrx_peer->comp_pkt.bytes = 0;
7200 	txrx_peer->to_stack.num = 0;
7201 	txrx_peer->to_stack.bytes = 0;
7202 
7203 	DP_STATS_CLR(txrx_peer);
7204 	dp_peer_delay_stats_ctx_clr(txrx_peer);
7205 	dp_peer_jitter_stats_ctx_clr(txrx_peer);
7206 }
7207 
7208 /*
7209  * dp_peer_create_wifi3() - attach txrx peer
7210  * @soc_hdl: Datapath soc handle
7211  * @vdev_id: id of vdev
7212  * @peer_mac_addr: Peer MAC address
7213  * @peer_type: link or MLD peer type
7214  *
7215  * Return: 0 on success, -1 on failure
7216  */
7217 static QDF_STATUS
7218 dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
7219 		     uint8_t *peer_mac_addr, enum cdp_peer_type peer_type)
7220 {
7221 	struct dp_peer *peer;
7222 	int i;
7223 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7224 	struct dp_pdev *pdev;
7225 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
7226 	struct dp_vdev *vdev = NULL;
7227 
7228 	if (!peer_mac_addr)
7229 		return QDF_STATUS_E_FAILURE;
7230 
7231 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
7232 
7233 	if (!vdev)
7234 		return QDF_STATUS_E_FAILURE;
7235 
7236 	pdev = vdev->pdev;
7237 	soc = pdev->soc;
7238 
7239 	/*
7240 	 * If a peer entry with given MAC address already exists,
7241 	 * reuse the peer and reset the state of peer.
7242 	 */
7243 	peer = dp_peer_can_reuse(vdev, peer_mac_addr, peer_type);
7244 
7245 	if (peer) {
7246 		qdf_atomic_init(&peer->is_default_route_set);
7247 		dp_peer_cleanup(vdev, peer);
7248 
7249 		dp_peer_vdev_list_add(soc, vdev, peer);
7250 		dp_peer_find_hash_add(soc, peer);
7251 
7252 		dp_peer_rx_tids_create(peer);
7253 		if (IS_MLO_DP_MLD_PEER(peer))
7254 			dp_mld_peer_init_link_peers_info(peer);
7255 
7256 		qdf_spin_lock_bh(&soc->ast_lock);
7257 		dp_peer_delete_ast_entries(soc, peer);
7258 		qdf_spin_unlock_bh(&soc->ast_lock);
7259 
7260 		if ((vdev->opmode == wlan_op_mode_sta) &&
7261 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
7262 		     QDF_MAC_ADDR_SIZE)) {
7263 			ast_type = CDP_TXRX_AST_TYPE_SELF;
7264 		}
7265 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
7266 
7267 		peer->valid = 1;
7268 		peer->is_tdls_peer = false;
7269 		dp_local_peer_id_alloc(pdev, peer);
7270 
7271 		qdf_spinlock_create(&peer->peer_info_lock);
7272 
7273 		DP_STATS_INIT(peer);
7274 
7275 		/*
7276 		 * In tx_monitor mode, filter may be set for unassociated peer
7277 		 * when unassociated peer get associated peer need to
7278 		 * update tx_cap_enabled flag to support peer filter.
7279 		 */
7280 		if (!IS_MLO_DP_MLD_PEER(peer)) {
7281 			dp_monitor_peer_tx_capture_filter_check(pdev, peer);
7282 			dp_monitor_peer_reset_stats(soc, peer);
7283 		}
7284 
7285 		if (peer->txrx_peer) {
7286 			dp_peer_rx_bufq_resources_init(peer->txrx_peer);
7287 			dp_txrx_peer_stats_clr(peer->txrx_peer);
7288 			dp_set_peer_isolation(peer->txrx_peer, false);
7289 			dp_wds_ext_peer_init(peer->txrx_peer);
7290 			dp_peer_hw_txrx_stats_init(soc, peer->txrx_peer);
7291 		}
7292 
7293 		dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
7294 
7295 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7296 		return QDF_STATUS_SUCCESS;
7297 	} else {
7298 		/*
7299 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
7300 		 * need to remove the AST entry which was earlier added as a WDS
7301 		 * entry.
7302 		 * If an AST entry exists, but no peer entry exists with a given
7303 		 * MAC addresses, we could deduce it as a WDS entry
7304 		 */
7305 		dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
7306 	}
7307 
7308 #ifdef notyet
7309 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
7310 		soc->mempool_ol_ath_peer);
7311 #else
7312 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
7313 #endif
7314 	wlan_minidump_log(peer,
7315 			  sizeof(*peer),
7316 			  soc->ctrl_psoc,
7317 			  WLAN_MD_DP_PEER, "dp_peer");
7318 	if (!peer) {
7319 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7320 		return QDF_STATUS_E_FAILURE; /* failure */
7321 	}
7322 
7323 	qdf_mem_zero(peer, sizeof(struct dp_peer));
7324 
7325 	/* store provided params */
7326 	peer->vdev = vdev;
7327 
7328 	/* initialize the peer_id */
7329 	peer->peer_id = HTT_INVALID_PEER;
7330 
7331 	qdf_mem_copy(
7332 		&peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE);
7333 
7334 	DP_PEER_SET_TYPE(peer, peer_type);
7335 	if (IS_MLO_DP_MLD_PEER(peer)) {
7336 		if (dp_txrx_peer_attach(soc, peer) !=
7337 				QDF_STATUS_SUCCESS)
7338 			goto fail; /* failure */
7339 
7340 		dp_mld_peer_init_link_peers_info(peer);
7341 	} else if (dp_monitor_peer_attach(soc, peer) !=
7342 				QDF_STATUS_SUCCESS)
7343 		dp_warn("peer monitor ctx alloc failed");
7344 
7345 	TAILQ_INIT(&peer->ast_entry_list);
7346 
7347 	/* get the vdev reference for new peer */
7348 	dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CHILD);
7349 
7350 	if ((vdev->opmode == wlan_op_mode_sta) &&
7351 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
7352 			 QDF_MAC_ADDR_SIZE)) {
7353 		ast_type = CDP_TXRX_AST_TYPE_SELF;
7354 	}
7355 	qdf_spinlock_create(&peer->peer_state_lock);
7356 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
7357 	qdf_spinlock_create(&peer->peer_info_lock);
7358 
7359 	/* reset the ast index to flowid table */
7360 	dp_peer_reset_flowq_map(peer);
7361 
7362 	qdf_atomic_init(&peer->ref_cnt);
7363 
7364 	for (i = 0; i < DP_MOD_ID_MAX; i++)
7365 		qdf_atomic_init(&peer->mod_refs[i]);
7366 
7367 	/* keep one reference for attach */
7368 	qdf_atomic_inc(&peer->ref_cnt);
7369 	qdf_atomic_inc(&peer->mod_refs[DP_MOD_ID_CONFIG]);
7370 
7371 	dp_peer_vdev_list_add(soc, vdev, peer);
7372 
7373 	/* TODO: See if hash based search is required */
7374 	dp_peer_find_hash_add(soc, peer);
7375 
7376 	/* Initialize the peer state */
7377 	peer->state = OL_TXRX_PEER_STATE_DISC;
7378 
7379 	dp_info("vdev %pK created peer %pK ("QDF_MAC_ADDR_FMT") ref_cnt: %d",
7380 		vdev, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
7381 		qdf_atomic_read(&peer->ref_cnt));
7382 	/*
7383 	 * For every peer MAp message search and set if bss_peer
7384 	 */
7385 	if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
7386 			QDF_MAC_ADDR_SIZE) == 0 &&
7387 			(wlan_op_mode_sta != vdev->opmode)) {
7388 		dp_info("vdev bss_peer!!");
7389 		peer->bss_peer = 1;
7390 		if (peer->txrx_peer)
7391 			peer->txrx_peer->bss_peer = 1;
7392 	}
7393 
7394 	if (wlan_op_mode_sta == vdev->opmode &&
7395 	    qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
7396 			QDF_MAC_ADDR_SIZE) == 0) {
7397 		peer->sta_self_peer = 1;
7398 	}
7399 
7400 	dp_peer_rx_tids_create(peer);
7401 
7402 	peer->valid = 1;
7403 	dp_local_peer_id_alloc(pdev, peer);
7404 	DP_STATS_INIT(peer);
7405 
7406 	if (dp_peer_sawf_ctx_alloc(soc, peer) != QDF_STATUS_SUCCESS)
7407 		dp_warn("peer sawf context alloc failed");
7408 
7409 	dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
7410 
7411 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7412 
7413 	return QDF_STATUS_SUCCESS;
7414 fail:
7415 	qdf_mem_free(peer);
7416 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7417 
7418 	return QDF_STATUS_E_FAILURE;
7419 }
7420 
7421 static QDF_STATUS dp_peer_legacy_setup(struct dp_soc *soc, struct dp_peer *peer)
7422 {
7423 	/* txrx_peer might exist already in peer reuse case */
7424 	if (peer->txrx_peer)
7425 		return QDF_STATUS_SUCCESS;
7426 
7427 	if (dp_txrx_peer_attach(soc, peer) !=
7428 				QDF_STATUS_SUCCESS) {
7429 		dp_err("peer txrx ctx alloc failed");
7430 		return QDF_STATUS_E_FAILURE;
7431 	}
7432 
7433 	return QDF_STATUS_SUCCESS;
7434 }
7435 
#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_peer_mlo_setup() - create/link the MLD peer for an MLO connection
 * @soc: Datapath soc handle
 * @peer: link peer being set up
 * @vdev_id: id of the vdev the link peer belongs to
 * @setup_info: MLO setup parameters from the control path; NULL (or a
 *	NULL mld_peer_mac) indicates a non-MLO connection and is a no-op
 *
 * For the first link peer the MLD peer is created here; subsequent link
 * peers attach to the existing MLD peer and share its rx_tid array.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */
QDF_STATUS dp_peer_mlo_setup(
			struct dp_soc *soc,
			struct dp_peer *peer,
			uint8_t vdev_id,
			struct cdp_peer_setup_info *setup_info)
{
	struct dp_peer *mld_peer = NULL;

	/* Non-MLO connection, do nothing */
	if (!setup_info || !setup_info->mld_peer_mac)
		return QDF_STATUS_SUCCESS;

	/* To do: remove this check if link/mld peer mac_addr allow to same */
	if (!qdf_mem_cmp(setup_info->mld_peer_mac, peer->mac_addr.raw,
			 QDF_MAC_ADDR_SIZE)) {
		dp_peer_err("Same mac addres for link/mld peer");
		return QDF_STATUS_E_FAILURE;
	}

	/* if this is the first link peer */
	if (setup_info->is_first_link)
		/* create MLD peer */
		dp_peer_create_wifi3((struct cdp_soc_t *)soc,
				     vdev_id,
				     setup_info->mld_peer_mac,
				     CDP_MLD_PEER_TYPE);

	peer->first_link = setup_info->is_first_link;
	peer->primary_link = setup_info->is_primary_link;
	mld_peer = dp_peer_find_hash_find(soc,
					  setup_info->mld_peer_mac,
					  0, vdev_id, DP_MOD_ID_CDP);
	if (mld_peer) {
		if (setup_info->is_first_link) {
			/* assign rx_tid to mld peer */
			mld_peer->rx_tid = peer->rx_tid;
			/* no cdp_peer_setup for MLD peer,
			 * set it for addba processing
			 */
			qdf_atomic_set(&mld_peer->is_default_route_set, 1);
		} else {
			/* free link peer original rx_tids mem */
			dp_peer_rx_tids_destroy(peer);
			/* assign mld peer rx_tid to link peer */
			peer->rx_tid = mld_peer->rx_tid;
		}

		if (setup_info->is_primary_link &&
		    !setup_info->is_first_link) {
			/*
			 * if first link is not the primary link,
			 * then need to change mld_peer->vdev as
			 * primary link dp_vdev is not same one
			 * during mld peer creation.
			 */

			/* release the ref to original dp_vdev */
			dp_vdev_unref_delete(soc, mld_peer->vdev,
					     DP_MOD_ID_CHILD);
			/*
			 * get the ref to new dp_vdev,
			 * increase dp_vdev ref_cnt
			 */
			mld_peer->vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
							       DP_MOD_ID_CHILD);
		}

		/* associate mld and link peer */
		dp_link_peer_add_mld_peer(peer, mld_peer);
		dp_mld_peer_add_link_peer(mld_peer, peer);

		mld_peer->txrx_peer->mld_peer = 1;
		/* drop the reference taken by dp_peer_find_hash_find() */
		dp_peer_unref_delete(mld_peer, DP_MOD_ID_CDP);
	} else {
		peer->mld_peer = NULL;
		dp_err("mld peer" QDF_MAC_ADDR_FMT "not found!",
		       QDF_MAC_ADDR_REF(setup_info->mld_peer_mac));
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
7519 
7520 /*
7521  * dp_mlo_peer_authorize() - authorize MLO peer
7522  * @soc: soc handle
7523  * @peer: pointer to link peer
7524  *
7525  * return void
7526  */
7527 static void dp_mlo_peer_authorize(struct dp_soc *soc,
7528 				  struct dp_peer *peer)
7529 {
7530 	int i;
7531 	struct dp_peer *link_peer = NULL;
7532 	struct dp_peer *mld_peer = peer->mld_peer;
7533 	struct dp_mld_link_peers link_peers_info;
7534 
7535 	if (!mld_peer)
7536 		return;
7537 
7538 	/* get link peers with reference */
7539 	dp_get_link_peers_ref_from_mld_peer(soc, mld_peer,
7540 					    &link_peers_info,
7541 					    DP_MOD_ID_CDP);
7542 
7543 	for (i = 0; i < link_peers_info.num_links; i++) {
7544 		link_peer = link_peers_info.link_peers[i];
7545 
7546 		if (!link_peer->authorize) {
7547 			dp_release_link_peers_ref(&link_peers_info,
7548 						  DP_MOD_ID_CDP);
7549 			mld_peer->authorize = false;
7550 			return;
7551 		}
7552 	}
7553 
7554 	/* if we are here all link peers are authorized,
7555 	 * authorize ml_peer also
7556 	 */
7557 	mld_peer->authorize = true;
7558 
7559 	/* release link peers reference */
7560 	dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
7561 }
7562 #endif
7563 
7564 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
7565 				  enum cdp_host_reo_dest_ring *reo_dest,
7566 				  bool *hash_based)
7567 {
7568 	struct dp_soc *soc;
7569 	struct dp_pdev *pdev;
7570 
7571 	pdev = vdev->pdev;
7572 	soc = pdev->soc;
7573 	/*
7574 	 * hash based steering is disabled for Radios which are offloaded
7575 	 * to NSS
7576 	 */
7577 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
7578 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
7579 
7580 	/*
7581 	 * Below line of code will ensure the proper reo_dest ring is chosen
7582 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
7583 	 */
7584 	*reo_dest = pdev->reo_dest;
7585 }
7586 
7587 #ifdef IPA_OFFLOAD
7588 /**
7589  * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
7590  * @vdev: Virtual device
7591  *
7592  * Return: true if the vdev is of subtype P2P
7593  *	   false if the vdev is of any other subtype
7594  */
7595 static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
7596 {
7597 	if (vdev->subtype == wlan_op_subtype_p2p_device ||
7598 	    vdev->subtype == wlan_op_subtype_p2p_cli ||
7599 	    vdev->subtype == wlan_op_subtype_p2p_go)
7600 		return true;
7601 
7602 	return false;
7603 }
7604 
7605 /*
7606  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
7607  * @vdev: Datapath VDEV handle
7608  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
7609  * @hash_based: pointer to hash value (enabled/disabled) to be populated
7610  *
7611  * If IPA is enabled in ini, for SAP mode, disable hash based
7612  * steering, use default reo_dst ring for RX. Use config values for other modes.
7613  * Return: None
7614  */
7615 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
7616 				       enum cdp_host_reo_dest_ring *reo_dest,
7617 				       bool *hash_based)
7618 {
7619 	struct dp_soc *soc;
7620 	struct dp_pdev *pdev;
7621 
7622 	pdev = vdev->pdev;
7623 	soc = pdev->soc;
7624 
7625 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
7626 
7627 	/* For P2P-GO interfaces we do not need to change the REO
7628 	 * configuration even if IPA config is enabled
7629 	 */
7630 	if (dp_is_vdev_subtype_p2p(vdev))
7631 		return;
7632 
7633 	/*
7634 	 * If IPA is enabled, disable hash-based flow steering and set
7635 	 * reo_dest_ring_4 as the REO ring to receive packets on.
7636 	 * IPA is configured to reap reo_dest_ring_4.
7637 	 *
7638 	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
7639 	 * value enum value is from 1 - 4.
7640 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
7641 	 */
7642 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
7643 		if (vdev->opmode == wlan_op_mode_ap) {
7644 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
7645 			*hash_based = 0;
7646 		} else if (vdev->opmode == wlan_op_mode_sta &&
7647 			   dp_ipa_is_mdm_platform()) {
7648 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
7649 		}
7650 	}
7651 }
7652 
#else

/*
 * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
 * @vdev: Datapath VDEV handle
 * @reo_dest: pointer to default reo_dest ring for vdev to be populated
 * @hash_based: pointer to hash value (enabled/disabled) to be populated
 *
 * Use system config values for hash based steering.
 * Return: None
 */
static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
				       enum cdp_host_reo_dest_ring *reo_dest,
				       bool *hash_based)
{
	/* without IPA offload the soc/pdev defaults apply as-is */
	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
}
#endif /* IPA_OFFLOAD */
7671 
7672 /*
7673  * dp_peer_setup_wifi3() - initialize the peer
7674  * @soc_hdl: soc handle object
7675  * @vdev_id : vdev_id of vdev object
7676  * @peer_mac: Peer's mac address
7677  * @peer_setup_info: peer setup info for MLO
7678  *
7679  * Return: QDF_STATUS
7680  */
7681 static QDF_STATUS
7682 dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
7683 		    uint8_t *peer_mac,
7684 		    struct cdp_peer_setup_info *setup_info)
7685 {
7686 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7687 	struct dp_pdev *pdev;
7688 	bool hash_based = 0;
7689 	enum cdp_host_reo_dest_ring reo_dest;
7690 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7691 	struct dp_vdev *vdev = NULL;
7692 	struct dp_peer *peer =
7693 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
7694 					       DP_MOD_ID_CDP);
7695 	struct dp_peer *mld_peer = NULL;
7696 	enum wlan_op_mode vdev_opmode;
7697 	uint8_t lmac_peer_id_msb = 0;
7698 
7699 	if (!peer)
7700 		return QDF_STATUS_E_FAILURE;
7701 
7702 	vdev = peer->vdev;
7703 	if (!vdev) {
7704 		status = QDF_STATUS_E_FAILURE;
7705 		goto fail;
7706 	}
7707 
7708 	/* save vdev related member in case vdev freed */
7709 	vdev_opmode = vdev->opmode;
7710 	pdev = vdev->pdev;
7711 	dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based);
7712 
7713 	dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u",
7714 		pdev->pdev_id, vdev->vdev_id,
7715 		vdev->opmode, hash_based, reo_dest);
7716 
7717 	/*
7718 	 * There are corner cases where the AD1 = AD2 = "VAPs address"
7719 	 * i.e both the devices have same MAC address. In these
7720 	 * cases we want such pkts to be processed in NULL Q handler
7721 	 * which is REO2TCL ring. for this reason we should
7722 	 * not setup reo_queues and default route for bss_peer.
7723 	 */
7724 	if (!IS_MLO_DP_MLD_PEER(peer))
7725 		dp_monitor_peer_tx_init(pdev, peer);
7726 
7727 	if (!setup_info)
7728 		if (dp_peer_legacy_setup(soc, peer) !=
7729 				QDF_STATUS_SUCCESS) {
7730 			status = QDF_STATUS_E_RESOURCES;
7731 			goto fail;
7732 		}
7733 
7734 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
7735 		status = QDF_STATUS_E_FAILURE;
7736 		goto fail;
7737 	}
7738 
7739 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
7740 		/* TODO: Check the destination ring number to be passed to FW */
7741 		soc->cdp_soc.ol_ops->peer_set_default_routing(
7742 				soc->ctrl_psoc,
7743 				peer->vdev->pdev->pdev_id,
7744 				peer->mac_addr.raw,
7745 				peer->vdev->vdev_id, hash_based, reo_dest,
7746 				lmac_peer_id_msb);
7747 	}
7748 
7749 	qdf_atomic_set(&peer->is_default_route_set, 1);
7750 
7751 	status = dp_peer_mlo_setup(soc, peer, vdev->vdev_id, setup_info);
7752 	if (QDF_IS_STATUS_ERROR(status)) {
7753 		dp_peer_err("peer mlo setup failed");
7754 		qdf_assert_always(0);
7755 	}
7756 
7757 	if (vdev_opmode != wlan_op_mode_monitor) {
7758 		/* In case of MLD peer, switch peer to mld peer and
7759 		 * do peer_rx_init.
7760 		 */
7761 		if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
7762 		    IS_MLO_DP_LINK_PEER(peer)) {
7763 			if (setup_info && setup_info->is_first_link) {
7764 				mld_peer = DP_GET_MLD_PEER_FROM_PEER(peer);
7765 				if (mld_peer)
7766 					dp_peer_rx_init(pdev, mld_peer);
7767 				else
7768 					dp_peer_err("MLD peer null. Primary link peer:%pK", peer);
7769 			}
7770 		} else {
7771 			dp_peer_rx_init(pdev, peer);
7772 		}
7773 	}
7774 
7775 	if (!IS_MLO_DP_MLD_PEER(peer))
7776 		dp_peer_ppdu_delayed_ba_init(peer);
7777 
7778 fail:
7779 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7780 	return status;
7781 }
7782 
7783 /*
7784  * dp_cp_peer_del_resp_handler - Handle the peer delete response
7785  * @soc_hdl: Datapath SOC handle
7786  * @vdev_id: id of virtual device object
7787  * @mac_addr: Mac address of the peer
7788  *
7789  * Return: QDF_STATUS
7790  */
7791 static QDF_STATUS dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl,
7792 					      uint8_t vdev_id,
7793 					      uint8_t *mac_addr)
7794 {
7795 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7796 	struct dp_ast_entry  *ast_entry = NULL;
7797 	txrx_ast_free_cb cb = NULL;
7798 	void *cookie;
7799 
7800 	if (soc->ast_offload_support)
7801 		return QDF_STATUS_E_INVAL;
7802 
7803 	qdf_spin_lock_bh(&soc->ast_lock);
7804 
7805 	ast_entry =
7806 		dp_peer_ast_hash_find_by_vdevid(soc, mac_addr,
7807 						vdev_id);
7808 
7809 	/* in case of qwrap we have multiple BSS peers
7810 	 * with same mac address
7811 	 *
7812 	 * AST entry for this mac address will be created
7813 	 * only for one peer hence it will be NULL here
7814 	 */
7815 	if ((!ast_entry || !ast_entry->delete_in_progress) ||
7816 	    (ast_entry->peer_id != HTT_INVALID_PEER)) {
7817 		qdf_spin_unlock_bh(&soc->ast_lock);
7818 		return QDF_STATUS_E_FAILURE;
7819 	}
7820 
7821 	if (ast_entry->is_mapped)
7822 		soc->ast_table[ast_entry->ast_idx] = NULL;
7823 
7824 	DP_STATS_INC(soc, ast.deleted, 1);
7825 	dp_peer_ast_hash_remove(soc, ast_entry);
7826 
7827 	cb = ast_entry->callback;
7828 	cookie = ast_entry->cookie;
7829 	ast_entry->callback = NULL;
7830 	ast_entry->cookie = NULL;
7831 
7832 	soc->num_ast_entries--;
7833 	qdf_spin_unlock_bh(&soc->ast_lock);
7834 
7835 	if (cb) {
7836 		cb(soc->ctrl_psoc,
7837 		   dp_soc_to_cdp_soc(soc),
7838 		   cookie,
7839 		   CDP_TXRX_AST_DELETED);
7840 	}
7841 	qdf_mem_free(ast_entry);
7842 
7843 	return QDF_STATUS_SUCCESS;
7844 }
7845 
7846 /*
7847  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
7848  * @txrx_soc: cdp soc handle
7849  * @ac: Access category
7850  * @value: timeout value in millisec
7851  *
7852  * Return: void
7853  */
7854 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
7855 				    uint8_t ac, uint32_t value)
7856 {
7857 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
7858 
7859 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
7860 }
7861 
7862 /*
7863  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
7864  * @txrx_soc: cdp soc handle
7865  * @ac: access category
7866  * @value: timeout value in millisec
7867  *
7868  * Return: void
7869  */
7870 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
7871 				    uint8_t ac, uint32_t *value)
7872 {
7873 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
7874 
7875 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
7876 }
7877 
7878 /*
7879  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
7880  * @txrx_soc: cdp soc handle
7881  * @pdev_id: id of physical device object
7882  * @val: reo destination ring index (1 - 4)
7883  *
7884  * Return: QDF_STATUS
7885  */
7886 static QDF_STATUS
7887 dp_set_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id,
7888 		     enum cdp_host_reo_dest_ring val)
7889 {
7890 	struct dp_pdev *pdev =
7891 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
7892 						   pdev_id);
7893 
7894 	if (pdev) {
7895 		pdev->reo_dest = val;
7896 		return QDF_STATUS_SUCCESS;
7897 	}
7898 
7899 	return QDF_STATUS_E_FAILURE;
7900 }
7901 
7902 /*
7903  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
7904  * @txrx_soc: cdp soc handle
7905  * @pdev_id: id of physical device object
7906  *
7907  * Return: reo destination ring index
7908  */
7909 static enum cdp_host_reo_dest_ring
7910 dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id)
7911 {
7912 	struct dp_pdev *pdev =
7913 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
7914 						   pdev_id);
7915 
7916 	if (pdev)
7917 		return pdev->reo_dest;
7918 	else
7919 		return cdp_host_reo_dest_ring_unknown;
7920 }
7921 
7922 #ifdef WLAN_SUPPORT_SCS
7923 /*
7924  * dp_enable_scs_params - Enable/Disable SCS procedures
7925  * @soc - Datapath soc handle
7926  * @peer_mac - STA Mac address
7927  * @vdev_id - ID of the vdev handle
7928  * @active - Flag to set SCS active/inactive
7929  * return type - QDF_STATUS - Success/Invalid
7930  */
7931 static QDF_STATUS
7932 dp_enable_scs_params(struct cdp_soc_t *soc_hdl, struct qdf_mac_addr
7933 		     *peer_mac,
7934 		     uint8_t vdev_id,
7935 		     bool is_active)
7936 {
7937 	struct dp_peer *peer;
7938 	QDF_STATUS status = QDF_STATUS_E_INVAL;
7939 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7940 
7941 	peer = dp_peer_find_hash_find(soc, peer_mac->bytes, 0, vdev_id,
7942 				      DP_MOD_ID_CDP);
7943 
7944 	if (!peer) {
7945 		dp_err("Peer is NULL!");
7946 		goto fail;
7947 	}
7948 
7949 	peer->scs_is_active = is_active;
7950 	status = QDF_STATUS_SUCCESS;
7951 
7952 fail:
7953 	if (peer)
7954 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7955 	return status;
7956 }
7957 
7958 /*
7959  * @brief dp_copy_scs_params - SCS Parameters sent by STA
7960  * is copied from the cdp layer to the dp layer
7961  * These parameters are then used by the peer
7962  * for traffic classification.
7963  *
7964  * @param peer - peer struct
7965  * @param scs_params - cdp layer params
7966  * @idx - SCS_entry index obtained from the
7967  * node database with a given SCSID
7968  * @return void
7969  */
7970 void
7971 dp_copy_scs_params(struct dp_peer *peer,
7972 		   struct cdp_scs_params *scs_params,
7973 		   uint8_t idx)
7974 {
7975 	uint8_t tidx = 0;
7976 	uint8_t tclas_elem;
7977 
7978 	peer->scs[idx].scsid = scs_params->scsid;
7979 	peer->scs[idx].access_priority =
7980 		scs_params->access_priority;
7981 	peer->scs[idx].tclas_elements =
7982 		scs_params->tclas_elements;
7983 	peer->scs[idx].tclas_process =
7984 		scs_params->tclas_process;
7985 
7986 	tclas_elem = peer->scs[idx].tclas_elements;
7987 
7988 	while (tidx < tclas_elem) {
7989 		qdf_mem_copy(&peer->scs[idx].tclas[tidx],
7990 			     &scs_params->tclas[tidx],
7991 			     sizeof(struct cdp_tclas_tuple));
7992 		tidx++;
7993 	}
7994 }
7995 
7996 /*
7997  * @brief dp_record_scs_params() - Copying the SCS params to a
7998  * peer based database.
7999  *
8000  * @soc - Datapath soc handle
8001  * @peer_mac - STA Mac address
8002  * @vdev_id - ID of the vdev handle
8003  * @scs_params - Structure having SCS parameters obtained
8004  * from handshake
8005  * @idx - SCS_entry index obtained from the
8006  * node database with a given SCSID
8007  * @scs_sessions - Total # of SCS sessions active
8008  *
8009  * @details
8010  * SCS parameters sent by the STA in
8011  * the SCS Request to the AP. The AP makes a note of these
8012  * parameters while sending the MSDUs to the STA, to
8013  * send the downlink traffic with correct User priority.
8014  *
8015  * return type - QDF_STATUS - Success/Invalid
8016  */
8017 static QDF_STATUS
8018 dp_record_scs_params(struct cdp_soc_t *soc_hdl, struct qdf_mac_addr
8019 		     *peer_mac,
8020 		     uint8_t vdev_id,
8021 		     struct cdp_scs_params *scs_params,
8022 		     uint8_t idx,
8023 		     uint8_t scs_sessions)
8024 {
8025 	struct dp_peer *peer;
8026 	QDF_STATUS status = QDF_STATUS_E_INVAL;
8027 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8028 
8029 	peer = dp_peer_find_hash_find(soc, peer_mac->bytes, 0, vdev_id,
8030 				      DP_MOD_ID_CDP);
8031 
8032 	if (!peer) {
8033 		dp_err("Peer is NULL!");
8034 		goto fail;
8035 	}
8036 
8037 	if (idx >= IEEE80211_SCS_MAX_NO_OF_ELEM)
8038 		goto fail;
8039 
8040 	/* SCS procedure for the peer is activated
8041 	 * as soon as we get this information from
8042 	 * the control path, unless explicitly disabled.
8043 	 */
8044 	peer->scs_is_active = 1;
8045 	dp_copy_scs_params(peer, scs_params, idx);
8046 	status = QDF_STATUS_SUCCESS;
8047 	peer->no_of_scs_sessions = scs_sessions;
8048 
8049 fail:
8050 	if (peer)
8051 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8052 	return status;
8053 }
8054 #endif
8055 
8056 #ifdef WLAN_SUPPORT_MSCS
8057 /*
8058  * dp_record_mscs_params - MSCS parameters sent by the STA in
8059  * the MSCS Request to the AP. The AP makes a note of these
8060  * parameters while comparing the MSDUs sent by the STA, to
8061  * send the downlink traffic with correct User priority.
8062  * @soc - Datapath soc handle
8063  * @peer_mac - STA Mac address
8064  * @vdev_id - ID of the vdev handle
8065  * @mscs_params - Structure having MSCS parameters obtained
8066  * from handshake
8067  * @active - Flag to set MSCS active/inactive
8068  * return type - QDF_STATUS - Success/Invalid
8069  */
8070 static QDF_STATUS
8071 dp_record_mscs_params(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
8072 		      uint8_t vdev_id, struct cdp_mscs_params *mscs_params,
8073 		      bool active)
8074 {
8075 	struct dp_peer *peer;
8076 	QDF_STATUS status = QDF_STATUS_E_INVAL;
8077 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8078 
8079 	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
8080 				      DP_MOD_ID_CDP);
8081 
8082 	if (!peer) {
8083 		dp_err("Peer is NULL!");
8084 		goto fail;
8085 	}
8086 	if (!active) {
8087 		dp_info("MSCS Procedure is terminated");
8088 		peer->mscs_active = active;
8089 		goto fail;
8090 	}
8091 
8092 	if (mscs_params->classifier_type == IEEE80211_TCLAS_MASK_CLA_TYPE_4) {
8093 		/* Populate entries inside IPV4 database first */
8094 		peer->mscs_ipv4_parameter.user_priority_bitmap =
8095 			mscs_params->user_pri_bitmap;
8096 		peer->mscs_ipv4_parameter.user_priority_limit =
8097 			mscs_params->user_pri_limit;
8098 		peer->mscs_ipv4_parameter.classifier_mask =
8099 			mscs_params->classifier_mask;
8100 
8101 		/* Populate entries inside IPV6 database */
8102 		peer->mscs_ipv6_parameter.user_priority_bitmap =
8103 			mscs_params->user_pri_bitmap;
8104 		peer->mscs_ipv6_parameter.user_priority_limit =
8105 			mscs_params->user_pri_limit;
8106 		peer->mscs_ipv6_parameter.classifier_mask =
8107 			mscs_params->classifier_mask;
8108 		peer->mscs_active = 1;
8109 		dp_info("\n\tMSCS Procedure request based parameters for "QDF_MAC_ADDR_FMT"\n"
8110 			"\tClassifier_type = %d\tUser priority bitmap = %x\n"
8111 			"\tUser priority limit = %x\tClassifier mask = %x",
8112 			QDF_MAC_ADDR_REF(peer_mac),
8113 			mscs_params->classifier_type,
8114 			peer->mscs_ipv4_parameter.user_priority_bitmap,
8115 			peer->mscs_ipv4_parameter.user_priority_limit,
8116 			peer->mscs_ipv4_parameter.classifier_mask);
8117 	}
8118 
8119 	status = QDF_STATUS_SUCCESS;
8120 fail:
8121 	if (peer)
8122 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8123 	return status;
8124 }
8125 #endif
8126 
8127 /*
8128  * dp_get_sec_type() - Get the security type
8129  * @soc: soc handle
8130  * @vdev_id: id of dp handle
8131  * @peer_mac: mac of datapath PEER handle
8132  * @sec_idx:    Security id (mcast, ucast)
8133  *
8134  * return sec_type: Security type
8135  */
8136 static int dp_get_sec_type(struct cdp_soc_t *soc, uint8_t vdev_id,
8137 			   uint8_t *peer_mac, uint8_t sec_idx)
8138 {
8139 	int sec_type = 0;
8140 	struct dp_peer *peer =
8141 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
8142 						       peer_mac, 0, vdev_id,
8143 						       DP_MOD_ID_CDP);
8144 
8145 	if (!peer) {
8146 		dp_cdp_err("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
8147 		return sec_type;
8148 	}
8149 
8150 	if (!peer->txrx_peer) {
8151 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8152 		dp_peer_debug("%pK: txrx peer is NULL!\n", soc);
8153 		return sec_type;
8154 	}
8155 	sec_type = peer->txrx_peer->security[sec_idx].sec_type;
8156 
8157 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8158 	return sec_type;
8159 }
8160 
8161 /*
8162  * dp_peer_authorize() - authorize txrx peer
8163  * @soc: soc handle
8164  * @vdev_id: id of dp handle
8165  * @peer_mac: mac of datapath PEER handle
8166  * @authorize
8167  *
8168  */
8169 static QDF_STATUS
8170 dp_peer_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8171 		  uint8_t *peer_mac, uint32_t authorize)
8172 {
8173 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8174 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8175 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac,
8176 							      0, vdev_id,
8177 							      DP_MOD_ID_CDP);
8178 
8179 	if (!peer) {
8180 		dp_cdp_debug("%pK: Peer is NULL!\n", soc);
8181 		status = QDF_STATUS_E_FAILURE;
8182 	} else {
8183 		peer->authorize = authorize ? 1 : 0;
8184 		if (peer->txrx_peer)
8185 			peer->txrx_peer->authorize = peer->authorize;
8186 
8187 		if (!peer->authorize)
8188 			dp_peer_flush_frags(soc_hdl, vdev_id, peer_mac);
8189 
8190 		dp_mlo_peer_authorize(soc, peer);
8191 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8192 	}
8193 
8194 	return status;
8195 }
8196 
8197 /*
8198  * dp_peer_get_authorize() - get peer authorize status
8199  * @soc: soc handle
8200  * @vdev_id: id of dp handle
8201  * @peer_mac: mac of datapath PEER handle
8202  *
 * Return: true if peer is authorized, false otherwise
8204  */
8205 static bool
8206 dp_peer_get_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8207 		      uint8_t *peer_mac)
8208 {
8209 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8210 	bool authorize = false;
8211 	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
8212 						      0, vdev_id,
8213 						      DP_MOD_ID_CDP);
8214 
8215 	if (!peer) {
8216 		dp_cdp_debug("%pK: Peer is NULL!\n", soc);
8217 		return authorize;
8218 	}
8219 
8220 	authorize = peer->authorize;
8221 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8222 
8223 	return authorize;
8224 }
8225 
8226 /**
8227  * dp_vdev_unref_delete() - check and process vdev delete
8228  * @soc : DP specific soc pointer
8229  * @vdev: DP specific vdev pointer
8230  * @mod_id: module id
8231  *
8232  */
void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev,
			  enum dp_mod_id mod_id)
{
	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
	void *vdev_delete_context = NULL;
	uint8_t vdev_id = vdev->vdev_id;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_vdev *tmp_vdev = NULL;
	uint8_t found = 0;

	/* The per-module reference count must never go negative */
	QDF_ASSERT(qdf_atomic_dec_return(&vdev->mod_refs[mod_id]) >= 0);

	/* Return if this is not the last reference*/
	if (!qdf_atomic_dec_and_test(&vdev->ref_cnt))
		return;

	/*
	 * This should be set as last reference need to released
	 * after cdp_vdev_detach() is called
	 *
	 * if this assert is hit there is a ref count issue
	 */
	QDF_ASSERT(vdev->delete.pending);

	/* Cache the delete callback/context before the vdev memory
	 * is freed below.
	 */
	vdev_delete_cb = vdev->delete.callback;
	vdev_delete_context = vdev->delete.context;

	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")- its last peer is done",
		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));

	/* Monitor vdevs follow a dedicated teardown path */
	if (wlan_op_mode_monitor == vdev->opmode) {
		dp_monitor_vdev_delete(soc, vdev);
		goto free_vdev;
	}

	/* all peers are gone, go ahead and delete it */
	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
			FLOW_TYPE_VDEV, vdev_id);
	dp_tx_vdev_detach(vdev);
	dp_monitor_vdev_detach(vdev);

free_vdev:
	qdf_spinlock_destroy(&vdev->peer_list_lock);

	/* Unlink the vdev from the soc's inactive list, if it is there */
	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
	TAILQ_FOREACH(tmp_vdev, &soc->inactive_vdev_list,
		      inactive_list_elem) {
		if (tmp_vdev == vdev) {
			found = 1;
			break;
		}
	}
	if (found)
		TAILQ_REMOVE(&soc->inactive_vdev_list, vdev,
			     inactive_list_elem);
	/* delete this peer from the list */
	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);

	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")",
		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
	wlan_minidump_remove(vdev, sizeof(*vdev), soc->ctrl_psoc,
			     WLAN_MD_DP_VDEV, "dp_vdev");
	qdf_mem_free(vdev);
	vdev = NULL;

	/* Notify the registered owner that vdev deletion is complete */
	if (vdev_delete_cb)
		vdev_delete_cb(vdev_delete_context);
}
8301 
8302 qdf_export_symbol(dp_vdev_unref_delete);
8303 
8304 /*
8305  * dp_peer_unref_delete() - unref and delete peer
8306  * @peer_handle:    Datapath peer handle
8307  * @mod_id:         ID of module releasing reference
8308  *
8309  */
void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id mod_id)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	uint16_t peer_id;
	struct dp_peer *tmp_peer;
	bool found = false;

	/* Per-module ref tracking only applies above DP_MOD_ID_RX;
	 * the count must never go negative.
	 */
	if (mod_id > DP_MOD_ID_RX)
		QDF_ASSERT(qdf_atomic_dec_return(&peer->mod_refs[mod_id]) >= 0);

	/*
	 * Hold the lock all the way from checking if the peer ref count
	 * is zero until the peer references are removed from the hash
	 * table and vdev list (if the peer ref count is zero).
	 * This protects against a new HL tx operation starting to use the
	 * peer object just after this function concludes it's done being used.
	 * Furthermore, the lock needs to be held while checking whether the
	 * vdev's list of peers is empty, to make sure that list is not modified
	 * concurrently with the empty check.
	 */
	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
		peer_id = peer->peer_id;

		/*
		 * Make sure that the reference to the peer in
		 * peer object map is removed
		 */
		QDF_ASSERT(peer_id == HTT_INVALID_PEER);

		dp_peer_debug("Deleting peer %pK ("QDF_MAC_ADDR_FMT")", peer,
			      QDF_MAC_ADDR_REF(peer->mac_addr.raw));

		dp_peer_sawf_ctx_free(soc, peer);

		wlan_minidump_remove(peer, sizeof(*peer), soc->ctrl_psoc,
				     WLAN_MD_DP_PEER, "dp_peer");

		/* Unlink the peer from the soc inactive list under the
		 * list lock, if it was parked there by the delete path.
		 */
		qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
		TAILQ_FOREACH(tmp_peer, &soc->inactive_peer_list,
			      inactive_list_elem) {
			if (tmp_peer == peer) {
				found = 1;
				break;
			}
		}
		if (found)
			TAILQ_REMOVE(&soc->inactive_peer_list, peer,
				     inactive_list_elem);
		/* delete this peer from the list */
		qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
		DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
		dp_peer_update_state(soc, peer, DP_PEER_STATE_FREED);

		/* cleanup the peer data */
		dp_peer_cleanup(vdev, peer);

		/* MLD peers have no per-link monitor context to detach */
		if (!IS_MLO_DP_MLD_PEER(peer))
			dp_monitor_peer_detach(soc, peer);

		qdf_spinlock_destroy(&peer->peer_state_lock);

		dp_txrx_peer_detach(soc, peer);
		qdf_mem_free(peer);

		/*
		 * Decrement ref count taken at peer create
		 */
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CHILD);
	}
}
8382 
8383 qdf_export_symbol(dp_peer_unref_delete);
8384 
8385 /*
8386  * dp_txrx_peer_unref_delete() - unref and delete peer
8387  * @handle: Datapath txrx ref handle
8388  * @mod_id: Module ID of the caller
8389  *
8390  */
8391 void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle,
8392 			       enum dp_mod_id mod_id)
8393 {
8394 	dp_peer_unref_delete((struct dp_peer *)handle, mod_id);
8395 }
8396 
8397 qdf_export_symbol(dp_txrx_peer_unref_delete);
8398 
8399 /*
 * dp_peer_delete_wifi3() – Delete txrx peer
8401  * @soc_hdl: soc handle
8402  * @vdev_id: id of dp handle
8403  * @peer_mac: mac of datapath PEER handle
8404  * @bitmap: bitmap indicating special handling of request.
8405  *
8406  */
static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
				       uint8_t vdev_id,
				       uint8_t *peer_mac, uint32_t bitmap)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
						      0, vdev_id,
						      DP_MOD_ID_CDP);
	struct dp_vdev *vdev = NULL;

	/* Peer can be null for monitor vap mac address */
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid peer\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	/* A peer already marked invalid is mid-deletion; do not tear
	 * it down a second time.
	 */
	if (!peer->valid) {
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
		dp_err("Invalid peer: "QDF_MAC_ADDR_FMT,
			QDF_MAC_ADDR_REF(peer_mac));
		return QDF_STATUS_E_ALREADY;
	}

	vdev = peer->vdev;

	if (!vdev) {
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
		return QDF_STATUS_E_FAILURE;
	}

	/* Mark invalid first so concurrent lookups stop using this peer */
	peer->valid = 0;

	dp_init_info("%pK: peer %pK (" QDF_MAC_ADDR_FMT ")",
		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw));

	dp_local_peer_id_free(peer->vdev->pdev, peer);

	/* Drop all rx packets before deleting peer */
	dp_clear_peer_internal(soc, peer);

	qdf_spinlock_destroy(&peer->peer_info_lock);
	dp_peer_multipass_list_remove(peer);

	/* remove the reference to the peer from the hash table */
	dp_peer_find_hash_remove(soc, peer);

	dp_peer_vdev_list_remove(soc, vdev, peer);

	dp_peer_mlo_delete(peer);

	/* Park the peer on the inactive list until the final unref */
	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
	TAILQ_INSERT_TAIL(&soc->inactive_peer_list, peer,
			  inactive_list_elem);
	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);

	/*
	 * Remove the reference added during peer_attach.
	 * The peer will still be left allocated until the
	 * PEER_UNMAP message arrives to remove the other
	 * reference, added by the PEER_MAP message.
	 */
	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
	/*
	 * Remove the reference taken above
	 */
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}
8477 
8478 #ifdef DP_RX_UDP_OVER_PEER_ROAM
8479 static QDF_STATUS dp_update_roaming_peer_wifi3(struct cdp_soc_t *soc_hdl,
8480 					       uint8_t vdev_id,
8481 					       uint8_t *peer_mac,
8482 					       uint32_t auth_status)
8483 {
8484 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8485 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8486 						     DP_MOD_ID_CDP);
8487 	if (!vdev)
8488 		return QDF_STATUS_E_FAILURE;
8489 
8490 	vdev->roaming_peer_status = auth_status;
8491 	qdf_mem_copy(vdev->roaming_peer_mac.raw, peer_mac,
8492 		     QDF_MAC_ADDR_SIZE);
8493 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8494 
8495 	return QDF_STATUS_SUCCESS;
8496 }
8497 #endif
8498 /*
 * dp_get_vdev_mac_addr_wifi3() – Get vdev MAC address
8500  * @soc_hdl: Datapath soc handle
8501  * @vdev_id: virtual interface id
8502  *
8503  * Return: MAC address on success, NULL on failure.
8504  *
8505  */
8506 static uint8_t *dp_get_vdev_mac_addr_wifi3(struct cdp_soc_t *soc_hdl,
8507 					   uint8_t vdev_id)
8508 {
8509 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8510 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8511 						     DP_MOD_ID_CDP);
8512 	uint8_t *mac = NULL;
8513 
8514 	if (!vdev)
8515 		return NULL;
8516 
8517 	mac = vdev->mac_addr.raw;
8518 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8519 
8520 	return mac;
8521 }
8522 
8523 /*
 * dp_vdev_set_wds() - Enable WDS on the vdev
8525  * @soc: DP soc handle
8526  * @vdev_id: id of DP VDEV handle
8527  * @val: value
8528  *
8529  * Return: none
8530  */
8531 static int dp_vdev_set_wds(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8532 			   uint32_t val)
8533 {
8534 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8535 	struct dp_vdev *vdev =
8536 		dp_vdev_get_ref_by_id((struct dp_soc *)soc, vdev_id,
8537 				      DP_MOD_ID_CDP);
8538 
8539 	if (!vdev)
8540 		return QDF_STATUS_E_FAILURE;
8541 
8542 	vdev->wds_enabled = val;
8543 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8544 
8545 	return QDF_STATUS_SUCCESS;
8546 }
8547 
8548 static int dp_get_opmode(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
8549 {
8550 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8551 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8552 						     DP_MOD_ID_CDP);
8553 	int opmode;
8554 
8555 	if (!vdev) {
8556 		dp_err("vdev for id %d is NULL", vdev_id);
8557 		return -EINVAL;
8558 	}
8559 	opmode = vdev->opmode;
8560 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8561 
8562 	return opmode;
8563 }
8564 
8565 /**
8566  * dp_get_os_rx_handles_from_vdev_wifi3() - Get os rx handles for a vdev
8567  * @soc_hdl: ol_txrx_soc_handle handle
8568  * @vdev_id: vdev id for which os rx handles are needed
8569  * @stack_fn_p: pointer to stack function pointer
8570  * @osif_handle_p: pointer to ol_osif_vdev_handle
8571  *
8572  * Return: void
8573  */
8574 static
8575 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_soc_t *soc_hdl,
8576 					  uint8_t vdev_id,
8577 					  ol_txrx_rx_fp *stack_fn_p,
8578 					  ol_osif_vdev_handle *osif_vdev_p)
8579 {
8580 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8581 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8582 						     DP_MOD_ID_CDP);
8583 
8584 	if (qdf_unlikely(!vdev)) {
8585 		*stack_fn_p = NULL;
8586 		*osif_vdev_p = NULL;
8587 		return;
8588 	}
8589 	*stack_fn_p = vdev->osif_rx_stack;
8590 	*osif_vdev_p = vdev->osif_vdev;
8591 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8592 }
8593 
8594 /**
8595  * dp_get_ctrl_pdev_from_vdev() - Get control pdev of vdev
8596  * @soc_hdl: datapath soc handle
8597  * @vdev_id: virtual device/interface id
8598  *
8599  * Return: Handle to control pdev
8600  */
8601 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(
8602 						struct cdp_soc_t *soc_hdl,
8603 						uint8_t vdev_id)
8604 {
8605 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8606 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8607 						     DP_MOD_ID_CDP);
8608 	struct dp_pdev *pdev;
8609 
8610 	if (!vdev)
8611 		return NULL;
8612 
8613 	pdev = vdev->pdev;
8614 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8615 	return pdev ? (struct cdp_cfg *)pdev->wlan_cfg_ctx : NULL;
8616 }
8617 
8618 /**
8619  * dp_get_tx_pending() - read pending tx
8620  * @pdev_handle: Datapath PDEV handle
8621  *
8622  * Return: outstanding tx
8623  */
8624 static int32_t dp_get_tx_pending(struct cdp_pdev *pdev_handle)
8625 {
8626 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8627 
8628 	return qdf_atomic_read(&pdev->num_tx_outstanding);
8629 }
8630 
8631 /**
8632  * dp_get_peer_mac_from_peer_id() - get peer mac
8633  * @pdev_handle: Datapath PDEV handle
8634  * @peer_id: Peer ID
8635  * @peer_mac: MAC addr of PEER
8636  *
8637  * Return: QDF_STATUS
8638  */
8639 static QDF_STATUS dp_get_peer_mac_from_peer_id(struct cdp_soc_t *soc,
8640 					       uint32_t peer_id,
8641 					       uint8_t *peer_mac)
8642 {
8643 	struct dp_peer *peer;
8644 
8645 	if (soc && peer_mac) {
8646 		peer = dp_peer_get_ref_by_id((struct dp_soc *)soc,
8647 					     (uint16_t)peer_id,
8648 					     DP_MOD_ID_CDP);
8649 		if (peer) {
8650 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
8651 				     QDF_MAC_ADDR_SIZE);
8652 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8653 			return QDF_STATUS_SUCCESS;
8654 		}
8655 	}
8656 
8657 	return QDF_STATUS_E_FAILURE;
8658 }
8659 
8660 #ifdef MESH_MODE_SUPPORT
8661 static
8662 void dp_vdev_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
8663 {
8664 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
8665 
8666 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
8667 	vdev->mesh_vdev = val;
8668 	if (val)
8669 		vdev->skip_sw_tid_classification |=
8670 			DP_TX_MESH_ENABLED;
8671 	else
8672 		vdev->skip_sw_tid_classification &=
8673 			~DP_TX_MESH_ENABLED;
8674 }
8675 
8676 /*
8677  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
8678  * @vdev_hdl: virtual device object
8679  * @val: value to be set
8680  *
8681  * Return: void
8682  */
8683 static
8684 void dp_vdev_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
8685 {
8686 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
8687 
8688 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
8689 	vdev->mesh_rx_filter = val;
8690 }
8691 #endif
8692 
8693 /*
8694  * dp_vdev_set_hlos_tid_override() - to set hlos tid override
8695  * @vdev_hdl: virtual device object
8696  * @val: value to be set
8697  *
8698  * Return: void
8699  */
8700 static
8701 void dp_vdev_set_hlos_tid_override(struct dp_vdev *vdev, uint32_t val)
8702 {
8703 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
8704 	if (val)
8705 		vdev->skip_sw_tid_classification |=
8706 			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
8707 	else
8708 		vdev->skip_sw_tid_classification &=
8709 			~DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
8710 }
8711 
8712 /*
8713  * dp_vdev_get_hlos_tid_override() - to get hlos tid override flag
8714  * @vdev_hdl: virtual device object
8715  * @val: value to be set
8716  *
8717  * Return: 1 if this flag is set
8718  */
8719 static
8720 uint8_t dp_vdev_get_hlos_tid_override(struct cdp_vdev *vdev_hdl)
8721 {
8722 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
8723 
8724 	return !!(vdev->skip_sw_tid_classification &
8725 			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED);
8726 }
8727 
8728 #ifdef VDEV_PEER_PROTOCOL_COUNT
8729 static void dp_enable_vdev_peer_protocol_count(struct cdp_soc_t *soc_hdl,
8730 					       int8_t vdev_id,
8731 					       bool enable)
8732 {
8733 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8734 	struct dp_vdev *vdev;
8735 
8736 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
8737 	if (!vdev)
8738 		return;
8739 
8740 	dp_info("enable %d vdev_id %d", enable, vdev_id);
8741 	vdev->peer_protocol_count_track = enable;
8742 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8743 }
8744 
8745 static void dp_enable_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
8746 						   int8_t vdev_id,
8747 						   int drop_mask)
8748 {
8749 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8750 	struct dp_vdev *vdev;
8751 
8752 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
8753 	if (!vdev)
8754 		return;
8755 
8756 	dp_info("drop_mask %d vdev_id %d", drop_mask, vdev_id);
8757 	vdev->peer_protocol_count_dropmask = drop_mask;
8758 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8759 }
8760 
8761 static int dp_is_vdev_peer_protocol_count_enabled(struct cdp_soc_t *soc_hdl,
8762 						  int8_t vdev_id)
8763 {
8764 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8765 	struct dp_vdev *vdev;
8766 	int peer_protocol_count_track;
8767 
8768 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
8769 	if (!vdev)
8770 		return 0;
8771 
8772 	dp_info("enable %d vdev_id %d", vdev->peer_protocol_count_track,
8773 		vdev_id);
8774 	peer_protocol_count_track =
8775 		vdev->peer_protocol_count_track;
8776 
8777 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8778 	return peer_protocol_count_track;
8779 }
8780 
8781 static int dp_get_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
8782 					       int8_t vdev_id)
8783 {
8784 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8785 	struct dp_vdev *vdev;
8786 	int peer_protocol_count_dropmask;
8787 
8788 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
8789 	if (!vdev)
8790 		return 0;
8791 
8792 	dp_info("drop_mask %d vdev_id %d", vdev->peer_protocol_count_dropmask,
8793 		vdev_id);
8794 	peer_protocol_count_dropmask =
8795 		vdev->peer_protocol_count_dropmask;
8796 
8797 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8798 	return peer_protocol_count_dropmask;
8799 }
8800 
8801 #endif
8802 
8803 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data)
8804 {
8805 	uint8_t pdev_count;
8806 
8807 	for (pdev_count = 0; pdev_count < MAX_PDEV_CNT; pdev_count++) {
8808 		if (soc->pdev_list[pdev_count] &&
8809 		    soc->pdev_list[pdev_count] == data)
8810 			return true;
8811 	}
8812 	return false;
8813 }
8814 
8815 /**
8816  * dp_rx_bar_stats_cb(): BAR received stats callback
8817  * @soc: SOC handle
8818  * @cb_ctxt: Call back context
8819  * @reo_status: Reo status
8820  *
8821  * return: void
8822  */
8823 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
8824 	union hal_reo_status *reo_status)
8825 {
8826 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
8827 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
8828 
8829 	if (!dp_check_pdev_exists(soc, pdev)) {
8830 		dp_err_rl("pdev doesn't exist");
8831 		return;
8832 	}
8833 
8834 	if (!qdf_atomic_read(&soc->cmn_init_done))
8835 		return;
8836 
8837 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
8838 		DP_PRINT_STATS("REO stats failure %d",
8839 			       queue_status->header.status);
8840 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
8841 		return;
8842 	}
8843 
8844 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
8845 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
8846 
8847 }
8848 
8849 /**
8850  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
8851  * @vdev: DP VDEV handle
8852  *
8853  * return: void
8854  */
8855 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
8856 			     struct cdp_vdev_stats *vdev_stats)
8857 {
8858 	struct dp_soc *soc = NULL;
8859 
8860 	if (!vdev || !vdev->pdev)
8861 		return;
8862 
8863 	soc = vdev->pdev->soc;
8864 
8865 	dp_update_vdev_ingress_stats(vdev);
8866 
8867 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
8868 
8869 	dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, vdev_stats,
8870 			     DP_MOD_ID_GENERIC_STATS);
8871 
8872 	dp_update_vdev_rate_stats(vdev_stats, &vdev->stats);
8873 
8874 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
8875 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
8876 			     vdev_stats, vdev->vdev_id,
8877 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
8878 #endif
8879 }
8880 
void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
{
	struct dp_vdev *vdev = NULL;
	struct dp_soc *soc;
	/* Scratch buffer reused for each vdev's aggregated stats;
	 * allocated with the non-sleeping variant.
	 */
	struct cdp_vdev_stats *vdev_stats =
			qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));

	if (!vdev_stats) {
		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
			   pdev->soc);
		return;
	}

	soc = pdev->soc;

	/* Reset the pdev-level aggregates before re-accumulating */
	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
	qdf_mem_zero(&pdev->stats.rx_i, sizeof(pdev->stats.rx_i));

	if (dp_monitor_is_enable_mcopy_mode(pdev))
		dp_monitor_invalid_peer_update_pdev_stats(soc, pdev);

	/* Walk every vdev on this pdev under the list lock and fold
	 * its aggregated stats into the pdev totals.
	 */
	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {

		dp_aggregate_vdev_stats(vdev, vdev_stats);
		dp_update_pdev_stats(pdev, vdev_stats);
		dp_update_pdev_ingress_stats(pdev, vdev);
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	qdf_mem_free(vdev_stats);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	/* Publish the aggregated pdev stats to WDI subscribers */
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
#endif
}
8919 
8920 /**
8921  * dp_vdev_getstats() - get vdev packet level stats
8922  * @vdev_handle: Datapath VDEV handle
8923  * @stats: cdp network device stats structure
8924  *
8925  * Return: QDF_STATUS
8926  */
static QDF_STATUS dp_vdev_getstats(struct cdp_vdev *vdev_handle,
				   struct cdp_dev_stats *stats)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev;
	struct dp_soc *soc;
	struct cdp_vdev_stats *vdev_stats;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	pdev = vdev->pdev;
	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	soc = pdev->soc;

	/* Scratch buffer for the aggregation; non-sleeping allocation */
	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));

	if (!vdev_stats) {
		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
			   soc);
		return QDF_STATUS_E_FAILURE;
	}

	dp_aggregate_vdev_stats(vdev, vdev_stats);

	stats->tx_packets = vdev_stats->tx.comp_pkt.num;
	stats->tx_bytes = vdev_stats->tx.comp_pkt.bytes;

	stats->tx_errors = vdev_stats->tx.tx_failed;
	/* Sum of all host-side tx drop reasons */
	stats->tx_dropped = vdev_stats->tx_i.dropped.dropped_pkt.num +
			    vdev_stats->tx_i.sg.dropped_host.num +
			    vdev_stats->tx_i.mcast_en.dropped_map_error +
			    vdev_stats->tx_i.mcast_en.dropped_self_mac +
			    vdev_stats->tx_i.mcast_en.dropped_send_fail +
			    vdev_stats->tx.nawds_mcast_drop;

	/* With HW vdev-stats offload enabled, rx counters are taken
	 * from the ingress (rx_i) buckets instead of to-stack counts.
	 */
	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
		stats->rx_packets = vdev_stats->rx.to_stack.num;
		stats->rx_bytes = vdev_stats->rx.to_stack.bytes;
	} else {
		stats->rx_packets = vdev_stats->rx_i.reo_rcvd_pkt.num +
				    vdev_stats->rx_i.null_q_desc_pkt.num +
				    vdev_stats->rx_i.routed_eapol_pkt.num;
		stats->rx_bytes = vdev_stats->rx_i.reo_rcvd_pkt.bytes +
				  vdev_stats->rx_i.null_q_desc_pkt.bytes +
				  vdev_stats->rx_i.routed_eapol_pkt.bytes;
	}

	/* Sum of all rx error classes */
	stats->rx_errors = vdev_stats->rx.err.mic_err +
			   vdev_stats->rx.err.decrypt_err +
			   vdev_stats->rx.err.fcserr +
			   vdev_stats->rx.err.pn_err +
			   vdev_stats->rx.err.oor_err +
			   vdev_stats->rx.err.jump_2k_err +
			   vdev_stats->rx.err.rxdma_wifi_parse_err;

	/* Sum of all rx drop reasons */
	stats->rx_dropped = vdev_stats->rx.mec_drop.num +
			    vdev_stats->rx.multipass_rx_pkt_drop +
			    vdev_stats->rx.peer_unauth_rx_pkt_drop +
			    vdev_stats->rx.policy_check_drop +
			    vdev_stats->rx.nawds_mcast_drop +
			    vdev_stats->rx.mcast_3addr_drop;

	qdf_mem_free(vdev_stats);

	return QDF_STATUS_SUCCESS;
}
8996 
8997 /**
8998  * dp_pdev_getstats() - get pdev packet level stats
8999  * @pdev_handle: Datapath PDEV handle
9000  * @stats: cdp network device stats structure
9001  *
9002  * Return: QDF_STATUS
9003  */
static void dp_pdev_getstats(struct cdp_pdev *pdev_handle,
			     struct cdp_dev_stats *stats)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	/* Refresh pdev aggregates from all vdevs/peers first */
	dp_aggregate_pdev_stats(pdev);

	stats->tx_packets = pdev->stats.tx.comp_pkt.num;
	stats->tx_bytes = pdev->stats.tx.comp_pkt.bytes;

	stats->tx_errors = pdev->stats.tx.tx_failed;
	/* Sum of all host-side tx drop reasons */
	stats->tx_dropped = pdev->stats.tx_i.dropped.dropped_pkt.num +
			    pdev->stats.tx_i.sg.dropped_host.num +
			    pdev->stats.tx_i.mcast_en.dropped_map_error +
			    pdev->stats.tx_i.mcast_en.dropped_self_mac +
			    pdev->stats.tx_i.mcast_en.dropped_send_fail +
			    pdev->stats.tx.nawds_mcast_drop +
			    pdev->stats.tso_stats.dropped_host.num;

	/* With HW vdev-stats offload enabled, rx counters come from
	 * the ingress (rx_i) buckets instead of to-stack counts.
	 */
	if (!wlan_cfg_get_vdev_stats_hw_offload_config(pdev->soc->wlan_cfg_ctx)) {
		stats->rx_packets = pdev->stats.rx.to_stack.num;
		stats->rx_bytes = pdev->stats.rx.to_stack.bytes;
	} else {
		stats->rx_packets = pdev->stats.rx_i.reo_rcvd_pkt.num +
				    pdev->stats.rx_i.null_q_desc_pkt.num +
				    pdev->stats.rx_i.routed_eapol_pkt.num;
		stats->rx_bytes = pdev->stats.rx_i.reo_rcvd_pkt.bytes +
				  pdev->stats.rx_i.null_q_desc_pkt.bytes +
				  pdev->stats.rx_i.routed_eapol_pkt.bytes;
	}

	/* Sum of all rx error classes */
	stats->rx_errors = pdev->stats.err.ip_csum_err +
		pdev->stats.err.tcp_udp_csum_err +
		pdev->stats.rx.err.mic_err +
		pdev->stats.rx.err.decrypt_err +
		pdev->stats.rx.err.fcserr +
		pdev->stats.rx.err.pn_err +
		pdev->stats.rx.err.oor_err +
		pdev->stats.rx.err.jump_2k_err +
		pdev->stats.rx.err.rxdma_wifi_parse_err;
	/* Sum of all rx drop reasons */
	stats->rx_dropped = pdev->stats.dropped.msdu_not_done +
		pdev->stats.dropped.mec +
		pdev->stats.dropped.mesh_filter +
		pdev->stats.dropped.wifi_parse +
		pdev->stats.dropped.mon_rx_drop +
		pdev->stats.dropped.mon_radiotap_update_err +
		pdev->stats.rx.mec_drop.num +
		pdev->stats.rx.multipass_rx_pkt_drop +
		pdev->stats.rx.peer_unauth_rx_pkt_drop +
		pdev->stats.rx.policy_check_drop +
		pdev->stats.rx.nawds_mcast_drop +
		pdev->stats.rx.mcast_3addr_drop;
}
9057 
9058 /**
9059  * dp_get_device_stats() - get interface level packet stats
9060  * @soc: soc handle
9061  * @id : vdev_id or pdev_id based on type
9062  * @stats: cdp network device stats structure
9063  * @type: device type pdev/vdev
9064  *
9065  * Return: QDF_STATUS
9066  */
9067 static QDF_STATUS dp_get_device_stats(struct cdp_soc_t *soc_hdl, uint8_t id,
9068 				      struct cdp_dev_stats *stats,
9069 				      uint8_t type)
9070 {
9071 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9072 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
9073 	struct dp_vdev *vdev;
9074 
9075 	switch (type) {
9076 	case UPDATE_VDEV_STATS:
9077 		vdev = dp_vdev_get_ref_by_id(soc, id, DP_MOD_ID_CDP);
9078 
9079 		if (vdev) {
9080 			status = dp_vdev_getstats((struct cdp_vdev *)vdev,
9081 						  stats);
9082 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9083 		}
9084 		return status;
9085 	case UPDATE_PDEV_STATS:
9086 		{
9087 			struct dp_pdev *pdev =
9088 				dp_get_pdev_from_soc_pdev_id_wifi3(
9089 						(struct dp_soc *)soc,
9090 						 id);
9091 			if (pdev) {
9092 				dp_pdev_getstats((struct cdp_pdev *)pdev,
9093 						 stats);
9094 				return QDF_STATUS_SUCCESS;
9095 			}
9096 		}
9097 		break;
9098 	default:
9099 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9100 			"apstats cannot be updated for this input "
9101 			"type %d", type);
9102 		break;
9103 	}
9104 
9105 	return QDF_STATUS_E_FAILURE;
9106 }
9107 
/**
 * dp_srng_get_str_from_hal_ring_type() - printable name for a hal ring type
 * @ring_type: hal ring type enumeration value
 *
 * Return: constant string name of the ring; "Invalid" for unknown types
 */
const
char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
{
	switch (ring_type) {
	case REO_DST:
		return "Reo_dst";
	case REO_EXCEPTION:
		return "Reo_exception";
	case REO_CMD:
		return "Reo_cmd";
	case REO_REINJECT:
		return "Reo_reinject";
	case REO_STATUS:
		return "Reo_status";
	case WBM2SW_RELEASE:
		return "wbm2sw_release";
	case TCL_DATA:
		return "tcl_data";
	case TCL_CMD_CREDIT:
		return "tcl_cmd_credit";
	case TCL_STATUS:
		return "tcl_status";
	case SW2WBM_RELEASE:
		return "sw2wbm_release";
	case RXDMA_BUF:
		return "Rxdma_buf";
	case RXDMA_DST:
		return "Rxdma_dst";
	case RXDMA_MONITOR_BUF:
		return "Rxdma_monitor_buf";
	case RXDMA_MONITOR_DESC:
		return "Rxdma_monitor_desc";
	case RXDMA_MONITOR_STATUS:
		return "Rxdma_monitor_status";
	case RXDMA_MONITOR_DST:
		return "Rxdma_monitor_destination";
	case WBM_IDLE_LINK:
		return "WBM_hw_idle_link";
	default:
		dp_err("Invalid ring type");
		break;
	}
	return "Invalid";
}
9152 
9153 /*
9154  * dp_print_napi_stats(): NAPI stats
9155  * @soc - soc handle
9156  */
9157 void dp_print_napi_stats(struct dp_soc *soc)
9158 {
9159 	hif_print_napi_stats(soc->hif_handle);
9160 }
9161 
9162 /**
9163  * dp_txrx_host_peer_stats_clr): Reinitialize the txrx peer stats
9164  * @soc: Datapath soc
9165  * @peer: Datatpath peer
9166  * @arg: argument to iter function
9167  *
9168  * Return: QDF_STATUS
9169  */
9170 static inline void
9171 dp_txrx_host_peer_stats_clr(struct dp_soc *soc,
9172 			    struct dp_peer *peer,
9173 			    void *arg)
9174 {
9175 	struct dp_txrx_peer *txrx_peer = NULL;
9176 	struct dp_peer *tgt_peer = NULL;
9177 	struct cdp_interface_peer_stats peer_stats_intf;
9178 
9179 	qdf_mem_zero(&peer_stats_intf, sizeof(struct cdp_interface_peer_stats));
9180 
9181 	DP_STATS_CLR(peer);
9182 	/* Clear monitor peer stats */
9183 	dp_monitor_peer_reset_stats(soc, peer);
9184 
9185 	/* Clear MLD peer stats only when link peer is primary */
9186 	if (dp_peer_is_primary_link_peer(peer)) {
9187 		tgt_peer = dp_get_tgt_peer_from_peer(peer);
9188 		if (tgt_peer) {
9189 			DP_STATS_CLR(tgt_peer);
9190 			txrx_peer = tgt_peer->txrx_peer;
9191 			dp_txrx_peer_stats_clr(txrx_peer);
9192 		}
9193 	}
9194 
9195 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9196 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, peer->vdev->pdev->soc,
9197 			     &peer_stats_intf,  peer->peer_id,
9198 			     UPDATE_PEER_STATS, peer->vdev->pdev->pdev_id);
9199 #endif
9200 }
9201 
#ifdef WLAN_DP_SRNG_USAGE_WM_TRACKING
/**
 * dp_srng_clear_ring_usage_wm_stats() - clear SRNG usage watermark stats
 * @soc: DP soc handle
 *
 * Clears the usage-watermark tracking of every REO destination ring.
 *
 * Return: none
 */
static inline void dp_srng_clear_ring_usage_wm_stats(struct dp_soc *soc)
{
	int ring;

	for (ring = 0; ring < soc->num_reo_dest_rings; ring++)
		hal_srng_clear_ring_usage_wm_locked(soc->hal_soc,
					    soc->reo_dest_ring[ring].hal_srng);
}
#else
/* watermark tracking compiled out: nothing to clear */
static inline void dp_srng_clear_ring_usage_wm_stats(struct dp_soc *soc)
{
}
#endif
9216 
9217 /**
9218  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
9219  * @vdev: DP_VDEV handle
9220  * @dp_soc: DP_SOC handle
9221  *
9222  * Return: QDF_STATUS
9223  */
9224 static inline QDF_STATUS
9225 dp_txrx_host_stats_clr(struct dp_vdev *vdev, struct dp_soc *soc)
9226 {
9227 	if (!vdev || !vdev->pdev)
9228 		return QDF_STATUS_E_FAILURE;
9229 
9230 	/*
9231 	 * if NSS offload is enabled, then send message
9232 	 * to NSS FW to clear the stats. Once NSS FW clears the statistics
9233 	 * then clear host statistics.
9234 	 */
9235 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
9236 		if (soc->cdp_soc.ol_ops->nss_stats_clr)
9237 			soc->cdp_soc.ol_ops->nss_stats_clr(soc->ctrl_psoc,
9238 							   vdev->vdev_id);
9239 	}
9240 
9241 	dp_vdev_stats_hw_offload_target_clear(soc, vdev->pdev->pdev_id,
9242 					      (1 << vdev->vdev_id));
9243 
9244 	DP_STATS_CLR(vdev->pdev);
9245 	DP_STATS_CLR(vdev->pdev->soc);
9246 	DP_STATS_CLR(vdev);
9247 
9248 	hif_clear_napi_stats(vdev->pdev->soc->hif_handle);
9249 
9250 	dp_vdev_iterate_peer(vdev, dp_txrx_host_peer_stats_clr, NULL,
9251 			     DP_MOD_ID_GENERIC_STATS);
9252 
9253 	dp_srng_clear_ring_usage_wm_stats(soc);
9254 
9255 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9256 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
9257 			     &vdev->stats,  vdev->vdev_id,
9258 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
9259 #endif
9260 	return QDF_STATUS_SUCCESS;
9261 }
9262 
9263 /**
9264  * dp_get_peer_calibr_stats()- Get peer calibrated stats
9265  * @peer: Datapath peer
9266  * @peer_stats: buffer for peer stats
9267  *
9268  * Return: none
9269  */
9270 static inline
9271 void dp_get_peer_calibr_stats(struct dp_peer *peer,
9272 			      struct cdp_peer_stats *peer_stats)
9273 {
9274 	struct dp_peer *tgt_peer;
9275 
9276 	tgt_peer = dp_get_tgt_peer_from_peer(peer);
9277 	if (!tgt_peer)
9278 		return;
9279 
9280 	peer_stats->tx.last_per = tgt_peer->stats.tx.last_per;
9281 	peer_stats->tx.tx_bytes_success_last =
9282 				tgt_peer->stats.tx.tx_bytes_success_last;
9283 	peer_stats->tx.tx_data_success_last =
9284 					tgt_peer->stats.tx.tx_data_success_last;
9285 	peer_stats->tx.tx_byte_rate = tgt_peer->stats.tx.tx_byte_rate;
9286 	peer_stats->tx.tx_data_rate = tgt_peer->stats.tx.tx_data_rate;
9287 	peer_stats->tx.tx_data_ucast_last =
9288 					tgt_peer->stats.tx.tx_data_ucast_last;
9289 	peer_stats->tx.tx_data_ucast_rate =
9290 					tgt_peer->stats.tx.tx_data_ucast_rate;
9291 	peer_stats->tx.inactive_time = tgt_peer->stats.tx.inactive_time;
9292 	peer_stats->rx.rx_bytes_success_last =
9293 				tgt_peer->stats.rx.rx_bytes_success_last;
9294 	peer_stats->rx.rx_data_success_last =
9295 				tgt_peer->stats.rx.rx_data_success_last;
9296 	peer_stats->rx.rx_byte_rate = tgt_peer->stats.rx.rx_byte_rate;
9297 	peer_stats->rx.rx_data_rate = tgt_peer->stats.rx.rx_data_rate;
9298 }
9299 
9300 /**
9301  * dp_get_peer_basic_stats()- Get peer basic stats
9302  * @peer: Datapath peer
9303  * @peer_stats: buffer for peer stats
9304  *
9305  * Return: none
9306  */
9307 #ifdef QCA_ENHANCED_STATS_SUPPORT
9308 static inline
9309 void dp_get_peer_basic_stats(struct dp_peer *peer,
9310 			     struct cdp_peer_stats *peer_stats)
9311 {
9312 	struct dp_txrx_peer *txrx_peer;
9313 
9314 	txrx_peer = dp_get_txrx_peer(peer);
9315 	if (!txrx_peer)
9316 		return;
9317 
9318 	peer_stats->tx.comp_pkt.num += txrx_peer->comp_pkt.num;
9319 	peer_stats->tx.comp_pkt.bytes += txrx_peer->comp_pkt.bytes;
9320 	peer_stats->tx.tx_failed += txrx_peer->tx_failed;
9321 	peer_stats->rx.to_stack.num += txrx_peer->to_stack.num;
9322 	peer_stats->rx.to_stack.bytes += txrx_peer->to_stack.bytes;
9323 }
9324 #else
9325 static inline
9326 void dp_get_peer_basic_stats(struct dp_peer *peer,
9327 			     struct cdp_peer_stats *peer_stats)
9328 {
9329 	struct dp_txrx_peer *txrx_peer;
9330 
9331 	txrx_peer = peer->txrx_peer;
9332 	if (!txrx_peer)
9333 		return;
9334 
9335 	peer_stats->tx.comp_pkt.num += txrx_peer->comp_pkt.num;
9336 	peer_stats->tx.comp_pkt.bytes += txrx_peer->comp_pkt.bytes;
9337 	peer_stats->tx.tx_failed += txrx_peer->tx_failed;
9338 	peer_stats->rx.to_stack.num += txrx_peer->to_stack.num;
9339 	peer_stats->rx.to_stack.bytes += txrx_peer->to_stack.bytes;
9340 }
9341 #endif
9342 
9343 /**
9344  * dp_get_peer_per_pkt_stats()- Get peer per pkt stats
9345  * @peer: Datapath peer
9346  * @peer_stats: buffer for peer stats
9347  *
9348  * Return: none
9349  */
9350 #ifdef QCA_ENHANCED_STATS_SUPPORT
9351 static inline
9352 void dp_get_peer_per_pkt_stats(struct dp_peer *peer,
9353 			       struct cdp_peer_stats *peer_stats)
9354 {
9355 	struct dp_txrx_peer *txrx_peer;
9356 	struct dp_peer_per_pkt_stats *per_pkt_stats;
9357 
9358 	txrx_peer = dp_get_txrx_peer(peer);
9359 	if (!txrx_peer)
9360 		return;
9361 
9362 	per_pkt_stats = &txrx_peer->stats.per_pkt_stats;
9363 	DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats);
9364 }
9365 #else
9366 static inline
9367 void dp_get_peer_per_pkt_stats(struct dp_peer *peer,
9368 			       struct cdp_peer_stats *peer_stats)
9369 {
9370 	struct dp_txrx_peer *txrx_peer;
9371 	struct dp_peer_per_pkt_stats *per_pkt_stats;
9372 
9373 	txrx_peer = peer->txrx_peer;
9374 	if (!txrx_peer)
9375 		return;
9376 
9377 	per_pkt_stats = &txrx_peer->stats.per_pkt_stats;
9378 	DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats);
9379 }
9380 #endif
9381 
9382 /**
9383  * dp_get_peer_extd_stats()- Get peer extd stats
9384  * @peer: Datapath peer
9385  * @peer_stats: buffer for peer stats
9386  *
9387  * Return: none
9388  */
9389 #ifdef QCA_ENHANCED_STATS_SUPPORT
9390 #ifdef WLAN_FEATURE_11BE_MLO
9391 static inline
9392 void dp_get_peer_extd_stats(struct dp_peer *peer,
9393 			    struct cdp_peer_stats *peer_stats)
9394 {
9395 	struct dp_soc *soc = peer->vdev->pdev->soc;
9396 
9397 	if (IS_MLO_DP_MLD_PEER(peer)) {
9398 		uint8_t i;
9399 		struct dp_peer *link_peer;
9400 		struct dp_soc *link_peer_soc;
9401 		struct dp_mld_link_peers link_peers_info;
9402 
9403 		dp_get_link_peers_ref_from_mld_peer(soc, peer,
9404 						    &link_peers_info,
9405 						    DP_MOD_ID_CDP);
9406 		for (i = 0; i < link_peers_info.num_links; i++) {
9407 			link_peer = link_peers_info.link_peers[i];
9408 			link_peer_soc = link_peer->vdev->pdev->soc;
9409 			dp_monitor_peer_get_stats(link_peer_soc, link_peer,
9410 						  peer_stats,
9411 						  UPDATE_PEER_STATS);
9412 		}
9413 		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
9414 	} else {
9415 		dp_monitor_peer_get_stats(soc, peer, peer_stats,
9416 					  UPDATE_PEER_STATS);
9417 	}
9418 }
9419 #else
9420 static inline
9421 void dp_get_peer_extd_stats(struct dp_peer *peer,
9422 			    struct cdp_peer_stats *peer_stats)
9423 {
9424 	struct dp_soc *soc = peer->vdev->pdev->soc;
9425 
9426 	dp_monitor_peer_get_stats(soc, peer, peer_stats, UPDATE_PEER_STATS);
9427 }
9428 #endif
9429 #else
9430 static inline
9431 void dp_get_peer_extd_stats(struct dp_peer *peer,
9432 			    struct cdp_peer_stats *peer_stats)
9433 {
9434 	struct dp_txrx_peer *txrx_peer;
9435 	struct dp_peer_extd_stats *extd_stats;
9436 
9437 	txrx_peer = peer->txrx_peer;
9438 	if (!txrx_peer)
9439 		return;
9440 
9441 	extd_stats = &txrx_peer->stats.extd_stats;
9442 	DP_UPDATE_EXTD_STATS(peer_stats, extd_stats);
9443 }
9444 #endif
9445 
9446 /**
9447  * dp_get_peer_stats()- Get peer stats
9448  * @peer: Datapath peer
9449  * @peer_stats: buffer for peer stats
9450  *
9451  * Return: none
9452  */
9453 static inline
9454 void dp_get_peer_stats(struct dp_peer *peer, struct cdp_peer_stats *peer_stats)
9455 {
9456 	dp_get_peer_calibr_stats(peer, peer_stats);
9457 
9458 	dp_get_peer_basic_stats(peer, peer_stats);
9459 
9460 	dp_get_peer_per_pkt_stats(peer, peer_stats);
9461 
9462 	dp_get_peer_extd_stats(peer, peer_stats);
9463 }
9464 
9465 /*
9466  * dp_get_host_peer_stats()- function to print peer stats
9467  * @soc: dp_soc handle
9468  * @mac_addr: mac address of the peer
9469  *
9470  * Return: QDF_STATUS
9471  */
9472 static QDF_STATUS
9473 dp_get_host_peer_stats(struct cdp_soc_t *soc, uint8_t *mac_addr)
9474 {
9475 	struct dp_peer *peer = NULL;
9476 	struct cdp_peer_stats *peer_stats = NULL;
9477 
9478 	if (!mac_addr) {
9479 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9480 			  "%s: NULL peer mac addr\n", __func__);
9481 		return QDF_STATUS_E_FAILURE;
9482 	}
9483 
9484 	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
9485 				      mac_addr, 0,
9486 				      DP_VDEV_ALL,
9487 				      DP_MOD_ID_CDP);
9488 	if (!peer) {
9489 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9490 			  "%s: Invalid peer\n", __func__);
9491 		return QDF_STATUS_E_FAILURE;
9492 	}
9493 
9494 	peer_stats = qdf_mem_malloc(sizeof(struct cdp_peer_stats));
9495 	if (!peer_stats) {
9496 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9497 			  "%s: Memory allocation failed for cdp_peer_stats\n",
9498 			  __func__);
9499 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9500 		return QDF_STATUS_E_NOMEM;
9501 	}
9502 
9503 	qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats));
9504 
9505 	dp_get_peer_stats(peer, peer_stats);
9506 	dp_print_peer_stats(peer, peer_stats);
9507 
9508 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
9509 
9510 	qdf_mem_free(peer_stats);
9511 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9512 
9513 	return QDF_STATUS_SUCCESS;
9514 }
9515 
9516 /* *
9517  * dp_dump_wbm_idle_hptp() -dump wbm idle ring, hw hp tp info.
9518  * @soc: dp soc.
9519  * @pdev: dp pdev.
9520  *
9521  * Return: None.
9522  */
9523 static void
9524 dp_dump_wbm_idle_hptp(struct dp_soc *soc, struct dp_pdev *pdev)
9525 {
9526 	uint32_t hw_head;
9527 	uint32_t hw_tail;
9528 	struct dp_srng *srng;
9529 
9530 	if (!soc) {
9531 		dp_err("soc is NULL");
9532 		return;
9533 	}
9534 
9535 	if (!pdev) {
9536 		dp_err("pdev is NULL");
9537 		return;
9538 	}
9539 
9540 	srng = &pdev->soc->wbm_idle_link_ring;
9541 	if (!srng) {
9542 		dp_err("wbm_idle_link_ring srng is NULL");
9543 		return;
9544 	}
9545 
9546 	hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &hw_head,
9547 			&hw_tail, WBM_IDLE_LINK);
9548 
9549 	dp_debug("WBM_IDLE_LINK: HW hp: %d, HW tp: %d",
9550 			hw_head, hw_tail);
9551 }
9552 
9553 
9554 /**
9555  * dp_txrx_stats_help() - Helper function for Txrx_Stats
9556  *
9557  * Return: None
9558  */
9559 static void dp_txrx_stats_help(void)
9560 {
9561 	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
9562 	dp_info("stats_option:");
9563 	dp_info("  1 -- HTT Tx Statistics");
9564 	dp_info("  2 -- HTT Rx Statistics");
9565 	dp_info("  3 -- HTT Tx HW Queue Statistics");
9566 	dp_info("  4 -- HTT Tx HW Sched Statistics");
9567 	dp_info("  5 -- HTT Error Statistics");
9568 	dp_info("  6 -- HTT TQM Statistics");
9569 	dp_info("  7 -- HTT TQM CMDQ Statistics");
9570 	dp_info("  8 -- HTT TX_DE_CMN Statistics");
9571 	dp_info("  9 -- HTT Tx Rate Statistics");
9572 	dp_info(" 10 -- HTT Rx Rate Statistics");
9573 	dp_info(" 11 -- HTT Peer Statistics");
9574 	dp_info(" 12 -- HTT Tx SelfGen Statistics");
9575 	dp_info(" 13 -- HTT Tx MU HWQ Statistics");
9576 	dp_info(" 14 -- HTT RING_IF_INFO Statistics");
9577 	dp_info(" 15 -- HTT SRNG Statistics");
9578 	dp_info(" 16 -- HTT SFM Info Statistics");
9579 	dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
9580 	dp_info(" 18 -- HTT Peer List Details");
9581 	dp_info(" 20 -- Clear Host Statistics");
9582 	dp_info(" 21 -- Host Rx Rate Statistics");
9583 	dp_info(" 22 -- Host Tx Rate Statistics");
9584 	dp_info(" 23 -- Host Tx Statistics");
9585 	dp_info(" 24 -- Host Rx Statistics");
9586 	dp_info(" 25 -- Host AST Statistics");
9587 	dp_info(" 26 -- Host SRNG PTR Statistics");
9588 	dp_info(" 27 -- Host Mon Statistics");
9589 	dp_info(" 28 -- Host REO Queue Statistics");
9590 	dp_info(" 29 -- Host Soc cfg param Statistics");
9591 	dp_info(" 30 -- Host pdev cfg param Statistics");
9592 	dp_info(" 31 -- Host NAPI stats");
9593 	dp_info(" 32 -- Host Interrupt stats");
9594 	dp_info(" 33 -- Host FISA stats");
9595 	dp_info(" 34 -- Host Register Work stats");
9596 	dp_info(" 35 -- HW REO Queue stats");
9597 	dp_info(" 36 -- Host WBM IDLE link desc ring HP/TP");
9598 	dp_info(" 37 -- Host SRNG usage watermark stats");
9599 }
9600 
9601 /**
9602  * dp_print_host_stats()- Function to print the stats aggregated at host
9603  * @vdev_handle: DP_VDEV handle
9604  * @req: host stats type
9605  * @soc: dp soc handler
9606  *
9607  * Return: 0 on success, print error message in case of failure
9608  */
9609 static int
9610 dp_print_host_stats(struct dp_vdev *vdev,
9611 		    struct cdp_txrx_stats_req *req,
9612 		    struct dp_soc *soc)
9613 {
9614 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
9615 	enum cdp_host_txrx_stats type =
9616 			dp_stats_mapping_table[req->stats][STATS_HOST];
9617 
9618 	dp_aggregate_pdev_stats(pdev);
9619 
9620 	switch (type) {
9621 	case TXRX_CLEAR_STATS:
9622 		dp_txrx_host_stats_clr(vdev, soc);
9623 		break;
9624 	case TXRX_RX_RATE_STATS:
9625 		dp_print_rx_rates(vdev);
9626 		break;
9627 	case TXRX_TX_RATE_STATS:
9628 		dp_print_tx_rates(vdev);
9629 		break;
9630 	case TXRX_TX_HOST_STATS:
9631 		dp_print_pdev_tx_stats(pdev);
9632 		dp_print_soc_tx_stats(pdev->soc);
9633 		break;
9634 	case TXRX_RX_HOST_STATS:
9635 		dp_print_pdev_rx_stats(pdev);
9636 		dp_print_soc_rx_stats(pdev->soc);
9637 		break;
9638 	case TXRX_AST_STATS:
9639 		dp_print_ast_stats(pdev->soc);
9640 		dp_print_mec_stats(pdev->soc);
9641 		dp_print_peer_table(vdev);
9642 		break;
9643 	case TXRX_SRNG_PTR_STATS:
9644 		dp_print_ring_stats(pdev);
9645 		break;
9646 	case TXRX_RX_MON_STATS:
9647 		dp_monitor_print_pdev_rx_mon_stats(pdev);
9648 		break;
9649 	case TXRX_REO_QUEUE_STATS:
9650 		dp_get_host_peer_stats((struct cdp_soc_t *)pdev->soc,
9651 				       req->peer_addr);
9652 		break;
9653 	case TXRX_SOC_CFG_PARAMS:
9654 		dp_print_soc_cfg_params(pdev->soc);
9655 		break;
9656 	case TXRX_PDEV_CFG_PARAMS:
9657 		dp_print_pdev_cfg_params(pdev);
9658 		break;
9659 	case TXRX_NAPI_STATS:
9660 		dp_print_napi_stats(pdev->soc);
9661 		break;
9662 	case TXRX_SOC_INTERRUPT_STATS:
9663 		dp_print_soc_interrupt_stats(pdev->soc);
9664 		break;
9665 	case TXRX_SOC_FSE_STATS:
9666 		dp_rx_dump_fisa_table(pdev->soc);
9667 		break;
9668 	case TXRX_HAL_REG_WRITE_STATS:
9669 		hal_dump_reg_write_stats(pdev->soc->hal_soc);
9670 		hal_dump_reg_write_srng_stats(pdev->soc->hal_soc);
9671 		break;
9672 	case TXRX_SOC_REO_HW_DESC_DUMP:
9673 		dp_get_rx_reo_queue_info((struct cdp_soc_t *)pdev->soc,
9674 					 vdev->vdev_id);
9675 		break;
9676 	case TXRX_SOC_WBM_IDLE_HPTP_DUMP:
9677 		dp_dump_wbm_idle_hptp(pdev->soc, pdev);
9678 		break;
9679 	case TXRX_SRNG_USAGE_WM_STATS:
9680 		/* Dump usage watermark stats for all SRNGs */
9681 		dp_dump_srng_high_wm_stats(soc, 0xFF);
9682 		break;
9683 	default:
9684 		dp_info("Wrong Input For TxRx Host Stats");
9685 		dp_txrx_stats_help();
9686 		break;
9687 	}
9688 	return 0;
9689 }
9690 
9691 /*
9692  * dp_pdev_tid_stats_ingress_inc
9693  * @pdev: pdev handle
9694  * @val: increase in value
9695  *
9696  * Return: void
9697  */
9698 static void
9699 dp_pdev_tid_stats_ingress_inc(struct dp_pdev *pdev, uint32_t val)
9700 {
9701 	pdev->stats.tid_stats.ingress_stack += val;
9702 }
9703 
9704 /*
9705  * dp_pdev_tid_stats_osif_drop
9706  * @pdev: pdev handle
9707  * @val: increase in value
9708  *
9709  * Return: void
9710  */
9711 static void
9712 dp_pdev_tid_stats_osif_drop(struct dp_pdev *pdev, uint32_t val)
9713 {
9714 	pdev->stats.tid_stats.osif_drop += val;
9715 }
9716 
9717 /*
9718  * dp_get_fw_peer_stats()- function to print peer stats
9719  * @soc: soc handle
9720  * @pdev_id : id of the pdev handle
9721  * @mac_addr: mac address of the peer
9722  * @cap: Type of htt stats requested
9723  * @is_wait: if set, wait on completion from firmware response
9724  *
9725  * Currently Supporting only MAC ID based requests Only
9726  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
9727  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
9728  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
9729  *
9730  * Return: QDF_STATUS
9731  */
9732 static QDF_STATUS
9733 dp_get_fw_peer_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
9734 		     uint8_t *mac_addr,
9735 		     uint32_t cap, uint32_t is_wait)
9736 {
9737 	int i;
9738 	uint32_t config_param0 = 0;
9739 	uint32_t config_param1 = 0;
9740 	uint32_t config_param2 = 0;
9741 	uint32_t config_param3 = 0;
9742 	struct dp_pdev *pdev =
9743 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9744 						   pdev_id);
9745 
9746 	if (!pdev)
9747 		return QDF_STATUS_E_FAILURE;
9748 
9749 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
9750 	config_param0 |= (1 << (cap + 1));
9751 
9752 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
9753 		config_param1 |= (1 << i);
9754 	}
9755 
9756 	config_param2 |= (mac_addr[0] & 0x000000ff);
9757 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
9758 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
9759 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
9760 
9761 	config_param3 |= (mac_addr[4] & 0x000000ff);
9762 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
9763 
9764 	if (is_wait) {
9765 		qdf_event_reset(&pdev->fw_peer_stats_event);
9766 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
9767 					  config_param0, config_param1,
9768 					  config_param2, config_param3,
9769 					  0, DBG_STATS_COOKIE_DP_STATS, 0);
9770 		qdf_wait_single_event(&pdev->fw_peer_stats_event,
9771 				      DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
9772 	} else {
9773 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
9774 					  config_param0, config_param1,
9775 					  config_param2, config_param3,
9776 					  0, DBG_STATS_COOKIE_DEFAULT, 0);
9777 	}
9778 
9779 	return QDF_STATUS_SUCCESS;
9780 
9781 }
9782 
/* This struct definition will be removed from here
 * once it gets added in FW headers */
struct httstats_cmd_req {
    uint32_t    config_param0;	/* HTT stats config param 0 */
    uint32_t    config_param1;	/* HTT stats config param 1 */
    uint32_t    config_param2;	/* HTT stats config param 2 */
    uint32_t    config_param3;	/* HTT stats config param 3 */
    int cookie;			/* opaque value passed through to the target */
    u_int8_t    stats_id;	/* HTT extended stats type being requested */
};
9793 
9794 /*
9795  * dp_get_htt_stats: function to process the httstas request
9796  * @soc: DP soc handle
9797  * @pdev_id: id of pdev handle
9798  * @data: pointer to request data
9799  * @data_len: length for request data
9800  *
9801  * return: QDF_STATUS
9802  */
9803 static QDF_STATUS
9804 dp_get_htt_stats(struct cdp_soc_t *soc, uint8_t pdev_id, void *data,
9805 		 uint32_t data_len)
9806 {
9807 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
9808 	struct dp_pdev *pdev =
9809 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9810 						   pdev_id);
9811 
9812 	if (!pdev)
9813 		return QDF_STATUS_E_FAILURE;
9814 
9815 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
9816 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
9817 				req->config_param0, req->config_param1,
9818 				req->config_param2, req->config_param3,
9819 				req->cookie, DBG_STATS_COOKIE_DEFAULT, 0);
9820 
9821 	return QDF_STATUS_SUCCESS;
9822 }
9823 
9824 /**
9825  * dp_set_pdev_tidmap_prty_wifi3(): update tidmap priority in pdev
9826  * @pdev: DP_PDEV handle
9827  * @prio: tidmap priority value passed by the user
9828  *
9829  * Return: QDF_STATUS_SUCCESS on success
9830  */
9831 static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct dp_pdev *pdev,
9832 						uint8_t prio)
9833 {
9834 	struct dp_soc *soc = pdev->soc;
9835 
9836 	soc->tidmap_prty = prio;
9837 
9838 	hal_tx_set_tidmap_prty(soc->hal_soc, prio);
9839 	return QDF_STATUS_SUCCESS;
9840 }
9841 
9842 /*
9843  * dp_get_peer_param: function to get parameters in peer
9844  * @cdp_soc: DP soc handle
9845  * @vdev_id: id of vdev handle
9846  * @peer_mac: peer mac address
9847  * @param: parameter type to be set
9848  * @val : address of buffer
9849  *
9850  * Return: val
9851  */
9852 static QDF_STATUS dp_get_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
9853 				    uint8_t *peer_mac,
9854 				    enum cdp_peer_param_type param,
9855 				    cdp_config_param_type *val)
9856 {
9857 	return QDF_STATUS_SUCCESS;
9858 }
9859 
9860 /*
9861  * dp_set_peer_param: function to set parameters in peer
9862  * @cdp_soc: DP soc handle
9863  * @vdev_id: id of vdev handle
9864  * @peer_mac: peer mac address
9865  * @param: parameter type to be set
9866  * @val: value of parameter to be set
9867  *
9868  * Return: 0 for success. nonzero for failure.
9869  */
9870 static QDF_STATUS dp_set_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
9871 				    uint8_t *peer_mac,
9872 				    enum cdp_peer_param_type param,
9873 				    cdp_config_param_type val)
9874 {
9875 	struct dp_peer *peer =
9876 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
9877 						       peer_mac, 0, vdev_id,
9878 						       DP_MOD_ID_CDP);
9879 	struct dp_txrx_peer *txrx_peer;
9880 
9881 	if (!peer)
9882 		return QDF_STATUS_E_FAILURE;
9883 
9884 	txrx_peer = peer->txrx_peer;
9885 	if (!txrx_peer) {
9886 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9887 		return QDF_STATUS_E_FAILURE;
9888 	}
9889 
9890 	switch (param) {
9891 	case CDP_CONFIG_NAWDS:
9892 		txrx_peer->nawds_enabled = val.cdp_peer_param_nawds;
9893 		break;
9894 	case CDP_CONFIG_ISOLATION:
9895 		dp_set_peer_isolation(txrx_peer, val.cdp_peer_param_isolation);
9896 		break;
9897 	case CDP_CONFIG_IN_TWT:
9898 		txrx_peer->in_twt = !!(val.cdp_peer_param_in_twt);
9899 		break;
9900 	default:
9901 		break;
9902 	}
9903 
9904 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9905 
9906 	return QDF_STATUS_SUCCESS;
9907 }
9908 
9909 /*
9910  * dp_get_pdev_param: function to get parameters from pdev
9911  * @cdp_soc: DP soc handle
9912  * @pdev_id: id of pdev handle
9913  * @param: parameter type to be get
9914  * @value : buffer for value
9915  *
9916  * Return: status
9917  */
9918 static QDF_STATUS dp_get_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
9919 				    enum cdp_pdev_param_type param,
9920 				    cdp_config_param_type *val)
9921 {
9922 	struct cdp_pdev *pdev = (struct cdp_pdev *)
9923 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
9924 						   pdev_id);
9925 	if (!pdev)
9926 		return QDF_STATUS_E_FAILURE;
9927 
9928 	switch (param) {
9929 	case CDP_CONFIG_VOW:
9930 		val->cdp_pdev_param_cfg_vow =
9931 				((struct dp_pdev *)pdev)->delay_stats_flag;
9932 		break;
9933 	case CDP_TX_PENDING:
9934 		val->cdp_pdev_param_tx_pending = dp_get_tx_pending(pdev);
9935 		break;
9936 	case CDP_FILTER_MCAST_DATA:
9937 		val->cdp_pdev_param_fltr_mcast =
9938 				dp_monitor_pdev_get_filter_mcast_data(pdev);
9939 		break;
9940 	case CDP_FILTER_NO_DATA:
9941 		val->cdp_pdev_param_fltr_none =
9942 				dp_monitor_pdev_get_filter_non_data(pdev);
9943 		break;
9944 	case CDP_FILTER_UCAST_DATA:
9945 		val->cdp_pdev_param_fltr_ucast =
9946 				dp_monitor_pdev_get_filter_ucast_data(pdev);
9947 		break;
9948 	default:
9949 		return QDF_STATUS_E_FAILURE;
9950 	}
9951 
9952 	return QDF_STATUS_SUCCESS;
9953 }
9954 
9955 /*
9956  * dp_set_pdev_param: function to set parameters in pdev
9957  * @cdp_soc: DP soc handle
9958  * @pdev_id: id of pdev handle
9959  * @param: parameter type to be set
9960  * @val: value of parameter to be set
9961  *
9962  * Return: 0 for success. nonzero for failure.
9963  */
9964 static QDF_STATUS dp_set_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
9965 				    enum cdp_pdev_param_type param,
9966 				    cdp_config_param_type val)
9967 {
9968 	int target_type;
9969 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
9970 	struct dp_pdev *pdev =
9971 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
9972 						   pdev_id);
9973 	enum reg_wifi_band chan_band;
9974 
9975 	if (!pdev)
9976 		return QDF_STATUS_E_FAILURE;
9977 
9978 	target_type = hal_get_target_type(soc->hal_soc);
9979 	switch (target_type) {
9980 	case TARGET_TYPE_QCA6750:
9981 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC0_LMAC_ID;
9982 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
9983 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
9984 		break;
9985 	case TARGET_TYPE_KIWI:
9986 	case TARGET_TYPE_MANGO:
9987 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC0_LMAC_ID;
9988 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
9989 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
9990 		break;
9991 	default:
9992 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC1_LMAC_ID;
9993 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
9994 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
9995 		break;
9996 	}
9997 
9998 	switch (param) {
9999 	case CDP_CONFIG_TX_CAPTURE:
10000 		return dp_monitor_config_debug_sniffer(pdev,
10001 						val.cdp_pdev_param_tx_capture);
10002 	case CDP_CONFIG_DEBUG_SNIFFER:
10003 		return dp_monitor_config_debug_sniffer(pdev,
10004 						val.cdp_pdev_param_dbg_snf);
10005 	case CDP_CONFIG_BPR_ENABLE:
10006 		return dp_monitor_set_bpr_enable(pdev,
10007 						 val.cdp_pdev_param_bpr_enable);
10008 	case CDP_CONFIG_PRIMARY_RADIO:
10009 		pdev->is_primary = val.cdp_pdev_param_primary_radio;
10010 		break;
10011 	case CDP_CONFIG_CAPTURE_LATENCY:
10012 		pdev->latency_capture_enable = val.cdp_pdev_param_cptr_latcy;
10013 		break;
10014 	case CDP_INGRESS_STATS:
10015 		dp_pdev_tid_stats_ingress_inc(pdev,
10016 					      val.cdp_pdev_param_ingrs_stats);
10017 		break;
10018 	case CDP_OSIF_DROP:
10019 		dp_pdev_tid_stats_osif_drop(pdev,
10020 					    val.cdp_pdev_param_osif_drop);
10021 		break;
10022 	case CDP_CONFIG_ENH_RX_CAPTURE:
10023 		return dp_monitor_config_enh_rx_capture(pdev,
10024 						val.cdp_pdev_param_en_rx_cap);
10025 	case CDP_CONFIG_ENH_TX_CAPTURE:
10026 		return dp_monitor_config_enh_tx_capture(pdev,
10027 						val.cdp_pdev_param_en_tx_cap);
10028 	case CDP_CONFIG_HMMC_TID_OVERRIDE:
10029 		pdev->hmmc_tid_override_en = val.cdp_pdev_param_hmmc_tid_ovrd;
10030 		break;
10031 	case CDP_CONFIG_HMMC_TID_VALUE:
10032 		pdev->hmmc_tid = val.cdp_pdev_param_hmmc_tid;
10033 		break;
10034 	case CDP_CHAN_NOISE_FLOOR:
10035 		pdev->chan_noise_floor = val.cdp_pdev_param_chn_noise_flr;
10036 		break;
10037 	case CDP_TIDMAP_PRTY:
10038 		dp_set_pdev_tidmap_prty_wifi3(pdev,
10039 					      val.cdp_pdev_param_tidmap_prty);
10040 		break;
10041 	case CDP_FILTER_NEIGH_PEERS:
10042 		dp_monitor_set_filter_neigh_peers(pdev,
10043 					val.cdp_pdev_param_fltr_neigh_peers);
10044 		break;
10045 	case CDP_MONITOR_CHANNEL:
10046 		dp_monitor_set_chan_num(pdev, val.cdp_pdev_param_monitor_chan);
10047 		break;
10048 	case CDP_MONITOR_FREQUENCY:
10049 		chan_band = wlan_reg_freq_to_band(val.cdp_pdev_param_mon_freq);
10050 		dp_monitor_set_chan_freq(pdev, val.cdp_pdev_param_mon_freq);
10051 		dp_monitor_set_chan_band(pdev, chan_band);
10052 		break;
10053 	case CDP_CONFIG_BSS_COLOR:
10054 		dp_monitor_set_bsscolor(pdev, val.cdp_pdev_param_bss_color);
10055 		break;
10056 	case CDP_SET_ATF_STATS_ENABLE:
10057 		dp_monitor_set_atf_stats_enable(pdev,
10058 					val.cdp_pdev_param_atf_stats_enable);
10059 		break;
10060 	case CDP_CONFIG_SPECIAL_VAP:
10061 		dp_monitor_pdev_config_scan_spcl_vap(pdev,
10062 					val.cdp_pdev_param_config_special_vap);
10063 		dp_monitor_vdev_set_monitor_mode_buf_rings(pdev);
10064 		break;
10065 	case CDP_RESET_SCAN_SPCL_VAP_STATS_ENABLE:
10066 		dp_monitor_pdev_reset_scan_spcl_vap_stats_enable(pdev,
10067 				val.cdp_pdev_param_reset_scan_spcl_vap_stats_enable);
10068 		break;
10069 	case CDP_CONFIG_ENHANCED_STATS_ENABLE:
10070 		pdev->enhanced_stats_en = val.cdp_pdev_param_enhanced_stats_enable;
10071 		break;
10072 	case CDP_ISOLATION:
10073 		pdev->isolation = val.cdp_pdev_param_isolation;
10074 		break;
10075 	case CDP_CONFIG_UNDECODED_METADATA_CAPTURE_ENABLE:
10076 		return dp_monitor_config_undecoded_metadata_capture(pdev,
10077 				val.cdp_pdev_param_undecoded_metadata_enable);
10078 		break;
10079 	default:
10080 		return QDF_STATUS_E_INVAL;
10081 	}
10082 	return QDF_STATUS_SUCCESS;
10083 }
10084 
#ifdef QCA_UNDECODED_METADATA_SUPPORT
/**
 * dp_set_pdev_phyrx_error_mask() - configure the phyrx error mask used for
 *	undecoded metadata capture on a pdev
 * @cdp_soc: CDP soc handle
 * @pdev_id: id of the DP pdev
 * @mask: error mask value (semantics owned by the monitor layer)
 * @mask_cont: continuation error mask value
 *
 * Return: QDF_STATUS_E_FAILURE when the pdev lookup fails, otherwise the
 *	   status returned by the monitor layer.
 */
static
QDF_STATUS dp_set_pdev_phyrx_error_mask(struct cdp_soc_t *cdp_soc,
					uint8_t pdev_id, uint32_t mask,
					uint32_t mask_cont)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_pdev *pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	return dp_monitor_config_undecoded_metadata_phyrx_error_mask(pdev,
								     mask,
								     mask_cont);
}

/**
 * dp_get_pdev_phyrx_error_mask() - read back the configured phyrx error mask
 * @cdp_soc: CDP soc handle
 * @pdev_id: id of the DP pdev
 * @mask: out-pointer for the error mask value
 * @mask_cont: out-pointer for the continuation error mask value
 *
 * Return: QDF_STATUS_E_FAILURE when the pdev lookup fails, otherwise the
 *	   status returned by the monitor layer.
 */
static
QDF_STATUS dp_get_pdev_phyrx_error_mask(struct cdp_soc_t *cdp_soc,
					uint8_t pdev_id, uint32_t *mask,
					uint32_t *mask_cont)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_pdev *pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	return dp_monitor_get_undecoded_metadata_phyrx_error_mask(pdev, mask,
								  mask_cont);
}
#endif
10118 
#ifdef QCA_PEER_EXT_STATS
/*
 * dp_rx_update_peer_delay_stats() - fold an rx frame's delay into the
 *	per-peer per-tid delay stats
 * @soc: DP soc handle
 * @nbuf: received frame; peer id, ring id and tid are read from its
 *	  nbuf control block
 *
 * Takes a reference on the peer resolved from the nbuf's peer id and,
 * when the peer has delay stats allocated, accounts this frame in the
 * [tid][ring] bucket. The peer reference is released on every path
 * after it has been acquired.
 *
 * Return: void
 */
static void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
					  qdf_nbuf_t nbuf)
{
	struct dp_peer *peer = NULL;
	uint16_t peer_id, ring_id;
	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
	struct dp_peer_delay_stats *delay_stats = NULL;

	/* ignore frames carrying an out-of-range peer id */
	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
	if (peer_id > soc->max_peer_id)
		return;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
	if (qdf_unlikely(!peer))
		return;

	/* reference held from here on: drop it before every return */
	if (qdf_unlikely(!peer->txrx_peer)) {
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
		return;
	}

	if (qdf_likely(peer->txrx_peer->delay_stats)) {
		delay_stats = peer->txrx_peer->delay_stats;
		ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
		dp_rx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
					nbuf);
	}
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
}
#else
/* no-op stub when peer-extended stats support is compiled out */
static inline void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
						 qdf_nbuf_t nbuf)
{
}
#endif
10155 
10156 /*
10157  * dp_calculate_delay_stats: function to get rx delay stats
10158  * @cdp_soc: DP soc handle
10159  * @vdev_id: id of DP vdev handle
10160  * @nbuf: skb
10161  *
10162  * Return: QDF_STATUS
10163  */
10164 static QDF_STATUS
10165 dp_calculate_delay_stats(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
10166 			 qdf_nbuf_t nbuf)
10167 {
10168 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
10169 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10170 						     DP_MOD_ID_CDP);
10171 
10172 	if (!vdev)
10173 		return QDF_STATUS_SUCCESS;
10174 
10175 	if (vdev->pdev->delay_stats_flag)
10176 		dp_rx_compute_delay(vdev, nbuf);
10177 	else
10178 		dp_rx_update_peer_delay_stats(soc, nbuf);
10179 
10180 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10181 	return QDF_STATUS_SUCCESS;
10182 }
10183 
10184 /*
10185  * dp_get_vdev_param: function to get parameters from vdev
10186  * @cdp_soc : DP soc handle
10187  * @vdev_id: id of DP vdev handle
10188  * @param: parameter type to get value
10189  * @val: buffer address
10190  *
10191  * return: status
10192  */
10193 static QDF_STATUS dp_get_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
10194 				    enum cdp_vdev_param_type param,
10195 				    cdp_config_param_type *val)
10196 {
10197 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
10198 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10199 						     DP_MOD_ID_CDP);
10200 
10201 	if (!vdev)
10202 		return QDF_STATUS_E_FAILURE;
10203 
10204 	switch (param) {
10205 	case CDP_ENABLE_WDS:
10206 		val->cdp_vdev_param_wds = vdev->wds_enabled;
10207 		break;
10208 	case CDP_ENABLE_MEC:
10209 		val->cdp_vdev_param_mec = vdev->mec_enabled;
10210 		break;
10211 	case CDP_ENABLE_DA_WAR:
10212 		val->cdp_vdev_param_da_war = vdev->pdev->soc->da_war_enabled;
10213 		break;
10214 	case CDP_ENABLE_IGMP_MCAST_EN:
10215 		val->cdp_vdev_param_igmp_mcast_en = vdev->igmp_mcast_enhanc_en;
10216 		break;
10217 	case CDP_ENABLE_MCAST_EN:
10218 		val->cdp_vdev_param_mcast_en = vdev->mcast_enhancement_en;
10219 		break;
10220 	case CDP_ENABLE_HLOS_TID_OVERRIDE:
10221 		val->cdp_vdev_param_hlos_tid_override =
10222 			    dp_vdev_get_hlos_tid_override((struct cdp_vdev *)vdev);
10223 		break;
10224 	case CDP_ENABLE_PEER_AUTHORIZE:
10225 		val->cdp_vdev_param_peer_authorize =
10226 			    vdev->peer_authorize;
10227 		break;
10228 	case CDP_TX_ENCAP_TYPE:
10229 		val->cdp_vdev_param_tx_encap = vdev->tx_encap_type;
10230 		break;
10231 	case CDP_ENABLE_CIPHER:
10232 		val->cdp_vdev_param_cipher_en = vdev->sec_type;
10233 		break;
10234 #ifdef WLAN_SUPPORT_MESH_LATENCY
10235 	case CDP_ENABLE_PEER_TID_LATENCY:
10236 		val->cdp_vdev_param_peer_tid_latency_enable =
10237 			vdev->peer_tid_latency_enabled;
10238 		break;
10239 	case CDP_SET_VAP_MESH_TID:
10240 		val->cdp_vdev_param_mesh_tid =
10241 				vdev->mesh_tid_latency_config.latency_tid;
10242 		break;
10243 #endif
10244 	default:
10245 		dp_cdp_err("%pK: param value %d is wrong",
10246 			   soc, param);
10247 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10248 		return QDF_STATUS_E_FAILURE;
10249 	}
10250 
10251 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10252 	return QDF_STATUS_SUCCESS;
10253 }
10254 
10255 /*
10256  * dp_set_vdev_param: function to set parameters in vdev
10257  * @cdp_soc : DP soc handle
10258  * @vdev_id: id of DP vdev handle
10259  * @param: parameter type to get value
10260  * @val: value
10261  *
10262  * return: QDF_STATUS
10263  */
10264 static QDF_STATUS
10265 dp_set_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
10266 		  enum cdp_vdev_param_type param, cdp_config_param_type val)
10267 {
10268 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
10269 	struct dp_vdev *vdev =
10270 		dp_vdev_get_ref_by_id(dsoc, vdev_id, DP_MOD_ID_CDP);
10271 	uint32_t var = 0;
10272 
10273 	if (!vdev)
10274 		return QDF_STATUS_E_FAILURE;
10275 
10276 	switch (param) {
10277 	case CDP_ENABLE_WDS:
10278 		dp_cdp_err("%pK: wds_enable %d for vdev(%pK) id(%d)\n",
10279 			   dsoc, val.cdp_vdev_param_wds, vdev, vdev->vdev_id);
10280 		vdev->wds_enabled = val.cdp_vdev_param_wds;
10281 		break;
10282 	case CDP_ENABLE_MEC:
10283 		dp_cdp_err("%pK: mec_enable %d for vdev(%pK) id(%d)\n",
10284 			   dsoc, val.cdp_vdev_param_mec, vdev, vdev->vdev_id);
10285 		vdev->mec_enabled = val.cdp_vdev_param_mec;
10286 		break;
10287 	case CDP_ENABLE_DA_WAR:
10288 		dp_cdp_err("%pK: da_war_enable %d for vdev(%pK) id(%d)\n",
10289 			   dsoc, val.cdp_vdev_param_da_war, vdev, vdev->vdev_id);
10290 		vdev->pdev->soc->da_war_enabled = val.cdp_vdev_param_da_war;
10291 		dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
10292 					     vdev->pdev->soc));
10293 		break;
10294 	case CDP_ENABLE_NAWDS:
10295 		vdev->nawds_enabled = val.cdp_vdev_param_nawds;
10296 		break;
10297 	case CDP_ENABLE_MCAST_EN:
10298 		vdev->mcast_enhancement_en = val.cdp_vdev_param_mcast_en;
10299 		break;
10300 	case CDP_ENABLE_IGMP_MCAST_EN:
10301 		vdev->igmp_mcast_enhanc_en = val.cdp_vdev_param_igmp_mcast_en;
10302 		break;
10303 	case CDP_ENABLE_PROXYSTA:
10304 		vdev->proxysta_vdev = val.cdp_vdev_param_proxysta;
10305 		break;
10306 	case CDP_UPDATE_TDLS_FLAGS:
10307 		vdev->tdls_link_connected = val.cdp_vdev_param_tdls_flags;
10308 		break;
10309 	case CDP_CFG_WDS_AGING_TIMER:
10310 		var = val.cdp_vdev_param_aging_tmr;
10311 		if (!var)
10312 			qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
10313 		else if (var != vdev->wds_aging_timer_val)
10314 			qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, var);
10315 
10316 		vdev->wds_aging_timer_val = var;
10317 		break;
10318 	case CDP_ENABLE_AP_BRIDGE:
10319 		if (wlan_op_mode_sta != vdev->opmode)
10320 			vdev->ap_bridge_enabled = val.cdp_vdev_param_ap_brdg_en;
10321 		else
10322 			vdev->ap_bridge_enabled = false;
10323 		break;
10324 	case CDP_ENABLE_CIPHER:
10325 		vdev->sec_type = val.cdp_vdev_param_cipher_en;
10326 		break;
10327 	case CDP_ENABLE_QWRAP_ISOLATION:
10328 		vdev->isolation_vdev = val.cdp_vdev_param_qwrap_isolation;
10329 		break;
10330 	case CDP_UPDATE_MULTIPASS:
10331 		vdev->multipass_en = val.cdp_vdev_param_update_multipass;
10332 		break;
10333 	case CDP_TX_ENCAP_TYPE:
10334 		vdev->tx_encap_type = val.cdp_vdev_param_tx_encap;
10335 		break;
10336 	case CDP_RX_DECAP_TYPE:
10337 		vdev->rx_decap_type = val.cdp_vdev_param_rx_decap;
10338 		break;
10339 	case CDP_TID_VDEV_PRTY:
10340 		vdev->tidmap_prty = val.cdp_vdev_param_tidmap_prty;
10341 		break;
10342 	case CDP_TIDMAP_TBL_ID:
10343 		vdev->tidmap_tbl_id = val.cdp_vdev_param_tidmap_tbl_id;
10344 		break;
10345 #ifdef MESH_MODE_SUPPORT
10346 	case CDP_MESH_RX_FILTER:
10347 		dp_vdev_set_mesh_rx_filter((struct cdp_vdev *)vdev,
10348 					   val.cdp_vdev_param_mesh_rx_filter);
10349 		break;
10350 	case CDP_MESH_MODE:
10351 		dp_vdev_set_mesh_mode((struct cdp_vdev *)vdev,
10352 				      val.cdp_vdev_param_mesh_mode);
10353 		break;
10354 #endif
10355 	case CDP_ENABLE_HLOS_TID_OVERRIDE:
10356 		dp_info("vdev_id %d enable hlod tid override %d", vdev_id,
10357 			val.cdp_vdev_param_hlos_tid_override);
10358 		dp_vdev_set_hlos_tid_override(vdev,
10359 				val.cdp_vdev_param_hlos_tid_override);
10360 		break;
10361 #ifdef QCA_SUPPORT_WDS_EXTENDED
10362 	case CDP_CFG_WDS_EXT:
10363 		if (vdev->opmode == wlan_op_mode_ap)
10364 			vdev->wds_ext_enabled = val.cdp_vdev_param_wds_ext;
10365 		break;
10366 #endif
10367 	case CDP_ENABLE_PEER_AUTHORIZE:
10368 		vdev->peer_authorize = val.cdp_vdev_param_peer_authorize;
10369 		break;
10370 #ifdef WLAN_SUPPORT_MESH_LATENCY
10371 	case CDP_ENABLE_PEER_TID_LATENCY:
10372 		dp_info("vdev_id %d enable peer tid latency %d", vdev_id,
10373 			val.cdp_vdev_param_peer_tid_latency_enable);
10374 		vdev->peer_tid_latency_enabled =
10375 			val.cdp_vdev_param_peer_tid_latency_enable;
10376 		break;
10377 	case CDP_SET_VAP_MESH_TID:
10378 		dp_info("vdev_id %d enable peer tid latency %d", vdev_id,
10379 			val.cdp_vdev_param_mesh_tid);
10380 		vdev->mesh_tid_latency_config.latency_tid
10381 				= val.cdp_vdev_param_mesh_tid;
10382 		break;
10383 #endif
10384 #ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE
10385 	case CDP_SKIP_BAR_UPDATE_AP:
10386 		dp_info("vdev_id %d skip BAR update: %u", vdev_id,
10387 			val.cdp_skip_bar_update);
10388 		vdev->skip_bar_update = val.cdp_skip_bar_update;
10389 		vdev->skip_bar_update_last_ts = 0;
10390 		break;
10391 #endif
10392 	case CDP_DROP_3ADDR_MCAST:
10393 		dp_info("vdev_id %d drop 3 addr mcast :%d", vdev_id,
10394 			val.cdp_drop_3addr_mcast);
10395 		vdev->drop_3addr_mcast = val.cdp_drop_3addr_mcast;
10396 		break;
10397 	case CDP_ENABLE_WRAP:
10398 		vdev->wrap_vdev = val.cdp_vdev_param_wrap;
10399 		break;
10400 	default:
10401 		break;
10402 	}
10403 
10404 	dp_tx_vdev_update_search_flags((struct dp_vdev *)vdev);
10405 	dsoc->arch_ops.txrx_set_vdev_param(dsoc, vdev, param, val);
10406 	dp_vdev_unref_delete(dsoc, vdev, DP_MOD_ID_CDP);
10407 
10408 	return QDF_STATUS_SUCCESS;
10409 }
10410 
10411 /*
10412  * dp_set_psoc_param: function to set parameters in psoc
10413  * @cdp_soc : DP soc handle
10414  * @param: parameter type to be set
10415  * @val: value of parameter to be set
10416  *
10417  * return: QDF_STATUS
10418  */
10419 static QDF_STATUS
10420 dp_set_psoc_param(struct cdp_soc_t *cdp_soc,
10421 		  enum cdp_psoc_param_type param, cdp_config_param_type val)
10422 {
10423 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
10424 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->wlan_cfg_ctx;
10425 
10426 	switch (param) {
10427 	case CDP_ENABLE_RATE_STATS:
10428 		soc->peerstats_enabled = val.cdp_psoc_param_en_rate_stats;
10429 		break;
10430 	case CDP_SET_NSS_CFG:
10431 		wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx,
10432 					    val.cdp_psoc_param_en_nss_cfg);
10433 		/*
10434 		 * TODO: masked out based on the per offloaded radio
10435 		 */
10436 		switch (val.cdp_psoc_param_en_nss_cfg) {
10437 		case dp_nss_cfg_default:
10438 			break;
10439 		case dp_nss_cfg_first_radio:
10440 		/*
10441 		 * This configuration is valid for single band radio which
10442 		 * is also NSS offload.
10443 		 */
10444 		case dp_nss_cfg_dbdc:
10445 		case dp_nss_cfg_dbtc:
10446 			wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
10447 			wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
10448 			wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
10449 			wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
10450 			break;
10451 		default:
10452 			dp_cdp_err("%pK: Invalid offload config %d",
10453 				   soc, val.cdp_psoc_param_en_nss_cfg);
10454 		}
10455 
10456 			dp_cdp_err("%pK: nss-wifi<0> nss config is enabled"
10457 				   , soc);
10458 		break;
10459 	case CDP_SET_PREFERRED_HW_MODE:
10460 		soc->preferred_hw_mode = val.cdp_psoc_param_preferred_hw_mode;
10461 		break;
10462 	case CDP_IPA_ENABLE:
10463 		soc->wlan_cfg_ctx->ipa_enabled = val.cdp_ipa_enabled;
10464 		break;
10465 	case CDP_CFG_VDEV_STATS_HW_OFFLOAD:
10466 		wlan_cfg_set_vdev_stats_hw_offload_config(wlan_cfg_ctx,
10467 				val.cdp_psoc_param_vdev_stats_hw_offload);
10468 		break;
10469 	case CDP_SAWF_ENABLE:
10470 		wlan_cfg_set_sawf_config(wlan_cfg_ctx, val.cdp_sawf_enabled);
10471 		break;
10472 	default:
10473 		break;
10474 	}
10475 
10476 	return QDF_STATUS_SUCCESS;
10477 }
10478 
10479 /*
10480  * dp_get_psoc_param: function to get parameters in soc
10481  * @cdp_soc : DP soc handle
10482  * @param: parameter type to be set
10483  * @val: address of buffer
10484  *
10485  * return: status
10486  */
10487 static QDF_STATUS dp_get_psoc_param(struct cdp_soc_t *cdp_soc,
10488 				    enum cdp_psoc_param_type param,
10489 				    cdp_config_param_type *val)
10490 {
10491 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
10492 
10493 	if (!soc)
10494 		return QDF_STATUS_E_FAILURE;
10495 
10496 	switch (param) {
10497 	case CDP_CFG_PEER_EXT_STATS:
10498 		val->cdp_psoc_param_pext_stats =
10499 			wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
10500 		break;
10501 	case CDP_CFG_VDEV_STATS_HW_OFFLOAD:
10502 		val->cdp_psoc_param_vdev_stats_hw_offload =
10503 			wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx);
10504 		break;
10505 	default:
10506 		dp_warn("Invalid param");
10507 		break;
10508 	}
10509 
10510 	return QDF_STATUS_SUCCESS;
10511 }
10512 
10513 /*
10514  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
10515  * @soc: DP_SOC handle
10516  * @vdev_id: id of DP_VDEV handle
10517  * @map_id:ID of map that needs to be updated
10518  *
10519  * Return: QDF_STATUS
10520  */
10521 static QDF_STATUS dp_set_vdev_dscp_tid_map_wifi3(ol_txrx_soc_handle cdp_soc,
10522 						 uint8_t vdev_id,
10523 						 uint8_t map_id)
10524 {
10525 	cdp_config_param_type val;
10526 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
10527 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10528 						     DP_MOD_ID_CDP);
10529 	if (vdev) {
10530 		vdev->dscp_tid_map_id = map_id;
10531 		val.cdp_vdev_param_dscp_tid_map_id = map_id;
10532 		soc->arch_ops.txrx_set_vdev_param(soc,
10533 						  vdev,
10534 						  CDP_UPDATE_DSCP_TO_TID_MAP,
10535 						  val);
10536 		/* Updatr flag for transmit tid classification */
10537 		if (vdev->dscp_tid_map_id < soc->num_hw_dscp_tid_map)
10538 			vdev->skip_sw_tid_classification |=
10539 				DP_TX_HW_DSCP_TID_MAP_VALID;
10540 		else
10541 			vdev->skip_sw_tid_classification &=
10542 				~DP_TX_HW_DSCP_TID_MAP_VALID;
10543 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10544 		return QDF_STATUS_SUCCESS;
10545 	}
10546 
10547 	return QDF_STATUS_E_FAILURE;
10548 }
10549 
#ifdef DP_RATETABLE_SUPPORT
/*
 * dp_txrx_get_ratekbps() - resolve a rate in kbps from rate parameters
 * @preamb: preamble type
 * @mcs: MCS index
 * @htflag: HT flag (not used by this lookup path)
 * @gintval: guard interval
 *
 * Return: result of the rate-table lookup
 */
static int dp_txrx_get_ratekbps(int preamb, int mcs,
				int htflag, int gintval)
{
	uint32_t rate_index;
	uint16_t rate_code;

	/* NSS and BW are fixed to 1 for this lookup */
	return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1,
			       (uint8_t)preamb, 1, NO_PUNCTURE,
			       &rate_index, &rate_code);
}
#else
/* rate table support compiled out: always report 0 kbps */
static int dp_txrx_get_ratekbps(int preamb, int mcs,
				int htflag, int gintval)
{
	return 0;
}
#endif
10569 
10570 /* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
10571  * @soc: DP soc handle
10572  * @pdev_id: id of DP pdev handle
10573  * @pdev_stats: buffer to copy to
10574  *
10575  * return : status success/failure
10576  */
10577 static QDF_STATUS
10578 dp_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
10579 		       struct cdp_pdev_stats *pdev_stats)
10580 {
10581 	struct dp_pdev *pdev =
10582 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10583 						   pdev_id);
10584 	if (!pdev)
10585 		return QDF_STATUS_E_FAILURE;
10586 
10587 	dp_aggregate_pdev_stats(pdev);
10588 
10589 	qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats));
10590 	return QDF_STATUS_SUCCESS;
10591 }
10592 
/* dp_txrx_update_vdev_me_stats(): Update vdev ME stats sent from CDP
 * @vdev: DP vdev handle
 * @buf: buffer containing specific stats structure; interpreted as a
 *	 struct cdp_tx_ingress_stats
 *
 * Folds the mcast-enhancement counters carried in @buf into the vdev's
 * tx_i.mcast_en stats. A NULL @buf is rejected with an error log.
 *
 * Returns: void
 */
static void dp_txrx_update_vdev_me_stats(struct dp_vdev *vdev,
					 void *buf)
{
	struct cdp_tx_ingress_stats *host_stats = NULL;

	if (!buf) {
		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
		return;
	}
	host_stats = (struct cdp_tx_ingress_stats *)buf;

	/* packet and byte counters */
	DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt,
			 host_stats->mcast_en.mcast_pkt.num,
			 host_stats->mcast_en.mcast_pkt.bytes);
	/* drop and conversion counters */
	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error,
		     host_stats->mcast_en.dropped_map_error);
	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_self_mac,
		     host_stats->mcast_en.dropped_self_mac);
	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_send_fail,
		     host_stats->mcast_en.dropped_send_fail);
	DP_STATS_INC(vdev, tx_i.mcast_en.ucast,
		     host_stats->mcast_en.ucast);
	DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc,
		     host_stats->mcast_en.fail_seg_alloc);
	DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail,
		     host_stats->mcast_en.clone_fail);
}
10626 
/* dp_txrx_update_vdev_igmp_me_stats(): Update vdev IGMP ME stats sent from CDP
 * @vdev: DP vdev handle
 * @buf: buffer containing specific stats structure; interpreted as a
 *	 struct cdp_tx_ingress_stats
 *
 * Folds the IGMP mcast-enhancement counters carried in @buf into the
 * vdev's tx_i.igmp_mcast_en stats. A NULL @buf is rejected with an
 * error log.
 *
 * Returns: void
 */
static void dp_txrx_update_vdev_igmp_me_stats(struct dp_vdev *vdev,
					      void *buf)
{
	struct cdp_tx_ingress_stats *host_stats = NULL;

	if (!buf) {
		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
		return;
	}
	host_stats = (struct cdp_tx_ingress_stats *)buf;

	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_rcvd,
		     host_stats->igmp_mcast_en.igmp_rcvd);
	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_ucast_converted,
		     host_stats->igmp_mcast_en.igmp_ucast_converted);
}
10649 
10650 /* dp_txrx_update_vdev_host_stats(): Update stats sent through CDP
10651  * @soc: DP soc handle
10652  * @vdev_id: id of DP vdev handle
10653  * @buf: buffer containing specific stats structure
10654  * @stats_id: stats type
10655  *
10656  * Returns: QDF_STATUS
10657  */
10658 static QDF_STATUS dp_txrx_update_vdev_host_stats(struct cdp_soc_t *soc_hdl,
10659 						 uint8_t vdev_id,
10660 						 void *buf,
10661 						 uint16_t stats_id)
10662 {
10663 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10664 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10665 						     DP_MOD_ID_CDP);
10666 
10667 	if (!vdev) {
10668 		dp_cdp_err("%pK: Invalid vdev handle", soc);
10669 		return QDF_STATUS_E_FAILURE;
10670 	}
10671 
10672 	switch (stats_id) {
10673 	case DP_VDEV_STATS_PKT_CNT_ONLY:
10674 		break;
10675 	case DP_VDEV_STATS_TX_ME:
10676 		dp_txrx_update_vdev_me_stats(vdev, buf);
10677 		dp_txrx_update_vdev_igmp_me_stats(vdev, buf);
10678 		break;
10679 	default:
10680 		qdf_info("Invalid stats_id %d", stats_id);
10681 		break;
10682 	}
10683 
10684 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10685 	return QDF_STATUS_SUCCESS;
10686 }
10687 
10688 /* dp_txrx_get_peer_stats - will return cdp_peer_stats
10689  * @soc: soc handle
10690  * @vdev_id: id of vdev handle
10691  * @peer_mac: mac of DP_PEER handle
10692  * @peer_stats: buffer to copy to
10693  * return : status success/failure
10694  */
10695 static QDF_STATUS
10696 dp_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
10697 		       uint8_t *peer_mac, struct cdp_peer_stats *peer_stats)
10698 {
10699 	QDF_STATUS status = QDF_STATUS_SUCCESS;
10700 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
10701 						       peer_mac, 0, vdev_id,
10702 						       DP_MOD_ID_CDP);
10703 
10704 	qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats));
10705 
10706 	if (!peer)
10707 		return QDF_STATUS_E_FAILURE;
10708 
10709 	dp_get_peer_stats(peer, peer_stats);
10710 
10711 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10712 
10713 	return status;
10714 }
10715 
10716 /* dp_txrx_get_peer_stats_param - will return specified cdp_peer_stats
10717  * @param soc - soc handle
10718  * @param vdev_id - vdev_id of vdev object
10719  * @param peer_mac - mac address of the peer
10720  * @param type - enum of required stats
10721  * @param buf - buffer to hold the value
10722  * return : status success/failure
10723  */
10724 static QDF_STATUS
10725 dp_txrx_get_peer_stats_param(struct cdp_soc_t *soc, uint8_t vdev_id,
10726 			     uint8_t *peer_mac, enum cdp_peer_stats_type type,
10727 			     cdp_peer_stats_param_t *buf)
10728 {
10729 	QDF_STATUS ret;
10730 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
10731 						      peer_mac, 0, vdev_id,
10732 						      DP_MOD_ID_CDP);
10733 
10734 	if (!peer) {
10735 		dp_peer_err("%pK: Invalid Peer for Mac " QDF_MAC_ADDR_FMT,
10736 			    soc, QDF_MAC_ADDR_REF(peer_mac));
10737 		return QDF_STATUS_E_FAILURE;
10738 	}
10739 
10740 	if (type >= cdp_peer_per_pkt_stats_min &&
10741 	    type < cdp_peer_per_pkt_stats_max) {
10742 		ret = dp_txrx_get_peer_per_pkt_stats_param(peer, type, buf);
10743 	} else if (type >= cdp_peer_extd_stats_min &&
10744 		   type < cdp_peer_extd_stats_max) {
10745 		ret = dp_txrx_get_peer_extd_stats_param(peer, type, buf);
10746 	} else {
10747 		dp_err("%pK: Invalid stat type requested", soc);
10748 		ret = QDF_STATUS_E_FAILURE;
10749 	}
10750 
10751 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10752 
10753 	return ret;
10754 }
10755 
/* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
 * @soc: soc handle
 * @vdev_id: id of vdev handle
 * @peer_mac: mac of DP_PEER handle
 *
 * For an MLO MLD peer, stats are additionally cleared on every link
 * peer; link peers may belong to different socs.
 *
 * return : QDF_STATUS
 */
#ifdef WLAN_FEATURE_11BE_MLO
static QDF_STATUS
dp_txrx_reset_peer_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			 uint8_t *peer_mac)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_peer *peer =
			dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
						       vdev_id, DP_MOD_ID_CDP);

	if (!peer)
		return QDF_STATUS_E_FAILURE;

	DP_STATS_CLR(peer);
	dp_txrx_peer_stats_clr(peer->txrx_peer);

	if (IS_MLO_DP_MLD_PEER(peer)) {
		uint8_t i;
		struct dp_peer *link_peer;
		struct dp_soc *link_peer_soc;
		struct dp_mld_link_peers link_peers_info;

		/* take refs on all link peers of this MLD peer, clear
		 * each one's stats, then drop the refs
		 */
		dp_get_link_peers_ref_from_mld_peer(soc, peer,
						    &link_peers_info,
						    DP_MOD_ID_CDP);
		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			/* each link peer may live on a different soc */
			link_peer_soc = link_peer->vdev->pdev->soc;

			DP_STATS_CLR(link_peer);
			dp_monitor_peer_reset_stats(link_peer_soc, link_peer);
		}

		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
	} else {
		dp_monitor_peer_reset_stats(soc, peer);
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return status;
}
#else
/* non-MLO build: clear stats on the single legacy peer only */
static QDF_STATUS
dp_txrx_reset_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
			 uint8_t *peer_mac)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
						      peer_mac, 0, vdev_id,
						      DP_MOD_ID_CDP);

	if (!peer)
		return QDF_STATUS_E_FAILURE;

	DP_STATS_CLR(peer);
	dp_txrx_peer_stats_clr(peer->txrx_peer);
	dp_monitor_peer_reset_stats((struct dp_soc *)soc, peer);

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return status;
}
#endif
10828 
10829 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
10830  * @vdev_handle: DP_VDEV handle
10831  * @buf: buffer for vdev stats
10832  *
10833  * return : int
10834  */
10835 static int dp_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
10836 				  void *buf, bool is_aggregate)
10837 {
10838 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10839 	struct cdp_vdev_stats *vdev_stats;
10840 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10841 						     DP_MOD_ID_CDP);
10842 
10843 	if (!vdev)
10844 		return 1;
10845 
10846 	vdev_stats = (struct cdp_vdev_stats *)buf;
10847 
10848 	if (is_aggregate) {
10849 		dp_aggregate_vdev_stats(vdev, buf);
10850 	} else {
10851 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
10852 	}
10853 
10854 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10855 	return 0;
10856 }
10857 
10858 /*
10859  * dp_get_total_per(): get total per
10860  * @soc: DP soc handle
10861  * @pdev_id: id of DP_PDEV handle
10862  *
10863  * Return: % error rate using retries per packet and success packets
10864  */
10865 static int dp_get_total_per(struct cdp_soc_t *soc, uint8_t pdev_id)
10866 {
10867 	struct dp_pdev *pdev =
10868 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10869 						   pdev_id);
10870 
10871 	if (!pdev)
10872 		return 0;
10873 
10874 	dp_aggregate_pdev_stats(pdev);
10875 	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
10876 		return 0;
10877 	return ((pdev->stats.tx.retries * 100) /
10878 		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
10879 }
10880 
10881 /*
10882  * dp_txrx_stats_publish(): publish pdev stats into a buffer
10883  * @soc: DP soc handle
10884  * @pdev_id: id of DP_PDEV handle
10885  * @buf: to hold pdev_stats
10886  *
10887  * Return: int
10888  */
10889 static int
10890 dp_txrx_stats_publish(struct cdp_soc_t *soc, uint8_t pdev_id,
10891 		      struct cdp_stats_extd *buf)
10892 {
10893 	struct cdp_txrx_stats_req req = {0,};
10894 	struct dp_pdev *pdev =
10895 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10896 						   pdev_id);
10897 
10898 	if (!pdev)
10899 		return TXRX_STATS_LEVEL_OFF;
10900 
10901 	dp_aggregate_pdev_stats(pdev);
10902 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
10903 	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
10904 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
10905 				req.param1, req.param2, req.param3, 0,
10906 				req.cookie_val, 0);
10907 
10908 	msleep(DP_MAX_SLEEP_TIME);
10909 
10910 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
10911 	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
10912 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
10913 				req.param1, req.param2, req.param3, 0,
10914 				req.cookie_val, 0);
10915 
10916 	msleep(DP_MAX_SLEEP_TIME);
10917 	qdf_mem_copy(buf, &pdev->stats, sizeof(struct cdp_pdev_stats));
10918 
10919 	return TXRX_STATS_LEVEL;
10920 }
10921 
10922 /**
10923  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
10924  * @soc: soc handle
10925  * @pdev_id: id of DP_PDEV handle
10926  * @map_id: ID of map that needs to be updated
10927  * @tos: index value in map
10928  * @tid: tid value passed by the user
10929  *
10930  * Return: QDF_STATUS
10931  */
10932 static QDF_STATUS
10933 dp_set_pdev_dscp_tid_map_wifi3(struct cdp_soc_t *soc_handle,
10934 			       uint8_t pdev_id,
10935 			       uint8_t map_id,
10936 			       uint8_t tos, uint8_t tid)
10937 {
10938 	uint8_t dscp;
10939 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
10940 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10941 
10942 	if (!pdev)
10943 		return QDF_STATUS_E_FAILURE;
10944 
10945 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
10946 	pdev->dscp_tid_map[map_id][dscp] = tid;
10947 
10948 	if (map_id < soc->num_hw_dscp_tid_map)
10949 		hal_tx_update_dscp_tid(soc->hal_soc, tid,
10950 				       map_id, dscp);
10951 	else
10952 		return QDF_STATUS_E_FAILURE;
10953 
10954 	return QDF_STATUS_SUCCESS;
10955 }
10956 
10957 #ifdef WLAN_SYSFS_DP_STATS
10958 /*
10959  * dp_sysfs_event_trigger(): Trigger event to wait for firmware
10960  * stats request response.
10961  * @soc: soc handle
10962  * @cookie_val: cookie value
10963  *
10964  * @Return: QDF_STATUS
10965  */
10966 static QDF_STATUS
10967 dp_sysfs_event_trigger(struct dp_soc *soc, uint32_t cookie_val)
10968 {
10969 	QDF_STATUS status = QDF_STATUS_SUCCESS;
10970 	/* wait for firmware response for sysfs stats request */
10971 	if (cookie_val == DBG_SYSFS_STATS_COOKIE) {
10972 		if (!soc) {
10973 			dp_cdp_err("soc is NULL");
10974 			return QDF_STATUS_E_FAILURE;
10975 		}
10976 		/* wait for event completion */
10977 		status = qdf_wait_single_event(&soc->sysfs_config->sysfs_txrx_fw_request_done,
10978 					       WLAN_SYSFS_STAT_REQ_WAIT_MS);
10979 		if (status == QDF_STATUS_SUCCESS)
10980 			dp_cdp_info("sysfs_txrx_fw_request_done event completed");
10981 		else if (status == QDF_STATUS_E_TIMEOUT)
10982 			dp_cdp_warn("sysfs_txrx_fw_request_done event expired");
10983 		else
10984 			dp_cdp_warn("sysfs_txrx_fw_request_done event erro code %d", status);
10985 	}
10986 
10987 	return status;
10988 }
10989 #else /* WLAN_SYSFS_DP_STATS */
10990 /*
10991  * dp_sysfs_event_trigger(): Trigger event to wait for firmware
10992  * stats request response.
10993  * @soc: soc handle
10994  * @cookie_val: cookie value
10995  *
10996  * @Return: QDF_STATUS
10997  */
10998 static QDF_STATUS
10999 dp_sysfs_event_trigger(struct dp_soc *soc, uint32_t cookie_val)
11000 {
11001 	return QDF_STATUS_SUCCESS;
11002 }
11003 #endif /* WLAN_SYSFS_DP_STATS */
11004 
11005 /**
11006  * dp_fw_stats_process(): Process TXRX FW stats request.
11007  * @vdev_handle: DP VDEV handle
11008  * @req: stats request
11009  *
11010  * return: QDF_STATUS
11011  */
11012 static QDF_STATUS
11013 dp_fw_stats_process(struct dp_vdev *vdev,
11014 		    struct cdp_txrx_stats_req *req)
11015 {
11016 	struct dp_pdev *pdev = NULL;
11017 	struct dp_soc *soc = NULL;
11018 	uint32_t stats = req->stats;
11019 	uint8_t mac_id = req->mac_id;
11020 	uint32_t cookie_val = DBG_STATS_COOKIE_DEFAULT;
11021 
11022 	if (!vdev) {
11023 		DP_TRACE(NONE, "VDEV not found");
11024 		return QDF_STATUS_E_FAILURE;
11025 	}
11026 
11027 	pdev = vdev->pdev;
11028 	if (!pdev) {
11029 		DP_TRACE(NONE, "PDEV not found");
11030 		return QDF_STATUS_E_FAILURE;
11031 	}
11032 
11033 	soc = pdev->soc;
11034 	if (!soc) {
11035 		DP_TRACE(NONE, "soc not found");
11036 		return QDF_STATUS_E_FAILURE;
11037 	}
11038 
11039 	/* In case request is from host sysfs for displaying stats on console */
11040 	if (req->cookie_val == DBG_SYSFS_STATS_COOKIE)
11041 		cookie_val = DBG_SYSFS_STATS_COOKIE;
11042 
11043 	/*
11044 	 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
11045 	 * from param0 to param3 according to below rule:
11046 	 *
11047 	 * PARAM:
11048 	 *   - config_param0 : start_offset (stats type)
11049 	 *   - config_param1 : stats bmask from start offset
11050 	 *   - config_param2 : stats bmask from start offset + 32
11051 	 *   - config_param3 : stats bmask from start offset + 64
11052 	 */
11053 	if (req->stats == CDP_TXRX_STATS_0) {
11054 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
11055 		req->param1 = 0xFFFFFFFF;
11056 		req->param2 = 0xFFFFFFFF;
11057 		req->param3 = 0xFFFFFFFF;
11058 	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
11059 		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
11060 	}
11061 
11062 	if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT) {
11063 		dp_h2t_ext_stats_msg_send(pdev,
11064 					  HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT,
11065 					  req->param0, req->param1, req->param2,
11066 					  req->param3, 0, cookie_val,
11067 					  mac_id);
11068 	} else {
11069 		dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
11070 					  req->param1, req->param2, req->param3,
11071 					  0, cookie_val, mac_id);
11072 	}
11073 
11074 	dp_sysfs_event_trigger(soc, cookie_val);
11075 
11076 	return QDF_STATUS_SUCCESS;
11077 }
11078 
11079 /**
11080  * dp_txrx_stats_request - function to map to firmware and host stats
11081  * @soc: soc handle
11082  * @vdev_id: virtual device ID
11083  * @req: stats request
11084  *
11085  * Return: QDF_STATUS
11086  */
11087 static
11088 QDF_STATUS dp_txrx_stats_request(struct cdp_soc_t *soc_handle,
11089 				 uint8_t vdev_id,
11090 				 struct cdp_txrx_stats_req *req)
11091 {
11092 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_handle);
11093 	int host_stats;
11094 	int fw_stats;
11095 	enum cdp_stats stats;
11096 	int num_stats;
11097 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11098 						     DP_MOD_ID_CDP);
11099 	QDF_STATUS status = QDF_STATUS_E_INVAL;
11100 
11101 	if (!vdev || !req) {
11102 		dp_cdp_err("%pK: Invalid vdev/req instance", soc);
11103 		status = QDF_STATUS_E_INVAL;
11104 		goto fail0;
11105 	}
11106 
11107 	if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) {
11108 		dp_err("Invalid mac id request");
11109 		status = QDF_STATUS_E_INVAL;
11110 		goto fail0;
11111 	}
11112 
11113 	stats = req->stats;
11114 	if (stats >= CDP_TXRX_MAX_STATS) {
11115 		status = QDF_STATUS_E_INVAL;
11116 		goto fail0;
11117 	}
11118 
11119 	/*
11120 	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
11121 	 *			has to be updated if new FW HTT stats added
11122 	 */
11123 	if (stats > CDP_TXRX_STATS_HTT_MAX)
11124 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
11125 
11126 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
11127 
11128 	if (stats >= num_stats) {
11129 		dp_cdp_err("%pK : Invalid stats option: %d", soc, stats);
11130 		status = QDF_STATUS_E_INVAL;
11131 		goto fail0;
11132 	}
11133 
11134 	req->stats = stats;
11135 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
11136 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
11137 
11138 	dp_info("stats: %u fw_stats_type: %d host_stats: %d",
11139 		stats, fw_stats, host_stats);
11140 
11141 	if (fw_stats != TXRX_FW_STATS_INVALID) {
11142 		/* update request with FW stats type */
11143 		req->stats = fw_stats;
11144 		status = dp_fw_stats_process(vdev, req);
11145 	} else if ((host_stats != TXRX_HOST_STATS_INVALID) &&
11146 			(host_stats <= TXRX_HOST_STATS_MAX))
11147 		status = dp_print_host_stats(vdev, req, soc);
11148 	else
11149 		dp_cdp_info("%pK: Wrong Input for TxRx Stats", soc);
11150 fail0:
11151 	if (vdev)
11152 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11153 	return status;
11154 }
11155 
11156 /*
11157  * dp_txrx_dump_stats() -  Dump statistics
11158  * @value - Statistics option
11159  */
11160 static QDF_STATUS dp_txrx_dump_stats(struct cdp_soc_t *psoc, uint16_t value,
11161 				     enum qdf_stats_verbosity_level level)
11162 {
11163 	struct dp_soc *soc =
11164 		(struct dp_soc *)psoc;
11165 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11166 
11167 	if (!soc) {
11168 		dp_cdp_err("%pK: soc is NULL", soc);
11169 		return QDF_STATUS_E_INVAL;
11170 	}
11171 
11172 	switch (value) {
11173 	case CDP_TXRX_PATH_STATS:
11174 		dp_txrx_path_stats(soc);
11175 		dp_print_soc_interrupt_stats(soc);
11176 		hal_dump_reg_write_stats(soc->hal_soc);
11177 		dp_pdev_print_tx_delay_stats(soc);
11178 		/* Dump usage watermark stats for core TX/RX SRNGs */
11179 		dp_dump_srng_high_wm_stats(soc, (1 << REO_DST));
11180 		break;
11181 
11182 	case CDP_RX_RING_STATS:
11183 		dp_print_per_ring_stats(soc);
11184 		break;
11185 
11186 	case CDP_TXRX_TSO_STATS:
11187 		dp_print_tso_stats(soc, level);
11188 		break;
11189 
11190 	case CDP_DUMP_TX_FLOW_POOL_INFO:
11191 		if (level == QDF_STATS_VERBOSITY_LEVEL_HIGH)
11192 			cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
11193 		else
11194 			dp_tx_dump_flow_pool_info_compact(soc);
11195 		break;
11196 
11197 	case CDP_DP_NAPI_STATS:
11198 		dp_print_napi_stats(soc);
11199 		break;
11200 
11201 	case CDP_TXRX_DESC_STATS:
11202 		/* TODO: NOT IMPLEMENTED */
11203 		break;
11204 
11205 	case CDP_DP_RX_FISA_STATS:
11206 		dp_rx_dump_fisa_stats(soc);
11207 		break;
11208 
11209 	case CDP_DP_SWLM_STATS:
11210 		dp_print_swlm_stats(soc);
11211 		break;
11212 
11213 	case CDP_DP_TX_HW_LATENCY_STATS:
11214 		dp_pdev_print_tx_delay_stats(soc);
11215 		break;
11216 
11217 	default:
11218 		status = QDF_STATUS_E_INVAL;
11219 		break;
11220 	}
11221 
11222 	return status;
11223 
11224 }
11225 
11226 #ifdef WLAN_SYSFS_DP_STATS
11227 static
11228 void dp_sysfs_get_stat_type(struct dp_soc *soc, uint32_t *mac_id,
11229 			    uint32_t *stat_type)
11230 {
11231 	qdf_spinlock_acquire(&soc->sysfs_config->rw_stats_lock);
11232 	*stat_type = soc->sysfs_config->stat_type_requested;
11233 	*mac_id   = soc->sysfs_config->mac_id;
11234 
11235 	qdf_spinlock_release(&soc->sysfs_config->rw_stats_lock);
11236 }
11237 
11238 static
11239 void dp_sysfs_update_config_buf_params(struct dp_soc *soc,
11240 				       uint32_t curr_len,
11241 				       uint32_t max_buf_len,
11242 				       char *buf)
11243 {
11244 	qdf_spinlock_acquire(&soc->sysfs_config->sysfs_write_user_buffer);
11245 	/* set sysfs_config parameters */
11246 	soc->sysfs_config->buf = buf;
11247 	soc->sysfs_config->curr_buffer_length = curr_len;
11248 	soc->sysfs_config->max_buffer_length = max_buf_len;
11249 	qdf_spinlock_release(&soc->sysfs_config->sysfs_write_user_buffer);
11250 }
11251 
11252 static
11253 QDF_STATUS dp_sysfs_fill_stats(ol_txrx_soc_handle soc_hdl,
11254 			       char *buf, uint32_t buf_size)
11255 {
11256 	uint32_t mac_id = 0;
11257 	uint32_t stat_type = 0;
11258 	uint32_t fw_stats = 0;
11259 	uint32_t host_stats = 0;
11260 	enum cdp_stats stats;
11261 	struct cdp_txrx_stats_req req;
11262 	uint32_t num_stats;
11263 	struct dp_soc *soc = NULL;
11264 
11265 	if (!soc_hdl) {
11266 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
11267 		return QDF_STATUS_E_INVAL;
11268 	}
11269 
11270 	soc = cdp_soc_t_to_dp_soc(soc_hdl);
11271 
11272 	if (!soc) {
11273 		dp_cdp_err("%pK: soc is NULL", soc);
11274 		return QDF_STATUS_E_INVAL;
11275 	}
11276 
11277 	dp_sysfs_get_stat_type(soc, &mac_id, &stat_type);
11278 
11279 	stats = stat_type;
11280 	if (stats >= CDP_TXRX_MAX_STATS) {
11281 		dp_cdp_info("sysfs stat type requested is invalid");
11282 		return QDF_STATUS_E_INVAL;
11283 	}
11284 	/*
11285 	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
11286 	 *			has to be updated if new FW HTT stats added
11287 	 */
11288 	if (stats > CDP_TXRX_MAX_STATS)
11289 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
11290 
11291 	num_stats = QDF_ARRAY_SIZE(dp_stats_mapping_table);
11292 
11293 	if (stats >= num_stats) {
11294 		dp_cdp_err("%pK : Invalid stats option: %d, max num stats: %d",
11295 				soc, stats, num_stats);
11296 		return QDF_STATUS_E_INVAL;
11297 	}
11298 
11299 	/* build request */
11300 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
11301 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
11302 
11303 	req.stats = stat_type;
11304 	req.mac_id = mac_id;
11305 	/* request stats to be printed */
11306 	qdf_mutex_acquire(&soc->sysfs_config->sysfs_read_lock);
11307 
11308 	if (fw_stats != TXRX_FW_STATS_INVALID) {
11309 		/* update request with FW stats type */
11310 		req.cookie_val = DBG_SYSFS_STATS_COOKIE;
11311 	} else if ((host_stats != TXRX_HOST_STATS_INVALID) &&
11312 			(host_stats <= TXRX_HOST_STATS_MAX)) {
11313 		req.cookie_val = DBG_STATS_COOKIE_DEFAULT;
11314 		soc->sysfs_config->process_id = qdf_get_current_pid();
11315 		soc->sysfs_config->printing_mode = PRINTING_MODE_ENABLED;
11316 	}
11317 
11318 	dp_sysfs_update_config_buf_params(soc, 0, buf_size, buf);
11319 
11320 	dp_txrx_stats_request(soc_hdl, mac_id, &req);
11321 	soc->sysfs_config->process_id = 0;
11322 	soc->sysfs_config->printing_mode = PRINTING_MODE_DISABLED;
11323 
11324 	dp_sysfs_update_config_buf_params(soc, 0, 0, NULL);
11325 
11326 	qdf_mutex_release(&soc->sysfs_config->sysfs_read_lock);
11327 	return QDF_STATUS_SUCCESS;
11328 }
11329 
11330 static
11331 QDF_STATUS dp_sysfs_set_stat_type(ol_txrx_soc_handle soc_hdl,
11332 				  uint32_t stat_type, uint32_t mac_id)
11333 {
11334 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11335 
11336 	if (!soc_hdl) {
11337 		dp_cdp_err("%pK: soc is NULL", soc);
11338 		return QDF_STATUS_E_INVAL;
11339 	}
11340 
11341 	qdf_spinlock_acquire(&soc->sysfs_config->rw_stats_lock);
11342 
11343 	soc->sysfs_config->stat_type_requested = stat_type;
11344 	soc->sysfs_config->mac_id = mac_id;
11345 
11346 	qdf_spinlock_release(&soc->sysfs_config->rw_stats_lock);
11347 
11348 	return QDF_STATUS_SUCCESS;
11349 }
11350 
11351 static
11352 QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl)
11353 {
11354 	struct dp_soc *soc;
11355 	QDF_STATUS status;
11356 
11357 	if (!soc_hdl) {
11358 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
11359 		return QDF_STATUS_E_INVAL;
11360 	}
11361 
11362 	soc = soc_hdl;
11363 
11364 	soc->sysfs_config = qdf_mem_malloc(sizeof(struct sysfs_stats_config));
11365 	if (!soc->sysfs_config) {
11366 		dp_cdp_err("failed to allocate memory for sysfs_config no memory");
11367 		return QDF_STATUS_E_NOMEM;
11368 	}
11369 
11370 	status = qdf_event_create(&soc->sysfs_config->sysfs_txrx_fw_request_done);
11371 	/* create event for fw stats request from sysfs */
11372 	if (status != QDF_STATUS_SUCCESS) {
11373 		dp_cdp_err("failed to create event sysfs_txrx_fw_request_done");
11374 		qdf_mem_free(soc->sysfs_config);
11375 		soc->sysfs_config = NULL;
11376 		return QDF_STATUS_E_FAILURE;
11377 	}
11378 
11379 	qdf_spinlock_create(&soc->sysfs_config->rw_stats_lock);
11380 	qdf_mutex_create(&soc->sysfs_config->sysfs_read_lock);
11381 	qdf_spinlock_create(&soc->sysfs_config->sysfs_write_user_buffer);
11382 
11383 	return QDF_STATUS_SUCCESS;
11384 }
11385 
11386 static
11387 QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl)
11388 {
11389 	struct dp_soc *soc;
11390 	QDF_STATUS status;
11391 
11392 	if (!soc_hdl) {
11393 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
11394 		return QDF_STATUS_E_INVAL;
11395 	}
11396 
11397 	soc = soc_hdl;
11398 	if (!soc->sysfs_config) {
11399 		dp_cdp_err("soc->sysfs_config is NULL");
11400 		return QDF_STATUS_E_FAILURE;
11401 	}
11402 
11403 	status = qdf_event_destroy(&soc->sysfs_config->sysfs_txrx_fw_request_done);
11404 	if (status != QDF_STATUS_SUCCESS)
11405 		dp_cdp_err("Failed to detroy event sysfs_txrx_fw_request_done ");
11406 
11407 	qdf_mutex_destroy(&soc->sysfs_config->sysfs_read_lock);
11408 	qdf_spinlock_destroy(&soc->sysfs_config->rw_stats_lock);
11409 	qdf_spinlock_destroy(&soc->sysfs_config->sysfs_write_user_buffer);
11410 
11411 	qdf_mem_free(soc->sysfs_config);
11412 
11413 	return QDF_STATUS_SUCCESS;
11414 }
11415 
11416 #else /* WLAN_SYSFS_DP_STATS */
11417 
/*
 * dp_sysfs_deinitialize_stats() - No-op stub used when
 * WLAN_SYSFS_DP_STATS is disabled; no sysfs stats state exists.
 * @soc_hdl: soc handle
 *
 * @Return: QDF_STATUS_SUCCESS
 */
static
QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl)
{
	return QDF_STATUS_SUCCESS;
}
11423 
/*
 * dp_sysfs_initialize_stats() - No-op stub used when
 * WLAN_SYSFS_DP_STATS is disabled; nothing to allocate.
 * @soc_hdl: soc handle
 *
 * @Return: QDF_STATUS_SUCCESS
 */
static
QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl)
{
	return QDF_STATUS_SUCCESS;
}
11429 #endif /* WLAN_SYSFS_DP_STATS */
11430 
11431 /**
11432  * dp_txrx_clear_dump_stats() - clear dumpStats
11433  * @soc- soc handle
11434  * @value - stats option
11435  *
11436  * Return: 0 - Success, non-zero - failure
11437  */
11438 static
11439 QDF_STATUS dp_txrx_clear_dump_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
11440 				    uint8_t value)
11441 {
11442 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11443 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11444 
11445 	if (!soc) {
11446 		dp_err("soc is NULL");
11447 		return QDF_STATUS_E_INVAL;
11448 	}
11449 
11450 	switch (value) {
11451 	case CDP_TXRX_TSO_STATS:
11452 		dp_txrx_clear_tso_stats(soc);
11453 		break;
11454 
11455 	case CDP_DP_TX_HW_LATENCY_STATS:
11456 		dp_pdev_clear_tx_delay_stats(soc);
11457 		break;
11458 
11459 	default:
11460 		status = QDF_STATUS_E_INVAL;
11461 		break;
11462 	}
11463 
11464 	return status;
11465 }
11466 
11467 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
11468 /**
11469  * dp_update_flow_control_parameters() - API to store datapath
11470  *                            config parameters
11471  * @soc: soc handle
11472  * @cfg: ini parameter handle
11473  *
11474  * Return: void
11475  */
11476 static inline
11477 void dp_update_flow_control_parameters(struct dp_soc *soc,
11478 				struct cdp_config_params *params)
11479 {
11480 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
11481 					params->tx_flow_stop_queue_threshold;
11482 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
11483 					params->tx_flow_start_queue_offset;
11484 }
11485 #else
/* No-op stub: tx flow control v2 (QCA_LL_TX_FLOW_CONTROL_V2) disabled */
static inline
void dp_update_flow_control_parameters(struct dp_soc *soc,
				struct cdp_config_params *params)
{
}
11491 #endif
11492 
11493 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
11494 /* Max packet limit for TX Comp packet loop (dp_tx_comp_handler) */
11495 #define DP_TX_COMP_LOOP_PKT_LIMIT_MAX 1024
11496 
11497 /* Max packet limit for RX REAP Loop (dp_rx_process) */
11498 #define DP_RX_REAP_LOOP_PKT_LIMIT_MAX 1024
11499 
11500 static
11501 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
11502 					struct cdp_config_params *params)
11503 {
11504 	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit =
11505 				params->tx_comp_loop_pkt_limit;
11506 
11507 	if (params->tx_comp_loop_pkt_limit < DP_TX_COMP_LOOP_PKT_LIMIT_MAX)
11508 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = true;
11509 	else
11510 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = false;
11511 
11512 	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit =
11513 				params->rx_reap_loop_pkt_limit;
11514 
11515 	if (params->rx_reap_loop_pkt_limit < DP_RX_REAP_LOOP_PKT_LIMIT_MAX)
11516 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = true;
11517 	else
11518 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = false;
11519 
11520 	soc->wlan_cfg_ctx->rx_hp_oos_update_limit =
11521 				params->rx_hp_oos_update_limit;
11522 
11523 	dp_info("tx_comp_loop_pkt_limit %u tx_comp_enable_eol_data_check %u rx_reap_loop_pkt_limit %u rx_enable_eol_data_check %u rx_hp_oos_update_limit %u",
11524 		soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit,
11525 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check,
11526 		soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit,
11527 		soc->wlan_cfg_ctx->rx_enable_eol_data_check,
11528 		soc->wlan_cfg_ctx->rx_hp_oos_update_limit);
11529 }
11530 
/*
 * dp_update_soft_irq_limits() - override the TX completion and RX reap
 * per-loop packet limits at runtime.
 * @soc: soc handle
 * @tx_limit: max packets processed per TX completion loop
 * @rx_limit: max packets reaped per RX process loop
 */
static void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
				      uint32_t rx_limit)
{
	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit = tx_limit;
	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit = rx_limit;
}
11537 
11538 #else
/* No-op stub: WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT disabled */
static inline
void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
					struct cdp_config_params *params)
{ }
11543 
/* No-op stub: WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT disabled */
static inline
void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
			       uint32_t rx_limit)
{
}
11549 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
11550 
11551 /**
11552  * dp_update_config_parameters() - API to store datapath
11553  *                            config parameters
11554  * @soc: soc handle
11555  * @cfg: ini parameter handle
11556  *
11557  * Return: status
11558  */
11559 static
11560 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
11561 				struct cdp_config_params *params)
11562 {
11563 	struct dp_soc *soc = (struct dp_soc *)psoc;
11564 
11565 	if (!(soc)) {
11566 		dp_cdp_err("%pK: Invalid handle", soc);
11567 		return QDF_STATUS_E_INVAL;
11568 	}
11569 
11570 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
11571 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
11572 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
11573 	soc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload =
11574 				params->p2p_tcp_udp_checksumoffload;
11575 	soc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload =
11576 				params->nan_tcp_udp_checksumoffload;
11577 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
11578 				params->tcp_udp_checksumoffload;
11579 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
11580 	soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
11581 	soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
11582 
11583 	dp_update_rx_soft_irq_limit_params(soc, params);
11584 	dp_update_flow_control_parameters(soc, params);
11585 
11586 	return QDF_STATUS_SUCCESS;
11587 }
11588 
/* WDS ops table registered with the CDP layer */
static struct cdp_wds_ops dp_ops_wds = {
	.vdev_set_wds = dp_vdev_set_wds,
#ifdef WDS_VENDOR_EXTENSION
	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
#endif
};
11596 
11597 /*
11598  * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
11599  * @soc_hdl - datapath soc handle
11600  * @vdev_id - virtual interface id
11601  * @callback - callback function
11602  * @ctxt: callback context
11603  *
11604  */
11605 static void
11606 dp_txrx_data_tx_cb_set(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
11607 		       ol_txrx_data_tx_cb callback, void *ctxt)
11608 {
11609 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11610 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11611 						     DP_MOD_ID_CDP);
11612 
11613 	if (!vdev)
11614 		return;
11615 
11616 	vdev->tx_non_std_data_callback.func = callback;
11617 	vdev->tx_non_std_data_callback.ctxt = ctxt;
11618 
11619 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11620 }
11621 
11622 /**
11623  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
11624  * @soc: datapath soc handle
11625  * @pdev_id: id of datapath pdev handle
11626  *
11627  * Return: opaque pointer to dp txrx handle
11628  */
11629 static void *dp_pdev_get_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id)
11630 {
11631 	struct dp_pdev *pdev =
11632 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11633 						   pdev_id);
11634 	if (qdf_unlikely(!pdev))
11635 		return NULL;
11636 
11637 	return pdev->dp_txrx_handle;
11638 }
11639 
11640 /**
11641  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
11642  * @soc: datapath soc handle
11643  * @pdev_id: id of datapath pdev handle
11644  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
11645  *
11646  * Return: void
11647  */
11648 static void
11649 dp_pdev_set_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id,
11650 			   void *dp_txrx_hdl)
11651 {
11652 	struct dp_pdev *pdev =
11653 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11654 						   pdev_id);
11655 
11656 	if (!pdev)
11657 		return;
11658 
11659 	pdev->dp_txrx_handle = dp_txrx_hdl;
11660 }
11661 
11662 /**
11663  * dp_vdev_get_dp_ext_handle() - get dp handle from vdev
11664  * @soc: datapath soc handle
11665  * @vdev_id: vdev id
11666  *
11667  * Return: opaque pointer to dp txrx handle
11668  */
11669 static void *dp_vdev_get_dp_ext_handle(ol_txrx_soc_handle soc_hdl,
11670 				       uint8_t vdev_id)
11671 {
11672 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11673 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11674 						     DP_MOD_ID_CDP);
11675 	void *dp_ext_handle;
11676 
11677 	if (!vdev)
11678 		return NULL;
11679 	dp_ext_handle = vdev->vdev_dp_ext_handle;
11680 
11681 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11682 	return dp_ext_handle;
11683 }
11684 
11685 /**
11686  * dp_vdev_set_dp_ext_handle() - set dp handle in vdev
11687  * @soc: datapath soc handle
11688  * @vdev_id: vdev id
11689  * @size: size of advance dp handle
11690  *
11691  * Return: QDF_STATUS
11692  */
11693 static QDF_STATUS
11694 dp_vdev_set_dp_ext_handle(ol_txrx_soc_handle soc_hdl, uint8_t vdev_id,
11695 			  uint16_t size)
11696 {
11697 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11698 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11699 						     DP_MOD_ID_CDP);
11700 	void *dp_ext_handle;
11701 
11702 	if (!vdev)
11703 		return QDF_STATUS_E_FAILURE;
11704 
11705 	dp_ext_handle = qdf_mem_malloc(size);
11706 
11707 	if (!dp_ext_handle) {
11708 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11709 		return QDF_STATUS_E_FAILURE;
11710 	}
11711 
11712 	vdev->vdev_dp_ext_handle = dp_ext_handle;
11713 
11714 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11715 	return QDF_STATUS_SUCCESS;
11716 }
11717 
11718 /**
11719  * dp_vdev_inform_ll_conn() - Inform vdev to add/delete a latency critical
11720  *			      connection for this vdev
11721  * @soc_hdl: CDP soc handle
11722  * @vdev_id: vdev ID
11723  * @action: Add/Delete action
11724  *
11725  * Returns: QDF_STATUS.
11726  */
11727 static QDF_STATUS
11728 dp_vdev_inform_ll_conn(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
11729 		       enum vdev_ll_conn_actions action)
11730 {
11731 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11732 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11733 						     DP_MOD_ID_CDP);
11734 
11735 	if (!vdev) {
11736 		dp_err("LL connection action for invalid vdev %d", vdev_id);
11737 		return QDF_STATUS_E_FAILURE;
11738 	}
11739 
11740 	switch (action) {
11741 	case CDP_VDEV_LL_CONN_ADD:
11742 		vdev->num_latency_critical_conn++;
11743 		break;
11744 
11745 	case CDP_VDEV_LL_CONN_DEL:
11746 		vdev->num_latency_critical_conn--;
11747 		break;
11748 
11749 	default:
11750 		dp_err("LL connection action invalid %d", action);
11751 		break;
11752 	}
11753 
11754 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11755 	return QDF_STATUS_SUCCESS;
11756 }
11757 
11758 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
11759 /**
11760  * dp_soc_set_swlm_enable() - Enable/Disable SWLM if initialized.
11761  * @soc_hdl: CDP Soc handle
11762  * @value: Enable/Disable value
11763  *
11764  * Returns: QDF_STATUS
11765  */
11766 static QDF_STATUS dp_soc_set_swlm_enable(struct cdp_soc_t *soc_hdl,
11767 					 uint8_t value)
11768 {
11769 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11770 
11771 	if (!soc->swlm.is_init) {
11772 		dp_err("SWLM is not initialized");
11773 		return QDF_STATUS_E_FAILURE;
11774 	}
11775 
11776 	soc->swlm.is_enabled = !!value;
11777 
11778 	return QDF_STATUS_SUCCESS;
11779 }
11780 
11781 /**
11782  * dp_soc_is_swlm_enabled() - Check if SWLM is enabled.
11783  * @soc_hdl: CDP Soc handle
11784  *
11785  * Returns: QDF_STATUS
11786  */
11787 static uint8_t dp_soc_is_swlm_enabled(struct cdp_soc_t *soc_hdl)
11788 {
11789 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11790 
11791 	return soc->swlm.is_enabled;
11792 }
11793 #endif
11794 
11795 /**
11796  * dp_display_srng_info() - Dump the srng HP TP info
11797  * @soc_hdl: CDP Soc handle
11798  *
11799  * This function dumps the SW hp/tp values for the important rings.
11800  * HW hp/tp values are not being dumped, since it can lead to
11801  * READ NOC error when UMAC is in low power state. MCC does not have
11802  * device force wake working yet.
11803  *
11804  * Return: none
11805  */
11806 static void dp_display_srng_info(struct cdp_soc_t *soc_hdl)
11807 {
11808 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11809 	hal_soc_handle_t hal_soc = soc->hal_soc;
11810 	uint32_t hp, tp, i;
11811 
11812 	dp_info("SRNG HP-TP data:");
11813 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
11814 		hal_get_sw_hptp(hal_soc, soc->tcl_data_ring[i].hal_srng,
11815 				&tp, &hp);
11816 		dp_info("TCL DATA ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
11817 
11818 		if (wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, i) ==
11819 		    INVALID_WBM_RING_NUM)
11820 			continue;
11821 
11822 		hal_get_sw_hptp(hal_soc, soc->tx_comp_ring[i].hal_srng,
11823 				&tp, &hp);
11824 		dp_info("TX comp ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
11825 	}
11826 
11827 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
11828 		hal_get_sw_hptp(hal_soc, soc->reo_dest_ring[i].hal_srng,
11829 				&tp, &hp);
11830 		dp_info("REO DST ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
11831 	}
11832 
11833 	hal_get_sw_hptp(hal_soc, soc->reo_exception_ring.hal_srng, &tp, &hp);
11834 	dp_info("REO exception ring: hp=0x%x, tp=0x%x", hp, tp);
11835 
11836 	hal_get_sw_hptp(hal_soc, soc->rx_rel_ring.hal_srng, &tp, &hp);
11837 	dp_info("WBM RX release ring: hp=0x%x, tp=0x%x", hp, tp);
11838 
11839 	hal_get_sw_hptp(hal_soc, soc->wbm_desc_rel_ring.hal_srng, &tp, &hp);
11840 	dp_info("WBM desc release ring: hp=0x%x, tp=0x%x", hp, tp);
11841 }
11842 
11843 /**
11844  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
11845  * @soc_handle: datapath soc handle
11846  *
11847  * Return: opaque pointer to external dp (non-core DP)
11848  */
11849 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
11850 {
11851 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
11852 
11853 	return soc->external_txrx_handle;
11854 }
11855 
11856 /**
11857  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
11858  * @soc_handle: datapath soc handle
11859  * @txrx_handle: opaque pointer to external dp (non-core DP)
11860  *
11861  * Return: void
11862  */
11863 static void
11864 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
11865 {
11866 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
11867 
11868 	soc->external_txrx_handle = txrx_handle;
11869 }
11870 
11871 /**
11872  * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping
11873  * @soc_hdl: datapath soc handle
11874  * @pdev_id: id of the datapath pdev handle
11875  * @lmac_id: lmac id
11876  *
11877  * Return: QDF_STATUS
11878  */
11879 static QDF_STATUS
11880 dp_soc_map_pdev_to_lmac
11881 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
11882 	 uint32_t lmac_id)
11883 {
11884 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11885 
11886 	wlan_cfg_set_hw_mac_idx(soc->wlan_cfg_ctx,
11887 				pdev_id,
11888 				lmac_id);
11889 
11890 	/*Set host PDEV ID for lmac_id*/
11891 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
11892 			      pdev_id,
11893 			      lmac_id);
11894 
11895 	return QDF_STATUS_SUCCESS;
11896 }
11897 
11898 /**
11899  * dp_soc_handle_pdev_mode_change() - Update pdev to lmac mapping
11900  * @soc_hdl: datapath soc handle
11901  * @pdev_id: id of the datapath pdev handle
11902  * @lmac_id: lmac id
11903  *
11904  * In the event of a dynamic mode change, update the pdev to lmac mapping
11905  *
11906  * Return: QDF_STATUS
11907  */
11908 static QDF_STATUS
11909 dp_soc_handle_pdev_mode_change
11910 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
11911 	 uint32_t lmac_id)
11912 {
11913 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11914 	struct dp_vdev *vdev = NULL;
11915 	uint8_t hw_pdev_id, mac_id;
11916 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc,
11917 								  pdev_id);
11918 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
11919 
11920 	if (qdf_unlikely(!pdev))
11921 		return QDF_STATUS_E_FAILURE;
11922 
11923 	pdev->lmac_id = lmac_id;
11924 	pdev->target_pdev_id =
11925 		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
11926 	dp_info(" mode change %d %d\n", pdev->pdev_id, pdev->lmac_id);
11927 
11928 	/*Set host PDEV ID for lmac_id*/
11929 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
11930 			      pdev->pdev_id,
11931 			      lmac_id);
11932 
11933 	hw_pdev_id =
11934 		dp_get_target_pdev_id_for_host_pdev_id(soc,
11935 						       pdev->pdev_id);
11936 
11937 	/*
11938 	 * When NSS offload is enabled, send pdev_id->lmac_id
11939 	 * and pdev_id to hw_pdev_id to NSS FW
11940 	 */
11941 	if (nss_config) {
11942 		mac_id = pdev->lmac_id;
11943 		if (soc->cdp_soc.ol_ops->pdev_update_lmac_n_target_pdev_id)
11944 			soc->cdp_soc.ol_ops->
11945 				pdev_update_lmac_n_target_pdev_id(
11946 				soc->ctrl_psoc,
11947 				&pdev_id, &mac_id, &hw_pdev_id);
11948 	}
11949 
11950 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
11951 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
11952 		DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
11953 					       hw_pdev_id);
11954 		vdev->lmac_id = pdev->lmac_id;
11955 	}
11956 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
11957 
11958 	return QDF_STATUS_SUCCESS;
11959 }
11960 
11961 /**
11962  * dp_soc_set_pdev_status_down() - set pdev down/up status
11963  * @soc: datapath soc handle
11964  * @pdev_id: id of datapath pdev handle
11965  * @is_pdev_down: pdev down/up status
11966  *
11967  * Return: QDF_STATUS
11968  */
11969 static QDF_STATUS
11970 dp_soc_set_pdev_status_down(struct cdp_soc_t *soc, uint8_t pdev_id,
11971 			    bool is_pdev_down)
11972 {
11973 	struct dp_pdev *pdev =
11974 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11975 						   pdev_id);
11976 	if (!pdev)
11977 		return QDF_STATUS_E_FAILURE;
11978 
11979 	pdev->is_pdev_down = is_pdev_down;
11980 	return QDF_STATUS_SUCCESS;
11981 }
11982 
11983 /**
11984  * dp_get_cfg_capabilities() - get dp capabilities
11985  * @soc_handle: datapath soc handle
11986  * @dp_caps: enum for dp capabilities
11987  *
11988  * Return: bool to determine if dp caps is enabled
11989  */
11990 static bool
11991 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
11992 			enum cdp_capabilities dp_caps)
11993 {
11994 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
11995 
11996 	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
11997 }
11998 
#ifdef FEATURE_AST
/**
 * dp_peer_teardown_wifi3() - move a peer to logical-delete state and
 *			      purge its AST entries
 * @soc_hdl: datapath soc handle
 * @vdev_id: id of the vdev the peer belongs to
 * @peer_mac: peer mac address
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE when the peer
 *	   is not found (e.g. monitor vap mac address)
 */
static QDF_STATUS
dp_peer_teardown_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		       uint8_t *peer_mac)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_peer *peer;

	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
				      DP_MOD_ID_CDP);
	/* Peer can be null for monitor vap mac address */
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid peer\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	dp_peer_update_state(soc, peer, DP_PEER_STATE_LOGICAL_DELETE);

	/* AST table is shared; delete entries under the ast lock */
	qdf_spin_lock_bh(&soc->ast_lock);
	dp_peer_delete_ast_entries(soc, peer);
	qdf_spin_unlock_bh(&soc->ast_lock);

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}
#endif
12027 
#ifndef WLAN_SUPPORT_RX_TAG_STATISTICS
/**
 * dp_dump_pdev_rx_protocol_tag_stats - dump the number of packets tagged for
 * given protocol type (RX_PROTOCOL_TAG_ALL indicates for all protocol)
 * @soc: cdp_soc handle
 * @pdev_id: id of cdp_pdev handle
 * @protocol_type: protocol type for which stats should be displayed
 *
 * No-op stub compiled in when WLAN_SUPPORT_RX_TAG_STATISTICS is disabled,
 * so that the cdp ops table can reference the symbol unconditionally.
 *
 * Return: none
 */
static inline void
dp_dump_pdev_rx_protocol_tag_stats(struct cdp_soc_t  *soc, uint8_t pdev_id,
				   uint16_t protocol_type)
{
}
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
12044 
#ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
/**
 * dp_update_pdev_rx_protocol_tag - Add/remove a protocol tag that should be
 * applied to the desired protocol type packets
 * @soc: soc handle
 * @pdev_id: id of cdp_pdev handle
 * @enable_rx_protocol_tag: bitmask that indicates what protocol types
 * are enabled for tagging. zero indicates disable feature, non-zero indicates
 * enable feature
 * @protocol_type: new protocol type for which the tag is being added
 * @tag: user configured tag for the new protocol
 *
 * No-op stub compiled in when WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG is
 * disabled; always reports success so callers need no feature check.
 *
 * Return: Success
 */
static inline QDF_STATUS
dp_update_pdev_rx_protocol_tag(struct cdp_soc_t  *soc, uint8_t pdev_id,
			       uint32_t enable_rx_protocol_tag,
			       uint16_t protocol_type,
			       uint16_t tag)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
12068 
#ifndef WLAN_SUPPORT_RX_FLOW_TAG
/**
 * dp_set_rx_flow_tag - add/delete a flow
 * @cdp_soc: soc handle
 * @pdev_id: id of cdp_pdev handle
 * @flow_info: flow tuple that is to be added to/deleted from flow search table
 *
 * No-op stub compiled in when WLAN_SUPPORT_RX_FLOW_TAG is disabled;
 * always reports success so callers need no feature check.
 *
 * Return: Success
 */
static inline QDF_STATUS
dp_set_rx_flow_tag(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
		   struct cdp_rx_flow_info *flow_info)
{
	return QDF_STATUS_SUCCESS;
}
12084 /**
12085  * dp_dump_rx_flow_tag_stats - dump the number of packets tagged for
12086  * given flow 5-tuple
12087  * @cdp_soc: soc handle
12088  * @pdev_id: id of cdp_pdev handle
12089  * @flow_info: flow 5-tuple for which stats should be displayed
12090  *
12091  * Return: Success
12092  */
12093 static inline QDF_STATUS
12094 dp_dump_rx_flow_tag_stats(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
12095 			  struct cdp_rx_flow_info *flow_info)
12096 {
12097 	return QDF_STATUS_SUCCESS;
12098 }
12099 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
12100 
12101 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
12102 					   uint32_t max_peers,
12103 					   uint32_t max_ast_index,
12104 					   uint8_t peer_map_unmap_versions)
12105 {
12106 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12107 	QDF_STATUS status;
12108 
12109 	soc->max_peers = max_peers;
12110 
12111 	wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);
12112 
12113 	status = soc->arch_ops.txrx_peer_map_attach(soc);
12114 	if (!QDF_IS_STATUS_SUCCESS(status)) {
12115 		dp_err("failure in allocating peer tables");
12116 		return QDF_STATUS_E_FAILURE;
12117 	}
12118 
12119 	dp_info("max_peers %u, calculated max_peers %u max_ast_index: %u\n",
12120 		max_peers, soc->max_peer_id, max_ast_index);
12121 
12122 	status = dp_peer_find_attach(soc);
12123 	if (!QDF_IS_STATUS_SUCCESS(status)) {
12124 		dp_err("Peer find attach failure");
12125 		goto fail;
12126 	}
12127 
12128 	soc->peer_map_unmap_versions = peer_map_unmap_versions;
12129 	soc->peer_map_attach_success = TRUE;
12130 
12131 	return QDF_STATUS_SUCCESS;
12132 fail:
12133 	soc->arch_ops.txrx_peer_map_detach(soc);
12134 
12135 	return status;
12136 }
12137 
12138 static QDF_STATUS dp_soc_set_param(struct cdp_soc_t  *soc_hdl,
12139 				   enum cdp_soc_param_t param,
12140 				   uint32_t value)
12141 {
12142 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12143 
12144 	switch (param) {
12145 	case DP_SOC_PARAM_MSDU_EXCEPTION_DESC:
12146 		soc->num_msdu_exception_desc = value;
12147 		dp_info("num_msdu exception_desc %u",
12148 			value);
12149 		break;
12150 	case DP_SOC_PARAM_CMEM_FSE_SUPPORT:
12151 		if (wlan_cfg_is_fst_in_cmem_enabled(soc->wlan_cfg_ctx))
12152 			soc->fst_in_cmem = !!value;
12153 		dp_info("FW supports CMEM FSE %u", value);
12154 		break;
12155 	case DP_SOC_PARAM_MAX_AST_AGEOUT:
12156 		soc->max_ast_ageout_count = value;
12157 		dp_info("Max ast ageout count %u", soc->max_ast_ageout_count);
12158 		break;
12159 	case DP_SOC_PARAM_EAPOL_OVER_CONTROL_PORT:
12160 		soc->eapol_over_control_port = value;
12161 		dp_info("Eapol over control_port:%d",
12162 			soc->eapol_over_control_port);
12163 		break;
12164 	case DP_SOC_PARAM_MULTI_PEER_GRP_CMD_SUPPORT:
12165 		soc->multi_peer_grp_cmd_supported = value;
12166 		dp_info("Multi Peer group command support:%d",
12167 			soc->multi_peer_grp_cmd_supported);
12168 		break;
12169 	case DP_SOC_PARAM_RSSI_DBM_CONV_SUPPORT:
12170 		soc->features.rssi_dbm_conv_support = value;
12171 		dp_info("Rssi dbm converstion support:%u",
12172 			soc->features.rssi_dbm_conv_support);
12173 		break;
12174 	default:
12175 		dp_info("not handled param %d ", param);
12176 		break;
12177 	}
12178 
12179 	return QDF_STATUS_SUCCESS;
12180 }
12181 
12182 static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
12183 				      void *stats_ctx)
12184 {
12185 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12186 
12187 	soc->rate_stats_ctx = (struct cdp_soc_rate_stats_ctx *)stats_ctx;
12188 }
12189 
#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
/**
 * dp_peer_flush_rate_stats_req(): Flush peer rate stats
 * @soc: Datapath SOC handle
 * @peer: Datapath peer
 * @arg: argument to iter function (unused)
 *
 * Peer-iterator callback: posts a WDI_EVENT_FLUSH_RATE_STATS_REQ event
 * for every non-bss peer, carrying the peer's monitor stats context.
 *
 * Return: None
 */
static void
dp_peer_flush_rate_stats_req(struct dp_soc *soc, struct dp_peer *peer,
			     void *arg)
{
	/* bss peer is skipped; only real client peers are flushed */
	if (peer->bss_peer)
		return;

	dp_wdi_event_handler(
		WDI_EVENT_FLUSH_RATE_STATS_REQ,
		soc, dp_monitor_peer_get_peerstats_ctx(soc, peer),
		peer->peer_id,
		WDI_NO_VAL, peer->vdev->pdev->pdev_id);
}
12212 
12213 /**
12214  * dp_flush_rate_stats_req(): Flush peer rate stats in pdev
12215  * @soc_hdl: Datapath SOC handle
12216  * @pdev_id: pdev_id
12217  *
12218  * Return: QDF_STATUS
12219  */
12220 static QDF_STATUS dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
12221 					  uint8_t pdev_id)
12222 {
12223 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12224 	struct dp_pdev *pdev =
12225 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
12226 						   pdev_id);
12227 	if (!pdev)
12228 		return QDF_STATUS_E_FAILURE;
12229 
12230 	dp_pdev_iterate_peer(pdev, dp_peer_flush_rate_stats_req, NULL,
12231 			     DP_MOD_ID_CDP);
12232 
12233 	return QDF_STATUS_SUCCESS;
12234 }
#else
/* Stub: rate-stats flush is a no-op without FEATURE_PERPKT_INFO/WDI events */
static inline QDF_STATUS
dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
			uint8_t pdev_id)
{
	return QDF_STATUS_SUCCESS;
}
#endif
12243 
12244 static void *dp_peer_get_peerstats_ctx(struct cdp_soc_t *soc_hdl,
12245 				       uint8_t vdev_id,
12246 				       uint8_t *mac_addr)
12247 {
12248 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12249 	struct dp_peer *peer;
12250 	void *peerstats_ctx = NULL;
12251 
12252 	if (mac_addr) {
12253 		peer = dp_peer_find_hash_find(soc, mac_addr,
12254 					      0, vdev_id,
12255 					      DP_MOD_ID_CDP);
12256 		if (!peer)
12257 			return NULL;
12258 
12259 		if (!IS_MLO_DP_MLD_PEER(peer))
12260 			peerstats_ctx = dp_monitor_peer_get_peerstats_ctx(soc,
12261 									  peer);
12262 
12263 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
12264 	}
12265 
12266 	return peerstats_ctx;
12267 }
12268 
12269 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
12270 static QDF_STATUS dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
12271 					   uint8_t pdev_id,
12272 					   void *buf)
12273 {
12274 	 dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS,
12275 			      (struct dp_soc *)soc, buf, HTT_INVALID_PEER,
12276 			      WDI_NO_VAL, pdev_id);
12277 	return QDF_STATUS_SUCCESS;
12278 }
#else
/* Stub: peer rate-stats flush is a no-op without WDI event support */
static inline QDF_STATUS
dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
			 uint8_t pdev_id,
			 void *buf)
{
	return QDF_STATUS_SUCCESS;
}
#endif
12288 
12289 static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle)
12290 {
12291 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12292 
12293 	return soc->rate_stats_ctx;
12294 }
12295 
12296 /*
12297  * dp_get_cfg() - get dp cfg
12298  * @soc: cdp soc handle
12299  * @cfg: cfg enum
12300  *
12301  * Return: cfg value
12302  */
12303 static uint32_t dp_get_cfg(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg)
12304 {
12305 	struct dp_soc *dpsoc = (struct dp_soc *)soc;
12306 	uint32_t value = 0;
12307 
12308 	switch (cfg) {
12309 	case cfg_dp_enable_data_stall:
12310 		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
12311 		break;
12312 	case cfg_dp_enable_p2p_ip_tcp_udp_checksum_offload:
12313 		value = dpsoc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload;
12314 		break;
12315 	case cfg_dp_enable_nan_ip_tcp_udp_checksum_offload:
12316 		value = dpsoc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload;
12317 		break;
12318 	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
12319 		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
12320 		break;
12321 	case cfg_dp_disable_legacy_mode_csum_offload:
12322 		value = dpsoc->wlan_cfg_ctx->
12323 					legacy_mode_checksumoffload_disable;
12324 		break;
12325 	case cfg_dp_tso_enable:
12326 		value = dpsoc->wlan_cfg_ctx->tso_enabled;
12327 		break;
12328 	case cfg_dp_lro_enable:
12329 		value = dpsoc->wlan_cfg_ctx->lro_enabled;
12330 		break;
12331 	case cfg_dp_gro_enable:
12332 		value = dpsoc->wlan_cfg_ctx->gro_enabled;
12333 		break;
12334 	case cfg_dp_tc_based_dyn_gro_enable:
12335 		value = dpsoc->wlan_cfg_ctx->tc_based_dynamic_gro;
12336 		break;
12337 	case cfg_dp_tc_ingress_prio:
12338 		value = dpsoc->wlan_cfg_ctx->tc_ingress_prio;
12339 		break;
12340 	case cfg_dp_sg_enable:
12341 		value = dpsoc->wlan_cfg_ctx->sg_enabled;
12342 		break;
12343 	case cfg_dp_tx_flow_start_queue_offset:
12344 		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
12345 		break;
12346 	case cfg_dp_tx_flow_stop_queue_threshold:
12347 		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
12348 		break;
12349 	case cfg_dp_disable_intra_bss_fwd:
12350 		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
12351 		break;
12352 	case cfg_dp_pktlog_buffer_size:
12353 		value = dpsoc->wlan_cfg_ctx->pktlog_buffer_size;
12354 		break;
12355 	case cfg_dp_wow_check_rx_pending:
12356 		value = dpsoc->wlan_cfg_ctx->wow_check_rx_pending_enable;
12357 		break;
12358 	default:
12359 		value =  0;
12360 	}
12361 
12362 	return value;
12363 }
12364 
#ifdef PEER_FLOW_CONTROL
/**
 * dp_tx_flow_ctrl_configure_pdev() - Configure flow control params
 * @soc_handle: datapath soc handle
 * @pdev_id: id of datapath pdev handle
 * @param: ol ath params
 * @value: value of the flag
 * @buff: Buffer to be passed; NULL means "set", non-NULL means "query"
 *
 * Implemented this function same as legacy function. In legacy code, single
 * function is used to display stats and update pdev params.
 *
 * Return: 0 for success. nonzero for failure.
 */
static uint32_t dp_tx_flow_ctrl_configure_pdev(struct cdp_soc_t *soc_handle,
					       uint8_t pdev_id,
					       enum _dp_param_t param,
					       uint32_t value, void *buff)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (qdf_unlikely(!pdev))
		return 1;

	soc = pdev->soc;
	if (!soc)
		return 1;

	switch (param) {
#ifdef QCA_ENH_V3_STATS_SUPPORT
	case DP_PARAM_VIDEO_DELAY_STATS_FC:
		if (value)
			pdev->delay_stats_flag = true;
		else
			pdev->delay_stats_flag = false;
		break;
	case DP_PARAM_VIDEO_STATS_FC:
		qdf_print("------- TID Stats ------\n");
		dp_pdev_print_tid_stats(pdev);
		qdf_print("------ Delay Stats ------\n");
		dp_pdev_print_delay_stats(pdev);
		qdf_print("------ Rx Error Stats ------\n");
		dp_pdev_print_rx_error_stats(pdev);
		break;
#endif
	case DP_PARAM_TOTAL_Q_SIZE:
		{
			uint32_t tx_min, tx_max;

			/* requested size must stay within the configured
			 * min/max tx descriptor counts
			 */
			tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx);
			tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);

			if (!buff) {
				if ((value >= tx_min) && (value <= tx_max)) {
					pdev->num_tx_allowed = value;
				} else {
					dp_tx_info("%pK: Failed to update num_tx_allowed, Q_min = %d Q_max = %d",
						   soc, tx_min, tx_max);
					break;
				}
			} else {
				/* query path: report the current limit */
				*(int *)buff = pdev->num_tx_allowed;
			}
		}
		break;
	default:
		dp_tx_info("%pK: not handled param %d ", soc, param);
		break;
	}

	return 0;
}
#endif
12441 
12442 /**
12443  * dp_set_pdev_pcp_tid_map_wifi3(): update pcp tid map in pdev
12444  * @psoc: dp soc handle
12445  * @pdev_id: id of DP_PDEV handle
12446  * @pcp: pcp value
12447  * @tid: tid value passed by the user
12448  *
12449  * Return: QDF_STATUS_SUCCESS on success
12450  */
12451 static QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
12452 						uint8_t pdev_id,
12453 						uint8_t pcp, uint8_t tid)
12454 {
12455 	struct dp_soc *soc = (struct dp_soc *)psoc;
12456 
12457 	soc->pcp_tid_map[pcp] = tid;
12458 
12459 	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
12460 	return QDF_STATUS_SUCCESS;
12461 }
12462 
12463 /**
12464  * dp_set_vdev_pcp_tid_map_wifi3(): update pcp tid map in vdev
12465  * @soc: DP soc handle
12466  * @vdev_id: id of DP_VDEV handle
12467  * @pcp: pcp value
12468  * @tid: tid value passed by the user
12469  *
12470  * Return: QDF_STATUS_SUCCESS on success
12471  */
12472 static QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc_hdl,
12473 						uint8_t vdev_id,
12474 						uint8_t pcp, uint8_t tid)
12475 {
12476 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12477 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12478 						     DP_MOD_ID_CDP);
12479 
12480 	if (!vdev)
12481 		return QDF_STATUS_E_FAILURE;
12482 
12483 	vdev->pcp_tid_map[pcp] = tid;
12484 
12485 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12486 	return QDF_STATUS_SUCCESS;
12487 }
12488 
#if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
/**
 * dp_drain_txrx() - drain pending SRNG entries across all intr contexts
 * @soc_handle: datapath soc handle
 *
 * Services every interrupt context once with a large budget so pending
 * TX completions / RX entries are reaped (used around power save /
 * runtime PM transitions).
 *
 * Return: None
 */
static void dp_drain_txrx(struct cdp_soc_t *soc_handle)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	uint32_t cur_tx_limit, cur_rx_limit;
	uint32_t budget = 0xffff;
	uint32_t val;
	int i;

	/* save current soft-irq reap limits so they can be restored below */
	cur_tx_limit = soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit;
	cur_rx_limit = soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit;

	/* Temporarily increase soft irq limits when going to drain
	 * the UMAC/LMAC SRNGs and restore them after polling.
	 * Though the budget is on higher side, the TX/RX reaping loops
	 * will not execute longer as both TX and RX would be suspended
	 * by the time this API is called.
	 */
	dp_update_soft_irq_limits(soc, budget, budget);

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
		dp_service_srngs(&soc->intr_ctx[i], budget);

	dp_update_soft_irq_limits(soc, cur_tx_limit, cur_rx_limit);

	/* Do a dummy read at offset 0; this will ensure all
	 * pendings writes(HP/TP) are flushed before read returns.
	 */
	val = HAL_REG_READ((struct hal_soc *)soc->hal_soc, 0);
	dp_debug("Register value at offset 0: %u\n", val);
}
#endif
12521 
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
/**
 * dp_set_pkt_capture_mode() - record packet capture mode in the soc cfg
 * @soc_handle: datapath soc handle
 * @val: desired packet capture mode flag
 *
 * Return: None
 */
static void
dp_set_pkt_capture_mode(struct cdp_soc_t *soc_handle, bool val)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	soc->wlan_cfg_ctx->pkt_capture_mode = val;
}
#endif
12531 
12532 #ifdef HW_TX_DELAY_STATS_ENABLE
12533 /**
12534  * dp_enable_disable_vdev_tx_delay_stats(): Start/Stop tx delay stats capture
12535  * @soc: DP soc handle
12536  * @vdev_id: vdev id
12537  * @value: value
12538  *
12539  * Return: None
12540  */
12541 static void
12542 dp_enable_disable_vdev_tx_delay_stats(struct cdp_soc_t *soc_hdl,
12543 				      uint8_t vdev_id,
12544 				      uint8_t value)
12545 {
12546 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12547 	struct dp_vdev *vdev = NULL;
12548 
12549 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
12550 	if (!vdev)
12551 		return;
12552 
12553 	vdev->hw_tx_delay_stats_enabled = value;
12554 
12555 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12556 }
12557 
12558 /**
12559  * dp_check_vdev_tx_delay_stats_enabled() - check the feature is enabled or not
12560  * @soc: DP soc handle
12561  * @vdev_id: vdev id
12562  *
12563  * Returns: 1 if enabled, 0 if disabled
12564  */
12565 static uint8_t
12566 dp_check_vdev_tx_delay_stats_enabled(struct cdp_soc_t *soc_hdl,
12567 				     uint8_t vdev_id)
12568 {
12569 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12570 	struct dp_vdev *vdev;
12571 	uint8_t ret_val = 0;
12572 
12573 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
12574 	if (!vdev)
12575 		return ret_val;
12576 
12577 	ret_val = vdev->hw_tx_delay_stats_enabled;
12578 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12579 
12580 	return ret_val;
12581 }
12582 #endif
12583 
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
/**
 * dp_recovery_vdev_flush_peers() - flush all peers of a vdev during recovery
 * @cdp_soc: cdp soc handle
 * @vdev_id: id of the vdev whose peers should be flushed
 *
 * Return: None
 */
static void
dp_recovery_vdev_flush_peers(struct cdp_soc_t *cdp_soc, uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev)
		return;

	dp_vdev_flush_peers((struct cdp_vdev *)vdev, false);
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
}
#endif
12600 
/* Common datapath ops table registered with the CDP layer */
static struct cdp_cmn_ops dp_ops_cmn = {
	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
	.txrx_vdev_attach = dp_vdev_attach_wifi3,
	.txrx_vdev_detach = dp_vdev_detach_wifi3,
	.txrx_pdev_attach = dp_pdev_attach_wifi3,
	.txrx_pdev_post_attach = dp_pdev_post_attach_wifi3,
	.txrx_pdev_detach = dp_pdev_detach_wifi3,
	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
	.txrx_peer_create = dp_peer_create_wifi3,
	.txrx_peer_setup = dp_peer_setup_wifi3,
#ifdef FEATURE_AST
	.txrx_peer_teardown = dp_peer_teardown_wifi3,
#else
	.txrx_peer_teardown = NULL,
#endif
	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
	.txrx_peer_get_ast_info_by_pdev =
		dp_peer_get_ast_info_by_pdevid_wifi3,
	.txrx_peer_ast_delete_by_soc =
		dp_peer_ast_entry_del_by_soc,
	.txrx_peer_ast_delete_by_pdev =
		dp_peer_ast_entry_del_by_pdev,
	.txrx_peer_delete = dp_peer_delete_wifi3,
#ifdef DP_RX_UDP_OVER_PEER_ROAM
	.txrx_update_roaming_peer = dp_update_roaming_peer_wifi3,
#endif
	.txrx_vdev_register = dp_vdev_register_wifi3,
	.txrx_soc_detach = dp_soc_detach_wifi3,
	.txrx_soc_deinit = dp_soc_deinit_wifi3,
	.txrx_soc_init = dp_soc_init_wifi3,
#ifndef QCA_HOST_MODE_WIFI_DISABLED
	.txrx_tso_soc_attach = dp_tso_soc_attach,
	.txrx_tso_soc_detach = dp_tso_soc_detach,
	.tx_send = dp_tx_send,
	.tx_send_exc = dp_tx_send_exception,
#endif
	.txrx_pdev_init = dp_pdev_init_wifi3,
	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
	.txrx_ath_getstats = dp_get_device_stats,
	.addba_requestprocess = dp_addba_requestprocess_wifi3,
	.addba_responsesetup = dp_addba_responsesetup_wifi3,
	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
	.delba_process = dp_delba_process_wifi3,
	.set_addba_response = dp_set_addba_response,
	.flush_cache_rx_queue = NULL,
	.tid_update_ba_win_size = dp_rx_tid_update_ba_win_size,
	/* TODO: get API's for dscp-tid need to be added*/
	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
	.txrx_get_total_per = dp_get_total_per,
	.txrx_stats_request = dp_txrx_stats_request,
	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
	.display_stats = dp_txrx_dump_stats,
	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
	.txrx_intr_detach = dp_soc_interrupt_detach,
	.set_pn_check = dp_set_pn_check_wifi3,
	.set_key_sec_type = dp_set_key_sec_type_wifi3,
	.update_config_parameters = dp_update_config_parameters,
	/* TODO: Add other functions */
	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
	.get_vdev_dp_ext_txrx_handle = dp_vdev_get_dp_ext_handle,
	.set_vdev_dp_ext_txrx_handle = dp_vdev_set_dp_ext_handle,
	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
	.map_pdev_to_lmac = dp_soc_map_pdev_to_lmac,
	.handle_mode_change = dp_soc_handle_pdev_mode_change,
	.set_pdev_status_down = dp_soc_set_pdev_status_down,
	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
	.set_soc_param = dp_soc_set_param,
	.txrx_get_os_rx_handles_from_vdev =
					dp_get_os_rx_handles_from_vdev_wifi3,
	.delba_tx_completion = dp_delba_tx_completion_wifi3,
	.get_dp_capabilities = dp_get_cfg_capabilities,
	.txrx_get_cfg = dp_get_cfg,
	.set_rate_stats_ctx = dp_soc_set_rate_stats_ctx,
	.get_rate_stats_ctx = dp_soc_get_rate_stats_ctx,
	.txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats,
	.txrx_flush_rate_stats_request = dp_flush_rate_stats_req,
	.txrx_peer_get_peerstats_ctx = dp_peer_get_peerstats_ctx,

	.set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3,
	.set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3,

	.txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler,
#ifdef QCA_MULTIPASS_SUPPORT
	.set_vlan_groupkey = dp_set_vlan_groupkey,
#endif
	.get_peer_mac_list = dp_get_peer_mac_list,
	.get_peer_id = dp_get_peer_id,
#ifdef QCA_SUPPORT_WDS_EXTENDED
	.set_wds_ext_peer_rx = dp_wds_ext_set_peer_rx,
#endif /* QCA_SUPPORT_WDS_EXTENDED */

#if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
	.txrx_drain = dp_drain_txrx,
#endif
#if defined(FEATURE_RUNTIME_PM)
	.set_rtpm_tput_policy = dp_set_rtpm_tput_policy_requirement,
#endif
#ifdef WLAN_SYSFS_DP_STATS
	.txrx_sysfs_fill_stats = dp_sysfs_fill_stats,
	.txrx_sysfs_set_stat_type = dp_sysfs_set_stat_type,
#endif /* WLAN_SYSFS_DP_STATS */
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
	.set_pkt_capture_mode = dp_set_pkt_capture_mode,
#endif
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
	.txrx_recovery_vdev_flush_peers = dp_recovery_vdev_flush_peers,
#endif
};
12721 
/* Control-path datapath ops table registered with the CDP layer */
static struct cdp_ctrl_ops dp_ops_ctrl = {
	.txrx_peer_authorize = dp_peer_authorize,
	.txrx_peer_get_authorize = dp_peer_get_authorize,
#ifdef VDEV_PEER_PROTOCOL_COUNT
	.txrx_enable_peer_protocol_count = dp_enable_vdev_peer_protocol_count,
	.txrx_set_peer_protocol_drop_mask =
		dp_enable_vdev_peer_protocol_drop_mask,
	.txrx_is_peer_protocol_count_enabled =
		dp_is_vdev_peer_protocol_count_enabled,
	.txrx_get_peer_protocol_drop_mask = dp_get_vdev_peer_protocol_drop_mask,
#endif
	.txrx_set_vdev_param = dp_set_vdev_param,
	.txrx_set_psoc_param = dp_set_psoc_param,
	.txrx_get_psoc_param = dp_get_psoc_param,
	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
	.txrx_get_sec_type = dp_get_sec_type,
	.txrx_wdi_event_sub = dp_wdi_event_sub,
	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
	.txrx_set_pdev_param = dp_set_pdev_param,
	.txrx_get_pdev_param = dp_get_pdev_param,
	.txrx_set_peer_param = dp_set_peer_param,
	.txrx_get_peer_param = dp_get_peer_param,
#ifdef VDEV_PEER_PROTOCOL_COUNT
	.txrx_peer_protocol_cnt = dp_peer_stats_update_protocol_cnt,
#endif
#ifdef WLAN_SUPPORT_MSCS
	.txrx_record_mscs_params = dp_record_mscs_params,
#endif
#ifdef WLAN_SUPPORT_SCS
	.txrx_enable_scs_params = dp_enable_scs_params,
	.txrx_record_scs_params = dp_record_scs_params,
#endif
	.set_key = dp_set_michael_key,
	.txrx_get_vdev_param = dp_get_vdev_param,
	.calculate_delay_stats = dp_calculate_delay_stats,
#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
	.txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag,
#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
	.txrx_dump_pdev_rx_protocol_tag_stats =
				dp_dump_pdev_rx_protocol_tag_stats,
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
	.txrx_set_rx_flow_tag = dp_set_rx_flow_tag,
	.txrx_dump_rx_flow_tag_stats = dp_dump_rx_flow_tag_stats,
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */
#ifdef QCA_MULTIPASS_SUPPORT
	.txrx_peer_set_vlan_id = dp_peer_set_vlan_id,
#endif /*QCA_MULTIPASS_SUPPORT*/
#if defined(WLAN_FEATURE_TSF_UPLINK_DELAY) || defined(CONFIG_SAWF)
	.txrx_set_delta_tsf = dp_set_delta_tsf,
#endif
#ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
	.txrx_set_tsf_ul_delay_report = dp_set_tsf_ul_delay_report,
	.txrx_get_uplink_delay = dp_get_uplink_delay,
#endif
#ifdef QCA_UNDECODED_METADATA_SUPPORT
	.txrx_set_pdev_phyrx_error_mask = dp_set_pdev_phyrx_error_mask,
	.txrx_get_pdev_phyrx_error_mask = dp_get_pdev_phyrx_error_mask,
#endif
	.txrx_peer_flush_frags = dp_peer_flush_frags,
};
12785 
/* Mcast-enhancement ops table; populated only for host-mode + IQUE builds */
static struct cdp_me_ops dp_ops_me = {
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef ATH_SUPPORT_IQUE
	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
#endif
#endif
};
12795 
/* Host statistics ops table registered with the CDP layer */
static struct cdp_host_stats_ops dp_ops_host_stats = {
	.txrx_per_peer_stats = dp_get_host_peer_stats,
	.get_fw_peer_stats = dp_get_fw_peer_stats,
	.get_htt_stats = dp_get_htt_stats,
	.txrx_stats_publish = dp_txrx_stats_publish,
	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
	.txrx_get_soc_stats = dp_txrx_get_soc_stats,
	.txrx_get_peer_stats_param = dp_txrx_get_peer_stats_param,
	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
	.txrx_update_vdev_stats = dp_txrx_update_vdev_host_stats,
	.txrx_get_peer_delay_stats = dp_txrx_get_peer_delay_stats,
	.txrx_get_peer_jitter_stats = dp_txrx_get_peer_jitter_stats,
#ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
	.txrx_alloc_vdev_stats_id = dp_txrx_alloc_vdev_stats_id,
	.txrx_reset_vdev_stats_id = dp_txrx_reset_vdev_stats_id,
#endif
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	.get_peer_tx_capture_stats = dp_peer_get_tx_capture_stats,
	.get_pdev_tx_capture_stats = dp_pdev_get_tx_capture_stats,
#endif /* WLAN_TX_PKT_CAPTURE_ENH */
#ifdef HW_TX_DELAY_STATS_ENABLE
	.enable_disable_vdev_tx_delay_stats =
				dp_enable_disable_vdev_tx_delay_stats,
	.is_tx_delay_stats_enabled = dp_check_vdev_tx_delay_stats_enabled,
#endif
	.txrx_get_pdev_tid_stats = dp_pdev_get_tid_stats,
#ifdef WLAN_TELEMETRY_STATS_SUPPORT
	.txrx_pdev_telemetry_stats = dp_get_pdev_telemetry_stats,
	.txrx_peer_telemetry_stats = dp_get_peer_telemetry_stats,
#endif
	/* TODO */
};
12831 
/* Raw-mode ops table; intentionally empty for now */
static struct cdp_raw_ops dp_ops_raw = {
	/* TODO */
};
12835 
#ifdef PEER_FLOW_CONTROL
/* Peer flow-control ops; single (positional) entry: pdev configure hook */
static struct cdp_pflow_ops dp_ops_pflow = {
	dp_tx_flow_ctrl_configure_pdev,
};
#endif /* PEER_FLOW_CONTROL */
12841 
#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
/* CFR (channel frequency response) ops table */
static struct cdp_cfr_ops dp_ops_cfr = {
	.txrx_cfr_filter = NULL,
	.txrx_get_cfr_rcc = dp_get_cfr_rcc,
	.txrx_set_cfr_rcc = dp_set_cfr_rcc,
	.txrx_get_cfr_dbg_stats = dp_get_cfr_dbg_stats,
	.txrx_clear_cfr_dbg_stats = dp_clear_cfr_dbg_stats,
};
#endif
12851 
#ifdef WLAN_SUPPORT_MSCS
/* MSCS (mirrored stream classification service) ops table */
static struct cdp_mscs_ops dp_ops_mscs = {
	.mscs_peer_lookup_n_get_priority = dp_mscs_peer_lookup_n_get_priority,
};
#endif
12857 
#ifdef WLAN_SUPPORT_MESH_LATENCY
/* Mesh latency ops table */
static struct cdp_mesh_latency_ops dp_ops_mesh_latency = {
	.mesh_latency_update_peer_parameter =
		dp_mesh_latency_update_peer_parameter,
};
#endif
12864 
#ifdef CONFIG_SAWF_DEF_QUEUES
/* SAWF (service aware wifi framework) ops table */
static struct cdp_sawf_ops dp_ops_sawf = {
	.sawf_def_queues_map_req = dp_sawf_def_queues_map_req,
	.sawf_def_queues_unmap_req = dp_sawf_def_queues_unmap_req,
	.sawf_def_queues_get_map_report =
		dp_sawf_def_queues_get_map_report,
#ifdef CONFIG_SAWF
	.txrx_get_peer_sawf_delay_stats = dp_sawf_get_peer_delay_stats,
	.txrx_get_peer_sawf_tx_stats = dp_sawf_get_peer_tx_stats,
	.sawf_mpdu_stats_req = dp_sawf_mpdu_stats_req,
	.sawf_mpdu_details_stats_req = dp_sawf_mpdu_details_stats_req,
	.txrx_sawf_set_mov_avg_params = dp_sawf_set_mov_avg_params,
	.txrx_sawf_set_sla_params = dp_sawf_set_sla_params,
	.txrx_sawf_init_telemtery_params = dp_sawf_init_telemetry_params,
	.telemetry_get_throughput_stats = dp_sawf_get_tx_stats,
	.telemetry_get_mpdu_stats = dp_sawf_get_mpdu_sched_stats,
	.telemetry_get_drop_stats = dp_sawf_get_drop_stats,
#endif
};
#endif
12885 
#if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
/**
 * dp_flush_ring_hptp() - Update ring shadow
 *			  register HP/TP address when runtime
 *                        resume
 * @soc: DP soc context
 * @hal_srng: srng whose pending flush event should be serviced
 *
 * Acts only when HAL_SRNG_FLUSH_EVENT is set on the ring; the empty
 * access_start/access_end pair pushes the shadow HP/TP out.
 *
 * Return: None
 */
static
void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
{
	if (hal_srng && hal_srng_get_clear_event(hal_srng,
						 HAL_SRNG_FLUSH_EVENT)) {
		/* Acquire the lock */
		hal_srng_access_start(soc->hal_soc, hal_srng);

		hal_srng_access_end(soc->hal_soc, hal_srng);

		hal_srng_set_flush_last_ts(hal_srng);

		dp_debug("flushed");
	}
}
#endif
12911 
12912 #ifdef DP_TX_TRACKING
12913 
12914 #define DP_TX_COMP_MAX_LATENCY_MS 30000
12915 /**
12916  * dp_tx_comp_delay_check() - calculate time latency for tx completion per pkt
12917  * @tx_desc: tx descriptor
12918  *
12919  * Calculate time latency for tx completion per pkt and trigger self recovery
12920  * when the delay is more than threshold value.
12921  *
12922  * Return: True if delay is more than threshold
12923  */
12924 static bool dp_tx_comp_delay_check(struct dp_tx_desc_s *tx_desc)
12925 {
12926 	uint64_t time_latency, timestamp_tick = tx_desc->timestamp_tick;
12927 	qdf_ktime_t current_time = qdf_ktime_real_get();
12928 	qdf_ktime_t timestamp = tx_desc->timestamp;
12929 
12930 	if (!timestamp)
12931 		return false;
12932 
12933 	if (dp_tx_pkt_tracepoints_enabled()) {
12934 		time_latency = qdf_ktime_to_ms(current_time) -
12935 				qdf_ktime_to_ms(timestamp);
12936 		if (time_latency >= DP_TX_COMP_MAX_LATENCY_MS) {
12937 			dp_err_rl("enqueued: %llu ms, current : %llu ms",
12938 				  timestamp, current_time);
12939 			return true;
12940 		}
12941 	} else {
12942 		current_time = qdf_system_ticks();
12943 		time_latency = qdf_system_ticks_to_msecs(current_time -
12944 							 timestamp_tick);
12945 		if (time_latency >= DP_TX_COMP_MAX_LATENCY_MS) {
12946 			dp_err_rl("enqueued: %u ms, current : %u ms",
12947 				  qdf_system_ticks_to_msecs(timestamp),
12948 				  qdf_system_ticks_to_msecs(current_time));
12949 			return true;
12950 		}
12951 	}
12952 
12953 	return false;
12954 }
12955 
12956 /**
12957  * dp_find_missing_tx_comp() - check for leaked descriptor in tx path
12958  * @soc - DP SOC context
12959  *
12960  * Parse through descriptors in all pools and validate magic number and
12961  * completion time. Trigger self recovery if magic value is corrupted.
12962  *
12963  * Return: None.
12964  */
12965 static void dp_find_missing_tx_comp(struct dp_soc *soc)
12966 {
12967 	uint8_t i;
12968 	uint32_t j;
12969 	uint32_t num_desc, page_id, offset;
12970 	uint16_t num_desc_per_page;
12971 	struct dp_tx_desc_s *tx_desc = NULL;
12972 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
12973 	bool send_fw_stats_cmd = false;
12974 	uint8_t vdev_id;
12975 
12976 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
12977 		tx_desc_pool = &soc->tx_desc[i];
12978 		if (!(tx_desc_pool->pool_size) ||
12979 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
12980 		    !(tx_desc_pool->desc_pages.cacheable_pages))
12981 			continue;
12982 
12983 		num_desc = tx_desc_pool->pool_size;
12984 		num_desc_per_page =
12985 			tx_desc_pool->desc_pages.num_element_per_page;
12986 		for (j = 0; j < num_desc; j++) {
12987 			page_id = j / num_desc_per_page;
12988 			offset = j % num_desc_per_page;
12989 
12990 			if (qdf_unlikely(!(tx_desc_pool->
12991 					 desc_pages.cacheable_pages)))
12992 				break;
12993 
12994 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
12995 			if (tx_desc->magic == DP_TX_MAGIC_PATTERN_FREE) {
12996 				continue;
12997 			} else if (tx_desc->magic ==
12998 				   DP_TX_MAGIC_PATTERN_INUSE) {
12999 				if (dp_tx_comp_delay_check(tx_desc)) {
13000 					dp_err_rl("Tx completion not rcvd for id: %u",
13001 						  tx_desc->id);
13002 
13003 					if (!send_fw_stats_cmd) {
13004 						send_fw_stats_cmd = true;
13005 						vdev_id = i;
13006 					}
13007 				}
13008 			} else {
13009 				dp_err_rl("tx desc %u corrupted, flags: 0x%x",
13010 				       tx_desc->id, tx_desc->flags);
13011 			}
13012 		}
13013 	}
13014 
13015 	/*
13016 	 * The unit test command to dump FW stats is required only once as the
13017 	 * stats are dumped at pdev level and not vdev level.
13018 	 */
13019 	if (send_fw_stats_cmd && soc->cdp_soc.ol_ops->dp_send_unit_test_cmd) {
13020 		uint32_t fw_stats_args[2] = {533, 1};
13021 
13022 		soc->cdp_soc.ol_ops->dp_send_unit_test_cmd(vdev_id,
13023 							   WLAN_MODULE_TX, 2,
13024 							   fw_stats_args);
13025 	}
13026 }
13027 #else
/* DP_TX_TRACKING disabled: leaked-descriptor scan is a no-op */
static inline void dp_find_missing_tx_comp(struct dp_soc *soc)
{
}
13031 #endif
13032 
13033 #ifdef FEATURE_RUNTIME_PM
13034 /**
13035  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
13036  * @soc_hdl: Datapath soc handle
13037  * @pdev_id: id of data path pdev handle
13038  *
13039  * DP is ready to runtime suspend if there are no pending TX packets.
13040  *
13041  * Return: QDF_STATUS
13042  */
13043 static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
13044 {
13045 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13046 	struct dp_pdev *pdev;
13047 	uint8_t i;
13048 	int32_t tx_pending;
13049 
13050 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13051 	if (!pdev) {
13052 		dp_err("pdev is NULL");
13053 		return QDF_STATUS_E_INVAL;
13054 	}
13055 
13056 	/* Abort if there are any pending TX packets */
13057 	tx_pending = dp_get_tx_pending(dp_pdev_to_cdp_pdev(pdev));
13058 	if (tx_pending) {
13059 		dp_info_rl("%pK: Abort suspend due to pending TX packets %d",
13060 			   soc, tx_pending);
13061 		dp_find_missing_tx_comp(soc);
13062 		/* perform a force flush if tx is pending */
13063 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
13064 			hal_srng_set_event(soc->tcl_data_ring[i].hal_srng,
13065 					   HAL_SRNG_FLUSH_EVENT);
13066 			dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
13067 		}
13068 		qdf_atomic_set(&soc->tx_pending_rtpm, 0);
13069 
13070 		return QDF_STATUS_E_AGAIN;
13071 	}
13072 
13073 	if (dp_runtime_get_refcount(soc)) {
13074 		dp_init_info("refcount: %d", dp_runtime_get_refcount(soc));
13075 
13076 		return QDF_STATUS_E_AGAIN;
13077 	}
13078 
13079 	if (soc->intr_mode == DP_INTR_POLL)
13080 		qdf_timer_stop(&soc->int_timer);
13081 
13082 	dp_rx_fst_update_pm_suspend_status(soc, true);
13083 
13084 	return QDF_STATUS_SUCCESS;
13085 }
13086 
13087 #define DP_FLUSH_WAIT_CNT 10
13088 #define DP_RUNTIME_SUSPEND_WAIT_MS 10
13089 /**
13090  * dp_runtime_resume() - ensure DP is ready to runtime resume
13091  * @soc_hdl: Datapath soc handle
13092  * @pdev_id: id of data path pdev handle
13093  *
13094  * Resume DP for runtime PM.
13095  *
13096  * Return: QDF_STATUS
13097  */
13098 static QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
13099 {
13100 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13101 	int i, suspend_wait = 0;
13102 
13103 	if (soc->intr_mode == DP_INTR_POLL)
13104 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
13105 
13106 	/*
13107 	 * Wait until dp runtime refcount becomes zero or time out, then flush
13108 	 * pending tx for runtime suspend.
13109 	 */
13110 	while (dp_runtime_get_refcount(soc) &&
13111 	       suspend_wait < DP_FLUSH_WAIT_CNT) {
13112 		qdf_sleep(DP_RUNTIME_SUSPEND_WAIT_MS);
13113 		suspend_wait++;
13114 	}
13115 
13116 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
13117 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
13118 	}
13119 	qdf_atomic_set(&soc->tx_pending_rtpm, 0);
13120 
13121 	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
13122 	dp_rx_fst_update_pm_suspend_status(soc, false);
13123 
13124 	return QDF_STATUS_SUCCESS;
13125 }
13126 #endif /* FEATURE_RUNTIME_PM */
13127 
13128 /**
13129  * dp_tx_get_success_ack_stats() - get tx success completion count
13130  * @soc_hdl: Datapath soc handle
13131  * @vdevid: vdev identifier
13132  *
13133  * Return: tx success ack count
13134  */
13135 static uint32_t dp_tx_get_success_ack_stats(struct cdp_soc_t *soc_hdl,
13136 					    uint8_t vdev_id)
13137 {
13138 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13139 	struct cdp_vdev_stats *vdev_stats = NULL;
13140 	uint32_t tx_success;
13141 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
13142 						     DP_MOD_ID_CDP);
13143 
13144 	if (!vdev) {
13145 		dp_cdp_err("%pK: Invalid vdev id %d", soc, vdev_id);
13146 		return 0;
13147 	}
13148 
13149 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
13150 	if (!vdev_stats) {
13151 		dp_cdp_err("%pK: DP alloc failure - unable to get alloc vdev stats", soc);
13152 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
13153 		return 0;
13154 	}
13155 
13156 	dp_aggregate_vdev_stats(vdev, vdev_stats);
13157 
13158 	tx_success = vdev_stats->tx.tx_success.num;
13159 	qdf_mem_free(vdev_stats);
13160 
13161 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
13162 	return tx_success;
13163 }
13164 
13165 #ifdef WLAN_SUPPORT_DATA_STALL
13166 /**
13167  * dp_register_data_stall_detect_cb() - register data stall callback
13168  * @soc_hdl: Datapath soc handle
13169  * @pdev_id: id of data path pdev handle
13170  * @data_stall_detect_callback: data stall callback function
13171  *
13172  * Return: QDF_STATUS Enumeration
13173  */
13174 static
13175 QDF_STATUS dp_register_data_stall_detect_cb(
13176 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
13177 			data_stall_detect_cb data_stall_detect_callback)
13178 {
13179 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13180 	struct dp_pdev *pdev;
13181 
13182 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13183 	if (!pdev) {
13184 		dp_err("pdev NULL!");
13185 		return QDF_STATUS_E_INVAL;
13186 	}
13187 
13188 	pdev->data_stall_detect_callback = data_stall_detect_callback;
13189 	return QDF_STATUS_SUCCESS;
13190 }
13191 
13192 /**
13193  * dp_deregister_data_stall_detect_cb() - de-register data stall callback
13194  * @soc_hdl: Datapath soc handle
13195  * @pdev_id: id of data path pdev handle
13196  * @data_stall_detect_callback: data stall callback function
13197  *
13198  * Return: QDF_STATUS Enumeration
13199  */
13200 static
13201 QDF_STATUS dp_deregister_data_stall_detect_cb(
13202 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
13203 			data_stall_detect_cb data_stall_detect_callback)
13204 {
13205 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13206 	struct dp_pdev *pdev;
13207 
13208 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13209 	if (!pdev) {
13210 		dp_err("pdev NULL!");
13211 		return QDF_STATUS_E_INVAL;
13212 	}
13213 
13214 	pdev->data_stall_detect_callback = NULL;
13215 	return QDF_STATUS_SUCCESS;
13216 }
13217 
13218 /**
13219  * dp_txrx_post_data_stall_event() - post data stall event
13220  * @soc_hdl: Datapath soc handle
13221  * @indicator: Module triggering data stall
13222  * @data_stall_type: data stall event type
13223  * @pdev_id: pdev id
13224  * @vdev_id_bitmap: vdev id bitmap
13225  * @recovery_type: data stall recovery type
13226  *
13227  * Return: None
13228  */
13229 static void
13230 dp_txrx_post_data_stall_event(struct cdp_soc_t *soc_hdl,
13231 			      enum data_stall_log_event_indicator indicator,
13232 			      enum data_stall_log_event_type data_stall_type,
13233 			      uint32_t pdev_id, uint32_t vdev_id_bitmap,
13234 			      enum data_stall_log_recovery_type recovery_type)
13235 {
13236 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13237 	struct data_stall_event_info data_stall_info;
13238 	struct dp_pdev *pdev;
13239 
13240 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13241 	if (!pdev) {
13242 		dp_err("pdev NULL!");
13243 		return;
13244 	}
13245 
13246 	if (!pdev->data_stall_detect_callback) {
13247 		dp_err("data stall cb not registered!");
13248 		return;
13249 	}
13250 
13251 	dp_info("data_stall_type: %x pdev_id: %d",
13252 		data_stall_type, pdev_id);
13253 
13254 	data_stall_info.indicator = indicator;
13255 	data_stall_info.data_stall_type = data_stall_type;
13256 	data_stall_info.vdev_id_bitmap = vdev_id_bitmap;
13257 	data_stall_info.pdev_id = pdev_id;
13258 	data_stall_info.recovery_type = recovery_type;
13259 
13260 	pdev->data_stall_detect_callback(&data_stall_info);
13261 }
13262 #endif /* WLAN_SUPPORT_DATA_STALL */
13263 
13264 #ifdef WLAN_FEATURE_STATS_EXT
13265 /* rx hw stats event wait timeout in ms */
13266 #define DP_REO_STATUS_STATS_TIMEOUT 1500
13267 /**
13268  * dp_txrx_ext_stats_request - request dp txrx extended stats request
13269  * @soc_hdl: soc handle
13270  * @pdev_id: pdev id
13271  * @req: stats request
13272  *
13273  * Return: QDF_STATUS
13274  */
13275 static QDF_STATUS
13276 dp_txrx_ext_stats_request(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
13277 			  struct cdp_txrx_ext_stats *req)
13278 {
13279 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
13280 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13281 	int i = 0;
13282 	int tcl_ring_full = 0;
13283 
13284 	if (!pdev) {
13285 		dp_err("pdev is null");
13286 		return QDF_STATUS_E_INVAL;
13287 	}
13288 
13289 	dp_aggregate_pdev_stats(pdev);
13290 
13291 	for(i = 0 ; i < MAX_TCL_DATA_RINGS; i++)
13292 		tcl_ring_full += soc->stats.tx.tcl_ring_full[i];
13293 
13294 	req->tx_msdu_enqueue = pdev->stats.tx_i.processed.num;
13295 	req->tx_msdu_overflow = tcl_ring_full;
13296 	req->rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
13297 	req->rx_mpdu_delivered = soc->ext_stats.rx_mpdu_received;
13298 	req->rx_mpdu_missed = pdev->stats.err.reo_error;
13299 	/* only count error source from RXDMA */
13300 	req->rx_mpdu_error = pdev->stats.err.rxdma_error;
13301 
13302 	dp_info("ext stats: tx_msdu_enq = %u, tx_msdu_overflow = %u, "
13303 		"tx_mpdu_recieve = %u, rx_mpdu_delivered = %u, "
13304 		"rx_mpdu_missed = %u, rx_mpdu_error = %u",
13305 		req->tx_msdu_enqueue,
13306 		req->tx_msdu_overflow,
13307 		req->rx_mpdu_received,
13308 		req->rx_mpdu_delivered,
13309 		req->rx_mpdu_missed,
13310 		req->rx_mpdu_error);
13311 
13312 	return QDF_STATUS_SUCCESS;
13313 }
13314 
13315 /**
13316  * dp_rx_hw_stats_cb - request rx hw stats response callback
13317  * @soc: soc handle
13318  * @cb_ctxt: callback context
13319  * @reo_status: reo command response status
13320  *
13321  * Return: None
13322  */
13323 static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
13324 			      union hal_reo_status *reo_status)
13325 {
13326 	struct dp_req_rx_hw_stats_t *rx_hw_stats = cb_ctxt;
13327 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
13328 	bool is_query_timeout;
13329 
13330 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
13331 	is_query_timeout = rx_hw_stats->is_query_timeout;
13332 	/* free the cb_ctxt if all pending tid stats query is received */
13333 	if (qdf_atomic_dec_and_test(&rx_hw_stats->pending_tid_stats_cnt)) {
13334 		if (!is_query_timeout) {
13335 			qdf_event_set(&soc->rx_hw_stats_event);
13336 			soc->is_last_stats_ctx_init = false;
13337 		}
13338 
13339 		qdf_mem_free(rx_hw_stats);
13340 	}
13341 
13342 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
13343 		dp_info("REO stats failure %d",
13344 			queue_status->header.status);
13345 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
13346 		return;
13347 	}
13348 
13349 	if (!is_query_timeout) {
13350 		soc->ext_stats.rx_mpdu_received +=
13351 					queue_status->mpdu_frms_cnt;
13352 		soc->ext_stats.rx_mpdu_missed +=
13353 					queue_status->hole_cnt;
13354 	}
13355 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
13356 }
13357 
13358 /**
13359  * dp_request_rx_hw_stats - request rx hardware stats
13360  * @soc_hdl: soc handle
13361  * @vdev_id: vdev id
13362  *
13363  * Return: None
13364  */
13365 static QDF_STATUS
13366 dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
13367 {
13368 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
13369 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
13370 						     DP_MOD_ID_CDP);
13371 	struct dp_peer *peer = NULL;
13372 	QDF_STATUS status;
13373 	struct dp_req_rx_hw_stats_t *rx_hw_stats;
13374 	int rx_stats_sent_cnt = 0;
13375 	uint32_t last_rx_mpdu_received;
13376 	uint32_t last_rx_mpdu_missed;
13377 
13378 	if (!vdev) {
13379 		dp_err("vdev is null for vdev_id: %u", vdev_id);
13380 		status = QDF_STATUS_E_INVAL;
13381 		goto out;
13382 	}
13383 
13384 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_CDP);
13385 
13386 	if (!peer) {
13387 		dp_err("Peer is NULL");
13388 		status = QDF_STATUS_E_INVAL;
13389 		goto out;
13390 	}
13391 
13392 	rx_hw_stats = qdf_mem_malloc(sizeof(*rx_hw_stats));
13393 
13394 	if (!rx_hw_stats) {
13395 		dp_err("malloc failed for hw stats structure");
13396 		status = QDF_STATUS_E_INVAL;
13397 		goto out;
13398 	}
13399 
13400 	qdf_event_reset(&soc->rx_hw_stats_event);
13401 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
13402 	/* save the last soc cumulative stats and reset it to 0 */
13403 	last_rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
13404 	last_rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
13405 	soc->ext_stats.rx_mpdu_received = 0;
13406 
13407 	rx_stats_sent_cnt =
13408 		dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, rx_hw_stats);
13409 	if (!rx_stats_sent_cnt) {
13410 		dp_err("no tid stats sent successfully");
13411 		qdf_mem_free(rx_hw_stats);
13412 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
13413 		status = QDF_STATUS_E_INVAL;
13414 		goto out;
13415 	}
13416 	qdf_atomic_set(&rx_hw_stats->pending_tid_stats_cnt,
13417 		       rx_stats_sent_cnt);
13418 	rx_hw_stats->is_query_timeout = false;
13419 	soc->is_last_stats_ctx_init = true;
13420 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
13421 
13422 	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
13423 				       DP_REO_STATUS_STATS_TIMEOUT);
13424 
13425 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
13426 	if (status != QDF_STATUS_SUCCESS) {
13427 		dp_info("rx hw stats event timeout");
13428 		if (soc->is_last_stats_ctx_init)
13429 			rx_hw_stats->is_query_timeout = true;
13430 		/**
13431 		 * If query timeout happened, use the last saved stats
13432 		 * for this time query.
13433 		 */
13434 		soc->ext_stats.rx_mpdu_received = last_rx_mpdu_received;
13435 		soc->ext_stats.rx_mpdu_missed = last_rx_mpdu_missed;
13436 	}
13437 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
13438 
13439 out:
13440 	if (peer)
13441 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
13442 	if (vdev)
13443 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
13444 
13445 	return status;
13446 }
13447 
13448 /**
13449  * dp_reset_rx_hw_ext_stats - Reset rx hardware ext stats
13450  * @soc_hdl: soc handle
13451  *
13452  * Return: None
13453  */
13454 static
13455 void dp_reset_rx_hw_ext_stats(struct cdp_soc_t *soc_hdl)
13456 {
13457 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
13458 
13459 	soc->ext_stats.rx_mpdu_received = 0;
13460 	soc->ext_stats.rx_mpdu_missed = 0;
13461 }
13462 #endif /* WLAN_FEATURE_STATS_EXT */
13463 
13464 static
13465 uint32_t dp_get_tx_rings_grp_bitmap(struct cdp_soc_t *soc_hdl)
13466 {
13467 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
13468 
13469 	return soc->wlan_cfg_ctx->tx_rings_grp_bitmap;
13470 }
13471 
13472 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
13473 /**
13474  * dp_mark_first_wakeup_packet() - set flag to indicate that
13475  *    fw is compatible for marking first packet after wow wakeup
13476  * @soc_hdl: Datapath soc handle
13477  * @pdev_id: id of data path pdev handle
13478  * @value: 1 for enabled/ 0 for disabled
13479  *
13480  * Return: None
13481  */
13482 static void dp_mark_first_wakeup_packet(struct cdp_soc_t *soc_hdl,
13483 					uint8_t pdev_id, uint8_t value)
13484 {
13485 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13486 	struct dp_pdev *pdev;
13487 
13488 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13489 	if (!pdev) {
13490 		dp_err("pdev is NULL");
13491 		return;
13492 	}
13493 
13494 	pdev->is_first_wakeup_packet = value;
13495 }
13496 #endif
13497 
13498 #ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
13499 /**
13500  * dp_set_peer_txq_flush_config() - Set the peer txq flush configuration
13501  * @soc_hdl: Opaque handle to the DP soc object
13502  * @vdev_id: VDEV identifier
13503  * @mac: MAC address of the peer
13504  * @ac: access category mask
13505  * @tid: TID mask
13506  * @policy: Flush policy
13507  *
13508  * Return: 0 on success, errno on failure
13509  */
13510 static int dp_set_peer_txq_flush_config(struct cdp_soc_t *soc_hdl,
13511 					uint8_t vdev_id, uint8_t *mac,
13512 					uint8_t ac, uint32_t tid,
13513 					enum cdp_peer_txq_flush_policy policy)
13514 {
13515 	struct dp_soc *soc;
13516 
13517 	if (!soc_hdl) {
13518 		dp_err("soc is null");
13519 		return -EINVAL;
13520 	}
13521 	soc = cdp_soc_t_to_dp_soc(soc_hdl);
13522 	return target_if_peer_txq_flush_config(soc->ctrl_psoc, vdev_id,
13523 					       mac, ac, tid, policy);
13524 }
13525 #endif
13526 
13527 #ifdef CONNECTIVITY_PKTLOG
13528 /**
13529  * dp_register_packetdump_callback() - registers
13530  *  tx data packet, tx mgmt. packet and rx data packet
13531  *  dump callback handler.
13532  *
13533  * @soc_hdl: Datapath soc handle
13534  * @pdev_id: id of data path pdev handle
13535  * @dp_tx_packetdump_cb: tx packetdump cb
13536  * @dp_rx_packetdump_cb: rx packetdump cb
13537  *
13538  * This function is used to register tx data pkt, tx mgmt.
13539  * pkt and rx data pkt dump callback
13540  *
13541  * Return: None
13542  *
13543  */
13544 static inline
13545 void dp_register_packetdump_callback(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
13546 				     ol_txrx_pktdump_cb dp_tx_packetdump_cb,
13547 				     ol_txrx_pktdump_cb dp_rx_packetdump_cb)
13548 {
13549 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13550 	struct dp_pdev *pdev;
13551 
13552 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13553 	if (!pdev) {
13554 		dp_err("pdev is NULL!");
13555 		return;
13556 	}
13557 
13558 	pdev->dp_tx_packetdump_cb = dp_tx_packetdump_cb;
13559 	pdev->dp_rx_packetdump_cb = dp_rx_packetdump_cb;
13560 }
13561 
13562 /**
13563  * dp_deregister_packetdump_callback() - deregidters
13564  *  tx data packet, tx mgmt. packet and rx data packet
13565  *  dump callback handler
13566  * @soc_hdl: Datapath soc handle
13567  * @pdev_id: id of data path pdev handle
13568  *
13569  * This function is used to deregidter tx data pkt.,
13570  * tx mgmt. pkt and rx data pkt. dump callback
13571  *
13572  * Return: None
13573  *
13574  */
13575 static inline
13576 void dp_deregister_packetdump_callback(struct cdp_soc_t *soc_hdl,
13577 				       uint8_t pdev_id)
13578 {
13579 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13580 	struct dp_pdev *pdev;
13581 
13582 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13583 	if (!pdev) {
13584 		dp_err("pdev is NULL!");
13585 		return;
13586 	}
13587 
13588 	pdev->dp_tx_packetdump_cb = NULL;
13589 	pdev->dp_rx_packetdump_cb = NULL;
13590 }
13591 #endif
13592 
13593 #ifdef DP_PEER_EXTENDED_API
/* CDP miscellaneous ops: runtime PM, data-stall reporting, extended
 * stats, SW latency manager and packet-dump hooks, gated per feature.
 */
static struct cdp_misc_ops dp_ops_misc = {
#ifdef FEATURE_WLAN_TDLS
	.tx_non_std = dp_tx_non_std,
#endif /* FEATURE_WLAN_TDLS */
	.get_opmode = dp_get_opmode,
#ifdef FEATURE_RUNTIME_PM
	.runtime_suspend = dp_runtime_suspend,
	.runtime_resume = dp_runtime_resume,
#endif /* FEATURE_RUNTIME_PM */
	.get_num_rx_contexts = dp_get_num_rx_contexts,
	.get_tx_ack_stats = dp_tx_get_success_ack_stats,
#ifdef WLAN_SUPPORT_DATA_STALL
	.txrx_data_stall_cb_register = dp_register_data_stall_detect_cb,
	.txrx_data_stall_cb_deregister = dp_deregister_data_stall_detect_cb,
	.txrx_post_data_stall_event = dp_txrx_post_data_stall_event,
#endif

#ifdef WLAN_FEATURE_STATS_EXT
	.txrx_ext_stats_request = dp_txrx_ext_stats_request,
	.request_rx_hw_stats = dp_request_rx_hw_stats,
	.reset_rx_hw_ext_stats = dp_reset_rx_hw_ext_stats,
#endif /* WLAN_FEATURE_STATS_EXT */
	.vdev_inform_ll_conn = dp_vdev_inform_ll_conn,
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
	.set_swlm_enable = dp_soc_set_swlm_enable,
	.is_swlm_enabled = dp_soc_is_swlm_enabled,
#endif
	.display_txrx_hw_info = dp_display_srng_info,
	.get_tx_rings_grp_bitmap = dp_get_tx_rings_grp_bitmap,
#ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
	.mark_first_wakeup_packet = dp_mark_first_wakeup_packet,
#endif
#ifdef WLAN_FEATURE_PEER_TXQ_FLUSH_CONF
	.set_peer_txq_flush_config = dp_set_peer_txq_flush_config,
#endif
#ifdef CONNECTIVITY_PKTLOG
	.register_pktdump_cb = dp_register_packetdump_callback,
	.unregister_pktdump_cb = dp_deregister_packetdump_callback,
#endif
};
13634 #endif
13635 
13636 #ifdef DP_FLOW_CTL
/* CDP flow-control ops; populated only with TX flow control v2 */
static struct cdp_flowctl_ops dp_ops_flowctl = {
	/* WIFI 3.0 DP implement as required. */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	.flow_pool_map_handler = dp_tx_flow_pool_map,
	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
	.register_pause_cb = dp_txrx_register_pause_cb,
	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
};
13647 
/* Legacy flow-control ops: intentionally empty for WIFI 3.0 DP */
static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
13651 #endif
13652 
13653 #ifdef IPA_OFFLOAD
/* CDP ops table for the IPA (IP Accelerator) offload datapath:
 * resource/doorbell setup, pipe enable/disable, SMMU buffer mapping
 * and intra-BSS forwarding hooks.
 */
static struct cdp_ipa_ops dp_ops_ipa = {
	.ipa_get_resource = dp_ipa_get_resource,
	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
	.ipa_iounmap_doorbell_vaddr = dp_ipa_iounmap_doorbell_vaddr,
	.ipa_op_response = dp_ipa_op_response,
	.ipa_register_op_cb = dp_ipa_register_op_cb,
	.ipa_deregister_op_cb = dp_ipa_deregister_op_cb,
	.ipa_get_stat = dp_ipa_get_stat,
	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
	.ipa_setup = dp_ipa_setup,
	.ipa_cleanup = dp_ipa_cleanup,
	.ipa_setup_iface = dp_ipa_setup_iface,
	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
	.ipa_enable_pipes = dp_ipa_enable_pipes,
	.ipa_disable_pipes = dp_ipa_disable_pipes,
	.ipa_set_perf_level = dp_ipa_set_perf_level,
	.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd,
	.ipa_tx_buf_smmu_mapping = dp_ipa_tx_buf_smmu_mapping,
	.ipa_tx_buf_smmu_unmapping = dp_ipa_tx_buf_smmu_unmapping,
#ifdef IPA_WDS_EASYMESH_FEATURE
	.ipa_ast_create = dp_ipa_ast_create,
#endif
};
13679 #endif
13680 
13681 #ifdef DP_POWER_SAVE
/**
 * dp_bus_suspend() - prepare DP for bus suspend
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * Drains pending TX for up to SUSPEND_DRAIN_WAIT ms, then stops the
 * poll-mode interrupt timer, the monitor reap timer and FSE cache flush.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_TIMEOUT if TX
 *	   did not drain, QDF_STATUS_E_INVAL on bad pdev_id
 */
static QDF_STATUS dp_bus_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	int timeout = SUSPEND_DRAIN_WAIT;
	int drain_wait_delay = 50; /* 50 ms */
	int32_t tx_pending;

	if (qdf_unlikely(!pdev)) {
		dp_err("pdev is NULL");
		return QDF_STATUS_E_INVAL;
	}

	/* Abort if there are any pending TX packets */
	while ((tx_pending = dp_get_tx_pending((struct cdp_pdev *)pdev))) {
		qdf_sleep(drain_wait_delay);
		/* timeout underflows only after at least one full wait */
		if (timeout <= 0) {
			dp_info("TX frames are pending %d, abort suspend",
				tx_pending);
			/* log descriptors whose completion may have leaked */
			dp_find_missing_tx_comp(soc);
			return QDF_STATUS_E_TIMEOUT;
		}
		timeout = timeout - drain_wait_delay;
	}

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_stop(&soc->int_timer);

	/* Stop monitor reap timer and reap any pending frames in ring */
	dp_monitor_reap_timer_suspend(soc);

	dp_suspend_fse_cache_flush(soc);

	return QDF_STATUS_SUCCESS;
}
13717 
/**
 * dp_bus_resume() - resume DP after bus resume
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 *
 * Restarts the poll-mode interrupt timer, the monitor reap timer and the
 * FSE cache flush, then services any pending flush events on the TCL
 * data rings.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	uint8_t i;

	if (qdf_unlikely(!pdev)) {
		dp_err("pdev is NULL");
		return QDF_STATUS_E_INVAL;
	}

	if (soc->intr_mode == DP_INTR_POLL)
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

	/* Start monitor reap timer */
	dp_monitor_reap_timer_start(soc, CDP_MON_REAP_SOURCE_ANY);

	dp_resume_fse_cache_flush(soc);

	/* sync HP/TP shadow registers for rings flushed during suspend */
	for (i = 0; i < soc->num_tcl_data_rings; i++)
		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);

	return QDF_STATUS_SUCCESS;
}
13742 
13743 /**
13744  * dp_process_wow_ack_rsp() - process wow ack response
13745  * @soc_hdl: datapath soc handle
13746  * @pdev_id: data path pdev handle id
13747  *
13748  * Return: none
13749  */
13750 static void dp_process_wow_ack_rsp(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
13751 {
13752 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13753 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13754 
13755 	if (qdf_unlikely(!pdev)) {
13756 		dp_err("pdev is NULL");
13757 		return;
13758 	}
13759 
13760 	/*
13761 	 * As part of wow enable FW disables the mon status ring and in wow ack
13762 	 * response from FW reap mon status ring to make sure no packets pending
13763 	 * in the ring.
13764 	 */
13765 	dp_monitor_reap_timer_suspend(soc);
13766 }
13767 
13768 /**
13769  * dp_process_target_suspend_req() - process target suspend request
13770  * @soc_hdl: datapath soc handle
13771  * @pdev_id: data path pdev handle id
13772  *
13773  * Return: none
13774  */
13775 static void dp_process_target_suspend_req(struct cdp_soc_t *soc_hdl,
13776 					  uint8_t pdev_id)
13777 {
13778 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13779 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13780 
13781 	if (qdf_unlikely(!pdev)) {
13782 		dp_err("pdev is NULL");
13783 		return;
13784 	}
13785 
13786 	/* Stop monitor reap timer and reap any pending frames in ring */
13787 	dp_monitor_reap_timer_suspend(soc);
13788 }
13789 
/* CDP bus ops: suspend/resume and target/WoW power transitions */
static struct cdp_bus_ops dp_ops_bus = {
	.bus_suspend = dp_bus_suspend,
	.bus_resume = dp_bus_resume,
	.process_wow_ack_rsp = dp_process_wow_ack_rsp,
	.process_target_suspend_req = dp_process_target_suspend_req
};
13796 #endif
13797 
13798 #ifdef DP_FLOW_CTL
/* Throttle ops: intentionally empty for WIFI 3.0 DP */
static struct cdp_throttle_ops dp_ops_throttle = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
13802 
/* Config ops: intentionally empty for WIFI 3.0 DP */
static struct cdp_cfg_ops dp_ops_cfg = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
13806 #endif
13807 
13808 #ifdef DP_PEER_EXTENDED_API
/* OCB ops: intentionally empty for WIFI 3.0 DP */
static struct cdp_ocb_ops dp_ops_ocb = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
13812 
/* Mobile stats ops: only clear/dump of txrx stats is supported */
static struct cdp_mob_stats_ops dp_ops_mob_stats = {
	.clear_stats = dp_txrx_clear_dump_stats,
};
13816 
/* CDP peer ops: peer registration, lookup, state and address helpers */
static struct cdp_peer_ops dp_ops_peer = {
	.register_peer = dp_register_peer,
	.clear_peer = dp_clear_peer,
	.find_peer_exist = dp_find_peer_exist,
	.find_peer_exist_on_vdev = dp_find_peer_exist_on_vdev,
	.find_peer_exist_on_other_vdev = dp_find_peer_exist_on_other_vdev,
	.peer_state_update = dp_peer_state_update,
	.get_vdevid = dp_get_vdevid,
	.get_vdev_by_peer_addr = dp_get_vdev_by_peer_addr,
	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
	.get_peer_state = dp_get_peer_state,
	.peer_flush_frags = dp_peer_flush_frags,
	.set_peer_as_tdls_peer = dp_set_peer_as_tdls_peer,
};
13831 #endif
13832 
13833 static void dp_soc_txrx_ops_attach(struct dp_soc *soc)
13834 {
13835 	soc->cdp_soc.ops->cmn_drv_ops = &dp_ops_cmn;
13836 	soc->cdp_soc.ops->ctrl_ops = &dp_ops_ctrl;
13837 	soc->cdp_soc.ops->me_ops = &dp_ops_me;
13838 	soc->cdp_soc.ops->host_stats_ops = &dp_ops_host_stats;
13839 	soc->cdp_soc.ops->wds_ops = &dp_ops_wds;
13840 	soc->cdp_soc.ops->raw_ops = &dp_ops_raw;
13841 #ifdef PEER_FLOW_CONTROL
13842 	soc->cdp_soc.ops->pflow_ops = &dp_ops_pflow;
13843 #endif /* PEER_FLOW_CONTROL */
13844 #ifdef DP_PEER_EXTENDED_API
13845 	soc->cdp_soc.ops->misc_ops = &dp_ops_misc;
13846 	soc->cdp_soc.ops->ocb_ops = &dp_ops_ocb;
13847 	soc->cdp_soc.ops->peer_ops = &dp_ops_peer;
13848 	soc->cdp_soc.ops->mob_stats_ops = &dp_ops_mob_stats;
13849 #endif
13850 #ifdef DP_FLOW_CTL
13851 	soc->cdp_soc.ops->cfg_ops = &dp_ops_cfg;
13852 	soc->cdp_soc.ops->flowctl_ops = &dp_ops_flowctl;
13853 	soc->cdp_soc.ops->l_flowctl_ops = &dp_ops_l_flowctl;
13854 	soc->cdp_soc.ops->throttle_ops = &dp_ops_throttle;
13855 #endif
13856 #ifdef IPA_OFFLOAD
13857 	soc->cdp_soc.ops->ipa_ops = &dp_ops_ipa;
13858 #endif
13859 #ifdef DP_POWER_SAVE
13860 	soc->cdp_soc.ops->bus_ops = &dp_ops_bus;
13861 #endif
13862 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
13863 	soc->cdp_soc.ops->cfr_ops = &dp_ops_cfr;
13864 #endif
13865 #ifdef WLAN_SUPPORT_MSCS
13866 	soc->cdp_soc.ops->mscs_ops = &dp_ops_mscs;
13867 #endif
13868 #ifdef WLAN_SUPPORT_MESH_LATENCY
13869 	soc->cdp_soc.ops->mesh_latency_ops = &dp_ops_mesh_latency;
13870 #endif
13871 #ifdef CONFIG_SAWF_DEF_QUEUES
13872 	soc->cdp_soc.ops->sawf_ops = &dp_ops_sawf;
13873 #endif
13874 };
13875 
13876 /*
13877  * dp_soc_set_txrx_ring_map()
13878  * @dp_soc: DP handler for soc
13879  *
13880  * Return: Void
13881  */
13882 void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
13883 {
13884 	uint32_t i;
13885 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
13886 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
13887 	}
13888 }
13889 
13890 qdf_export_symbol(dp_soc_set_txrx_ring_map);
13891 
13892 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
13893 	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574)
13894 /**
13895  * dp_soc_attach_wifi3() - Attach txrx SOC
13896  * @ctrl_psoc: Opaque SOC handle from control plane
13897  * @params: SOC attach params
13898  *
13899  * Return: DP SOC handle on success, NULL on failure
13900  */
13901 struct cdp_soc_t *
13902 dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
13903 		    struct cdp_soc_attach_params *params)
13904 {
13905 	struct dp_soc *dp_soc = NULL;
13906 
13907 	dp_soc = dp_soc_attach(ctrl_psoc, params);
13908 
13909 	return dp_soc_to_cdp_soc_t(dp_soc);
13910 }
13911 
13912 static inline void dp_soc_set_def_pdev(struct dp_soc *soc)
13913 {
13914 	int lmac_id;
13915 
13916 	for (lmac_id = 0; lmac_id < MAX_NUM_LMAC_HW; lmac_id++) {
13917 		/*Set default host PDEV ID for lmac_id*/
13918 		wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
13919 				      INVALID_PDEV_ID, lmac_id);
13920 	}
13921 }
13922 
13923 static uint32_t
13924 dp_get_link_desc_id_start(uint16_t arch_id)
13925 {
13926 	switch (arch_id) {
13927 	case CDP_ARCH_TYPE_LI:
13928 		return LINK_DESC_ID_START_21_BITS_COOKIE;
13929 	case CDP_ARCH_TYPE_BE:
13930 		return LINK_DESC_ID_START_20_BITS_COOKIE;
13931 	default:
13932 		dp_err("unkonwn arch_id 0x%x", arch_id);
13933 		QDF_BUG(0);
13934 		return LINK_DESC_ID_START_21_BITS_COOKIE;
13935 	}
13936 }
13937 
13938 /**
13939  * dp_soc_attach() - Attach txrx SOC
13940  * @ctrl_psoc: Opaque SOC handle from control plane
13941  * @params: SOC attach params
13942  *
13943  * Return: DP SOC handle on success, NULL on failure
13944  */
13945 static struct dp_soc *
13946 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
13947 	      struct cdp_soc_attach_params *params)
13948 {
13949 	int int_ctx;
13950 	struct dp_soc *soc =  NULL;
13951 	uint16_t arch_id;
13952 	struct hif_opaque_softc *hif_handle = params->hif_handle;
13953 	qdf_device_t qdf_osdev = params->qdf_osdev;
13954 	struct ol_if_ops *ol_ops = params->ol_ops;
13955 	uint16_t device_id = params->device_id;
13956 
13957 	if (!hif_handle) {
13958 		dp_err("HIF handle is NULL");
13959 		goto fail0;
13960 	}
13961 	arch_id = cdp_get_arch_type_from_devid(device_id);
13962 	soc = qdf_mem_malloc(dp_get_soc_context_size(device_id));
13963 	if (!soc) {
13964 		dp_err("DP SOC memory allocation failed");
13965 		goto fail0;
13966 	}
13967 
13968 	dp_info("soc memory allocated %pK", soc);
13969 	soc->hif_handle = hif_handle;
13970 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
13971 	if (!soc->hal_soc)
13972 		goto fail1;
13973 
13974 	hif_get_cmem_info(soc->hif_handle,
13975 			  &soc->cmem_base,
13976 			  &soc->cmem_total_size);
13977 	soc->cmem_avail_size = soc->cmem_total_size;
13978 	int_ctx = 0;
13979 	soc->device_id = device_id;
13980 	soc->cdp_soc.ops =
13981 		(struct cdp_ops *)qdf_mem_malloc(sizeof(struct cdp_ops));
13982 	if (!soc->cdp_soc.ops)
13983 		goto fail1;
13984 
13985 	dp_soc_txrx_ops_attach(soc);
13986 	soc->cdp_soc.ol_ops = ol_ops;
13987 	soc->ctrl_psoc = ctrl_psoc;
13988 	soc->osdev = qdf_osdev;
13989 	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
13990 	hal_rx_get_tlv_size(soc->hal_soc, &soc->rx_pkt_tlv_size,
13991 			    &soc->rx_mon_pkt_tlv_size);
13992 	soc->idle_link_bm_id = hal_get_idle_link_bm_id(soc->hal_soc,
13993 						       params->mlo_chip_id);
13994 	soc->features.dmac_cmn_src_rxbuf_ring_enabled =
13995 		hal_dmac_cmn_src_rxbuf_ring_get(soc->hal_soc);
13996 	soc->arch_id = arch_id;
13997 	soc->link_desc_id_start =
13998 			dp_get_link_desc_id_start(soc->arch_id);
13999 	dp_configure_arch_ops(soc);
14000 
14001 	/* Reset wbm sg list and flags */
14002 	dp_rx_wbm_sg_list_reset(soc);
14003 
14004 	dp_soc_tx_hw_desc_history_attach(soc);
14005 	dp_soc_rx_history_attach(soc);
14006 	dp_soc_mon_status_ring_history_attach(soc);
14007 	dp_soc_tx_history_attach(soc);
14008 	wlan_set_srng_cfg(&soc->wlan_srng_cfg);
14009 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
14010 	if (!soc->wlan_cfg_ctx) {
14011 		dp_err("wlan_cfg_ctx failed\n");
14012 		goto fail2;
14013 	}
14014 	dp_soc_cfg_attach(soc);
14015 
14016 	if (dp_hw_link_desc_pool_banks_alloc(soc, WLAN_INVALID_PDEV_ID)) {
14017 		dp_err("failed to allocate link desc pool banks");
14018 		goto fail3;
14019 	}
14020 
14021 	if (dp_hw_link_desc_ring_alloc(soc)) {
14022 		dp_err("failed to allocate link_desc_ring");
14023 		goto fail4;
14024 	}
14025 
14026 	if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_attach(soc,
14027 								 params))) {
14028 		dp_err("unable to do target specific attach");
14029 		goto fail5;
14030 	}
14031 
14032 	if (dp_soc_srng_alloc(soc)) {
14033 		dp_err("failed to allocate soc srng rings");
14034 		goto fail6;
14035 	}
14036 
14037 	if (dp_soc_tx_desc_sw_pools_alloc(soc)) {
14038 		dp_err("dp_soc_tx_desc_sw_pools_alloc failed");
14039 		goto fail7;
14040 	}
14041 
14042 	if (!dp_monitor_modularized_enable()) {
14043 		if (dp_mon_soc_attach_wrapper(soc)) {
14044 			dp_err("failed to attach monitor");
14045 			goto fail8;
14046 		}
14047 	}
14048 
14049 	if (dp_sysfs_initialize_stats(soc) != QDF_STATUS_SUCCESS) {
14050 		dp_err("failed to initialize dp stats sysfs file");
14051 		dp_sysfs_deinitialize_stats(soc);
14052 	}
14053 
14054 	dp_soc_swlm_attach(soc);
14055 	dp_soc_set_interrupt_mode(soc);
14056 	dp_soc_set_def_pdev(soc);
14057 
14058 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
14059 		qdf_dma_mem_stats_read(),
14060 		qdf_heap_mem_stats_read(),
14061 		qdf_skb_total_mem_stats_read());
14062 
14063 	return soc;
14064 fail8:
14065 	dp_soc_tx_desc_sw_pools_free(soc);
14066 fail7:
14067 	dp_soc_srng_free(soc);
14068 fail6:
14069 	soc->arch_ops.txrx_soc_detach(soc);
14070 fail5:
14071 	dp_hw_link_desc_ring_free(soc);
14072 fail4:
14073 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
14074 fail3:
14075 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
14076 fail2:
14077 	qdf_mem_free(soc->cdp_soc.ops);
14078 fail1:
14079 	qdf_mem_free(soc);
14080 fail0:
14081 	return NULL;
14082 }
14083 
14084 /**
14085  * dp_soc_init() - Initialize txrx SOC
14086  * @dp_soc: Opaque DP SOC handle
14087  * @htc_handle: Opaque HTC handle
14088  * @hif_handle: Opaque HIF handle
14089  *
14090  * Return: DP SOC handle on success, NULL on failure
14091  */
14092 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
14093 		  struct hif_opaque_softc *hif_handle)
14094 {
14095 	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
14096 	bool is_monitor_mode = false;
14097 	struct hal_reo_params reo_params;
14098 	uint8_t i;
14099 	int num_dp_msi;
14100 	struct dp_mon_ops *mon_ops;
14101 
14102 	wlan_minidump_log(soc, sizeof(*soc), soc->ctrl_psoc,
14103 			  WLAN_MD_DP_SOC, "dp_soc");
14104 
14105 	soc->hif_handle = hif_handle;
14106 
14107 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
14108 	if (!soc->hal_soc)
14109 		goto fail0;
14110 
14111 	if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_init(soc))) {
14112 		dp_err("unable to do target specific init");
14113 		goto fail0;
14114 	}
14115 
14116 	htt_soc = htt_soc_attach(soc, htc_handle);
14117 	if (!htt_soc)
14118 		goto fail1;
14119 
14120 	soc->htt_handle = htt_soc;
14121 
14122 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
14123 		goto fail2;
14124 
14125 	htt_set_htc_handle(htt_soc, htc_handle);
14126 
14127 	dp_soc_cfg_init(soc);
14128 
14129 	dp_monitor_soc_cfg_init(soc);
14130 	/* Reset/Initialize wbm sg list and flags */
14131 	dp_rx_wbm_sg_list_reset(soc);
14132 
14133 	/* Note: Any SRNG ring initialization should happen only after
14134 	 * Interrupt mode is set and followed by filling up the
14135 	 * interrupt mask. IT SHOULD ALWAYS BE IN THIS ORDER.
14136 	 */
14137 	dp_soc_set_interrupt_mode(soc);
14138 	if (soc->cdp_soc.ol_ops->get_con_mode &&
14139 	    soc->cdp_soc.ol_ops->get_con_mode() ==
14140 	    QDF_GLOBAL_MONITOR_MODE)
14141 		is_monitor_mode = true;
14142 
14143 	num_dp_msi = dp_get_num_msi_available(soc, soc->intr_mode);
14144 	if (num_dp_msi < 0) {
14145 		dp_init_err("%pK: dp_interrupt assignment failed", soc);
14146 		goto fail3;
14147 	}
14148 
14149 	wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, num_dp_msi,
14150 				     soc->intr_mode, is_monitor_mode);
14151 
14152 	/* initialize WBM_IDLE_LINK ring */
14153 	if (dp_hw_link_desc_ring_init(soc)) {
14154 		dp_init_err("%pK: dp_hw_link_desc_ring_init failed", soc);
14155 		goto fail3;
14156 	}
14157 
14158 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
14159 
14160 	if (dp_soc_srng_init(soc)) {
14161 		dp_init_err("%pK: dp_soc_srng_init failed", soc);
14162 		goto fail4;
14163 	}
14164 
14165 	if (htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
14166 			       htt_get_htc_handle(htt_soc),
14167 			       soc->hal_soc, soc->osdev) == NULL)
14168 		goto fail5;
14169 
14170 	/* Initialize descriptors in TCL Rings */
14171 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
14172 		hal_tx_init_data_ring(soc->hal_soc,
14173 				      soc->tcl_data_ring[i].hal_srng);
14174 	}
14175 
14176 	if (dp_soc_tx_desc_sw_pools_init(soc)) {
14177 		dp_init_err("%pK: dp_tx_soc_attach failed", soc);
14178 		goto fail6;
14179 	}
14180 
14181 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
14182 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
14183 	soc->cce_disable = false;
14184 	soc->max_ast_ageout_count = MAX_AST_AGEOUT_COUNT;
14185 
14186 	soc->sta_mode_search_policy = DP_TX_ADDR_SEARCH_ADDR_POLICY;
14187 	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
14188 	qdf_spinlock_create(&soc->vdev_map_lock);
14189 	qdf_atomic_init(&soc->num_tx_outstanding);
14190 	qdf_atomic_init(&soc->num_tx_exception);
14191 	soc->num_tx_allowed =
14192 		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);
14193 
14194 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
14195 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
14196 				CDP_CFG_MAX_PEER_ID);
14197 
14198 		if (ret != -EINVAL)
14199 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
14200 
14201 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
14202 				CDP_CFG_CCE_DISABLE);
14203 		if (ret == 1)
14204 			soc->cce_disable = true;
14205 	}
14206 
14207 	/*
14208 	 * Skip registering hw ring interrupts for WMAC2 on IPQ6018
14209 	 * and IPQ5018 WMAC2 is not there in these platforms.
14210 	 */
14211 	if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018 ||
14212 	    soc->disable_mac2_intr)
14213 		dp_soc_disable_unused_mac_intr_mask(soc, 0x2);
14214 
14215 	/*
14216 	 * Skip registering hw ring interrupts for WMAC1 on IPQ5018
14217 	 * WMAC1 is not there in this platform.
14218 	 */
14219 	if (soc->disable_mac1_intr)
14220 		dp_soc_disable_unused_mac_intr_mask(soc, 0x1);
14221 
14222 	/* Setup HW REO */
14223 	qdf_mem_zero(&reo_params, sizeof(reo_params));
14224 
14225 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
14226 		/*
14227 		 * Reo ring remap is not required if both radios
14228 		 * are offloaded to NSS
14229 		 */
14230 
14231 		if (dp_reo_remap_config(soc, &reo_params.remap0,
14232 					&reo_params.remap1,
14233 					&reo_params.remap2))
14234 			reo_params.rx_hash_enabled = true;
14235 		else
14236 			reo_params.rx_hash_enabled = false;
14237 	}
14238 
14239 	/* setup the global rx defrag waitlist */
14240 	TAILQ_INIT(&soc->rx.defrag.waitlist);
14241 	soc->rx.defrag.timeout_ms =
14242 		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
14243 	soc->rx.defrag.next_flush_ms = 0;
14244 	soc->rx.flags.defrag_timeout_check =
14245 		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
14246 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
14247 
14248 	/*
14249 	 * set the fragment destination ring
14250 	 */
14251 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
14252 
14253 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
14254 		reo_params.alt_dst_ind_0 = REO_REMAP_RELEASE;
14255 
14256 	hal_reo_setup(soc->hal_soc, &reo_params);
14257 
14258 	hal_reo_set_err_dst_remap(soc->hal_soc);
14259 
14260 	soc->features.pn_in_reo_dest = hal_reo_enable_pn_in_dest(soc->hal_soc);
14261 
14262 	mon_ops = dp_mon_ops_get(soc);
14263 	if (mon_ops && mon_ops->mon_soc_init)
14264 		mon_ops->mon_soc_init(soc);
14265 
14266 	qdf_atomic_set(&soc->cmn_init_done, 1);
14267 
14268 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
14269 
14270 	qdf_spinlock_create(&soc->ast_lock);
14271 	dp_peer_mec_spinlock_create(soc);
14272 
14273 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
14274 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
14275 	INIT_RX_HW_STATS_LOCK(soc);
14276 
14277 	qdf_nbuf_queue_init(&soc->invalid_buf_queue);
14278 	/* fill the tx/rx cpu ring map*/
14279 	dp_soc_set_txrx_ring_map(soc);
14280 
14281 	TAILQ_INIT(&soc->inactive_peer_list);
14282 	qdf_spinlock_create(&soc->inactive_peer_list_lock);
14283 	TAILQ_INIT(&soc->inactive_vdev_list);
14284 	qdf_spinlock_create(&soc->inactive_vdev_list_lock);
14285 	qdf_spinlock_create(&soc->htt_stats.lock);
14286 	/* initialize work queue for stats processing */
14287 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
14288 
14289 	dp_reo_desc_deferred_freelist_create(soc);
14290 
14291 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
14292 		qdf_dma_mem_stats_read(),
14293 		qdf_heap_mem_stats_read(),
14294 		qdf_skb_total_mem_stats_read());
14295 
14296 	soc->vdev_stats_id_map = 0;
14297 
14298 	return soc;
14299 fail6:
14300 	htt_soc_htc_dealloc(soc->htt_handle);
14301 fail5:
14302 	dp_soc_srng_deinit(soc);
14303 fail4:
14304 	dp_hw_link_desc_ring_deinit(soc);
14305 fail3:
14306 	htt_htc_pkt_pool_free(htt_soc);
14307 fail2:
14308 	htt_soc_detach(htt_soc);
14309 fail1:
14310 	soc->arch_ops.txrx_soc_deinit(soc);
14311 fail0:
14312 	return NULL;
14313 }
14314 
14315 /**
14316  * dp_soc_init_wifi3() - Initialize txrx SOC
14317  * @soc: Opaque DP SOC handle
14318  * @ctrl_psoc: Opaque SOC handle from control plane(Unused)
14319  * @hif_handle: Opaque HIF handle
14320  * @htc_handle: Opaque HTC handle
14321  * @qdf_osdev: QDF device (Unused)
14322  * @ol_ops: Offload Operations (Unused)
14323  * @device_id: Device ID (Unused)
14324  *
14325  * Return: DP SOC handle on success, NULL on failure
14326  */
14327 void *dp_soc_init_wifi3(struct cdp_soc_t *soc,
14328 			struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
14329 			struct hif_opaque_softc *hif_handle,
14330 			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
14331 			struct ol_if_ops *ol_ops, uint16_t device_id)
14332 {
14333 	return dp_soc_init((struct dp_soc *)soc, htc_handle, hif_handle);
14334 }
14335 
14336 #endif
14337 
14338 /*
14339  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
14340  *
14341  * @soc: handle to DP soc
14342  * @mac_id: MAC id
14343  *
14344  * Return: Return pdev corresponding to MAC
14345  */
14346 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
14347 {
14348 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
14349 		return (mac_id < MAX_PDEV_CNT) ? soc->pdev_list[mac_id] : NULL;
14350 
14351 	/* Typically for MCL as there only 1 PDEV*/
14352 	return soc->pdev_list[0];
14353 }
14354 
14355 void dp_update_num_mac_rings_for_dbs(struct dp_soc *soc,
14356 				     int *max_mac_rings)
14357 {
14358 	bool dbs_enable = false;
14359 
14360 	if (soc->cdp_soc.ol_ops->is_hw_dbs_capable)
14361 		dbs_enable = soc->cdp_soc.ol_ops->
14362 				is_hw_dbs_capable((void *)soc->ctrl_psoc);
14363 
14364 	*max_mac_rings = dbs_enable ? (*max_mac_rings) : 1;
14365 	dp_info("dbs_enable %d, max_mac_rings %d",
14366 		dbs_enable, *max_mac_rings);
14367 }
14368 
14369 qdf_export_symbol(dp_update_num_mac_rings_for_dbs);
14370 
14371 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
14372 /**
14373  * dp_get_cfr_rcc() - get cfr rcc config
14374  * @soc_hdl: Datapath soc handle
14375  * @pdev_id: id of objmgr pdev
14376  *
14377  * Return: true/false based on cfr mode setting
14378  */
14379 static
14380 bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
14381 {
14382 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14383 	struct dp_pdev *pdev = NULL;
14384 
14385 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14386 	if (!pdev) {
14387 		dp_err("pdev is NULL");
14388 		return false;
14389 	}
14390 
14391 	return pdev->cfr_rcc_mode;
14392 }
14393 
14394 /**
14395  * dp_set_cfr_rcc() - enable/disable cfr rcc config
14396  * @soc_hdl: Datapath soc handle
14397  * @pdev_id: id of objmgr pdev
14398  * @enable: Enable/Disable cfr rcc mode
14399  *
14400  * Return: none
14401  */
14402 static
14403 void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, bool enable)
14404 {
14405 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14406 	struct dp_pdev *pdev = NULL;
14407 
14408 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14409 	if (!pdev) {
14410 		dp_err("pdev is NULL");
14411 		return;
14412 	}
14413 
14414 	pdev->cfr_rcc_mode = enable;
14415 }
14416 
14417 /*
14418  * dp_get_cfr_dbg_stats - Get the debug statistics for CFR
14419  * @soc_hdl: Datapath soc handle
14420  * @pdev_id: id of data path pdev handle
14421  * @cfr_rcc_stats: CFR RCC debug statistics buffer
14422  *
14423  * Return: none
14424  */
14425 static inline void
14426 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
14427 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats)
14428 {
14429 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14430 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14431 
14432 	if (!pdev) {
14433 		dp_err("Invalid pdev");
14434 		return;
14435 	}
14436 
14437 	qdf_mem_copy(cfr_rcc_stats, &pdev->stats.rcc,
14438 		     sizeof(struct cdp_cfr_rcc_stats));
14439 }
14440 
14441 /*
14442  * dp_clear_cfr_dbg_stats - Clear debug statistics for CFR
14443  * @soc_hdl: Datapath soc handle
14444  * @pdev_id: id of data path pdev handle
14445  *
14446  * Return: none
14447  */
14448 static void dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl,
14449 				   uint8_t pdev_id)
14450 {
14451 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
14452 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
14453 
14454 	if (!pdev) {
14455 		dp_err("dp pdev is NULL");
14456 		return;
14457 	}
14458 
14459 	qdf_mem_zero(&pdev->stats.rcc, sizeof(pdev->stats.rcc));
14460 }
14461 #endif
14462 
14463 /**
14464  * dp_bucket_index() - Return index from array
14465  *
14466  * @delay: delay measured
14467  * @array: array used to index corresponding delay
14468  * @delay_in_us: flag to indicate whether the delay in ms or us
14469  *
14470  * Return: index
14471  */
14472 static uint8_t
14473 dp_bucket_index(uint32_t delay, uint16_t *array, bool delay_in_us)
14474 {
14475 	uint8_t i = CDP_DELAY_BUCKET_0;
14476 	uint32_t thr_low, thr_high;
14477 
14478 	for (; i < CDP_DELAY_BUCKET_MAX - 1; i++) {
14479 		thr_low = array[i];
14480 		thr_high = array[i + 1];
14481 
14482 		if (delay_in_us) {
14483 			thr_low = thr_low * USEC_PER_MSEC;
14484 			thr_high = thr_high * USEC_PER_MSEC;
14485 		}
14486 		if (delay >= thr_low && delay <= thr_high)
14487 			return i;
14488 	}
14489 	return (CDP_DELAY_BUCKET_MAX - 1);
14490 }
14491 
#ifdef HW_TX_DELAY_STATS_ENABLE
/*
 * cdp_fw_to_hw_delay_range
 * Fw to hw delay ranges in milliseconds
 * (adjacent entries form the [low, high] bounds of each bucket,
 * consumed by dp_bucket_index())
 */
static uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
	0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
#else
/* Finer-grained low-end buckets when HW tx delay stats are disabled */
static uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
	0, 2, 4, 6, 8, 10, 20, 30, 40, 50, 100, 250, 500};
#endif

/*
 * cdp_sw_enq_delay_range
 * Software enqueue delay ranges in milliseconds
 */
static uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};

/*
 * cdp_intfrm_delay_range
 * Interframe delay ranges in milliseconds
 */
static uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
	0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};
14517 
14518 /**
14519  * dp_fill_delay_buckets() - Fill delay statistics bucket for each
14520  *				type of delay
14521  * @tstats: tid tx stats
14522  * @rstats: tid rx stats
14523  * @delay: delay in ms
14524  * @tid: tid value
14525  * @mode: type of tx delay mode
14526  * @ring_id: ring number
14527  * @delay_in_us: flag to indicate whether the delay in ms or us
14528  *
14529  * Return: pointer to cdp_delay_stats structure
14530  */
14531 static struct cdp_delay_stats *
14532 dp_fill_delay_buckets(struct cdp_tid_tx_stats *tstats,
14533 		      struct cdp_tid_rx_stats *rstats, uint32_t delay,
14534 		      uint8_t tid, uint8_t mode, uint8_t ring_id,
14535 		      bool delay_in_us)
14536 {
14537 	uint8_t delay_index = 0;
14538 	struct cdp_delay_stats *stats = NULL;
14539 
14540 	/*
14541 	 * Update delay stats in proper bucket
14542 	 */
14543 	switch (mode) {
14544 	/* Software Enqueue delay ranges */
14545 	case CDP_DELAY_STATS_SW_ENQ:
14546 		if (!tstats)
14547 			break;
14548 
14549 		delay_index = dp_bucket_index(delay, cdp_sw_enq_delay,
14550 					      delay_in_us);
14551 		tstats->swq_delay.delay_bucket[delay_index]++;
14552 		stats = &tstats->swq_delay;
14553 		break;
14554 
14555 	/* Tx Completion delay ranges */
14556 	case CDP_DELAY_STATS_FW_HW_TRANSMIT:
14557 		if (!tstats)
14558 			break;
14559 
14560 		delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay,
14561 					      delay_in_us);
14562 		tstats->hwtx_delay.delay_bucket[delay_index]++;
14563 		stats = &tstats->hwtx_delay;
14564 		break;
14565 
14566 	/* Interframe tx delay ranges */
14567 	case CDP_DELAY_STATS_TX_INTERFRAME:
14568 		if (!tstats)
14569 			break;
14570 
14571 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
14572 					      delay_in_us);
14573 		tstats->intfrm_delay.delay_bucket[delay_index]++;
14574 		stats = &tstats->intfrm_delay;
14575 		break;
14576 
14577 	/* Interframe rx delay ranges */
14578 	case CDP_DELAY_STATS_RX_INTERFRAME:
14579 		if (!rstats)
14580 			break;
14581 
14582 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
14583 					      delay_in_us);
14584 		rstats->intfrm_delay.delay_bucket[delay_index]++;
14585 		stats = &rstats->intfrm_delay;
14586 		break;
14587 
14588 	/* Ring reap to indication to network stack */
14589 	case CDP_DELAY_STATS_REAP_STACK:
14590 		if (!rstats)
14591 			break;
14592 
14593 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay,
14594 					      delay_in_us);
14595 		rstats->to_stack_delay.delay_bucket[delay_index]++;
14596 		stats = &rstats->to_stack_delay;
14597 		break;
14598 	default:
14599 		dp_debug("Incorrect delay mode: %d", mode);
14600 	}
14601 
14602 	return stats;
14603 }
14604 
14605 void dp_update_delay_stats(struct cdp_tid_tx_stats *tstats,
14606 			   struct cdp_tid_rx_stats *rstats, uint32_t delay,
14607 			   uint8_t tid, uint8_t mode, uint8_t ring_id,
14608 			   bool delay_in_us)
14609 {
14610 	struct cdp_delay_stats *dstats = NULL;
14611 
14612 	/*
14613 	 * Delay ranges are different for different delay modes
14614 	 * Get the correct index to update delay bucket
14615 	 */
14616 	dstats = dp_fill_delay_buckets(tstats, rstats, delay, tid, mode,
14617 				       ring_id, delay_in_us);
14618 	if (qdf_unlikely(!dstats))
14619 		return;
14620 
14621 	if (delay != 0) {
14622 		/*
14623 		 * Compute minimum,average and maximum
14624 		 * delay
14625 		 */
14626 		if (delay < dstats->min_delay)
14627 			dstats->min_delay = delay;
14628 
14629 		if (delay > dstats->max_delay)
14630 			dstats->max_delay = delay;
14631 
14632 		/*
14633 		 * Average over delay measured till now
14634 		 */
14635 		if (!dstats->avg_delay)
14636 			dstats->avg_delay = delay;
14637 		else
14638 			dstats->avg_delay = ((delay + dstats->avg_delay) >> 1);
14639 	}
14640 }
14641 
14642 /**
14643  * dp_get_peer_mac_list(): function to get peer mac list of vdev
14644  * @soc: Datapath soc handle
14645  * @vdev_id: vdev id
14646  * @newmac: Table of the clients mac
14647  * @mac_cnt: No. of MACs required
14648  * @limit: Limit the number of clients
14649  *
14650  * return: no of clients
14651  */
14652 uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
14653 			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
14654 			      u_int16_t mac_cnt, bool limit)
14655 {
14656 	struct dp_soc *dp_soc = (struct dp_soc *)soc;
14657 	struct dp_vdev *vdev =
14658 		dp_vdev_get_ref_by_id(dp_soc, vdev_id, DP_MOD_ID_CDP);
14659 	struct dp_peer *peer;
14660 	uint16_t new_mac_cnt = 0;
14661 
14662 	if (!vdev)
14663 		return new_mac_cnt;
14664 
14665 	if (limit && (vdev->num_peers > mac_cnt))
14666 		return 0;
14667 
14668 	qdf_spin_lock_bh(&vdev->peer_list_lock);
14669 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
14670 		if (peer->bss_peer)
14671 			continue;
14672 		if (new_mac_cnt < mac_cnt) {
14673 			WLAN_ADDR_COPY(newmac[new_mac_cnt], peer->mac_addr.raw);
14674 			new_mac_cnt++;
14675 		}
14676 	}
14677 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
14678 	dp_vdev_unref_delete(dp_soc, vdev, DP_MOD_ID_CDP);
14679 	return new_mac_cnt;
14680 }
14681 
14682 uint16_t dp_get_peer_id(ol_txrx_soc_handle soc, uint8_t vdev_id, uint8_t *mac)
14683 {
14684 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
14685 						       mac, 0, vdev_id,
14686 						       DP_MOD_ID_CDP);
14687 	uint16_t peer_id = HTT_INVALID_PEER;
14688 
14689 	if (!peer) {
14690 		dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
14691 		return peer_id;
14692 	}
14693 
14694 	peer_id = peer->peer_id;
14695 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
14696 	return peer_id;
14697 }
14698 
14699 #ifdef QCA_SUPPORT_WDS_EXTENDED
14700 QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc,
14701 				  uint8_t vdev_id,
14702 				  uint8_t *mac,
14703 				  ol_txrx_rx_fp rx,
14704 				  ol_osif_peer_handle osif_peer)
14705 {
14706 	struct dp_txrx_peer *txrx_peer = NULL;
14707 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
14708 						       mac, 0, vdev_id,
14709 						       DP_MOD_ID_CDP);
14710 	QDF_STATUS status = QDF_STATUS_E_INVAL;
14711 
14712 	if (!peer) {
14713 		dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
14714 		return status;
14715 	}
14716 
14717 	txrx_peer = dp_get_txrx_peer(peer);
14718 	if (!txrx_peer) {
14719 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
14720 		return status;
14721 	}
14722 
14723 	if (rx) {
14724 		if (txrx_peer->osif_rx) {
14725 			status = QDF_STATUS_E_ALREADY;
14726 		} else {
14727 			txrx_peer->osif_rx = rx;
14728 			status = QDF_STATUS_SUCCESS;
14729 		}
14730 	} else {
14731 		if (txrx_peer->osif_rx) {
14732 			txrx_peer->osif_rx = NULL;
14733 			status = QDF_STATUS_SUCCESS;
14734 		} else {
14735 			status = QDF_STATUS_E_ALREADY;
14736 		}
14737 	}
14738 
14739 	txrx_peer->wds_ext.osif_peer = osif_peer;
14740 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
14741 
14742 	return status;
14743 }
14744 #endif /* QCA_SUPPORT_WDS_EXTENDED */
14745 
14746 /**
14747  * dp_pdev_srng_deinit() - de-initialize all pdev srng ring including
14748  *			   monitor rings
14749  * @pdev: Datapath pdev handle
14750  *
14751  */
14752 static void dp_pdev_srng_deinit(struct dp_pdev *pdev)
14753 {
14754 	struct dp_soc *soc = pdev->soc;
14755 	uint8_t i;
14756 
14757 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled)
14758 		dp_srng_deinit(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
14759 			       RXDMA_BUF,
14760 			       pdev->lmac_id);
14761 
14762 	if (!soc->rxdma2sw_rings_not_supported) {
14763 		for (i = 0;
14764 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
14765 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
14766 								 pdev->pdev_id);
14767 
14768 			wlan_minidump_remove(soc->rxdma_err_dst_ring[lmac_id].
14769 							base_vaddr_unaligned,
14770 					     soc->rxdma_err_dst_ring[lmac_id].
14771 								alloc_size,
14772 					     soc->ctrl_psoc,
14773 					     WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
14774 					     "rxdma_err_dst");
14775 			dp_srng_deinit(soc, &soc->rxdma_err_dst_ring[lmac_id],
14776 				       RXDMA_DST, lmac_id);
14777 		}
14778 	}
14779 
14780 
14781 }
14782 
14783 /**
14784  * dp_pdev_srng_init() - initialize all pdev srng rings including
14785  *			   monitor rings
14786  * @pdev: Datapath pdev handle
14787  *
14788  * return: QDF_STATUS_SUCCESS on success
14789  *	   QDF_STATUS_E_NOMEM on failure
14790  */
14791 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev)
14792 {
14793 	struct dp_soc *soc = pdev->soc;
14794 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
14795 	uint32_t i;
14796 
14797 	soc_cfg_ctx = soc->wlan_cfg_ctx;
14798 
14799 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
14800 		if (dp_srng_init(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
14801 				 RXDMA_BUF, 0, pdev->lmac_id)) {
14802 			dp_init_err("%pK: dp_srng_init failed rx refill ring",
14803 				    soc);
14804 			goto fail1;
14805 		}
14806 	}
14807 
14808 	/* LMAC RxDMA to SW Rings configuration */
14809 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
14810 		/* Only valid for MCL */
14811 		pdev = soc->pdev_list[0];
14812 
14813 	if (!soc->rxdma2sw_rings_not_supported) {
14814 		for (i = 0;
14815 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
14816 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
14817 								 pdev->pdev_id);
14818 			struct dp_srng *srng =
14819 				&soc->rxdma_err_dst_ring[lmac_id];
14820 
14821 			if (srng->hal_srng)
14822 				continue;
14823 
14824 			if (dp_srng_init(soc, srng, RXDMA_DST, 0, lmac_id)) {
14825 				dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring",
14826 					    soc);
14827 				goto fail1;
14828 			}
14829 			wlan_minidump_log(soc->rxdma_err_dst_ring[lmac_id].
14830 						base_vaddr_unaligned,
14831 					  soc->rxdma_err_dst_ring[lmac_id].
14832 						alloc_size,
14833 					  soc->ctrl_psoc,
14834 					  WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
14835 					  "rxdma_err_dst");
14836 		}
14837 	}
14838 	return QDF_STATUS_SUCCESS;
14839 
14840 fail1:
14841 	dp_pdev_srng_deinit(pdev);
14842 	return QDF_STATUS_E_NOMEM;
14843 }
14844 
14845 /**
14846  * dp_pdev_srng_free() - free all pdev srng rings including monitor rings
14847  * pdev: Datapath pdev handle
14848  *
14849  */
14850 static void dp_pdev_srng_free(struct dp_pdev *pdev)
14851 {
14852 	struct dp_soc *soc = pdev->soc;
14853 	uint8_t i;
14854 
14855 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled)
14856 		dp_srng_free(soc, &soc->rx_refill_buf_ring[pdev->lmac_id]);
14857 
14858 	if (!soc->rxdma2sw_rings_not_supported) {
14859 		for (i = 0;
14860 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
14861 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
14862 								 pdev->pdev_id);
14863 
14864 			dp_srng_free(soc, &soc->rxdma_err_dst_ring[lmac_id]);
14865 		}
14866 	}
14867 }
14868 
14869 /**
14870  * dp_pdev_srng_alloc() - allocate memory for all pdev srng rings including
14871  *			  monitor rings
14872  * pdev: Datapath pdev handle
14873  *
14874  * return: QDF_STATUS_SUCCESS on success
14875  *	   QDF_STATUS_E_NOMEM on failure
14876  */
14877 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev)
14878 {
14879 	struct dp_soc *soc = pdev->soc;
14880 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
14881 	uint32_t ring_size;
14882 	uint32_t i;
14883 
14884 	soc_cfg_ctx = soc->wlan_cfg_ctx;
14885 
14886 	ring_size = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
14887 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
14888 		if (dp_srng_alloc(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
14889 				  RXDMA_BUF, ring_size, 0)) {
14890 			dp_init_err("%pK: dp_srng_alloc failed rx refill ring",
14891 				    soc);
14892 			goto fail1;
14893 		}
14894 	}
14895 
14896 	ring_size = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
14897 	/* LMAC RxDMA to SW Rings configuration */
14898 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
14899 		/* Only valid for MCL */
14900 		pdev = soc->pdev_list[0];
14901 
14902 	if (!soc->rxdma2sw_rings_not_supported) {
14903 		for (i = 0;
14904 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
14905 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
14906 								 pdev->pdev_id);
14907 			struct dp_srng *srng =
14908 				&soc->rxdma_err_dst_ring[lmac_id];
14909 
14910 			if (srng->base_vaddr_unaligned)
14911 				continue;
14912 
14913 			if (dp_srng_alloc(soc, srng, RXDMA_DST, ring_size, 0)) {
14914 				dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring",
14915 					    soc);
14916 				goto fail1;
14917 			}
14918 		}
14919 	}
14920 
14921 	return QDF_STATUS_SUCCESS;
14922 fail1:
14923 	dp_pdev_srng_free(pdev);
14924 	return QDF_STATUS_E_NOMEM;
14925 }
14926 
14927 #ifndef WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG
14928 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
14929 {
14930 	QDF_STATUS status;
14931 
14932 	if (soc->init_tcl_cmd_cred_ring) {
14933 		status =  dp_srng_init(soc, &soc->tcl_cmd_credit_ring,
14934 				       TCL_CMD_CREDIT, 0, 0);
14935 		if (QDF_IS_STATUS_ERROR(status))
14936 			return status;
14937 
14938 		wlan_minidump_log(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
14939 				  soc->tcl_cmd_credit_ring.alloc_size,
14940 				  soc->ctrl_psoc,
14941 				  WLAN_MD_DP_SRNG_TCL_CMD,
14942 				  "wbm_desc_rel_ring");
14943 	}
14944 
14945 	return QDF_STATUS_SUCCESS;
14946 }
14947 
14948 static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
14949 {
14950 	if (soc->init_tcl_cmd_cred_ring) {
14951 		wlan_minidump_remove(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
14952 				     soc->tcl_cmd_credit_ring.alloc_size,
14953 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_CMD,
14954 				     "wbm_desc_rel_ring");
14955 		dp_srng_deinit(soc, &soc->tcl_cmd_credit_ring,
14956 			       TCL_CMD_CREDIT, 0);
14957 	}
14958 }
14959 
14960 static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
14961 {
14962 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
14963 	uint32_t entries;
14964 	QDF_STATUS status;
14965 
14966 	entries = wlan_cfg_get_dp_soc_tcl_cmd_credit_ring_size(soc_cfg_ctx);
14967 	if (soc->init_tcl_cmd_cred_ring) {
14968 		status = dp_srng_alloc(soc, &soc->tcl_cmd_credit_ring,
14969 				       TCL_CMD_CREDIT, entries, 0);
14970 		if (QDF_IS_STATUS_ERROR(status))
14971 			return status;
14972 	}
14973 
14974 	return QDF_STATUS_SUCCESS;
14975 }
14976 
14977 static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
14978 {
14979 	if (soc->init_tcl_cmd_cred_ring)
14980 		dp_srng_free(soc, &soc->tcl_cmd_credit_ring);
14981 }
14982 
14983 static inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
14984 {
14985 	if (soc->init_tcl_cmd_cred_ring)
14986 		hal_tx_init_cmd_credit_ring(soc->hal_soc,
14987 					    soc->tcl_cmd_credit_ring.hal_srng);
14988 }
14989 #else
/* WLAN_DP_DISABLE_TCL_CMD_CRED_SRNG is defined: the TCL command/credit ring
 * is not used on this build, so every lifecycle hook below is a no-op stub
 * that reports success where a status is expected.
 */
static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tcl_cmd_cred_srng_deinit(struct dp_soc *soc)
{
}

static inline QDF_STATUS dp_soc_tcl_cmd_cred_srng_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tcl_cmd_cred_srng_free(struct dp_soc *soc)
{
}

static inline void dp_tx_init_cmd_credit_ring(struct dp_soc *soc)
{
}
15011 #endif
15012 
15013 #ifndef WLAN_DP_DISABLE_TCL_STATUS_SRNG
15014 static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
15015 {
15016 	QDF_STATUS status;
15017 
15018 	status =  dp_srng_init(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0);
15019 	if (QDF_IS_STATUS_ERROR(status))
15020 		return status;
15021 
15022 	wlan_minidump_log(soc->tcl_status_ring.base_vaddr_unaligned,
15023 			  soc->tcl_status_ring.alloc_size,
15024 			  soc->ctrl_psoc,
15025 			  WLAN_MD_DP_SRNG_TCL_STATUS,
15026 			  "wbm_desc_rel_ring");
15027 
15028 	return QDF_STATUS_SUCCESS;
15029 }
15030 
15031 static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
15032 {
15033 	wlan_minidump_remove(soc->tcl_status_ring.base_vaddr_unaligned,
15034 			     soc->tcl_status_ring.alloc_size,
15035 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_STATUS,
15036 			     "wbm_desc_rel_ring");
15037 	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
15038 }
15039 
15040 static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
15041 {
15042 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
15043 	uint32_t entries;
15044 	QDF_STATUS status = QDF_STATUS_SUCCESS;
15045 
15046 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
15047 	status = dp_srng_alloc(soc, &soc->tcl_status_ring,
15048 			       TCL_STATUS, entries, 0);
15049 
15050 	return status;
15051 }
15052 
/* Free the memory backing the TCL status srng */
static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
{
	dp_srng_free(soc, &soc->tcl_status_ring);
}
15057 #else
/* WLAN_DP_DISABLE_TCL_STATUS_SRNG is defined: the TCL status ring is not
 * used on this build, so every lifecycle hook below is a no-op stub that
 * reports success where a status is expected.
 */
static inline QDF_STATUS dp_soc_tcl_status_srng_init(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tcl_status_srng_deinit(struct dp_soc *soc)
{
}

static inline QDF_STATUS dp_soc_tcl_status_srng_alloc(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline void dp_soc_tcl_status_srng_free(struct dp_soc *soc)
{
}
15075 #endif
15076 
15077 /**
15078  * dp_soc_srng_deinit() - de-initialize soc srng rings
15079  * @soc: Datapath soc handle
15080  *
15081  */
15082 static void dp_soc_srng_deinit(struct dp_soc *soc)
15083 {
15084 	uint32_t i;
15085 
15086 	if (soc->arch_ops.txrx_soc_srng_deinit)
15087 		soc->arch_ops.txrx_soc_srng_deinit(soc);
15088 
15089 	/* Free the ring memories */
15090 	/* Common rings */
15091 	wlan_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
15092 			     soc->wbm_desc_rel_ring.alloc_size,
15093 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_WBM_DESC_REL,
15094 			     "wbm_desc_rel_ring");
15095 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
15096 
15097 	/* Tx data rings */
15098 	for (i = 0; i < soc->num_tcl_data_rings; i++)
15099 		dp_deinit_tx_pair_by_index(soc, i);
15100 
15101 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
15102 		dp_deinit_tx_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
15103 		dp_ipa_deinit_alt_tx_ring(soc);
15104 	}
15105 
15106 	/* TCL command and status rings */
15107 	dp_soc_tcl_cmd_cred_srng_deinit(soc);
15108 	dp_soc_tcl_status_srng_deinit(soc);
15109 
15110 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
15111 		/* TODO: Get number of rings and ring sizes
15112 		 * from wlan_cfg
15113 		 */
15114 		wlan_minidump_remove(soc->reo_dest_ring[i].base_vaddr_unaligned,
15115 				     soc->reo_dest_ring[i].alloc_size,
15116 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_DEST,
15117 				     "reo_dest_ring");
15118 		dp_srng_deinit(soc, &soc->reo_dest_ring[i], REO_DST, i);
15119 	}
15120 
15121 	/* REO reinjection ring */
15122 	wlan_minidump_remove(soc->reo_reinject_ring.base_vaddr_unaligned,
15123 			     soc->reo_reinject_ring.alloc_size,
15124 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_REINJECT,
15125 			     "reo_reinject_ring");
15126 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
15127 
15128 	/* Rx release ring */
15129 	wlan_minidump_remove(soc->rx_rel_ring.base_vaddr_unaligned,
15130 			     soc->rx_rel_ring.alloc_size,
15131 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_RX_REL,
15132 			     "reo_release_ring");
15133 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
15134 
15135 	/* Rx exception ring */
15136 	/* TODO: Better to store ring_type and ring_num in
15137 	 * dp_srng during setup
15138 	 */
15139 	wlan_minidump_remove(soc->reo_exception_ring.base_vaddr_unaligned,
15140 			     soc->reo_exception_ring.alloc_size,
15141 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_EXCEPTION,
15142 			     "reo_exception_ring");
15143 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
15144 
15145 	/* REO command and status rings */
15146 	wlan_minidump_remove(soc->reo_cmd_ring.base_vaddr_unaligned,
15147 			     soc->reo_cmd_ring.alloc_size,
15148 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_CMD,
15149 			     "reo_cmd_ring");
15150 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
15151 	wlan_minidump_remove(soc->reo_status_ring.base_vaddr_unaligned,
15152 			     soc->reo_status_ring.alloc_size,
15153 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_STATUS,
15154 			     "reo_status_ring");
15155 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
15156 }
15157 
15158 /**
15159  * dp_soc_srng_init() - Initialize soc level srng rings
15160  * @soc: Datapath soc handle
15161  *
15162  * return: QDF_STATUS_SUCCESS on success
15163  *	   QDF_STATUS_E_FAILURE on failure
15164  */
15165 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc)
15166 {
15167 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
15168 	uint8_t i;
15169 	uint8_t wbm2_sw_rx_rel_ring_id;
15170 
15171 	soc_cfg_ctx = soc->wlan_cfg_ctx;
15172 
15173 	dp_enable_verbose_debug(soc);
15174 
15175 	/* WBM descriptor release ring */
15176 	if (dp_srng_init(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0)) {
15177 		dp_init_err("%pK: dp_srng_init failed for wbm_desc_rel_ring", soc);
15178 		goto fail1;
15179 	}
15180 
15181 	wlan_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
15182 			  soc->wbm_desc_rel_ring.alloc_size,
15183 			  soc->ctrl_psoc,
15184 			  WLAN_MD_DP_SRNG_WBM_DESC_REL,
15185 			  "wbm_desc_rel_ring");
15186 
15187 	/* TCL command and status rings */
15188 	if (dp_soc_tcl_cmd_cred_srng_init(soc)) {
15189 		dp_init_err("%pK: dp_srng_init failed for tcl_cmd_ring", soc);
15190 		goto fail1;
15191 	}
15192 
15193 	if (dp_soc_tcl_status_srng_init(soc)) {
15194 		dp_init_err("%pK: dp_srng_init failed for tcl_status_ring", soc);
15195 		goto fail1;
15196 	}
15197 
15198 	/* REO reinjection ring */
15199 	if (dp_srng_init(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0)) {
15200 		dp_init_err("%pK: dp_srng_init failed for reo_reinject_ring", soc);
15201 		goto fail1;
15202 	}
15203 
15204 	wlan_minidump_log(soc->reo_reinject_ring.base_vaddr_unaligned,
15205 			  soc->reo_reinject_ring.alloc_size,
15206 			  soc->ctrl_psoc,
15207 			  WLAN_MD_DP_SRNG_REO_REINJECT,
15208 			  "reo_reinject_ring");
15209 
15210 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc_cfg_ctx);
15211 	/* Rx release ring */
15212 	if (dp_srng_init(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
15213 			 wbm2_sw_rx_rel_ring_id, 0)) {
15214 		dp_init_err("%pK: dp_srng_init failed for rx_rel_ring", soc);
15215 		goto fail1;
15216 	}
15217 
15218 	wlan_minidump_log(soc->rx_rel_ring.base_vaddr_unaligned,
15219 			  soc->rx_rel_ring.alloc_size,
15220 			  soc->ctrl_psoc,
15221 			  WLAN_MD_DP_SRNG_RX_REL,
15222 			  "reo_release_ring");
15223 
15224 	/* Rx exception ring */
15225 	if (dp_srng_init(soc, &soc->reo_exception_ring,
15226 			 REO_EXCEPTION, 0, MAX_REO_DEST_RINGS)) {
15227 		dp_init_err("%pK: dp_srng_init failed - reo_exception", soc);
15228 		goto fail1;
15229 	}
15230 
15231 	wlan_minidump_log(soc->reo_exception_ring.base_vaddr_unaligned,
15232 			  soc->reo_exception_ring.alloc_size,
15233 			  soc->ctrl_psoc,
15234 			  WLAN_MD_DP_SRNG_REO_EXCEPTION,
15235 			  "reo_exception_ring");
15236 
15237 	/* REO command and status rings */
15238 	if (dp_srng_init(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0)) {
15239 		dp_init_err("%pK: dp_srng_init failed for reo_cmd_ring", soc);
15240 		goto fail1;
15241 	}
15242 
15243 	wlan_minidump_log(soc->reo_cmd_ring.base_vaddr_unaligned,
15244 			  soc->reo_cmd_ring.alloc_size,
15245 			  soc->ctrl_psoc,
15246 			  WLAN_MD_DP_SRNG_REO_CMD,
15247 			  "reo_cmd_ring");
15248 
15249 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
15250 	TAILQ_INIT(&soc->rx.reo_cmd_list);
15251 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
15252 
15253 	if (dp_srng_init(soc, &soc->reo_status_ring, REO_STATUS, 0, 0)) {
15254 		dp_init_err("%pK: dp_srng_init failed for reo_status_ring", soc);
15255 		goto fail1;
15256 	}
15257 
15258 	wlan_minidump_log(soc->reo_status_ring.base_vaddr_unaligned,
15259 			  soc->reo_status_ring.alloc_size,
15260 			  soc->ctrl_psoc,
15261 			  WLAN_MD_DP_SRNG_REO_STATUS,
15262 			  "reo_status_ring");
15263 
15264 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
15265 		if (dp_init_tx_ring_pair_by_index(soc, i))
15266 			goto fail1;
15267 	}
15268 
15269 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
15270 		if (dp_init_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
15271 			goto fail1;
15272 
15273 		if (dp_ipa_init_alt_tx_ring(soc))
15274 			goto fail1;
15275 	}
15276 
15277 	dp_create_ext_stats_event(soc);
15278 
15279 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
15280 		/* Initialize REO destination ring */
15281 		if (dp_srng_init(soc, &soc->reo_dest_ring[i], REO_DST, i, 0)) {
15282 			dp_init_err("%pK: dp_srng_init failed for reo_dest_ringn", soc);
15283 			goto fail1;
15284 		}
15285 
15286 		wlan_minidump_log(soc->reo_dest_ring[i].base_vaddr_unaligned,
15287 				  soc->reo_dest_ring[i].alloc_size,
15288 				  soc->ctrl_psoc,
15289 				  WLAN_MD_DP_SRNG_REO_DEST,
15290 				  "reo_dest_ring");
15291 	}
15292 
15293 	if (soc->arch_ops.txrx_soc_srng_init) {
15294 		if (soc->arch_ops.txrx_soc_srng_init(soc)) {
15295 			dp_init_err("%pK: dp_srng_init failed for arch rings",
15296 				    soc);
15297 			goto fail1;
15298 		}
15299 	}
15300 
15301 	return QDF_STATUS_SUCCESS;
15302 fail1:
15303 	/*
15304 	 * Cleanup will be done as part of soc_detach, which will
15305 	 * be called on pdev attach failure
15306 	 */
15307 	dp_soc_srng_deinit(soc);
15308 	return QDF_STATUS_E_FAILURE;
15309 }
15310 
15311 /**
15312  * dp_soc_srng_free() - free soc level srng rings
15313  * @soc: Datapath soc handle
15314  *
15315  */
15316 static void dp_soc_srng_free(struct dp_soc *soc)
15317 {
15318 	uint32_t i;
15319 
15320 	if (soc->arch_ops.txrx_soc_srng_free)
15321 		soc->arch_ops.txrx_soc_srng_free(soc);
15322 
15323 	dp_srng_free(soc, &soc->wbm_desc_rel_ring);
15324 
15325 	for (i = 0; i < soc->num_tcl_data_rings; i++)
15326 		dp_free_tx_ring_pair_by_index(soc, i);
15327 
15328 	/* Free IPA rings for TCL_TX and TCL_COMPL ring */
15329 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
15330 		dp_free_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
15331 		dp_ipa_free_alt_tx_ring(soc);
15332 	}
15333 
15334 	dp_soc_tcl_cmd_cred_srng_free(soc);
15335 	dp_soc_tcl_status_srng_free(soc);
15336 
15337 	for (i = 0; i < soc->num_reo_dest_rings; i++)
15338 		dp_srng_free(soc, &soc->reo_dest_ring[i]);
15339 
15340 	dp_srng_free(soc, &soc->reo_reinject_ring);
15341 	dp_srng_free(soc, &soc->rx_rel_ring);
15342 
15343 	dp_srng_free(soc, &soc->reo_exception_ring);
15344 
15345 	dp_srng_free(soc, &soc->reo_cmd_ring);
15346 	dp_srng_free(soc, &soc->reo_status_ring);
15347 }
15348 
15349 /**
15350  * dp_soc_srng_alloc() - Allocate memory for soc level srng rings
15351  * @soc: Datapath soc handle
15352  *
15353  * return: QDF_STATUS_SUCCESS on success
15354  *	   QDF_STATUS_E_NOMEM on failure
15355  */
15356 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc)
15357 {
15358 	uint32_t entries;
15359 	uint32_t i;
15360 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
15361 	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
15362 	uint32_t tx_comp_ring_size, tx_ring_size, reo_dst_ring_size;
15363 
15364 	soc_cfg_ctx = soc->wlan_cfg_ctx;
15365 
15366 	/* sw2wbm link descriptor release ring */
15367 	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
15368 	if (dp_srng_alloc(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE,
15369 			  entries, 0)) {
15370 		dp_init_err("%pK: dp_srng_alloc failed for wbm_desc_rel_ring", soc);
15371 		goto fail1;
15372 	}
15373 
15374 	/* TCL command and status rings */
15375 	if (dp_soc_tcl_cmd_cred_srng_alloc(soc)) {
15376 		dp_init_err("%pK: dp_srng_alloc failed for tcl_cmd_ring", soc);
15377 		goto fail1;
15378 	}
15379 
15380 	if (dp_soc_tcl_status_srng_alloc(soc)) {
15381 		dp_init_err("%pK: dp_srng_alloc failed for tcl_status_ring", soc);
15382 		goto fail1;
15383 	}
15384 
15385 	/* REO reinjection ring */
15386 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
15387 	if (dp_srng_alloc(soc, &soc->reo_reinject_ring, REO_REINJECT,
15388 			  entries, 0)) {
15389 		dp_init_err("%pK: dp_srng_alloc failed for reo_reinject_ring", soc);
15390 		goto fail1;
15391 	}
15392 
15393 	/* Rx release ring */
15394 	entries = wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx);
15395 	if (dp_srng_alloc(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
15396 			  entries, 0)) {
15397 		dp_init_err("%pK: dp_srng_alloc failed for rx_rel_ring", soc);
15398 		goto fail1;
15399 	}
15400 
15401 	/* Rx exception ring */
15402 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
15403 	if (dp_srng_alloc(soc, &soc->reo_exception_ring, REO_EXCEPTION,
15404 			  entries, 0)) {
15405 		dp_init_err("%pK: dp_srng_alloc failed - reo_exception", soc);
15406 		goto fail1;
15407 	}
15408 
15409 	/* REO command and status rings */
15410 	entries = wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx);
15411 	if (dp_srng_alloc(soc, &soc->reo_cmd_ring, REO_CMD, entries, 0)) {
15412 		dp_init_err("%pK: dp_srng_alloc failed for reo_cmd_ring", soc);
15413 		goto fail1;
15414 	}
15415 
15416 	entries = wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx);
15417 	if (dp_srng_alloc(soc, &soc->reo_status_ring, REO_STATUS,
15418 			  entries, 0)) {
15419 		dp_init_err("%pK: dp_srng_alloc failed for reo_status_ring", soc);
15420 		goto fail1;
15421 	}
15422 
15423 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
15424 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
15425 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc_cfg_ctx);
15426 
15427 	/* Disable cached desc if NSS offload is enabled */
15428 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
15429 		cached = 0;
15430 
15431 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
15432 		if (dp_alloc_tx_ring_pair_by_index(soc, i))
15433 			goto fail1;
15434 	}
15435 
15436 	/* IPA rings for TCL_TX and TX_COMP will be allocated here */
15437 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
15438 		if (dp_alloc_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
15439 			goto fail1;
15440 
15441 		if (dp_ipa_alloc_alt_tx_ring(soc))
15442 			goto fail1;
15443 	}
15444 
15445 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
15446 		/* Setup REO destination ring */
15447 		if (dp_srng_alloc(soc, &soc->reo_dest_ring[i], REO_DST,
15448 				  reo_dst_ring_size, cached)) {
15449 			dp_init_err("%pK: dp_srng_alloc failed for reo_dest_ring", soc);
15450 			goto fail1;
15451 		}
15452 	}
15453 
15454 	if (soc->arch_ops.txrx_soc_srng_alloc) {
15455 		if (soc->arch_ops.txrx_soc_srng_alloc(soc)) {
15456 			dp_init_err("%pK: dp_srng_alloc failed for arch rings",
15457 				    soc);
15458 			goto fail1;
15459 		}
15460 	}
15461 
15462 	return QDF_STATUS_SUCCESS;
15463 
15464 fail1:
15465 	dp_soc_srng_free(soc);
15466 	return QDF_STATUS_E_NOMEM;
15467 }
15468 
/**
 * dp_soc_cfg_dump() - log a summary of the soc configuration
 * @soc: Datapath soc handle
 * @target_type: target type id as returned by hal_get_target_type()
 */
static void dp_soc_cfg_dump(struct dp_soc *soc, uint32_t target_type)
{
	dp_init_info("DP soc Dump for Target = %d", target_type);
	dp_init_info("ast_override_support = %d, da_war_enabled = %d,",
		     soc->ast_override_support, soc->da_war_enabled);

	/* Delegate the bulk of the dump to the wlan_cfg layer */
	wlan_cfg_dp_soc_ctx_dump(soc->wlan_cfg_ctx);
}
15477 
15478 /**
15479  * dp_soc_cfg_init() - initialize target specific configuration
15480  *		       during dp_soc_init
15481  * @soc: dp soc handle
15482  */
15483 static void dp_soc_cfg_init(struct dp_soc *soc)
15484 {
15485 	uint32_t target_type;
15486 
15487 	target_type = hal_get_target_type(soc->hal_soc);
15488 	switch (target_type) {
15489 	case TARGET_TYPE_QCA6290:
15490 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
15491 					       REO_DST_RING_SIZE_QCA6290);
15492 		soc->ast_override_support = 1;
15493 		soc->da_war_enabled = false;
15494 		break;
15495 	case TARGET_TYPE_QCA6390:
15496 	case TARGET_TYPE_QCA6490:
15497 	case TARGET_TYPE_QCA6750:
15498 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
15499 					       REO_DST_RING_SIZE_QCA6290);
15500 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
15501 		soc->ast_override_support = 1;
15502 		if (soc->cdp_soc.ol_ops->get_con_mode &&
15503 		    soc->cdp_soc.ol_ops->get_con_mode() ==
15504 		    QDF_GLOBAL_MONITOR_MODE) {
15505 			int int_ctx;
15506 
15507 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
15508 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
15509 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
15510 			}
15511 		}
15512 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
15513 		break;
15514 	case TARGET_TYPE_KIWI:
15515 	case TARGET_TYPE_MANGO:
15516 		soc->ast_override_support = 1;
15517 		soc->per_tid_basize_max_tid = 8;
15518 
15519 		if (soc->cdp_soc.ol_ops->get_con_mode &&
15520 		    soc->cdp_soc.ol_ops->get_con_mode() ==
15521 		    QDF_GLOBAL_MONITOR_MODE) {
15522 			int int_ctx;
15523 
15524 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS;
15525 			     int_ctx++) {
15526 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
15527 				if (dp_is_monitor_mode_using_poll(soc))
15528 					soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
15529 			}
15530 		}
15531 
15532 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
15533 		soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev = 1;
15534 		break;
15535 	case TARGET_TYPE_QCA8074:
15536 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
15537 		soc->da_war_enabled = true;
15538 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
15539 		break;
15540 	case TARGET_TYPE_QCA8074V2:
15541 	case TARGET_TYPE_QCA6018:
15542 	case TARGET_TYPE_QCA9574:
15543 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
15544 		soc->ast_override_support = 1;
15545 		soc->per_tid_basize_max_tid = 8;
15546 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
15547 		soc->da_war_enabled = false;
15548 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
15549 		break;
15550 	case TARGET_TYPE_QCN9000:
15551 		soc->ast_override_support = 1;
15552 		soc->da_war_enabled = false;
15553 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
15554 		soc->per_tid_basize_max_tid = 8;
15555 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
15556 		soc->lmac_polled_mode = 0;
15557 		soc->wbm_release_desc_rx_sg_support = 1;
15558 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
15559 		break;
15560 	case TARGET_TYPE_QCA5018:
15561 	case TARGET_TYPE_QCN6122:
15562 		soc->ast_override_support = 1;
15563 		soc->da_war_enabled = false;
15564 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
15565 		soc->per_tid_basize_max_tid = 8;
15566 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS_11AX;
15567 		soc->disable_mac1_intr = 1;
15568 		soc->disable_mac2_intr = 1;
15569 		soc->wbm_release_desc_rx_sg_support = 1;
15570 		break;
15571 	case TARGET_TYPE_QCN9224:
15572 		soc->ast_override_support = 1;
15573 		soc->da_war_enabled = false;
15574 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
15575 		soc->per_tid_basize_max_tid = 8;
15576 		soc->wbm_release_desc_rx_sg_support = 1;
15577 		soc->rxdma2sw_rings_not_supported = 1;
15578 		soc->wbm_sg_last_msdu_war = 1;
15579 		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
15580 		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
15581 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
15582 		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
15583 		break;
15584 	default:
15585 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
15586 		qdf_assert_always(0);
15587 		break;
15588 	}
15589 	dp_soc_cfg_dump(soc, target_type);
15590 }
15591 
15592 /**
15593  * dp_soc_cfg_attach() - set target specific configuration in
15594  *			 dp soc cfg.
15595  * @soc: dp soc handle
15596  */
15597 static void dp_soc_cfg_attach(struct dp_soc *soc)
15598 {
15599 	int target_type;
15600 	int nss_cfg = 0;
15601 
15602 	target_type = hal_get_target_type(soc->hal_soc);
15603 	switch (target_type) {
15604 	case TARGET_TYPE_QCA6290:
15605 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
15606 					       REO_DST_RING_SIZE_QCA6290);
15607 		break;
15608 	case TARGET_TYPE_QCA6390:
15609 	case TARGET_TYPE_QCA6490:
15610 	case TARGET_TYPE_QCA6750:
15611 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
15612 					       REO_DST_RING_SIZE_QCA6290);
15613 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
15614 		break;
15615 	case TARGET_TYPE_KIWI:
15616 	case TARGET_TYPE_MANGO:
15617 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
15618 		break;
15619 	case TARGET_TYPE_QCA8074:
15620 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
15621 		break;
15622 	case TARGET_TYPE_QCA8074V2:
15623 	case TARGET_TYPE_QCA6018:
15624 	case TARGET_TYPE_QCA9574:
15625 	case TARGET_TYPE_QCN6122:
15626 	case TARGET_TYPE_QCA5018:
15627 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
15628 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
15629 		break;
15630 	case TARGET_TYPE_QCN9000:
15631 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
15632 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
15633 		break;
15634 	case TARGET_TYPE_QCN9224:
15635 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
15636 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
15637 		break;
15638 	default:
15639 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
15640 		qdf_assert_always(0);
15641 		break;
15642 	}
15643 
15644 	if (soc->cdp_soc.ol_ops->get_soc_nss_cfg)
15645 		nss_cfg = soc->cdp_soc.ol_ops->get_soc_nss_cfg(soc->ctrl_psoc);
15646 
15647 	wlan_cfg_set_dp_soc_nss_cfg(soc->wlan_cfg_ctx, nss_cfg);
15648 
15649 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
15650 		wlan_cfg_set_num_tx_desc_pool(soc->wlan_cfg_ctx, 0);
15651 		wlan_cfg_set_num_tx_ext_desc_pool(soc->wlan_cfg_ctx, 0);
15652 		wlan_cfg_set_num_tx_desc(soc->wlan_cfg_ctx, 0);
15653 		wlan_cfg_set_num_tx_ext_desc(soc->wlan_cfg_ctx, 0);
15654 		soc->init_tcl_cmd_cred_ring = false;
15655 		soc->num_tcl_data_rings =
15656 			wlan_cfg_num_nss_tcl_data_rings(soc->wlan_cfg_ctx);
15657 		soc->num_reo_dest_rings =
15658 			wlan_cfg_num_nss_reo_dest_rings(soc->wlan_cfg_ctx);
15659 
15660 	} else {
15661 		soc->init_tcl_cmd_cred_ring = true;
15662 		soc->num_tx_comp_rings =
15663 			wlan_cfg_num_tx_comp_rings(soc->wlan_cfg_ctx);
15664 		soc->num_tcl_data_rings =
15665 			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
15666 		soc->num_reo_dest_rings =
15667 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
15668 	}
15669 
15670 	soc->arch_ops.soc_cfg_attach(soc);
15671 }
15672 
15673 static inline  void dp_pdev_set_default_reo(struct dp_pdev *pdev)
15674 {
15675 	struct dp_soc *soc = pdev->soc;
15676 
15677 	switch (pdev->pdev_id) {
15678 	case 0:
15679 		pdev->reo_dest =
15680 			wlan_cfg_radio0_default_reo_get(soc->wlan_cfg_ctx);
15681 		break;
15682 
15683 	case 1:
15684 		pdev->reo_dest =
15685 			wlan_cfg_radio1_default_reo_get(soc->wlan_cfg_ctx);
15686 		break;
15687 
15688 	case 2:
15689 		pdev->reo_dest =
15690 			wlan_cfg_radio2_default_reo_get(soc->wlan_cfg_ctx);
15691 		break;
15692 
15693 	default:
15694 		dp_init_err("%pK: Invalid pdev_id %d for reo selection",
15695 			    soc, pdev->pdev_id);
15696 		break;
15697 	}
15698 }
15699 
15700 static QDF_STATUS dp_pdev_init(struct cdp_soc_t *txrx_soc,
15701 				      HTC_HANDLE htc_handle,
15702 				      qdf_device_t qdf_osdev,
15703 				      uint8_t pdev_id)
15704 {
15705 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
15706 	int nss_cfg;
15707 	void *sojourn_buf;
15708 	QDF_STATUS ret;
15709 
15710 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
15711 	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
15712 
15713 	soc_cfg_ctx = soc->wlan_cfg_ctx;
15714 	pdev->soc = soc;
15715 	pdev->pdev_id = pdev_id;
15716 
15717 	/*
15718 	 * Variable to prevent double pdev deinitialization during
15719 	 * radio detach execution .i.e. in the absence of any vdev.
15720 	 */
15721 	pdev->pdev_deinit = 0;
15722 
15723 	if (dp_wdi_event_attach(pdev)) {
15724 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
15725 			  "dp_wdi_evet_attach failed");
15726 		goto fail0;
15727 	}
15728 
15729 	if (dp_pdev_srng_init(pdev)) {
15730 		dp_init_err("%pK: Failed to initialize pdev srng rings", soc);
15731 		goto fail1;
15732 	}
15733 
15734 	/* Initialize descriptors in TCL Rings used by IPA */
15735 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
15736 		hal_tx_init_data_ring(soc->hal_soc,
15737 				      soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng);
15738 		dp_ipa_hal_tx_init_alt_data_ring(soc);
15739 	}
15740 
15741 	/*
15742 	 * Initialize command/credit ring descriptor
15743 	 * Command/CREDIT ring also used for sending DATA cmds
15744 	 */
15745 	dp_tx_init_cmd_credit_ring(soc);
15746 
15747 	dp_tx_pdev_init(pdev);
15748 
15749 	/*
15750 	 * set nss pdev config based on soc config
15751 	 */
15752 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
15753 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
15754 					 (nss_cfg & (1 << pdev_id)));
15755 	pdev->target_pdev_id =
15756 		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
15757 
15758 	if (soc->preferred_hw_mode == WMI_HOST_HW_MODE_2G_PHYB &&
15759 	    pdev->lmac_id == PHYB_2G_LMAC_ID) {
15760 		pdev->target_pdev_id = PHYB_2G_TARGET_PDEV_ID;
15761 	}
15762 
15763 	/* Reset the cpu ring map if radio is NSS offloaded */
15764 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
15765 		dp_soc_reset_cpu_ring_map(soc);
15766 		dp_soc_reset_intr_mask(soc);
15767 	}
15768 
15769 	TAILQ_INIT(&pdev->vdev_list);
15770 	qdf_spinlock_create(&pdev->vdev_list_lock);
15771 	pdev->vdev_count = 0;
15772 	pdev->is_lro_hash_configured = 0;
15773 
15774 	qdf_spinlock_create(&pdev->tx_mutex);
15775 	pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_INVALID_LMAC_ID;
15776 	pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_INVALID_LMAC_ID;
15777 	pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_INVALID_LMAC_ID;
15778 
15779 	DP_STATS_INIT(pdev);
15780 
15781 	dp_local_peer_id_pool_init(pdev);
15782 
15783 	dp_dscp_tid_map_setup(pdev);
15784 	dp_pcp_tid_map_setup(pdev);
15785 
15786 	/* set the reo destination during initialization */
15787 	dp_pdev_set_default_reo(pdev);
15788 
15789 	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
15790 
15791 	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
15792 			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
15793 			      TRUE);
15794 
15795 	if (!pdev->sojourn_buf) {
15796 		dp_init_err("%pK: Failed to allocate sojourn buf", soc);
15797 		goto fail2;
15798 	}
15799 	sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
15800 	qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));
15801 
15802 	qdf_event_create(&pdev->fw_peer_stats_event);
15803 
15804 	pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
15805 
15806 	if (dp_rxdma_ring_setup(soc, pdev)) {
15807 		dp_init_err("%pK: RXDMA ring config failed", soc);
15808 		goto fail3;
15809 	}
15810 
15811 	if (dp_init_ipa_rx_refill_buf_ring(soc, pdev))
15812 		goto fail3;
15813 
15814 	if (dp_ipa_ring_resource_setup(soc, pdev))
15815 		goto fail4;
15816 
15817 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
15818 		dp_init_err("%pK: dp_ipa_uc_attach failed", soc);
15819 		goto fail4;
15820 	}
15821 
15822 	ret = dp_rx_fst_attach(soc, pdev);
15823 	if ((ret != QDF_STATUS_SUCCESS) &&
15824 	    (ret != QDF_STATUS_E_NOSUPPORT)) {
15825 		dp_init_err("%pK: RX Flow Search Table attach failed: pdev %d err %d",
15826 			    soc, pdev_id, ret);
15827 		goto fail5;
15828 	}
15829 
15830 	if (dp_pdev_bkp_stats_attach(pdev) != QDF_STATUS_SUCCESS) {
15831 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
15832 			  FL("dp_pdev_bkp_stats_attach failed"));
15833 		goto fail6;
15834 	}
15835 
15836 	if (dp_monitor_pdev_init(pdev)) {
15837 		dp_init_err("%pK: dp_monitor_pdev_init failed\n", soc);
15838 		goto fail7;
15839 	}
15840 
15841 	/* initialize sw rx descriptors */
15842 	dp_rx_pdev_desc_pool_init(pdev);
15843 	/* allocate buffers and replenish the RxDMA ring */
15844 	dp_rx_pdev_buffers_alloc(pdev);
15845 
15846 	dp_init_tso_stats(pdev);
15847 
15848 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
15849 		qdf_dma_mem_stats_read(),
15850 		qdf_heap_mem_stats_read(),
15851 		qdf_skb_total_mem_stats_read());
15852 
15853 	return QDF_STATUS_SUCCESS;
15854 fail7:
15855 	dp_pdev_bkp_stats_detach(pdev);
15856 fail6:
15857 	dp_rx_fst_detach(soc, pdev);
15858 fail5:
15859 	dp_ipa_uc_detach(soc, pdev);
15860 fail4:
15861 	dp_deinit_ipa_rx_refill_buf_ring(soc, pdev);
15862 fail3:
15863 	dp_rxdma_ring_cleanup(soc, pdev);
15864 	qdf_nbuf_free(pdev->sojourn_buf);
15865 fail2:
15866 	qdf_spinlock_destroy(&pdev->tx_mutex);
15867 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
15868 	dp_pdev_srng_deinit(pdev);
15869 fail1:
15870 	dp_wdi_event_detach(pdev);
15871 fail0:
15872 	return QDF_STATUS_E_FAILURE;
15873 }
15874 
15875 /*
15876  * dp_pdev_init_wifi3() - Init txrx pdev
15877  * @htc_handle: HTC handle for host-target interface
15878  * @qdf_osdev: QDF OS device
15879  * @force: Force deinit
15880  *
15881  * Return: QDF_STATUS
15882  */
15883 static QDF_STATUS dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
15884 				     HTC_HANDLE htc_handle,
15885 				     qdf_device_t qdf_osdev,
15886 				     uint8_t pdev_id)
15887 {
15888 	return dp_pdev_init(txrx_soc, htc_handle, qdf_osdev, pdev_id);
15889 }
15890 
15891