xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision e7074b084e7d61e3773ab3306225765c7b7fdcd3)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <qdf_types.h>
21 #include <qdf_lock.h>
22 #include <qdf_net_types.h>
23 #include <qdf_lro.h>
24 #include <qdf_module.h>
25 #include <hal_hw_headers.h>
26 #include <hal_api.h>
27 #include <hif.h>
28 #include <htt.h>
29 #include <wdi_event.h>
30 #include <queue.h>
31 #include "dp_types.h"
32 #include "dp_internal.h"
33 #include "dp_tx.h"
34 #include "dp_tx_desc.h"
35 #include "dp_rx.h"
36 #ifdef DP_RATETABLE_SUPPORT
37 #include "dp_ratetable.h"
38 #endif
39 #include <cdp_txrx_handle.h>
40 #include <wlan_cfg.h>
41 #include <wlan_utility.h>
42 #include "cdp_txrx_cmn_struct.h"
43 #include "cdp_txrx_stats_struct.h"
44 #include "cdp_txrx_cmn_reg.h"
45 #include <qdf_util.h>
46 #include "dp_peer.h"
47 #include "htt_stats.h"
48 #include "dp_htt.h"
49 #ifdef WLAN_SUPPORT_RX_FISA
50 #include <dp_fisa_rx.h>
51 #endif
52 #include "htt_ppdu_stats.h"
53 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
54 #include "cfg_ucfg_api.h"
55 
56 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
57 #include "cdp_txrx_flow_ctrl_v2.h"
58 #else
59 
60 static inline void
61 cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
62 {
63 	return;
64 }
65 #endif
66 #ifdef WIFI_MONITOR_SUPPORT
67 #include <dp_mon.h>
68 #endif
69 #include "dp_ipa.h"
70 #ifdef FEATURE_WDS
71 #include "dp_txrx_wds.h"
72 #endif
73 #ifdef WLAN_SUPPORT_MSCS
74 #include "dp_mscs.h"
75 #endif
76 #ifdef WLAN_SUPPORT_MESH_LATENCY
77 #include "dp_mesh_latency.h"
78 #endif
79 #ifdef ATH_SUPPORT_IQUE
80 #include "dp_txrx_me.h"
81 #endif
82 #if defined(DP_CON_MON)
83 #ifndef REMOVE_PKT_LOG
84 #include <pktlog_ac_api.h>
85 #include <pktlog_ac.h>
86 #endif
87 #endif
88 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
89 #include <dp_swlm.h>
90 #endif
91 
92 #ifdef WLAN_FEATURE_STATS_EXT
93 #define INIT_RX_HW_STATS_LOCK(_soc) \
94 	qdf_spinlock_create(&(_soc)->rx_hw_stats_lock)
95 #define DEINIT_RX_HW_STATS_LOCK(_soc) \
96 	qdf_spinlock_destroy(&(_soc)->rx_hw_stats_lock)
97 #else
98 #define INIT_RX_HW_STATS_LOCK(_soc)  /* no op */
99 #define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */
100 #endif
101 
102 #if defined(DP_PEER_EXTENDED_API) || defined(WLAN_DP_PENDING_MEM_FLUSH)
103 #define SET_PEER_REF_CNT_ONE(_peer) \
104 	qdf_atomic_set(&(_peer)->ref_cnt, 1)
105 #else
106 #define SET_PEER_REF_CNT_ONE(_peer)
107 #endif
108 
109 #ifdef WLAN_SYSFS_DP_STATS
110 /* sysfs event wait time for firmware stat request, in milliseconds */
111 #define WLAN_SYSFS_STAT_REQ_WAIT_MS 3000
112 #endif
113 
114 #ifdef QCA_DP_ENABLE_TX_COMP_RING4
115 #define TXCOMP_RING4_NUM 3
116 #else
117 #define TXCOMP_RING4_NUM WBM2SW_TXCOMP_RING4_NUM
118 #endif
119 
120 #ifdef WLAN_MCAST_MLO
121 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
122 		HTT_TX_TCL_METADATA_V2_PDEV_ID_SET(_var, _val)
123 #else
124 #define DP_TX_TCL_METADATA_PDEV_ID_SET(_var, _val) \
125 		HTT_TX_TCL_METADATA_PDEV_ID_SET(_var, _val)
126 #endif
127 
128 QDF_COMPILE_TIME_ASSERT(max_rx_rings_check,
129 			MAX_REO_DEST_RINGS == CDP_MAX_RX_RINGS);
130 
131 QDF_COMPILE_TIME_ASSERT(max_tx_rings_check,
132 			MAX_TCL_DATA_RINGS == CDP_MAX_TX_COMP_RINGS);
133 
134 #define dp_init_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_INIT, params)
135 #define dp_init_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_INIT, params)
136 #define dp_init_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_INIT, params)
137 #define dp_init_info(params...) \
138 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_INIT, ## params)
139 #define dp_init_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_INIT, params)
140 
141 #define dp_vdev_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_VDEV, params)
142 #define dp_vdev_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_VDEV, params)
143 #define dp_vdev_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_VDEV, params)
144 #define dp_vdev_info(params...) \
145 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_VDEV, ## params)
146 #define dp_vdev_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_VDEV, params)
147 
148 void dp_configure_arch_ops(struct dp_soc *soc);
149 qdf_size_t dp_get_soc_context_size(uint16_t device_id);
150 
151 /*
152  * The max size of cdp_peer_stats_param_t is limited to 16 bytes.
153  * If the buffer size exceeds this limit,
154  * dp_txrx_get_peer_stats should be used instead.
155  */
156 QDF_COMPILE_TIME_ASSERT(cdp_peer_stats_param_t_max_size,
157 			(sizeof(cdp_peer_stats_param_t) <= 16));
158 
159 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
160 /*
161  * If WLAN_CFG_INT_NUM_CONTEXTS is changed, HIF_NUM_INT_CONTEXTS
162  * should also be updated accordingly
163  */
164 QDF_COMPILE_TIME_ASSERT(num_intr_grps,
165 			HIF_NUM_INT_CONTEXTS == WLAN_CFG_INT_NUM_CONTEXTS);
166 
167 /*
168  * HIF_EVENT_HIST_MAX should always be a power of 2
169  */
170 QDF_COMPILE_TIME_ASSERT(hif_event_history_size,
171 			(HIF_EVENT_HIST_MAX & (HIF_EVENT_HIST_MAX - 1)) == 0);
172 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
173 
174 /*
175  * If WLAN_CFG_INT_NUM_CONTEXTS is changed,
176  * WLAN_CFG_INT_NUM_CONTEXTS_MAX should also be updated
177  */
178 QDF_COMPILE_TIME_ASSERT(wlan_cfg_num_int_ctxs,
179 			WLAN_CFG_INT_NUM_CONTEXTS_MAX >=
180 			WLAN_CFG_INT_NUM_CONTEXTS);
181 
182 static QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl);
183 static QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl);
184 
185 static void dp_pdev_srng_deinit(struct dp_pdev *pdev);
186 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev);
187 static void dp_pdev_srng_free(struct dp_pdev *pdev);
188 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev);
189 
190 static void dp_soc_srng_deinit(struct dp_soc *soc);
191 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc);
192 static void dp_soc_srng_free(struct dp_soc *soc);
193 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc);
194 
195 static void dp_soc_cfg_init(struct dp_soc *soc);
196 static void dp_soc_cfg_attach(struct dp_soc *soc);
197 
198 static inline
199 QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
200 				struct cdp_pdev_attach_params *params);
201 
202 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id);
203 
204 static QDF_STATUS
205 dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
206 		   HTC_HANDLE htc_handle,
207 		   qdf_device_t qdf_osdev,
208 		   uint8_t pdev_id);
209 
210 static QDF_STATUS
211 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id, int force);
212 
213 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc);
214 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc);
215 
216 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
217 		  struct hif_opaque_softc *hif_handle);
218 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
219 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc,
220 				       uint8_t pdev_id,
221 				       int force);
222 static struct dp_soc *
223 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
224 	      struct cdp_soc_attach_params *params);
225 static inline QDF_STATUS dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl,
226 					      uint8_t vdev_id,
227 					      uint8_t *peer_mac_addr,
228 					      enum cdp_peer_type peer_type);
229 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
230 				       uint8_t vdev_id,
231 				       uint8_t *peer_mac, uint32_t bitmap);
232 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
233 				bool unmap_only);
234 #ifdef ENABLE_VERBOSE_DEBUG
235 bool is_dp_verbose_debug_enabled;
236 #endif
237 
238 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
239 static bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
240 static void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
241 			   bool enable);
242 static inline void
243 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
244 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats);
245 static inline void
246 dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
247 #endif
248 
249 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
250 						uint8_t index);
251 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index);
252 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index);
253 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
254 						 uint8_t index);
255 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
256 					    enum hal_ring_type ring_type,
257 					    int ring_num);
258 
259 #define DP_INTR_POLL_TIMER_MS	5
260 
261 #define MON_VDEV_TIMER_INIT 0x1
262 #define MON_VDEV_TIMER_RUNNING 0x2
263 
264 #define DP_MCS_LENGTH (6*MAX_MCS)
265 
266 #define DP_CURR_FW_STATS_AVAIL 19
267 #define DP_HTT_DBG_EXT_STATS_MAX 256
268 #define DP_MAX_SLEEP_TIME 100
269 #ifndef QCA_WIFI_3_0_EMU
270 #define SUSPEND_DRAIN_WAIT 500
271 #else
272 #define SUSPEND_DRAIN_WAIT 3000
273 #endif
274 
275 #ifdef IPA_OFFLOAD
276 /* Exclude IPA rings from the interrupt context */
277 #define TX_RING_MASK_VAL	0xb
278 #define RX_RING_MASK_VAL	0x7
279 #else
280 #define TX_RING_MASK_VAL	0xF
281 #define RX_RING_MASK_VAL	0xF
282 #endif
283 
284 #define STR_MAXLEN	64
285 
286 #define RNG_ERR		"SRNG setup failed for"
287 
288 /**
289  * default_dscp_tid_map - Default DSCP-TID mapping
290  *
291  * DSCP        TID
292  * 000000      0
293  * 001000      1
294  * 010000      2
295  * 011000      3
296  * 100000      4
297  * 101000      5
298  * 110000      6
299  * 111000      7
300  */
301 static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
302 	0, 0, 0, 0, 0, 0, 0, 0,
303 	1, 1, 1, 1, 1, 1, 1, 1,
304 	2, 2, 2, 2, 2, 2, 2, 2,
305 	3, 3, 3, 3, 3, 3, 3, 3,
306 	4, 4, 4, 4, 4, 4, 4, 4,
307 	5, 5, 5, 5, 5, 5, 5, 5,
308 	6, 6, 6, 6, 6, 6, 6, 6,
309 	7, 7, 7, 7, 7, 7, 7, 7,
310 };
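/*
 * Illustrative sketch (not compiled into the driver; the helper name
 * below is hypothetical): a packet's 6-bit DSCP field indexes directly
 * into the table above, so e.g. DSCP 46 (EF, 101110b) sits in the
 * 101xxx block and maps to TID 5.
 */
#if 0
static inline uint8_t dp_example_dscp_to_tid(uint8_t dscp)
{
	/* dscp is the 6-bit DSCP field from the IP header (0..63) */
	return default_dscp_tid_map[dscp % DSCP_TID_MAP_MAX];
}
#endif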
311 
312 /**
313  * default_pcp_tid_map - Default PCP-TID mapping
314  *
315  * PCP     TID
316  * 000      0
317  * 001      1
318  * 010      2
319  * 011      3
320  * 100      4
321  * 101      5
322  * 110      6
323  * 111      7
324  */
325 static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
326 	0, 1, 2, 3, 4, 5, 6, 7,
327 };
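/*
 * Illustrative sketch (not compiled into the driver; the helper name
 * below is hypothetical): the 3-bit 802.1Q PCP value maps one-to-one
 * onto the TID, so PCP 6 (voice) selects TID 6.
 */
#if 0
static inline uint8_t dp_example_pcp_to_tid(uint8_t pcp)
{
	return default_pcp_tid_map[pcp % PCP_TID_MAP_MAX];
}
#endif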
328 
329 /**
330  * @brief CPU to Tx ring map
331  */
332 uint8_t
333 dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
334 	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
335 	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
336 	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
337 	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
338 	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
339 #ifdef WLAN_TX_PKT_CAPTURE_ENH
340 	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
341 #endif
342 };
343 
344 qdf_export_symbol(dp_cpu_ring_map);
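/*
 * Illustrative note (values taken from the table above): the map is
 * indexed first by the NSS offload configuration (row) and then by the
 * interrupt context number (column), yielding the Tx ring a given
 * context uses. For example, with row 0 selected, interrupt context 2
 * transmits on ring 0x2.
 */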
345 
346 /**
347  * @brief Select the type of statistics
348  */
349 enum dp_stats_type {
350 	STATS_FW = 0,
351 	STATS_HOST = 1,
352 	STATS_TYPE_MAX = 2,
353 };
354 
355 /**
356  * @brief General Firmware statistics options
357  *
358  */
359 enum dp_fw_stats {
360 	TXRX_FW_STATS_INVALID	= -1,
361 };
362 
363 /**
364  * dp_stats_mapping_table - Firmware and Host statistics
365  * currently supported
366  */
367 const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
368 	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
369 	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
370 	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
371 	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
372 	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
373 	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
374 	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
375 	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
376 	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
377 	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
378 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
379 	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
380 	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
381 	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
382 	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
383 	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
384 	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
385 	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
386 	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
387 	/* Last ENUM for HTT FW STATS */
388 	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
389 	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
390 	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
391 	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
392 	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
393 	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
394 	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
395 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
396 	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
397 	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
398 	{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
399 	{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
400 	{TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
401 	{TXRX_FW_STATS_INVALID, TXRX_SOC_FSE_STATS},
402 	{TXRX_FW_STATS_INVALID, TXRX_HAL_REG_WRITE_STATS},
403 	{TXRX_FW_STATS_INVALID, TXRX_SOC_REO_HW_DESC_DUMP},
404 	{TXRX_FW_STATS_INVALID, TXRX_SOC_WBM_IDLE_HPTP_DUMP},
405 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT, TXRX_HOST_STATS_INVALID}
406 };
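/*
 * Illustrative sketch (not compiled into the driver; the helper name
 * below is hypothetical): each row of dp_stats_mapping_table pairs the
 * firmware HTT ext-stats id (column STATS_FW) with the host stats id
 * (column STATS_HOST) for one stats request; a column holds its
 * *_INVALID sentinel when that side has no counterpart.
 */
#if 0
static inline bool dp_example_stats_is_fw_only(uint32_t stats_id)
{
	return dp_stats_mapping_table[stats_id][STATS_FW] !=
			TXRX_FW_STATS_INVALID &&
	       dp_stats_mapping_table[stats_id][STATS_HOST] ==
			TXRX_HOST_STATS_INVALID;
}
#endif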
407 
408 /* MCL specific functions */
409 #if defined(DP_CON_MON)
410 
411 #ifdef DP_CON_MON_MSI_ENABLED
412 /**
413  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
414  * @soc: pointer to dp_soc handle
415  * @intr_ctx_num: interrupt context number for which mon mask is needed
416  *
417  * With DP_CON_MON_MSI_ENABLED, monitor mode rings are processed in
418  * interrupt (MSI) context rather than in a polled timer context, so
419  * this variant returns the monitor ring mask configured for the given
420  * interrupt context.
421  *
422  * So, when packet log is enabled for SAP/STA/P2P modes, monitor mode
423  * rings are serviced as part of regular interrupt processing.
424  *
425  * Return: mon ring mask configured for the interrupt context
426  */
427 static inline uint32_t
428 dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
429 {
430 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
431 }
432 #else
433 /**
434  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
435  * @soc: pointer to dp_soc handle
436  * @intr_ctx_num: interrupt context number for which mon mask is needed
437  *
438  * For MCL, monitor mode rings are processed in timer contexts (polled).
439  * This function returns 0 since, in interrupt mode (softirq-based RX),
440  * we do not want to process monitor mode rings in a softirq.
441  *
442  * So, when packet log is enabled for SAP/STA/P2P modes, regular
443  * interrupt processing will not service monitor mode rings; they are
444  * handled in a separate timer context.
445  *
446  * Return: 0
447  */
448 static inline uint32_t
449 dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
450 {
451 	return 0;
452 }
453 #endif
454 
455 /**
456  * dp_get_num_rx_contexts() - get number of RX contexts
457  * @soc_hdl: cdp opaque soc handle
458  *
459  * Return: number of RX contexts
460  */
461 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
462 {
463 	int i;
464 	int num_rx_contexts = 0;
465 
466 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
467 
468 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
469 		if (wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i))
470 			num_rx_contexts++;
471 
472 	return num_rx_contexts;
473 }
474 
475 #else
476 
477 /**
478  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
479  * @soc: pointer to dp_soc handle
480  * @intr_ctx_num: interrupt context number for which mon mask is needed
481  *
482  * Return: mon mask value
483  */
484 static inline
485 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
486 {
487 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
488 }
489 
490 /**
491  * dp_soc_reset_mon_intr_mask() - reset mon intr mask
492  * @soc: pointer to dp_soc handle
493  *
494  * Return: None
495  */
496 void dp_soc_reset_mon_intr_mask(struct dp_soc *soc)
497 {
498 	int i;
499 
500 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
501 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
502 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
503 	}
504 }
505 
506 qdf_export_symbol(dp_soc_reset_mon_intr_mask);
507 
508 /*
509  * dp_service_lmac_rings() - timer handler to reap lmac rings
510  * @arg: SoC Handle
511  *
512  * Return: None
513  *
514  */
515 static void dp_service_lmac_rings(void *arg)
516 {
517 	struct dp_soc *soc = (struct dp_soc *)arg;
518 	int ring = 0, i;
519 	struct dp_pdev *pdev = NULL;
520 	union dp_rx_desc_list_elem_t *desc_list = NULL;
521 	union dp_rx_desc_list_elem_t *tail = NULL;
522 
523 	/* Process LMAC interrupts */
524 	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
525 		int mac_for_pdev = ring;
526 		struct dp_srng *rx_refill_buf_ring;
527 
528 		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
529 		if (!pdev)
530 			continue;
531 
532 		rx_refill_buf_ring = &soc->rx_refill_buf_ring[mac_for_pdev];
533 
534 		dp_monitor_process(soc, NULL, mac_for_pdev,
535 				   QCA_NAPI_BUDGET);
536 
537 		for (i = 0;
538 		     i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
539 			dp_rxdma_err_process(&soc->intr_ctx[i], soc,
540 					     mac_for_pdev,
541 					     QCA_NAPI_BUDGET);
542 
543 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF,
544 						  mac_for_pdev))
545 			dp_rx_buffers_replenish(soc, mac_for_pdev,
546 						rx_refill_buf_ring,
547 						&soc->rx_desc_buf[mac_for_pdev],
548 						0, &desc_list, &tail);
549 	}
550 
551 	qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
552 }
553 
554 #endif
555 
556 #ifdef FEATURE_MEC
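/**
 * dp_peer_mec_flush_entries() - Delete all MEC entries in the table
 * @soc: Datapath SOC handle
 *
 * Entries are detached into a local free list while mec_lock is held
 * and are freed only after the lock is dropped, keeping the critical
 * section short.
 *
 * Return: None
 */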
557 void dp_peer_mec_flush_entries(struct dp_soc *soc)
558 {
559 	unsigned int index;
560 	struct dp_mec_entry *mecentry, *mecentry_next;
561 
562 	TAILQ_HEAD(, dp_mec_entry) free_list;
563 	TAILQ_INIT(&free_list);
564 
565 	if (!soc->mec_hash.mask)
566 		return;
567 
568 	if (!soc->mec_hash.bins)
569 		return;
570 
571 	if (!qdf_atomic_read(&soc->mec_cnt))
572 		return;
573 
574 	qdf_spin_lock_bh(&soc->mec_lock);
575 	for (index = 0; index <= soc->mec_hash.mask; index++) {
576 		if (!TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
577 			TAILQ_FOREACH_SAFE(mecentry, &soc->mec_hash.bins[index],
578 					   hash_list_elem, mecentry_next) {
579 			    dp_peer_mec_detach_entry(soc, mecentry, &free_list);
580 			}
581 		}
582 	}
583 	qdf_spin_unlock_bh(&soc->mec_lock);
584 
585 	dp_peer_mec_free_list(soc, &free_list);
586 }
587 
588 /**
589  * dp_print_mec_stats() - Dump MEC stats and entries in table
590  * @soc: Datapath soc handle
591  *
592  * Return: none
593  */
594 static void dp_print_mec_stats(struct dp_soc *soc)
595 {
596 	int i;
597 	uint32_t index;
598 	struct dp_mec_entry *mecentry = NULL, *mec_list;
599 	uint32_t num_entries = 0;
600 
601 	DP_PRINT_STATS("MEC Stats:");
602 	DP_PRINT_STATS("   Entries Added   = %d", soc->stats.mec.added);
603 	DP_PRINT_STATS("   Entries Deleted = %d", soc->stats.mec.deleted);
604 
605 	if (!qdf_atomic_read(&soc->mec_cnt))
606 		return;
607 
608 	mec_list = qdf_mem_malloc(sizeof(*mecentry) * DP_PEER_MAX_MEC_ENTRY);
609 	if (!mec_list) {
610 		dp_peer_warn("%pK: failed to allocate mec_list", soc);
611 		return;
612 	}
613 
614 	DP_PRINT_STATS("MEC Table:");
615 	for (index = 0; index <= soc->mec_hash.mask; index++) {
616 		qdf_spin_lock_bh(&soc->mec_lock);
617 		if (TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
618 			qdf_spin_unlock_bh(&soc->mec_lock);
619 			continue;
620 		}
621 
622 		TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index],
623 			      hash_list_elem) {
624 			qdf_mem_copy(&mec_list[num_entries], mecentry,
625 				     sizeof(*mecentry));
626 			num_entries++;
627 		}
628 		qdf_spin_unlock_bh(&soc->mec_lock);
629 	}
630 
631 	if (!num_entries) {
632 		qdf_mem_free(mec_list);
633 		return;
634 	}
635 
636 	for (i = 0; i < num_entries; i++) {
637 		DP_PRINT_STATS("%6d mac_addr = " QDF_MAC_ADDR_FMT
638 			       " is_active = %d pdev_id = %d vdev_id = %d",
639 			       i,
640 			       QDF_MAC_ADDR_REF(mec_list[i].mac_addr.raw),
641 			       mec_list[i].is_active,
642 			       mec_list[i].pdev_id,
643 			       mec_list[i].vdev_id);
644 	}
645 	qdf_mem_free(mec_list);
646 }
647 #else
648 static void dp_print_mec_stats(struct dp_soc *soc)
649 {
650 }
651 #endif
652 
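/**
 * dp_peer_add_ast_wifi3() - CDP wrapper to add an AST entry for a peer
 * @soc_hdl: Datapath SOC handle
 * @vdev_id: id of the vdev the peer belongs to
 * @peer_mac: MAC address used to look up the peer
 * @mac_addr: MAC address for the new AST entry
 * @type: AST entry type
 * @flags: AST entry flags
 *
 * Return: 0 if the entry was added (or already exists / is being
 *	   re-added), -1 otherwise
 */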
653 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
654 				 uint8_t vdev_id,
655 				 uint8_t *peer_mac,
656 				 uint8_t *mac_addr,
657 				 enum cdp_txrx_ast_entry_type type,
658 				 uint32_t flags)
659 {
660 	int ret = -1;
661 	QDF_STATUS status = QDF_STATUS_SUCCESS;
662 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
663 						       peer_mac, 0, vdev_id,
664 						       DP_MOD_ID_CDP);
665 
666 	if (!peer) {
667 		dp_peer_debug("Peer is NULL!");
668 		return ret;
669 	}
670 
671 	status = dp_peer_add_ast((struct dp_soc *)soc_hdl,
672 				 peer,
673 				 mac_addr,
674 				 type,
675 				 flags);
676 	if ((status == QDF_STATUS_SUCCESS) ||
677 	    (status == QDF_STATUS_E_ALREADY) ||
678 	    (status == QDF_STATUS_E_AGAIN))
679 		ret = 0;
680 
681 	dp_hmwds_ast_add_notify(peer, mac_addr,
682 				type, status, false);
683 
684 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
685 
686 	return ret;
687 }
688 
689 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
690 						uint8_t vdev_id,
691 						uint8_t *peer_mac,
692 						uint8_t *wds_macaddr,
693 						uint32_t flags)
694 {
695 	int status = -1;
696 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
697 	struct dp_ast_entry  *ast_entry = NULL;
698 	struct dp_peer *peer;
699 
700 	if (soc->ast_offload_support)
701 		return status;
702 
703 	peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
704 				      peer_mac, 0, vdev_id,
705 				      DP_MOD_ID_CDP);
706 
707 	if (!peer) {
708 		dp_peer_debug("Peer is NULL!");
709 		return status;
710 	}
711 
712 	qdf_spin_lock_bh(&soc->ast_lock);
713 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
714 						    peer->vdev->pdev->pdev_id);
715 
716 	if (ast_entry) {
717 		status = dp_peer_update_ast(soc,
718 					    peer,
719 					    ast_entry, flags);
720 	}
721 	qdf_spin_unlock_bh(&soc->ast_lock);
722 
723 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
724 
725 	return status;
726 }
727 
728 /*
729  * dp_peer_reset_ast_entries() - Deletes all HMWDS entries for a peer
730  * @soc:		Datapath SOC handle
731  * @peer:		DP peer
732  * @arg:		callback argument
733  *
734  * Return: None
735  */
736 static void
737 dp_peer_reset_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
738 {
739 	struct dp_ast_entry *ast_entry = NULL;
740 	struct dp_ast_entry *tmp_ast_entry;
741 
742 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) {
743 		if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
744 		    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
745 			dp_peer_del_ast(soc, ast_entry);
746 	}
747 }
748 
749 /*
750  * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry
751  * @soc_hdl:		Datapath SOC handle
752  * @wds_macaddr:	WDS entry MAC Address
753  * @peer_mac_addr:	peer MAC address
754  * @vdev_id:		id of vdev handle
755  * Return: QDF_STATUS
756  */
757 static QDF_STATUS dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
758 					 uint8_t *wds_macaddr,
759 					 uint8_t *peer_mac_addr,
760 					 uint8_t vdev_id)
761 {
762 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
763 	struct dp_ast_entry *ast_entry = NULL;
764 	struct dp_peer *peer;
765 	struct dp_pdev *pdev;
766 	struct dp_vdev *vdev;
767 
768 	if (soc->ast_offload_support)
769 		return QDF_STATUS_E_FAILURE;
770 
771 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
772 
773 	if (!vdev)
774 		return QDF_STATUS_E_FAILURE;
775 
776 	pdev = vdev->pdev;
777 
778 	if (peer_mac_addr) {
779 		peer = dp_peer_find_hash_find(soc, peer_mac_addr,
780 					      0, vdev->vdev_id,
781 					      DP_MOD_ID_CDP);
782 		if (!peer) {
783 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
784 			return QDF_STATUS_E_FAILURE;
785 		}
786 
787 		qdf_spin_lock_bh(&soc->ast_lock);
788 		dp_peer_reset_ast_entries(soc, peer, NULL);
789 		qdf_spin_unlock_bh(&soc->ast_lock);
790 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
791 	} else if (wds_macaddr) {
792 		qdf_spin_lock_bh(&soc->ast_lock);
793 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
794 							    pdev->pdev_id);
795 
796 		if (ast_entry) {
797 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
798 			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
799 				dp_peer_del_ast(soc, ast_entry);
800 		}
801 		qdf_spin_unlock_bh(&soc->ast_lock);
802 	}
803 
804 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
805 	return QDF_STATUS_SUCCESS;
806 }
807 
808 /*
809  * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entries
810  * @soc_hdl:		Datapath SOC handle
811  * @vdev_id:		id of vdev object
812  *
813  * Return: QDF_STATUS
814  */
815 static QDF_STATUS
816 dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
817 			     uint8_t vdev_id)
818 {
819 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
820 
821 	if (soc->ast_offload_support)
822 		return QDF_STATUS_SUCCESS;
823 
824 	qdf_spin_lock_bh(&soc->ast_lock);
825 
826 	dp_soc_iterate_peer(soc, dp_peer_reset_ast_entries, NULL,
827 			    DP_MOD_ID_CDP);
828 	qdf_spin_unlock_bh(&soc->ast_lock);
829 
830 	return QDF_STATUS_SUCCESS;
831 }
832 
833 /*
834  * dp_peer_flush_ast_entries() - Delete all wds and hmwds ast entries of a peer
835  * @soc:		Datapath SOC
836  * @peer:		Datapath peer
837  * @arg:		arg to callback
838  *
839  * Return: None
840  */
841 static void
842 dp_peer_flush_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
843 {
844 	struct dp_ast_entry *ase = NULL;
845 	struct dp_ast_entry *temp_ase;
846 
847 	DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
848 		if ((ase->type ==
849 			CDP_TXRX_AST_TYPE_STATIC) ||
850 			(ase->type ==
851 			 CDP_TXRX_AST_TYPE_SELF) ||
852 			(ase->type ==
853 			 CDP_TXRX_AST_TYPE_STA_BSS))
854 			continue;
855 		dp_peer_del_ast(soc, ase);
856 	}
857 }
858 
859 /*
860  * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entries
861  * @soc_hdl:		Datapath SOC handle
862  *
863  * Return: None
864  */
865 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
866 {
867 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
868 
869 	qdf_spin_lock_bh(&soc->ast_lock);
870 
871 	dp_soc_iterate_peer(soc, dp_peer_flush_ast_entries, NULL,
872 			    DP_MOD_ID_CDP);
873 
874 	qdf_spin_unlock_bh(&soc->ast_lock);
875 	dp_peer_mec_flush_entries(soc);
876 }
877 
878 /**
879  * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
880  *                                       and return ast entry information
881  *                                       of first ast entry found in the
882  *                                       table with given mac address
883  *
884  * @soc_hdl : data path soc handle
885  * @ast_mac_addr : AST entry mac address
886  * @ast_entry_info : ast entry information
887  *
888  * return : true if ast entry found with ast_mac_addr
889  *          false if ast entry not found
890  */
891 static bool dp_peer_get_ast_info_by_soc_wifi3
892 	(struct cdp_soc_t *soc_hdl,
893 	 uint8_t *ast_mac_addr,
894 	 struct cdp_ast_entry_info *ast_entry_info)
895 {
896 	struct dp_ast_entry *ast_entry = NULL;
897 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
898 	struct dp_peer *peer = NULL;
899 
900 	if (soc->ast_offload_support)
901 		return false;
902 
903 	qdf_spin_lock_bh(&soc->ast_lock);
904 
905 	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
906 	if ((!ast_entry) ||
907 	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
908 		qdf_spin_unlock_bh(&soc->ast_lock);
909 		return false;
910 	}
911 
912 	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
913 				     DP_MOD_ID_AST);
914 	if (!peer) {
915 		qdf_spin_unlock_bh(&soc->ast_lock);
916 		return false;
917 	}
918 
919 	ast_entry_info->type = ast_entry->type;
920 	ast_entry_info->pdev_id = ast_entry->pdev_id;
921 	ast_entry_info->vdev_id = ast_entry->vdev_id;
922 	ast_entry_info->peer_id = ast_entry->peer_id;
923 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
924 		     &peer->mac_addr.raw[0],
925 		     QDF_MAC_ADDR_SIZE);
926 	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
927 	qdf_spin_unlock_bh(&soc->ast_lock);
928 	return true;
929 }
930 
931 /**
932  * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
933  *                                          and return ast entry information
934  *                                          if mac address and pdev_id matches
935  *
936  * @soc_hdl : data path soc handle
937  * @ast_mac_addr : AST entry mac address
938  * @pdev_id : pdev_id
939  * @ast_entry_info : ast entry information
940  *
941  * return : true if ast entry found with ast_mac_addr
942  *          false if ast entry not found
943  */
944 static bool dp_peer_get_ast_info_by_pdevid_wifi3
945 		(struct cdp_soc_t *soc_hdl,
946 		 uint8_t *ast_mac_addr,
947 		 uint8_t pdev_id,
948 		 struct cdp_ast_entry_info *ast_entry_info)
949 {
950 	struct dp_ast_entry *ast_entry;
951 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
952 	struct dp_peer *peer = NULL;
953 
954 	if (soc->ast_offload_support)
955 		return false;
956 
957 	qdf_spin_lock_bh(&soc->ast_lock);
958 
959 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr,
960 						    pdev_id);
961 
962 	if ((!ast_entry) ||
963 	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
964 		qdf_spin_unlock_bh(&soc->ast_lock);
965 		return false;
966 	}
967 
968 	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
969 				     DP_MOD_ID_AST);
970 	if (!peer) {
971 		qdf_spin_unlock_bh(&soc->ast_lock);
972 		return false;
973 	}
974 
975 	ast_entry_info->type = ast_entry->type;
976 	ast_entry_info->pdev_id = ast_entry->pdev_id;
977 	ast_entry_info->vdev_id = ast_entry->vdev_id;
978 	ast_entry_info->peer_id = ast_entry->peer_id;
979 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
980 		     &peer->mac_addr.raw[0],
981 		     QDF_MAC_ADDR_SIZE);
982 	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
983 	qdf_spin_unlock_bh(&soc->ast_lock);
984 	return true;
985 }
986 
987 /**
988  * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table
989  *                            with given mac address
990  *
991  * @soc_handle : data path soc handle
992  * @mac_addr : AST entry mac address
993  * @callback : callback function to be called on ast delete response from FW
994  * @cookie : argument to be passed to callback
995  *
996  * return : QDF_STATUS_SUCCESS if ast entry found with mac_addr and delete
997  *          is sent
998  *          QDF_STATUS_E_INVAL if ast entry not found
999  */
1000 static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
1001 					       uint8_t *mac_addr,
1002 					       txrx_ast_free_cb callback,
1003 					       void *cookie)
1004 
1005 {
1006 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1007 	struct dp_ast_entry *ast_entry = NULL;
1008 	txrx_ast_free_cb cb = NULL;
1009 	void *arg = NULL;
1010 
1011 	if (soc->ast_offload_support)
1012 		return -QDF_STATUS_E_INVAL;
1013 
1014 	qdf_spin_lock_bh(&soc->ast_lock);
1015 	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
1016 	if (!ast_entry) {
1017 		qdf_spin_unlock_bh(&soc->ast_lock);
1018 		return -QDF_STATUS_E_INVAL;
1019 	}
1020 
1021 	if (ast_entry->callback) {
1022 		cb = ast_entry->callback;
1023 		arg = ast_entry->cookie;
1024 	}
1025 
1026 	ast_entry->callback = callback;
1027 	ast_entry->cookie = cookie;
1028 
1029 	/*
1030 	 * If delete_in_progress is set, an AST delete was already sent to
1031 	 * the target and the host is waiting for a response; do not send
1032 	 * the delete again.
1033 	 */
1034 	if (!ast_entry->delete_in_progress)
1035 		dp_peer_del_ast(soc, ast_entry);
1036 
1037 	qdf_spin_unlock_bh(&soc->ast_lock);
1038 	if (cb) {
1039 		cb(soc->ctrl_psoc,
1040 		   dp_soc_to_cdp_soc(soc),
1041 		   arg,
1042 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1043 	}
1044 	return QDF_STATUS_SUCCESS;
1045 }
1046 
1047 /**
1048  * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
1049  *                                   table if mac address and pdev_id matches
1050  *
1051  * @soc_handle : data path soc handle
1052  * @mac_addr : AST entry mac address
1053  * @pdev_id : pdev id
1054  * @callback : callback function to be called on ast delete response from FW
1055  * @cookie : argument to be passed to callback
1056  *
1057  * return : QDF_STATUS_SUCCESS if ast entry found with mac_addr and delete
1058  *          is sent
1059  *          QDF_STATUS_E_INVAL if ast entry not found
1060  */
1061 
1062 static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
1063 						uint8_t *mac_addr,
1064 						uint8_t pdev_id,
1065 						txrx_ast_free_cb callback,
1066 						void *cookie)
1067 
1068 {
1069 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1070 	struct dp_ast_entry *ast_entry;
1071 	txrx_ast_free_cb cb = NULL;
1072 	void *arg = NULL;
1073 
1074 	if (soc->ast_offload_support)
1075 		return -QDF_STATUS_E_INVAL;
1076 
1077 	qdf_spin_lock_bh(&soc->ast_lock);
1078 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);
1079 
1080 	if (!ast_entry) {
1081 		qdf_spin_unlock_bh(&soc->ast_lock);
1082 		return -QDF_STATUS_E_INVAL;
1083 	}
1084 
1085 	if (ast_entry->callback) {
1086 		cb = ast_entry->callback;
1087 		arg = ast_entry->cookie;
1088 	}
1089 
1090 	ast_entry->callback = callback;
1091 	ast_entry->cookie = cookie;
1092 
1093 	/*
1094 	 * If delete_in_progress is set, an AST delete was already sent to
1095 	 * the target and the host is waiting for a response; do not send
1096 	 * the delete again.
1097 	 */
1098 	if (!ast_entry->delete_in_progress)
1099 		dp_peer_del_ast(soc, ast_entry);
1100 
1101 	qdf_spin_unlock_bh(&soc->ast_lock);
1102 
1103 	if (cb) {
1104 		cb(soc->ctrl_psoc,
1105 		   dp_soc_to_cdp_soc(soc),
1106 		   arg,
1107 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1108 	}
1109 	return QDF_STATUS_SUCCESS;
1110 }
1111 
1112 /**
1113  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
1114  * @ring_num: ring num of the ring being queried
1115  * @grp_mask: the grp_mask array for the ring type in question.
1116  *
1117  * The grp_mask array is indexed by group number and the bit fields correspond
1118  * to ring numbers.  We are finding which interrupt group a ring belongs to.
1119  *
1120  * Return: the index in the grp_mask array with the ring number.
1121  * -QDF_STATUS_E_NOENT if no entry is found
1122  */
1123 static int dp_srng_find_ring_in_mask(int ring_num, uint8_t *grp_mask)
1124 {
1125 	int ext_group_num;
1126 	uint8_t mask = 1 << ring_num;
1127 
1128 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
1129 	     ext_group_num++) {
1130 		if (mask & grp_mask[ext_group_num])
1131 			return ext_group_num;
1132 	}
1133 
1134 	return -QDF_STATUS_E_NOENT;
1135 }
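/*
 * Worked example (hypothetical mask values): with
 * grp_mask = {0x1, 0x6, 0x8}, ring 2 has mask 1 << 2 = 0x4, which
 * intersects grp_mask[1] = 0x6, so dp_srng_find_ring_in_mask(2,
 * grp_mask) returns ext group 1.
 */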
1136 
1137 /**
1138  * dp_is_msi_group_number_invalid() - check msi_group_number valid or not
1139  * @msi_group_number: MSI group number.
1140  * @msi_data_count: MSI data count.
1141  *
1142  * Return: true if msi_group_number is invalid.
1143  */
1144 #ifdef WLAN_ONE_MSI_VECTOR
1145 static bool dp_is_msi_group_number_invalid(int msi_group_number,
1146 					   int msi_data_count)
1147 {
1148 	return false;
1149 }
1150 #else
1151 static bool dp_is_msi_group_number_invalid(int msi_group_number,
1152 					   int msi_data_count)
1153 {
1154 	return msi_group_number > msi_data_count;
1155 }
1156 #endif
1157 
1158 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
1159 /**
1160  * dp_is_reo_ring_num_in_nf_grp1() - Check if the current reo ring is part of
1161  *				rx_near_full_grp1 mask
1162  * @soc: Datapath SoC Handle
1163  * @ring_num: REO ring number
1164  *
1165  * Return: 1 if the ring_num belongs to reo_nf_grp1,
1166  *	   0, otherwise.
1167  */
1168 static inline int
1169 dp_is_reo_ring_num_in_nf_grp1(struct dp_soc *soc, int ring_num)
1170 {
1171 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_1 & (1 << ring_num));
1172 }
1173 
1174 /**
1175  * dp_is_reo_ring_num_in_nf_grp2() - Check if the current reo ring is part of
1176  *				rx_near_full_grp2 mask
1177  * @soc: Datapath SoC Handle
1178  * @ring_num: REO ring number
1179  *
1180  * Return: 1 if the ring_num belongs to reo_nf_grp2,
1181  *	   0, otherwise.
1182  */
1183 static inline int
1184 dp_is_reo_ring_num_in_nf_grp2(struct dp_soc *soc, int ring_num)
1185 {
1186 	return (WLAN_CFG_RX_NEAR_FULL_IRQ_MASK_2 & (1 << ring_num));
1187 }
1188 
1189 /**
1190  * dp_srng_get_near_full_irq_mask() - Get near-full irq mask for a particular
1191  *				ring type and number
1192  * @soc: Datapath SoC handle
1193  * @ring_type: SRNG type
1194  * @ring_num: ring num
1195  *
1196  * Return: near full IRQ mask pointer
1197  */
1198 static inline
1199 uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
1200 					enum hal_ring_type ring_type,
1201 					int ring_num)
1202 {
1203 	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
1204 	uint8_t wbm2_sw_rx_rel_ring_id;
1205 	uint8_t *nf_irq_mask = NULL;
1206 
1207 	switch (ring_type) {
1208 	case WBM2SW_RELEASE:
1209 		wbm2_sw_rx_rel_ring_id =
1210 			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
1211 		if (ring_num != wbm2_sw_rx_rel_ring_id) {
1212 			nf_irq_mask = &soc->wlan_cfg_ctx->
1213 					int_tx_ring_near_full_irq_mask[0];
1214 		}
1215 		break;
1216 	case REO_DST:
1217 		if (dp_is_reo_ring_num_in_nf_grp1(soc, ring_num))
1218 			nf_irq_mask =
1219 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_1_mask[0];
1220 		else if (dp_is_reo_ring_num_in_nf_grp2(soc, ring_num))
1221 			nf_irq_mask =
1222 			&soc->wlan_cfg_ctx->int_rx_ring_near_full_irq_2_mask[0];
1223 		else
1224 			qdf_assert(0);
1225 		break;
1226 	default:
1227 		break;
1228 	}
1229 
1230 	return nf_irq_mask;
1231 }
1232 
1233 /**
1234  * dp_srng_set_msi2_ring_params() - Set the msi2 addr/data in the ring params
1235  * @soc: Datapath SoC handle
1236  * @ring_params: srng params handle
1237  * @msi2_addr: MSI2 addr to be set for the SRNG
1238  * @msi2_data: MSI2 data to be set for the SRNG
1239  *
1240  * Return: None
1241  */
1242 static inline
1243 void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
1244 				  struct hal_srng_params *ring_params,
1245 				  qdf_dma_addr_t msi2_addr,
1246 				  uint32_t msi2_data)
1247 {
1248 	ring_params->msi2_addr = msi2_addr;
1249 	ring_params->msi2_data = msi2_data;
1250 }
1251 
1252 /**
1253  * dp_srng_msi2_setup() - Setup MSI2 details for near full IRQ of an SRNG
1254  * @soc: Datapath SoC handle
1255  * @ring_params: ring_params for SRNG
1256  * @ring_type: SRNG type
1257  * @ring_num: ring number for the SRNG
1258  * @nf_msi_grp_num: near full msi group number
1259  *
1260  * Return: None
1261  */
1262 static inline void
1263 dp_srng_msi2_setup(struct dp_soc *soc,
1264 		   struct hal_srng_params *ring_params,
1265 		   int ring_type, int ring_num, int nf_msi_grp_num)
1266 {
1267 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
1268 	int msi_data_count, ret;
1269 
1270 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1271 					  &msi_data_count, &msi_data_start,
1272 					  &msi_irq_start);
1273 	if (ret)
1274 		return;
1275 
1276 	if (nf_msi_grp_num < 0) {
1277 		dp_init_info("%pK: ring near full IRQ not part of an ext_group; ring_type: %d,ring_num %d",
1278 			     soc, ring_type, ring_num);
1279 		ring_params->msi2_addr = 0;
1280 		ring_params->msi2_data = 0;
1281 		return;
1282 	}
1283 
1284 	if (dp_is_msi_group_number_invalid(nf_msi_grp_num, msi_data_count)) {
1285 		dp_init_warn("%pK: 2 msi_groups will share an msi for near full IRQ; msi_group_num %d",
1286 			     soc, nf_msi_grp_num);
1287 		QDF_ASSERT(0);
1288 	}
1289 
1290 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
1291 
1292 	ring_params->nf_irq_support = 1;
1293 	ring_params->msi2_addr = addr_low;
1294 	ring_params->msi2_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
1295 	ring_params->msi2_data = (nf_msi_grp_num % msi_data_count)
1296 		+ msi_data_start;
1297 	ring_params->flags |= HAL_SRNG_MSI_INTR;
1298 }
1299 
1300 /* Percentage of ring entries considered as nearly full */
1301 #define DP_NF_HIGH_THRESH_PERCENTAGE	75
1302 /* Percentage of ring entries considered as critically full */
1303 #define DP_NF_CRIT_THRESH_PERCENTAGE	90
1304 /* Percentage of ring entries considered as safe threshold */
1305 #define DP_NF_SAFE_THRESH_PERCENTAGE	50
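/*
 * Worked example (assumed ring size): for a ring of 1024 entries, the
 * integer arithmetic in dp_srng_configure_nf_interrupt_thresholds()
 * below yields high_thresh = 1024 * 75 / 100 = 768,
 * crit_thresh = 921 and safe_thresh = 512 entries.
 */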
1306 
1307 /**
1308  * dp_srng_configure_nf_interrupt_thresholds() - Configure the thresholds for
1309  *			near full irq
1310  * @soc: Datapath SoC handle
1311  * @ring_params: ring params for SRNG
1312  * @ring_type: ring type
1313  */
1314 static inline void
1315 dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
1316 					  struct hal_srng_params *ring_params,
1317 					  int ring_type)
1318 {
1319 	if (ring_params->nf_irq_support) {
1320 		ring_params->high_thresh = (ring_params->num_entries *
1321 					    DP_NF_HIGH_THRESH_PERCENTAGE) / 100;
1322 		ring_params->crit_thresh = (ring_params->num_entries *
1323 					    DP_NF_CRIT_THRESH_PERCENTAGE) / 100;
1324 		ring_params->safe_thresh = (ring_params->num_entries *
1325 					    DP_NF_SAFE_THRESH_PERCENTAGE) / 100;
1326 	}
1327 }
1328 
1329 /**
1330  * dp_srng_set_nf_thresholds() - Set the near full thresholds to srng data
1331  *			structure from the ring params
1332  * @soc: Datapath SoC handle
1333  * @srng: SRNG handle
1334  * @ring_params: ring params for a SRNG
1335  *
1336  * Return: None
1337  */
1338 static inline void
1339 dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
1340 			  struct hal_srng_params *ring_params)
1341 {
1342 	srng->crit_thresh = ring_params->crit_thresh;
1343 	srng->safe_thresh = ring_params->safe_thresh;
1344 }
1345 
1346 #else
1347 static inline
1348 uint8_t *dp_srng_get_near_full_irq_mask(struct dp_soc *soc,
1349 					enum hal_ring_type ring_type,
1350 					int ring_num)
1351 {
1352 	return NULL;
1353 }
1354 
1355 static inline
1356 void dp_srng_set_msi2_ring_params(struct dp_soc *soc,
1357 				  struct hal_srng_params *ring_params,
1358 				  qdf_dma_addr_t msi2_addr,
1359 				  uint32_t msi2_data)
1360 {
1361 }
1362 
1363 static inline void
1364 dp_srng_msi2_setup(struct dp_soc *soc,
1365 		   struct hal_srng_params *ring_params,
1366 		   int ring_type, int ring_num, int nf_msi_grp_num)
1367 {
1368 }
1369 
1370 static inline void
1371 dp_srng_configure_nf_interrupt_thresholds(struct dp_soc *soc,
1372 					  struct hal_srng_params *ring_params,
1373 					  int ring_type)
1374 {
1375 }
1376 
1377 static inline void
1378 dp_srng_set_nf_thresholds(struct dp_soc *soc, struct dp_srng *srng,
1379 			  struct hal_srng_params *ring_params)
1380 {
1381 }
1382 #endif
1383 
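/**
 * dp_srng_calculate_msi_group() - Find the MSI groups servicing the
 *			regular and near-full interrupts of a ring
 * @soc: Datapath SoC handle
 * @ring_type: SRNG type
 * @ring_num: ring number for the given ring type
 * @reg_msi_grp_num: filled with the regular-interrupt MSI group
 * @nf_irq_support: whether near-full IRQ is supported for this ring
 * @nf_msi_grp_num: filled with the near-full-interrupt MSI group
 *
 * Return: QDF_STATUS_SUCCESS, or -QDF_STATUS_E_NOENT for ring types
 *	   that are not part of any interrupt group
 */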
1384 static int dp_srng_calculate_msi_group(struct dp_soc *soc,
1385 				       enum hal_ring_type ring_type,
1386 				       int ring_num,
1387 				       int *reg_msi_grp_num,
1388 				       bool nf_irq_support,
1389 				       int *nf_msi_grp_num)
1390 {
1391 	struct wlan_cfg_dp_soc_ctxt *cfg_ctx = soc->wlan_cfg_ctx;
1392 	uint8_t *grp_mask, *nf_irq_mask = NULL;
1393 	bool nf_irq_enabled = false;
1394 	uint8_t wbm2_sw_rx_rel_ring_id;
1395 
1396 	switch (ring_type) {
1397 	case WBM2SW_RELEASE:
1398 		wbm2_sw_rx_rel_ring_id =
1399 			wlan_cfg_get_rx_rel_ring_id(cfg_ctx);
1400 		if (ring_num == wbm2_sw_rx_rel_ring_id) {
1401 			/* dp_rx_wbm_err_process - soc->rx_rel_ring */
1402 			grp_mask = &cfg_ctx->int_rx_wbm_rel_ring_mask[0];
1403 			ring_num = 0;
1404 		} else { /* dp_tx_comp_handler - soc->tx_comp_ring */
1405 			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
1406 			nf_irq_mask = dp_srng_get_near_full_irq_mask(soc,
1407 								     ring_type,
1408 								     ring_num);
1409 			if (nf_irq_mask)
1410 				nf_irq_enabled = true;
1411 
1412 			/*
1413 			 * Using ring 4 as 4th tx completion ring since ring 3
1414 			 * is Rx error ring
1415 			 */
1416 			if (ring_num == WBM2SW_TXCOMP_RING4_NUM)
1417 				ring_num = TXCOMP_RING4_NUM;
1418 		}
1419 	break;
1420 
1421 	case REO_EXCEPTION:
1422 		/* dp_rx_err_process - &soc->reo_exception_ring */
1423 		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
1424 	break;
1425 
1426 	case REO_DST:
1427 		/* dp_rx_process - soc->reo_dest_ring */
1428 		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
1429 		nf_irq_mask = dp_srng_get_near_full_irq_mask(soc, ring_type,
1430 							     ring_num);
1431 		if (nf_irq_mask)
1432 			nf_irq_enabled = true;
1433 	break;
1434 
1435 	case REO_STATUS:
1436 		/* dp_reo_status_ring_handler - soc->reo_status_ring */
1437 		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
1438 	break;
1439 
1440 	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring */
1441 	case RXDMA_MONITOR_STATUS:
1442 	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
1443 	case RXDMA_MONITOR_DST:
1444 		/* dp_mon_process */
1445 		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
1446 	break;
1447 	case TX_MONITOR_DST:
1448 		/* dp_tx_mon_process */
1449 		grp_mask = &soc->wlan_cfg_ctx->int_tx_mon_ring_mask[0];
1450 	break;
1451 	case RXDMA_DST:
1452 		/* dp_rxdma_err_process */
1453 		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
1454 	break;
1455 
1456 	case RXDMA_BUF:
1457 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
1458 	break;
1459 
1460 	case RXDMA_MONITOR_BUF:
1461 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
1462 	break;
1463 
1464 	case TX_MONITOR_BUF:
1465 		grp_mask = &soc->wlan_cfg_ctx->int_host2txmon_ring_mask[0];
1466 	break;
1467 
1468 	case TCL_DATA:
1469 	/* CMD_CREDIT_RING is used as command in 8074 and credit in 9000 */
1470 	case TCL_CMD_CREDIT:
1471 	case REO_CMD:
1472 	case SW2WBM_RELEASE:
1473 	case WBM_IDLE_LINK:
1474 		/* normally empty SW_TO_HW rings */
1475 		return -QDF_STATUS_E_NOENT;
1476 	break;
1477 
1478 	case TCL_STATUS:
1479 	case REO_REINJECT:
1480 		/* misc unused rings */
1481 		return -QDF_STATUS_E_NOENT;
1482 	break;
1483 
1484 	case CE_SRC:
1485 	case CE_DST:
1486 	case CE_DST_STATUS:
1487 		/* CE_rings - currently handled by hif */
1488 	default:
1489 		return -QDF_STATUS_E_NOENT;
1490 	break;
1491 	}
1492 
1493 	*reg_msi_grp_num = dp_srng_find_ring_in_mask(ring_num, grp_mask);
1494 
1495 	if (nf_irq_support && nf_irq_enabled) {
1496 		*nf_msi_grp_num = dp_srng_find_ring_in_mask(ring_num,
1497 							    nf_irq_mask);
1498 	}
1499 
1500 	return QDF_STATUS_SUCCESS;
1501 }
1502 
1503 /*
1504  * dp_get_num_msi_available() - API to get number of MSIs available
1505  * @soc: DP soc Handle
1506  * @interrupt_mode: Mode of interrupts
1507  *
1508  * Return: Number of MSIs available or 0 in case of integrated
1509  */
1510 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
1511 static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
1512 {
1513 	return 0;
1514 }
1515 #else
1516 /*
1517  * dp_get_num_msi_available() - API to get number of MSIs available
1518  * @soc: DP soc Handle
1519  * @interrupt_mode: Mode of interrupts
1520  *
1521  * Return: Number of MSIs available or 0 in case of integrated
1522  */
1523 static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
1524 {
1525 	int msi_data_count;
1526 	int msi_data_start;
1527 	int msi_irq_start;
1528 	int ret;
1529 
1530 	if (interrupt_mode == DP_INTR_INTEGRATED) {
1531 		return 0;
1532 	} else if (interrupt_mode == DP_INTR_MSI || interrupt_mode ==
1533 		   DP_INTR_POLL) {
1534 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1535 						  &msi_data_count,
1536 						  &msi_data_start,
1537 						  &msi_irq_start);
1538 		if (ret) {
1539 			qdf_err("Unable to get DP MSI assignment %d",
1540 				interrupt_mode);
1541 			return -EINVAL;
1542 		}
1543 		return msi_data_count;
1544 	}
1545 	qdf_err("Interrupt mode invalid %d", interrupt_mode);
1546 	return -EINVAL;
1547 }
1548 #endif
1549 
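/**
 * dp_srng_msi_setup() - Fill the MSI address/data in the ring params
 * @soc: Datapath SoC handle
 * @ring_params: SRNG params to be filled
 * @ring_type: SRNG type
 * @ring_num: ring number for the given ring type
 *
 * Return: None
 */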
1550 static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
1551 			      *ring_params, int ring_type, int ring_num)
1552 {
1553 	int reg_msi_grp_num;
1554 	/*
1555 	 * nf_msi_grp_num needs to be initialized with a negative value,
1556 	 * to avoid configuring near-full msi for WBM2SW3 ring
1557 	 */
1558 	int nf_msi_grp_num = -1;
1559 	int msi_data_count;
1560 	int ret;
1561 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
1562 	bool nf_irq_support;
1563 
1564 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1565 					    &msi_data_count, &msi_data_start,
1566 					    &msi_irq_start);
1567 
1568 	if (ret)
1569 		return;
1570 
1571 	nf_irq_support = hal_srng_is_near_full_irq_supported(soc->hal_soc,
1572 							     ring_type,
1573 							     ring_num);
1574 	ret = dp_srng_calculate_msi_group(soc, ring_type, ring_num,
1575 					  &reg_msi_grp_num,
1576 					  nf_irq_support,
1577 					  &nf_msi_grp_num);
1578 	if (ret < 0) {
1579 		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
1580 			     soc, ring_type, ring_num);
1581 		ring_params->msi_addr = 0;
1582 		ring_params->msi_data = 0;
1583 		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
1584 		return;
1585 	}
1586 
1587 	if (reg_msi_grp_num < 0) {
1588 		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d,ring_num %d",
1589 			     soc, ring_type, ring_num);
1590 		ring_params->msi_addr = 0;
1591 		ring_params->msi_data = 0;
1592 		goto configure_msi2;
1593 	}
1594 
1595 	if (dp_is_msi_group_number_invalid(reg_msi_grp_num, msi_data_count)) {
1596 		dp_init_warn("%pK: 2 msi_groups will share an msi; msi_group_num %d",
1597 			     soc, reg_msi_grp_num);
1598 		QDF_ASSERT(0);
1599 	}
1600 
1601 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
1602 
1603 	ring_params->msi_addr = addr_low;
1604 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
1605 	ring_params->msi_data = (reg_msi_grp_num % msi_data_count)
1606 		+ msi_data_start;
1607 	ring_params->flags |= HAL_SRNG_MSI_INTR;
1608 
1609 	dp_debug("ring type %u ring_num %u msi->data %u msi_addr %llx",
1610 		 ring_type, ring_num, ring_params->msi_data,
1611 		 (uint64_t)ring_params->msi_addr);
1612 
1613 configure_msi2:
1614 	if (!nf_irq_support) {
1615 		dp_srng_set_msi2_ring_params(soc, ring_params, 0, 0);
1616 		return;
1617 	}
1618 
1619 	dp_srng_msi2_setup(soc, ring_params, ring_type, ring_num,
1620 			   nf_msi_grp_num);
1621 }
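/*
 * Worked example (assumed MSI assignment): if pld reports
 * msi_data_start = 32 with msi_data_count = 8 and the ring maps to
 * reg_msi_grp_num = 10, dp_srng_msi_setup() above programs
 * msi_data = (10 % 8) + 32 = 34; since group 10 exceeds the 8
 * available MSIs, the "msi_groups will share an msi" warning also
 * fires.
 */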
1622 
1623 #ifdef FEATURE_AST
1624 /**
1625  * dp_print_peer_ast_entries() - Dump AST entries of peer
1626  * @soc: Datapath soc handle
1627  * @peer: Datapath peer
1628  * @arg: argument to iterate function
1629  *
1630  * return void
1631  */
1632 static void
1633 dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
1634 {
1635 	struct dp_ast_entry *ase, *tmp_ase;
1636 	uint32_t num_entries = 0;
1637 	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
1638 			"NONE", "STATIC", "SELF", "WDS", "HMWDS", "BSS",
1639 			"DA", "HMWDS_SEC"};
1640 
1641 	DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
1642 	    DP_PRINT_STATS("%6d mac_addr = "QDF_MAC_ADDR_FMT
1643 		    " peer_mac_addr = "QDF_MAC_ADDR_FMT
1644 		    " peer_id = %u"
1645 		    " type = %s"
1646 		    " next_hop = %d"
1647 		    " is_active = %d"
1648 		    " ast_idx = %d"
1649 		    " ast_hash = %d"
1650 		    " delete_in_progress = %d"
1651 		    " pdev_id = %d"
1652 		    " vdev_id = %d",
1653 		    ++num_entries,
1654 		    QDF_MAC_ADDR_REF(ase->mac_addr.raw),
1655 		    QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1656 		    ase->peer_id,
1657 		    type[ase->type],
1658 		    ase->next_hop,
1659 		    ase->is_active,
1660 		    ase->ast_idx,
1661 		    ase->ast_hash_value,
1662 		    ase->delete_in_progress,
1663 		    ase->pdev_id,
1664 		    ase->vdev_id);
1665 	}
1666 }
1667 
1668 /**
1669  * dp_print_ast_stats() - Dump AST table contents
1670  * @soc: Datapath soc handle
1671  *
1672  * return void
1673  */
1674 void dp_print_ast_stats(struct dp_soc *soc)
1675 {
1676 	DP_PRINT_STATS("AST Stats:");
1677 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
1678 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
1679 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
1680 	DP_PRINT_STATS("	Entries MAP ERR  = %d", soc->stats.ast.map_err);
1681 	DP_PRINT_STATS("	Entries Mismatch ERR  = %d",
1682 		       soc->stats.ast.ast_mismatch);
1683 
1684 	DP_PRINT_STATS("AST Table:");
1685 
1686 	qdf_spin_lock_bh(&soc->ast_lock);
1687 
1688 	dp_soc_iterate_peer(soc, dp_print_peer_ast_entries, NULL,
1689 			    DP_MOD_ID_GENERIC_STATS);
1690 
1691 	qdf_spin_unlock_bh(&soc->ast_lock);
1692 }
1693 #else
1694 void dp_print_ast_stats(struct dp_soc *soc)
1695 {
1696 	DP_PRINT_STATS("AST Stats not available. Enable FEATURE_AST");
1697 	return;
1698 }
1699 #endif
1700 
1701 /**
1702  * dp_print_peer_info() - Dump peer info
1703  * @soc: Datapath soc handle
1704  * @peer: Datapath peer handle
1705  * @arg: argument to iter function
1706  *
1707  * return void
1708  */
1709 static void
1710 dp_print_peer_info(struct dp_soc *soc, struct dp_peer *peer, void *arg)
1711 {
1712 	struct dp_txrx_peer *txrx_peer = NULL;
1713 
1714 	txrx_peer = dp_get_txrx_peer(peer);
1715 	if (!txrx_peer)
1716 		return;
1717 
1718 	DP_PRINT_STATS(" peer id = %d"
1719 		       " peer_mac_addr = "QDF_MAC_ADDR_FMT
1720 		       " nawds_enabled = %d"
1721 		       " bss_peer = %d"
1722 		       " wds_enabled = %d"
1723 		       " tx_cap_enabled = %d"
1724 		       " rx_cap_enabled = %d",
1725 		       peer->peer_id,
1726 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1727 		       txrx_peer->nawds_enabled,
1728 		       txrx_peer->bss_peer,
1729 		       txrx_peer->wds_enabled,
1730 		       peer->monitor_peer ?
1731 					peer->monitor_peer->tx_cap_enabled : 0,
1732 		       peer->monitor_peer ?
1733 					peer->monitor_peer->rx_cap_enabled : 0);
1734 }
1735 
1736 /**
1737  * dp_print_peer_table() - Dump all Peer stats
1738  * @vdev: Datapath Vdev handle
1739  *
1740  * return void
1741  */
1742 static void dp_print_peer_table(struct dp_vdev *vdev)
1743 {
1744 	DP_PRINT_STATS("Dumping Peer Table Stats:");
1745 	dp_vdev_iterate_peer(vdev, dp_print_peer_info, NULL,
1746 			     DP_MOD_ID_GENERIC_STATS);
1747 }
1748 
1749 #ifdef WLAN_DP_PER_RING_TYPE_CONFIG
1750 /**
1751  * dp_srng_configure_interrupt_thresholds() - Retrieve interrupt
1752  * threshold values from the wlan_srng_cfg table for each ring type
1753  * @soc: device handle
1754  * @ring_params: per ring specific parameters
1755  * @ring_type: Ring type
1756  * @ring_num: Ring number for a given ring type
1757  *
1758  * Fill the ring params with the interrupt threshold
1759  * configuration parameters available in the per ring type wlan_srng_cfg
1760  * table.
1761  *
1762  * Return: None
1763  */
1764 static void
1765 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
1766 				       struct hal_srng_params *ring_params,
1767 				       int ring_type, int ring_num,
1768 				       int num_entries)
1769 {
1770 	uint8_t wbm2_sw_rx_rel_ring_id;
1771 
1772 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);
1773 
1774 	if (ring_type == REO_DST) {
1775 		ring_params->intr_timer_thres_us =
1776 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1777 		ring_params->intr_batch_cntr_thres_entries =
1778 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1779 	} else if (ring_type == WBM2SW_RELEASE &&
1780 		   (ring_num == wbm2_sw_rx_rel_ring_id)) {
1781 		ring_params->intr_timer_thres_us =
1782 				wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1783 		ring_params->intr_batch_cntr_thres_entries =
1784 				wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1785 	} else {
1786 		ring_params->intr_timer_thres_us =
1787 				soc->wlan_srng_cfg[ring_type].timer_threshold;
1788 		ring_params->intr_batch_cntr_thres_entries =
1789 				soc->wlan_srng_cfg[ring_type].batch_count_threshold;
1790 	}
1791 	ring_params->low_threshold =
1792 			soc->wlan_srng_cfg[ring_type].low_threshold;
1793 	if (ring_params->low_threshold)
1794 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1795 
1796 	dp_srng_configure_nf_interrupt_thresholds(soc, ring_params, ring_type);
1797 }
1798 #else
1799 static void
1800 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
1801 				       struct hal_srng_params *ring_params,
1802 				       int ring_type, int ring_num,
1803 				       int num_entries)
1804 {
1805 	uint8_t wbm2_sw_rx_rel_ring_id;
1806 
1807 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);
1808 
1809 	if (ring_type == REO_DST) {
1810 		ring_params->intr_timer_thres_us =
1811 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1812 		ring_params->intr_batch_cntr_thres_entries =
1813 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1814 	} else if (ring_type == WBM2SW_RELEASE &&
1815 		   (ring_num < wbm2_sw_rx_rel_ring_id ||
1816 		   ring_num == WBM2SW_TXCOMP_RING4_NUM)) {
1817 		ring_params->intr_timer_thres_us =
1818 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
1819 		ring_params->intr_batch_cntr_thres_entries =
1820 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
1821 	} else {
1822 		ring_params->intr_timer_thres_us =
1823 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1824 		ring_params->intr_batch_cntr_thres_entries =
1825 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1826 	}
1827 
1828 	/* These rings do not require interrupts to the host; zero the thresholds */
1829 	switch (ring_type) {
1830 	case REO_REINJECT:
1831 	case REO_CMD:
1832 	case TCL_DATA:
1833 	case TCL_CMD_CREDIT:
1834 	case TCL_STATUS:
1835 	case WBM_IDLE_LINK:
1836 	case SW2WBM_RELEASE:
1837 	case PPE2TCL:
1838 	case SW2RXDMA_NEW:
1839 		ring_params->intr_timer_thres_us = 0;
1840 		ring_params->intr_batch_cntr_thres_entries = 0;
1841 		break;
1842 	}
1843 
1844 	/* Enable low threshold interrupts for rx buffer rings (regular and
1845 	 * monitor buffer rings).
1846 	 * TODO: See if this is required for any other ring
1847 	 */
1848 	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
1849 	    (ring_type == RXDMA_MONITOR_STATUS) ||
1850 	    (ring_type == TX_MONITOR_BUF)) {
1851 		/* TODO: Setting low threshold to 1/8th of ring size
1852 		 * see if this needs to be configurable
1853 		 */
1854 		ring_params->low_threshold = num_entries >> 3;
1855 		ring_params->intr_timer_thres_us =
1856 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1857 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1858 		ring_params->intr_batch_cntr_thres_entries = 0;
1859 	}
1860 
1861 	/* During initialisation monitor rings are only filled with
1862 	 * MON_BUF_MIN_ENTRIES entries. So low threshold needs to be set to
1863 	 * a value less than that. Low threshold value is reconfigured again
1864 	 * to 1/8th of the ring size when monitor vap is created.
1865 	 */
1866 	if (ring_type == RXDMA_MONITOR_BUF)
1867 		ring_params->low_threshold = MON_BUF_MIN_ENTRIES >> 1;
1868 
1869 	/* In case of PCI chipsets, we don't have PPDU end interrupts,
1870 	 * so the MONITOR STATUS ring is reaped by receiving MSI from srng.
1871 	 * Keep the batch threshold at 4 so that an interrupt is received
1872 	 * for every 4 entries posted to the MONITOR_STATUS ring
1873 	 */
1874 	if ((ring_type == RXDMA_MONITOR_STATUS) &&
1875 	    (soc->intr_mode == DP_INTR_MSI))
1876 		ring_params->intr_batch_cntr_thres_entries = 4;
1877 }
1878 #endif
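/*
 * Coalescing semantics of the thresholds programmed above (illustrative;
 * the numeric values below are assumptions, not driver defaults): an SRNG
 * raises an interrupt either when intr_batch_cntr_thres_entries new
 * entries have accumulated or when intr_timer_thres_us expires with at
 * least one entry pending, whichever happens first. For example, with
 *
 *	ring_params->intr_timer_thres_us = 500;
 *	ring_params->intr_batch_cntr_thres_entries = 64;
 *
 * a burst of 64 completions interrupts immediately, while a trickle of a
 * few completions is signalled after at most 500 us. Programming both
 * thresholds to zero, as done for the host-to-hardware command rings,
 * requests no completion interrupts at all for that ring.
 */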
1879 
1880 #ifdef DP_MEM_PRE_ALLOC
1881 
1882 void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
1883 			   size_t ctxt_size)
1884 {
1885 	void *ctxt_mem;
1886 
1887 	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_context) {
1888 		dp_warn("dp_prealloc_get_context null!");
1889 		goto dynamic_alloc;
1890 	}
1891 
1892 	ctxt_mem = soc->cdp_soc.ol_ops->dp_prealloc_get_context(ctxt_type);
1893 
1894 	if (ctxt_mem)
1895 		goto end;
1896 
1897 dynamic_alloc:
1898 	dp_info("Pre-alloc of ctxt failed, using dynamic allocation");
1899 	ctxt_mem = qdf_mem_malloc(ctxt_size);
1900 end:
1901 	return ctxt_mem;
1902 }
1903 
1904 void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
1905 			 void *vaddr)
1906 {
1907 	QDF_STATUS status;
1908 
1909 	if (soc->cdp_soc.ol_ops->dp_prealloc_put_context) {
1910 		status = soc->cdp_soc.ol_ops->dp_prealloc_put_context(
1911 								ctxt_type,
1912 								vaddr);
1913 	} else {
1914 		dp_warn("dp_prealloc_put_context null!");
1915 		status = QDF_STATUS_E_NOSUPPORT;
1916 	}
1917 
1918 	if (QDF_IS_STATUS_ERROR(status)) {
1919 		dp_info("Context not pre-allocated");
1920 		qdf_mem_free(vaddr);
1921 	}
1922 }
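/*
 * The two functions above implement a prealloc-first pattern: try the
 * platform's preallocated pool, fall back to dynamic allocation, and on
 * free hand the buffer back to whichever owner it came from. A minimal
 * sketch of the same pattern (prealloc_get/prealloc_put are hypothetical
 * helpers, not driver API):
 *
 *	void *buf = prealloc_get(type);
 *	if (!buf)
 *		buf = qdf_mem_malloc(size);	// dynamic fallback
 *	...
 *	if (QDF_IS_STATUS_ERROR(prealloc_put(type, buf)))
 *		qdf_mem_free(buf);		// was dynamically allocated
 *
 * The free path relies on the put callback rejecting pointers it does
 * not own, which is why dp_context_free_mem() only calls qdf_mem_free()
 * when the put returns an error status.
 */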
1923 
1924 static inline
1925 void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
1926 					   struct dp_srng *srng,
1927 					   uint32_t ring_type)
1928 {
1929 	void *mem;
1930 
1931 	qdf_assert(!srng->is_mem_prealloc);
1932 
1933 	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_consistent) {
1934 		dp_warn("dp_prealloc_get_consistent is null!");
1935 		goto qdf;
1936 	}
1937 
1938 	mem =
1939 		soc->cdp_soc.ol_ops->dp_prealloc_get_consistent
1940 						(&srng->alloc_size,
1941 						 &srng->base_vaddr_unaligned,
1942 						 &srng->base_paddr_unaligned,
1943 						 &srng->base_paddr_aligned,
1944 						 DP_RING_BASE_ALIGN, ring_type);
1945 
1946 	if (mem) {
1947 		srng->is_mem_prealloc = true;
1948 		goto end;
1949 	}
1950 qdf:
1951 	mem =  qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
1952 						&srng->base_vaddr_unaligned,
1953 						&srng->base_paddr_unaligned,
1954 						&srng->base_paddr_aligned,
1955 						DP_RING_BASE_ALIGN);
1956 end:
1957 	dp_info("%s memory %pK dp_srng %pK ring_type %d alloc_size %d num_entries %d",
1958 		srng->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", mem,
1959 		srng, ring_type, srng->alloc_size, srng->num_entries);
1960 	return mem;
1961 }
1962 
1963 static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
1964 					       struct dp_srng *srng)
1965 {
1966 	if (srng->is_mem_prealloc) {
1967 		if (!soc->cdp_soc.ol_ops->dp_prealloc_put_consistent) {
1968 			dp_warn("dp_prealloc_put_consistent is null!");
1969 			QDF_BUG(0);
1970 			return;
1971 		}
1972 		soc->cdp_soc.ol_ops->dp_prealloc_put_consistent
1973 						(srng->alloc_size,
1974 						 srng->base_vaddr_unaligned,
1975 						 srng->base_paddr_unaligned);
1976 
1977 	} else {
1978 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1979 					srng->alloc_size,
1980 					srng->base_vaddr_unaligned,
1981 					srng->base_paddr_unaligned, 0);
1982 	}
1983 }
1984 
1985 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
1986 				   enum dp_desc_type desc_type,
1987 				   struct qdf_mem_multi_page_t *pages,
1988 				   size_t element_size,
1989 				   uint16_t element_num,
1990 				   qdf_dma_context_t memctxt,
1991 				   bool cacheable)
1992 {
1993 	if (!soc->cdp_soc.ol_ops->dp_get_multi_pages) {
1994 		dp_warn("dp_get_multi_pages is null!");
1995 		goto qdf;
1996 	}
1997 
1998 	pages->num_pages = 0;
1999 	pages->is_mem_prealloc = 0;
2000 	soc->cdp_soc.ol_ops->dp_get_multi_pages(desc_type,
2001 						element_size,
2002 						element_num,
2003 						pages,
2004 						cacheable);
2005 	if (pages->num_pages)
2006 		goto end;
2007 
2008 qdf:
2009 	qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size,
2010 				  element_num, memctxt, cacheable);
2011 end:
2012 	dp_info("%s desc_type %d element_size %d element_num %d cacheable %d",
2013 		pages->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc",
2014 		desc_type, (int)element_size, element_num, cacheable);
2015 }
2016 
2017 void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
2018 				  enum dp_desc_type desc_type,
2019 				  struct qdf_mem_multi_page_t *pages,
2020 				  qdf_dma_context_t memctxt,
2021 				  bool cacheable)
2022 {
2023 	if (pages->is_mem_prealloc) {
2024 		if (!soc->cdp_soc.ol_ops->dp_put_multi_pages) {
2025 			dp_warn("dp_put_multi_pages is null!");
2026 			QDF_BUG(0);
2027 			return;
2028 		}
2029 
2030 		soc->cdp_soc.ol_ops->dp_put_multi_pages(desc_type, pages);
2031 		qdf_mem_zero(pages, sizeof(*pages));
2032 	} else {
2033 		qdf_mem_multi_pages_free(soc->osdev, pages,
2034 					 memctxt, cacheable);
2035 	}
2036 }
2037 
2038 #else
2039 
2040 static inline
2041 void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
2042 					   struct dp_srng *srng,
2043 					   uint32_t ring_type)
2044 
2045 {
2046 	void *mem;
2047 
2048 	mem = qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
2049 					       &srng->base_vaddr_unaligned,
2050 					       &srng->base_paddr_unaligned,
2051 					       &srng->base_paddr_aligned,
2052 					       DP_RING_BASE_ALIGN);
2053 	if (mem)
2054 		qdf_mem_set(srng->base_vaddr_unaligned, 0, srng->alloc_size);
2055 
2056 	return mem;
2057 }
2058 
2059 static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
2060 					       struct dp_srng *srng)
2061 {
2062 	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2063 				srng->alloc_size,
2064 				srng->base_vaddr_unaligned,
2065 				srng->base_paddr_unaligned, 0);
2066 }
2067 
2068 #endif /* DP_MEM_PRE_ALLOC */
2069 
2070 /*
2071  * dp_srng_free() - Free SRNG memory
2072  * @soc: Data path soc handle
2073  * @srng: SRNG pointer
2074  *
2075  * Return: None
2076  */
2077 void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng)
2078 {
2079 	if (srng->alloc_size && srng->base_vaddr_unaligned) {
2080 		if (!srng->cached) {
2081 			dp_srng_mem_free_consistent(soc, srng);
2082 		} else {
2083 			qdf_mem_free(srng->base_vaddr_unaligned);
2084 		}
2085 		srng->alloc_size = 0;
2086 		srng->base_vaddr_unaligned = NULL;
2087 	}
2088 	srng->hal_srng = NULL;
2089 }
2090 
2091 qdf_export_symbol(dp_srng_free);
2092 
2093 #ifdef DISABLE_MON_RING_MSI_CFG
2094 /*
2095  * dp_skip_msi_cfg() - Check if msi cfg has to be skipped for ring_type
 * @soc: DP SOC handle
2096  * @ring_type: srng type
2097  *
2098  * Return: True if msi cfg should be skipped for srng type else false
2099  */
2100 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
2101 {
2102 	if (ring_type == RXDMA_MONITOR_STATUS)
2103 		return true;
2104 
2105 	return false;
2106 }
2107 #else
2108 #ifdef DP_CON_MON_MSI_ENABLED
2109 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
2110 {
2111 	if (soc->cdp_soc.ol_ops->get_con_mode &&
2112 	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE) {
2113 		if (ring_type == REO_DST)
2114 			return true;
2115 	} else if (ring_type == RXDMA_MONITOR_STATUS) {
2116 		return true;
2117 	}
2118 
2119 	return false;
2120 }
2121 #else
2122 static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
2123 {
2124 	return false;
2125 }
2126 #endif /* DP_CON_MON_MSI_ENABLED */
2127 #endif /* DISABLE_MON_RING_MSI_CFG */
2128 
2129 /*
2130  * dp_srng_init() - Initialize SRNG
2131  * @soc: Data path soc handle
2132  * @srng: SRNG pointer
2133  * @ring_type: Ring Type
2134  * @ring_num: Ring number
2135  * @mac_id: mac id
2136  *
2137  * Return: QDF_STATUS
2138  */
2139 QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng,
2140 			int ring_type, int ring_num, int mac_id)
2141 {
2142 	hal_soc_handle_t hal_soc = soc->hal_soc;
2143 	struct hal_srng_params ring_params;
2144 
2145 	if (srng->hal_srng) {
2146 		dp_init_err("%pK: Ring type: %d, num:%d is already initialized",
2147 			    soc, ring_type, ring_num);
2148 		return QDF_STATUS_SUCCESS;
2149 	}
2150 
2151 	/* memset the srng ring to zero */
2152 	qdf_mem_zero(srng->base_vaddr_unaligned, srng->alloc_size);
2153 
2154 	qdf_mem_zero(&ring_params, sizeof(struct hal_srng_params));
2155 	ring_params.ring_base_paddr = srng->base_paddr_aligned;
2156 	ring_params.ring_base_vaddr = srng->base_vaddr_aligned;
2157 
2158 	ring_params.num_entries = srng->num_entries;
2159 
2160 	dp_info("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
2161 		ring_type, ring_num,
2162 		(void *)ring_params.ring_base_vaddr,
2163 		(void *)ring_params.ring_base_paddr,
2164 		ring_params.num_entries);
2165 
2166 	if (soc->intr_mode == DP_INTR_MSI && !dp_skip_msi_cfg(soc, ring_type)) {
2167 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
2168 		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
2169 				 ring_type, ring_num);
2170 	} else {
2171 		ring_params.msi_data = 0;
2172 		ring_params.msi_addr = 0;
2173 		dp_srng_set_msi2_ring_params(soc, &ring_params, 0, 0);
2174 		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
2175 				 ring_type, ring_num);
2176 	}
2177 
2178 	dp_srng_configure_interrupt_thresholds(soc, &ring_params,
2179 					       ring_type, ring_num,
2180 					       srng->num_entries);
2181 
2182 	dp_srng_set_nf_thresholds(soc, srng, &ring_params);
2183 
2184 	if (srng->cached)
2185 		ring_params.flags |= HAL_SRNG_CACHED_DESC;
2186 
2187 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
2188 					mac_id, &ring_params);
2189 
2190 	if (!srng->hal_srng) {
2191 		dp_srng_free(soc, srng);
2192 		return QDF_STATUS_E_FAILURE;
2193 	}
2194 
2195 	return QDF_STATUS_SUCCESS;
2196 }
2197 
2198 qdf_export_symbol(dp_srng_init);
2199 
2200 /*
2201  * dp_srng_alloc() - Allocate memory for SRNG
2202  * @soc: Data path soc handle
2203  * @srng: SRNG pointer
2204  * @ring_type: Ring Type
2205  * @num_entries: Number of entries
2206  * @cached: cached flag variable
2207  *
2208  * Return: QDF_STATUS
2209  */
2210 QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
2211 			 int ring_type, uint32_t num_entries,
2212 			 bool cached)
2213 {
2214 	hal_soc_handle_t hal_soc = soc->hal_soc;
2215 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
2216 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
2217 
2218 	if (srng->base_vaddr_unaligned) {
2219 		dp_init_err("%pK: Ring type: %d, is already allocated",
2220 			    soc, ring_type);
2221 		return QDF_STATUS_SUCCESS;
2222 	}
2223 
2224 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
2225 	srng->hal_srng = NULL;
2226 	srng->alloc_size = num_entries * entry_size;
2227 	srng->num_entries = num_entries;
2228 	srng->cached = cached;
2229 
2230 	if (!cached) {
2231 		srng->base_vaddr_aligned =
2232 		    dp_srng_aligned_mem_alloc_consistent(soc,
2233 							 srng,
2234 							 ring_type);
2235 	} else {
2236 		srng->base_vaddr_aligned = qdf_aligned_malloc(
2237 					&srng->alloc_size,
2238 					&srng->base_vaddr_unaligned,
2239 					&srng->base_paddr_unaligned,
2240 					&srng->base_paddr_aligned,
2241 					DP_RING_BASE_ALIGN);
2242 	}
2243 
2244 	if (!srng->base_vaddr_aligned)
2245 		return QDF_STATUS_E_NOMEM;
2246 
2247 	return QDF_STATUS_SUCCESS;
2248 }
2249 
2250 qdf_export_symbol(dp_srng_alloc);
2251 
2252 /*
2253  * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
2254  * @soc: DP SOC handle
2255  * @srng: source ring structure
2256  * @ring_type: type of ring
2257  * @ring_num: ring number
2258  *
2259  * Return: None
2260  */
2261 void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
2262 		    int ring_type, int ring_num)
2263 {
2264 	if (!srng->hal_srng) {
2265 		dp_init_err("%pK: Ring type: %d, num:%d not setup",
2266 			    soc, ring_type, ring_num);
2267 		return;
2268 	}
2269 
2270 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
2271 	srng->hal_srng = NULL;
2272 }
2273 
2274 qdf_export_symbol(dp_srng_deinit);
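/*
 * SRNG lifecycle sketch (illustrative): callers pair the four helpers
 * above in strict order, e.g. for a hypothetical REO destination ring:
 *
 *	if (dp_srng_alloc(soc, &srng, REO_DST, num_entries, false))
 *		goto fail;			// memory only, no HAL state
 *	if (dp_srng_init(soc, &srng, REO_DST, ring_num, 0))
 *		goto fail;	// on failure dp_srng_init() already freed
 *	...
 *	dp_srng_deinit(soc, &srng, REO_DST, ring_num);	// detach from HAL
 *	dp_srng_free(soc, &srng);			// release memory
 *
 * dp_srng_free() tolerates being called on an already-freed srng since
 * it checks alloc_size and base_vaddr_unaligned before releasing.
 */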
2275 
2276 /* TODO: Need this interface from HIF */
2277 void *hif_get_hal_handle(struct hif_opaque_softc *hif_handle);
2278 
2279 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
2280 int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
2281 			 hal_ring_handle_t hal_ring_hdl)
2282 {
2283 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2284 	uint32_t hp, tp;
2285 	uint8_t ring_id;
2286 
2287 	if (!int_ctx)
2288 		return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
2289 
2290 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
2291 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2292 
2293 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
2294 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_START);
2295 
2296 	return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
2297 }
2298 
2299 void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
2300 			hal_ring_handle_t hal_ring_hdl)
2301 {
2302 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2303 	uint32_t hp, tp;
2304 	uint8_t ring_id;
2305 
2306 	if (!int_ctx)
2307 		return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
2308 
2309 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
2310 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2311 
2312 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
2313 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_END);
2314 
2315 	return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
2316 }
2317 
2318 static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
2319 					      uint8_t hist_group_id)
2320 {
2321 	hif_record_event(dp_soc->hif_handle, hist_group_id,
2322 			 0, 0, 0, HIF_EVENT_TIMER_ENTRY);
2323 }
2324 
2325 static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
2326 					     uint8_t hist_group_id)
2327 {
2328 	hif_record_event(dp_soc->hif_handle, hist_group_id,
2329 			 0, 0, 0, HIF_EVENT_TIMER_EXIT);
2330 }
2331 #else
2332 
2333 static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
2334 					      uint8_t hist_group_id)
2335 {
2336 }
2337 
2338 static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
2339 					     uint8_t hist_group_id)
2340 {
2341 }
2342 
2343 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
2344 
2345 /*
2346  * dp_should_timer_irq_yield() - Decide if the bottom half should yield
2347  * @soc: DP soc handle
2348  * @work_done: work done in softirq context
2349  * @start_time: start time for the softirq
2350  *
2351  * Return: enum with yield code
2352  */
2353 enum timer_yield_status
2354 dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
2355 			  uint64_t start_time)
2356 {
2357 	uint64_t cur_time = qdf_get_log_timestamp();
2358 
2359 	if (!work_done)
2360 		return DP_TIMER_WORK_DONE;
2361 
2362 	if (cur_time - start_time > DP_MAX_TIMER_EXEC_TIME_TICKS)
2363 		return DP_TIMER_TIME_EXHAUST;
2364 
2365 	return DP_TIMER_NO_YIELD;
2366 }
2367 
2368 qdf_export_symbol(dp_should_timer_irq_yield);
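/*
 * Yield decision sketch (illustrative): the timer bottom half below
 * bounds its run time with this helper, roughly:
 *
 *	while (yield == DP_TIMER_NO_YIELD) {
 *		work = process_rings(quota);
 *		yield = dp_should_timer_irq_yield(soc, work, start_time);
 *	}
 *
 * DP_TIMER_WORK_DONE means nothing was pending, so the timer is re-armed
 * at the normal DP_INTR_POLL_TIMER_MS period; DP_TIMER_TIME_EXHAUST
 * means DP_MAX_TIMER_EXEC_TIME_TICKS elapsed and the timer is re-armed
 * at 1 ms to resume quickly.
 */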
2369 
2370 #ifdef DP_CON_MON_MSI_ENABLED
2371 static int dp_process_rxdma_dst_ring(struct dp_soc *soc,
2372 				     struct dp_intr *int_ctx,
2373 				     int mac_for_pdev,
2374 				     int total_budget)
2375 {
2376 	if (dp_soc_get_con_mode(soc) == QDF_GLOBAL_MONITOR_MODE)
2377 		return dp_monitor_process(soc, int_ctx, mac_for_pdev,
2378 					  total_budget);
2379 	else
2380 		return dp_rxdma_err_process(int_ctx, soc, mac_for_pdev,
2381 					    total_budget);
2382 }
2383 #else
2384 static int dp_process_rxdma_dst_ring(struct dp_soc *soc,
2385 				     struct dp_intr *int_ctx,
2386 				     int mac_for_pdev,
2387 				     int total_budget)
2388 {
2389 	return dp_rxdma_err_process(int_ctx, soc, mac_for_pdev,
2390 				    total_budget);
2391 }
2392 #endif
2393 
2394 /**
2395  * dp_process_lmac_rings() - Process LMAC rings
2396  * @int_ctx: interrupt context
2397  * @total_budget: budget of work which can be done
2398  *
2399  * Return: work done
2400  */
2401 static int dp_process_lmac_rings(struct dp_intr *int_ctx, int total_budget)
2402 {
2403 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2404 	struct dp_soc *soc = int_ctx->soc;
2405 	uint32_t remaining_quota = total_budget;
2406 	struct dp_pdev *pdev = NULL;
2407 	uint32_t work_done  = 0;
2408 	int budget = total_budget;
2409 	int ring = 0;
2410 
2411 	/* Process LMAC interrupts */
2412 	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
2413 		int mac_for_pdev = ring;
2414 
2415 		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
2416 		if (!pdev)
2417 			continue;
2418 		if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
2419 			work_done = dp_monitor_process(soc, int_ctx,
2420 						       mac_for_pdev,
2421 						       remaining_quota);
2422 			if (work_done)
2423 				intr_stats->num_rx_mon_ring_masks++;
2424 			budget -= work_done;
2425 			if (budget <= 0)
2426 				goto budget_done;
2427 			remaining_quota = budget;
2428 		}
2429 
2430 		if (int_ctx->tx_mon_ring_mask & (1 << mac_for_pdev)) {
2431 			work_done = dp_tx_mon_process(soc, int_ctx,
2432 						      mac_for_pdev,
2433 						      remaining_quota);
2434 			if (work_done)
2435 				intr_stats->num_tx_mon_ring_masks++;
2436 			budget -= work_done;
2437 			if (budget <= 0)
2438 				goto budget_done;
2439 			remaining_quota = budget;
2440 		}
2441 
2442 		if (int_ctx->rxdma2host_ring_mask &
2443 				(1 << mac_for_pdev)) {
2444 			work_done = dp_process_rxdma_dst_ring(soc, int_ctx,
2445 							      mac_for_pdev,
2446 							      remaining_quota);
2447 			if (work_done)
2448 				intr_stats->num_rxdma2host_ring_masks++;
2449 			budget -=  work_done;
2450 			if (budget <= 0)
2451 				goto budget_done;
2452 			remaining_quota = budget;
2453 		}
2454 
2455 		if (int_ctx->host2rxdma_ring_mask & (1 << mac_for_pdev)) {
2456 			union dp_rx_desc_list_elem_t *desc_list = NULL;
2457 			union dp_rx_desc_list_elem_t *tail = NULL;
2458 			struct dp_srng *rx_refill_buf_ring;
2459 			struct rx_desc_pool *rx_desc_pool;
2460 
2461 			rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
2462 			if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
2463 				rx_refill_buf_ring =
2464 					&soc->rx_refill_buf_ring[mac_for_pdev];
2465 			else
2466 				rx_refill_buf_ring =
2467 					&soc->rx_refill_buf_ring[pdev->lmac_id];
2468 
2469 			intr_stats->num_host2rxdma_ring_masks++;
2470 			dp_rx_buffers_lt_replenish_simple(soc, mac_for_pdev,
2471 							  rx_refill_buf_ring,
2472 							  rx_desc_pool,
2473 							  0,
2474 							  &desc_list,
2475 							  &tail);
2476 		}
2477 
2478 	}
2479 
2480 	if (int_ctx->host2rxdma_mon_ring_mask)
2481 		dp_rx_mon_buf_refill(int_ctx);
2482 
2483 	if (int_ctx->host2txmon_ring_mask)
2484 		dp_tx_mon_buf_refill(int_ctx);
2485 
2486 budget_done:
2487 	return total_budget - budget;
2488 }
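/*
 * Budget accounting sketch (illustrative): each ring handler above, and
 * each handler in dp_service_srngs() below, follows the same quota
 * pattern:
 *
 *	work_done = handler(..., remaining_quota);
 *	budget -= work_done;
 *	if (budget <= 0)
 *		goto budget_done;	// quota exhausted, stop early
 *	remaining_quota = budget;
 *
 * so the function finally returns total_budget - budget, i.e. the amount
 * of work actually done, letting the caller decide whether to re-poll.
 */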
2489 
2490 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
2491 /**
2492  * dp_service_near_full_srngs() - Bottom half handler to process the near
2493  *				full IRQ on a SRNG
2494  * @dp_ctx: Datapath interrupt context (struct dp_intr), passed as void *
2495  * @dp_budget: Number of SRNGs which can be processed in a single attempt
2496  *		without rescheduling
2497  *
2498  * Return: remaining budget/quota for the soc device
2499  */
2500 static uint32_t dp_service_near_full_srngs(void *dp_ctx, uint32_t dp_budget)
2501 {
2502 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2503 	struct dp_soc *soc = int_ctx->soc;
2504 
2505 	/*
2506 	 * dp_service_near_full_srngs arch ops should be initialized always
2507 	 * if the NEAR FULL IRQ feature is enabled.
2508 	 */
2509 	return soc->arch_ops.dp_service_near_full_srngs(soc, int_ctx,
2510 							dp_budget);
2511 }
2512 #endif
2513 
2514 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2515 
2516 /*
2517  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
2518  * @dp_ctx: DP interrupt context (struct dp_intr), passed as void *
2519  * @dp_budget: Number of frames/descriptors that can be processed in one shot
2520  *
2521  * Return: budget consumed, i.e. the amount of work actually done
2522  */
2523 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
2524 {
2525 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2526 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2527 	struct dp_soc *soc = int_ctx->soc;
2528 	int ring = 0;
2529 	int index;
2530 	uint32_t work_done  = 0;
2531 	int budget = dp_budget;
2532 	uint8_t tx_mask = int_ctx->tx_ring_mask;
2533 	uint8_t rx_mask = int_ctx->rx_ring_mask;
2534 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
2535 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
2536 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
2537 	uint32_t remaining_quota = dp_budget;
2538 
2539 	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x\n",
2540 			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
2541 			 reo_status_mask,
2542 			 int_ctx->rx_mon_ring_mask,
2543 			 int_ctx->host2rxdma_ring_mask,
2544 			 int_ctx->rxdma2host_ring_mask);
2545 
2546 	/* Process Tx completion interrupts first to return back buffers */
2547 	for (index = 0; index < soc->num_tx_comp_rings; index++) {
2548 		if (!(1 << wlan_cfg_get_wbm_ring_num_for_index(soc->wlan_cfg_ctx, index) & tx_mask))
2549 			continue;
2550 		work_done = dp_tx_comp_handler(int_ctx,
2551 					       soc,
2552 					       soc->tx_comp_ring[index].hal_srng,
2553 					       index, remaining_quota);
2554 		if (work_done) {
2555 			intr_stats->num_tx_ring_masks[index]++;
2556 			dp_verbose_debug("tx mask 0x%x index %d, budget %d, work_done %d",
2557 					 tx_mask, index, budget,
2558 					 work_done);
2559 		}
2560 		budget -= work_done;
2561 		if (budget <= 0)
2562 			goto budget_done;
2563 
2564 		remaining_quota = budget;
2565 	}
2566 
2567 	/* Process REO Exception ring interrupt */
2568 	if (rx_err_mask) {
2569 		work_done = dp_rx_err_process(int_ctx, soc,
2570 					      soc->reo_exception_ring.hal_srng,
2571 					      remaining_quota);
2572 
2573 		if (work_done) {
2574 			intr_stats->num_rx_err_ring_masks++;
2575 			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
2576 					 work_done, budget);
2577 		}
2578 
2579 		budget -=  work_done;
2580 		if (budget <= 0) {
2581 			goto budget_done;
2582 		}
2583 		remaining_quota = budget;
2584 	}
2585 
2586 	/* Process Rx WBM release ring interrupt */
2587 	if (rx_wbm_rel_mask) {
2588 		work_done = dp_rx_wbm_err_process(int_ctx, soc,
2589 						  soc->rx_rel_ring.hal_srng,
2590 						  remaining_quota);
2591 
2592 		if (work_done) {
2593 			intr_stats->num_rx_wbm_rel_ring_masks++;
2594 			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
2595 					 work_done, budget);
2596 		}
2597 
2598 		budget -=  work_done;
2599 		if (budget <= 0) {
2600 			goto budget_done;
2601 		}
2602 		remaining_quota = budget;
2603 	}
2604 
2605 	/* Process Rx interrupts */
2606 	if (rx_mask) {
2607 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
2608 			if (!(rx_mask & (1 << ring)))
2609 				continue;
2610 			work_done = soc->arch_ops.dp_rx_process(int_ctx,
2611 						  soc->reo_dest_ring[ring].hal_srng,
2612 						  ring,
2613 						  remaining_quota);
2614 			if (work_done) {
2615 				intr_stats->num_rx_ring_masks[ring]++;
2616 				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
2617 						 rx_mask, ring,
2618 						 work_done, budget);
2619 				budget -=  work_done;
2620 				if (budget <= 0)
2621 					goto budget_done;
2622 				remaining_quota = budget;
2623 			}
2624 		}
2625 	}
2626 
2627 	if (reo_status_mask) {
2628 		if (dp_reo_status_ring_handler(int_ctx, soc))
2629 			int_ctx->intr_stats.num_reo_status_ring_masks++;
2630 	}
2631 
2632 	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
2633 		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
2634 		if (work_done) {
2635 			budget -=  work_done;
2636 			if (budget <= 0)
2637 				goto budget_done;
2638 			remaining_quota = budget;
2639 		}
2640 	}
2641 
2642 	qdf_lro_flush(int_ctx->lro_ctx);
2643 	intr_stats->num_masks++;
2644 
2645 budget_done:
2646 	return dp_budget - budget;
2647 }
2648 
2649 #else /* QCA_HOST_MODE_WIFI_DISABLED */
2650 
2651 /*
2652  * dp_service_srngs() - Top level handler for DP Monitor Ring interrupts
2653  * @dp_ctx: DP SOC handle
2654  * @budget: Number of frames/descriptors that can be processed in one shot
2655  *
2656  * Return: remaining budget/quota for the soc device
2657  */
2658 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
2659 {
2660 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2661 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2662 	struct dp_soc *soc = int_ctx->soc;
2663 	uint32_t remaining_quota = dp_budget;
2664 	uint32_t work_done  = 0;
2665 	int budget = dp_budget;
2666 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
2667 
2668 	if (reo_status_mask) {
2669 		if (dp_reo_status_ring_handler(int_ctx, soc))
2670 			int_ctx->intr_stats.num_reo_status_ring_masks++;
2671 	}
2672 
2673 	if (qdf_unlikely(!dp_monitor_is_vdev_timer_running(soc))) {
2674 		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
2675 		if (work_done) {
2676 			budget -=  work_done;
2677 			if (budget <= 0)
2678 				goto budget_done;
2679 			remaining_quota = budget;
2680 		}
2681 	}
2682 
2683 	qdf_lro_flush(int_ctx->lro_ctx);
2684 	intr_stats->num_masks++;
2685 
2686 budget_done:
2687 	return dp_budget - budget;
2688 }
2689 
2690 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2691 
2692 /* dp_interrupt_timer() - timer poll for interrupts
2693  *
2694  * @arg: SoC Handle
2695  *
2696  * Return: None
2697  *
2698  */
2699 static void dp_interrupt_timer(void *arg)
2700 {
2701 	struct dp_soc *soc = (struct dp_soc *) arg;
2702 	struct dp_pdev *pdev = soc->pdev_list[0];
2703 	enum timer_yield_status yield = DP_TIMER_NO_YIELD;
2704 	uint32_t work_done  = 0, total_work_done = 0;
2705 	int budget = 0xffff, i;
2706 	uint32_t remaining_quota = budget;
2707 	uint64_t start_time;
2708 	uint32_t lmac_id = DP_MON_INVALID_LMAC_ID;
2709 	uint8_t dp_intr_id = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
2710 	uint32_t lmac_iter;
2711 	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
2712 	enum reg_wifi_band mon_band;
2713 
2714 	/*
2715 	 * this logic puts all data path interfacing rings (UMAC/LMAC)
2716 	 * and monitor rings into polling mode when NSS offload is disabled
2717 	 */
2718 	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx) &&
2719 	    !wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
2720 		if (qdf_atomic_read(&soc->cmn_init_done)) {
2721 			for (i = 0; i < wlan_cfg_get_num_contexts(
2722 						soc->wlan_cfg_ctx); i++)
2723 				dp_service_srngs(&soc->intr_ctx[i], 0xffff);
2724 
2725 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
2726 		}
2727 		return;
2728 	}
2729 
2730 	if (!qdf_atomic_read(&soc->cmn_init_done))
2731 		return;
2732 
2733 	if (dp_monitor_is_chan_band_known(pdev)) {
2734 		mon_band = dp_monitor_get_chan_band(pdev);
2735 		lmac_id = pdev->ch_band_lmac_id_mapping[mon_band];
2736 		if (qdf_likely(lmac_id != DP_MON_INVALID_LMAC_ID)) {
2737 			dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id];
2738 			dp_srng_record_timer_entry(soc, dp_intr_id);
2739 		}
2740 	}
2741 
2742 	start_time = qdf_get_log_timestamp();
2743 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
2744 
2745 	while (yield == DP_TIMER_NO_YIELD) {
2746 		for (lmac_iter = 0; lmac_iter < max_mac_rings; lmac_iter++) {
2747 			if (lmac_iter == lmac_id)
2748 				work_done = dp_monitor_process(soc,
2749 						&soc->intr_ctx[dp_intr_id],
2750 						lmac_iter, remaining_quota);
2751 			else
2752 				work_done =
2753 					dp_monitor_drop_packets_for_mac(pdev,
2754 							     lmac_iter,
2755 							     remaining_quota);
2756 			if (work_done) {
2757 				budget -=  work_done;
2758 				if (budget <= 0) {
2759 					yield = DP_TIMER_WORK_EXHAUST;
2760 					goto budget_done;
2761 				}
2762 				remaining_quota = budget;
2763 				total_work_done += work_done;
2764 			}
2765 		}
2766 
2767 		yield = dp_should_timer_irq_yield(soc, total_work_done,
2768 						  start_time);
2769 		total_work_done = 0;
2770 	}
2771 
2772 budget_done:
2773 	if (yield == DP_TIMER_WORK_EXHAUST ||
2774 	    yield == DP_TIMER_TIME_EXHAUST)
2775 		qdf_timer_mod(&soc->int_timer, 1);
2776 	else
2777 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
2778 
2779 	if (lmac_id != DP_MON_INVALID_LMAC_ID)
2780 		dp_srng_record_timer_exit(soc, dp_intr_id);
2781 }
2782 
2783 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
2784 static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
2785 					struct dp_intr *intr_ctx)
2786 {
2787 	if (intr_ctx->rx_mon_ring_mask)
2788 		return true;
2789 
2790 	return false;
2791 }
2792 #else
2793 static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
2794 					struct dp_intr *intr_ctx)
2795 {
2796 	return false;
2797 }
2798 #endif
2799 
2800 /*
2801  * dp_soc_attach_poll() - Register handlers for DP interrupts
2802  * @txrx_soc: DP SOC handle
2803  *
2804  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
2805  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
2806  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
2807  *
2808  * Return: 0 for success, nonzero for failure.
2809  */
2810 static QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc)
2811 {
2812 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2813 	int i;
2814 	int lmac_id = 0;
2815 
2816 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
2817 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
2818 	soc->intr_mode = DP_INTR_POLL;
2819 
2820 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
2821 		soc->intr_ctx[i].dp_intr_id = i;
2822 		soc->intr_ctx[i].tx_ring_mask =
2823 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
2824 		soc->intr_ctx[i].rx_ring_mask =
2825 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
2826 		soc->intr_ctx[i].rx_mon_ring_mask =
2827 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
2828 		soc->intr_ctx[i].rx_err_ring_mask =
2829 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
2830 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
2831 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
2832 		soc->intr_ctx[i].reo_status_ring_mask =
2833 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
2834 		soc->intr_ctx[i].rxdma2host_ring_mask =
2835 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
2836 		soc->intr_ctx[i].soc = soc;
2837 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
2838 
2839 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
2840 			hif_event_history_init(soc->hif_handle, i);
2841 			soc->mon_intr_id_lmac_map[lmac_id] = i;
2842 			lmac_id++;
2843 		}
2844 	}
2845 
2846 	qdf_timer_init(soc->osdev, &soc->int_timer,
2847 			dp_interrupt_timer, (void *)soc,
2848 			QDF_TIMER_TYPE_WAKE_APPS);
2849 
2850 	return QDF_STATUS_SUCCESS;
2851 }
2852 
2853 /**
2854  * dp_soc_set_interrupt_mode() - Set the interrupt mode in soc
2855  * @soc: DP soc handle
2856  *
2857  * Set the appropriate interrupt mode flag in the soc
2858  */
2859 static void dp_soc_set_interrupt_mode(struct dp_soc *soc)
2860 {
2861 	uint32_t msi_base_data, msi_vector_start;
2862 	int msi_vector_count, ret;
2863 
2864 	soc->intr_mode = DP_INTR_INTEGRATED;
2865 
2866 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
2867 	    (dp_is_monitor_mode_using_poll(soc) &&
2868 	     soc->cdp_soc.ol_ops->get_con_mode &&
2869 	     soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)) {
2870 		soc->intr_mode = DP_INTR_POLL;
2871 	} else {
2872 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
2873 						  &msi_vector_count,
2874 						  &msi_base_data,
2875 						  &msi_vector_start);
2876 		if (ret)
2877 			return;
2878 
2879 		soc->intr_mode = DP_INTR_MSI;
2880 	}
2881 }
2882 
2883 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc);
2884 #if defined(DP_INTR_POLL_BOTH)
2885 /*
2886  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
2887  * @txrx_soc: DP SOC handle
2888  *
2889  * Call the appropriate attach function based on the mode of operation.
2890  * This is a WAR for enabling monitor mode.
2891  *
2892  * Return: 0 for success. nonzero for failure.
2893  */
2894 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
2895 {
2896 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2897 
2898 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
2899 	    (dp_is_monitor_mode_using_poll(soc) &&
2900 	     soc->cdp_soc.ol_ops->get_con_mode &&
2901 	     soc->cdp_soc.ol_ops->get_con_mode() ==
2902 	     QDF_GLOBAL_MONITOR_MODE)) {
2903 		dp_info("Poll mode");
2904 		return dp_soc_attach_poll(txrx_soc);
2905 	} else {
2906 		dp_info("Interrupt mode");
2907 		return dp_soc_interrupt_attach(txrx_soc);
2908 	}
2909 }
2910 #else
2911 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
2912 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
2913 {
2914 	return dp_soc_attach_poll(txrx_soc);
2915 }
2916 #else
2917 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
2918 {
2919 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2920 
2921 	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx))
2922 		return dp_soc_attach_poll(txrx_soc);
2923 	else
2924 		return dp_soc_interrupt_attach(txrx_soc);
2925 }
2926 #endif
2927 #endif
2928 
2929 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
2930 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
2931 {
2932 	int j;
2933 	int num_irq = 0;
2934 
2935 	int tx_mask =
2936 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
2937 	int rx_mask =
2938 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
2939 	int rx_mon_mask =
2940 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
2941 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
2942 					soc->wlan_cfg_ctx, intr_ctx_num);
2943 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
2944 					soc->wlan_cfg_ctx, intr_ctx_num);
2945 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
2946 					soc->wlan_cfg_ctx, intr_ctx_num);
2947 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
2948 					soc->wlan_cfg_ctx, intr_ctx_num);
2949 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
2950 					soc->wlan_cfg_ctx, intr_ctx_num);
2951 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
2952 					soc->wlan_cfg_ctx, intr_ctx_num);
2953 
2954 	soc->intr_mode = DP_INTR_INTEGRATED;
2955 
2956 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
2957 
2958 		if (tx_mask & (1 << j)) {
2959 			irq_id_map[num_irq++] =
2960 				(wbm2host_tx_completions_ring1 - j);
2961 		}
2962 
2963 		if (rx_mask & (1 << j)) {
2964 			irq_id_map[num_irq++] =
2965 				(reo2host_destination_ring1 - j);
2966 		}
2967 
2968 		if (rxdma2host_ring_mask & (1 << j)) {
2969 			irq_id_map[num_irq++] =
2970 				rxdma2host_destination_ring_mac1 - j;
2971 		}
2972 
2973 		if (host2rxdma_ring_mask & (1 << j)) {
2974 			irq_id_map[num_irq++] =
2975 				host2rxdma_host_buf_ring_mac1 -	j;
2976 		}
2977 
2978 		if (host2rxdma_mon_ring_mask & (1 << j)) {
2979 			irq_id_map[num_irq++] =
2980 				host2rxdma_monitor_ring1 - j;
2981 		}
2982 
2983 		if (rx_mon_mask & (1 << j)) {
2984 			irq_id_map[num_irq++] =
2985 				ppdu_end_interrupts_mac1 - j;
2986 			irq_id_map[num_irq++] =
2987 				rxdma2host_monitor_status_ring_mac1 - j;
2988 			irq_id_map[num_irq++] =
2989 				rxdma2host_monitor_destination_mac1 - j;
2990 		}
2991 
2992 		if (rx_wbm_rel_ring_mask & (1 << j))
2993 			irq_id_map[num_irq++] = wbm2host_rx_release;
2994 
2995 		if (rx_err_ring_mask & (1 << j))
2996 			irq_id_map[num_irq++] = reo2host_exception;
2997 
2998 		if (reo_status_ring_mask & (1 << j))
2999 			irq_id_map[num_irq++] = reo2host_status;
3000 
3001 	}
3002 	*num_irq_r = num_irq;
3003 }
3004 
3005 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
3006 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
3007 		int msi_vector_count, int msi_vector_start)
3008 {
3009 	int tx_mask = wlan_cfg_get_tx_ring_mask(
3010 					soc->wlan_cfg_ctx, intr_ctx_num);
3011 	int rx_mask = wlan_cfg_get_rx_ring_mask(
3012 					soc->wlan_cfg_ctx, intr_ctx_num);
3013 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
3014 					soc->wlan_cfg_ctx, intr_ctx_num);
3015 	int tx_mon_mask = wlan_cfg_get_tx_mon_ring_mask(
3016 					soc->wlan_cfg_ctx, intr_ctx_num);
3017 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
3018 					soc->wlan_cfg_ctx, intr_ctx_num);
3019 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
3020 					soc->wlan_cfg_ctx, intr_ctx_num);
3021 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
3022 					soc->wlan_cfg_ctx, intr_ctx_num);
3023 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
3024 					soc->wlan_cfg_ctx, intr_ctx_num);
3025 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
3026 					soc->wlan_cfg_ctx, intr_ctx_num);
3027 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
3028 					soc->wlan_cfg_ctx, intr_ctx_num);
3029 	int rx_near_full_grp_1_mask =
3030 		wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
3031 						     intr_ctx_num);
3032 	int rx_near_full_grp_2_mask =
3033 		wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
3034 						     intr_ctx_num);
3035 	int tx_ring_near_full_mask =
3036 		wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
3037 						    intr_ctx_num);
3038 
3039 	int host2txmon_ring_mask =
3040 		wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx,
3041 						  intr_ctx_num);
3042 	unsigned int vector =
3043 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
3044 	int num_irq = 0;
3045 
3046 	soc->intr_mode = DP_INTR_MSI;
3047 
3048 	if (tx_mask | rx_mask | rx_mon_mask | tx_mon_mask | rx_err_ring_mask |
3049 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask |
3050 	    host2rxdma_ring_mask | host2rxdma_mon_ring_mask |
3051 	    rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
3052 	    tx_ring_near_full_mask | host2txmon_ring_mask)
3053 		irq_id_map[num_irq++] =
3054 			pld_get_msi_irq(soc->osdev->dev, vector);
3055 
3056 	*num_irq_r = num_irq;
3057 }
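/*
 * MSI spreading sketch (illustrative; the grant values are assumptions):
 * interrupt contexts are distributed round-robin over the MSI vectors
 * granted by the platform. With msi_vector_count = 3 and
 * msi_vector_start = 1:
 *
 *	intr_ctx_num: 0  1  2  3  4  5 ...
 *	vector:       1  2  3  1  2  3 ...
 *
 * per vector = (intr_ctx_num % msi_vector_count) + msi_vector_start,
 * so all ring masks belonging to one context share a single IRQ line.
 */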
3058 
3059 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
3060 				    int *irq_id_map, int *num_irq)
3061 {
3062 	int msi_vector_count, ret;
3063 	uint32_t msi_base_data, msi_vector_start;
3064 
3065 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
3066 					    &msi_vector_count,
3067 					    &msi_base_data,
3068 					    &msi_vector_start);
3069 	if (ret)
3070 		return dp_soc_interrupt_map_calculate_integrated(soc,
3071 				intr_ctx_num, irq_id_map, num_irq);
3072 
3073 	else
3074 		dp_soc_interrupt_map_calculate_msi(soc,
3075 				intr_ctx_num, irq_id_map, num_irq,
3076 				msi_vector_count, msi_vector_start);
3077 }
3078 
3079 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
3080 /**
3081  * dp_soc_near_full_interrupt_attach() - Register handler for DP near full irq
3082  * @soc: DP soc handle
3083  * @num_irq: number of IRQs in @irq_id_map
3084  * @irq_id_map: IRQ map
3085  * @intr_id: interrupt context ID
3086  *
3087  * Return: 0 for success. nonzero for failure.
3088  */
3089 static inline int
3090 dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
3091 				  int irq_id_map[], int intr_id)
3092 {
3093 	return hif_register_ext_group(soc->hif_handle,
3094 				      num_irq, irq_id_map,
3095 				      dp_service_near_full_srngs,
3096 				      &soc->intr_ctx[intr_id], "dp_nf_intr",
3097 				      HIF_EXEC_NAPI_TYPE,
3098 				      QCA_NAPI_DEF_SCALE_BIN_SHIFT);
3099 }
3100 #else
3101 static inline int
3102 dp_soc_near_full_interrupt_attach(struct dp_soc *soc, int num_irq,
3103 				  int *irq_id_map, int intr_id)
3104 {
3105 	return 0;
3106 }
3107 #endif
3108 
3109 /*
3110  * dp_soc_interrupt_detach() - Detach interrupt handlers and free any resources allocated for interrupts
3111  * @txrx_soc: DP SOC handle
3112  *
3113  * Return: none
3114  */
3115 static void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
3116 {
3117 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3118 	int i;
3119 
3120 	if (soc->intr_mode == DP_INTR_POLL) {
3121 		qdf_timer_free(&soc->int_timer);
3122 	} else {
3123 		hif_deconfigure_ext_group_interrupts(soc->hif_handle);
3124 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
3125 		hif_deregister_exec_group(soc->hif_handle, "dp_nf_intr");
3126 	}
3127 
3128 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
3129 		soc->intr_ctx[i].tx_ring_mask = 0;
3130 		soc->intr_ctx[i].rx_ring_mask = 0;
3131 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
3132 		soc->intr_ctx[i].rx_err_ring_mask = 0;
3133 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
3134 		soc->intr_ctx[i].reo_status_ring_mask = 0;
3135 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
3136 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
3137 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
3138 		soc->intr_ctx[i].rx_near_full_grp_1_mask = 0;
3139 		soc->intr_ctx[i].rx_near_full_grp_2_mask = 0;
3140 		soc->intr_ctx[i].tx_ring_near_full_mask = 0;
3141 		soc->intr_ctx[i].tx_mon_ring_mask = 0;
3142 		soc->intr_ctx[i].host2txmon_ring_mask = 0;
3143 
3144 		hif_event_history_deinit(soc->hif_handle, i);
3145 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
3146 	}
3147 
3148 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
3149 		    sizeof(soc->mon_intr_id_lmac_map),
3150 		    DP_MON_INVALID_LMAC_ID);
3151 }
3152 
3153 /*
3154  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
3155  * @txrx_soc: DP SOC handle
3156  *
3157  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
3158  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
3159  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
3160  *
3161  * Return: 0 for success. nonzero for failure.
3162  */
3163 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
3164 {
3165 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3166 
3167 	int i = 0;
3168 	int num_irq = 0;
3169 	int rx_err_ring_intr_ctxt_id = HIF_MAX_GROUP;
3170 	int lmac_id = 0;
3171 
3172 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
3173 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
3174 
3175 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
3176 		int ret = 0;
3177 
3178 		/* Map of IRQ ids registered with one interrupt context */
3179 		int irq_id_map[HIF_MAX_GRP_IRQ];
3180 
3181 		int tx_mask =
3182 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
3183 		int rx_mask =
3184 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
3185 		int rx_mon_mask =
3186 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
3187 		int tx_mon_ring_mask =
3188 			wlan_cfg_get_tx_mon_ring_mask(soc->wlan_cfg_ctx, i);
3189 		int rx_err_ring_mask =
3190 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
3191 		int rx_wbm_rel_ring_mask =
3192 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
3193 		int reo_status_ring_mask =
3194 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
3195 		int rxdma2host_ring_mask =
3196 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
3197 		int host2rxdma_ring_mask =
3198 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
3199 		int host2rxdma_mon_ring_mask =
3200 			wlan_cfg_get_host2rxdma_mon_ring_mask(
3201 				soc->wlan_cfg_ctx, i);
3202 		int rx_near_full_grp_1_mask =
3203 			wlan_cfg_get_rx_near_full_grp_1_mask(soc->wlan_cfg_ctx,
3204 							     i);
3205 		int rx_near_full_grp_2_mask =
3206 			wlan_cfg_get_rx_near_full_grp_2_mask(soc->wlan_cfg_ctx,
3207 							     i);
3208 		int tx_ring_near_full_mask =
3209 			wlan_cfg_get_tx_ring_near_full_mask(soc->wlan_cfg_ctx,
3210 							    i);
3211 		int host2txmon_ring_mask =
3212 			wlan_cfg_get_host2txmon_ring_mask(soc->wlan_cfg_ctx, i);
3213 
3214 		soc->intr_ctx[i].dp_intr_id = i;
3215 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
3216 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
3217 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
3218 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
3219 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
3220 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
3221 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
3222 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
3223 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
3224 			 host2rxdma_mon_ring_mask;
3225 		soc->intr_ctx[i].rx_near_full_grp_1_mask =
3226 						rx_near_full_grp_1_mask;
3227 		soc->intr_ctx[i].rx_near_full_grp_2_mask =
3228 						rx_near_full_grp_2_mask;
3229 		soc->intr_ctx[i].tx_ring_near_full_mask =
3230 						tx_ring_near_full_mask;
3231 		soc->intr_ctx[i].tx_mon_ring_mask = tx_mon_ring_mask;
3232 		soc->intr_ctx[i].host2txmon_ring_mask = host2txmon_ring_mask;
3233 
3234 		soc->intr_ctx[i].soc = soc;
3235 
3236 		num_irq = 0;
3237 
3238 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
3239 					       &num_irq);
3240 
3241 		if (rx_near_full_grp_1_mask | rx_near_full_grp_2_mask |
3242 		    tx_ring_near_full_mask) {
3243 			dp_soc_near_full_interrupt_attach(soc, num_irq,
3244 							  irq_id_map, i);
3245 		} else {
3246 			ret = hif_register_ext_group(soc->hif_handle,
3247 				num_irq, irq_id_map, dp_service_srngs,
3248 				&soc->intr_ctx[i], "dp_intr",
3249 				HIF_EXEC_NAPI_TYPE,
3250 				QCA_NAPI_DEF_SCALE_BIN_SHIFT);
3251 		}
3252 
3253 		dp_debug(" int ctx %u num_irq %u irq_id_map %u %u",
3254 			 i, num_irq, irq_id_map[0], irq_id_map[1]);
3255 
3256 		if (ret) {
3257 			dp_init_err("%pK: failed, ret = %d", soc, ret);
3258 			dp_soc_interrupt_detach(txrx_soc);
3259 			return QDF_STATUS_E_FAILURE;
3260 		}
3261 
3262 		hif_event_history_init(soc->hif_handle, i);
3263 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
3264 
3265 		if (rx_err_ring_mask)
3266 			rx_err_ring_intr_ctxt_id = i;
3267 
3268 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
3269 			soc->mon_intr_id_lmac_map[lmac_id] = i;
3270 			lmac_id++;
3271 		}
3272 	}
3273 
3274 	hif_configure_ext_group_interrupts(soc->hif_handle);
3275 	if (rx_err_ring_intr_ctxt_id != HIF_MAX_GROUP)
3276 		hif_config_irq_clear_cpu_affinity(soc->hif_handle,
3277 						  rx_err_ring_intr_ctxt_id, 0);
3278 
3279 	return QDF_STATUS_SUCCESS;
3280 }
3281 
3282 #define AVG_MAX_MPDUS_PER_TID 128
3283 #define AVG_TIDS_PER_CLIENT 2
3284 #define AVG_FLOWS_PER_TID 2
3285 #define AVG_MSDUS_PER_FLOW 128
3286 #define AVG_MSDUS_PER_MPDU 4
3287 
3288 /*
3289  * dp_hw_link_desc_pool_banks_free() - Free h/w link desc pool banks
3290  * @soc: DP SOC handle
3291  * @mac_id: mac id
3292  *
3293  * Return: none
3294  */
3295 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id)
3296 {
3297 	struct qdf_mem_multi_page_t *pages;
3298 
3299 	if (mac_id != WLAN_INVALID_PDEV_ID) {
3300 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3301 	} else {
3302 		pages = &soc->link_desc_pages;
3303 	}
3304 
3305 	if (!pages) {
3306 		dp_err("can not get link desc pages");
3307 		QDF_ASSERT(0);
3308 		return;
3309 	}
3310 
3311 	if (pages->dma_pages) {
3312 		wlan_minidump_remove((void *)
3313 				     pages->dma_pages->page_v_addr_start,
3314 				     pages->num_pages * pages->page_size,
3315 				     soc->ctrl_psoc,
3316 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3317 				     "hw_link_desc_bank");
3318 		dp_desc_multi_pages_mem_free(soc, DP_HW_LINK_DESC_TYPE,
3319 					     pages, 0, false);
3320 	}
3321 }
3322 
3323 qdf_export_symbol(dp_hw_link_desc_pool_banks_free);
3324 
3325 /*
3326  * dp_hw_link_desc_pool_banks_alloc() - Allocate h/w link desc pool banks
3327  * @soc: DP SOC handle
3328  * @mac_id: mac id
3329  *
3330  * Allocates memory pages for link descriptors; the page size is 4K for
3331  * MCL and 2MB for WIN. If the mac_id is invalid, link descriptor pages
3332  * are allocated for regular RX/TX, and if a valid mac_id is given, link
3333  * descriptors are allocated for RX monitor mode.
3334  *
3335  * Return: QDF_STATUS_SUCCESS: Success
3336  *	   QDF_STATUS_E_FAILURE: Failure
3337  */
3338 QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc, uint32_t mac_id)
3339 {
3340 	hal_soc_handle_t hal_soc = soc->hal_soc;
3341 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
3342 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
3343 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
3344 	uint32_t num_mpdus_per_link_desc = hal_num_mpdus_per_link_desc(hal_soc);
3345 	uint32_t num_msdus_per_link_desc = hal_num_msdus_per_link_desc(hal_soc);
3346 	uint32_t num_mpdu_links_per_queue_desc =
3347 		hal_num_mpdu_links_per_queue_desc(hal_soc);
3348 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
3349 	uint32_t *total_link_descs, total_mem_size;
3350 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
3351 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
3352 	uint32_t num_entries;
3353 	struct qdf_mem_multi_page_t *pages;
3354 	struct dp_srng *dp_srng;
3355 	uint8_t minidump_str[MINIDUMP_STR_SIZE];
3356 
3357 	/* Only Tx queue descriptors are allocated from the common link
3358 	 * descriptor pool. Rx queue descriptors (REO queue extension
3359 	 * descriptors) are not included here because they are expected to be
3360 	 * allocated contiguously with the REO queue descriptors
3361 	 */
3362 	if (mac_id != WLAN_INVALID_PDEV_ID) {
3363 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3364 		/* dp_monitor_get_link_desc_pages returns NULL only
3365 		 * if the monitor SOC is NULL
3366 		 */
3367 		if (!pages) {
3368 			dp_err("can not get link desc pages");
3369 			QDF_ASSERT(0);
3370 			return QDF_STATUS_E_FAULT;
3371 		}
3372 		dp_srng = &soc->rxdma_mon_desc_ring[mac_id];
3373 		num_entries = dp_srng->alloc_size /
3374 			hal_srng_get_entrysize(soc->hal_soc,
3375 					       RXDMA_MONITOR_DESC);
3376 		total_link_descs = dp_monitor_get_total_link_descs(soc, mac_id);
3377 		qdf_str_lcopy(minidump_str, "mon_link_desc_bank",
3378 			      MINIDUMP_STR_SIZE);
3379 	} else {
3380 		num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3381 			AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
3382 
3383 		num_mpdu_queue_descs = num_mpdu_link_descs /
3384 			num_mpdu_links_per_queue_desc;
3385 
3386 		num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3387 			AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
3388 			num_msdus_per_link_desc;
3389 
3390 		num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
3391 			AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
3392 
3393 		num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
3394 			num_tx_msdu_link_descs + num_rx_msdu_link_descs;
3395 
3396 		pages = &soc->link_desc_pages;
3397 		total_link_descs = &soc->total_link_descs;
3398 		qdf_str_lcopy(minidump_str, "link_desc_bank",
3399 			      MINIDUMP_STR_SIZE);
3400 	}
3401 
3402 	/* If link descriptor banks are allocated, return from here */
3403 	if (pages->num_pages)
3404 		return QDF_STATUS_SUCCESS;
3405 
3406 	/* Round up to power of 2 */
3407 	*total_link_descs = 1;
3408 	while (*total_link_descs < num_entries)
3409 		*total_link_descs <<= 1;
3410 
3411 	dp_init_info("%pK: total_link_descs: %u, link_desc_size: %d",
3412 		     soc, *total_link_descs, link_desc_size);
3413 	total_mem_size =  *total_link_descs * link_desc_size;
3414 	total_mem_size += link_desc_align;
3415 
3416 	dp_init_info("%pK: total_mem_size: %d",
3417 		     soc, total_mem_size);
3418 
3419 	dp_set_max_page_size(pages, max_alloc_size);
3420 	dp_desc_multi_pages_mem_alloc(soc, DP_HW_LINK_DESC_TYPE,
3421 				      pages,
3422 				      link_desc_size,
3423 				      *total_link_descs,
3424 				      0, false);
3425 	if (!pages->num_pages) {
3426 		dp_err("Multi page alloc fail for hw link desc pool");
3427 		return QDF_STATUS_E_FAULT;
3428 	}
3429 
3430 	wlan_minidump_log(pages->dma_pages->page_v_addr_start,
3431 			  pages->num_pages * pages->page_size,
3432 			  soc->ctrl_psoc,
3433 			  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3434 			  "hw_link_desc_bank");
3435 
3436 	return QDF_STATUS_SUCCESS;
3437 }
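/*
 * Worked sizing example for the regular (non-monitor) path above
 * (illustrative; max_clients and the per-descriptor capacities are
 * assumed values here, the real ones come from wlan_cfg and HAL):
 * with max_clients = 64, num_mpdus_per_link_desc = 6,
 * num_msdus_per_link_desc = 7, num_mpdu_links_per_queue_desc = 12:
 *
 *	num_mpdu_link_descs    = (64 * 2 * 128) / 6          = 2730
 *	num_mpdu_queue_descs   = 2730 / 12                   = 227
 *	num_tx_msdu_link_descs = (64 * 2 * 2 * 128) / 7      = 4681
 *	num_rx_msdu_link_descs = (64 * 2 * 128 * 4) / 6      = 10922
 *	num_entries            = 2730 + 227 + 4681 + 10922   = 18560
 *
 * which the power-of-two round-up then raises to *total_link_descs =
 * 32768 before the multi-page allocation is sized.
 */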
3438 
3439 /*
3440  * dp_hw_link_desc_ring_free() - Free h/w link desc rings
3441  * @soc: DP SOC handle
3442  *
3443  * Return: none
3444  */
3445 static void dp_hw_link_desc_ring_free(struct dp_soc *soc)
3446 {
3447 	uint32_t i;
3448 	uint32_t size = soc->wbm_idle_scatter_buf_size;
3449 	void *vaddr = soc->wbm_idle_link_ring.base_vaddr_unaligned;
3450 	qdf_dma_addr_t paddr;
3451 
3452 	if (soc->wbm_idle_scatter_buf_base_vaddr[0]) {
3453 		for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
3454 			vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
3455 			paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
3456 			if (vaddr) {
3457 				qdf_mem_free_consistent(soc->osdev,
3458 							soc->osdev->dev,
3459 							size,
3460 							vaddr,
3461 							paddr,
3462 							0);
3463 				vaddr = NULL;
3464 			}
3465 		}
3466 	} else {
3467 		wlan_minidump_remove(soc->wbm_idle_link_ring.base_vaddr_unaligned,
3468 				     soc->wbm_idle_link_ring.alloc_size,
3469 				     soc->ctrl_psoc,
3470 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3471 				     "wbm_idle_link_ring");
3472 		dp_srng_free(soc, &soc->wbm_idle_link_ring);
3473 	}
3474 }
3475 
3476 /*
3477  * dp_hw_link_desc_ring_alloc() - Allocate hw link desc rings
3478  * @soc: DP SOC handle
3479  *
3480  * Allocate memory for the WBM_IDLE_LINK srng ring if the total link
3481  * descriptor memory fits within the maximum allocation size; otherwise
3482  * allocate memory for the wbm_idle_scatter_buffer list.
3483  *
3484  * Return: QDF_STATUS_SUCCESS: success
3485  *         QDF_STATUS_E_NOMEM: No memory (Failure)
3486  */
3487 static QDF_STATUS dp_hw_link_desc_ring_alloc(struct dp_soc *soc)
3488 {
3489 	uint32_t entry_size, i;
3490 	uint32_t total_mem_size;
3491 	qdf_dma_addr_t *baseaddr = NULL;
3492 	struct dp_srng *dp_srng;
3493 	uint32_t ring_type;
3494 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
3495 	uint32_t tlds;
3496 
3497 	ring_type = WBM_IDLE_LINK;
3498 	dp_srng = &soc->wbm_idle_link_ring;
3499 	tlds = soc->total_link_descs;
3500 
3501 	entry_size = hal_srng_get_entrysize(soc->hal_soc, ring_type);
3502 	total_mem_size = entry_size * tlds;
3503 
3504 	if (total_mem_size <= max_alloc_size) {
3505 		if (dp_srng_alloc(soc, dp_srng, ring_type, tlds, 0)) {
3506 			dp_init_err("%pK: Link desc idle ring setup failed",
3507 				    soc);
3508 			goto fail;
3509 		}
3510 
3511 		wlan_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
3512 				  soc->wbm_idle_link_ring.alloc_size,
3513 				  soc->ctrl_psoc,
3514 				  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3515 				  "wbm_idle_link_ring");
3516 	} else {
3517 		uint32_t num_scatter_bufs;
3518 		uint32_t num_entries_per_buf;
3519 		uint32_t buf_size = 0;
3520 
3521 		soc->wbm_idle_scatter_buf_size =
3522 			hal_idle_list_scatter_buf_size(soc->hal_soc);
3523 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
3524 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
3525 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
3526 					soc->hal_soc, total_mem_size,
3527 					soc->wbm_idle_scatter_buf_size);
3528 
3529 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
3530 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3531 				  FL("scatter bufs size out of bounds"));
3532 			goto fail;
3533 		}
3534 
3535 		for (i = 0; i < num_scatter_bufs; i++) {
3536 			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
3537 			buf_size = soc->wbm_idle_scatter_buf_size;
3538 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
3539 				qdf_mem_alloc_consistent(soc->osdev,
3540 							 soc->osdev->dev,
3541 							 buf_size,
3542 							 baseaddr);
3543 
3544 			if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
3545 				QDF_TRACE(QDF_MODULE_ID_DP,
3546 					  QDF_TRACE_LEVEL_ERROR,
3547 					  FL("Scatter lst memory alloc fail"));
3548 				goto fail;
3549 			}
3550 		}
3551 		soc->num_scatter_bufs = num_scatter_bufs;
3552 	}
3553 	return QDF_STATUS_SUCCESS;
3554 
3555 fail:
3556 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
3557 		void *vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
3558 		qdf_dma_addr_t paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
3559 
3560 		if (vaddr) {
3561 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
3562 						soc->wbm_idle_scatter_buf_size,
3563 						vaddr,
3564 						paddr, 0);
3565 			vaddr = NULL;
3566 		}
3567 	}
3568 	return QDF_STATUS_E_NOMEM;
3569 }
3570 
3571 qdf_export_symbol(dp_hw_link_desc_pool_banks_alloc);
3572 
3573 /*
3574  * dp_hw_link_desc_ring_init() - Initialize hw link desc rings
3575  * @soc: DP SOC handle
3576  *
3577  * Return: QDF_STATUS_SUCCESS: success
3578  *         QDF_STATUS_E_FAILURE: failure
3579  */
3580 static QDF_STATUS dp_hw_link_desc_ring_init(struct dp_soc *soc)
3581 {
3582 	struct dp_srng *dp_srng = &soc->wbm_idle_link_ring;
3583 
3584 	if (dp_srng->base_vaddr_unaligned) {
3585 		if (dp_srng_init(soc, dp_srng, WBM_IDLE_LINK, 0, 0))
3586 			return QDF_STATUS_E_FAILURE;
3587 	}
3588 	return QDF_STATUS_SUCCESS;
3589 }
3590 
3591 /*
3592  * dp_hw_link_desc_ring_deinit() - Reset hw link desc rings
3593  * @soc: DP SOC handle
3594  *
3595  * Return: None
3596  */
3597 static void dp_hw_link_desc_ring_deinit(struct dp_soc *soc)
3598 {
3599 	dp_srng_deinit(soc, &soc->wbm_idle_link_ring, WBM_IDLE_LINK, 0);
3600 }
3601 
3602 /*
3603  * dp_link_desc_ring_replenish() - Replenish hw link desc rings
3604  * @soc: DP SOC handle
3605  * @mac_id: mac id
3606  *
3607  * Return: None
3608  */
3609 void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id)
3610 {
3611 	uint32_t cookie = 0;
3612 	uint32_t page_idx = 0;
3613 	struct qdf_mem_multi_page_t *pages;
3614 	struct qdf_mem_dma_page_t *dma_pages;
3615 	uint32_t offset = 0;
3616 	uint32_t count = 0;
3617 	uint32_t desc_id = 0;
3618 	void *desc_srng;
3619 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
3620 	uint32_t *total_link_descs_addr;
3621 	uint32_t total_link_descs;
3622 	uint32_t scatter_buf_num;
3623 	uint32_t num_entries_per_buf = 0;
3624 	uint32_t rem_entries;
3625 	uint32_t num_descs_per_page;
3626 	uint32_t num_scatter_bufs = 0;
3627 	uint8_t *scatter_buf_ptr;
3628 	void *desc;
3629 
3630 	num_scatter_bufs = soc->num_scatter_bufs;
3631 
3632 	if (mac_id == WLAN_INVALID_PDEV_ID) {
3633 		pages = &soc->link_desc_pages;
3634 		total_link_descs = soc->total_link_descs;
3635 		desc_srng = soc->wbm_idle_link_ring.hal_srng;
3636 	} else {
3637 		pages = dp_monitor_get_link_desc_pages(soc, mac_id);
3638 		/* dp_monitor_get_link_desc_pages returns NULL only
3639 		 * if the monitor SOC is NULL
3640 		 */
3641 		if (!pages) {
3642 			dp_err("can not get link desc pages");
3643 			QDF_ASSERT(0);
3644 			return;
3645 		}
3646 		total_link_descs_addr =
3647 				dp_monitor_get_total_link_descs(soc, mac_id);
3648 		total_link_descs = *total_link_descs_addr;
3649 		desc_srng = soc->rxdma_mon_desc_ring[mac_id].hal_srng;
3650 	}
3651 
3652 	dma_pages = pages->dma_pages;
3653 	do {
3654 		qdf_mem_zero(dma_pages[page_idx].page_v_addr_start,
3655 			     pages->page_size);
3656 		page_idx++;
3657 	} while (page_idx < pages->num_pages);
3658 
3659 	if (desc_srng) {
3660 		hal_srng_access_start_unlocked(soc->hal_soc, desc_srng);
3661 		page_idx = 0;
3662 		count = 0;
3663 		offset = 0;
3664 		pages = &soc->link_desc_pages;
3665 		while ((desc = hal_srng_src_get_next(soc->hal_soc,
3666 						     desc_srng)) &&
3667 			(count < total_link_descs)) {
3668 			page_idx = count / pages->num_element_per_page;
3669 			if (desc_id == pages->num_element_per_page)
3670 				desc_id = 0;
3671 
3672 			offset = count % pages->num_element_per_page;
3673 			cookie = LINK_DESC_COOKIE(desc_id, page_idx,
3674 						  soc->link_desc_id_start);
3675 
3676 			hal_set_link_desc_addr(soc->hal_soc, desc, cookie,
3677 					       dma_pages[page_idx].page_p_addr
3678 					       + (offset * link_desc_size),
3679 					       soc->idle_link_bm_id);
3680 			count++;
3681 			desc_id++;
3682 		}
3683 		hal_srng_access_end_unlocked(soc->hal_soc, desc_srng);
3684 	} else {
3685 		/* Populate idle list scatter buffers with link descriptor
3686 		 * pointers
3687 		 */
3688 		scatter_buf_num = 0;
3689 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
3690 					soc->hal_soc,
3691 					soc->wbm_idle_scatter_buf_size);
3692 
3693 		scatter_buf_ptr = (uint8_t *)(
3694 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
3695 		rem_entries = num_entries_per_buf;
3696 		pages = &soc->link_desc_pages;
3697 		page_idx = 0; count = 0;
3698 		offset = 0;
3699 		num_descs_per_page = pages->num_element_per_page;
3700 
3701 		while (count < total_link_descs) {
3702 			page_idx = count / num_descs_per_page;
3703 			offset = count % num_descs_per_page;
3704 			if (desc_id == pages->num_element_per_page)
3705 				desc_id = 0;
3706 
3707 			cookie = LINK_DESC_COOKIE(desc_id, page_idx,
3708 						  soc->link_desc_id_start);
3709 			hal_set_link_desc_addr(soc->hal_soc,
3710 					       (void *)scatter_buf_ptr,
3711 					       cookie,
3712 					       dma_pages[page_idx].page_p_addr +
3713 					       (offset * link_desc_size),
3714 					       soc->idle_link_bm_id);
3715 			rem_entries--;
3716 			if (rem_entries) {
3717 				scatter_buf_ptr += link_desc_size;
3718 			} else {
3719 				rem_entries = num_entries_per_buf;
3720 				scatter_buf_num++;
3721 				if (scatter_buf_num >= num_scatter_bufs)
3722 					break;
3723 				scatter_buf_ptr = (uint8_t *)
3724 					(soc->wbm_idle_scatter_buf_base_vaddr[
3725 					 scatter_buf_num]);
3726 			}
3727 			count++;
3728 			desc_id++;
3729 		}
3730 		/* Setup link descriptor idle list in HW */
3731 		hal_setup_link_idle_list(soc->hal_soc,
3732 			soc->wbm_idle_scatter_buf_base_paddr,
3733 			soc->wbm_idle_scatter_buf_base_vaddr,
3734 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
3735 			(uint32_t)(scatter_buf_ptr -
3736 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
3737 			scatter_buf_num-1])), total_link_descs);
3738 	}
3739 }
3740 
3741 qdf_export_symbol(dp_link_desc_ring_replenish);
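
/*
 * Illustrative usage (a sketch, assuming the usual init flow): passing
 * WLAN_INVALID_PDEV_ID replenishes the common pool (the WBM idle link
 * ring, or the scatter buffer list when the pool was too large for a
 * single ring), while a valid mac_id replenishes that LMAC's monitor
 * descriptor ring:
 *
 *	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
 *	dp_link_desc_ring_replenish(soc, mac_id);
 */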
3742 
3743 #ifdef IPA_OFFLOAD
3744 #define USE_1_IPA_RX_REO_RING 1
3745 #define USE_2_IPA_RX_REO_RINGS 2
3746 #define REO_DST_RING_SIZE_QCA6290 1023
3747 #ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
3748 #define REO_DST_RING_SIZE_QCA8074 1023
3749 #define REO_DST_RING_SIZE_QCN9000 2048
3750 #else
3751 #define REO_DST_RING_SIZE_QCA8074 8
3752 #define REO_DST_RING_SIZE_QCN9000 8
3753 #endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
3754 
3755 #ifdef IPA_WDI3_TX_TWO_PIPES
3756 #ifdef DP_MEMORY_OPT
3757 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
3758 {
3759 	return dp_init_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
3760 }
3761 
3762 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
3763 {
3764 	dp_deinit_tx_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
3765 }
3766 
3767 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
3768 {
3769 	return dp_alloc_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
3770 }
3771 
3772 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
3773 {
3774 	dp_free_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
3775 }
3776 
3777 #else /* !DP_MEMORY_OPT */
3778 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
3779 {
3780 	return 0;
3781 }
3782 
3783 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
3784 {
3785 }
3786 
3787 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
3788 {
3789 	return 0;
3790 }
3791 
3792 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
3793 {
3794 }
3795 #endif /* DP_MEMORY_OPT */
3796 
3797 static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
3798 {
3799 	hal_tx_init_data_ring(soc->hal_soc,
3800 			      soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng);
3801 }
3802 
3803 #else /* !IPA_WDI3_TX_TWO_PIPES */
3804 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
3805 {
3806 	return 0;
3807 }
3808 
3809 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
3810 {
3811 }
3812 
3813 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
3814 {
3815 	return 0;
3816 }
3817 
3818 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
3819 {
3820 }
3821 
3822 static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
3823 {
3824 }
3825 
3826 #endif /* IPA_WDI3_TX_TWO_PIPES */
3827 
3828 #else
3829 
3830 #define REO_DST_RING_SIZE_QCA6290 1024
3831 
3832 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
3833 {
3834 	return 0;
3835 }
3836 
3837 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
3838 {
3839 }
3840 
3841 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
3842 {
3843 	return 0;
3844 }
3845 
3846 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
3847 {
3848 }
3849 
3850 static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
3851 {
3852 }
3853 
3854 #endif /* IPA_OFFLOAD */
3855 
3856 /*
3857  * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
3858  * @soc: Datapath soc handle
3859  *
3860  * This API resets the Tx ring map to the default CPU ring map
3861  */
3862 
3863 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
3864 {
3865 	uint8_t i;
3866 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
3867 
3868 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
3869 		switch (nss_config) {
3870 		case dp_nss_cfg_first_radio:
3871 			/*
3872 			 * Setting Tx ring map for one nss offloaded radio
3873 			 */
3874 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
3875 			break;
3876 
3877 		case dp_nss_cfg_second_radio:
3878 			/*
3879 			 * Setting Tx ring map when only the second radio is nss offloaded
3880 			 */
3881 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
3882 			break;
3883 
3884 		case dp_nss_cfg_dbdc:
3885 			/*
3886 			 * Setting Tx ring map for 2 nss offloaded radios
3887 			 */
3888 			soc->tx_ring_map[i] =
3889 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
3890 			break;
3891 
3892 		case dp_nss_cfg_dbtc:
3893 			/*
3894 			 * Setting Tx ring map for 3 nss offloaded radios
3895 			 */
3896 			soc->tx_ring_map[i] =
3897 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
3898 			break;
3899 
3900 		default:
3901 			dp_err("tx_ring_map failed due to invalid nss cfg");
3902 			break;
3903 		}
3904 	}
3905 }
3906 
3907 /*
3908  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
3909  * @soc: DP soc handle
3910  * @ring_type: ring type
3911  * @ring_num: ring number
3912  *
3913  * Return: non-zero if the ring is offloaded to NSS, 0 otherwise
3914  */
3915 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
3916 {
3917 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
3918 	uint8_t status = 0;
3919 
3920 	switch (ring_type) {
3921 	case WBM2SW_RELEASE:
3922 	case REO_DST:
3923 	case RXDMA_BUF:
3924 	case REO_EXCEPTION:
3925 		status = ((nss_config) & (1 << ring_num));
3926 		break;
3927 	default:
3928 		break;
3929 	}
3930 
3931 	return status;
3932 }
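
/*
 * Example (a sketch, assuming an nss_config bitmask of 0x3, i.e. both
 * radios offloaded): REO_DST rings 0 and 1 report as offloaded while
 * ring 2 does not:
 *
 *	dp_soc_ring_if_nss_offloaded(soc, REO_DST, 0);	// non-zero
 *	dp_soc_ring_if_nss_offloaded(soc, REO_DST, 2);	// 0
 */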
3933 
3934 /*
3935  * dp_soc_disable_unused_mac_intr_mask() - reset interrupt mask for
3936  *					  unused WMAC hw rings
3937  * @soc: DP soc handle
3938  * @mac_num: WMAC number
3939  *
3940  * Return: none
3941  */
3942 static void dp_soc_disable_unused_mac_intr_mask(struct dp_soc *soc,
3943 						int mac_num)
3944 {
3945 	uint8_t *grp_mask = NULL;
3946 	int group_number;
3947 
3948 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
3949 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
3950 	wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
3951 					  group_number, 0x0);
3952 
3953 	grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
3954 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
3955 	wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
3956 				      group_number, 0x0);
3957 
3958 	grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
3959 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
3960 	wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
3961 					  group_number, 0x0);
3962 
3963 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
3964 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
3965 	wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
3966 					      group_number, 0x0);
3967 }
3968 
3969 /*
3970  * dp_soc_reset_intr_mask() - reset interrupt mask
3971  * @soc: DP soc handle
3972  *
3973  * Return: none
3974  */
3975 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
3976 {
3977 	uint8_t j;
3978 	uint8_t *grp_mask = NULL;
3979 	int group_number, mask, num_ring;
3980 
3981 	/* number of tx ring */
3982 	num_ring = soc->num_tcl_data_rings;
3983 
3984 	/*
3985 	 * group mask for tx completion ring.
3986 	 */
3987 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
3988 
3989 	/* loop and reset the mask for only offloaded ring */
3990 	for (j = 0; j < WLAN_CFG_NUM_TCL_DATA_RINGS; j++) {
3991 		/*
3992 		 * Group number corresponding to tx offloaded ring.
3993 		 */
3994 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
3995 		if (group_number < 0) {
3996 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
3997 				      soc, WBM2SW_RELEASE, j);
3998 			continue;
3999 		}
4000 
4001 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
4002 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j) &&
4003 		    (!mask)) {
4004 			continue;
4005 		}
4006 
4007 		/* reset the tx mask for offloaded ring */
4008 		mask &= (~(1 << j));
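		/* e.g. group mask 0x3 with ring j = 1 offloaded becomes 0x1 */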
4009 
4010 		/*
4011 		 * reset the interrupt mask for offloaded ring.
4012 		 */
4013 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
4014 	}
4015 
4016 	/* number of rx rings */
4017 	num_ring = soc->num_reo_dest_rings;
4018 
4019 	/*
4020 	 * group mask for reo destination ring.
4021 	 */
4022 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
4023 
4024 	/* loop and reset the mask for only offloaded ring */
4025 	for (j = 0; j < WLAN_CFG_NUM_REO_DEST_RING; j++) {
4026 		/*
4027 		 * Group number corresponding to rx offloaded ring.
4028 		 */
4029 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
4030 		if (group_number < 0) {
4031 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4032 				      soc, REO_DST, j);
4033 			continue;
4034 		}
4035 
4036 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
4037 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j) &&
4038 		    (!mask)) {
4039 			continue;
4040 		}
4041 
4042 		/* reset the interrupt mask for offloaded ring */
4043 		mask &= (~(1 << j));
4044 
4045 		/*
4046 		 * set the interrupt mask to zero for rx offloaded radio.
4047 		 */
4048 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
4049 	}
4050 
4051 	/*
4052 	 * group mask for Rx buffer refill ring
4053 	 */
4054 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
4055 
4056 	/* loop and reset the mask for only offloaded ring */
4057 	for (j = 0; j < MAX_PDEV_CNT; j++) {
4058 		int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
4059 
4060 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
4061 			continue;
4062 		}
4063 
4064 		/*
4065 		 * Group number corresponding to rx offloaded ring.
4066 		 */
4067 		group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask);
4068 		if (group_number < 0) {
4069 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4070 				      soc, RXDMA_BUF, lmac_id);
4071 			continue;
4072 		}
4073 
4074 		/* set the interrupt mask for offloaded ring */
4075 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
4076 				group_number);
4077 		mask &= (~(1 << lmac_id));
4078 
4079 		/*
4080 		 * set the interrupt mask to zero for rx offloaded radio.
4081 		 */
4082 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
4083 			group_number, mask);
4084 	}
4085 
4086 	grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
4087 
4088 	for (j = 0; j < num_ring; j++) {
4089 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_EXCEPTION, j)) {
4090 			continue;
4091 		}
4092 
4093 		/*
4094 		 * Group number corresponding to rx err ring.
4095 		 */
4096 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
4097 		if (group_number < 0) {
4098 			dp_init_debug("%pK: ring not part of any group; ring_type: %d,ring_num %d",
4099 				      soc, REO_EXCEPTION, j);
4100 			continue;
4101 		}
4102 
4103 		wlan_cfg_set_rx_err_ring_mask(soc->wlan_cfg_ctx,
4104 					      group_number, 0);
4105 	}
4106 }
4107 
4108 #ifdef IPA_OFFLOAD
4109 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap0,
4110 			 uint32_t *remap1, uint32_t *remap2)
4111 {
4112 	uint32_t ring[8] = {REO_REMAP_SW1, REO_REMAP_SW2, REO_REMAP_SW3};
4113 	int target_type;
4114 
4115 	target_type = hal_get_target_type(soc->hal_soc);
4116 
4117 	switch (target_type) {
4118 	case TARGET_TYPE_KIWI:
4119 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4120 					      soc->num_reo_dest_rings -
4121 					      USE_2_IPA_RX_REO_RINGS, remap1,
4122 					      remap2);
4123 		break;
4124 
4125 	default:
4126 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4127 					      soc->num_reo_dest_rings -
4128 					      USE_1_IPA_RX_REO_RING, remap1,
4129 					      remap2);
4130 		break;
4131 	}
4132 
4133 	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
4134 
4135 	return true;
4136 }
4137 
4138 #ifdef IPA_WDI3_TX_TWO_PIPES
4139 static bool dp_ipa_is_alt_tx_ring(int index)
4140 {
4141 	return index == IPA_TX_ALT_RING_IDX;
4142 }
4143 
4144 static bool dp_ipa_is_alt_tx_comp_ring(int index)
4145 {
4146 	return index == IPA_TX_ALT_COMP_RING_IDX;
4147 }
4148 #else /* !IPA_WDI3_TX_TWO_PIPES */
4149 static bool dp_ipa_is_alt_tx_ring(int index)
4150 {
4151 	return false;
4152 }
4153 
4154 static bool dp_ipa_is_alt_tx_comp_ring(int index)
4155 {
4156 	return false;
4157 }
4158 #endif /* IPA_WDI3_TX_TWO_PIPES */
4159 
4160 /**
4161  * dp_ipa_get_tx_ring_size() - Get Tx ring size for IPA
4162  *
4163  * @tx_ring_num: Tx ring number
4164  * @tx_ipa_ring_sz: Return param only updated for IPA.
4165  * @soc_cfg_ctx: dp soc cfg context
4166  *
4167  * Return: None
4168  */
4169 static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz,
4170 				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4171 {
4172 	if (!soc_cfg_ctx->ipa_enabled)
4173 		return;
4174 
4175 	if (tx_ring_num == IPA_TCL_DATA_RING_IDX)
4176 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_ring_size(soc_cfg_ctx);
4177 	else if (dp_ipa_is_alt_tx_ring(tx_ring_num))
4178 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_alt_ring_size(soc_cfg_ctx);
4179 }
4180 
4181 /**
4182  * dp_ipa_get_tx_comp_ring_size() - Get Tx comp ring size for IPA
4183  *
4184  * @tx_comp_ring_num: Tx comp ring number
4185  * @tx_comp_ipa_ring_sz: Return param only updated for IPA.
4186  * @soc_cfg_ctx: dp soc cfg context
4187  *
4188  * Return: None
4189  */
4190 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
4191 					 int *tx_comp_ipa_ring_sz,
4192 				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4193 {
4194 	if (!soc_cfg_ctx->ipa_enabled)
4195 		return;
4196 
4197 	if (tx_comp_ring_num == IPA_TCL_DATA_RING_IDX)
4198 		*tx_comp_ipa_ring_sz =
4199 				wlan_cfg_ipa_tx_comp_ring_size(soc_cfg_ctx);
4200 	else if (dp_ipa_is_alt_tx_comp_ring(tx_comp_ring_num))
4201 		*tx_comp_ipa_ring_sz =
4202 				wlan_cfg_ipa_tx_alt_comp_ring_size(soc_cfg_ctx);
4203 }
4204 #else
4205 static uint8_t dp_reo_ring_selection(uint32_t value, uint32_t *ring)
4206 {
4207 	uint8_t num = 0;
4208 
4209 	switch (value) {
4210 	case 0xF:
4211 		num = 4;
4212 		ring[0] = REO_REMAP_SW1;
4213 		ring[1] = REO_REMAP_SW2;
4214 		ring[2] = REO_REMAP_SW3;
4215 		ring[3] = REO_REMAP_SW4;
4216 		break;
4217 	case 0xE:
4218 		num = 3;
4219 		ring[0] = REO_REMAP_SW2;
4220 		ring[1] = REO_REMAP_SW3;
4221 		ring[2] = REO_REMAP_SW4;
4222 		break;
4223 	case 0xD:
4224 		num = 3;
4225 		ring[0] = REO_REMAP_SW1;
4226 		ring[1] = REO_REMAP_SW3;
4227 		ring[2] = REO_REMAP_SW4;
4228 		break;
4229 	case 0xC:
4230 		num = 2;
4231 		ring[0] = REO_REMAP_SW3;
4232 		ring[1] = REO_REMAP_SW4;
4233 		break;
4234 	case 0xB:
4235 		num = 3;
4236 		ring[0] = REO_REMAP_SW1;
4237 		ring[1] = REO_REMAP_SW2;
4238 		ring[2] = REO_REMAP_SW4;
4239 		break;
4240 	case 0xA:
4241 		num = 2;
4242 		ring[0] = REO_REMAP_SW2;
4243 		ring[1] = REO_REMAP_SW4;
4244 		break;
4245 	case 0x9:
4246 		num = 2;
4247 		ring[0] = REO_REMAP_SW1;
4248 		ring[1] = REO_REMAP_SW4;
4249 		break;
4250 	case 0x8:
4251 		num = 1;
4252 		ring[0] = REO_REMAP_SW4;
4253 		break;
4254 	case 0x7:
4255 		num = 3;
4256 		ring[0] = REO_REMAP_SW1;
4257 		ring[1] = REO_REMAP_SW2;
4258 		ring[2] = REO_REMAP_SW3;
4259 		break;
4260 	case 0x6:
4261 		num = 2;
4262 		ring[0] = REO_REMAP_SW2;
4263 		ring[1] = REO_REMAP_SW3;
4264 		break;
4265 	case 0x5:
4266 		num = 2;
4267 		ring[0] = REO_REMAP_SW1;
4268 		ring[1] = REO_REMAP_SW3;
4269 		break;
4270 	case 0x4:
4271 		num = 1;
4272 		ring[0] = REO_REMAP_SW3;
4273 		break;
4274 	case 0x3:
4275 		num = 2;
4276 		ring[0] = REO_REMAP_SW1;
4277 		ring[1] = REO_REMAP_SW2;
4278 		break;
4279 	case 0x2:
4280 		num = 1;
4281 		ring[0] = REO_REMAP_SW2;
4282 		break;
4283 	case 0x1:
4284 		num = 1;
4285 		ring[0] = REO_REMAP_SW1;
4286 		break;
4287 	}
4288 	return num;
4289 }
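
/*
 * Example (illustrative sketch): reo_config value 0xB (binary 1011)
 * selects three destination rings:
 *
 *	uint32_t ring[4];
 *	uint8_t num = dp_reo_ring_selection(0xB, ring);
 *	// num == 3, ring == { REO_REMAP_SW1, REO_REMAP_SW2, REO_REMAP_SW4 }
 */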
4290 
4291 bool dp_reo_remap_config(struct dp_soc *soc,
4292 			 uint32_t *remap0,
4293 			 uint32_t *remap1,
4294 			 uint32_t *remap2)
4295 {
4296 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4297 	uint32_t reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
4298 	uint8_t target_type, num;
4299 	uint32_t ring[4];
4300 	uint32_t value;
4301 
4302 	target_type = hal_get_target_type(soc->hal_soc);
4303 
4304 	switch (offload_radio) {
4305 	case dp_nss_cfg_default:
4306 		value = reo_config & 0xF;
4307 		num = dp_reo_ring_selection(value, ring);
4308 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4309 					      num, remap1, remap2);
4310 
4311 		break;
4312 	case dp_nss_cfg_first_radio:
4313 		value = reo_config & 0xE;
4314 		num = dp_reo_ring_selection(value, ring);
4315 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4316 					      num, remap1, remap2);
4317 
4318 		break;
4319 	case dp_nss_cfg_second_radio:
4320 		value = reo_config & 0xD;
4321 		num = dp_reo_ring_selection(value, ring);
4322 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
4323 					      num, remap1, remap2);
4324 
4325 		break;
4326 	case dp_nss_cfg_dbdc:
4327 	case dp_nss_cfg_dbtc:
4328 		/* return false if both or all are offloaded to NSS */
4329 		return false;
4330 
4331 	}
4332 
4333 	dp_debug("remap1 %x remap2 %x offload_radio %u",
4334 		 *remap1, *remap2, offload_radio);
4335 	return true;
4336 }
4337 
4338 static void dp_ipa_get_tx_ring_size(int ring_num, int *tx_ipa_ring_sz,
4339 				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4340 {
4341 }
4342 
4343 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
4344 					 int *tx_comp_ipa_ring_sz,
4345 				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
4346 {
4347 }
4348 #endif /* IPA_OFFLOAD */
4349 
4350 /*
4351  * dp_reo_frag_dst_set() - configure reo register to set the
4352  *                        fragment destination ring
4353  * @soc: Datapath soc handle
4354  * @frag_dst_ring: output parameter for the selected fragment destination ring
4355  *
4356  * Based on offload_radio, the fragment destination ring is selected from:
4357  * 0 - TCL
4358  * 1 - SW1
4359  * 2 - SW2
4360  * 3 - SW3
4361  * 4 - SW4
4362  * 5 - Release
4363  * 6 - FW
4364  * 7 - alternate select
4365  *
4366  * return: void
4367  * Return: none
4368 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
4369 {
4370 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
4371 
4372 	switch (offload_radio) {
4373 	case dp_nss_cfg_default:
4374 		*frag_dst_ring = REO_REMAP_TCL;
4375 		break;
4376 	case dp_nss_cfg_first_radio:
4377 		/*
4378 		 * This configuration is valid for single band radio which
4379 		 * is also NSS offload.
4380 		 */
4381 	case dp_nss_cfg_dbdc:
4382 	case dp_nss_cfg_dbtc:
4383 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
4384 		break;
4385 	default:
4386 		dp_init_err("%pK: dp_reo_frag_dst_set invalid offload radio config", soc);
4387 		break;
4388 	}
4389 }
4390 
4391 #ifdef ENABLE_VERBOSE_DEBUG
4392 static void dp_enable_verbose_debug(struct dp_soc *soc)
4393 {
4394 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
4395 
4396 	soc_cfg_ctx = soc->wlan_cfg_ctx;
4397 
4398 	if (soc_cfg_ctx->per_pkt_trace & dp_verbose_debug_mask)
4399 		is_dp_verbose_debug_enabled = true;
4400 
4401 	if (soc_cfg_ctx->per_pkt_trace & hal_verbose_debug_mask)
4402 		hal_set_verbose_debug(true);
4403 	else
4404 		hal_set_verbose_debug(false);
4405 }
4406 #else
4407 static void dp_enable_verbose_debug(struct dp_soc *soc)
4408 {
4409 }
4410 #endif
4411 
4412 #ifdef WLAN_FEATURE_STATS_EXT
4413 static inline void dp_create_ext_stats_event(struct dp_soc *soc)
4414 {
4415 	qdf_event_create(&soc->rx_hw_stats_event);
4416 }
4417 #else
4418 static inline void dp_create_ext_stats_event(struct dp_soc *soc)
4419 {
4420 }
4421 #endif
4422 
4423 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index)
4424 {
4425 	int tcl_ring_num, wbm_ring_num;
4426 
4427 	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
4428 						index,
4429 						&tcl_ring_num,
4430 						&wbm_ring_num);
4431 
4432 	if (tcl_ring_num == -1 || wbm_ring_num == -1) {
4433 		dp_err("incorrect tcl/wbm ring num for index %u", index);
4434 		return;
4435 	}
4436 
4437 	wlan_minidump_remove(soc->tcl_data_ring[index].base_vaddr_unaligned,
4438 			     soc->tcl_data_ring[index].alloc_size,
4439 			     soc->ctrl_psoc,
4440 			     WLAN_MD_DP_SRNG_TCL_DATA,
4441 			     "tcl_data_ring");
4442 	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
4443 	dp_srng_deinit(soc, &soc->tcl_data_ring[index], TCL_DATA,
4444 		       tcl_ring_num);
4445 
4446 	wlan_minidump_remove(soc->tx_comp_ring[index].base_vaddr_unaligned,
4447 			     soc->tx_comp_ring[index].alloc_size,
4448 			     soc->ctrl_psoc,
4449 			     WLAN_MD_DP_SRNG_TX_COMP,
4450 			     "tcl_comp_ring");
4451 	dp_srng_deinit(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4452 		       wbm_ring_num);
4453 }
4454 
4455 /**
4456  * dp_init_tx_ring_pair_by_index() - Initialize a tcl data/wbm2sw completion
4457  * ring pair
4458  * @soc: DP soc pointer
4459  * @index: index of soc->tcl_data or soc->tx_comp to initialize
4460  *
4461  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
4462  */
4463 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
4464 						uint8_t index)
4465 {
4466 	int tcl_ring_num, wbm_ring_num;
4467 	uint8_t bm_id;
4468 
4469 	if (index >= MAX_TCL_DATA_RINGS) {
4470 		dp_err("unexpected index!");
4471 		QDF_BUG(0);
4472 		goto fail1;
4473 	}
4474 
4475 	wlan_cfg_get_tcl_wbm_ring_num_for_index(soc->wlan_cfg_ctx,
4476 						index,
4477 						&tcl_ring_num,
4478 						&wbm_ring_num);
4479 
4480 	if (tcl_ring_num == -1 || wbm_ring_num == -1) {
4481 		dp_err("incorrect tcl/wbm ring num for index %u", index);
4482 		goto fail1;
4483 	}
4484 
4485 	dp_info("index %u tcl %u wbm %u", index, tcl_ring_num, wbm_ring_num);
4486 	if (dp_srng_init(soc, &soc->tcl_data_ring[index], TCL_DATA,
4487 			 tcl_ring_num, 0)) {
4488 		dp_err("dp_srng_init failed for tcl_data_ring");
4489 		goto fail1;
4490 	}
4491 	wlan_minidump_log(soc->tcl_data_ring[index].base_vaddr_unaligned,
4492 			  soc->tcl_data_ring[index].alloc_size,
4493 			  soc->ctrl_psoc,
4494 			  WLAN_MD_DP_SRNG_TCL_DATA,
4495 			  "tcl_data_ring");
4496 
4497 	if (dp_srng_init(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4498 			 wbm_ring_num, 0)) {
4499 		dp_err("dp_srng_init failed for tx_comp_ring");
4500 		goto fail1;
4501 	}
4502 
4503 	bm_id = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx, tcl_ring_num);
4504 
4505 	soc->arch_ops.tx_implicit_rbm_set(soc, tcl_ring_num, bm_id);
4506 	wlan_minidump_log(soc->tx_comp_ring[index].base_vaddr_unaligned,
4507 			  soc->tx_comp_ring[index].alloc_size,
4508 			  soc->ctrl_psoc,
4509 			  WLAN_MD_DP_SRNG_TX_COMP,
4510 			  "tcl_comp_ring");
4511 
4512 	return QDF_STATUS_SUCCESS;
4513 
4514 fail1:
4515 	return QDF_STATUS_E_FAILURE;
4516 }
4517 
4518 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index)
4519 {
4520 	dp_debug("index %u", index);
4521 	dp_srng_free(soc, &soc->tcl_data_ring[index]);
4522 	dp_srng_free(soc, &soc->tx_comp_ring[index]);
4523 }
4524 
4525 /**
4526  * dp_alloc_tx_ring_pair_by_index() - Allocate a tcl data/wbm2sw completion
4527  * ring pair for the given "index"
4528  * @soc: DP soc pointer
4529  * @index: index of soc->tcl_data or soc->tx_comp to initialize
4530  *
4531  * Return: QDF_STATUS_SUCCESS on success, error code otherwise.
4532  */
4533 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
4534 						 uint8_t index)
4535 {
4536 	int tx_ring_size;
4537 	int tx_comp_ring_size;
4538 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
4539 	int cached = 0;
4540 
4541 	if (index >= MAX_TCL_DATA_RINGS) {
4542 		dp_err("unexpected index!");
4543 		QDF_BUG(0);
4544 		goto fail1;
4545 	}
4546 
4547 	dp_debug("index %u", index);
4548 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
4549 	dp_ipa_get_tx_ring_size(index, &tx_ring_size, soc_cfg_ctx);
4550 
4551 	if (dp_srng_alloc(soc, &soc->tcl_data_ring[index], TCL_DATA,
4552 			  tx_ring_size, cached)) {
4553 		dp_err("dp_srng_alloc failed for tcl_data_ring");
4554 		goto fail1;
4555 	}
4556 
4557 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
4558 	dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size, soc_cfg_ctx);
4559 	/* Enable cached TCL desc if NSS offload is disabled */
4560 	if (!wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
4561 		cached = WLAN_CFG_DST_RING_CACHED_DESC;
4562 
4563 	if (dp_srng_alloc(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4564 			  tx_comp_ring_size, cached)) {
4565 		dp_err("dp_srng_alloc failed for tx_comp_ring");
4566 		goto fail1;
4567 	}
4568 
4569 	return QDF_STATUS_SUCCESS;
4570 
4571 fail1:
4572 	return QDF_STATUS_E_FAILURE;
4573 }
4574 
4575 static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
4576 {
4577 	struct cdp_lro_hash_config lro_hash;
4578 	QDF_STATUS status;
4579 
4580 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
4581 	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
4582 	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
4583 		dp_err("LRO, GRO and RX hash disabled");
4584 		return QDF_STATUS_E_FAILURE;
4585 	}
4586 
4587 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
4588 
4589 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
4590 	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
4591 		lro_hash.lro_enable = 1;
4592 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
4593 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
4594 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
4595 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
4596 	}
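
	/* Note (assumption): with the usual flag/mask matching semantics,
	 * (tcp_flags & tcp_flag_mask) == tcp_flag, this pair makes a
	 * segment LRO-eligible only when ACK is set and FIN/SYN/RST/URG/
	 * ECE/CWR are all clear.
	 */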
4597 
4598 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
4599 			     (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
4600 			      LRO_IPV4_SEED_ARR_SZ));
4601 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
4602 			     (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
4603 			      LRO_IPV6_SEED_ARR_SZ));
4604 
4605 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
4606 
4607 	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
4608 		QDF_BUG(0);
4609 		dp_err("lro_hash_config not configured");
4610 		return QDF_STATUS_E_FAILURE;
4611 	}
4612 
4613 	status = soc->cdp_soc.ol_ops->lro_hash_config(soc->ctrl_psoc,
4614 						      pdev->pdev_id,
4615 						      &lro_hash);
4616 	if (!QDF_IS_STATUS_SUCCESS(status)) {
4617 		dp_err("failed to send lro_hash_config to FW %u", status);
4618 		return status;
4619 	}
4620 
4621 	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
4622 		lro_hash.lro_enable, lro_hash.tcp_flag,
4623 		lro_hash.tcp_flag_mask);
4624 
4625 	dp_info("toeplitz_hash_ipv4:");
4626 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4627 			   lro_hash.toeplitz_hash_ipv4,
4628 			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
4629 			   LRO_IPV4_SEED_ARR_SZ));
4630 
4631 	dp_info("toeplitz_hash_ipv6:");
4632 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4633 			   lro_hash.toeplitz_hash_ipv6,
4634 			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
4635 			   LRO_IPV6_SEED_ARR_SZ));
4636 
4637 	return status;
4638 }
4639 
4640 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
4641 /*
4642  * dp_reap_timer_init() - initialize the reap timer
4643  * @soc: data path SoC handle
4644  *
4645  * Return: void
4646  */
4647 static void dp_reap_timer_init(struct dp_soc *soc)
4648 {
4649 	/*
4650 	 * Timer to reap rxdma status rings.
4651 	 * Needed until we enable ppdu end interrupts
4652 	 */
4653 	dp_monitor_reap_timer_init(soc);
4654 	dp_monitor_vdev_timer_init(soc);
4655 }
4656 
4657 /*
4658  * dp_reap_timer_deinit() - de-initialize the reap timer
4659  * @soc: data path SoC handle
4660  *
4661  * Return: void
4662  */
4663 static void dp_reap_timer_deinit(struct dp_soc *soc)
4664 {
4665 	dp_monitor_reap_timer_deinit(soc);
4666 }
4667 #else
4668 /* WIN use case */
4669 static void dp_reap_timer_init(struct dp_soc *soc)
4670 {
4671 	/* Configure LMAC rings in Polled mode */
4672 	if (soc->lmac_polled_mode) {
4673 		/*
4674 		 * Timer to reap lmac rings.
4675 		 */
4676 		qdf_timer_init(soc->osdev, &soc->lmac_reap_timer,
4677 			       dp_service_lmac_rings, (void *)soc,
4678 			       QDF_TIMER_TYPE_WAKE_APPS);
4679 		soc->lmac_timer_init = 1;
4680 		qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
4681 	}
4682 }
4683 
4684 static void dp_reap_timer_deinit(struct dp_soc *soc)
4685 {
4686 	if (soc->lmac_timer_init) {
4687 		qdf_timer_stop(&soc->lmac_reap_timer);
4688 		qdf_timer_free(&soc->lmac_reap_timer);
4689 		soc->lmac_timer_init = 0;
4690 	}
4691 }
4692 #endif
4693 
4694 #ifdef QCA_HOST2FW_RXBUF_RING
4695 /*
4696  * dp_rxdma_ring_alloc() - allocate the RXDMA rings
4697  * @soc: data path SoC handle
4698  * @pdev: Physical device handle
4699  *
4700  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
4701  */
4702 static int dp_rxdma_ring_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
4703 {
4704 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
4705 	int max_mac_rings;
4706 	int i;
4707 	int ring_size;
4708 
4709 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
4710 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
4711 	ring_size =  wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx);
4712 
4713 	for (i = 0; i < max_mac_rings; i++) {
4714 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
4715 		if (dp_srng_alloc(soc, &pdev->rx_mac_buf_ring[i],
4716 				  RXDMA_BUF, ring_size, 0)) {
4717 			dp_init_err("%pK: failed rx mac ring setup", soc);
4718 			return QDF_STATUS_E_FAILURE;
4719 		}
4720 	}
4721 	return QDF_STATUS_SUCCESS;
4722 }
4723 
4724 /*
4725  * dp_rxdma_ring_setup() - configure the RXDMA rings
4726  * @soc: data path SoC handle
4727  * @pdev: Physical device handle
4728  *
4729  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
4730  */
4731 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
4732 {
4733 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
4734 	int max_mac_rings;
4735 	int i;
4736 
4737 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
4738 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
4739 
4740 	for (i = 0; i < max_mac_rings; i++) {
4741 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
4742 		if (dp_srng_init(soc, &pdev->rx_mac_buf_ring[i],
4743 				 RXDMA_BUF, 1, i)) {
4744 			dp_init_err("%pK: failed rx mac ring setup", soc);
4745 			return QDF_STATUS_E_FAILURE;
4746 		}
4747 	}
4748 	return QDF_STATUS_SUCCESS;
4749 }
4750 
4751 /*
4752  * dp_rxdma_ring_cleanup() - Deinit the RXDMA rings and reap timer
4753  * @soc: data path SoC handle
4754  * @pdev: Physical device handle
4755  *
4756  * Return: void
4757  */
4758 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
4759 {
4760 	int i;
4761 
4762 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
4763 		dp_srng_deinit(soc, &pdev->rx_mac_buf_ring[i], RXDMA_BUF, 1);
4764 
4765 	dp_reap_timer_deinit(soc);
4766 }
4767 
4768 /*
4769  * dp_rxdma_ring_free() - Free the RXDMA rings
4770  * @pdev: Physical device handle
4771  *
4772  * Return: void
4773  */
4774 static void dp_rxdma_ring_free(struct dp_pdev *pdev)
4775 {
4776 	int i;
4777 
4778 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
4779 		dp_srng_free(pdev->soc, &pdev->rx_mac_buf_ring[i]);
4780 }
4781 
4782 #else
4783 static int dp_rxdma_ring_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
4784 {
4785 	return QDF_STATUS_SUCCESS;
4786 }
4787 
4788 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
4789 {
4790 	return QDF_STATUS_SUCCESS;
4791 }
4792 
4793 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
4794 {
4795 	dp_reap_timer_deinit(soc);
4796 }
4797 
4798 static void dp_rxdma_ring_free(struct dp_pdev *pdev)
4799 {
4800 }
4801 #endif
4802 
4803 /**
4804  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
4805  * @pdev: DP_PDEV handle
4806  *
4807  * Return: void
4808  */
4809 static inline void
4810 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
4811 {
4812 	uint8_t map_id;
4813 	struct dp_soc *soc = pdev->soc;
4814 
4815 	if (!soc)
4816 		return;
4817 
4818 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
4819 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
4820 			     default_dscp_tid_map,
4821 			     sizeof(default_dscp_tid_map));
4822 	}
4823 
4824 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
4825 		hal_tx_set_dscp_tid_map(soc->hal_soc,
4826 					default_dscp_tid_map,
4827 					map_id);
4828 	}
4829 }
4830 
4831 /**
4832  * dp_pcp_tid_map_setup(): Initialize the pcp-tid maps
4833  * @pdev: DP_PDEV handle
4834  *
4835  * Return: void
4836  */
4837 static inline void
4838 dp_pcp_tid_map_setup(struct dp_pdev *pdev)
4839 {
4840 	struct dp_soc *soc = pdev->soc;
4841 
4842 	if (!soc)
4843 		return;
4844 
4845 	qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
4846 		     sizeof(default_pcp_tid_map));
4847 	hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
4848 }
4849 
4850 #ifdef IPA_OFFLOAD
4851 /**
4852  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
4853  * @soc: data path instance
4854  * @pdev: core txrx pdev context
4855  *
4856  * Return: QDF_STATUS_SUCCESS: success
4857  *         QDF_STATUS_E_FAILURE: Error return
4858  */
4859 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
4860 					   struct dp_pdev *pdev)
4861 {
4862 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
4863 	int entries;
4864 
4865 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
4866 		soc_cfg_ctx = soc->wlan_cfg_ctx;
4867 		entries =
4868 			wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
4869 
4870 		/* Setup second Rx refill buffer ring */
4871 		if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
4872 				  entries, 0)) {
4873 			dp_init_err("%pK: dp_srng_alloc failed second"
4874 				    " rx refill ring", soc);
4875 			return QDF_STATUS_E_FAILURE;
4876 		}
4877 	}
4878 
4879 	return QDF_STATUS_SUCCESS;
4880 }
4881 
4882 /**
4883  * dp_init_ipa_rx_refill_buf_ring - Init second Rx refill buffer ring
4884  * @soc: data path instance
4885  * @pdev: core txrx pdev context
4886  *
4887  * Return: QDF_STATUS_SUCCESS: success
4888  *         QDF_STATUS_E_FAILURE: Error return
4889  */
4890 static int dp_init_ipa_rx_refill_buf_ring(struct dp_soc *soc,
4891 					  struct dp_pdev *pdev)
4892 {
4893 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
4894 		if (dp_srng_init(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
4895 				 IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id)) {
4896 			dp_init_err("%pK: dp_srng_init failed second"
4897 				    " rx refill ring", soc);
4898 			return QDF_STATUS_E_FAILURE;
4899 		}
4900 	}
4901 	return QDF_STATUS_SUCCESS;
4902 }
4903 
4904 /**
4905  * dp_deinit_ipa_rx_refill_buf_ring - deinit second Rx refill buffer ring
4906  * @soc: data path instance
4907  * @pdev: core txrx pdev context
4908  *
4909  * Return: void
4910  */
4911 static void dp_deinit_ipa_rx_refill_buf_ring(struct dp_soc *soc,
4912 					     struct dp_pdev *pdev)
4913 {
4914 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
4915 		dp_srng_deinit(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 0);
4916 }
4917 
4918 /**
4919  * dp_free_ipa_rx_refill_buf_ring - free second Rx refill buffer ring
4920  * @soc: data path instance
4921  * @pdev: core txrx pdev context
4922  *
4923  * Return: void
4924  */
4925 static void dp_free_ipa_rx_refill_buf_ring(struct dp_soc *soc,
4926 					   struct dp_pdev *pdev)
4927 {
4928 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
4929 		dp_srng_free(soc, &pdev->rx_refill_buf_ring2);
4930 }
4931 #else
4932 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
4933 					   struct dp_pdev *pdev)
4934 {
4935 	return QDF_STATUS_SUCCESS;
4936 }
4937 
4938 static int dp_init_ipa_rx_refill_buf_ring(struct dp_soc *soc,
4939 					  struct dp_pdev *pdev)
4940 {
4941 	return QDF_STATUS_SUCCESS;
4942 }
4943 
4944 static void dp_deinit_ipa_rx_refill_buf_ring(struct dp_soc *soc,
4945 					     struct dp_pdev *pdev)
4946 {
4947 }
4948 
4949 static void dp_free_ipa_rx_refill_buf_ring(struct dp_soc *soc,
4950 					   struct dp_pdev *pdev)
4951 {
4952 }
4953 #endif
4954 
4955 #ifdef DP_TX_HW_DESC_HISTORY
4956 /**
4957  * dp_soc_tx_hw_desc_history_attach - Attach TX HW descriptor history
4958  *
4959  * @soc: DP soc handle
4960  *
4961  * Return: None
4962  */
4963 static void dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc)
4964 {
4965 	soc->tx_hw_desc_history = dp_context_alloc_mem(
4966 			soc, DP_TX_HW_DESC_HIST_TYPE,
4967 			sizeof(*soc->tx_hw_desc_history));
4968 	if (soc->tx_hw_desc_history)
4969 		soc->tx_hw_desc_history->index = 0;
4970 }
4971 
4972 static void dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc)
4973 {
4974 	dp_context_free_mem(soc, DP_TX_HW_DESC_HIST_TYPE,
4975 			    soc->tx_hw_desc_history);
4976 }
4977 
4978 #else /* DP_TX_HW_DESC_HISTORY */
4979 static inline void
4980 dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc)
4981 {
4982 }
4983 
4984 static inline void
4985 dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc)
4986 {
4987 }
4988 #endif /* DP_TX_HW_DESC_HISTORY */
4989 
4990 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
4991 #ifndef RX_DEFRAG_DO_NOT_REINJECT
4992 /**
4993  * dp_soc_rx_reinject_ring_history_attach - Attach the reo reinject ring
4994  *					    history.
4995  * @soc: DP soc handle
4996  *
4997  * Return: None
4998  */
4999 static void dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
5000 {
5001 	soc->rx_reinject_ring_history =
5002 		dp_context_alloc_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
5003 				     sizeof(struct dp_rx_reinject_history));
5004 	if (soc->rx_reinject_ring_history)
5005 		qdf_atomic_init(&soc->rx_reinject_ring_history->index);
5006 }
5007 #else /* RX_DEFRAG_DO_NOT_REINJECT */
5008 static inline void
5009 dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
5010 {
5011 }
5012 #endif /* RX_DEFRAG_DO_NOT_REINJECT */
5013 
5014 /**
5015  * dp_soc_rx_history_attach() - Attach the ring history record buffers
5016  * @soc: DP soc structure
5017  *
5018  * This function allocates the memory for recording the rx ring, rx error
5019  * ring and the reinject ring entries. There is no error returned in case
5020  * of allocation failure since the record function checks if the history is
5021  * initialized or not. We do not want to fail the driver load in case of
5022  * failure to allocate memory for debug history.
5023  *
5024  * Returns: None
5025  */
5026 static void dp_soc_rx_history_attach(struct dp_soc *soc)
5027 {
5028 	int i;
5029 	uint32_t rx_ring_hist_size;
5030 	uint32_t rx_refill_ring_hist_size;
5031 
5032 	rx_ring_hist_size = sizeof(*soc->rx_ring_history[0]);
5033 	rx_refill_ring_hist_size = sizeof(*soc->rx_refill_ring_history[0]);
5034 
5035 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
5036 		soc->rx_ring_history[i] = dp_context_alloc_mem(
5037 				soc, DP_RX_RING_HIST_TYPE, rx_ring_hist_size);
5038 		if (soc->rx_ring_history[i])
5039 			qdf_atomic_init(&soc->rx_ring_history[i]->index);
5040 	}
5041 
5042 	soc->rx_err_ring_history = dp_context_alloc_mem(
5043 			soc, DP_RX_ERR_RING_HIST_TYPE, rx_ring_hist_size);
5044 	if (soc->rx_err_ring_history)
5045 		qdf_atomic_init(&soc->rx_err_ring_history->index);
5046 
5047 	dp_soc_rx_reinject_ring_history_attach(soc);
5048 
5049 	for (i = 0; i < MAX_PDEV_CNT; i++) {
5050 		soc->rx_refill_ring_history[i] = dp_context_alloc_mem(
5051 						soc,
5052 						DP_RX_REFILL_RING_HIST_TYPE,
5053 						rx_refill_ring_hist_size);
5054 
5055 		if (soc->rx_refill_ring_history[i])
5056 			qdf_atomic_init(&soc->rx_refill_ring_history[i]->index);
5057 	}
5058 }
5059 
5060 static void dp_soc_rx_history_detach(struct dp_soc *soc)
5061 {
5062 	int i;
5063 
5064 	for (i = 0; i < MAX_REO_DEST_RINGS; i++)
5065 		dp_context_free_mem(soc, DP_RX_RING_HIST_TYPE,
5066 				    soc->rx_ring_history[i]);
5067 
5068 	dp_context_free_mem(soc, DP_RX_ERR_RING_HIST_TYPE,
5069 			    soc->rx_err_ring_history);
5070 
5071 	/*
5072 	 * No need for a featurized detach since qdf_mem_free takes
5073 	 * care of NULL pointer.
5074 	 */
5075 	dp_context_free_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
5076 			    soc->rx_reinject_ring_history);
5077 
5078 	for (i = 0; i < MAX_PDEV_CNT; i++)
5079 		dp_context_free_mem(soc, DP_RX_REFILL_RING_HIST_TYPE,
5080 				    soc->rx_refill_ring_history[i]);
5081 }
5082 
5083 #else
5084 static inline void dp_soc_rx_history_attach(struct dp_soc *soc)
5085 {
5086 }
5087 
5088 static inline void dp_soc_rx_history_detach(struct dp_soc *soc)
5089 {
5090 }
5091 #endif
5092 
5093 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
5094 /**
5095  * dp_soc_tx_history_attach() - Attach the ring history record buffers
5096  * @soc: DP soc structure
5097  *
5098  * This function allocates the memory for recording the tx tcl ring and
5099  * the tx comp ring entries. There is no error returned in case
5100  * of allocation failure since the record function checks if the history is
5101  * initialized or not. We do not want to fail the driver load in case of
5102  * failure to allocate memory for debug history.
5103  *
5104  * Returns: None
5105  */
5106 static void dp_soc_tx_history_attach(struct dp_soc *soc)
5107 {
5108 	uint32_t tx_tcl_hist_size;
5109 	uint32_t tx_comp_hist_size;
5110 
5111 	tx_tcl_hist_size = sizeof(*soc->tx_tcl_history);
5112 	soc->tx_tcl_history = dp_context_alloc_mem(soc, DP_TX_TCL_HIST_TYPE,
5113 						   tx_tcl_hist_size);
5114 	if (soc->tx_tcl_history)
5115 		qdf_atomic_init(&soc->tx_tcl_history->index);
5116 
5117 	tx_comp_hist_size = sizeof(*soc->tx_comp_history);
5118 	soc->tx_comp_history = dp_context_alloc_mem(soc, DP_TX_COMP_HIST_TYPE,
5119 						    tx_comp_hist_size);
5120 	if (soc->tx_comp_history)
5121 		qdf_atomic_init(&soc->tx_comp_history->index);
5122 }
5123 
5124 /**
5125  * dp_soc_tx_history_detach() - Detach the ring history record buffers
5126  * @soc: DP soc structure
5127  *
5128  * This function frees the memory for recording the tx tcl ring and
5129  * the tx comp ring entries.
5130  *
5131  * Returns: None
5132  */
5133 static void dp_soc_tx_history_detach(struct dp_soc *soc)
5134 {
5135 	dp_context_free_mem(soc, DP_TX_TCL_HIST_TYPE, soc->tx_tcl_history);
5136 	dp_context_free_mem(soc, DP_TX_COMP_HIST_TYPE, soc->tx_comp_history);
5137 }
5138 
5139 #else
5140 static inline void dp_soc_tx_history_attach(struct dp_soc *soc)
5141 {
5142 }
5143 
5144 static inline void dp_soc_tx_history_detach(struct dp_soc *soc)
5145 {
5146 }
5147 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
5148 
5149 /*
5150  * dp_pdev_attach_wifi3() - attach txrx pdev
5151  * @txrx_soc: Datapath SOC handle
5152  * @params: Params for PDEV attach
5153  *
5154  * Return: QDF_STATUS
5155  */
5156 static inline
5157 QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
5158 				struct cdp_pdev_attach_params *params)
5159 {
5160 	qdf_size_t pdev_context_size;
5161 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5162 	struct dp_pdev *pdev = NULL;
5163 	uint8_t pdev_id = params->pdev_id;
5164 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
5165 	int nss_cfg;
5166 
5167 	pdev_context_size =
5168 		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_PDEV);
5169 	if (pdev_context_size)
5170 		pdev = dp_context_alloc_mem(soc, DP_PDEV_TYPE, pdev_context_size);
5171 
5172 	if (!pdev) {
5173 		dp_init_err("%pK: DP PDEV memory allocation failed",
5174 			    soc);
5175 		goto fail0;
5176 	}
5177 	wlan_minidump_log(pdev, sizeof(*pdev), soc->ctrl_psoc,
5178 			  WLAN_MD_DP_PDEV, "dp_pdev");
5179 
5180 	soc_cfg_ctx = soc->wlan_cfg_ctx;
5181 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
5182 
5183 	if (!pdev->wlan_cfg_ctx) {
5184 		dp_init_err("%pK: pdev cfg_attach failed", soc);
5185 		goto fail1;
5186 	}
5187 
5188 	/*
5189 	 * set nss pdev config based on soc config
5190 	 */
5191 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
5192 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
5193 					 (nss_cfg & (1 << pdev_id)));
5194 
5195 	pdev->soc = soc;
5196 	pdev->pdev_id = pdev_id;
5197 	soc->pdev_list[pdev_id] = pdev;
5198 
5199 	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
5200 	soc->pdev_count++;
5201 
5202 	/* Allocate memory for pdev srng rings */
5203 	if (dp_pdev_srng_alloc(pdev)) {
5204 		dp_init_err("%pK: dp_pdev_srng_alloc failed", soc);
5205 		goto fail2;
5206 	}
5207 
5208 	/* Setup second Rx refill buffer ring */
5209 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev)) {
5210 		dp_init_err("%pK: dp_srng_alloc failed rxrefill2 ring",
5211 			    soc);
5212 		goto fail3;
5213 	}
5214 
5215 	/* Allocate memory for pdev rxdma rings */
5216 	if (dp_rxdma_ring_alloc(soc, pdev)) {
5217 		dp_init_err("%pK: dp_rxdma_ring_alloc failed", soc);
5218 		goto fail4;
5219 	}
5220 
5221 	/* Rx specific init */
5222 	if (dp_rx_pdev_desc_pool_alloc(pdev)) {
5223 		dp_init_err("%pK: dp_rx_pdev_attach failed", soc);
5224 		goto fail4;
5225 	}
5226 
5227 	if (dp_monitor_pdev_attach(pdev)) {
5228 		dp_init_err("%pK: dp_monitor_pdev_attach failed", soc);
5229 		goto fail5;
5230 	}
5231 
5232 	soc->arch_ops.txrx_pdev_attach(pdev, params);
5233 
5234 	return QDF_STATUS_SUCCESS;
5235 fail5:
5236 	dp_rx_pdev_desc_pool_free(pdev);
5237 fail4:
5238 	dp_rxdma_ring_free(pdev);
5239 	dp_free_ipa_rx_refill_buf_ring(soc, pdev);
5240 fail3:
5241 	dp_pdev_srng_free(pdev);
5242 fail2:
5243 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
5244 fail1:
5245 	soc->pdev_list[pdev_id] = NULL;
5246 	qdf_mem_free(pdev);
5247 fail0:
5248 	return QDF_STATUS_E_FAILURE;
5249 }
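
/*
 * Note the unwind order above: each failN label releases what was set up
 * before the failure point and falls through to the next label, e.g. a
 * dp_monitor_pdev_attach() failure unwinds fail5 -> fail4 -> ... -> fail0,
 * the reverse of the acquisition order.
 */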
5250 
5251 /**
5252  * dp_pdev_flush_pending_vdevs() - Flush all delete pending vdevs in pdev
5253  * @pdev: Datapath PDEV handle
5254  *
5255  * This is the last chance to flush all pending dp vdevs/peers,
5256  * This is the last chance to flush all pending dp vdevs/peers;
5257  * peer/vdev leak cases, such as a non-SSR teardown with a missing
5258  * peer unmap, are covered here.
5259  * Return: None
5260  */
5261 static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
5262 {
5263 	struct dp_soc *soc = pdev->soc;
5264 	struct dp_vdev *vdev_arr[MAX_VDEV_CNT] = {0};
5265 	uint32_t i = 0;
5266 	uint32_t num_vdevs = 0;
5267 	struct dp_vdev *vdev = NULL;
5268 
5269 	if (TAILQ_EMPTY(&soc->inactive_vdev_list))
5270 		return;
5271 
5272 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
5273 	TAILQ_FOREACH(vdev, &soc->inactive_vdev_list,
5274 		      inactive_list_elem) {
5275 		if (vdev->pdev != pdev)
5276 			continue;
5277 
5278 		vdev_arr[num_vdevs] = vdev;
5279 		num_vdevs++;
5280 		/* take reference to free */
5281 		dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CDP);
5282 	}
5283 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
5284 
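	/* The flush runs outside inactive_vdev_list_lock: the references
	 * taken above keep each vdev valid once the lock is dropped, so
	 * dp_vdev_flush_peers() never runs with the list lock held.
	 */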
5285 	for (i = 0; i < num_vdevs; i++) {
5286 		dp_vdev_flush_peers((struct cdp_vdev *)vdev_arr[i], 0);
5287 		dp_vdev_unref_delete(soc, vdev_arr[i], DP_MOD_ID_CDP);
5288 	}
5289 }
5290 
5291 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
5292 /**
5293  * dp_vdev_stats_hw_offload_target_config() - Send HTT command to FW
5294  *                                          for enable/disable of HW vdev stats
5295  * @soc: Datapath soc handle
5296  * @pdev_id: INVALID_PDEV_ID for all pdevs or 0,1,2 for individual pdev
5297  * @enable: flag to reprsent enable/disable of hw vdev stats
5298  * @enable: flag to represent enable/disable of hw vdev stats
5299  * Return: none
5300  */
5301 static void dp_vdev_stats_hw_offload_target_config(struct dp_soc *soc,
5302 						   uint8_t pdev_id,
5303 						   bool enable)
5304 {
5305 	/* Check SOC level config for HW offload vdev stats support */
5306 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
5307 		dp_debug("%pK: HW vdev offload stats is disabled", soc);
5308 		return;
5309 	}
5310 
5311 	/* Send HTT command to FW for enable of stats */
5312 	dp_h2t_hw_vdev_stats_config_send(soc, pdev_id, enable, false, 0);
5313 }
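
/*
 * Illustrative usage (a sketch): enable HW vdev stats on all pdevs, per
 * the INVALID_PDEV_ID convention documented above:
 *
 *	dp_vdev_stats_hw_offload_target_config(soc, INVALID_PDEV_ID, true);
 */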
5314 
5315 /**
5316  * dp_vdev_stats_hw_offload_target_clear() - Clear HW vdev stats on target
5317  * @soc: Datapath soc handle
5318  * @pdev_id: pdev_id (0,1,2)
5319  * @bitmask: bitmask with vdev_id(s) for which stats are to be cleared on HW
5320  * @vdev_id_bitmask: bitmask of vdev_id(s) for which stats are to be cleared on HW
5321  * Return: none
5322  */
5323 static
5324 void dp_vdev_stats_hw_offload_target_clear(struct dp_soc *soc, uint8_t pdev_id,
5325 					   uint64_t vdev_id_bitmask)
5326 {
5327 	/* Check SOC level config for HW offload vdev stats support */
5328 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
5329 		dp_debug("%pK: HW vdev offload stats is disabled", soc);
5330 		return;
5331 	}
5332 
5333 	/* Send HTT command to FW for reset of stats */
5334 	dp_h2t_hw_vdev_stats_config_send(soc, pdev_id, true, true,
5335 					 vdev_id_bitmask);
5336 }
5337 #else
5338 static void
5339 dp_vdev_stats_hw_offload_target_config(struct dp_soc *soc, uint8_t pdev_id,
5340 				       bool enable)
5341 {
5342 }
5343 
5344 static
5345 void dp_vdev_stats_hw_offload_target_clear(struct dp_soc *soc, uint8_t pdev_id,
5346 					   uint64_t vdev_id_bitmask)
5347 {
5348 }
5349 #endif /* QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT */
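
/*
 * Usage sketch (hypothetical caller): clear HW vdev stats for vdev_ids 3
 * and 5 on pdev 0. The bitmask carries one bit per vdev_id:
 *
 *	uint64_t mask = (1ULL << 3) | (1ULL << 5);
 *
 *	dp_vdev_stats_hw_offload_target_clear(soc, 0, mask);
 */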
5350 
5351 /**
5352  * dp_pdev_deinit() - Deinit txrx pdev
5353  * @txrx_pdev: Datapath PDEV handle
5354  * @force: Force deinit
5355  *
5356  * Return: None
5357  */
5358 static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
5359 {
5360 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
5361 	qdf_nbuf_t curr_nbuf, next_nbuf;
5362 
5363 	if (pdev->pdev_deinit)
5364 		return;
5365 
5366 	dp_tx_me_exit(pdev);
5367 	dp_rx_fst_detach(pdev->soc, pdev);
5368 	dp_rx_pdev_buffers_free(pdev);
5369 	dp_rx_pdev_desc_pool_deinit(pdev);
5370 	dp_pdev_bkp_stats_detach(pdev);
5371 	qdf_event_destroy(&pdev->fw_peer_stats_event);
5372 	if (pdev->sojourn_buf)
5373 		qdf_nbuf_free(pdev->sojourn_buf);
5374 
5375 	dp_pdev_flush_pending_vdevs(pdev);
5376 	dp_tx_desc_flush(pdev, NULL, true);
5377 
5378 	qdf_spinlock_destroy(&pdev->tx_mutex);
5379 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
5380 
5381 	dp_monitor_pdev_deinit(pdev);
5382 
5383 	dp_pdev_srng_deinit(pdev);
5384 
5385 	dp_ipa_uc_detach(pdev->soc, pdev);
5386 	dp_deinit_ipa_rx_refill_buf_ring(pdev->soc, pdev);
5387 	dp_rxdma_ring_cleanup(pdev->soc, pdev);
5388 
5389 	curr_nbuf = pdev->invalid_peer_head_msdu;
5390 	while (curr_nbuf) {
5391 		next_nbuf = qdf_nbuf_next(curr_nbuf);
5392 		dp_rx_nbuf_free(curr_nbuf);
5393 		curr_nbuf = next_nbuf;
5394 	}
5395 	pdev->invalid_peer_head_msdu = NULL;
5396 	pdev->invalid_peer_tail_msdu = NULL;
5397 
5398 	dp_wdi_event_detach(pdev);
5399 	pdev->pdev_deinit = 1;
5400 }
5401 
5402 /**
5403  * dp_pdev_deinit_wifi3() - Deinit txrx pdev
5404  * @psoc: Datapath psoc handle
5405  * @pdev_id: Id of datapath PDEV handle
5406  * @force: Force deinit
5407  *
5408  * Return: QDF_STATUS
5409  */
5410 static QDF_STATUS
5411 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
5412 		     int force)
5413 {
5414 	struct dp_pdev *txrx_pdev;
5415 
5416 	txrx_pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
5417 						       pdev_id);
5418 
5419 	if (!txrx_pdev)
5420 		return QDF_STATUS_E_FAILURE;
5421 
5422 	dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force);
5423 
5424 	return QDF_STATUS_SUCCESS;
5425 }
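
/*
 * Teardown ordering sketch (illustrative): a pdev is deinited first and
 * then detached, mirroring the attach-then-init bring-up sequence:
 *
 *	dp_pdev_deinit_wifi3(psoc, pdev_id, 0);
 *	dp_pdev_detach_wifi3(psoc, pdev_id, 0);
 */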
5426 
5427 /*
5428  * dp_pdev_post_attach() - Do post pdev attach after dev_alloc_name
5429  * @txrx_pdev: Datapath PDEV handle
5430  *
5431  * Return: None
5432  */
5433 static void dp_pdev_post_attach(struct cdp_pdev *txrx_pdev)
5434 {
5435 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
5436 
5437 	dp_monitor_tx_capture_debugfs_init(pdev);
5438 
5439 	if (dp_pdev_htt_stats_dbgfs_init(pdev)) {
5440 		dp_init_err("%pK: Failed to initialize pdev HTT stats debugfs", pdev->soc);
5441 	}
5442 }
5443 
5444 /*
5445  * dp_pdev_post_attach_wifi3() - post attach for txrx pdev
5446  * @soc: Datapath soc handle
5447  * @pdev_id: pdev id of pdev
5448  *
5449  * Return: QDF_STATUS
5450  */
5451 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *soc,
5452 				     uint8_t pdev_id)
5453 {
5454 	struct dp_pdev *pdev;
5455 
5456 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
5457 						  pdev_id);
5458 
5459 	if (!pdev) {
5460 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
5461 			    (struct dp_soc *)soc, pdev_id);
5462 		return QDF_STATUS_E_FAILURE;
5463 	}
5464 
5465 	dp_pdev_post_attach((struct cdp_pdev *)pdev);
5466 	return QDF_STATUS_SUCCESS;
5467 }
5468 
5469 /*
5470  * dp_pdev_detach() - Complete rest of pdev detach
5471  * @txrx_pdev: Datapath PDEV handle
5472  * @force: Force deinit
5473  *
5474  * Return: None
5475  */
5476 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
5477 {
5478 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
5479 	struct dp_soc *soc = pdev->soc;
5480 
5481 	dp_pdev_htt_stats_dbgfs_deinit(pdev);
5482 	dp_rx_pdev_desc_pool_free(pdev);
5483 	dp_monitor_pdev_detach(pdev);
5484 	dp_rxdma_ring_free(pdev);
5485 	dp_free_ipa_rx_refill_buf_ring(soc, pdev);
5486 	dp_pdev_srng_free(pdev);
5487 
5488 	soc->pdev_count--;
5489 	soc->pdev_list[pdev->pdev_id] = NULL;
5490 
5491 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
5492 	wlan_minidump_remove(pdev, sizeof(*pdev), soc->ctrl_psoc,
5493 			     WLAN_MD_DP_PDEV, "dp_pdev");
5494 	dp_context_free_mem(soc, DP_PDEV_TYPE, pdev);
5495 }
5496 
5497 /*
5498  * dp_pdev_detach_wifi3() - detach txrx pdev
5499  * @psoc: Datapath soc handle
5500  * @pdev_id: pdev id of pdev
5501  * @force: Force detach
5502  *
5503  * Return: QDF_STATUS
5504  */
5505 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
5506 				       int force)
5507 {
5508 	struct dp_pdev *pdev;
5509 
5510 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
5511 						  pdev_id);
5512 
5513 	if (!pdev) {
5514 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
5515 			    (struct dp_soc *)psoc, pdev_id);
5516 		return QDF_STATUS_E_FAILURE;
5517 	}
5518 
5519 	dp_pdev_detach((struct cdp_pdev *)pdev, force);
5520 	return QDF_STATUS_SUCCESS;
5521 }
5522 
5523 /*
5524  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
5525  * @soc: DP SOC handle
5526  */
5527 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
5528 {
5529 	struct reo_desc_list_node *desc;
5530 	struct dp_rx_tid *rx_tid;
5531 
5532 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
5533 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
5534 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
5535 		rx_tid = &desc->rx_tid;
5536 		qdf_mem_unmap_nbytes_single(soc->osdev,
5537 			rx_tid->hw_qdesc_paddr,
5538 			QDF_DMA_BIDIRECTIONAL,
5539 			rx_tid->hw_qdesc_alloc_size);
5540 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
5541 		qdf_mem_free(desc);
5542 	}
5543 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
5544 	qdf_list_destroy(&soc->reo_desc_freelist);
5545 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
5546 }
5547 
5548 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
5549 /*
5550  * dp_reo_desc_deferred_freelist_create() - Initialize the resources used
5551  *                                          for deferred reo desc list
5552  * @psoc: Datapath soc handle
5553  * @soc: Datapath soc handle
5554  * Return: void
5555  */
5556 static void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
5557 {
5558 	qdf_spinlock_create(&soc->reo_desc_deferred_freelist_lock);
5559 	qdf_list_create(&soc->reo_desc_deferred_freelist,
5560 			REO_DESC_DEFERRED_FREELIST_SIZE);
5561 	soc->reo_desc_deferred_freelist_init = true;
5562 }
5563 
5564 /*
5565  * dp_reo_desc_deferred_freelist_destroy() - loop the deferred free list &
5566  *                                           free the leftover REO QDESCs
5567  * @soc: Datapath soc handle
5568  *
5569  * Return: void
5570  */
5571 static void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
5572 {
5573 	struct reo_desc_deferred_freelist_node *desc;
5574 
5575 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
5576 	soc->reo_desc_deferred_freelist_init = false;
5577 	while (qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
5578 	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
5579 		qdf_mem_unmap_nbytes_single(soc->osdev,
5580 					    desc->hw_qdesc_paddr,
5581 					    QDF_DMA_BIDIRECTIONAL,
5582 					    desc->hw_qdesc_alloc_size);
5583 		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
5584 		qdf_mem_free(desc);
5585 	}
5586 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
5587 
5588 	qdf_list_destroy(&soc->reo_desc_deferred_freelist);
5589 	qdf_spinlock_destroy(&soc->reo_desc_deferred_freelist_lock);
5590 }
5591 #else
5592 static inline void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
5593 {
5594 }
5595 
5596 static inline void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
5597 {
5598 }
5599 #endif /* WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
5600 
5601 /*
5602  * dp_soc_reset_txrx_ring_map() - reset tx ring map
5603  * @soc: DP SOC handle
5604  *
5605  */
5606 static void dp_soc_reset_txrx_ring_map(struct dp_soc *soc)
5607 {
5608 	uint32_t i;
5609 
5610 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
5611 		soc->tx_ring_map[i] = 0;
5612 }
5613 
5614 /*
5615  * dp_soc_print_inactive_objects() - prints inactive peer and vdev list
5616  * @soc: DP SOC handle
5617  *
5618  */
5619 static void dp_soc_print_inactive_objects(struct dp_soc *soc)
5620 {
5621 	struct dp_peer *peer = NULL;
5622 	struct dp_peer *tmp_peer = NULL;
5623 	struct dp_vdev *vdev = NULL;
5624 	struct dp_vdev *tmp_vdev = NULL;
5625 	int i = 0;
5626 	uint32_t count;
5627 
5628 	if (TAILQ_EMPTY(&soc->inactive_peer_list) &&
5629 	    TAILQ_EMPTY(&soc->inactive_vdev_list))
5630 		return;
5631 
5632 	TAILQ_FOREACH_SAFE(peer, &soc->inactive_peer_list,
5633 			   inactive_list_elem, tmp_peer) {
5634 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
5635 			count = qdf_atomic_read(&peer->mod_refs[i]);
5636 			if (count)
5637 				DP_PRINT_STATS("peer %pK Module id %u ==> %u",
5638 					       peer, i, count);
5639 		}
5640 	}
5641 
5642 	TAILQ_FOREACH_SAFE(vdev, &soc->inactive_vdev_list,
5643 			   inactive_list_elem, tmp_vdev) {
5644 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
5645 			count = qdf_atomic_read(&vdev->mod_refs[i]);
5646 			if (count)
5647 				DP_PRINT_STATS("vdev %pK Module id %u ==> %u",
5648 					       vdev, i, count);
5649 		}
5650 	}
5651 	QDF_BUG(0);
5652 }
5653 
5654 /**
5655  * dp_soc_deinit() - Deinitialize txrx SOC
5656  * @txrx_soc: Opaque DP SOC handle
5657  *
5658  * Return: None
5659  */
5660 static void dp_soc_deinit(void *txrx_soc)
5661 {
5662 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5663 	struct htt_soc *htt_soc = soc->htt_handle;
5664 	struct dp_mon_ops *mon_ops;
5665 
5666 	qdf_atomic_set(&soc->cmn_init_done, 0);
5667 
5668 	soc->arch_ops.txrx_soc_deinit(soc);
5669 
5670 	mon_ops = dp_mon_ops_get(soc);
5671 	if (mon_ops && mon_ops->mon_soc_deinit)
5672 		mon_ops->mon_soc_deinit(soc);
5673 
5674 	/* free peer tables & AST tables allocated during peer_map_attach */
5675 	if (soc->peer_map_attach_success) {
5676 		dp_peer_find_detach(soc);
5677 		soc->arch_ops.txrx_peer_map_detach(soc);
5678 		soc->peer_map_attach_success = FALSE;
5679 	}
5680 
5681 	qdf_flush_work(&soc->htt_stats.work);
5682 	qdf_disable_work(&soc->htt_stats.work);
5683 
5684 	qdf_spinlock_destroy(&soc->htt_stats.lock);
5685 
5686 	dp_soc_reset_txrx_ring_map(soc);
5687 
5688 	dp_reo_desc_freelist_destroy(soc);
5689 	dp_reo_desc_deferred_freelist_destroy(soc);
5690 
5691 	DEINIT_RX_HW_STATS_LOCK(soc);
5692 
5693 	qdf_spinlock_destroy(&soc->ast_lock);
5694 
5695 	dp_peer_mec_spinlock_destroy(soc);
5696 
5697 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
5698 
5699 	qdf_nbuf_queue_free(&soc->invalid_buf_queue);
5700 
5701 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
5702 
5703 	qdf_spinlock_destroy(&soc->vdev_map_lock);
5704 
5705 	dp_reo_cmdlist_destroy(soc);
5706 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
5707 
5708 	dp_soc_tx_desc_sw_pools_deinit(soc);
5709 
5710 	dp_soc_srng_deinit(soc);
5711 
5712 	dp_hw_link_desc_ring_deinit(soc);
5713 
5714 	dp_soc_print_inactive_objects(soc);
5715 	qdf_spinlock_destroy(&soc->inactive_peer_list_lock);
5716 	qdf_spinlock_destroy(&soc->inactive_vdev_list_lock);
5717 
5718 	htt_soc_htc_dealloc(soc->htt_handle);
5719 
5720 	htt_soc_detach(htt_soc);
5721 
5722 	/* Free wbm sg list and reset flags in down path */
5723 	dp_rx_wbm_sg_list_deinit(soc);
5724 
5725 	wlan_minidump_remove(soc, sizeof(*soc), soc->ctrl_psoc,
5726 			     WLAN_MD_DP_SOC, "dp_soc");
5727 }
5728 
5729 /**
5730  * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
5731  * @txrx_soc: Opaque DP SOC handle
5732  *
5733  * Return: None
5734  */
5735 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc)
5736 {
5737 	dp_soc_deinit(txrx_soc);
5738 }
5739 
5740 /*
5741  * dp_soc_detach() - Detach rest of txrx SOC
5742  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
5743  *
5744  * Return: None
5745  */
5746 static void dp_soc_detach(struct cdp_soc_t *txrx_soc)
5747 {
5748 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5749 
5750 	soc->arch_ops.txrx_soc_detach(soc);
5751 
5752 	dp_sysfs_deinitialize_stats(soc);
5753 	dp_soc_swlm_detach(soc);
5754 	dp_soc_tx_desc_sw_pools_free(soc);
5755 	dp_soc_srng_free(soc);
5756 	dp_hw_link_desc_ring_free(soc);
5757 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
5758 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
5759 	dp_soc_tx_hw_desc_history_detach(soc);
5760 	dp_soc_tx_history_detach(soc);
5761 	dp_soc_rx_history_detach(soc);
5762 
5763 	if (!dp_monitor_modularized_enable()) {
5764 		dp_mon_soc_detach_wrapper(soc);
5765 	}
5766 
5767 	qdf_mem_free(soc->cdp_soc.ops);
5768 	qdf_mem_free(soc);
5769 }
5770 
5771 /*
5772  * dp_soc_detach_wifi3() - Detach txrx SOC
5773  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
5774  *
5775  * Return: None
5776  */
5777 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc)
5778 {
5779 	dp_soc_detach(txrx_soc);
5780 }
5781 
5782 /*
5783  * dp_rxdma_ring_config() - configure the RX DMA rings
5784  * @soc: data path SoC handle
5785  *
5786  * This function is used to configure the MAC rings.
5787  * On MCL, the host provides buffers in the Host2FW ring; the FW
5788  * refills (copies) buffers into the ring and updates the ring_idx
5789  * in a register.
5790  *
5791  * Return: zero on success, non-zero on failure
5793  */
5794 #ifdef QCA_HOST2FW_RXBUF_RING
5795 static inline void
5796 dp_htt_setup_rxdma_err_dst_ring(struct dp_soc *soc, int mac_id,
5797 				int lmac_id)
5798 {
5799 	if (soc->rxdma_err_dst_ring[lmac_id].hal_srng)
5800 		htt_srng_setup(soc->htt_handle, mac_id,
5801 			       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
5802 			       RXDMA_DST);
5803 }
5804 
5805 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
5806 {
5807 	int i;
5808 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5809 
5810 	for (i = 0; i < MAX_PDEV_CNT; i++) {
5811 		struct dp_pdev *pdev = soc->pdev_list[i];
5812 
5813 		if (pdev) {
5814 			int mac_id;
5815 			int max_mac_rings =
5816 				 wlan_cfg_get_num_mac_rings
5817 				(pdev->wlan_cfg_ctx);
5818 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);
5819 
5820 			htt_srng_setup(soc->htt_handle, i,
5821 				       soc->rx_refill_buf_ring[lmac_id]
5822 				       .hal_srng,
5823 				       RXDMA_BUF);
5824 
5825 			if (pdev->rx_refill_buf_ring2.hal_srng)
5826 				htt_srng_setup(soc->htt_handle, i,
5827 					       pdev->rx_refill_buf_ring2
5828 					       .hal_srng,
5829 					       RXDMA_BUF);
5830 
5831 			/* get max_mac_rings based on DBS */
5832 			dp_is_hw_dbs_enable(soc, &max_mac_rings);
5833 			dp_err("pdev_id %d max_mac_rings %d",
5834 			       pdev->pdev_id, max_mac_rings);
5835 
5836 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
5837 				int mac_for_pdev =
5838 					dp_get_mac_id_for_pdev(mac_id,
5839 							       pdev->pdev_id);
5840 				/*
5841 				 * Obtain lmac id from pdev to access the LMAC
5842 				 * ring in soc context
5843 				 */
5844 				lmac_id =
5845 				dp_get_lmac_id_for_pdev_id(soc,
5846 							   mac_id,
5847 							   pdev->pdev_id);
5848 				QDF_TRACE(QDF_MODULE_ID_TXRX,
5849 					 QDF_TRACE_LEVEL_ERROR,
5850 					 FL("mac_for_pdev %d"), mac_for_pdev);
5851 
5852 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
5853 					 pdev->rx_mac_buf_ring[mac_id]
5854 						.hal_srng,
5855 					 RXDMA_BUF);
5856 
5857 				if (!soc->rxdma2sw_rings_not_supported)
5858 					dp_htt_setup_rxdma_err_dst_ring(soc,
5859 						mac_for_pdev, lmac_id);
5860 
5861 				/* Configure monitor mode rings */
5862 				status = dp_monitor_htt_srng_setup(soc, pdev,
5863 								   lmac_id,
5864 								   mac_for_pdev);
5865 				if (status != QDF_STATUS_SUCCESS) {
5866 					dp_err("Failed to send htt monitor messages to target");
5867 					return status;
5868 				}
5869 
5870 			}
5871 		}
5872 	}
5873 
5874 	dp_reap_timer_init(soc);
5875 	return status;
5876 }
5877 #else
5878 /* This is only for WIN */
5879 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
5880 {
5881 	int i;
5882 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5883 	int mac_for_pdev;
5884 	int lmac_id;
5885 
5886 	/* Configure monitor mode rings */
5887 	dp_monitor_soc_htt_srng_setup(soc);
5888 
5889 	for (i = 0; i < MAX_PDEV_CNT; i++) {
5890 		struct dp_pdev *pdev =  soc->pdev_list[i];
5891 
5892 		if (!pdev)
5893 			continue;
5894 
5895 		mac_for_pdev = i;
5896 		lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);
5897 
5898 		if (soc->rx_refill_buf_ring[lmac_id].hal_srng)
5899 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
5900 				       soc->rx_refill_buf_ring[lmac_id].
5901 				       hal_srng, RXDMA_BUF);
5902 
5903 		/* Configure monitor mode rings */
5904 		dp_monitor_htt_srng_setup(soc, pdev,
5905 					  lmac_id,
5906 					  mac_for_pdev);
5907 		if (!soc->rxdma2sw_rings_not_supported)
5908 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
5909 				       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
5910 				       RXDMA_DST);
5911 	}
5912 
5913 	dp_reap_timer_init(soc);
5914 	return status;
5915 }
5916 #endif
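
/*
 * Relationship between the ids used above (sketch): for a pdev with
 * pdev_id p and MAC ring index m within that pdev,
 *
 *	mac_for_pdev = dp_get_mac_id_for_pdev(m, p);
 *	lmac_id = dp_get_lmac_id_for_pdev_id(soc, m, p);
 *
 * mac_for_pdev keys the HTT SRNG setup messages sent to the target,
 * while lmac_id indexes the per-LMAC rings held in the soc context.
 */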
5917 
5918 /*
5919  * dp_rx_target_fst_config() - configure the RXOLE Flow Search Engine
5920  * @soc: data path SoC handle
5921  *
5922  * This function is used to configure the FSE HW block in RX OLE on a
5923  * per-pdev basis. Here, we program parameters related to the Flow
5924  * Search Table.
5926  *
5927  * Return: zero on success, non-zero on failure
5928  */
5929 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
5930 static QDF_STATUS
5931 dp_rx_target_fst_config(struct dp_soc *soc)
5932 {
5933 	int i;
5934 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5935 
5936 	for (i = 0; i < MAX_PDEV_CNT; i++) {
5937 		struct dp_pdev *pdev = soc->pdev_list[i];
5938 
5939 		/* Flow search is not enabled if NSS offload is enabled */
5940 		if (pdev &&
5941 		    !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
5942 			status = dp_rx_flow_send_fst_fw_setup(pdev->soc, pdev);
5943 			if (status != QDF_STATUS_SUCCESS)
5944 				break;
5945 		}
5946 	}
5947 	return status;
5948 }
5949 #elif defined(WLAN_SUPPORT_RX_FISA)
5950 /**
5951  * dp_rx_target_fst_config() - Configure RX OLE FSE engine in HW
5952  * @soc: SoC handle
5953  *
5954  * Return: Success
5955  */
5956 static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
5957 {
5958 	/* Check if it is enabled in the INI */
5959 	if (!soc->fisa_enable) {
5960 		dp_err("RX FISA feature is disabled");
5961 		return QDF_STATUS_E_NOSUPPORT;
5962 	}
5963 
5964 	return dp_rx_flow_send_fst_fw_setup(soc, soc->pdev_list[0]);
5965 }
5966 
5967 #define FISA_MAX_TIMEOUT 0xffffffff
5968 #define FISA_DISABLE_TIMEOUT 0
5969 static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
5970 {
5971 	struct dp_htt_rx_fisa_cfg fisa_config;
5972 
5973 	fisa_config.pdev_id = 0;
5974 	fisa_config.fisa_timeout = FISA_MAX_TIMEOUT;
5975 
5976 	return dp_htt_rx_fisa_config(soc->pdev_list[0], &fisa_config);
5977 }
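
/*
 * FISA_DISABLE_TIMEOUT (0) is the counterpart value; a hypothetical
 * caller could turn the aggregation timeout off by sending the same
 * message with:
 *
 *	fisa_config.fisa_timeout = FISA_DISABLE_TIMEOUT;
 *	dp_htt_rx_fisa_config(soc->pdev_list[0], &fisa_config);
 */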
5978 #else /* !WLAN_SUPPORT_RX_FISA */
5979 static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
5980 {
5981 	return QDF_STATUS_SUCCESS;
5982 }
5983 #endif /* !WLAN_SUPPORT_RX_FISA */
5984 
5985 #ifndef WLAN_SUPPORT_RX_FISA
5986 static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
5987 {
5988 	return QDF_STATUS_SUCCESS;
5989 }
5990 
5991 static QDF_STATUS dp_rx_dump_fisa_stats(struct dp_soc *soc)
5992 {
5993 	return QDF_STATUS_SUCCESS;
5994 }
5995 
5996 static void dp_rx_dump_fisa_table(struct dp_soc *soc)
5997 {
5998 }
5999 
6000 static void dp_suspend_fse_cache_flush(struct dp_soc *soc)
6001 {
6002 }
6003 
6004 static void dp_resume_fse_cache_flush(struct dp_soc *soc)
6005 {
6006 }
6007 #endif /* !WLAN_SUPPORT_RX_FISA */
6008 
6009 #ifndef WLAN_DP_FEATURE_SW_LATENCY_MGR
6010 static inline QDF_STATUS dp_print_swlm_stats(struct dp_soc *soc)
6011 {
6012 	return QDF_STATUS_SUCCESS;
6013 }
6014 #endif /* !WLAN_DP_FEATURE_SW_LATENCY_MGR */
6015 
6016 /*
6017  * dp_soc_attach_target_wifi3() - SOC initialization in the target
6018  * @cdp_soc: Opaque Datapath SOC handle
6019  *
6020  * Return: zero on success, non-zero on failure
6021  */
6022 static QDF_STATUS
6023 dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
6024 {
6025 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
6026 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6027 
6028 	htt_soc_attach_target(soc->htt_handle);
6029 
6030 	status = dp_rxdma_ring_config(soc);
6031 	if (status != QDF_STATUS_SUCCESS) {
6032 		dp_err("Failed to send htt srng setup messages to target");
6033 		return status;
6034 	}
6035 
6036 	status = soc->arch_ops.dp_rxdma_ring_sel_cfg(soc);
6037 	if (status != QDF_STATUS_SUCCESS) {
6038 		dp_err("Failed to send htt ring config message to target");
6039 		return status;
6040 	}
6041 
6042 	status = dp_rx_target_fst_config(soc);
6043 	if (status != QDF_STATUS_SUCCESS &&
6044 	    status != QDF_STATUS_E_NOSUPPORT) {
6045 		dp_err("Failed to send htt fst setup config message to target");
6046 		return status;
6047 	}
6048 
6049 	if (status == QDF_STATUS_SUCCESS) {
6050 		status = dp_rx_fisa_config(soc);
6051 		if (status != QDF_STATUS_SUCCESS) {
6052 			dp_err("Failed to send htt FISA config message to target");
6053 			return status;
6054 		}
6055 	}
6056 
6057 	DP_STATS_INIT(soc);
6058 
6059 	dp_runtime_init(soc);
6060 
6061 	/* Enable HW vdev offload stats if feature is supported */
6062 	dp_vdev_stats_hw_offload_target_config(soc, INVALID_PDEV_ID, true);
6063 
6064 	/* initialize work queue for stats processing */
6065 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
6066 
6067 	return QDF_STATUS_SUCCESS;
6068 }
6069 
6070 /*
6071  * dp_vdev_id_map_tbl_add() - Add vdev into vdev_id table
6072  * @soc: SoC handle
6073  * @vdev: vdev handle
6074  * @vdev_id: vdev_id
6075  *
6076  * Return: None
6077  */
6078 static void dp_vdev_id_map_tbl_add(struct dp_soc *soc,
6079 				   struct dp_vdev *vdev,
6080 				   uint8_t vdev_id)
6081 {
6082 	QDF_ASSERT(vdev_id < MAX_VDEV_CNT);
6083 
6084 	qdf_spin_lock_bh(&soc->vdev_map_lock);
6085 
6086 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
6087 			QDF_STATUS_SUCCESS) {
6088 		dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK vdev_id %u",
6089 			     soc, vdev, vdev_id);
6090 		qdf_spin_unlock_bh(&soc->vdev_map_lock);
6091 		return;
6092 	}
6093 
6094 	if (!soc->vdev_id_map[vdev_id])
6095 		soc->vdev_id_map[vdev_id] = vdev;
6096 	else
6097 		QDF_ASSERT(0);
6098 
6099 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
6100 }
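
/*
 * Lookup side of this table (sketch): callers resolve a vdev_id to a
 * referenced vdev and must release that reference when done:
 *
 *	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
 *	if (vdev) {
 *		(use vdev)
 *		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
 *	}
 */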
6101 
6102 /*
6103  * dp_vdev_id_map_tbl_remove() - remove vdev from vdev_id table
6104  * @soc: SoC handle
6105  * @vdev: vdev handle
6106  *
6107  * Return: None
6108  */
6109 static void dp_vdev_id_map_tbl_remove(struct dp_soc *soc,
6110 				      struct dp_vdev *vdev)
6111 {
6112 	qdf_spin_lock_bh(&soc->vdev_map_lock);
6113 	QDF_ASSERT(soc->vdev_id_map[vdev->vdev_id] == vdev);
6114 
6115 	soc->vdev_id_map[vdev->vdev_id] = NULL;
6116 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
6117 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
6118 }
6119 
6120 /*
6121  * dp_vdev_pdev_list_add() - add vdev into pdev's list
6122  * @soc: soc handle
6123  * @pdev: pdev handle
6124  * @vdev: vdev handle
6125  *
6126  * Return: none
6127  */
6128 static void dp_vdev_pdev_list_add(struct dp_soc *soc,
6129 				  struct dp_pdev *pdev,
6130 				  struct dp_vdev *vdev)
6131 {
6132 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
6133 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
6134 			QDF_STATUS_SUCCESS) {
6135 		dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK",
6136 			     soc, vdev);
6137 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6138 		return;
6139 	}
6140 	/* add this vdev into the pdev's list */
6141 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
6142 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6143 }
6144 
6145 /*
6146  * dp_vdev_pdev_list_remove() - remove vdev from pdev's list
6147  * @soc: SoC handle
6148  * @pdev: pdev handle
6149  * @vdev: VDEV handle
6150  *
6151  * Return: none
6152  */
6153 static void dp_vdev_pdev_list_remove(struct dp_soc *soc,
6154 				     struct dp_pdev *pdev,
6155 				     struct dp_vdev *vdev)
6156 {
6157 	uint8_t found = 0;
6158 	struct dp_vdev *tmpvdev = NULL;
6159 
6160 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
6161 	TAILQ_FOREACH(tmpvdev, &pdev->vdev_list, vdev_list_elem) {
6162 		if (tmpvdev == vdev) {
6163 			found = 1;
6164 			break;
6165 		}
6166 	}
6167 
6168 	if (found) {
6169 		TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
6170 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
6171 	} else {
6172 		dp_vdev_debug("%pK: vdev:%pK not found in pdev:%pK vdevlist:%pK",
6173 			      soc, vdev, pdev, &pdev->vdev_list);
6174 		QDF_ASSERT(0);
6175 	}
6176 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6177 }
6178 
6179 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
6180 /*
6181  * dp_vdev_init_rx_eapol() - initialize osif_rx_eapol
6182  * @vdev: Datapath VDEV handle
6183  *
6184  * Return: None
6185  */
6186 static inline void dp_vdev_init_rx_eapol(struct dp_vdev *vdev)
6187 {
6188 	vdev->osif_rx_eapol = NULL;
6189 }
6190 
6191 /*
6192  * dp_vdev_register_rx_eapol() - Register VDEV operations for rx_eapol
6193  * @vdev: DP vdev handle
6194  * @txrx_ops: Tx and Rx operations
6195  *
6196  * Return: None
6197  */
6198 static inline void dp_vdev_register_rx_eapol(struct dp_vdev *vdev,
6199 					     struct ol_txrx_ops *txrx_ops)
6200 {
6201 	vdev->osif_rx_eapol = txrx_ops->rx.rx_eapol;
6202 }
6203 #else
6204 static inline void dp_vdev_init_rx_eapol(struct dp_vdev *vdev)
6205 {
6206 }
6207 
6208 static inline void dp_vdev_register_rx_eapol(struct dp_vdev *vdev,
6209 					     struct ol_txrx_ops *txrx_ops)
6210 {
6211 }
6212 #endif
6213 
6214 #ifdef WLAN_FEATURE_11BE_MLO
6215 static inline void dp_vdev_save_mld_addr(struct dp_vdev *vdev,
6216 					 struct cdp_vdev_info *vdev_info)
6217 {
6218 	if (vdev_info->mld_mac_addr)
6219 		qdf_mem_copy(&vdev->mld_mac_addr.raw[0],
6220 			     vdev_info->mld_mac_addr, QDF_MAC_ADDR_SIZE);
6221 }
6222 #else
6223 static inline void dp_vdev_save_mld_addr(struct dp_vdev *vdev,
6224 					 struct cdp_vdev_info *vdev_info)
6225 {
6226 
6227 }
6228 #endif
6229 
6230 /*
6231  * dp_vdev_attach_wifi3() - attach txrx vdev
6232  * @cdp_soc: CDP SoC handle
6233  * @pdev_id: PDEV ID for vdev creation
6234  * @vdev_info: parameters used for vdev creation
6235  *
6236  * Return: status
6237 */
6238 static QDF_STATUS dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc,
6239 				       uint8_t pdev_id,
6240 				       struct cdp_vdev_info *vdev_info)
6241 {
6242 	int i = 0;
6243 	qdf_size_t vdev_context_size;
6244 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
6245 	struct dp_pdev *pdev =
6246 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
6247 						   pdev_id);
6248 	struct dp_vdev *vdev;
6249 	uint8_t *vdev_mac_addr = vdev_info->vdev_mac_addr;
6250 	uint8_t vdev_id = vdev_info->vdev_id;
6251 	enum wlan_op_mode op_mode = vdev_info->op_mode;
6252 	enum wlan_op_subtype subtype = vdev_info->subtype;
6253 	uint8_t vdev_stats_id = vdev_info->vdev_stats_id;
6254 
6255 	vdev_context_size =
6256 		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_VDEV);
6257 	vdev = qdf_mem_malloc(vdev_context_size);
6258 
6259 	if (!pdev) {
6260 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
6261 			    cdp_soc, pdev_id);
6262 		qdf_mem_free(vdev);
6263 		goto fail0;
6264 	}
6265 
6266 	if (!vdev) {
6267 		dp_init_err("%pK: DP VDEV memory allocation failed",
6268 			    cdp_soc);
6269 		goto fail0;
6270 	}
6271 
6272 	wlan_minidump_log(vdev, sizeof(*vdev), soc->ctrl_psoc,
6273 			  WLAN_MD_DP_VDEV, "dp_vdev");
6274 
6275 	vdev->pdev = pdev;
6276 	vdev->vdev_id = vdev_id;
6277 	vdev->vdev_stats_id = vdev_stats_id;
6278 	vdev->opmode = op_mode;
6279 	vdev->subtype = subtype;
6280 	vdev->osdev = soc->osdev;
6281 
6282 	vdev->osif_rx = NULL;
6283 	vdev->osif_rsim_rx_decap = NULL;
6284 	vdev->osif_get_key = NULL;
6285 	vdev->osif_tx_free_ext = NULL;
6286 	vdev->osif_vdev = NULL;
6287 
6288 	vdev->delete.pending = 0;
6289 	vdev->safemode = 0;
6290 	vdev->drop_unenc = 1;
6291 	vdev->sec_type = cdp_sec_type_none;
6292 	vdev->multipass_en = false;
6293 	dp_vdev_init_rx_eapol(vdev);
6294 	qdf_atomic_init(&vdev->ref_cnt);
6295 	for (i = 0; i < DP_MOD_ID_MAX; i++)
6296 		qdf_atomic_init(&vdev->mod_refs[i]);
6297 
6298 	/* Take one reference for create */
6299 	qdf_atomic_inc(&vdev->ref_cnt);
6300 	qdf_atomic_inc(&vdev->mod_refs[DP_MOD_ID_CONFIG]);
6301 	vdev->num_peers = 0;
6302 #ifdef notyet
6303 	vdev->filters_num = 0;
6304 #endif
6305 	vdev->lmac_id = pdev->lmac_id;
6306 
6307 	qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE);
6308 
6309 	dp_vdev_save_mld_addr(vdev, vdev_info);
6310 
6311 	/* TODO: Initialize default HTT meta data that will be used in
6312 	 * TCL descriptors for packets transmitted from this VDEV
6313 	 */
6314 
6315 	qdf_spinlock_create(&vdev->peer_list_lock);
6316 	TAILQ_INIT(&vdev->peer_list);
6317 	dp_peer_multipass_list_init(vdev);
6318 	if ((soc->intr_mode == DP_INTR_POLL) &&
6319 	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
6320 		if ((pdev->vdev_count == 0) ||
6321 		    (wlan_op_mode_monitor == vdev->opmode))
6322 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
6323 	} else if (dp_soc_get_con_mode(soc) == QDF_GLOBAL_MISSION_MODE &&
6324 		   soc->intr_mode == DP_INTR_MSI &&
6325 		   wlan_op_mode_monitor == vdev->opmode) {
6326 		/* Timer to reap status ring in mission mode */
6327 		dp_monitor_vdev_timer_start(soc);
6328 	}
6329 
6330 	dp_vdev_id_map_tbl_add(soc, vdev, vdev_id);
6331 
6332 	if (wlan_op_mode_monitor == vdev->opmode) {
6333 		if (dp_monitor_vdev_attach(vdev) == QDF_STATUS_SUCCESS) {
6334 			dp_monitor_pdev_set_mon_vdev(vdev);
6335 			dp_monitor_vdev_set_monitor_mode_buf_rings(pdev);
6336 			return QDF_STATUS_SUCCESS;
6337 		}
6338 		return QDF_STATUS_E_FAILURE;
6339 	}
6340 
6341 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
6342 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
6343 	vdev->dscp_tid_map_id = 0;
6344 	vdev->mcast_enhancement_en = 0;
6345 	vdev->igmp_mcast_enhanc_en = 0;
6346 	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
6347 	vdev->prev_tx_enq_tstamp = 0;
6348 	vdev->prev_rx_deliver_tstamp = 0;
6349 	vdev->skip_sw_tid_classification = DP_TX_HW_DSCP_TID_MAP_VALID;
6350 
6351 	dp_vdev_pdev_list_add(soc, pdev, vdev);
6352 	pdev->vdev_count++;
6353 
6354 	if (wlan_op_mode_sta != vdev->opmode &&
6355 	    wlan_op_mode_ndi != vdev->opmode)
6356 		vdev->ap_bridge_enabled = true;
6357 	else
6358 		vdev->ap_bridge_enabled = false;
6359 	dp_init_info("%pK: wlan_cfg_ap_bridge_enabled %d",
6360 		     cdp_soc, vdev->ap_bridge_enabled);
6361 
6362 	dp_tx_vdev_attach(vdev);
6363 
6364 	dp_monitor_vdev_attach(vdev);
6365 	if (!pdev->is_lro_hash_configured) {
6366 		if (QDF_IS_STATUS_SUCCESS(dp_lro_hash_setup(soc, pdev)))
6367 			pdev->is_lro_hash_configured = true;
6368 		else
6369 			dp_err("LRO hash setup failure!");
6370 	}
6371 
6372 	dp_info("Created vdev %pK ("QDF_MAC_ADDR_FMT")", vdev,
6373 		QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
6374 	DP_STATS_INIT(vdev);
6375 
6376 	if (QDF_IS_STATUS_ERROR(soc->arch_ops.txrx_vdev_attach(soc, vdev)))
6377 		goto fail0;
6378 
6379 	if (wlan_op_mode_sta == vdev->opmode)
6380 		dp_peer_create_wifi3((struct cdp_soc_t *)soc, vdev_id,
6381 				     vdev->mac_addr.raw, CDP_LINK_PEER_TYPE);
6382 	return QDF_STATUS_SUCCESS;
6383 
6384 fail0:
6385 	return QDF_STATUS_E_FAILURE;
6386 }
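
/*
 * Example (hypothetical caller; field values are illustrative): minimal
 * cdp_vdev_info for creating an AP vdev on pdev 0:
 *
 *	struct cdp_vdev_info info = {0};
 *
 *	info.vdev_mac_addr = mac;
 *	info.vdev_id = 1;
 *	info.op_mode = wlan_op_mode_ap;
 *	dp_vdev_attach_wifi3(cdp_soc, 0, &info);
 */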
6387 
6388 #ifndef QCA_HOST_MODE_WIFI_DISABLED
6389 /**
6390  * dp_vdev_register_tx_handler() - Register Tx handler
6391  * @vdev: struct dp_vdev *
6392  * @soc: struct dp_soc *
6393  * @txrx_ops: struct ol_txrx_ops *
6394  */
6395 static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
6396 					       struct dp_soc *soc,
6397 					       struct ol_txrx_ops *txrx_ops)
6398 {
6399 	/* Enable vdev_id check only for ap, if flag is enabled */
6400 	if (vdev->mesh_vdev)
6401 		txrx_ops->tx.tx = dp_tx_send_mesh;
6402 	else if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
6403 		 (vdev->opmode == wlan_op_mode_ap))
6404 		txrx_ops->tx.tx = dp_tx_send_vdev_id_check;
6405 	else
6406 		txrx_ops->tx.tx = dp_tx_send;
6407 
6408 	/* Avoid check in regular exception Path */
6409 	if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
6410 	    (vdev->opmode == wlan_op_mode_ap))
6411 		txrx_ops->tx.tx_exception = dp_tx_send_exception_vdev_id_check;
6412 	else
6413 		txrx_ops->tx.tx_exception = dp_tx_send_exception;
6414 
6415 	dp_info("Configure tx_vdev_id_chk_handler Feature Flag: %d and mode:%d for vdev_id:%d",
6416 		wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx),
6417 		vdev->opmode, vdev->vdev_id);
6418 }
6419 #else /* QCA_HOST_MODE_WIFI_DISABLED */
6420 static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
6421 					       struct dp_soc *soc,
6422 					       struct ol_txrx_ops *txrx_ops)
6423 {
6424 }
6425 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
6426 
6427 /**
6428  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
6429  * @soc_hdl: Datapath soc handle
6430  * @vdev_id: id of Datapath VDEV handle
6431  * @osif_vdev: OSIF vdev handle
6432  * @txrx_ops: Tx and Rx operations
6433  *
6434  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
6435  */
6436 static QDF_STATUS dp_vdev_register_wifi3(struct cdp_soc_t *soc_hdl,
6437 					 uint8_t vdev_id,
6438 					 ol_osif_vdev_handle osif_vdev,
6439 					 struct ol_txrx_ops *txrx_ops)
6440 {
6441 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6442 	struct dp_vdev *vdev =	dp_vdev_get_ref_by_id(soc, vdev_id,
6443 						      DP_MOD_ID_CDP);
6444 
6445 	if (!vdev)
6446 		return QDF_STATUS_E_FAILURE;
6447 
6448 	vdev->osif_vdev = osif_vdev;
6449 	vdev->osif_rx = txrx_ops->rx.rx;
6450 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
6451 	vdev->osif_rx_flush = txrx_ops->rx.rx_flush;
6452 	vdev->osif_gro_flush = txrx_ops->rx.rx_gro_flush;
6453 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
6454 	vdev->osif_fisa_rx = txrx_ops->rx.osif_fisa_rx;
6455 	vdev->osif_fisa_flush = txrx_ops->rx.osif_fisa_flush;
6456 	vdev->osif_get_key = txrx_ops->get_key;
6457 	dp_monitor_vdev_register_osif(vdev, txrx_ops);
6458 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
6459 	vdev->tx_comp = txrx_ops->tx.tx_comp;
6460 	vdev->stats_cb = txrx_ops->rx.stats_rx;
6461 #ifdef notyet
6462 #if ATH_SUPPORT_WAPI
6463 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
6464 #endif
6465 #endif
6466 #ifdef UMAC_SUPPORT_PROXY_ARP
6467 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
6468 #endif
6469 	vdev->me_convert = txrx_ops->me_convert;
6470 	vdev->get_tsf_time = txrx_ops->get_tsf_time;
6471 
6472 	dp_vdev_register_rx_eapol(vdev, txrx_ops);
6473 
6474 	dp_vdev_register_tx_handler(vdev, soc, txrx_ops);
6475 
6476 	dp_init_info("%pK: DP Vdev Register success", soc);
6477 
6478 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6479 	return QDF_STATUS_SUCCESS;
6480 }
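
/*
 * Registration sketch (hypothetical OSIF caller; normally reached
 * through the cdp ops table): fill ol_txrx_ops with rx callbacks, then
 * read back the tx handlers installed by the call:
 *
 *	struct ol_txrx_ops ops = {0};
 *
 *	ops.rx.rx = osif_rx_handler;    (assumed OSIF callback)
 *	dp_vdev_register_wifi3(soc_hdl, vdev_id, osif_vdev, &ops);
 *	tx_send = ops.tx.tx;            (filled in by DP above)
 */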
6481 
6482 void dp_peer_delete(struct dp_soc *soc,
6483 		    struct dp_peer *peer,
6484 		    void *arg)
6485 {
6486 	if (!peer->valid)
6487 		return;
6488 
6489 	dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
6490 			     peer->vdev->vdev_id,
6491 			     peer->mac_addr.raw, 0);
6492 }
6493 
6494 /**
6495  * dp_vdev_flush_peers() - Forcibly flush peers of vdev
6496  * @vdev_handle: Datapath VDEV handle
6497  * @unmap_only: Flag to indicate "only unmap"
6498  *
6499  * Return: void
6500  */
6501 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, bool unmap_only)
6502 {
6503 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6504 	struct dp_pdev *pdev = vdev->pdev;
6505 	struct dp_soc *soc = pdev->soc;
6506 	struct dp_peer *peer;
6507 	uint32_t i = 0;
6508 
6509 
6510 	if (!unmap_only)
6511 		dp_vdev_iterate_peer_lock_safe(vdev, dp_peer_delete, NULL,
6512 					       DP_MOD_ID_CDP);
6513 
6514 	for (i = 0; i < soc->max_peer_id ; i++) {
6515 		peer = __dp_peer_get_ref_by_id(soc, i, DP_MOD_ID_CDP);
6516 
6517 		if (!peer)
6518 			continue;
6519 
6520 		if (peer->vdev != vdev) {
6521 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6522 			continue;
6523 		}
6524 
6525 		dp_info("peer: "QDF_MAC_ADDR_FMT" is getting unmapped",
6526 			QDF_MAC_ADDR_REF(peer->mac_addr.raw));
6527 
6528 		dp_rx_peer_unmap_handler(soc, i,
6529 					 vdev->vdev_id,
6530 					 peer->mac_addr.raw, 0,
6531 					 DP_PEER_WDS_COUNT_INVALID);
6532 		SET_PEER_REF_CNT_ONE(peer);
6533 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6534 	}
6535 
6536 }
6537 
6538 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
6539 /*
6540  * dp_txrx_alloc_vdev_stats_id() - Allocate vdev_stats_id
6541  * @soc_hdl: Datapath soc handle
6542  * @vdev_stats_id: Address of vdev_stats_id
6543  *
6544  * Return: QDF_STATUS
6545  */
6546 static QDF_STATUS dp_txrx_alloc_vdev_stats_id(struct cdp_soc_t *soc_hdl,
6547 					      uint8_t *vdev_stats_id)
6548 {
6549 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6550 	uint8_t id = 0;
6551 
6552 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
6553 		*vdev_stats_id = CDP_INVALID_VDEV_STATS_ID;
6554 		return QDF_STATUS_E_FAILURE;
6555 	}
6556 
6557 	while (id < CDP_MAX_VDEV_STATS_ID) {
6558 		if (!qdf_atomic_test_and_set_bit(id, &soc->vdev_stats_id_map)) {
6559 			*vdev_stats_id = id;
6560 			return QDF_STATUS_SUCCESS;
6561 		}
6562 		id++;
6563 	}
6564 
6565 	*vdev_stats_id = CDP_INVALID_VDEV_STATS_ID;
6566 	return QDF_STATUS_E_FAILURE;
6567 }
6568 
6569 /*
6570  * dp_txrx_reset_vdev_stats_id() - Reset vdev_stats_id in dp_soc
6571  * @soc_hdl: Datapath soc handle
6572  * @vdev_stats_id: vdev_stats_id to reset in dp_soc
6573  *
6574  * Return: none
6575  */
6576 static void dp_txrx_reset_vdev_stats_id(struct cdp_soc_t *soc_hdl,
6577 					uint8_t vdev_stats_id)
6578 {
6579 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6580 
6581 	if ((!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) ||
6582 	    (vdev_stats_id >= CDP_MAX_VDEV_STATS_ID))
6583 		return;
6584 
6585 	qdf_atomic_clear_bit(vdev_stats_id, &soc->vdev_stats_id_map);
6586 }
6587 #else
6588 static void dp_txrx_reset_vdev_stats_id(struct cdp_soc_t *soc,
6589 					uint8_t vdev_stats_id)
6590 {}
6591 #endif
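
/*
 * Lifecycle sketch (illustrative): the control path allocates a
 * vdev_stats_id before vdev attach and returns it on detach:
 *
 *	uint8_t stats_id;
 *
 *	if (dp_txrx_alloc_vdev_stats_id(soc_hdl, &stats_id) ==
 *	    QDF_STATUS_SUCCESS)
 *		vdev_info.vdev_stats_id = stats_id;
 *	(... vdev lifetime ...)
 *	dp_txrx_reset_vdev_stats_id(soc_hdl, stats_id);
 */
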
6592 /*
6593  * dp_vdev_detach_wifi3() - Detach txrx vdev
6594  * @cdp_soc: Datapath soc handle
6595  * @vdev_id: VDEV Id
6596  * @callback: Callback OL_IF on completion of detach
6597  * @cb_context: Callback context
6598  *
6599  * Return: QDF_STATUS
6600  */
6600 static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc,
6601 				       uint8_t vdev_id,
6602 				       ol_txrx_vdev_delete_cb callback,
6603 				       void *cb_context)
6604 {
6605 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
6606 	struct dp_pdev *pdev;
6607 	struct dp_neighbour_peer *peer = NULL;
6608 	struct dp_peer *vap_self_peer = NULL;
6609 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
6610 						     DP_MOD_ID_CDP);
6611 
6612 	if (!vdev)
6613 		return QDF_STATUS_E_FAILURE;
6614 
6615 	soc->arch_ops.txrx_vdev_detach(soc, vdev);
6616 
6617 	pdev = vdev->pdev;
6618 
6619 	vap_self_peer = dp_sta_vdev_self_peer_ref_n_get(soc, vdev,
6620 							DP_MOD_ID_CONFIG);
6621 	if (vap_self_peer) {
6622 		qdf_spin_lock_bh(&soc->ast_lock);
6623 		if (vap_self_peer->self_ast_entry) {
6624 			dp_peer_del_ast(soc, vap_self_peer->self_ast_entry);
6625 			vap_self_peer->self_ast_entry = NULL;
6626 		}
6627 		qdf_spin_unlock_bh(&soc->ast_lock);
6628 
6629 		dp_peer_delete_wifi3((struct cdp_soc_t *)soc, vdev->vdev_id,
6630 				     vap_self_peer->mac_addr.raw, 0);
6631 		dp_peer_unref_delete(vap_self_peer, DP_MOD_ID_CONFIG);
6632 	}
6633 
6634 	/*
6635 	 * If the target is hung, flush all peers before detaching the
6636 	 * vdev; this will free all references held due to missing
6637 	 * unmap commands from the target
6638 	 */
6639 	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
6640 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, false);
6641 	else if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
6642 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, true);
6643 
6644 	/* indicate that the vdev needs to be deleted */
6645 	vdev->delete.pending = 1;
6646 	dp_rx_vdev_detach(vdev);
6647 	/*
6648 	 * keep this after dp_rx_vdev_detach(),
6649 	 * as the callback done in dp_rx_vdev_detach()
6650 	 * still needs to get the vdev pointer by vdev_id.
6651 	 */
6652 	dp_vdev_id_map_tbl_remove(soc, vdev);
6653 
6654 	dp_monitor_neighbour_peer_list_remove(pdev, vdev, peer);
6655 
6656 	dp_txrx_reset_vdev_stats_id(cdp_soc, vdev->vdev_stats_id);
6657 
6658 	dp_tx_vdev_multipass_deinit(vdev);
6659 
6660 	if (vdev->vdev_dp_ext_handle) {
6661 		qdf_mem_free(vdev->vdev_dp_ext_handle);
6662 		vdev->vdev_dp_ext_handle = NULL;
6663 	}
6664 	vdev->delete.callback = callback;
6665 	vdev->delete.context = cb_context;
6666 
6667 	if (vdev->opmode != wlan_op_mode_monitor)
6668 		dp_vdev_pdev_list_remove(soc, pdev, vdev);
6669 
6670 	pdev->vdev_count--;
6671 	/* release reference taken above for find */
6672 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6673 
6674 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
6675 	TAILQ_INSERT_TAIL(&soc->inactive_vdev_list, vdev, inactive_list_elem);
6676 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
6677 
6678 	/* release reference taken at dp_vdev_create */
6679 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
6680 
6681 	return QDF_STATUS_SUCCESS;
6682 }
6683 
6684 #ifdef WLAN_FEATURE_11BE_MLO
6685 /**
6686  * is_dp_peer_can_reuse() - check whether the dp_peer matches the conditions for reuse
6687  * @vdev: Target DP vdev handle
6688  * @peer: DP peer handle to be checked
6689  * @peer_mac_addr: Target peer mac address
6690  * @peer_type: Target peer type
6691  *
6692  * Return: true - if match, false - not match
6693  */
6694 static inline
6695 bool is_dp_peer_can_reuse(struct dp_vdev *vdev,
6696 			  struct dp_peer *peer,
6697 			  uint8_t *peer_mac_addr,
6698 			  enum cdp_peer_type peer_type)
6699 {
6700 	if (peer->bss_peer && (peer->vdev == vdev) &&
6701 	    (peer->peer_type == peer_type) &&
6702 	    (qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw,
6703 			 QDF_MAC_ADDR_SIZE) == 0))
6704 		return true;
6705 
6706 	return false;
6707 }
6708 #else
6709 static inline
6710 bool is_dp_peer_can_reuse(struct dp_vdev *vdev,
6711 			  struct dp_peer *peer,
6712 			  uint8_t *peer_mac_addr,
6713 			  enum cdp_peer_type peer_type)
6714 {
6715 	if (peer->bss_peer && (peer->vdev == vdev) &&
6716 	    (qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw,
6717 			 QDF_MAC_ADDR_SIZE) == 0))
6718 		return true;
6719 
6720 	return false;
6721 }
6722 #endif
6723 
6724 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
6725 						uint8_t *peer_mac_addr,
6726 						enum cdp_peer_type peer_type)
6727 {
6728 	struct dp_peer *peer;
6729 	struct dp_soc *soc = vdev->pdev->soc;
6730 
6731 	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
6732 	TAILQ_FOREACH(peer, &soc->inactive_peer_list,
6733 		      inactive_list_elem) {
6734 
6735 		/* reuse bss peer only when vdev matches */
6736 		if (is_dp_peer_can_reuse(vdev, peer,
6737 					 peer_mac_addr, peer_type)) {
6738 			/* increment ref count for cdp_peer_create */
6739 			if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG) ==
6740 						QDF_STATUS_SUCCESS) {
6741 				TAILQ_REMOVE(&soc->inactive_peer_list, peer,
6742 					     inactive_list_elem);
6743 				qdf_spin_unlock_bh
6744 					(&soc->inactive_peer_list_lock);
6745 				return peer;
6746 			}
6747 		}
6748 	}
6749 
6750 	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
6751 	return NULL;
6752 }
6753 
6754 #ifdef FEATURE_AST
6755 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
6756 					       struct dp_pdev *pdev,
6757 					       uint8_t *peer_mac_addr)
6758 {
6759 	struct dp_ast_entry *ast_entry;
6760 
6761 	if (soc->ast_offload_support)
6762 		return;
6763 
6764 	qdf_spin_lock_bh(&soc->ast_lock);
6765 	if (soc->ast_override_support)
6766 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
6767 							    pdev->pdev_id);
6768 	else
6769 		ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
6770 
6771 	if (ast_entry && ast_entry->next_hop && !ast_entry->delete_in_progress)
6772 		dp_peer_del_ast(soc, ast_entry);
6773 
6774 	qdf_spin_unlock_bh(&soc->ast_lock);
6775 }
6776 #endif
6777 
6778 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
6779 /*
6780  * dp_peer_hw_txrx_stats_init() - Initialize hw_txrx_stats_en in dp_txrx_peer
6781  * @soc: Datapath soc handle
6782  * @txrx_peer: Datapath txrx_peer handle
6783  *
6784  * Return: none
6785  */
6786 static inline
6787 void dp_peer_hw_txrx_stats_init(struct dp_soc *soc,
6788 				struct dp_txrx_peer *txrx_peer)
6789 {
6790 	txrx_peer->hw_txrx_stats_en =
6791 		wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx);
6792 }
6793 #else
6794 static inline
6795 void dp_peer_hw_txrx_stats_init(struct dp_soc *soc,
6796 				struct dp_txrx_peer *txrx_peer)
6797 {
6798 	txrx_peer->hw_txrx_stats_en = 0;
6799 }
6800 #endif
6801 
6802 static QDF_STATUS dp_txrx_peer_detach(struct dp_soc *soc, struct dp_peer *peer)
6803 {
6804 	struct dp_txrx_peer *txrx_peer;
6805 	struct dp_pdev *pdev;
6806 
6807 	/* dp_txrx_peer exists for mld peer and legacy peer */
6808 	if (peer->txrx_peer) {
6809 		txrx_peer = peer->txrx_peer;
6810 		peer->txrx_peer = NULL;
6811 		pdev = txrx_peer->vdev->pdev;
6812 
6813 		dp_peer_defrag_rx_tids_deinit(txrx_peer);
6814 		/*
6815 		 * Deallocate the extended stats context
6816 		 */
6817 		dp_peer_delay_stats_ctx_dealloc(soc, txrx_peer);
6818 		dp_peer_rx_bufq_resources_deinit(txrx_peer);
6819 		dp_peer_jitter_stats_ctx_dealloc(pdev, txrx_peer);
6820 
6821 		qdf_mem_free(txrx_peer);
6822 	}
6823 
6824 	return QDF_STATUS_SUCCESS;
6825 }
6826 
6827 static QDF_STATUS dp_txrx_peer_attach(struct dp_soc *soc, struct dp_peer *peer)
6828 {
6829 	struct dp_txrx_peer *txrx_peer;
6830 	struct dp_pdev *pdev;
6831 
6832 	txrx_peer = (struct dp_txrx_peer *)qdf_mem_malloc(sizeof(*txrx_peer));
6833 
6834 	if (!txrx_peer)
6835 		return QDF_STATUS_E_NOMEM; /* failure */
6836 
6837 	/* initialize the peer_id */
6838 	txrx_peer->peer_id = HTT_INVALID_PEER;
6839 	txrx_peer->vdev = peer->vdev;
6840 	pdev = peer->vdev->pdev;
6841 
6842 	DP_STATS_INIT(txrx_peer);
6843 
6844 	dp_wds_ext_peer_init(txrx_peer);
6845 	dp_peer_rx_bufq_resources_init(txrx_peer);
6846 	dp_peer_hw_txrx_stats_init(soc, txrx_peer);
6847 	/*
6848 	 * Allocate peer extended stats context. Fall through in
6849 	 * case of failure as it's not an implicit requirement to have
6850 	 * this object for regular statistics updates.
6851 	 */
6852 	if (dp_peer_delay_stats_ctx_alloc(soc, txrx_peer) !=
6853 					  QDF_STATUS_SUCCESS)
6854 		dp_warn("peer delay_stats ctx alloc failed");
6855 
6856 	/*
6857 	 * Allocate memory for jitter stats. Fall through in
6858 	 * case of failure as it's not an implicit requirement to have
6859 	 * this object for regular statistics updates.
6860 	 */
6861 	if (dp_peer_jitter_stats_ctx_alloc(pdev, txrx_peer) !=
6862 					   QDF_STATUS_SUCCESS)
6863 		dp_warn("peer jitter_stats ctx alloc failed");
6864 
6865 	dp_set_peer_isolation(txrx_peer, false);
6866 
6867 	dp_peer_defrag_rx_tids_init(txrx_peer);
6868 	dp_txrx_peer_attach_add(soc, peer, txrx_peer);
6869 
6870 	return QDF_STATUS_SUCCESS;
6871 }
6872 
6873 static inline
6874 void dp_txrx_peer_stats_clr(struct dp_txrx_peer *txrx_peer)
6875 {
6876 	if (!txrx_peer)
6877 		return;
6878 
6879 	txrx_peer->tx_failed = 0;
6880 	txrx_peer->comp_pkt.num = 0;
6881 	txrx_peer->comp_pkt.bytes = 0;
6882 	txrx_peer->to_stack.num = 0;
6883 	txrx_peer->to_stack.bytes = 0;
6884 
6885 	DP_STATS_CLR(txrx_peer);
6886 	dp_peer_delay_stats_ctx_clr(txrx_peer);
6887 	dp_peer_jitter_stats_ctx_clr(txrx_peer);
6888 }
6889 
6890 /*
6891  * dp_peer_create_wifi3() - attach txrx peer
6892  * @soc_hdl: Datapath soc handle
6893  * @vdev_id: id of vdev
6894  * @peer_mac_addr: Peer MAC address
6895  * @peer_type: link or MLD peer type
6896  *
6897  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
6898  */
6899 static QDF_STATUS
6900 dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
6901 		     uint8_t *peer_mac_addr, enum cdp_peer_type peer_type)
6902 {
6903 	struct dp_peer *peer;
6904 	int i;
6905 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6906 	struct dp_pdev *pdev;
6907 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
6908 	struct dp_vdev *vdev = NULL;
6909 
6910 	if (!peer_mac_addr)
6911 		return QDF_STATUS_E_FAILURE;
6912 
6913 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
6914 
6915 	if (!vdev)
6916 		return QDF_STATUS_E_FAILURE;
6917 
6918 	pdev = vdev->pdev;
6919 	soc = pdev->soc;
6920 
6921 	/*
6922 	 * If a peer entry with given MAC address already exists,
6923 	 * reuse the peer and reset the state of peer.
6924 	 */
6925 	peer = dp_peer_can_reuse(vdev, peer_mac_addr, peer_type);
6926 
6927 	if (peer) {
6928 		qdf_atomic_init(&peer->is_default_route_set);
6929 		dp_peer_cleanup(vdev, peer);
6930 
6931 		dp_peer_vdev_list_add(soc, vdev, peer);
6932 		dp_peer_find_hash_add(soc, peer);
6933 
6934 		dp_peer_rx_tids_create(peer);
6935 		if (IS_MLO_DP_MLD_PEER(peer))
6936 			dp_mld_peer_init_link_peers_info(peer);
6937 
6938 		qdf_spin_lock_bh(&soc->ast_lock);
6939 		dp_peer_delete_ast_entries(soc, peer);
6940 		qdf_spin_unlock_bh(&soc->ast_lock);
6941 
6942 		if ((vdev->opmode == wlan_op_mode_sta) &&
6943 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
6944 		     QDF_MAC_ADDR_SIZE)) {
6945 			ast_type = CDP_TXRX_AST_TYPE_SELF;
6946 		}
6947 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
6948 
6949 		peer->valid = 1;
6950 		dp_local_peer_id_alloc(pdev, peer);
6951 
6952 		qdf_spinlock_create(&peer->peer_info_lock);
6953 
6954 		DP_STATS_INIT(peer);
6955 
6956 		/*
6957 		 * In tx_monitor mode, a filter may be set for an unassociated
6958 		 * peer; when that peer becomes associated, the tx_cap_enabled
6959 		 * flag needs to be updated to support the peer filter.
6960 		 */
6961 		if (!IS_MLO_DP_MLD_PEER(peer)) {
6962 			dp_monitor_peer_tx_capture_filter_check(pdev, peer);
6963 			dp_monitor_peer_reset_stats(soc, peer);
6964 		}
6965 
6966 		if (peer->txrx_peer) {
6967 			dp_peer_rx_bufq_resources_init(peer->txrx_peer);
6968 			dp_txrx_peer_stats_clr(peer->txrx_peer);
6969 			dp_set_peer_isolation(peer->txrx_peer, false);
6970 			dp_wds_ext_peer_init(peer->txrx_peer);
6971 			dp_peer_hw_txrx_stats_init(soc, peer->txrx_peer);
6972 		}
6973 
6974 		dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
6975 
6976 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6977 		return QDF_STATUS_SUCCESS;
6978 	} else {
6979 		/*
6980 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
6981 		 * need to remove the AST entry which was earlier added as a WDS
6982 		 * entry.
6983 		 * If an AST entry exists, but no peer entry exists with a given
6984 		 * MAC address, we can deduce it to be a WDS entry
6985 		 */
6986 		dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
6987 	}
6988 
6989 #ifdef notyet
6990 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
6991 		soc->mempool_ol_ath_peer);
6992 #else
6993 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
6994 #endif
6995 	if (!peer) {
6996 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6997 		return QDF_STATUS_E_FAILURE; /* failure */
6998 	}
6999 	wlan_minidump_log(peer,
7000 			  sizeof(*peer),
7001 			  soc->ctrl_psoc,
7002 			  WLAN_MD_DP_PEER, "dp_peer");
7003 
7004 	qdf_mem_zero(peer, sizeof(struct dp_peer));
7005 
7006 	/* store provided params */
7007 	peer->vdev = vdev;
7008 
7009 	/* initialize the peer_id */
7010 	peer->peer_id = HTT_INVALID_PEER;
7011 
7012 	qdf_mem_copy(
7013 		&peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE);
7014 
7015 	DP_PEER_SET_TYPE(peer, peer_type);
7016 	if (IS_MLO_DP_MLD_PEER(peer)) {
7017 		if (dp_txrx_peer_attach(soc, peer) !=
7018 				QDF_STATUS_SUCCESS)
7019 			goto fail; /* failure */
7020 
7021 		dp_mld_peer_init_link_peers_info(peer);
7022 	} else if (dp_monitor_peer_attach(soc, peer) !=
7023 				QDF_STATUS_SUCCESS)
7024 		dp_warn("peer monitor ctx alloc failed");
7025 
7026 	TAILQ_INIT(&peer->ast_entry_list);
7027 
7028 	/* get the vdev reference for new peer */
7029 	dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CHILD);
7030 
7031 	if ((vdev->opmode == wlan_op_mode_sta) &&
7032 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
7033 			 QDF_MAC_ADDR_SIZE)) {
7034 		ast_type = CDP_TXRX_AST_TYPE_SELF;
7035 	}
7036 	qdf_spinlock_create(&peer->peer_state_lock);
7037 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
7038 	qdf_spinlock_create(&peer->peer_info_lock);
7039 
7040 	/* reset the ast index to flowid table */
7041 	dp_peer_reset_flowq_map(peer);
7042 
7043 	qdf_atomic_init(&peer->ref_cnt);
7044 
7045 	for (i = 0; i < DP_MOD_ID_MAX; i++)
7046 		qdf_atomic_init(&peer->mod_refs[i]);
7047 
7048 	/* keep one reference for attach */
7049 	qdf_atomic_inc(&peer->ref_cnt);
7050 	qdf_atomic_inc(&peer->mod_refs[DP_MOD_ID_CONFIG]);
7051 
7052 	dp_peer_vdev_list_add(soc, vdev, peer);
7053 
7054 	/* TODO: See if hash based search is required */
7055 	dp_peer_find_hash_add(soc, peer);
7056 
7057 	/* Initialize the peer state */
7058 	peer->state = OL_TXRX_PEER_STATE_DISC;
7059 
7060 	dp_info("vdev %pK created peer %pK ("QDF_MAC_ADDR_FMT") ref_cnt: %d",
7061 		vdev, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
7062 		qdf_atomic_read(&peer->ref_cnt));
7063 	/*
7064 	 * For every peer map message, search and set if bss_peer
7065 	 */
7066 	if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
7067 			QDF_MAC_ADDR_SIZE) == 0 &&
7068 			(wlan_op_mode_sta != vdev->opmode)) {
7069 		dp_info("vdev bss_peer!!");
7070 		peer->bss_peer = 1;
7071 		if (peer->txrx_peer)
7072 			peer->txrx_peer->bss_peer = 1;
7073 	}
7074 
7075 	if (wlan_op_mode_sta == vdev->opmode &&
7076 	    qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
7077 			QDF_MAC_ADDR_SIZE) == 0) {
7078 		peer->sta_self_peer = 1;
7079 	}
7080 
7081 	dp_peer_rx_tids_create(peer);
7082 
7083 	peer->valid = 1;
7084 	dp_local_peer_id_alloc(pdev, peer);
7085 	DP_STATS_INIT(peer);
7086 
7087 	dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
7088 
7089 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7090 
7091 	return QDF_STATUS_SUCCESS;
7092 fail:
7093 	qdf_mem_free(peer);
7094 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7095 
7096 	return QDF_STATUS_E_FAILURE;
7097 }
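
/*
 * Example (hypothetical control-path call): create a legacy link peer on
 * vdev 1; MLD peers pass CDP_MLD_PEER_TYPE instead:
 *
 *	dp_peer_create_wifi3(soc_hdl, 1, peer_mac, CDP_LINK_PEER_TYPE);
 */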
7098 
7099 static QDF_STATUS dp_peer_legacy_setup(struct dp_soc *soc, struct dp_peer *peer)
7100 {
7101 	/* txrx_peer might exist already in peer reuse case */
7102 	if (peer->txrx_peer)
7103 		return QDF_STATUS_SUCCESS;
7104 
7105 	if (dp_txrx_peer_attach(soc, peer) !=
7106 				QDF_STATUS_SUCCESS) {
7107 		dp_err("peer txrx ctx alloc failed");
7108 		return QDF_STATUS_E_FAILURE;
7109 	}
7110 
7111 	return QDF_STATUS_SUCCESS;
7112 }
7113 
7114 #ifdef WLAN_FEATURE_11BE_MLO
7115 QDF_STATUS dp_peer_mlo_setup(
7116 			struct dp_soc *soc,
7117 			struct dp_peer *peer,
7118 			uint8_t vdev_id,
7119 			struct cdp_peer_setup_info *setup_info)
7120 {
7121 	struct dp_peer *mld_peer = NULL;
7122 
7123 	/* Non-MLO connection, do nothing */
7124 	if (!setup_info || !setup_info->mld_peer_mac)
7125 		return QDF_STATUS_SUCCESS;
7126 
7127 	/* TODO: remove this check if link/mld peers may share the same mac address */
7128 	if (!qdf_mem_cmp(setup_info->mld_peer_mac, peer->mac_addr.raw,
7129 			 QDF_MAC_ADDR_SIZE)) {
7130 		dp_peer_err("Same mac address for link/mld peer");
7131 		return QDF_STATUS_E_FAILURE;
7132 	}
7133 
7134 	/* if this is the first link peer */
7135 	if (setup_info->is_first_link)
7136 		/* create MLD peer */
7137 		dp_peer_create_wifi3((struct cdp_soc_t *)soc,
7138 				     vdev_id,
7139 				     setup_info->mld_peer_mac,
7140 				     CDP_MLD_PEER_TYPE);
7141 
7142 	peer->first_link = setup_info->is_first_link;
7143 	peer->primary_link = setup_info->is_primary_link;
7144 	mld_peer = dp_peer_find_hash_find(soc,
7145 					  setup_info->mld_peer_mac,
7146 					  0, DP_VDEV_ALL, DP_MOD_ID_CDP);
7147 	if (mld_peer) {
7148 		if (setup_info->is_first_link) {
7149 			/* assign rx_tid to mld peer */
7150 			mld_peer->rx_tid = peer->rx_tid;
7151 			/* no cdp_peer_setup for MLD peer,
7152 			 * set it for addba processing
7153 			 */
7154 			qdf_atomic_set(&mld_peer->is_default_route_set, 1);
7155 		} else {
7156 			/* free the link peer's original rx_tids mem */
7157 			dp_peer_rx_tids_destroy(peer);
7158 			/* assign mld peer rx_tid to link peer */
7159 			peer->rx_tid = mld_peer->rx_tid;
7160 		}
7161 
7162 		if (setup_info->is_primary_link &&
7163 		    !setup_info->is_first_link) {
7164 			/*
7165 			 * If the first link is not the primary link,
7166 			 * mld_peer->vdev must be switched, since the
7167 			 * primary link's dp_vdev is not the one that
7168 			 * was used during mld peer creation.
7169 			 */
7170 
7171 			/* release the ref to the original dp_vdev */
7172 			dp_vdev_unref_delete(soc, mld_peer->vdev,
7173 					     DP_MOD_ID_CHILD);
7174 			/*
7175 			 * get the ref to new dp_vdev,
7176 			 * increase dp_vdev ref_cnt
7177 			 */
7178 			mld_peer->vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
7179 							       DP_MOD_ID_CHILD);
7180 		}
7181 
7182 		/* associate mld and link peer */
7183 		dp_link_peer_add_mld_peer(peer, mld_peer);
7184 		dp_mld_peer_add_link_peer(mld_peer, peer);
7185 
7186 		mld_peer->txrx_peer->mld_peer = 1;
7187 		dp_peer_unref_delete(mld_peer, DP_MOD_ID_CDP);
7188 	} else {
7189 		peer->mld_peer = NULL;
7190 		dp_err("mld peer " QDF_MAC_ADDR_FMT " not found!",
7191 		       QDF_MAC_ADDR_REF(setup_info->mld_peer_mac));
7192 		return QDF_STATUS_E_FAILURE;
7193 	}
7194 
7195 	return QDF_STATUS_SUCCESS;
7196 }
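/*
 * Illustrative MLO setup flow, as implemented above: the control path
 * invokes peer setup once per link peer with a cdp_peer_setup_info.
 * The first link additionally creates the MLD peer, which takes over
 * that link's rx_tid array; every later link frees its own rx_tids and
 * points at the MLD copy, so reorder state is shared across links.
 */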
7197 
7198 /*
7199  * dp_mlo_peer_authorize() - authorize MLO peer
7200  * @soc: soc handle
7201  * @peer: pointer to link peer
7202  *
7203  * Return: void
7204  */
7205 static void dp_mlo_peer_authorize(struct dp_soc *soc,
7206 				  struct dp_peer *peer)
7207 {
7208 	int i;
7209 	struct dp_peer *link_peer = NULL;
7210 	struct dp_peer *mld_peer = peer->mld_peer;
7211 	struct dp_mld_link_peers link_peers_info;
7212 
7213 	if (!mld_peer)
7214 		return;
7215 
7216 	/* get link peers with reference */
7217 	dp_get_link_peers_ref_from_mld_peer(soc, mld_peer,
7218 					    &link_peers_info,
7219 					    DP_MOD_ID_CDP);
7220 
7221 	for (i = 0; i < link_peers_info.num_links; i++) {
7222 		link_peer = link_peers_info.link_peers[i];
7223 
7224 		if (!link_peer->authorize) {
7225 			dp_release_link_peers_ref(&link_peers_info,
7226 						  DP_MOD_ID_CDP);
7227 			mld_peer->authorize = false;
7228 			return;
7229 		}
7230 	}
7231 
7232 	/* If we are here, all link peers are authorized;
7233 	 * authorize the ml_peer as well.
7234 	 */
7235 	mld_peer->authorize = true;
7236 
7237 	/* release link peers reference */
7238 	dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
7239 }
7240 #endif
7241 
7242 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
7243 				  enum cdp_host_reo_dest_ring *reo_dest,
7244 				  bool *hash_based)
7245 {
7246 	struct dp_soc *soc;
7247 	struct dp_pdev *pdev;
7248 
7249 	pdev = vdev->pdev;
7250 	soc = pdev->soc;
7251 	/*
7252 	 * hash based steering is disabled for Radios which are offloaded
7253 	 * to NSS
7254 	 */
7255 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
7256 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
7257 
7258 	/*
7259 	 * The line below ensures the proper reo_dest ring is chosen for
7260 	 * cases where a Toeplitz hash cannot be generated (e.g. non-TCP/UDP)
7261 	 */
7262 	*reo_dest = pdev->reo_dest;
7263 }
7264 
7265 #ifdef IPA_OFFLOAD
7266 /**
7267  * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
7268  * @vdev: Virtual device
7269  *
7270  * Return: true if the vdev is of subtype P2P
7271  *	   false if the vdev is of any other subtype
7272  */
7273 static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
7274 {
7275 	if (vdev->subtype == wlan_op_subtype_p2p_device ||
7276 	    vdev->subtype == wlan_op_subtype_p2p_cli ||
7277 	    vdev->subtype == wlan_op_subtype_p2p_go)
7278 		return true;
7279 
7280 	return false;
7281 }
7282 
7283 /*
7284  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
7285  * @vdev: Datapath VDEV handle
 * @setup_info: peer setup info from the control path (unused here)
7286  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
7287  * @hash_based: pointer to hash value (enabled/disabled) to be populated
 * @lmac_peer_id_msb: pointer to lmac peer id msb value (unused here)
7288  *
7289  * If IPA is enabled in ini, for SAP mode, disable hash based
7290  * steering and use the default reo_dest ring for RX. Use config values for other modes.
7291  * Return: None
7292  */
7293 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
7294 				       struct cdp_peer_setup_info *setup_info,
7295 				       enum cdp_host_reo_dest_ring *reo_dest,
7296 				       bool *hash_based,
7297 				       uint8_t *lmac_peer_id_msb)
7298 {
7299 	struct dp_soc *soc;
7300 	struct dp_pdev *pdev;
7301 
7302 	pdev = vdev->pdev;
7303 	soc = pdev->soc;
7304 
7305 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
7306 
7307 	/* For P2P-GO interfaces we do not need to change the REO
7308 	 * configuration even if IPA config is enabled
7309 	 */
7310 	if (dp_is_vdev_subtype_p2p(vdev))
7311 		return;
7312 
7313 	/*
7314 	 * If IPA is enabled, disable hash-based flow steering and set
7315 	 * reo_dest_ring_4 as the REO ring to receive packets on.
7316 	 * IPA is configured to reap reo_dest_ring_4.
7317 	 *
7318 	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
7319 	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
7320 	 * enum values are from 1 - 4.
7321 	 */
7322 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
7323 		if (vdev->opmode == wlan_op_mode_ap) {
7324 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
7325 			*hash_based = 0;
7326 		} else if (vdev->opmode == wlan_op_mode_sta &&
7327 			   dp_ipa_is_mdm_platform()) {
7328 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
7329 		}
7330 	}
7331 }
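/*
 * Worked example (illustrative): if IPA_REO_DEST_RING_IDX were 3, the
 * assignments above would yield *reo_dest = 4, i.e. the 1-based
 * cdp_host_reo_dest_ring value corresponding to the HW's 0-based REO
 * ring index 3 that IPA is configured to reap.
 */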
7332 
7333 #else
7334 
7335 /*
7336  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
7337  * @vdev: Datapath VDEV handle
 * @setup_info: peer setup info from the control path
7338  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
7339  * @hash_based: pointer to hash value (enabled/disabled) to be populated
 * @lmac_peer_id_msb: pointer to lmac peer id msb value to be populated
7340  *
7341  * Use system config values for hash based steering.
7342  * Return: None
7343  */
7344 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
7345 				       struct cdp_peer_setup_info *setup_info,
7346 				       enum cdp_host_reo_dest_ring *reo_dest,
7347 				       bool *hash_based,
7348 				       uint8_t *lmac_peer_id_msb)
7349 {
7350 	struct dp_soc *soc = vdev->pdev->soc;
7351 
7352 	soc->arch_ops.peer_get_reo_hash(vdev, setup_info, reo_dest, hash_based,
7353 					lmac_peer_id_msb);
7354 }
7355 #endif /* IPA_OFFLOAD */
7356 
7357 /*
7358  * dp_peer_setup_wifi3() - initialize the peer
7359  * @soc_hdl: soc handle object
7360  * @vdev_id: vdev_id of vdev object
7361  * @peer_mac: Peer's mac address
7362  * @setup_info: peer setup info for MLO
7363  *
7364  * Return: QDF_STATUS
7365  */
7366 static QDF_STATUS
7367 dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
7368 		    uint8_t *peer_mac,
7369 		    struct cdp_peer_setup_info *setup_info)
7370 {
7371 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7372 	struct dp_pdev *pdev;
7373 	bool hash_based = 0;
7374 	enum cdp_host_reo_dest_ring reo_dest;
7375 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7376 	struct dp_vdev *vdev = NULL;
7377 	struct dp_peer *peer =
7378 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
7379 					       DP_MOD_ID_CDP);
7380 	struct dp_peer *mld_peer = NULL;
7381 	enum wlan_op_mode vdev_opmode;
7382 	uint8_t lmac_peer_id_msb = 0;
7383 
7384 	if (!peer)
7385 		return QDF_STATUS_E_FAILURE;
7386 
7387 	vdev = peer->vdev;
7388 	if (!vdev) {
7389 		status = QDF_STATUS_E_FAILURE;
7390 		goto fail;
7391 	}
7392 
7393 	/* save vdev related member in case vdev freed */
7394 	vdev_opmode = vdev->opmode;
7395 	pdev = vdev->pdev;
7396 	dp_peer_setup_get_reo_hash(vdev, setup_info,
7397 				   &reo_dest, &hash_based,
7398 				   &lmac_peer_id_msb);
7399 
7400 	dp_info("pdev: %d vdev: %d opmode: %u hash-based-steering: %d default-reo_dest: %u",
7401 		pdev->pdev_id, vdev->vdev_id,
7402 		vdev->opmode, hash_based, reo_dest);
7403 
7404 	/*
7405 	 * There are corner cases where AD1 = AD2 = the VAP's address,
7406 	 * i.e. both devices have the same MAC address. In these
7407 	 * cases we want such pkts to be processed by the NULL Q handler,
7408 	 * which is the REO2TCL ring. For this reason we should
7409 	 * not set up reo_queues and a default route for the bss_peer.
7410 	 */
7411 	if (!IS_MLO_DP_MLD_PEER(peer))
7412 		dp_monitor_peer_tx_init(pdev, peer);
7413 
7414 	if (!setup_info &&
7415 	    dp_peer_legacy_setup(soc, peer) != QDF_STATUS_SUCCESS) {
7416 		status = QDF_STATUS_E_RESOURCES;
7417 		goto fail;
7418 	}
7420 
7421 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
7422 		status = QDF_STATUS_E_FAILURE;
7423 		goto fail;
7424 	}
7425 
7426 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
7427 		/* TODO: Check the destination ring number to be passed to FW */
7428 		soc->cdp_soc.ol_ops->peer_set_default_routing(
7429 				soc->ctrl_psoc,
7430 				peer->vdev->pdev->pdev_id,
7431 				peer->mac_addr.raw,
7432 				peer->vdev->vdev_id, hash_based, reo_dest,
7433 				lmac_peer_id_msb);
7434 	}
7435 
7436 	qdf_atomic_set(&peer->is_default_route_set, 1);
7437 
7438 	status = dp_peer_mlo_setup(soc, peer, vdev->vdev_id, setup_info);
7439 	if (QDF_IS_STATUS_ERROR(status)) {
7440 		dp_peer_err("peer mlo setup failed");
7441 		qdf_assert_always(0);
7442 	}
7443 
7444 	if (vdev_opmode != wlan_op_mode_monitor) {
7445 		/* In case of MLD peer, switch peer to mld peer and
7446 		 * do peer_rx_init.
7447 		 */
7448 		if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
7449 		    IS_MLO_DP_LINK_PEER(peer)) {
7450 			if (setup_info && setup_info->is_first_link) {
7451 				mld_peer = DP_GET_MLD_PEER_FROM_PEER(peer);
7452 				if (mld_peer)
7453 					dp_peer_rx_init(pdev, mld_peer);
7454 				else
7455 					dp_peer_err("MLD peer null. Primary link peer:%pK", peer);
7456 			}
7457 		} else {
7458 			dp_peer_rx_init(pdev, peer);
7459 		}
7460 	}
7461 
7462 	if (!IS_MLO_DP_MLD_PEER(peer))
7463 		dp_peer_ppdu_delayed_ba_init(peer);
7464 
7465 fail:
7466 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7467 	return status;
7468 }
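/*
 * Setup sequence summary (derived from the function above): resolve
 * the REO destination ring and hash mode, attach the legacy txrx
 * context or perform the MLO link/MLD association, program the default
 * route into FW via the peer_set_default_routing callback, and finally
 * run dp_peer_rx_init() on the peer (or on its MLD peer for the first
 * MLO link when the shared REO queue address is enabled).
 */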
7469 
7470 /*
7471  * dp_cp_peer_del_resp_handler - Handle the peer delete response
7472  * @soc_hdl: Datapath SOC handle
7473  * @vdev_id: id of virtual device object
7474  * @mac_addr: Mac address of the peer
7475  *
7476  * Return: QDF_STATUS
7477  */
7478 static QDF_STATUS dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl,
7479 					      uint8_t vdev_id,
7480 					      uint8_t *mac_addr)
7481 {
7482 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7483 	struct dp_ast_entry  *ast_entry = NULL;
7484 	txrx_ast_free_cb cb = NULL;
7485 	void *cookie;
7486 
7487 	if (soc->ast_offload_support)
7488 		return QDF_STATUS_E_INVAL;
7489 
7490 	qdf_spin_lock_bh(&soc->ast_lock);
7491 
7492 	ast_entry =
7493 		dp_peer_ast_hash_find_by_vdevid(soc, mac_addr,
7494 						vdev_id);
7495 
7496 	/* In the case of qwrap we have multiple BSS peers
7497 	 * with the same mac address.
7498 	 *
7499 	 * An AST entry for this mac address is created
7500 	 * for only one peer, hence it may be NULL here.
7501 	 */
7502 	if ((!ast_entry || !ast_entry->delete_in_progress) ||
7503 	    (ast_entry->peer_id != HTT_INVALID_PEER)) {
7504 		qdf_spin_unlock_bh(&soc->ast_lock);
7505 		return QDF_STATUS_E_FAILURE;
7506 	}
7507 
7508 	if (ast_entry->is_mapped)
7509 		soc->ast_table[ast_entry->ast_idx] = NULL;
7510 
7511 	DP_STATS_INC(soc, ast.deleted, 1);
7512 	dp_peer_ast_hash_remove(soc, ast_entry);
7513 
7514 	cb = ast_entry->callback;
7515 	cookie = ast_entry->cookie;
7516 	ast_entry->callback = NULL;
7517 	ast_entry->cookie = NULL;
7518 
7519 	soc->num_ast_entries--;
7520 	qdf_spin_unlock_bh(&soc->ast_lock);
7521 
7522 	if (cb) {
7523 		cb(soc->ctrl_psoc,
7524 		   dp_soc_to_cdp_soc(soc),
7525 		   cookie,
7526 		   CDP_TXRX_AST_DELETED);
7527 	}
7528 	qdf_mem_free(ast_entry);
7529 
7530 	return QDF_STATUS_SUCCESS;
7531 }
7532 
7533 /*
7534  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
7535  * @txrx_soc: cdp soc handle
7536  * @ac: Access category
7537  * @value: timeout value in milliseconds
7538  *
7539  * Return: void
7540  */
7541 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
7542 				    uint8_t ac, uint32_t value)
7543 {
7544 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
7545 
7546 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
7547 }
7548 
7549 /*
7550  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
7551  * @txrx_soc: cdp soc handle
7552  * @ac: access category
7553  * @value: pointer to the timeout value in milliseconds (output)
7554  *
7555  * Return: void
7556  */
7557 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
7558 				    uint8_t ac, uint32_t *value)
7559 {
7560 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
7561 
7562 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
7563 }
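/*
 * Usage sketch (illustrative; the soc handle and AC index are
 * placeholders):
 *
 *	uint32_t ba_timeout = 0;
 *
 *	dp_set_ba_aging_timeout(txrx_soc, 0, 1000);  - AC 0, 1000 ms
 *	dp_get_ba_aging_timeout(txrx_soc, 0, &ba_timeout);
 */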
7564 
7565 /*
7566  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
7567  * @txrx_soc: cdp soc handle
7568  * @pdev_id: id of physical device object
7569  * @val: reo destination ring index (1 - 4)
7570  *
7571  * Return: QDF_STATUS
7572  */
7573 static QDF_STATUS
7574 dp_set_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id,
7575 		     enum cdp_host_reo_dest_ring val)
7576 {
7577 	struct dp_pdev *pdev =
7578 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
7579 						   pdev_id);
7580 
7581 	if (pdev) {
7582 		pdev->reo_dest = val;
7583 		return QDF_STATUS_SUCCESS;
7584 	}
7585 
7586 	return QDF_STATUS_E_FAILURE;
7587 }
7588 
7589 /*
7590  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
7591  * @txrx_soc: cdp soc handle
7592  * @pdev_id: id of physical device object
7593  *
7594  * Return: reo destination ring index
7595  */
7596 static enum cdp_host_reo_dest_ring
7597 dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id)
7598 {
7599 	struct dp_pdev *pdev =
7600 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
7601 						   pdev_id);
7602 
7603 	if (pdev)
7604 		return pdev->reo_dest;
7605 	else
7606 		return cdp_host_reo_dest_ring_unknown;
7607 }
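/*
 * Usage sketch (illustrative; pdev_id 0 is a placeholder and the
 * cdp_host_reo_dest_ring_2 enum name is assumed from the cdp headers):
 *
 *	if (dp_set_pdev_reo_dest(txrx_soc, 0, cdp_host_reo_dest_ring_2) ==
 *	    QDF_STATUS_SUCCESS)
 *		QDF_ASSERT(dp_get_pdev_reo_dest(txrx_soc, 0) ==
 *			   cdp_host_reo_dest_ring_2);
 */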
7608 
7609 #ifdef WLAN_SUPPORT_SCS
7610 /*
7611  * dp_enable_scs_params - Enable/Disable SCS procedures
7612  * @soc_hdl - Datapath soc handle
7613  * @peer_mac - STA Mac address
7614  * @vdev_id - ID of the vdev handle
7615  * @is_active - Flag to set SCS active/inactive
7616  * return type - QDF_STATUS - Success/Invalid
7617  */
7618 static QDF_STATUS
7619 dp_enable_scs_params(struct cdp_soc_t *soc_hdl, struct qdf_mac_addr
7620 		     *peer_mac,
7621 		     uint8_t vdev_id,
7622 		     bool is_active)
7623 {
7624 	struct dp_peer *peer;
7625 	QDF_STATUS status = QDF_STATUS_E_INVAL;
7626 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7627 
7628 	peer = dp_peer_find_hash_find(soc, peer_mac->bytes, 0, vdev_id,
7629 				      DP_MOD_ID_CDP);
7630 
7631 	if (!peer) {
7632 		dp_err("Peer is NULL!");
7633 		goto fail;
7634 	}
7635 
7636 	peer->scs_is_active = is_active;
7637 	status = QDF_STATUS_SUCCESS;
7638 
7639 fail:
7640 	if (peer)
7641 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7642 	return status;
7643 }
7644 
7645 /*
7646  * @brief dp_copy_scs_params - Copy the SCS parameters sent by the STA
7647  * from the cdp layer to the dp layer.
7648  * These parameters are then used by the peer
7649  * for traffic classification.
7650  *
7651  * @param peer - peer struct
7652  * @param scs_params - cdp layer params
7653  * @idx - SCS_entry index obtained from the
7654  * node database with a given SCSID
7655  * @return void
7656  */
7657 void
7658 dp_copy_scs_params(struct dp_peer *peer,
7659 		   struct cdp_scs_params *scs_params,
7660 		   uint8_t idx)
7661 {
7662 	uint8_t tidx = 0;
7663 	uint8_t tclas_elem;
7664 
7665 	peer->scs[idx].scsid = scs_params->scsid;
7666 	peer->scs[idx].access_priority =
7667 		scs_params->access_priority;
7668 	peer->scs[idx].tclas_elements =
7669 		scs_params->tclas_elements;
7670 	peer->scs[idx].tclas_process =
7671 		scs_params->tclas_process;
7672 
7673 	tclas_elem = peer->scs[idx].tclas_elements;
7674 
7675 	while (tidx < tclas_elem) {
7676 		qdf_mem_copy(&peer->scs[idx].tclas[tidx],
7677 			     &scs_params->tclas[tidx],
7678 			     sizeof(struct cdp_tclas_tuple));
7679 		tidx++;
7680 	}
7681 }
7682 
7683 /*
7684  * @brief dp_record_scs_params() - Copying the SCS params to a
7685  * peer based database.
7686  *
7687  * @soc_hdl - Datapath soc handle
7688  * @peer_mac - STA Mac address
7689  * @vdev_id - ID of the vdev handle
7690  * @scs_params - Structure having SCS parameters obtained
7691  * from handshake
7692  * @idx - SCS_entry index obtained from the
7693  * node database with a given SCSID
7694  * @scs_sessions - Total # of SCS sessions active
7695  *
7696  * @details
7697  * SCS parameters sent by the STA in
7698  * the SCS Request to the AP. The AP makes a note of these
7699  * parameters while sending the MSDUs to the STA, to
7700  * send the downlink traffic with correct User priority.
7701  *
7702  * return type - QDF_STATUS - Success/Invalid
7703  */
7704 static QDF_STATUS
7705 dp_record_scs_params(struct cdp_soc_t *soc_hdl, struct qdf_mac_addr
7706 		     *peer_mac,
7707 		     uint8_t vdev_id,
7708 		     struct cdp_scs_params *scs_params,
7709 		     uint8_t idx,
7710 		     uint8_t scs_sessions)
7711 {
7712 	struct dp_peer *peer;
7713 	QDF_STATUS status = QDF_STATUS_E_INVAL;
7714 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7715 
7716 	peer = dp_peer_find_hash_find(soc, peer_mac->bytes, 0, vdev_id,
7717 				      DP_MOD_ID_CDP);
7718 
7719 	if (!peer) {
7720 		dp_err("Peer is NULL!");
7721 		goto fail;
7722 	}
7723 
7724 	if (idx >= IEEE80211_SCS_MAX_NO_OF_ELEM)
7725 		goto fail;
7726 
7727 	/* SCS procedure for the peer is activated
7728 	 * as soon as we get this information from
7729 	 * the control path, unless explicitly disabled.
7730 	 */
7731 	peer->scs_is_active = 1;
7732 	dp_copy_scs_params(peer, scs_params, idx);
7733 	status = QDF_STATUS_SUCCESS;
7734 	peer->no_of_scs_sessions = scs_sessions;
7735 
7736 fail:
7737 	if (peer)
7738 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7739 	return status;
7740 }
7741 #endif
7742 
7743 #ifdef WLAN_SUPPORT_MSCS
7744 /*
7745  * dp_record_mscs_params - MSCS parameters sent by the STA in
7746  * the MSCS Request to the AP. The AP makes a note of these
7747  * parameters while comparing the MSDUs sent by the STA, to
7748  * send the downlink traffic with correct User priority.
7749  * @soc_hdl - Datapath soc handle
7750  * @peer_mac - STA Mac address
7751  * @vdev_id - ID of the vdev handle
7752  * @mscs_params - Structure having MSCS parameters obtained
7753  * from handshake
7754  * @active - Flag to set MSCS active/inactive
7755  * return type - QDF_STATUS - Success/Invalid
7756  */
7757 static QDF_STATUS
7758 dp_record_mscs_params(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
7759 		      uint8_t vdev_id, struct cdp_mscs_params *mscs_params,
7760 		      bool active)
7761 {
7762 	struct dp_peer *peer;
7763 	QDF_STATUS status = QDF_STATUS_E_INVAL;
7764 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7765 
7766 	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
7767 				      DP_MOD_ID_CDP);
7768 
7769 	if (!peer) {
7770 		dp_err("Peer is NULL!");
7771 		goto fail;
7772 	}
7773 	if (!active) {
7774 		dp_info("MSCS Procedure is terminated");
7775 		peer->mscs_active = active;
7776 		goto fail;
7777 	}
7778 
7779 	if (mscs_params->classifier_type == IEEE80211_TCLAS_MASK_CLA_TYPE_4) {
7780 		/* Populate entries inside IPV4 database first */
7781 		peer->mscs_ipv4_parameter.user_priority_bitmap =
7782 			mscs_params->user_pri_bitmap;
7783 		peer->mscs_ipv4_parameter.user_priority_limit =
7784 			mscs_params->user_pri_limit;
7785 		peer->mscs_ipv4_parameter.classifier_mask =
7786 			mscs_params->classifier_mask;
7787 
7788 		/* Populate entries inside IPV6 database */
7789 		peer->mscs_ipv6_parameter.user_priority_bitmap =
7790 			mscs_params->user_pri_bitmap;
7791 		peer->mscs_ipv6_parameter.user_priority_limit =
7792 			mscs_params->user_pri_limit;
7793 		peer->mscs_ipv6_parameter.classifier_mask =
7794 			mscs_params->classifier_mask;
7795 		peer->mscs_active = 1;
7796 		dp_info("\n\tMSCS Procedure request based parameters for "QDF_MAC_ADDR_FMT"\n"
7797 			"\tClassifier_type = %d\tUser priority bitmap = %x\n"
7798 			"\tUser priority limit = %x\tClassifier mask = %x",
7799 			QDF_MAC_ADDR_REF(peer_mac),
7800 			mscs_params->classifier_type,
7801 			peer->mscs_ipv4_parameter.user_priority_bitmap,
7802 			peer->mscs_ipv4_parameter.user_priority_limit,
7803 			peer->mscs_ipv4_parameter.classifier_mask);
7804 	}
7805 
7806 	status = QDF_STATUS_SUCCESS;
7807 fail:
7808 	if (peer)
7809 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7810 	return status;
7811 }
7812 #endif
7813 
7814 /*
7815  * dp_get_sec_type() - Get the security type
7816  * @soc: soc handle
7817  * @vdev_id: id of dp handle
7818  * @peer_mac: mac of datapath PEER handle
7819  * @sec_idx:    Security id (mcast, ucast)
7820  *
7821  * return sec_type: Security type
7822  * Return: sec_type - Security type
7823 static int dp_get_sec_type(struct cdp_soc_t *soc, uint8_t vdev_id,
7824 			   uint8_t *peer_mac, uint8_t sec_idx)
7825 {
7826 	int sec_type = 0;
7827 	struct dp_peer *peer =
7828 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
7829 						       peer_mac, 0, vdev_id,
7830 						       DP_MOD_ID_CDP);
7831 
7832 	if (!peer) {
7833 		dp_cdp_err("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
7834 		return sec_type;
7835 	}
7836 
7837 	if (!peer->txrx_peer) {
7838 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7839 		dp_peer_debug("%pK: txrx peer is NULL!\n", soc);
7840 		return sec_type;
7841 	}
7842 	sec_type = peer->txrx_peer->security[sec_idx].sec_type;
7843 
7844 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7845 	return sec_type;
7846 }
7847 
7848 /*
7849  * dp_peer_authorize() - authorize txrx peer
7850  * @soc: soc handle
7851  * @vdev_id: id of dp handle
7852  * @peer_mac: mac of datapath PEER handle
7853  * @authorize: 1 to authorize the peer, 0 to de-authorize
7854  *
 * Return: QDF_STATUS
7855  */
7856 static QDF_STATUS
7857 dp_peer_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
7858 		  uint8_t *peer_mac, uint32_t authorize)
7859 {
7860 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7861 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7862 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac,
7863 							      0, vdev_id,
7864 							      DP_MOD_ID_CDP);
7865 
7866 	if (!peer) {
7867 		dp_cdp_debug("%pK: Peer is NULL!\n", soc);
7868 		status = QDF_STATUS_E_FAILURE;
7869 	} else {
7870 		peer->authorize = authorize ? 1 : 0;
7871 		if (peer->txrx_peer)
7872 			peer->txrx_peer->authorize = peer->authorize;
7873 
7874 		if (!peer->authorize)
7875 			dp_peer_flush_frags(soc_hdl, vdev_id, peer_mac);
7876 
7877 		dp_mlo_peer_authorize(soc, peer);
7878 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7879 	}
7880 
7881 	return status;
7882 }
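/*
 * Usage sketch (illustrative; in practice these static functions are
 * reached through the cdp ops table): authorize a peer once its keys
 * are installed, then confirm with the getter below.
 *
 *	dp_peer_authorize(soc_hdl, vdev_id, peer_mac, 1);
 *	QDF_ASSERT(dp_peer_get_authorize(soc_hdl, vdev_id, peer_mac));
 */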
7883 
7884 /*
7885  * dp_peer_get_authorize() - get peer authorize status
7886  * @soc: soc handle
7887  * @vdev_id: id of dp handle
7888  * @peer_mac: mac of datapath PEER handle
7889  *
7890  * Return: true if peer is authorized, false otherwise
7891  */
7892 static bool
7893 dp_peer_get_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
7894 		      uint8_t *peer_mac)
7895 {
7896 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7897 	bool authorize = false;
7898 	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
7899 						      0, vdev_id,
7900 						      DP_MOD_ID_CDP);
7901 
7902 	if (!peer) {
7903 		dp_cdp_debug("%pK: Peer is NULL!\n", soc);
7904 		return authorize;
7905 	}
7906 
7907 	authorize = peer->authorize;
7908 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7909 
7910 	return authorize;
7911 }
7912 
7913 /**
7914  * dp_vdev_unref_delete() - check and process vdev delete
7915  * @soc : DP specific soc pointer
7916  * @vdev: DP specific vdev pointer
7917  * @mod_id: module id
7918  *
7919  */
7920 void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev,
7921 			  enum dp_mod_id mod_id)
7922 {
7923 	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
7924 	void *vdev_delete_context = NULL;
7925 	uint8_t vdev_id = vdev->vdev_id;
7926 	struct dp_pdev *pdev = vdev->pdev;
7927 	struct dp_vdev *tmp_vdev = NULL;
7928 	uint8_t found = 0;
7929 
7930 	QDF_ASSERT(qdf_atomic_dec_return(&vdev->mod_refs[mod_id]) >= 0);
7931 
7932 	/* Return if this is not the last reference*/
7933 	if (!qdf_atomic_dec_and_test(&vdev->ref_cnt))
7934 		return;
7935 
7936 	/*
7937 	 * delete.pending should already be set, as the last reference
7938 	 * is released only after cdp_vdev_detach() is called.
7939 	 *
7940 	 * If this assert is hit there is a ref count issue.
7941 	 */
7942 	QDF_ASSERT(vdev->delete.pending);
7943 
7944 	vdev_delete_cb = vdev->delete.callback;
7945 	vdev_delete_context = vdev->delete.context;
7946 
7947 	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")- its last peer is done",
7948 		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
7949 
7950 	if (wlan_op_mode_monitor == vdev->opmode) {
7951 		dp_monitor_vdev_delete(soc, vdev);
7952 		goto free_vdev;
7953 	}
7954 
7955 	/* all peers are gone, go ahead and delete it */
7956 	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
7957 			FLOW_TYPE_VDEV, vdev_id);
7958 	dp_tx_vdev_detach(vdev);
7959 	dp_monitor_vdev_detach(vdev);
7960 
7961 free_vdev:
7962 	qdf_spinlock_destroy(&vdev->peer_list_lock);
7963 
7964 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
7965 	TAILQ_FOREACH(tmp_vdev, &soc->inactive_vdev_list,
7966 		      inactive_list_elem) {
7967 		if (tmp_vdev == vdev) {
7968 			found = 1;
7969 			break;
7970 		}
7971 	}
7972 	if (found)
7973 		TAILQ_REMOVE(&soc->inactive_vdev_list, vdev,
7974 			     inactive_list_elem);
7975 	/* vdev is now removed from the inactive vdev list */
7976 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
7977 
7978 	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")",
7979 		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
7980 	wlan_minidump_remove(vdev, sizeof(*vdev), soc->ctrl_psoc,
7981 			     WLAN_MD_DP_VDEV, "dp_vdev");
7982 	qdf_mem_free(vdev);
7983 	vdev = NULL;
7984 
7985 	if (vdev_delete_cb)
7986 		vdev_delete_cb(vdev_delete_context);
7987 }
7988 
7989 qdf_export_symbol(dp_vdev_unref_delete);
7990 
7991 /*
7992  * dp_peer_unref_delete() - unref and delete peer
7993  * @peer:    Datapath peer handle
7994  * @mod_id:         ID of module releasing reference
7995  *
7996  */
7997 void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id mod_id)
7998 {
7999 	struct dp_vdev *vdev = peer->vdev;
8000 	struct dp_pdev *pdev = vdev->pdev;
8001 	struct dp_soc *soc = pdev->soc;
8002 	uint16_t peer_id;
8003 	struct dp_peer *tmp_peer;
8004 	bool found = false;
8005 
8006 	if (mod_id > DP_MOD_ID_RX)
8007 		QDF_ASSERT(qdf_atomic_dec_return(&peer->mod_refs[mod_id]) >= 0);
8008 
8009 	/*
8010 	 * Hold the lock all the way from checking if the peer ref count
8011 	 * is zero until the peer references are removed from the hash
8012 	 * table and vdev list (if the peer ref count is zero).
8013 	 * This protects against a new HL tx operation starting to use the
8014 	 * peer object just after this function concludes that it is done being used.
8015 	 * Furthermore, the lock needs to be held while checking whether the
8016 	 * vdev's list of peers is empty, to make sure that list is not modified
8017 	 * concurrently with the empty check.
8018 	 */
8019 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
8020 		peer_id = peer->peer_id;
8021 
8022 		/*
8023 		 * Make sure that the reference to the peer in
8024 		 * peer object map is removed
8025 		 */
8026 		QDF_ASSERT(peer_id == HTT_INVALID_PEER);
8027 
8028 		dp_peer_debug("Deleting peer %pK ("QDF_MAC_ADDR_FMT")", peer,
8029 			      QDF_MAC_ADDR_REF(peer->mac_addr.raw));
8030 
8031 		wlan_minidump_remove(peer, sizeof(*peer), soc->ctrl_psoc,
8032 				     WLAN_MD_DP_PEER, "dp_peer");
8033 
8034 		qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
8035 		TAILQ_FOREACH(tmp_peer, &soc->inactive_peer_list,
8036 			      inactive_list_elem) {
8037 			if (tmp_peer == peer) {
8038 				found = 1;
8039 				break;
8040 			}
8041 		}
8042 		if (found)
8043 			TAILQ_REMOVE(&soc->inactive_peer_list, peer,
8044 				     inactive_list_elem);
8045 		/* delete this peer from the list */
8046 		qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
8047 		DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
8048 		dp_peer_update_state(soc, peer, DP_PEER_STATE_FREED);
8049 
8050 		/* cleanup the peer data */
8051 		dp_peer_cleanup(vdev, peer);
8052 
8053 		if (!IS_MLO_DP_MLD_PEER(peer))
8054 			dp_monitor_peer_detach(soc, peer);
8055 
8056 		qdf_spinlock_destroy(&peer->peer_state_lock);
8057 
8058 		dp_txrx_peer_detach(soc, peer);
8059 		qdf_mem_free(peer);
8060 
8061 		/*
8062 		 * Decrement ref count taken at peer create
8063 		 */
8064 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CHILD);
8065 	}
8066 }
8067 
8068 qdf_export_symbol(dp_peer_unref_delete);
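/*
 * Teardown summary (derived from the function above): once ref_cnt
 * reaches zero the peer is unlinked from the inactive peer list, moved
 * to DP_PEER_STATE_FREED, its per-peer contexts (rx tids, monitor,
 * txrx) are released, and the vdev reference taken at peer create
 * (DP_MOD_ID_CHILD) is dropped, which may in turn free the vdev.
 */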
8069 
8070 /*
8071  * dp_txrx_peer_unref_delete() - unref and delete peer
8072  * @handle: Datapath txrx ref handle
8073  * @mod_id: Module ID of the caller
8074  *
8075  */
8076 void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle,
8077 			       enum dp_mod_id mod_id)
8078 {
8079 	dp_peer_unref_delete((struct dp_peer *)handle, mod_id);
8080 }
8081 
8082 qdf_export_symbol(dp_txrx_peer_unref_delete);
8083 
8084 /*
8085  * dp_peer_delete_wifi3() - Delete txrx peer
8086  * @soc_hdl: soc handle
8087  * @vdev_id: id of dp handle
8088  * @peer_mac: mac of datapath PEER handle
8089  * @bitmap: bitmap indicating special handling of request.
8090  *
 * Return: QDF_STATUS
8091  */
8092 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
8093 				       uint8_t vdev_id,
8094 				       uint8_t *peer_mac, uint32_t bitmap)
8095 {
8096 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8097 	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
8098 						      0, vdev_id,
8099 						      DP_MOD_ID_CDP);
8100 	struct dp_vdev *vdev = NULL;
8101 
8102 	/* Peer can be null for monitor vap mac address */
8103 	if (!peer) {
8104 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
8105 			  "%s: Invalid peer\n", __func__);
8106 		return QDF_STATUS_E_FAILURE;
8107 	}
8108 
8109 	if (!peer->valid) {
8110 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8111 		dp_err("Invalid peer: "QDF_MAC_ADDR_FMT,
8112 			QDF_MAC_ADDR_REF(peer_mac));
8113 		return QDF_STATUS_E_ALREADY;
8114 	}
8115 
8116 	vdev = peer->vdev;
8117 
8118 	if (!vdev) {
8119 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8120 		return QDF_STATUS_E_FAILURE;
8121 	}
8122 
8123 	peer->valid = 0;
8124 
8125 	dp_init_info("%pK: peer %pK (" QDF_MAC_ADDR_FMT ")",
8126 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
8127 
8128 	dp_local_peer_id_free(peer->vdev->pdev, peer);
8129 
8130 	/* Drop all rx packets before deleting peer */
8131 	dp_clear_peer_internal(soc, peer);
8132 
8133 	qdf_spinlock_destroy(&peer->peer_info_lock);
8134 	dp_peer_multipass_list_remove(peer);
8135 
8136 	/* remove the reference to the peer from the hash table */
8137 	dp_peer_find_hash_remove(soc, peer);
8138 
8139 	dp_peer_vdev_list_remove(soc, vdev, peer);
8140 
8141 	dp_peer_mlo_delete(peer);
8142 
8143 	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
8144 	TAILQ_INSERT_TAIL(&soc->inactive_peer_list, peer,
8145 			  inactive_list_elem);
8146 	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
8147 
8148 	/*
8149 	 * Remove the reference added during peer_attach.
8150 	 * The peer will still be left allocated until the
8151 	 * PEER_UNMAP message arrives to remove the other
8152 	 * reference, added by the PEER_MAP message.
8153 	 */
8154 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
8155 	/*
8156 	 * Remove the reference taken above
8157 	 */
8158 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8159 
8160 	return QDF_STATUS_SUCCESS;
8161 }
8162 
8163 /*
8164  * dp_get_vdev_mac_addr_wifi3() - Get the MAC address of a vdev
8165  * @soc_hdl: Datapath soc handle
8166  * @vdev_id: virtual interface id
8167  *
8168  * Return: MAC address on success, NULL on failure.
8169  *
8170  */
8171 static uint8_t *dp_get_vdev_mac_addr_wifi3(struct cdp_soc_t *soc_hdl,
8172 					   uint8_t vdev_id)
8173 {
8174 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8175 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8176 						     DP_MOD_ID_CDP);
8177 	uint8_t *mac = NULL;
8178 
8179 	if (!vdev)
8180 		return NULL;
8181 
8182 	mac = vdev->mac_addr.raw;
8183 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8184 
8185 	return mac;
8186 }
8187 
8188 /*
8189  * dp_vdev_set_wds() - Enable/disable WDS on the vdev
8190  * @soc_hdl: DP soc handle
8191  * @vdev_id: id of DP VDEV handle
8192  * @val: 1 to enable WDS, 0 to disable
8193  *
8194  * Return: QDF_STATUS
8195  */
8196 static int dp_vdev_set_wds(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8197 			   uint32_t val)
8198 {
8199 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8200 	struct dp_vdev *vdev =
8201 		dp_vdev_get_ref_by_id((struct dp_soc *)soc, vdev_id,
8202 				      DP_MOD_ID_CDP);
8203 
8204 	if (!vdev)
8205 		return QDF_STATUS_E_FAILURE;
8206 
8207 	vdev->wds_enabled = val;
8208 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8209 
8210 	return QDF_STATUS_SUCCESS;
8211 }
8212 
8213 static int dp_get_opmode(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
8214 {
8215 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8216 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8217 						     DP_MOD_ID_CDP);
8218 	int opmode;
8219 
8220 	if (!vdev) {
8221 		dp_err("vdev for id %d is NULL", vdev_id);
8222 		return -EINVAL;
8223 	}
8224 	opmode = vdev->opmode;
8225 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8226 
8227 	return opmode;
8228 }
8229 
8230 /**
8231  * dp_get_os_rx_handles_from_vdev_wifi3() - Get os rx handles for a vdev
8232  * @soc_hdl: ol_txrx_soc_handle handle
8233  * @vdev_id: vdev id for which os rx handles are needed
8234  * @stack_fn_p: pointer to stack function pointer
8235  * @osif_handle_p: pointer to ol_osif_vdev_handle
8236  *
8237  * Return: void
8238  */
8239 static
8240 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_soc_t *soc_hdl,
8241 					  uint8_t vdev_id,
8242 					  ol_txrx_rx_fp *stack_fn_p,
8243 					  ol_osif_vdev_handle *osif_vdev_p)
8244 {
8245 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8246 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8247 						     DP_MOD_ID_CDP);
8248 
8249 	if (qdf_unlikely(!vdev)) {
8250 		*stack_fn_p = NULL;
8251 		*osif_vdev_p = NULL;
8252 		return;
8253 	}
8254 	*stack_fn_p = vdev->osif_rx_stack;
8255 	*osif_vdev_p = vdev->osif_vdev;
8256 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8257 }
8258 
8259 /**
8260  * dp_get_ctrl_pdev_from_vdev() - Get control pdev of vdev
8261  * @soc_hdl: datapath soc handle
8262  * @vdev_id: virtual device/interface id
8263  *
8264  * Return: Handle to control pdev
8265  */
8266 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(
8267 						struct cdp_soc_t *soc_hdl,
8268 						uint8_t vdev_id)
8269 {
8270 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8271 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8272 						     DP_MOD_ID_CDP);
8273 	struct dp_pdev *pdev;
8274 
8275 	if (!vdev)
8276 		return NULL;
8277 
8278 	pdev = vdev->pdev;
8279 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8280 	return pdev ? (struct cdp_cfg *)pdev->wlan_cfg_ctx : NULL;
8281 }
8282 
8283 /**
8284  * dp_get_tx_pending() - read pending tx
8285  * @pdev_handle: Datapath PDEV handle
8286  *
8287  * Return: outstanding tx
8288  */
8289 static int32_t dp_get_tx_pending(struct cdp_pdev *pdev_handle)
8290 {
8291 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8292 
8293 	return qdf_atomic_read(&pdev->num_tx_outstanding);
8294 }
8295 
8296 /**
8297  * dp_get_peer_mac_from_peer_id() - get peer mac
8298  * @soc: Datapath SOC handle
8299  * @peer_id: Peer ID
8300  * @peer_mac: MAC addr of PEER
8301  *
8302  * Return: QDF_STATUS
8303  */
8304 static QDF_STATUS dp_get_peer_mac_from_peer_id(struct cdp_soc_t *soc,
8305 					       uint32_t peer_id,
8306 					       uint8_t *peer_mac)
8307 {
8308 	struct dp_peer *peer;
8309 
8310 	if (soc && peer_mac) {
8311 		peer = dp_peer_get_ref_by_id((struct dp_soc *)soc,
8312 					     (uint16_t)peer_id,
8313 					     DP_MOD_ID_CDP);
8314 		if (peer) {
8315 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
8316 				     QDF_MAC_ADDR_SIZE);
8317 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8318 			return QDF_STATUS_SUCCESS;
8319 		}
8320 	}
8321 
8322 	return QDF_STATUS_E_FAILURE;
8323 }
8324 
8325 #ifdef MESH_MODE_SUPPORT
8326 static
8327 void dp_vdev_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
8328 {
8329 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
8330 
8331 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
8332 	vdev->mesh_vdev = val;
8333 	if (val)
8334 		vdev->skip_sw_tid_classification |=
8335 			DP_TX_MESH_ENABLED;
8336 	else
8337 		vdev->skip_sw_tid_classification &=
8338 			~DP_TX_MESH_ENABLED;
8339 }
8340 
8341 /*
8342  * dp_vdev_set_mesh_rx_filter() - to set the mesh rx filter
8343  * @vdev_hdl: virtual device object
8344  * @val: value to be set
8345  *
8346  * Return: void
8347  */
8348 static
8349 void dp_vdev_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
8350 {
8351 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
8352 
8353 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
8354 	vdev->mesh_rx_filter = val;
8355 }
8356 #endif
8357 
8358 /*
8359  * dp_vdev_set_hlos_tid_override() - to set hlos tid override
8360  * @vdev: virtual device object
8361  * @val: value to be set
8362  *
8363  * Return: void
8364  */
8365 static
8366 void dp_vdev_set_hlos_tid_override(struct dp_vdev *vdev, uint32_t val)
8367 {
8368 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
8369 	if (val)
8370 		vdev->skip_sw_tid_classification |=
8371 			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
8372 	else
8373 		vdev->skip_sw_tid_classification &=
8374 			~DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
8375 }
8376 
8377 /*
8378  * dp_vdev_get_hlos_tid_override() - to get hlos tid override flag
8379  * @vdev_hdl: virtual device object
8380  * @vdev_hdl: virtual device object
8382  * Return: 1 if this flag is set
8383  */
8384 static
8385 uint8_t dp_vdev_get_hlos_tid_override(struct cdp_vdev *vdev_hdl)
8386 {
8387 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
8388 
8389 	return !!(vdev->skip_sw_tid_classification &
8390 			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED);
8391 }
8392 
8393 #ifdef VDEV_PEER_PROTOCOL_COUNT
8394 static void dp_enable_vdev_peer_protocol_count(struct cdp_soc_t *soc_hdl,
8395 					       int8_t vdev_id,
8396 					       bool enable)
8397 {
8398 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8399 	struct dp_vdev *vdev;
8400 
8401 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
8402 	if (!vdev)
8403 		return;
8404 
8405 	dp_info("enable %d vdev_id %d", enable, vdev_id);
8406 	vdev->peer_protocol_count_track = enable;
8407 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8408 }
8409 
8410 static void dp_enable_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
8411 						   int8_t vdev_id,
8412 						   int drop_mask)
8413 {
8414 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8415 	struct dp_vdev *vdev;
8416 
8417 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
8418 	if (!vdev)
8419 		return;
8420 
8421 	dp_info("drop_mask %d vdev_id %d", drop_mask, vdev_id);
8422 	vdev->peer_protocol_count_dropmask = drop_mask;
8423 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8424 }
8425 
8426 static int dp_is_vdev_peer_protocol_count_enabled(struct cdp_soc_t *soc_hdl,
8427 						  int8_t vdev_id)
8428 {
8429 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8430 	struct dp_vdev *vdev;
8431 	int peer_protocol_count_track;
8432 
8433 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
8434 	if (!vdev)
8435 		return 0;
8436 
8437 	dp_info("enable %d vdev_id %d", vdev->peer_protocol_count_track,
8438 		vdev_id);
8439 	peer_protocol_count_track =
8440 		vdev->peer_protocol_count_track;
8441 
8442 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8443 	return peer_protocol_count_track;
8444 }
8445 
8446 static int dp_get_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
8447 					       int8_t vdev_id)
8448 {
8449 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8450 	struct dp_vdev *vdev;
8451 	int peer_protocol_count_dropmask;
8452 
8453 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
8454 	if (!vdev)
8455 		return 0;
8456 
8457 	dp_info("drop_mask %d vdev_id %d", vdev->peer_protocol_count_dropmask,
8458 		vdev_id);
8459 	peer_protocol_count_dropmask =
8460 		vdev->peer_protocol_count_dropmask;
8461 
8462 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8463 	return peer_protocol_count_dropmask;
8464 }
8465 
8466 #endif
8467 
8468 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data)
8469 {
8470 	uint8_t pdev_count;
8471 
8472 	for (pdev_count = 0; pdev_count < MAX_PDEV_CNT; pdev_count++) {
8473 		if (soc->pdev_list[pdev_count] &&
8474 		    soc->pdev_list[pdev_count] == data)
8475 			return true;
8476 	}
8477 	return false;
8478 }
8479 
8480 /**
8481  * dp_rx_bar_stats_cb(): BAR received stats callback
8482  * @soc: SOC handle
8483  * @cb_ctxt: Call back context
8484  * @reo_status: Reo status
8485  *
8486  * return: void
8487  */
8488 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
8489 	union hal_reo_status *reo_status)
8490 {
8491 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
8492 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
8493 
8494 	if (!dp_check_pdev_exists(soc, pdev)) {
8495 		dp_err_rl("pdev doesn't exist");
8496 		return;
8497 	}
8498 
8499 	if (!qdf_atomic_read(&soc->cmn_init_done))
8500 		return;
8501 
8502 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
8503 		DP_PRINT_STATS("REO stats failure %d",
8504 			       queue_status->header.status);
8505 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
8506 		return;
8507 	}
8508 
8509 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
8510 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
8511 
8512 }
8513 
8514 /**
8515  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
8516  * @vdev: DP VDEV handle
 * @vdev_stats: buffer to hold the aggregated vdev stats
8517  *
8518  * return: void
8519  */
8520 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
8521 			     struct cdp_vdev_stats *vdev_stats)
8522 {
8523 	struct dp_soc *soc = NULL;
8524 
8525 	if (!vdev || !vdev->pdev)
8526 		return;
8527 
8528 	soc = vdev->pdev->soc;
8529 
8530 	dp_update_vdev_ingress_stats(vdev);
8531 
8532 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
8533 
8534 	dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, vdev_stats,
8535 			     DP_MOD_ID_GENERIC_STATS);
8536 
8537 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
8538 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
8539 			     vdev_stats, vdev->vdev_id,
8540 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
8541 #endif
8542 }
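/*
 * Aggregation hierarchy (derived from the code): per-peer stats are
 * folded into the cdp_vdev_stats buffer via dp_vdev_iterate_peer(),
 * and dp_aggregate_pdev_stats() below walks pdev->vdev_list under
 * vdev_list_lock to fold each vdev aggregate into the pdev totals.
 */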
8543 
8544 void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
8545 {
8546 	struct dp_vdev *vdev = NULL;
8547 	struct dp_soc *soc;
8548 	struct cdp_vdev_stats *vdev_stats =
8549 			qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
8550 
8551 	if (!vdev_stats) {
8552 		dp_cdp_err("%pK: DP alloc failure - unable to allocate vdev stats",
8553 			   pdev->soc);
8554 		return;
8555 	}
8556 
8557 	soc = pdev->soc;
8558 
8559 	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
8560 	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
8561 	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
8562 	qdf_mem_zero(&pdev->stats.rx_i, sizeof(pdev->stats.rx_i));
8563 
8564 	if (dp_monitor_is_enable_mcopy_mode(pdev))
8565 		dp_monitor_invalid_peer_update_pdev_stats(soc, pdev);
8566 
8567 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
8568 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
8569 
8570 		dp_aggregate_vdev_stats(vdev, vdev_stats);
8571 		dp_update_pdev_stats(pdev, vdev_stats);
8572 		dp_update_pdev_ingress_stats(pdev, vdev);
8573 	}
8574 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
8575 	qdf_mem_free(vdev_stats);
8576 
8577 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
8578 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
8579 			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
8580 #endif
8581 }
8582 
8583 /**
8584  * dp_vdev_getstats() - get vdev packet level stats
8585  * @vdev_handle: Datapath VDEV handle
8586  * @stats: cdp network device stats structure
8587  *
8588  * Return: QDF_STATUS
8589  */
8590 static QDF_STATUS dp_vdev_getstats(struct cdp_vdev *vdev_handle,
8591 				   struct cdp_dev_stats *stats)
8592 {
8593 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8594 	struct dp_pdev *pdev;
8595 	struct dp_soc *soc;
8596 	struct cdp_vdev_stats *vdev_stats;
8597 
8598 	if (!vdev)
8599 		return QDF_STATUS_E_FAILURE;
8600 
8601 	pdev = vdev->pdev;
8602 	if (!pdev)
8603 		return QDF_STATUS_E_FAILURE;
8604 
8605 	soc = pdev->soc;
8606 
8607 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
8608 
8609 	if (!vdev_stats) {
8610 		dp_cdp_err("%pK: DP alloc failure - unable to allocate vdev stats",
8611 			   soc);
8612 		return QDF_STATUS_E_FAILURE;
8613 	}
8614 
8615 	dp_aggregate_vdev_stats(vdev, vdev_stats);
8616 
8617 	stats->tx_packets = vdev_stats->tx.comp_pkt.num;
8618 	stats->tx_bytes = vdev_stats->tx.comp_pkt.bytes;
8619 
8620 	stats->tx_errors = vdev_stats->tx.tx_failed;
8621 	stats->tx_dropped = vdev_stats->tx_i.dropped.dropped_pkt.num +
8622 			    vdev_stats->tx_i.sg.dropped_host.num +
8623 			    vdev_stats->tx_i.mcast_en.dropped_map_error +
8624 			    vdev_stats->tx_i.mcast_en.dropped_self_mac +
8625 			    vdev_stats->tx_i.mcast_en.dropped_send_fail +
8626 			    vdev_stats->tx.nawds_mcast_drop;
8627 
8628 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx)) {
8629 		stats->rx_packets = vdev_stats->rx.to_stack.num;
8630 		stats->rx_bytes = vdev_stats->rx.to_stack.bytes;
8631 	} else {
8632 		stats->rx_packets = vdev_stats->rx_i.reo_rcvd_pkt.num +
8633 				    vdev_stats->rx_i.null_q_desc_pkt.num +
8634 				    vdev_stats->rx_i.routed_eapol_pkt.num;
8635 		stats->rx_bytes = vdev_stats->rx_i.reo_rcvd_pkt.bytes +
8636 				  vdev_stats->rx_i.null_q_desc_pkt.bytes +
8637 				  vdev_stats->rx_i.routed_eapol_pkt.bytes;
8638 	}
8639 
8640 	stats->rx_errors = vdev_stats->rx.err.mic_err +
8641 			   vdev_stats->rx.err.decrypt_err +
8642 			   vdev_stats->rx.err.fcserr +
8643 			   vdev_stats->rx.err.pn_err +
8644 			   vdev_stats->rx.err.oor_err +
8645 			   vdev_stats->rx.err.jump_2k_err +
8646 			   vdev_stats->rx.err.rxdma_wifi_parse_err;
8647 
8648 	stats->rx_dropped = vdev_stats->rx.mec_drop.num +
8649 			    vdev_stats->rx.multipass_rx_pkt_drop +
8650 			    vdev_stats->rx.peer_unauth_rx_pkt_drop +
8651 			    vdev_stats->rx.policy_check_drop +
8652 			    vdev_stats->rx.nawds_mcast_drop;
8653 
8654 	qdf_mem_free(vdev_stats);
8655 
8656 	return QDF_STATUS_SUCCESS;
8657 }
8658 
8659 /**
8660  * dp_pdev_getstats() - get pdev packet level stats
8661  * @pdev_handle: Datapath PDEV handle
8662  * @stats: cdp network device stats structure
8663  *
8664  * Return: none
8665  */
8666 static void dp_pdev_getstats(struct cdp_pdev *pdev_handle,
8667 			     struct cdp_dev_stats *stats)
8668 {
8669 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8670 
8671 	dp_aggregate_pdev_stats(pdev);
8672 
8673 	stats->tx_packets = pdev->stats.tx.comp_pkt.num;
8674 	stats->tx_bytes = pdev->stats.tx.comp_pkt.bytes;
8675 
8676 	stats->tx_errors = pdev->stats.tx.tx_failed;
8677 	stats->tx_dropped = pdev->stats.tx_i.dropped.dropped_pkt.num +
8678 			    pdev->stats.tx_i.sg.dropped_host.num +
8679 			    pdev->stats.tx_i.mcast_en.dropped_map_error +
8680 			    pdev->stats.tx_i.mcast_en.dropped_self_mac +
8681 			    pdev->stats.tx_i.mcast_en.dropped_send_fail +
8682 			    pdev->stats.tx.nawds_mcast_drop +
8683 			    pdev->stats.tso_stats.dropped_host.num;
8684 
8685 	if (!wlan_cfg_get_vdev_stats_hw_offload_config(pdev->soc->wlan_cfg_ctx)) {
8686 		stats->rx_packets = pdev->stats.rx.to_stack.num;
8687 		stats->rx_bytes = pdev->stats.rx.to_stack.bytes;
8688 	} else {
8689 		stats->rx_packets = pdev->stats.rx_i.reo_rcvd_pkt.num +
8690 				    pdev->stats.rx_i.null_q_desc_pkt.num +
8691 				    pdev->stats.rx_i.routed_eapol_pkt.num;
8692 		stats->rx_bytes = pdev->stats.rx_i.reo_rcvd_pkt.bytes +
8693 				  pdev->stats.rx_i.null_q_desc_pkt.bytes +
8694 				  pdev->stats.rx_i.routed_eapol_pkt.bytes;
8695 	}
8696 
8697 	stats->rx_errors = pdev->stats.err.ip_csum_err +
8698 		pdev->stats.err.tcp_udp_csum_err +
8699 		pdev->stats.rx.err.mic_err +
8700 		pdev->stats.rx.err.decrypt_err +
8701 		pdev->stats.rx.err.fcserr +
8702 		pdev->stats.rx.err.pn_err +
8703 		pdev->stats.rx.err.oor_err +
8704 		pdev->stats.rx.err.jump_2k_err +
8705 		pdev->stats.rx.err.rxdma_wifi_parse_err;
8706 	stats->rx_dropped = pdev->stats.dropped.msdu_not_done +
8707 		pdev->stats.dropped.mec +
8708 		pdev->stats.dropped.mesh_filter +
8709 		pdev->stats.dropped.wifi_parse +
8710 		pdev->stats.dropped.mon_rx_drop +
8711 		pdev->stats.dropped.mon_radiotap_update_err +
8712 		pdev->stats.rx.mec_drop.num +
8713 		pdev->stats.rx.multipass_rx_pkt_drop +
8714 		pdev->stats.rx.peer_unauth_rx_pkt_drop +
8715 		pdev->stats.rx.policy_check_drop +
8716 		pdev->stats.rx.nawds_mcast_drop;
8717 }
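/*
 * Note (derived from the code above): when vdev stats HW offload is
 * enabled, rx_packets/rx_bytes are summed from the ingress counters
 * (reo_rcvd_pkt, null_q_desc_pkt, routed_eapol_pkt) instead of the
 * to_stack counters, presumably because to_stack accounting moves to
 * the target in that configuration.
 */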
8718 
8719 /**
8720  * dp_get_device_stats() - get interface level packet stats
8721  * @soc_hdl: soc handle
8722  * @id: vdev_id or pdev_id based on type
8723  * @stats: cdp network device stats structure
8724  * @type: device type pdev/vdev
8725  *
8726  * Return: QDF_STATUS
8727  */
8728 static QDF_STATUS dp_get_device_stats(struct cdp_soc_t *soc_hdl, uint8_t id,
8729 				      struct cdp_dev_stats *stats,
8730 				      uint8_t type)
8731 {
8732 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8733 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
8734 	struct dp_vdev *vdev;
8735 
8736 	switch (type) {
8737 	case UPDATE_VDEV_STATS:
8738 		vdev = dp_vdev_get_ref_by_id(soc, id, DP_MOD_ID_CDP);
8739 
8740 		if (vdev) {
8741 			status = dp_vdev_getstats((struct cdp_vdev *)vdev,
8742 						  stats);
8743 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8744 		}
8745 		return status;
8746 	case UPDATE_PDEV_STATS:
8747 		{
8748 			struct dp_pdev *pdev =
8749 				dp_get_pdev_from_soc_pdev_id_wifi3(
8750 						(struct dp_soc *)soc,
8751 						 id);
8752 			if (pdev) {
8753 				dp_pdev_getstats((struct cdp_pdev *)pdev,
8754 						 stats);
8755 				return QDF_STATUS_SUCCESS;
8756 			}
8757 		}
8758 		break;
8759 	default:
8760 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
8761 			"apstats cannot be updated for this input "
8762 			"type %d", type);
8763 		break;
8764 	}
8765 
8766 	return QDF_STATUS_E_FAILURE;
8767 }
8768 
8769 const
8770 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
8771 {
8772 	switch (ring_type) {
8773 	case REO_DST:
8774 		return "Reo_dst";
8775 	case REO_EXCEPTION:
8776 		return "Reo_exception";
8777 	case REO_CMD:
8778 		return "Reo_cmd";
8779 	case REO_REINJECT:
8780 		return "Reo_reinject";
8781 	case REO_STATUS:
8782 		return "Reo_status";
8783 	case WBM2SW_RELEASE:
8784 		return "wbm2sw_release";
8785 	case TCL_DATA:
8786 		return "tcl_data";
8787 	case TCL_CMD_CREDIT:
8788 		return "tcl_cmd_credit";
8789 	case TCL_STATUS:
8790 		return "tcl_status";
8791 	case SW2WBM_RELEASE:
8792 		return "sw2wbm_release";
8793 	case RXDMA_BUF:
8794 		return "Rxdma_buf";
8795 	case RXDMA_DST:
8796 		return "Rxdma_dst";
8797 	case RXDMA_MONITOR_BUF:
8798 		return "Rxdma_monitor_buf";
8799 	case RXDMA_MONITOR_DESC:
8800 		return "Rxdma_monitor_desc";
8801 	case RXDMA_MONITOR_STATUS:
8802 		return "Rxdma_monitor_status";
8803 	case RXDMA_MONITOR_DST:
8804 		return "Rxdma_monitor_destination";
8805 	case WBM_IDLE_LINK:
8806 		return "WBM_hw_idle_link";
8807 	default:
8808 		dp_err("Invalid ring type");
8809 		break;
8810 	}
8811 	return "Invalid";
8812 }
8813 
8814 /*
8815  * dp_print_napi_stats(): NAPI stats
8816  * @soc - soc handle
8817  */
8818 void dp_print_napi_stats(struct dp_soc *soc)
8819 {
8820 	hif_print_napi_stats(soc->hif_handle);
8821 }
8822 
8823 /**
8824  * dp_txrx_host_peer_stats_clr() - Reinitialize the txrx peer stats
8825  * @soc: Datapath soc
8826  * @peer: Datapath peer
8827  * @arg: argument to iter function
8828  *
8829  * Return: void
8830  */
8831 static inline void
8832 dp_txrx_host_peer_stats_clr(struct dp_soc *soc,
8833 			    struct dp_peer *peer,
8834 			    void *arg)
8835 {
8836 	struct dp_txrx_peer *txrx_peer = NULL;
8837 	struct dp_peer *tgt_peer = NULL;
8838 	struct cdp_interface_peer_stats peer_stats_intf;
8839 
8840 	qdf_mem_zero(&peer_stats_intf, sizeof(struct cdp_interface_peer_stats));
8841 
8842 	DP_STATS_CLR(peer);
8843 	/* Clear monitor peer stats */
8844 	dp_monitor_peer_reset_stats(soc, peer);
8845 
8846 	/* Clear MLD peer stats only when link peer is primary */
8847 	if (dp_peer_is_primary_link_peer(peer)) {
8848 		tgt_peer = dp_get_tgt_peer_from_peer(peer);
8849 		if (tgt_peer) {
8850 			DP_STATS_CLR(tgt_peer);
8851 			txrx_peer = tgt_peer->txrx_peer;
8852 			dp_txrx_peer_stats_clr(txrx_peer);
8853 		}
8854 	}
8855 
8856 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
8857 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, peer->vdev->pdev->soc,
8858 			     &peer_stats_intf,  peer->peer_id,
8859 			     UPDATE_PEER_STATS, peer->vdev->pdev->pdev_id);
8860 #endif
8861 }
8862 
8863 /**
8864  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
8865  * @vdev: DP_VDEV handle
8866  * @dp_soc: DP_SOC handle
8867  * @soc: DP_SOC handle
8868  * Return: QDF_STATUS
8869  */
8870 static inline QDF_STATUS
8871 dp_txrx_host_stats_clr(struct dp_vdev *vdev, struct dp_soc *soc)
8872 {
8873 	if (!vdev || !vdev->pdev)
8874 		return QDF_STATUS_E_FAILURE;
8875 
8876 	/*
8877 	 * If NSS offload is enabled, send a message to the NSS FW to
8878 	 * clear the stats. Once the NSS FW clears the statistics,
8879 	 * clear the host statistics as well.
8880 	 */
8881 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
8882 		if (soc->cdp_soc.ol_ops->nss_stats_clr)
8883 			soc->cdp_soc.ol_ops->nss_stats_clr(soc->ctrl_psoc,
8884 							   vdev->vdev_id);
8885 	}
8886 
8887 	dp_vdev_stats_hw_offload_target_clear(soc, vdev->pdev->pdev_id,
8888 					      (1 << vdev->vdev_id));
8889 
8890 	DP_STATS_CLR(vdev->pdev);
8891 	DP_STATS_CLR(vdev->pdev->soc);
8892 	DP_STATS_CLR(vdev);
8893 
8894 	hif_clear_napi_stats(vdev->pdev->soc->hif_handle);
8895 
8896 	dp_vdev_iterate_peer(vdev, dp_txrx_host_peer_stats_clr, NULL,
8897 			     DP_MOD_ID_GENERIC_STATS);
8898 
8899 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
8900 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
8901 			     &vdev->stats,  vdev->vdev_id,
8902 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
8903 #endif
8904 	return QDF_STATUS_SUCCESS;
8905 }
8906 
8907 /**
8908  * dp_get_peer_calibr_stats()- Get peer calibrated stats
8909  * @peer: Datapath peer
8910  * @peer_stats: buffer for peer stats
8911  *
8912  * Return: none
8913  */
8914 static inline
8915 void dp_get_peer_calibr_stats(struct dp_peer *peer,
8916 			      struct cdp_peer_stats *peer_stats)
8917 {
8918 	peer_stats->tx.last_per = peer->stats.tx.last_per;
8919 	peer_stats->tx.tx_bytes_success_last =
8920 					peer->stats.tx.tx_bytes_success_last;
8921 	peer_stats->tx.tx_data_success_last =
8922 					peer->stats.tx.tx_data_success_last;
8923 	peer_stats->tx.tx_byte_rate = peer->stats.tx.tx_byte_rate;
8924 	peer_stats->tx.tx_data_rate = peer->stats.tx.tx_data_rate;
8925 	peer_stats->tx.tx_data_ucast_last = peer->stats.tx.tx_data_ucast_last;
8926 	peer_stats->tx.tx_data_ucast_rate = peer->stats.tx.tx_data_ucast_rate;
8927 	peer_stats->tx.inactive_time = peer->stats.tx.inactive_time;
8928 	peer_stats->rx.rx_bytes_success_last =
8929 					peer->stats.rx.rx_bytes_success_last;
8930 	peer_stats->rx.rx_data_success_last =
8931 					peer->stats.rx.rx_data_success_last;
8932 	peer_stats->rx.rx_byte_rate = peer->stats.rx.rx_byte_rate;
8933 	peer_stats->rx.rx_data_rate = peer->stats.rx.rx_data_rate;
8934 }
8935 
8936 /**
8937  * dp_get_peer_basic_stats()- Get peer basic stats
8938  * @peer: Datapath peer
8939  * @peer_stats: buffer for peer stats
8940  *
8941  * Return: none
8942  */
8943 static inline
8944 void dp_get_peer_basic_stats(struct dp_peer *peer,
8945 			     struct cdp_peer_stats *peer_stats)
8946 {
8947 	struct dp_txrx_peer *txrx_peer;
8948 
8949 	txrx_peer = peer->txrx_peer;
8950 	if (!txrx_peer)
8951 		return;
8952 
8953 	peer_stats->tx.comp_pkt.num += txrx_peer->comp_pkt.num;
8954 	peer_stats->tx.comp_pkt.bytes += txrx_peer->comp_pkt.bytes;
8955 	peer_stats->tx.tx_failed += txrx_peer->tx_failed;
8956 	peer_stats->rx.to_stack.num += txrx_peer->to_stack.num;
8957 	peer_stats->rx.to_stack.bytes += txrx_peer->to_stack.bytes;
8958 }
8959 
8960 /**
8961  * dp_get_peer_per_pkt_stats()- Get peer per pkt stats
8962  * @peer: Datapath peer
8963  * @peer_stats: buffer for peer stats
8964  *
8965  * Return: none
8966  */
8967 static inline
8968 void dp_get_peer_per_pkt_stats(struct dp_peer *peer,
8969 			       struct cdp_peer_stats *peer_stats)
8970 {
8971 	struct dp_txrx_peer *txrx_peer;
8972 	struct dp_peer_per_pkt_stats *per_pkt_stats;
8973 
8974 	txrx_peer = peer->txrx_peer;
8975 	if (!txrx_peer)
8976 		return;
8977 
8978 	per_pkt_stats = &txrx_peer->stats.per_pkt_stats;
8979 	DP_UPDATE_PER_PKT_STATS(peer_stats, per_pkt_stats);
8980 }
8981 
8982 /**
8983  * dp_get_peer_extd_stats()- Get peer extd stats
8984  * @peer: Datapath peer
8985  * @peer_stats: buffer for peer stats
8986  *
8987  * Return: none
8988  */
8989 #ifdef QCA_ENHANCED_STATS_SUPPORT
8990 #ifdef WLAN_FEATURE_11BE_MLO
8991 static inline
8992 void dp_get_peer_extd_stats(struct dp_peer *peer,
8993 			    struct cdp_peer_stats *peer_stats)
8994 {
8995 	struct dp_soc *soc = peer->vdev->pdev->soc;
8996 
8997 	if (IS_MLO_DP_MLD_PEER(peer)) {
8998 		uint8_t i;
8999 		struct dp_peer *link_peer;
9000 		struct dp_soc *link_peer_soc;
9001 		struct dp_mld_link_peers link_peers_info;
9002 
9003 		dp_get_link_peers_ref_from_mld_peer(soc, peer,
9004 						    &link_peers_info,
9005 						    DP_MOD_ID_CDP);
9006 		for (i = 0; i < link_peers_info.num_links; i++) {
9007 			link_peer = link_peers_info.link_peers[i];
9008 			link_peer_soc = link_peer->vdev->pdev->soc;
9009 			dp_monitor_peer_get_stats(link_peer_soc, link_peer,
9010 						  peer_stats,
9011 						  UPDATE_PEER_STATS);
9012 		}
9013 		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
9014 	} else {
9015 		dp_monitor_peer_get_stats(soc, peer, peer_stats,
9016 					  UPDATE_PEER_STATS);
9017 	}
9018 }
9019 #else
9020 static inline
9021 void dp_get_peer_extd_stats(struct dp_peer *peer,
9022 			    struct cdp_peer_stats *peer_stats)
9023 {
9024 	struct dp_soc *soc = peer->vdev->pdev->soc;
9025 
9026 	dp_monitor_peer_get_stats(soc, peer, peer_stats, UPDATE_PEER_STATS);
9027 }
9028 #endif
9029 #else
9030 static inline
9031 void dp_get_peer_extd_stats(struct dp_peer *peer,
9032 			    struct cdp_peer_stats *peer_stats)
9033 {
9034 	struct dp_txrx_peer *txrx_peer;
9035 	struct dp_peer_extd_stats *extd_stats;
9036 
9037 	txrx_peer = peer->txrx_peer;
9038 	if (!txrx_peer)
9039 		return;
9040 
9041 	extd_stats = &txrx_peer->stats.extd_stats;
9042 	DP_UPDATE_EXTD_STATS(peer_stats, extd_stats);
9043 }
9044 #endif
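
/*
 * Note: in the MLO build above, extended stats for an MLD peer are
 * accumulated by walking every link peer (each potentially on a
 * different soc), so peer_stats reflects the union of all links.
 */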
9045 
9046 /**
9047  * dp_get_peer_stats()- Get peer stats
9048  * @peer: Datapath peer
9049  * @peer_stats: buffer for peer stats
9050  *
9051  * Return: none
9052  */
9053 static inline
9054 void dp_get_peer_stats(struct dp_peer *peer, struct cdp_peer_stats *peer_stats)
9055 {
9056 	dp_get_peer_calibr_stats(peer, peer_stats);
9057 
9058 	dp_get_peer_basic_stats(peer, peer_stats);
9059 
9060 	dp_get_peer_per_pkt_stats(peer, peer_stats);
9061 
9062 	dp_get_peer_extd_stats(peer, peer_stats);
9063 }
9064 
9065 /*
9066  * dp_get_host_peer_stats()- function to print peer stats
9067  * @soc: dp_soc handle
9068  * @mac_addr: mac address of the peer
9069  *
9070  * Return: QDF_STATUS
9071  */
9072 static QDF_STATUS
9073 dp_get_host_peer_stats(struct cdp_soc_t *soc, uint8_t *mac_addr)
9074 {
9075 	struct dp_peer *peer = NULL;
9076 	struct cdp_peer_stats *peer_stats = NULL;
9077 
9078 	if (!mac_addr) {
9079 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9080 			  "%s: NULL peer mac addr\n", __func__);
9081 		return QDF_STATUS_E_FAILURE;
9082 	}
9083 
9084 	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
9085 				      mac_addr, 0,
9086 				      DP_VDEV_ALL,
9087 				      DP_MOD_ID_CDP);
9088 	if (!peer) {
9089 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9090 			  "%s: Invalid peer\n", __func__);
9091 		return QDF_STATUS_E_FAILURE;
9092 	}
9093 
9094 	peer_stats = qdf_mem_malloc(sizeof(struct cdp_peer_stats));
9095 	if (!peer_stats) {
9096 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
9097 			  "%s: Memory allocation failed for cdp_peer_stats\n",
9098 			  __func__);
9099 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9100 		return QDF_STATUS_E_NOMEM;
9101 	}
9102 
9103 	qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats));
9104 
9105 	dp_get_peer_stats(peer, peer_stats);
9106 	dp_print_peer_stats(peer, peer_stats);
9107 
9108 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
9109 
9110 	qdf_mem_free(peer_stats);
9111 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9112 
9113 	return QDF_STATUS_SUCCESS;
9114 }
9115 
9116 /* *
9117  * dp_dump_wbm_idle_hptp() -dump wbm idle ring, hw hp tp info.
9118  * @soc: dp soc.
9119  * @pdev: dp pdev.
9120  *
9121  * Return: None.
9122  */
9123 static void
9124 dp_dump_wbm_idle_hptp(struct dp_soc *soc, struct dp_pdev *pdev)
9125 {
9126 	uint32_t hw_head;
9127 	uint32_t hw_tail;
9128 	struct dp_srng *srng;
9129 
9130 	if (!soc) {
9131 		dp_err("soc is NULL");
9132 		return;
9133 	}
9134 
9135 	if (!pdev) {
9136 		dp_err("pdev is NULL");
9137 		return;
9138 	}
9139 
9140 	srng = &pdev->soc->wbm_idle_link_ring;
9141 	if (!srng) {
9142 		dp_err("wbm_idle_link_ring srng is NULL");
9143 		return;
9144 	}
9145 
9146 	hal_get_hw_hptp(soc->hal_soc, srng->hal_srng, &hw_head,
9147 			&hw_tail, WBM_IDLE_LINK);
9148 
9149 	dp_debug("WBM_IDLE_LINK: HW hp: %d, HW tp: %d",
9150 			hw_head, hw_tail);
9151 }
9152 
9153 
9154 /**
9155  * dp_txrx_stats_help() - Helper function for Txrx_Stats
9156  *
9157  * Return: None
9158  */
9159 static void dp_txrx_stats_help(void)
9160 {
9161 	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
9162 	dp_info("stats_option:");
9163 	dp_info("  1 -- HTT Tx Statistics");
9164 	dp_info("  2 -- HTT Rx Statistics");
9165 	dp_info("  3 -- HTT Tx HW Queue Statistics");
9166 	dp_info("  4 -- HTT Tx HW Sched Statistics");
9167 	dp_info("  5 -- HTT Error Statistics");
9168 	dp_info("  6 -- HTT TQM Statistics");
9169 	dp_info("  7 -- HTT TQM CMDQ Statistics");
9170 	dp_info("  8 -- HTT TX_DE_CMN Statistics");
9171 	dp_info("  9 -- HTT Tx Rate Statistics");
9172 	dp_info(" 10 -- HTT Rx Rate Statistics");
9173 	dp_info(" 11 -- HTT Peer Statistics");
9174 	dp_info(" 12 -- HTT Tx SelfGen Statistics");
9175 	dp_info(" 13 -- HTT Tx MU HWQ Statistics");
9176 	dp_info(" 14 -- HTT RING_IF_INFO Statistics");
9177 	dp_info(" 15 -- HTT SRNG Statistics");
9178 	dp_info(" 16 -- HTT SFM Info Statistics");
9179 	dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
9180 	dp_info(" 18 -- HTT Peer List Details");
9181 	dp_info(" 20 -- Clear Host Statistics");
9182 	dp_info(" 21 -- Host Rx Rate Statistics");
9183 	dp_info(" 22 -- Host Tx Rate Statistics");
9184 	dp_info(" 23 -- Host Tx Statistics");
9185 	dp_info(" 24 -- Host Rx Statistics");
9186 	dp_info(" 25 -- Host AST Statistics");
9187 	dp_info(" 26 -- Host SRNG PTR Statistics");
9188 	dp_info(" 27 -- Host Mon Statistics");
9189 	dp_info(" 28 -- Host REO Queue Statistics");
9190 	dp_info(" 29 -- Host Soc cfg param Statistics");
9191 	dp_info(" 30 -- Host pdev cfg param Statistics");
9192 	dp_info(" 31 -- Host FISA stats");
9193 	dp_info(" 32 -- Host Register Work stats");
9194 }
9195 
9196 /**
9197  * dp_print_host_stats()- Function to print the stats aggregated at host
 * @vdev: DP_VDEV handle
 * @req: host stats type
 * @soc: dp soc handler
 *
 * Return: 0 on success; prints help text for an unrecognized stats option
9203  */
9204 static int
9205 dp_print_host_stats(struct dp_vdev *vdev,
9206 		    struct cdp_txrx_stats_req *req,
9207 		    struct dp_soc *soc)
9208 {
9209 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
9210 	enum cdp_host_txrx_stats type =
9211 			dp_stats_mapping_table[req->stats][STATS_HOST];
9212 
9213 	dp_aggregate_pdev_stats(pdev);
9214 
9215 	switch (type) {
9216 	case TXRX_CLEAR_STATS:
9217 		dp_txrx_host_stats_clr(vdev, soc);
9218 		break;
9219 	case TXRX_RX_RATE_STATS:
9220 		dp_print_rx_rates(vdev);
9221 		break;
9222 	case TXRX_TX_RATE_STATS:
9223 		dp_print_tx_rates(vdev);
9224 		break;
9225 	case TXRX_TX_HOST_STATS:
9226 		dp_print_pdev_tx_stats(pdev);
9227 		dp_print_soc_tx_stats(pdev->soc);
9228 		break;
9229 	case TXRX_RX_HOST_STATS:
9230 		dp_print_pdev_rx_stats(pdev);
9231 		dp_print_soc_rx_stats(pdev->soc);
9232 		break;
9233 	case TXRX_AST_STATS:
9234 		dp_print_ast_stats(pdev->soc);
9235 		dp_print_mec_stats(pdev->soc);
9236 		dp_print_peer_table(vdev);
9237 		break;
9238 	case TXRX_SRNG_PTR_STATS:
9239 		dp_print_ring_stats(pdev);
9240 		break;
9241 	case TXRX_RX_MON_STATS:
9242 		dp_monitor_print_pdev_rx_mon_stats(pdev);
9243 		break;
9244 	case TXRX_REO_QUEUE_STATS:
9245 		dp_get_host_peer_stats((struct cdp_soc_t *)pdev->soc,
9246 				       req->peer_addr);
9247 		break;
9248 	case TXRX_SOC_CFG_PARAMS:
9249 		dp_print_soc_cfg_params(pdev->soc);
9250 		break;
9251 	case TXRX_PDEV_CFG_PARAMS:
9252 		dp_print_pdev_cfg_params(pdev);
9253 		break;
9254 	case TXRX_NAPI_STATS:
9255 		dp_print_napi_stats(pdev->soc);
9256 		break;
9257 	case TXRX_SOC_INTERRUPT_STATS:
9258 		dp_print_soc_interrupt_stats(pdev->soc);
9259 		break;
9260 	case TXRX_SOC_FSE_STATS:
9261 		dp_rx_dump_fisa_table(pdev->soc);
9262 		break;
9263 	case TXRX_HAL_REG_WRITE_STATS:
9264 		hal_dump_reg_write_stats(pdev->soc->hal_soc);
9265 		hal_dump_reg_write_srng_stats(pdev->soc->hal_soc);
9266 		break;
9267 	case TXRX_SOC_REO_HW_DESC_DUMP:
9268 		dp_get_rx_reo_queue_info((struct cdp_soc_t *)pdev->soc,
9269 					 vdev->vdev_id);
9270 		break;
9271 	case TXRX_SOC_WBM_IDLE_HPTP_DUMP:
9272 		dp_dump_wbm_idle_hptp(pdev->soc, pdev);
9273 		break;
9274 	default:
9275 		dp_info("Wrong Input For TxRx Host Stats");
9276 		dp_txrx_stats_help();
9277 		break;
9278 	}
9279 	return 0;
9280 }
9281 
9282 /*
 * dp_pdev_tid_stats_ingress_inc(): increment the ingress_stack counter
 * @pdev: pdev handle
 * @val: amount to increase by
9286  *
9287  * Return: void
9288  */
9289 static void
9290 dp_pdev_tid_stats_ingress_inc(struct dp_pdev *pdev, uint32_t val)
9291 {
9292 	pdev->stats.tid_stats.ingress_stack += val;
9293 }
9294 
9295 /*
 * dp_pdev_tid_stats_osif_drop(): increment the osif_drop counter
 * @pdev: pdev handle
 * @val: amount to increase by
9299  *
9300  * Return: void
9301  */
9302 static void
9303 dp_pdev_tid_stats_osif_drop(struct dp_pdev *pdev, uint32_t val)
9304 {
9305 	pdev->stats.tid_stats.osif_drop += val;
9306 }
9307 
9308 /*
9309  * dp_get_fw_peer_stats()- function to print peer stats
9310  * @soc: soc handle
9311  * @pdev_id : id of the pdev handle
9312  * @mac_addr: mac address of the peer
9313  * @cap: Type of htt stats requested
9314  * @is_wait: if set, wait on completion from firmware response
9315  *
 * Currently supports only MAC ID based requests.
9317  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
9318  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
9319  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
9320  *
9321  * Return: QDF_STATUS
9322  */
9323 static QDF_STATUS
9324 dp_get_fw_peer_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
9325 		     uint8_t *mac_addr,
9326 		     uint32_t cap, uint32_t is_wait)
9327 {
9328 	int i;
9329 	uint32_t config_param0 = 0;
9330 	uint32_t config_param1 = 0;
9331 	uint32_t config_param2 = 0;
9332 	uint32_t config_param3 = 0;
9333 	struct dp_pdev *pdev =
9334 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9335 						   pdev_id);
9336 
9337 	if (!pdev)
9338 		return QDF_STATUS_E_FAILURE;
9339 
9340 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
9341 	config_param0 |= (1 << (cap + 1));
9342 
9343 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
9344 		config_param1 |= (1 << i);
9345 	}
9346 
9347 	config_param2 |= (mac_addr[0] & 0x000000ff);
9348 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
9349 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
9350 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
9351 
9352 	config_param3 |= (mac_addr[4] & 0x000000ff);
9353 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
9354 
9355 	if (is_wait) {
9356 		qdf_event_reset(&pdev->fw_peer_stats_event);
9357 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
9358 					  config_param0, config_param1,
9359 					  config_param2, config_param3,
9360 					  0, DBG_STATS_COOKIE_DP_STATS, 0);
9361 		qdf_wait_single_event(&pdev->fw_peer_stats_event,
9362 				      DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
9363 	} else {
9364 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
9365 					  config_param0, config_param1,
9366 					  config_param2, config_param3,
9367 					  0, DBG_STATS_COOKIE_DEFAULT, 0);
9368 	}
9369 
9370 	return QDF_STATUS_SUCCESS;
9371 
9372 }
9373 
/* This struct definition will be removed from here
 * once it gets added in FW headers
 */
struct httstats_cmd_req {
	uint32_t config_param0;
	uint32_t config_param1;
	uint32_t config_param2;
	uint32_t config_param3;
	int cookie;
	uint8_t stats_id;
};
9384 
9385 /*
 * dp_get_htt_stats: function to process the HTT stats request
 * @soc: DP soc handle
 * @pdev_id: id of pdev handle
 * @data: pointer to request data
 * @data_len: length of request data
 *
 * Return: QDF_STATUS
9393  */
9394 static QDF_STATUS
9395 dp_get_htt_stats(struct cdp_soc_t *soc, uint8_t pdev_id, void *data,
9396 		 uint32_t data_len)
9397 {
9398 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
9399 	struct dp_pdev *pdev =
9400 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9401 						   pdev_id);
9402 
9403 	if (!pdev)
9404 		return QDF_STATUS_E_FAILURE;
9405 
9406 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
9407 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
9408 				req->config_param0, req->config_param1,
9409 				req->config_param2, req->config_param3,
9410 				req->cookie, DBG_STATS_COOKIE_DEFAULT, 0);
9411 
9412 	return QDF_STATUS_SUCCESS;
9413 }
9414 
9415 /**
9416  * dp_set_pdev_tidmap_prty_wifi3(): update tidmap priority in pdev
9417  * @pdev: DP_PDEV handle
9418  * @prio: tidmap priority value passed by the user
9419  *
9420  * Return: QDF_STATUS_SUCCESS on success
9421  */
9422 static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct dp_pdev *pdev,
9423 						uint8_t prio)
9424 {
9425 	struct dp_soc *soc = pdev->soc;
9426 
9427 	soc->tidmap_prty = prio;
9428 
9429 	hal_tx_set_tidmap_prty(soc->hal_soc, prio);
9430 	return QDF_STATUS_SUCCESS;
9431 }
9432 
9433 /*
9434  * dp_get_peer_param: function to get parameters in peer
9435  * @cdp_soc: DP soc handle
9436  * @vdev_id: id of vdev handle
9437  * @peer_mac: peer mac address
 * @param: parameter type to be read
 * @val: address of buffer to hold the value
 *
 * Return: QDF_STATUS
9442  */
9443 static QDF_STATUS dp_get_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
9444 				    uint8_t *peer_mac,
9445 				    enum cdp_peer_param_type param,
9446 				    cdp_config_param_type *val)
9447 {
9448 	return QDF_STATUS_SUCCESS;
9449 }
9450 
9451 /*
9452  * dp_set_peer_param: function to set parameters in peer
9453  * @cdp_soc: DP soc handle
9454  * @vdev_id: id of vdev handle
9455  * @peer_mac: peer mac address
9456  * @param: parameter type to be set
9457  * @val: value of parameter to be set
9458  *
9459  * Return: 0 for success. nonzero for failure.
9460  */
9461 static QDF_STATUS dp_set_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
9462 				    uint8_t *peer_mac,
9463 				    enum cdp_peer_param_type param,
9464 				    cdp_config_param_type val)
9465 {
9466 	struct dp_peer *peer =
9467 			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
9468 						       peer_mac, 0, vdev_id,
9469 						       DP_MOD_ID_CDP);
9470 	struct dp_txrx_peer *txrx_peer;
9471 
9472 	if (!peer)
9473 		return QDF_STATUS_E_FAILURE;
9474 
9475 	txrx_peer = peer->txrx_peer;
9476 	if (!txrx_peer) {
9477 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9478 		return QDF_STATUS_E_FAILURE;
9479 	}
9480 
9481 	switch (param) {
9482 	case CDP_CONFIG_NAWDS:
9483 		txrx_peer->nawds_enabled = val.cdp_peer_param_nawds;
9484 		break;
9485 	case CDP_CONFIG_ISOLATION:
9486 		dp_set_peer_isolation(txrx_peer, val.cdp_peer_param_isolation);
9487 		break;
9488 	case CDP_CONFIG_IN_TWT:
9489 		txrx_peer->in_twt = !!(val.cdp_peer_param_in_twt);
9490 		break;
9491 	default:
9492 		break;
9493 	}
9494 
9495 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9496 
9497 	return QDF_STATUS_SUCCESS;
9498 }
9499 
9500 /*
9501  * dp_get_pdev_param: function to get parameters from pdev
9502  * @cdp_soc: DP soc handle
9503  * @pdev_id: id of pdev handle
 * @param: parameter type to be read
 * @val: buffer for the value
 *
 * Return: QDF_STATUS
9508  */
9509 static QDF_STATUS dp_get_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
9510 				    enum cdp_pdev_param_type param,
9511 				    cdp_config_param_type *val)
9512 {
9513 	struct cdp_pdev *pdev = (struct cdp_pdev *)
9514 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
9515 						   pdev_id);
9516 	if (!pdev)
9517 		return QDF_STATUS_E_FAILURE;
9518 
9519 	switch (param) {
9520 	case CDP_CONFIG_VOW:
9521 		val->cdp_pdev_param_cfg_vow =
9522 				((struct dp_pdev *)pdev)->delay_stats_flag;
9523 		break;
9524 	case CDP_TX_PENDING:
9525 		val->cdp_pdev_param_tx_pending = dp_get_tx_pending(pdev);
9526 		break;
9527 	case CDP_FILTER_MCAST_DATA:
9528 		val->cdp_pdev_param_fltr_mcast =
9529 				dp_monitor_pdev_get_filter_mcast_data(pdev);
9530 		break;
9531 	case CDP_FILTER_NO_DATA:
9532 		val->cdp_pdev_param_fltr_none =
9533 				dp_monitor_pdev_get_filter_non_data(pdev);
9534 		break;
9535 	case CDP_FILTER_UCAST_DATA:
9536 		val->cdp_pdev_param_fltr_ucast =
9537 				dp_monitor_pdev_get_filter_ucast_data(pdev);
9538 		break;
9539 	default:
9540 		return QDF_STATUS_E_FAILURE;
9541 	}
9542 
9543 	return QDF_STATUS_SUCCESS;
9544 }
9545 
9546 /*
9547  * dp_set_pdev_param: function to set parameters in pdev
9548  * @cdp_soc: DP soc handle
9549  * @pdev_id: id of pdev handle
9550  * @param: parameter type to be set
9551  * @val: value of parameter to be set
9552  *
9553  * Return: 0 for success. nonzero for failure.
9554  */
9555 static QDF_STATUS dp_set_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
9556 				    enum cdp_pdev_param_type param,
9557 				    cdp_config_param_type val)
9558 {
9559 	int target_type;
9560 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
9561 	struct dp_pdev *pdev =
9562 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
9563 						   pdev_id);
9564 	enum reg_wifi_band chan_band;
9565 
9566 	if (!pdev)
9567 		return QDF_STATUS_E_FAILURE;
9568 
9569 	target_type = hal_get_target_type(soc->hal_soc);
9570 	switch (target_type) {
9571 	case TARGET_TYPE_QCA6750:
9572 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC0_LMAC_ID;
9573 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
9574 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
9575 		break;
9576 	case TARGET_TYPE_KIWI:
9577 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC0_LMAC_ID;
9578 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
9579 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
9580 		break;
9581 	default:
9582 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MAC1_LMAC_ID;
9583 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MAC0_LMAC_ID;
9584 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MAC0_LMAC_ID;
9585 		break;
9586 	}
9587 
9588 	switch (param) {
9589 	case CDP_CONFIG_TX_CAPTURE:
9590 		return dp_monitor_config_debug_sniffer(pdev,
9591 						val.cdp_pdev_param_tx_capture);
9592 	case CDP_CONFIG_DEBUG_SNIFFER:
9593 		return dp_monitor_config_debug_sniffer(pdev,
9594 						val.cdp_pdev_param_dbg_snf);
9595 	case CDP_CONFIG_BPR_ENABLE:
9596 		return dp_monitor_set_bpr_enable(pdev,
9597 						 val.cdp_pdev_param_bpr_enable);
9598 	case CDP_CONFIG_PRIMARY_RADIO:
9599 		pdev->is_primary = val.cdp_pdev_param_primary_radio;
9600 		break;
9601 	case CDP_CONFIG_CAPTURE_LATENCY:
9602 		pdev->latency_capture_enable = val.cdp_pdev_param_cptr_latcy;
9603 		break;
9604 	case CDP_INGRESS_STATS:
9605 		dp_pdev_tid_stats_ingress_inc(pdev,
9606 					      val.cdp_pdev_param_ingrs_stats);
9607 		break;
9608 	case CDP_OSIF_DROP:
9609 		dp_pdev_tid_stats_osif_drop(pdev,
9610 					    val.cdp_pdev_param_osif_drop);
9611 		break;
9612 	case CDP_CONFIG_ENH_RX_CAPTURE:
9613 		return dp_monitor_config_enh_rx_capture(pdev,
9614 						val.cdp_pdev_param_en_rx_cap);
9615 	case CDP_CONFIG_ENH_TX_CAPTURE:
9616 		return dp_monitor_config_enh_tx_capture(pdev,
9617 						val.cdp_pdev_param_en_tx_cap);
9618 	case CDP_CONFIG_HMMC_TID_OVERRIDE:
9619 		pdev->hmmc_tid_override_en = val.cdp_pdev_param_hmmc_tid_ovrd;
9620 		break;
9621 	case CDP_CONFIG_HMMC_TID_VALUE:
9622 		pdev->hmmc_tid = val.cdp_pdev_param_hmmc_tid;
9623 		break;
9624 	case CDP_CHAN_NOISE_FLOOR:
9625 		pdev->chan_noise_floor = val.cdp_pdev_param_chn_noise_flr;
9626 		break;
9627 	case CDP_TIDMAP_PRTY:
9628 		dp_set_pdev_tidmap_prty_wifi3(pdev,
9629 					      val.cdp_pdev_param_tidmap_prty);
9630 		break;
9631 	case CDP_FILTER_NEIGH_PEERS:
9632 		dp_monitor_set_filter_neigh_peers(pdev,
9633 					val.cdp_pdev_param_fltr_neigh_peers);
9634 		break;
9635 	case CDP_MONITOR_CHANNEL:
9636 		dp_monitor_set_chan_num(pdev, val.cdp_pdev_param_monitor_chan);
9637 		break;
9638 	case CDP_MONITOR_FREQUENCY:
9639 		chan_band = wlan_reg_freq_to_band(val.cdp_pdev_param_mon_freq);
9640 		dp_monitor_set_chan_freq(pdev, val.cdp_pdev_param_mon_freq);
9641 		dp_monitor_set_chan_band(pdev, chan_band);
9642 		break;
9643 	case CDP_CONFIG_BSS_COLOR:
9644 		dp_monitor_set_bsscolor(pdev, val.cdp_pdev_param_bss_color);
9645 		break;
9646 	case CDP_SET_ATF_STATS_ENABLE:
9647 		dp_monitor_set_atf_stats_enable(pdev,
9648 					val.cdp_pdev_param_atf_stats_enable);
9649 		break;
9650 	case CDP_CONFIG_SPECIAL_VAP:
9651 		dp_monitor_pdev_config_scan_spcl_vap(pdev,
9652 					val.cdp_pdev_param_config_special_vap);
9653 		dp_monitor_vdev_set_monitor_mode_buf_rings(pdev);
9654 		break;
9655 	case CDP_RESET_SCAN_SPCL_VAP_STATS_ENABLE:
9656 		dp_monitor_pdev_reset_scan_spcl_vap_stats_enable(pdev,
9657 				val.cdp_pdev_param_reset_scan_spcl_vap_stats_enable);
9658 		break;
9659 	case CDP_CONFIG_ENHANCED_STATS_ENABLE:
9660 		pdev->enhanced_stats_en = val.cdp_pdev_param_enhanced_stats_enable;
9661 		break;
9662 	case CDP_ISOLATION:
9663 		pdev->isolation = val.cdp_pdev_param_isolation;
9664 		break;
9665 	case CDP_CONFIG_UNDECODED_METADATA_CAPTURE_ENABLE:
9666 		return dp_monitor_config_undecoded_metadata_capture(pdev,
9667 				val.cdp_pdev_param_undecoded_metadata_enable);
9669 	default:
9670 		return QDF_STATUS_E_INVAL;
9671 	}
9672 	return QDF_STATUS_SUCCESS;
9673 }
9674 
9675 #ifdef QCA_UNDECODED_METADATA_SUPPORT
9676 static
9677 QDF_STATUS dp_set_pdev_phyrx_error_mask(struct cdp_soc_t *cdp_soc,
9678 					uint8_t pdev_id, uint32_t mask,
9679 					uint32_t mask_cont)
9680 {
9681 	struct dp_pdev *pdev =
9682 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
9683 						   pdev_id);
9684 
9685 	if (!pdev)
9686 		return QDF_STATUS_E_FAILURE;
9687 
9688 	return dp_monitor_config_undecoded_metadata_phyrx_error_mask(pdev,
9689 				mask, mask_cont);
9690 }
9691 
9692 static
9693 QDF_STATUS dp_get_pdev_phyrx_error_mask(struct cdp_soc_t *cdp_soc,
9694 					uint8_t pdev_id, uint32_t *mask,
9695 					uint32_t *mask_cont)
9696 {
9697 	struct dp_pdev *pdev =
9698 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
9699 						   pdev_id);
9700 
9701 	if (!pdev)
9702 		return QDF_STATUS_E_FAILURE;
9703 
9704 	return dp_monitor_get_undecoded_metadata_phyrx_error_mask(pdev,
9705 				mask, mask_cont);
9706 }
9707 #endif
9708 
9709 #ifdef QCA_PEER_EXT_STATS
9710 static void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
9711 					  qdf_nbuf_t nbuf)
9712 {
9713 	struct dp_peer *peer = NULL;
9714 	uint16_t peer_id, ring_id;
9715 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
9716 	struct dp_peer_delay_stats *delay_stats = NULL;
9717 
9718 	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
9719 	if (peer_id > soc->max_peer_id)
9720 		return;
9721 
9722 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
9723 	if (qdf_unlikely(!peer))
9724 		return;
9725 
9726 	if (qdf_unlikely(!peer->txrx_peer)) {
9727 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9728 		return;
9729 	}
9730 
9731 	if (qdf_likely(peer->txrx_peer->delay_stats)) {
9732 		delay_stats = peer->txrx_peer->delay_stats;
9733 		ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
9734 		dp_rx_compute_tid_delay(&delay_stats->delay_tid_stats[tid][ring_id],
9735 					nbuf);
9736 	}
9737 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9738 }
9739 #else
9740 static inline void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
9741 						 qdf_nbuf_t nbuf)
9742 {
9743 }
9744 #endif
9745 
9746 /*
 * dp_calculate_delay_stats: function to compute rx delay stats
9748  * @cdp_soc: DP soc handle
9749  * @vdev_id: id of DP vdev handle
9750  * @nbuf: skb
9751  *
9752  * Return: QDF_STATUS
9753  */
9754 static QDF_STATUS
9755 dp_calculate_delay_stats(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
9756 			 qdf_nbuf_t nbuf)
9757 {
9758 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
9759 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9760 						     DP_MOD_ID_CDP);
9761 
9762 	if (!vdev)
9763 		return QDF_STATUS_SUCCESS;
9764 
9765 	if (vdev->pdev->delay_stats_flag)
9766 		dp_rx_compute_delay(vdev, nbuf);
9767 	else
9768 		dp_rx_update_peer_delay_stats(soc, nbuf);
9769 
9770 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9771 	return QDF_STATUS_SUCCESS;
9772 }
9773 
9774 /*
9775  * dp_get_vdev_param: function to get parameters from vdev
 * @cdp_soc: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @param: parameter type to be read
 * @val: buffer address to hold the value
 *
 * Return: QDF_STATUS
9782  */
9783 static QDF_STATUS dp_get_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
9784 				    enum cdp_vdev_param_type param,
9785 				    cdp_config_param_type *val)
9786 {
9787 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
9788 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9789 						     DP_MOD_ID_CDP);
9790 
9791 	if (!vdev)
9792 		return QDF_STATUS_E_FAILURE;
9793 
9794 	switch (param) {
9795 	case CDP_ENABLE_WDS:
9796 		val->cdp_vdev_param_wds = vdev->wds_enabled;
9797 		break;
9798 	case CDP_ENABLE_MEC:
9799 		val->cdp_vdev_param_mec = vdev->mec_enabled;
9800 		break;
9801 	case CDP_ENABLE_DA_WAR:
9802 		val->cdp_vdev_param_da_war = vdev->pdev->soc->da_war_enabled;
9803 		break;
9804 	case CDP_ENABLE_IGMP_MCAST_EN:
9805 		val->cdp_vdev_param_igmp_mcast_en = vdev->igmp_mcast_enhanc_en;
9806 		break;
9807 	case CDP_ENABLE_MCAST_EN:
9808 		val->cdp_vdev_param_mcast_en = vdev->mcast_enhancement_en;
9809 		break;
9810 	case CDP_ENABLE_HLOS_TID_OVERRIDE:
9811 		val->cdp_vdev_param_hlos_tid_override =
9812 			    dp_vdev_get_hlos_tid_override((struct cdp_vdev *)vdev);
9813 		break;
9814 	case CDP_ENABLE_PEER_AUTHORIZE:
9815 		val->cdp_vdev_param_peer_authorize =
9816 			    vdev->peer_authorize;
9817 		break;
9818 #ifdef WLAN_SUPPORT_MESH_LATENCY
9819 	case CDP_ENABLE_PEER_TID_LATENCY:
9820 		val->cdp_vdev_param_peer_tid_latency_enable =
9821 			vdev->peer_tid_latency_enabled;
9822 		break;
9823 	case CDP_SET_VAP_MESH_TID:
9824 		val->cdp_vdev_param_mesh_tid =
9825 				vdev->mesh_tid_latency_config.latency_tid;
9826 		break;
9827 #endif
9828 	default:
9829 		dp_cdp_err("%pK: param value %d is wrong",
9830 			   soc, param);
9831 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9832 		return QDF_STATUS_E_FAILURE;
9833 	}
9834 
9835 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9836 	return QDF_STATUS_SUCCESS;
9837 }
9838 
9839 /*
9840  * dp_set_vdev_param: function to set parameters in vdev
 * @cdp_soc: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @param: parameter type to be set
 * @val: value of parameter to be set
 *
 * Return: QDF_STATUS
9847  */
9848 static QDF_STATUS
9849 dp_set_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
9850 		  enum cdp_vdev_param_type param, cdp_config_param_type val)
9851 {
9852 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
9853 	struct dp_vdev *vdev =
9854 		dp_vdev_get_ref_by_id(dsoc, vdev_id, DP_MOD_ID_CDP);
9855 	uint32_t var = 0;
9856 
9857 	if (!vdev)
9858 		return QDF_STATUS_E_FAILURE;
9859 
9860 	switch (param) {
9861 	case CDP_ENABLE_WDS:
9862 		dp_cdp_err("%pK: wds_enable %d for vdev(%pK) id(%d)\n",
9863 			   dsoc, val.cdp_vdev_param_wds, vdev, vdev->vdev_id);
9864 		vdev->wds_enabled = val.cdp_vdev_param_wds;
9865 		break;
9866 	case CDP_ENABLE_MEC:
9867 		dp_cdp_err("%pK: mec_enable %d for vdev(%pK) id(%d)\n",
9868 			   dsoc, val.cdp_vdev_param_mec, vdev, vdev->vdev_id);
9869 		vdev->mec_enabled = val.cdp_vdev_param_mec;
9870 		break;
9871 	case CDP_ENABLE_DA_WAR:
9872 		dp_cdp_err("%pK: da_war_enable %d for vdev(%pK) id(%d)\n",
9873 			   dsoc, val.cdp_vdev_param_da_war, vdev, vdev->vdev_id);
9874 		vdev->pdev->soc->da_war_enabled = val.cdp_vdev_param_da_war;
9875 		dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
9876 					     vdev->pdev->soc));
9877 		break;
9878 	case CDP_ENABLE_NAWDS:
9879 		vdev->nawds_enabled = val.cdp_vdev_param_nawds;
9880 		break;
9881 	case CDP_ENABLE_MCAST_EN:
9882 		vdev->mcast_enhancement_en = val.cdp_vdev_param_mcast_en;
9883 		break;
9884 	case CDP_ENABLE_IGMP_MCAST_EN:
9885 		vdev->igmp_mcast_enhanc_en = val.cdp_vdev_param_igmp_mcast_en;
9886 		break;
9887 	case CDP_ENABLE_PROXYSTA:
9888 		vdev->proxysta_vdev = val.cdp_vdev_param_proxysta;
9889 		break;
9890 	case CDP_UPDATE_TDLS_FLAGS:
9891 		vdev->tdls_link_connected = val.cdp_vdev_param_tdls_flags;
9892 		break;
9893 	case CDP_CFG_WDS_AGING_TIMER:
9894 		var = val.cdp_vdev_param_aging_tmr;
9895 		if (!var)
9896 			qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
9897 		else if (var != vdev->wds_aging_timer_val)
9898 			qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, var);
9899 
9900 		vdev->wds_aging_timer_val = var;
9901 		break;
9902 	case CDP_ENABLE_AP_BRIDGE:
9903 		if (wlan_op_mode_sta != vdev->opmode)
9904 			vdev->ap_bridge_enabled = val.cdp_vdev_param_ap_brdg_en;
9905 		else
9906 			vdev->ap_bridge_enabled = false;
9907 		break;
9908 	case CDP_ENABLE_CIPHER:
9909 		vdev->sec_type = val.cdp_vdev_param_cipher_en;
9910 		break;
9911 	case CDP_ENABLE_QWRAP_ISOLATION:
9912 		vdev->isolation_vdev = val.cdp_vdev_param_qwrap_isolation;
9913 		break;
9914 	case CDP_UPDATE_MULTIPASS:
9915 		vdev->multipass_en = val.cdp_vdev_param_update_multipass;
9916 		break;
9917 	case CDP_TX_ENCAP_TYPE:
9918 		vdev->tx_encap_type = val.cdp_vdev_param_tx_encap;
9919 		break;
9920 	case CDP_RX_DECAP_TYPE:
9921 		vdev->rx_decap_type = val.cdp_vdev_param_rx_decap;
9922 		break;
9923 	case CDP_TID_VDEV_PRTY:
9924 		vdev->tidmap_prty = val.cdp_vdev_param_tidmap_prty;
9925 		break;
9926 	case CDP_TIDMAP_TBL_ID:
9927 		vdev->tidmap_tbl_id = val.cdp_vdev_param_tidmap_tbl_id;
9928 		break;
9929 #ifdef MESH_MODE_SUPPORT
9930 	case CDP_MESH_RX_FILTER:
9931 		dp_vdev_set_mesh_rx_filter((struct cdp_vdev *)vdev,
9932 					   val.cdp_vdev_param_mesh_rx_filter);
9933 		break;
9934 	case CDP_MESH_MODE:
9935 		dp_vdev_set_mesh_mode((struct cdp_vdev *)vdev,
9936 				      val.cdp_vdev_param_mesh_mode);
9937 		break;
9938 #endif
9939 	case CDP_ENABLE_HLOS_TID_OVERRIDE:
9940 		dp_info("vdev_id %d enable hlod tid override %d", vdev_id,
9941 			val.cdp_vdev_param_hlos_tid_override);
9942 		dp_vdev_set_hlos_tid_override(vdev,
9943 				val.cdp_vdev_param_hlos_tid_override);
9944 		break;
9945 #ifdef QCA_SUPPORT_WDS_EXTENDED
9946 	case CDP_CFG_WDS_EXT:
9947 		vdev->wds_ext_enabled = val.cdp_vdev_param_wds_ext;
9948 		break;
9949 #endif
9950 	case CDP_ENABLE_PEER_AUTHORIZE:
9951 		vdev->peer_authorize = val.cdp_vdev_param_peer_authorize;
9952 		break;
9953 #ifdef WLAN_SUPPORT_MESH_LATENCY
9954 	case CDP_ENABLE_PEER_TID_LATENCY:
9955 		dp_info("vdev_id %d enable peer tid latency %d", vdev_id,
9956 			val.cdp_vdev_param_peer_tid_latency_enable);
9957 		vdev->peer_tid_latency_enabled =
9958 			val.cdp_vdev_param_peer_tid_latency_enable;
9959 		break;
9960 	case CDP_SET_VAP_MESH_TID:
9961 		dp_info("vdev_id %d enable peer tid latency %d", vdev_id,
9962 			val.cdp_vdev_param_mesh_tid);
9963 		vdev->mesh_tid_latency_config.latency_tid
9964 				= val.cdp_vdev_param_mesh_tid;
9965 		break;
9966 #endif
9967 #ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE
9968 	case CDP_SKIP_BAR_UPDATE_AP:
9969 		dp_info("vdev_id %d skip BAR update: %u", vdev_id,
9970 			val.cdp_skip_bar_update);
9971 		vdev->skip_bar_update = val.cdp_skip_bar_update;
9972 		vdev->skip_bar_update_last_ts = 0;
9973 		break;
9974 #endif
9975 	default:
9976 		break;
9977 	}
9978 
9979 	dsoc->arch_ops.txrx_set_vdev_param(dsoc, vdev, param, val);
9980 	dp_tx_vdev_update_search_flags((struct dp_vdev *)vdev);
9981 	dp_vdev_unref_delete(dsoc, vdev, DP_MOD_ID_CDP);
9982 
9983 	return QDF_STATUS_SUCCESS;
9984 }
9985 
9986 /*
9987  * dp_set_psoc_param: function to set parameters in psoc
 * @cdp_soc: DP soc handle
 * @param: parameter type to be set
 * @val: value of parameter to be set
 *
 * Return: QDF_STATUS
9993  */
9994 static QDF_STATUS
9995 dp_set_psoc_param(struct cdp_soc_t *cdp_soc,
9996 		  enum cdp_psoc_param_type param, cdp_config_param_type val)
9997 {
9998 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
9999 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->wlan_cfg_ctx;
10000 
10001 	switch (param) {
10002 	case CDP_ENABLE_RATE_STATS:
10003 		soc->rdkstats_enabled = val.cdp_psoc_param_en_rate_stats;
10004 		break;
10005 	case CDP_SET_NSS_CFG:
10006 		wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx,
10007 					    val.cdp_psoc_param_en_nss_cfg);
10008 		/*
10009 		 * TODO: masked out based on the per offloaded radio
10010 		 */
10011 		switch (val.cdp_psoc_param_en_nss_cfg) {
10012 		case dp_nss_cfg_default:
10013 			break;
10014 		case dp_nss_cfg_first_radio:
10015 		/*
10016 		 * This configuration is valid for single band radio which
10017 		 * is also NSS offload.
10018 		 */
10019 		case dp_nss_cfg_dbdc:
10020 		case dp_nss_cfg_dbtc:
10021 			wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
10022 			wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
10023 			wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
10024 			wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
10025 			break;
10026 		default:
10027 			dp_cdp_err("%pK: Invalid offload config %d",
10028 				   soc, val.cdp_psoc_param_en_nss_cfg);
10029 		}
10030 
10031 			dp_cdp_err("%pK: nss-wifi<0> nss config is enabled"
10032 				   , soc);
10033 		break;
10034 	case CDP_SET_PREFERRED_HW_MODE:
10035 		soc->preferred_hw_mode = val.cdp_psoc_param_preferred_hw_mode;
10036 		break;
10037 	case CDP_IPA_ENABLE:
10038 		soc->wlan_cfg_ctx->ipa_enabled = val.cdp_ipa_enabled;
10039 		break;
10040 	case CDP_CFG_VDEV_STATS_HW_OFFLOAD:
10041 		wlan_cfg_set_vdev_stats_hw_offload_config(wlan_cfg_ctx,
10042 				val.cdp_psoc_param_vdev_stats_hw_offload);
10043 		break;
10044 	case CDP_SAWF_ENABLE:
10045 		wlan_cfg_set_sawf_config(wlan_cfg_ctx, val.cdp_sawf_enabled);
10046 		break;
10047 	default:
10048 		break;
10049 	}
10050 
10051 	return QDF_STATUS_SUCCESS;
10052 }
10053 
10054 /*
10055  * dp_get_psoc_param: function to get parameters in soc
 * @cdp_soc: DP soc handle
 * @param: parameter type to be read
 * @val: address of buffer to hold the value
 *
 * Return: QDF_STATUS
10061  */
10062 static QDF_STATUS dp_get_psoc_param(struct cdp_soc_t *cdp_soc,
10063 				    enum cdp_psoc_param_type param,
10064 				    cdp_config_param_type *val)
10065 {
10066 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
10067 
10068 	if (!soc)
10069 		return QDF_STATUS_E_FAILURE;
10070 
10071 	switch (param) {
10072 	case CDP_CFG_PEER_EXT_STATS:
10073 		val->cdp_psoc_param_pext_stats =
10074 			wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
10075 		break;
10076 	case CDP_CFG_VDEV_STATS_HW_OFFLOAD:
10077 		val->cdp_psoc_param_vdev_stats_hw_offload =
10078 			wlan_cfg_get_vdev_stats_hw_offload_config(soc->wlan_cfg_ctx);
10079 		break;
10080 	default:
10081 		dp_warn("Invalid param");
10082 		break;
10083 	}
10084 
10085 	return QDF_STATUS_SUCCESS;
10086 }
10087 
10088 /*
 * dp_set_vdev_dscp_tid_map_wifi3(): Update map ID selected for a particular vdev
 * @cdp_soc: DP soc handle
 * @vdev_id: id of DP_VDEV handle
 * @map_id: ID of map that needs to be updated
10093  *
10094  * Return: QDF_STATUS
10095  */
10096 static QDF_STATUS dp_set_vdev_dscp_tid_map_wifi3(ol_txrx_soc_handle cdp_soc,
10097 						 uint8_t vdev_id,
10098 						 uint8_t map_id)
10099 {
10100 	cdp_config_param_type val;
10101 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
10102 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10103 						     DP_MOD_ID_CDP);
10104 	if (vdev) {
10105 		vdev->dscp_tid_map_id = map_id;
10106 		val.cdp_vdev_param_dscp_tid_map_id = map_id;
10107 		soc->arch_ops.txrx_set_vdev_param(soc,
10108 						  vdev,
10109 						  CDP_UPDATE_DSCP_TO_TID_MAP,
10110 						  val);
		/* Update flag for transmit tid classification */
10112 		if (vdev->dscp_tid_map_id < soc->num_hw_dscp_tid_map)
10113 			vdev->skip_sw_tid_classification |=
10114 				DP_TX_HW_DSCP_TID_MAP_VALID;
10115 		else
10116 			vdev->skip_sw_tid_classification &=
10117 				~DP_TX_HW_DSCP_TID_MAP_VALID;
10118 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10119 		return QDF_STATUS_SUCCESS;
10120 	}
10121 
10122 	return QDF_STATUS_E_FAILURE;
10123 }
10124 
10125 #ifdef DP_RATETABLE_SUPPORT
10126 static int dp_txrx_get_ratekbps(int preamb, int mcs,
10127 				int htflag, int gintval)
10128 {
10129 	uint32_t rix;
10130 	uint16_t ratecode;
10131 	enum PUNCTURED_MODES punc_mode = NO_PUNCTURE;
10132 
10133 	return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1,
10134 			       (uint8_t)preamb, 1, punc_mode,
10135 			       &rix, &ratecode);
10136 }
10137 #else
10138 static int dp_txrx_get_ratekbps(int preamb, int mcs,
10139 				int htflag, int gintval)
10140 {
10141 	return 0;
10142 }
10143 #endif
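
/*
 * Note: in the rate-table build above, htflag is accepted for API
 * compatibility but is not forwarded to dp_getrateindex(); only
 * gintval, mcs and preamb (plus the two constant arguments) select
 * the rate index.
 */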
10144 
/* dp_txrx_get_pdev_stats() - Returns cdp_pdev_stats
 * @soc: DP soc handle
 * @pdev_id: id of DP pdev handle
 * @pdev_stats: buffer to copy to
 *
 * Return: QDF_STATUS
10151  */
10152 static QDF_STATUS
10153 dp_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
10154 		       struct cdp_pdev_stats *pdev_stats)
10155 {
10156 	struct dp_pdev *pdev =
10157 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10158 						   pdev_id);
10159 	if (!pdev)
10160 		return QDF_STATUS_E_FAILURE;
10161 
10162 	dp_aggregate_pdev_stats(pdev);
10163 
10164 	qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats));
10165 	return QDF_STATUS_SUCCESS;
10166 }
10167 
10168 /* dp_txrx_update_vdev_me_stats(): Update vdev ME stats sent from CDP
10169  * @vdev: DP vdev handle
10170  * @buf: buffer containing specific stats structure
10171  *
10172  * Returns: void
10173  */
10174 static void dp_txrx_update_vdev_me_stats(struct dp_vdev *vdev,
10175 					 void *buf)
10176 {
10177 	struct cdp_tx_ingress_stats *host_stats = NULL;
10178 
10179 	if (!buf) {
10180 		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
10181 		return;
10182 	}
10183 	host_stats = (struct cdp_tx_ingress_stats *)buf;
10184 
10185 	DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt,
10186 			 host_stats->mcast_en.mcast_pkt.num,
10187 			 host_stats->mcast_en.mcast_pkt.bytes);
10188 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error,
10189 		     host_stats->mcast_en.dropped_map_error);
10190 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_self_mac,
10191 		     host_stats->mcast_en.dropped_self_mac);
10192 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_send_fail,
10193 		     host_stats->mcast_en.dropped_send_fail);
10194 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast,
10195 		     host_stats->mcast_en.ucast);
10196 	DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc,
10197 		     host_stats->mcast_en.fail_seg_alloc);
10198 	DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail,
10199 		     host_stats->mcast_en.clone_fail);
10200 }
10201 
10202 /* dp_txrx_update_vdev_igmp_me_stats(): Update vdev IGMP ME stats sent from CDP
10203  * @vdev: DP vdev handle
10204  * @buf: buffer containing specific stats structure
10205  *
10206  * Returns: void
10207  */
10208 static void dp_txrx_update_vdev_igmp_me_stats(struct dp_vdev *vdev,
10209 					      void *buf)
10210 {
10211 	struct cdp_tx_ingress_stats *host_stats = NULL;
10212 
10213 	if (!buf) {
10214 		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
10215 		return;
10216 	}
10217 	host_stats = (struct cdp_tx_ingress_stats *)buf;
10218 
10219 	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_rcvd,
10220 		     host_stats->igmp_mcast_en.igmp_rcvd);
10221 	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_ucast_converted,
10222 		     host_stats->igmp_mcast_en.igmp_ucast_converted);
10223 }
10224 
10225 /* dp_txrx_update_vdev_host_stats(): Update stats sent through CDP
 * @soc_hdl: CDP soc handle
10227  * @vdev_id: id of DP vdev handle
10228  * @buf: buffer containing specific stats structure
10229  * @stats_id: stats type
10230  *
10231  * Returns: QDF_STATUS
10232  */
10233 static QDF_STATUS dp_txrx_update_vdev_host_stats(struct cdp_soc_t *soc_hdl,
10234 						 uint8_t vdev_id,
10235 						 void *buf,
10236 						 uint16_t stats_id)
10237 {
10238 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10239 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10240 						     DP_MOD_ID_CDP);
10241 
10242 	if (!vdev) {
10243 		dp_cdp_err("%pK: Invalid vdev handle", soc);
10244 		return QDF_STATUS_E_FAILURE;
10245 	}
10246 
10247 	switch (stats_id) {
10248 	case DP_VDEV_STATS_PKT_CNT_ONLY:
10249 		break;
10250 	case DP_VDEV_STATS_TX_ME:
10251 		dp_txrx_update_vdev_me_stats(vdev, buf);
10252 		dp_txrx_update_vdev_igmp_me_stats(vdev, buf);
10253 		break;
10254 	default:
10255 		qdf_info("Invalid stats_id %d", stats_id);
10256 		break;
10257 	}
10258 
10259 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10260 	return QDF_STATUS_SUCCESS;
10261 }
10262 
10263 /* dp_txrx_get_peer_stats - will return cdp_peer_stats
10264  * @soc: soc handle
10265  * @vdev_id: id of vdev handle
10266  * @peer_mac: mac of DP_PEER handle
10267  * @peer_stats: buffer to copy to
 *
 * Return: QDF_STATUS
10269  */
10270 static QDF_STATUS
10271 dp_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
10272 		       uint8_t *peer_mac, struct cdp_peer_stats *peer_stats)
10273 {
10274 	QDF_STATUS status = QDF_STATUS_SUCCESS;
10275 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
10276 						       peer_mac, 0, vdev_id,
10277 						       DP_MOD_ID_CDP);
10278 
10279 	qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats));
10280 
10281 	if (!peer)
10282 		return QDF_STATUS_E_FAILURE;
10283 
10284 	dp_get_peer_stats(peer, peer_stats);
10285 
10286 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10287 
10288 	return status;
10289 }
10290 
10291 /* dp_txrx_get_peer_stats_param - will return specified cdp_peer_stats
 * @soc: soc handle
 * @vdev_id: vdev_id of vdev object
 * @peer_mac: mac address of the peer
 * @type: enum of required stats
 * @buf: buffer to hold the value
 *
 * Return: QDF_STATUS
10298  */
10299 static QDF_STATUS
10300 dp_txrx_get_peer_stats_param(struct cdp_soc_t *soc, uint8_t vdev_id,
10301 			     uint8_t *peer_mac, enum cdp_peer_stats_type type,
10302 			     cdp_peer_stats_param_t *buf)
10303 {
10304 	QDF_STATUS ret;
10305 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
10306 						      peer_mac, 0, vdev_id,
10307 						      DP_MOD_ID_CDP);
10308 
10309 	if (!peer) {
10310 		dp_peer_err("%pK: Invalid Peer for Mac " QDF_MAC_ADDR_FMT,
10311 			    soc, QDF_MAC_ADDR_REF(peer_mac));
10312 		return QDF_STATUS_E_FAILURE;
10313 	}
10314 
10315 	if (type >= cdp_peer_per_pkt_stats_min &&
10316 	    type < cdp_peer_per_pkt_stats_max) {
10317 		ret = dp_txrx_get_peer_per_pkt_stats_param(peer, type, buf);
10318 	} else if (type >= cdp_peer_extd_stats_min &&
10319 		   type < cdp_peer_extd_stats_max) {
10320 		ret = dp_txrx_get_peer_extd_stats_param(peer, type, buf);
10321 	} else {
10322 		dp_err("%pK: Invalid stat type requested", soc);
10323 		ret = QDF_STATUS_E_FAILURE;
10324 	}
10325 
10326 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10327 
10328 	return ret;
10329 }
10330 
10331 /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
10332  * @soc: soc handle
10333  * @vdev_id: id of vdev handle
10334  * @peer_mac: mac of DP_PEER handle
10335  *
 * Return: QDF_STATUS
10337  */
10338 #ifdef WLAN_FEATURE_11BE_MLO
10339 static QDF_STATUS
10340 dp_txrx_reset_peer_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
10341 			 uint8_t *peer_mac)
10342 {
10343 	QDF_STATUS status = QDF_STATUS_SUCCESS;
10344 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10345 	struct dp_peer *peer =
10346 			dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
10347 						       vdev_id, DP_MOD_ID_CDP);
10348 
10349 	if (!peer)
10350 		return QDF_STATUS_E_FAILURE;
10351 
10352 	DP_STATS_CLR(peer);
10353 	dp_txrx_peer_stats_clr(peer->txrx_peer);
10354 
10355 	if (IS_MLO_DP_MLD_PEER(peer)) {
10356 		uint8_t i;
10357 		struct dp_peer *link_peer;
10358 		struct dp_soc *link_peer_soc;
10359 		struct dp_mld_link_peers link_peers_info;
10360 
10361 		dp_get_link_peers_ref_from_mld_peer(soc, peer,
10362 						    &link_peers_info,
10363 						    DP_MOD_ID_CDP);
10364 		for (i = 0; i < link_peers_info.num_links; i++) {
10365 			link_peer = link_peers_info.link_peers[i];
10366 			link_peer_soc = link_peer->vdev->pdev->soc;
10367 
10368 			DP_STATS_CLR(link_peer);
10369 			dp_monitor_peer_reset_stats(link_peer_soc, link_peer);
10370 		}
10371 
10372 		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
10373 	} else {
10374 		dp_monitor_peer_reset_stats(soc, peer);
10375 	}
10376 
10377 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10378 
10379 	return status;
10380 }
10381 #else
10382 static QDF_STATUS
10383 dp_txrx_reset_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
10384 			 uint8_t *peer_mac)
10385 {
10386 	QDF_STATUS status = QDF_STATUS_SUCCESS;
10387 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
10388 						      peer_mac, 0, vdev_id,
10389 						      DP_MOD_ID_CDP);
10390 
10391 	if (!peer)
10392 		return QDF_STATUS_E_FAILURE;
10393 
10394 	DP_STATS_CLR(peer);
10395 	dp_txrx_peer_stats_clr(peer->txrx_peer);
10396 	dp_monitor_peer_reset_stats((struct dp_soc *)soc, peer);
10397 
10398 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10399 
10400 	return status;
10401 }
10402 #endif
10403 
10404 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
 * @soc_hdl: CDP soc handle
 * @vdev_id: id of DP vdev handle
 * @buf: buffer for vdev stats
 * @is_aggregate: if true, aggregate stats across peers; else copy raw vdev stats
 *
 * Return: 0 on success, 1 on failure
10409  */
10410 static int dp_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
10411 				  void *buf, bool is_aggregate)
10412 {
10413 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10414 	struct cdp_vdev_stats *vdev_stats;
10415 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10416 						     DP_MOD_ID_CDP);
10417 
10418 	if (!vdev)
10419 		return 1;
10420 
10421 	vdev_stats = (struct cdp_vdev_stats *)buf;
10422 
10423 	if (is_aggregate) {
10424 		dp_aggregate_vdev_stats(vdev, buf);
10425 	} else {
10426 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
10427 	}
10428 
10429 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10430 	return 0;
10431 }
10432 
10433 /*
 * dp_get_total_per(): get total PER (packet error rate)
10435  * @soc: DP soc handle
10436  * @pdev_id: id of DP_PDEV handle
10437  *
10438  * Return: % error rate using retries per packet and success packets
10439  */
10440 static int dp_get_total_per(struct cdp_soc_t *soc, uint8_t pdev_id)
10441 {
10442 	struct dp_pdev *pdev =
10443 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10444 						   pdev_id);
10445 
10446 	if (!pdev)
10447 		return 0;
10448 
10449 	dp_aggregate_pdev_stats(pdev);
10450 	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
10451 		return 0;
10452 	return ((pdev->stats.tx.retries * 100) /
10453 		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
10454 }
10455 
10456 /*
10457  * dp_txrx_stats_publish(): publish pdev stats into a buffer
10458  * @soc: DP soc handle
10459  * @pdev_id: id of DP_PDEV handle
10460  * @buf: to hold pdev_stats
10461  *
10462  * Return: int
10463  */
10464 static int
10465 dp_txrx_stats_publish(struct cdp_soc_t *soc, uint8_t pdev_id,
10466 		      struct cdp_stats_extd *buf)
10467 {
10468 	struct cdp_txrx_stats_req req = {0,};
10469 	struct dp_pdev *pdev =
10470 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10471 						   pdev_id);
10472 
10473 	if (!pdev)
10474 		return TXRX_STATS_LEVEL_OFF;
10475 
10476 	dp_aggregate_pdev_stats(pdev);
10477 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
10478 	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
10479 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
10480 				req.param1, req.param2, req.param3, 0,
10481 				req.cookie_val, 0);
10482 
10483 	msleep(DP_MAX_SLEEP_TIME);
10484 
10485 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
10486 	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
10487 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
10488 				req.param1, req.param2, req.param3, 0,
10489 				req.cookie_val, 0);
10490 
10491 	msleep(DP_MAX_SLEEP_TIME);
10492 	qdf_mem_copy(buf, &pdev->stats, sizeof(struct cdp_pdev_stats));
10493 
10494 	return TXRX_STATS_LEVEL;
10495 }
10496 
10497 /**
10498  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
10499  * @soc: soc handle
10500  * @pdev_id: id of DP_PDEV handle
10501  * @map_id: ID of map that needs to be updated
10502  * @tos: index value in map
10503  * @tid: tid value passed by the user
10504  *
10505  * Return: QDF_STATUS
10506  */
10507 static QDF_STATUS
10508 dp_set_pdev_dscp_tid_map_wifi3(struct cdp_soc_t *soc_handle,
10509 			       uint8_t pdev_id,
10510 			       uint8_t map_id,
10511 			       uint8_t tos, uint8_t tid)
10512 {
10513 	uint8_t dscp;
10514 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
10515 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10516 
10517 	if (!pdev)
10518 		return QDF_STATUS_E_FAILURE;
10519 
10520 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
10521 	pdev->dscp_tid_map[map_id][dscp] = tid;
10522 
10523 	if (map_id < soc->num_hw_dscp_tid_map)
10524 		hal_tx_update_dscp_tid(soc->hal_soc, tid,
10525 				       map_id, dscp);
10526 	else
10527 		return QDF_STATUS_E_FAILURE;
10528 
10529 	return QDF_STATUS_SUCCESS;
10530 }
10531 
10532 #ifdef WLAN_SYSFS_DP_STATS
10533 /*
 * dp_sysfs_event_trigger(): Wait on the event signalled when the
 * firmware responds to a sysfs stats request.
 * @soc: soc handle
 * @cookie_val: cookie value
 *
 * Return: QDF_STATUS
10540  */
10541 static QDF_STATUS
10542 dp_sysfs_event_trigger(struct dp_soc *soc, uint32_t cookie_val)
10543 {
10544 	QDF_STATUS status = QDF_STATUS_SUCCESS;
10545 	/* wait for firmware response for sysfs stats request */
10546 	if (cookie_val == DBG_SYSFS_STATS_COOKIE) {
10547 		if (!soc) {
10548 			dp_cdp_err("soc is NULL");
10549 			return QDF_STATUS_E_FAILURE;
10550 		}
10551 		/* wait for event completion */
10552 		status = qdf_wait_single_event(&soc->sysfs_config->sysfs_txrx_fw_request_done,
10553 					       WLAN_SYSFS_STAT_REQ_WAIT_MS);
10554 		if (status == QDF_STATUS_SUCCESS)
10555 			dp_cdp_info("sysfs_txrx_fw_request_done event completed");
10556 		else if (status == QDF_STATUS_E_TIMEOUT)
10557 			dp_cdp_warn("sysfs_txrx_fw_request_done event expired");
10558 		else
10559 			dp_cdp_warn("sysfs_txrx_fw_request_done event erro code %d", status);
10560 	}
10561 
10562 	return status;
10563 }
10564 #else /* WLAN_SYSFS_DP_STATS */
10565 /*
 * dp_sysfs_event_trigger(): Stub when WLAN_SYSFS_DP_STATS is disabled.
 * @soc: soc handle
 * @cookie_val: cookie value
 *
 * Return: QDF_STATUS
10572  */
10573 static QDF_STATUS
10574 dp_sysfs_event_trigger(struct dp_soc *soc, uint32_t cookie_val)
10575 {
10576 	return QDF_STATUS_SUCCESS;
10577 }
10578 #endif /* WLAN_SYSFS_DP_STATS */
10579 
10580 /**
10581  * dp_fw_stats_process(): Process TXRX FW stats request.
10582  * @vdev_handle: DP VDEV handle
10583  * @req: stats request
10584  *
10585  * return: QDF_STATUS
10586  */
10587 static QDF_STATUS
10588 dp_fw_stats_process(struct dp_vdev *vdev,
10589 		    struct cdp_txrx_stats_req *req)
10590 {
10591 	struct dp_pdev *pdev = NULL;
10592 	struct dp_soc *soc = NULL;
10593 	uint32_t stats = req->stats;
10594 	uint8_t mac_id = req->mac_id;
10595 	uint32_t cookie_val = DBG_STATS_COOKIE_DEFAULT;
10596 
10597 	if (!vdev) {
10598 		DP_TRACE(NONE, "VDEV not found");
10599 		return QDF_STATUS_E_FAILURE;
10600 	}
10601 
10602 	pdev = vdev->pdev;
10603 	if (!pdev) {
10604 		DP_TRACE(NONE, "PDEV not found");
10605 		return QDF_STATUS_E_FAILURE;
10606 	}
10607 
10608 	soc = pdev->soc;
10609 	if (!soc) {
10610 		DP_TRACE(NONE, "soc not found");
10611 		return QDF_STATUS_E_FAILURE;
10612 	}
10613 
10614 	/* In case request is from host sysfs for displaying stats on console */
10615 	if (req->cookie_val == DBG_SYSFS_STATS_COOKIE)
10616 		cookie_val = DBG_SYSFS_STATS_COOKIE;
10617 
10618 	/*
10619 	 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
10620 	 * from param0 to param3 according to below rule:
10621 	 *
10622 	 * PARAM:
10623 	 *   - config_param0 : start_offset (stats type)
10624 	 *   - config_param1 : stats bmask from start offset
10625 	 *   - config_param2 : stats bmask from start offset + 32
10626 	 *   - config_param3 : stats bmask from start offset + 64
10627 	 */
10628 	if (req->stats == CDP_TXRX_STATS_0) {
10629 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
10630 		req->param1 = 0xFFFFFFFF;
10631 		req->param2 = 0xFFFFFFFF;
10632 		req->param3 = 0xFFFFFFFF;
10633 	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
10634 		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
10635 	}
10636 
10637 	if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT) {
10638 		dp_h2t_ext_stats_msg_send(pdev,
10639 					  HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT,
10640 					  req->param0, req->param1, req->param2,
10641 					  req->param3, 0, cookie_val,
10642 					  mac_id);
10643 	} else {
10644 		dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
10645 					  req->param1, req->param2, req->param3,
10646 					  0, cookie_val, mac_id);
10647 	}
10648 
10649 	dp_sysfs_event_trigger(soc, cookie_val);
10650 
10651 	return QDF_STATUS_SUCCESS;
10652 }
10653 
10654 /**
10655  * dp_txrx_stats_request - function to map to firmware and host stats
10656  * @soc: soc handle
10657  * @vdev_id: virtual device ID
10658  * @req: stats request
10659  *
10660  * Return: QDF_STATUS
10661  */
10662 static
10663 QDF_STATUS dp_txrx_stats_request(struct cdp_soc_t *soc_handle,
10664 				 uint8_t vdev_id,
10665 				 struct cdp_txrx_stats_req *req)
10666 {
10667 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_handle);
10668 	int host_stats;
10669 	int fw_stats;
10670 	enum cdp_stats stats;
10671 	int num_stats;
10672 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10673 						     DP_MOD_ID_CDP);
10674 	QDF_STATUS status = QDF_STATUS_E_INVAL;
10675 
10676 	if (!vdev || !req) {
10677 		dp_cdp_err("%pK: Invalid vdev/req instance", soc);
10678 		status = QDF_STATUS_E_INVAL;
10679 		goto fail0;
10680 	}
10681 
10682 	if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) {
10683 		dp_err("Invalid mac id request");
10684 		status = QDF_STATUS_E_INVAL;
10685 		goto fail0;
10686 	}
10687 
10688 	stats = req->stats;
10689 	if (stats >= CDP_TXRX_MAX_STATS) {
10690 		status = QDF_STATUS_E_INVAL;
10691 		goto fail0;
10692 	}
10693 
	/*
	 * DP_CURR_FW_STATS_AVAIL: number of FW stats currently available;
	 * must be updated whenever new FW HTT stats are added.
	 */
10698 	if (stats > CDP_TXRX_STATS_HTT_MAX)
10699 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
10700 
10701 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
10702 
10703 	if (stats >= num_stats) {
10704 		dp_cdp_err("%pK : Invalid stats option: %d", soc, stats);
10705 		status = QDF_STATUS_E_INVAL;
10706 		goto fail0;
10707 	}
10708 
10709 	req->stats = stats;
10710 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
10711 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
10712 
10713 	dp_info("stats: %u fw_stats_type: %d host_stats: %d",
10714 		stats, fw_stats, host_stats);
10715 
10716 	if (fw_stats != TXRX_FW_STATS_INVALID) {
10717 		/* update request with FW stats type */
10718 		req->stats = fw_stats;
10719 		status = dp_fw_stats_process(vdev, req);
10720 	} else if ((host_stats != TXRX_HOST_STATS_INVALID) &&
10721 			(host_stats <= TXRX_HOST_STATS_MAX))
10722 		status = dp_print_host_stats(vdev, req, soc);
10723 	else
10724 		dp_cdp_info("%pK: Wrong Input for TxRx Stats", soc);
10725 fail0:
10726 	if (vdev)
10727 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10728 	return status;
10729 }
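
/*
 * Illustrative caller sketch (hypothetical stats option): issuing a
 * request that the dispatcher above resolves through
 * dp_stats_mapping_table into a FW query, a host printout, or both.
 * In-driver callers reach this handler via the cdp op table
 * (.txrx_stats_request), registered further below.
 *
 *	struct cdp_txrx_stats_req req = {0};
 *
 *	req.stats = CDP_TXRX_STATS_1;	// must be < CDP_TXRX_MAX_STATS
 *	req.mac_id = 0;			// must be < WLAN_CFG_MAC_PER_TARGET
 *	if (dp_txrx_stats_request(soc_handle, vdev_id, &req) !=
 *	    QDF_STATUS_SUCCESS)
 *		;	// rejected: bad vdev, mac_id or stats option
 */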
10730 
/**
 * dp_txrx_dump_stats() - Dump statistics
 * @psoc: soc handle
 * @value: statistics option
 * @level: verbosity level
 *
 * Return: QDF_STATUS
 */
10735 static QDF_STATUS dp_txrx_dump_stats(struct cdp_soc_t *psoc, uint16_t value,
10736 				     enum qdf_stats_verbosity_level level)
10737 {
10738 	struct dp_soc *soc =
10739 		(struct dp_soc *)psoc;
10740 	QDF_STATUS status = QDF_STATUS_SUCCESS;
10741 
10742 	if (!soc) {
10743 		dp_cdp_err("%pK: soc is NULL", soc);
10744 		return QDF_STATUS_E_INVAL;
10745 	}
10746 
10747 	switch (value) {
10748 	case CDP_TXRX_PATH_STATS:
10749 		dp_txrx_path_stats(soc);
10750 		dp_print_soc_interrupt_stats(soc);
10751 		hal_dump_reg_write_stats(soc->hal_soc);
10752 		break;
10753 
10754 	case CDP_RX_RING_STATS:
10755 		dp_print_per_ring_stats(soc);
10756 		break;
10757 
10758 	case CDP_TXRX_TSO_STATS:
10759 		dp_print_tso_stats(soc, level);
10760 		break;
10761 
10762 	case CDP_DUMP_TX_FLOW_POOL_INFO:
10763 		if (level == QDF_STATS_VERBOSITY_LEVEL_HIGH)
10764 			cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
10765 		else
10766 			dp_tx_dump_flow_pool_info_compact(soc);
10767 		break;
10768 
10769 	case CDP_DP_NAPI_STATS:
10770 		dp_print_napi_stats(soc);
10771 		break;
10772 
10773 	case CDP_TXRX_DESC_STATS:
10774 		/* TODO: NOT IMPLEMENTED */
10775 		break;
10776 
10777 	case CDP_DP_RX_FISA_STATS:
10778 		dp_rx_dump_fisa_stats(soc);
10779 		break;
10780 
10781 	case CDP_DP_SWLM_STATS:
10782 		dp_print_swlm_stats(soc);
10783 		break;
10784 
10785 	default:
10786 		status = QDF_STATUS_E_INVAL;
10787 		break;
10788 	}
10789 
10790 	return status;
10791 
10792 }
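
/*
 * Illustrative usage (handle is hypothetical): dumping TX/RX path
 * stats at high verbosity through the handler above; an unknown
 * option falls through to QDF_STATUS_E_INVAL.
 *
 *	dp_txrx_dump_stats(psoc, CDP_TXRX_PATH_STATS,
 *			   QDF_STATS_VERBOSITY_LEVEL_HIGH);
 */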
10793 
10794 #ifdef WLAN_SYSFS_DP_STATS
10795 static
10796 void dp_sysfs_get_stat_type(struct dp_soc *soc, uint32_t *mac_id,
10797 			    uint32_t *stat_type)
10798 {
10799 	qdf_spinlock_acquire(&soc->sysfs_config->rw_stats_lock);
10800 	*stat_type = soc->sysfs_config->stat_type_requested;
10801 	*mac_id   = soc->sysfs_config->mac_id;
10802 
10803 	qdf_spinlock_release(&soc->sysfs_config->rw_stats_lock);
10804 }
10805 
10806 static
10807 void dp_sysfs_update_config_buf_params(struct dp_soc *soc,
10808 				       uint32_t curr_len,
10809 				       uint32_t max_buf_len,
10810 				       char *buf)
10811 {
10812 	qdf_spinlock_acquire(&soc->sysfs_config->sysfs_write_user_buffer);
10813 	/* set sysfs_config parameters */
10814 	soc->sysfs_config->buf = buf;
10815 	soc->sysfs_config->curr_buffer_length = curr_len;
10816 	soc->sysfs_config->max_buffer_length = max_buf_len;
10817 	qdf_spinlock_release(&soc->sysfs_config->sysfs_write_user_buffer);
10818 }
10819 
10820 static
10821 QDF_STATUS dp_sysfs_fill_stats(ol_txrx_soc_handle soc_hdl,
10822 			       char *buf, uint32_t buf_size)
10823 {
10824 	uint32_t mac_id = 0;
10825 	uint32_t stat_type = 0;
10826 	uint32_t fw_stats = 0;
10827 	uint32_t host_stats = 0;
10828 	enum cdp_stats stats;
	struct cdp_txrx_stats_req req = {0};
10830 	struct dp_soc *soc = NULL;
10831 
10832 	if (!soc_hdl) {
10833 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
10834 		return QDF_STATUS_E_INVAL;
10835 	}
10836 
10837 	soc = cdp_soc_t_to_dp_soc(soc_hdl);
10838 
10839 	if (!soc) {
10840 		dp_cdp_err("%pK: soc is NULL", soc);
10841 		return QDF_STATUS_E_INVAL;
10842 	}
10843 
10844 	dp_sysfs_get_stat_type(soc, &mac_id, &stat_type);
10845 
10846 	stats = stat_type;
10847 	if (stats >= CDP_TXRX_MAX_STATS) {
10848 		dp_cdp_info("sysfs stat type requested is invalid");
10849 		return QDF_STATUS_E_INVAL;
10850 	}
	/*
	 * DP_CURR_FW_STATS_AVAIL: number of FW stats currently available;
	 * must be updated whenever new FW HTT stats are added.
	 */
	if (stats > CDP_TXRX_STATS_HTT_MAX)
		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
10857 
10858 	/* build request */
10859 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
10860 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
10861 
10862 	req.stats = stat_type;
10863 	req.mac_id = mac_id;
10864 	/* request stats to be printed */
10865 	qdf_mutex_acquire(&soc->sysfs_config->sysfs_read_lock);
10866 
10867 	if (fw_stats != TXRX_FW_STATS_INVALID) {
10868 		/* update request with FW stats type */
10869 		req.cookie_val = DBG_SYSFS_STATS_COOKIE;
10870 	} else if ((host_stats != TXRX_HOST_STATS_INVALID) &&
10871 			(host_stats <= TXRX_HOST_STATS_MAX)) {
10872 		req.cookie_val = DBG_STATS_COOKIE_DEFAULT;
10873 		soc->sysfs_config->process_id = qdf_get_current_pid();
10874 		soc->sysfs_config->printing_mode = PRINTING_MODE_ENABLED;
10875 	}
10876 
10877 	dp_sysfs_update_config_buf_params(soc, 0, buf_size, buf);
10878 
10879 	dp_txrx_stats_request(soc_hdl, mac_id, &req);
10880 	soc->sysfs_config->process_id = 0;
10881 	soc->sysfs_config->printing_mode = PRINTING_MODE_DISABLED;
10882 
10883 	dp_sysfs_update_config_buf_params(soc, 0, 0, NULL);
10884 
10885 	qdf_mutex_release(&soc->sysfs_config->sysfs_read_lock);
10886 	return QDF_STATUS_SUCCESS;
10887 }
10888 
10889 static
10890 QDF_STATUS dp_sysfs_set_stat_type(ol_txrx_soc_handle soc_hdl,
10891 				  uint32_t stat_type, uint32_t mac_id)
10892 {
10893 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10894 
10895 	if (!soc_hdl) {
		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
10897 		return QDF_STATUS_E_INVAL;
10898 	}
10899 
10900 	qdf_spinlock_acquire(&soc->sysfs_config->rw_stats_lock);
10901 
10902 	soc->sysfs_config->stat_type_requested = stat_type;
10903 	soc->sysfs_config->mac_id = mac_id;
10904 
10905 	qdf_spinlock_release(&soc->sysfs_config->rw_stats_lock);
10906 
10907 	return QDF_STATUS_SUCCESS;
10908 }
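
/*
 * Illustrative sketch of the two-step sysfs flow (buffer and handle
 * are hypothetical): the sysfs write handler first records which stat
 * to fetch, then the read handler fills the user buffer. Both entry
 * points are exported via the cdp op table (.txrx_sysfs_set_stat_type
 * and .txrx_sysfs_fill_stats) further below.
 *
 *	char buf[4096];
 *
 *	dp_sysfs_set_stat_type(soc_hdl, stat_type, mac_id);
 *	dp_sysfs_fill_stats(soc_hdl, buf, sizeof(buf));
 */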
10909 
10910 static
10911 QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl)
10912 {
10913 	struct dp_soc *soc;
10914 	QDF_STATUS status;
10915 
10916 	if (!soc_hdl) {
10917 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
10918 		return QDF_STATUS_E_INVAL;
10919 	}
10920 
10921 	soc = soc_hdl;
10922 
10923 	soc->sysfs_config = qdf_mem_malloc(sizeof(struct sysfs_stats_config));
10924 	if (!soc->sysfs_config) {
		dp_cdp_err("failed to allocate memory for sysfs_config");
10926 		return QDF_STATUS_E_NOMEM;
10927 	}
10928 
	/* create event for FW stats request from sysfs */
	status = qdf_event_create(&soc->sysfs_config->sysfs_txrx_fw_request_done);
10931 	if (status != QDF_STATUS_SUCCESS) {
10932 		dp_cdp_err("failed to create event sysfs_txrx_fw_request_done");
10933 		qdf_mem_free(soc->sysfs_config);
10934 		soc->sysfs_config = NULL;
10935 		return QDF_STATUS_E_FAILURE;
10936 	}
10937 
10938 	qdf_spinlock_create(&soc->sysfs_config->rw_stats_lock);
10939 	qdf_mutex_create(&soc->sysfs_config->sysfs_read_lock);
10940 	qdf_spinlock_create(&soc->sysfs_config->sysfs_write_user_buffer);
10941 
10942 	return QDF_STATUS_SUCCESS;
10943 }
10944 
10945 static
10946 QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl)
10947 {
10948 	struct dp_soc *soc;
10949 	QDF_STATUS status;
10950 
10951 	if (!soc_hdl) {
10952 		dp_cdp_err("%pK: soc_hdl is NULL", soc_hdl);
10953 		return QDF_STATUS_E_INVAL;
10954 	}
10955 
10956 	soc = soc_hdl;
10957 	if (!soc->sysfs_config) {
10958 		dp_cdp_err("soc->sysfs_config is NULL");
10959 		return QDF_STATUS_E_FAILURE;
10960 	}
10961 
10962 	status = qdf_event_destroy(&soc->sysfs_config->sysfs_txrx_fw_request_done);
10963 	if (status != QDF_STATUS_SUCCESS)
		dp_cdp_err("Failed to destroy event sysfs_txrx_fw_request_done");
10965 
10966 	qdf_mutex_destroy(&soc->sysfs_config->sysfs_read_lock);
10967 	qdf_spinlock_destroy(&soc->sysfs_config->rw_stats_lock);
10968 	qdf_spinlock_destroy(&soc->sysfs_config->sysfs_write_user_buffer);
10969 
10970 	qdf_mem_free(soc->sysfs_config);
10971 
10972 	return QDF_STATUS_SUCCESS;
10973 }
10974 
10975 #else /* WLAN_SYSFS_DP_STATS */
10976 
10977 static
10978 QDF_STATUS dp_sysfs_deinitialize_stats(struct dp_soc *soc_hdl)
10979 {
10980 	return QDF_STATUS_SUCCESS;
10981 }
10982 
10983 static
10984 QDF_STATUS dp_sysfs_initialize_stats(struct dp_soc *soc_hdl)
10985 {
10986 	return QDF_STATUS_SUCCESS;
10987 }
10988 #endif /* WLAN_SYSFS_DP_STATS */
10989 
/**
 * dp_txrx_clear_dump_stats() - clear dumped stats
 * @soc_hdl: soc handle
 * @pdev_id: pdev ID
 * @value: stats option
 *
 * Return: 0 - Success, non-zero - failure
 */
10997 static
10998 QDF_STATUS dp_txrx_clear_dump_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
10999 				    uint8_t value)
11000 {
11001 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11002 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11003 
11004 	if (!soc) {
11005 		dp_err("soc is NULL");
11006 		return QDF_STATUS_E_INVAL;
11007 	}
11008 
11009 	switch (value) {
11010 	case CDP_TXRX_TSO_STATS:
11011 		dp_txrx_clear_tso_stats(soc);
11012 		break;
11013 
11014 	default:
11015 		status = QDF_STATUS_E_INVAL;
11016 		break;
11017 	}
11018 
11019 	return status;
11020 }
11021 
11022 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
11023 /**
 * dp_update_flow_control_parameters() - API to store datapath
 *                            flow control config parameters
 * @soc: soc handle
 * @params: ini parameter handle
11028  *
11029  * Return: void
11030  */
11031 static inline
11032 void dp_update_flow_control_parameters(struct dp_soc *soc,
11033 				struct cdp_config_params *params)
11034 {
11035 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
11036 					params->tx_flow_stop_queue_threshold;
11037 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
11038 					params->tx_flow_start_queue_offset;
11039 }
11040 #else
11041 static inline
11042 void dp_update_flow_control_parameters(struct dp_soc *soc,
11043 				struct cdp_config_params *params)
11044 {
11045 }
11046 #endif
11047 
11048 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
11049 /* Max packet limit for TX Comp packet loop (dp_tx_comp_handler) */
11050 #define DP_TX_COMP_LOOP_PKT_LIMIT_MAX 1024
11051 
11052 /* Max packet limit for RX REAP Loop (dp_rx_process) */
11053 #define DP_RX_REAP_LOOP_PKT_LIMIT_MAX 1024
11054 
11055 static
11056 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
11057 					struct cdp_config_params *params)
11058 {
11059 	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit =
11060 				params->tx_comp_loop_pkt_limit;
11061 
11062 	if (params->tx_comp_loop_pkt_limit < DP_TX_COMP_LOOP_PKT_LIMIT_MAX)
11063 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = true;
11064 	else
11065 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = false;
11066 
11067 	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit =
11068 				params->rx_reap_loop_pkt_limit;
11069 
11070 	if (params->rx_reap_loop_pkt_limit < DP_RX_REAP_LOOP_PKT_LIMIT_MAX)
11071 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = true;
11072 	else
11073 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = false;
11074 
11075 	soc->wlan_cfg_ctx->rx_hp_oos_update_limit =
11076 				params->rx_hp_oos_update_limit;
11077 
11078 	dp_info("tx_comp_loop_pkt_limit %u tx_comp_enable_eol_data_check %u rx_reap_loop_pkt_limit %u rx_enable_eol_data_check %u rx_hp_oos_update_limit %u",
11079 		soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit,
11080 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check,
11081 		soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit,
11082 		soc->wlan_cfg_ctx->rx_enable_eol_data_check,
11083 		soc->wlan_cfg_ctx->rx_hp_oos_update_limit);
11084 }
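
/*
 * Worked example (hypothetical ini values) of the end-of-loop check
 * rule above: a limit below the 1024 cap keeps the EOL data check on,
 * a limit at or above it turns the re-check off.
 *
 *	params->tx_comp_loop_pkt_limit = 64;	// < 1024 => eol check on
 *	params->rx_reap_loop_pkt_limit = 2048;	// >= 1024 => eol check off
 *	dp_update_rx_soft_irq_limit_params(soc, params);
 */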
11085 
11086 static void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
11087 				      uint32_t rx_limit)
11088 {
11089 	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit = tx_limit;
11090 	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit = rx_limit;
11091 }
11092 
11093 #else
11094 static inline
11095 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
11096 					struct cdp_config_params *params)
11097 { }
11098 
11099 static inline
11100 void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
11101 			       uint32_t rx_limit)
11102 {
11103 }
11104 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
11105 
11106 /**
11107  * dp_update_config_parameters() - API to store datapath
11108  *                            config parameters
 * @psoc: soc handle
 * @params: ini parameter handle
11111  *
11112  * Return: status
11113  */
11114 static
11115 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
11116 				struct cdp_config_params *params)
11117 {
11118 	struct dp_soc *soc = (struct dp_soc *)psoc;
11119 
11120 	if (!(soc)) {
11121 		dp_cdp_err("%pK: Invalid handle", soc);
11122 		return QDF_STATUS_E_INVAL;
11123 	}
11124 
11125 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
11126 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
11127 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
11128 	soc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload =
11129 				params->p2p_tcp_udp_checksumoffload;
11130 	soc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload =
11131 				params->nan_tcp_udp_checksumoffload;
11132 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
11133 				params->tcp_udp_checksumoffload;
11134 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
11135 	soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
11136 	soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
11137 
11138 	dp_update_rx_soft_irq_limit_params(soc, params);
11139 	dp_update_flow_control_parameters(soc, params);
11140 
11141 	return QDF_STATUS_SUCCESS;
11142 }
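
/*
 * Illustrative sketch (hypothetical values): how an ini layer might
 * populate cdp_config_params before invoking the setter above; only
 * fields consumed by dp_update_config_parameters() are shown.
 *
 *	struct cdp_config_params params = {0};
 *
 *	params.tso_enable = true;
 *	params.napi_enable = true;
 *	params.tcp_udp_checksumoffload = true;
 *	params.gro_enable = false;
 *	dp_update_config_parameters(psoc, &params);
 */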
11143 
11144 static struct cdp_wds_ops dp_ops_wds = {
11145 	.vdev_set_wds = dp_vdev_set_wds,
11146 #ifdef WDS_VENDOR_EXTENSION
11147 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
11148 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
11149 #endif
11150 };
11151 
/*
 * dp_txrx_data_tx_cb_set(): set the callback for non-standard tx
 * @soc_hdl: datapath soc handle
 * @vdev_id: virtual interface id
 * @callback: callback function
 * @ctxt: callback context
 *
 * Return: void
 */
11160 static void
11161 dp_txrx_data_tx_cb_set(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
11162 		       ol_txrx_data_tx_cb callback, void *ctxt)
11163 {
11164 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11165 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11166 						     DP_MOD_ID_CDP);
11167 
11168 	if (!vdev)
11169 		return;
11170 
11171 	vdev->tx_non_std_data_callback.func = callback;
11172 	vdev->tx_non_std_data_callback.ctxt = ctxt;
11173 
11174 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11175 }
11176 
11177 /**
11178  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
11179  * @soc: datapath soc handle
11180  * @pdev_id: id of datapath pdev handle
11181  *
11182  * Return: opaque pointer to dp txrx handle
11183  */
11184 static void *dp_pdev_get_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id)
11185 {
11186 	struct dp_pdev *pdev =
11187 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11188 						   pdev_id);
11189 	if (qdf_unlikely(!pdev))
11190 		return NULL;
11191 
11192 	return pdev->dp_txrx_handle;
11193 }
11194 
11195 /**
11196  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
11197  * @soc: datapath soc handle
11198  * @pdev_id: id of datapath pdev handle
11199  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
11200  *
11201  * Return: void
11202  */
11203 static void
11204 dp_pdev_set_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id,
11205 			   void *dp_txrx_hdl)
11206 {
11207 	struct dp_pdev *pdev =
11208 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11209 						   pdev_id);
11210 
11211 	if (!pdev)
11212 		return;
11213 
11214 	pdev->dp_txrx_handle = dp_txrx_hdl;
11215 }
11216 
11217 /**
11218  * dp_vdev_get_dp_ext_handle() - get dp handle from vdev
11219  * @soc: datapath soc handle
11220  * @vdev_id: vdev id
11221  *
11222  * Return: opaque pointer to dp txrx handle
11223  */
11224 static void *dp_vdev_get_dp_ext_handle(ol_txrx_soc_handle soc_hdl,
11225 				       uint8_t vdev_id)
11226 {
11227 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11228 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11229 						     DP_MOD_ID_CDP);
11230 	void *dp_ext_handle;
11231 
11232 	if (!vdev)
11233 		return NULL;
11234 	dp_ext_handle = vdev->vdev_dp_ext_handle;
11235 
11236 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11237 	return dp_ext_handle;
11238 }
11239 
11240 /**
11241  * dp_vdev_set_dp_ext_handle() - set dp handle in vdev
 * @soc_hdl: datapath soc handle
 * @vdev_id: vdev id
 * @size: size of the advanced dp handle
11245  *
11246  * Return: QDF_STATUS
11247  */
11248 static QDF_STATUS
11249 dp_vdev_set_dp_ext_handle(ol_txrx_soc_handle soc_hdl, uint8_t vdev_id,
11250 			  uint16_t size)
11251 {
11252 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11253 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11254 						     DP_MOD_ID_CDP);
11255 	void *dp_ext_handle;
11256 
11257 	if (!vdev)
11258 		return QDF_STATUS_E_FAILURE;
11259 
11260 	dp_ext_handle = qdf_mem_malloc(size);
11261 
11262 	if (!dp_ext_handle) {
11263 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11264 		return QDF_STATUS_E_FAILURE;
11265 	}
11266 
11267 	vdev->vdev_dp_ext_handle = dp_ext_handle;
11268 
11269 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11270 	return QDF_STATUS_SUCCESS;
11271 }
11272 
11273 /**
11274  * dp_vdev_inform_ll_conn() - Inform vdev to add/delete a latency critical
11275  *			      connection for this vdev
11276  * @soc_hdl: CDP soc handle
11277  * @vdev_id: vdev ID
11278  * @action: Add/Delete action
11279  *
11280  * Returns: QDF_STATUS.
11281  */
11282 static QDF_STATUS
11283 dp_vdev_inform_ll_conn(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
11284 		       enum vdev_ll_conn_actions action)
11285 {
11286 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11287 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11288 						     DP_MOD_ID_CDP);
11289 
11290 	if (!vdev) {
11291 		dp_err("LL connection action for invalid vdev %d", vdev_id);
11292 		return QDF_STATUS_E_FAILURE;
11293 	}
11294 
11295 	switch (action) {
11296 	case CDP_VDEV_LL_CONN_ADD:
11297 		vdev->num_latency_critical_conn++;
11298 		break;
11299 
11300 	case CDP_VDEV_LL_CONN_DEL:
11301 		vdev->num_latency_critical_conn--;
11302 		break;
11303 
11304 	default:
		dp_err("invalid LL connection action %d", action);
11306 		break;
11307 	}
11308 
11309 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11310 	return QDF_STATUS_SUCCESS;
11311 }
11312 
11313 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
11314 /**
11315  * dp_soc_set_swlm_enable() - Enable/Disable SWLM if initialized.
11316  * @soc_hdl: CDP Soc handle
11317  * @value: Enable/Disable value
11318  *
11319  * Returns: QDF_STATUS
11320  */
11321 static QDF_STATUS dp_soc_set_swlm_enable(struct cdp_soc_t *soc_hdl,
11322 					 uint8_t value)
11323 {
11324 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11325 
11326 	if (!soc->swlm.is_init) {
11327 		dp_err("SWLM is not initialized");
11328 		return QDF_STATUS_E_FAILURE;
11329 	}
11330 
11331 	soc->swlm.is_enabled = !!value;
11332 
11333 	return QDF_STATUS_SUCCESS;
11334 }
11335 
11336 /**
11337  * dp_soc_is_swlm_enabled() - Check if SWLM is enabled.
11338  * @soc_hdl: CDP Soc handle
11339  *
 * Returns: 1 if SWLM is enabled, 0 otherwise
11341  */
11342 static uint8_t dp_soc_is_swlm_enabled(struct cdp_soc_t *soc_hdl)
11343 {
11344 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11345 
11346 	return soc->swlm.is_enabled;
11347 }
11348 #endif
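
/*
 * Illustrative usage of the SWLM toggles above (handle is
 * hypothetical): the setter fails unless SWLM was initialized, so a
 * caller would typically gate the query on it.
 *
 *	if (dp_soc_set_swlm_enable(soc_hdl, 1) == QDF_STATUS_SUCCESS)
 *		enabled = dp_soc_is_swlm_enabled(soc_hdl);
 */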
11349 
11350 /**
11351  * dp_display_srng_info() - Dump the srng HP TP info
11352  * @soc_hdl: CDP Soc handle
11353  *
11354  * This function dumps the SW hp/tp values for the important rings.
 * HW hp/tp values are not dumped, since reading them can lead to a
 * READ NOC error when the UMAC is in a low-power state. MCC does not
 * have device force wake working yet.
11358  *
11359  * Return: none
11360  */
11361 static void dp_display_srng_info(struct cdp_soc_t *soc_hdl)
11362 {
11363 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11364 	hal_soc_handle_t hal_soc = soc->hal_soc;
11365 	uint32_t hp, tp, i;
11366 
11367 	dp_info("SRNG HP-TP data:");
11368 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
11369 		hal_get_sw_hptp(hal_soc, soc->tcl_data_ring[i].hal_srng,
11370 				&tp, &hp);
11371 		dp_info("TCL DATA ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
11372 
11373 		hal_get_sw_hptp(hal_soc, soc->tx_comp_ring[i].hal_srng,
11374 				&tp, &hp);
11375 		dp_info("TX comp ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
11376 	}
11377 
11378 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
11379 		hal_get_sw_hptp(hal_soc, soc->reo_dest_ring[i].hal_srng,
11380 				&tp, &hp);
11381 		dp_info("REO DST ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
11382 	}
11383 
11384 	hal_get_sw_hptp(hal_soc, soc->reo_exception_ring.hal_srng, &tp, &hp);
11385 	dp_info("REO exception ring: hp=0x%x, tp=0x%x", hp, tp);
11386 
11387 	hal_get_sw_hptp(hal_soc, soc->rx_rel_ring.hal_srng, &tp, &hp);
11388 	dp_info("WBM RX release ring: hp=0x%x, tp=0x%x", hp, tp);
11389 
11390 	hal_get_sw_hptp(hal_soc, soc->wbm_desc_rel_ring.hal_srng, &tp, &hp);
11391 	dp_info("WBM desc release ring: hp=0x%x, tp=0x%x", hp, tp);
11392 }
11393 
11394 /**
11395  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
11396  * @soc_handle: datapath soc handle
11397  *
11398  * Return: opaque pointer to external dp (non-core DP)
11399  */
11400 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
11401 {
11402 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
11403 
11404 	return soc->external_txrx_handle;
11405 }
11406 
11407 /**
11408  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
11409  * @soc_handle: datapath soc handle
11410  * @txrx_handle: opaque pointer to external dp (non-core DP)
11411  *
11412  * Return: void
11413  */
11414 static void
11415 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
11416 {
11417 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
11418 
11419 	soc->external_txrx_handle = txrx_handle;
11420 }
11421 
11422 /**
11423  * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping
11424  * @soc_hdl: datapath soc handle
11425  * @pdev_id: id of the datapath pdev handle
11426  * @lmac_id: lmac id
11427  *
11428  * Return: QDF_STATUS
11429  */
11430 static QDF_STATUS
11431 dp_soc_map_pdev_to_lmac
11432 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
11433 	 uint32_t lmac_id)
11434 {
11435 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11436 
11437 	wlan_cfg_set_hw_mac_idx(soc->wlan_cfg_ctx,
11438 				pdev_id,
11439 				lmac_id);
11440 
	/* Set host PDEV ID for lmac_id */
11442 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
11443 			      pdev_id,
11444 			      lmac_id);
11445 
11446 	return QDF_STATUS_SUCCESS;
11447 }
11448 
11449 /**
11450  * dp_soc_handle_pdev_mode_change() - Update pdev to lmac mapping
11451  * @soc_hdl: datapath soc handle
11452  * @pdev_id: id of the datapath pdev handle
11453  * @lmac_id: lmac id
11454  *
11455  * In the event of a dynamic mode change, update the pdev to lmac mapping
11456  *
11457  * Return: QDF_STATUS
11458  */
11459 static QDF_STATUS
11460 dp_soc_handle_pdev_mode_change
11461 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
11462 	 uint32_t lmac_id)
11463 {
11464 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11465 	struct dp_vdev *vdev = NULL;
11466 	uint8_t hw_pdev_id, mac_id;
11467 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc,
11468 								  pdev_id);
11469 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
11470 
11471 	if (qdf_unlikely(!pdev))
11472 		return QDF_STATUS_E_FAILURE;
11473 
11474 	pdev->lmac_id = lmac_id;
11475 	pdev->target_pdev_id =
11476 		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
	dp_info("mode change %d %d", pdev->pdev_id, pdev->lmac_id);
11478 
	/* Set host PDEV ID for lmac_id */
11480 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
11481 			      pdev->pdev_id,
11482 			      lmac_id);
11483 
11484 	hw_pdev_id =
11485 		dp_get_target_pdev_id_for_host_pdev_id(soc,
11486 						       pdev->pdev_id);
11487 
	/*
	 * When NSS offload is enabled, send the pdev_id-to-lmac_id
	 * and pdev_id-to-hw_pdev_id mappings to the NSS FW
	 */
11492 	if (nss_config) {
11493 		mac_id = pdev->lmac_id;
11494 		if (soc->cdp_soc.ol_ops->pdev_update_lmac_n_target_pdev_id)
11495 			soc->cdp_soc.ol_ops->
11496 				pdev_update_lmac_n_target_pdev_id(
11497 				soc->ctrl_psoc,
11498 				&pdev_id, &mac_id, &hw_pdev_id);
11499 	}
11500 
11501 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
11502 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
11503 		DP_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
11504 					       hw_pdev_id);
11505 		vdev->lmac_id = pdev->lmac_id;
11506 	}
11507 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
11508 
11509 	return QDF_STATUS_SUCCESS;
11510 }
11511 
11512 /**
11513  * dp_soc_set_pdev_status_down() - set pdev down/up status
11514  * @soc: datapath soc handle
11515  * @pdev_id: id of datapath pdev handle
11516  * @is_pdev_down: pdev down/up status
11517  *
11518  * Return: QDF_STATUS
11519  */
11520 static QDF_STATUS
11521 dp_soc_set_pdev_status_down(struct cdp_soc_t *soc, uint8_t pdev_id,
11522 			    bool is_pdev_down)
11523 {
11524 	struct dp_pdev *pdev =
11525 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11526 						   pdev_id);
11527 	if (!pdev)
11528 		return QDF_STATUS_E_FAILURE;
11529 
11530 	pdev->is_pdev_down = is_pdev_down;
11531 	return QDF_STATUS_SUCCESS;
11532 }
11533 
11534 /**
11535  * dp_get_cfg_capabilities() - get dp capabilities
11536  * @soc_handle: datapath soc handle
11537  * @dp_caps: enum for dp capabilities
11538  *
11539  * Return: bool to determine if dp caps is enabled
11540  */
11541 static bool
11542 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
11543 			enum cdp_capabilities dp_caps)
11544 {
11545 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
11546 
11547 	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
11548 }
11549 
11550 #ifdef FEATURE_AST
11551 static QDF_STATUS
11552 dp_peer_teardown_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
11553 		       uint8_t *peer_mac)
11554 {
11555 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11556 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11557 	struct dp_peer *peer =
11558 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
11559 					       DP_MOD_ID_CDP);
11560 
11561 	/* Peer can be null for monitor vap mac address */
11562 	if (!peer) {
11563 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
11564 			  "%s: Invalid peer\n", __func__);
11565 		return QDF_STATUS_E_FAILURE;
11566 	}
11567 
11568 	dp_peer_update_state(soc, peer, DP_PEER_STATE_LOGICAL_DELETE);
11569 
11570 	qdf_spin_lock_bh(&soc->ast_lock);
11571 	dp_peer_delete_ast_entries(soc, peer);
11572 	qdf_spin_unlock_bh(&soc->ast_lock);
11573 
11574 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11575 	return status;
11576 }
11577 #endif
11578 
11579 #ifndef WLAN_SUPPORT_RX_TAG_STATISTICS
11580 /**
11581  * dp_dump_pdev_rx_protocol_tag_stats - dump the number of packets tagged for
 * a given protocol type (RX_PROTOCOL_TAG_ALL indicates all protocols)
11583  * @soc: cdp_soc handle
11584  * @pdev_id: id of cdp_pdev handle
11585  * @protocol_type: protocol type for which stats should be displayed
11586  *
11587  * Return: none
11588  */
11589 static inline void
11590 dp_dump_pdev_rx_protocol_tag_stats(struct cdp_soc_t  *soc, uint8_t pdev_id,
11591 				   uint16_t protocol_type)
11592 {
11593 }
11594 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
11595 
11596 #ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
11597 /**
11598  * dp_update_pdev_rx_protocol_tag - Add/remove a protocol tag that should be
11599  * applied to the desired protocol type packets
11600  * @soc: soc handle
11601  * @pdev_id: id of cdp_pdev handle
 * @enable_rx_protocol_tag: bitmask that indicates which protocol types
 * are enabled for tagging; zero disables the feature, non-zero enables it
11605  * @protocol_type: new protocol type for which the tag is being added
11606  * @tag: user configured tag for the new protocol
11607  *
11608  * Return: Success
11609  */
11610 static inline QDF_STATUS
11611 dp_update_pdev_rx_protocol_tag(struct cdp_soc_t  *soc, uint8_t pdev_id,
11612 			       uint32_t enable_rx_protocol_tag,
11613 			       uint16_t protocol_type,
11614 			       uint16_t tag)
11615 {
11616 	return QDF_STATUS_SUCCESS;
11617 }
11618 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
11619 
11620 #ifndef WLAN_SUPPORT_RX_FLOW_TAG
11621 /**
11622  * dp_set_rx_flow_tag - add/delete a flow
 * @cdp_soc: soc handle
11624  * @pdev_id: id of cdp_pdev handle
11625  * @flow_info: flow tuple that is to be added to/deleted from flow search table
11626  *
11627  * Return: Success
11628  */
11629 static inline QDF_STATUS
11630 dp_set_rx_flow_tag(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
11631 		   struct cdp_rx_flow_info *flow_info)
11632 {
11633 	return QDF_STATUS_SUCCESS;
}

11635 /**
11636  * dp_dump_rx_flow_tag_stats - dump the number of packets tagged for
11637  * given flow 5-tuple
11638  * @cdp_soc: soc handle
11639  * @pdev_id: id of cdp_pdev handle
11640  * @flow_info: flow 5-tuple for which stats should be displayed
11641  *
11642  * Return: Success
11643  */
11644 static inline QDF_STATUS
11645 dp_dump_rx_flow_tag_stats(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
11646 			  struct cdp_rx_flow_info *flow_info)
11647 {
11648 	return QDF_STATUS_SUCCESS;
11649 }
11650 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
11651 
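/**
 * dp_peer_map_attach_wifi3() - set peer/AST limits and attach peer tables
 * @soc_hdl: datapath soc handle
 * @max_peers: max number of peers supported by the target
 * @max_ast_index: max number of AST indexes supported
 * @peer_map_unmap_versions: HTT peer map/unmap message version support
 *
 * Return: QDF_STATUS
 */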
11652 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
11653 					   uint32_t max_peers,
11654 					   uint32_t max_ast_index,
11655 					   uint8_t peer_map_unmap_versions)
11656 {
11657 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11658 	QDF_STATUS status;
11659 
11660 	soc->max_peers = max_peers;
11661 
11662 	wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);
11663 
11664 	status = soc->arch_ops.txrx_peer_map_attach(soc);
11665 	if (!QDF_IS_STATUS_SUCCESS(status)) {
11666 		dp_err("failure in allocating peer tables");
11667 		return QDF_STATUS_E_FAILURE;
11668 	}
11669 
	dp_info("max_peers %u, calculated max_peers %u, max_ast_index: %u",
11671 		max_peers, soc->max_peer_id, max_ast_index);
11672 
11673 	status = dp_peer_find_attach(soc);
11674 	if (!QDF_IS_STATUS_SUCCESS(status)) {
11675 		dp_err("Peer find attach failure");
11676 		goto fail;
11677 	}
11678 
11679 	soc->peer_map_unmap_versions = peer_map_unmap_versions;
11680 	soc->peer_map_attach_success = TRUE;
11681 
11682 	return QDF_STATUS_SUCCESS;
11683 fail:
11684 	soc->arch_ops.txrx_peer_map_detach(soc);
11685 
11686 	return status;
11687 }
11688 
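/**
 * dp_soc_set_param() - generic setter for per-soc datapath parameters
 * @soc_hdl: datapath soc handle
 * @param: parameter to set
 * @value: parameter value
 *
 * Return: QDF_STATUS
 */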
11689 static QDF_STATUS dp_soc_set_param(struct cdp_soc_t  *soc_hdl,
11690 				   enum cdp_soc_param_t param,
11691 				   uint32_t value)
11692 {
11693 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11694 
11695 	switch (param) {
11696 	case DP_SOC_PARAM_MSDU_EXCEPTION_DESC:
11697 		soc->num_msdu_exception_desc = value;
		dp_info("num_msdu_exception_desc %u", value);
11700 		break;
11701 	case DP_SOC_PARAM_CMEM_FSE_SUPPORT:
11702 		if (wlan_cfg_is_fst_in_cmem_enabled(soc->wlan_cfg_ctx))
11703 			soc->fst_in_cmem = !!value;
11704 		dp_info("FW supports CMEM FSE %u", value);
11705 		break;
11706 	case DP_SOC_PARAM_MAX_AST_AGEOUT:
11707 		soc->max_ast_ageout_count = value;
11708 		dp_info("Max ast ageout count %u", soc->max_ast_ageout_count);
11709 		break;
11710 	case DP_SOC_PARAM_EAPOL_OVER_CONTROL_PORT:
11711 		soc->eapol_over_control_port = value;
11712 		dp_info("Eapol over control_port:%d",
11713 			soc->eapol_over_control_port);
11714 		break;
11715 	case DP_SOC_PARAM_MULTI_PEER_GRP_CMD_SUPPORT:
11716 		soc->multi_peer_grp_cmd_supported = value;
11717 		dp_info("Multi Peer group command support:%d",
11718 			soc->multi_peer_grp_cmd_supported);
11719 		break;
11720 	default:
11721 		dp_info("not handled param %d ", param);
11722 		break;
11723 	}
11724 
11725 	return QDF_STATUS_SUCCESS;
11726 }
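
/*
 * Illustrative call (hypothetical value): raising the MSDU exception
 * descriptor count through the generic setter above.
 *
 *	dp_soc_set_param(soc_hdl, DP_SOC_PARAM_MSDU_EXCEPTION_DESC, 512);
 */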
11727 
11728 static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
11729 				      void *stats_ctx)
11730 {
11731 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
11732 
11733 	soc->rate_stats_ctx = (struct cdp_soc_rate_stats_ctx *)stats_ctx;
11734 }
11735 
11736 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
11737 /**
11738  * dp_peer_flush_rate_stats_req(): Flush peer rate stats
11739  * @soc: Datapath SOC handle
11740  * @peer: Datapath peer
11741  * @arg: argument to iter function
11742  *
 * Return: None
11744  */
11745 static void
11746 dp_peer_flush_rate_stats_req(struct dp_soc *soc, struct dp_peer *peer,
11747 			     void *arg)
11748 {
11749 	if (peer->bss_peer)
11750 		return;
11751 
11752 	dp_wdi_event_handler(
11753 		WDI_EVENT_FLUSH_RATE_STATS_REQ,
11754 		soc, dp_monitor_peer_get_rdkstats_ctx(soc, peer),
11755 		peer->peer_id,
11756 		WDI_NO_VAL, peer->vdev->pdev->pdev_id);
11757 }
11758 
11759 /**
11760  * dp_flush_rate_stats_req(): Flush peer rate stats in pdev
11761  * @soc_hdl: Datapath SOC handle
11762  * @pdev_id: pdev_id
11763  *
11764  * Return: QDF_STATUS
11765  */
11766 static QDF_STATUS dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
11767 					  uint8_t pdev_id)
11768 {
11769 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11770 	struct dp_pdev *pdev =
11771 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11772 						   pdev_id);
11773 	if (!pdev)
11774 		return QDF_STATUS_E_FAILURE;
11775 
11776 	dp_pdev_iterate_peer(pdev, dp_peer_flush_rate_stats_req, NULL,
11777 			     DP_MOD_ID_CDP);
11778 
11779 	return QDF_STATUS_SUCCESS;
11780 }
11781 #else
11782 static inline QDF_STATUS
11783 dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
11784 			uint8_t pdev_id)
11785 {
11786 	return QDF_STATUS_SUCCESS;
11787 }
11788 #endif
11789 
11790 static void *dp_peer_get_rdkstats_ctx(struct cdp_soc_t *soc_hdl,
11791 				      uint8_t vdev_id,
11792 				      uint8_t *mac_addr)
11793 {
11794 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11795 	struct dp_peer *peer;
11796 	void *rdkstats_ctx = NULL;
11797 
11798 	if (mac_addr) {
11799 		peer = dp_peer_find_hash_find(soc, mac_addr,
11800 					      0, vdev_id,
11801 					      DP_MOD_ID_CDP);
11802 		if (!peer)
11803 			return NULL;
11804 
11805 		if (!IS_MLO_DP_MLD_PEER(peer))
11806 			rdkstats_ctx = dp_monitor_peer_get_rdkstats_ctx(soc,
11807 									peer);
11808 
11809 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11810 	}
11811 
11812 	return rdkstats_ctx;
11813 }
11814 
11815 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
11816 static QDF_STATUS dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
11817 					   uint8_t pdev_id,
11818 					   void *buf)
11819 {
11820 	 dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS,
11821 			      (struct dp_soc *)soc, buf, HTT_INVALID_PEER,
11822 			      WDI_NO_VAL, pdev_id);
11823 	return QDF_STATUS_SUCCESS;
11824 }
11825 #else
11826 static inline QDF_STATUS
11827 dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
11828 			 uint8_t pdev_id,
11829 			 void *buf)
11830 {
11831 	return QDF_STATUS_SUCCESS;
11832 }
11833 #endif
11834 
11835 static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle)
11836 {
11837 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
11838 
11839 	return soc->rate_stats_ctx;
11840 }
11841 
11842 /*
11843  * dp_get_cfg() - get dp cfg
11844  * @soc: cdp soc handle
11845  * @cfg: cfg enum
11846  *
11847  * Return: cfg value
11848  */
11849 static uint32_t dp_get_cfg(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg)
11850 {
11851 	struct dp_soc *dpsoc = (struct dp_soc *)soc;
11852 	uint32_t value = 0;
11853 
11854 	switch (cfg) {
11855 	case cfg_dp_enable_data_stall:
11856 		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
11857 		break;
11858 	case cfg_dp_enable_p2p_ip_tcp_udp_checksum_offload:
11859 		value = dpsoc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload;
11860 		break;
11861 	case cfg_dp_enable_nan_ip_tcp_udp_checksum_offload:
11862 		value = dpsoc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload;
11863 		break;
11864 	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
11865 		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
11866 		break;
11867 	case cfg_dp_disable_legacy_mode_csum_offload:
11868 		value = dpsoc->wlan_cfg_ctx->
11869 					legacy_mode_checksumoffload_disable;
11870 		break;
11871 	case cfg_dp_tso_enable:
11872 		value = dpsoc->wlan_cfg_ctx->tso_enabled;
11873 		break;
11874 	case cfg_dp_lro_enable:
11875 		value = dpsoc->wlan_cfg_ctx->lro_enabled;
11876 		break;
11877 	case cfg_dp_gro_enable:
11878 		value = dpsoc->wlan_cfg_ctx->gro_enabled;
11879 		break;
11880 	case cfg_dp_force_gro_enable:
11881 		value = dpsoc->wlan_cfg_ctx->force_gro_enabled;
11882 		break;
11883 	case cfg_dp_sg_enable:
11884 		value = dpsoc->wlan_cfg_ctx->sg_enabled;
11885 		break;
11886 	case cfg_dp_tx_flow_start_queue_offset:
11887 		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
11888 		break;
11889 	case cfg_dp_tx_flow_stop_queue_threshold:
11890 		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
11891 		break;
11892 	case cfg_dp_disable_intra_bss_fwd:
11893 		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
11894 		break;
11895 	case cfg_dp_pktlog_buffer_size:
11896 		value = dpsoc->wlan_cfg_ctx->pktlog_buffer_size;
11897 		break;
11898 	case cfg_dp_wow_check_rx_pending:
11899 		value = dpsoc->wlan_cfg_ctx->wow_check_rx_pending_enable;
11900 		break;
11901 	default:
11902 		value =  0;
11903 	}
11904 
11905 	return value;
11906 }
11907 
11908 #ifdef PEER_FLOW_CONTROL
11909 /**
11910  * dp_tx_flow_ctrl_configure_pdev() - Configure flow control params
11911  * @soc_handle: datapath soc handle
11912  * @pdev_id: id of datapath pdev handle
11913  * @param: ol ath params
11914  * @value: value of the flag
11915  * @buff: Buffer to be passed
11916  *
 * This function is implemented the same way as the legacy function: in the
 * legacy code, a single function is used to display stats and update pdev
 * params.
11919  *
11920  * Return: 0 for success. nonzero for failure.
11921  */
11922 static uint32_t dp_tx_flow_ctrl_configure_pdev(struct cdp_soc_t *soc_handle,
11923 					       uint8_t pdev_id,
11924 					       enum _dp_param_t param,
11925 					       uint32_t value, void *buff)
11926 {
11927 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
11928 	struct dp_pdev *pdev =
11929 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11930 						   pdev_id);
11931 
11932 	if (qdf_unlikely(!pdev))
11933 		return 1;
11934 
11935 	soc = pdev->soc;
11936 	if (!soc)
11937 		return 1;
11938 
11939 	switch (param) {
11940 #ifdef QCA_ENH_V3_STATS_SUPPORT
11941 	case DP_PARAM_VIDEO_DELAY_STATS_FC:
11942 		if (value)
11943 			pdev->delay_stats_flag = true;
11944 		else
11945 			pdev->delay_stats_flag = false;
11946 		break;
11947 	case DP_PARAM_VIDEO_STATS_FC:
11948 		qdf_print("------- TID Stats ------\n");
11949 		dp_pdev_print_tid_stats(pdev);
11950 		qdf_print("------ Delay Stats ------\n");
11951 		dp_pdev_print_delay_stats(pdev);
11952 		qdf_print("------ Rx Error Stats ------\n");
11953 		dp_pdev_print_rx_error_stats(pdev);
11954 		break;
11955 #endif
11956 	case DP_PARAM_TOTAL_Q_SIZE:
11957 		{
11958 			uint32_t tx_min, tx_max;
11959 
11960 			tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx);
11961 			tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
11962 
11963 			if (!buff) {
11964 				if ((value >= tx_min) && (value <= tx_max)) {
11965 					pdev->num_tx_allowed = value;
11966 				} else {
11967 					dp_tx_info("%pK: Failed to update num_tx_allowed, Q_min = %d Q_max = %d",
11968 						   soc, tx_min, tx_max);
11969 					break;
11970 				}
11971 			} else {
11972 				*(int *)buff = pdev->num_tx_allowed;
11973 			}
11974 		}
11975 		break;
11976 	default:
11977 		dp_tx_info("%pK: not handled param %d ", soc, param);
11978 		break;
11979 	}
11980 
11981 	return 0;
11982 }
11983 #endif
11984 
11985 /**
11986  * dp_set_pdev_pcp_tid_map_wifi3(): update pcp tid map in pdev
11987  * @psoc: dp soc handle
11988  * @pdev_id: id of DP_PDEV handle
11989  * @pcp: pcp value
11990  * @tid: tid value passed by the user
11991  *
11992  * Return: QDF_STATUS_SUCCESS on success
11993  */
11994 static QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
11995 						uint8_t pdev_id,
11996 						uint8_t pcp, uint8_t tid)
11997 {
11998 	struct dp_soc *soc = (struct dp_soc *)psoc;
11999 
12000 	soc->pcp_tid_map[pcp] = tid;
12001 
12002 	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
12003 	return QDF_STATUS_SUCCESS;
12004 }
12005 
12006 /**
12007  * dp_set_vdev_pcp_tid_map_wifi3(): update pcp tid map in vdev
12008  * @soc: DP soc handle
12009  * @vdev_id: id of DP_VDEV handle
12010  * @pcp: pcp value
12011  * @tid: tid value passed by the user
12012  *
12013  * Return: QDF_STATUS_SUCCESS on success
12014  */
12015 static QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc_hdl,
12016 						uint8_t vdev_id,
12017 						uint8_t pcp, uint8_t tid)
12018 {
12019 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12020 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12021 						     DP_MOD_ID_CDP);
12022 
12023 	if (!vdev)
12024 		return QDF_STATUS_E_FAILURE;
12025 
12026 	vdev->pcp_tid_map[pcp] = tid;
12027 
12028 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12029 	return QDF_STATUS_SUCCESS;
12030 }
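
/*
 * Illustrative usage (values hypothetical): overriding a single
 * 802.1p PCP entry, first pdev-wide and then for one vdev; the
 * per-vdev map applies only to traffic on that vdev.
 *
 *	dp_set_pdev_pcp_tid_map_wifi3(psoc, pdev_id, 5, 5);
 *	dp_set_vdev_pcp_tid_map_wifi3(soc_hdl, vdev_id, 5, 6);
 */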
12031 
12032 #if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
12033 static void dp_drain_txrx(struct cdp_soc_t *soc_handle)
12034 {
12035 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12036 	uint32_t cur_tx_limit, cur_rx_limit;
12037 	uint32_t budget = 0xffff;
12038 	uint32_t val;
12039 	int i;
12040 
12041 	cur_tx_limit = soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit;
12042 	cur_rx_limit = soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit;
12043 
12044 	/* Temporarily increase soft irq limits when going to drain
12045 	 * the UMAC/LMAC SRNGs and restore them after polling.
	 * Though the budget is on the higher side, the TX/RX reaping loops
	 * will not run for long, as both TX and RX would already be
	 * suspended by the time this API is called.
12049 	 */
12050 	dp_update_soft_irq_limits(soc, budget, budget);
12051 
12052 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
12053 		dp_service_srngs(&soc->intr_ctx[i], budget);
12054 
12055 	dp_update_soft_irq_limits(soc, cur_tx_limit, cur_rx_limit);
12056 
12057 	/* Do a dummy read at offset 0; this will ensure all
	 * pending writes (HP/TP) are flushed before the read returns.
12059 	 */
12060 	val = HAL_REG_READ((struct hal_soc *)soc->hal_soc, 0);
12061 	dp_debug("Register value at offset 0: %u\n", val);
12062 }
12063 #endif
12064 
12065 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
12066 static void
12067 dp_set_pkt_capture_mode(struct cdp_soc_t *soc_handle, bool val)
12068 {
12069 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
12070 
12071 	soc->wlan_cfg_ctx->pkt_capture_mode = val;
12072 }
12073 #endif
12074 
12075 static struct cdp_cmn_ops dp_ops_cmn = {
12076 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
12077 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
12078 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
12079 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
12080 	.txrx_pdev_post_attach = dp_pdev_post_attach_wifi3,
12081 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
12082 	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
12083 	.txrx_peer_create = dp_peer_create_wifi3,
12084 	.txrx_peer_setup = dp_peer_setup_wifi3,
12085 #ifdef FEATURE_AST
12086 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
12087 #else
12088 	.txrx_peer_teardown = NULL,
12089 #endif
12090 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
12091 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
12092 	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
12093 	.txrx_peer_get_ast_info_by_pdev =
12094 		dp_peer_get_ast_info_by_pdevid_wifi3,
12095 	.txrx_peer_ast_delete_by_soc =
12096 		dp_peer_ast_entry_del_by_soc,
12097 	.txrx_peer_ast_delete_by_pdev =
12098 		dp_peer_ast_entry_del_by_pdev,
12099 	.txrx_peer_delete = dp_peer_delete_wifi3,
12100 	.txrx_vdev_register = dp_vdev_register_wifi3,
12101 	.txrx_soc_detach = dp_soc_detach_wifi3,
12102 	.txrx_soc_deinit = dp_soc_deinit_wifi3,
12103 	.txrx_soc_init = dp_soc_init_wifi3,
12104 #ifndef QCA_HOST_MODE_WIFI_DISABLED
12105 	.txrx_tso_soc_attach = dp_tso_soc_attach,
12106 	.txrx_tso_soc_detach = dp_tso_soc_detach,
12107 	.tx_send = dp_tx_send,
12108 	.tx_send_exc = dp_tx_send_exception,
12109 #endif
12110 	.txrx_pdev_init = dp_pdev_init_wifi3,
12111 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
12112 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
12113 	.txrx_ath_getstats = dp_get_device_stats,
12114 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
12115 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
12116 	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
12117 	.delba_process = dp_delba_process_wifi3,
12118 	.set_addba_response = dp_set_addba_response,
12119 	.flush_cache_rx_queue = NULL,
12120 	/* TODO: get API's for dscp-tid need to be added*/
12121 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
12122 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
12123 	.txrx_get_total_per = dp_get_total_per,
12124 	.txrx_stats_request = dp_txrx_stats_request,
12125 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
12126 	.display_stats = dp_txrx_dump_stats,
12127 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
12128 	.txrx_intr_detach = dp_soc_interrupt_detach,
12129 	.set_pn_check = dp_set_pn_check_wifi3,
12130 	.set_key_sec_type = dp_set_key_sec_type_wifi3,
12131 	.update_config_parameters = dp_update_config_parameters,
12132 	/* TODO: Add other functions */
12133 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
12134 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
12135 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
12136 	.get_vdev_dp_ext_txrx_handle = dp_vdev_get_dp_ext_handle,
12137 	.set_vdev_dp_ext_txrx_handle = dp_vdev_set_dp_ext_handle,
12138 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
12139 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
12140 	.map_pdev_to_lmac = dp_soc_map_pdev_to_lmac,
12141 	.handle_mode_change = dp_soc_handle_pdev_mode_change,
12142 	.set_pdev_status_down = dp_soc_set_pdev_status_down,
12143 	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
12144 	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
12145 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
12146 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
12147 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
12148 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
12149 	.set_soc_param = dp_soc_set_param,
12150 	.txrx_get_os_rx_handles_from_vdev =
12151 					dp_get_os_rx_handles_from_vdev_wifi3,
12152 	.delba_tx_completion = dp_delba_tx_completion_wifi3,
12153 	.get_dp_capabilities = dp_get_cfg_capabilities,
12154 	.txrx_get_cfg = dp_get_cfg,
12155 	.set_rate_stats_ctx = dp_soc_set_rate_stats_ctx,
12156 	.get_rate_stats_ctx = dp_soc_get_rate_stats_ctx,
12157 	.txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats,
12158 	.txrx_flush_rate_stats_request = dp_flush_rate_stats_req,
12159 	.txrx_peer_get_rdkstats_ctx = dp_peer_get_rdkstats_ctx,
12160 
12161 	.set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3,
12162 	.set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3,
12163 
12164 	.txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler,
12165 #ifdef QCA_MULTIPASS_SUPPORT
12166 	.set_vlan_groupkey = dp_set_vlan_groupkey,
12167 #endif
12168 	.get_peer_mac_list = dp_get_peer_mac_list,
12169 #ifdef QCA_SUPPORT_WDS_EXTENDED
12170 	.get_wds_ext_peer_id = dp_wds_ext_get_peer_id,
12171 	.set_wds_ext_peer_rx = dp_wds_ext_set_peer_rx,
12172 #endif /* QCA_SUPPORT_WDS_EXTENDED */
12173 
12174 #if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
12175 	.txrx_drain = dp_drain_txrx,
12176 #endif
12177 #if defined(FEATURE_RUNTIME_PM)
12178 	.set_rtpm_tput_policy = dp_set_rtpm_tput_policy_requirement,
12179 #endif
12180 #ifdef WLAN_SYSFS_DP_STATS
12181 	.txrx_sysfs_fill_stats = dp_sysfs_fill_stats,
12182 	.txrx_sysfs_set_stat_type = dp_sysfs_set_stat_type,
12183 #endif /* WLAN_SYSFS_DP_STATS */
12184 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
12185 	.set_pkt_capture_mode = dp_set_pkt_capture_mode,
12186 #endif
12187 };
12188 
12189 static struct cdp_ctrl_ops dp_ops_ctrl = {
12190 	.txrx_peer_authorize = dp_peer_authorize,
12191 	.txrx_peer_get_authorize = dp_peer_get_authorize,
12192 #ifdef VDEV_PEER_PROTOCOL_COUNT
12193 	.txrx_enable_peer_protocol_count = dp_enable_vdev_peer_protocol_count,
12194 	.txrx_set_peer_protocol_drop_mask =
12195 		dp_enable_vdev_peer_protocol_drop_mask,
12196 	.txrx_is_peer_protocol_count_enabled =
12197 		dp_is_vdev_peer_protocol_count_enabled,
12198 	.txrx_get_peer_protocol_drop_mask = dp_get_vdev_peer_protocol_drop_mask,
12199 #endif
12200 	.txrx_set_vdev_param = dp_set_vdev_param,
12201 	.txrx_set_psoc_param = dp_set_psoc_param,
12202 	.txrx_get_psoc_param = dp_get_psoc_param,
12203 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
12204 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
12205 	.txrx_get_sec_type = dp_get_sec_type,
12206 	.txrx_wdi_event_sub = dp_wdi_event_sub,
12207 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
12208 	.txrx_set_pdev_param = dp_set_pdev_param,
12209 	.txrx_get_pdev_param = dp_get_pdev_param,
12210 	.txrx_set_peer_param = dp_set_peer_param,
12211 	.txrx_get_peer_param = dp_get_peer_param,
12212 #ifdef VDEV_PEER_PROTOCOL_COUNT
12213 	.txrx_peer_protocol_cnt = dp_peer_stats_update_protocol_cnt,
12214 #endif
12215 #ifdef WLAN_SUPPORT_MSCS
12216 	.txrx_record_mscs_params = dp_record_mscs_params,
12217 #endif
12218 #ifdef WLAN_SUPPORT_SCS
12219 	.txrx_enable_scs_params = dp_enable_scs_params,
12220 	.txrx_record_scs_params = dp_record_scs_params,
12221 #endif
12222 	.set_key = dp_set_michael_key,
12223 	.txrx_get_vdev_param = dp_get_vdev_param,
12224 	.calculate_delay_stats = dp_calculate_delay_stats,
12225 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
12226 	.txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag,
12227 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
12228 	.txrx_dump_pdev_rx_protocol_tag_stats =
12229 				dp_dump_pdev_rx_protocol_tag_stats,
12230 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
12231 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
12232 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
12233 	.txrx_set_rx_flow_tag = dp_set_rx_flow_tag,
12234 	.txrx_dump_rx_flow_tag_stats = dp_dump_rx_flow_tag_stats,
12235 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
12236 #ifdef QCA_MULTIPASS_SUPPORT
12237 	.txrx_peer_set_vlan_id = dp_peer_set_vlan_id,
12238 #endif /*QCA_MULTIPASS_SUPPORT*/
12239 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
12240 	.txrx_set_delta_tsf = dp_set_delta_tsf,
12241 	.txrx_set_tsf_ul_delay_report = dp_set_tsf_ul_delay_report,
12242 	.txrx_get_uplink_delay = dp_get_uplink_delay,
12243 #endif
12244 #ifdef QCA_UNDECODED_METADATA_SUPPORT
12245 	.txrx_set_pdev_phyrx_error_mask = dp_set_pdev_phyrx_error_mask,
12246 	.txrx_get_pdev_phyrx_error_mask = dp_get_pdev_phyrx_error_mask,
12247 #endif
12248 };
12249 
12250 static struct cdp_me_ops dp_ops_me = {
12251 #ifndef QCA_HOST_MODE_WIFI_DISABLED
12252 #ifdef ATH_SUPPORT_IQUE
12253 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
12254 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
12255 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
12256 #endif
12257 #endif
12258 };
12259 
12260 static struct cdp_host_stats_ops dp_ops_host_stats = {
12261 	.txrx_per_peer_stats = dp_get_host_peer_stats,
12262 	.get_fw_peer_stats = dp_get_fw_peer_stats,
12263 	.get_htt_stats = dp_get_htt_stats,
12264 	.txrx_stats_publish = dp_txrx_stats_publish,
12265 	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
12266 	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
12267 	.txrx_get_soc_stats = dp_txrx_get_soc_stats,
12268 	.txrx_get_peer_stats_param = dp_txrx_get_peer_stats_param,
12269 	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
12270 	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
12271 	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
12272 	.txrx_update_vdev_stats = dp_txrx_update_vdev_host_stats,
12273 	.txrx_get_peer_delay_stats = dp_txrx_get_peer_delay_stats,
12274 	.txrx_get_peer_jitter_stats = dp_txrx_get_peer_jitter_stats,
12275 #ifdef QCA_VDEV_STATS_HW_OFFLOAD_SUPPORT
12276 	.txrx_alloc_vdev_stats_id = dp_txrx_alloc_vdev_stats_id,
12277 	.txrx_reset_vdev_stats_id = dp_txrx_reset_vdev_stats_id,
12278 #endif
12279 #ifdef WLAN_TX_PKT_CAPTURE_ENH
12280 	.get_peer_tx_capture_stats = dp_peer_get_tx_capture_stats,
12281 	.get_pdev_tx_capture_stats = dp_pdev_get_tx_capture_stats,
12282 #endif /* WLAN_TX_PKT_CAPTURE_ENH */
12283 	/* TODO */
12284 };
12285 
12286 static struct cdp_raw_ops dp_ops_raw = {
12287 	/* TODO */
12288 };
12289 
12290 #ifdef PEER_FLOW_CONTROL
12291 static struct cdp_pflow_ops dp_ops_pflow = {
12292 	dp_tx_flow_ctrl_configure_pdev,
12293 };
#endif /* PEER_FLOW_CONTROL */
12295 
12296 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
12297 static struct cdp_cfr_ops dp_ops_cfr = {
12298 	.txrx_cfr_filter = NULL,
12299 	.txrx_get_cfr_rcc = dp_get_cfr_rcc,
12300 	.txrx_set_cfr_rcc = dp_set_cfr_rcc,
12301 	.txrx_get_cfr_dbg_stats = dp_get_cfr_dbg_stats,
12302 	.txrx_clear_cfr_dbg_stats = dp_clear_cfr_dbg_stats,
12303 	.txrx_enable_mon_reap_timer = NULL,
12304 };
12305 #endif
12306 
12307 #ifdef WLAN_SUPPORT_MSCS
12308 static struct cdp_mscs_ops dp_ops_mscs = {
12309 	.mscs_peer_lookup_n_get_priority = dp_mscs_peer_lookup_n_get_priority,
12310 };
12311 #endif
12312 
12313 #ifdef WLAN_SUPPORT_MESH_LATENCY
12314 static struct cdp_mesh_latency_ops dp_ops_mesh_latency = {
12315 	.mesh_latency_update_peer_parameter =
12316 		dp_mesh_latency_update_peer_parameter,
12317 };
12318 #endif
12319 
12320 #if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
12321 /**
 * dp_flush_ring_hptp() - Update ring shadow register HP/TP address
 *			  on runtime resume
 * @soc: DP soc context
 * @hal_srng: srng handle
12326  *
12327  * Return: None
12328  */
12329 static
12330 void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
12331 {
12332 	if (hal_srng && hal_srng_get_clear_event(hal_srng,
12333 						 HAL_SRNG_FLUSH_EVENT)) {
12334 		/* Acquire the lock */
12335 		hal_srng_access_start(soc->hal_soc, hal_srng);
12336 
12337 		hal_srng_access_end(soc->hal_soc, hal_srng);
12338 
12339 		hal_srng_set_flush_last_ts(hal_srng);
12340 
12341 		dp_debug("flushed");
12342 	}
12343 }
12344 #endif
12345 
12346 #ifdef DP_TX_TRACKING
12347 
12348 #define DP_TX_COMP_MAX_LATENCY_MS 30000
12349 /**
12350  * dp_tx_comp_delay_check() - calculate time latency for tx completion per pkt
 * @timestamp: tx descriptor timestamp
12352  *
12353  * Calculate time latency for tx completion per pkt and trigger self recovery
12354  * when the delay is more than threshold value.
12355  *
12356  * Return: True if delay is more than threshold
12357  */
12358 static bool dp_tx_comp_delay_check(uint64_t timestamp)
12359 {
12360 	uint64_t time_latency, current_time;
12361 
12362 	if (!timestamp)
12363 		return false;
12364 
12365 	if (dp_tx_pkt_tracepoints_enabled()) {
12366 		current_time = qdf_ktime_to_ms(qdf_ktime_real_get());
12367 		time_latency = current_time - timestamp;
12368 		if (time_latency >= DP_TX_COMP_MAX_LATENCY_MS) {
			dp_err_rl("enqueued: %llu ms, current: %llu ms",
12370 				  timestamp, current_time);
12371 			return true;
12372 		}
12373 	} else {
12374 		current_time = qdf_system_ticks();
12375 		time_latency = qdf_system_ticks_to_msecs(current_time -
12376 							 timestamp);
12377 		if (time_latency >= DP_TX_COMP_MAX_LATENCY_MS) {
12378 			dp_err_rl("enqueued: %u ms, current: %u ms",
12379 				  qdf_system_ticks_to_msecs(timestamp),
12380 				  qdf_system_ticks_to_msecs(current_time));
12381 			return true;
12382 		}
12383 	}
12384 
12385 	return false;
12386 }
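
/*
 * Worked example (illustrative numbers): with DP_TX_COMP_MAX_LATENCY_MS
 * = 30000, a descriptor stamped at 1000 ms that is still outstanding when
 * the clock reads 32000 ms gives a latency of 31000 ms >= 30000 ms, so
 * dp_tx_comp_delay_check() returns true. Note the two clock domains:
 * qdf_ktime in ms when tx packet tracepoints are enabled, system ticks
 * otherwise; the descriptor timestamp must come from the same domain that
 * is sampled here.
 */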
12387 
12388 /**
12389  * dp_find_missing_tx_comp() - check for leaked descriptor in tx path
12390  * @soc: DP SOC context
12391  *
12392  * Parse through descriptors in all pools and validate magic number and
12393  * completion time. Trigger self recovery if magic value is corrupted.
12394  *
12395  * Return: None.
12396  */
12397 static void dp_find_missing_tx_comp(struct dp_soc *soc)
12398 {
12399 	uint8_t i;
12400 	uint32_t j;
12401 	uint32_t num_desc, page_id, offset;
12402 	uint16_t num_desc_per_page;
12403 	struct dp_tx_desc_s *tx_desc = NULL;
12404 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
12405 
12406 	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
12407 		tx_desc_pool = &soc->tx_desc[i];
12408 		if (!(tx_desc_pool->pool_size) ||
12409 		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
12410 		    !(tx_desc_pool->desc_pages.cacheable_pages))
12411 			continue;
12412 
12413 		num_desc = tx_desc_pool->pool_size;
12414 		num_desc_per_page =
12415 			tx_desc_pool->desc_pages.num_element_per_page;
12416 		for (j = 0; j < num_desc; j++) {
12417 			page_id = j / num_desc_per_page;
12418 			offset = j % num_desc_per_page;
12419 
12420 			if (qdf_unlikely(!(tx_desc_pool->
12421 					 desc_pages.cacheable_pages)))
12422 				break;
12423 
12424 			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
12425 			if (tx_desc->magic == DP_TX_MAGIC_PATTERN_FREE) {
12426 				continue;
12427 			} else if (tx_desc->magic ==
12428 				   DP_TX_MAGIC_PATTERN_INUSE) {
12429 				if (dp_tx_comp_delay_check(
12430 							tx_desc->timestamp)) {
12431 					dp_err_rl("Tx completion not rcvd for id: %u",
12432 						  tx_desc->id);
12433 				}
12434 			} else {
12435 				dp_err_rl("tx desc %u corrupted, flags: 0x%x",
12436 				       tx_desc->id, tx_desc->flags);
12437 			}
12438 		}
12439 	}
12440 }
12441 #else
12442 static inline void dp_find_missing_tx_comp(struct dp_soc *soc)
12443 {
12444 }
12445 #endif
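
/*
 * Illustrative sketch of the pool walk above (numbers are assumptions; the
 * real geometry comes from the descriptor pool configuration): with
 * num_element_per_page = 64, descriptor j = 130 maps to
 * page_id = 130 / 64 = 2 and offset = 130 % 64 = 2, which
 * dp_tx_desc_find() resolves within the third cacheable page.
 */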
12446 
12447 #ifdef FEATURE_RUNTIME_PM
12448 /**
12449  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
12450  * @soc_hdl: Datapath soc handle
12451  * @pdev_id: id of data path pdev handle
12452  *
12453  * DP is ready to runtime suspend if there are no pending TX packets.
12454  *
12455  * Return: QDF_STATUS
12456  */
12457 static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
12458 {
12459 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12460 	struct dp_pdev *pdev;
12461 	uint8_t i;
12462 	int32_t tx_pending;
12463 
12464 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12465 	if (!pdev) {
12466 		dp_err("pdev is NULL");
12467 		return QDF_STATUS_E_INVAL;
12468 	}
12469 
12470 	/* Abort if there are any pending TX packets */
12471 	tx_pending = dp_get_tx_pending(dp_pdev_to_cdp_pdev(pdev));
12472 	if (tx_pending) {
12473 		dp_info_rl("%pK: Abort suspend due to pending TX packets %d",
12474 			   soc, tx_pending);
12475 		dp_find_missing_tx_comp(soc);
12476 		/* perform a force flush if tx is pending */
12477 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
12478 			hal_srng_set_event(soc->tcl_data_ring[i].hal_srng,
12479 					   HAL_SRNG_FLUSH_EVENT);
12480 			dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
12481 		}
12482 		qdf_atomic_set(&soc->tx_pending_rtpm, 0);
12483 
12484 		return QDF_STATUS_E_AGAIN;
12485 	}
12486 
12487 	if (dp_runtime_get_refcount(soc)) {
12488 		dp_init_info("refcount: %d", dp_runtime_get_refcount(soc));
12489 
12490 		return QDF_STATUS_E_AGAIN;
12491 	}
12492 
12493 	if (soc->intr_mode == DP_INTR_POLL)
12494 		qdf_timer_stop(&soc->int_timer);
12495 
12496 	dp_rx_fst_update_pm_suspend_status(soc, true);
12497 
12498 	return QDF_STATUS_SUCCESS;
12499 }
12500 
12501 #define DP_FLUSH_WAIT_CNT 10
12502 #define DP_RUNTIME_SUSPEND_WAIT_MS 10
12503 /**
12504  * dp_runtime_resume() - ensure DP is ready to runtime resume
12505  * @soc_hdl: Datapath soc handle
12506  * @pdev_id: id of data path pdev handle
12507  *
12508  * Resume DP for runtime PM.
12509  *
12510  * Return: QDF_STATUS
12511  */
12512 static QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
12513 {
12514 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12515 	int i, suspend_wait = 0;
12516 
12517 	if (soc->intr_mode == DP_INTR_POLL)
12518 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
12519 
12520 	/*
12521 	 * Wait until dp runtime refcount becomes zero or time out, then flush
12522 	 * pending tx for runtime suspend.
12523 	 */
12524 	while (dp_runtime_get_refcount(soc) &&
12525 	       suspend_wait < DP_FLUSH_WAIT_CNT) {
12526 		qdf_sleep(DP_RUNTIME_SUSPEND_WAIT_MS);
12527 		suspend_wait++;
12528 	}
12529 
12530 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
12531 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
12532 	}
12533 	qdf_atomic_set(&soc->tx_pending_rtpm, 0);
12534 
12535 	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
12536 	dp_rx_fst_update_pm_suspend_status(soc, false);
12537 
12538 	return QDF_STATUS_SUCCESS;
12539 }
12540 #endif /* FEATURE_RUNTIME_PM */
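
/*
 * Usage sketch for the runtime PM pair above (caller side is hypothetical;
 * real callers reach these through the cdp misc ops): QDF_STATUS_E_AGAIN
 * from suspend is a soft failure that the bus layer is expected to retry.
 *
 *	if (dp_runtime_suspend(soc_hdl, pdev_id) == QDF_STATUS_E_AGAIN)
 *		return -EAGAIN;	// TX pending or refcount held; retry later
 *	...
 *	dp_runtime_resume(soc_hdl, pdev_id);	// flushes ring HP/TP shadows
 */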
12541 
12542 /**
12543  * dp_tx_get_success_ack_stats() - get tx success completion count
12544  * @soc_hdl: Datapath soc handle
12545  * @vdevid: vdev identifier
12546  *
12547  * Return: tx success ack count
12548  */
12549 static uint32_t dp_tx_get_success_ack_stats(struct cdp_soc_t *soc_hdl,
12550 					    uint8_t vdev_id)
12551 {
12552 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12553 	struct cdp_vdev_stats *vdev_stats = NULL;
12554 	uint32_t tx_success;
12555 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12556 						     DP_MOD_ID_CDP);
12557 
12558 	if (!vdev) {
12559 		dp_cdp_err("%pK: Invalid vdev id %d", soc, vdev_id);
12560 		return 0;
12561 	}
12562 
12563 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
12564 	if (!vdev_stats) {
12565 		dp_cdp_err("%pK: DP alloc failure - unable to alloc vdev stats", soc);
12566 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12567 		return 0;
12568 	}
12569 
12570 	dp_aggregate_vdev_stats(vdev, vdev_stats);
12571 
12572 	tx_success = vdev_stats->tx.tx_success.num;
12573 	qdf_mem_free(vdev_stats);
12574 
12575 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12576 	return tx_success;
12577 }
12578 
12579 #ifdef WLAN_SUPPORT_DATA_STALL
12580 /**
12581  * dp_register_data_stall_detect_cb() - register data stall callback
12582  * @soc_hdl: Datapath soc handle
12583  * @pdev_id: id of data path pdev handle
12584  * @data_stall_detect_callback: data stall callback function
12585  *
12586  * Return: QDF_STATUS Enumeration
12587  */
12588 static
12589 QDF_STATUS dp_register_data_stall_detect_cb(
12590 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12591 			data_stall_detect_cb data_stall_detect_callback)
12592 {
12593 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12594 	struct dp_pdev *pdev;
12595 
12596 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12597 	if (!pdev) {
12598 		dp_err("pdev NULL!");
12599 		return QDF_STATUS_E_INVAL;
12600 	}
12601 
12602 	pdev->data_stall_detect_callback = data_stall_detect_callback;
12603 	return QDF_STATUS_SUCCESS;
12604 }
12605 
12606 /**
12607  * dp_deregister_data_stall_detect_cb() - de-register data stall callback
12608  * @soc_hdl: Datapath soc handle
12609  * @pdev_id: id of data path pdev handle
12610  * @data_stall_detect_callback: data stall callback function
12611  *
12612  * Return: QDF_STATUS Enumeration
12613  */
12614 static
12615 QDF_STATUS dp_deregister_data_stall_detect_cb(
12616 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12617 			data_stall_detect_cb data_stall_detect_callback)
12618 {
12619 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12620 	struct dp_pdev *pdev;
12621 
12622 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12623 	if (!pdev) {
12624 		dp_err("pdev NULL!");
12625 		return QDF_STATUS_E_INVAL;
12626 	}
12627 
12628 	pdev->data_stall_detect_callback = NULL;
12629 	return QDF_STATUS_SUCCESS;
12630 }
12631 
12632 /**
12633  * dp_txrx_post_data_stall_event() - post data stall event
12634  * @soc_hdl: Datapath soc handle
12635  * @indicator: Module triggering data stall
12636  * @data_stall_type: data stall event type
12637  * @pdev_id: pdev id
12638  * @vdev_id_bitmap: vdev id bitmap
12639  * @recovery_type: data stall recovery type
12640  *
12641  * Return: None
12642  */
12643 static void
12644 dp_txrx_post_data_stall_event(struct cdp_soc_t *soc_hdl,
12645 			      enum data_stall_log_event_indicator indicator,
12646 			      enum data_stall_log_event_type data_stall_type,
12647 			      uint32_t pdev_id, uint32_t vdev_id_bitmap,
12648 			      enum data_stall_log_recovery_type recovery_type)
12649 {
12650 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12651 	struct data_stall_event_info data_stall_info;
12652 	struct dp_pdev *pdev;
12653 
12654 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12655 	if (!pdev) {
12656 		dp_err("pdev NULL!");
12657 		return;
12658 	}
12659 
12660 	if (!pdev->data_stall_detect_callback) {
12661 		dp_err("data stall cb not registered!");
12662 		return;
12663 	}
12664 
12665 	dp_info("data_stall_type: %x pdev_id: %d",
12666 		data_stall_type, pdev_id);
12667 
12668 	data_stall_info.indicator = indicator;
12669 	data_stall_info.data_stall_type = data_stall_type;
12670 	data_stall_info.vdev_id_bitmap = vdev_id_bitmap;
12671 	data_stall_info.pdev_id = pdev_id;
12672 	data_stall_info.recovery_type = recovery_type;
12673 
12674 	pdev->data_stall_detect_callback(&data_stall_info);
12675 }
12676 #endif /* WLAN_SUPPORT_DATA_STALL */
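
/*
 * Minimal sketch of the data stall plumbing (my_stall_cb is hypothetical;
 * registration happens through the cdp misc op txrx_data_stall_cb_register):
 *
 *	static void my_stall_cb(struct data_stall_event_info *info)
 *	{
 *		// act on info->data_stall_type / info->recovery_type
 *	}
 *
 *	dp_register_data_stall_detect_cb(soc_hdl, pdev_id, my_stall_cb);
 *	// detection logic later calls dp_txrx_post_data_stall_event(), which
 *	// fans the event out to the registered callback
 */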
12677 
12678 #ifdef WLAN_FEATURE_STATS_EXT
12679 /* rx hw stats event wait timeout in ms */
12680 #define DP_REO_STATUS_STATS_TIMEOUT 1500
12681 /**
12682  * dp_txrx_ext_stats_request() - request dp txrx extended stats
12683  * @soc_hdl: soc handle
12684  * @pdev_id: pdev id
12685  * @req: stats request
12686  *
12687  * Return: QDF_STATUS
12688  */
12689 static QDF_STATUS
12690 dp_txrx_ext_stats_request(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12691 			  struct cdp_txrx_ext_stats *req)
12692 {
12693 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12694 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12695 	int i = 0;
12696 	int tcl_ring_full = 0;
12697 
12698 	if (!pdev) {
12699 		dp_err("pdev is null");
12700 		return QDF_STATUS_E_INVAL;
12701 	}
12702 
12703 	dp_aggregate_pdev_stats(pdev);
12704 
12705 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++)
12706 		tcl_ring_full += soc->stats.tx.tcl_ring_full[i];
12707 
12708 	req->tx_msdu_enqueue = pdev->stats.tx_i.processed.num;
12709 	req->tx_msdu_overflow = tcl_ring_full;
12710 	req->rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
12711 	req->rx_mpdu_delivered = soc->ext_stats.rx_mpdu_received;
12712 	req->rx_mpdu_missed = pdev->stats.err.reo_error;
12713 	/* only count error source from RXDMA */
12714 	req->rx_mpdu_error = pdev->stats.err.rxdma_error;
12715 
12716 	dp_info("ext stats: tx_msdu_enq = %u, tx_msdu_overflow = %u, "
12717 		"rx_mpdu_received = %u, rx_mpdu_delivered = %u, "
12718 		"rx_mpdu_missed = %u, rx_mpdu_error = %u",
12719 		req->tx_msdu_enqueue,
12720 		req->tx_msdu_overflow,
12721 		req->rx_mpdu_received,
12722 		req->rx_mpdu_delivered,
12723 		req->rx_mpdu_missed,
12724 		req->rx_mpdu_error);
12725 
12726 	return QDF_STATUS_SUCCESS;
12727 }
12728 
12729 /**
12730  * dp_rx_hw_stats_cb() - REO status callback for rx hw stats request
12731  * @soc: soc handle
12732  * @cb_ctxt: callback context
12733  * @reo_status: reo command response status
12734  *
12735  * Return: None
12736  */
12737 static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
12738 			      union hal_reo_status *reo_status)
12739 {
12740 	struct dp_req_rx_hw_stats_t *rx_hw_stats = cb_ctxt;
12741 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
12742 	bool is_query_timeout;
12743 
12744 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
12745 	is_query_timeout = rx_hw_stats->is_query_timeout;
12746 	/* free the cb_ctxt if all pending tid stats query is received */
12747 	if (qdf_atomic_dec_and_test(&rx_hw_stats->pending_tid_stats_cnt)) {
12748 		if (!is_query_timeout) {
12749 			qdf_event_set(&soc->rx_hw_stats_event);
12750 			soc->is_last_stats_ctx_init = false;
12751 		}
12752 
12753 		qdf_mem_free(rx_hw_stats);
12754 	}
12755 
12756 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
12757 		dp_info("REO stats failure %d",
12758 			queue_status->header.status);
12759 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
12760 		return;
12761 	}
12762 
12763 	if (!is_query_timeout) {
12764 		soc->ext_stats.rx_mpdu_received +=
12765 					queue_status->mpdu_frms_cnt;
12766 		soc->ext_stats.rx_mpdu_missed +=
12767 					queue_status->hole_cnt;
12768 	}
12769 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
12770 }
12771 
12772 /**
12773  * dp_request_rx_hw_stats() - request rx hardware stats
12774  * @soc_hdl: soc handle
12775  * @vdev_id: vdev id
12776  *
12777  * Return: QDF_STATUS
12778  */
12779 static QDF_STATUS
12780 dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
12781 {
12782 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12783 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12784 						     DP_MOD_ID_CDP);
12785 	struct dp_peer *peer = NULL;
12786 	QDF_STATUS status;
12787 	struct dp_req_rx_hw_stats_t *rx_hw_stats;
12788 	int rx_stats_sent_cnt = 0;
12789 	uint32_t last_rx_mpdu_received;
12790 	uint32_t last_rx_mpdu_missed;
12791 
12792 	if (!vdev) {
12793 		dp_err("vdev is null for vdev_id: %u", vdev_id);
12794 		status = QDF_STATUS_E_INVAL;
12795 		goto out;
12796 	}
12797 
12798 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_CDP);
12799 
12800 	if (!peer) {
12801 		dp_err("Peer is NULL");
12802 		status = QDF_STATUS_E_INVAL;
12803 		goto out;
12804 	}
12805 
12806 	rx_hw_stats = qdf_mem_malloc(sizeof(*rx_hw_stats));
12807 
12808 	if (!rx_hw_stats) {
12809 		dp_err("malloc failed for hw stats structure");
12810 		status = QDF_STATUS_E_INVAL;
12811 		goto out;
12812 	}
12813 
12814 	qdf_event_reset(&soc->rx_hw_stats_event);
12815 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
12816 	/* save the last soc cumulative stats and reset it to 0 */
12817 	last_rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
12818 	last_rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
12819 	soc->ext_stats.rx_mpdu_received = 0;
	soc->ext_stats.rx_mpdu_missed = 0;
12820 
12821 	rx_stats_sent_cnt =
12822 		dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, rx_hw_stats);
12823 	if (!rx_stats_sent_cnt) {
12824 		dp_err("no tid stats sent successfully");
12825 		qdf_mem_free(rx_hw_stats);
12826 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
12827 		status = QDF_STATUS_E_INVAL;
12828 		goto out;
12829 	}
12830 	qdf_atomic_set(&rx_hw_stats->pending_tid_stats_cnt,
12831 		       rx_stats_sent_cnt);
12832 	rx_hw_stats->is_query_timeout = false;
12833 	soc->is_last_stats_ctx_init = true;
12834 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
12835 
12836 	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
12837 				       DP_REO_STATUS_STATS_TIMEOUT);
12838 
12839 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
12840 	if (status != QDF_STATUS_SUCCESS) {
12841 		dp_info("rx hw stats event timeout");
12842 		if (soc->is_last_stats_ctx_init)
12843 			rx_hw_stats->is_query_timeout = true;
12844 		/*
12845 		 * If query timeout happened, use the last saved stats
12846 		 * for this time query.
12847 		 */
12848 		soc->ext_stats.rx_mpdu_received = last_rx_mpdu_received;
12849 		soc->ext_stats.rx_mpdu_missed = last_rx_mpdu_missed;
12850 	}
12851 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
12852 
12853 out:
12854 	if (peer)
12855 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
12856 	if (vdev)
12857 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12858 
12859 	return status;
12860 }
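
/*
 * Synchronization note for dp_request_rx_hw_stats(): the REO stats query
 * fans out per TID, pending_tid_stats_cnt tracks outstanding responses, and
 * the last dp_rx_hw_stats_cb() invocation both signals rx_hw_stats_event
 * and frees the shared cb_ctxt. On a wait timeout the context is instead
 * marked is_query_timeout under rx_hw_stats_lock, so late callbacks neither
 * signal the event nor fold stale counts into ext_stats.
 */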
12861 
12862 /**
12863  * dp_reset_rx_hw_ext_stats() - Reset rx hardware ext stats
12864  * @soc_hdl: soc handle
12865  *
12866  * Return: None
12867  */
12868 static
12869 void dp_reset_rx_hw_ext_stats(struct cdp_soc_t *soc_hdl)
12870 {
12871 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12872 
12873 	soc->ext_stats.rx_mpdu_received = 0;
12874 	soc->ext_stats.rx_mpdu_missed = 0;
12875 }
12876 #endif /* WLAN_FEATURE_STATS_EXT */
12877 
12878 static
12879 uint32_t dp_get_tx_rings_grp_bitmap(struct cdp_soc_t *soc_hdl)
12880 {
12881 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12882 
12883 	return soc->wlan_cfg_ctx->tx_rings_grp_bitmap;
12884 }
12885 
12886 #ifdef DP_PEER_EXTENDED_API
12887 static struct cdp_misc_ops dp_ops_misc = {
12888 #ifdef FEATURE_WLAN_TDLS
12889 	.tx_non_std = dp_tx_non_std,
12890 #endif /* FEATURE_WLAN_TDLS */
12891 	.get_opmode = dp_get_opmode,
12892 #ifdef FEATURE_RUNTIME_PM
12893 	.runtime_suspend = dp_runtime_suspend,
12894 	.runtime_resume = dp_runtime_resume,
12895 #endif /* FEATURE_RUNTIME_PM */
12896 	.get_num_rx_contexts = dp_get_num_rx_contexts,
12897 	.get_tx_ack_stats = dp_tx_get_success_ack_stats,
12898 #ifdef WLAN_SUPPORT_DATA_STALL
12899 	.txrx_data_stall_cb_register = dp_register_data_stall_detect_cb,
12900 	.txrx_data_stall_cb_deregister = dp_deregister_data_stall_detect_cb,
12901 	.txrx_post_data_stall_event = dp_txrx_post_data_stall_event,
12902 #endif
12903 
12904 #ifdef WLAN_FEATURE_STATS_EXT
12905 	.txrx_ext_stats_request = dp_txrx_ext_stats_request,
12906 	.request_rx_hw_stats = dp_request_rx_hw_stats,
12907 	.reset_rx_hw_ext_stats = dp_reset_rx_hw_ext_stats,
12908 #endif /* WLAN_FEATURE_STATS_EXT */
12909 	.vdev_inform_ll_conn = dp_vdev_inform_ll_conn,
12910 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
12911 	.set_swlm_enable = dp_soc_set_swlm_enable,
12912 	.is_swlm_enabled = dp_soc_is_swlm_enabled,
12913 #endif
12914 	.display_txrx_hw_info = dp_display_srng_info,
12915 	.get_tx_rings_grp_bitmap = dp_get_tx_rings_grp_bitmap,
12916 };
12917 #endif
12918 
12919 #ifdef DP_FLOW_CTL
12920 static struct cdp_flowctl_ops dp_ops_flowctl = {
12921 	/* WIFI 3.0 DP implement as required. */
12922 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
12923 	.flow_pool_map_handler = dp_tx_flow_pool_map,
12924 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
12925 	.register_pause_cb = dp_txrx_register_pause_cb,
12926 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
12927 	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
12928 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
12929 };
12930 
12931 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
12932 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
12933 };
12934 #endif
12935 
12936 #ifdef IPA_OFFLOAD
12937 static struct cdp_ipa_ops dp_ops_ipa = {
12938 	.ipa_get_resource = dp_ipa_get_resource,
12939 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
12940 	.ipa_iounmap_doorbell_vaddr = dp_ipa_iounmap_doorbell_vaddr,
12941 	.ipa_op_response = dp_ipa_op_response,
12942 	.ipa_register_op_cb = dp_ipa_register_op_cb,
12943 	.ipa_deregister_op_cb = dp_ipa_deregister_op_cb,
12944 	.ipa_get_stat = dp_ipa_get_stat,
12945 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
12946 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
12947 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
12948 	.ipa_setup = dp_ipa_setup,
12949 	.ipa_cleanup = dp_ipa_cleanup,
12950 	.ipa_setup_iface = dp_ipa_setup_iface,
12951 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
12952 	.ipa_enable_pipes = dp_ipa_enable_pipes,
12953 	.ipa_disable_pipes = dp_ipa_disable_pipes,
12954 	.ipa_set_perf_level = dp_ipa_set_perf_level,
12955 	.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd,
12956 	.ipa_tx_buf_smmu_mapping = dp_ipa_tx_buf_smmu_mapping,
12957 	.ipa_tx_buf_smmu_unmapping = dp_ipa_tx_buf_smmu_unmapping
12958 };
12959 #endif
12960 
12961 #ifdef DP_POWER_SAVE
12962 static QDF_STATUS dp_bus_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
12963 {
12964 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12965 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12966 	int timeout = SUSPEND_DRAIN_WAIT;
12967 	int drain_wait_delay = 50; /* 50 ms */
12968 	int32_t tx_pending;
12969 
12970 	if (qdf_unlikely(!pdev)) {
12971 		dp_err("pdev is NULL");
12972 		return QDF_STATUS_E_INVAL;
12973 	}
12974 
12975 	/* Abort if there are any pending TX packets */
12976 	while ((tx_pending = dp_get_tx_pending((struct cdp_pdev *)pdev))) {
12977 		qdf_sleep(drain_wait_delay);
12978 		if (timeout <= 0) {
12979 			dp_info("TX frames are pending %d, abort suspend",
12980 				tx_pending);
12981 			dp_find_missing_tx_comp(soc);
12982 			return QDF_STATUS_E_TIMEOUT;
12983 		}
12984 		timeout = timeout - drain_wait_delay;
12985 	}
12986 
12987 	if (soc->intr_mode == DP_INTR_POLL)
12988 		qdf_timer_stop(&soc->int_timer);
12989 
12990 	/* Stop monitor reap timer and reap any pending frames in ring */
12991 	dp_monitor_pktlog_reap_pending_frames(pdev);
12992 
12993 	dp_suspend_fse_cache_flush(soc);
12994 
12995 	return QDF_STATUS_SUCCESS;
12996 }
12997 
12998 static QDF_STATUS dp_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
12999 {
13000 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13001 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13002 	uint8_t i;
13003 
13004 	if (qdf_unlikely(!pdev)) {
13005 		dp_err("pdev is NULL");
13006 		return QDF_STATUS_E_INVAL;
13007 	}
13008 
13009 	if (soc->intr_mode == DP_INTR_POLL)
13010 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
13011 
13012 	/* Start monitor reap timer */
13013 	dp_monitor_pktlog_start_reap_timer(pdev);
13014 
13015 	dp_resume_fse_cache_flush(soc);
13016 
13017 	for (i = 0; i < soc->num_tcl_data_rings; i++)
13018 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
13019 
13020 	return QDF_STATUS_SUCCESS;
13021 }
13022 
13023 /**
13024  * dp_process_wow_ack_rsp() - process wow ack response
13025  * @soc_hdl: datapath soc handle
13026  * @pdev_id: data path pdev handle id
13027  *
13028  * Return: none
13029  */
13030 static void dp_process_wow_ack_rsp(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
13031 {
13032 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13033 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13034 
13035 	if (qdf_unlikely(!pdev)) {
13036 		dp_err("pdev is NULL");
13037 		return;
13038 	}
13039 
13040 	/*
13041 	 * As part of WoW enable, FW disables the mon status ring; on receiving
13042 	 * the WoW ack response from FW, reap the mon status ring to make sure
13043 	 * no packets are pending in the ring.
13044 	 */
13045 	dp_monitor_pktlog_reap_pending_frames(pdev);
13046 }
13047 
13048 /**
13049  * dp_process_target_suspend_req() - process target suspend request
13050  * @soc_hdl: datapath soc handle
13051  * @pdev_id: data path pdev handle id
13052  *
13053  * Return: none
13054  */
13055 static void dp_process_target_suspend_req(struct cdp_soc_t *soc_hdl,
13056 					  uint8_t pdev_id)
13057 {
13058 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13059 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13060 
13061 	if (qdf_unlikely(!pdev)) {
13062 		dp_err("pdev is NULL");
13063 		return;
13064 	}
13065 
13066 	/* Stop monitor reap timer and reap any pending frames in ring */
13067 	dp_monitor_pktlog_reap_pending_frames(pdev);
13068 }
13069 
13070 static struct cdp_bus_ops dp_ops_bus = {
13071 	.bus_suspend = dp_bus_suspend,
13072 	.bus_resume = dp_bus_resume,
13073 	.process_wow_ack_rsp = dp_process_wow_ack_rsp,
13074 	.process_target_suspend_req = dp_process_target_suspend_req
13075 };
13076 #endif
13077 
13078 #ifdef DP_FLOW_CTL
13079 static struct cdp_throttle_ops dp_ops_throttle = {
13080 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
13081 };
13082 
13083 static struct cdp_cfg_ops dp_ops_cfg = {
13084 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
13085 };
13086 #endif
13087 
13088 #ifdef DP_PEER_EXTENDED_API
13089 static struct cdp_ocb_ops dp_ops_ocb = {
13090 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
13091 };
13092 
13093 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
13094 	.clear_stats = dp_txrx_clear_dump_stats,
13095 };
13096 
13097 static struct cdp_peer_ops dp_ops_peer = {
13098 	.register_peer = dp_register_peer,
13099 	.clear_peer = dp_clear_peer,
13100 	.find_peer_exist = dp_find_peer_exist,
13101 	.find_peer_exist_on_vdev = dp_find_peer_exist_on_vdev,
13102 	.find_peer_exist_on_other_vdev = dp_find_peer_exist_on_other_vdev,
13103 	.peer_state_update = dp_peer_state_update,
13104 	.get_vdevid = dp_get_vdevid,
13105 	.get_vdev_by_peer_addr = dp_get_vdev_by_peer_addr,
13106 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
13107 	.get_peer_state = dp_get_peer_state,
13108 	.peer_flush_frags = dp_peer_flush_frags,
13109 };
13110 #endif
13111 
13112 static void dp_soc_txrx_ops_attach(struct dp_soc *soc)
13113 {
13114 	soc->cdp_soc.ops->cmn_drv_ops = &dp_ops_cmn;
13115 	soc->cdp_soc.ops->ctrl_ops = &dp_ops_ctrl;
13116 	soc->cdp_soc.ops->me_ops = &dp_ops_me;
13117 	soc->cdp_soc.ops->host_stats_ops = &dp_ops_host_stats;
13118 	soc->cdp_soc.ops->wds_ops = &dp_ops_wds;
13119 	soc->cdp_soc.ops->raw_ops = &dp_ops_raw;
13120 #ifdef PEER_FLOW_CONTROL
13121 	soc->cdp_soc.ops->pflow_ops = &dp_ops_pflow;
13122 #endif /* PEER_FLOW_CONTROL */
13123 #ifdef DP_PEER_EXTENDED_API
13124 	soc->cdp_soc.ops->misc_ops = &dp_ops_misc;
13125 	soc->cdp_soc.ops->ocb_ops = &dp_ops_ocb;
13126 	soc->cdp_soc.ops->peer_ops = &dp_ops_peer;
13127 	soc->cdp_soc.ops->mob_stats_ops = &dp_ops_mob_stats;
13128 #endif
13129 #ifdef DP_FLOW_CTL
13130 	soc->cdp_soc.ops->cfg_ops = &dp_ops_cfg;
13131 	soc->cdp_soc.ops->flowctl_ops = &dp_ops_flowctl;
13132 	soc->cdp_soc.ops->l_flowctl_ops = &dp_ops_l_flowctl;
13133 	soc->cdp_soc.ops->throttle_ops = &dp_ops_throttle;
13134 #endif
13135 #ifdef IPA_OFFLOAD
13136 	soc->cdp_soc.ops->ipa_ops = &dp_ops_ipa;
13137 #endif
13138 #ifdef DP_POWER_SAVE
13139 	soc->cdp_soc.ops->bus_ops = &dp_ops_bus;
13140 #endif
13141 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
13142 	soc->cdp_soc.ops->cfr_ops = &dp_ops_cfr;
13143 #endif
13144 #ifdef WLAN_SUPPORT_MSCS
13145 	soc->cdp_soc.ops->mscs_ops = &dp_ops_mscs;
13146 #endif
13147 #ifdef WLAN_SUPPORT_MESH_LATENCY
13148 	soc->cdp_soc.ops->mesh_latency_ops = &dp_ops_mesh_latency;
13149 #endif
13150 };
13151 
13152 /**
13153  * dp_soc_set_txrx_ring_map() - set tx ring map for interrupt contexts
13154  * @soc: DP handler for soc
13155  *
13156  * Return: Void
13157  */
13158 void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
13159 {
13160 	uint32_t i;
13161 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
13162 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
13163 	}
13164 }
13165 
13166 qdf_export_symbol(dp_soc_set_txrx_ring_map);
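
/*
 * Illustration (behavioral note, not new configuration): with
 * DP_NSS_DEFAULT_MAP each interrupt context i is steered to a TCL ring via
 * dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i], so TX enqueues from a given
 * context consistently use the same hardware ring; other rows of
 * dp_cpu_ring_map exist for NSS-offloaded configurations.
 */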
13167 
13168 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
13169 	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574)
13170 /**
13171  * dp_soc_attach_wifi3() - Attach txrx SOC
13172  * @ctrl_psoc: Opaque SOC handle from control plane
13173  * @params: SOC attach params
13174  *
13175  * Return: DP SOC handle on success, NULL on failure
13176  */
13177 struct cdp_soc_t *
13178 dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
13179 		    struct cdp_soc_attach_params *params)
13180 {
13181 	struct dp_soc *dp_soc = NULL;
13182 
13183 	dp_soc = dp_soc_attach(ctrl_psoc, params);
13184 
13185 	return dp_soc_to_cdp_soc_t(dp_soc);
13186 }
13187 
13188 static inline void dp_soc_set_def_pdev(struct dp_soc *soc)
13189 {
13190 	int lmac_id;
13191 
13192 	for (lmac_id = 0; lmac_id < MAX_NUM_LMAC_HW; lmac_id++) {
13193 		/*Set default host PDEV ID for lmac_id*/
13194 		wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
13195 				      INVALID_PDEV_ID, lmac_id);
13196 	}
13197 }
13198 
13199 static uint32_t
13200 dp_get_link_desc_id_start(uint16_t arch_id)
13201 {
13202 	switch (arch_id) {
13203 	case CDP_ARCH_TYPE_LI:
13204 		return LINK_DESC_ID_START_21_BITS_COOKIE;
13205 	case CDP_ARCH_TYPE_BE:
13206 		return LINK_DESC_ID_START_20_BITS_COOKIE;
13207 	default:
13208 		dp_err("unknown arch_id 0x%x", arch_id);
13209 		QDF_BUG(0);
13210 		return LINK_DESC_ID_START_21_BITS_COOKIE;
13211 	}
13212 }
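
/*
 * Example: a Lithium target (CDP_ARCH_TYPE_LI) starts link descriptor
 * cookies at LINK_DESC_ID_START_21_BITS_COOKIE, while Beryllium
 * (CDP_ARCH_TYPE_BE) uses the 20-bit variant; the per-arch start offset
 * keeps the link descriptor cookie space distinct for the respective
 * cookie width.
 */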
13213 
13214 /**
13215  * dp_soc_attach() - Attach txrx SOC
13216  * @ctrl_psoc: Opaque SOC handle from control plane
13217  * @params: SOC attach params
13218  *
13219  * Return: DP SOC handle on success, NULL on failure
13220  */
13221 static struct dp_soc *
13222 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
13223 	      struct cdp_soc_attach_params *params)
13224 {
13225 	int int_ctx;
13226 	struct dp_soc *soc =  NULL;
13227 	uint16_t arch_id;
13228 	struct hif_opaque_softc *hif_handle = params->hif_handle;
13229 	qdf_device_t qdf_osdev = params->qdf_osdev;
13230 	struct ol_if_ops *ol_ops = params->ol_ops;
13231 	uint16_t device_id = params->device_id;
13232 
13233 	if (!hif_handle) {
13234 		dp_err("HIF handle is NULL");
13235 		goto fail0;
13236 	}
13237 	arch_id = cdp_get_arch_type_from_devid(device_id);
13238 	soc = qdf_mem_malloc(dp_get_soc_context_size(device_id));
13239 	if (!soc) {
13240 		dp_err("DP SOC memory allocation failed");
13241 		goto fail0;
13242 	}
13243 
13244 	dp_info("soc memory allocated %pK", soc);
13245 	soc->hif_handle = hif_handle;
13246 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
13247 	if (!soc->hal_soc)
13248 		goto fail1;
13249 
13250 	hif_get_cmem_info(soc->hif_handle,
13251 			  &soc->cmem_base,
13252 			  &soc->cmem_size);
13253 	int_ctx = 0;
13254 	soc->device_id = device_id;
13255 	soc->cdp_soc.ops =
13256 		(struct cdp_ops *)qdf_mem_malloc(sizeof(struct cdp_ops));
13257 	if (!soc->cdp_soc.ops)
13258 		goto fail1;
13259 
13260 	dp_soc_txrx_ops_attach(soc);
13261 	soc->cdp_soc.ol_ops = ol_ops;
13262 	soc->ctrl_psoc = ctrl_psoc;
13263 	soc->osdev = qdf_osdev;
13264 	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
13265 	hal_rx_get_tlv_size(soc->hal_soc, &soc->rx_pkt_tlv_size,
13266 			    &soc->rx_mon_pkt_tlv_size);
13267 	soc->idle_link_bm_id = hal_get_idle_link_bm_id(soc->hal_soc,
13268 						       params->mlo_chip_id);
13269 	soc->features.dmac_cmn_src_rxbuf_ring_enabled =
13270 		hal_dmac_cmn_src_rxbuf_ring_get(soc->hal_soc);
13271 	soc->arch_id = arch_id;
13272 	soc->link_desc_id_start =
13273 			dp_get_link_desc_id_start(soc->arch_id);
13274 	dp_configure_arch_ops(soc);
13275 
13276 	/* Reset wbm sg list and flags */
13277 	dp_rx_wbm_sg_list_reset(soc);
13278 
13279 	dp_soc_tx_hw_desc_history_attach(soc);
13280 	dp_soc_rx_history_attach(soc);
13281 	dp_soc_tx_history_attach(soc);
13282 	wlan_set_srng_cfg(&soc->wlan_srng_cfg);
13283 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
13284 	if (!soc->wlan_cfg_ctx) {
13285 		dp_err("wlan_cfg_ctx failed\n");
13286 		goto fail2;
13287 	}
13288 	dp_soc_cfg_attach(soc);
13289 
13290 	if (dp_hw_link_desc_pool_banks_alloc(soc, WLAN_INVALID_PDEV_ID)) {
13291 		dp_err("failed to allocate link desc pool banks");
13292 		goto fail3;
13293 	}
13294 
13295 	if (dp_hw_link_desc_ring_alloc(soc)) {
13296 		dp_err("failed to allocate link_desc_ring");
13297 		goto fail4;
13298 	}
13299 
13300 	if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_attach(soc,
13301 								 params))) {
13302 		dp_err("unable to do target specific attach");
13303 		goto fail5;
13304 	}
13305 
13306 	if (dp_soc_srng_alloc(soc)) {
13307 		dp_err("failed to allocate soc srng rings");
13308 		goto fail6;
13309 	}
13310 
13311 	if (dp_soc_tx_desc_sw_pools_alloc(soc)) {
13312 		dp_err("dp_soc_tx_desc_sw_pools_alloc failed");
13313 		goto fail7;
13314 	}
13315 
13316 	if (!dp_monitor_modularized_enable()) {
13317 		if (dp_mon_soc_attach_wrapper(soc)) {
13318 			dp_err("failed to attach monitor");
13319 			goto fail8;
13320 		}
13321 	}
13322 
13323 	if (dp_sysfs_initialize_stats(soc) != QDF_STATUS_SUCCESS) {
13324 		dp_err("failed to initialize dp stats sysfs file");
13325 		dp_sysfs_deinitialize_stats(soc);
13326 	}
13327 
13328 	dp_soc_swlm_attach(soc);
13329 	dp_soc_set_interrupt_mode(soc);
13330 	dp_soc_set_def_pdev(soc);
13331 
13332 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
13333 		qdf_dma_mem_stats_read(),
13334 		qdf_heap_mem_stats_read(),
13335 		qdf_skb_total_mem_stats_read());
13336 
13337 	return soc;
13338 fail8:
13339 	dp_soc_tx_desc_sw_pools_free(soc);
13340 fail7:
13341 	dp_soc_srng_free(soc);
13342 fail6:
13343 	soc->arch_ops.txrx_soc_detach(soc);
13344 fail5:
13345 	dp_hw_link_desc_ring_free(soc);
13346 fail4:
13347 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
13348 fail3:
13349 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
13350 fail2:
13351 	qdf_mem_free(soc->cdp_soc.ops);
13352 fail1:
13353 	qdf_mem_free(soc);
13354 fail0:
13355 	return NULL;
13356 }
13357 
13358 /**
13359  * dp_soc_init() - Initialize txrx SOC
13360  * @soc: Opaque DP SOC handle
13361  * @htc_handle: Opaque HTC handle
13362  * @hif_handle: Opaque HIF handle
13363  *
13364  * Return: DP SOC handle on success, NULL on failure
13365  */
13366 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
13367 		  struct hif_opaque_softc *hif_handle)
13368 {
13369 	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
13370 	bool is_monitor_mode = false;
13371 	struct hal_reo_params reo_params;
13372 	uint8_t i;
13373 	int num_dp_msi;
13374 	struct dp_mon_ops *mon_ops;
13375 
13376 	wlan_minidump_log(soc, sizeof(*soc), soc->ctrl_psoc,
13377 			  WLAN_MD_DP_SOC, "dp_soc");
13378 
13379 	soc->hif_handle = hif_handle;
13380 
13381 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
13382 	if (!soc->hal_soc)
13383 		goto fail0;
13384 
13385 	if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_init(soc))) {
13386 		dp_err("unable to do target specific init");
13387 		goto fail0;
13388 	}
13389 
13390 	htt_soc = htt_soc_attach(soc, htc_handle);
13391 	if (!htt_soc)
13392 		goto fail1;
13393 
13394 	soc->htt_handle = htt_soc;
13395 
13396 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
13397 		goto fail2;
13398 
13399 	htt_set_htc_handle(htt_soc, htc_handle);
13400 
13401 	dp_soc_cfg_init(soc);
13402 
13403 	dp_monitor_soc_cfg_init(soc);
13404 	/* Reset/Initialize wbm sg list and flags */
13405 	dp_rx_wbm_sg_list_reset(soc);
13406 
13407 	/* Note: Any SRNG ring initialization should happen only after
13408 	 * Interrupt mode is set and followed by filling up the
13409 	 * interrupt mask. IT SHOULD ALWAYS BE IN THIS ORDER.
13410 	 */
13411 	dp_soc_set_interrupt_mode(soc);
13412 	if (soc->cdp_soc.ol_ops->get_con_mode &&
13413 	    soc->cdp_soc.ol_ops->get_con_mode() ==
13414 	    QDF_GLOBAL_MONITOR_MODE)
13415 		is_monitor_mode = true;
13416 
13417 	num_dp_msi = dp_get_num_msi_available(soc, soc->intr_mode);
13418 	if (num_dp_msi < 0) {
13419 		dp_init_err("%pK: dp_interrupt assignment failed", soc);
13420 		goto fail3;
13421 	}
13422 
13423 	wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, num_dp_msi,
13424 				     soc->intr_mode, is_monitor_mode);
13425 
13426 	/* initialize WBM_IDLE_LINK ring */
13427 	if (dp_hw_link_desc_ring_init(soc)) {
13428 		dp_init_err("%pK: dp_hw_link_desc_ring_init failed", soc);
13429 		goto fail3;
13430 	}
13431 
13432 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
13433 
13434 	if (dp_soc_srng_init(soc)) {
13435 		dp_init_err("%pK: dp_soc_srng_init failed", soc);
13436 		goto fail4;
13437 	}
13438 
13439 	if (htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
13440 			       htt_get_htc_handle(htt_soc),
13441 			       soc->hal_soc, soc->osdev) == NULL)
13442 		goto fail5;
13443 
13444 	/* Initialize descriptors in TCL Rings */
13445 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
13446 		hal_tx_init_data_ring(soc->hal_soc,
13447 				      soc->tcl_data_ring[i].hal_srng);
13448 	}
13449 
13450 	if (dp_soc_tx_desc_sw_pools_init(soc)) {
13451 		dp_init_err("%pK: dp_tx_soc_attach failed", soc);
13452 		goto fail6;
13453 	}
13454 
13455 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
13456 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
13457 	soc->cce_disable = false;
13458 	soc->max_ast_ageout_count = MAX_AST_AGEOUT_COUNT;
13459 
13460 	soc->sta_mode_search_policy = DP_TX_ADDR_SEARCH_ADDR_POLICY;
13461 	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
13462 	qdf_spinlock_create(&soc->vdev_map_lock);
13463 	qdf_atomic_init(&soc->num_tx_outstanding);
13464 	qdf_atomic_init(&soc->num_tx_exception);
13465 	soc->num_tx_allowed =
13466 		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);
13467 
13468 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
13469 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
13470 				CDP_CFG_MAX_PEER_ID);
13471 
13472 		if (ret != -EINVAL)
13473 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
13474 
13475 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
13476 				CDP_CFG_CCE_DISABLE);
13477 		if (ret == 1)
13478 			soc->cce_disable = true;
13479 	}
13480 
13481 	/*
13482 	 * Skip registering hw ring interrupts for WMAC2 on IPQ6018
13483 	 * and IPQ5018, as WMAC2 is not present on these platforms.
13484 	 */
13485 	if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018 ||
13486 	    soc->disable_mac2_intr)
13487 		dp_soc_disable_unused_mac_intr_mask(soc, 0x2);
13488 
13489 	/*
13490 	 * Skip registering hw ring interrupts for WMAC1 on IPQ5018
13491 	 * as WMAC1 is not present on this platform.
13492 	 */
13493 	if (soc->disable_mac1_intr)
13494 		dp_soc_disable_unused_mac_intr_mask(soc, 0x1);
13495 
13496 	/* Setup HW REO */
13497 	qdf_mem_zero(&reo_params, sizeof(reo_params));
13498 
13499 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
13500 		/*
13501 		 * Reo ring remap is not required if both radios
13502 		 * are offloaded to NSS
13503 		 */
13504 		if (soc->arch_ops.reo_remap_config(soc, &reo_params.remap0,
13505 						   &reo_params.remap1,
13506 						   &reo_params.remap2))
13507 			reo_params.rx_hash_enabled = true;
13508 		else
13509 			reo_params.rx_hash_enabled = false;
13510 	}
13511 
13512 	/* setup the global rx defrag waitlist */
13513 	TAILQ_INIT(&soc->rx.defrag.waitlist);
13514 	soc->rx.defrag.timeout_ms =
13515 		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
13516 	soc->rx.defrag.next_flush_ms = 0;
13517 	soc->rx.flags.defrag_timeout_check =
13518 		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
13519 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
13520 
13521 	/*
13522 	 * set the fragment destination ring
13523 	 */
13524 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
13525 
13526 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
13527 		reo_params.alt_dst_ind_0 = REO_REMAP_RELEASE;
13528 
13529 	hal_reo_setup(soc->hal_soc, &reo_params);
13530 
13531 	hal_reo_set_err_dst_remap(soc->hal_soc);
13532 
13533 	soc->features.pn_in_reo_dest = hal_reo_enable_pn_in_dest(soc->hal_soc);
13534 
13535 	mon_ops = dp_mon_ops_get(soc);
13536 	if (mon_ops && mon_ops->mon_soc_init)
13537 		mon_ops->mon_soc_init(soc);
13538 
13539 	qdf_atomic_set(&soc->cmn_init_done, 1);
13540 
13541 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
13542 
13543 	qdf_spinlock_create(&soc->ast_lock);
13544 	dp_peer_mec_spinlock_create(soc);
13545 
13546 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
13547 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
13548 	INIT_RX_HW_STATS_LOCK(soc);
13549 
13550 	qdf_nbuf_queue_init(&soc->invalid_buf_queue);
13551 	/* fill the tx/rx cpu ring map*/
13552 	dp_soc_set_txrx_ring_map(soc);
13553 
13554 	TAILQ_INIT(&soc->inactive_peer_list);
13555 	qdf_spinlock_create(&soc->inactive_peer_list_lock);
13556 	TAILQ_INIT(&soc->inactive_vdev_list);
13557 	qdf_spinlock_create(&soc->inactive_vdev_list_lock);
13558 	qdf_spinlock_create(&soc->htt_stats.lock);
13559 	/* initialize work queue for stats processing */
13560 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
13561 
13562 	dp_reo_desc_deferred_freelist_create(soc);
13563 
13564 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
13565 		qdf_dma_mem_stats_read(),
13566 		qdf_heap_mem_stats_read(),
13567 		qdf_skb_total_mem_stats_read());
13568 
13569 	soc->vdev_stats_id_map = 0;
13570 
13571 	return soc;
13572 fail6:
13573 	htt_soc_htc_dealloc(soc->htt_handle);
13574 fail5:
13575 	dp_soc_srng_deinit(soc);
13576 fail4:
13577 	dp_hw_link_desc_ring_deinit(soc);
13578 fail3:
13579 	htt_htc_pkt_pool_free(htt_soc);
13580 fail2:
13581 	htt_soc_detach(htt_soc);
13582 fail1:
13583 	soc->arch_ops.txrx_soc_deinit(soc);
13584 fail0:
13585 	return NULL;
13586 }
13587 
13588 /**
13589  * dp_soc_init_wifi3() - Initialize txrx SOC
13590  * @soc: Opaque DP SOC handle
13591  * @ctrl_psoc: Opaque SOC handle from control plane (Unused)
13592  * @hif_handle: Opaque HIF handle
13593  * @htc_handle: Opaque HTC handle
13594  * @qdf_osdev: QDF device (Unused)
13595  * @ol_ops: Offload Operations (Unused)
13596  * @device_id: Device ID (Unused)
13597  *
13598  * Return: DP SOC handle on success, NULL on failure
13599  */
13600 void *dp_soc_init_wifi3(struct cdp_soc_t *soc,
13601 			struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
13602 			struct hif_opaque_softc *hif_handle,
13603 			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
13604 			struct ol_if_ops *ol_ops, uint16_t device_id)
13605 {
13606 	return dp_soc_init((struct dp_soc *)soc, htc_handle, hif_handle);
13607 }
13608 
13609 #endif
13610 
13611 /**
13612  * dp_get_pdev_for_mac_id() - Return pdev for mac_id
13613  *
13614  * @soc: handle to DP soc
13615  * @mac_id: MAC id
13616  *
13617  * Return: pdev corresponding to MAC
13618  */
13619 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
13620 {
13621 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
13622 		return (mac_id < MAX_PDEV_CNT) ? soc->pdev_list[mac_id] : NULL;
13623 
13624 	/* Typically for MCL, as there is only 1 PDEV */
13625 	return soc->pdev_list[0];
13626 }
13627 
13628 /**
13629  * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
13630  * @soc:		DP SoC context
13631  * @max_mac_rings:	No of MAC rings
13632  *
13633  * Return: None
13634  */
13635 void dp_is_hw_dbs_enable(struct dp_soc *soc,
13636 				int *max_mac_rings)
13637 {
13638 	bool dbs_enable = false;
13639 
13640 	if (soc->cdp_soc.ol_ops->is_hw_dbs_capable)
13641 		dbs_enable = soc->cdp_soc.ol_ops->
13642 				is_hw_dbs_capable((void *)soc->ctrl_psoc);
13643 
13644 	*max_mac_rings = dbs_enable ? (*max_mac_rings) : 1;
13645 	dp_info("dbs_enable %d, max_mac_rings %d",
13646 		dbs_enable, *max_mac_rings);
13647 }
13648 
13649 qdf_export_symbol(dp_is_hw_dbs_enable);
13650 
13651 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
13652 /**
13653  * dp_get_cfr_rcc() - get cfr rcc config
13654  * @soc_hdl: Datapath soc handle
13655  * @pdev_id: id of objmgr pdev
13656  *
13657  * Return: true/false based on cfr mode setting
13658  */
13659 static
13660 bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
13661 {
13662 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13663 	struct dp_pdev *pdev = NULL;
13664 
13665 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13666 	if (!pdev) {
13667 		dp_err("pdev is NULL");
13668 		return false;
13669 	}
13670 
13671 	return pdev->cfr_rcc_mode;
13672 }
13673 
13674 /**
13675  * dp_set_cfr_rcc() - enable/disable cfr rcc config
13676  * @soc_hdl: Datapath soc handle
13677  * @pdev_id: id of objmgr pdev
13678  * @enable: Enable/Disable cfr rcc mode
13679  *
13680  * Return: none
13681  */
13682 static
13683 void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, bool enable)
13684 {
13685 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13686 	struct dp_pdev *pdev = NULL;
13687 
13688 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13689 	if (!pdev) {
13690 		dp_err("pdev is NULL");
13691 		return;
13692 	}
13693 
13694 	pdev->cfr_rcc_mode = enable;
13695 }
13696 
13697 /**
13698  * dp_get_cfr_dbg_stats() - Get the debug statistics for CFR
13699  * @soc_hdl: Datapath soc handle
13700  * @pdev_id: id of data path pdev handle
13701  * @cfr_rcc_stats: CFR RCC debug statistics buffer
13702  *
13703  * Return: none
13704  */
13705 static inline void
13706 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
13707 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats)
13708 {
13709 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13710 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13711 
13712 	if (!pdev) {
13713 		dp_err("Invalid pdev");
13714 		return;
13715 	}
13716 
13717 	qdf_mem_copy(cfr_rcc_stats, &pdev->stats.rcc,
13718 		     sizeof(struct cdp_cfr_rcc_stats));
13719 }
13720 
13721 /**
13722  * dp_clear_cfr_dbg_stats() - Clear debug statistics for CFR
13723  * @soc_hdl: Datapath soc handle
13724  * @pdev_id: id of data path pdev handle
13725  *
13726  * Return: none
13727  */
13728 static void dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl,
13729 				   uint8_t pdev_id)
13730 {
13731 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13732 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13733 
13734 	if (!pdev) {
13735 		dp_err("dp pdev is NULL");
13736 		return;
13737 	}
13738 
13739 	qdf_mem_zero(&pdev->stats.rcc, sizeof(pdev->stats.rcc));
13740 }
13741 #endif
13742 
13743 /**
13744  * dp_bucket_index() - Return index from array
13745  *
13746  * @delay: delay measured
13747  * @array: array used to index corresponding delay
13748  *
13749  * Return: index
13750  */
13751 static uint8_t dp_bucket_index(uint32_t delay, uint16_t *array)
13752 {
13753 	uint8_t i = CDP_DELAY_BUCKET_0;
13754 
13755 	for (; i < CDP_DELAY_BUCKET_MAX - 1; i++) {
13756 		if (delay >= array[i] && delay <= array[i + 1])
13757 			return i;
13758 	}
13759 
13760 	return (CDP_DELAY_BUCKET_MAX - 1);
13761 }
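
/*
 * Worked example: with cdp_sw_enq_delay = {0, 1, 2, ..., 12}, a delay of
 * 5 ms satisfies array[4] <= 5 <= array[5] and lands in bucket 4; any delay
 * past the final boundary falls through to the overflow bucket
 * CDP_DELAY_BUCKET_MAX - 1.
 */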
13762 
13763 /**
13764  * dp_fill_delay_buckets() - Fill delay statistics bucket for each
13765  *				type of delay
13766  *
13767  * @pdev: pdev handle
13768  * @delay: delay in ms
13769  * @tid: tid value
13770  * @mode: type of tx delay mode
13771  * @ring_id: ring number
13772  * Return: pointer to cdp_delay_stats structure
13773  */
13774 static struct cdp_delay_stats *
13775 dp_fill_delay_buckets(struct dp_pdev *pdev, uint32_t delay,
13776 		      uint8_t tid, uint8_t mode, uint8_t ring_id)
13777 {
13778 	uint8_t delay_index = 0;
13779 	struct cdp_tid_tx_stats *tstats =
13780 		&pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
13781 	struct cdp_tid_rx_stats *rstats =
13782 		&pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
13783 	/*
13784 	 * cdp_fw_to_hw_delay_range
13785 	 * Fw to hw delay ranges in milliseconds
13786 	 */
13787 	uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
13788 		0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
13789 
13790 	/*
13791 	 * cdp_sw_enq_delay_range
13792 	 * Software enqueue delay ranges in milliseconds
13793 	 */
13794 	uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
13795 		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
13796 
13797 	/*
13798 	 * cdp_intfrm_delay_range
13799 	 * Interframe delay ranges in milliseconds
13800 	 */
13801 	uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
13802 		0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};
13803 
13804 	/*
13805 	 * Update delay stats in proper bucket
13806 	 */
13807 	switch (mode) {
13808 	/* Software Enqueue delay ranges */
13809 	case CDP_DELAY_STATS_SW_ENQ:
13810 
13811 		delay_index = dp_bucket_index(delay, cdp_sw_enq_delay);
13812 		tstats->swq_delay.delay_bucket[delay_index]++;
13813 		return &tstats->swq_delay;
13814 
13815 	/* Tx Completion delay ranges */
13816 	case CDP_DELAY_STATS_FW_HW_TRANSMIT:
13817 
13818 		delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay);
13819 		tstats->hwtx_delay.delay_bucket[delay_index]++;
13820 		return &tstats->hwtx_delay;
13821 
13822 	/* Interframe tx delay ranges */
13823 	case CDP_DELAY_STATS_TX_INTERFRAME:
13824 
13825 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
13826 		tstats->intfrm_delay.delay_bucket[delay_index]++;
13827 		return &tstats->intfrm_delay;
13828 
13829 	/* Interframe rx delay ranges */
13830 	case CDP_DELAY_STATS_RX_INTERFRAME:
13831 
13832 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
13833 		rstats->intfrm_delay.delay_bucket[delay_index]++;
13834 		return &rstats->intfrm_delay;
13835 
13836 	/* Ring reap to indication to network stack */
13837 	case CDP_DELAY_STATS_REAP_STACK:
13838 
13839 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
13840 		rstats->to_stack_delay.delay_bucket[delay_index]++;
13841 		return &rstats->to_stack_delay;
13842 	default:
13843 		dp_debug("Incorrect delay mode: %d", mode);
13844 	}
13845 
13846 	return NULL;
13847 }
13848 
13849 /**
13850  * dp_update_delay_stats() - Update delay statistics in structure
13851  *				and fill min, max and avg delay
13852  *
13853  * @pdev: pdev handle
13854  * @delay: delay in ms
13855  * @tid: tid value
13856  * @mode: type of tx delay mode
13857  * @ring_id: ring number
13858  * Return: none
13859  */
13860 void dp_update_delay_stats(struct dp_pdev *pdev, uint32_t delay,
13861 			   uint8_t tid, uint8_t mode, uint8_t ring_id)
13862 {
13863 	struct cdp_delay_stats *dstats = NULL;
13864 
13865 	/*
13866 	 * Delay ranges are different for different delay modes
13867 	 * Get the correct index to update delay bucket
13868 	 */
13869 	dstats = dp_fill_delay_buckets(pdev, delay, tid, mode, ring_id);
13870 	if (qdf_unlikely(!dstats))
13871 		return;
13872 
13873 	if (delay != 0) {
13874 		/*
13875 		 * Compute minimum,average and maximum
13876 		 * delay
13877 		 */
13878 		if (delay < dstats->min_delay)
13879 			dstats->min_delay = delay;
13880 
13881 		if (delay > dstats->max_delay)
13882 			dstats->max_delay = delay;
13883 
13884 		/*
13885 		 * Average over delay measured till now
13886 		 */
13887 		if (!dstats->avg_delay)
13888 			dstats->avg_delay = delay;
13889 		else
13890 			dstats->avg_delay = ((delay + dstats->avg_delay) / 2);
13891 	}
13892 }
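
/*
 * Note on the running average above: avg = (delay + avg) / 2 is a cheap
 * exponential smoothing that weights the newest sample at 50%, not a true
 * arithmetic mean. E.g. successive samples of 10, 20 and 40 ms yield
 * avg = 10 -> 15 -> 27, whereas the arithmetic mean would be ~23 ms.
 */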
13893 
13894 /**
13895  * dp_get_peer_mac_list(): function to get peer mac list of vdev
13896  * @soc: Datapath soc handle
13897  * @vdev_id: vdev id
13898  * @newmac: Table of the client MAC addresses
13899  * @mac_cnt: No. of MACs required
13900  * @limit: Limit the number of clients
13901  *
13902  * return: no of clients
13903  */
13904 uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
13905 			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
13906 			      u_int16_t mac_cnt, bool limit)
13907 {
13908 	struct dp_soc *dp_soc = (struct dp_soc *)soc;
13909 	struct dp_vdev *vdev =
13910 		dp_vdev_get_ref_by_id(dp_soc, vdev_id, DP_MOD_ID_CDP);
13911 	struct dp_peer *peer;
13912 	uint16_t new_mac_cnt = 0;
13913 
13914 	if (!vdev)
13915 		return new_mac_cnt;
13916 
13917 	if (limit && (vdev->num_peers > mac_cnt))
13918 		return 0;
13919 
13920 	qdf_spin_lock_bh(&vdev->peer_list_lock);
13921 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
13922 		if (peer->bss_peer)
13923 			continue;
13924 		if (new_mac_cnt < mac_cnt) {
13925 			WLAN_ADDR_COPY(newmac[new_mac_cnt], peer->mac_addr.raw);
13926 			new_mac_cnt++;
13927 		}
13928 	}
13929 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
13930 	dp_vdev_unref_delete(dp_soc, vdev, DP_MOD_ID_CDP);
13931 	return new_mac_cnt;
13932 }
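
/*
 * Usage sketch (caller side, names hypothetical): fetch up to 8 client
 * MAC addresses of a vdev without the strict limit check:
 *
 *	u_int8_t macs[8][QDF_MAC_ADDR_SIZE];
 *	uint16_t n = dp_get_peer_mac_list(soc, vdev_id, macs, 8, false);
 *	// entries macs[0..n-1] are valid; the BSS peer is always skipped
 */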
13933 
13934 #ifdef QCA_SUPPORT_WDS_EXTENDED
13935 uint16_t dp_wds_ext_get_peer_id(ol_txrx_soc_handle soc,
13936 				uint8_t vdev_id,
13937 				uint8_t *mac)
13938 {
13939 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
13940 						       mac, 0, vdev_id,
13941 						       DP_MOD_ID_CDP);
13942 	uint16_t peer_id = HTT_INVALID_PEER;
13943 
13944 	if (!peer) {
13945 		dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
13946 		return peer_id;
13947 	}
13948 
13949 	peer_id = peer->peer_id;
13950 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
13951 	return peer_id;
13952 }
13953 
13954 QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc,
13955 				  uint8_t vdev_id,
13956 				  uint8_t *mac,
13957 				  ol_txrx_rx_fp rx,
13958 				  ol_osif_peer_handle osif_peer)
13959 {
13960 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
13961 						       mac, 0, vdev_id,
13962 						       DP_MOD_ID_CDP);
13963 	QDF_STATUS status = QDF_STATUS_E_INVAL;
13964 
13965 	if (!peer) {
13966 		dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
13967 		return status;
13968 	}
13969 	if (!peer->txrx_peer) {
13970 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
13971 		return status;
13972 	}
13973 
13974 	if (rx) {
13975 		if (peer->txrx_peer->osif_rx) {
13976 			status = QDF_STATUS_E_ALREADY;
13977 		} else {
13978 			peer->txrx_peer->osif_rx = rx;
13979 			status = QDF_STATUS_SUCCESS;
13980 		}
13981 	} else {
13982 		if (peer->txrx_peer->osif_rx) {
13983 			peer->txrx_peer->osif_rx = NULL;
13984 			status = QDF_STATUS_SUCCESS;
13985 		} else {
13986 			status = QDF_STATUS_E_ALREADY;
13987 		}
13988 	}
13989 
13990 	peer->txrx_peer->wds_ext.osif_peer = osif_peer;
13991 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
13992 
13993 	return status;
13994 }
13995 #endif /* QCA_SUPPORT_WDS_EXTENDED */
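
/*
 * Semantics note for dp_wds_ext_set_peer_rx(): a non-NULL rx callback is
 * installed only when none is present (QDF_STATUS_E_ALREADY otherwise),
 * and rx == NULL clears an installed callback. The osif peer handle is
 * written unconditionally before the peer reference is dropped, even on
 * the QDF_STATUS_E_ALREADY paths.
 */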
13996 
13997 /**
13998  * dp_pdev_srng_deinit() - de-initialize all pdev srng ring including
13999  *			   monitor rings
14000  * @pdev: Datapath pdev handle
14001  *
14002  */
14003 static void dp_pdev_srng_deinit(struct dp_pdev *pdev)
14004 {
14005 	struct dp_soc *soc = pdev->soc;
14006 	uint8_t i;
14007 
14008 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled)
14009 		dp_srng_deinit(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
14010 			       RXDMA_BUF,
14011 			       pdev->lmac_id);
14012 
14013 	if (!soc->rxdma2sw_rings_not_supported) {
14014 		for (i = 0;
14015 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
14016 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
14017 								 pdev->pdev_id);
14018 
14019 			wlan_minidump_remove(soc->rxdma_err_dst_ring[lmac_id].
14020 							base_vaddr_unaligned,
14021 					     soc->rxdma_err_dst_ring[lmac_id].
14022 								alloc_size,
14023 					     soc->ctrl_psoc,
14024 					     WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
14025 					     "rxdma_err_dst");
14026 			dp_srng_deinit(soc, &soc->rxdma_err_dst_ring[lmac_id],
14027 				       RXDMA_DST, lmac_id);
14028 		}
14029 	}
14030 
14031 
14032 }
14033 
14034 /**
14035  * dp_pdev_srng_init() - initialize all pdev srng rings including
14036  *			   monitor rings
14037  * @pdev: Datapath pdev handle
14038  *
14039  * return: QDF_STATUS_SUCCESS on success
14040  *	   QDF_STATUS_E_NOMEM on failure
14041  */
14042 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev)
14043 {
14044 	struct dp_soc *soc = pdev->soc;
14045 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
14046 	uint32_t i;
14047 
14048 	soc_cfg_ctx = soc->wlan_cfg_ctx;
14049 
14050 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
14051 		if (dp_srng_init(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
14052 				 RXDMA_BUF, 0, pdev->lmac_id)) {
14053 			dp_init_err("%pK: dp_srng_init failed rx refill ring",
14054 				    soc);
14055 			goto fail1;
14056 		}
14057 	}
14058 
14059 	/* LMAC RxDMA to SW Rings configuration */
14060 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
14061 		/* Only valid for MCL */
14062 		pdev = soc->pdev_list[0];
14063 
14064 	if (!soc->rxdma2sw_rings_not_supported) {
14065 		for (i = 0;
14066 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
14067 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
14068 								 pdev->pdev_id);
14069 			struct dp_srng *srng =
14070 				&soc->rxdma_err_dst_ring[lmac_id];
14071 
14072 			if (srng->hal_srng)
14073 				continue;
14074 
14075 			if (dp_srng_init(soc, srng, RXDMA_DST, 0, lmac_id)) {
14076 				dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring",
14077 					    soc);
14078 				goto fail1;
14079 			}
14080 			wlan_minidump_log(soc->rxdma_err_dst_ring[lmac_id].
14081 						base_vaddr_unaligned,
14082 					  soc->rxdma_err_dst_ring[lmac_id].
14083 						alloc_size,
14084 					  soc->ctrl_psoc,
14085 					  WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
14086 					  "rxdma_err_dst");
14087 		}
14088 	}
14089 	return QDF_STATUS_SUCCESS;
14090 
14091 fail1:
14092 	dp_pdev_srng_deinit(pdev);
14093 	return QDF_STATUS_E_NOMEM;
14094 }
14095 
14096 /**
14097  * dp_pdev_srng_free() - free all pdev srng rings including monitor rings
14098  * @pdev: Datapath pdev handle
14099  *
14100  */
14101 static void dp_pdev_srng_free(struct dp_pdev *pdev)
14102 {
14103 	struct dp_soc *soc = pdev->soc;
14104 	uint8_t i;
14105 
14106 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled)
14107 		dp_srng_free(soc, &soc->rx_refill_buf_ring[pdev->lmac_id]);
14108 
14109 	if (!soc->rxdma2sw_rings_not_supported) {
14110 		for (i = 0;
14111 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
14112 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
14113 								 pdev->pdev_id);
14114 
14115 			dp_srng_free(soc, &soc->rxdma_err_dst_ring[lmac_id]);
14116 		}
14117 	}
14118 }
14119 
14120 /**
14121  * dp_pdev_srng_alloc() - allocate memory for all pdev srng rings including
14122  *			  monitor rings
14123  * @pdev: Datapath pdev handle
14124  *
14125  * return: QDF_STATUS_SUCCESS on success
14126  *	   QDF_STATUS_E_NOMEM on failure
14127  */
14128 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev)
14129 {
14130 	struct dp_soc *soc = pdev->soc;
14131 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
14132 	uint32_t ring_size;
14133 	uint32_t i;
14134 
14135 	soc_cfg_ctx = soc->wlan_cfg_ctx;
14136 
14137 	ring_size = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
14138 	if (!soc->features.dmac_cmn_src_rxbuf_ring_enabled) {
14139 		if (dp_srng_alloc(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
14140 				  RXDMA_BUF, ring_size, 0)) {
14141 			dp_init_err("%pK: dp_srng_alloc failed for rx refill ring",
14142 				    soc);
14143 			goto fail1;
14144 		}
14145 	}
14146 
14147 	ring_size = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
14148 	/* LMAC RxDMA to SW Rings configuration */
14149 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
14150 		/* Only valid for MCL */
14151 		pdev = soc->pdev_list[0];
14152 
14153 	if (!soc->rxdma2sw_rings_not_supported) {
14154 		for (i = 0;
14155 		     i < soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev; i++) {
14156 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i,
14157 								 pdev->pdev_id);
14158 			struct dp_srng *srng =
14159 				&soc->rxdma_err_dst_ring[lmac_id];
14160 
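			/* Skip rings whose memory is already allocated */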
14161 			if (srng->base_vaddr_unaligned)
14162 				continue;
14163 
14164 			if (dp_srng_alloc(soc, srng, RXDMA_DST, ring_size, 0)) {
14165 				dp_init_err("%pK:" RNG_ERR "rxdma_err_dst_ring",
14166 					    soc);
14167 				goto fail1;
14168 			}
14169 		}
14170 	}
14171 
14172 	return QDF_STATUS_SUCCESS;
14173 fail1:
14174 	dp_pdev_srng_free(pdev);
14175 	return QDF_STATUS_E_NOMEM;
14176 }
14177 
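/*
 * Editorial sketch (not driver code): the four dp_pdev_srng_* helpers
 * above form a symmetric lifecycle. dp_pdev_srng_init() is driven from
 * dp_pdev_init() further below; the alloc/free pair is assumed to be
 * driven by the matching pdev attach/detach path. Typical ordering:
 *
 *	if (dp_pdev_srng_alloc(pdev))		- reserve ring memory
 *		return QDF_STATUS_E_NOMEM;
 *	if (dp_pdev_srng_init(pdev)) {		- attach rings to HAL
 *		dp_pdev_srng_free(pdev);
 *		return QDF_STATUS_E_FAILURE;
 *	}
 *	...
 *	dp_pdev_srng_deinit(pdev);		- detach rings from HAL
 *	dp_pdev_srng_free(pdev);		- release ring memory
 */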
14178 /**
14179  * dp_soc_srng_deinit() - de-initialize soc srng rings
14180  * @soc: Datapath soc handle
14181  *
14182  */
14183 static void dp_soc_srng_deinit(struct dp_soc *soc)
14184 {
14185 	uint32_t i;
14186 
14187 	if (soc->arch_ops.txrx_soc_srng_deinit)
14188 		soc->arch_ops.txrx_soc_srng_deinit(soc);
14189 
14190 	/* Free the ring memories */
14191 	/* Common rings */
14192 	wlan_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
14193 			     soc->wbm_desc_rel_ring.alloc_size,
14194 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_WBM_DESC_REL,
14195 			     "wbm_desc_rel_ring");
14196 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
14197 
14198 	/* Tx data rings */
14199 	for (i = 0; i < soc->num_tcl_data_rings; i++)
14200 		dp_deinit_tx_pair_by_index(soc, i);
14201 
14202 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
14203 		dp_deinit_tx_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
14204 		dp_ipa_deinit_alt_tx_ring(soc);
14205 	}
14206 
14207 	/* TCL command and status rings */
14208 	if (soc->init_tcl_cmd_cred_ring) {
14209 		wlan_minidump_remove(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
14210 				     soc->tcl_cmd_credit_ring.alloc_size,
14211 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_CMD,
14212 				     "tcl_cmd_cred_ring");
14213 		dp_srng_deinit(soc, &soc->tcl_cmd_credit_ring,
14214 			       TCL_CMD_CREDIT, 0);
14215 	}
14216 
14217 	wlan_minidump_remove(soc->tcl_status_ring.base_vaddr_unaligned,
14218 			     soc->tcl_status_ring.alloc_size,
14219 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_STATUS,
14220 			     "tcl_status_ring");
14221 	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
14222 
14223 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
14224 		/* TODO: Get number of rings and ring sizes
14225 		 * from wlan_cfg
14226 		 */
14227 		wlan_minidump_remove(soc->reo_dest_ring[i].base_vaddr_unaligned,
14228 				     soc->reo_dest_ring[i].alloc_size,
14229 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_DEST,
14230 				     "reo_dest_ring");
14231 		dp_srng_deinit(soc, &soc->reo_dest_ring[i], REO_DST, i);
14232 	}
14233 
14234 	/* REO reinjection ring */
14235 	wlan_minidump_remove(soc->reo_reinject_ring.base_vaddr_unaligned,
14236 			     soc->reo_reinject_ring.alloc_size,
14237 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_REINJECT,
14238 			     "reo_reinject_ring");
14239 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
14240 
14241 	/* Rx release ring */
14242 	wlan_minidump_remove(soc->rx_rel_ring.base_vaddr_unaligned,
14243 			     soc->rx_rel_ring.alloc_size,
14244 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_RX_REL,
14245 			     "reo_release_ring");
14246 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
14247 
14248 	/* Rx exception ring */
14249 	/* TODO: Better to store ring_type and ring_num in
14250 	 * dp_srng during setup
14251 	 */
14252 	wlan_minidump_remove(soc->reo_exception_ring.base_vaddr_unaligned,
14253 			     soc->reo_exception_ring.alloc_size,
14254 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_EXCEPTION,
14255 			     "reo_exception_ring");
14256 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
14257 
14258 	/* REO command and status rings */
14259 	wlan_minidump_remove(soc->reo_cmd_ring.base_vaddr_unaligned,
14260 			     soc->reo_cmd_ring.alloc_size,
14261 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_CMD,
14262 			     "reo_cmd_ring");
14263 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
14264 	wlan_minidump_remove(soc->reo_status_ring.base_vaddr_unaligned,
14265 			     soc->reo_status_ring.alloc_size,
14266 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_STATUS,
14267 			     "reo_status_ring");
14268 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
14269 }
14270 
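/*
 * Editorial note: every SRNG registered via wlan_minidump_log() in
 * dp_soc_srng_init() below gets a matching wlan_minidump_remove() in
 * dp_soc_srng_deinit() above before dp_srng_deinit() releases the ring,
 * so the minidump table never references torn-down ring memory. The
 * pairing pattern, with placeholder <ID>/<name>/<type>/<num> values:
 *
 *	wlan_minidump_log(ring->base_vaddr_unaligned, ring->alloc_size,
 *			  soc->ctrl_psoc, WLAN_MD_DP_SRNG_<ID>, "<name>");
 *	...
 *	wlan_minidump_remove(ring->base_vaddr_unaligned, ring->alloc_size,
 *			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_<ID>, "<name>");
 *	dp_srng_deinit(soc, ring, <type>, <num>);
 */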
14271 /**
14272  * dp_soc_srng_init() - Initialize soc level srng rings
14273  * @soc: Datapath soc handle
14274  *
14275  * return: QDF_STATUS_SUCCESS on success
14276  *	   QDF_STATUS_E_FAILURE on failure
14277  */
14278 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc)
14279 {
14280 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
14281 	uint8_t i;
14282 	uint8_t wbm2_sw_rx_rel_ring_id;
14283 
14284 	soc_cfg_ctx = soc->wlan_cfg_ctx;
14285 
14286 	dp_enable_verbose_debug(soc);
14287 
14288 	/* WBM descriptor release ring */
14289 	if (dp_srng_init(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0)) {
14290 		dp_init_err("%pK: dp_srng_init failed for wbm_desc_rel_ring", soc);
14291 		goto fail1;
14292 	}
14293 
14294 	wlan_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
14295 			  soc->wbm_desc_rel_ring.alloc_size,
14296 			  soc->ctrl_psoc,
14297 			  WLAN_MD_DP_SRNG_WBM_DESC_REL,
14298 			  "wbm_desc_rel_ring");
14299 
14300 	if (soc->init_tcl_cmd_cred_ring) {
14301 		/* TCL command and status rings */
14302 		if (dp_srng_init(soc, &soc->tcl_cmd_credit_ring,
14303 				 TCL_CMD_CREDIT, 0, 0)) {
14304 			dp_init_err("%pK: dp_srng_init failed for tcl_cmd_ring", soc);
14305 			goto fail1;
14306 		}
14307 
14308 		wlan_minidump_log(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
14309 				  soc->tcl_cmd_credit_ring.alloc_size,
14310 				  soc->ctrl_psoc,
14311 				  WLAN_MD_DP_SRNG_TCL_CMD,
14312 				  "tcl_cmd_cred_ring");
14313 	}
14314 
14315 	if (dp_srng_init(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0)) {
14316 		dp_init_err("%pK: dp_srng_init failed for tcl_status_ring", soc);
14317 		goto fail1;
14318 	}
14319 
14320 	wlan_minidump_log(soc->tcl_status_ring.base_vaddr_unaligned,
14321 			  soc->tcl_status_ring.alloc_size,
14322 			  soc->ctrl_psoc,
14323 			  WLAN_MD_DP_SRNG_TCL_STATUS,
14324 			  "tcl_status_ring");
14325 
14326 	/* REO reinjection ring */
14327 	if (dp_srng_init(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0)) {
14328 		dp_init_err("%pK: dp_srng_init failed for reo_reinject_ring", soc);
14329 		goto fail1;
14330 	}
14331 
14332 	wlan_minidump_log(soc->reo_reinject_ring.base_vaddr_unaligned,
14333 			  soc->reo_reinject_ring.alloc_size,
14334 			  soc->ctrl_psoc,
14335 			  WLAN_MD_DP_SRNG_REO_REINJECT,
14336 			  "reo_reinject_ring");
14337 
14338 	wbm2_sw_rx_rel_ring_id = wlan_cfg_get_rx_rel_ring_id(soc_cfg_ctx);
14339 	/* Rx release ring */
14340 	if (dp_srng_init(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
14341 			 wbm2_sw_rx_rel_ring_id, 0)) {
14342 		dp_init_err("%pK: dp_srng_init failed for rx_rel_ring", soc);
14343 		goto fail1;
14344 	}
14345 
14346 	wlan_minidump_log(soc->rx_rel_ring.base_vaddr_unaligned,
14347 			  soc->rx_rel_ring.alloc_size,
14348 			  soc->ctrl_psoc,
14349 			  WLAN_MD_DP_SRNG_RX_REL,
14350 			  "reo_release_ring");
14351 
14352 	/* Rx exception ring */
14353 	if (dp_srng_init(soc, &soc->reo_exception_ring,
14354 			 REO_EXCEPTION, 0, MAX_REO_DEST_RINGS)) {
14355 		dp_init_err("%pK: dp_srng_init failed - reo_exception", soc);
14356 		goto fail1;
14357 	}
14358 
14359 	wlan_minidump_log(soc->reo_exception_ring.base_vaddr_unaligned,
14360 			  soc->reo_exception_ring.alloc_size,
14361 			  soc->ctrl_psoc,
14362 			  WLAN_MD_DP_SRNG_REO_EXCEPTION,
14363 			  "reo_exception_ring");
14364 
14365 	/* REO command and status rings */
14366 	if (dp_srng_init(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0)) {
14367 		dp_init_err("%pK: dp_srng_init failed for reo_cmd_ring", soc);
14368 		goto fail1;
14369 	}
14370 
14371 	wlan_minidump_log(soc->reo_cmd_ring.base_vaddr_unaligned,
14372 			  soc->reo_cmd_ring.alloc_size,
14373 			  soc->ctrl_psoc,
14374 			  WLAN_MD_DP_SRNG_REO_CMD,
14375 			  "reo_cmd_ring");
14376 
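	/*
	 * One-time SW state tied to the REO command ring: seed the HAL
	 * command ring, then set up the pending reo_cmd list and the
	 * lock that protects it.
	 */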
14377 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
14378 	TAILQ_INIT(&soc->rx.reo_cmd_list);
14379 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
14380 
14381 	if (dp_srng_init(soc, &soc->reo_status_ring, REO_STATUS, 0, 0)) {
14382 		dp_init_err("%pK: dp_srng_init failed for reo_status_ring", soc);
14383 		goto fail1;
14384 	}
14385 
14386 	wlan_minidump_log(soc->reo_status_ring.base_vaddr_unaligned,
14387 			  soc->reo_status_ring.alloc_size,
14388 			  soc->ctrl_psoc,
14389 			  WLAN_MD_DP_SRNG_REO_STATUS,
14390 			  "reo_status_ring");
14391 
14392 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
14393 		if (dp_init_tx_ring_pair_by_index(soc, i))
14394 			goto fail1;
14395 	}
14396 
14397 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
14398 		if (dp_init_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
14399 			goto fail1;
14400 
14401 		if (dp_ipa_init_alt_tx_ring(soc))
14402 			goto fail1;
14403 	}
14404 
14405 	dp_create_ext_stats_event(soc);
14406 
14407 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
14408 		/* Initialize REO destination ring */
14409 		if (dp_srng_init(soc, &soc->reo_dest_ring[i], REO_DST, i, 0)) {
14410 			dp_init_err("%pK: dp_srng_init failed for reo_dest_ring", soc);
14411 			goto fail1;
14412 		}
14413 
14414 		wlan_minidump_log(soc->reo_dest_ring[i].base_vaddr_unaligned,
14415 				  soc->reo_dest_ring[i].alloc_size,
14416 				  soc->ctrl_psoc,
14417 				  WLAN_MD_DP_SRNG_REO_DEST,
14418 				  "reo_dest_ring");
14419 	}
14420 
14421 	if (soc->arch_ops.txrx_soc_srng_init) {
14422 		if (soc->arch_ops.txrx_soc_srng_init(soc)) {
14423 			dp_init_err("%pK: dp_srng_init failed for arch rings",
14424 				    soc);
14425 			goto fail1;
14426 		}
14427 	}
14428 
14429 	return QDF_STATUS_SUCCESS;
14430 fail1:
14431 	/*
14432 	 * Cleanup will be done as part of soc_detach, which will
14433 	 * be called on pdev attach failure
14434 	 */
14435 	dp_soc_srng_deinit(soc);
14436 	return QDF_STATUS_E_FAILURE;
14437 }
14438 
14439 /**
14440  * dp_soc_srng_free() - free soc level srng rings
14441  * @soc: Datapath soc handle
14442  *
14443  */
14444 static void dp_soc_srng_free(struct dp_soc *soc)
14445 {
14446 	uint32_t i;
14447 
14448 	if (soc->arch_ops.txrx_soc_srng_free)
14449 		soc->arch_ops.txrx_soc_srng_free(soc);
14450 
14451 	dp_srng_free(soc, &soc->wbm_desc_rel_ring);
14452 
14453 	for (i = 0; i < soc->num_tcl_data_rings; i++)
14454 		dp_free_tx_ring_pair_by_index(soc, i);
14455 
14456 	/* Free IPA rings for TCL_TX and TCL_COMPL ring */
14457 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
14458 		dp_free_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
14459 		dp_ipa_free_alt_tx_ring(soc);
14460 	}
14461 
14462 	if (soc->init_tcl_cmd_cred_ring)
14463 		dp_srng_free(soc, &soc->tcl_cmd_credit_ring);
14464 
14465 	dp_srng_free(soc, &soc->tcl_status_ring);
14466 
14467 	for (i = 0; i < soc->num_reo_dest_rings; i++)
14468 		dp_srng_free(soc, &soc->reo_dest_ring[i]);
14469 
14470 	dp_srng_free(soc, &soc->reo_reinject_ring);
14471 	dp_srng_free(soc, &soc->rx_rel_ring);
14472 
14473 	dp_srng_free(soc, &soc->reo_exception_ring);
14474 
14475 	dp_srng_free(soc, &soc->reo_cmd_ring);
14476 	dp_srng_free(soc, &soc->reo_status_ring);
14477 }
14478 
14479 /**
14480  * dp_soc_srng_alloc() - Allocate memory for soc level srng rings
14481  * @soc: Datapath soc handle
14482  *
14483  * return: QDF_STATUS_SUCCESS on success
14484  *	   QDF_STATUS_E_NOMEM on failure
14485  */
14486 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc)
14487 {
14488 	uint32_t entries;
14489 	uint32_t i;
14490 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
14491 	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
14492 	uint32_t tx_comp_ring_size, tx_ring_size, reo_dst_ring_size;
14493 
14494 	soc_cfg_ctx = soc->wlan_cfg_ctx;
14495 
14496 	/* sw2wbm link descriptor release ring */
14497 	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
14498 	if (dp_srng_alloc(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE,
14499 			  entries, 0)) {
14500 		dp_init_err("%pK: dp_srng_alloc failed for wbm_desc_rel_ring", soc);
14501 		goto fail1;
14502 	}
14503 
14504 	entries = wlan_cfg_get_dp_soc_tcl_cmd_credit_ring_size(soc_cfg_ctx);
14505 	/* TCL command and status rings */
14506 	if (soc->init_tcl_cmd_cred_ring) {
14507 		if (dp_srng_alloc(soc, &soc->tcl_cmd_credit_ring,
14508 				  TCL_CMD_CREDIT, entries, 0)) {
14509 			dp_init_err("%pK: dp_srng_alloc failed for tcl_cmd_ring", soc);
14510 			goto fail1;
14511 		}
14512 	}
14513 
14514 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
14515 	if (dp_srng_alloc(soc, &soc->tcl_status_ring, TCL_STATUS, entries,
14516 			  0)) {
14517 		dp_init_err("%pK: dp_srng_alloc failed for tcl_status_ring", soc);
14518 		goto fail1;
14519 	}
14520 
14521 	/* REO reinjection ring */
14522 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
14523 	if (dp_srng_alloc(soc, &soc->reo_reinject_ring, REO_REINJECT,
14524 			  entries, 0)) {
14525 		dp_init_err("%pK: dp_srng_alloc failed for reo_reinject_ring", soc);
14526 		goto fail1;
14527 	}
14528 
14529 	/* Rx release ring */
14530 	entries = wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx);
14531 	if (dp_srng_alloc(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
14532 			  entries, 0)) {
14533 		dp_init_err("%pK: dp_srng_alloc failed for rx_rel_ring", soc);
14534 		goto fail1;
14535 	}
14536 
14537 	/* Rx exception ring */
14538 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
14539 	if (dp_srng_alloc(soc, &soc->reo_exception_ring, REO_EXCEPTION,
14540 			  entries, 0)) {
14541 		dp_init_err("%pK: dp_srng_alloc failed - reo_exception", soc);
14542 		goto fail1;
14543 	}
14544 
14545 	/* REO command and status rings */
14546 	entries = wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx);
14547 	if (dp_srng_alloc(soc, &soc->reo_cmd_ring, REO_CMD, entries, 0)) {
14548 		dp_init_err("%pK: dp_srng_alloc failed for reo_cmd_ring", soc);
14549 		goto fail1;
14550 	}
14551 
14552 	entries = wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx);
14553 	if (dp_srng_alloc(soc, &soc->reo_status_ring, REO_STATUS,
14554 			  entries, 0)) {
14555 		dp_init_err("%pK: dp_srng_alloc failed for reo_status_ring", soc);
14556 		goto fail1;
14557 	}
14558 
14559 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
14560 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
14561 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc_cfg_ctx);
14562 
14563 	/* Disable cached desc if NSS offload is enabled */
14564 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
14565 		cached = 0;
14566 
14567 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
14568 		if (dp_alloc_tx_ring_pair_by_index(soc, i))
14569 			goto fail1;
14570 	}
14571 
14572 	/* IPA rings for TCL_TX and TX_COMP will be allocated here */
14573 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
14574 		if (dp_alloc_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
14575 			goto fail1;
14576 
14577 		if (dp_ipa_alloc_alt_tx_ring(soc))
14578 			goto fail1;
14579 	}
14580 
14581 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
14582 		/* Setup REO destination ring */
14583 		if (dp_srng_alloc(soc, &soc->reo_dest_ring[i], REO_DST,
14584 				  reo_dst_ring_size, cached)) {
14585 			dp_init_err("%pK: dp_srng_alloc failed for reo_dest_ring", soc);
14586 			goto fail1;
14587 		}
14588 	}
14589 
14590 	if (soc->arch_ops.txrx_soc_srng_alloc) {
14591 		if (soc->arch_ops.txrx_soc_srng_alloc(soc)) {
14592 			dp_init_err("%pK: dp_srng_alloc failed for arch rings",
14593 				    soc);
14594 			goto fail1;
14595 		}
14596 	}
14597 
14598 	return QDF_STATUS_SUCCESS;
14599 
14600 fail1:
14601 	dp_soc_srng_free(soc);
14602 	return QDF_STATUS_E_NOMEM;
14603 }
14604 
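/*
 * Editorial sketch (not driver code): the soc-level SRNG helpers mirror
 * the pdev-level ones, with architecture-specific rings layered in via
 * the optional soc->arch_ops.txrx_soc_srng_* hooks at every stage. Both
 * error paths self-unwind (alloc frees, init deinits), so an assumed
 * caller only pairs the stages that succeeded:
 *
 *	if (dp_soc_srng_alloc(soc))		- reserve ring memory
 *		return QDF_STATUS_E_NOMEM;
 *	if (dp_soc_srng_init(soc))		- attach rings to HAL
 *		return QDF_STATUS_E_FAILURE;
 *	...
 *	dp_soc_srng_deinit(soc);
 *	dp_soc_srng_free(soc);
 */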
14605 static void dp_soc_cfg_dump(struct dp_soc *soc, uint32_t target_type)
14606 {
14607 	dp_init_info("DP soc Dump for Target = %d", target_type);
14608 	dp_init_info("ast_override_support = %d, da_war_enabled = %d,",
14609 		     soc->ast_override_support, soc->da_war_enabled);
14610 
14611 	wlan_cfg_dp_soc_ctx_dump(soc->wlan_cfg_ctx);
14612 }
14613 
14614 /**
14615  * dp_soc_cfg_init() - initialize target specific configuration
14616  *		       during dp_soc_init
14617  * @soc: dp soc handle
14618  */
14619 static void dp_soc_cfg_init(struct dp_soc *soc)
14620 {
14621 	uint32_t target_type;
14622 
14623 	target_type = hal_get_target_type(soc->hal_soc);
14624 	switch (target_type) {
14625 	case TARGET_TYPE_QCA6290:
14626 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
14627 					       REO_DST_RING_SIZE_QCA6290);
14628 		soc->ast_override_support = 1;
14629 		soc->da_war_enabled = false;
14630 		break;
14631 	case TARGET_TYPE_QCA6390:
14632 	case TARGET_TYPE_QCA6490:
14633 	case TARGET_TYPE_QCA6750:
14634 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
14635 					       REO_DST_RING_SIZE_QCA6290);
14636 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
14637 		soc->ast_override_support = 1;
14638 		if (soc->cdp_soc.ol_ops->get_con_mode &&
14639 		    soc->cdp_soc.ol_ops->get_con_mode() ==
14640 		    QDF_GLOBAL_MONITOR_MODE) {
14641 			int int_ctx;
14642 
14643 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
14644 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
14645 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
14646 			}
14647 		}
14648 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
14649 		break;
14650 	case TARGET_TYPE_KIWI:
14651 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
14652 					       REO_DST_RING_SIZE_QCA6290);
14653 		soc->ast_override_support = 1;
14654 
14655 		if (soc->cdp_soc.ol_ops->get_con_mode &&
14656 		    soc->cdp_soc.ol_ops->get_con_mode() ==
14657 		    QDF_GLOBAL_MONITOR_MODE) {
14658 			int int_ctx;
14659 
14660 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS;
14661 			     int_ctx++) {
14662 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
14663 				if (dp_is_monitor_mode_using_poll(soc))
14664 					soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
14665 			}
14666 		}
14667 
14668 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
14669 		soc->wlan_cfg_ctx->num_rxdma_dst_rings_per_pdev = 1;
14670 		/* use only MAC0 status ring */
14671 		soc->wlan_cfg_ctx->num_rxdma_status_rings_per_pdev = 1;
14672 		break;
14673 	case TARGET_TYPE_QCA8074:
14674 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
14675 		soc->da_war_enabled = true;
14676 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
14677 		break;
14678 	case TARGET_TYPE_QCA8074V2:
14679 	case TARGET_TYPE_QCA6018:
14680 	case TARGET_TYPE_QCA9574:
14681 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
14682 		soc->ast_override_support = 1;
14683 		soc->per_tid_basize_max_tid = 8;
14684 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
14685 		soc->da_war_enabled = false;
14686 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
14687 		break;
14688 	case TARGET_TYPE_QCN9000:
14689 		soc->ast_override_support = 1;
14690 		soc->da_war_enabled = false;
14691 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
14692 		soc->per_tid_basize_max_tid = 8;
14693 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
14694 		soc->lmac_polled_mode = 0;
14695 		soc->wbm_release_desc_rx_sg_support = 1;
14696 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
14697 		break;
14698 	case TARGET_TYPE_QCA5018:
14699 	case TARGET_TYPE_QCN6122:
14700 		soc->ast_override_support = 1;
14701 		soc->da_war_enabled = false;
14702 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
14703 		soc->per_tid_basize_max_tid = 8;
14704 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS_11AX;
14705 		soc->disable_mac1_intr = 1;
14706 		soc->disable_mac2_intr = 1;
14707 		soc->wbm_release_desc_rx_sg_support = 1;
14708 		break;
14709 	case TARGET_TYPE_QCN9224:
14710 		soc->ast_override_support = 1;
14711 		soc->da_war_enabled = false;
14712 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
14713 		soc->per_tid_basize_max_tid = 8;
14714 		soc->wbm_release_desc_rx_sg_support = 1;
14715 		soc->rxdma2sw_rings_not_supported = 1;
14716 		soc->wbm_sg_last_msdu_war = 1;
14717 		soc->ast_offload_support = AST_OFFLOAD_ENABLE_STATUS;
14718 		soc->mec_fw_offload = FW_MEC_FW_OFFLOAD_ENABLED;
14719 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
14720 		wlan_cfg_set_txmon_hw_support(soc->wlan_cfg_ctx, true);
14721 		break;
14722 	default:
14723 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
14724 		qdf_assert_always(0);
14725 		break;
14726 	}
14727 	dp_soc_cfg_dump(soc, target_type);
14728 }
14729 
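/*
 * Editorial sketch (not driver code): a target type must be handled in
 * both dp_soc_cfg_init() above and dp_soc_cfg_attach() below; an
 * unknown type hits qdf_assert_always() in either switch. A
 * hypothetical target (TARGET_TYPE_NEW is illustrative only) would add
 * a case to each, e.g.:
 *
 *	case TARGET_TYPE_NEW:
 *		soc->ast_override_support = 1;
 *		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
 *		break;
 */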
14730 /**
14731  * dp_soc_cfg_attach() - set target specific configuration in
14732  *			 dp soc cfg.
14733  * @soc: dp soc handle
14734  */
14735 static void dp_soc_cfg_attach(struct dp_soc *soc)
14736 {
14737 	int target_type;
14738 	int nss_cfg = 0;
14739 
14740 	target_type = hal_get_target_type(soc->hal_soc);
14741 	switch (target_type) {
14742 	case TARGET_TYPE_QCA6290:
14743 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
14744 					       REO_DST_RING_SIZE_QCA6290);
14745 		break;
14746 	case TARGET_TYPE_QCA6390:
14747 	case TARGET_TYPE_QCA6490:
14748 	case TARGET_TYPE_QCA6750:
14749 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
14750 					       REO_DST_RING_SIZE_QCA6290);
14751 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
14752 		break;
14753 	case TARGET_TYPE_KIWI:
14754 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
14755 					       REO_DST_RING_SIZE_QCA6290);
14756 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
14757 		break;
14758 	case TARGET_TYPE_QCA8074:
14759 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
14760 		break;
14761 	case TARGET_TYPE_QCA8074V2:
14762 	case TARGET_TYPE_QCA6018:
14763 	case TARGET_TYPE_QCA9574:
14764 	case TARGET_TYPE_QCN6122:
14765 	case TARGET_TYPE_QCA5018:
14766 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
14767 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
14768 		break;
14769 	case TARGET_TYPE_QCN9000:
14770 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
14771 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
14772 		break;
14773 	case TARGET_TYPE_QCN9224:
14774 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
14775 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
14776 		break;
14777 	default:
14778 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
14779 		qdf_assert_always(0);
14780 		break;
14781 	}
14782 
14783 	if (soc->cdp_soc.ol_ops->get_soc_nss_cfg)
14784 		nss_cfg = soc->cdp_soc.ol_ops->get_soc_nss_cfg(soc->ctrl_psoc);
14785 
14786 	wlan_cfg_set_dp_soc_nss_cfg(soc->wlan_cfg_ctx, nss_cfg);
14787 
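	/*
	 * When the soc is NSS-offloaded, the host Tx path is bypassed:
	 * host Tx descriptor pools are sized to zero, the TCL
	 * command/credit ring is skipped, and the data/dest ring counts
	 * come from the NSS configuration instead.
	 */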
14788 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
14789 		wlan_cfg_set_num_tx_desc_pool(soc->wlan_cfg_ctx, 0);
14790 		wlan_cfg_set_num_tx_ext_desc_pool(soc->wlan_cfg_ctx, 0);
14791 		wlan_cfg_set_num_tx_desc(soc->wlan_cfg_ctx, 0);
14792 		wlan_cfg_set_num_tx_ext_desc(soc->wlan_cfg_ctx, 0);
14793 		soc->init_tcl_cmd_cred_ring = false;
14794 		soc->num_tcl_data_rings =
14795 			wlan_cfg_num_nss_tcl_data_rings(soc->wlan_cfg_ctx);
14796 		soc->num_reo_dest_rings =
14797 			wlan_cfg_num_nss_reo_dest_rings(soc->wlan_cfg_ctx);
14798 
14799 	} else {
14800 		soc->init_tcl_cmd_cred_ring = true;
14801 		soc->num_tx_comp_rings =
14802 			wlan_cfg_num_tx_comp_rings(soc->wlan_cfg_ctx);
14803 		soc->num_tcl_data_rings =
14804 			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
14805 		soc->num_reo_dest_rings =
14806 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
14807 	}
14808 
14809 	soc->arch_ops.soc_cfg_attach(soc);
14810 }
14811 
14812 static inline  void dp_pdev_set_default_reo(struct dp_pdev *pdev)
14813 {
14814 	struct dp_soc *soc = pdev->soc;
14815 
14816 	switch (pdev->pdev_id) {
14817 	case 0:
14818 		pdev->reo_dest =
14819 			wlan_cfg_radio0_default_reo_get(soc->wlan_cfg_ctx);
14820 		break;
14821 
14822 	case 1:
14823 		pdev->reo_dest =
14824 			wlan_cfg_radio1_default_reo_get(soc->wlan_cfg_ctx);
14825 		break;
14826 
14827 	case 2:
14828 		pdev->reo_dest =
14829 			wlan_cfg_radio2_default_reo_get(soc->wlan_cfg_ctx);
14830 		break;
14831 
14832 	default:
14833 		dp_init_err("%pK: Invalid pdev_id %d for reo selection",
14834 			    soc, pdev->pdev_id);
14835 		break;
14836 	}
14837 }
14838 
14839 static QDF_STATUS dp_pdev_init(struct cdp_soc_t *txrx_soc,
14840 				      HTC_HANDLE htc_handle,
14841 				      qdf_device_t qdf_osdev,
14842 				      uint8_t pdev_id)
14843 {
14844 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
14845 	int nss_cfg;
14846 	void *sojourn_buf;
14847 	QDF_STATUS ret;
14848 
14849 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
14850 	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
14851 
14852 	soc_cfg_ctx = soc->wlan_cfg_ctx;
14853 	pdev->soc = soc;
14854 	pdev->pdev_id = pdev_id;
14855 
14856 	/*
14857 	 * Variable to prevent double pdev deinitialization during
14858 	 * radio detach execution, i.e. in the absence of any vdev.
14859 	 */
14860 	pdev->pdev_deinit = 0;
14861 
14862 	if (dp_wdi_event_attach(pdev)) {
14863 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
14864 			  "dp_wdi_event_attach failed");
14865 		goto fail0;
14866 	}
14867 
14868 	if (dp_pdev_srng_init(pdev)) {
14869 		dp_init_err("%pK: Failed to initialize pdev srng rings", soc);
14870 		goto fail1;
14871 	}
14872 
14873 	/* Initialize descriptors in TCL Rings used by IPA */
14874 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
14875 		hal_tx_init_data_ring(soc->hal_soc,
14876 				      soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng);
14877 		dp_ipa_hal_tx_init_alt_data_ring(soc);
14878 	}
14879 
14880 	/*
14881 	 * Initialize command/credit ring descriptors. The command/credit
14882 	 * ring is also used for sending DATA cmds.
14883 	 */
14884 	if (soc->init_tcl_cmd_cred_ring)
14885 		hal_tx_init_cmd_credit_ring(soc->hal_soc,
14886 					    soc->tcl_cmd_credit_ring.hal_srng);
14887 
14888 	dp_tx_pdev_init(pdev);
14889 
14890 	/*
14891 	 * set nss pdev config based on soc config
14892 	 */
14893 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
14894 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
14895 					 (nss_cfg & (1 << pdev_id)));
14896 	pdev->target_pdev_id =
14897 		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
14898 
14899 	if (soc->preferred_hw_mode == WMI_HOST_HW_MODE_2G_PHYB &&
14900 	    pdev->lmac_id == PHYB_2G_LMAC_ID) {
14901 		pdev->target_pdev_id = PHYB_2G_TARGET_PDEV_ID;
14902 	}
14903 
14904 	/* Reset the cpu ring map if radio is NSS offloaded */
14905 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
14906 		dp_soc_reset_cpu_ring_map(soc);
14907 		dp_soc_reset_intr_mask(soc);
14908 	}
14909 
14910 	TAILQ_INIT(&pdev->vdev_list);
14911 	qdf_spinlock_create(&pdev->vdev_list_lock);
14912 	pdev->vdev_count = 0;
14913 
14914 	qdf_spinlock_create(&pdev->tx_mutex);
14915 	pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_INVALID_LMAC_ID;
14916 	pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_INVALID_LMAC_ID;
14917 	pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_INVALID_LMAC_ID;
14918 
14919 	DP_STATS_INIT(pdev);
14920 
14921 	dp_local_peer_id_pool_init(pdev);
14922 
14923 	dp_dscp_tid_map_setup(pdev);
14924 	dp_pcp_tid_map_setup(pdev);
14925 
14926 	/* set the reo destination during initialization */
14927 	dp_pdev_set_default_reo(pdev);
14928 
14929 	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
14930 
14931 	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
14932 			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
14933 			      TRUE);
14934 
14935 	if (!pdev->sojourn_buf) {
14936 		dp_init_err("%pK: Failed to allocate sojourn buf", soc);
14937 		goto fail2;
14938 	}
14939 	sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
14940 	qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));
14941 
14942 	qdf_event_create(&pdev->fw_peer_stats_event);
14943 
14944 	pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
14945 
14946 	if (dp_rxdma_ring_setup(soc, pdev)) {
14947 		dp_init_err("%pK: RXDMA ring config failed", soc);
14948 		goto fail3;
14949 	}
14950 
14951 	if (dp_init_ipa_rx_refill_buf_ring(soc, pdev))
14952 		goto fail3;
14953 
14954 	if (dp_ipa_ring_resource_setup(soc, pdev))
14955 		goto fail4;
14956 
14957 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
14958 		dp_init_err("%pK: dp_ipa_uc_attach failed", soc);
14959 		goto fail4;
14960 	}
14961 
14962 	ret = dp_rx_fst_attach(soc, pdev);
14963 	if ((ret != QDF_STATUS_SUCCESS) &&
14964 	    (ret != QDF_STATUS_E_NOSUPPORT)) {
14965 		dp_init_err("%pK: RX Flow Search Table attach failed: pdev %d err %d",
14966 			    soc, pdev_id, ret);
14967 		goto fail5;
14968 	}
14969 
14970 	if (dp_pdev_bkp_stats_attach(pdev) != QDF_STATUS_SUCCESS) {
14971 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
14972 			  FL("dp_pdev_bkp_stats_attach failed"));
14973 		goto fail6;
14974 	}
14975 
14976 	if (dp_monitor_pdev_init(pdev)) {
14977 		dp_init_err("%pK: dp_monitor_pdev_init failed", soc);
14978 		goto fail7;
14979 	}
14980 
14981 	/* initialize sw rx descriptors */
14982 	dp_rx_pdev_desc_pool_init(pdev);
14983 	/* allocate buffers and replenish the RxDMA ring */
14984 	dp_rx_pdev_buffers_alloc(pdev);
14985 
14986 	dp_init_tso_stats(pdev);
14987 
14988 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
14989 		qdf_dma_mem_stats_read(),
14990 		qdf_heap_mem_stats_read(),
14991 		qdf_skb_total_mem_stats_read());
14992 
14993 	return QDF_STATUS_SUCCESS;
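	/*
	 * Failure labels unwind in strict reverse order of the init
	 * sequence above; each label undoes only the steps that had
	 * completed before the corresponding failure point.
	 */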
14994 fail7:
14995 	dp_pdev_bkp_stats_detach(pdev);
14996 fail6:
14997 	dp_rx_fst_detach(soc, pdev);
14998 fail5:
14999 	dp_ipa_uc_detach(soc, pdev);
15000 fail4:
15001 	dp_deinit_ipa_rx_refill_buf_ring(soc, pdev);
15002 fail3:
15003 	dp_rxdma_ring_cleanup(soc, pdev);
15004 	qdf_nbuf_free(pdev->sojourn_buf);
15005 fail2:
15006 	qdf_spinlock_destroy(&pdev->tx_mutex);
15007 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
15008 	dp_pdev_srng_deinit(pdev);
15009 fail1:
15010 	dp_wdi_event_detach(pdev);
15011 fail0:
15012 	return QDF_STATUS_E_FAILURE;
15013 }
15014 
15015 /*
15016  * dp_pdev_init_wifi3() - Init txrx pdev
 * @txrx_soc: Datapath soc handle
15017  * @htc_handle: HTC handle for host-target interface
15018  * @qdf_osdev: QDF OS device
15019  * @pdev_id: pdev id of pdev to be initialized
15020  *
15021  * Return: QDF_STATUS
15022  */
15023 static QDF_STATUS dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
15024 				     HTC_HANDLE htc_handle,
15025 				     qdf_device_t qdf_osdev,
15026 				     uint8_t pdev_id)
15027 {
15028 	return dp_pdev_init(txrx_soc, htc_handle, qdf_osdev, pdev_id);
15029 }
15030 
15031