xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision ffc4a9de53c8817b86d03f8fb3c9a829bfec09d5)
/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_types.h>
#include <qdf_lock.h>
#include <qdf_net_types.h>
#include <qdf_lro.h>
#include <qdf_module.h>
#include <hal_hw_headers.h>
#include <hal_api.h>
#include <hif.h>
#include <htt.h>
#include <wdi_event.h>
#include <queue.h>
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_rx.h"
#include "dp_rx_mon.h"
#ifdef DP_RATETABLE_SUPPORT
#include "dp_ratetable.h"
#endif
#include <cdp_txrx_handle.h>
#include <wlan_cfg.h>
#include <wlan_utility.h>
#include "cdp_txrx_cmn_struct.h"
#include "cdp_txrx_stats_struct.h"
#include "cdp_txrx_cmn_reg.h"
#include <qdf_util.h>
#include "dp_peer.h"
#include "dp_rx_mon.h"
#include "htt_stats.h"
#include "dp_htt.h"
#ifdef WLAN_SUPPORT_RX_FISA
#include <dp_fisa_rx.h>
#endif
#include "htt_ppdu_stats.h"
#include "qdf_mem.h"   /* qdf_mem_malloc,free */
#include "cfg_ucfg_api.h"
#include "dp_mon_filter.h"

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#include "cdp_txrx_flow_ctrl_v2.h"
#else
static inline void
cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
{
	return;
}
#endif
#include "dp_ipa.h"
#include "dp_cal_client_api.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#ifdef WLAN_SUPPORT_MSCS
#include "dp_mscs.h"
#endif
#ifdef WLAN_SUPPORT_MESH_LATENCY
#include "dp_mesh_latency.h"
#endif
#ifdef ATH_SUPPORT_IQUE
#include "dp_txrx_me.h"
#endif
#if defined(DP_CON_MON)
#ifndef REMOVE_PKT_LOG
#include <pktlog_ac_api.h>
#include <pktlog_ac.h>
#endif
#endif
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
#include <dp_swlm.h>
#endif

#ifdef WLAN_FEATURE_STATS_EXT
#define INIT_RX_HW_STATS_LOCK(_soc) \
	qdf_spinlock_create(&(_soc)->rx_hw_stats_lock)
#define DEINIT_RX_HW_STATS_LOCK(_soc) \
	qdf_spinlock_destroy(&(_soc)->rx_hw_stats_lock)
#else
#define INIT_RX_HW_STATS_LOCK(_soc)  /* no op */
#define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */
#endif

#if defined(DP_PEER_EXTENDED_API) || defined(WLAN_DP_PENDING_MEM_FLUSH)
#define SET_PEER_REF_CNT_ONE(_peer) \
	qdf_atomic_set(&(_peer)->ref_cnt, 1)
#else
#define SET_PEER_REF_CNT_ONE(_peer)
#endif

#define dp_init_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_INIT, params)
#define dp_init_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_INIT, params)
#define dp_init_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_INIT, params)
#define dp_init_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_INIT, ## params)
#define dp_init_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_INIT, params)

#define dp_vdev_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_VDEV, params)
#define dp_vdev_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_VDEV, params)
#define dp_vdev_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_VDEV, params)
#define dp_vdev_info(params...) \
	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_VDEV, ## params)
#define dp_vdev_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_VDEV, params)

void dp_configure_arch_ops(struct dp_soc *soc);
qdf_size_t dp_get_soc_context_size(uint16_t device_id);

/*
 * The max size of cdp_peer_stats_param_t is limited to 16 bytes.
 * If the buffer size exceeds this limit,
 * dp_txrx_get_peer_stats is to be used instead.
 */
QDF_COMPILE_TIME_ASSERT(cdp_peer_stats_param_t_max_size,
			(sizeof(cdp_peer_stats_param_t) <= 16));

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
/*
 * If WLAN_CFG_INT_NUM_CONTEXTS is changed, HIF_NUM_INT_CONTEXTS
 * also should be updated accordingly
 */
QDF_COMPILE_TIME_ASSERT(num_intr_grps,
			HIF_NUM_INT_CONTEXTS == WLAN_CFG_INT_NUM_CONTEXTS);

/*
 * HIF_EVENT_HIST_MAX should always be a power of 2
 */
QDF_COMPILE_TIME_ASSERT(hif_event_history_size,
			(HIF_EVENT_HIST_MAX & (HIF_EVENT_HIST_MAX - 1)) == 0);
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
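
/*
 * A power of two has exactly one bit set, so (N & (N - 1)) clears that
 * bit and yields zero, while any non-power-of-two keeps at least one bit
 * set. For example, 1024 & 1023 == 0 (the assert above passes), whereas
 * 1000 & 999 != 0 (the assert fires at compile time).
 */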

/*
 * If WLAN_CFG_INT_NUM_CONTEXTS is changed,
 * WLAN_CFG_INT_NUM_CONTEXTS_MAX should also be updated
 */
QDF_COMPILE_TIME_ASSERT(wlan_cfg_num_int_ctxs,
			WLAN_CFG_INT_NUM_CONTEXTS_MAX >=
			WLAN_CFG_INT_NUM_CONTEXTS);

#ifdef WLAN_RX_PKT_CAPTURE_ENH
#include "dp_rx_mon_feature.h"
#else
/*
 * dp_config_enh_rx_capture() - API to enable/disable enhanced rx capture
 * @pdev_handle: DP_PDEV handle
 * @val: user provided value
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_config_enh_rx_capture(struct dp_pdev *pdev_handle, uint8_t val)
{
	return QDF_STATUS_E_INVAL;
}
#endif /* WLAN_RX_PKT_CAPTURE_ENH */

#ifdef WLAN_TX_PKT_CAPTURE_ENH
#include "dp_tx_capture.h"
#else
/*
 * dp_config_enh_tx_capture() - API to enable/disable enhanced tx capture
 * @pdev_handle: DP_PDEV handle
 * @val: user provided value
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_config_enh_tx_capture(struct dp_pdev *pdev_handle, uint8_t val)
{
	return QDF_STATUS_E_INVAL;
}
#endif

static void dp_pdev_srng_deinit(struct dp_pdev *pdev);
static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev);
static void dp_pdev_srng_free(struct dp_pdev *pdev);
static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev);

static void dp_soc_srng_deinit(struct dp_soc *soc);
static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc);
static void dp_soc_srng_free(struct dp_soc *soc);
static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc);

static void dp_soc_cfg_init(struct dp_soc *soc);
static void dp_soc_cfg_attach(struct dp_soc *soc);

static inline
QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
				HTC_HANDLE htc_handle,
				qdf_device_t qdf_osdev,
				uint8_t pdev_id);

static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id);

static QDF_STATUS
dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
		   HTC_HANDLE htc_handle,
		   qdf_device_t qdf_osdev,
		   uint8_t pdev_id);

static QDF_STATUS
dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id, int force);

static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc);
static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc);

void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
		  struct hif_opaque_softc *hif_handle);
static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc,
				       uint8_t pdev_id,
				       int force);
static struct dp_soc *
dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
	      struct hif_opaque_softc *hif_handle,
	      HTC_HANDLE htc_handle,
	      qdf_device_t qdf_osdev,
	      struct ol_if_ops *ol_ops, uint16_t device_id);
static void dp_pktlogmod_exit(struct dp_pdev *handle);
static inline QDF_STATUS dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl,
					      uint8_t vdev_id,
					      uint8_t *peer_mac_addr);
static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
				       uint8_t vdev_id,
				       uint8_t *peer_mac, uint32_t bitmap);
static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
				bool unmap_only);
#ifdef ENABLE_VERBOSE_DEBUG
bool is_dp_verbose_debug_enabled;
#endif

#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
static void dp_cfr_filter(struct cdp_soc_t *soc_hdl,
			  uint8_t pdev_id,
			  bool enable,
			  struct cdp_monitor_filter *filter_val);
static bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
static void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			   bool enable);
static inline void
dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		     struct cdp_cfr_rcc_stats *cfr_rcc_stats);
static inline void
dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
static inline void
dp_enable_mon_reap_timer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			 bool enable);
#endif
static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
						uint8_t index);
static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index);
static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index);
static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
						 uint8_t index);
static inline bool
dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev);
static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
					    enum hal_ring_type ring_type,
					    int ring_num);
static QDF_STATUS dp_vdev_set_monitor_mode_rings(struct dp_pdev *pdev,
						 uint8_t delayed_replenish);
static void dp_vdev_set_monitor_mode_buf_rings(struct dp_pdev *pdev);

#define DP_INTR_POLL_TIMER_MS	5

#define MON_VDEV_TIMER_INIT 0x1
#define MON_VDEV_TIMER_RUNNING 0x2

/* Generic AST entry aging timer value */
#define DP_AST_AGING_TIMER_DEFAULT_MS	1000
#define DP_MCS_LENGTH (6*MAX_MCS)

#define DP_CURR_FW_STATS_AVAIL 19
#define DP_HTT_DBG_EXT_STATS_MAX 256
#define DP_MAX_SLEEP_TIME 100
#ifndef QCA_WIFI_3_0_EMU
#define SUSPEND_DRAIN_WAIT 500
#else
#define SUSPEND_DRAIN_WAIT 3000
#endif

#ifdef IPA_OFFLOAD
/* Exclude IPA rings from the interrupt context */
#define TX_RING_MASK_VAL	0xb
#define RX_RING_MASK_VAL	0x7
#else
#define TX_RING_MASK_VAL	0xF
#define RX_RING_MASK_VAL	0xF
#endif

#define STR_MAXLEN	64

#define RNG_ERR		"SRNG setup failed for"

/* Threshold for peer's cached buf queue beyond which frames are dropped */
#define DP_RX_CACHED_BUFQ_THRESH 64

/* Budget to reap monitor status ring */
#define DP_MON_REAP_BUDGET 1024

/**
 * default_dscp_tid_map - Default DSCP-TID mapping
 *
 * DSCP        TID
 * 000000      0
 * 001000      1
 * 010000      2
 * 011000      3
 * 100000      4
 * 101000      5
 * 110000      6
 * 111000      7
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7,
};
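
/*
 * Worked example (illustrative only): the DSCP value is the upper six
 * bits of the IP TOS byte, so an EF-marked voice frame carries DSCP 46
 * (0b101110). default_dscp_tid_map[46] falls in the "5, 5, 5, ..." row
 * above, so the frame is queued on TID 5.
 */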

/**
 * default_pcp_tid_map - Default PCP-TID mapping
 *
 * PCP     TID
 * 000      0
 * 001      1
 * 010      2
 * 011      3
 * 100      4
 * 101      5
 * 110      6
 * 111      7
 */
static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
	0, 1, 2, 3, 4, 5, 6, 7,
};

/**
 * @brief CPU to Tx ring map
 */
uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
#endif
};
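
/*
 * Illustrative lookup (values assumed for the example): if the NSS
 * offload configuration selects row 2 and a frame is handled in
 * interrupt context 3, dp_cpu_ring_map[2][3] == 0x2, i.e. TCL ring 2 is
 * used for transmit. Each row spreads the TX rings across interrupt
 * contexts so host CPUs and NSS-offloaded rings do not collide.
 */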

/**
 * @brief Select the type of statistics
 */
enum dp_stats_type {
	STATS_FW = 0,
	STATS_HOST = 1,
	STATS_TYPE_MAX = 2,
};

/**
 * @brief General Firmware statistics options
 *
 */
enum dp_fw_stats {
	TXRX_FW_STATS_INVALID	= -1,
};

/**
 * dp_stats_mapping_table - Firmware and Host statistics
 * currently supported
 */
const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
	/* Last ENUM for HTT FW STATS */
	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
	{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_FSE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_HAL_REG_WRITE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_REO_HW_DESC_DUMP},
	{HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT, TXRX_HOST_STATS_INVALID}
};
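
/*
 * A simplified sketch of how the table above is consumed by the stats
 * request paths later in this file: a cdp stats id selects a row, and
 * the column decides whether the request is sent to firmware or served
 * on the host (stats_id here is a hypothetical index variable).
 *
 *   int fw_stat   = dp_stats_mapping_table[stats_id][STATS_FW];
 *   int host_stat = dp_stats_mapping_table[stats_id][STATS_HOST];
 *   if (fw_stat != TXRX_FW_STATS_INVALID)
 *           issue the HTT extended stats request for fw_stat;
 *   else if (host_stat != TXRX_HOST_STATS_INVALID)
 *           print the corresponding host statistics;
 */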

/* MCL specific functions */
#if defined(DP_CON_MON)
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * For MCL, monitor mode rings are processed in timer contexts (polled).
 * This function returns 0 because, in interrupt mode (softirq-based RX),
 * we do not want to process monitor mode rings in a softirq.
 *
 * So, when packet log is enabled for SAP/STA/P2P modes,
 * regular interrupt processing will not process monitor mode rings; that
 * work is done in a separate timer context.
 *
 * Return: 0
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return 0;
}

/*
 * dp_service_mon_rings() - service monitor rings
 * @soc: soc dp handle
 * @quota: number of ring entries that can be serviced
 *
 * Return: None
 *
 */
static void dp_service_mon_rings(struct dp_soc *soc, uint32_t quota)
{
	int ring = 0, work_done;
	struct dp_pdev *pdev = NULL;

	for (ring = 0; ring < MAX_NUM_LMAC_HW; ring++) {
		pdev = dp_get_pdev_for_lmac_id(soc, ring);
		if (!pdev)
			continue;
		work_done = dp_mon_process(soc, NULL, ring, quota);

		dp_rx_mon_dest_debug("Reaped %d descs from Monitor rings",
				     work_done);
	}
}

/*
 * dp_mon_reap_timer_handler() - timer to reap monitor rings,
 * required as we are not getting ppdu end interrupts
 * @arg: SoC Handle
 *
 * Return:
 *
 */
static void dp_mon_reap_timer_handler(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;

	dp_service_mon_rings(soc, QCA_NAPI_BUDGET);

	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
}
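
/*
 * A minimal usage sketch (assuming the timer was created elsewhere with
 * qdf_timer_init() against dp_mon_reap_timer_handler): once kicked, the
 * handler reaps up to QCA_NAPI_BUDGET entries and re-arms itself,
 * yielding a 5 ms polling loop in place of PPDU end interrupts:
 *
 *   qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
 */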

#ifndef REMOVE_PKT_LOG
/**
 * dp_pkt_log_init() - API to initialize packet log
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @scn: HIF context
 *
 * Return: none
 */
void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *handle =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!handle) {
		dp_err("pdev handle is NULL");
		return;
	}

	if (handle->pkt_log_init) {
		dp_init_err("%pK: Packet log already initialized", soc);
		return;
	}

	pktlog_sethandle(&handle->pl_dev, scn);
	pktlog_set_pdev_id(handle->pl_dev, pdev_id);
	pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);

	if (pktlogmod_init(scn)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: pktlogmod_init failed", __func__);
		handle->pkt_log_init = false;
	} else {
		handle->pkt_log_init = true;
	}
}

/**
 * dp_pkt_log_con_service() - connect packet log service
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @scn: device context
 *
 * Return: none
 */
static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
				   uint8_t pdev_id, void *scn)
{
	dp_pkt_log_init(soc_hdl, pdev_id, scn);
	pktlog_htc_attach();
}

/**
 * dp_pktlogmod_exit() - API to cleanup pktlog info
 * @pdev: Pdev handle
 *
 * Return: none
 */
static void dp_pktlogmod_exit(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct hif_opaque_softc *scn = soc->hif_handle;

	if (!scn) {
		dp_err("Invalid hif(scn) handle");
		return;
	}

	/* stop mon_reap_timer if it has been started */
	if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED &&
	    soc->reap_timer_init && (!dp_is_enable_reap_timer_non_pkt(pdev)))
		qdf_timer_sync_cancel(&soc->mon_reap_timer);

	pktlogmod_exit(scn);
	pdev->pkt_log_init = false;
}
#else
static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
				   uint8_t pdev_id, void *scn)
{
}

static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
#endif
/**
 * dp_get_num_rx_contexts() - get number of RX contexts
 * @soc_hdl: cdp opaque soc handle
 *
 * Return: number of RX contexts
 */
static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
{
	int i;
	int num_rx_contexts = 0;

	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
		if (wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i))
			num_rx_contexts++;

	return num_rx_contexts;
}

#else
static void dp_pktlogmod_exit(struct dp_pdev *handle) { }

/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * Return: mon mask value
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
}

/*
 * dp_service_lmac_rings() - timer to reap lmac rings
 * @arg: SoC Handle
 *
 * Return:
 *
 */
static void dp_service_lmac_rings(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;
	int ring = 0, i;
	struct dp_pdev *pdev = NULL;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	/* Process LMAC interrupts */
	for (ring = 0; ring < MAX_NUM_LMAC_HW; ring++) {
		int mac_for_pdev = ring;
		struct dp_srng *rx_refill_buf_ring;

		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
		if (!pdev)
			continue;

		rx_refill_buf_ring = &soc->rx_refill_buf_ring[mac_for_pdev];

		dp_mon_process(soc, NULL, mac_for_pdev,
			       QCA_NAPI_BUDGET);

		for (i = 0;
		     i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
			dp_rxdma_err_process(&soc->intr_ctx[i], soc,
					     mac_for_pdev,
					     QCA_NAPI_BUDGET);

		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF,
						  mac_for_pdev))
			dp_rx_buffers_replenish(soc, mac_for_pdev,
						rx_refill_buf_ring,
						&soc->rx_desc_buf[mac_for_pdev],
						0, &desc_list, &tail);
	}

	qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
}

#endif

#ifdef FEATURE_MEC
void dp_peer_mec_flush_entries(struct dp_soc *soc)
{
	unsigned int index;
	struct dp_mec_entry *mecentry, *mecentry_next;

	TAILQ_HEAD(, dp_mec_entry) free_list;
	TAILQ_INIT(&free_list);

	if (!soc->mec_hash.mask)
		return;

	if (!soc->mec_hash.bins)
		return;

	if (!qdf_atomic_read(&soc->mec_cnt))
		return;

	qdf_spin_lock_bh(&soc->mec_lock);
	for (index = 0; index <= soc->mec_hash.mask; index++) {
		if (!TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
			TAILQ_FOREACH_SAFE(mecentry, &soc->mec_hash.bins[index],
					   hash_list_elem, mecentry_next) {
			    dp_peer_mec_detach_entry(soc, mecentry, &free_list);
			}
		}
	}
	qdf_spin_unlock_bh(&soc->mec_lock);

	dp_peer_mec_free_list(soc, &free_list);
}

/**
 * dp_print_mec_stats() - Dump MEC entries in table
 * @soc: Datapath soc handle
 *
 * Return: none
 */
static void dp_print_mec_stats(struct dp_soc *soc)
{
	int i;
	uint32_t index;
	struct dp_mec_entry *mecentry = NULL, *mec_list;
	uint32_t num_entries = 0;

	DP_PRINT_STATS("MEC Stats:");
	DP_PRINT_STATS("   Entries Added   = %d", soc->stats.mec.added);
	DP_PRINT_STATS("   Entries Deleted = %d", soc->stats.mec.deleted);

	if (!qdf_atomic_read(&soc->mec_cnt))
		return;

	mec_list = qdf_mem_malloc(sizeof(*mecentry) * DP_PEER_MAX_MEC_ENTRY);
	if (!mec_list) {
		dp_peer_warn("%pK: failed to allocate mec_list", soc);
		return;
	}

	DP_PRINT_STATS("MEC Table:");
	for (index = 0; index <= soc->mec_hash.mask; index++) {
		qdf_spin_lock_bh(&soc->mec_lock);
		if (TAILQ_EMPTY(&soc->mec_hash.bins[index])) {
			qdf_spin_unlock_bh(&soc->mec_lock);
			continue;
		}

		TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index],
			      hash_list_elem) {
			qdf_mem_copy(&mec_list[num_entries], mecentry,
				     sizeof(*mecentry));
			num_entries++;
		}
		qdf_spin_unlock_bh(&soc->mec_lock);
	}

	if (!num_entries) {
		qdf_mem_free(mec_list);
		return;
	}

	for (i = 0; i < num_entries; i++) {
		DP_PRINT_STATS("%6d mac_addr = " QDF_MAC_ADDR_FMT
			       " is_active = %d pdev_id = %d vdev_id = %d",
			       i,
			       QDF_MAC_ADDR_REF(mec_list[i].mac_addr.raw),
			       mec_list[i].is_active,
			       mec_list[i].pdev_id,
			       mec_list[i].vdev_id);
	}
	qdf_mem_free(mec_list);
}
#else
static void dp_print_mec_stats(struct dp_soc *soc)
{
}
#endif

static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
				 uint8_t vdev_id,
				 uint8_t *peer_mac,
				 uint8_t *mac_addr,
				 enum cdp_txrx_ast_entry_type type,
				 uint32_t flags)
{
	int ret = -1;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
						       peer_mac, 0, vdev_id,
						       DP_MOD_ID_CDP);

	if (!peer) {
		dp_peer_debug("Peer is NULL!");
		return ret;
	}

	status = dp_peer_add_ast((struct dp_soc *)soc_hdl,
				 peer,
				 mac_addr,
				 type,
				 flags);
	if ((status == QDF_STATUS_SUCCESS) ||
	    (status == QDF_STATUS_E_ALREADY) ||
	    (status == QDF_STATUS_E_AGAIN))
		ret = 0;

	dp_hmwds_ast_add_notify(peer, mac_addr,
				type, status, false);

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return ret;
}

static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
						uint8_t vdev_id,
						uint8_t *peer_mac,
						uint8_t *wds_macaddr,
						uint32_t flags)
{
	int status = -1;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry  *ast_entry = NULL;
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
						       peer_mac, 0, vdev_id,
						       DP_MOD_ID_CDP);

	if (!peer) {
		dp_peer_debug("Peer is NULL!");
		return status;
	}

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
						    peer->vdev->pdev->pdev_id);

	if (ast_entry) {
		status = dp_peer_update_ast(soc,
					    peer,
					    ast_entry, flags);
	}
	qdf_spin_unlock_bh(&soc->ast_lock);

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return status;
}

/*
 * dp_peer_reset_ast_entries() - Deletes all HMWDS entries for a peer
 * @soc:		Datapath SOC handle
 * @peer:		DP peer
 * @arg:		callback argument
 *
 * Return: None
 */
static void
dp_peer_reset_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_ast_entry *tmp_ast_entry;

	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) {
		if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
		    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
			dp_peer_del_ast(soc, ast_entry);
	}
}

/*
 * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry
 * @soc_hdl:		Datapath SOC handle
 * @wds_macaddr:	WDS entry MAC address
 * @peer_mac_addr:	peer MAC address
 * @vdev_id:		id of vdev handle
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
					 uint8_t *wds_macaddr,
					 uint8_t *peer_mac_addr,
					 uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_peer *peer;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	pdev = vdev->pdev;

	if (peer_mac_addr) {
		peer = dp_peer_find_hash_find(soc, peer_mac_addr,
					      0, vdev->vdev_id,
					      DP_MOD_ID_CDP);
		if (!peer) {
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
			return QDF_STATUS_E_FAILURE;
		}

		qdf_spin_lock_bh(&soc->ast_lock);
		dp_peer_reset_ast_entries(soc, peer, NULL);
		qdf_spin_unlock_bh(&soc->ast_lock);
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	} else if (wds_macaddr) {
		qdf_spin_lock_bh(&soc->ast_lock);
		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
							    pdev->pdev_id);

		if (ast_entry) {
			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
				dp_peer_del_ast(soc, ast_entry);
		}
		qdf_spin_unlock_bh(&soc->ast_lock);
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}

/*
 * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entries
 * @soc:		Datapath SOC handle
 * @vdev_id:		id of vdev object
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
			     uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *) soc_hdl;

	qdf_spin_lock_bh(&soc->ast_lock);

	dp_soc_iterate_peer(soc, dp_peer_reset_ast_entries, NULL,
			    DP_MOD_ID_CDP);
	qdf_spin_unlock_bh(&soc->ast_lock);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_peer_flush_ast_entries() - Delete all wds and hmwds ast entries of a peer
 * @soc:		Datapath SOC
 * @peer:		Datapath peer
 * @arg:		arg to callback
 *
 * Return: None
 */
static void
dp_peer_flush_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
	struct dp_ast_entry *ase = NULL;
	struct dp_ast_entry *temp_ase;

	DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
		if ((ase->type ==
			CDP_TXRX_AST_TYPE_STATIC) ||
			(ase->type ==
			 CDP_TXRX_AST_TYPE_SELF) ||
			(ase->type ==
			 CDP_TXRX_AST_TYPE_STA_BSS))
			continue;
		dp_peer_del_ast(soc, ase);
	}
}

/*
 * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entries
 * @soc:		Datapath SOC handle
 *
 * Return: None
 */
static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
{
	struct dp_soc *soc = (struct dp_soc *) soc_hdl;

	qdf_spin_lock_bh(&soc->ast_lock);

	dp_soc_iterate_peer(soc, dp_peer_flush_ast_entries, NULL,
			    DP_MOD_ID_CDP);

	qdf_spin_unlock_bh(&soc->ast_lock);
	dp_peer_mec_flush_entries(soc);
}

/**
 * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
 *                                       and return ast entry information
 *                                       of first ast entry found in the
 *                                       table with given mac address
 *
 * @soc : data path soc handle
 * @ast_mac_addr : AST entry mac address
 * @ast_entry_info : ast entry information
 *
 * return : true if ast entry found with ast_mac_addr
 *          false if ast entry not found
 */
static bool dp_peer_get_ast_info_by_soc_wifi3
	(struct cdp_soc_t *soc_hdl,
	 uint8_t *ast_mac_addr,
	 struct cdp_ast_entry_info *ast_entry_info)
{
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_peer *peer = NULL;

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
	if ((!ast_entry) ||
	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}

	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
				     DP_MOD_ID_AST);
	if (!peer) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}

	ast_entry_info->type = ast_entry->type;
	ast_entry_info->pdev_id = ast_entry->pdev_id;
	ast_entry_info->vdev_id = ast_entry->vdev_id;
	ast_entry_info->peer_id = ast_entry->peer_id;
	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
		     &peer->mac_addr.raw[0],
		     QDF_MAC_ADDR_SIZE);
	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
	qdf_spin_unlock_bh(&soc->ast_lock);
	return true;
}

/**
 * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
 *                                          and return ast entry information
 *                                          if mac address and pdev_id matches
 *
 * @soc : data path soc handle
 * @ast_mac_addr : AST entry mac address
 * @pdev_id : pdev_id
 * @ast_entry_info : ast entry information
 *
 * return : true if ast entry found with ast_mac_addr
 *          false if ast entry not found
 */
static bool dp_peer_get_ast_info_by_pdevid_wifi3
		(struct cdp_soc_t *soc_hdl,
		 uint8_t *ast_mac_addr,
		 uint8_t pdev_id,
		 struct cdp_ast_entry_info *ast_entry_info)
{
	struct dp_ast_entry *ast_entry;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_peer *peer = NULL;

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr,
						    pdev_id);

	if ((!ast_entry) ||
	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}

	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
				     DP_MOD_ID_AST);
	if (!peer) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}

	ast_entry_info->type = ast_entry->type;
	ast_entry_info->pdev_id = ast_entry->pdev_id;
	ast_entry_info->vdev_id = ast_entry->vdev_id;
	ast_entry_info->peer_id = ast_entry->peer_id;
	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
		     &peer->mac_addr.raw[0],
		     QDF_MAC_ADDR_SIZE);
	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
	qdf_spin_unlock_bh(&soc->ast_lock);
	return true;
}

/**
 * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table
 *                            with given mac address
 *
 * @soc : data path soc handle
 * @ast_mac_addr : AST entry mac address
 * @callback : callback function to be called on ast delete response from FW
 * @cookie : argument to be passed to callback
 *
 * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
 *          is sent
 *          QDF_STATUS_E_INVAL if ast entry not found
 */
static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
					       uint8_t *mac_addr,
					       txrx_ast_free_cb callback,
					       void *cookie)

{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_ast_entry *ast_entry = NULL;
	txrx_ast_free_cb cb = NULL;
	void *arg = NULL;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return QDF_STATUS_E_INVAL;
	}

	if (ast_entry->callback) {
		cb = ast_entry->callback;
		arg = ast_entry->cookie;
	}

	ast_entry->callback = callback;
	ast_entry->cookie = cookie;

	/*
	 * If delete_in_progress is set, an AST delete was already sent to
	 * the target and the host is waiting for a response; do not send
	 * the delete again.
	 */
	if (!ast_entry->delete_in_progress)
		dp_peer_del_ast(soc, ast_entry);

	qdf_spin_unlock_bh(&soc->ast_lock);
	if (cb) {
		cb(soc->ctrl_psoc,
		   dp_soc_to_cdp_soc(soc),
		   arg,
		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
	}
	return QDF_STATUS_SUCCESS;
}
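
/*
 * A hedged usage sketch: my_ast_free_cb and my_cookie below are
 * hypothetical caller-supplied values, not part of this file. If an
 * older callback was still pending on the entry, it is invoked right
 * away with CDP_TXRX_AST_DELETE_IN_PROGRESS and the new callback takes
 * its place for the eventual FW response.
 *
 *   dp_peer_ast_entry_del_by_soc(soc_handle, mac_addr,
 *                                my_ast_free_cb, my_cookie);
 */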

/**
 * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
 *                                   table if mac address and pdev_id matches
 *
 * @soc : data path soc handle
 * @ast_mac_addr : AST entry mac address
 * @pdev_id : pdev id
 * @callback : callback function to be called on ast delete response from FW
 * @cookie : argument to be passed to callback
 *
 * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
 *          is sent
 *          QDF_STATUS_E_INVAL if ast entry not found
 */

static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
						uint8_t *mac_addr,
						uint8_t pdev_id,
						txrx_ast_free_cb callback,
						void *cookie)

{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_ast_entry *ast_entry;
	txrx_ast_free_cb cb = NULL;
	void *arg = NULL;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);

	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return QDF_STATUS_E_INVAL;
	}

	if (ast_entry->callback) {
		cb = ast_entry->callback;
		arg = ast_entry->cookie;
	}

	ast_entry->callback = callback;
	ast_entry->cookie = cookie;

	/*
	 * If delete_in_progress is set, an AST delete was already sent to
	 * the target and the host is waiting for a response; do not send
	 * the delete again.
	 */
	if (!ast_entry->delete_in_progress)
		dp_peer_del_ast(soc, ast_entry);

	qdf_spin_unlock_bh(&soc->ast_lock);

	if (cb) {
		cb(soc->ctrl_psoc,
		   dp_soc_to_cdp_soc(soc),
		   arg,
		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
 * @ring_num: ring num of the ring being queried
 * @grp_mask: the grp_mask array for the ring type in question.
 *
 * The grp_mask array is indexed by group number and the bit fields correspond
 * to ring numbers.  We are finding which interrupt group a ring belongs to.
 *
 * Return: the index in the grp_mask array with the ring number.
 * -QDF_STATUS_E_NOENT if no entry is found
 */
static int dp_srng_find_ring_in_mask(int ring_num, uint8_t *grp_mask)
{
	int ext_group_num;
	uint8_t mask = 1 << ring_num;

	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
	     ext_group_num++) {
		if (mask & grp_mask[ext_group_num])
			return ext_group_num;
	}

	return -QDF_STATUS_E_NOENT;
}
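
/*
 * Worked example with made-up mask values: for ring_num 2, mask is
 * 1 << 2 = 0x4. With grp_mask[] = {0x3, 0x4, 0x8}, group 0 (0x3) does
 * not contain bit 0x4 but group 1 (0x4) does, so the function returns 1
 * and ring 2 is serviced by interrupt ext_group 1.
 */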

static int dp_srng_calculate_msi_group(struct dp_soc *soc,
				       enum hal_ring_type ring_type,
				       int ring_num)
{
	uint8_t *grp_mask;

	switch (ring_type) {
	case WBM2SW_RELEASE:
		/* dp_tx_comp_handler - soc->tx_comp_ring */
		if (ring_num < 3)
			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];

		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
		else if (ring_num == 3) {
			/* sw treats this as a separate ring type */
			grp_mask = &soc->wlan_cfg_ctx->
				int_rx_wbm_rel_ring_mask[0];
			ring_num = 0;
		} else {
			qdf_assert(0);
			return -QDF_STATUS_E_NOENT;
		}
	break;

	case REO_EXCEPTION:
		/* dp_rx_err_process - &soc->reo_exception_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
	break;

	case REO_DST:
		/* dp_rx_process - soc->reo_dest_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
	break;

	case REO_STATUS:
		/* dp_reo_status_ring_handler - soc->reo_status_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
	break;

	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
	case RXDMA_MONITOR_STATUS:
	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
	case RXDMA_MONITOR_DST:
		/* dp_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
	break;
	case RXDMA_DST:
		/* dp_rxdma_err_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
	break;

	case RXDMA_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
	break;

	case RXDMA_MONITOR_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
	break;

	case TCL_DATA:
	/* CMD_CREDIT_RING is used as command in 8074 and credit in 9000 */
	case TCL_CMD_CREDIT:
	case REO_CMD:
	case SW2WBM_RELEASE:
	case WBM_IDLE_LINK:
		/* normally empty SW_TO_HW rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case TCL_STATUS:
	case REO_REINJECT:
		/* misc unused rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case CE_SRC:
	case CE_DST:
	case CE_DST_STATUS:
		/* CE_rings - currently handled by hif */
	default:
		return -QDF_STATUS_E_NOENT;
	break;
	}

	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
}

/*
 * dp_get_num_msi_available() - API to get number of MSIs available
 * @dp_soc: DP soc Handle
 * @interrupt_mode: Mode of interrupts
 *
 * Return: Number of MSIs available or 0 in case of integrated
 */
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
{
	return 0;
}
#else
/*
 * dp_get_num_msi_available() - API to get number of MSIs available
 * @dp_soc: DP soc Handle
 * @interrupt_mode: Mode of interrupts
 *
 * Return: Number of MSIs available or 0 in case of integrated
 */
static int dp_get_num_msi_available(struct dp_soc *soc, int interrupt_mode)
{
	int msi_data_count;
	int msi_data_start;
	int msi_irq_start;
	int ret;

	if (interrupt_mode == DP_INTR_INTEGRATED) {
		return 0;
	} else if (interrupt_mode == DP_INTR_MSI || interrupt_mode ==
		   DP_INTR_POLL) {
		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
						  &msi_data_count,
						  &msi_data_start,
						  &msi_irq_start);
		if (ret) {
			qdf_err("Unable to get DP MSI assignment %d",
				interrupt_mode);
			return -EINVAL;
		}
		return msi_data_count;
	}
	qdf_err("Interrupt mode invalid %d", interrupt_mode);
	return -EINVAL;
}
#endif

/**
 * dp_is_msi_group_number_invalid() - check whether msi_group_number is invalid
 * @msi_group_number: MSI group number.
 * @msi_data_count: MSI data count.
 *
 * Return: true if msi_group_number is invalid.
 */
#ifdef WLAN_ONE_MSI_VECTOR
static bool dp_is_msi_group_number_invalid(int msi_group_number,
					   int msi_data_count)
{
	return false;
}
#else
static bool dp_is_msi_group_number_invalid(int msi_group_number,
					   int msi_data_count)
{
	return msi_group_number > msi_data_count;
}
#endif

static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
			      *ring_params, int ring_type, int ring_num)
{
	int msi_group_number;
	int msi_data_count;
	int ret;
	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;

	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					    &msi_data_count, &msi_data_start,
					    &msi_irq_start);

	if (ret)
		return;

	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
						       ring_num);
	if (msi_group_number < 0) {
		dp_init_info("%pK: ring not part of an ext_group; ring_type: %d, ring_num %d",
			     soc, ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		return;
	}

	if (dp_is_msi_group_number_invalid(msi_group_number, msi_data_count)) {
		dp_init_warn("%pK: 2 msi_groups will share an msi; msi_group_num %d",
			     soc, msi_group_number);

		QDF_ASSERT(0);
	}

	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = (msi_group_number % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;
}
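
/*
 * Worked example with assumed platform values: if pld reports
 * msi_data_count = 4 and msi_data_start = 2, a ring resolving to
 * msi_group_number 5 gets msi_data = (5 % 4) + 2 = 3. Groups 1 and 5
 * would then share one MSI vector, which is exactly the situation the
 * dp_is_msi_group_number_invalid() warning above flags.
 */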

#ifdef FEATURE_AST
/**
 * dp_print_peer_ast_entries() - Dump AST entries of peer
 * @soc: Datapath soc handle
 * @peer: Datapath peer
 * @arg: argument to iterate function
 *
 * return void
 */
static void
dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
	struct dp_ast_entry *ase, *tmp_ase;
	uint32_t num_entries = 0;
	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
			"NONE", "STATIC", "SELF", "WDS", "HMWDS", "BSS",
			"DA", "HMWDS_SEC"};

	DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
	    DP_PRINT_STATS("%6d mac_addr = "QDF_MAC_ADDR_FMT
		    " peer_mac_addr = "QDF_MAC_ADDR_FMT
		    " peer_id = %u"
		    " type = %s"
		    " next_hop = %d"
		    " is_active = %d"
		    " ast_idx = %d"
		    " ast_hash = %d"
		    " delete_in_progress = %d"
		    " pdev_id = %d"
		    " vdev_id = %d",
		    ++num_entries,
		    QDF_MAC_ADDR_REF(ase->mac_addr.raw),
		    QDF_MAC_ADDR_REF(peer->mac_addr.raw),
		    ase->peer_id,
		    type[ase->type],
		    ase->next_hop,
		    ase->is_active,
		    ase->ast_idx,
		    ase->ast_hash_value,
		    ase->delete_in_progress,
		    ase->pdev_id,
		    ase->vdev_id);
	}
}

/**
 * dp_print_ast_stats() - Dump AST table contents
 * @soc: Datapath soc handle
 *
 * return void
 */
void dp_print_ast_stats(struct dp_soc *soc)
{
	DP_PRINT_STATS("AST Stats:");
	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
	DP_PRINT_STATS("	Entries MAP ERR  = %d", soc->stats.ast.map_err);
	DP_PRINT_STATS("	Entries Mismatch ERR  = %d",
		       soc->stats.ast.ast_mismatch);

	DP_PRINT_STATS("AST Table:");

	qdf_spin_lock_bh(&soc->ast_lock);

	dp_soc_iterate_peer(soc, dp_print_peer_ast_entries, NULL,
			    DP_MOD_ID_GENERIC_STATS);

	qdf_spin_unlock_bh(&soc->ast_lock);
}
#else
void dp_print_ast_stats(struct dp_soc *soc)
{
	DP_PRINT_STATS("AST Stats not available. Enable FEATURE_AST");
	return;
}
#endif

/**
 * dp_print_peer_info() - Dump peer info
 * @soc: Datapath soc handle
 * @peer: Datapath peer handle
 * @arg: argument to iter function
 *
 * return void
 */
static void
dp_print_peer_info(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
	DP_PRINT_STATS("    peer_mac_addr = "QDF_MAC_ADDR_FMT
		       " nawds_enabled = %d"
		       " bss_peer = %d"
		       " wds_enabled = %d"
		       " tx_cap_enabled = %d"
		       " rx_cap_enabled = %d"
		       " peer id = %d",
		       QDF_MAC_ADDR_REF(peer->mac_addr.raw),
		       peer->nawds_enabled,
		       peer->bss_peer,
		       peer->wds_enabled,
		       peer->tx_cap_enabled,
		       peer->rx_cap_enabled,
		       peer->peer_id);
}

/**
 * dp_print_peer_table() - Dump all Peer stats
 * @vdev: Datapath Vdev handle
 *
 * return void
 */
static void dp_print_peer_table(struct dp_vdev *vdev)
{
	DP_PRINT_STATS("Dumping Peer Table  Stats:");
	dp_vdev_iterate_peer(vdev, dp_print_peer_info, NULL,
			     DP_MOD_ID_GENERIC_STATS);
}

#ifdef WLAN_DP_PER_RING_TYPE_CONFIG
/**
 * dp_srng_configure_interrupt_thresholds() - Retrieve interrupt
 * threshold values from the wlan_srng_cfg table for each ring type
 * @soc: device handle
 * @ring_params: per ring specific parameters
 * @ring_type: Ring type
 * @ring_num: Ring number for a given ring type
 *
 * Fill the ring params with the interrupt threshold
 * configuration parameters available in the per ring type wlan_srng_cfg
 * table.
 *
 * Return: None
 */
static void
dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
				       struct hal_srng_params *ring_params,
				       int ring_type, int ring_num,
				       int num_entries)
{
	if (ring_type == REO_DST) {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
	} else if (ring_type == WBM2SW_RELEASE && (ring_num == 3)) {
		ring_params->intr_timer_thres_us =
				wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
				wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
	} else {
		ring_params->intr_timer_thres_us =
				soc->wlan_srng_cfg[ring_type].timer_threshold;
		ring_params->intr_batch_cntr_thres_entries =
				soc->wlan_srng_cfg[ring_type].batch_count_threshold;
	}
	ring_params->low_threshold =
			soc->wlan_srng_cfg[ring_type].low_threshold;
	if (ring_params->low_threshold)
		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
}
#else
static void
dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
				       struct hal_srng_params *ring_params,
				       int ring_type, int ring_num,
				       int num_entries)
{
	if (ring_type == REO_DST) {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
	} else {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
	}

	/* Enable low threshold interrupts for rx buffer rings (regular and
	 * monitor buffer rings).
	 * TODO: See if this is required for any other ring
	 */
	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
	    (ring_type == RXDMA_MONITOR_STATUS)) {
		/* TODO: Setting low threshold to 1/8th of ring size
		 * see if this needs to be configurable
		 */
		ring_params->low_threshold = num_entries >> 3;
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
		ring_params->intr_batch_cntr_thres_entries = 0;
	}

	/* During initialisation monitor rings are only filled with
	 * MON_BUF_MIN_ENTRIES entries. So low threshold needs to be set to
	 * a value less than that. Low threshold value is reconfigured again
	 * to 1/8th of the ring size when monitor vap is created.
	 */
	if (ring_type == RXDMA_MONITOR_BUF)
		ring_params->low_threshold = MON_BUF_MIN_ENTRIES >> 1;

	/* In case of PCI chipsets, we don't have PPDU end interrupts,
	 * so the MONITOR_STATUS ring is reaped by receiving MSI from the
	 * SRNG. Keep the batch threshold at 4 so that an interrupt is
	 * received for every 4 entries in the MONITOR_STATUS ring.
	 */
	if ((ring_type == RXDMA_MONITOR_STATUS) &&
	    (soc->intr_mode == DP_INTR_MSI))
		ring_params->intr_batch_cntr_thres_entries = 4;
}
#endif
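
/*
 * A worked example for the fallback path above: an RXDMA_BUF ring with
 * num_entries = 4096 gets low_threshold = 4096 >> 3 = 512, so a
 * low-threshold interrupt prompts a refill once fewer than roughly
 * 1/8th of the ring's buffers remain, while batch counting is disabled
 * (intr_batch_cntr_thres_entries = 0) in favour of the timer threshold.
 */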

#ifdef DP_MEM_PRE_ALLOC

void *dp_context_alloc_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
			   size_t ctxt_size)
{
	void *ctxt_mem;

	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_context) {
		dp_warn("dp_prealloc_get_context null!");
		goto dynamic_alloc;
	}

	ctxt_mem = soc->cdp_soc.ol_ops->dp_prealloc_get_context(ctxt_type);

	if (ctxt_mem)
		goto end;

dynamic_alloc:
	dp_info("Pre-alloc of ctxt failed. Dynamic allocation");
	ctxt_mem = qdf_mem_malloc(ctxt_size);
end:
	return ctxt_mem;
}
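
/*
 * A minimal usage sketch: callers pair dp_context_alloc_mem() with
 * dp_context_free_mem() and transparently fall back to dynamic
 * allocation when no buffer was pre-allocated for the context type
 * (DP_PDEV_TYPE is shown purely for illustration):
 *
 *   struct dp_pdev *pdev =
 *           dp_context_alloc_mem(soc, DP_PDEV_TYPE, sizeof(*pdev));
 *   ...
 *   dp_context_free_mem(soc, DP_PDEV_TYPE, pdev);
 */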

void dp_context_free_mem(struct dp_soc *soc, enum dp_ctxt_type ctxt_type,
			 void *vaddr)
{
	QDF_STATUS status;

	if (soc->cdp_soc.ol_ops->dp_prealloc_put_context) {
		status = soc->cdp_soc.ol_ops->dp_prealloc_put_context(
								ctxt_type,
								vaddr);
	} else {
		dp_warn("dp_prealloc_put_context null!");
1674 		status = QDF_STATUS_E_NOSUPPORT;
1675 	}
1676 
1677 	if (QDF_IS_STATUS_ERROR(status)) {
1678 		dp_info("Context not pre-allocated");
1679 		qdf_mem_free(vaddr);
1680 	}
1681 }
1682 
1683 static inline
1684 void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
1685 					   struct dp_srng *srng,
1686 					   uint32_t ring_type)
1687 {
1688 	void *mem;
1689 
1690 	qdf_assert(!srng->is_mem_prealloc);
1691 
1692 	if (!soc->cdp_soc.ol_ops->dp_prealloc_get_consistent) {
1693 		dp_warn("dp_prealloc_get_consistent is null!");
1694 		goto qdf;
1695 	}
1696 
1697 	mem =
1698 		soc->cdp_soc.ol_ops->dp_prealloc_get_consistent
1699 						(&srng->alloc_size,
1700 						 &srng->base_vaddr_unaligned,
1701 						 &srng->base_paddr_unaligned,
1702 						 &srng->base_paddr_aligned,
1703 						 DP_RING_BASE_ALIGN, ring_type);
1704 
1705 	if (mem) {
1706 		srng->is_mem_prealloc = true;
1707 		goto end;
1708 	}
1709 qdf:
1710 	mem =  qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
1711 						&srng->base_vaddr_unaligned,
1712 						&srng->base_paddr_unaligned,
1713 						&srng->base_paddr_aligned,
1714 						DP_RING_BASE_ALIGN);
1715 end:
1716 	dp_info("%s memory %pK dp_srng %pK ring_type %d alloc_size %d num_entries %d",
1717 		srng->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", mem,
1718 		srng, ring_type, srng->alloc_size, srng->num_entries);
1719 	return mem;
1720 }
1721 
1722 static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
1723 					       struct dp_srng *srng)
1724 {
1725 	if (srng->is_mem_prealloc) {
1726 		if (!soc->cdp_soc.ol_ops->dp_prealloc_put_consistent) {
1727 			dp_warn("dp_prealloc_put_consistent is null!");
1728 			QDF_BUG(0);
1729 			return;
1730 		}
1731 		soc->cdp_soc.ol_ops->dp_prealloc_put_consistent
1732 						(srng->alloc_size,
1733 						 srng->base_vaddr_unaligned,
1734 						 srng->base_paddr_unaligned);
1735 
1736 	} else {
1737 		qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1738 					srng->alloc_size,
1739 					srng->base_vaddr_unaligned,
1740 					srng->base_paddr_unaligned, 0);
1741 	}
1742 }
1743 
1744 void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
1745 				   enum dp_desc_type desc_type,
1746 				   struct qdf_mem_multi_page_t *pages,
1747 				   size_t element_size,
1748 				   uint16_t element_num,
1749 				   qdf_dma_context_t memctxt,
1750 				   bool cacheable)
1751 {
1752 	if (!soc->cdp_soc.ol_ops->dp_get_multi_pages) {
1753 		dp_warn("dp_get_multi_pages is null!");
1754 		goto qdf;
1755 	}
1756 
1757 	pages->num_pages = 0;
1758 	pages->is_mem_prealloc = 0;
1759 	soc->cdp_soc.ol_ops->dp_get_multi_pages(desc_type,
1760 						element_size,
1761 						element_num,
1762 						pages,
1763 						cacheable);
1764 	if (pages->num_pages)
1765 		goto end;
1766 
1767 qdf:
1768 	qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size,
1769 				  element_num, memctxt, cacheable);
1770 end:
1771 	dp_info("%s desc_type %d element_size %d element_num %d cacheable %d",
1772 		pages->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc",
1773 		desc_type, (int)element_size, element_num, cacheable);
1774 }
1775 
1776 void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
1777 				  enum dp_desc_type desc_type,
1778 				  struct qdf_mem_multi_page_t *pages,
1779 				  qdf_dma_context_t memctxt,
1780 				  bool cacheable)
1781 {
1782 	if (pages->is_mem_prealloc) {
1783 		if (!soc->cdp_soc.ol_ops->dp_put_multi_pages) {
1784 			dp_warn("dp_put_multi_pages is null!");
1785 			QDF_BUG(0);
1786 			return;
1787 		}
1788 
1789 		soc->cdp_soc.ol_ops->dp_put_multi_pages(desc_type, pages);
1790 		qdf_mem_zero(pages, sizeof(*pages));
1791 	} else {
1792 		qdf_mem_multi_pages_free(soc->osdev, pages,
1793 					 memctxt, cacheable);
1794 	}
1795 }
1796 
1797 #else
1798 
1799 static inline
1800 void *dp_srng_aligned_mem_alloc_consistent(struct dp_soc *soc,
1801 					   struct dp_srng *srng,
1802 					   uint32_t ring_type)
1804 {
1805 	return qdf_aligned_mem_alloc_consistent(soc->osdev, &srng->alloc_size,
1806 						&srng->base_vaddr_unaligned,
1807 						&srng->base_paddr_unaligned,
1808 						&srng->base_paddr_aligned,
1809 						DP_RING_BASE_ALIGN);
1810 }
1811 
1812 static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
1813 					       struct dp_srng *srng)
1814 {
1815 	qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1816 				srng->alloc_size,
1817 				srng->base_vaddr_unaligned,
1818 				srng->base_paddr_unaligned, 0);
1819 }
1820 
1821 #endif /* DP_MEM_PRE_ALLOC */
1822 
1823 /*
1824  * dp_srng_free() - Free SRNG memory
1825  * @soc  : Data path soc handle
1826  * @srng : SRNG pointer
1827  *
1828  * return: None
1829  */
1830 static void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng)
1831 {
1832 	if (srng->alloc_size && srng->base_vaddr_unaligned) {
1833 		if (!srng->cached) {
1834 			dp_srng_mem_free_consistent(soc, srng);
1835 		} else {
1836 			qdf_mem_free(srng->base_vaddr_unaligned);
1837 		}
1838 		srng->alloc_size = 0;
1839 		srng->base_vaddr_unaligned = NULL;
1840 	}
1841 	srng->hal_srng = NULL;
1842 }
1843 
1844 /*
1845  * dp_srng_init() - Initialize SRNG
1846  * @soc  : Data path soc handle
1847  * @srng : SRNG pointer
1848  * @ring_type : Ring Type
1849  * @ring_num: Ring number
1850  * @mac_id: mac_id
1851  *
1852  * return: QDF_STATUS
1853  */
1854 static QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng,
1855 			       int ring_type, int ring_num, int mac_id)
1856 {
1857 	hal_soc_handle_t hal_soc = soc->hal_soc;
1858 	struct hal_srng_params ring_params;
1859 
1860 	if (srng->hal_srng) {
1861 		dp_init_err("%pK: Ring type: %d, num:%d is already initialized",
1862 			    soc, ring_type, ring_num);
1863 		return QDF_STATUS_SUCCESS;
1864 	}
1865 
1866 	/* memset the srng ring to zero */
1867 	qdf_mem_zero(srng->base_vaddr_unaligned, srng->alloc_size);
1868 
1869 	qdf_mem_zero(&ring_params, sizeof(struct hal_srng_params));
1870 	ring_params.ring_base_paddr = srng->base_paddr_aligned;
1871 	ring_params.ring_base_vaddr = srng->base_vaddr_aligned;
1872 
1873 	ring_params.num_entries = srng->num_entries;
1874 
1875 	dp_info("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
1876 		ring_type, ring_num,
1877 		(void *)ring_params.ring_base_vaddr,
1878 		(void *)ring_params.ring_base_paddr,
1879 		ring_params.num_entries);
1880 
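	/*
	 * In MSI mode, program the ring's MSI address/data so the HW can
	 * raise a per-ring MSI; otherwise zero the MSI fields so the ring
	 * is serviced via the integrated (line) interrupt or poll timer.
	 */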
1881 	if (soc->intr_mode == DP_INTR_MSI) {
1882 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
1883 		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
1884 				 ring_type, ring_num);
1885 
1886 	} else {
1887 		ring_params.msi_data = 0;
1888 		ring_params.msi_addr = 0;
1889 		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
1890 				 ring_type, ring_num);
1891 	}
1892 
1893 	dp_srng_configure_interrupt_thresholds(soc, &ring_params,
1894 					       ring_type, ring_num,
1895 					       srng->num_entries);
1896 
1897 	if (srng->cached)
1898 		ring_params.flags |= HAL_SRNG_CACHED_DESC;
1899 
1900 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
1901 					mac_id, &ring_params);
1902 
1903 	if (!srng->hal_srng) {
1904 		dp_srng_free(soc, srng);
1905 		return QDF_STATUS_E_FAILURE;
1906 	}
1907 
1908 	return QDF_STATUS_SUCCESS;
1909 }
1910 
1911 /*
1912  * dp_srng_alloc() - Allocate memory for SRNG
1913  * @soc  : Data path soc handle
1914  * @srng : SRNG pointer
1915  * @ring_type : Ring Type
1916  * @num_entries: Number of entries
1917  * @cached: cached flag variable
1918  *
1919  * return: QDF_STATUS
1920  */
1921 static QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
1922 				int ring_type, uint32_t num_entries,
1923 				bool cached)
1924 {
1925 	hal_soc_handle_t hal_soc = soc->hal_soc;
1926 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
1927 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
1928 
1929 	if (srng->base_vaddr_unaligned) {
1930 		dp_init_err("%pK: Ring type: %d, is already allocated",
1931 			    soc, ring_type);
1932 		return QDF_STATUS_SUCCESS;
1933 	}
1934 
1935 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
1936 	srng->hal_srng = NULL;
1937 	srng->alloc_size = num_entries * entry_size;
1938 	srng->num_entries = num_entries;
1939 	srng->cached = cached;
1940 
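	/*
	 * Uncached rings are placed in DMA-coherent memory; cached rings
	 * use normal cacheable host memory (HAL_SRNG_CACHED_DESC is set
	 * for them at init time) for faster descriptor access.
	 */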
1941 	if (!cached) {
1942 		srng->base_vaddr_aligned =
1943 		    dp_srng_aligned_mem_alloc_consistent(soc,
1944 							 srng,
1945 							 ring_type);
1946 	} else {
1947 		srng->base_vaddr_aligned = qdf_aligned_malloc(
1948 					&srng->alloc_size,
1949 					&srng->base_vaddr_unaligned,
1950 					&srng->base_paddr_unaligned,
1951 					&srng->base_paddr_aligned,
1952 					DP_RING_BASE_ALIGN);
1953 	}
1954 
1955 	if (!srng->base_vaddr_aligned)
1956 		return QDF_STATUS_E_NOMEM;
1957 
1958 	return QDF_STATUS_SUCCESS;
1959 }
1960 
1961 /*
1962  * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
1963  * @soc: DP SOC handle
1964  * @srng: source ring structure
1965  * @ring_type: type of ring
1966  * @ring_num: ring number
1967  *
1968  * Return: None
1969  */
1970 static void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
1971 			   int ring_type, int ring_num)
1972 {
1973 	if (!srng->hal_srng) {
1974 		dp_init_err("%pK: Ring type: %d, num:%d not setup",
1975 			    soc, ring_type, ring_num);
1976 		return;
1977 	}
1978 
1979 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
1980 	srng->hal_srng = NULL;
1981 }
1982 
1983 /* TODO: Need this interface from HIF */
1984 void *hif_get_hal_handle(struct hif_opaque_softc *hif_handle);
1985 
1986 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
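/*
 * dp_srng_access_start()/dp_srng_access_end() - Wrappers around the HAL
 * ring access APIs that additionally snapshot the SW head/tail pointers
 * into the HIF event history when called from an interrupt context
 * (int_ctx non-NULL), which helps in debugging ring stalls.
 */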
1987 int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
1988 			 hal_ring_handle_t hal_ring_hdl)
1989 {
1990 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
1991 	uint32_t hp, tp;
1992 	uint8_t ring_id;
1993 
1994 	if (!int_ctx)
1995 		return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
1996 
1997 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
1998 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
1999 
2000 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
2001 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_START);
2002 
2003 	return dp_hal_srng_access_start(hal_soc, hal_ring_hdl);
2004 }
2005 
2006 void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
2007 			hal_ring_handle_t hal_ring_hdl)
2008 {
2009 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
2010 	uint32_t hp, tp;
2011 	uint8_t ring_id;
2012 
2013 	if (!int_ctx)
2014 		return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
2015 
2016 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
2017 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
2018 
2019 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
2020 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_END);
2021 
2022 	return dp_hal_srng_access_end(hal_soc, hal_ring_hdl);
2023 }
2024 
2025 static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
2026 					      uint8_t hist_group_id)
2027 {
2028 	hif_record_event(dp_soc->hif_handle, hist_group_id,
2029 			 0, 0, 0, HIF_EVENT_TIMER_ENTRY);
2030 }
2031 
2032 static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
2033 					     uint8_t hist_group_id)
2034 {
2035 	hif_record_event(dp_soc->hif_handle, hist_group_id,
2036 			 0, 0, 0, HIF_EVENT_TIMER_EXIT);
2037 }
2038 #else
2039 
2040 static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
2041 					      uint8_t hist_group_id)
2042 {
2043 }
2044 
2045 static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
2046 					     uint8_t hist_group_id)
2047 {
2048 }
2049 
2050 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
2051 
2052 /*
2053  * dp_should_timer_irq_yield() - Decide if the bottom half should yield
2054  * @soc: DP soc handle
2055  * @work_done: work done in softirq context
2056  * @start_time: start time for the softirq
2057  *
2058  * Return: enum with yield code
2059  */
2060 static enum timer_yield_status
2061 dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
2062 			  uint64_t start_time)
2063 {
2064 	uint64_t cur_time = qdf_get_log_timestamp();
2065 
2066 	if (!work_done)
2067 		return DP_TIMER_WORK_DONE;
2068 
2069 	if (cur_time - start_time > DP_MAX_TIMER_EXEC_TIME_TICKS)
2070 		return DP_TIMER_TIME_EXHAUST;
2071 
2072 	return DP_TIMER_NO_YIELD;
2073 }
2074 
2075 /**
2076  * dp_process_lmac_rings() - Process LMAC rings
2077  * @int_ctx: interrupt context
2078  * @total_budget: budget of work which can be done
2079  *
2080  * Return: work done
2081  */
2082 static int dp_process_lmac_rings(struct dp_intr *int_ctx, int total_budget)
2083 {
2084 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2085 	struct dp_soc *soc = int_ctx->soc;
2086 	uint32_t remaining_quota = total_budget;
2087 	struct dp_pdev *pdev = NULL;
2088 	uint32_t work_done  = 0;
2089 	int budget = total_budget;
2090 	int ring = 0;
2091 
2092 	/* Process LMAC interrupts */
2093 	for (ring = 0; ring < MAX_NUM_LMAC_HW; ring++) {
2094 		int mac_for_pdev = ring;
2095 
2096 		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
2097 		if (!pdev)
2098 			continue;
2099 		if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
2100 			work_done = dp_mon_process(soc, int_ctx, mac_for_pdev,
2101 						   remaining_quota);
2102 			if (work_done)
2103 				intr_stats->num_rx_mon_ring_masks++;
2104 			budget -= work_done;
2105 			if (budget <= 0)
2106 				goto budget_done;
2107 			remaining_quota = budget;
2108 		}
2109 
2110 		if (int_ctx->rxdma2host_ring_mask &
2111 				(1 << mac_for_pdev)) {
2112 			work_done = dp_rxdma_err_process(int_ctx, soc,
2113 							 mac_for_pdev,
2114 							 remaining_quota);
2115 			if (work_done)
2116 				intr_stats->num_rxdma2host_ring_masks++;
2117 			budget -=  work_done;
2118 			if (budget <= 0)
2119 				goto budget_done;
2120 			remaining_quota = budget;
2121 		}
2122 
2123 		if (int_ctx->host2rxdma_ring_mask &
2124 					(1 << mac_for_pdev)) {
2125 			union dp_rx_desc_list_elem_t *desc_list = NULL;
2126 			union dp_rx_desc_list_elem_t *tail = NULL;
2127 			struct dp_srng *rx_refill_buf_ring;
2128 
2129 			if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
2130 				rx_refill_buf_ring =
2131 					&soc->rx_refill_buf_ring[mac_for_pdev];
2132 			else
2133 				rx_refill_buf_ring =
2134 					&soc->rx_refill_buf_ring[pdev->lmac_id];
2135 
2136 			intr_stats->num_host2rxdma_ring_masks++;
2137 			DP_STATS_INC(pdev, replenish.low_thresh_intrs,
2138 				     1);
2139 			dp_rx_buffers_replenish(soc, mac_for_pdev,
2140 						rx_refill_buf_ring,
2141 						&soc->rx_desc_buf[mac_for_pdev],
2142 						0, &desc_list, &tail);
2143 		}
2144 	}
2145 
2146 budget_done:
2147 	return total_budget - budget;
2148 }
2149 
2150 #ifndef QCA_HOST_MODE_WIFI_DISABLED
2151 
2152 /*
2153  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
2154  * @dp_ctx: DP SOC handle
2155  * @budget: Number of frames/descriptors that can be processed in one shot
2156  *
2157  * Return: remaining budget/quota for the soc device
2158  */
2159 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
2160 {
2161 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2162 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2163 	struct dp_soc *soc = int_ctx->soc;
2164 	int ring = 0;
2165 	uint32_t work_done  = 0;
2166 	int budget = dp_budget;
2167 	uint8_t tx_mask = int_ctx->tx_ring_mask;
2168 	uint8_t rx_mask = int_ctx->rx_ring_mask;
2169 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
2170 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
2171 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
2172 	uint32_t remaining_quota = dp_budget;
2173 
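	/*
	 * Budget accounting: each ring handler consumes from 'budget';
	 * once it reaches zero we bail out via budget_done, and the
	 * return value (dp_budget - budget) reports the work actually
	 * done, leaving the remainder for the next invocation.
	 */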
2174 	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x\n",
2175 			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
2176 			 reo_status_mask,
2177 			 int_ctx->rx_mon_ring_mask,
2178 			 int_ctx->host2rxdma_ring_mask,
2179 			 int_ctx->rxdma2host_ring_mask);
2180 
2181 	/* Process Tx completion interrupts first to return buffers */
2182 	while (tx_mask) {
2183 		if (tx_mask & 0x1) {
2184 			work_done = dp_tx_comp_handler(int_ctx,
2185 						       soc,
2186 						       soc->tx_comp_ring[ring].hal_srng,
2187 						       ring, remaining_quota);
2188 
2189 			if (work_done) {
2190 				intr_stats->num_tx_ring_masks[ring]++;
2191 				dp_verbose_debug("tx mask 0x%x ring %d, budget %d, work_done %d",
2192 						 tx_mask, ring, budget,
2193 						 work_done);
2194 			}
2195 
2196 			budget -= work_done;
2197 			if (budget <= 0)
2198 				goto budget_done;
2199 
2200 			remaining_quota = budget;
2201 		}
2202 		tx_mask = tx_mask >> 1;
2203 		ring++;
2204 	}
2205 
2206 	/* Process REO Exception ring interrupt */
2207 	if (rx_err_mask) {
2208 		work_done = dp_rx_err_process(int_ctx, soc,
2209 					      soc->reo_exception_ring.hal_srng,
2210 					      remaining_quota);
2211 
2212 		if (work_done) {
2213 			intr_stats->num_rx_err_ring_masks++;
2214 			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
2215 					 work_done, budget);
2216 		}
2217 
2218 		budget -=  work_done;
2219 		if (budget <= 0) {
2220 			goto budget_done;
2221 		}
2222 		remaining_quota = budget;
2223 	}
2224 
2225 	/* Process Rx WBM release ring interrupt */
2226 	if (rx_wbm_rel_mask) {
2227 		work_done = dp_rx_wbm_err_process(int_ctx, soc,
2228 						  soc->rx_rel_ring.hal_srng,
2229 						  remaining_quota);
2230 
2231 		if (work_done) {
2232 			intr_stats->num_rx_wbm_rel_ring_masks++;
2233 			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
2234 					 work_done, budget);
2235 		}
2236 
2237 		budget -=  work_done;
2238 		if (budget <= 0) {
2239 			goto budget_done;
2240 		}
2241 		remaining_quota = budget;
2242 	}
2243 
2244 	/* Process Rx interrupts */
2245 	if (rx_mask) {
2246 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
2247 			if (!(rx_mask & (1 << ring)))
2248 				continue;
2249 			work_done = soc->arch_ops.dp_rx_process(int_ctx,
2250 						  soc->reo_dest_ring[ring].hal_srng,
2251 						  ring,
2252 						  remaining_quota);
2253 			if (work_done) {
2254 				intr_stats->num_rx_ring_masks[ring]++;
2255 				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
2256 						 rx_mask, ring,
2257 						 work_done, budget);
2258 				budget -=  work_done;
2259 				if (budget <= 0)
2260 					goto budget_done;
2261 				remaining_quota = budget;
2262 			}
2263 		}
2264 	}
2265 
2266 	if (reo_status_mask) {
2267 		if (dp_reo_status_ring_handler(int_ctx, soc))
2268 			int_ctx->intr_stats.num_reo_status_ring_masks++;
2269 	}
2270 
2271 	if (qdf_unlikely(!(soc->mon_vdev_timer_state & MON_VDEV_TIMER_RUNNING))) {
2272 		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
2273 		if (work_done) {
2274 			budget -=  work_done;
2275 			if (budget <= 0)
2276 				goto budget_done;
2277 			remaining_quota = budget;
2278 		}
2279 	}
2280 
2281 	qdf_lro_flush(int_ctx->lro_ctx);
2282 	intr_stats->num_masks++;
2283 
2284 budget_done:
2285 	return dp_budget - budget;
2286 }
2287 
2288 #else /* QCA_HOST_MODE_WIFI_DISABLED */
2289 
2290 /*
2291  * dp_service_srngs() - Top level handler for DP Monitor Ring interrupts
2292  * @dp_ctx: DP SOC handle
2293  * @budget: Number of frames/descriptors that can be processed in one shot
2294  *
2295  * Return: remaining budget/quota for the soc device
2296  */
2297 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
2298 {
2299 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
2300 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
2301 	struct dp_soc *soc = int_ctx->soc;
2302 	uint32_t remaining_quota = dp_budget;
2303 	uint32_t work_done  = 0;
2304 	int budget = dp_budget;
2305 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
2306 
2307 	if (reo_status_mask) {
2308 		if (dp_reo_status_ring_handler(int_ctx, soc))
2309 			int_ctx->intr_stats.num_reo_status_ring_masks++;
2310 	}
2311 
2312 	if (qdf_unlikely(!(soc->mon_vdev_timer_state & MON_VDEV_TIMER_RUNNING))) {
2313 		work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
2314 		if (work_done) {
2315 			budget -=  work_done;
2316 			if (budget <= 0)
2317 				goto budget_done;
2318 			remaining_quota = budget;
2319 		}
2320 	}
2321 
2322 	qdf_lro_flush(int_ctx->lro_ctx);
2323 	intr_stats->num_masks++;
2324 
2325 budget_done:
2326 	return dp_budget - budget;
2327 }
2328 
2329 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
2330 
2331 /* dp_mon_vdev_timer() - timer poll for monitor rings
2332  *
2333  * @arg: SoC Handle
2334  *
2335  * Return: None
2336  *
2337  */
2338 static void dp_mon_vdev_timer(void *arg)
2339 {
2340 	struct dp_soc *soc = (struct dp_soc *)arg;
2341 	struct dp_pdev *pdev = soc->pdev_list[0];
2342 	enum timer_yield_status yield = DP_TIMER_NO_YIELD;
2343 	uint32_t work_done  = 0, total_work_done = 0;
2344 	int budget = 0xffff;
2345 	uint32_t remaining_quota = budget;
2346 	uint64_t start_time;
2347 	uint32_t lmac_id = DP_MON_INVALID_LMAC_ID;
2348 	uint32_t lmac_iter;
2349 	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
2350 
2351 	if (!qdf_atomic_read(&soc->cmn_init_done))
2352 		return;
2353 
2354 	if (pdev->mon_chan_band != REG_BAND_UNKNOWN)
2355 		lmac_id = pdev->ch_band_lmac_id_mapping[pdev->mon_chan_band];
2356 
2357 	start_time = qdf_get_log_timestamp();
2358 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
2359 
2360 	while (yield == DP_TIMER_NO_YIELD) {
2361 		for (lmac_iter = 0; lmac_iter < max_mac_rings; lmac_iter++) {
2362 			if (lmac_iter == lmac_id)
2363 				work_done = dp_mon_process(
2364 						    soc, NULL,
2365 						    lmac_iter, remaining_quota);
2366 			else
2367 				work_done =
2368 					dp_mon_drop_packets_for_mac(pdev,
2369 								    lmac_iter,
2370 								    remaining_quota);
2371 			if (work_done) {
2372 				budget -=  work_done;
2373 				if (budget <= 0) {
2374 					yield = DP_TIMER_WORK_EXHAUST;
2375 					goto budget_done;
2376 				}
2377 				remaining_quota = budget;
2378 				total_work_done += work_done;
2379 			}
2380 		}
2381 
2382 		yield = dp_should_timer_irq_yield(soc, total_work_done,
2383 						  start_time);
2384 		total_work_done = 0;
2385 	}
2386 
2387 budget_done:
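	/*
	 * If the loop stopped because the budget or time slice ran out,
	 * re-arm the timer for 1 ms to resume promptly; otherwise fall
	 * back to the normal poll interval.
	 */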
2388 	if (yield == DP_TIMER_WORK_EXHAUST ||
2389 	    yield == DP_TIMER_TIME_EXHAUST)
2390 		qdf_timer_mod(&soc->mon_vdev_timer, 1);
2391 	else
2392 		qdf_timer_mod(&soc->mon_vdev_timer, DP_INTR_POLL_TIMER_MS);
2393 }
2394 
2395 /* dp_interrupt_timer() - timer poll for interrupts
2396  *
2397  * @arg: SoC Handle
2398  *
2399  * Return: None
2400  *
2401  */
2402 static void dp_interrupt_timer(void *arg)
2403 {
2404 	struct dp_soc *soc = (struct dp_soc *) arg;
2405 	struct dp_pdev *pdev = soc->pdev_list[0];
2406 	enum timer_yield_status yield = DP_TIMER_NO_YIELD;
2407 	uint32_t work_done  = 0, total_work_done = 0;
2408 	int budget = 0xffff, i;
2409 	uint32_t remaining_quota = budget;
2410 	uint64_t start_time;
2411 	uint32_t lmac_id = DP_MON_INVALID_LMAC_ID;
2412 	uint8_t dp_intr_id = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
2413 	uint32_t lmac_iter;
2414 	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
2415 
2416 	/*
2417 	 * This logic puts all data path interfacing rings (UMAC/LMAC) and
2418 	 * monitor rings into polling mode when NSS offload is disabled.
2419 	 */
2420 	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx) &&
2421 	    !wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
2422 		if (qdf_atomic_read(&soc->cmn_init_done)) {
2423 			for (i = 0; i < wlan_cfg_get_num_contexts(
2424 						soc->wlan_cfg_ctx); i++)
2425 				dp_service_srngs(&soc->intr_ctx[i], 0xffff);
2426 
2427 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
2428 		}
2429 		return;
2430 	}
2431 
2432 	if (!qdf_atomic_read(&soc->cmn_init_done))
2433 		return;
2434 
2435 	if (pdev->mon_chan_band != REG_BAND_UNKNOWN) {
2436 		lmac_id = pdev->ch_band_lmac_id_mapping[pdev->mon_chan_band];
2437 		if (qdf_likely(lmac_id != DP_MON_INVALID_LMAC_ID)) {
2438 			dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id];
2439 			dp_srng_record_timer_entry(soc, dp_intr_id);
2440 		}
2441 	}
2442 
2443 	start_time = qdf_get_log_timestamp();
2444 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
2445 
2446 	while (yield == DP_TIMER_NO_YIELD) {
2447 		for (lmac_iter = 0; lmac_iter < max_mac_rings; lmac_iter++) {
2448 			if (lmac_iter == lmac_id)
2449 				work_done = dp_mon_process(soc,
2450 						    &soc->intr_ctx[dp_intr_id],
2451 						    lmac_iter, remaining_quota);
2452 			else
2453 				work_done = dp_mon_drop_packets_for_mac(pdev,
2454 							       lmac_iter,
2455 							       remaining_quota);
2456 			if (work_done) {
2457 				budget -=  work_done;
2458 				if (budget <= 0) {
2459 					yield = DP_TIMER_WORK_EXHAUST;
2460 					goto budget_done;
2461 				}
2462 				remaining_quota = budget;
2463 				total_work_done += work_done;
2464 			}
2465 		}
2466 
2467 		yield = dp_should_timer_irq_yield(soc, total_work_done,
2468 						  start_time);
2469 		total_work_done = 0;
2470 	}
2471 
2472 budget_done:
2473 	if (yield == DP_TIMER_WORK_EXHAUST ||
2474 	    yield == DP_TIMER_TIME_EXHAUST)
2475 		qdf_timer_mod(&soc->int_timer, 1);
2476 	else
2477 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
2478 
2479 	if (lmac_id != DP_MON_INVALID_LMAC_ID)
2480 		dp_srng_record_timer_exit(soc, dp_intr_id);
2481 }
2482 
2483 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
2484 static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
2485 					struct dp_intr *intr_ctx)
2486 {
2487 	if (intr_ctx->rx_mon_ring_mask)
2488 		return true;
2489 
2490 	return false;
2491 }
2492 #else
2493 static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
2494 					struct dp_intr *intr_ctx)
2495 {
2496 	return false;
2497 }
2498 #endif
2499 
2500 /*
2501  * dp_soc_attach_poll() - Register handlers for DP interrupts
2502  * @txrx_soc: DP SOC handle
2503  *
2504  * Host driver will register "DP_NUM_INTERRUPT_CONTEXTS" contexts that are
2505  * serviced from a poll timer. Each context has a tx_ring_mask, rx_ring_mask,
2506  * and rx_monitor_ring mask to indicate the rings it processes.
2507  *
2508  * Return: 0 for success, nonzero for failure.
2509  */
2510 static QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc)
2511 {
2512 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2513 	int i;
2514 	int lmac_id = 0;
2515 
2516 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
2517 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
2518 	soc->intr_mode = DP_INTR_POLL;
2519 
2520 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
2521 		soc->intr_ctx[i].dp_intr_id = i;
2522 		soc->intr_ctx[i].tx_ring_mask =
2523 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
2524 		soc->intr_ctx[i].rx_ring_mask =
2525 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
2526 		soc->intr_ctx[i].rx_mon_ring_mask =
2527 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
2528 		soc->intr_ctx[i].rx_err_ring_mask =
2529 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
2530 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
2531 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
2532 		soc->intr_ctx[i].reo_status_ring_mask =
2533 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
2534 		soc->intr_ctx[i].rxdma2host_ring_mask =
2535 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
2536 		soc->intr_ctx[i].soc = soc;
2537 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
2538 
2539 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
2540 			hif_event_history_init(soc->hif_handle, i);
2541 			soc->mon_intr_id_lmac_map[lmac_id] = i;
2542 			lmac_id++;
2543 		}
2544 	}
2545 
2546 	qdf_timer_init(soc->osdev, &soc->int_timer,
2547 			dp_interrupt_timer, (void *)soc,
2548 			QDF_TIMER_TYPE_WAKE_APPS);
2549 
2550 	return QDF_STATUS_SUCCESS;
2551 }
2552 
2553 /**
2554  * dp_soc_set_interrupt_mode() - Set the interrupt mode in soc
2555  * @soc: DP soc handle
2556  *
2557  * Set the appropriate interrupt mode flag in the soc.
2558  */
2559 static void dp_soc_set_interrupt_mode(struct dp_soc *soc)
2560 {
2561 	uint32_t msi_base_data, msi_vector_start;
2562 	int msi_vector_count, ret;
2563 
2564 	soc->intr_mode = DP_INTR_INTEGRATED;
2565 
2566 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
2567 	    (soc->cdp_soc.ol_ops->get_con_mode &&
2568 	     soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)) {
2569 		soc->intr_mode = DP_INTR_POLL;
2570 	} else {
2571 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
2572 						  &msi_vector_count,
2573 						  &msi_base_data,
2574 						  &msi_vector_start);
2575 		if (ret)
2576 			return;
2577 
2578 		soc->intr_mode = DP_INTR_MSI;
2579 	}
2580 }
2581 
2582 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc);
2583 #if defined(DP_INTR_POLL_BOTH)
2584 /*
2585  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
2586  * @txrx_soc: DP SOC handle
2587  *
2588  * Call the appropriate attach function based on the mode of operation.
2589  * This is a WAR for enabling monitor mode.
2590  *
2591  * Return: 0 for success. nonzero for failure.
2592  */
2593 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
2594 {
2595 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2596 
2597 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
2598 	    (soc->cdp_soc.ol_ops->get_con_mode &&
2599 	     soc->cdp_soc.ol_ops->get_con_mode() ==
2600 	     QDF_GLOBAL_MONITOR_MODE)) {
2601 		dp_info("Poll mode");
2602 		return dp_soc_attach_poll(txrx_soc);
2603 	} else {
2604 		dp_info("Interrupt  mode");
2605 		return dp_soc_interrupt_attach(txrx_soc);
2606 	}
2607 }
2608 #else
2609 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
2610 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
2611 {
2612 	return dp_soc_attach_poll(txrx_soc);
2613 }
2614 #else
2615 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
2616 {
2617 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2618 
2619 	if (wlan_cfg_is_poll_mode_enabled(soc->wlan_cfg_ctx))
2620 		return dp_soc_attach_poll(txrx_soc);
2621 	else
2622 		return dp_soc_interrupt_attach(txrx_soc);
2623 }
2624 #endif
2625 #endif
2626 
2627 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
2628 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
2629 {
2630 	int j;
2631 	int num_irq = 0;
2632 
2633 	int tx_mask =
2634 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
2635 	int rx_mask =
2636 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
2637 	int rx_mon_mask =
2638 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
2639 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
2640 					soc->wlan_cfg_ctx, intr_ctx_num);
2641 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
2642 					soc->wlan_cfg_ctx, intr_ctx_num);
2643 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
2644 					soc->wlan_cfg_ctx, intr_ctx_num);
2645 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
2646 					soc->wlan_cfg_ctx, intr_ctx_num);
2647 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
2648 					soc->wlan_cfg_ctx, intr_ctx_num);
2649 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
2650 					soc->wlan_cfg_ctx, intr_ctx_num);
2651 
2652 	soc->intr_mode = DP_INTR_INTEGRATED;
2653 
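	/*
	 * The legacy IRQ enum is laid out so that ring (j + 1) of a given
	 * class sits at a fixed negative offset from the ring-1 entry,
	 * hence each map entry below is computed as <ring1_irq_id> - j.
	 */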
2654 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
2655 
2656 		if (tx_mask & (1 << j)) {
2657 			irq_id_map[num_irq++] =
2658 				(wbm2host_tx_completions_ring1 - j);
2659 		}
2660 
2661 		if (rx_mask & (1 << j)) {
2662 			irq_id_map[num_irq++] =
2663 				(reo2host_destination_ring1 - j);
2664 		}
2665 
2666 		if (rxdma2host_ring_mask & (1 << j)) {
2667 			irq_id_map[num_irq++] =
2668 				rxdma2host_destination_ring_mac1 - j;
2669 		}
2670 
2671 		if (host2rxdma_ring_mask & (1 << j)) {
2672 			irq_id_map[num_irq++] =
2673 				host2rxdma_host_buf_ring_mac1 -	j;
2674 		}
2675 
2676 		if (host2rxdma_mon_ring_mask & (1 << j)) {
2677 			irq_id_map[num_irq++] =
2678 				host2rxdma_monitor_ring1 - j;
2679 		}
2680 
2681 		if (rx_mon_mask & (1 << j)) {
2682 			irq_id_map[num_irq++] =
2683 				ppdu_end_interrupts_mac1 - j;
2684 			irq_id_map[num_irq++] =
2685 				rxdma2host_monitor_status_ring_mac1 - j;
2686 			irq_id_map[num_irq++] =
2687 				rxdma2host_monitor_destination_mac1 - j;
2688 		}
2689 
2690 		if (rx_wbm_rel_ring_mask & (1 << j))
2691 			irq_id_map[num_irq++] = wbm2host_rx_release;
2692 
2693 		if (rx_err_ring_mask & (1 << j))
2694 			irq_id_map[num_irq++] = reo2host_exception;
2695 
2696 		if (reo_status_ring_mask & (1 << j))
2697 			irq_id_map[num_irq++] = reo2host_status;
2698 
2699 	}
2700 	*num_irq_r = num_irq;
2701 }
2702 
2703 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
2704 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
2705 		int msi_vector_count, int msi_vector_start)
2706 {
2707 	int tx_mask = wlan_cfg_get_tx_ring_mask(
2708 					soc->wlan_cfg_ctx, intr_ctx_num);
2709 	int rx_mask = wlan_cfg_get_rx_ring_mask(
2710 					soc->wlan_cfg_ctx, intr_ctx_num);
2711 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
2712 					soc->wlan_cfg_ctx, intr_ctx_num);
2713 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
2714 					soc->wlan_cfg_ctx, intr_ctx_num);
2715 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
2716 					soc->wlan_cfg_ctx, intr_ctx_num);
2717 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
2718 					soc->wlan_cfg_ctx, intr_ctx_num);
2719 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
2720 					soc->wlan_cfg_ctx, intr_ctx_num);
2721 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
2722 					soc->wlan_cfg_ctx, intr_ctx_num);
2723 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
2724 					soc->wlan_cfg_ctx, intr_ctx_num);
2725 
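	/*
	 * Interrupt contexts are distributed round-robin over the MSI
	 * vectors granted by the platform, so all rings serviced by one
	 * context share the single IRQ computed below.
	 */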
2726 	unsigned int vector =
2727 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
2728 	int num_irq = 0;
2729 
2730 	soc->intr_mode = DP_INTR_MSI;
2731 
2732 	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
2733 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask |
2734 	    host2rxdma_ring_mask | host2rxdma_mon_ring_mask)
2735 		irq_id_map[num_irq++] =
2736 			pld_get_msi_irq(soc->osdev->dev, vector);
2737 
2738 	*num_irq_r = num_irq;
2739 }
2740 
2741 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
2742 				    int *irq_id_map, int *num_irq)
2743 {
2744 	int msi_vector_count, ret;
2745 	uint32_t msi_base_data, msi_vector_start;
2746 
2747 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
2748 					    &msi_vector_count,
2749 					    &msi_base_data,
2750 					    &msi_vector_start);
2751 	if (ret)
2752 		dp_soc_interrupt_map_calculate_integrated(soc,
2753 				intr_ctx_num, irq_id_map,
2754 				num_irq);
2755 	else
2756 		dp_soc_interrupt_map_calculate_msi(soc,
2757 				intr_ctx_num, irq_id_map, num_irq,
2758 				msi_vector_count, msi_vector_start);
2759 }
2760 
2761 /*
2762  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
2763  * @txrx_soc: DP SOC handle
2764  *
2765  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
2766  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
2767  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
2768  *
2769  * Return: 0 for success. nonzero for failure.
2770  */
2771 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
2772 {
2773 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2774 
2775 	int i = 0;
2776 	int num_irq = 0;
2777 
2778 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
2779 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
2780 
2781 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
2782 		int ret = 0;
2783 
2784 		/* Map of IRQ ids registered with one interrupt context */
2785 		int irq_id_map[HIF_MAX_GRP_IRQ];
2786 
2787 		int tx_mask =
2788 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
2789 		int rx_mask =
2790 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
2791 		int rx_mon_mask =
2792 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
2793 		int rx_err_ring_mask =
2794 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
2795 		int rx_wbm_rel_ring_mask =
2796 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
2797 		int reo_status_ring_mask =
2798 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
2799 		int rxdma2host_ring_mask =
2800 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
2801 		int host2rxdma_ring_mask =
2802 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
2803 		int host2rxdma_mon_ring_mask =
2804 			wlan_cfg_get_host2rxdma_mon_ring_mask(
2805 				soc->wlan_cfg_ctx, i);
2806 
2807 		soc->intr_ctx[i].dp_intr_id = i;
2808 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
2809 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
2810 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
2811 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
2812 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
2813 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
2814 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
2815 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
2816 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
2817 			 host2rxdma_mon_ring_mask;
2818 
2819 		soc->intr_ctx[i].soc = soc;
2820 
2821 		num_irq = 0;
2822 
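		/*
		 * Resolve this context's ring masks into a concrete IRQ
		 * list and register it with HIF as one ext group; HIF
		 * invokes dp_service_srngs() as the NAPI-style handler
		 * for the whole group.
		 */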
2823 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
2824 					       &num_irq);
2825 
2826 		ret = hif_register_ext_group(soc->hif_handle,
2827 				num_irq, irq_id_map, dp_service_srngs,
2828 				&soc->intr_ctx[i], "dp_intr",
2829 				HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
2830 
2831 		if (ret) {
2832 			dp_init_err("%pK: hif_register_ext_group failed, ret = %d", soc, ret);
2833 
2834 			return QDF_STATUS_E_FAILURE;
2835 		}
2836 
2837 		hif_event_history_init(soc->hif_handle, i);
2838 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
2839 	}
2840 
2841 	hif_configure_ext_group_interrupts(soc->hif_handle);
2842 
2843 	return QDF_STATUS_SUCCESS;
2844 }
2845 
2846 /*
2847  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
2848  * @txrx_soc: DP SOC handle
2849  *
2850  * Return: none
2851  */
2852 static void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
2853 {
2854 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2855 	int i;
2856 
2857 	if (soc->intr_mode == DP_INTR_POLL) {
2858 		qdf_timer_free(&soc->int_timer);
2859 	} else {
2860 		hif_deconfigure_ext_group_interrupts(soc->hif_handle);
2861 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
2862 	}
2863 
2864 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
2865 		soc->intr_ctx[i].tx_ring_mask = 0;
2866 		soc->intr_ctx[i].rx_ring_mask = 0;
2867 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
2868 		soc->intr_ctx[i].rx_err_ring_mask = 0;
2869 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
2870 		soc->intr_ctx[i].reo_status_ring_mask = 0;
2871 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
2872 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
2873 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
2874 
2875 		hif_event_history_deinit(soc->hif_handle, i);
2876 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
2877 	}
2878 
2879 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
2880 		    sizeof(soc->mon_intr_id_lmac_map),
2881 		    DP_MON_INVALID_LMAC_ID);
2882 }
2883 
2884 #define AVG_MAX_MPDUS_PER_TID 128
2885 #define AVG_TIDS_PER_CLIENT 2
2886 #define AVG_FLOWS_PER_TID 2
2887 #define AVG_MSDUS_PER_FLOW 128
2888 #define AVG_MSDUS_PER_MPDU 4
2889 
2890 /*
2891  * dp_hw_link_desc_pool_banks_free() - Free h/w link desc pool banks
2892  * @soc: DP SOC handle
2893  * @mac_id: mac id
2894  *
2895  * Return: none
2896  */
2897 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id)
2898 {
2899 	struct qdf_mem_multi_page_t *pages;
2900 
2901 	if (mac_id != WLAN_INVALID_PDEV_ID)
2902 		pages = &soc->mon_link_desc_pages[mac_id];
2903 	else
2904 		pages = &soc->link_desc_pages;
2905 
2906 	if (pages->dma_pages) {
2907 		wlan_minidump_remove((void *)
2908 				     pages->dma_pages->page_v_addr_start,
2909 				     pages->num_pages * pages->page_size,
2910 				     soc->ctrl_psoc,
2911 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
2912 				     "hw_link_desc_bank");
2913 		dp_desc_multi_pages_mem_free(soc, DP_HW_LINK_DESC_TYPE,
2914 					     pages, 0, false);
2915 	}
2916 }
2917 
2918 /*
2919  * dp_hw_link_desc_pool_banks_alloc() - Allocate h/w link desc pool banks
2920  * @soc: DP SOC handle
2921  * @mac_id: mac id
2922  *
2923  * Allocates memory pages for link descriptors; the page size is 4K for
2924  * MCL and 2MB for WIN. If the mac_id is invalid, link descriptor pages are
2925  * allocated for regular RX/TX, and if there is a valid mac_id, link
2926  * descriptors are allocated for RX monitor mode.
2927  *
2928  * Return: QDF_STATUS_SUCCESS: Success
2929  *	   QDF_STATUS_E_FAILURE: Failure
2930  */
2931 QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc, uint32_t mac_id)
2932 {
2933 	hal_soc_handle_t hal_soc = soc->hal_soc;
2934 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
2935 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
2936 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
2937 	uint32_t num_mpdus_per_link_desc = hal_num_mpdus_per_link_desc(hal_soc);
2938 	uint32_t num_msdus_per_link_desc = hal_num_msdus_per_link_desc(hal_soc);
2939 	uint32_t num_mpdu_links_per_queue_desc =
2940 		hal_num_mpdu_links_per_queue_desc(hal_soc);
2941 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
2942 	uint32_t *total_link_descs, total_mem_size;
2943 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
2944 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
2945 	uint32_t num_entries;
2946 	struct qdf_mem_multi_page_t *pages;
2947 	struct dp_srng *dp_srng;
2948 	uint8_t minidump_str[MINIDUMP_STR_SIZE];
2949 
2950 	/* Only Tx queue descriptors are allocated from the common link
2951 	 * descriptor pool. Rx queue descriptors (REO queue extension
2952 	 * descriptors) are not included here because they are expected to
2953 	 * be allocated contiguously with the REO queue descriptors.
2954 	 */
2955 	if (mac_id != WLAN_INVALID_PDEV_ID) {
2956 		pages = &soc->mon_link_desc_pages[mac_id];
2957 		dp_srng = &soc->rxdma_mon_desc_ring[mac_id];
2958 		num_entries = dp_srng->alloc_size /
2959 			hal_srng_get_entrysize(soc->hal_soc,
2960 					       RXDMA_MONITOR_DESC);
2961 		total_link_descs = &soc->total_mon_link_descs[mac_id];
2962 		qdf_str_lcopy(minidump_str, "mon_link_desc_bank",
2963 			      MINIDUMP_STR_SIZE);
2964 	} else {
2965 		num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
2966 			AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
2967 
2968 		num_mpdu_queue_descs = num_mpdu_link_descs /
2969 			num_mpdu_links_per_queue_desc;
2970 
2971 		num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
2972 			AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
2973 			num_msdus_per_link_desc;
2974 
2975 		num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
2976 			AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
2977 
2978 		num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
2979 			num_tx_msdu_link_descs + num_rx_msdu_link_descs;
2980 
2981 		pages = &soc->link_desc_pages;
2982 		total_link_descs = &soc->total_link_descs;
2983 		qdf_str_lcopy(minidump_str, "link_desc_bank",
2984 			      MINIDUMP_STR_SIZE);
2985 	}
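	/*
	 * Sizing illustration (example values only; the per-descriptor
	 * counts come from the HAL): with max_clients = 64 and
	 * num_mpdus_per_link_desc = 6,
	 *   num_mpdu_link_descs = (64 * 2 * 128) / 6 = 2730
	 * The remaining terms are computed the same way before the total
	 * is rounded up to a power of two below.
	 */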
2986 
2987 	/* If link descriptor banks are allocated, return from here */
2988 	if (pages->num_pages)
2989 		return QDF_STATUS_SUCCESS;
2990 
2991 	/* Round up to power of 2 */
2992 	*total_link_descs = 1;
2993 	while (*total_link_descs < num_entries)
2994 		*total_link_descs <<= 1;
2995 
2996 	dp_init_info("%pK: total_link_descs: %u, link_desc_size: %d",
2997 		     soc, *total_link_descs, link_desc_size);
2998 	total_mem_size =  *total_link_descs * link_desc_size;
2999 	total_mem_size += link_desc_align;
3000 
3001 	dp_init_info("%pK: total_mem_size: %d",
3002 		     soc, total_mem_size);
3003 
3004 	dp_set_max_page_size(pages, max_alloc_size);
3005 	dp_desc_multi_pages_mem_alloc(soc, DP_HW_LINK_DESC_TYPE,
3006 				      pages,
3007 				      link_desc_size,
3008 				      *total_link_descs,
3009 				      0, false);
3010 	if (!pages->num_pages) {
3011 		dp_err("Multi page alloc fail for hw link desc pool");
3012 		return QDF_STATUS_E_FAULT;
3013 	}
3014 
3015 	wlan_minidump_log(pages->dma_pages->page_v_addr_start,
3016 			  pages->num_pages * pages->page_size,
3017 			  soc->ctrl_psoc,
3018 			  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3019 			  "hw_link_desc_bank");
3020 
3021 	return QDF_STATUS_SUCCESS;
3022 }
3023 
3024 /*
3025  * dp_hw_link_desc_ring_free() - Free h/w link desc rings
3026  * @soc: DP SOC handle
3027  *
3028  * Return: none
3029  */
3030 static void dp_hw_link_desc_ring_free(struct dp_soc *soc)
3031 {
3032 	uint32_t i;
3033 	uint32_t size = soc->wbm_idle_scatter_buf_size;
3034 	void *vaddr = soc->wbm_idle_link_ring.base_vaddr_unaligned;
3035 	qdf_dma_addr_t paddr;
3036 
3037 	if (soc->wbm_idle_scatter_buf_base_vaddr[0]) {
3038 		for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
3039 			vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
3040 			paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
3041 			if (vaddr) {
3042 				qdf_mem_free_consistent(soc->osdev,
3043 							soc->osdev->dev,
3044 							size,
3045 							vaddr,
3046 							paddr,
3047 							0);
3048 				soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
3049 			}
3050 		}
3051 	} else {
3052 		wlan_minidump_remove(soc->wbm_idle_link_ring.base_vaddr_unaligned,
3053 				     soc->wbm_idle_link_ring.alloc_size,
3054 				     soc->ctrl_psoc,
3055 				     WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3056 				     "wbm_idle_link_ring");
3057 		dp_srng_free(soc, &soc->wbm_idle_link_ring);
3058 	}
3059 }
3060 
3061 /*
3062  * dp_hw_link_desc_ring_alloc() - Allocate hw link desc rings
3063  * @soc: DP SOC handle
3064  *
3065  * Allocate memory for the WBM_IDLE_LINK srng ring if the memory needed
3066  * for the link descriptors is less than the max allocation size;
3067  * otherwise allocate memory for the wbm_idle_scatter_buffers.
3068  *
3069  * Return: QDF_STATUS_SUCCESS: success
3070  *         QDF_STATUS_E_NO_MEM: No memory (Failure)
3071  */
3072 static QDF_STATUS dp_hw_link_desc_ring_alloc(struct dp_soc *soc)
3073 {
3074 	uint32_t entry_size, i;
3075 	uint32_t total_mem_size;
3076 	qdf_dma_addr_t *baseaddr = NULL;
3077 	struct dp_srng *dp_srng;
3078 	uint32_t ring_type;
3079 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
3080 	uint32_t tlds;
3081 
3082 	ring_type = WBM_IDLE_LINK;
3083 	dp_srng = &soc->wbm_idle_link_ring;
3084 	tlds = soc->total_link_descs;
3085 
3086 	entry_size = hal_srng_get_entrysize(soc->hal_soc, ring_type);
3087 	total_mem_size = entry_size * tlds;
3088 
3089 	if (total_mem_size <= max_alloc_size) {
3090 		if (dp_srng_alloc(soc, dp_srng, ring_type, tlds, 0)) {
3091 			dp_init_err("%pK: Link desc idle ring setup failed",
3092 				    soc);
3093 			goto fail;
3094 		}
3095 
3096 		wlan_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
3097 				  soc->wbm_idle_link_ring.alloc_size,
3098 				  soc->ctrl_psoc,
3099 				  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
3100 				  "wbm_idle_link_ring");
3101 	} else {
3102 		uint32_t num_scatter_bufs;
3103 		uint32_t num_entries_per_buf;
3104 		uint32_t buf_size = 0;
3105 
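		/*
		 * The idle-link descriptor list is too large for a single
		 * ring allocation, so carve it into HW scatter buffers;
		 * dp_link_desc_ring_replenish() later hands these to HW
		 * via hal_setup_link_idle_list().
		 */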
3106 		soc->wbm_idle_scatter_buf_size =
3107 			hal_idle_list_scatter_buf_size(soc->hal_soc);
3108 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
3109 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
3110 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
3111 					soc->hal_soc, total_mem_size,
3112 					soc->wbm_idle_scatter_buf_size);
3113 
3114 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
3115 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3116 				  FL("scatter bufs size out of bounds"));
3117 			goto fail;
3118 		}
3119 
3120 		for (i = 0; i < num_scatter_bufs; i++) {
3121 			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
3122 			buf_size = soc->wbm_idle_scatter_buf_size;
3123 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
3124 				qdf_mem_alloc_consistent(soc->osdev,
3125 							 soc->osdev->dev,
3126 							 buf_size,
3127 							 baseaddr);
3128 
3129 			if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
3130 				QDF_TRACE(QDF_MODULE_ID_DP,
3131 					  QDF_TRACE_LEVEL_ERROR,
3132 					  FL("Scatter list memory alloc fail"));
3133 				goto fail;
3134 			}
3135 		}
3136 		soc->num_scatter_bufs = num_scatter_bufs;
3137 	}
3138 	return QDF_STATUS_SUCCESS;
3139 
3140 fail:
3141 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
3142 		void *vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
3143 		qdf_dma_addr_t paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
3144 
3145 		if (vaddr) {
3146 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
3147 						soc->wbm_idle_scatter_buf_size,
3148 						vaddr,
3149 						paddr, 0);
3150 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
3151 		}
3152 	}
3153 	return QDF_STATUS_E_NOMEM;
3154 }
3155 
3156 /*
3157  * dp_hw_link_desc_ring_init() - Initialize hw link desc rings
3158  * @soc: DP SOC handle
3159  *
3160  * Return: QDF_STATUS_SUCCESS: success
3161  *         QDF_STATUS_E_FAILURE: failure
3162  */
3163 static QDF_STATUS dp_hw_link_desc_ring_init(struct dp_soc *soc)
3164 {
3165 	struct dp_srng *dp_srng = &soc->wbm_idle_link_ring;
3166 
3167 	if (dp_srng->base_vaddr_unaligned) {
3168 		if (dp_srng_init(soc, dp_srng, WBM_IDLE_LINK, 0, 0))
3169 			return QDF_STATUS_E_FAILURE;
3170 	}
3171 	return QDF_STATUS_SUCCESS;
3172 }
3173 
3174 /*
3175  * dp_hw_link_desc_ring_deinit() - Reset hw link desc rings
3176  * @soc: DP SOC handle
3177  *
3178  * Return: None
3179  */
3180 static void dp_hw_link_desc_ring_deinit(struct dp_soc *soc)
3181 {
3182 	dp_srng_deinit(soc, &soc->wbm_idle_link_ring, WBM_IDLE_LINK, 0);
3183 }
3184 
3185 /*
3186  * dp_link_desc_ring_replenish() - Replenish hw link desc rings
3187  * @soc: DP SOC handle
3188  * @mac_id: mac id
3189  *
3190  * Return: None
3191  */
3192 void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id)
3193 {
3194 	uint32_t cookie = 0;
3195 	uint32_t page_idx = 0;
3196 	struct qdf_mem_multi_page_t *pages;
3197 	struct qdf_mem_dma_page_t *dma_pages;
3198 	uint32_t offset = 0;
3199 	uint32_t count = 0;
3200 	void *desc_srng;
3201 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
3202 	uint32_t total_link_descs;
3203 	uint32_t scatter_buf_num;
3204 	uint32_t num_entries_per_buf = 0;
3205 	uint32_t rem_entries;
3206 	uint32_t num_descs_per_page;
3207 	uint32_t num_scatter_bufs = 0;
3208 	uint8_t *scatter_buf_ptr;
3209 	void *desc;
3210 
3211 	num_scatter_bufs = soc->num_scatter_bufs;
3212 
3213 	if (mac_id == WLAN_INVALID_PDEV_ID) {
3214 		pages = &soc->link_desc_pages;
3215 		total_link_descs = soc->total_link_descs;
3216 		desc_srng = soc->wbm_idle_link_ring.hal_srng;
3217 	} else {
3218 		pages = &soc->mon_link_desc_pages[mac_id];
3219 		total_link_descs = soc->total_mon_link_descs[mac_id];
3220 		desc_srng = soc->rxdma_mon_desc_ring[mac_id].hal_srng;
3221 	}
3222 
3223 	dma_pages = pages->dma_pages;
3224 	do {
3225 		qdf_mem_zero(dma_pages[page_idx].page_v_addr_start,
3226 			     pages->page_size);
3227 		page_idx++;
3228 	} while (page_idx < pages->num_pages);
3229 
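	/*
	 * Two population paths: if a WBM idle link ring exists, write one
	 * link descriptor address (plus cookie) per ring entry; otherwise
	 * fill the idle-list scatter buffers directly and hand them to HW
	 * via hal_setup_link_idle_list(). The cookie encodes the
	 * descriptor index and page so the descriptor can be located when
	 * HW returns it.
	 */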
3230 	if (desc_srng) {
3231 		hal_srng_access_start_unlocked(soc->hal_soc, desc_srng);
3232 		page_idx = 0;
3233 		count = 0;
3234 		offset = 0;
3235 		pages = &soc->link_desc_pages;
3236 		while ((desc = hal_srng_src_get_next(soc->hal_soc,
3237 						     desc_srng)) &&
3238 			(count < total_link_descs)) {
3239 			page_idx = count / pages->num_element_per_page;
3240 			offset = count % pages->num_element_per_page;
3241 			cookie = LINK_DESC_COOKIE(count, page_idx,
3242 						  soc->link_desc_id_start);
3243 
3244 			hal_set_link_desc_addr(soc->hal_soc, desc, cookie,
3245 					       dma_pages[page_idx].page_p_addr
3246 					       + (offset * link_desc_size));
3247 			count++;
3248 		}
3249 		hal_srng_access_end_unlocked(soc->hal_soc, desc_srng);
3250 	} else {
3251 		/* Populate idle list scatter buffers with link descriptor
3252 		 * pointers
3253 		 */
3254 		scatter_buf_num = 0;
3255 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
3256 					soc->hal_soc,
3257 					soc->wbm_idle_scatter_buf_size);
3258 
3259 		scatter_buf_ptr = (uint8_t *)(
3260 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
3261 		rem_entries = num_entries_per_buf;
3262 		pages = &soc->link_desc_pages;
3263 		page_idx = 0; count = 0;
3264 		offset = 0;
3265 		num_descs_per_page = pages->num_element_per_page;
3266 
3267 		while (count < total_link_descs) {
3268 			page_idx = count / num_descs_per_page;
3269 			offset = count % num_descs_per_page;
3270 			cookie = LINK_DESC_COOKIE(count, page_idx,
3271 						  soc->link_desc_id_start);
3272 			hal_set_link_desc_addr(soc->hal_soc,
3273 					       (void *)scatter_buf_ptr,
3274 					       cookie,
3275 					       dma_pages[page_idx].page_p_addr +
3276 					       (offset * link_desc_size));
3277 			rem_entries--;
3278 			if (rem_entries) {
3279 				scatter_buf_ptr += link_desc_size;
3280 			} else {
3281 				rem_entries = num_entries_per_buf;
3282 				scatter_buf_num++;
3283 				if (scatter_buf_num >= num_scatter_bufs)
3284 					break;
3285 				scatter_buf_ptr = (uint8_t *)
3286 					(soc->wbm_idle_scatter_buf_base_vaddr[
3287 					 scatter_buf_num]);
3288 			}
3289 			count++;
3290 		}
3291 		/* Setup link descriptor idle list in HW */
3292 		hal_setup_link_idle_list(soc->hal_soc,
3293 			soc->wbm_idle_scatter_buf_base_paddr,
3294 			soc->wbm_idle_scatter_buf_base_vaddr,
3295 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
3296 			(uint32_t)(scatter_buf_ptr -
3297 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
3298 			scatter_buf_num-1])), total_link_descs);
3299 	}
3300 }
3301 
3302 #ifdef IPA_OFFLOAD
3303 #define REO_DST_RING_SIZE_QCA6290 1023
3304 #ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
3305 #define REO_DST_RING_SIZE_QCA8074 1023
3306 #define REO_DST_RING_SIZE_QCN9000 2048
3307 #else
3308 #define REO_DST_RING_SIZE_QCA8074 8
3309 #define REO_DST_RING_SIZE_QCN9000 8
3310 #endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
3311 
3312 #ifdef IPA_WDI3_TX_TWO_PIPES
3313 static int dp_ipa_get_tx_alt_comp_ring_num(int ring_num)
3314 {
3315 	/* IPA alternate TX comp ring for 2G is WBM2SW4 */
3316 	if (ring_num == IPA_TX_ALT_COMP_RING_IDX)
3317 		ring_num = 4;
3318 
3319 	return ring_num;
3320 }
3321 
3322 #ifdef DP_MEMORY_OPT
3323 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
3324 {
3325 	return dp_init_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
3326 }
3327 
3328 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
3329 {
3330 	dp_deinit_tx_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
3331 }
3332 
3333 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
3334 {
3335 	return dp_alloc_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
3336 }
3337 
3338 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
3339 {
3340 	dp_free_tx_ring_pair_by_index(soc, IPA_TX_ALT_RING_IDX);
3341 }
3342 
3343 #else /* !DP_MEMORY_OPT */
3344 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
3345 {
3346 	return 0;
3347 }
3348 
3349 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
3350 {
3351 }
3352 
3353 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
3354 {
3355 	return 0;
3356 }
3357 
3358 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
3359 {
3360 }
3361 #endif /* DP_MEMORY_OPT */
3362 
3363 static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
3364 {
3365 	hal_tx_init_data_ring(soc->hal_soc,
3366 			      soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng);
3367 }
3368 
3369 #else /* !IPA_WDI3_TX_TWO_PIPES */
3370 static int dp_ipa_get_tx_alt_comp_ring_num(int ring_num)
3371 {
3372 	return ring_num;
3373 }
3374 
3375 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
3376 {
3377 	return 0;
3378 }
3379 
3380 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
3381 {
3382 }
3383 
3384 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
3385 {
3386 	return 0;
3387 }
3388 
3389 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
3390 {
3391 }
3392 
3393 static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
3394 {
3395 }
3396 
3397 #endif /* IPA_WDI3_TX_TWO_PIPES */
3398 
3399 #else
3400 
3401 #define REO_DST_RING_SIZE_QCA6290 1024
3402 #ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
3403 #define REO_DST_RING_SIZE_QCA8074 2048
3404 #define REO_DST_RING_SIZE_QCN9000 2048
3405 #else
3406 #define REO_DST_RING_SIZE_QCA8074 8
3407 #define REO_DST_RING_SIZE_QCN9000 8
3408 #endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
3409 
3410 static int dp_ipa_init_alt_tx_ring(struct dp_soc *soc)
3411 {
3412 	return 0;
3413 }
3414 
3415 static void dp_ipa_deinit_alt_tx_ring(struct dp_soc *soc)
3416 {
3417 }
3418 
3419 static int dp_ipa_alloc_alt_tx_ring(struct dp_soc *soc)
3420 {
3421 	return 0;
3422 }
3423 
3424 static void dp_ipa_free_alt_tx_ring(struct dp_soc *soc)
3425 {
3426 }
3427 
3428 static int dp_ipa_get_tx_alt_comp_ring_num(int ring_num)
3429 {
3430 	return ring_num;
3431 }
3432 
3433 static void dp_ipa_hal_tx_init_alt_data_ring(struct dp_soc *soc)
3434 {
3435 }
3436 
3437 #endif /* IPA_OFFLOAD */
3438 
3439 /*
3440  * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
3441  * @soc: Datapath soc handler
3442  *
3443  * This api resets the default cpu ring map
3444  */
3445 
3446 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
3447 {
3448 	uint8_t i;
3449 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
3450 
3451 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
3452 		switch (nss_config) {
3453 		case dp_nss_cfg_first_radio:
3454 			/*
3455 			 * Setting Tx ring map for one nss offloaded radio
3456 			 */
3457 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
3458 			break;
3459 
3460 		case dp_nss_cfg_second_radio:
3461 			/*
3462 			 * Setting Tx ring for two nss offloaded radios
3463 			 */
3464 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
3465 			break;
3466 
3467 		case dp_nss_cfg_dbdc:
3468 			/*
3469 			 * Setting Tx ring map for 2 nss offloaded radios
3470 			 */
3471 			soc->tx_ring_map[i] =
3472 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
3473 			break;
3474 
3475 		case dp_nss_cfg_dbtc:
3476 			/*
3477 			 * Setting Tx ring map for 3 nss offloaded radios
3478 			 */
3479 			soc->tx_ring_map[i] =
3480 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
3481 			break;
3482 
3483 		default:
3484 			dp_err("tx_ring_map failed due to invalid nss cfg");
3485 			break;
3486 		}
3487 	}
3488 }
3489 
3490 /*
3491  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
3492  * @soc: DP soc handle
3493  * @ring_type: ring type
3494  * @ring_num: ring number
3495  *
3496  * Return: 1 if the ring is offloaded to NSS, 0 otherwise
3497  */
3498 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
3499 {
3500 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
3501 	uint8_t status = 0;
3502 
3503 	switch (ring_type) {
3504 	case WBM2SW_RELEASE:
3505 	case REO_DST:
3506 	case RXDMA_BUF:
3507 	case REO_EXCEPTION:
3508 		status = ((nss_config) & (1 << ring_num));
3509 		break;
3510 	default:
3511 		break;
3512 	}
3513 
3514 	return status;
3515 }
3516 
3517 /*
3518  * dp_soc_disable_unused_mac_intr_mask() - reset interrupt mask for
3519  *					  unused WMAC hw rings
3520  * @soc: DP SoC handle
3521  * @mac_num: WMAC number
3522  *
3523  * Return: None
3524  */
3525 static void dp_soc_disable_unused_mac_intr_mask(struct dp_soc *soc,
3526 						int mac_num)
3527 {
3528 	uint8_t *grp_mask = NULL;
3529 	int group_number;
3530 
3531 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
3532 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
3533 	wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
3534 					  group_number, 0x0);
3535 
3536 	grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
3537 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
3538 	wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
3539 				      group_number, 0x0);
3540 
3541 	grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
3542 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
3543 	wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
3544 					  group_number, 0x0);
3545 
3546 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
3547 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
3548 	wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
3549 					      group_number, 0x0);
3550 }
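
/*
 * Hypothetical walk-through of the pattern above: if mac_num 2 is an
 * unused WMAC and, say, interrupt group 5 carries its host2rxdma ring
 * bit, dp_srng_find_ring_in_mask() returns 5 and the group-5 mask is
 * rewritten to 0x0 so no interrupt context services that ring; the
 * same find-then-zero step repeats for the mon, rxdma2host and
 * host2rxdma_mon masks.
 */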
3551 
3552 /*
3553  * dp_soc_reset_intr_mask() - reset interrupt mask
3554  * @soc: DP soc handle
3555  *
3556  * Return: void
3557  */
3558 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
3559 {
3560 	uint8_t j;
3561 	uint8_t *grp_mask = NULL;
3562 	int group_number, mask, num_ring;
3563 
3564 	/* number of tx ring */
3565 	num_ring = soc->num_tcl_data_rings;
3566 
3567 	/*
3568 	 * group mask for tx completion ring.
3569 	 */
3570 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
3571 
3572 	/* loop and reset the mask only for offloaded rings */
3573 	for (j = 0; j < WLAN_CFG_NUM_TCL_DATA_RINGS; j++) {
3574 		/*
3575 		 * Group number corresponding to tx offloaded ring.
3576 		 */
3577 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
3578 		if (group_number < 0) {
3579 			dp_init_debug("%pK: ring not part of any group; ring_type: %d, ring_num %d",
3580 				      soc, WBM2SW_RELEASE, j);
3581 			return;
3582 		}
3583 
3584 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
3585 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j) &&
3586 		    (!mask)) {
3587 			continue;
3588 		}
3589 
3590 		/* reset the tx mask for offloaded ring */
3591 		mask &= (~(1 << j));
3592 
3593 		/*
3594 		 * reset the interrupt mask for offloaded ring.
3595 		 */
3596 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
3597 	}
3598 
3599 	/* number of rx rings */
3600 	num_ring = soc->num_reo_dest_rings;
3601 
3602 	/*
3603 	 * group mask for reo destination ring.
3604 	 */
3605 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
3606 
3607 	/* loop and reset the mask only for offloaded rings */
3608 	for (j = 0; j < WLAN_CFG_NUM_REO_DEST_RING; j++) {
3609 		/*
3610 		 * Group number corresponding to rx offloaded ring.
3611 		 */
3612 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
3613 		if (group_number < 0) {
3614 			dp_init_debug("%pK: ring not part of any group; ring_type: %d, ring_num %d",
3615 				      soc, REO_DST, j);
3616 			return;
3617 		}
3618 
3619 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
3620 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j) &&
3621 		    (!mask)) {
3622 			continue;
3623 		}
3624 
3625 		/* reset the interrupt mask for offloaded ring */
3626 		mask &= (~(1 << j));
3627 
3628 		/*
3629 		 * write back the updated mask for the rx offloaded radio.
3630 		 */
3631 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
3632 	}
3633 
3634 	/*
3635 	 * group mask for Rx buffer refill ring
3636 	 */
3637 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
3638 
3639 	/* loop and reset the mask only for offloaded rings */
3640 	for (j = 0; j < MAX_PDEV_CNT; j++) {
3641 		int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
3642 
3643 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
3644 			continue;
3645 		}
3646 
3647 		/*
3648 		 * Group number corresponding to rx offloaded ring.
3649 		 */
3650 		group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask);
3651 		if (group_number < 0) {
3652 			dp_init_debug("%pK: ring not part of any group; ring_type: %d, ring_num %d",
3653 				      soc, RXDMA_BUF, lmac_id);
3654 			return;
3655 		}
3656 
3657 		/* fetch the group mask and clear the offloaded ring bit */
3658 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
3659 				group_number);
3660 		mask &= (~(1 << lmac_id));
3661 
3662 		/*
3663 		 * write back the updated mask for the rx offloaded radio.
3664 		 */
3665 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
3666 			group_number, mask);
3667 	}
3668 
3669 	grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
3670 
3671 	for (j = 0; j < num_ring; j++) {
3672 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_EXCEPTION, j)) {
3673 			continue;
3674 		}
3675 
3676 		/*
3677 		 * Group number corresponding to rx err ring.
3678 		 */
3679 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
3680 		if (group_number < 0) {
3681 			dp_init_debug("%pK: ring not part of any group; ring_type: %d, ring_num %d",
3682 				      soc, REO_EXCEPTION, j);
3683 			return;
3684 		}
3685 
3686 		wlan_cfg_set_rx_err_ring_mask(soc->wlan_cfg_ctx,
3687 					      group_number, 0);
3688 	}
3689 }
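
/*
 * Worked example of the mask-clear step (illustrative numbers): if a
 * group's tx ring mask is 0x7 (rings 0-2) and ring j = 1 is NSS
 * offloaded, mask &= ~(1 << 1) gives 0x7 & ~0x2 = 0x5, leaving only
 * rings 0 and 2 to interrupt the host.
 */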
3690 
3691 #ifdef IPA_OFFLOAD
3692 /**
3693  * dp_reo_remap_config() - configure reo remap register value based
3694  *                         on nss configuration.
3695  *		Based on the offload_radio value, the remap
3696  *		configuration below gets applied:
3697  *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
3698  *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
3699  *		2 - 2nd Radio handled by NSS (remap rings 1, 3 & 4)
3700  *		3 - both Radios handled by NSS (remap not required)
3701  *		4 - IPA OFFLOAD enabled (remap rings 1, 2 & 3)
3702  * @soc: DP soc handle
3703  * @remap1: output parameter indicates reo remap 1 register value
3704  * @remap2: output parameter indicates reo remap 2 register value
3705  * Return: bool type, true if remap is configured, else false.
3706  */
3707 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap1, uint32_t *remap2)
3708 {
3709 	uint32_t ring[4] = {REO_REMAP_SW1, REO_REMAP_SW2,
3710 						REO_REMAP_SW3};
3711 	hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
3712 				      3, remap1, remap2);
3713 	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
3714 
3715 	return true;
3716 }
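
/*
 * Note on the IPA case above: only SW1-SW3 are handed to
 * hal_compute_reo_remap_ix2_ix3() (num = 3), matching the
 * "IPA OFFLOAD enabled (remap rings 1, 2 & 3)" rule in the function
 * header; REO2SW4 is left out of the host spread so it can serve the
 * IPA Rx path.
 */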
3717 
3718 #ifdef IPA_WDI3_TX_TWO_PIPES
3719 static bool dp_ipa_is_alt_tx_ring(int index)
3720 {
3721 	return index == IPA_TX_ALT_RING_IDX;
3722 }
3723 
3724 static bool dp_ipa_is_alt_tx_comp_ring(int index)
3725 {
3726 	return index == IPA_TX_ALT_COMP_RING_IDX;
3727 }
3728 #else /* !IPA_WDI3_TX_TWO_PIPES */
3729 static bool dp_ipa_is_alt_tx_ring(int index)
3730 {
3731 	return false;
3732 }
3733 
3734 static bool dp_ipa_is_alt_tx_comp_ring(int index)
3735 {
3736 	return false;
3737 }
3738 #endif /* IPA_WDI3_TX_TWO_PIPES */
3739 
3740 /**
3741  * dp_ipa_get_tx_ring_size() - Get Tx ring size for IPA
3742  *
3743  * @tx_ring_num: Tx ring number
3744  * @tx_ipa_ring_sz: Return param only updated for IPA.
3745  * @soc_cfg_ctx: dp soc cfg context
3746  *
3747  * Return: None
3748  */
3749 static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz,
3750 				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
3751 {
3752 	if (tx_ring_num == IPA_TCL_DATA_RING_IDX ||
3753 	    dp_ipa_is_alt_tx_ring(tx_ring_num))
3754 		*tx_ipa_ring_sz = wlan_cfg_ipa_tx_ring_size(soc_cfg_ctx);
3755 }
3756 
3757 /**
3758  * dp_ipa_get_tx_comp_ring_size() - Get Tx comp ring size for IPA
3759  *
3760  * @tx_comp_ring_num: Tx comp ring number
3761  * @tx_comp_ipa_ring_sz: Return param only updated for IPA.
3762  * @soc_cfg_ctx: dp soc cfg context
3763  *
3764  * Return: None
3765  */
3766 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
3767 					 int *tx_comp_ipa_ring_sz,
3768 				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
3769 {
3770 	if (tx_comp_ring_num == IPA_TCL_DATA_RING_IDX ||
3771 	    dp_ipa_is_alt_tx_comp_ring(tx_comp_ring_num))
3772 		*tx_comp_ipa_ring_sz =
3773 				wlan_cfg_ipa_tx_comp_ring_size(soc_cfg_ctx);
3774 }
3775 #else
3776 static uint8_t dp_reo_ring_selection(uint32_t value, uint32_t *ring)
3777 {
3778 	uint8_t num = 0;
3779 
3780 	switch (value) {
3781 	case 0xF:
3782 		num = 4;
3783 		ring[0] = REO_REMAP_SW1;
3784 		ring[1] = REO_REMAP_SW2;
3785 		ring[2] = REO_REMAP_SW3;
3786 		ring[3] = REO_REMAP_SW4;
3787 		break;
3788 	case 0xE:
3789 		num = 3;
3790 		ring[0] = REO_REMAP_SW2;
3791 		ring[1] = REO_REMAP_SW3;
3792 		ring[2] = REO_REMAP_SW4;
3793 		break;
3794 	case 0xD:
3795 		num = 3;
3796 		ring[0] = REO_REMAP_SW1;
3797 		ring[1] = REO_REMAP_SW3;
3798 		ring[2] = REO_REMAP_SW4;
3799 		break;
3800 	case 0xC:
3801 		num = 2;
3802 		ring[0] = REO_REMAP_SW3;
3803 		ring[1] = REO_REMAP_SW4;
3804 		break;
3805 	case 0xB:
3806 		num = 3;
3807 		ring[0] = REO_REMAP_SW1;
3808 		ring[1] = REO_REMAP_SW2;
3809 		ring[2] = REO_REMAP_SW4;
3810 		break;
3811 	case 0xA:
3812 		num = 2;
3813 		ring[0] = REO_REMAP_SW2;
3814 		ring[1] = REO_REMAP_SW4;
3815 		break;
3816 	case 0x9:
3817 		num = 2;
3818 		ring[0] = REO_REMAP_SW1;
3819 		ring[1] = REO_REMAP_SW4;
3820 		break;
3821 	case 0x8:
3822 		num = 1;
3823 		ring[0] = REO_REMAP_SW4;
3824 		break;
3825 	case 0x7:
3826 		num = 3;
3827 		ring[0] = REO_REMAP_SW1;
3828 		ring[1] = REO_REMAP_SW2;
3829 		ring[2] = REO_REMAP_SW3;
3830 		break;
3831 	case 0x6:
3832 		num = 2;
3833 		ring[0] = REO_REMAP_SW2;
3834 		ring[1] = REO_REMAP_SW3;
3835 		break;
3836 	case 0x5:
3837 		num = 2;
3838 		ring[0] = REO_REMAP_SW1;
3839 		ring[1] = REO_REMAP_SW3;
3840 		break;
3841 	case 0x4:
3842 		num = 1;
3843 		ring[0] = REO_REMAP_SW3;
3844 		break;
3845 	case 0x3:
3846 		num = 2;
3847 		ring[0] = REO_REMAP_SW1;
3848 		ring[1] = REO_REMAP_SW2;
3849 		break;
3850 	case 0x2:
3851 		num = 1;
3852 		ring[0] = REO_REMAP_SW2;
3853 		break;
3854 	case 0x1:
3855 		num = 1;
3856 		ring[0] = REO_REMAP_SW1;
3857 		break;
3858 	}
3859 	return num;
3860 }
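
/*
 * Worked example for dp_reo_ring_selection(): bit n of @value selects
 * REO_REMAP_SW(n + 1), so value 0xB (0b1011) returns num = 3 with
 * ring[] = { REO_REMAP_SW1, REO_REMAP_SW2, REO_REMAP_SW4 }, i.e. SW3
 * is excluded from the remap spread.
 */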
3861 
3862 static bool dp_reo_remap_config(struct dp_soc *soc,
3863 				uint32_t *remap1,
3864 				uint32_t *remap2)
3865 {
3866 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
3867 	uint32_t reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
3868 	uint8_t target_type, num;
3869 	uint32_t ring[4];
3870 	uint32_t value;
3871 
3872 	target_type = hal_get_target_type(soc->hal_soc);
3873 
3874 	switch (offload_radio) {
3875 	case dp_nss_cfg_default:
3876 		value = reo_config & 0xF;
3877 		num = dp_reo_ring_selection(value, ring);
3878 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
3879 					      num, remap1, remap2);
3880 
3881 		break;
3882 	case dp_nss_cfg_first_radio:
3883 		value = reo_config & 0xE;
3884 		num = dp_reo_ring_selection(value, ring);
3885 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
3886 					      num, remap1, remap2);
3887 
3888 		break;
3889 	case dp_nss_cfg_second_radio:
3890 		value = reo_config & 0xD;
3891 		num = dp_reo_ring_selection(value, ring);
3892 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
3893 					      num, remap1, remap2);
3894 
3895 		break;
3896 	case dp_nss_cfg_dbdc:
3897 	case dp_nss_cfg_dbtc:
3898 		/* return false if both or all are offloaded to NSS */
3899 		return false;
3900 	}
3901 
3902 	dp_debug("remap1 %x remap2 %x offload_radio %u",
3903 		 *remap1, *remap2, offload_radio);
3904 	return true;
3905 }
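
/*
 * Worked example (illustrative reo_config): with reo_config = 0xF and
 * dp_nss_cfg_first_radio, value = 0xF & 0xE = 0xE, so
 * dp_reo_ring_selection() picks SW2/SW3/SW4 and the host Rx spread
 * excludes REO2SW1. The dbdc/dbtc cases return false since no host
 * remap is required there.
 */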
3906 
3907 static void dp_ipa_get_tx_ring_size(int ring_num, int *tx_ipa_ring_sz,
3908 				    struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
3909 {
3910 }
3911 
3912 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
3913 					 int *tx_comp_ipa_ring_sz,
3914 				       struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx)
3915 {
3916 }
3917 #endif /* IPA_OFFLOAD */
3918 
3919 /*
3920  * dp_reo_frag_dst_set() - configure reo register to set the
3921  *                        fragment destination ring
3922  * @soc: Datapath soc handle
3923  * @frag_dst_ring: output parameter to set fragment destination ring
3924  *
3925  * Based on offload_radio below, the fragment destination ring is selected:
3926  * 0 - TCL
3927  * 1 - SW1
3928  * 2 - SW2
3929  * 3 - SW3
3930  * 4 - SW4
3931  * 5 - Release
3932  * 6 - FW
3933  * 7 - alternate select
3934  *
3935  * Return: void
3936  */
3937 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
3938 {
3939 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
3940 
3941 	switch (offload_radio) {
3942 	case dp_nss_cfg_default:
3943 		*frag_dst_ring = REO_REMAP_TCL;
3944 		break;
3945 	case dp_nss_cfg_first_radio:
3946 		/*
3947 		 * This configuration is valid for a single-band radio
3948 		 * that is also NSS offloaded.
3949 		 */
3950 	case dp_nss_cfg_dbdc:
3951 	case dp_nss_cfg_dbtc:
3952 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
3953 		break;
3954 	default:
3955 		dp_init_err("%pK: dp_reo_frag_dst_set invalid offload radio config", soc);
3956 		break;
3957 	}
3958 }
3959 
3960 #ifdef ENABLE_VERBOSE_DEBUG
3961 static void dp_enable_verbose_debug(struct dp_soc *soc)
3962 {
3963 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3964 
3965 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3966 
3967 	if (soc_cfg_ctx->per_pkt_trace & dp_verbose_debug_mask)
3968 		is_dp_verbose_debug_enabled = true;
3969 
3970 	if (soc_cfg_ctx->per_pkt_trace & hal_verbose_debug_mask)
3971 		hal_set_verbose_debug(true);
3972 	else
3973 		hal_set_verbose_debug(false);
3974 }
3975 #else
3976 static void dp_enable_verbose_debug(struct dp_soc *soc)
3977 {
3978 }
3979 #endif
3980 
3981 #ifdef WLAN_FEATURE_STATS_EXT
3982 static inline void dp_create_ext_stats_event(struct dp_soc *soc)
3983 {
3984 	qdf_event_create(&soc->rx_hw_stats_event);
3985 }
3986 #else
3987 static inline void dp_create_ext_stats_event(struct dp_soc *soc)
3988 {
3989 }
3990 #endif
3991 
3992 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index)
3993 {
3994 	int ring_num;
3995 
3996 	wlan_minidump_remove(soc->tcl_data_ring[index].base_vaddr_unaligned,
3997 			     soc->tcl_data_ring[index].alloc_size,
3998 			     soc->ctrl_psoc,
3999 			     WLAN_MD_DP_SRNG_TCL_DATA,
4000 			     "tcl_data_ring");
4001 	dp_srng_deinit(soc, &soc->tcl_data_ring[index], TCL_DATA, index);
4002 
4003 	wlan_minidump_remove(soc->tx_comp_ring[index].base_vaddr_unaligned,
4004 			     soc->tx_comp_ring[index].alloc_size,
4005 			     soc->ctrl_psoc,
4006 			     WLAN_MD_DP_SRNG_TX_COMP,
4007 			     "tcl_comp_ring");
4008 	ring_num = dp_ipa_get_tx_alt_comp_ring_num(index);
4009 	dp_srng_deinit(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4010 		       ring_num);
4011 }
4012 
4013 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
4014 						uint8_t index)
4015 {
4016 	int ring_num;
4017 
4018 	if (dp_srng_init(soc, &soc->tcl_data_ring[index], TCL_DATA, index, 0)) {
4019 		dp_err("dp_srng_init failed for tcl_data_ring");
4020 		goto fail1;
4021 	}
4022 	wlan_minidump_log(soc->tcl_data_ring[index].base_vaddr_unaligned,
4023 			  soc->tcl_data_ring[index].alloc_size,
4024 			  soc->ctrl_psoc,
4025 			  WLAN_MD_DP_SRNG_TCL_DATA,
4026 			  "tcl_data_ring");
4027 
4028 	ring_num = dp_ipa_get_tx_alt_comp_ring_num(index);
4029 	if (dp_srng_init(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4030 			 ring_num, 0)) {
4031 		dp_err("dp_srng_init failed for tx_comp_ring");
4032 		goto fail1;
4033 	}
4034 	wlan_minidump_log(soc->tx_comp_ring[index].base_vaddr_unaligned,
4035 			  soc->tx_comp_ring[index].alloc_size,
4036 			  soc->ctrl_psoc,
4037 			  WLAN_MD_DP_SRNG_TX_COMP,
4038 			  "tcl_comp_ring");
4039 
4040 	return QDF_STATUS_SUCCESS;
4041 
4042 fail1:
4043 	return QDF_STATUS_E_FAILURE;
4044 }
4045 
4046 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index)
4047 {
4048 	dp_srng_free(soc, &soc->tcl_data_ring[index]);
4049 	dp_srng_free(soc, &soc->tx_comp_ring[index]);
4050 }
4051 
4052 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
4053 						 uint8_t index)
4054 {
4055 	int tx_ring_size;
4056 	int tx_comp_ring_size;
4057 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
4058 	int cached = 0;
4059 
4060 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
4061 	dp_ipa_get_tx_ring_size(index, &tx_ring_size, soc_cfg_ctx);
4062 
4063 	if (dp_srng_alloc(soc, &soc->tcl_data_ring[index], TCL_DATA,
4064 			  tx_ring_size, cached)) {
4065 		dp_err("dp_srng_alloc failed for tcl_data_ring");
4066 		goto fail1;
4067 	}
4068 
4069 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
4070 	dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size, soc_cfg_ctx);
4071 	/* Enable cached TCL desc if NSS offload is disabled */
4072 	if (!wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
4073 		cached = WLAN_CFG_DST_RING_CACHED_DESC;
4074 
4075 	if (dp_srng_alloc(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
4076 			  tx_comp_ring_size, cached)) {
4077 		dp_err("dp_srng_alloc failed for tx_comp_ring");
4078 		goto fail1;
4079 	}
4080 
4081 	return QDF_STATUS_SUCCESS;
4082 
4083 fail1:
4084 	return QDF_STATUS_E_FAILURE;
4085 }
4086 
4087 static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
4088 {
4089 	struct cdp_lro_hash_config lro_hash;
4090 	QDF_STATUS status;
4091 
4092 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
4093 	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
4094 	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
4095 		dp_err("LRO, GRO and RX hash disabled");
4096 		return QDF_STATUS_E_FAILURE;
4097 	}
4098 
4099 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
4100 
4101 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
4102 	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
4103 		lro_hash.lro_enable = 1;
4104 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
4105 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
4106 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
4107 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
4108 	}
4109 
4110 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
4111 			     (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
4112 			      LRO_IPV4_SEED_ARR_SZ));
4113 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
4114 			     (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
4115 			      LRO_IPV6_SEED_ARR_SZ));
4116 
4117 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
4118 
4119 	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
4120 		QDF_BUG(0);
4121 		dp_err("lro_hash_config not configured");
4122 		return QDF_STATUS_E_FAILURE;
4123 	}
4124 
4125 	status = soc->cdp_soc.ol_ops->lro_hash_config(soc->ctrl_psoc,
4126 						      pdev->pdev_id,
4127 						      &lro_hash);
4128 	if (!QDF_IS_STATUS_SUCCESS(status)) {
4129 		dp_err("failed to send lro_hash_config to FW %u", status);
4130 		return status;
4131 	}
4132 
4133 	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
4134 		lro_hash.lro_enable, lro_hash.tcp_flag,
4135 		lro_hash.tcp_flag_mask);
4136 
4137 	dp_info("toeplitz_hash_ipv4:");
4138 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4139 			   lro_hash.toeplitz_hash_ipv4,
4140 			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
4141 			   LRO_IPV4_SEED_ARR_SZ));
4142 
4143 	dp_info("toeplitz_hash_ipv6:");
4144 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4145 			   lro_hash.toeplitz_hash_ipv6,
4146 			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
4147 			   LRO_IPV6_SEED_ARR_SZ));
4148 
4149 	return status;
4150 }
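
/*
 * Hedged sketch of how the flag filter configured above is typically
 * evaluated on the receive side (illustrative, not the FW
 * implementation): a segment is eligible for coalescing when its TCP
 * flags, restricted to tcp_flag_mask, equal tcp_flag, i.e. a pure ACK:
 *
 *	eligible = ((tcp_flags & lro_hash.tcp_flag_mask) ==
 *		    lro_hash.tcp_flag);
 */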
4151 
4152 /*
4153  * dp_rxdma_ring_setup() - configure the RX DMA rings
4154  * @soc: data path SoC handle
4155  * @pdev: Physical device handle
4156  *
4157  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
4158  */
4159 #ifdef QCA_HOST2FW_RXBUF_RING
4160 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
4161 {
4162 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
4163 	int max_mac_rings;
4164 	int i;
4165 	int ring_size;
4166 
4167 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
4168 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
4169 	ring_size =  wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx);
4170 
4171 	for (i = 0; i < max_mac_rings; i++) {
4172 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
4173 		if (dp_srng_alloc(soc, &pdev->rx_mac_buf_ring[i],
4174 				  RXDMA_BUF, ring_size, 0)) {
4175 			dp_init_err("%pK: failed rx mac ring setup", soc);
4176 			return QDF_STATUS_E_FAILURE;
4177 		}
4178 
4179 		if (dp_srng_init(soc, &pdev->rx_mac_buf_ring[i],
4180 				 RXDMA_BUF, 1, i)) {
4181 			dp_init_err("%pK: failed rx mac ring setup", soc);
4182 
4183 			dp_srng_free(soc, &pdev->rx_mac_buf_ring[i]);
4184 			return QDF_STATUS_E_FAILURE;
4185 		}
4186 	}
4187 	return QDF_STATUS_SUCCESS;
4188 }
4189 #else
4190 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
4191 {
4192 	return QDF_STATUS_SUCCESS;
4193 }
4194 #endif
4195 
4196 /**
4197  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
4198  * @pdev: DP_PDEV handle
4199  *
4200  * Return: void
4201  */
4202 static inline void
4203 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
4204 {
4205 	uint8_t map_id;
4206 	struct dp_soc *soc = pdev->soc;
4207 
4208 	if (!soc)
4209 		return;
4210 
4211 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
4212 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
4213 			     default_dscp_tid_map,
4214 			     sizeof(default_dscp_tid_map));
4215 	}
4216 
4217 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
4218 		hal_tx_set_dscp_tid_map(soc->hal_soc,
4219 					default_dscp_tid_map,
4220 					map_id);
4221 	}
4222 }
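
/*
 * Illustrative lookup (not driver code): on the Tx classification
 * path the tables populated above are consulted roughly as
 *
 *	tid = pdev->dscp_tid_map[map_id][dscp];
 *
 * where dscp is the 6-bit codepoint from the IP header and map_id
 * selects one of the DP_MAX_TID_MAPS tables.
 */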
4223 
4224 /**
4225  * dp_pcp_tid_map_setup(): Initialize the pcp-tid maps
4226  * @pdev: DP_PDEV handle
4227  *
4228  * Return: void
4229  */
4230 static inline void
4231 dp_pcp_tid_map_setup(struct dp_pdev *pdev)
4232 {
4233 	struct dp_soc *soc = pdev->soc;
4234 
4235 	if (!soc)
4236 		return;
4237 
4238 	qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
4239 		     sizeof(default_pcp_tid_map));
4240 	hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
4241 }
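
/*
 * Illustrative lookup (not driver code): PCP is the 3-bit 802.1p
 * priority from the VLAN tag, so the default map set above would be
 * consulted roughly as
 *
 *	tid = soc->pcp_tid_map[pcp & 0x7];
 */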
4242 
4243 #ifdef IPA_OFFLOAD
4244 /**
4245  * dp_setup_ipa_rx_refill_buf_ring() - Setup second Rx refill buffer ring
4246  * @soc: data path instance
4247  * @pdev: core txrx pdev context
4248  *
4249  * Return: QDF_STATUS_SUCCESS: success
4250  *         QDF_STATUS_E_FAILURE: Error return
4251  */
4252 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
4253 					   struct dp_pdev *pdev)
4254 {
4255 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
4256 	int entries;
4257 
4258 	soc_cfg_ctx = soc->wlan_cfg_ctx;
4259 	entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
4260 
4261 	/* Setup second Rx refill buffer ring */
4262 	if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
4263 			  entries, 0)) {
4264 		dp_init_err("%pK: dp_srng_alloc failed second rx refill ring", soc);
4265 		return QDF_STATUS_E_FAILURE;
4266 	}
4267 
4268 	if (dp_srng_init(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
4269 			 IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id)) {
4270 		dp_init_err("%pK: dp_srng_init failed second rx refill ring", soc);
4271 		return QDF_STATUS_E_FAILURE;
4272 	}
4273 
4274 	return QDF_STATUS_SUCCESS;
4275 }
4276 
4277 /**
4278  * dp_cleanup_ipa_rx_refill_buf_ring() - Cleanup second Rx refill buffer ring
4279  * @soc: data path instance
4280  * @pdev: core txrx pdev context
4281  *
4282  * Return: void
4283  */
4284 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
4285 					      struct dp_pdev *pdev)
4286 {
4287 	dp_srng_deinit(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 0);
4288 	dp_srng_free(soc, &pdev->rx_refill_buf_ring2);
4289 }
4290 
4291 #else
4292 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
4293 					   struct dp_pdev *pdev)
4294 {
4295 	return QDF_STATUS_SUCCESS;
4296 }
4297 
4298 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
4299 					      struct dp_pdev *pdev)
4300 {
4301 }
4302 #endif
4303 
4304 #if !defined(DISABLE_MON_CONFIG)
4305 /**
4306  * dp_mon_rings_deinit() - Deinitialize monitor rings
4307  * @pdev: DP pdev handle
4308  *
4309  */
4310 static void dp_mon_rings_deinit(struct dp_pdev *pdev)
4311 {
4312 	int mac_id = 0;
4313 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
4314 	struct dp_soc *soc = pdev->soc;
4315 
4316 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
4317 
4318 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4319 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
4320 							 pdev->pdev_id);
4321 
4322 		dp_srng_deinit(soc, &soc->rxdma_mon_status_ring[lmac_id],
4323 			       RXDMA_MONITOR_STATUS, 0);
4324 
4325 		if (!soc->wlan_cfg_ctx->rxdma1_enable)
4326 			continue;
4327 
4328 		dp_srng_deinit(soc, &soc->rxdma_mon_buf_ring[lmac_id],
4329 			       RXDMA_MONITOR_BUF, 0);
4330 		dp_srng_deinit(soc, &soc->rxdma_mon_dst_ring[lmac_id],
4331 			       RXDMA_MONITOR_DST, 0);
4332 		dp_srng_deinit(soc, &soc->rxdma_mon_desc_ring[lmac_id],
4333 			       RXDMA_MONITOR_DESC, 0);
4334 	}
4335 }
4336 
4337 /**
4338  * dp_mon_rings_free() - free monitor rings
4339  * @pdev: Datapath pdev handle
4340  *
4341  */
4342 static void dp_mon_rings_free(struct dp_pdev *pdev)
4343 {
4344 	int mac_id = 0;
4345 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
4346 	struct dp_soc *soc = pdev->soc;
4347 
4348 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
4349 
4350 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4351 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
4352 							 pdev->pdev_id);
4353 
4354 		dp_srng_free(soc, &soc->rxdma_mon_status_ring[lmac_id]);
4355 
4356 		if (!soc->wlan_cfg_ctx->rxdma1_enable)
4357 			continue;
4358 
4359 		dp_srng_free(soc, &soc->rxdma_mon_buf_ring[lmac_id]);
4360 		dp_srng_free(soc, &soc->rxdma_mon_dst_ring[lmac_id]);
4361 		dp_srng_free(soc, &soc->rxdma_mon_desc_ring[lmac_id]);
4362 	}
4363 }
4364 
4365 /**
4366  * dp_mon_rings_init() - Initialize monitor srng rings
4367  * @soc: Datapath soc handle
4368  * @pdev: Datapath pdev handle
4369  * Return: QDF_STATUS_SUCCESS on success
4370  *	   QDF_STATUS_E_NOMEM on failure
4371  */
4372 static
4373 QDF_STATUS dp_mon_rings_init(struct dp_soc *soc, struct dp_pdev *pdev)
4374 {
4375 	int mac_id = 0;
4376 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
4377 
4378 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
4379 
4380 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4381 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
4382 							 pdev->pdev_id);
4383 
4384 		if (dp_srng_init(soc, &soc->rxdma_mon_status_ring[lmac_id],
4385 				 RXDMA_MONITOR_STATUS, 0, lmac_id)) {
4386 			dp_init_err("%pK: " RNG_ERR "rxdma_mon_status_ring", soc);
4387 			goto fail1;
4388 		}
4389 
4390 		if (!soc->wlan_cfg_ctx->rxdma1_enable)
4391 			continue;
4392 
4393 		if (dp_srng_init(soc, &soc->rxdma_mon_buf_ring[lmac_id],
4394 				 RXDMA_MONITOR_BUF, 0, lmac_id)) {
4395 			dp_init_err("%pK: " RNG_ERR "rxdma_mon_buf_ring ", soc);
4396 			goto fail1;
4397 		}
4398 
4399 		if (dp_srng_init(soc, &soc->rxdma_mon_dst_ring[lmac_id],
4400 				 RXDMA_MONITOR_DST, 0, lmac_id)) {
4401 			dp_init_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", soc);
4402 			goto fail1;
4403 		}
4404 
4405 		if (dp_srng_init(soc, &soc->rxdma_mon_desc_ring[lmac_id],
4406 				 RXDMA_MONITOR_DESC, 0, lmac_id)) {
4407 			dp_init_err("%pK: " RNG_ERR "rxdma_mon_desc_ring", soc);
4408 			goto fail1;
4409 		}
4410 	}
4411 	return QDF_STATUS_SUCCESS;
4412 
4413 fail1:
4414 	dp_mon_rings_deinit(pdev);
4415 	return QDF_STATUS_E_NOMEM;
4416 }
4417 
4418 /**
4419  * dp_mon_rings_alloc() - Allocate memory for monitor srng rings
4420  * @soc: Datapath soc handle
4421  * @pdev: Datapath pdev handle
4422  *
4423  * Return: QDF_STATUS_SUCCESS on success
4424  *	   QDF_STATUS_E_NOMEM on failure
4425  */
4426 static
4427 QDF_STATUS dp_mon_rings_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
4428 {
4429 	int mac_id = 0;
4430 	int entries;
4431 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
4432 
4433 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
4434 
4435 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4436 		int lmac_id =
4437 		dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id);
4438 		entries = wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
4439 		if (dp_srng_alloc(soc, &soc->rxdma_mon_status_ring[lmac_id],
4440 				  RXDMA_MONITOR_STATUS, entries, 0)) {
4441 			dp_init_err("%pK: " RNG_ERR "rxdma_mon_status_ring", soc);
4442 			goto fail1;
4443 		}
4444 
4445 		if (!soc->wlan_cfg_ctx->rxdma1_enable)
4446 			continue;
4447 
4448 		entries = wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
4449 		if (dp_srng_alloc(soc, &soc->rxdma_mon_buf_ring[lmac_id],
4450 				  RXDMA_MONITOR_BUF, entries, 0)) {
4451 			dp_init_err("%pK: " RNG_ERR "rxdma_mon_buf_ring ", soc);
4452 			goto fail1;
4453 		}
4454 
4455 		entries = wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
4456 		if (dp_srng_alloc(soc, &soc->rxdma_mon_dst_ring[lmac_id],
4457 				  RXDMA_MONITOR_DST, entries, 0)) {
4458 			dp_init_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", soc);
4459 			goto fail1;
4460 		}
4461 
4462 		entries = wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
4463 		if (dp_srng_alloc(soc, &soc->rxdma_mon_desc_ring[lmac_id],
4464 				  RXDMA_MONITOR_DESC, entries, 0)) {
4465 			dp_init_err("%pK: " RNG_ERR "rxdma_mon_desc_ring", soc);
4466 			goto fail1;
4467 		}
4468 	}
4469 	return QDF_STATUS_SUCCESS;
4470 
4471 fail1:
4472 	dp_mon_rings_free(pdev);
4473 	return QDF_STATUS_E_NOMEM;
4474 }
4475 #else
4476 static void dp_mon_rings_free(struct dp_pdev *pdev)
4477 {
4478 }
4479 
4480 static void dp_mon_rings_deinit(struct dp_pdev *pdev)
4481 {
4482 }
4483 
4484 static
4485 QDF_STATUS dp_mon_rings_init(struct dp_soc *soc, struct dp_pdev *pdev)
4486 {
4487 	return QDF_STATUS_SUCCESS;
4488 }
4489 
4490 static
4491 QDF_STATUS dp_mon_rings_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
4492 {
4493 	return QDF_STATUS_SUCCESS;
4494 }
4495 #endif
4496 
4497 #ifdef ATH_SUPPORT_EXT_STAT
4498 /* dp_peer_cal_clients_stats_update() - update peer stats on cal client timer
4499  * @soc: Datapath SOC
4500  * @peer: Datapath peer
4501  * @arg: argument to iter function
4502  */
4503 static void
4504 dp_peer_cal_clients_stats_update(struct dp_soc *soc,
4505 				 struct dp_peer *peer,
4506 				 void *arg)
4507 {
4508 	dp_cal_client_update_peer_stats(&peer->stats);
4509 }
4510 
4511 /* dp_iterate_update_peer_list() - update peer stats on cal client timer
4512  * @pdev_hdl: pdev handle
4513  */
4514 void  dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
4515 {
4516 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
4517 
4518 	dp_pdev_iterate_peer(pdev, dp_peer_cal_clients_stats_update, NULL,
4519 			     DP_MOD_ID_CDP);
4520 }
4521 #else
4522 void  dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
4523 {
4524 }
4525 #endif
4526 
4527 /*
4528  * dp_htt_ppdu_stats_attach() - attach resources for HTT PPDU stats processing
4529  * @pdev: Datapath PDEV handle
4530  *
4531  * Return: QDF_STATUS_SUCCESS: Success
4532  *         QDF_STATUS_E_NOMEM: Error
4533  */
4534 static QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev)
4535 {
4536 	pdev->ppdu_tlv_buf = qdf_mem_malloc(HTT_T2H_MAX_MSG_SIZE);
4537 
4538 	if (!pdev->ppdu_tlv_buf) {
4539 		QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "ppdu_tlv_buf alloc fail");
4540 		return QDF_STATUS_E_NOMEM;
4541 	}
4542 
4543 	return QDF_STATUS_SUCCESS;
4544 }
4545 
4546 #ifdef DP_TX_HW_DESC_HISTORY
4547 /**
4548  * dp_soc_tx_hw_desc_history_attach() - Attach TX HW descriptor history
4549  *
4550  * @soc: DP soc handle
4551  *
4552  * Return: None
4553  */
4554 static void dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc)
4555 {
4556 	soc->tx_hw_desc_history = dp_context_alloc_mem(
4557 			soc, DP_TX_HW_DESC_HIST_TYPE,
4558 			sizeof(*soc->tx_hw_desc_history));
4559 	if (soc->tx_hw_desc_history)
4560 		soc->tx_hw_desc_history->index = 0;
4561 }
4562 
4563 static void dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc)
4564 {
4565 	dp_context_free_mem(soc, DP_TX_HW_DESC_HIST_TYPE,
4566 			    soc->tx_hw_desc_history);
4567 }
4568 
4569 #else /* DP_TX_HW_DESC_HISTORY */
4570 static inline void
4571 dp_soc_tx_hw_desc_history_attach(struct dp_soc *soc)
4572 {
4573 }
4574 
4575 static inline void
4576 dp_soc_tx_hw_desc_history_detach(struct dp_soc *soc)
4577 {
4578 }
4579 #endif /* DP_TX_HW_DESC_HISTORY */
4580 
4581 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
4582 #ifndef RX_DEFRAG_DO_NOT_REINJECT
4583 /**
4584  * dp_soc_rx_reinject_ring_history_attach() - Attach the reo reinject ring
4585  *					    history.
4586  * @soc: DP soc handle
4587  *
4588  * Return: None
4589  */
4590 static void dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
4591 {
4592 	soc->rx_reinject_ring_history =
4593 		dp_context_alloc_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
4594 				     sizeof(struct dp_rx_reinject_history));
4595 	if (soc->rx_reinject_ring_history)
4596 		qdf_atomic_init(&soc->rx_reinject_ring_history->index);
4597 }
4598 #else /* RX_DEFRAG_DO_NOT_REINJECT */
4599 static inline void
4600 dp_soc_rx_reinject_ring_history_attach(struct dp_soc *soc)
4601 {
4602 }
4603 #endif /* RX_DEFRAG_DO_NOT_REINJECT */
4604 
4605 /**
4606  * dp_soc_rx_history_attach() - Attach the ring history record buffers
4607  * @soc: DP soc structure
4608  *
4609  * This function allocates the memory for recording the rx ring, rx error
4610  * ring and the reinject ring entries. There is no error returned in case
4611  * of allocation failure since the record function checks if the history is
4612  * initialized or not. We do not want to fail the driver load in case of
4613  * failure to allocate memory for debug history.
4614  *
4615  * Returns: None
4616  */
4617 static void dp_soc_rx_history_attach(struct dp_soc *soc)
4618 {
4619 	int i;
4620 	uint32_t rx_ring_hist_size;
4621 	uint32_t rx_refill_ring_hist_size;
4622 
4623 	rx_ring_hist_size = sizeof(*soc->rx_ring_history[0]);
4624 	rx_refill_ring_hist_size = sizeof(*soc->rx_refill_ring_history[0]);
4625 
4626 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
4627 		soc->rx_ring_history[i] = dp_context_alloc_mem(
4628 				soc, DP_RX_RING_HIST_TYPE, rx_ring_hist_size);
4629 		if (soc->rx_ring_history[i])
4630 			qdf_atomic_init(&soc->rx_ring_history[i]->index);
4631 	}
4632 
4633 	soc->rx_err_ring_history = dp_context_alloc_mem(
4634 			soc, DP_RX_ERR_RING_HIST_TYPE, rx_ring_hist_size);
4635 	if (soc->rx_err_ring_history)
4636 		qdf_atomic_init(&soc->rx_err_ring_history->index);
4637 
4638 	dp_soc_rx_reinject_ring_history_attach(soc);
4639 
4640 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4641 		soc->rx_refill_ring_history[i] = dp_context_alloc_mem(
4642 						soc,
4643 						DP_RX_REFILL_RING_HIST_TYPE,
4644 						rx_refill_ring_hist_size);
4645 
4646 		if (soc->rx_refill_ring_history[i])
4647 			qdf_atomic_init(&soc->rx_refill_ring_history[i]->index);
4648 	}
4649 }
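
/*
 * Sketch of the guarded-record idiom these buffers rely on (names are
 * illustrative; the real record helpers live elsewhere):
 *
 *	if (!soc->rx_ring_history[ring_id])
 *		return;	 -- history absent, skip silently
 *	idx = qdf_atomic_inc_return(&hist->index) & (hist_max - 1);
 *	hist->entry[idx] = ...;
 *
 * which is why an allocation failure above is deliberately not fatal.
 */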
4650 
4651 static void dp_soc_rx_history_detach(struct dp_soc *soc)
4652 {
4653 	int i;
4654 
4655 	for (i = 0; i < MAX_REO_DEST_RINGS; i++)
4656 		dp_context_free_mem(soc, DP_RX_RING_HIST_TYPE,
4657 				    soc->rx_ring_history[i]);
4658 
4659 	dp_context_free_mem(soc, DP_RX_ERR_RING_HIST_TYPE,
4660 			    soc->rx_err_ring_history);
4661 
4662 	/*
4663 	 * No need for a featurized detach since qdf_mem_free takes
4664 	 * care of NULL pointer.
4665 	 */
4666 	dp_context_free_mem(soc, DP_RX_REINJECT_RING_HIST_TYPE,
4667 			    soc->rx_reinject_ring_history);
4668 
4669 	for (i = 0; i < MAX_PDEV_CNT; i++)
4670 		dp_context_free_mem(soc, DP_RX_REFILL_RING_HIST_TYPE,
4671 				    soc->rx_refill_ring_history[i]);
4672 }
4673 
4674 #else
4675 static inline void dp_soc_rx_history_attach(struct dp_soc *soc)
4676 {
4677 }
4678 
4679 static inline void dp_soc_rx_history_detach(struct dp_soc *soc)
4680 {
4681 }
4682 #endif
4683 
4684 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
4685 /**
4686  * dp_soc_tx_history_attach() - Attach the ring history record buffers
4687  * @soc: DP soc structure
4688  *
4689  * This function allocates the memory for recording the tx tcl ring and
4690  * the tx comp ring entries. There is no error returned in case
4691  * of allocation failure since the record function checks if the history is
4692  * initialized or not. We do not want to fail the driver load in case of
4693  * failure to allocate memory for debug history.
4694  *
4695  * Returns: None
4696  */
4697 static void dp_soc_tx_history_attach(struct dp_soc *soc)
4698 {
4699 	uint32_t tx_tcl_hist_size;
4700 	uint32_t tx_comp_hist_size;
4701 
4702 	tx_tcl_hist_size = sizeof(*soc->tx_tcl_history);
4703 	soc->tx_tcl_history = dp_context_alloc_mem(soc, DP_TX_TCL_HIST_TYPE,
4704 						   tx_tcl_hist_size);
4705 	if (soc->tx_tcl_history)
4706 		qdf_atomic_init(&soc->tx_tcl_history->index);
4707 
4708 	tx_comp_hist_size = sizeof(*soc->tx_comp_history);
4709 	soc->tx_comp_history = dp_context_alloc_mem(soc, DP_TX_COMP_HIST_TYPE,
4710 						    tx_comp_hist_size);
4711 	if (soc->tx_comp_history)
4712 		qdf_atomic_init(&soc->tx_comp_history->index);
4713 }
4714 
4715 /**
4716  * dp_soc_tx_history_detach() - Detach the ring history record buffers
4717  * @soc: DP soc structure
4718  *
4719  * This function frees the memory for recording the tx tcl ring and
4720  * the tx comp ring entries.
4721  *
4722  * Returns: None
4723  */
4724 static void dp_soc_tx_history_detach(struct dp_soc *soc)
4725 {
4726 	dp_context_free_mem(soc, DP_TX_TCL_HIST_TYPE, soc->tx_tcl_history);
4727 	dp_context_free_mem(soc, DP_TX_COMP_HIST_TYPE, soc->tx_comp_history);
4728 }
4729 
4730 #else
4731 static inline void dp_soc_tx_history_attach(struct dp_soc *soc)
4732 {
4733 }
4734 
4735 static inline void dp_soc_tx_history_detach(struct dp_soc *soc)
4736 {
4737 }
4738 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
4739 
4740 /*
4741  * dp_pdev_attach_wifi3() - attach txrx pdev
4742  * @txrx_soc: Datapath SOC handle
4743  * @htc_handle: HTC handle for host-target interface
4744  * @qdf_osdev: QDF OS device
4745  * @pdev_id: PDEV ID
4746  *
4747  * Return: QDF_STATUS
4748  */
4749 static inline QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
4750 					      HTC_HANDLE htc_handle,
4751 					      qdf_device_t qdf_osdev,
4752 					      uint8_t pdev_id)
4753 {
4754 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4755 	struct dp_pdev *pdev = NULL;
4756 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
4757 	int nss_cfg;
4758 
4759 	pdev = dp_context_alloc_mem(soc, DP_PDEV_TYPE, sizeof(*pdev));
4760 	if (!pdev) {
4761 		dp_init_err("%pK: DP PDEV memory allocation failed",
4762 			    soc);
4763 		goto fail0;
4764 	}
4765 	wlan_minidump_log(pdev, sizeof(*pdev), soc->ctrl_psoc,
4766 			  WLAN_MD_DP_PDEV, "dp_pdev");
4767 
4768 	soc_cfg_ctx = soc->wlan_cfg_ctx;
4769 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
4770 
4771 	if (!pdev->wlan_cfg_ctx) {
4772 		dp_init_err("%pK: pdev cfg_attach failed", soc);
4773 		goto fail1;
4774 	}
4775 
4776 	/*
4777 	 * set nss pdev config based on soc config
4778 	 */
4779 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
4780 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
4781 					 (nss_cfg & (1 << pdev_id)));
4782 
4783 	pdev->soc = soc;
4784 	pdev->pdev_id = pdev_id;
4785 	soc->pdev_list[pdev_id] = pdev;
4786 
4787 	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
4788 	soc->pdev_count++;
4789 
4790 	/* Allocate memory for pdev srng rings */
4791 	if (dp_pdev_srng_alloc(pdev)) {
4792 		dp_init_err("%pK: dp_pdev_srng_alloc failed", soc);
4793 		goto fail2;
4794 	}
4795 
4796 	/* Rx specific init */
4797 	if (dp_rx_pdev_desc_pool_alloc(pdev)) {
4798 		dp_init_err("%pK: dp_rx_pdev_attach failed", soc);
4799 		goto fail3;
4800 	}
4801 
4802 	/* Rx monitor mode specific init */
4803 	if (dp_rx_pdev_mon_desc_pool_alloc(pdev)) {
4804 		dp_init_err("%pK: dp_rx_pdev_mon_attach failed", soc);
4805 		goto fail4;
4806 	}
4807 
4808 	return QDF_STATUS_SUCCESS;
4809 fail4:
4810 	dp_rx_pdev_desc_pool_free(pdev);
4811 fail3:
4812 	dp_pdev_srng_free(pdev);
4813 fail2:
4814 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
4815 fail1:
4816 	soc->pdev_list[pdev_id] = NULL;
4817 	qdf_mem_free(pdev);
4818 fail0:
4819 	return QDF_STATUS_E_FAILURE;
4820 }
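
/*
 * Note on the failN labels above: each label unwinds exactly the
 * allocations that succeeded before the failing step, in reverse
 * order (fail4 frees the Rx descriptor pools, fail3 the pdev srngs,
 * fail2 the pdev cfg context), so nothing allocated here is leaked
 * on a mid-attach failure.
 */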
4821 
4822 /*
4823  * dp_rxdma_ring_cleanup() - configure the RX DMA rings
4824  * @soc: data path SoC handle
4825  * @pdev: Physical device handle
4826  *
4827  * Return: void
4828  */
4829 #ifdef QCA_HOST2FW_RXBUF_RING
4830 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
4831 {
4832 	int i;
4833 
4834 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
4835 		dp_srng_deinit(soc, &pdev->rx_mac_buf_ring[i], RXDMA_BUF, 1);
4836 		dp_srng_free(soc, &pdev->rx_mac_buf_ring[i]);
4837 	}
4838 
4839 	if (soc->reap_timer_init) {
4840 		qdf_timer_free(&soc->mon_reap_timer);
4841 		soc->reap_timer_init = 0;
4842 	}
4843 }
4844 #else
4845 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
4846 {
4847 	if (soc->lmac_timer_init) {
4848 		qdf_timer_stop(&soc->lmac_reap_timer);
4849 		qdf_timer_free(&soc->lmac_reap_timer);
4850 		soc->lmac_timer_init = 0;
4851 	}
4852 }
4853 #endif
4854 
4855 /*
4856  * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
4857  * @pdev: device object
4858  *
4859  * Return: void
4860  */
4861 static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
4862 {
4863 	struct dp_neighbour_peer *peer = NULL;
4864 	struct dp_neighbour_peer *temp_peer = NULL;
4865 
4866 	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
4867 			   neighbour_peer_list_elem, temp_peer) {
4868 		/* delete this peer from the list */
4869 		TAILQ_REMOVE(&pdev->neighbour_peers_list,
4870 			     peer, neighbour_peer_list_elem);
4871 		qdf_mem_free(peer);
4872 	}
4873 
4874 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
4875 }
4876 
4877 /**
4878  * dp_htt_ppdu_stats_detach() - detach stats resources
4879  * @pdev: Datapath PDEV handle
4880  *
4881  * Return: void
4882  */
4883 static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
4884 {
4885 	struct ppdu_info *ppdu_info, *ppdu_info_next;
4886 
4887 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
4888 			   ppdu_info_list_elem, ppdu_info_next) {
4889 		if (!ppdu_info)
4890 			break;
4891 		TAILQ_REMOVE(&pdev->ppdu_info_list,
4892 			     ppdu_info, ppdu_info_list_elem);
4893 		pdev->list_depth--;
4894 		qdf_assert_always(ppdu_info->nbuf);
4895 		qdf_nbuf_free(ppdu_info->nbuf);
4896 		qdf_mem_free(ppdu_info);
4897 	}
4898 
4899 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->sched_comp_ppdu_list,
4900 			   ppdu_info_list_elem, ppdu_info_next) {
4901 		if (!ppdu_info)
4902 			break;
4903 		TAILQ_REMOVE(&pdev->sched_comp_ppdu_list,
4904 			     ppdu_info, ppdu_info_list_elem);
4905 		pdev->sched_comp_list_depth--;
4906 		qdf_assert_always(ppdu_info->nbuf);
4907 		qdf_nbuf_free(ppdu_info->nbuf);
4908 		qdf_mem_free(ppdu_info);
4909 	}
4910 
4911 	if (pdev->ppdu_tlv_buf)
4912 		qdf_mem_free(pdev->ppdu_tlv_buf);
4914 }
4915 
4916 #ifdef WLAN_DP_PENDING_MEM_FLUSH
4917 /**
4918  * dp_pdev_flush_pending_vdevs() - Flush all delete pending vdevs in pdev
4919  * @pdev: Datapath PDEV handle
4920  *
4921  * This is the last chance to flush all pending dp vdevs/peers,
4922  * covering peer/vdev leak cases such as non-SSR with a missing
4923  * peer unmap.
4924  *
4925  * Return: None
4926  */
4927 static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
4928 {
4929 	struct dp_vdev *vdev = NULL;
4930 	struct dp_soc *soc = pdev->soc;
4931 
4932 	if (TAILQ_EMPTY(&soc->inactive_vdev_list))
4933 		return;
4934 
4935 	while (true) {
4936 		qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
4937 		TAILQ_FOREACH(vdev, &soc->inactive_vdev_list,
4938 			      inactive_list_elem) {
4939 			if (vdev->pdev == pdev)
4940 				break;
4941 		}
4942 		qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
4943 
4944 		/* vdev will be freed when all peers get cleanup */
4945 		if (vdev)
4946 			dp_vdev_flush_peers((struct cdp_vdev *)vdev, 0);
4947 		else
4948 			break;
4949 	}
4950 }
4951 #else
4952 static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
4953 {
4954 }
4955 #endif
4956 
4957 /**
4958  * dp_pdev_deinit() - Deinit txrx pdev
4959  * @txrx_pdev: Datapath PDEV handle
4960  * @force: Force deinit
4961  *
4962  * Return: None
4963  */
4964 static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
4965 {
4966 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
4967 	qdf_nbuf_t curr_nbuf, next_nbuf;
4968 
4969 	if (pdev->pdev_deinit)
4970 		return;
4971 
4972 	dp_tx_me_exit(pdev);
4973 	dp_rx_fst_detach(pdev->soc, pdev);
4974 	dp_rx_pdev_mon_buffers_free(pdev);
4975 	dp_rx_pdev_buffers_free(pdev);
4976 	dp_rx_pdev_mon_desc_pool_deinit(pdev);
4977 	dp_rx_pdev_desc_pool_deinit(pdev);
4978 	dp_pdev_bkp_stats_detach(pdev);
4979 	dp_htt_ppdu_stats_detach(pdev);
4980 	dp_tx_ppdu_stats_detach(pdev);
4981 	qdf_event_destroy(&pdev->fw_peer_stats_event);
4982 	dp_cal_client_detach(&pdev->cal_client_ctx);
4983 	if (pdev->sojourn_buf)
4984 		qdf_nbuf_free(pdev->sojourn_buf);
4985 
4986 	dp_pdev_flush_pending_vdevs(pdev);
4987 	dp_tx_desc_flush(pdev, NULL, true);
4988 	dp_pktlogmod_exit(pdev);
4989 	dp_neighbour_peers_detach(pdev);
4990 
4991 	qdf_spinlock_destroy(&pdev->tx_mutex);
4992 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
4993 	qdf_spinlock_destroy(&pdev->ppdu_stats_lock);
4994 
4995 	if (pdev->invalid_peer)
4996 		qdf_mem_free(pdev->invalid_peer);
4997 
4998 	if (pdev->filter)
4999 		dp_mon_filter_dealloc(pdev);
5000 
5001 	dp_pdev_srng_deinit(pdev);
5002 
5003 	dp_ipa_uc_detach(pdev->soc, pdev);
5004 	dp_cleanup_ipa_rx_refill_buf_ring(pdev->soc, pdev);
5005 	dp_rxdma_ring_cleanup(pdev->soc, pdev);
5006 
5007 	curr_nbuf = pdev->invalid_peer_head_msdu;
5008 	while (curr_nbuf) {
5009 		next_nbuf = qdf_nbuf_next(curr_nbuf);
5010 		qdf_nbuf_free(curr_nbuf);
5011 		curr_nbuf = next_nbuf;
5012 	}
5013 	pdev->invalid_peer_head_msdu = NULL;
5014 	pdev->invalid_peer_tail_msdu = NULL;
5015 
5016 	dp_wdi_event_detach(pdev);
5017 	pdev->pdev_deinit = 1;
5018 }
5019 
5020 /**
5021  * dp_pdev_deinit_wifi3() - Deinit txrx pdev
5022  * @psoc: Datapath psoc handle
5023  * @pdev_id: Id of datapath PDEV handle
5024  * @force: Force deinit
5025  *
5026  * Return: QDF_STATUS
5027  */
5028 static QDF_STATUS
5029 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
5030 		     int force)
5031 {
5032 	struct dp_pdev *txrx_pdev;
5033 
5034 	txrx_pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
5035 						       pdev_id);
5036 
5037 	if (!txrx_pdev)
5038 		return QDF_STATUS_E_FAILURE;
5039 
5040 	dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force);
5041 
5042 	return QDF_STATUS_SUCCESS;
5043 }
5044 
5045 /*
5046  * dp_pdev_post_attach() - Do post pdev attach after dev_alloc_name
5047  * @txrx_pdev: Datapath PDEV handle
5048  *
5049  * Return: None
5050  */
5051 static void dp_pdev_post_attach(struct cdp_pdev *txrx_pdev)
5052 {
5053 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
5054 
5055 	dp_tx_capture_debugfs_init(pdev);
5056 
5057 	if (dp_pdev_htt_stats_dbgfs_init(pdev)) {
5058 		dp_init_err("%pK: Failed to initialize pdev HTT stats debugfs", pdev->soc);
5059 	}
5060 }
5061 
5062 /*
5063  * dp_pdev_post_attach_wifi3() - attach txrx pdev post
5064  * @psoc: Datapath soc handle
5065  * @pdev_id: pdev id of pdev
5066  *
5067  * Return: QDF_STATUS
5068  */
5069 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *soc,
5070 				     uint8_t pdev_id)
5071 {
5072 	struct dp_pdev *pdev;
5073 
5074 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
5075 						  pdev_id);
5076 
5077 	if (!pdev) {
5078 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
5079 			    (struct dp_soc *)soc, pdev_id);
5080 		return QDF_STATUS_E_FAILURE;
5081 	}
5082 
5083 	dp_pdev_post_attach((struct cdp_pdev *)pdev);
5084 	return QDF_STATUS_SUCCESS;
5085 }
5086 
5087 /*
5088  * dp_pdev_detach() - Complete rest of pdev detach
5089  * @txrx_pdev: Datapath PDEV handle
5090  * @force: Force deinit
5091  *
5092  * Return: None
5093  */
5094 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
5095 {
5096 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
5097 	struct dp_soc *soc = pdev->soc;
5098 
5099 	dp_pdev_htt_stats_dbgfs_deinit(pdev);
5100 	dp_rx_pdev_mon_desc_pool_free(pdev);
5101 	dp_rx_pdev_desc_pool_free(pdev);
5102 	dp_pdev_srng_free(pdev);
5103 
5104 	soc->pdev_count--;
5105 	soc->pdev_list[pdev->pdev_id] = NULL;
5106 
5107 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
5108 	wlan_minidump_remove(pdev, sizeof(*pdev), soc->ctrl_psoc,
5109 			     WLAN_MD_DP_PDEV, "dp_pdev");
5110 	dp_context_free_mem(soc, DP_PDEV_TYPE, pdev);
5111 }
5112 
5113 /*
5114  * dp_pdev_detach_wifi3() - detach txrx pdev
5115  * @psoc: Datapath soc handle
5116  * @pdev_id: pdev id of pdev
5117  * @force: Force detach
5118  *
5119  * Return: QDF_STATUS
5120  */
5121 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
5122 				       int force)
5123 {
5124 	struct dp_pdev *pdev;
5125 
5126 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
5127 						  pdev_id);
5128 
5129 	if (!pdev) {
5130 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
5131 			    (struct dp_soc *)psoc, pdev_id);
5132 		return QDF_STATUS_E_FAILURE;
5133 	}
5134 
5135 	dp_pdev_detach((struct cdp_pdev *)pdev, force);
5136 	return QDF_STATUS_SUCCESS;
5137 }
5138 
5139 /*
5140  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
5141  * @soc: DP SOC handle
5142  */
5143 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
5144 {
5145 	struct reo_desc_list_node *desc;
5146 	struct dp_rx_tid *rx_tid;
5147 
5148 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
5149 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
5150 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
5151 		rx_tid = &desc->rx_tid;
5152 		qdf_mem_unmap_nbytes_single(soc->osdev,
5153 			rx_tid->hw_qdesc_paddr,
5154 			QDF_DMA_BIDIRECTIONAL,
5155 			rx_tid->hw_qdesc_alloc_size);
5156 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
5157 		qdf_mem_free(desc);
5158 	}
5159 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
5160 	qdf_list_destroy(&soc->reo_desc_freelist);
5161 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
5162 }
5163 
5164 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
5165 /*
5166  * dp_reo_desc_deferred_freelist_create() - Initialize the resources used
5167  *                                          for deferred reo desc list
5168  * @psoc: Datapath soc handle
5169  *
5170  * Return: void
5171  */
5172 static void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
5173 {
5174 	qdf_spinlock_create(&soc->reo_desc_deferred_freelist_lock);
5175 	qdf_list_create(&soc->reo_desc_deferred_freelist,
5176 			REO_DESC_DEFERRED_FREELIST_SIZE);
5177 	soc->reo_desc_deferred_freelist_init = true;
5178 }
5179 
5180 /*
5181  * dp_reo_desc_deferred_freelist_destroy() - loop the deferred free list &
5182  *                                           free the leftover REO QDESCs
5183  * @soc: Datapath soc handle
5184  *
5185  * Return: void
5186  */
5187 static void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
5188 {
5189 	struct reo_desc_deferred_freelist_node *desc;
5190 
5191 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
5192 	soc->reo_desc_deferred_freelist_init = false;
5193 	while (qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
5194 	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
5195 		qdf_mem_unmap_nbytes_single(soc->osdev,
5196 					    desc->hw_qdesc_paddr,
5197 					    QDF_DMA_BIDIRECTIONAL,
5198 					    desc->hw_qdesc_alloc_size);
5199 		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
5200 		qdf_mem_free(desc);
5201 	}
5202 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
5203 
5204 	qdf_list_destroy(&soc->reo_desc_deferred_freelist);
5205 	qdf_spinlock_destroy(&soc->reo_desc_deferred_freelist_lock);
5206 }
5207 #else
5208 static inline void dp_reo_desc_deferred_freelist_create(struct dp_soc *soc)
5209 {
5210 }
5211 
5212 static inline void dp_reo_desc_deferred_freelist_destroy(struct dp_soc *soc)
5213 {
5214 }
5215 #endif /* WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
5216 
5217 /*
5218  * dp_soc_reset_txrx_ring_map() - reset tx ring map
5219  * @soc: DP SOC handle
5220  *
5221  */
5222 static void dp_soc_reset_txrx_ring_map(struct dp_soc *soc)
5223 {
5224 	uint32_t i;
5225 
5226 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
5227 		soc->tx_ring_map[i] = 0;
5228 }
5229 
5230 /*
5231  * dp_soc_print_inactive_objects() - prints inactive peer and vdev list
5232  * @soc: DP SOC handle
5233  *
5234  */
5235 static void dp_soc_print_inactive_objects(struct dp_soc *soc)
5236 {
5237 	struct dp_peer *peer = NULL;
5238 	struct dp_peer *tmp_peer = NULL;
5239 	struct dp_vdev *vdev = NULL;
5240 	struct dp_vdev *tmp_vdev = NULL;
5241 	int i = 0;
5242 	uint32_t count;
5243 
5244 	if (TAILQ_EMPTY(&soc->inactive_peer_list) &&
5245 	    TAILQ_EMPTY(&soc->inactive_vdev_list))
5246 		return;
5247 
5248 	TAILQ_FOREACH_SAFE(peer, &soc->inactive_peer_list,
5249 			   inactive_list_elem, tmp_peer) {
5250 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
5251 			count = qdf_atomic_read(&peer->mod_refs[i]);
5252 			if (count)
5253 				DP_PRINT_STATS("peer %pK Module id %u ==> %u",
5254 					       peer, i, count);
5255 		}
5256 	}
5257 
5258 	TAILQ_FOREACH_SAFE(vdev, &soc->inactive_vdev_list,
5259 			   inactive_list_elem, tmp_vdev) {
5260 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
5261 			count = qdf_atomic_read(&vdev->mod_refs[i]);
5262 			if (count)
5263 				DP_PRINT_STATS("vdev %pK Module id %u ==> %u",
5264 					       vdev, i, count);
5265 		}
5266 	}
5267 	QDF_BUG(0);
5268 }
5269 
5270 /**
5271  * dp_soc_deinit() - Deinitialize txrx SOC
5272  * @txrx_soc: Opaque DP SOC handle
5273  *
5274  * Return: None
5275  */
5276 static void dp_soc_deinit(void *txrx_soc)
5277 {
5278 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5279 	struct htt_soc *htt_soc = soc->htt_handle;
5280 
5281 	qdf_atomic_set(&soc->cmn_init_done, 0);
5282 
5283 	/* free peer tables & AST tables allocated during peer_map_attach */
5284 	if (soc->peer_map_attach_success) {
5285 		dp_peer_find_detach(soc);
5286 		soc->peer_map_attach_success = FALSE;
5287 	}
5288 
5289 	qdf_flush_work(&soc->htt_stats.work);
5290 	qdf_disable_work(&soc->htt_stats.work);
5291 
5292 	qdf_spinlock_destroy(&soc->htt_stats.lock);
5293 
5294 	dp_soc_reset_txrx_ring_map(soc);
5295 
5296 	dp_reo_desc_freelist_destroy(soc);
5297 	dp_reo_desc_deferred_freelist_destroy(soc);
5298 
5299 	DEINIT_RX_HW_STATS_LOCK(soc);
5300 
5301 	qdf_spinlock_destroy(&soc->ast_lock);
5302 
5303 	dp_peer_mec_spinlock_destroy(soc);
5304 
5305 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
5306 
5307 	qdf_nbuf_queue_free(&soc->invalid_buf_queue);
5308 
5309 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
5310 
5311 	qdf_spinlock_destroy(&soc->vdev_map_lock);
5312 
5313 	dp_reo_cmdlist_destroy(soc);
5314 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
5315 
5316 	dp_soc_tx_desc_sw_pools_deinit(soc);
5317 
5318 	dp_soc_srng_deinit(soc);
5319 
5320 	dp_hw_link_desc_ring_deinit(soc);
5321 
5322 	dp_soc_print_inactive_objects(soc);
5323 	qdf_spinlock_destroy(&soc->inactive_peer_list_lock);
5324 	qdf_spinlock_destroy(&soc->inactive_vdev_list_lock);
5325 
5326 	htt_soc_htc_dealloc(soc->htt_handle);
5327 
5328 	htt_soc_detach(htt_soc);
5329 
5330 	/* Free wbm sg list and reset flags in down path */
5331 	dp_rx_wbm_sg_list_deinit(soc);
5332 
5333 	wlan_minidump_remove(soc, sizeof(*soc), soc->ctrl_psoc,
5334 			     WLAN_MD_DP_SOC, "dp_soc");
5335 }
5336 
5337 /**
5338  * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
5339  * @txrx_soc: Opaque DP SOC handle
5340  *
5341  * Return: None
5342  */
5343 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc)
5344 {
5345 	dp_soc_deinit(txrx_soc);
5346 }
5347 
5348 /*
5349  * dp_soc_detach() - Detach rest of txrx SOC
5350  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
5351  *
5352  * Return: None
5353  */
5354 static void dp_soc_detach(struct cdp_soc_t *txrx_soc)
5355 {
5356 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5357 
5358 	soc->arch_ops.txrx_soc_detach(soc);
5359 
5360 	dp_soc_swlm_detach(soc);
5361 	dp_soc_tx_desc_sw_pools_free(soc);
5362 	dp_soc_srng_free(soc);
5363 	dp_hw_link_desc_ring_free(soc);
5364 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
5365 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
5366 	dp_soc_tx_hw_desc_history_detach(soc);
5367 	dp_soc_tx_history_detach(soc);
5368 	dp_soc_rx_history_detach(soc);
5369 	if (soc->mon_vdev_timer_state & MON_VDEV_TIMER_INIT) {
5370 		qdf_timer_free(&soc->mon_vdev_timer);
5371 		soc->mon_vdev_timer_state = 0;
5372 	}
5373 
5374 	qdf_mem_free(soc);
5375 }
5376 
5377 /*
5378  * dp_soc_detach_wifi3() - Detach txrx SOC
5379  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
5380  *
5381  * Return: None
5382  */
5383 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc)
5384 {
5385 	dp_soc_detach(txrx_soc);
5386 }
5387 
5388 #if !defined(DISABLE_MON_CONFIG)
5389 /**
5390  * dp_mon_htt_srng_setup() - Prepare HTT messages for Monitor rings
5391  * @soc: soc handle
5392  * @pdev: physical device handle
 * @mac_id: lmac id used to index the monitor ring arrays
 * @mac_for_pdev: mac id mapped to the pdev, carried in the HTT message
5395  *
5396  * Return: non-zero for failure, zero for success
5397  */
5398 static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
5399 					struct dp_pdev *pdev,
5400 					int mac_id,
5401 					int mac_for_pdev)
5402 {
5403 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5404 
5405 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
5406 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
5407 					soc->rxdma_mon_buf_ring[mac_id]
5408 					.hal_srng,
5409 					RXDMA_MONITOR_BUF);
5410 
5411 		if (status != QDF_STATUS_SUCCESS) {
5412 			dp_err("Failed to send htt srng setup message for Rxdma mon buf ring");
5413 			return status;
5414 		}
5415 
5416 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
5417 					soc->rxdma_mon_dst_ring[mac_id]
5418 					.hal_srng,
5419 					RXDMA_MONITOR_DST);
5420 
5421 		if (status != QDF_STATUS_SUCCESS) {
5422 			dp_err("Failed to send htt srng setup message for Rxdma mon dst ring");
5423 			return status;
5424 		}
5425 
5426 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
5427 					soc->rxdma_mon_status_ring[mac_id]
5428 					.hal_srng,
5429 					RXDMA_MONITOR_STATUS);
5430 
5431 		if (status != QDF_STATUS_SUCCESS) {
5432 			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
5433 			return status;
5434 		}
5435 
		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
					soc->rxdma_mon_desc_ring[mac_id]
					.hal_srng,
					RXDMA_MONITOR_DESC);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt srng setup message for Rxdma mon desc ring");
5443 			return status;
5444 		}
5445 	} else {
5446 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
5447 					soc->rxdma_mon_status_ring[mac_id]
5448 					.hal_srng,
5449 					RXDMA_MONITOR_STATUS);
5450 
5451 		if (status != QDF_STATUS_SUCCESS) {
5452 			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
5453 			return status;
5454 		}
5455 	}
5456 
5457 	return status;
5458 
5459 }
5460 #else
5461 static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
5462 					struct dp_pdev *pdev,
5463 					int mac_id,
5464 					int mac_for_pdev)
5465 {
5466 	return QDF_STATUS_SUCCESS;
5467 }
5468 #endif
5469 
5470 /*
 * dp_rxdma_ring_config() - configure the RX DMA rings
 * @soc: data path SoC handle
 *
 * This function is used to configure the MAC rings.
 * On MCL, the host provides buffers in the Host2FW ring;
 * FW refills (copies) buffers to the ring and updates
 * ring_idx in the register.
 *
 * Return: zero on success, non-zero on failure
5481  */
5482 #ifdef QCA_HOST2FW_RXBUF_RING
5483 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
5484 {
5485 	int i;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	for (i = 0; i < MAX_PDEV_CNT; i++) {
5488 		struct dp_pdev *pdev = soc->pdev_list[i];
5489 
5490 		if (pdev) {
5491 			int mac_id;
5492 			bool dbs_enable = 0;
5493 			int max_mac_rings =
5494 				 wlan_cfg_get_num_mac_rings
5495 				(pdev->wlan_cfg_ctx);
5496 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);
5497 
5498 			htt_srng_setup(soc->htt_handle, 0,
5499 				       soc->rx_refill_buf_ring[lmac_id]
5500 				       .hal_srng,
5501 				       RXDMA_BUF);
5502 
5503 			if (pdev->rx_refill_buf_ring2.hal_srng)
5504 				htt_srng_setup(soc->htt_handle, 0,
5505 					pdev->rx_refill_buf_ring2.hal_srng,
5506 					RXDMA_BUF);
5507 
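			/* Query the control-plane callback, when registered,
			 * for 2x2 DBS capability; without DBS only a single
			 * MAC ring is programmed below.
			 */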
5508 			if (soc->cdp_soc.ol_ops->
5509 				is_hw_dbs_2x2_capable) {
5510 				dbs_enable = soc->cdp_soc.ol_ops->
5511 					is_hw_dbs_2x2_capable(
5512 							(void *)soc->ctrl_psoc);
5513 			}
5514 
5515 			if (dbs_enable) {
5516 				QDF_TRACE(QDF_MODULE_ID_TXRX,
5517 				QDF_TRACE_LEVEL_ERROR,
5518 				FL("DBS enabled max_mac_rings %d"),
5519 					 max_mac_rings);
5520 			} else {
5521 				max_mac_rings = 1;
5522 				QDF_TRACE(QDF_MODULE_ID_TXRX,
5523 					 QDF_TRACE_LEVEL_ERROR,
5524 					 FL("DBS disabled, max_mac_rings %d"),
5525 					 max_mac_rings);
5526 			}
5527 
5528 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5529 					 FL("pdev_id %d max_mac_rings %d"),
5530 					 pdev->pdev_id, max_mac_rings);
5531 
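			/* Program the per-MAC RX buffer and error destination
			 * rings, then the monitor rings, for each MAC ring
			 * serving this pdev.
			 */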
5532 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
5533 				int mac_for_pdev =
5534 					dp_get_mac_id_for_pdev(mac_id,
5535 							       pdev->pdev_id);
5536 				/*
5537 				 * Obtain lmac id from pdev to access the LMAC
5538 				 * ring in soc context
5539 				 */
5540 				lmac_id =
5541 				dp_get_lmac_id_for_pdev_id(soc,
5542 							   mac_id,
5543 							   pdev->pdev_id);
5544 				QDF_TRACE(QDF_MODULE_ID_TXRX,
5545 					 QDF_TRACE_LEVEL_ERROR,
5546 					 FL("mac_id %d"), mac_for_pdev);
5547 
5548 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
5549 					 pdev->rx_mac_buf_ring[mac_id]
5550 						.hal_srng,
5551 					 RXDMA_BUF);
5552 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
5553 				soc->rxdma_err_dst_ring[lmac_id]
5554 					.hal_srng,
5555 					RXDMA_DST);
5556 
5557 				/* Configure monitor mode rings */
5558 				status = dp_mon_htt_srng_setup(soc, pdev,
5559 							       lmac_id,
5560 							       mac_for_pdev);
5561 				if (status != QDF_STATUS_SUCCESS) {
5562 					dp_err("Failed to send htt monitor messages to target");
5563 					return status;
5564 				}
5565 
5566 			}
5567 		}
5568 	}
5569 
5570 	/*
5571 	 * Timer to reap rxdma status rings.
5572 	 * Needed until we enable ppdu end interrupts
5573 	 */
5574 	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
5575 		       dp_mon_reap_timer_handler, (void *)soc,
5576 		       QDF_TIMER_TYPE_WAKE_APPS);
5577 	soc->reap_timer_init = 1;
5578 	qdf_timer_init(soc->osdev, &soc->mon_vdev_timer,
5579 		       dp_mon_vdev_timer, (void *)soc,
5580 		       QDF_TIMER_TYPE_WAKE_APPS);
5581 	soc->mon_vdev_timer_state |= MON_VDEV_TIMER_INIT;
5582 	return status;
5583 }
5584 #else
5585 /* This is only for WIN */
5586 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
5587 {
5588 	int i;
5589 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5590 	int mac_for_pdev;
5591 	int lmac_id;
5592 
5593 	for (i = 0; i < MAX_PDEV_CNT; i++) {
		struct dp_pdev *pdev = soc->pdev_list[i];
5595 
5596 		if (!pdev)
5597 			continue;
5598 
5599 		mac_for_pdev = i;
5600 		lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);
5601 
5602 		htt_srng_setup(soc->htt_handle, mac_for_pdev,
5603 			       soc->rx_refill_buf_ring[lmac_id].
5604 			       hal_srng, RXDMA_BUF);
5605 #ifndef DISABLE_MON_CONFIG
5606 
5607 		if (soc->wlan_cfg_ctx->rxdma1_enable &&
5608 		    wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx)) {
5609 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
5610 				       soc->rxdma_mon_buf_ring[lmac_id].hal_srng,
5611 				       RXDMA_MONITOR_BUF);
5612 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
5613 				       soc->rxdma_mon_dst_ring[lmac_id].hal_srng,
5614 				       RXDMA_MONITOR_DST);
5615 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
5616 				       soc->rxdma_mon_desc_ring[lmac_id].hal_srng,
5617 				       RXDMA_MONITOR_DESC);
5618 		}
5619 		htt_srng_setup(soc->htt_handle, mac_for_pdev,
5620 			       soc->rxdma_mon_status_ring[lmac_id].hal_srng,
5621 			       RXDMA_MONITOR_STATUS);
5622 #endif
5623 		htt_srng_setup(soc->htt_handle, mac_for_pdev,
5624 			       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
5625 			       RXDMA_DST);
5626 	}
5627 
5628 	/* Configure LMAC rings in Polled mode */
5629 	if (soc->lmac_polled_mode) {
5630 		/*
5631 		 * Timer to reap lmac rings.
5632 		 */
5633 		qdf_timer_init(soc->osdev, &soc->lmac_reap_timer,
5634 			       dp_service_lmac_rings, (void *)soc,
5635 			       QDF_TIMER_TYPE_WAKE_APPS);
5636 		soc->lmac_timer_init = 1;
5637 		qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
5638 	}
5639 	return status;
5640 }
5641 #endif
5642 
5643 /*
 * dp_rx_target_fst_config() - configure the RXOLE Flow Search Engine
 * @soc: data path SoC handle
 *
 * This function is used to configure the FSE HW block in RX OLE on a
 * per-pdev basis. Here, we program parameters related to
 * the Flow Search Table.
 *
 * Return: zero on success, non-zero on failure
5653  */
5654 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
5655 static QDF_STATUS
5656 dp_rx_target_fst_config(struct dp_soc *soc)
5657 {
5658 	int i;
5659 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5660 
5661 	for (i = 0; i < MAX_PDEV_CNT; i++) {
5662 		struct dp_pdev *pdev = soc->pdev_list[i];
5663 
5664 		/* Flow search is not enabled if NSS offload is enabled */
5665 		if (pdev &&
5666 		    !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
5667 			status = dp_rx_flow_send_fst_fw_setup(pdev->soc, pdev);
5668 			if (status != QDF_STATUS_SUCCESS)
5669 				break;
5670 		}
5671 	}
5672 	return status;
5673 }
5674 #elif defined(WLAN_SUPPORT_RX_FISA)
5675 /**
5676  * dp_rx_target_fst_config() - Configure RX OLE FSE engine in HW
5677  * @soc: SoC handle
5678  *
5679  * Return: Success
5680  */
5681 static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
5682 {
5683 	/* Check if it is enabled in the INI */
5684 	if (!soc->fisa_enable) {
5685 		dp_err("RX FISA feature is disabled");
5686 		return QDF_STATUS_E_NOSUPPORT;
5687 	}
5688 
5689 	return dp_rx_flow_send_fst_fw_setup(soc, soc->pdev_list[0]);
5690 }
5691 
5692 #define FISA_MAX_TIMEOUT 0xffffffff
5693 #define FISA_DISABLE_TIMEOUT 0
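/*
 * dp_rx_fisa_config() - Send the FISA configuration to the target
 * @soc: SoC handle
 *
 * The aggregation timeout is programmed to FISA_MAX_TIMEOUT here;
 * presumably this defers flow flushes to the other FISA triggers.
 *
 * Return: Success/Failure
 */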
5694 static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
5695 {
5696 	struct dp_htt_rx_fisa_cfg fisa_config;
5697 
5698 	fisa_config.pdev_id = 0;
5699 	fisa_config.fisa_timeout = FISA_MAX_TIMEOUT;
5700 
5701 	return dp_htt_rx_fisa_config(soc->pdev_list[0], &fisa_config);
5702 }
5703 #else /* !WLAN_SUPPORT_RX_FISA */
5704 static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
5705 {
5706 	return QDF_STATUS_SUCCESS;
5707 }
5708 #endif /* !WLAN_SUPPORT_RX_FISA */
5709 
5710 #ifndef WLAN_SUPPORT_RX_FISA
5711 static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
5712 {
5713 	return QDF_STATUS_SUCCESS;
5714 }
5715 
5716 static QDF_STATUS dp_rx_dump_fisa_stats(struct dp_soc *soc)
5717 {
5718 	return QDF_STATUS_SUCCESS;
5719 }
5720 
5721 static void dp_rx_dump_fisa_table(struct dp_soc *soc)
5722 {
5723 }
5724 
5725 static void dp_suspend_fse_cache_flush(struct dp_soc *soc)
5726 {
5727 }
5728 
5729 static void dp_resume_fse_cache_flush(struct dp_soc *soc)
5730 {
5731 }
5732 #endif /* !WLAN_SUPPORT_RX_FISA */
5733 
5734 #ifndef WLAN_DP_FEATURE_SW_LATENCY_MGR
5735 static inline QDF_STATUS dp_print_swlm_stats(struct dp_soc *soc)
5736 {
5737 	return QDF_STATUS_SUCCESS;
5738 }
5739 #endif /* !WLAN_DP_FEATURE_SW_LATENCY_MGR */
5740 
5741 /*
5742  * dp_soc_attach_target_wifi3() - SOC initialization in the target
5743  * @cdp_soc: Opaque Datapath SOC handle
5744  *
5745  * Return: zero on success, non-zero on failure
5746  */
5747 static QDF_STATUS
5748 dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
5749 {
5750 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
5751 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5752 
5753 	htt_soc_attach_target(soc->htt_handle);
5754 
5755 	status = dp_rxdma_ring_config(soc);
5756 	if (status != QDF_STATUS_SUCCESS) {
5757 		dp_err("Failed to send htt srng setup messages to target");
5758 		return status;
5759 	}
5760 
5761 	status = soc->arch_ops.dp_rxdma_ring_sel_cfg(soc);
5762 	if (status != QDF_STATUS_SUCCESS) {
5763 		dp_err("Failed to send htt ring config message to target");
5764 		return status;
5765 	}
5766 
5767 	status = dp_rx_target_fst_config(soc);
5768 	if (status != QDF_STATUS_SUCCESS &&
5769 	    status != QDF_STATUS_E_NOSUPPORT) {
5770 		dp_err("Failed to send htt fst setup config message to target");
5771 		return status;
5772 	}
5773 
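	/* Send the FISA config only when the FST setup above actually
	 * succeeded; the E_NOSUPPORT case falls through without it.
	 */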
5774 	if (status == QDF_STATUS_SUCCESS) {
5775 		status = dp_rx_fisa_config(soc);
5776 		if (status != QDF_STATUS_SUCCESS) {
5777 			dp_err("Failed to send htt FISA config message to target");
5778 			return status;
5779 		}
5780 	}
5781 
5782 	DP_STATS_INIT(soc);
5783 
5784 	dp_runtime_init(soc);
5785 
5786 	/* initialize work queue for stats processing */
5787 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
5788 
5789 	return QDF_STATUS_SUCCESS;
5790 }
5791 
5792 #ifdef QCA_SUPPORT_FULL_MON
5793 static inline QDF_STATUS
5794 dp_soc_config_full_mon_mode(struct dp_pdev *pdev, enum dp_full_mon_config val)
5795 {
5796 	struct dp_soc *soc = pdev->soc;
5797 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5798 
5799 	if (!soc->full_mon_mode)
5800 		return QDF_STATUS_SUCCESS;
5801 
5802 	if ((htt_h2t_full_mon_cfg(soc->htt_handle,
5803 				  pdev->pdev_id,
5804 				  val)) != QDF_STATUS_SUCCESS) {
5805 		status = QDF_STATUS_E_FAILURE;
5806 	}
5807 
5808 	return status;
5809 }
5810 #else
5811 static inline QDF_STATUS
5812 dp_soc_config_full_mon_mode(struct dp_pdev *pdev, enum dp_full_mon_config val)
5813 {
	return QDF_STATUS_SUCCESS;
5815 }
5816 #endif
5817 
5818 /*
5819  * dp_vdev_id_map_tbl_add() - Add vdev into vdev_id table
5820  * @soc: SoC handle
5821  * @vdev: vdev handle
5822  * @vdev_id: vdev_id
5823  *
5824  * Return: None
5825  */
5826 static void dp_vdev_id_map_tbl_add(struct dp_soc *soc,
5827 				   struct dp_vdev *vdev,
5828 				   uint8_t vdev_id)
5829 {
	QDF_ASSERT(vdev_id < MAX_VDEV_CNT);
5831 
5832 	qdf_spin_lock_bh(&soc->vdev_map_lock);
5833 
5834 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
5835 			QDF_STATUS_SUCCESS) {
5836 		dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK vdev_id %u",
5837 			     soc, vdev, vdev_id);
5838 		qdf_spin_unlock_bh(&soc->vdev_map_lock);
5839 		return;
5840 	}
5841 
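	/* Publish the vdev in the id map; a pre-existing entry for this
	 * vdev_id indicates a duplicate add and is asserted on.
	 */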
5842 	if (!soc->vdev_id_map[vdev_id])
5843 		soc->vdev_id_map[vdev_id] = vdev;
5844 	else
5845 		QDF_ASSERT(0);
5846 
5847 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
5848 }
5849 
5850 /*
5851  * dp_vdev_id_map_tbl_remove() - remove vdev from vdev_id table
5852  * @soc: SoC handle
5853  * @vdev: vdev handle
5854  *
5855  * Return: None
5856  */
5857 static void dp_vdev_id_map_tbl_remove(struct dp_soc *soc,
5858 				      struct dp_vdev *vdev)
5859 {
5860 	qdf_spin_lock_bh(&soc->vdev_map_lock);
5861 	QDF_ASSERT(soc->vdev_id_map[vdev->vdev_id] == vdev);
5862 
5863 	soc->vdev_id_map[vdev->vdev_id] = NULL;
5864 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
5865 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
5866 }
5867 
5868 /*
5869  * dp_vdev_pdev_list_add() - add vdev into pdev's list
5870  * @soc: soc handle
5871  * @pdev: pdev handle
5872  * @vdev: vdev handle
5873  *
5874  * return: none
5875  */
5876 static void dp_vdev_pdev_list_add(struct dp_soc *soc,
5877 				  struct dp_pdev *pdev,
5878 				  struct dp_vdev *vdev)
5879 {
5880 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5881 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
5882 			QDF_STATUS_SUCCESS) {
5883 		dp_vdev_info("%pK: unable to get vdev reference at MAP vdev %pK",
5884 			     soc, vdev);
5885 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5886 		return;
5887 	}
5888 	/* add this vdev into the pdev's list */
5889 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
5890 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5891 }
5892 
5893 /*
5894  * dp_vdev_pdev_list_remove() - remove vdev from pdev's list
5895  * @soc: SoC handle
5896  * @pdev: pdev handle
5897  * @vdev: VDEV handle
5898  *
5899  * Return: none
5900  */
5901 static void dp_vdev_pdev_list_remove(struct dp_soc *soc,
5902 				     struct dp_pdev *pdev,
5903 				     struct dp_vdev *vdev)
5904 {
5905 	uint8_t found = 0;
5906 	struct dp_vdev *tmpvdev = NULL;
5907 
5908 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5909 	TAILQ_FOREACH(tmpvdev, &pdev->vdev_list, vdev_list_elem) {
5910 		if (tmpvdev == vdev) {
5911 			found = 1;
5912 			break;
5913 		}
5914 	}
5915 
5916 	if (found) {
5917 		TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
5918 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
5919 	} else {
5920 		dp_vdev_debug("%pK: vdev:%pK not found in pdev:%pK vdevlist:%pK",
5921 			      soc, vdev, pdev, &pdev->vdev_list);
5922 		QDF_ASSERT(0);
5923 	}
5924 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5925 }
5926 
5927 /*
 * dp_vdev_attach_wifi3() - attach txrx vdev
 * @cdp_soc: Datapath SOC handle
 * @pdev_id: PDEV ID of the pdev to attach the vdev to
 * @vdev_mac_addr: MAC address of the virtual interface
 * @vdev_id: VDEV Id
 * @op_mode: VDEV operating mode
 * @subtype: VDEV operating subtype
 *
 * Return: status
5936 */
5937 static QDF_STATUS dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc,
5938 				       uint8_t pdev_id,
5939 				       uint8_t *vdev_mac_addr,
5940 				       uint8_t vdev_id,
5941 				       enum wlan_op_mode op_mode,
5942 				       enum wlan_op_subtype subtype)
5943 {
5944 	int i = 0;
5945 	qdf_size_t vdev_context_size;
5946 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
5947 	struct dp_pdev *pdev =
5948 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
5949 						   pdev_id);
5950 	struct dp_vdev *vdev;
5951 
5952 	vdev_context_size =
5953 		soc->arch_ops.txrx_get_context_size(DP_CONTEXT_TYPE_VDEV);
5954 	vdev = qdf_mem_malloc(vdev_context_size);
5955 
5956 	if (!pdev) {
5957 		dp_init_err("%pK: DP PDEV is Null for pdev id %d",
5958 			    cdp_soc, pdev_id);
5959 		qdf_mem_free(vdev);
5960 		goto fail0;
5961 	}
5962 
5963 	if (!vdev) {
5964 		dp_init_err("%pK: DP VDEV memory allocation failed",
5965 			    cdp_soc);
5966 		goto fail0;
5967 	}
5968 
5969 	wlan_minidump_log(vdev, sizeof(*vdev), soc->ctrl_psoc,
5970 			  WLAN_MD_DP_VDEV, "dp_vdev");
5971 
5972 	vdev->pdev = pdev;
5973 	vdev->vdev_id = vdev_id;
5974 	vdev->opmode = op_mode;
5975 	vdev->subtype = subtype;
5976 	vdev->osdev = soc->osdev;
5977 
5978 	vdev->osif_rx = NULL;
5979 	vdev->osif_rsim_rx_decap = NULL;
5980 	vdev->osif_get_key = NULL;
5981 	vdev->osif_rx_mon = NULL;
5982 	vdev->osif_tx_free_ext = NULL;
5983 	vdev->osif_vdev = NULL;
5984 
5985 	vdev->delete.pending = 0;
5986 	vdev->safemode = 0;
5987 	vdev->drop_unenc = 1;
5988 	vdev->sec_type = cdp_sec_type_none;
5989 	vdev->multipass_en = false;
5990 	qdf_atomic_init(&vdev->ref_cnt);
5991 	for (i = 0; i < DP_MOD_ID_MAX; i++)
5992 		qdf_atomic_init(&vdev->mod_refs[i]);
5993 
	/* Take one reference for create */
5995 	qdf_atomic_inc(&vdev->ref_cnt);
5996 	qdf_atomic_inc(&vdev->mod_refs[DP_MOD_ID_CONFIG]);
5997 	vdev->num_peers = 0;
5998 #ifdef notyet
5999 	vdev->filters_num = 0;
6000 #endif
6001 	vdev->lmac_id = pdev->lmac_id;
6002 
6003 	qdf_mem_copy(
6004 		&vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE);
6005 
6006 	/* TODO: Initialize default HTT meta data that will be used in
6007 	 * TCL descriptors for packets transmitted from this VDEV
6008 	 */
6009 
6010 	qdf_spinlock_create(&vdev->peer_list_lock);
6011 	TAILQ_INIT(&vdev->peer_list);
6012 	dp_peer_multipass_list_init(vdev);
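	/* In poll mode, kick the interrupt timer when the first vdev (or
	 * a monitor vdev) comes up; in MSI mode, monitor vdevs use the
	 * dedicated monitor vdev timer instead.
	 */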
6013 	if ((soc->intr_mode == DP_INTR_POLL) &&
6014 	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
6015 		if ((pdev->vdev_count == 0) ||
6016 		    (wlan_op_mode_monitor == vdev->opmode))
6017 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
6018 	} else if (soc->intr_mode == DP_INTR_MSI &&
6019 		   wlan_op_mode_monitor == vdev->opmode &&
6020 		   soc->mon_vdev_timer_state & MON_VDEV_TIMER_INIT) {
6021 		qdf_timer_mod(&soc->mon_vdev_timer, DP_INTR_POLL_TIMER_MS);
6022 		soc->mon_vdev_timer_state |= MON_VDEV_TIMER_RUNNING;
6023 	}
6024 
6025 	dp_vdev_id_map_tbl_add(soc, vdev, vdev_id);
6026 
6027 	if (wlan_op_mode_monitor == vdev->opmode) {
6028 		dp_vdev_set_monitor_mode_buf_rings(pdev);
6029 		pdev->monitor_vdev = vdev;
6030 		return QDF_STATUS_SUCCESS;
6031 	}
6032 
6033 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
6034 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
6035 	vdev->dscp_tid_map_id = 0;
6036 	vdev->mcast_enhancement_en = 0;
6037 	vdev->igmp_mcast_enhanc_en = 0;
6038 	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
6039 	vdev->prev_tx_enq_tstamp = 0;
6040 	vdev->prev_rx_deliver_tstamp = 0;
6041 	vdev->skip_sw_tid_classification = DP_TX_HW_DSCP_TID_MAP_VALID;
6042 
6043 	dp_vdev_pdev_list_add(soc, pdev, vdev);
6044 	pdev->vdev_count++;
6045 
6046 	if (wlan_op_mode_sta != vdev->opmode)
6047 		vdev->ap_bridge_enabled = true;
6048 	else
6049 		vdev->ap_bridge_enabled = false;
6050 	dp_init_info("%pK: wlan_cfg_ap_bridge_enabled %d",
6051 		     cdp_soc, vdev->ap_bridge_enabled);
6052 
6053 	dp_tx_vdev_attach(vdev);
6054 
6055 	if (!pdev->is_lro_hash_configured) {
6056 		if (QDF_IS_STATUS_SUCCESS(dp_lro_hash_setup(soc, pdev)))
6057 			pdev->is_lro_hash_configured = true;
6058 		else
6059 			dp_err("LRO hash setup failure!");
6060 	}
6061 
6062 	dp_info("Created vdev %pK ("QDF_MAC_ADDR_FMT")", vdev,
6063 		QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
6064 	DP_STATS_INIT(vdev);
6065 
6066 	if (QDF_IS_STATUS_ERROR(soc->arch_ops.txrx_vdev_attach(soc, vdev)))
6067 		goto fail0;
6068 
6069 	if (wlan_op_mode_sta == vdev->opmode)
6070 		dp_peer_create_wifi3((struct cdp_soc_t *)soc, vdev_id,
6071 				     vdev->mac_addr.raw);
6072 	return QDF_STATUS_SUCCESS;
6073 
6074 fail0:
6075 	return QDF_STATUS_E_FAILURE;
6076 }
6077 
6078 #ifndef QCA_HOST_MODE_WIFI_DISABLED
6079 /**
6080  * dp_vdev_register_tx_handler() - Register Tx handler
6081  * @vdev: struct dp_vdev *
6082  * @soc: struct dp_soc *
6083  * @txrx_ops: struct ol_txrx_ops *
6084  */
6085 static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
6086 					       struct dp_soc *soc,
6087 					       struct ol_txrx_ops *txrx_ops)
6088 {
6089 	/* Enable vdev_id check only for ap, if flag is enabled */
6090 	if (vdev->mesh_vdev)
6091 		txrx_ops->tx.tx = dp_tx_send_mesh;
6092 	else if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
6093 		 (vdev->opmode == wlan_op_mode_ap))
6094 		txrx_ops->tx.tx = dp_tx_send_vdev_id_check;
6095 	else
6096 		txrx_ops->tx.tx = dp_tx_send;
6097 
6098 	/* Avoid check in regular exception Path */
6099 	if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
6100 	    (vdev->opmode == wlan_op_mode_ap))
6101 		txrx_ops->tx.tx_exception = dp_tx_send_exception_vdev_id_check;
6102 	else
6103 		txrx_ops->tx.tx_exception = dp_tx_send_exception;
6104 
6105 	dp_info("Configure tx_vdev_id_chk_handler Feature Flag: %d and mode:%d for vdev_id:%d",
6106 		wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx),
6107 		vdev->opmode, vdev->vdev_id);
6108 }
6109 #else /* QCA_HOST_MODE_WIFI_DISABLED */
6110 static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
6111 					       struct dp_soc *soc,
6112 					       struct ol_txrx_ops *txrx_ops)
6113 {
6114 }
6115 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
6116 
6117 /**
6118  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
 * @soc_hdl: Datapath soc handle
 * @vdev_id: id of Datapath VDEV handle
 * @osif_vdev: OSIF vdev handle
 * @txrx_ops: Tx and Rx operations
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
6125  */
6126 static QDF_STATUS dp_vdev_register_wifi3(struct cdp_soc_t *soc_hdl,
6127 					 uint8_t vdev_id,
6128 					 ol_osif_vdev_handle osif_vdev,
6129 					 struct ol_txrx_ops *txrx_ops)
6130 {
6131 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
6133 						      DP_MOD_ID_CDP);
6134 
6135 	if (!vdev)
6136 		return QDF_STATUS_E_FAILURE;
6137 
6138 	vdev->osif_vdev = osif_vdev;
6139 	vdev->osif_rx = txrx_ops->rx.rx;
6140 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
6141 	vdev->osif_rx_flush = txrx_ops->rx.rx_flush;
6142 	vdev->osif_gro_flush = txrx_ops->rx.rx_gro_flush;
6143 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
6144 	vdev->osif_fisa_rx = txrx_ops->rx.osif_fisa_rx;
6145 	vdev->osif_fisa_flush = txrx_ops->rx.osif_fisa_flush;
6146 	vdev->osif_get_key = txrx_ops->get_key;
6147 	vdev->osif_rx_mon = txrx_ops->rx.mon;
6148 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
6149 	vdev->tx_comp = txrx_ops->tx.tx_comp;
6150 	vdev->stats_cb = txrx_ops->rx.stats_rx;
6151 #ifdef notyet
6152 #if ATH_SUPPORT_WAPI
6153 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
6154 #endif
6155 #endif
6156 #ifdef UMAC_SUPPORT_PROXY_ARP
6157 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
6158 #endif
6159 	vdev->me_convert = txrx_ops->me_convert;
6160 
6161 	dp_vdev_register_tx_handler(vdev, soc, txrx_ops);
6162 
6163 	dp_init_info("%pK: DP Vdev Register success", soc);
6164 
6165 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6166 	return QDF_STATUS_SUCCESS;
6167 }
6168 
6169 /**
6170  * dp_peer_delete() - delete DP peer
6171  *
 * @soc: Datapath soc
6173  * @peer: Datapath peer
6174  * @arg: argument to iter function
6175  *
6176  * Return: void
6177  */
6178 static void
6179 dp_peer_delete(struct dp_soc *soc,
6180 	       struct dp_peer *peer,
6181 	       void *arg)
6182 {
6183 	if (!peer->valid)
6184 		return;
6185 
6186 	dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
6187 			     peer->vdev->vdev_id,
6188 			     peer->mac_addr.raw, 0);
6189 }
6190 
6191 /**
 * dp_vdev_flush_peers() - Forcibly flush peers of vdev
 * @vdev_handle: Datapath VDEV handle
6194  * @unmap_only: Flag to indicate "only unmap"
6195  *
6196  * Return: void
6197  */
6198 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, bool unmap_only)
6199 {
6200 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6201 	struct dp_pdev *pdev = vdev->pdev;
6202 	struct dp_soc *soc = pdev->soc;
6203 	struct dp_peer *peer;
	uint32_t i = 0;

6207 	if (!unmap_only)
6208 		dp_vdev_iterate_peer_lock_safe(vdev, dp_peer_delete, NULL,
6209 					       DP_MOD_ID_CDP);
6210 
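	/* Walk the entire peer-id table and force an unmap for any
	 * remaining peers that still belong to this vdev.
	 */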
	for (i = 0; i < soc->max_peers; i++) {
6212 		peer = __dp_peer_get_ref_by_id(soc, i, DP_MOD_ID_CDP);
6213 
6214 		if (!peer)
6215 			continue;
6216 
6217 		if (peer->vdev != vdev) {
6218 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6219 			continue;
6220 		}
6221 
		dp_info("peer: "QDF_MAC_ADDR_FMT" is getting unmapped",
6223 			QDF_MAC_ADDR_REF(peer->mac_addr.raw));
6224 
6225 		dp_rx_peer_unmap_handler(soc, i,
6226 					 vdev->vdev_id,
6227 					 peer->mac_addr.raw, 0,
6228 					 DP_PEER_WDS_COUNT_INVALID);
6229 		SET_PEER_REF_CNT_ONE(peer);
6230 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6231 	}
6232 
6233 }
6234 
6235 /*
6236  * dp_vdev_detach_wifi3() - Detach txrx vdev
6237  * @cdp_soc: Datapath soc handle
6238  * @vdev_id: VDEV Id
6239  * @callback: Callback OL_IF on completion of detach
6240  * @cb_context:	Callback context
6241  *
6242  */
6243 static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc,
6244 				       uint8_t vdev_id,
6245 				       ol_txrx_vdev_delete_cb callback,
6246 				       void *cb_context)
6247 {
6248 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
6249 	struct dp_pdev *pdev;
6250 	struct dp_neighbour_peer *peer = NULL;
6251 	struct dp_neighbour_peer *temp_peer = NULL;
6252 	struct dp_peer *vap_self_peer = NULL;
6253 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
6254 						     DP_MOD_ID_CDP);
6255 
6256 	if (!vdev)
6257 		return QDF_STATUS_E_FAILURE;
6258 
6259 	soc->arch_ops.txrx_vdev_detach(soc, vdev);
6260 
6261 	pdev = vdev->pdev;
6262 
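	/* For STA vdevs, drop the self peer's AST entry and delete the
	 * self peer before the rest of the vdev teardown.
	 */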
6263 	vap_self_peer = dp_sta_vdev_self_peer_ref_n_get(soc, vdev,
6264 							DP_MOD_ID_CONFIG);
6265 	if (vap_self_peer) {
6266 		qdf_spin_lock_bh(&soc->ast_lock);
6267 		if (vap_self_peer->self_ast_entry) {
6268 			dp_peer_del_ast(soc, vap_self_peer->self_ast_entry);
6269 			vap_self_peer->self_ast_entry = NULL;
6270 		}
6271 		qdf_spin_unlock_bh(&soc->ast_lock);
6272 
6273 		dp_peer_delete_wifi3((struct cdp_soc_t *)soc, vdev->vdev_id,
6274 				     vap_self_peer->mac_addr.raw, 0);
6275 		dp_peer_unref_delete(vap_self_peer, DP_MOD_ID_CONFIG);
6276 	}
6277 
6278 	/*
6279 	 * If Target is hung, flush all peers before detaching vdev
6280 	 * this will free all references held due to missing
6281 	 * unmap commands from Target
6282 	 */
6283 	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
6284 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, false);
6285 	else if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
6286 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, true);
6287 
6288 	dp_rx_vdev_detach(vdev);
6289 	/*
6290 	 * move it after dp_rx_vdev_detach(),
6291 	 * as the call back done in dp_rx_vdev_detach()
6292 	 * still need to get vdev pointer by vdev_id.
6293 	 */
6294 	dp_vdev_id_map_tbl_remove(soc, vdev);
6295 
6296 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
6297 	if (!soc->hw_nac_monitor_support) {
6298 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
6299 			      neighbour_peer_list_elem) {
6300 			QDF_ASSERT(peer->vdev != vdev);
6301 		}
6302 	} else {
6303 		TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
6304 				   neighbour_peer_list_elem, temp_peer) {
6305 			if (peer->vdev == vdev) {
6306 				TAILQ_REMOVE(&pdev->neighbour_peers_list, peer,
6307 					     neighbour_peer_list_elem);
6308 				qdf_mem_free(peer);
6309 			}
6310 		}
6311 	}
6312 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
6313 
6314 	dp_tx_vdev_multipass_deinit(vdev);
6315 
6316 	if (vdev->vdev_dp_ext_handle) {
6317 		qdf_mem_free(vdev->vdev_dp_ext_handle);
6318 		vdev->vdev_dp_ext_handle = NULL;
6319 	}
6320 	/* indicate that the vdev needs to be deleted */
6321 	vdev->delete.pending = 1;
6322 	vdev->delete.callback = callback;
6323 	vdev->delete.context = cb_context;
6324 
6325 	if (vdev->opmode != wlan_op_mode_monitor)
6326 		dp_vdev_pdev_list_remove(soc, pdev, vdev);
6327 
6328 	pdev->vdev_count--;
6329 	/* release reference taken above for find */
6330 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6331 
6332 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
6333 	TAILQ_INSERT_TAIL(&soc->inactive_vdev_list, vdev, inactive_list_elem);
6334 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
6335 
6336 	/* release reference taken at dp_vdev_create */
6337 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
6338 
6339 	return QDF_STATUS_SUCCESS;
6340 }
6341 
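/**
 * dp_peer_can_reuse() - find a reusable peer on the inactive list
 * @vdev: Datapath VDEV handle
 * @peer_mac_addr: peer mac address
 *
 * Only a bss peer whose vdev and mac address both match is reused; a
 * reference is taken for the caller before the peer is removed from
 * the inactive list.
 *
 * Return: peer pointer with a reference held, or NULL
 */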
6342 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
6343 						uint8_t *peer_mac_addr)
6344 {
6345 	struct dp_peer *peer;
6346 	struct dp_soc *soc = vdev->pdev->soc;
6347 
6348 	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
6349 	TAILQ_FOREACH(peer, &soc->inactive_peer_list,
6350 		      inactive_list_elem) {
6351 
		/* reuse bss peer only when vdev matches */
6353 		if (peer->bss_peer && (peer->vdev == vdev) &&
6354 		    qdf_mem_cmp(peer_mac_addr, peer->mac_addr.raw,
6355 				QDF_MAC_ADDR_SIZE) == 0) {
6356 			/* increment ref count for cdp_peer_create*/
6357 			if (dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG) ==
6358 						QDF_STATUS_SUCCESS) {
6359 				TAILQ_REMOVE(&soc->inactive_peer_list, peer,
6360 					     inactive_list_elem);
6361 				qdf_spin_unlock_bh
6362 					(&soc->inactive_peer_list_lock);
6363 				return peer;
6364 			}
6365 		}
6366 	}
6367 
6368 	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
6369 	return NULL;
6370 }
6371 
6372 #ifdef FEATURE_AST
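/**
 * dp_peer_ast_handle_roam_del() - delete a stale WDS AST entry on roam
 * @soc: SoC handle
 * @pdev: physical device handle
 * @peer_mac_addr: peer mac address
 *
 * When a STA roams between a repeater AP and the root AP, an AST entry
 * added earlier as a WDS entry may still exist for its mac address;
 * delete it if it is a next-hop entry not already being deleted.
 *
 * Return: None
 */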
6373 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
6374 					       struct dp_pdev *pdev,
6375 					       uint8_t *peer_mac_addr)
6376 {
6377 	struct dp_ast_entry *ast_entry;
6378 
6379 	qdf_spin_lock_bh(&soc->ast_lock);
6380 	if (soc->ast_override_support)
6381 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
6382 							    pdev->pdev_id);
6383 	else
6384 		ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
6385 
6386 	if (ast_entry && ast_entry->next_hop && !ast_entry->delete_in_progress)
6387 		dp_peer_del_ast(soc, ast_entry);
6388 
6389 	qdf_spin_unlock_bh(&soc->ast_lock);
6390 }
6391 #endif
6392 
6393 #ifdef PEER_CACHE_RX_PKTS
6394 static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
6395 {
6396 	qdf_spinlock_create(&peer->bufq_info.bufq_lock);
6397 	peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
6398 	qdf_list_create(&peer->bufq_info.cached_bufq, DP_RX_CACHED_BUFQ_THRESH);
6399 }
6400 #else
6401 static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
6402 {
6403 }
6404 #endif
6405 
6406 /*
6407  * dp_peer_create_wifi3() - attach txrx peer
6408  * @soc_hdl: Datapath soc handle
6409  * @vdev_id: id of vdev
6410  * @peer_mac_addr: Peer MAC address
6411  *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
6413  */
6414 static QDF_STATUS
6415 dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
6416 		     uint8_t *peer_mac_addr)
6417 {
6418 	struct dp_peer *peer;
6419 	int i;
6420 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6421 	struct dp_pdev *pdev;
6422 	struct cdp_peer_cookie peer_cookie;
6423 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
6424 	struct dp_vdev *vdev = NULL;
6425 
6426 	if (!peer_mac_addr)
6427 		return QDF_STATUS_E_FAILURE;
6428 
6429 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
6430 
6431 	if (!vdev)
6432 		return QDF_STATUS_E_FAILURE;
6433 
6434 	pdev = vdev->pdev;
6435 	soc = pdev->soc;
6436 
6437 	/*
6438 	 * If a peer entry with given MAC address already exists,
6439 	 * reuse the peer and reset the state of peer.
6440 	 */
6441 	peer = dp_peer_can_reuse(vdev, peer_mac_addr);
6442 
6443 	if (peer) {
6444 		dp_peer_vdev_list_add(soc, vdev, peer);
6445 
6446 		dp_peer_find_hash_add(soc, peer);
6447 		qdf_atomic_init(&peer->is_default_route_set);
6448 		dp_peer_cleanup(vdev, peer);
6449 
6450 		for (i = 0; i < DP_MAX_TIDS; i++)
6451 			qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
6452 
6453 		qdf_spin_lock_bh(&soc->ast_lock);
6454 		dp_peer_delete_ast_entries(soc, peer);
6455 		qdf_spin_unlock_bh(&soc->ast_lock);
6456 
6457 		if ((vdev->opmode == wlan_op_mode_sta) &&
6458 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
6459 		     QDF_MAC_ADDR_SIZE)) {
6460 			ast_type = CDP_TXRX_AST_TYPE_SELF;
6461 		}
6462 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
6463 
6464 		peer->valid = 1;
6465 		dp_local_peer_id_alloc(pdev, peer);
6466 
6467 		qdf_spinlock_create(&peer->peer_info_lock);
6468 		dp_peer_rx_bufq_resources_init(peer);
6469 
6470 		DP_STATS_INIT(peer);
6471 		DP_STATS_UPD(peer, rx.avg_snr, CDP_INVALID_SNR);
6472 
6473 		/*
		 * In tx_monitor mode, a filter may be set for an unassociated
		 * peer; when that peer gets associated, the tx_cap_enabled
		 * flag needs to be updated to support the peer filter.
6477 		 */
6478 		dp_peer_tx_capture_filter_check(pdev, peer);
6479 
6480 		dp_set_peer_isolation(peer, false);
6481 
6482 		dp_wds_ext_peer_init(peer);
6483 
6484 		dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
6485 
6486 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6487 		return QDF_STATUS_SUCCESS;
6488 	} else {
6489 		/*
6490 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
6491 		 * need to remove the AST entry which was earlier added as a WDS
6492 		 * entry.
6493 		 * If an AST entry exists, but no peer entry exists with a given
6494 		 * MAC addresses, we could deduce it as a WDS entry
6495 		 */
6496 		dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
6497 	}
6498 
6499 #ifdef notyet
6500 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
6501 		soc->mempool_ol_ath_peer);
6502 #else
6503 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
6504 #endif
	if (!peer) {
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
		return QDF_STATUS_E_FAILURE; /* failure */
	}

	wlan_minidump_log(peer,
			  sizeof(*peer),
			  soc->ctrl_psoc,
			  WLAN_MD_DP_PEER, "dp_peer");
6513 
6514 	qdf_mem_zero(peer, sizeof(struct dp_peer));
6515 
6516 	TAILQ_INIT(&peer->ast_entry_list);
6517 
6518 	/* store provided params */
6519 	peer->vdev = vdev;
6520 	/* get the vdev reference for new peer */
6521 	dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CHILD);
6522 
6523 	if ((vdev->opmode == wlan_op_mode_sta) &&
6524 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
6525 			 QDF_MAC_ADDR_SIZE)) {
6526 		ast_type = CDP_TXRX_AST_TYPE_SELF;
6527 	}
6528 	qdf_spinlock_create(&peer->peer_state_lock);
6529 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
6530 	qdf_spinlock_create(&peer->peer_info_lock);
6531 	dp_wds_ext_peer_init(peer);
6532 
6533 	dp_peer_rx_bufq_resources_init(peer);
6534 
6535 	qdf_mem_copy(
6536 		&peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE);
6537 
6538 	/* initialize the peer_id */
6539 	peer->peer_id = HTT_INVALID_PEER;
6540 
6541 	/* reset the ast index to flowid table */
6542 	dp_peer_reset_flowq_map(peer);
6543 
6544 	qdf_atomic_init(&peer->ref_cnt);
6545 
6546 	for (i = 0; i < DP_MOD_ID_MAX; i++)
6547 		qdf_atomic_init(&peer->mod_refs[i]);
6548 
6549 	/* keep one reference for attach */
6550 	qdf_atomic_inc(&peer->ref_cnt);
6551 	qdf_atomic_inc(&peer->mod_refs[DP_MOD_ID_CONFIG]);
6552 
6553 	dp_peer_vdev_list_add(soc, vdev, peer);
6554 
6555 	/* TODO: See if hash based search is required */
6556 	dp_peer_find_hash_add(soc, peer);
6557 
6558 	/* Initialize the peer state */
6559 	peer->state = OL_TXRX_PEER_STATE_DISC;
6560 
6561 	dp_info("vdev %pK created peer %pK ("QDF_MAC_ADDR_FMT") ref_cnt: %d",
6562 		vdev, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
6563 		qdf_atomic_read(&peer->ref_cnt));
6564 	/*
	 * For every peer MAP message, search and set if bss_peer
6566 	 */
6567 	if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
6568 			QDF_MAC_ADDR_SIZE) == 0 &&
6569 			(wlan_op_mode_sta != vdev->opmode)) {
6570 		dp_info("vdev bss_peer!!");
6571 		peer->bss_peer = 1;
6572 	}
6573 
6574 	if (wlan_op_mode_sta == vdev->opmode &&
6575 	    qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
6576 			QDF_MAC_ADDR_SIZE) == 0) {
6577 		peer->sta_self_peer = 1;
6578 	}
6579 
6580 	for (i = 0; i < DP_MAX_TIDS; i++)
6581 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
6582 
6583 	peer->valid = 1;
6584 	dp_local_peer_id_alloc(pdev, peer);
6585 	DP_STATS_INIT(peer);
6586 	DP_STATS_UPD(peer, rx.avg_snr, CDP_INVALID_SNR);
6587 
6588 	qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
6589 		     QDF_MAC_ADDR_SIZE);
6590 	peer_cookie.ctx = NULL;
6591 	peer_cookie.pdev_id = pdev->pdev_id;
6592 	peer_cookie.cookie = pdev->next_peer_cookie++;
6593 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6594 	dp_wdi_event_handler(WDI_EVENT_PEER_CREATE, pdev->soc,
6595 			     (void *)&peer_cookie,
6596 			     peer->peer_id, WDI_NO_VAL, pdev->pdev_id);
6597 #endif
6598 	if (soc->rdkstats_enabled) {
6599 		if (!peer_cookie.ctx) {
6600 			pdev->next_peer_cookie--;
6601 			qdf_err("Failed to initialize peer rate stats");
6602 		} else {
6603 			peer->rdkstats_ctx = (struct cdp_peer_rate_stats_ctx *)
6604 						peer_cookie.ctx;
6605 		}
6606 	}
6607 
6608 	/*
6609 	 * Allocate peer extended stats context. Fall through in
	 * case of failure as it's not a mandatory requirement to have
6611 	 * this object for regular statistics updates.
6612 	 */
6613 	if (dp_peer_ext_stats_ctx_alloc(soc, peer) !=
6614 			QDF_STATUS_SUCCESS)
6615 		dp_warn("peer ext_stats ctx alloc failed");
6616 
6617 	/*
	 * In tx_monitor mode, a filter may be set for an unassociated
	 * peer; when that peer gets associated, the tx_cap_enabled
	 * flag needs to be updated to support the peer filter.
6621 	 */
6622 	dp_peer_tx_capture_filter_check(pdev, peer);
6623 
6624 	dp_set_peer_isolation(peer, false);
6625 
6626 	dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
6627 
6628 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6629 
6630 	return QDF_STATUS_SUCCESS;
6631 }
6632 
6633 /*
6634  * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
6635  * @vdev: Datapath VDEV handle
6636  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
6637  * @hash_based: pointer to hash value (enabled/disabled) to be populated
6638  *
6639  * Return: None
6640  */
6641 static
6642 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
6643 				  enum cdp_host_reo_dest_ring *reo_dest,
6644 				  bool *hash_based)
6645 {
6646 	struct dp_soc *soc;
6647 	struct dp_pdev *pdev;
6648 
6649 	pdev = vdev->pdev;
6650 	soc = pdev->soc;
6651 	/*
6652 	 * hash based steering is disabled for Radios which are offloaded
6653 	 * to NSS
6654 	 */
6655 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
6656 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
6657 
6658 	/*
	 * The line below ensures the proper reo_dest ring is chosen
	 * for cases where a Toeplitz hash cannot be generated
	 * (e.g. non-TCP/UDP packets)
6661 	 */
6662 	*reo_dest = pdev->reo_dest;
6663 }
6664 
6665 #ifdef IPA_OFFLOAD
6666 /**
6667  * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
6668  * @vdev: Virtual device
6669  *
6670  * Return: true if the vdev is of subtype P2P
6671  *	   false if the vdev is of any other subtype
6672  */
6673 static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
6674 {
6675 	if (vdev->subtype == wlan_op_subtype_p2p_device ||
6676 	    vdev->subtype == wlan_op_subtype_p2p_cli ||
6677 	    vdev->subtype == wlan_op_subtype_p2p_go)
6678 		return true;
6679 
6680 	return false;
6681 }
6682 
6683 /*
6684  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
6685  * @vdev: Datapath VDEV handle
6686  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
6687  * @hash_based: pointer to hash value (enabled/disabled) to be populated
6688  *
 * If IPA is enabled in the INI, disable hash-based steering for SAP
 * mode and use the default reo_dest ring for RX; use config values
 * for other modes.
 *
6691  * Return: None
6692  */
6693 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
6694 				       enum cdp_host_reo_dest_ring *reo_dest,
6695 				       bool *hash_based)
6696 {
6697 	struct dp_soc *soc;
6698 	struct dp_pdev *pdev;
6699 
6700 	pdev = vdev->pdev;
6701 	soc = pdev->soc;
6702 
6703 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
6704 
6705 	/* For P2P-GO interfaces we do not need to change the REO
6706 	 * configuration even if IPA config is enabled
6707 	 */
6708 	if (dp_is_vdev_subtype_p2p(vdev))
6709 		return;
6710 
6711 	/*
6712 	 * If IPA is enabled, disable hash-based flow steering and set
6713 	 * reo_dest_ring_4 as the REO ring to receive packets on.
6714 	 * IPA is configured to reap reo_dest_ring_4.
6715 	 *
	 * Note - REO DST indexes are from 0 - 3, while the
	 * cdp_host_reo_dest_ring enum values are from 1 - 4.
6718 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
6719 	 */
6720 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
6721 		if (vdev->opmode == wlan_op_mode_ap) {
6722 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
6723 			*hash_based = 0;
6724 		} else if (vdev->opmode == wlan_op_mode_sta &&
6725 			   dp_ipa_is_mdm_platform()) {
6726 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
6727 		}
6728 	}
6729 }
6730 
6731 #else
6732 
6733 /*
6734  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
6735  * @vdev: Datapath VDEV handle
6736  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
6737  * @hash_based: pointer to hash value (enabled/disabled) to be populated
6738  *
6739  * Use system config values for hash based steering.
6740  * Return: None
6741  */
6742 
6743 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
6744 				       enum cdp_host_reo_dest_ring *reo_dest,
6745 				       bool *hash_based)
6746 {
6747 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
6748 }
6749 #endif /* IPA_OFFLOAD */
6750 
6751 /*
6752  * dp_peer_setup_wifi3() - initialize the peer
6753  * @soc_hdl: soc handle object
 * @vdev_id: vdev_id of vdev object
6755  * @peer_mac: Peer's mac address
6756  *
6757  * Return: QDF_STATUS
6758  */
6759 static QDF_STATUS
6760 dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
6761 		    uint8_t *peer_mac)
6762 {
6763 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6764 	struct dp_pdev *pdev;
6765 	bool hash_based = 0;
6766 	enum cdp_host_reo_dest_ring reo_dest;
6767 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6768 	struct dp_vdev *vdev = NULL;
6769 	struct dp_peer *peer =
6770 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
6771 					       DP_MOD_ID_CDP);
6772 	enum wlan_op_mode vdev_opmode;
6773 
6774 	if (!peer)
6775 		return QDF_STATUS_E_FAILURE;
6776 
6777 	vdev = peer->vdev;
6778 	if (!vdev) {
6779 		status = QDF_STATUS_E_FAILURE;
6780 		goto fail;
6781 	}
6782 
6783 	/* save vdev related member in case vdev freed */
6784 	vdev_opmode = vdev->opmode;
6785 	pdev = vdev->pdev;
6786 	dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based);
6787 
6788 	dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u",
6789 		pdev->pdev_id, vdev->vdev_id,
6790 		vdev->opmode, hash_based, reo_dest);
6791 
6792 	/*
	 * There are corner cases where AD1 = AD2 = "VAP's address",
	 * i.e. both devices have the same MAC address. In these
	 * cases we want such pkts to be processed by the NULL Q handler,
	 * which is the REO2TCL ring. For this reason we should
	 * not set up reo_queues and the default route for the bss_peer.
6798 	 */
6799 	dp_peer_tx_init(pdev, peer);
6800 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
6801 		status = QDF_STATUS_E_FAILURE;
6802 		goto fail;
6803 	}
6804 
6805 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
6806 		/* TODO: Check the destination ring number to be passed to FW */
6807 		soc->cdp_soc.ol_ops->peer_set_default_routing(
6808 				soc->ctrl_psoc,
6809 				peer->vdev->pdev->pdev_id,
6810 				peer->mac_addr.raw,
6811 				peer->vdev->vdev_id, hash_based, reo_dest);
6812 	}
6813 
6814 	qdf_atomic_set(&peer->is_default_route_set, 1);
6815 
6816 	if (vdev_opmode != wlan_op_mode_monitor)
6817 		dp_peer_rx_init(pdev, peer);
6818 
6819 	dp_peer_ppdu_delayed_ba_init(peer);
6820 
6821 fail:
6822 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6823 	return status;
6824 }
6825 
6826 /*
6827  * dp_cp_peer_del_resp_handler - Handle the peer delete response
6828  * @soc_hdl: Datapath SOC handle
6829  * @vdev_id: id of virtual device object
6830  * @mac_addr: Mac address of the peer
6831  *
6832  * Return: QDF_STATUS
6833  */
6834 static QDF_STATUS dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl,
6835 					      uint8_t vdev_id,
6836 					      uint8_t *mac_addr)
6837 {
6838 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6839 	struct dp_ast_entry  *ast_entry = NULL;
6840 	txrx_ast_free_cb cb = NULL;
6841 	void *cookie;
6842 
6843 	qdf_spin_lock_bh(&soc->ast_lock);
6844 
6845 	ast_entry =
6846 		dp_peer_ast_hash_find_by_vdevid(soc, mac_addr,
6847 						vdev_id);
6848 
	/* In the case of qwrap, we have multiple BSS peers
	 * with the same mac address.
	 *
	 * The AST entry for this mac address will be created
	 * only for one peer, hence it may be NULL here.
6854 	 */
6855 	if ((!ast_entry || !ast_entry->delete_in_progress) ||
6856 	    (ast_entry->peer_id != HTT_INVALID_PEER)) {
6857 		qdf_spin_unlock_bh(&soc->ast_lock);
6858 		return QDF_STATUS_E_FAILURE;
6859 	}
6860 
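	/* Tear the entry out of the HW shadow table and the hash list;
	 * any registered free callback is invoked outside ast_lock below.
	 */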
6861 	if (ast_entry->is_mapped)
6862 		soc->ast_table[ast_entry->ast_idx] = NULL;
6863 
6864 	DP_STATS_INC(soc, ast.deleted, 1);
6865 	dp_peer_ast_hash_remove(soc, ast_entry);
6866 
6867 	cb = ast_entry->callback;
6868 	cookie = ast_entry->cookie;
6869 	ast_entry->callback = NULL;
6870 	ast_entry->cookie = NULL;
6871 
6872 	soc->num_ast_entries--;
6873 	qdf_spin_unlock_bh(&soc->ast_lock);
6874 
6875 	if (cb) {
6876 		cb(soc->ctrl_psoc,
6877 		   dp_soc_to_cdp_soc(soc),
6878 		   cookie,
6879 		   CDP_TXRX_AST_DELETED);
6880 	}
6881 	qdf_mem_free(ast_entry);
6882 
6883 	return QDF_STATUS_SUCCESS;
6884 }
6885 
6886 /*
6887  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
6888  * @txrx_soc: cdp soc handle
6889  * @ac: Access category
6890  * @value: timeout value in millisec
6891  *
6892  * Return: void
6893  */
6894 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
6895 				    uint8_t ac, uint32_t value)
6896 {
6897 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6898 
6899 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
6900 }
6901 
6902 /*
6903  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
6904  * @txrx_soc: cdp soc handle
6905  * @ac: access category
6906  * @value: timeout value in millisec
6907  *
6908  * Return: void
6909  */
6910 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
6911 				    uint8_t ac, uint32_t *value)
6912 {
6913 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6914 
6915 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
6916 }
6917 
6918 /*
6919  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
6920  * @txrx_soc: cdp soc handle
6921  * @pdev_id: id of physical device object
6922  * @val: reo destination ring index (1 - 4)
6923  *
6924  * Return: QDF_STATUS
6925  */
6926 static QDF_STATUS
6927 dp_set_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id,
6928 		     enum cdp_host_reo_dest_ring val)
6929 {
6930 	struct dp_pdev *pdev =
6931 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
6932 						   pdev_id);
6933 
6934 	if (pdev) {
6935 		pdev->reo_dest = val;
6936 		return QDF_STATUS_SUCCESS;
6937 	}
6938 
6939 	return QDF_STATUS_E_FAILURE;
6940 }
6941 
6942 /*
6943  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
6944  * @txrx_soc: cdp soc handle
6945  * @pdev_id: id of physical device object
6946  *
6947  * Return: reo destination ring index
6948  */
6949 static enum cdp_host_reo_dest_ring
6950 dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id)
6951 {
6952 	struct dp_pdev *pdev =
6953 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
6954 						   pdev_id);
6955 
6956 	if (pdev)
6957 		return pdev->reo_dest;
6958 	else
6959 		return cdp_host_reo_dest_ring_unknown;
6960 }
6961 
6962 #ifdef ATH_SUPPORT_NAC
6963 /*
6964  * dp_set_filter_neigh_peers() - set filter neighbour peers for smart mesh
 * @pdev: physical device object
 * @val: value to be set
 *
 * Return: 0 on success
6969  */
6970 static int dp_set_filter_neigh_peers(struct dp_pdev *pdev,
6971 				     bool val)
6972 {
6973 	/* Enable/Disable smart mesh filtering. This flag will be checked
6974 	 * during rx processing to check if packets are from NAC clients.
6975 	 */
6976 	pdev->filter_neighbour_peers = val;
6977 	return 0;
6978 }
6979 #else
6980 static int dp_set_filter_neigh_peers(struct dp_pdev *pdev,
6981 				     bool val)
6982 {
6983 	return 0;
6984 }
6985 #endif /* ATH_SUPPORT_NAC */
6986 
6987 #if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
6988 /*
 * dp_update_filter_neighbour_peers() - set neighbour peers' (nac clients')
 * addresses for smart mesh filtering
 * @soc_hdl: cdp soc handle
 * @vdev_id: id of virtual device object
 * @cmd: Add/Del command
 * @macaddr: nac client mac address
 *
 * Return: 1 on success, 0 on failure
6997  */
6998 static int dp_update_filter_neighbour_peers(struct cdp_soc_t *soc_hdl,
6999 					    uint8_t vdev_id,
7000 					    uint32_t cmd, uint8_t *macaddr)
7001 {
7002 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7003 	struct dp_pdev *pdev;
7004 	struct dp_neighbour_peer *peer = NULL;
7005 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
7006 						     DP_MOD_ID_CDP);
7007 
7008 	if (!vdev || !macaddr)
7009 		goto fail0;
7010 
7011 	pdev = vdev->pdev;
7012 
7013 	if (!pdev)
7014 		goto fail0;
7015 
7016 	/* Store address of NAC (neighbour peer) which will be checked
7017 	 * against TA of received packets.
7018 	 */
7019 	if (cmd == DP_NAC_PARAM_ADD) {
7020 		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
7021 				sizeof(*peer));
7022 
7023 		if (!peer) {
7024 			dp_cdp_err("%pK: DP neighbour peer node memory allocation failed"
7025 				   , soc);
7026 			goto fail0;
7027 		}
7028 
7029 		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
7030 			macaddr, QDF_MAC_ADDR_SIZE);
7031 		peer->vdev = vdev;
7032 
7033 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
7034 
7035 		/* add this neighbour peer into the list */
7036 		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
7037 				neighbour_peer_list_elem);
7038 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
7039 
7040 		/* first neighbour */
7041 		if (!pdev->neighbour_peers_added) {
7042 			QDF_STATUS status = QDF_STATUS_SUCCESS;
7043 
7044 			pdev->neighbour_peers_added = true;
7045 			if (!wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx))
7046 				dp_vdev_set_monitor_mode_rings(pdev, true);
7047 
7048 			dp_mon_filter_setup_smart_monitor(pdev);
7049 			status = dp_mon_filter_update(pdev);
7050 			if (status != QDF_STATUS_SUCCESS) {
7051 				dp_cdp_err("%pK: smart mon filter setup failed",
7052 					   soc);
7053 				dp_mon_filter_reset_smart_monitor(pdev);
7054 				pdev->neighbour_peers_added = false;
7055 			}
7056 		}
7057 
7058 	} else if (cmd == DP_NAC_PARAM_DEL) {
7059 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
7060 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
7061 				neighbour_peer_list_elem) {
7062 			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
7063 				macaddr, QDF_MAC_ADDR_SIZE)) {
7064 				/* delete this peer from the list */
7065 				TAILQ_REMOVE(&pdev->neighbour_peers_list,
7066 					peer, neighbour_peer_list_elem);
7067 				qdf_mem_free(peer);
7068 				break;
7069 			}
7070 		}
7071 		/* last neighbour deleted */
7072 		if (TAILQ_EMPTY(&pdev->neighbour_peers_list)) {
7073 			QDF_STATUS status = QDF_STATUS_SUCCESS;
7074 
7075 			dp_mon_filter_reset_smart_monitor(pdev);
7076 			status = dp_mon_filter_update(pdev);
7077 			if (status != QDF_STATUS_SUCCESS) {
7078 				dp_cdp_err("%pK: smart mon filter clear failed",
7079 					   soc);
7080 			}
7081 			pdev->neighbour_peers_added = false;
7082 
7083 		}
7084 
7085 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
7086 	}
7087 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7088 	return 1;
7089 
7090 fail0:
7091 	if (vdev)
7092 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7093 	return 0;
7094 }
7095 #endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
7096 
7097 #ifdef WLAN_SUPPORT_MSCS
7098 /*
 * dp_record_mscs_params() - record the MSCS parameters sent by the STA in
 * the MSCS Request to the AP. The AP makes a note of these
 * parameters while comparing the MSDUs sent by the STA, to
 * send the downlink traffic with the correct User priority.
 * @soc_hdl: Datapath soc handle
 * @peer_mac: STA MAC address
 * @vdev_id: ID of the vdev handle
 * @mscs_params: Structure having MSCS parameters obtained
 * from handshake
 * @active: Flag to set MSCS active/inactive
 *
 * Return: QDF_STATUS - Success/Invalid
7110  */
7111 static QDF_STATUS
7112 dp_record_mscs_params(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
7113 		      uint8_t vdev_id, struct cdp_mscs_params *mscs_params,
7114 		      bool active)
7115 {
7116 	struct dp_peer *peer;
7117 	QDF_STATUS status = QDF_STATUS_E_INVAL;
7118 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7119 
7120 	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
7121 				      DP_MOD_ID_CDP);
7122 
7123 	if (!peer) {
7124 		dp_err("Peer is NULL!");
7125 		goto fail;
7126 	}
7127 	if (!active) {
7128 		dp_info("MSCS Procedure is terminated");
7129 		peer->mscs_active = active;
7130 		goto fail;
7131 	}
7132 
7133 	if (mscs_params->classifier_type == IEEE80211_TCLAS_MASK_CLA_TYPE_4) {
7134 		/* Populate entries inside IPV4 database first */
7135 		peer->mscs_ipv4_parameter.user_priority_bitmap =
7136 			mscs_params->user_pri_bitmap;
7137 		peer->mscs_ipv4_parameter.user_priority_limit =
7138 			mscs_params->user_pri_limit;
7139 		peer->mscs_ipv4_parameter.classifier_mask =
7140 			mscs_params->classifier_mask;
7141 
7142 		/* Populate entries inside IPV6 database */
7143 		peer->mscs_ipv6_parameter.user_priority_bitmap =
7144 			mscs_params->user_pri_bitmap;
7145 		peer->mscs_ipv6_parameter.user_priority_limit =
7146 			mscs_params->user_pri_limit;
7147 		peer->mscs_ipv6_parameter.classifier_mask =
7148 			mscs_params->classifier_mask;
7149 		peer->mscs_active = 1;
7150 		dp_info("\n\tMSCS Procedure request based parameters for "QDF_MAC_ADDR_FMT"\n"
7151 			"\tClassifier_type = %d\tUser priority bitmap = %x\n"
7152 			"\tUser priority limit = %x\tClassifier mask = %x",
7153 			QDF_MAC_ADDR_REF(peer_mac),
7154 			mscs_params->classifier_type,
7155 			peer->mscs_ipv4_parameter.user_priority_bitmap,
7156 			peer->mscs_ipv4_parameter.user_priority_limit,
7157 			peer->mscs_ipv4_parameter.classifier_mask);
7158 	}
7159 
7160 	status = QDF_STATUS_SUCCESS;
7161 fail:
7162 	if (peer)
7163 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7164 	return status;
7165 }
7166 #endif
7167 
7168 /*
7169  * dp_get_sec_type() - Get the security type
7170  * @soc: soc handle
7171  * @vdev_id: id of dp handle
7172  * @peer_mac: mac of datapath PEER handle
7173  * @sec_idx: Security id (mcast, ucast)
7174  *
7175  * Return: Security type
7176  */
7177 static int dp_get_sec_type(struct cdp_soc_t *soc, uint8_t vdev_id,
7178 			   uint8_t *peer_mac, uint8_t sec_idx)
7179 {
7180 	int sec_type = 0;
7181 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
7182 						       peer_mac, 0, vdev_id,
7183 						       DP_MOD_ID_CDP);
7184 
7185 	if (!peer) {
7186 		dp_cdp_err("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
7187 		return sec_type;
7188 	}
7189 
7190 	sec_type = peer->security[sec_idx].sec_type;
7191 
7192 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7193 	return sec_type;
7194 }
7195 
7196 /*
7197  * dp_peer_authorize() - authorize txrx peer
7198  * @soc_hdl: soc handle
7199  * @vdev_id: id of dp handle
7200  * @peer_mac: mac of datapath PEER handle
7201  * @authorize: flag to authorize (1) or deauthorize (0) the peer
7202  * Return: QDF_STATUS
7203  */
7204 static QDF_STATUS
7205 dp_peer_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
7206 		  uint8_t *peer_mac, uint32_t authorize)
7207 {
7208 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7209 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7210 	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
7211 						      0, vdev_id,
7212 						      DP_MOD_ID_CDP);
7213 
7214 	if (!peer) {
7215 		dp_cdp_debug("%pK: Peer is NULL!\n", soc);
7216 		status = QDF_STATUS_E_FAILURE;
7217 	} else {
7218 		peer->authorize = authorize ? 1 : 0;
7219 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7220 	}
7221 
7222 	return status;
7223 }
7224 
7225 static void dp_flush_monitor_rings(struct dp_soc *soc)
7226 {
7227 	struct dp_pdev *pdev = soc->pdev_list[0];
7228 	hal_soc_handle_t hal_soc = soc->hal_soc;
7229 	uint32_t lmac_id;
7230 	uint32_t hp, tp;
7231 	uint8_t dp_intr_id;
7232 	int budget;
7233 	void *mon_dst_srng;
7234 
7235 	/* Reset monitor filters before reaping the ring */
7236 	qdf_spin_lock_bh(&pdev->mon_lock);
7237 	dp_mon_filter_reset_mon_mode(pdev);
7238 	if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS)
7239 		dp_info("failed to reset monitor filters");
7240 	qdf_spin_unlock_bh(&pdev->mon_lock);
7241 
7242 	if (pdev->mon_chan_band == REG_BAND_UNKNOWN)
7243 		return;
7244 
7245 	lmac_id = pdev->ch_band_lmac_id_mapping[pdev->mon_chan_band];
7246 	if (qdf_unlikely(lmac_id == DP_MON_INVALID_LMAC_ID))
7247 		return;
7248 
7249 	dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id];
7250 	mon_dst_srng = dp_rxdma_get_mon_dst_ring(pdev, lmac_id);
7251 
7252 	/* reap full ring */
7253 	budget = wlan_cfg_get_dma_mon_stat_ring_size(pdev->wlan_cfg_ctx);
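	/* A budget equal to the ring size lets a single dp_mon_process()
	 * pass reap every outstanding descriptor in the destination ring.
	 */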
7254 
7255 	hal_get_sw_hptp(hal_soc, mon_dst_srng, &tp, &hp);
7256 	dp_info("Before reap: Monitor DST ring HP %u TP %u", hp, tp);
7257 
7258 	dp_mon_process(soc, &soc->intr_ctx[dp_intr_id], lmac_id, budget);
7259 
7260 	hal_get_sw_hptp(hal_soc, mon_dst_srng, &tp, &hp);
7261 	dp_info("After reap: Monitor DST ring HP %u TP %u", hp, tp);
7262 }
7263 
7264 /**
7265  * dp_vdev_unref_delete() - check and process vdev delete
7266  * @soc : DP specific soc pointer
7267  * @vdev: DP specific vdev pointer
7268  * @mod_id: module id
7269  *
7270  */
7271 void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev,
7272 			  enum dp_mod_id mod_id)
7273 {
7274 	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
7275 	void *vdev_delete_context = NULL;
7276 	uint8_t vdev_id = vdev->vdev_id;
7277 	struct dp_pdev *pdev = vdev->pdev;
7278 	struct dp_vdev *tmp_vdev = NULL;
7279 	uint8_t found = 0;
7280 
7281 	QDF_ASSERT(qdf_atomic_dec_return(&vdev->mod_refs[mod_id]) >= 0);
7282 
7283 	/* Return if this is not the last reference*/
7284 	if (!qdf_atomic_dec_and_test(&vdev->ref_cnt))
7285 		return;
7286 
7287 	/*
7288 	 * This flag should already be set: the last reference must be
7289 	 * released only after cdp_vdev_detach() is called.
7290 	 *
7291 	 * If this assert is hit, there is a reference count issue.
7292 	 */
7293 	QDF_ASSERT(vdev->delete.pending);
7294 
7295 	vdev_delete_cb = vdev->delete.callback;
7296 	vdev_delete_context = vdev->delete.context;
7297 
7298 	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")- its last peer is done",
7299 		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
7300 
7301 	if (wlan_op_mode_monitor == vdev->opmode) {
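		/* Stop the pending reap timer for the active interrupt mode
		 * and drain the monitor destination ring before the vdev is
		 * freed, so that no deferred reap touches stale state.
		 */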
7302 		if (soc->intr_mode == DP_INTR_POLL) {
7303 			qdf_timer_sync_cancel(&soc->int_timer);
7304 			dp_flush_monitor_rings(soc);
7305 		} else if (soc->intr_mode == DP_INTR_MSI &&
7306 			soc->mon_vdev_timer_state & MON_VDEV_TIMER_RUNNING) {
7307 			qdf_timer_sync_cancel(&soc->mon_vdev_timer);
7308 			dp_flush_monitor_rings(soc);
7309 			soc->mon_vdev_timer_state &= ~MON_VDEV_TIMER_RUNNING;
7310 		}
7311 		pdev->monitor_vdev = NULL;
7312 		goto free_vdev;
7313 	}
7314 	/* all peers are gone, go ahead and delete it */
7315 	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
7316 			FLOW_TYPE_VDEV, vdev_id);
7317 	dp_tx_vdev_detach(vdev);
7318 
7319 free_vdev:
7320 	qdf_spinlock_destroy(&vdev->peer_list_lock);
7321 
7322 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
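	/* Remove the vdev from the inactive list if detach placed it there */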
7323 	TAILQ_FOREACH(tmp_vdev, &soc->inactive_vdev_list,
7324 		      inactive_list_elem) {
7325 		if (tmp_vdev == vdev) {
7326 			found = 1;
7327 			break;
7328 		}
7329 	}
7330 	if (found)
7331 		TAILQ_REMOVE(&soc->inactive_vdev_list, vdev,
7332 			     inactive_list_elem);
7333 	/* the vdev has been removed from the inactive vdev list */
7334 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
7335 
7336 	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")",
7337 		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
7338 	wlan_minidump_remove(vdev, sizeof(*vdev), soc->ctrl_psoc,
7339 			     WLAN_MD_DP_VDEV, "dp_vdev");
7340 	qdf_mem_free(vdev);
7341 	vdev = NULL;
7342 
7343 	if (vdev_delete_cb)
7344 		vdev_delete_cb(vdev_delete_context);
7345 }
7346 
7347 /*
7348  * dp_peer_unref_delete() - unref and delete peer
7349  * @peer_handle:    Datapath peer handle
7350  * @mod_id:         ID of module releasing reference
7351  *
7352  */
7353 void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id mod_id)
7354 {
7355 	struct dp_vdev *vdev = peer->vdev;
7356 	struct dp_pdev *pdev = vdev->pdev;
7357 	struct dp_soc *soc = pdev->soc;
7358 	uint16_t peer_id;
7359 	struct cdp_peer_cookie peer_cookie;
7360 	struct dp_peer *tmp_peer;
7361 	bool found = false;
7362 	int tid = 0;
7363 
7364 	if (mod_id > DP_MOD_ID_RX)
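	/* Per-module ref bookkeeping is kept only for module IDs above
	 * DP_MOD_ID_RX; the fast-path TX/RX IDs skip it, likely to avoid
	 * extra atomic operations on the hot path.
	 */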
7365 		QDF_ASSERT(qdf_atomic_dec_return(&peer->mod_refs[mod_id]) >= 0);
7366 
7367 	/*
7368 	 * Hold the lock all the way from checking if the peer ref count
7369 	 * is zero until the peer references are removed from the hash
7370 	 * table and vdev list (if the peer ref count is zero).
7371 	 * This protects against a new HL tx operation starting to use the
7372 	 * peer object just after this function concludes it's done being used.
7373 	 * Furthermore, the lock needs to be held while checking whether the
7374 	 * vdev's list of peers is empty, to make sure that list is not modified
7375 	 * concurrently with the empty check.
7376 	 */
7377 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
7378 		peer_id = peer->peer_id;
7379 
7380 		/*
7381 		 * Make sure that the reference to the peer in
7382 		 * peer object map is removed
7383 		 */
7384 		QDF_ASSERT(peer_id == HTT_INVALID_PEER);
7385 
7386 		dp_peer_debug("Deleting peer %pK ("QDF_MAC_ADDR_FMT")", peer,
7387 			      QDF_MAC_ADDR_REF(peer->mac_addr.raw));
7388 
7389 		/*
7390 		 * Deallocate the extended stats context
7391 		 */
7392 		dp_peer_ext_stats_ctx_dealloc(soc, peer);
7393 
7394 		/* send peer destroy event to upper layer */
7395 		qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
7396 			     QDF_MAC_ADDR_SIZE);
7397 		peer_cookie.ctx = NULL;
7398 		peer_cookie.ctx = (struct cdp_stats_cookie *)
7399 					peer->rdkstats_ctx;
7400 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7401 		dp_wdi_event_handler(WDI_EVENT_PEER_DESTROY,
7402 				     soc,
7403 				     (void *)&peer_cookie,
7404 				     peer->peer_id,
7405 				     WDI_NO_VAL,
7406 				     pdev->pdev_id);
7407 #endif
7408 		peer->rdkstats_ctx = NULL;
7409 		wlan_minidump_remove(peer, sizeof(*peer), soc->ctrl_psoc,
7410 				     WLAN_MD_DP_PEER, "dp_peer");
7411 
7412 		qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
7413 		TAILQ_FOREACH(tmp_peer, &soc->inactive_peer_list,
7414 			      inactive_list_elem) {
7415 			if (tmp_peer == peer) {
7416 				found = 1;
7417 				break;
7418 			}
7419 		}
7420 		if (found)
7421 			TAILQ_REMOVE(&soc->inactive_peer_list, peer,
7422 				     inactive_list_elem);
7423 		/* peer has been removed from the inactive peer list */
7424 		qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
7425 		DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
7426 		dp_peer_update_state(soc, peer, DP_PEER_STATE_FREED);
7427 
7428 		/* cleanup the peer data */
7429 		dp_peer_cleanup(vdev, peer);
7430 		for (tid = 0; tid < DP_MAX_TIDS; tid++)
7431 			qdf_spinlock_destroy(&peer->rx_tid[tid].tid_lock);
7432 
7433 		qdf_spinlock_destroy(&peer->peer_state_lock);
7434 		qdf_mem_free(peer);
7435 
7436 		/*
7437 		 * Decrement ref count taken at peer create
7438 		 */
7439 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CHILD);
7440 	}
7441 }
7442 
7443 #ifdef PEER_CACHE_RX_PKTS
7444 static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
7445 {
7446 	qdf_list_destroy(&peer->bufq_info.cached_bufq);
7447 	qdf_spinlock_destroy(&peer->bufq_info.bufq_lock);
7448 }
7449 #else
7450 static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
7451 {
7452 }
7453 #endif
7454 
7455 /*
7456  * dp_peer_delete_wifi3() - Delete txrx peer
7457  * @soc_hdl: soc handle
7458  * @vdev_id: id of dp handle
7459  * @peer_mac: mac of datapath PEER handle
7460  * @bitmap: bitmap indicating special handling of request.
7461  * Return: QDF_STATUS
7462  */
7463 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
7464 				       uint8_t vdev_id,
7465 				       uint8_t *peer_mac, uint32_t bitmap)
7466 {
7467 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7468 	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
7469 						      0, vdev_id,
7470 						      DP_MOD_ID_CDP);
7471 	struct dp_vdev *vdev = NULL;
7472 
7473 	/* Peer can be NULL for monitor VAP mac address */
7474 	if (!peer) {
7475 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
7476 			  "%s: Invalid peer\n", __func__);
7477 		return QDF_STATUS_E_FAILURE;
7478 	}
7479 
7480 	if (!peer->valid) {
7481 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7482 		dp_err("Invalid peer: "QDF_MAC_ADDR_FMT,
7483 			QDF_MAC_ADDR_REF(peer_mac));
7484 		return QDF_STATUS_E_ALREADY;
7485 	}
7486 
7487 	vdev = peer->vdev;
7488 
7489 	if (!vdev) {
		/* drop the lookup reference taken above before bailing out */
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7490 		return QDF_STATUS_E_FAILURE;
	}
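
	/* Clear the valid flag first so that new lookups stop taking
	 * references to this peer while teardown proceeds.
	 */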
7491 	peer->valid = 0;
7492 
7493 	dp_init_info("%pK: peer %pK (" QDF_MAC_ADDR_FMT ")",
7494 		     soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
7495 
7496 	dp_local_peer_id_free(peer->vdev->pdev, peer);
7497 
7498 	/* Drop all rx packets before deleting peer */
7499 	dp_clear_peer_internal(soc, peer);
7500 
7501 	dp_peer_rx_bufq_resources_deinit(peer);
7502 
7503 	qdf_spinlock_destroy(&peer->peer_info_lock);
7504 	dp_peer_multipass_list_remove(peer);
7505 
7506 	/* remove the reference to the peer from the hash table */
7507 	dp_peer_find_hash_remove(soc, peer);
7508 
7509 	dp_peer_vdev_list_remove(soc, vdev, peer);
7510 
7511 	qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
7512 	TAILQ_INSERT_TAIL(&soc->inactive_peer_list, peer,
7513 			  inactive_list_elem);
7514 	qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
7515 
7516 	/*
7517 	 * Remove the reference added during peer_attach.
7518 	 * The peer will still be left allocated until the
7519 	 * PEER_UNMAP message arrives to remove the other
7520 	 * reference, added by the PEER_MAP message.
7521 	 */
7522 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
7523 	/*
7524 	 * Remove the reference taken above
7525 	 */
7526 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7527 
7528 	return QDF_STATUS_SUCCESS;
7529 }
7530 
7531 /*
7532  * dp_get_vdev_mac_addr_wifi3() - Get vdev mac address
7533  * @soc_hdl: Datapath soc handle
7534  * @vdev_id: virtual interface id
7535  *
7536  * Return: MAC address on success, NULL on failure.
7537  *
7538  */
7539 static uint8_t *dp_get_vdev_mac_addr_wifi3(struct cdp_soc_t *soc_hdl,
7540 					   uint8_t vdev_id)
7541 {
7542 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7543 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
7544 						     DP_MOD_ID_CDP);
7545 	uint8_t *mac = NULL;
7546 
7547 	if (!vdev)
7548 		return NULL;
7549 
7550 	mac = vdev->mac_addr.raw;
7551 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7552 
7553 	return mac;
7554 }
7555 
7556 /*
7557  * dp_vdev_set_wds() - Enable/disable WDS for the vdev
7558  * @soc_hdl: DP soc handle
7559  * @vdev_id: id of DP VDEV handle
7560  * @val: WDS enable/disable value
7561  *
7562  * Return: QDF_STATUS
7563  */
7564 static int dp_vdev_set_wds(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
7565 			   uint32_t val)
7566 {
7567 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7568 	struct dp_vdev *vdev =
7569 		dp_vdev_get_ref_by_id((struct dp_soc *)soc, vdev_id,
7570 				      DP_MOD_ID_CDP);
7571 
7572 	if (!vdev)
7573 		return QDF_STATUS_E_FAILURE;
7574 
7575 	vdev->wds_enabled = val;
7576 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7577 
7578 	return QDF_STATUS_SUCCESS;
7579 }
7580 
7581 /*
7582  * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev id of monitor mode
7583  * @soc_hdl: datapath soc handle
7584  * @pdev_id: physical device instance id
7585  *
7586  * Return: virtual interface id
7587  */
7588 static uint8_t dp_get_mon_vdev_from_pdev_wifi3(struct cdp_soc_t *soc_hdl,
7589 					       uint8_t pdev_id)
7590 {
7591 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7592 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
7593 
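	/* Note: the uint8_t return type truncates -EINVAL below, so callers
	 * actually observe 0xEA rather than a negative errno on failure.
	 */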
7594 	if (qdf_unlikely(!pdev || !pdev->monitor_vdev))
7595 		return -EINVAL;
7596 
7597 	return pdev->monitor_vdev->vdev_id;
7598 }
7599 
7600 static int dp_get_opmode(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
7601 {
7602 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7603 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
7604 						     DP_MOD_ID_CDP);
7605 	int opmode;
7606 
7607 	if (!vdev) {
7608 		dp_err("vdev for id %d is NULL", vdev_id);
7609 		return -EINVAL;
7610 	}
7611 	opmode = vdev->opmode;
7612 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7613 
7614 	return opmode;
7615 }
7616 
7617 /**
7618  * dp_get_os_rx_handles_from_vdev_wifi3() - Get os rx handles for a vdev
7619  * @soc_hdl: ol_txrx_soc_handle handle
7620  * @vdev_id: vdev id for which os rx handles are needed
7621  * @stack_fn_p: pointer to stack function pointer
7622  * @osif_handle_p: pointer to ol_osif_vdev_handle
7623  *
7624  * Return: void
7625  */
7626 static
7627 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_soc_t *soc_hdl,
7628 					  uint8_t vdev_id,
7629 					  ol_txrx_rx_fp *stack_fn_p,
7630 					  ol_osif_vdev_handle *osif_vdev_p)
7631 {
7632 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7633 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
7634 						     DP_MOD_ID_CDP);
7635 
7636 	if (!vdev)
7637 		return;
7638 
7639 	*stack_fn_p = vdev->osif_rx_stack;
7640 	*osif_vdev_p = vdev->osif_vdev;
7641 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7642 }
7643 
7644 /**
7645  * dp_get_ctrl_pdev_from_vdev() - Get control pdev of vdev
7646  * @soc_hdl: datapath soc handle
7647  * @vdev_id: virtual device/interface id
7648  *
7649  * Return: Handle to control pdev
7650  */
7651 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(
7652 						struct cdp_soc_t *soc_hdl,
7653 						uint8_t vdev_id)
7654 {
7655 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7656 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
7657 						     DP_MOD_ID_CDP);
7658 	struct dp_pdev *pdev;
7659 
7660 	if (!vdev)
7661 		return NULL;
7662 
7663 	pdev = vdev->pdev;
7664 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7665 	return pdev ? (struct cdp_cfg *)pdev->wlan_cfg_ctx : NULL;
7666 }
7667 
7668 /**
7669  * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
7670  *                                 ring based on target
7671  * @soc: soc handle
7672  * @mac_for_pdev: WIN- pdev_id, MCL- mac id
7673  * @pdev: physical device handle
7674  * @ring_num: mac id
7675  * @htt_tlv_filter: tlv filter
7676  *
7677  * Return: zero on success, non-zero on failure
7678  */
7679 static inline
7680 QDF_STATUS dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
7681 				       struct dp_pdev *pdev, uint8_t ring_num,
7682 				       struct htt_rx_ring_tlv_filter htt_tlv_filter)
7683 {
7684 	QDF_STATUS status;
7685 
7686 	if (soc->wlan_cfg_ctx->rxdma1_enable)
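	/* Targets with a dedicated monitor RxDMA engine (rxdma1) use the
	 * monitor buffer ring; other targets program the filter on the
	 * per-mac Rx buffer ring instead.
	 */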
7687 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
7688 					     soc->rxdma_mon_buf_ring[ring_num]
7689 					     .hal_srng,
7690 					     RXDMA_MONITOR_BUF,
7691 					     RX_MONITOR_BUFFER_SIZE,
7692 					     &htt_tlv_filter);
7693 	else
7694 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
7695 					     pdev->rx_mac_buf_ring[ring_num]
7696 					     .hal_srng,
7697 					     RXDMA_BUF, RX_DATA_BUFFER_SIZE,
7698 					     &htt_tlv_filter);
7699 
7700 	return status;
7701 }
7702 
7703 static inline void
7704 dp_pdev_disable_mcopy_code(struct dp_pdev *pdev)
7705 {
7706 	pdev->mcopy_mode = M_COPY_DISABLED;
7707 	pdev->monitor_vdev = NULL;
7708 }
7709 
7710 /**
7711  * dp_reset_monitor_mode() - Disable monitor mode
7712  * @soc_hdl: Datapath soc handle
7713  * @pdev_id: id of datapath PDEV handle
7714  *
7715  * Return: QDF_STATUS
7716  */
7717 QDF_STATUS dp_reset_monitor_mode(struct cdp_soc_t *soc_hdl,
7718 				 uint8_t pdev_id,
7719 				 uint8_t special_monitor)
7720 {
7721 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
7722 	struct dp_pdev *pdev =
7723 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7724 						   pdev_id);
7725 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7726 
7727 	if (!pdev)
7728 		return QDF_STATUS_E_FAILURE;
7729 
7730 	qdf_spin_lock_bh(&pdev->mon_lock);
7731 
7732 	dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_DISABLE);
7733 	pdev->monitor_vdev = NULL;
7734 
7735 	/*
7736 	 * Lite monitor mode, smart monitor mode and monitor
7737 	 * mode all use this API for filter reset and mode disable
7738 	 */
7739 	if (pdev->mcopy_mode) {
7740 #if defined(FEATURE_PERPKT_INFO)
7741 		dp_pdev_disable_mcopy_code(pdev);
7742 		dp_mon_filter_reset_mcopy_mode(pdev);
7743 #endif /* FEATURE_PERPKT_INFO */
7744 	} else if (special_monitor) {
7745 #if defined(ATH_SUPPORT_NAC)
7746 		dp_mon_filter_reset_smart_monitor(pdev);
7747 #endif /* ATH_SUPPORT_NAC */
7748 	} else {
7749 		dp_mon_filter_reset_mon_mode(pdev);
7750 	}
7751 
7752 	status = dp_mon_filter_update(pdev);
7753 	if (status != QDF_STATUS_SUCCESS) {
7754 		dp_rx_mon_dest_err("%pK: Failed to reset monitor filters",
7755 				   soc);
7756 	}
7757 	pdev->monitor_configured = false;
7758 
7759 	qdf_spin_unlock_bh(&pdev->mon_lock);
7760 	return QDF_STATUS_SUCCESS;
7761 }
7762 
7763 /**
7764  * dp_get_tx_pending() - read pending tx
7765  * @pdev_handle: Datapath PDEV handle
7766  *
7767  * Return: outstanding tx
7768  */
7769 static uint32_t dp_get_tx_pending(struct cdp_pdev *pdev_handle)
7770 {
7771 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7772 
7773 	return qdf_atomic_read(&pdev->num_tx_outstanding);
7774 }
7775 
7776 /**
7777  * dp_get_peer_mac_from_peer_id() - get peer mac
7778  * @soc: Datapath soc handle
7779  * @peer_id: Peer ID
7780  * @peer_mac: MAC addr of PEER
7781  *
7782  * Return: QDF_STATUS
7783  */
7784 static QDF_STATUS dp_get_peer_mac_from_peer_id(struct cdp_soc_t *soc,
7785 					       uint32_t peer_id,
7786 					       uint8_t *peer_mac)
7787 {
7788 	struct dp_peer *peer;
7789 
7790 	if (soc && peer_mac) {
7791 		peer = dp_peer_get_ref_by_id((struct dp_soc *)soc,
7792 					     (uint16_t)peer_id,
7793 					     DP_MOD_ID_CDP);
7794 		if (peer) {
7795 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
7796 				     QDF_MAC_ADDR_SIZE);
7797 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7798 			return QDF_STATUS_SUCCESS;
7799 		}
7800 	}
7801 
7802 	return QDF_STATUS_E_FAILURE;
7803 }
7804 
7805 /**
7806  * dp_vdev_set_monitor_mode_rings() - set monitor mode rings
7807  *
7808  * Allocate SW descriptor pool, buffers, link descriptor memory and
7809  * initialize monitor related SRNGs
7810  *
7811  * @pdev: DP pdev object
7812  * @delayed_replenish: flag to delay the monitor buffer replenish
7813  * Return: QDF_STATUS
7814  */
7815 static QDF_STATUS dp_vdev_set_monitor_mode_rings(struct dp_pdev *pdev,
7816 						 uint8_t delayed_replenish)
7817 {
7818 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
7819 	uint32_t mac_id;
7820 	uint32_t mac_for_pdev;
7821 	struct dp_soc *soc = pdev->soc;
7822 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7823 	struct dp_srng *mon_buf_ring;
7824 	uint32_t num_entries;
7825 
7826 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
7827 
7828 	/* If monitor rings are already initialized, return from here */
7829 	if (pdev->pdev_mon_init)
7830 		return QDF_STATUS_SUCCESS;
7831 
7832 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
7833 		mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id,
7834 							  pdev->pdev_id);
7835 
7836 		/* Allocate sw rx descriptor pool for mon RxDMA buffer ring */
7837 		status = dp_rx_pdev_mon_buf_desc_pool_alloc(pdev, mac_for_pdev);
7838 		if (!QDF_IS_STATUS_SUCCESS(status)) {
7839 			dp_err("%s: dp_rx_pdev_mon_buf_desc_pool_alloc() failed\n",
7840 			       __func__);
7841 			goto fail0;
7842 		}
7843 
7844 		dp_rx_pdev_mon_buf_desc_pool_init(pdev, mac_for_pdev);
7845 
7846 		/* If monitor buffers are already allocated,
7847 		 * do not allocate.
7848 		 */
7849 		status = dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev,
7850 							  delayed_replenish);
7851 
7852 		mon_buf_ring = &pdev->soc->rxdma_mon_buf_ring[mac_for_pdev];
7853 		/*
7854 		 * Configure low interrupt threshold when monitor mode is
7855 		 * configured.
7856 		 */
7857 		if (mon_buf_ring->hal_srng) {
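			/* Arm the low-threshold interrupt at 1/8th of the
			 * ring size so replenish kicks in before the ring
			 * runs dry.
			 */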
7858 			num_entries = mon_buf_ring->num_entries;
7859 			hal_set_low_threshold(mon_buf_ring->hal_srng,
7860 					      num_entries >> 3);
7861 			htt_srng_setup(pdev->soc->htt_handle,
7862 				       pdev->pdev_id,
7863 				       mon_buf_ring->hal_srng,
7864 				       RXDMA_MONITOR_BUF);
7865 		}
7866 
7867 		/* Allocate link descriptors for the mon link descriptor ring */
7868 		status = dp_hw_link_desc_pool_banks_alloc(soc, mac_for_pdev);
7869 		if (!QDF_IS_STATUS_SUCCESS(status)) {
7870 			dp_err("%s: dp_hw_link_desc_pool_banks_alloc() failed",
7871 			       __func__);
7872 			goto fail0;
7873 		}
7874 		dp_link_desc_ring_replenish(soc, mac_for_pdev);
7875 
7876 		htt_srng_setup(soc->htt_handle, pdev->pdev_id,
7877 			       soc->rxdma_mon_desc_ring[mac_for_pdev].hal_srng,
7878 			       RXDMA_MONITOR_DESC);
7879 		htt_srng_setup(soc->htt_handle, pdev->pdev_id,
7880 			       soc->rxdma_mon_dst_ring[mac_for_pdev].hal_srng,
7881 			       RXDMA_MONITOR_DST);
7882 	}
7883 	pdev->pdev_mon_init = 1;
7884 
7885 	return QDF_STATUS_SUCCESS;
7886 
7887 fail0:
7888 	return QDF_STATUS_E_FAILURE;
7889 }
7890 
7891 /**
7892  * dp_vdev_set_monitor_mode_buf_rings() - set monitor mode buf rings
7893  *
7894  * Allocate monitor buffer ring entries and configure the low
7895  * threshold interrupt, honoring the delayed replenish configuration
7896  *
7897  * @pdev: DP pdev object
7898  *
7899  * Return: void
7900  */
7901 static void dp_vdev_set_monitor_mode_buf_rings(struct dp_pdev *pdev)
7902 {
7903 	uint32_t mac_id;
7904 	uint32_t mac_for_pdev;
7905 	struct dp_srng *mon_buf_ring;
7906 	uint32_t num_entries;
7907 	struct dp_soc *soc = pdev->soc;
7908 
7909 	/* If delayed monitor replenish is disabled, set up the monitor
7910 	 * rings and replenish them fully now.
7911 	 */
7912 	if (!wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx)) {
7913 		dp_vdev_set_monitor_mode_rings(pdev, false);
7914 	} else {
7915 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
7916 			mac_for_pdev =
7917 				dp_get_lmac_id_for_pdev_id(pdev->soc,
7918 							   mac_id,
7919 							   pdev->pdev_id);
7920 
7921 			dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev,
7922 							 FALSE);
7923 			mon_buf_ring =
7924 				&pdev->soc->rxdma_mon_buf_ring[mac_for_pdev];
7925 			/*
7926 			 * Configure low interrupt threshold when monitor mode is
7927 			 * configured.
7928 			 */
7929 			if (mon_buf_ring->hal_srng) {
7930 				num_entries = mon_buf_ring->num_entries;
7931 				hal_set_low_threshold(mon_buf_ring->hal_srng,
7932 						      num_entries >> 3);
7933 				htt_srng_setup(pdev->soc->htt_handle,
7934 					       pdev->pdev_id,
7935 					       mon_buf_ring->hal_srng,
7936 					       RXDMA_MONITOR_BUF);
7937 			}
7938 		}
7939 	}
7940 }
7941 
7942 /**
7943  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
7944  * @dp_soc: Datapath soc handle, @vdev_id: id of the Datapath VDEV
7945  * @special_monitor: Flag to denote smart/special monitor mode
7946  *
7947  * Return: 0 on success, not 0 on failure
7948  */
7949 static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_soc_t *dp_soc,
7950 					   uint8_t vdev_id,
7951 					   uint8_t special_monitor)
7952 {
7953 	struct dp_soc *soc = (struct dp_soc *)dp_soc;
7954 	struct dp_pdev *pdev;
7955 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
7956 						     DP_MOD_ID_CDP);
7957 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7958 
7959 	if (!vdev)
7960 		return QDF_STATUS_E_FAILURE;
7961 
7962 	pdev = vdev->pdev;
7963 	pdev->monitor_vdev = vdev;
7964 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
7965 		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
7966 		  pdev, pdev->pdev_id, pdev->soc, vdev);
7967 
7968 	/*
7969 	 * Do not configure the monitor buf ring and filter for smart and
7970 	 * lite monitor modes.
7971 	 * For smart monitor, filters are added along with the first NAC;
7972 	 * for lite monitor, the required configuration is done through
7973 	 * dp_set_pdev_param
7974 	 */
7975 
7976 	if (special_monitor) {
7977 		status = QDF_STATUS_SUCCESS;
7978 		goto fail;
7979 	}
7980 
7981 	/* Check if current pdev's monitor_vdev exists */
7982 	if (pdev->monitor_configured) {
7983 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
7984 			  "monitor vap already created vdev=%pK\n", vdev);
7985 		status = QDF_STATUS_E_RESOURCES;
7986 		goto fail;
7987 	}
7988 
7989 	pdev->monitor_configured = true;
7990 
7991 	dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_ENABLE);
7992 	dp_mon_filter_setup_mon_mode(pdev);
7993 	status = dp_mon_filter_update(pdev);
7994 	if (status != QDF_STATUS_SUCCESS) {
7995 		dp_cdp_err("%pK: Failed to set monitor filters", soc);
7996 		dp_mon_filter_reset_mon_mode(pdev);
7997 		pdev->monitor_configured = false;
7998 		pdev->monitor_vdev = NULL;
7999 	}
8000 
8001 fail:
8002 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8003 	return status;
8004 }
8005 
8006 /**
8007  * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
8008  * @soc: soc handle
8009  * @pdev_id: id of Datapath PDEV handle
8010  * @filter_val: Flag to select Filter for monitor mode
8011  * Return: 0 on success, not 0 on failure
8012  */
8013 static QDF_STATUS
8014 dp_pdev_set_advance_monitor_filter(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
8015 				   struct cdp_monitor_filter *filter_val)
8016 {
8017 	/* Many monitor VAPs can exist in a system but only one can be up at
8018 	 * any time
8019 	 */
8020 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8021 	struct dp_vdev *vdev;
8022 	struct dp_pdev *pdev =
8023 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8024 						   pdev_id);
8025 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8026 
8027 	if (!pdev)
8028 		return QDF_STATUS_E_FAILURE;
8029 
8030 	vdev = pdev->monitor_vdev;
8031 
8032 	if (!vdev)
8033 		return QDF_STATUS_E_FAILURE;
8034 
8035 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
8036 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
8037 		pdev, pdev_id, soc, vdev);
8038 
8039 	/* Check if current pdev's monitor_vdev exists */
8040 	if (!pdev->monitor_vdev) {
8041 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
8042 			"vdev=%pK", vdev);
8043 		qdf_assert(vdev);
8044 	}
8045 
8046 	/* update filter mode, type in pdev structure */
8047 	pdev->mon_filter_mode = filter_val->mode;
8048 	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
8049 	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
8050 	pdev->fp_data_filter = filter_val->fp_data;
8051 	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
8052 	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
8053 	pdev->mo_data_filter = filter_val->mo_data;
8054 
8055 	dp_mon_filter_setup_mon_mode(pdev);
8056 	status = dp_mon_filter_update(pdev);
8057 	if (status != QDF_STATUS_SUCCESS) {
8058 		dp_rx_mon_dest_err("%pK: Failed to set filter for advance mon mode",
8059 				   soc);
8060 		dp_mon_filter_reset_mon_mode(pdev);
8061 	}
8062 
8063 	return status;
8064 }
8065 
8066 /**
8067  * dp_deliver_tx_mgmt() - Deliver mgmt frame for tx capture
8068  * @cdp_soc : data path soc handle
8069  * @pdev_id : pdev_id
8070  * @nbuf: Management frame buffer
8071  */
8072 static QDF_STATUS
8073 dp_deliver_tx_mgmt(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, qdf_nbuf_t nbuf)
8074 {
8075 	struct dp_pdev *pdev =
8076 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
8077 						   pdev_id);
8078 
8079 	if (!pdev)
8080 		return QDF_STATUS_E_FAILURE;
8081 
8082 	dp_deliver_mgmt_frm(pdev, nbuf);
8083 
8084 	return QDF_STATUS_SUCCESS;
8085 }
8086 
8087 /**
8088  * dp_mon_set_bsscolor() - sets bsscolor for tx capture
8089  * @pdev: Datapath PDEV handle
8090  * @bsscolor: new bsscolor
8091  */
8092 static void
8093 dp_mon_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor)
8094 {
8095 	pdev->rx_mon_recv_status.bsscolor = bsscolor;
8096 }
8097 
8098 /**
8099  * dp_pdev_get_filter_ucast_data() - get DP PDEV monitor ucast filter
8100  * @pdev_handle: Datapath PDEV handle
8101  *
8102  * Return: true on ucast filter flag set
8103  */
8104 static bool dp_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle)
8105 {
8106 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8107 
8108 	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
8109 	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
8110 		return true;
8111 
8112 	return false;
8113 }
8114 
8115 /**
8116  * dp_pdev_get_filter_mcast_data() - get DP PDEV monitor mcast filter
8117  * @pdev_handle: Datapath PDEV handle
8118  * Return: true on mcast filter flag set
8119  */
8120 static bool dp_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle)
8121 {
8122 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8123 
8124 	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
8125 	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
8126 		return true;
8127 
8128 	return false;
8129 }
8130 
8131 /**
8132  * dp_pdev_get_filter_non_data() - get DP PDEV monitor non_data filter
8133  * @pdev_handle: Datapath PDEV handle
8134  * Return: true on non data filter flag set
8135  */
8136 static bool dp_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle)
8137 {
8138 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8139 
8140 	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
8141 	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
8142 		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
8143 		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
8144 			return true;
8145 		}
8146 	}
8147 
8148 	return false;
8149 }
8150 
8151 #ifdef MESH_MODE_SUPPORT
8152 static
8153 void dp_vdev_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
8154 {
8155 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
8156 
8157 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
8158 	vdev->mesh_vdev = val;
8159 	if (val)
8160 		vdev->skip_sw_tid_classification |=
8161 			DP_TX_MESH_ENABLED;
8162 	else
8163 		vdev->skip_sw_tid_classification &=
8164 			~DP_TX_MESH_ENABLED;
8165 }
8166 
8167 /*
8168  * dp_vdev_set_mesh_rx_filter() - to set the mesh rx filter
8169  * @vdev_hdl: virtual device object
8170  * @val: value to be set
8171  *
8172  * Return: void
8173  */
8174 static
8175 void dp_vdev_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
8176 {
8177 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
8178 
8179 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
8180 	vdev->mesh_rx_filter = val;
8181 }
8182 #endif
8183 
8184 /*
8185  * dp_vdev_set_hlos_tid_override() - to set hlos tid override
8186  * @vdev: virtual device object
8187  * @val: value to be set
8188  *
8189  * Return: void
8190  */
8191 static
8192 void dp_vdev_set_hlos_tid_override(struct dp_vdev *vdev, uint32_t val)
8193 {
8194 	dp_cdp_info("%pK: val %d", vdev->pdev->soc, val);
8195 	if (val)
8196 		vdev->skip_sw_tid_classification |=
8197 			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
8198 	else
8199 		vdev->skip_sw_tid_classification &=
8200 			~DP_TXRX_HLOS_TID_OVERRIDE_ENABLED;
8201 }
8202 
8203 /*
8204  * dp_vdev_get_hlos_tid_override() - to get hlos tid override flag
8205  * @vdev_hdl: virtual device object
8207  *
8208  * Return: 1 if this flag is set
8209  */
8210 static
8211 uint8_t dp_vdev_get_hlos_tid_override(struct cdp_vdev *vdev_hdl)
8212 {
8213 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
8214 
8215 	return !!(vdev->skip_sw_tid_classification &
8216 			DP_TXRX_HLOS_TID_OVERRIDE_ENABLED);
8217 }
8218 
8219 #ifdef VDEV_PEER_PROTOCOL_COUNT
8220 static void dp_enable_vdev_peer_protocol_count(struct cdp_soc_t *soc_hdl,
8221 					       int8_t vdev_id,
8222 					       bool enable)
8223 {
8224 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8225 	struct dp_vdev *vdev;
8226 
8227 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
8228 	if (!vdev)
8229 		return;
8230 
8231 	dp_info("enable %d vdev_id %d", enable, vdev_id);
8232 	vdev->peer_protocol_count_track = enable;
8233 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8234 }
8235 
8236 static void dp_enable_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
8237 						   int8_t vdev_id,
8238 						   int drop_mask)
8239 {
8240 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8241 	struct dp_vdev *vdev;
8242 
8243 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
8244 	if (!vdev)
8245 		return;
8246 
8247 	dp_info("drop_mask %d vdev_id %d", drop_mask, vdev_id);
8248 	vdev->peer_protocol_count_dropmask = drop_mask;
8249 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8250 }
8251 
8252 static int dp_is_vdev_peer_protocol_count_enabled(struct cdp_soc_t *soc_hdl,
8253 						  int8_t vdev_id)
8254 {
8255 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8256 	struct dp_vdev *vdev;
8257 	int peer_protocol_count_track;
8258 
8259 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
8260 	if (!vdev)
8261 		return 0;
8262 
8263 	dp_info("enable %d vdev_id %d", vdev->peer_protocol_count_track,
8264 		vdev_id);
8265 	peer_protocol_count_track =
8266 		vdev->peer_protocol_count_track;
8267 
8268 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8269 	return peer_protocol_count_track;
8270 }
8271 
8272 static int dp_get_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
8273 					       int8_t vdev_id)
8274 {
8275 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8276 	struct dp_vdev *vdev;
8277 	int peer_protocol_count_dropmask;
8278 
8279 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
8280 	if (!vdev)
8281 		return 0;
8282 
8283 	dp_info("drop_mask %d vdev_id %d", vdev->peer_protocol_count_dropmask,
8284 		vdev_id);
8285 	peer_protocol_count_dropmask =
8286 		vdev->peer_protocol_count_dropmask;
8287 
8288 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8289 	return peer_protocol_count_dropmask;
8290 }
8291 
8292 #endif
8293 
8294 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data)
8295 {
8296 	uint8_t pdev_count;
8297 
8298 	for (pdev_count = 0; pdev_count < MAX_PDEV_CNT; pdev_count++) {
8299 		if (soc->pdev_list[pdev_count] &&
8300 		    soc->pdev_list[pdev_count] == data)
8301 			return true;
8302 	}
8303 	return false;
8304 }
8305 
8306 /**
8307  * dp_rx_bar_stats_cb(): BAR received stats callback
8308  * @soc: SOC handle
8309  * @cb_ctxt: Call back context
8310  * @reo_status: Reo status
8311  *
8312  * return: void
8313  */
8314 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
8315 	union hal_reo_status *reo_status)
8316 {
8317 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
8318 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
8319 
8320 	if (!dp_check_pdev_exists(soc, pdev)) {
8321 		dp_err_rl("pdev doesn't exist");
8322 		return;
8323 	}
8324 
8325 	if (!qdf_atomic_read(&soc->cmn_init_done))
8326 		return;
8327 
8328 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
8329 		DP_PRINT_STATS("REO stats failure %d",
8330 			       queue_status->header.status);
8331 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
8332 		return;
8333 	}
8334 
8335 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
8336 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
8337 
8338 }
8339 
8340 /**
8341  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
8342  * @vdev: DP VDEV handle
8343  * @vdev_stats: aggregate vdev stats structure to be filled
8344  * return: void
8345  */
8346 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
8347 			     struct cdp_vdev_stats *vdev_stats)
8348 {
8349 	struct dp_soc *soc = NULL;
8350 
8351 	if (!vdev || !vdev->pdev)
8352 		return;
8353 
8354 	soc = vdev->pdev->soc;
8355 
8356 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
8357 
8358 	dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, vdev_stats,
8359 			     DP_MOD_ID_GENERIC_STATS);
8360 
8361 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
8362 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
8363 			     vdev_stats, vdev->vdev_id,
8364 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
8365 #endif
8366 }
8367 
8368 void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
8369 {
8370 	struct dp_vdev *vdev = NULL;
8371 	struct dp_soc *soc;
8372 	struct cdp_vdev_stats *vdev_stats =
8373 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
8374 
8375 	if (!vdev_stats) {
8376 		dp_cdp_err("%pK: DP alloc failure - unable to allocate vdev stats",
8377 			   pdev->soc);
8378 		return;
8379 	}
8380 
8381 	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
8382 	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
8383 	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
8384 
8385 	if (pdev->mcopy_mode)
8386 		DP_UPDATE_STATS(pdev, pdev->invalid_peer);
8387 
8388 	soc = pdev->soc;
8389 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
8390 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
8391 
8392 		dp_aggregate_vdev_stats(vdev, vdev_stats);
8393 		dp_update_pdev_stats(pdev, vdev_stats);
8394 		dp_update_pdev_ingress_stats(pdev, vdev);
8395 	}
8396 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
8397 	qdf_mem_free(vdev_stats);
8398 
8399 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
8400 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
8401 			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
8402 #endif
8403 }
8404 
8405 /**
8406  * dp_vdev_getstats() - get vdev packet level stats
8407  * @vdev_handle: Datapath VDEV handle
8408  * @stats: cdp network device stats structure
8409  *
8410  * Return: QDF_STATUS
8411  */
8412 static QDF_STATUS dp_vdev_getstats(struct cdp_vdev *vdev_handle,
8413 				   struct cdp_dev_stats *stats)
8414 {
8415 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8416 	struct dp_pdev *pdev;
8417 	struct dp_soc *soc;
8418 	struct cdp_vdev_stats *vdev_stats;
8419 
8420 	if (!vdev)
8421 		return QDF_STATUS_E_FAILURE;
8422 
8423 	pdev = vdev->pdev;
8424 	if (!pdev)
8425 		return QDF_STATUS_E_FAILURE;
8426 
8427 	soc = pdev->soc;
8428 
8429 	vdev_stats = qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
8430 
8431 	if (!vdev_stats) {
8432 		dp_cdp_err("%pK: DP alloc failure - unable to allocate vdev stats",
8433 			   soc);
8434 		return QDF_STATUS_E_FAILURE;
8435 	}
8436 
8437 	dp_aggregate_vdev_stats(vdev, vdev_stats);
8438 
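	/* Map the DP aggregates onto the generic cdp_dev_stats layout
	 * (net_device-style counters) consumed by the OS interface.
	 */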
8439 	stats->tx_packets = vdev_stats->tx_i.rcvd.num;
8440 	stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
8441 
8442 	stats->tx_errors = vdev_stats->tx.tx_failed +
8443 		vdev_stats->tx_i.dropped.dropped_pkt.num;
8444 	stats->tx_dropped = stats->tx_errors;
8445 
8446 	stats->rx_packets = vdev_stats->rx.unicast.num +
8447 		vdev_stats->rx.multicast.num +
8448 		vdev_stats->rx.bcast.num;
8449 	stats->rx_bytes = vdev_stats->rx.unicast.bytes +
8450 		vdev_stats->rx.multicast.bytes +
8451 		vdev_stats->rx.bcast.bytes;
8452 
8453 	qdf_mem_free(vdev_stats);
8454 
8455 	return QDF_STATUS_SUCCESS;
8456 }
8457 
8458 /**
8459  * dp_pdev_getstats() - get pdev packet level stats
8460  * @pdev_handle: Datapath PDEV handle
8461  * @stats: cdp network device stats structure
8462  *
8463  * Return: void
8464  */
8465 static void dp_pdev_getstats(struct cdp_pdev *pdev_handle,
8466 			     struct cdp_dev_stats *stats)
8467 {
8468 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8469 
8470 	dp_aggregate_pdev_stats(pdev);
8471 
8472 	stats->tx_packets = pdev->stats.tx_i.rcvd.num;
8473 	stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
8474 
8475 	stats->tx_errors = pdev->stats.tx.tx_failed +
8476 		pdev->stats.tx_i.dropped.dropped_pkt.num;
8477 	stats->tx_dropped = stats->tx_errors;
8478 
8479 	stats->rx_packets = pdev->stats.rx.unicast.num +
8480 		pdev->stats.rx.multicast.num +
8481 		pdev->stats.rx.bcast.num;
8482 	stats->rx_bytes = pdev->stats.rx.unicast.bytes +
8483 		pdev->stats.rx.multicast.bytes +
8484 		pdev->stats.rx.bcast.bytes;
8485 	stats->rx_errors = pdev->stats.err.ip_csum_err +
8486 		pdev->stats.err.tcp_udp_csum_err +
8487 		pdev->stats.rx.err.mic_err +
8488 		pdev->stats.rx.err.decrypt_err +
8489 		pdev->stats.err.rxdma_error +
8490 		pdev->stats.err.reo_error;
8491 	stats->rx_dropped = pdev->stats.dropped.msdu_not_done +
8492 		pdev->stats.dropped.mec +
8493 		pdev->stats.dropped.mesh_filter +
8494 		pdev->stats.dropped.wifi_parse +
8495 		pdev->stats.dropped.mon_rx_drop +
8496 		pdev->stats.dropped.mon_radiotap_update_err;
8497 }
8498 
8499 /**
8500  * dp_get_device_stats() - get interface level packet stats
8501  * @soc_hdl: soc handle
8502  * @id: vdev_id or pdev_id based on type
8503  * @stats: cdp network device stats structure
8504  * @type: device type pdev/vdev
8505  *
8506  * Return: QDF_STATUS
8507  */
8508 static QDF_STATUS dp_get_device_stats(struct cdp_soc_t *soc_hdl, uint8_t id,
8509 				      struct cdp_dev_stats *stats,
8510 				      uint8_t type)
8511 {
8512 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8513 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
8514 	struct dp_vdev *vdev;
8515 
8516 	switch (type) {
8517 	case UPDATE_VDEV_STATS:
8518 		vdev = dp_vdev_get_ref_by_id(soc, id, DP_MOD_ID_CDP);
8519 
8520 		if (vdev) {
8521 			status = dp_vdev_getstats((struct cdp_vdev *)vdev,
8522 						  stats);
8523 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8524 		}
8525 		return status;
8526 	case UPDATE_PDEV_STATS:
8527 		{
8528 			struct dp_pdev *pdev =
8529 				dp_get_pdev_from_soc_pdev_id_wifi3(
8530 						(struct dp_soc *)soc,
8531 						 id);
8532 			if (pdev) {
8533 				dp_pdev_getstats((struct cdp_pdev *)pdev,
8534 						 stats);
8535 				return QDF_STATUS_SUCCESS;
8536 			}
8537 		}
8538 		break;
8539 	default:
8540 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
8541 			"apstats cannot be updated for this input "
8542 			"type %d", type);
8543 		break;
8544 	}
8545 
8546 	return QDF_STATUS_E_FAILURE;
8547 }
8548 
8549 const
8550 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
8551 {
8552 	switch (ring_type) {
8553 	case REO_DST:
8554 		return "Reo_dst";
8555 	case REO_EXCEPTION:
8556 		return "Reo_exception";
8557 	case REO_CMD:
8558 		return "Reo_cmd";
8559 	case REO_REINJECT:
8560 		return "Reo_reinject";
8561 	case REO_STATUS:
8562 		return "Reo_status";
8563 	case WBM2SW_RELEASE:
8564 		return "wbm2sw_release";
8565 	case TCL_DATA:
8566 		return "tcl_data";
8567 	case TCL_CMD_CREDIT:
8568 		return "tcl_cmd_credit";
8569 	case TCL_STATUS:
8570 		return "tcl_status";
8571 	case SW2WBM_RELEASE:
8572 		return "sw2wbm_release";
8573 	case RXDMA_BUF:
8574 		return "Rxdma_buf";
8575 	case RXDMA_DST:
8576 		return "Rxdma_dst";
8577 	case RXDMA_MONITOR_BUF:
8578 		return "Rxdma_monitor_buf";
8579 	case RXDMA_MONITOR_DESC:
8580 		return "Rxdma_monitor_desc";
8581 	case RXDMA_MONITOR_STATUS:
8582 		return "Rxdma_monitor_status";
8583 	case WBM_IDLE_LINK:
8584 		return "WBM_hw_idle_link";
8585 	default:
8586 		dp_err("Invalid ring type");
8587 		break;
8588 	}
8589 	return "Invalid";
8590 }
8591 
8592 /*
8593  * dp_print_napi_stats(): NAPI stats
8594  * @soc: soc handle
8595  */
8596 void dp_print_napi_stats(struct dp_soc *soc)
8597 {
8598 	hif_print_napi_stats(soc->hif_handle);
8599 }
8600 
8601 #ifdef QCA_PEER_EXT_STATS
8602 /**
8603  * dp_txrx_host_peer_ext_stats_clr() - Reinitialize the txrx peer ext stats
8604  * @peer: Datapath peer
8605  */
8606 static inline void dp_txrx_host_peer_ext_stats_clr(struct dp_peer *peer)
8607 {
8608 	if (peer->pext_stats)
8609 		qdf_mem_zero(peer->pext_stats, sizeof(*peer->pext_stats));
8610 }
8611 #else
8612 static inline void dp_txrx_host_peer_ext_stats_clr(struct dp_peer *peer)
8613 {
8614 }
8615 #endif
8616 
8617 /**
8618  * dp_txrx_host_peer_stats_clr() - Reinitialize the txrx peer stats
8619  * @soc: Datapath soc
8620  * @peer: Datapath peer
8621  * @arg: argument to iter function
8622  *
8623  * Return: void
8624  */
8625 static inline void
8626 dp_txrx_host_peer_stats_clr(struct dp_soc *soc,
8627 			    struct dp_peer *peer,
8628 			    void *arg)
8629 {
8630 	struct dp_rx_tid *rx_tid;
8631 	uint8_t tid;
8632 
8633 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
8634 		rx_tid = &peer->rx_tid[tid];
8635 		DP_STATS_CLR(rx_tid);
8636 	}
8637 
8638 	DP_STATS_CLR(peer);
8639 
8640 	dp_txrx_host_peer_ext_stats_clr(peer);
8641 
8642 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
8643 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, peer->vdev->pdev->soc,
8644 			     &peer->stats,  peer->peer_id,
8645 			     UPDATE_PEER_STATS, peer->vdev->pdev->pdev_id);
8646 #endif
8647 }
8648 
8649 /**
8650  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
8651  * @vdev: DP_VDEV handle
8652  * @soc: DP_SOC handle
8653  *
8654  * Return: QDF_STATUS
8655  */
8656 static inline QDF_STATUS
8657 dp_txrx_host_stats_clr(struct dp_vdev *vdev, struct dp_soc *soc)
8658 {
8659 	if (!vdev || !vdev->pdev)
8660 		return QDF_STATUS_E_FAILURE;
8661 
8662 	/*
8663 	 * If NSS offload is enabled, send a message to the NSS FW to clear
8664 	 * the stats. Once the NSS FW clears the statistics, then clear the
8665 	 * host statistics.
8666 	 */
8667 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
8668 		if (soc->cdp_soc.ol_ops->nss_stats_clr)
8669 			soc->cdp_soc.ol_ops->nss_stats_clr(soc->ctrl_psoc,
8670 							   vdev->vdev_id);
8671 	}
8672 
8673 	DP_STATS_CLR(vdev->pdev);
8674 	DP_STATS_CLR(vdev->pdev->soc);
8675 	DP_STATS_CLR(vdev);
8676 
8677 	hif_clear_napi_stats(vdev->pdev->soc->hif_handle);
8678 
8679 	dp_vdev_iterate_peer(vdev, dp_txrx_host_peer_stats_clr, NULL,
8680 			     DP_MOD_ID_GENERIC_STATS);
8681 
8682 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
8683 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
8684 			     &vdev->stats,  vdev->vdev_id,
8685 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
8686 #endif
8687 	return QDF_STATUS_SUCCESS;
8688 }
8689 
8690 /*
8691  * dp_get_host_peer_stats() - function to print peer stats
8692  * @soc: dp_soc handle
8693  * @mac_addr: mac address of the peer
8694  *
8695  * Return: QDF_STATUS
8696  */
8697 static QDF_STATUS
8698 dp_get_host_peer_stats(struct cdp_soc_t *soc, uint8_t *mac_addr)
8699 {
8700 	struct dp_peer *peer = NULL;
8701 
8702 	if (!mac_addr) {
8703 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
8704 			  "%s: NULL peer mac addr\n", __func__);
8705 		return QDF_STATUS_E_FAILURE;
8706 	}
8707 
8708 	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
8709 				      mac_addr, 0,
8710 				      DP_VDEV_ALL,
8711 				      DP_MOD_ID_CDP);
8712 	if (!peer) {
8713 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
8714 			  "%s: Invalid peer\n", __func__);
8715 		return QDF_STATUS_E_FAILURE;
8716 	}
8717 
8718 	dp_print_peer_stats(peer);
8719 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
8720 
8721 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8722 
8723 	return QDF_STATUS_SUCCESS;
8724 }
8725 
8726 /**
8727  * dp_txrx_stats_help() - Helper function for Txrx_Stats
8728  *
8729  * Return: None
8730  */
8731 static void dp_txrx_stats_help(void)
8732 {
8733 	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
8734 	dp_info("stats_option:");
8735 	dp_info("  1 -- HTT Tx Statistics");
8736 	dp_info("  2 -- HTT Rx Statistics");
8737 	dp_info("  3 -- HTT Tx HW Queue Statistics");
8738 	dp_info("  4 -- HTT Tx HW Sched Statistics");
8739 	dp_info("  5 -- HTT Error Statistics");
8740 	dp_info("  6 -- HTT TQM Statistics");
8741 	dp_info("  7 -- HTT TQM CMDQ Statistics");
8742 	dp_info("  8 -- HTT TX_DE_CMN Statistics");
8743 	dp_info("  9 -- HTT Tx Rate Statistics");
8744 	dp_info(" 10 -- HTT Rx Rate Statistics");
8745 	dp_info(" 11 -- HTT Peer Statistics");
8746 	dp_info(" 12 -- HTT Tx SelfGen Statistics");
8747 	dp_info(" 13 -- HTT Tx MU HWQ Statistics");
8748 	dp_info(" 14 -- HTT RING_IF_INFO Statistics");
8749 	dp_info(" 15 -- HTT SRNG Statistics");
8750 	dp_info(" 16 -- HTT SFM Info Statistics");
8751 	dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
8752 	dp_info(" 18 -- HTT Peer List Details");
8753 	dp_info(" 20 -- Clear Host Statistics");
8754 	dp_info(" 21 -- Host Rx Rate Statistics");
8755 	dp_info(" 22 -- Host Tx Rate Statistics");
8756 	dp_info(" 23 -- Host Tx Statistics");
8757 	dp_info(" 24 -- Host Rx Statistics");
8758 	dp_info(" 25 -- Host AST Statistics");
8759 	dp_info(" 26 -- Host SRNG PTR Statistics");
8760 	dp_info(" 27 -- Host Mon Statistics");
8761 	dp_info(" 28 -- Host REO Queue Statistics");
8762 	dp_info(" 29 -- Host Soc cfg param Statistics");
8763 	dp_info(" 30 -- Host pdev cfg param Statistics");
8764 	dp_info(" 31 -- Host FISA stats");
8765 	dp_info(" 32 -- Host Register Work stats");
8766 }
8767 
8768 /**
8769  * dp_print_host_stats() - Function to print the stats aggregated at host
8770  * @vdev: DP_VDEV handle
8771  * @req: host stats type
8772  * @soc: dp soc handle
8773  *
8774  * Return: 0 on success, print error message in case of failure
8775  */
8776 static int
8777 dp_print_host_stats(struct dp_vdev *vdev,
8778 		    struct cdp_txrx_stats_req *req,
8779 		    struct dp_soc *soc)
8780 {
8781 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
8782 	enum cdp_host_txrx_stats type =
8783 			dp_stats_mapping_table[req->stats][STATS_HOST];
8784 
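	/* Aggregate once up front so every host stats dump below reports a
	 * consistent snapshot.
	 */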
8785 	dp_aggregate_pdev_stats(pdev);
8786 
8787 	switch (type) {
8788 	case TXRX_CLEAR_STATS:
8789 		dp_txrx_host_stats_clr(vdev, soc);
8790 		break;
8791 	case TXRX_RX_RATE_STATS:
8792 		dp_print_rx_rates(vdev);
8793 		break;
8794 	case TXRX_TX_RATE_STATS:
8795 		dp_print_tx_rates(vdev);
8796 		break;
8797 	case TXRX_TX_HOST_STATS:
8798 		dp_print_pdev_tx_stats(pdev);
8799 		dp_print_soc_tx_stats(pdev->soc);
8800 		break;
8801 	case TXRX_RX_HOST_STATS:
8802 		dp_print_pdev_rx_stats(pdev);
8803 		dp_print_soc_rx_stats(pdev->soc);
8804 		break;
8805 	case TXRX_AST_STATS:
8806 		dp_print_ast_stats(pdev->soc);
8807 		dp_print_mec_stats(pdev->soc);
8808 		dp_print_peer_table(vdev);
8809 		break;
8810 	case TXRX_SRNG_PTR_STATS:
8811 		dp_print_ring_stats(pdev);
8812 		break;
8813 	case TXRX_RX_MON_STATS:
8814 		dp_print_pdev_rx_mon_stats(pdev);
8815 		break;
8816 	case TXRX_REO_QUEUE_STATS:
8817 		dp_get_host_peer_stats((struct cdp_soc_t *)pdev->soc,
8818 				       req->peer_addr);
8819 		break;
8820 	case TXRX_SOC_CFG_PARAMS:
8821 		dp_print_soc_cfg_params(pdev->soc);
8822 		break;
8823 	case TXRX_PDEV_CFG_PARAMS:
8824 		dp_print_pdev_cfg_params(pdev);
8825 		break;
8826 	case TXRX_NAPI_STATS:
8827 		dp_print_napi_stats(pdev->soc);
8828 		break;
8829 	case TXRX_SOC_INTERRUPT_STATS:
8830 		dp_print_soc_interrupt_stats(pdev->soc);
8831 		break;
8832 	case TXRX_SOC_FSE_STATS:
8833 		dp_rx_dump_fisa_table(pdev->soc);
8834 		break;
8835 	case TXRX_HAL_REG_WRITE_STATS:
8836 		hal_dump_reg_write_stats(pdev->soc->hal_soc);
8837 		hal_dump_reg_write_srng_stats(pdev->soc->hal_soc);
8838 		break;
8839 	case TXRX_SOC_REO_HW_DESC_DUMP:
8840 		dp_get_rx_reo_queue_info((struct cdp_soc_t *)pdev->soc,
8841 					 vdev->vdev_id);
8842 		break;
8843 	default:
8844 		dp_info("Wrong Input For TxRx Host Stats");
8845 		dp_txrx_stats_help();
8846 		break;
8847 	}
8848 	return 0;
8849 }
8850 
8851 /*
8852  * is_ppdu_txrx_capture_enabled() - API to check whether both pktlog and
8853  *                              debug_sniffer modes are enabled or not
8854  * @pdev: dp pdev handle
8855  *
8856  * Return: bool
8857  */
8858 static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
8859 {
8860 	if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
8861 	    !pdev->mcopy_mode)
8862 		return true;
8863 	else
8864 		return false;
8865 }
8866 
8867 /*
8868  * dp_set_bpr_enable() - API to enable/disable bpr feature
8869  * @pdev: DP_PDEV handle
8870  * @val: Provided value
8871  *
8872  * Return: 0 for success, nonzero for failure
8873  */
8874 static QDF_STATUS
8875 dp_set_bpr_enable(struct dp_pdev *pdev, int val)
8876 {
8877 	switch (val) {
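	/* The PPDU stats config pushed to FW is shared between BPR,
	 * enhanced stats, tx sniffer and pktlog; each transition below
	 * re-sends the config matching whichever features remain active.
	 */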
8878 	case CDP_BPR_DISABLE:
8879 		pdev->bpr_enable = CDP_BPR_DISABLE;
8880 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
8881 		    !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
8882 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
8883 		} else if (pdev->enhanced_stats_en &&
8884 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
8885 			   !pdev->pktlog_ppdu_stats) {
8886 			dp_h2t_cfg_stats_msg_send(pdev,
8887 						  DP_PPDU_STATS_CFG_ENH_STATS,
8888 						  pdev->pdev_id);
8889 		}
8890 		break;
8891 	case CDP_BPR_ENABLE:
8892 		pdev->bpr_enable = CDP_BPR_ENABLE;
8893 		if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
8894 		    !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
8895 			dp_h2t_cfg_stats_msg_send(pdev,
8896 						  DP_PPDU_STATS_CFG_BPR,
8897 						  pdev->pdev_id);
8898 		} else if (pdev->enhanced_stats_en &&
8899 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
8900 			   !pdev->pktlog_ppdu_stats) {
8901 			dp_h2t_cfg_stats_msg_send(pdev,
8902 						  DP_PPDU_STATS_CFG_BPR_ENH,
8903 						  pdev->pdev_id);
8904 		} else if (pdev->pktlog_ppdu_stats) {
8905 			dp_h2t_cfg_stats_msg_send(pdev,
8906 						  DP_PPDU_STATS_CFG_BPR_PKTLOG,
8907 						  pdev->pdev_id);
8908 		}
8909 		break;
8910 	default:
8911 		break;
8912 	}
8913 
8914 	return QDF_STATUS_SUCCESS;
8915 }
8916 
8917 /*
8918  * dp_pdev_tid_stats_ingress_inc() - increment ingress stack counter
8919  * @pdev: pdev handle
8920  * @val: increase in value
8921  *
8922  * Return: void
8923  */
8924 static void
8925 dp_pdev_tid_stats_ingress_inc(struct dp_pdev *pdev, uint32_t val)
8926 {
8927 	pdev->stats.tid_stats.ingress_stack += val;
8928 }
8929 
8930 /*
8931  * dp_pdev_tid_stats_osif_drop() - increment osif drop counter
8932  * @pdev: pdev handle
8933  * @val: increase in value
8934  *
8935  * Return: void
8936  */
8937 static void
8938 dp_pdev_tid_stats_osif_drop(struct dp_pdev *pdev, uint32_t val)
8939 {
8940 	pdev->stats.tid_stats.osif_drop += val;
8941 }
8942 
8943 /*
8944  * dp_config_debug_sniffer() - API to enable/disable debug sniffer
8945  * @pdev: DP_PDEV handle
8946  * @val: user provided value
8947  *
8948  * Return: 0 for success. nonzero for failure.
8949  */
8950 static QDF_STATUS
8951 dp_config_debug_sniffer(struct dp_pdev *pdev, int val)
8952 {
8953 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8954 
8955 	/*
8956 	 * Note: The mirror copy mode cannot co-exist with any other
8957 	 * monitor modes. Hence disabling the filter for this mode will
8958 	 * reset the monitor destination ring filters.
8959 	 */
8960 	if (pdev->mcopy_mode) {
8961 #ifdef FEATURE_PERPKT_INFO
8962 		dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_DISABLE);
8963 		dp_pdev_disable_mcopy_code(pdev);
8964 		dp_mon_filter_reset_mcopy_mode(pdev);
8965 		status = dp_mon_filter_update(pdev);
8966 		if (status != QDF_STATUS_SUCCESS) {
8967 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8968 				  FL("Failed to reset M_copy mode filters"));
8969 		}
8970 		pdev->monitor_configured = false;
8971 #endif /* FEATURE_PERPKT_INFO */
8972 	}
8973 	switch (val) {
8974 	case 0:
8975 		pdev->tx_sniffer_enable = 0;
8976 		pdev->monitor_configured = false;
8977 
8978 		/*
8979 		 * We don't need to reset the Rx monitor status ring or call
8980 		 * the API dp_ppdu_ring_reset() when all debug sniffer modes
8981 		 * are disabled. The Rx monitor status ring will be disabled
8982 		 * when the last mode using it gets disabled.
8983 		 */
8984 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
8985 		    !pdev->bpr_enable) {
8986 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
8987 		} else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
8988 			dp_h2t_cfg_stats_msg_send(pdev,
8989 				DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
8990 		} else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
8991 			dp_h2t_cfg_stats_msg_send(pdev,
8992 						  DP_PPDU_STATS_CFG_BPR_ENH,
8993 						  pdev->pdev_id);
8994 		} else {
8995 			dp_h2t_cfg_stats_msg_send(pdev,
8996 						  DP_PPDU_STATS_CFG_BPR,
8997 						  pdev->pdev_id);
8998 		}
8999 		break;
9000 
9001 	case 1:
9002 		pdev->tx_sniffer_enable = 1;
9003 		pdev->monitor_configured = false;
9004 
9005 		if (!pdev->pktlog_ppdu_stats)
9006 			dp_h2t_cfg_stats_msg_send(pdev,
9007 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
9008 		break;
9009 	case 2:
9010 	case 4:
9011 		if (pdev->monitor_vdev) {
9012 			status = QDF_STATUS_E_RESOURCES;
9013 			break;
9014 		}
9015 
9016 #ifdef FEATURE_PERPKT_INFO
9017 		pdev->mcopy_mode = val;
9018 		pdev->tx_sniffer_enable = 0;
9019 		pdev->monitor_configured = true;
9020 
9021 		if (!wlan_cfg_is_delay_mon_replenish(pdev->soc->wlan_cfg_ctx))
9022 			dp_vdev_set_monitor_mode_rings(pdev, true);
9023 
9024 		/*
9025 		 * Setup the M copy mode filter.
9026 		 */
9027 		dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_ENABLE);
9028 		dp_mon_filter_setup_mcopy_mode(pdev);
9029 		status = dp_mon_filter_update(pdev);
9030 		if (status != QDF_STATUS_SUCCESS) {
9031 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
9032 				  FL("Failed to set M_copy mode filters"));
9033 			dp_mon_filter_reset_mcopy_mode(pdev);
9034 			dp_pdev_disable_mcopy_code(pdev);
9035 			return status;
9036 		}
9037 
9038 		if (!pdev->pktlog_ppdu_stats)
9039 			dp_h2t_cfg_stats_msg_send(pdev,
9040 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
9041 #endif /* FEATURE_PERPKT_INFO */
9042 		break;
9043 
9044 	default:
9045 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
9046 			"Invalid value");
9047 		break;
9048 	}
9049 	return status;
9050 }
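
/*
 * Illustrative usage sketch (editor's addition, not part of the driver
 * build): the mode values accepted by dp_config_debug_sniffer() select
 * the sniffer configuration as follows; the pdev argument is assumed to
 * be a valid struct dp_pdev pointer.
 *
 *	dp_config_debug_sniffer(pdev, 0);	// disable tx sniffer/M-copy
 *	dp_config_debug_sniffer(pdev, 1);	// tx sniffer only
 *	dp_config_debug_sniffer(pdev, 2);	// M-copy mode
 *	dp_config_debug_sniffer(pdev, 4);	// M-copy mode (alternate value,
 *						// also stored in mcopy_mode)
 */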
9051 
9052 #ifdef FEATURE_PERPKT_INFO
9053 /*
9054  * dp_enable_enhanced_stats() - API to enable enhanced statistics
9055  * @soc: DP_SOC handle
9056  * @pdev_id: id of DP_PDEV handle
9057  *
9058  * Return: QDF_STATUS
9059  */
9060 static QDF_STATUS
9061 dp_enable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
9062 {
9063 	struct dp_pdev *pdev = NULL;
9064 	QDF_STATUS status = QDF_STATUS_SUCCESS;
9065 
9066 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9067 						  pdev_id);
9068 
9069 	if (!pdev)
9070 		return QDF_STATUS_E_FAILURE;
9071 
9072 	if (pdev->enhanced_stats_en == 0)
9073 		dp_cal_client_timer_start(pdev->cal_client_ctx);
9074 
9075 	pdev->enhanced_stats_en = 1;
9076 
9077 	dp_mon_filter_setup_enhanced_stats(pdev);
9078 	status = dp_mon_filter_update(pdev);
9079 	if (status != QDF_STATUS_SUCCESS) {
9080 		dp_cdp_err("%pK: Failed to set enhanced mode filters", soc);
9081 		dp_mon_filter_reset_enhanced_stats(pdev);
9082 		dp_cal_client_timer_stop(pdev->cal_client_ctx);
9083 		pdev->enhanced_stats_en = 0;
9084 		return QDF_STATUS_E_FAILURE;
9085 	}
9086 
9087 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
9088 		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
9089 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
9090 		dp_h2t_cfg_stats_msg_send(pdev,
9091 					  DP_PPDU_STATS_CFG_BPR_ENH,
9092 					  pdev->pdev_id);
9093 	}
9094 
9095 	return QDF_STATUS_SUCCESS;
9096 }
9097 
9098 /*
9099  * dp_disable_enhanced_stats() - API to disable enhanced statistics
9100  * @soc: the soc handle
9101  * @pdev_id: pdev_id of pdev
9102  *
9103  * Return: QDF_STATUS
9104  */
9105 static QDF_STATUS
9106 dp_disable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
9107 {
9108 	struct dp_pdev *pdev =
9109 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9110 						   pdev_id);
9111 
9112 	if (!pdev)
9113 		return QDF_STATUS_E_FAILURE;
9114 
9115 	if (pdev->enhanced_stats_en == 1)
9116 		dp_cal_client_timer_stop(pdev->cal_client_ctx);
9117 
9118 	pdev->enhanced_stats_en = 0;
9119 
9120 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
9121 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
9122 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
9123 		dp_h2t_cfg_stats_msg_send(pdev,
9124 					  DP_PPDU_STATS_CFG_BPR,
9125 					  pdev->pdev_id);
9126 	}
9127 
9128 	dp_mon_filter_reset_enhanced_stats(pdev);
9129 	if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
9130 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
9131 			  FL("Failed to reset enhanced mode filters"));
9132 	}
9133 
9134 	return QDF_STATUS_SUCCESS;
9135 }
9136 #endif /* FEATURE_PERPKT_INFO */
9137 
9138 /*
9139  * dp_get_fw_peer_stats() - function to print peer stats
9140  * @soc: soc handle
9141  * @pdev_id: id of the pdev handle
9142  * @mac_addr: mac address of the peer
9143  * @cap: Type of htt stats requested
9144  * @is_wait: if set, wait on completion from firmware response
9145  *
9146  * Currently supporting only MAC-ID based requests:
9147  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
9148  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
9149  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
9150  *
9151  * Return: QDF_STATUS
9152  */
9153 static QDF_STATUS
9154 dp_get_fw_peer_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
9155 		     uint8_t *mac_addr,
9156 		     uint32_t cap, uint32_t is_wait)
9157 {
9158 	int i;
9159 	uint32_t config_param0 = 0;
9160 	uint32_t config_param1 = 0;
9161 	uint32_t config_param2 = 0;
9162 	uint32_t config_param3 = 0;
9163 	struct dp_pdev *pdev =
9164 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9165 						   pdev_id);
9166 
9167 	if (!pdev)
9168 		return QDF_STATUS_E_FAILURE;
9169 
9170 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
9171 	config_param0 |= (1 << (cap + 1));
9172 
9173 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
9174 		config_param1 |= (1 << i);
9175 	}
9176 
9177 	config_param2 |= (mac_addr[0] & 0x000000ff);
9178 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
9179 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
9180 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
9181 
9182 	config_param3 |= (mac_addr[4] & 0x000000ff);
9183 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
9184 
9185 	if (is_wait) {
9186 		qdf_event_reset(&pdev->fw_peer_stats_event);
9187 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
9188 					  config_param0, config_param1,
9189 					  config_param2, config_param3,
9190 					  0, DBG_STATS_COOKIE_DP_STATS, 0);
9191 		qdf_wait_single_event(&pdev->fw_peer_stats_event,
9192 				      DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
9193 	} else {
9194 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
9195 					  config_param0, config_param1,
9196 					  config_param2, config_param3,
9197 					  0, DBG_STATS_COOKIE_DEFAULT, 0);
9198 	}
9199 
9200 	return QDF_STATUS_SUCCESS;
9201 
9202 }
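
/*
 * Worked example (editor's addition, illustrative values): for a peer
 * MAC of 00:11:22:33:44:55 the packing above produces
 *
 *	config_param2 = 0x33221100;	// mac_addr[3..0]
 *	config_param3 = 0x00005544;	// mac_addr[5..4]
 *
 * and cap = 1 (HTT_PEER_STATS_REQ_MODE_NO_QUERY) sets bit 2 of
 * config_param0 via (1 << (cap + 1)).
 */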
9203 
9204 /* This struct definition will be removed from here
9205  * once it gets added to the FW headers. */
9206 struct httstats_cmd_req {
9207 	uint32_t config_param0;
9208 	uint32_t config_param1;
9209 	uint32_t config_param2;
9210 	uint32_t config_param3;
9211 	int cookie;
9212 	u_int8_t stats_id;
9213 };
9214 
9215 /*
9216  * dp_get_htt_stats() - function to process the HTT stats request
9217  * @soc: DP soc handle
9218  * @pdev_id: id of pdev handle
9219  * @data: pointer to request data
9220  * @data_len: length of request data
9221  *
9222  * Return: QDF_STATUS
9223  */
9224 static QDF_STATUS
9225 dp_get_htt_stats(struct cdp_soc_t *soc, uint8_t pdev_id, void *data,
9226 		 uint32_t data_len)
9227 {
9228 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
9229 	struct dp_pdev *pdev =
9230 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9231 						   pdev_id);
9232 
9233 	if (!pdev)
9234 		return QDF_STATUS_E_FAILURE;
9235 
9236 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
9237 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
9238 				req->config_param0, req->config_param1,
9239 				req->config_param2, req->config_param3,
9240 				req->cookie, DBG_STATS_COOKIE_DEFAULT, 0);
9241 
9242 	return QDF_STATUS_SUCCESS;
9243 }
9244 
9245 /**
9246  * dp_set_pdev_tidmap_prty_wifi3(): update tidmap priority in pdev
9247  * @pdev: DP_PDEV handle
9248  * @prio: tidmap priority value passed by the user
9249  *
9250  * Return: QDF_STATUS_SUCCESS on success
9251  */
9252 static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct dp_pdev *pdev,
9253 						uint8_t prio)
9254 {
9255 	struct dp_soc *soc = pdev->soc;
9256 
9257 	soc->tidmap_prty = prio;
9258 
9259 	hal_tx_set_tidmap_prty(soc->hal_soc, prio);
9260 	return QDF_STATUS_SUCCESS;
9261 }
9262 
9263 /*
9264  * dp_get_peer_param() - function to get parameters in peer
9265  * @cdp_soc: DP soc handle
9266  * @vdev_id: id of vdev handle
9267  * @peer_mac: peer mac address
9268  * @param: parameter type to be fetched
9269  * @val: address of buffer to hold the value
9270  *
9271  * Return: QDF_STATUS
9272  */
9273 static QDF_STATUS dp_get_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
9274 				    uint8_t *peer_mac,
9275 				    enum cdp_peer_param_type param,
9276 				    cdp_config_param_type *val)
9277 {
9278 	return QDF_STATUS_SUCCESS;
9279 }
9280 
9281 #ifdef WLAN_ATF_ENABLE
9282 static void dp_set_atf_stats_enable(struct dp_pdev *pdev, bool value)
9283 {
9284 	if (!pdev) {
9285 		dp_cdp_err("Invalid pdev");
9286 		return;
9287 	}
9288 
9289 	pdev->dp_atf_stats_enable = value;
9290 }
9291 #else
9292 static void dp_set_atf_stats_enable(struct dp_pdev *pdev, bool value)
9293 {
9294 }
9295 #endif
9296 
9297 /*
9298  * dp_set_peer_param() - function to set parameters in peer
9299  * @cdp_soc: DP soc handle
9300  * @vdev_id: id of vdev handle
9301  * @peer_mac: peer mac address
9302  * @param: parameter type to be set
9303  * @val: value of parameter to be set
9304  *
9305  * Return: QDF_STATUS
9306  */
9307 static QDF_STATUS dp_set_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
9308 				    uint8_t *peer_mac,
9309 				    enum cdp_peer_param_type param,
9310 				    cdp_config_param_type val)
9311 {
9312 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
9313 						      peer_mac, 0, vdev_id,
9314 						      DP_MOD_ID_CDP);
9315 
9316 	if (!peer)
9317 		return QDF_STATUS_E_FAILURE;
9318 
9319 	switch (param) {
9320 	case CDP_CONFIG_NAWDS:
9321 		peer->nawds_enabled = val.cdp_peer_param_nawds;
9322 		break;
9323 	case CDP_CONFIG_NAC:
9324 		peer->nac = !!(val.cdp_peer_param_nac);
9325 		break;
9326 	case CDP_CONFIG_ISOLATION:
9327 		dp_set_peer_isolation(peer, val.cdp_peer_param_isolation);
9328 		break;
9329 	case CDP_CONFIG_IN_TWT:
9330 		peer->in_twt = !!(val.cdp_peer_param_in_twt);
9331 		break;
9332 	default:
9333 		break;
9334 	}
9335 
9336 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9337 
9338 	return QDF_STATUS_SUCCESS;
9339 }
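
/*
 * Usage sketch (editor's addition, not part of the driver build): the
 * value travels in the cdp_config_param_type union, so a caller fills
 * the member matching the parameter type. The handles below are
 * assumed to be valid.
 *
 *	cdp_config_param_type val = {0};
 *
 *	val.cdp_peer_param_nawds = 1;
 *	dp_set_peer_param(cdp_soc, vdev_id, peer_mac, CDP_CONFIG_NAWDS, val);
 */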
9340 
9341 /*
9342  * dp_get_pdev_param() - function to get parameters from pdev
9343  * @cdp_soc: DP soc handle
9344  * @pdev_id: id of pdev handle
9345  * @param: parameter type to be fetched
9346  * @val: buffer to hold the value
9347  *
9348  * Return: QDF_STATUS
9349  */
9350 static QDF_STATUS dp_get_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
9351 				    enum cdp_pdev_param_type param,
9352 				    cdp_config_param_type *val)
9353 {
9354 	struct cdp_pdev *pdev = (struct cdp_pdev *)
9355 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
9356 						   pdev_id);
9357 	if (!pdev)
9358 		return QDF_STATUS_E_FAILURE;
9359 
9360 	switch (param) {
9361 	case CDP_CONFIG_VOW:
9362 		val->cdp_pdev_param_cfg_vow =
9363 				((struct dp_pdev *)pdev)->delay_stats_flag;
9364 		break;
9365 	case CDP_TX_PENDING:
9366 		val->cdp_pdev_param_tx_pending = dp_get_tx_pending(pdev);
9367 		break;
9368 	case CDP_FILTER_MCAST_DATA:
9369 		val->cdp_pdev_param_fltr_mcast =
9370 					dp_pdev_get_filter_mcast_data(pdev);
9371 		break;
9372 	case CDP_FILTER_NO_DATA:
9373 		val->cdp_pdev_param_fltr_none =
9374 					dp_pdev_get_filter_non_data(pdev);
9375 		break;
9376 	case CDP_FILTER_UCAST_DATA:
9377 		val->cdp_pdev_param_fltr_ucast =
9378 					dp_pdev_get_filter_ucast_data(pdev);
9379 		break;
9380 	default:
9381 		return QDF_STATUS_E_FAILURE;
9382 	}
9383 
9384 	return QDF_STATUS_SUCCESS;
9385 }
9386 
9387 /*
9388  * dp_set_pdev_param() - function to set parameters in pdev
9389  * @cdp_soc: DP soc handle
9390  * @pdev_id: id of pdev handle
9391  * @param: parameter type to be set
9392  * @val: value of parameter to be set
9393  *
9394  * Return: QDF_STATUS
9395  */
9396 static QDF_STATUS dp_set_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
9397 				    enum cdp_pdev_param_type param,
9398 				    cdp_config_param_type val)
9399 {
9400 	int target_type;
9401 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
9402 	struct dp_pdev *pdev =
9403 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
9404 						   pdev_id);
9405 	if (!pdev)
9406 		return QDF_STATUS_E_FAILURE;
9407 
9408 	target_type = hal_get_target_type(soc->hal_soc);
9409 	switch (target_type) {
9410 	case TARGET_TYPE_QCA6750:
9411 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_5G_LMAC_ID;
9412 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_5G_LMAC_ID;
9413 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_6G_LMAC_ID;
9414 		break;
9415 	default:
9416 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_2G_LMAC_ID;
9417 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_5G_LMAC_ID;
9418 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_6G_LMAC_ID;
9419 		break;
9420 	}
9421 
9422 	switch (param) {
9423 	case CDP_CONFIG_TX_CAPTURE:
9424 		return dp_config_debug_sniffer(pdev,
9425 					       val.cdp_pdev_param_tx_capture);
9426 	case CDP_CONFIG_DEBUG_SNIFFER:
9427 		return dp_config_debug_sniffer(pdev,
9428 					       val.cdp_pdev_param_dbg_snf);
9429 	case CDP_CONFIG_BPR_ENABLE:
9430 		return dp_set_bpr_enable(pdev, val.cdp_pdev_param_bpr_enable);
9431 	case CDP_CONFIG_PRIMARY_RADIO:
9432 		pdev->is_primary = val.cdp_pdev_param_primary_radio;
9433 		break;
9434 	case CDP_CONFIG_CAPTURE_LATENCY:
9435 		pdev->latency_capture_enable = val.cdp_pdev_param_cptr_latcy;
9436 		break;
9437 	case CDP_INGRESS_STATS:
9438 		dp_pdev_tid_stats_ingress_inc(pdev,
9439 					      val.cdp_pdev_param_ingrs_stats);
9440 		break;
9441 	case CDP_OSIF_DROP:
9442 		dp_pdev_tid_stats_osif_drop(pdev,
9443 					    val.cdp_pdev_param_osif_drop);
9444 		break;
9445 	case CDP_CONFIG_ENH_RX_CAPTURE:
9446 		return dp_config_enh_rx_capture(pdev,
9447 						val.cdp_pdev_param_en_rx_cap);
9448 	case CDP_CONFIG_ENH_TX_CAPTURE:
9449 		return dp_config_enh_tx_capture(pdev,
9450 						val.cdp_pdev_param_en_tx_cap);
9451 	case CDP_CONFIG_HMMC_TID_OVERRIDE:
9452 		pdev->hmmc_tid_override_en = val.cdp_pdev_param_hmmc_tid_ovrd;
9453 		break;
9454 	case CDP_CONFIG_HMMC_TID_VALUE:
9455 		pdev->hmmc_tid = val.cdp_pdev_param_hmmc_tid;
9456 		break;
9457 	case CDP_CHAN_NOISE_FLOOR:
9458 		pdev->chan_noise_floor = val.cdp_pdev_param_chn_noise_flr;
9459 		break;
9460 	case CDP_TIDMAP_PRTY:
9461 		dp_set_pdev_tidmap_prty_wifi3(pdev,
9462 					      val.cdp_pdev_param_tidmap_prty);
9463 		break;
9464 	case CDP_FILTER_NEIGH_PEERS:
9465 		dp_set_filter_neigh_peers(pdev,
9466 					  val.cdp_pdev_param_fltr_neigh_peers);
9467 		break;
9468 	case CDP_MONITOR_CHANNEL:
9469 		pdev->mon_chan_num = val.cdp_pdev_param_monitor_chan;
9470 		break;
9471 	case CDP_MONITOR_FREQUENCY:
9472 		pdev->mon_chan_freq = val.cdp_pdev_param_mon_freq;
9473 		pdev->mon_chan_band =
9474 				wlan_reg_freq_to_band(pdev->mon_chan_freq);
9475 		break;
9476 	case CDP_CONFIG_BSS_COLOR:
9477 		dp_mon_set_bsscolor(pdev, val.cdp_pdev_param_bss_color);
9478 		break;
9479 	case CDP_SET_ATF_STATS_ENABLE:
9480 		dp_set_atf_stats_enable(pdev,
9481 					val.cdp_pdev_param_atf_stats_enable);
9482 		break;
9483 	case CDP_CONFIG_SPECIAL_VAP:
9484 		dp_vdev_set_monitor_mode_buf_rings(pdev);
9485 		break;
9486 	default:
9487 		return QDF_STATUS_E_INVAL;
9488 	}
9489 	return QDF_STATUS_SUCCESS;
9490 }
9491 
9492 #ifdef QCA_PEER_EXT_STATS
9493 static void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
9494 					  qdf_nbuf_t nbuf)
9495 {
9496 	struct dp_peer *peer = NULL;
9497 	uint16_t peer_id, ring_id;
9498 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
9499 	struct cdp_peer_ext_stats *pext_stats = NULL;
9500 
9501 	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
9502 	if (peer_id > soc->max_peers)
9503 		return;
9504 
9505 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
9506 	if (qdf_unlikely(!peer))
9507 		return;
9508 
9509 	if (qdf_likely(peer->pext_stats)) {
9510 		pext_stats = peer->pext_stats;
9511 		ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
9512 		dp_rx_compute_tid_delay(&pext_stats->delay_stats[tid][ring_id],
9513 					nbuf);
9514 	}
9515 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9516 }
9517 #else
9518 static inline void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
9519 						 qdf_nbuf_t nbuf)
9520 {
9521 }
9522 #endif
9523 
9524 /*
9525  * dp_calculate_delay_stats: function to get rx delay stats
9526  * @cdp_soc: DP soc handle
9527  * @vdev_id: id of DP vdev handle
9528  * @nbuf: skb
9529  *
9530  * Return: QDF_STATUS
9531  */
9532 static QDF_STATUS
9533 dp_calculate_delay_stats(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
9534 			 qdf_nbuf_t nbuf)
9535 {
9536 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
9537 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9538 						     DP_MOD_ID_CDP);
9539 
9540 	if (!vdev)
9541 		return QDF_STATUS_SUCCESS;
9542 
9543 	if (vdev->pdev->delay_stats_flag)
9544 		dp_rx_compute_delay(vdev, nbuf);
9545 	else
9546 		dp_rx_update_peer_delay_stats(soc, nbuf);
9547 
9548 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9549 	return QDF_STATUS_SUCCESS;
9550 }
9551 
9552 /*
9553  * dp_get_vdev_param() - function to get parameters from vdev
9554  * @cdp_soc: DP soc handle
9555  * @vdev_id: id of DP vdev handle
9556  * @param: parameter type to be fetched
9557  * @val: buffer to hold the value
9558  *
9559  * Return: QDF_STATUS
9560  */
9561 static QDF_STATUS dp_get_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
9562 				    enum cdp_vdev_param_type param,
9563 				    cdp_config_param_type *val)
9564 {
9565 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
9566 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9567 						     DP_MOD_ID_CDP);
9568 
9569 	if (!vdev)
9570 		return QDF_STATUS_E_FAILURE;
9571 
9572 	switch (param) {
9573 	case CDP_ENABLE_WDS:
9574 		val->cdp_vdev_param_wds = vdev->wds_enabled;
9575 		break;
9576 	case CDP_ENABLE_MEC:
9577 		val->cdp_vdev_param_mec = vdev->mec_enabled;
9578 		break;
9579 	case CDP_ENABLE_DA_WAR:
9580 		val->cdp_vdev_param_da_war = vdev->pdev->soc->da_war_enabled;
9581 		break;
9582 	case CDP_ENABLE_IGMP_MCAST_EN:
9583 		val->cdp_vdev_param_igmp_mcast_en = vdev->igmp_mcast_enhanc_en;
9584 		break;
9585 	case CDP_ENABLE_MCAST_EN:
9586 		val->cdp_vdev_param_mcast_en = vdev->mcast_enhancement_en;
9587 		break;
9588 	case CDP_ENABLE_HLOS_TID_OVERRIDE:
9589 		val->cdp_vdev_param_hlos_tid_override =
9590 			    dp_vdev_get_hlos_tid_override((struct cdp_vdev *)vdev);
9591 		break;
9592 	case CDP_ENABLE_PEER_AUTHORIZE:
9593 		val->cdp_vdev_param_peer_authorize =
9594 			    vdev->peer_authorize;
9595 		break;
9596 #ifdef WLAN_SUPPORT_MESH_LATENCY
9597 	case CDP_ENABLE_PEER_TID_LATENCY:
9598 		val->cdp_vdev_param_peer_tid_latency_enable =
9599 			vdev->peer_tid_latency_enabled;
9600 		break;
9601 	case CDP_SET_VAP_MESH_TID:
9602 		val->cdp_vdev_param_mesh_tid =
9603 				vdev->mesh_tid_latency_config.latency_tid;
9604 		break;
9605 #endif
9606 	default:
9607 		dp_cdp_err("%pK: invalid param value %d\n",
9608 			   soc, param);
9609 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9610 		return QDF_STATUS_E_FAILURE;
9611 	}
9612 
9613 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9614 	return QDF_STATUS_SUCCESS;
9615 }
9616 
9617 /*
9618  * dp_set_vdev_param() - function to set parameters in vdev
9619  * @cdp_soc: DP soc handle
9620  * @vdev_id: id of DP vdev handle
9621  * @param: parameter type to be set
9622  * @val: value of parameter to be set
9623  *
9624  * Return: QDF_STATUS
9625  */
9626 static QDF_STATUS
9627 dp_set_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
9628 		  enum cdp_vdev_param_type param, cdp_config_param_type val)
9629 {
9630 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
9631 	struct dp_vdev *vdev =
9632 		dp_vdev_get_ref_by_id(dsoc, vdev_id, DP_MOD_ID_CDP);
9633 	uint32_t var = 0;
9634 
9635 	if (!vdev)
9636 		return QDF_STATUS_E_FAILURE;
9637 
9638 	switch (param) {
9639 	case CDP_ENABLE_WDS:
9640 		dp_cdp_err("%pK: wds_enable %d for vdev(%pK) id(%d)\n",
9641 			   dsoc, val.cdp_vdev_param_wds, vdev, vdev->vdev_id);
9642 		vdev->wds_enabled = val.cdp_vdev_param_wds;
9643 		break;
9644 	case CDP_ENABLE_MEC:
9645 		dp_cdp_err("%pK: mec_enable %d for vdev(%pK) id(%d)\n",
9646 			   dsoc, val.cdp_vdev_param_mec, vdev, vdev->vdev_id);
9647 		vdev->mec_enabled = val.cdp_vdev_param_mec;
9648 		break;
9649 	case CDP_ENABLE_DA_WAR:
9650 		dp_cdp_err("%pK: da_war_enable %d for vdev(%pK) id(%d)\n",
9651 			   dsoc, val.cdp_vdev_param_da_war, vdev, vdev->vdev_id);
9652 		vdev->pdev->soc->da_war_enabled = val.cdp_vdev_param_da_war;
9653 		dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
9654 					     vdev->pdev->soc));
9655 		break;
9656 	case CDP_ENABLE_NAWDS:
9657 		vdev->nawds_enabled = val.cdp_vdev_param_nawds;
9658 		break;
9659 	case CDP_ENABLE_MCAST_EN:
9660 		vdev->mcast_enhancement_en = val.cdp_vdev_param_mcast_en;
9661 		break;
9662 	case CDP_ENABLE_IGMP_MCAST_EN:
9663 		vdev->igmp_mcast_enhanc_en = val.cdp_vdev_param_igmp_mcast_en;
9664 		break;
9665 	case CDP_ENABLE_PROXYSTA:
9666 		vdev->proxysta_vdev = val.cdp_vdev_param_proxysta;
9667 		break;
9668 	case CDP_UPDATE_TDLS_FLAGS:
9669 		vdev->tdls_link_connected = val.cdp_vdev_param_tdls_flags;
9670 		break;
9671 	case CDP_CFG_WDS_AGING_TIMER:
9672 		var = val.cdp_vdev_param_aging_tmr;
9673 		if (!var)
9674 			qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
9675 		else if (var != vdev->wds_aging_timer_val)
9676 			qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, var);
9677 
9678 		vdev->wds_aging_timer_val = var;
9679 		break;
9680 	case CDP_ENABLE_AP_BRIDGE:
9681 		if (wlan_op_mode_sta != vdev->opmode)
9682 			vdev->ap_bridge_enabled = val.cdp_vdev_param_ap_brdg_en;
9683 		else
9684 			vdev->ap_bridge_enabled = false;
9685 		break;
9686 	case CDP_ENABLE_CIPHER:
9687 		vdev->sec_type = val.cdp_vdev_param_cipher_en;
9688 		break;
9689 	case CDP_ENABLE_QWRAP_ISOLATION:
9690 		vdev->isolation_vdev = val.cdp_vdev_param_qwrap_isolation;
9691 		break;
9692 	case CDP_UPDATE_MULTIPASS:
9693 		vdev->multipass_en = val.cdp_vdev_param_update_multipass;
9694 		break;
9695 	case CDP_TX_ENCAP_TYPE:
9696 		vdev->tx_encap_type = val.cdp_vdev_param_tx_encap;
9697 		break;
9698 	case CDP_RX_DECAP_TYPE:
9699 		vdev->rx_decap_type = val.cdp_vdev_param_rx_decap;
9700 		break;
9701 	case CDP_TID_VDEV_PRTY:
9702 		vdev->tidmap_prty = val.cdp_vdev_param_tidmap_prty;
9703 		break;
9704 	case CDP_TIDMAP_TBL_ID:
9705 		vdev->tidmap_tbl_id = val.cdp_vdev_param_tidmap_tbl_id;
9706 		break;
9707 #ifdef MESH_MODE_SUPPORT
9708 	case CDP_MESH_RX_FILTER:
9709 		dp_vdev_set_mesh_rx_filter((struct cdp_vdev *)vdev,
9710 					   val.cdp_vdev_param_mesh_rx_filter);
9711 		break;
9712 	case CDP_MESH_MODE:
9713 		dp_vdev_set_mesh_mode((struct cdp_vdev *)vdev,
9714 				      val.cdp_vdev_param_mesh_mode);
9715 		break;
9716 #endif
9717 	case CDP_ENABLE_CSUM:
9718 		dp_info("vdev_id %d enable Checksum %d", vdev_id,
9719 			val.cdp_enable_tx_checksum);
9720 		vdev->csum_enabled = val.cdp_enable_tx_checksum;
9721 		break;
9722 	case CDP_ENABLE_HLOS_TID_OVERRIDE:
9723 		dp_info("vdev_id %d enable hlos tid override %d", vdev_id,
9724 			val.cdp_vdev_param_hlos_tid_override);
9725 		dp_vdev_set_hlos_tid_override(vdev,
9726 				val.cdp_vdev_param_hlos_tid_override);
9727 		break;
9728 #ifdef QCA_SUPPORT_WDS_EXTENDED
9729 	case CDP_CFG_WDS_EXT:
9730 		vdev->wds_ext_enabled = val.cdp_vdev_param_wds_ext;
9731 		break;
9732 #endif
9733 	case CDP_ENABLE_PEER_AUTHORIZE:
9734 		vdev->peer_authorize = val.cdp_vdev_param_peer_authorize;
9735 		break;
9736 #ifdef WLAN_SUPPORT_MESH_LATENCY
9737 	case CDP_ENABLE_PEER_TID_LATENCY:
9738 		dp_info("vdev_id %d enable peer tid latency %d", vdev_id,
9739 			val.cdp_vdev_param_peer_tid_latency_enable);
9740 		vdev->peer_tid_latency_enabled =
9741 			val.cdp_vdev_param_peer_tid_latency_enable;
9742 		break;
9743 	case CDP_SET_VAP_MESH_TID:
9744 		dp_info("vdev_id %d set vap mesh tid %d", vdev_id,
9745 			val.cdp_vdev_param_mesh_tid);
9746 		vdev->mesh_tid_latency_config.latency_tid
9747 				= val.cdp_vdev_param_mesh_tid;
9748 		break;
9749 #endif
9750 	default:
9751 		break;
9752 	}
9753 
9754 	dp_tx_vdev_update_search_flags((struct dp_vdev *)vdev);
9755 	dp_vdev_unref_delete(dsoc, vdev, DP_MOD_ID_CDP);
9756 
9757 	return QDF_STATUS_SUCCESS;
9758 }
9759 
9760 /*
9761  * dp_set_psoc_param() - function to set parameters in psoc
9762  * @cdp_soc: DP soc handle
9763  * @param: parameter type to be set
9764  * @val: value of parameter to be set
9765  *
9766  * Return: QDF_STATUS
9767  */
9768 static QDF_STATUS
9769 dp_set_psoc_param(struct cdp_soc_t *cdp_soc,
9770 		  enum cdp_psoc_param_type param, cdp_config_param_type val)
9771 {
9772 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
9773 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->wlan_cfg_ctx;
9774 
9775 	switch (param) {
9776 	case CDP_ENABLE_RATE_STATS:
9777 		soc->rdkstats_enabled = val.cdp_psoc_param_en_rate_stats;
9778 		break;
9779 	case CDP_SET_NSS_CFG:
9780 		wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx,
9781 					    val.cdp_psoc_param_en_nss_cfg);
9782 		/*
9783 		 * TODO: masked out based on the per offloaded radio
9784 		 */
9785 		switch (val.cdp_psoc_param_en_nss_cfg) {
9786 		case dp_nss_cfg_default:
9787 			break;
9788 		case dp_nss_cfg_first_radio:
9789 		/*
9790 		 * This configuration is valid for a single-band radio that
9791 		 * is also NSS offloaded.
9792 		 */
9793 		case dp_nss_cfg_dbdc:
9794 		case dp_nss_cfg_dbtc:
9795 			wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
9796 			wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
9797 			wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
9798 			wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
9799 			break;
9800 		default:
9801 			dp_cdp_err("%pK: Invalid offload config %d",
9802 				   soc, val.cdp_psoc_param_en_nss_cfg);
9803 		}
9804 
9805 		dp_cdp_err("%pK: nss-wifi<0> nss config is enabled", soc);
9807 		break;
9808 	case CDP_SET_PREFERRED_HW_MODE:
9809 		soc->preferred_hw_mode = val.cdp_psoc_param_preferred_hw_mode;
9810 		break;
9811 	default:
9812 		break;
9813 	}
9814 
9815 	return QDF_STATUS_SUCCESS;
9816 }
9817 
9818 /*
9819  * dp_get_psoc_param() - function to get parameters in soc
9820  * @cdp_soc: DP soc handle
9821  * @param: parameter type to be fetched
9822  * @val: address of buffer to hold the value
9823  *
9824  * Return: QDF_STATUS
9825  */
9826 static QDF_STATUS dp_get_psoc_param(struct cdp_soc_t *cdp_soc,
9827 				    enum cdp_psoc_param_type param,
9828 				    cdp_config_param_type *val)
9829 {
9830 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
9831 
9832 	if (!soc)
9833 		return QDF_STATUS_E_FAILURE;
9834 
9835 	switch (param) {
9836 	case CDP_CFG_PEER_EXT_STATS:
9837 		val->cdp_psoc_param_pext_stats =
9838 			wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
9839 		break;
9840 	default:
9841 		dp_warn("Invalid param");
9842 		break;
9843 	}
9844 
9845 	return QDF_STATUS_SUCCESS;
9846 }
9847 
9848 /**
9849  * dp_peer_update_pkt_capture_params: Set Rx & Tx Capture flags for a peer
9850  * @soc: DP_SOC handle
9851  * @pdev_id: id of DP_PDEV handle
9852  * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode
9853  * @is_tx_pkt_cap_enable: enable/disable/delete/print
9854  * Tx packet capture in monitor mode
9855  * @peer_mac: MAC address for which the above need to be enabled/disabled
9856  *
9857  * Return: QDF_STATUS_SUCCESS if Rx & Tx capture is updated for the peer
9858  */
9859 QDF_STATUS
9860 dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc,
9861 				  uint8_t pdev_id,
9862 				  bool is_rx_pkt_cap_enable,
9863 				  uint8_t is_tx_pkt_cap_enable,
9864 				  uint8_t *peer_mac)
9865 {
9866 	struct dp_peer *peer;
9867 	QDF_STATUS status;
9868 	struct dp_pdev *pdev =
9869 			dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9870 							   pdev_id);
9871 	if (!pdev)
9872 		return QDF_STATUS_E_FAILURE;
9873 
9874 	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
9875 				      peer_mac, 0, DP_VDEV_ALL,
9876 				      DP_MOD_ID_CDP);
9877 	if (!peer)
9878 		return QDF_STATUS_E_FAILURE;
9879 
9880 	/* we need to set tx pkt capture for non-associated peer */
9881 	status = dp_peer_set_tx_capture_enabled(pdev, peer,
9882 						is_tx_pkt_cap_enable,
9883 						peer_mac);
9884 
9885 	status = dp_peer_set_rx_capture_enabled(pdev, peer,
9886 						is_rx_pkt_cap_enable,
9887 						peer_mac);
9888 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9889 
9890 	return status;
9891 }
9892 
9893 /*
9894  * dp_set_vdev_dscp_tid_map_wifi3() - update Map ID selected for vdev
9895  * @cdp_soc: DP_SOC handle
9896  * @vdev_id: id of DP_VDEV handle
9897  * @map_id: ID of map that needs to be updated
9898  *
9899  * Return: QDF_STATUS
9900  */
9901 static QDF_STATUS dp_set_vdev_dscp_tid_map_wifi3(ol_txrx_soc_handle cdp_soc,
9902 						 uint8_t vdev_id,
9903 						 uint8_t map_id)
9904 {
9905 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
9906 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9907 						     DP_MOD_ID_CDP);
9908 	if (vdev) {
9909 		vdev->dscp_tid_map_id = map_id;
9910 		/* Update flag for transmit tid classification */
9911 		if (vdev->dscp_tid_map_id < soc->num_hw_dscp_tid_map)
9912 			vdev->skip_sw_tid_classification |=
9913 				DP_TX_HW_DSCP_TID_MAP_VALID;
9914 		else
9915 			vdev->skip_sw_tid_classification &=
9916 				~DP_TX_HW_DSCP_TID_MAP_VALID;
9917 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9918 		return QDF_STATUS_SUCCESS;
9919 	}
9920 
9921 	return QDF_STATUS_E_FAILURE;
9922 }
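
/*
 * Example (editor's addition, illustrative): on a target exposing two
 * hardware DSCP-TID maps (num_hw_dscp_tid_map = 2), a map_id of 0 or 1
 * sets DP_TX_HW_DSCP_TID_MAP_VALID so the TX path skips software TID
 * classification; a larger map_id clears the flag and the TID is
 * classified in software instead.
 */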
9923 
9924 #ifdef DP_RATETABLE_SUPPORT
9925 static int dp_txrx_get_ratekbps(int preamb, int mcs,
9926 				int htflag, int gintval)
9927 {
9928 	uint32_t rix;
9929 	uint16_t ratecode;
9930 
9931 	return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1,
9932 			       (uint8_t)preamb, 1, &rix, &ratecode);
9933 }
9934 #else
9935 static int dp_txrx_get_ratekbps(int preamb, int mcs,
9936 				int htflag, int gintval)
9937 {
9938 	return 0;
9939 }
9940 #endif
9941 
9942 /* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
9943  * @soc: DP soc handle
9944  * @pdev_id: id of DP pdev handle
9945  * @pdev_stats: buffer to copy to
9946  *
9947  * Return: status success/failure
9948  */
9949 static QDF_STATUS
9950 dp_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
9951 		       struct cdp_pdev_stats *pdev_stats)
9952 {
9953 	struct dp_pdev *pdev =
9954 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9955 						   pdev_id);
9956 	if (!pdev)
9957 		return QDF_STATUS_E_FAILURE;
9958 
9959 	dp_aggregate_pdev_stats(pdev);
9960 
9961 	qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats));
9962 	return QDF_STATUS_SUCCESS;
9963 }
9964 
9965 /* dp_txrx_update_vdev_me_stats(): Update vdev ME stats sent from CDP
9966  * @vdev: DP vdev handle
9967  * @buf: buffer containing specific stats structure
9968  *
9969  * Returns: void
9970  */
9971 static void dp_txrx_update_vdev_me_stats(struct dp_vdev *vdev,
9972 					 void *buf)
9973 {
9974 	struct cdp_tx_ingress_stats *host_stats = NULL;
9975 
9976 	if (!buf) {
9977 		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
9978 		return;
9979 	}
9980 	host_stats = (struct cdp_tx_ingress_stats *)buf;
9981 
9982 	DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt,
9983 			 host_stats->mcast_en.mcast_pkt.num,
9984 			 host_stats->mcast_en.mcast_pkt.bytes);
9985 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error,
9986 		     host_stats->mcast_en.dropped_map_error);
9987 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_self_mac,
9988 		     host_stats->mcast_en.dropped_self_mac);
9989 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_send_fail,
9990 		     host_stats->mcast_en.dropped_send_fail);
9991 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast,
9992 		     host_stats->mcast_en.ucast);
9993 	DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc,
9994 		     host_stats->mcast_en.fail_seg_alloc);
9995 	DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail,
9996 		     host_stats->mcast_en.clone_fail);
9997 }
9998 
9999 /* dp_txrx_update_vdev_igmp_me_stats(): Update vdev IGMP ME stats sent from CDP
10000  * @vdev: DP vdev handle
10001  * @buf: buffer containing specific stats structure
10002  *
10003  * Returns: void
10004  */
10005 static void dp_txrx_update_vdev_igmp_me_stats(struct dp_vdev *vdev,
10006 					      void *buf)
10007 {
10008 	struct cdp_tx_ingress_stats *host_stats = NULL;
10009 
10010 	if (!buf) {
10011 		dp_cdp_err("%pK: Invalid host stats buf", vdev->pdev->soc);
10012 		return;
10013 	}
10014 	host_stats = (struct cdp_tx_ingress_stats *)buf;
10015 
10016 	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_rcvd,
10017 		     host_stats->igmp_mcast_en.igmp_rcvd);
10018 	DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_ucast_converted,
10019 		     host_stats->igmp_mcast_en.igmp_ucast_converted);
10020 }
10021 
10022 /* dp_txrx_update_vdev_host_stats(): Update stats sent through CDP
10023  * @soc_hdl: DP soc handle
10024  * @vdev_id: id of DP vdev handle
10025  * @buf: buffer containing specific stats structure
10026  * @stats_id: stats type
10027  *
10028  * Returns: QDF_STATUS
10029  */
10030 static QDF_STATUS dp_txrx_update_vdev_host_stats(struct cdp_soc_t *soc_hdl,
10031 						 uint8_t vdev_id,
10032 						 void *buf,
10033 						 uint16_t stats_id)
10034 {
10035 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10036 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10037 						     DP_MOD_ID_CDP);
10038 
10039 	if (!vdev) {
10040 		dp_cdp_err("%pK: Invalid vdev handle", soc);
10041 		return QDF_STATUS_E_FAILURE;
10042 	}
10043 
10044 	switch (stats_id) {
10045 	case DP_VDEV_STATS_PKT_CNT_ONLY:
10046 		break;
10047 	case DP_VDEV_STATS_TX_ME:
10048 		dp_txrx_update_vdev_me_stats(vdev, buf);
10049 		dp_txrx_update_vdev_igmp_me_stats(vdev, buf);
10050 		break;
10051 	default:
10052 		qdf_info("Invalid stats_id %d", stats_id);
10053 		break;
10054 	}
10055 
10056 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10057 	return QDF_STATUS_SUCCESS;
10058 }
10059 
10060 /* dp_txrx_get_peer_stats - will return cdp_peer_stats
10061  * @soc: soc handle
10062  * @vdev_id: id of vdev handle
10063  * @peer_mac: mac of DP_PEER handle
10064  * @peer_stats: buffer to copy to
10065  * Return: status success/failure
10066  */
10067 static QDF_STATUS
10068 dp_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
10069 		       uint8_t *peer_mac, struct cdp_peer_stats *peer_stats)
10070 {
10071 	QDF_STATUS status = QDF_STATUS_SUCCESS;
10072 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
10073 						       peer_mac, 0, vdev_id,
10074 						       DP_MOD_ID_CDP);
10075 
10076 	if (!peer)
10077 		return QDF_STATUS_E_FAILURE;
10078 
10079 	qdf_mem_copy(peer_stats, &peer->stats,
10080 		     sizeof(struct cdp_peer_stats));
10081 
10082 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10083 
10084 	return status;
10085 }
10086 
10087 /* dp_txrx_get_peer_stats_param - will return specified cdp_peer_stats
10088  * @soc: soc handle
10089  * @vdev_id: vdev_id of vdev object
10090  * @peer_mac: mac address of the peer
10091  * @type: enum of required stats
10092  * @buf: buffer to hold the value
10093  * Return: status success/failure
10094  */
10095 static QDF_STATUS
10096 dp_txrx_get_peer_stats_param(struct cdp_soc_t *soc, uint8_t vdev_id,
10097 			     uint8_t *peer_mac, enum cdp_peer_stats_type type,
10098 			     cdp_peer_stats_param_t *buf)
10099 {
10100 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
10101 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
10102 						      peer_mac, 0, vdev_id,
10103 						      DP_MOD_ID_CDP);
10104 
10105 	if (!peer) {
10106 		dp_peer_err("%pK: Invalid Peer for Mac " QDF_MAC_ADDR_FMT,
10107 			    soc, QDF_MAC_ADDR_REF(peer_mac));
10108 		return QDF_STATUS_E_FAILURE;
10109 	} else if (type < cdp_peer_stats_max) {
10110 		switch (type) {
10111 		case cdp_peer_tx_ucast:
10112 			buf->tx_ucast = peer->stats.tx.ucast;
10113 			break;
10114 		case cdp_peer_tx_mcast:
10115 			buf->tx_mcast = peer->stats.tx.mcast;
10116 			break;
10117 		case cdp_peer_tx_rate:
10118 			buf->tx_rate = peer->stats.tx.tx_rate;
10119 			break;
10120 		case cdp_peer_tx_last_tx_rate:
10121 			buf->last_tx_rate = peer->stats.tx.last_tx_rate;
10122 			break;
10123 		case cdp_peer_tx_inactive_time:
10124 			buf->tx_inactive_time = peer->stats.tx.inactive_time;
10125 			break;
10126 		case cdp_peer_tx_ratecode:
10127 			buf->tx_ratecode = peer->stats.tx.tx_ratecode;
10128 			break;
10129 		case cdp_peer_tx_flags:
10130 			buf->tx_flags = peer->stats.tx.tx_flags;
10131 			break;
10132 		case cdp_peer_tx_power:
10133 			buf->tx_power = peer->stats.tx.tx_power;
10134 			break;
10135 		case cdp_peer_rx_rate:
10136 			buf->rx_rate = peer->stats.rx.rx_rate;
10137 			break;
10138 		case cdp_peer_rx_last_rx_rate:
10139 			buf->last_rx_rate = peer->stats.rx.last_rx_rate;
10140 			break;
10141 		case cdp_peer_rx_ratecode:
10142 			buf->rx_ratecode = peer->stats.rx.rx_ratecode;
10143 			break;
10144 		case cdp_peer_rx_ucast:
10145 			buf->rx_ucast = peer->stats.rx.unicast;
10146 			break;
10147 		case cdp_peer_rx_flags:
10148 			buf->rx_flags = peer->stats.rx.rx_flags;
10149 			break;
10150 		case cdp_peer_rx_avg_snr:
10151 			buf->rx_avg_snr = peer->stats.rx.avg_snr;
10152 			break;
10153 		default:
10154 			dp_peer_err("%pK: Invalid value", soc);
10155 			ret = QDF_STATUS_E_FAILURE;
10156 			break;
10157 		}
10158 	} else {
10159 		dp_peer_err("%pK: Invalid value", soc);
10160 		ret = QDF_STATUS_E_FAILURE;
10161 	}
10162 
10163 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10164 
10165 	return ret;
10166 }
10167 
10168 /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
10169  * @soc: soc handle
10170  * @vdev_id: id of vdev handle
10171  * @peer_mac: mac of DP_PEER handle
10172  *
10173  * Return: QDF_STATUS
10174  */
10175 static QDF_STATUS
10176 dp_txrx_reset_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
10177 			 uint8_t *peer_mac)
10178 {
10179 	QDF_STATUS status = QDF_STATUS_SUCCESS;
10180 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
10181 						      peer_mac, 0, vdev_id,
10182 						      DP_MOD_ID_CDP);
10183 
10184 	if (!peer)
10185 		return QDF_STATUS_E_FAILURE;
10186 
10187 	qdf_mem_zero(&peer->stats, sizeof(peer->stats));
10188 
10189 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10190 
10191 	return status;
10192 }
10193 
10194 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
10195  * @soc_hdl: DP soc handle
10196  * @vdev_id: id of DP vdev handle
10197  * @buf: buffer for vdev stats
10198  * @is_aggregate: if true, aggregate vdev and its peer stats first
10199  * Return: 0 on success, 1 on failure
10199  */
10200 static int dp_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
10201 				  void *buf, bool is_aggregate)
10202 {
10203 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10204 	struct cdp_vdev_stats *vdev_stats;
10205 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10206 						     DP_MOD_ID_CDP);
10207 
10208 	if (!vdev)
10209 		return 1;
10210 
10211 	vdev_stats = (struct cdp_vdev_stats *)buf;
10212 
10213 	if (is_aggregate) {
10214 		dp_aggregate_vdev_stats(vdev, buf);
10215 	} else {
10216 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
10217 	}
10218 
10219 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10220 	return 0;
10221 }
10222 
10223 /*
10224  * dp_get_total_per() - get total PER (packet error rate)
10225  * @soc: DP soc handle
10226  * @pdev_id: id of DP_PDEV handle
10227  *
10228  * Return: % error rate using retries per packet and success packets
10229  */
10230 static int dp_get_total_per(struct cdp_soc_t *soc, uint8_t pdev_id)
10231 {
10232 	struct dp_pdev *pdev =
10233 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10234 						   pdev_id);
10235 
10236 	if (!pdev)
10237 		return 0;
10238 
10239 	dp_aggregate_pdev_stats(pdev);
10240 	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
10241 		return 0;
10242 	return ((pdev->stats.tx.retries * 100) /
10243 		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
10244 }
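
/*
 * Worked example (editor's addition, illustrative numbers): with
 * tx_success.num = 900 and retries = 100, the function returns
 * (100 * 100) / (900 + 100) = 10, i.e. a 10% retry rate across all
 * transmit attempts.
 */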
10245 
10246 /*
10247  * dp_txrx_stats_publish(): publish pdev stats into a buffer
10248  * @soc: DP soc handle
10249  * @pdev_id: id of DP_PDEV handle
10250  * @buf: to hold pdev_stats
10251  *
10252  * Return: TXRX_STATS_LEVEL on success, TXRX_STATS_LEVEL_OFF on failure
10253  */
10254 static int
10255 dp_txrx_stats_publish(struct cdp_soc_t *soc, uint8_t pdev_id,
10256 		      struct cdp_stats_extd *buf)
10257 {
10258 	struct cdp_txrx_stats_req req = {0,};
10259 	struct dp_pdev *pdev =
10260 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10261 						   pdev_id);
10262 
10263 	if (!pdev)
10264 		return TXRX_STATS_LEVEL_OFF;
10265 
10266 	dp_aggregate_pdev_stats(pdev);
10267 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
10268 	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
10269 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
10270 				req.param1, req.param2, req.param3, 0,
10271 				req.cookie_val, 0);
10272 
10273 	msleep(DP_MAX_SLEEP_TIME);
10274 
10275 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
10276 	req.cookie_val = DBG_STATS_COOKIE_DP_STATS;
10277 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
10278 				req.param1, req.param2, req.param3, 0,
10279 				req.cookie_val, 0);
10280 
10281 	msleep(DP_MAX_SLEEP_TIME);
10282 	qdf_mem_copy(buf, &pdev->stats, sizeof(struct cdp_pdev_stats));
10283 
10284 	return TXRX_STATS_LEVEL;
10285 }
10286 
10287 /**
10288  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
10289  * @soc: soc handle
10290  * @pdev_id: id of DP_PDEV handle
10291  * @map_id: ID of map that needs to be updated
10292  * @tos: index value in map
10293  * @tid: tid value passed by the user
10294  *
10295  * Return: QDF_STATUS
10296  */
10297 static QDF_STATUS
10298 dp_set_pdev_dscp_tid_map_wifi3(struct cdp_soc_t *soc_handle,
10299 			       uint8_t pdev_id,
10300 			       uint8_t map_id,
10301 			       uint8_t tos, uint8_t tid)
10302 {
10303 	uint8_t dscp;
10304 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
10305 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10306 
10307 	if (!pdev)
10308 		return QDF_STATUS_E_FAILURE;
10309 
10310 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
10311 	pdev->dscp_tid_map[map_id][dscp] = tid;
10312 
10313 	if (map_id < soc->num_hw_dscp_tid_map)
10314 		hal_tx_update_dscp_tid(soc->hal_soc, tid,
10315 				       map_id, dscp);
10316 	else
10317 		return QDF_STATUS_E_FAILURE;
10318 
10319 	return QDF_STATUS_SUCCESS;
10320 }
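
/*
 * Example (editor's addition, illustrative): the DSCP index is the
 * upper six bits of the IP TOS byte. Assuming DP_IP_DSCP_SHIFT = 2 and
 * DP_IP_DSCP_MASK = 0x3f, tos = 0xb8 (DSCP EF) gives
 * dscp = (0xb8 >> 2) & 0x3f = 46, so tid lands in
 * dscp_tid_map[map_id][46].
 */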
10321 
10322 /**
10323  * dp_fw_stats_process() - Process TxRx FW stats request
10324  * @vdev: DP VDEV handle
10325  * @req: stats request
10326  *
10327  * Return: 0 on success, nonzero on failure
10328  */
10329 static int dp_fw_stats_process(struct dp_vdev *vdev,
10330 			       struct cdp_txrx_stats_req *req)
10331 {
10332 	struct dp_pdev *pdev = NULL;
10333 	uint32_t stats = req->stats;
10334 	uint8_t mac_id = req->mac_id;
10335 
10336 	if (!vdev) {
10337 		DP_TRACE(NONE, "VDEV not found");
10338 		return 1;
10339 	}
10340 	pdev = vdev->pdev;
10341 
10342 	/*
10343 	 * For the HTT_DBG_EXT_STATS_RESET command, the FW expects
10344 	 * config_param0 to config_param3 to be set per the rule below:
10345 	 *
10346 	 * PARAM:
10347 	 *   - config_param0 : start_offset (stats type)
10348 	 *   - config_param1 : stats bmask from start offset
10349 	 *   - config_param2 : stats bmask from start offset + 32
10350 	 *   - config_param3 : stats bmask from start offset + 64
10351 	 */
10352 	if (req->stats == CDP_TXRX_STATS_0) {
10353 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
10354 		req->param1 = 0xFFFFFFFF;
10355 		req->param2 = 0xFFFFFFFF;
10356 		req->param3 = 0xFFFFFFFF;
10357 	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
10358 		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
10359 	}
10360 
10361 	if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT) {
10362 		return dp_h2t_ext_stats_msg_send(pdev,
10363 				HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT,
10364 				req->param0, req->param1, req->param2,
10365 				req->param3, 0, DBG_STATS_COOKIE_DEFAULT,
10366 				mac_id);
10367 	} else {
10368 		return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
10369 				req->param1, req->param2, req->param3,
10370 				0, DBG_STATS_COOKIE_DEFAULT, mac_id);
10371 	}
10372 }
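
/*
 * Example (editor's addition, illustrative): a CDP_TXRX_STATS_0 request
 * rewritten by the rule above asks for the PDEV_TX stats group with all
 * bits of the three bitmask words enabled:
 *
 *	req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;  // start_offset
 *	req->param1 = 0xFFFFFFFF;	// bits [offset, offset + 32)
 *	req->param2 = 0xFFFFFFFF;	// bits [offset + 32, offset + 64)
 *	req->param3 = 0xFFFFFFFF;	// bits [offset + 64, offset + 96)
 */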
10373 
10374 /**
10375  * dp_txrx_stats_request - function to map to firmware and host stats
10376  * @soc_handle: soc handle
10377  * @vdev_id: virtual device ID
10378  * @req: stats request
10379  *
10380  * Return: QDF_STATUS
10381  */
10382 static
10383 QDF_STATUS dp_txrx_stats_request(struct cdp_soc_t *soc_handle,
10384 				 uint8_t vdev_id,
10385 				 struct cdp_txrx_stats_req *req)
10386 {
10387 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_handle);
10388 	int host_stats;
10389 	int fw_stats;
10390 	enum cdp_stats stats;
10391 	int num_stats;
10392 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10393 						     DP_MOD_ID_CDP);
10394 	QDF_STATUS status = QDF_STATUS_E_INVAL;
10395 
10396 	if (!vdev || !req) {
10397 		dp_cdp_err("%pK: Invalid vdev/req instance", soc);
10398 		status = QDF_STATUS_E_INVAL;
10399 		goto fail0;
10400 	}
10401 
10402 	if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) {
10403 		dp_err("Invalid mac id request");
10404 		status = QDF_STATUS_E_INVAL;
10405 		goto fail0;
10406 	}
10407 
10408 	stats = req->stats;
10409 	if (stats >= CDP_TXRX_MAX_STATS) {
10410 		status = QDF_STATUS_E_INVAL;
10411 		goto fail0;
10412 	}
10413 
10414 	/*
10415 	 * DP_CURR_FW_STATS_AVAIL: number of FW stats currently available;
10416 	 *			has to be updated when new FW HTT stats are added
10417 	 */
10418 	if (stats > CDP_TXRX_STATS_HTT_MAX)
10419 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
10420 
10421 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
10422 
10423 	if (stats >= num_stats) {
10424 		dp_cdp_err("%pK : Invalid stats option: %d", soc, stats);
10425 		status = QDF_STATUS_E_INVAL;
10426 		goto fail0;
10427 	}
10428 
10429 	req->stats = stats;
10430 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
10431 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
10432 
10433 	dp_info("stats: %u fw_stats_type: %d host_stats: %d",
10434 		stats, fw_stats, host_stats);
10435 
10436 	if (fw_stats != TXRX_FW_STATS_INVALID) {
10437 		/* update request with FW stats type */
10438 		req->stats = fw_stats;
10439 		status = dp_fw_stats_process(vdev, req);
10440 	} else if ((host_stats != TXRX_HOST_STATS_INVALID) &&
10441 			(host_stats <= TXRX_HOST_STATS_MAX))
10442 		status = dp_print_host_stats(vdev, req, soc);
10443 	else
10444 		dp_cdp_info("%pK: Wrong Input for TxRx Stats", soc);
10445 fail0:
10446 	if (vdev)
10447 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10448 	return status;
10449 }
10450 
10451 /*
10452  * dp_txrx_dump_stats() - Dump statistics
10453  * @psoc: CDP soc handle
10454  * @value: statistics option
10455  * @level: verbosity level
10456  */
10455 static QDF_STATUS dp_txrx_dump_stats(struct cdp_soc_t *psoc, uint16_t value,
10456 				     enum qdf_stats_verbosity_level level)
10457 {
10458 	struct dp_soc *soc =
10459 		(struct dp_soc *)psoc;
10460 	QDF_STATUS status = QDF_STATUS_SUCCESS;
10461 
10462 	if (!soc) {
10463 		dp_cdp_err("%pK: soc is NULL", soc);
10464 		return QDF_STATUS_E_INVAL;
10465 	}
10466 
10467 	switch (value) {
10468 	case CDP_TXRX_PATH_STATS:
10469 		dp_txrx_path_stats(soc);
10470 		dp_print_soc_interrupt_stats(soc);
10471 		hal_dump_reg_write_stats(soc->hal_soc);
10472 		break;
10473 
10474 	case CDP_RX_RING_STATS:
10475 		dp_print_per_ring_stats(soc);
10476 		break;
10477 
10478 	case CDP_TXRX_TSO_STATS:
10479 		dp_print_tso_stats(soc, level);
10480 		break;
10481 
10482 	case CDP_DUMP_TX_FLOW_POOL_INFO:
10483 		if (level == QDF_STATS_VERBOSITY_LEVEL_HIGH)
10484 			cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
10485 		break;
10486 
10487 	case CDP_DP_NAPI_STATS:
10488 		dp_print_napi_stats(soc);
10489 		break;
10490 
10491 	case CDP_TXRX_DESC_STATS:
10492 		/* TODO: NOT IMPLEMENTED */
10493 		break;
10494 
10495 	case CDP_DP_RX_FISA_STATS:
10496 		dp_rx_dump_fisa_stats(soc);
10497 		break;
10498 
10499 	case CDP_DP_SWLM_STATS:
10500 		dp_print_swlm_stats(soc);
10501 		break;
10502 
10503 	default:
10504 		status = QDF_STATUS_E_INVAL;
10505 		break;
10506 	}
10507 
10508 	return status;
10509 
10510 }
10511 
10512 /**
10513  * dp_txrx_clear_dump_stats() - clear dump stats
10514  * @soc_hdl: soc handle
10515  * @pdev_id: id of pdev handle
10516  * @value: stats option
10516  *
10517  * Return: 0 - Success, non-zero - failure
10518  */
10519 static
10520 QDF_STATUS dp_txrx_clear_dump_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
10521 				    uint8_t value)
10522 {
10523 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10524 	QDF_STATUS status = QDF_STATUS_SUCCESS;
10525 
10526 	if (!soc) {
10527 		dp_err("soc is NULL");
10528 		return QDF_STATUS_E_INVAL;
10529 	}
10530 
10531 	switch (value) {
10532 	case CDP_TXRX_TSO_STATS:
10533 		dp_txrx_clear_tso_stats(soc);
10534 		break;
10535 
10536 	default:
10537 		status = QDF_STATUS_E_INVAL;
10538 		break;
10539 	}
10540 
10541 	return status;
10542 }
10543 
10544 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
10545 /**
10546  * dp_update_flow_control_parameters() - API to store datapath
10547  *                            config parameters
10548  * @soc: soc handle
10549  * @params: ini parameter handle
10550  *
10551  * Return: void
10552  */
10553 static inline
10554 void dp_update_flow_control_parameters(struct dp_soc *soc,
10555 				struct cdp_config_params *params)
10556 {
10557 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
10558 					params->tx_flow_stop_queue_threshold;
10559 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
10560 					params->tx_flow_start_queue_offset;
10561 }
10562 #else
10563 static inline
10564 void dp_update_flow_control_parameters(struct dp_soc *soc,
10565 				struct cdp_config_params *params)
10566 {
10567 }
10568 #endif
10569 
10570 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
10571 /* Max packet limit for TX Comp packet loop (dp_tx_comp_handler) */
10572 #define DP_TX_COMP_LOOP_PKT_LIMIT_MAX 1024
10573 
10574 /* Max packet limit for RX REAP Loop (dp_rx_process) */
10575 #define DP_RX_REAP_LOOP_PKT_LIMIT_MAX 1024
10576 
10577 static
10578 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
10579 					struct cdp_config_params *params)
10580 {
10581 	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit =
10582 				params->tx_comp_loop_pkt_limit;
10583 
10584 	if (params->tx_comp_loop_pkt_limit < DP_TX_COMP_LOOP_PKT_LIMIT_MAX)
10585 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = true;
10586 	else
10587 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = false;
10588 
10589 	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit =
10590 				params->rx_reap_loop_pkt_limit;
10591 
10592 	if (params->rx_reap_loop_pkt_limit < DP_RX_REAP_LOOP_PKT_LIMIT_MAX)
10593 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = true;
10594 	else
10595 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = false;
10596 
10597 	soc->wlan_cfg_ctx->rx_hp_oos_update_limit =
10598 				params->rx_hp_oos_update_limit;
10599 
10600 	dp_info("tx_comp_loop_pkt_limit %u tx_comp_enable_eol_data_check %u rx_reap_loop_pkt_limit %u rx_enable_eol_data_check %u rx_hp_oos_update_limit %u",
10601 		soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit,
10602 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check,
10603 		soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit,
10604 		soc->wlan_cfg_ctx->rx_enable_eol_data_check,
10605 		soc->wlan_cfg_ctx->rx_hp_oos_update_limit);
10606 }
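
/*
 * Example (editor's addition, illustrative): with
 * tx_comp_loop_pkt_limit = 64, below DP_TX_COMP_LOOP_PKT_LIMIT_MAX
 * (1024), the end-of-loop data check stays enabled, so the Tx
 * completion handler re-checks the ring for fresh entries before
 * exiting; a limit at or above the cap disables that re-check. The Rx
 * reap limit follows the same pattern.
 */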
10607 
10608 static void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
10609 				      uint32_t rx_limit)
10610 {
10611 	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit = tx_limit;
10612 	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit = rx_limit;
10613 }
10614 
10615 #else
10616 static inline
10617 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
10618 					struct cdp_config_params *params)
10619 { }
10620 
10621 static inline
10622 void dp_update_soft_irq_limits(struct dp_soc *soc, uint32_t tx_limit,
10623 			       uint32_t rx_limit)
10624 {
10625 }
10626 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
10627 
10628 /**
10629  * dp_update_config_parameters() - API to store datapath
10630  *                            config parameters
10631  * @psoc: soc handle
10632  * @params: ini parameter handle
10633  *
10634  * Return: status
10635  */
10636 static
10637 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
10638 				struct cdp_config_params *params)
10639 {
10640 	struct dp_soc *soc = (struct dp_soc *)psoc;
10641 
10642 	if (!(soc)) {
10643 		dp_cdp_err("%pK: Invalid handle", soc);
10644 		return QDF_STATUS_E_INVAL;
10645 	}
10646 
10647 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
10648 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
10649 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
10650 	soc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload =
10651 				params->p2p_tcp_udp_checksumoffload;
10652 	soc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload =
10653 				params->nan_tcp_udp_checksumoffload;
10654 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
10655 				params->tcp_udp_checksumoffload;
10656 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
10657 	soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
10658 	soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
10659 
10660 	dp_update_rx_soft_irq_limit_params(soc, params);
10661 	dp_update_flow_control_parameters(soc, params);
10662 
10663 	return QDF_STATUS_SUCCESS;
10664 }
10665 
10666 static struct cdp_wds_ops dp_ops_wds = {
10667 	.vdev_set_wds = dp_vdev_set_wds,
10668 #ifdef WDS_VENDOR_EXTENSION
10669 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
10670 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
10671 #endif
10672 };
10673 
10674 /*
10675  * dp_txrx_data_tx_cb_set() - set the callback for non-standard tx
10676  * @soc_hdl: datapath soc handle
10677  * @vdev_id: virtual interface id
10678  * @callback: callback function
10679  * @ctxt: callback context
10680  * Return: void
10681  */
10682 static void
10683 dp_txrx_data_tx_cb_set(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
10684 		       ol_txrx_data_tx_cb callback, void *ctxt)
10685 {
10686 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10687 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10688 						     DP_MOD_ID_CDP);
10689 
10690 	if (!vdev)
10691 		return;
10692 
10693 	vdev->tx_non_std_data_callback.func = callback;
10694 	vdev->tx_non_std_data_callback.ctxt = ctxt;
10695 
10696 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10697 }
10698 
10699 /**
10700  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
10701  * @soc: datapath soc handle
10702  * @pdev_id: id of datapath pdev handle
10703  *
10704  * Return: opaque pointer to dp txrx handle
10705  */
10706 static void *dp_pdev_get_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id)
10707 {
10708 	struct dp_pdev *pdev =
10709 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10710 						   pdev_id);
10711 	if (qdf_unlikely(!pdev))
10712 		return NULL;
10713 
10714 	return pdev->dp_txrx_handle;
10715 }
10716 
10717 /**
10718  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
10719  * @soc: datapath soc handle
10720  * @pdev_id: id of datapath pdev handle
10721  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
10722  *
10723  * Return: void
10724  */
10725 static void
10726 dp_pdev_set_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id,
10727 			   void *dp_txrx_hdl)
10728 {
10729 	struct dp_pdev *pdev =
10730 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10731 						   pdev_id);
10732 
10733 	if (!pdev)
10734 		return;
10735 
10736 	pdev->dp_txrx_handle = dp_txrx_hdl;
10737 }
10738 
10739 /**
10740  * dp_vdev_get_dp_ext_handle() - get dp handle from vdev
10741  * @soc_hdl: datapath soc handle
10742  * @vdev_id: vdev id
10743  *
10744  * Return: opaque pointer to dp txrx handle
10745  */
10746 static void *dp_vdev_get_dp_ext_handle(ol_txrx_soc_handle soc_hdl,
10747 				       uint8_t vdev_id)
10748 {
10749 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10750 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10751 						     DP_MOD_ID_CDP);
10752 	void *dp_ext_handle;
10753 
10754 	if (!vdev)
10755 		return NULL;
10756 	dp_ext_handle = vdev->vdev_dp_ext_handle;
10757 
10758 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10759 	return dp_ext_handle;
10760 }
10761 
10762 /**
10763  * dp_vdev_set_dp_ext_handle() - set dp handle in vdev
10764  * @soc_hdl: datapath soc handle
10765  * @vdev_id: vdev id
10766  * @size: size of advanced dp handle
10767  *
10768  * Return: QDF_STATUS
10769  */
10770 static QDF_STATUS
10771 dp_vdev_set_dp_ext_handle(ol_txrx_soc_handle soc_hdl, uint8_t vdev_id,
10772 			  uint16_t size)
10773 {
10774 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10775 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10776 						     DP_MOD_ID_CDP);
10777 	void *dp_ext_handle;
10778 
10779 	if (!vdev)
10780 		return QDF_STATUS_E_FAILURE;
10781 
10782 	dp_ext_handle = qdf_mem_malloc(size);
10783 
10784 	if (!dp_ext_handle) {
10785 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10786 		return QDF_STATUS_E_FAILURE;
10787 	}
10788 
10789 	vdev->vdev_dp_ext_handle = dp_ext_handle;
10790 
10791 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10792 	return QDF_STATUS_SUCCESS;
10793 }
10794 
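/*
 * Illustrative sketch (hypothetical caller; struct my_vdev_ext is an
 * assumed caller-private type): the set/get pair allocates and later
 * retrieves per-vdev extension state, e.g.:
 *
 *	if (dp_vdev_set_dp_ext_handle(soc_hdl, vdev_id,
 *				      sizeof(struct my_vdev_ext)) ==
 *	    QDF_STATUS_SUCCESS)
 *		ext = dp_vdev_get_dp_ext_handle(soc_hdl, vdev_id);
 */
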
10795 /**
10796  * dp_vdev_inform_ll_conn() - Inform vdev to add/delete a latency critical
10797  *			      connection for this vdev
10798  * @soc_hdl: CDP soc handle
10799  * @vdev_id: vdev ID
10800  * @action: Add/Delete action
10801  *
10802  * Returns: QDF_STATUS.
10803  */
10804 static QDF_STATUS
10805 dp_vdev_inform_ll_conn(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
10806 		       enum vdev_ll_conn_actions action)
10807 {
10808 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10809 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10810 						     DP_MOD_ID_CDP);
10811 
10812 	if (!vdev) {
10813 		dp_err("LL connection action for invalid vdev %d", vdev_id);
10814 		return QDF_STATUS_E_FAILURE;
10815 	}
10816 
10817 	switch (action) {
10818 	case CDP_VDEV_LL_CONN_ADD:
10819 		vdev->num_latency_critical_conn++;
10820 		break;
10821 
10822 	case CDP_VDEV_LL_CONN_DEL:
10823 		vdev->num_latency_critical_conn--;
10824 		break;
10825 
10826 	default:
10827 		dp_err("LL connection action invalid %d", action);
10828 		break;
10829 	}
10830 
10831 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10832 	return QDF_STATUS_SUCCESS;
10833 }
10834 
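/*
 * Illustrative sketch (hypothetical caller): a latency-critical flow is
 * accounted against the vdev on setup and released on teardown:
 *
 *	dp_vdev_inform_ll_conn(soc_hdl, vdev_id, CDP_VDEV_LL_CONN_ADD);
 *	...
 *	dp_vdev_inform_ll_conn(soc_hdl, vdev_id, CDP_VDEV_LL_CONN_DEL);
 */
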
10835 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
10836 /**
10837  * dp_soc_set_swlm_enable() - Enable/Disable SWLM if initialized.
10838  * @soc_hdl: CDP Soc handle
10839  * @value: Enable/Disable value
10840  *
10841  * Returns: QDF_STATUS
10842  */
10843 static QDF_STATUS dp_soc_set_swlm_enable(struct cdp_soc_t *soc_hdl,
10844 					 uint8_t value)
10845 {
10846 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10847 
10848 	if (!soc->swlm.is_init) {
10849 		dp_err("SWLM is not initialized");
10850 		return QDF_STATUS_E_FAILURE;
10851 	}
10852 
10853 	soc->swlm.is_enabled = !!value;
10854 
10855 	return QDF_STATUS_SUCCESS;
10856 }
10857 
10858 /**
10859  * dp_soc_is_swlm_enabled() - Check if SWLM is enabled.
10860  * @soc_hdl: CDP Soc handle
10861  *
10862  * Returns: 1 if SWLM is enabled, 0 otherwise
10863  */
10864 static uint8_t dp_soc_is_swlm_enabled(struct cdp_soc_t *soc_hdl)
10865 {
10866 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10867 
10868 	return soc->swlm.is_enabled;
10869 }
10870 #endif
10871 
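/*
 * Illustrative sketch (hypothetical caller, requires
 * WLAN_DP_FEATURE_SW_LATENCY_MGR): SWLM can only be toggled once it is
 * initialized; the setter fails otherwise:
 *
 *	if (dp_soc_set_swlm_enable(soc_hdl, 1) == QDF_STATUS_SUCCESS &&
 *	    dp_soc_is_swlm_enabled(soc_hdl))
 *		... software latency manager is active ...
 */
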
10872 /**
10873  * dp_display_srng_info() - Dump the srng HP TP info
10874  * @soc_hdl: CDP Soc handle
10875  *
10876  * This function dumps the SW hp/tp values for the important rings.
10877  * HW hp/tp values are not being dumped, since it can lead to
10878  * READ NOC error when UMAC is in low power state. MCC does not have
10879  * device force wake working yet.
10880  *
10881  * Return: none
10882  */
10883 static void dp_display_srng_info(struct cdp_soc_t *soc_hdl)
10884 {
10885 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10886 	hal_soc_handle_t hal_soc = soc->hal_soc;
10887 	uint32_t hp, tp, i;
10888 
10889 	dp_info("SRNG HP-TP data:");
10890 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
10891 		hal_get_sw_hptp(hal_soc, soc->tcl_data_ring[i].hal_srng,
10892 				&hp, &tp);
10893 		dp_info("TCL DATA ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
10894 
10895 		hal_get_sw_hptp(hal_soc, soc->tx_comp_ring[i].hal_srng,
10896 				&hp, &tp);
10897 		dp_info("TX comp ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
10898 	}
10899 
10900 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
10901 		hal_get_sw_hptp(hal_soc, soc->reo_dest_ring[i].hal_srng,
10902 				&hp, &tp);
10903 		dp_info("REO DST ring[%d]: hp=0x%x, tp=0x%x", i, hp, tp);
10904 	}
10905 
10906 	hal_get_sw_hptp(hal_soc, soc->reo_exception_ring.hal_srng, &hp, &tp);
10907 	dp_info("REO exception ring: hp=0x%x, tp=0x%x", hp, tp);
10908 
10909 	hal_get_sw_hptp(hal_soc, soc->rx_rel_ring.hal_srng, &hp, &tp);
10910 	dp_info("WBM RX release ring: hp=0x%x, tp=0x%x", hp, tp);
10911 
10912 	hal_get_sw_hptp(hal_soc, soc->wbm_desc_rel_ring.hal_srng, &hp, &tp);
10913 	dp_info("WBM desc release ring: hp=0x%x, tp=0x%x", hp, tp);
10914 }
10915 
10916 /**
10917  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
10918  * @soc_handle: datapath soc handle
10919  *
10920  * Return: opaque pointer to external dp (non-core DP)
10921  */
10922 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
10923 {
10924 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
10925 
10926 	return soc->external_txrx_handle;
10927 }
10928 
10929 /**
10930  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
10931  * @soc_handle: datapath soc handle
10932  * @txrx_handle: opaque pointer to external dp (non-core DP)
10933  *
10934  * Return: void
10935  */
10936 static void
10937 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
10938 {
10939 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
10940 
10941 	soc->external_txrx_handle = txrx_handle;
10942 }
10943 
10944 /**
10945  * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping
10946  * @soc_hdl: datapath soc handle
10947  * @pdev_id: id of the datapath pdev handle
10948  * @lmac_id: lmac id
10949  *
10950  * Return: QDF_STATUS
10951  */
10952 static QDF_STATUS
10953 dp_soc_map_pdev_to_lmac
10954 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
10955 	 uint32_t lmac_id)
10956 {
10957 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10958 
10959 	wlan_cfg_set_hw_mac_idx(soc->wlan_cfg_ctx,
10960 				pdev_id,
10961 				lmac_id);
10962 
10963 	/* Set host PDEV ID for lmac_id */
10964 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
10965 			      pdev_id,
10966 			      lmac_id);
10967 
10968 	return QDF_STATUS_SUCCESS;
10969 }
10970 
10971 /**
10972  * dp_soc_handle_pdev_mode_change() - Update pdev to lmac mapping
10973  * @soc_hdl: datapath soc handle
10974  * @pdev_id: id of the datapath pdev handle
10975  * @lmac_id: lmac id
10976  *
10977  * In the event of a dynamic mode change, update the pdev to lmac mapping
10978  *
10979  * Return: QDF_STATUS
10980  */
10981 static QDF_STATUS
10982 dp_soc_handle_pdev_mode_change
10983 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
10984 	 uint32_t lmac_id)
10985 {
10986 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10987 	struct dp_vdev *vdev = NULL;
10988 	uint8_t hw_pdev_id, mac_id;
10989 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc,
10990 								  pdev_id);
10991 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
10992 
10993 	if (qdf_unlikely(!pdev))
10994 		return QDF_STATUS_E_FAILURE;
10995 
10996 	pdev->lmac_id = lmac_id;
10997 	pdev->target_pdev_id =
10998 		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
10999 	dp_info("mode change %d %d", pdev->pdev_id, pdev->lmac_id);
11000 
11001 	/* Set host PDEV ID for lmac_id */
11002 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
11003 			      pdev->pdev_id,
11004 			      lmac_id);
11005 
11006 	hw_pdev_id =
11007 		dp_get_target_pdev_id_for_host_pdev_id(soc,
11008 						       pdev->pdev_id);
11009 
11010 	/*
11011 	 * When NSS offload is enabled, send the pdev_id->lmac_id
11012 	 * and pdev_id->hw_pdev_id mappings to the NSS FW
11013 	 */
11014 	if (nss_config) {
11015 		mac_id = pdev->lmac_id;
11016 		if (soc->cdp_soc.ol_ops->pdev_update_lmac_n_target_pdev_id)
11017 			soc->cdp_soc.ol_ops->
11018 				pdev_update_lmac_n_target_pdev_id(
11019 				soc->ctrl_psoc,
11020 				&pdev_id, &mac_id, &hw_pdev_id);
11021 	}
11022 
11023 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
11024 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
11025 		HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
11026 						hw_pdev_id);
11027 		vdev->lmac_id = pdev->lmac_id;
11028 	}
11029 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
11030 
11031 	return QDF_STATUS_SUCCESS;
11032 }
11033 
11034 /**
11035  * dp_soc_set_pdev_status_down() - set pdev down/up status
11036  * @soc: datapath soc handle
11037  * @pdev_id: id of datapath pdev handle
11038  * @is_pdev_down: pdev down/up status
11039  *
11040  * Return: QDF_STATUS
11041  */
11042 static QDF_STATUS
11043 dp_soc_set_pdev_status_down(struct cdp_soc_t *soc, uint8_t pdev_id,
11044 			    bool is_pdev_down)
11045 {
11046 	struct dp_pdev *pdev =
11047 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11048 						   pdev_id);
11049 	if (!pdev)
11050 		return QDF_STATUS_E_FAILURE;
11051 
11052 	pdev->is_pdev_down = is_pdev_down;
11053 	return QDF_STATUS_SUCCESS;
11054 }
11055 
11056 /**
11057  * dp_get_cfg_capabilities() - get dp capabilities
11058  * @soc_handle: datapath soc handle
11059  * @dp_caps: enum for dp capabilities
11060  *
11061  * Return: bool to determine if dp caps is enabled
11062  */
11063 static bool
11064 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
11065 			enum cdp_capabilities dp_caps)
11066 {
11067 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
11068 
11069 	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
11070 }
11071 
11072 #ifdef FEATURE_AST
11073 static QDF_STATUS
11074 dp_peer_teardown_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
11075 		       uint8_t *peer_mac)
11076 {
11077 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11078 	QDF_STATUS status = QDF_STATUS_SUCCESS;
11079 	struct dp_peer *peer =
11080 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
11081 					       DP_MOD_ID_CDP);
11082 
11083 	/* Peer can be null for monitor vap mac address */
11084 	if (!peer) {
11085 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
11086 			  "%s: Invalid peer\n", __func__);
11087 		return QDF_STATUS_E_FAILURE;
11088 	}
11089 
11090 	dp_peer_update_state(soc, peer, DP_PEER_STATE_LOGICAL_DELETE);
11091 
11092 	qdf_spin_lock_bh(&soc->ast_lock);
11093 	dp_peer_delete_ast_entries(soc, peer);
11094 	qdf_spin_unlock_bh(&soc->ast_lock);
11095 
11096 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11097 	return status;
11098 }
11099 #endif
11100 
11101 #ifdef ATH_SUPPORT_NAC_RSSI
11102 /**
11103  * dp_vdev_get_neighbour_rssi() - fetch RSSI stored for a configured NAC
11104  * @soc_hdl: DP soc handle
11105  * @vdev_id: id of DP vdev handle
11106  * @mac_addr: neighbour mac
11107  * @rssi: rssi value
11108  *
11109  * Return: 0 for success. nonzero for failure.
11110  */
11111 static QDF_STATUS  dp_vdev_get_neighbour_rssi(struct cdp_soc_t *soc_hdl,
11112 					      uint8_t vdev_id,
11113 					      char *mac_addr,
11114 					      uint8_t *rssi)
11115 {
11116 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11117 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11118 						     DP_MOD_ID_CDP);
11119 	struct dp_pdev *pdev;
11120 	struct dp_neighbour_peer *peer = NULL;
11121 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
11122 
11123 	if (!vdev)
11124 		return status;
11125 
11126 	pdev = vdev->pdev;
11127 	*rssi = 0;
11128 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
11129 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
11130 		      neighbour_peer_list_elem) {
11131 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
11132 				mac_addr, QDF_MAC_ADDR_SIZE) == 0) {
11133 			*rssi = peer->rssi;
11134 			status = QDF_STATUS_SUCCESS;
11135 			break;
11136 		}
11137 	}
11138 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
11139 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11140 	return status;
11141 }
11142 
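/**
 * dp_config_for_nac_rssi() - configure NAC RSSI measurement for a vdev
 * @cdp_soc: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @cmd: add/delete command for the NAC (neighbour peer) entry
 * @bssid: neighbour bssid
 * @client_macaddr: NAC client mac address
 * @chan_num: channel number
 *
 * Return: QDF_STATUS
 */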
11143 static QDF_STATUS
11144 dp_config_for_nac_rssi(struct cdp_soc_t *cdp_soc,
11145 		       uint8_t vdev_id,
11146 		       enum cdp_nac_param_cmd cmd, char *bssid,
11147 		       char *client_macaddr,
11148 		       uint8_t chan_num)
11149 {
11150 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
11151 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11152 						     DP_MOD_ID_CDP);
11153 	struct dp_pdev *pdev;
11154 
11155 	if (!vdev)
11156 		return QDF_STATUS_E_FAILURE;
11157 
11158 	pdev = vdev->pdev;
11159 	pdev->nac_rssi_filtering = 1;
11160 	/* Store address of NAC (neighbour peer) which will be checked
11161 	 * against TA of received packets.
11162 	 */
11163 
11164 	if (cmd == CDP_NAC_PARAM_ADD) {
11165 		dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id,
11166 						 DP_NAC_PARAM_ADD,
11167 						 (uint8_t *)client_macaddr);
11168 	} else if (cmd == CDP_NAC_PARAM_DEL) {
11169 		dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id,
11170 						 DP_NAC_PARAM_DEL,
11171 						 (uint8_t *)client_macaddr);
11172 	}
11173 
11174 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
11175 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
11176 			(soc->ctrl_psoc, pdev->pdev_id,
11177 			 vdev->vdev_id, cmd, bssid, client_macaddr);
11178 
11179 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11180 	return QDF_STATUS_SUCCESS;
11181 }
11182 #endif
11183 
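/*
 * Illustrative sketch (hypothetical caller, requires
 * ATH_SUPPORT_NAC_RSSI): a neighbour (NAC) client is first registered
 * for filtering and its RSSI read back later:
 *
 *	dp_config_for_nac_rssi(cdp_soc, vdev_id, CDP_NAC_PARAM_ADD,
 *			       bssid, client_mac, chan_num);
 *	...
 *	dp_vdev_get_neighbour_rssi(cdp_soc, vdev_id, client_mac, &rssi);
 */
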
11184 /**
11185  * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering
11186  * for pktlog
11187  * @soc: cdp_soc handle
11188  * @pdev_id: id of dp pdev handle
11189  * @mac_addr: Peer mac address
11190  * @enb_dsb: Enable or disable peer based filtering
11191  *
11192  * Return: QDF_STATUS
11193  */
11194 static int
11195 dp_enable_peer_based_pktlog(struct cdp_soc_t *soc, uint8_t pdev_id,
11196 			    uint8_t *mac_addr, uint8_t enb_dsb)
11197 {
11198 	struct dp_peer *peer;
11199 	struct dp_pdev *pdev =
11200 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11201 						   pdev_id);
11202 
11203 	if (!pdev)
11204 		return QDF_STATUS_E_FAILURE;
11205 
11206 	peer = dp_peer_find_hash_find((struct dp_soc *)soc, mac_addr,
11207 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
11208 
11209 	if (!peer) {
11210 		dp_err("Invalid Peer");
11211 		return QDF_STATUS_E_FAILURE;
11212 	}
11213 
11214 	peer->peer_based_pktlog_filter = enb_dsb;
11215 	pdev->dp_peer_based_pktlog = enb_dsb;
11216 
11217 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11218 
11219 	return QDF_STATUS_SUCCESS;
11220 }
11221 
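/*
 * Illustrative sketch (hypothetical caller): per-peer pktlog filtering
 * is keyed on the peer mac alone; enb_dsb = 1 enables, 0 disables:
 *
 *	dp_enable_peer_based_pktlog(soc, pdev_id, peer_mac, 1);
 */
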
11222 #ifndef WLAN_SUPPORT_RX_TAG_STATISTICS
11223 /**
11224  * dp_dump_pdev_rx_protocol_tag_stats() - dump the number of packets tagged for
11225  * given protocol type (RX_PROTOCOL_TAG_ALL indicates for all protocol)
11226  * @soc: cdp_soc handle
11227  * @pdev_id: id of cdp_pdev handle
11228  * @protocol_type: protocol type for which stats should be displayed
11229  *
11230  * Return: none
11231  */
11232 static inline void
11233 dp_dump_pdev_rx_protocol_tag_stats(struct cdp_soc_t  *soc, uint8_t pdev_id,
11234 				   uint16_t protocol_type)
11235 {
11236 }
11237 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
11238 
11239 #ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
11240 /**
11241  * dp_update_pdev_rx_protocol_tag() - Add/remove a protocol tag that should be
11242  * applied to the desired protocol type packets
11243  * @soc: soc handle
11244  * @pdev_id: id of cdp_pdev handle
11245  * @enable_rx_protocol_tag: bitmask that indicates which protocol types
11246  * are enabled for tagging; zero disables the feature, non-zero enables it
11248  * @protocol_type: new protocol type for which the tag is being added
11249  * @tag: user configured tag for the new protocol
11250  *
11251  * Return: Success
11252  */
11253 static inline QDF_STATUS
11254 dp_update_pdev_rx_protocol_tag(struct cdp_soc_t  *soc, uint8_t pdev_id,
11255 			       uint32_t enable_rx_protocol_tag,
11256 			       uint16_t protocol_type,
11257 			       uint16_t tag)
11258 {
11259 	return QDF_STATUS_SUCCESS;
11260 }
11261 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
11262 
11263 #ifndef WLAN_SUPPORT_RX_FLOW_TAG
11264 /**
11265  * dp_set_rx_flow_tag() - add/delete a flow
11266  * @cdp_soc: soc handle
11267  * @pdev_id: id of cdp_pdev handle
11268  * @flow_info: flow tuple that is to be added to/deleted from flow search table
11269  *
11270  * Return: Success
11271  */
11272 static inline QDF_STATUS
11273 dp_set_rx_flow_tag(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
11274 		   struct cdp_rx_flow_info *flow_info)
11275 {
11276 	return QDF_STATUS_SUCCESS;
11277 }
11278 /**
11279  * dp_dump_rx_flow_tag_stats() - dump the number of packets tagged for
11280  * given flow 5-tuple
11281  * @cdp_soc: soc handle
11282  * @pdev_id: id of cdp_pdev handle
11283  * @flow_info: flow 5-tuple for which stats should be displayed
11284  *
11285  * Return: Success
11286  */
11287 static inline QDF_STATUS
11288 dp_dump_rx_flow_tag_stats(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
11289 			  struct cdp_rx_flow_info *flow_info)
11290 {
11291 	return QDF_STATUS_SUCCESS;
11292 }
11293 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
11294 
11295 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
11296 					   uint32_t max_peers,
11297 					   uint32_t max_ast_index,
11298 					   bool peer_map_unmap_v2)
11299 {
11300 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11301 
11302 	soc->max_peers = max_peers;
11303 
11304 	qdf_print("%s max_peers %u, max_ast_index: %u\n",
11305 		  __func__, max_peers, max_ast_index);
11306 	wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);
11307 
11308 	if (dp_peer_find_attach(soc))
11309 		return QDF_STATUS_E_FAILURE;
11310 
11311 	soc->is_peer_map_unmap_v2 = peer_map_unmap_v2;
11312 	soc->peer_map_attach_success = TRUE;
11313 
11314 	return QDF_STATUS_SUCCESS;
11315 }
11316 
11317 static QDF_STATUS dp_soc_set_param(struct cdp_soc_t  *soc_hdl,
11318 				   enum cdp_soc_param_t param,
11319 				   uint32_t value)
11320 {
11321 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11322 
11323 	switch (param) {
11324 	case DP_SOC_PARAM_MSDU_EXCEPTION_DESC:
11325 		soc->num_msdu_exception_desc = value;
11326 		dp_info("num_msdu_exception_desc %u", value);
11328 		break;
11329 	case DP_SOC_PARAM_CMEM_FSE_SUPPORT:
11330 		if (wlan_cfg_is_fst_in_cmem_enabled(soc->wlan_cfg_ctx))
11331 			soc->fst_in_cmem = !!value;
11332 		dp_info("FW supports CMEM FSE %u", value);
11333 		break;
11334 	case DP_SOC_PARAM_MAX_AST_AGEOUT:
11335 		soc->max_ast_ageout_count = value;
11336 		dp_info("Max ast ageout count %u", soc->max_ast_ageout_count);
11337 		break;
11338 	default:
11339 		dp_info("unhandled param %d", param);
11340 		break;
11341 	}
11342 
11343 	return QDF_STATUS_SUCCESS;
11344 }
11345 
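/*
 * Illustrative sketch (hypothetical caller, value is an example only):
 * soc-level tunables funnel through the same setter, keyed by
 * enum cdp_soc_param_t:
 *
 *	dp_soc_set_param(soc_hdl, DP_SOC_PARAM_MSDU_EXCEPTION_DESC, 1024);
 */
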
11346 static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
11347 				      void *stats_ctx)
11348 {
11349 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
11350 
11351 	soc->rate_stats_ctx = (struct cdp_soc_rate_stats_ctx *)stats_ctx;
11352 }
11353 
11354 #if defined(FEATURE_PERPKT_INFO) && defined(WDI_EVENT_ENABLE)
11355 /**
11356  * dp_peer_flush_rate_stats_req(): Flush peer rate stats
11357  * @soc: Datapath SOC handle
11358  * @peer: Datapath peer
11359  * @arg: argument to iter function
11360  *
11361  * Return: None
11362  */
11363 static void
11364 dp_peer_flush_rate_stats_req(struct dp_soc *soc, struct dp_peer *peer,
11365 			     void *arg)
11366 {
11367 	if (peer->bss_peer)
11368 		return;
11369 
11370 	dp_wdi_event_handler(
11371 		WDI_EVENT_FLUSH_RATE_STATS_REQ,
11372 		soc, peer->rdkstats_ctx,
11373 		peer->peer_id,
11374 		WDI_NO_VAL, peer->vdev->pdev->pdev_id);
11375 }
11376 
11377 /**
11378  * dp_flush_rate_stats_req(): Flush peer rate stats in pdev
11379  * @soc_hdl: Datapath SOC handle
11380  * @pdev_id: pdev_id
11381  *
11382  * Return: QDF_STATUS
11383  */
11384 static QDF_STATUS dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
11385 					  uint8_t pdev_id)
11386 {
11387 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11388 	struct dp_pdev *pdev =
11389 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11390 						   pdev_id);
11391 	if (!pdev)
11392 		return QDF_STATUS_E_FAILURE;
11393 
11394 	dp_pdev_iterate_peer(pdev, dp_peer_flush_rate_stats_req, NULL,
11395 			     DP_MOD_ID_CDP);
11396 
11397 	return QDF_STATUS_SUCCESS;
11398 }
11399 #else
11400 static inline QDF_STATUS
11401 dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
11402 			uint8_t pdev_id)
11403 {
11404 	return QDF_STATUS_SUCCESS;
11405 }
11406 #endif
11407 
11408 static void *dp_peer_get_rdkstats_ctx(struct cdp_soc_t *soc_hdl,
11409 				      uint8_t vdev_id,
11410 				      uint8_t *mac_addr)
11411 {
11412 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
11413 	struct dp_peer *peer;
11414 	void *rdkstats_ctx = NULL;
11415 
11416 	if (mac_addr) {
11417 		peer = dp_peer_find_hash_find(soc, mac_addr,
11418 					      0, vdev_id,
11419 					      DP_MOD_ID_CDP);
11420 		if (!peer)
11421 			return NULL;
11422 
11423 		rdkstats_ctx = peer->rdkstats_ctx;
11424 
11425 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
11426 	}
11427 
11428 	return rdkstats_ctx;
11429 }
11430 
11431 #if defined(FEATURE_PERPKT_INFO) && defined(WDI_EVENT_ENABLE)
11432 static QDF_STATUS dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
11433 					   uint8_t pdev_id,
11434 					   void *buf)
11435 {
11436 	dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS,
11437 			     (struct dp_soc *)soc, buf, HTT_INVALID_PEER,
11438 			     WDI_NO_VAL, pdev_id);
11439 	return QDF_STATUS_SUCCESS;
11440 }
11441 #else
11442 static inline QDF_STATUS
11443 dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
11444 			 uint8_t pdev_id,
11445 			 void *buf)
11446 {
11447 	return QDF_STATUS_SUCCESS;
11448 }
11449 #endif
11450 
11451 static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle)
11452 {
11453 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
11454 
11455 	return soc->rate_stats_ctx;
11456 }
11457 
11458 /**
11459  * dp_get_cfg() - get dp cfg
11460  * @soc: cdp soc handle
11461  * @cfg: cfg enum
11462  *
11463  * Return: cfg value
11464  */
11465 static uint32_t dp_get_cfg(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg)
11466 {
11467 	struct dp_soc *dpsoc = (struct dp_soc *)soc;
11468 	uint32_t value = 0;
11469 
11470 	switch (cfg) {
11471 	case cfg_dp_enable_data_stall:
11472 		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
11473 		break;
11474 	case cfg_dp_enable_p2p_ip_tcp_udp_checksum_offload:
11475 		value = dpsoc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload;
11476 		break;
11477 	case cfg_dp_enable_nan_ip_tcp_udp_checksum_offload:
11478 		value = dpsoc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload;
11479 		break;
11480 	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
11481 		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
11482 		break;
11483 	case cfg_dp_disable_legacy_mode_csum_offload:
11484 		value = dpsoc->wlan_cfg_ctx->
11485 					legacy_mode_checksumoffload_disable;
11486 		break;
11487 	case cfg_dp_tso_enable:
11488 		value = dpsoc->wlan_cfg_ctx->tso_enabled;
11489 		break;
11490 	case cfg_dp_lro_enable:
11491 		value = dpsoc->wlan_cfg_ctx->lro_enabled;
11492 		break;
11493 	case cfg_dp_gro_enable:
11494 		value = dpsoc->wlan_cfg_ctx->gro_enabled;
11495 		break;
11496 	case cfg_dp_sg_enable:
11497 		value = dpsoc->wlan_cfg_ctx->sg_enabled;
11498 		break;
11499 	case cfg_dp_tx_flow_start_queue_offset:
11500 		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
11501 		break;
11502 	case cfg_dp_tx_flow_stop_queue_threshold:
11503 		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
11504 		break;
11505 	case cfg_dp_disable_intra_bss_fwd:
11506 		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
11507 		break;
11508 	case cfg_dp_pktlog_buffer_size:
11509 		value = dpsoc->wlan_cfg_ctx->pktlog_buffer_size;
11510 		break;
11511 	case cfg_dp_wow_check_rx_pending:
11512 		value = dpsoc->wlan_cfg_ctx->wow_check_rx_pending_enable;
11513 		break;
11514 	default:
11515 		value =  0;
11516 	}
11517 
11518 	return value;
11519 }
11520 
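/*
 * Illustrative sketch (hypothetical caller): feature checks read the
 * cached wlan_cfg values through dp_get_cfg(), e.g.:
 *
 *	if (dp_get_cfg(soc, cfg_dp_gro_enable))
 *		... set up GRO on the rx path ...
 */
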
11521 #ifdef PEER_FLOW_CONTROL
11522 /**
11523  * dp_tx_flow_ctrl_configure_pdev() - Configure flow control params
11524  * @soc_handle: datapath soc handle
11525  * @pdev_id: id of datapath pdev handle
11526  * @param: ol ath params
11527  * @value: value of the flag
11528  * @buff: Buffer to be passed
11529  *
11530  * This function mirrors the legacy implementation, where a single
11531  * function both displays stats and updates pdev params.
11532  *
11533  * Return: 0 for success. nonzero for failure.
11534  */
11535 static uint32_t dp_tx_flow_ctrl_configure_pdev(struct cdp_soc_t *soc_handle,
11536 					       uint8_t pdev_id,
11537 					       enum _dp_param_t param,
11538 					       uint32_t value, void *buff)
11539 {
11540 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
11541 	struct dp_pdev *pdev =
11542 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
11543 						   pdev_id);
11544 
11545 	if (qdf_unlikely(!pdev))
11546 		return 1;
11547 
11548 	soc = pdev->soc;
11549 	if (!soc)
11550 		return 1;
11551 
11552 	switch (param) {
11553 #ifdef QCA_ENH_V3_STATS_SUPPORT
11554 	case DP_PARAM_VIDEO_DELAY_STATS_FC:
11555 		pdev->delay_stats_flag = !!value;
11559 		break;
11560 	case DP_PARAM_VIDEO_STATS_FC:
11561 		qdf_print("------- TID Stats ------");
11562 		dp_pdev_print_tid_stats(pdev);
11563 		qdf_print("------ Delay Stats ------");
11564 		dp_pdev_print_delay_stats(pdev);
11565 		break;
11566 #endif
11567 	case DP_PARAM_TOTAL_Q_SIZE:
11568 		{
11569 			uint32_t tx_min, tx_max;
11570 
11571 			tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx);
11572 			tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
11573 
11574 			if (!buff) {
11575 				if ((value >= tx_min) && (value <= tx_max)) {
11576 					pdev->num_tx_allowed = value;
11577 				} else {
11578 					dp_tx_info("%pK: Failed to update num_tx_allowed, Q_min = %d Q_max = %d",
11579 						   soc, tx_min, tx_max);
11580 					break;
11581 				}
11582 			} else {
11583 				*(int *)buff = pdev->num_tx_allowed;
11584 			}
11585 		}
11586 		break;
11587 	default:
11588 		dp_tx_info("%pK: unhandled param %d", soc, param);
11589 		break;
11590 	}
11591 
11592 	return 0;
11593 }
11594 #endif
11595 
11596 /**
11597  * dp_set_pdev_pcp_tid_map_wifi3(): update pcp tid map in pdev
11598  * @psoc: dp soc handle
11599  * @pdev_id: id of DP_PDEV handle
11600  * @pcp: pcp value
11601  * @tid: tid value passed by the user
11602  *
11603  * Return: QDF_STATUS_SUCCESS on success
11604  */
11605 static QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
11606 						uint8_t pdev_id,
11607 						uint8_t pcp, uint8_t tid)
11608 {
11609 	struct dp_soc *soc = (struct dp_soc *)psoc;
11610 
11611 	soc->pcp_tid_map[pcp] = tid;
11612 
11613 	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
11614 	return QDF_STATUS_SUCCESS;
11615 }
11616 
11617 /**
11618  * dp_set_vdev_pcp_tid_map_wifi3(): update pcp tid map in vdev
11619  * @soc_hdl: DP soc handle
11620  * @vdev_id: id of DP_VDEV handle
11621  * @pcp: pcp value
11622  * @tid: tid value passed by the user
11623  *
11624  * Return: QDF_STATUS_SUCCESS on success
11625  */
11626 static QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc_hdl,
11627 						uint8_t vdev_id,
11628 						uint8_t pcp, uint8_t tid)
11629 {
11630 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11631 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
11632 						     DP_MOD_ID_CDP);
11633 
11634 	if (!vdev)
11635 		return QDF_STATUS_E_FAILURE;
11636 
11637 	vdev->pcp_tid_map[pcp] = tid;
11638 
11639 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
11640 	return QDF_STATUS_SUCCESS;
11641 }
11642 
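/*
 * Illustrative sketch (hypothetical caller): PCP values 0..7 map to
 * TIDs; an identity mapping at pdev scope would look like:
 *
 *	for (pcp = 0; pcp < 8; pcp++)
 *		dp_set_pdev_pcp_tid_map_wifi3(psoc, pdev_id, pcp, pcp);
 *
 * Per-vdev overrides use dp_set_vdev_pcp_tid_map_wifi3() the same way.
 */
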
11643 #ifdef QCA_SUPPORT_FULL_MON
11644 static inline QDF_STATUS
11645 dp_config_full_mon_mode(struct cdp_soc_t *soc_handle,
11646 			uint8_t val)
11647 {
11648 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
11649 
11650 	soc->full_mon_mode = val;
11651 	qdf_alert("Configure full monitor mode val: %d", val);
11652 
11653 	return QDF_STATUS_SUCCESS;
11654 }
11655 #else
11656 static inline QDF_STATUS
11657 dp_config_full_mon_mode(struct cdp_soc_t *soc_handle,
11658 			uint8_t val)
11659 {
11660 	return QDF_STATUS_SUCCESS;
11661 }
11662 #endif
11663 
11664 #if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
11665 static void dp_drain_txrx(struct cdp_soc_t *soc_handle)
11666 {
11667 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
11668 	uint32_t cur_tx_limit, cur_rx_limit;
11669 	uint32_t budget = 0xffff;
11670 	uint32_t val;
11671 	int i;
11672 
11673 	cur_tx_limit = soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit;
11674 	cur_rx_limit = soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit;
11675 
11676 	/* Temporarily increase soft irq limits when going to drain
11677 	 * the UMAC/LMAC SRNGs and restore them after polling.
11678 	 * Though the budget is on higher side, the TX/RX reaping loops
11679 	 * will not execute longer as both TX and RX would be suspended
11680 	 * by the time this API is called.
11681 	 */
11682 	dp_update_soft_irq_limits(soc, budget, budget);
11683 
11684 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
11685 		dp_service_srngs(&soc->intr_ctx[i], budget);
11686 
11687 	dp_update_soft_irq_limits(soc, cur_tx_limit, cur_rx_limit);
11688 
11689 	/* Do a dummy read at offset 0; this will ensure all
11690 	 * pending writes (HP/TP) are flushed before the read returns.
11691 	 */
11692 	val = HAL_REG_READ((struct hal_soc *)soc->hal_soc, 0);
11693 	dp_debug("Register value at offset 0: %u", val);
11694 }
11695 #endif
11696 
11697 static struct cdp_cmn_ops dp_ops_cmn = {
11698 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
11699 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
11700 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
11701 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
11702 	.txrx_pdev_post_attach = dp_pdev_post_attach_wifi3,
11703 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
11704 	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
11705 	.txrx_peer_create = dp_peer_create_wifi3,
11706 	.txrx_peer_setup = dp_peer_setup_wifi3,
11707 #ifdef FEATURE_AST
11708 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
11709 #else
11710 	.txrx_peer_teardown = NULL,
11711 #endif
11712 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
11713 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
11714 	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
11715 	.txrx_peer_get_ast_info_by_pdev =
11716 		dp_peer_get_ast_info_by_pdevid_wifi3,
11717 	.txrx_peer_ast_delete_by_soc =
11718 		dp_peer_ast_entry_del_by_soc,
11719 	.txrx_peer_ast_delete_by_pdev =
11720 		dp_peer_ast_entry_del_by_pdev,
11721 	.txrx_peer_delete = dp_peer_delete_wifi3,
11722 	.txrx_vdev_register = dp_vdev_register_wifi3,
11723 	.txrx_soc_detach = dp_soc_detach_wifi3,
11724 	.txrx_soc_deinit = dp_soc_deinit_wifi3,
11725 	.txrx_soc_init = dp_soc_init_wifi3,
11726 #ifndef QCA_HOST_MODE_WIFI_DISABLED
11727 	.txrx_tso_soc_attach = dp_tso_soc_attach,
11728 	.txrx_tso_soc_detach = dp_tso_soc_detach,
11729 	.tx_send = dp_tx_send,
11730 	.tx_send_exc = dp_tx_send_exception,
11731 #endif
11732 	.txrx_pdev_init = dp_pdev_init_wifi3,
11733 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
11734 	.txrx_get_mon_vdev_from_pdev = dp_get_mon_vdev_from_pdev_wifi3,
11735 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
11736 	.txrx_ath_getstats = dp_get_device_stats,
11737 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
11738 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
11739 	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
11740 	.delba_process = dp_delba_process_wifi3,
11741 	.set_addba_response = dp_set_addba_response,
11742 	.flush_cache_rx_queue = NULL,
11743 	/* TODO: get API's for dscp-tid need to be added*/
11744 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
11745 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
11746 	.txrx_get_total_per = dp_get_total_per,
11747 	.txrx_stats_request = dp_txrx_stats_request,
11748 	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
11749 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
11750 	.display_stats = dp_txrx_dump_stats,
11751 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
11752 	.txrx_intr_detach = dp_soc_interrupt_detach,
11753 	.set_pn_check = dp_set_pn_check_wifi3,
11754 	.set_key_sec_type = dp_set_key_sec_type_wifi3,
11755 	.update_config_parameters = dp_update_config_parameters,
11756 	/* TODO: Add other functions */
11757 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
11758 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
11759 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
11760 	.get_vdev_dp_ext_txrx_handle = dp_vdev_get_dp_ext_handle,
11761 	.set_vdev_dp_ext_txrx_handle = dp_vdev_set_dp_ext_handle,
11762 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
11763 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
11764 	.map_pdev_to_lmac = dp_soc_map_pdev_to_lmac,
11765 	.handle_mode_change = dp_soc_handle_pdev_mode_change,
11766 	.set_pdev_status_down = dp_soc_set_pdev_status_down,
11767 	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
11768 	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
11769 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
11770 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
11771 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
11772 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
11773 	.set_soc_param = dp_soc_set_param,
11774 	.txrx_get_os_rx_handles_from_vdev =
11775 					dp_get_os_rx_handles_from_vdev_wifi3,
11776 	.delba_tx_completion = dp_delba_tx_completion_wifi3,
11777 	.get_dp_capabilities = dp_get_cfg_capabilities,
11778 	.txrx_get_cfg = dp_get_cfg,
11779 	.set_rate_stats_ctx = dp_soc_set_rate_stats_ctx,
11780 	.get_rate_stats_ctx = dp_soc_get_rate_stats_ctx,
11781 	.txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats,
11782 	.txrx_flush_rate_stats_request = dp_flush_rate_stats_req,
11783 	.txrx_peer_get_rdkstats_ctx = dp_peer_get_rdkstats_ctx,
11784 
11785 	.set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3,
11786 	.set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3,
11787 
11788 	.txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler,
11789 #ifdef QCA_MULTIPASS_SUPPORT
11790 	.set_vlan_groupkey = dp_set_vlan_groupkey,
11791 #endif
11792 	.get_peer_mac_list = dp_get_peer_mac_list,
11793 #ifdef QCA_SUPPORT_WDS_EXTENDED
11794 	.get_wds_ext_peer_id = dp_wds_ext_get_peer_id,
11795 	.set_wds_ext_peer_rx = dp_wds_ext_set_peer_rx,
11796 #endif /* QCA_SUPPORT_WDS_EXTENDED */
11797 
11798 #if defined(FEATURE_RUNTIME_PM) || defined(DP_POWER_SAVE)
11799 	.txrx_drain = dp_drain_txrx,
11800 #endif
11801 };
11802 
11803 static struct cdp_ctrl_ops dp_ops_ctrl = {
11804 	.txrx_peer_authorize = dp_peer_authorize,
11805 #ifdef VDEV_PEER_PROTOCOL_COUNT
11806 	.txrx_enable_peer_protocol_count = dp_enable_vdev_peer_protocol_count,
11807 	.txrx_set_peer_protocol_drop_mask =
11808 		dp_enable_vdev_peer_protocol_drop_mask,
11809 	.txrx_is_peer_protocol_count_enabled =
11810 		dp_is_vdev_peer_protocol_count_enabled,
11811 	.txrx_get_peer_protocol_drop_mask = dp_get_vdev_peer_protocol_drop_mask,
11812 #endif
11813 	.txrx_set_vdev_param = dp_set_vdev_param,
11814 	.txrx_set_psoc_param = dp_set_psoc_param,
11815 	.txrx_get_psoc_param = dp_get_psoc_param,
11816 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
11817 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
11818 #if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
11819 	.txrx_update_filter_neighbour_peers =
11820 		dp_update_filter_neighbour_peers,
11821 #endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
11822 	.txrx_get_sec_type = dp_get_sec_type,
11823 	.txrx_wdi_event_sub = dp_wdi_event_sub,
11824 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
11825 #ifdef WDI_EVENT_ENABLE
11826 	.txrx_get_pldev = dp_get_pldev,
11827 #endif
11828 	.txrx_set_pdev_param = dp_set_pdev_param,
11829 	.txrx_get_pdev_param = dp_get_pdev_param,
11830 	.txrx_set_peer_param = dp_set_peer_param,
11831 	.txrx_get_peer_param = dp_get_peer_param,
11832 #ifdef VDEV_PEER_PROTOCOL_COUNT
11833 	.txrx_peer_protocol_cnt = dp_peer_stats_update_protocol_cnt,
11834 #endif
11835 #ifdef ATH_SUPPORT_NAC_RSSI
11836 	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
11837 	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
11838 #endif
11839 #ifdef WLAN_SUPPORT_MSCS
11840 	.txrx_record_mscs_params = dp_record_mscs_params,
11841 #endif
11842 	.set_key = dp_set_michael_key,
11843 	.txrx_get_vdev_param = dp_get_vdev_param,
11844 	.enable_peer_based_pktlog = dp_enable_peer_based_pktlog,
11845 	.calculate_delay_stats = dp_calculate_delay_stats,
11846 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
11847 	.txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag,
11848 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
11849 	.txrx_dump_pdev_rx_protocol_tag_stats =
11850 				dp_dump_pdev_rx_protocol_tag_stats,
11851 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
11852 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
11853 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
11854 	.txrx_set_rx_flow_tag = dp_set_rx_flow_tag,
11855 	.txrx_dump_rx_flow_tag_stats = dp_dump_rx_flow_tag_stats,
11856 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
11857 #ifdef QCA_MULTIPASS_SUPPORT
11858 	.txrx_peer_set_vlan_id = dp_peer_set_vlan_id,
11859 #endif /*QCA_MULTIPASS_SUPPORT*/
11860 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
11861 	.txrx_update_peer_pkt_capture_params =
11862 		 dp_peer_update_pkt_capture_params,
11863 #endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
11864 };
11865 
11866 static struct cdp_me_ops dp_ops_me = {
11867 #ifndef QCA_HOST_MODE_WIFI_DISABLED
11868 #ifdef ATH_SUPPORT_IQUE
11869 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
11870 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
11871 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
11872 #endif
11873 #endif
11874 };
11875 
11876 static struct cdp_mon_ops dp_ops_mon = {
11877 	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
11878 	/* Added support for HK advance filter */
11879 	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
11880 	.txrx_deliver_tx_mgmt = dp_deliver_tx_mgmt,
11881 	.config_full_mon_mode = dp_config_full_mon_mode,
11882 };
11883 
11884 static struct cdp_host_stats_ops dp_ops_host_stats = {
11885 	.txrx_per_peer_stats = dp_get_host_peer_stats,
11886 	.get_fw_peer_stats = dp_get_fw_peer_stats,
11887 	.get_htt_stats = dp_get_htt_stats,
11888 #ifdef FEATURE_PERPKT_INFO
11889 	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
11890 	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
11891 #endif /* FEATURE_PERPKT_INFO */
11892 	.txrx_stats_publish = dp_txrx_stats_publish,
11893 	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
11894 	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
11895 	.txrx_get_peer_stats_param = dp_txrx_get_peer_stats_param,
11896 	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
11897 	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
11898 	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
11899 	.txrx_update_vdev_stats = dp_txrx_update_vdev_host_stats,
11900 	/* TODO */
11901 };
11902 
11903 static struct cdp_raw_ops dp_ops_raw = {
11904 	/* TODO */
11905 };
11906 
11907 #ifdef PEER_FLOW_CONTROL
11908 static struct cdp_pflow_ops dp_ops_pflow = {
11909 	dp_tx_flow_ctrl_configure_pdev,
11910 };
11911 #endif /* PEER_FLOW_CONTROL */
11912 
11913 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
11914 static struct cdp_cfr_ops dp_ops_cfr = {
11915 	.txrx_cfr_filter = dp_cfr_filter,
11916 	.txrx_get_cfr_rcc = dp_get_cfr_rcc,
11917 	.txrx_set_cfr_rcc = dp_set_cfr_rcc,
11918 	.txrx_get_cfr_dbg_stats = dp_get_cfr_dbg_stats,
11919 	.txrx_clear_cfr_dbg_stats = dp_clear_cfr_dbg_stats,
11920 	.txrx_enable_mon_reap_timer = dp_enable_mon_reap_timer,
11921 };
11922 #endif
11923 
11924 #ifdef WLAN_SUPPORT_MSCS
11925 static struct cdp_mscs_ops dp_ops_mscs = {
11926 	.mscs_peer_lookup_n_get_priority = dp_mscs_peer_lookup_n_get_priority,
11927 };
11928 #endif
11929 
11930 #ifdef WLAN_SUPPORT_MESH_LATENCY
11931 static struct cdp_mesh_latency_ops dp_ops_mesh_latency = {
11932 	.mesh_latency_update_peer_parameter =
11933 		dp_mesh_latency_update_peer_parameter,
11934 };
11935 #endif
11936 
11937 #ifdef FEATURE_RUNTIME_PM
11938 /**
11939  * dp_flush_ring_hptp() - Update ring shadow
11940  *			  register HP/TP address on runtime
11941  *                        resume
11942  * @soc: DP soc context
11943  * @hal_srng: hal srng handle
11944  * Return: None
11945  */
11946 static
11947 void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
11948 {
11949 	if (hal_srng && hal_srng_get_clear_event(hal_srng,
11950 						 HAL_SRNG_FLUSH_EVENT)) {
11951 		/* Acquire the lock */
11952 		hal_srng_access_start(soc->hal_soc, hal_srng);
11953 
11954 		hal_srng_access_end(soc->hal_soc, hal_srng);
11955 
11956 		hal_srng_set_flush_last_ts(hal_srng);
11957 		dp_debug("flushed");
11958 	}
11959 }
11960 
11961 /**
11962  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
11963  * @soc_hdl: Datapath soc handle
11964  * @pdev_id: id of data path pdev handle
11965  *
11966  * DP is ready to runtime suspend if there are no pending TX packets.
11967  *
11968  * Return: QDF_STATUS
11969  */
11970 static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
11971 {
11972 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11973 	struct dp_pdev *pdev;
11974 	uint8_t i;
11975 
11976 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11977 	if (!pdev) {
11978 		dp_err("pdev is NULL");
11979 		return QDF_STATUS_E_INVAL;
11980 	}
11981 
11982 	/* Abort if there are any pending TX packets */
11983 	if (dp_get_tx_pending(dp_pdev_to_cdp_pdev(pdev)) > 0) {
11984 		dp_init_info("%pK: Abort suspend due to pending TX packets", soc);
11985 
11986 		/* perform a force flush if tx is pending */
11987 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
11988 			hal_srng_set_event(soc->tcl_data_ring[i].hal_srng,
11989 					   HAL_SRNG_FLUSH_EVENT);
11990 			dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
11991 		}
11992 
11993 		return QDF_STATUS_E_AGAIN;
11994 	}
11995 
11996 	if (dp_runtime_get_refcount(soc)) {
11997 		dp_init_info("refcount: %d", dp_runtime_get_refcount(soc));
11998 
11999 		return QDF_STATUS_E_AGAIN;
12000 	}
12001 
12002 	if (soc->intr_mode == DP_INTR_POLL)
12003 		qdf_timer_stop(&soc->int_timer);
12004 
12005 	dp_rx_fst_update_pm_suspend_status(soc, true);
12006 
12007 	return QDF_STATUS_SUCCESS;
12008 }
12009 
12010 #define DP_FLUSH_WAIT_CNT 10
12011 #define DP_RUNTIME_SUSPEND_WAIT_MS 10
12012 /**
12013  * dp_runtime_resume() - ensure DP is ready to runtime resume
12014  * @soc_hdl: Datapath soc handle
12015  * @pdev_id: id of data path pdev handle
12016  *
12017  * Resume DP for runtime PM.
12018  *
12019  * Return: QDF_STATUS
12020  */
12021 static QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
12022 {
12023 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12024 	int i, suspend_wait = 0;
12025 
12026 	if (soc->intr_mode == DP_INTR_POLL)
12027 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
12028 
12029 	/*
12030 	 * Wait until the dp runtime refcount becomes zero or the wait times
12031 	 * out, then flush any tx work deferred during runtime suspend.
12032 	 */
12033 	while (dp_runtime_get_refcount(soc) &&
12034 	       suspend_wait < DP_FLUSH_WAIT_CNT) {
12035 		qdf_sleep(DP_RUNTIME_SUSPEND_WAIT_MS);
12036 		suspend_wait++;
12037 	}
12038 
12039 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
12040 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
12041 	}
12042 
12043 	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
12044 	dp_rx_fst_update_pm_suspend_status(soc, false);
12045 
12046 	return QDF_STATUS_SUCCESS;
12047 }
12048 #endif /* FEATURE_RUNTIME_PM */
12049 
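/*
 * Illustrative sketch (hypothetical caller): the runtime-PM core is
 * expected to treat QDF_STATUS_E_AGAIN from dp_runtime_suspend() as
 * "busy, retry later" rather than as a hard failure:
 *
 *	if (dp_runtime_suspend(soc_hdl, pdev_id) == QDF_STATUS_E_AGAIN)
 *		... reschedule the suspend attempt ...
 */
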
12050 /**
12051  * dp_tx_get_success_ack_stats() - get tx success completion count
12052  * @soc_hdl: Datapath soc handle
12053  * @vdev_id: vdev identifier
12054  *
12055  * Return: tx success ack count
12056  */
12057 static uint32_t dp_tx_get_success_ack_stats(struct cdp_soc_t *soc_hdl,
12058 					    uint8_t vdev_id)
12059 {
12060 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12061 	struct cdp_vdev_stats *vdev_stats = NULL;
12062 	uint32_t tx_success;
12063 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12064 						     DP_MOD_ID_CDP);
12065 
12066 	if (!vdev) {
12067 		dp_cdp_err("%pK: Invalid vdev id %d", soc, vdev_id);
12068 		return 0;
12069 	}
12070 
12071 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
12072 	if (!vdev_stats) {
12073 		dp_cdp_err("%pK: DP alloc failure - unable to alloc vdev stats", soc);
12074 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12075 		return 0;
12076 	}
12077 
12078 	dp_aggregate_vdev_stats(vdev, vdev_stats);
12079 
12080 	tx_success = vdev_stats->tx.tx_success.num;
12081 	qdf_mem_free(vdev_stats);
12082 
12083 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12084 	return tx_success;
12085 }
12086 
12087 #ifdef WLAN_SUPPORT_DATA_STALL
12088 /**
12089  * dp_register_data_stall_detect_cb() - register data stall callback
12090  * @soc_hdl: Datapath soc handle
12091  * @pdev_id: id of data path pdev handle
12092  * @data_stall_detect_callback: data stall callback function
12093  *
12094  * Return: QDF_STATUS Enumeration
12095  */
12096 static
12097 QDF_STATUS dp_register_data_stall_detect_cb(
12098 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12099 			data_stall_detect_cb data_stall_detect_callback)
12100 {
12101 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12102 	struct dp_pdev *pdev;
12103 
12104 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12105 	if (!pdev) {
12106 		dp_err("pdev NULL!");
12107 		return QDF_STATUS_E_INVAL;
12108 	}
12109 
12110 	pdev->data_stall_detect_callback = data_stall_detect_callback;
12111 	return QDF_STATUS_SUCCESS;
12112 }
12113 
12114 /**
12115  * dp_deregister_data_stall_detect_cb() - de-register data stall callback
12116  * @soc_hdl: Datapath soc handle
12117  * @pdev_id: id of data path pdev handle
12118  * @data_stall_detect_callback: data stall callback function
12119  *
12120  * Return: QDF_STATUS Enumeration
12121  */
12122 static
12123 QDF_STATUS dp_deregister_data_stall_detect_cb(
12124 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12125 			data_stall_detect_cb data_stall_detect_callback)
12126 {
12127 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12128 	struct dp_pdev *pdev;
12129 
12130 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12131 	if (!pdev) {
12132 		dp_err("pdev NULL!");
12133 		return QDF_STATUS_E_INVAL;
12134 	}
12135 
12136 	pdev->data_stall_detect_callback = NULL;
12137 	return QDF_STATUS_SUCCESS;
12138 }
12139 
12140 /**
12141  * dp_txrx_post_data_stall_event() - post data stall event
12142  * @soc_hdl: Datapath soc handle
12143  * @indicator: Module triggering data stall
12144  * @data_stall_type: data stall event type
12145  * @pdev_id: pdev id
12146  * @vdev_id_bitmap: vdev id bitmap
12147  * @recovery_type: data stall recovery type
12148  *
12149  * Return: None
12150  */
12151 static void
12152 dp_txrx_post_data_stall_event(struct cdp_soc_t *soc_hdl,
12153 			      enum data_stall_log_event_indicator indicator,
12154 			      enum data_stall_log_event_type data_stall_type,
12155 			      uint32_t pdev_id, uint32_t vdev_id_bitmap,
12156 			      enum data_stall_log_recovery_type recovery_type)
12157 {
12158 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12159 	struct data_stall_event_info data_stall_info;
12160 	struct dp_pdev *pdev;
12161 
12162 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12163 	if (!pdev) {
12164 		dp_err("pdev NULL!");
12165 		return;
12166 	}
12167 
12168 	if (!pdev->data_stall_detect_callback) {
12169 		dp_err("data stall cb not registered!");
12170 		return;
12171 	}
12172 
12173 	dp_info("data_stall_type: %x pdev_id: %d",
12174 		data_stall_type, pdev_id);
12175 
12176 	data_stall_info.indicator = indicator;
12177 	data_stall_info.data_stall_type = data_stall_type;
12178 	data_stall_info.vdev_id_bitmap = vdev_id_bitmap;
12179 	data_stall_info.pdev_id = pdev_id;
12180 	data_stall_info.recovery_type = recovery_type;
12181 
12182 	pdev->data_stall_detect_callback(&data_stall_info);
12183 }
12184 #endif /* WLAN_SUPPORT_DATA_STALL */
12185 
12186 #ifdef WLAN_FEATURE_STATS_EXT
12187 /* rx hw stats event wait timeout in ms */
12188 #define DP_REO_STATUS_STATS_TIMEOUT 1500
12189 /**
12190  * dp_txrx_ext_stats_request() - request dp txrx extended stats
12191  * @soc_hdl: soc handle
12192  * @pdev_id: pdev id
12193  * @req: stats request
12194  *
12195  * Return: QDF_STATUS
12196  */
12197 static QDF_STATUS
12198 dp_txrx_ext_stats_request(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
12199 			  struct cdp_txrx_ext_stats *req)
12200 {
12201 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12202 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12203 
12204 	if (!pdev) {
12205 		dp_err("pdev is null");
12206 		return QDF_STATUS_E_INVAL;
12207 	}
12208 
12209 	dp_aggregate_pdev_stats(pdev);
12210 
12211 	req->tx_msdu_enqueue = pdev->stats.tx_i.processed.num;
12212 	req->tx_msdu_overflow = pdev->stats.tx_i.dropped.ring_full;
12213 	req->rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
12214 	req->rx_mpdu_delivered = soc->ext_stats.rx_mpdu_received;
12215 	req->rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
12216 	/* only count error source from RXDMA */
12217 	req->rx_mpdu_error = pdev->stats.err.rxdma_error;
12218 
12219 	return QDF_STATUS_SUCCESS;
12220 }
12221 
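/*
 * Illustrative sketch (hypothetical caller): the snapshot is returned
 * synchronously in the caller-supplied struct:
 *
 *	struct cdp_txrx_ext_stats req = { 0 };
 *
 *	if (dp_txrx_ext_stats_request(soc_hdl, pdev_id, &req) ==
 *	    QDF_STATUS_SUCCESS)
 *		dp_info("enq %u miss %u", req.tx_msdu_enqueue,
 *			req.rx_mpdu_missed);
 */
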
12222 /**
12223  * dp_rx_hw_stats_cb() - callback for the rx hw stats query response
12224  * @soc: soc handle
12225  * @cb_ctxt: callback context
12226  * @reo_status: reo command response status
12227  *
12228  * Return: None
12229  */
12230 static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
12231 			      union hal_reo_status *reo_status)
12232 {
12233 	struct dp_req_rx_hw_stats_t *rx_hw_stats = cb_ctxt;
12234 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
12235 	bool is_query_timeout;
12236 
12237 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
12238 	is_query_timeout = rx_hw_stats->is_query_timeout;
12239 	/* free the cb_ctxt if all pending tid stats query is received */
12240 	/* free the cb_ctxt once all pending tid stats queries are received */
12241 		if (!is_query_timeout) {
12242 			qdf_event_set(&soc->rx_hw_stats_event);
12243 			soc->is_last_stats_ctx_init = false;
12244 		}
12245 
12246 		qdf_mem_free(rx_hw_stats);
12247 	}
12248 
12249 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
12250 		dp_info("REO stats failure %d",
12251 			queue_status->header.status);
12252 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
12253 		return;
12254 	}
12255 
12256 	if (!is_query_timeout) {
12257 		soc->ext_stats.rx_mpdu_received +=
12258 					queue_status->mpdu_frms_cnt;
12259 		soc->ext_stats.rx_mpdu_missed +=
12260 					queue_status->hole_cnt;
12261 	}
12262 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
12263 }
12264 
12265 /**
12266  * dp_request_rx_hw_stats() - request rx hardware stats
12267  * @soc_hdl: soc handle
12268  * @vdev_id: vdev id
12269  *
12270  * Return: QDF_STATUS
12271  */
12272 static QDF_STATUS
12273 dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
12274 {
12275 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12276 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
12277 						     DP_MOD_ID_CDP);
12278 	struct dp_peer *peer = NULL;
12279 	QDF_STATUS status;
12280 	struct dp_req_rx_hw_stats_t *rx_hw_stats;
12281 	int rx_stats_sent_cnt = 0;
12282 	uint32_t last_rx_mpdu_received;
12283 	uint32_t last_rx_mpdu_missed;
12284 
12285 	if (!vdev) {
12286 		dp_err("vdev is null for vdev_id: %u", vdev_id);
12287 		status = QDF_STATUS_E_INVAL;
12288 		goto out;
12289 	}
12290 
12291 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_CDP);
12292 
12293 	if (!peer) {
12294 		dp_err("Peer is NULL");
12295 		status = QDF_STATUS_E_INVAL;
12296 		goto out;
12297 	}
12298 
12299 	rx_hw_stats = qdf_mem_malloc(sizeof(*rx_hw_stats));
12300 
12301 	if (!rx_hw_stats) {
12302 		dp_err("malloc failed for hw stats structure");
12303 		status = QDF_STATUS_E_INVAL;
12304 		goto out;
12305 	}
12306 
12307 	qdf_event_reset(&soc->rx_hw_stats_event);
12308 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
12309 	/* save the last soc cumulative stats and reset them to 0 */
12310 	last_rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
12311 	last_rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
12312 	soc->ext_stats.rx_mpdu_received = 0;
12313 	soc->ext_stats.rx_mpdu_missed = 0;
12314 
12315 	rx_stats_sent_cnt =
12316 		dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, rx_hw_stats);
12317 	if (!rx_stats_sent_cnt) {
12318 		dp_err("no tid stats sent successfully");
12319 		qdf_mem_free(rx_hw_stats);
12320 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
12321 		status = QDF_STATUS_E_INVAL;
12322 		goto out;
12323 	}
12324 	qdf_atomic_set(&rx_hw_stats->pending_tid_stats_cnt,
12325 		       rx_stats_sent_cnt);
12326 	rx_hw_stats->is_query_timeout = false;
12327 	soc->is_last_stats_ctx_init = true;
12328 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
12329 
12330 	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
12331 				       DP_REO_STATUS_STATS_TIMEOUT);
12332 
12333 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
12334 	if (status != QDF_STATUS_SUCCESS) {
12335 		dp_info("rx hw stats event timeout");
12336 		if (soc->is_last_stats_ctx_init)
12337 			rx_hw_stats->is_query_timeout = true;
12338 		/*
12339 		 * If query timeout happened, use the last saved stats
12340 		 * for this query.
12341 		 */
12342 		soc->ext_stats.rx_mpdu_received = last_rx_mpdu_received;
12343 		soc->ext_stats.rx_mpdu_missed = last_rx_mpdu_missed;
12344 	}
12345 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
12346 
12347 out:
12348 	if (peer)
12349 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
12350 	if (vdev)
12351 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
12352 
12353 	return status;
12354 }
12355 
12356 /**
12357  * dp_reset_rx_hw_ext_stats() - Reset rx hardware ext stats
12358  * @soc_hdl: soc handle
12359  *
12360  * Return: None
12361  */
12362 static
12363 void dp_reset_rx_hw_ext_stats(struct cdp_soc_t *soc_hdl)
12364 {
12365 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
12366 
12367 	soc->ext_stats.rx_mpdu_received = 0;
12368 	soc->ext_stats.rx_mpdu_missed = 0;
12369 }
12370 #endif /* WLAN_FEATURE_STATS_EXT */
12371 
12372 #ifdef DP_PEER_EXTENDED_API
12373 static struct cdp_misc_ops dp_ops_misc = {
12374 #ifdef FEATURE_WLAN_TDLS
12375 	.tx_non_std = dp_tx_non_std,
12376 #endif /* FEATURE_WLAN_TDLS */
12377 	.get_opmode = dp_get_opmode,
12378 #ifdef FEATURE_RUNTIME_PM
12379 	.runtime_suspend = dp_runtime_suspend,
12380 	.runtime_resume = dp_runtime_resume,
12381 #endif /* FEATURE_RUNTIME_PM */
12382 	.pkt_log_init = dp_pkt_log_init,
12383 	.pkt_log_con_service = dp_pkt_log_con_service,
12384 	.get_num_rx_contexts = dp_get_num_rx_contexts,
12385 	.get_tx_ack_stats = dp_tx_get_success_ack_stats,
12386 #ifdef WLAN_SUPPORT_DATA_STALL
12387 	.txrx_data_stall_cb_register = dp_register_data_stall_detect_cb,
12388 	.txrx_data_stall_cb_deregister = dp_deregister_data_stall_detect_cb,
12389 	.txrx_post_data_stall_event = dp_txrx_post_data_stall_event,
12390 #endif
12391 
12392 #ifdef WLAN_FEATURE_STATS_EXT
12393 	.txrx_ext_stats_request = dp_txrx_ext_stats_request,
12394 	.request_rx_hw_stats = dp_request_rx_hw_stats,
12395 	.reset_rx_hw_ext_stats = dp_reset_rx_hw_ext_stats,
12396 #endif /* WLAN_FEATURE_STATS_EXT */
12397 	.vdev_inform_ll_conn = dp_vdev_inform_ll_conn,
12398 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
12399 	.set_swlm_enable = dp_soc_set_swlm_enable,
12400 	.is_swlm_enabled = dp_soc_is_swlm_enabled,
12401 #endif
12402 	.display_txrx_hw_info = dp_display_srng_info,
12403 };
12404 #endif
12405 
12406 #ifdef DP_FLOW_CTL
12407 static struct cdp_flowctl_ops dp_ops_flowctl = {
12408 	/* WIFI 3.0 DP implement as required. */
12409 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
12410 	.flow_pool_map_handler = dp_tx_flow_pool_map,
12411 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
12412 	.register_pause_cb = dp_txrx_register_pause_cb,
12413 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
12414 	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
12415 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
12416 };
12417 
12418 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
12419 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
12420 };
12421 #endif
12422 
12423 #ifdef IPA_OFFLOAD
12424 static struct cdp_ipa_ops dp_ops_ipa = {
12425 	.ipa_get_resource = dp_ipa_get_resource,
12426 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
12427 	.ipa_op_response = dp_ipa_op_response,
12428 	.ipa_register_op_cb = dp_ipa_register_op_cb,
12429 	.ipa_deregister_op_cb = dp_ipa_deregister_op_cb,
12430 	.ipa_get_stat = dp_ipa_get_stat,
12431 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
12432 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
12433 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
12434 	.ipa_setup = dp_ipa_setup,
12435 	.ipa_cleanup = dp_ipa_cleanup,
12436 	.ipa_setup_iface = dp_ipa_setup_iface,
12437 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
12438 	.ipa_enable_pipes = dp_ipa_enable_pipes,
12439 	.ipa_disable_pipes = dp_ipa_disable_pipes,
12440 	.ipa_set_perf_level = dp_ipa_set_perf_level,
12441 	.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd,
12442 	.ipa_tx_buf_smmu_mapping = dp_ipa_tx_buf_smmu_mapping,
12443 	.ipa_tx_buf_smmu_unmapping = dp_ipa_tx_buf_smmu_unmapping
12444 };
12445 #endif
12446 
12447 #ifdef DP_POWER_SAVE
12448 static QDF_STATUS dp_bus_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
12449 {
12450 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12451 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12452 	int timeout = SUSPEND_DRAIN_WAIT;
12453 	int drain_wait_delay = 50; /* 50 ms */
12454 
12455 	if (qdf_unlikely(!pdev)) {
12456 		dp_err("pdev is NULL");
12457 		return QDF_STATUS_E_INVAL;
12458 	}
12459 
12460 	/* Wait for pending TX packets to drain; abort suspend on timeout */
12461 	while (dp_get_tx_pending((struct cdp_pdev *)pdev) > 0) {
12462 		qdf_sleep(drain_wait_delay);
12463 		if (timeout <= 0) {
12464 			dp_err("TX frames are pending, abort suspend");
12465 			return QDF_STATUS_E_TIMEOUT;
12466 		}
12467 		timeout = timeout - drain_wait_delay;
12468 	}
12469 
12470 	if (soc->intr_mode == DP_INTR_POLL)
12471 		qdf_timer_stop(&soc->int_timer);
12472 
12473 	/* Stop monitor reap timer and reap any pending frames in ring */
12474 	if (((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) ||
12475 	     dp_is_enable_reap_timer_non_pkt(pdev)) &&
12476 	    soc->reap_timer_init) {
12477 		qdf_timer_sync_cancel(&soc->mon_reap_timer);
12478 		dp_service_mon_rings(soc, DP_MON_REAP_BUDGET);
12479 	}
12480 
12481 	dp_suspend_fse_cache_flush(soc);
12482 
12483 	return QDF_STATUS_SUCCESS;
12484 }
12485 
12486 static QDF_STATUS dp_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
12487 {
12488 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12489 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12490 
12491 	if (qdf_unlikely(!pdev)) {
12492 		dp_err("pdev is NULL");
12493 		return QDF_STATUS_E_INVAL;
12494 	}
12495 
12496 	if (soc->intr_mode == DP_INTR_POLL)
12497 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
12498 
12499 	/* Start monitor reap timer */
12500 	if (((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) ||
12501 	     dp_is_enable_reap_timer_non_pkt(pdev)) &&
12502 	    soc->reap_timer_init)
12503 		qdf_timer_mod(&soc->mon_reap_timer,
12504 			      DP_INTR_POLL_TIMER_MS);
12505 
12506 	dp_resume_fse_cache_flush(soc);
12507 
12508 	return QDF_STATUS_SUCCESS;
12509 }
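
/*
 * Note: dp_bus_resume() mirrors dp_bus_suspend(): the same pktlog /
 * non-pkt reap conditions that cancelled the monitor reap timer on suspend
 * re-arm it here, and the poll-mode interrupt timer is restarted.
 */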
12510 
12511 /**
12512  * dp_process_wow_ack_rsp() - process wow ack response
12513  * @soc_hdl: datapath soc handle
12514  * @pdev_id: data path pdev handle id
12515  *
12516  * Return: none
12517  */
12518 static void dp_process_wow_ack_rsp(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
12519 {
12520 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12521 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12522 
12523 	if (qdf_unlikely(!pdev)) {
12524 		dp_err("pdev is NULL");
12525 		return;
12526 	}
12527 
12528 	/*
12529 	 * As part of WoW enable, FW disables the mon status ring; on the WoW
12530 	 * ack response from FW, reap the mon status ring to make sure no
12531 	 * packets are left pending in the ring.
12532 	 */
12533 	if (((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) ||
12534 	     dp_is_enable_reap_timer_non_pkt(pdev)) &&
12535 	    soc->reap_timer_init) {
12536 		dp_service_mon_rings(soc, DP_MON_REAP_BUDGET);
12537 	}
12538 }
12539 
12540 /**
12541  * dp_process_target_suspend_req() - process target suspend request
12542  * @soc_hdl: datapath soc handle
12543  * @pdev_id: data path pdev handle id
12544  *
12545  * Return: none
12546  */
12547 static void dp_process_target_suspend_req(struct cdp_soc_t *soc_hdl,
12548 					  uint8_t pdev_id)
12549 {
12550 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
12551 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
12552 
12553 	if (qdf_unlikely(!pdev)) {
12554 		dp_err("pdev is NULL");
12555 		return;
12556 	}
12557 
12558 	/* Stop monitor reap timer and reap any pending frames in ring */
12559 	if (((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) ||
12560 	     dp_is_enable_reap_timer_non_pkt(pdev)) &&
12561 	    soc->reap_timer_init) {
12562 		qdf_timer_sync_cancel(&soc->mon_reap_timer);
12563 		dp_service_mon_rings(soc, DP_MON_REAP_BUDGET);
12564 	}
12565 }
12566 
12567 static struct cdp_bus_ops dp_ops_bus = {
12568 	.bus_suspend = dp_bus_suspend,
12569 	.bus_resume = dp_bus_resume,
12570 	.process_wow_ack_rsp = dp_process_wow_ack_rsp,
12571 	.process_target_suspend_req = dp_process_target_suspend_req
12572 };
12573 #endif
12574 
12575 #ifdef DP_FLOW_CTL
12576 static struct cdp_throttle_ops dp_ops_throttle = {
12577 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
12578 };
12579 
12580 static struct cdp_cfg_ops dp_ops_cfg = {
12581 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
12582 };
12583 #endif
12584 
12585 #ifdef DP_PEER_EXTENDED_API
12586 static struct cdp_ocb_ops dp_ops_ocb = {
12587 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
12588 };
12589 
12590 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
12591 	.clear_stats = dp_txrx_clear_dump_stats,
12592 };
12593 
12594 static struct cdp_peer_ops dp_ops_peer = {
12595 	.register_peer = dp_register_peer,
12596 	.clear_peer = dp_clear_peer,
12597 	.find_peer_exist = dp_find_peer_exist,
12598 	.find_peer_exist_on_vdev = dp_find_peer_exist_on_vdev,
12599 	.find_peer_exist_on_other_vdev = dp_find_peer_exist_on_other_vdev,
12600 	.peer_state_update = dp_peer_state_update,
12601 	.get_vdevid = dp_get_vdevid,
12602 	.get_vdev_by_peer_addr = dp_get_vdev_by_peer_addr,
12603 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
12604 	.get_peer_state = dp_get_peer_state,
12605 	.peer_flush_frags = dp_peer_flush_frags,
12606 };
12607 #endif
12608 
12609 static struct cdp_ops dp_txrx_ops = {
12610 	.cmn_drv_ops = &dp_ops_cmn,
12611 	.ctrl_ops = &dp_ops_ctrl,
12612 	.me_ops = &dp_ops_me,
12613 	.mon_ops = &dp_ops_mon,
12614 	.host_stats_ops = &dp_ops_host_stats,
12615 	.wds_ops = &dp_ops_wds,
12616 	.raw_ops = &dp_ops_raw,
12617 #ifdef PEER_FLOW_CONTROL
12618 	.pflow_ops = &dp_ops_pflow,
12619 #endif /* PEER_FLOW_CONTROL */
12620 #ifdef DP_PEER_EXTENDED_API
12621 	.misc_ops = &dp_ops_misc,
12622 	.ocb_ops = &dp_ops_ocb,
12623 	.peer_ops = &dp_ops_peer,
12624 	.mob_stats_ops = &dp_ops_mob_stats,
12625 #endif
12626 #ifdef DP_FLOW_CTL
12627 	.cfg_ops = &dp_ops_cfg,
12628 	.flowctl_ops = &dp_ops_flowctl,
12629 	.l_flowctl_ops = &dp_ops_l_flowctl,
12630 	.throttle_ops = &dp_ops_throttle,
12631 #endif
12632 #ifdef IPA_OFFLOAD
12633 	.ipa_ops = &dp_ops_ipa,
12634 #endif
12635 #ifdef DP_POWER_SAVE
12636 	.bus_ops = &dp_ops_bus,
12637 #endif
12638 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
12639 	.cfr_ops = &dp_ops_cfr,
12640 #endif
12641 #ifdef WLAN_SUPPORT_MSCS
12642 	.mscs_ops = &dp_ops_mscs,
12643 #endif
12644 #ifdef WLAN_SUPPORT_MESH_LATENCY
12645 	.mesh_latency_ops = &dp_ops_mesh_latency,
12646 #endif
12647 };
12648 
12649 /*
12650  * dp_soc_set_txrx_ring_map() - Set the default tx ring map per intr context
12651  * @soc: DP handle for soc
12652  *
12653  * Return: Void
12654  */
12655 void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
12656 {
12657 	uint32_t i;
12658 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
12659 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
12660 	}
12661 }
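
/*
 * Note: this applies the DP_NSS_DEFAULT_MAP row of dp_cpu_ring_map, giving
 * each of the WLAN_CFG_INT_NUM_CONTEXTS interrupt contexts its default tx
 * ring assignment.
 */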
12662 
12663 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
12664 	defined(QCA_WIFI_QCA5018)
12665 /**
12666  * dp_soc_attach_wifi3() - Attach txrx SOC
12667  * @ctrl_psoc: Opaque SOC handle from control plane
12668  * @hif_handle: Opaque HIF handle
12669  * @htc_handle: Opaque HTC handle
12670  * @qdf_osdev: QDF device
12671  * @ol_ops: Offload Operations
12672  * @device_id: Device ID
12673  *
12674  * Return: DP SOC handle on success, NULL on failure
12675  */
12676 struct cdp_soc_t *
12677 dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
12678 		    struct hif_opaque_softc *hif_handle,
12679 		    HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
12680 		    struct ol_if_ops *ol_ops, uint16_t device_id)
12681 {
12682 	struct dp_soc *dp_soc = NULL;
12683 
12684 	dp_soc = dp_soc_attach(ctrl_psoc, hif_handle, htc_handle, qdf_osdev,
12685 			       ol_ops, device_id);
12686 	return dp_soc_to_cdp_soc_t(dp_soc);
12687 }
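
/*
 * Illustrative attach/init ordering (a sketch based on the entry points in
 * this file, not a mandated sequence): the handle returned above is later
 * initialized via dp_soc_init_wifi3(), e.g.
 *
 *	struct cdp_soc_t *cdp_soc;
 *
 *	cdp_soc = dp_soc_attach_wifi3(ctrl_psoc, hif_handle, htc_handle,
 *				      qdf_osdev, ol_ops, device_id);
 *	if (cdp_soc)
 *		dp_soc_init_wifi3(cdp_soc, ctrl_psoc, hif_handle, htc_handle,
 *				  qdf_osdev, ol_ops, device_id);
 */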
12688 
12689 static inline void dp_soc_set_def_pdev(struct dp_soc *soc)
12690 {
12691 	int lmac_id;
12692 
12693 	for (lmac_id = 0; lmac_id < MAX_NUM_LMAC_HW; lmac_id++) {
12694 		/* Set default host PDEV ID for lmac_id */
12695 		wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
12696 				      INVALID_PDEV_ID, lmac_id);
12697 	}
12698 }
12699 
12700 static uint32_t
12701 dp_get_link_desc_id_start(uint16_t arch_id)
12702 {
12703 	switch (arch_id) {
12704 	case LITHIUM_DP:
12705 		return LINK_DESC_ID_START_21_BITS_COOKIE;
12706 	case BERYLLIUM_DP:
12707 		return LINK_DESC_ID_START_20_BITS_COOKIE;
12708 	default:
12709 		dp_err("unknown arch_id 0x%x", arch_id);
12710 		QDF_BUG(0);
12711 		return LINK_DESC_ID_START_21_BITS_COOKIE;
12712 	}
12713 }
12714 
12715 /**
12716  * dp_soc_attach() - Attach txrx SOC
12717  * @ctrl_psoc: Opaque SOC handle from control plane
12718  * @hif_handle: Opaque HIF handle
12719  * @htc_handle: Opaque HTC handle
12720  * @qdf_osdev: QDF device
12721  * @ol_ops: Offload Operations
12722  * @device_id: Device ID
12723  *
12724  * Return: DP SOC handle on success, NULL on failure
12725  */
12726 static struct dp_soc *
12727 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
12728 	      struct hif_opaque_softc *hif_handle, HTC_HANDLE htc_handle,
12729 	      qdf_device_t qdf_osdev, struct ol_if_ops *ol_ops,
12730 	      uint16_t device_id)
12731 {
12732 	int int_ctx;
12733 	struct dp_soc *soc =  NULL;
12734 	uint16_t arch_id;
12735 
12736 	if (!hif_handle) {
12737 		dp_err("HIF handle is NULL");
12738 		goto fail0;
12739 	}
12740 	arch_id = cdp_get_arch_type_from_devid(device_id);
12741 	soc = qdf_mem_malloc(dp_get_soc_context_size(device_id));
12742 	if (!soc) {
12743 		dp_err("DP SOC memory allocation failed");
12744 		goto fail0;
12745 	}
12746 	dp_info("soc memory allocated %pK", soc);
12747 	soc->hif_handle = hif_handle;
12748 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
12749 	if (!soc->hal_soc)
12750 		goto fail1;
12751 
12752 	int_ctx = 0;
12753 	soc->device_id = device_id;
12754 	soc->cdp_soc.ops = &dp_txrx_ops;
12755 	soc->cdp_soc.ol_ops = ol_ops;
12756 	soc->ctrl_psoc = ctrl_psoc;
12757 	soc->osdev = qdf_osdev;
12758 	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
12759 	hal_rx_get_tlv_size(soc->hal_soc, &soc->rx_pkt_tlv_size,
12760 			    &soc->rx_mon_pkt_tlv_size);
12761 
12762 	soc->arch_id = arch_id;
12763 	soc->link_desc_id_start =
12764 			dp_get_link_desc_id_start(soc->arch_id);
12765 	dp_configure_arch_ops(soc);
12766 
12767 	/* Reset wbm sg list and flags */
12768 	dp_rx_wbm_sg_list_reset(soc);
12769 
12770 	dp_soc_tx_hw_desc_history_attach(soc);
12771 	dp_soc_rx_history_attach(soc);
12772 	dp_soc_tx_history_attach(soc);
12773 	wlan_set_srng_cfg(&soc->wlan_srng_cfg);
12774 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
12775 	if (!soc->wlan_cfg_ctx) {
12776 		dp_err("wlan_cfg_soc_attach failed");
12777 		goto fail1;
12778 	}
12779 
12780 	dp_soc_cfg_attach(soc);
12781 
12782 	if (dp_hw_link_desc_pool_banks_alloc(soc, WLAN_INVALID_PDEV_ID)) {
12783 		dp_err("failed to allocate link desc pool banks");
12784 		goto fail2;
12785 	}
12786 
12787 	if (dp_hw_link_desc_ring_alloc(soc)) {
12788 		dp_err("failed to allocate link_desc_ring");
12789 		goto fail3;
12790 	}
12791 
12792 	if (dp_soc_srng_alloc(soc)) {
12793 		dp_err("failed to allocate soc srng rings");
12794 		goto fail4;
12795 	}
12796 
12797 	if (dp_soc_tx_desc_sw_pools_alloc(soc)) {
12798 		dp_err("dp_soc_tx_desc_sw_pools_alloc failed");
12799 		goto fail5;
12800 	}
12801 
12802 	if (!QDF_IS_STATUS_SUCCESS(soc->arch_ops.txrx_soc_attach(soc))) {
12803 		dp_err("unable to do target specific attach");
12804 		goto fail6;
12805 	}
12806 
12807 	dp_soc_swlm_attach(soc);
12808 	dp_soc_set_interrupt_mode(soc);
12809 	dp_soc_set_def_pdev(soc);
12810 
12811 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
12812 		qdf_dma_mem_stats_read(),
12813 		qdf_heap_mem_stats_read(),
12814 		qdf_skb_total_mem_stats_read());
12815 
12816 	return soc;
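/*
 * Error unwind: the failN labels below release resources in the reverse
 * order of the allocations above, so each failure path frees only what was
 * successfully set up.
 */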
12817 fail6:
12818 	dp_soc_tx_desc_sw_pools_free(soc);
12819 fail5:
12820 	dp_soc_srng_free(soc);
12821 fail4:
12822 	dp_hw_link_desc_ring_free(soc);
12823 fail3:
12824 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
12825 fail2:
12826 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
12827 fail1:
12828 	qdf_mem_free(soc);
12829 fail0:
12830 	return NULL;
12831 }
12832 
12833 /**
12834  * dp_soc_init() - Initialize txrx SOC
12835  * @soc: Opaque DP SOC handle
12836  * @htc_handle: Opaque HTC handle
12837  * @hif_handle: Opaque HIF handle
12838  *
12839  * Return: DP SOC handle on success, NULL on failure
12840  */
12841 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
12842 		  struct hif_opaque_softc *hif_handle)
12843 {
12844 	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
12845 	bool is_monitor_mode = false;
12846 	struct hal_reo_params reo_params;
12847 	uint8_t i;
12848 	int num_dp_msi;
12849 
12850 	wlan_minidump_log(soc, sizeof(*soc), soc->ctrl_psoc,
12851 			  WLAN_MD_DP_SOC, "dp_soc");
12852 
12853 	htt_soc = htt_soc_attach(soc, htc_handle);
12854 	if (!htt_soc)
12855 		goto fail0;
12856 
12857 	soc->htt_handle = htt_soc;
12858 
12859 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
12860 		goto fail1;
12861 
12862 	htt_set_htc_handle(htt_soc, htc_handle);
12863 	soc->hif_handle = hif_handle;
12864 
12865 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
12866 	if (!soc->hal_soc)
12867 		goto fail2;
12868 
12869 	dp_soc_cfg_init(soc);
12870 
12871 	/* Reset/Initialize wbm sg list and flags */
12872 	dp_rx_wbm_sg_list_reset(soc);
12873 
12874 	/* Note: Any SRNG ring initialization should happen only after
12875 	 * Interrupt mode is set and followed by filling up the
12876 	 * interrupt mask. IT SHOULD ALWAYS BE IN THIS ORDER.
12877 	 */
12878 	dp_soc_set_interrupt_mode(soc);
12879 	if (soc->cdp_soc.ol_ops->get_con_mode &&
12880 	    soc->cdp_soc.ol_ops->get_con_mode() ==
12881 	    QDF_GLOBAL_MONITOR_MODE)
12882 		is_monitor_mode = true;
12883 
12884 	num_dp_msi = dp_get_num_msi_available(soc, soc->intr_mode);
12885 	if (num_dp_msi < 0) {
12886 		dp_init_err("%pK: dp_interrupt assignment failed", soc);
12887 		goto fail3;
12888 	}
12889 	wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, num_dp_msi,
12890 				     soc->intr_mode, is_monitor_mode);
12891 
12892 	/* initialize WBM_IDLE_LINK ring */
12893 	if (dp_hw_link_desc_ring_init(soc)) {
12894 		dp_init_err("%pK: dp_hw_link_desc_ring_init failed", soc);
12895 		goto fail3;
12896 	}
12897 
12898 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
12899 
12900 	if (dp_soc_srng_init(soc)) {
12901 		dp_init_err("%pK: dp_soc_srng_init failed", soc);
12902 		goto fail4;
12903 	}
12904 
12905 	if (htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
12906 			       htt_get_htc_handle(htt_soc),
12907 			       soc->hal_soc, soc->osdev) == NULL)
12908 		goto fail5;
12909 
12910 	/* Initialize descriptors in TCL Rings */
12911 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
12912 		hal_tx_init_data_ring(soc->hal_soc,
12913 				      soc->tcl_data_ring[i].hal_srng);
12914 	}
12915 
12916 	if (dp_soc_tx_desc_sw_pools_init(soc)) {
12917 		dp_init_err("%pK: dp_soc_tx_desc_sw_pools_init failed", soc);
12918 		goto fail6;
12919 	}
12920 
12921 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
12922 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
12923 	soc->cce_disable = false;
12924 	soc->max_ast_ageout_count = MAX_AST_AGEOUT_COUNT;
12925 
12926 	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
12927 	qdf_spinlock_create(&soc->vdev_map_lock);
12928 	qdf_atomic_init(&soc->num_tx_outstanding);
12929 	qdf_atomic_init(&soc->num_tx_exception);
12930 	soc->num_tx_allowed =
12931 		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);
12932 
12933 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
12934 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
12935 				CDP_CFG_MAX_PEER_ID);
12936 
12937 		if (ret != -EINVAL)
12938 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
12939 
12940 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
12941 				CDP_CFG_CCE_DISABLE);
12942 		if (ret == 1)
12943 			soc->cce_disable = true;
12944 	}
12945 
12946 	/*
12947 	 * Skip registering hw ring interrupts for WMAC2 on IPQ6018
12948 	 * and IPQ5018, as WMAC2 is not present in these platforms.
12949 	 */
12950 	if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018 ||
12951 	    soc->disable_mac2_intr)
12952 		dp_soc_disable_unused_mac_intr_mask(soc, 0x2);
12953 
12954 	/*
12955 	 * Skip registering hw ring interrupts for WMAC1 on IPQ5018
12956 	 * as WMAC1 is not present in this platform.
12957 	 */
12958 	if (soc->disable_mac1_intr)
12959 		dp_soc_disable_unused_mac_intr_mask(soc, 0x1);
12960 
12961 	/* Setup HW REO */
12962 	qdf_mem_zero(&reo_params, sizeof(reo_params));
12963 
12964 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
12965 		/*
12966 		 * Reo ring remap is not required if both radios
12967 		 * are offloaded to NSS
12968 		 */
12969 		if (dp_reo_remap_config(soc,
12970 					&reo_params.remap1,
12971 					&reo_params.remap2))
12972 			reo_params.rx_hash_enabled = true;
12973 		else
12974 			reo_params.rx_hash_enabled = false;
12975 	}
12976 
12977 	/* setup the global rx defrag waitlist */
12978 	TAILQ_INIT(&soc->rx.defrag.waitlist);
12979 	soc->rx.defrag.timeout_ms =
12980 		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
12981 	soc->rx.defrag.next_flush_ms = 0;
12982 	soc->rx.flags.defrag_timeout_check =
12983 		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
12984 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
12985 
12986 	/*
12987 	 * set the fragment destination ring
12988 	 */
12989 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
12990 
12991 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
12992 		reo_params.alt_dst_ind_0 = REO_REMAP_RELEASE;
12993 
12994 	hal_reo_setup(soc->hal_soc, &reo_params);
12995 
12996 	hal_reo_set_err_dst_remap(soc->hal_soc);
12997 
12998 	qdf_atomic_set(&soc->cmn_init_done, 1);
12999 
13000 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
13001 
13002 	qdf_spinlock_create(&soc->ast_lock);
13003 	dp_peer_mec_spinlock_create(soc);
13004 
13005 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
13006 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
13007 	INIT_RX_HW_STATS_LOCK(soc);
13008 
13009 	qdf_nbuf_queue_init(&soc->invalid_buf_queue);
13010 	/* fill the tx/rx cpu ring map*/
13011 	dp_soc_set_txrx_ring_map(soc);
13012 
13013 	TAILQ_INIT(&soc->inactive_peer_list);
13014 	qdf_spinlock_create(&soc->inactive_peer_list_lock);
13015 	TAILQ_INIT(&soc->inactive_vdev_list);
13016 	qdf_spinlock_create(&soc->inactive_vdev_list_lock);
13017 	qdf_spinlock_create(&soc->htt_stats.lock);
13018 	/* initialize work queue for stats processing */
13019 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
13020 
13021 	dp_reo_desc_deferred_freelist_create(soc);
13022 
13023 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
13024 		qdf_dma_mem_stats_read(),
13025 		qdf_heap_mem_stats_read(),
13026 		qdf_skb_total_mem_stats_read());
13027 
13028 	return soc;
13029 fail6:
13030 	htt_soc_htc_dealloc(soc->htt_handle);
13031 fail5:
13032 	dp_soc_srng_deinit(soc);
13033 fail4:
13034 	dp_hw_link_desc_ring_deinit(soc);
13035 fail3:
13036 	dp_hw_link_desc_ring_free(soc);
13037 fail2:
13038 	htt_htc_pkt_pool_free(htt_soc);
13039 fail1:
13040 	htt_soc_detach(htt_soc);
13041 fail0:
13042 	return NULL;
13043 }
13044 
13045 /**
13046  * dp_soc_init_wifi3() - Initialize txrx SOC
13047  * @soc: Opaque DP SOC handle
13048  * @ctrl_psoc: Opaque SOC handle from control plane (Unused)
13049  * @hif_handle: Opaque HIF handle
13050  * @htc_handle: Opaque HTC handle
13051  * @qdf_osdev: QDF device (Unused)
13052  * @ol_ops: Offload Operations (Unused)
13053  * @device_id: Device ID (Unused)
13054  *
13055  * Return: DP SOC handle on success, NULL on failure
13056  */
13057 void *dp_soc_init_wifi3(struct cdp_soc_t *soc,
13058 			struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
13059 			struct hif_opaque_softc *hif_handle,
13060 			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
13061 			struct ol_if_ops *ol_ops, uint16_t device_id)
13062 {
13063 	return dp_soc_init((struct dp_soc *)soc, htc_handle, hif_handle);
13064 }
13065 
13066 #endif
13067 
13068 /*
13069  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
13070  *
13071  * @soc: handle to DP soc
13072  * @mac_id: MAC id
13073  *
13074  * Return: pdev corresponding to MAC id
13075  */
13076 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
13077 {
13078 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
13079 		return (mac_id < MAX_PDEV_CNT) ? soc->pdev_list[mac_id] : NULL;
13080 
13081 	/* Typically for MCL, as there is only 1 PDEV */
13082 	return soc->pdev_list[0];
13083 }
13084 
13085 /*
13086  * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
13087  * @soc:		DP SoC context
13088  * @max_mac_rings:	No of MAC rings; set to 1 when DBS is not supported
13089  *
13090  * Return: None
13091  */
13092 void dp_is_hw_dbs_enable(struct dp_soc *soc,
13093 				int *max_mac_rings)
13094 {
13095 	bool dbs_enable = false;
13096 	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
13097 		dbs_enable = soc->cdp_soc.ol_ops->
13098 		is_hw_dbs_2x2_capable((void *)soc->ctrl_psoc);
13099 
13100 	*max_mac_rings = dbs_enable ? *max_mac_rings : 1;
13101 }
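
/*
 * Illustrative effect: when the ol_ops report DBS 2x2 capability the
 * configured MAC ring count is preserved; otherwise *max_mac_rings is
 * forced down to a single ring.
 */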
13102 
13103 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
13104 /*
13105  * dp_cfr_filter() -  Configure HOST RX monitor status ring for CFR
13106  * @soc_hdl: Datapath soc handle
13107  * @pdev_id: id of data path pdev handle
13108  * @enable: Enable/Disable CFR
13109  * @filter_val: Flag to select Filter for monitor mode
13110  */
13111 static void dp_cfr_filter(struct cdp_soc_t *soc_hdl,
13112 			  uint8_t pdev_id,
13113 			  bool enable,
13114 			  struct cdp_monitor_filter *filter_val)
13115 {
13116 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13117 	struct dp_pdev *pdev = NULL;
13118 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
13119 	int max_mac_rings;
13120 	uint8_t mac_id = 0;
13121 
13122 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13123 	if (!pdev) {
13124 		dp_err("pdev is NULL");
13125 		return;
13126 	}
13127 
13128 	if (pdev->monitor_vdev) {
13129 		dp_info("No action is needed since monitor mode is enabled");
13130 		return;
13131 	}
13132 	soc = pdev->soc;
13133 	pdev->cfr_rcc_mode = false;
13134 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
13135 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
13136 
13137 	dp_debug("Max_mac_rings %d", max_mac_rings);
13138 	dp_info("enable: %d, mode: 0x%x", enable, filter_val->mode);
13139 
13140 	if (enable) {
13141 		pdev->cfr_rcc_mode = true;
13142 
13143 		htt_tlv_filter.ppdu_start = 1;
13144 		htt_tlv_filter.ppdu_end = 1;
13145 		htt_tlv_filter.ppdu_end_user_stats = 1;
13146 		htt_tlv_filter.ppdu_end_user_stats_ext = 1;
13147 		htt_tlv_filter.ppdu_end_status_done = 1;
13148 		htt_tlv_filter.mpdu_start = 1;
13149 		htt_tlv_filter.offset_valid = false;
13150 
13151 		htt_tlv_filter.enable_fp =
13152 			(filter_val->mode & MON_FILTER_PASS) ? 1 : 0;
13153 		htt_tlv_filter.enable_md = 0;
13154 		htt_tlv_filter.enable_mo =
13155 			(filter_val->mode & MON_FILTER_OTHER) ? 1 : 0;
13156 		htt_tlv_filter.fp_mgmt_filter = filter_val->fp_mgmt;
13157 		htt_tlv_filter.fp_ctrl_filter = filter_val->fp_ctrl;
13158 		htt_tlv_filter.fp_data_filter = filter_val->fp_data;
13159 		htt_tlv_filter.mo_mgmt_filter = filter_val->mo_mgmt;
13160 		htt_tlv_filter.mo_ctrl_filter = filter_val->mo_ctrl;
13161 		htt_tlv_filter.mo_data_filter = filter_val->mo_data;
13162 	}
13163 
13164 	for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
13165 		int mac_for_pdev =
13166 			dp_get_mac_id_for_pdev(mac_id,
13167 					       pdev->pdev_id);
13168 
13169 		htt_h2t_rx_ring_cfg(soc->htt_handle,
13170 				    mac_for_pdev,
13171 				    soc->rxdma_mon_status_ring[mac_id]
13172 				    .hal_srng,
13173 				    RXDMA_MONITOR_STATUS,
13174 				    RX_MON_STATUS_BUF_SIZE,
13175 				    &htt_tlv_filter);
13176 	}
13177 }
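
/*
 * Note: when @enable is false, htt_tlv_filter stays zero-initialized, so the
 * per-MAC htt_h2t_rx_ring_cfg() calls above push an empty filter and thereby
 * clear the CFR configuration from the monitor status rings.
 */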
13178 
13179 /**
13180  * dp_get_cfr_rcc() - get cfr rcc config
13181  * @soc_hdl: Datapath soc handle
13182  * @pdev_id: id of objmgr pdev
13183  *
13184  * Return: true/false based on cfr mode setting
13185  */
13186 static
13187 bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
13188 {
13189 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13190 	struct dp_pdev *pdev = NULL;
13191 
13192 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13193 	if (!pdev) {
13194 		dp_err("pdev is NULL");
13195 		return false;
13196 	}
13197 
13198 	return pdev->cfr_rcc_mode;
13199 }
13200 
13201 /**
13202  * dp_set_cfr_rcc() - enable/disable cfr rcc config
13203  * @soc_hdl: Datapath soc handle
13204  * @pdev_id: id of objmgr pdev
13205  * @enable: Enable/Disable cfr rcc mode
13206  *
13207  * Return: none
13208  */
13209 static
13210 void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, bool enable)
13211 {
13212 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13213 	struct dp_pdev *pdev = NULL;
13214 
13215 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13216 	if (!pdev) {
13217 		dp_err("pdev is NULL");
13218 		return;
13219 	}
13220 
13221 	pdev->cfr_rcc_mode = enable;
13222 }
13223 
13224 /*
13225  * dp_get_cfr_dbg_stats() - Get the debug statistics for CFR
13226  * @soc_hdl: Datapath soc handle
13227  * @pdev_id: id of data path pdev handle
13228  * @cfr_rcc_stats: CFR RCC debug statistics buffer
13229  *
13230  * Return: none
13231  */
13232 static inline void
13233 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
13234 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats)
13235 {
13236 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13237 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13238 
13239 	if (!pdev) {
13240 		dp_err("Invalid pdev");
13241 		return;
13242 	}
13243 
13244 	qdf_mem_copy(cfr_rcc_stats, &pdev->stats.rcc,
13245 		     sizeof(struct cdp_cfr_rcc_stats));
13246 }
13247 
13248 /*
13249  * dp_clear_cfr_dbg_stats() - Clear debug statistics for CFR
13250  * @soc_hdl: Datapath soc handle
13251  * @pdev_id: id of data path pdev handle
13252  *
13253  * Return: none
13254  */
13255 static void dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl,
13256 				   uint8_t pdev_id)
13257 {
13258 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13259 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13260 
13261 	if (!pdev) {
13262 		dp_err("dp pdev is NULL");
13263 		return;
13264 	}
13265 
13266 	qdf_mem_zero(&pdev->stats.rcc, sizeof(pdev->stats.rcc));
13267 }
13268 
13269 /*
13270  * dp_enable_mon_reap_timer() - enable/disable reap timer
13271  * @soc_hdl: Datapath soc handle
13272  * @pdev_id: id of objmgr pdev
13273  * @enable: Enable/Disable reap timer of monitor status ring
13274  *
13275  * Return: none
13276  */
13277 static void
13278 dp_enable_mon_reap_timer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
13279 			 bool enable)
13280 {
13281 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
13282 	struct dp_pdev *pdev = NULL;
13283 
13284 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
13285 	if (!pdev) {
13286 		dp_err("pdev is NULL");
13287 		return;
13288 	}
13289 
13290 	pdev->enable_reap_timer_non_pkt = enable;
13291 	if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
13292 		dp_debug("pktlog enabled %d", pdev->rx_pktlog_mode);
13293 		return;
13294 	}
13295 
13296 	if (!soc->reap_timer_init) {
13297 		dp_err("reap timer not init");
13298 		return;
13299 	}
13300 
13301 	if (enable)
13302 		qdf_timer_mod(&soc->mon_reap_timer,
13303 			      DP_INTR_POLL_TIMER_MS);
13304 	else
13305 		qdf_timer_sync_cancel(&soc->mon_reap_timer);
13306 }
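
/*
 * Note: pktlog-driven reaping takes precedence in the function above; while
 * rx_pktlog_mode is active this call only records enable_reap_timer_non_pkt
 * and leaves the reap timer under pktlog control.
 */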
13307 #endif
13308 
13309 /*
13310  * dp_is_enable_reap_timer_non_pkt() - check if mon reap timer is
13311  * enabled by non-pkt log or not
13312  * @pdev: pointer to dp pdev
13313  *
13314  * Return: true if mon reap timer is enabled by non-pkt log
13315  */
13316 static bool dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev)
13317 {
13318 	if (!pdev) {
13319 		dp_err("null pdev");
13320 		return false;
13321 	}
13322 
13323 	return pdev->enable_reap_timer_non_pkt;
13324 }
13325 
13326 /*
13327  * dp_set_pktlog_wifi3() - subscribe/unsubscribe pkt log for a WDI event
13328  * @pdev: Datapath PDEV handle
13329  * @event: which event's notifications are being subscribed to
13330  * @enable: WDI event subscribe or not. (True or False)
13331  *
13332  * Return: 0 on success
13333  */
13334 #ifdef WDI_EVENT_ENABLE
13335 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
13336 		bool enable)
13337 {
13338 	struct dp_soc *soc = NULL;
13339 	int max_mac_rings = wlan_cfg_get_num_mac_rings
13340 					(pdev->wlan_cfg_ctx);
13341 	uint8_t mac_id = 0;
13342 
13343 	soc = pdev->soc;
13344 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
13345 
13346 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
13347 			FL("Max_mac_rings %d "),
13348 			max_mac_rings);
13349 
13350 	if (enable) {
13351 		switch (event) {
13352 		case WDI_EVENT_RX_DESC:
13353 			if (pdev->monitor_vdev) {
13354 				/* Nothing needs to be done if monitor mode is
13355 				 * enabled
13356 				 */
13357 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
13358 				return 0;
13359 			}
13360 
13361 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
13362 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
13363 				dp_mon_filter_setup_rx_pkt_log_full(pdev);
13364 				if (dp_mon_filter_update(pdev) !=
13365 						QDF_STATUS_SUCCESS) {
13366 					dp_cdp_err("%pK: Pktlog full filters set failed", soc);
13367 					dp_mon_filter_reset_rx_pkt_log_full(pdev);
13368 					pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
13369 					return 0;
13370 				}
13371 
13372 				if (soc->reap_timer_init &&
13373 				    (!dp_is_enable_reap_timer_non_pkt(pdev)))
13374 					qdf_timer_mod(&soc->mon_reap_timer,
13375 					DP_INTR_POLL_TIMER_MS);
13376 			}
13377 			break;
13378 
13379 		case WDI_EVENT_LITE_RX:
13380 			if (pdev->monitor_vdev) {
13381 				/* Nothing needs to be done if monitor mode is
13382 				 * enabled
13383 				 */
13384 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
13385 				return 0;
13386 			}
13387 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
13388 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
13389 
13390 				/*
13391 				 * Set the packet log lite mode filter.
13392 				 */
13393 				dp_mon_filter_setup_rx_pkt_log_lite(pdev);
13394 				if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
13395 					dp_cdp_err("%pK: Pktlog lite filters set failed", soc);
13396 					dp_mon_filter_reset_rx_pkt_log_lite(pdev);
13397 					pdev->rx_pktlog_mode =
13398 						DP_RX_PKTLOG_DISABLED;
13399 					return 0;
13400 				}
13401 
13402 				if (soc->reap_timer_init &&
13403 				    (!dp_is_enable_reap_timer_non_pkt(pdev)))
13404 					qdf_timer_mod(&soc->mon_reap_timer,
13405 					DP_INTR_POLL_TIMER_MS);
13406 			}
13407 			break;
13408 
13409 		case WDI_EVENT_LITE_T2H:
13410 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
13411 				int mac_for_pdev = dp_get_mac_id_for_pdev(
13412 							mac_id,	pdev->pdev_id);
13413 
13414 				pdev->pktlog_ppdu_stats = true;
13415 				dp_h2t_cfg_stats_msg_send(pdev,
13416 					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
13417 					mac_for_pdev);
13418 			}
13419 			break;
13420 
13421 		case WDI_EVENT_RX_CBF:
13422 			if (pdev->monitor_vdev) {
13423 				/* Nothing needs to be done if monitor mode is
13424 				 * enabled
13425 				 */
13426 				dp_info("Monitor mode, CBF setting filters");
13427 				pdev->rx_pktlog_cbf = true;
13428 				return 0;
13429 			}
13430 			if (!pdev->rx_pktlog_cbf) {
13431 				pdev->rx_pktlog_cbf = true;
13432 				pdev->monitor_configured = true;
13433 				dp_vdev_set_monitor_mode_buf_rings(pdev);
13434 				/*
13435 				 * Set the packet log lite mode filter.
13436 				 */
13437 				qdf_info("Non monitor mode: Enable destination ring");
13438 
13439 				dp_mon_filter_setup_rx_pkt_log_cbf(pdev);
13440 				if (dp_mon_filter_update(pdev) !=
13441 				    QDF_STATUS_SUCCESS) {
13442 					dp_err("Pktlog set CBF filters failed");
13443 					dp_mon_filter_reset_rx_pktlog_cbf(pdev);
13444 					pdev->rx_pktlog_mode =
13445 						DP_RX_PKTLOG_DISABLED;
13446 					pdev->monitor_configured = false;
13447 					return 0;
13448 				}
13449 
13450 				if (soc->reap_timer_init &&
13451 				    !dp_is_enable_reap_timer_non_pkt(pdev))
13452 					qdf_timer_mod(&soc->mon_reap_timer,
13453 						      DP_INTR_POLL_TIMER_MS);
13454 			}
13455 			break;
13456 
13457 		default:
13458 			/* Nothing needs to be done for other pktlog types */
13459 			break;
13460 		}
13461 	} else {
13462 		switch (event) {
13463 		case WDI_EVENT_RX_DESC:
13464 		case WDI_EVENT_LITE_RX:
13465 			if (pdev->monitor_vdev) {
13466 				/* Nothing needs to be done if monitor mode is
13467 				 * enabled
13468 				 */
13469 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
13470 				return 0;
13471 			}
13472 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
13473 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
13474 				dp_mon_filter_reset_rx_pkt_log_full(pdev);
13475 				if (dp_mon_filter_update(pdev) !=
13476 						QDF_STATUS_SUCCESS) {
13477 					dp_cdp_err("%pK: Pktlog filters reset failed", soc);
13478 					return 0;
13479 				}
13480 
13481 				dp_mon_filter_reset_rx_pkt_log_lite(pdev);
13482 				if (dp_mon_filter_update(pdev) !=
13483 						QDF_STATUS_SUCCESS) {
13484 					dp_cdp_err("%pK: Pktlog filters reset failed", soc);
13485 					return 0;
13486 				}
13487 
13488 				if (soc->reap_timer_init &&
13489 				    (!dp_is_enable_reap_timer_non_pkt(pdev)))
13490 					qdf_timer_stop(&soc->mon_reap_timer);
13491 			}
13492 			break;
13493 		case WDI_EVENT_LITE_T2H:
13494 			/* Pass value 0 to disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG
13495 			 * in FW. Once these macros are defined in the htt header
13496 			 * file, switch to the proper macros here.
13497 			 */
13498 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
13499 				int mac_for_pdev =
13500 						dp_get_mac_id_for_pdev(mac_id,
13501 								pdev->pdev_id);
13502 
13503 				pdev->pktlog_ppdu_stats = false;
13504 				if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
13505 					dp_h2t_cfg_stats_msg_send(pdev, 0,
13506 								mac_for_pdev);
13507 				} else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
13508 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
13509 								mac_for_pdev);
13510 				} else if (pdev->enhanced_stats_en) {
13511 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
13512 								mac_for_pdev);
13513 				}
13514 			}
13515 
13516 			break;
13517 		case WDI_EVENT_RX_CBF:
13518 			pdev->rx_pktlog_cbf = false;
13519 			break;
13520 
13521 		default:
13522 			/* Nothing needs to be done for other pktlog types */
13523 			break;
13524 		}
13525 	}
13526 	return 0;
13527 }
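
/*
 * Illustrative usage (assumes a valid pdev): subscribe to lite rx pktlog
 * events and later unsubscribe:
 *
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, true);
 *	...
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, false);
 */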
13528 #endif
13529 
13530 /**
13531  * dp_bucket_index() - Return delay bucket index for a measured delay
13532  *
13533  * @delay: delay measured
13534  * @array: array of bucket boundaries for the delay type
13535  *
13536  * Return: index of the bucket the delay falls into
13537  */
13538 static uint8_t dp_bucket_index(uint32_t delay, uint16_t *array)
13539 {
13540 	uint8_t i = CDP_DELAY_BUCKET_0;
13541 
13542 	for (; i < CDP_DELAY_BUCKET_MAX - 1; i++) {
13543 		if (delay >= array[i] && delay <= array[i + 1])
13544 			return i;
13545 	}
13546 
13547 	return (CDP_DELAY_BUCKET_MAX - 1);
13548 }
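
/*
 * Worked example (illustrative): with the sw-enqueue boundaries
 * {0, 1, 2, ..., 12} defined below, delay = 5 ms matches at i = 4
 * (5 >= array[4] && 5 <= array[5]), so exact boundary values land in the
 * lower bucket; any delay beyond the last boundary falls into bucket
 * CDP_DELAY_BUCKET_MAX - 1.
 */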
13549 
13550 /**
13551  * dp_fill_delay_buckets() - Fill delay statistics bucket for each
13552  *				type of delay
13553  *
13554  * @pdev: pdev handle
13555  * @delay: delay in ms
13556  * @tid: tid value
13557  * @mode: type of delay mode
13558  * @ring_id: ring number
13559  * Return: pointer to cdp_delay_stats structure
13560  */
13561 static struct cdp_delay_stats *
13562 dp_fill_delay_buckets(struct dp_pdev *pdev, uint32_t delay,
13563 		      uint8_t tid, uint8_t mode, uint8_t ring_id)
13564 {
13565 	uint8_t delay_index = 0;
13566 	struct cdp_tid_tx_stats *tstats =
13567 		&pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
13568 	struct cdp_tid_rx_stats *rstats =
13569 		&pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
13570 	/*
13571 	 * cdp_fw_to_hw_delay_range
13572 	 * Fw to hw delay ranges in milliseconds
13573 	 */
13574 	uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
13575 		0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
13576 
13577 	/*
13578 	 * cdp_sw_enq_delay_range
13579 	 * Software enqueue delay ranges in milliseconds
13580 	 */
13581 	uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
13582 		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
13583 
13584 	/*
13585 	 * cdp_intfrm_delay_range
13586 	 * Interframe delay ranges in milliseconds
13587 	 */
13588 	uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
13589 		0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};
13590 
13591 	/*
13592 	 * Update delay stats in proper bucket
13593 	 */
13594 	switch (mode) {
13595 	/* Software Enqueue delay ranges */
13596 	case CDP_DELAY_STATS_SW_ENQ:
13597 
13598 		delay_index = dp_bucket_index(delay, cdp_sw_enq_delay);
13599 		tstats->swq_delay.delay_bucket[delay_index]++;
13600 		return &tstats->swq_delay;
13601 
13602 	/* Tx Completion delay ranges */
13603 	case CDP_DELAY_STATS_FW_HW_TRANSMIT:
13604 
13605 		delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay);
13606 		tstats->hwtx_delay.delay_bucket[delay_index]++;
13607 		return &tstats->hwtx_delay;
13608 
13609 	/* Interframe tx delay ranges */
13610 	case CDP_DELAY_STATS_TX_INTERFRAME:
13611 
13612 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
13613 		tstats->intfrm_delay.delay_bucket[delay_index]++;
13614 		return &tstats->intfrm_delay;
13615 
13616 	/* Interframe rx delay ranges */
13617 	case CDP_DELAY_STATS_RX_INTERFRAME:
13618 
13619 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
13620 		rstats->intfrm_delay.delay_bucket[delay_index]++;
13621 		return &rstats->intfrm_delay;
13622 
13623 	/* Ring reap to indication to network stack */
13624 	case CDP_DELAY_STATS_REAP_STACK:
13625 
13626 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
13627 		rstats->to_stack_delay.delay_bucket[delay_index]++;
13628 		return &rstats->to_stack_delay;
13629 	default:
13630 		dp_debug("Incorrect delay mode: %d", mode);
13631 	}
13632 
13633 	return NULL;
13634 }
13635 
13636 /**
13637  * dp_update_delay_stats() - Update delay statistics in structure
13638  *				and fill min, max and avg delay
13639  *
13640  * @pdev: pdev handle
13641  * @delay: delay in ms
13642  * @tid: tid value
13643  * @mode: type of delay mode
13644  * @ring_id: ring number
13645  * Return: none
13646  */
13647 void dp_update_delay_stats(struct dp_pdev *pdev, uint32_t delay,
13648 			   uint8_t tid, uint8_t mode, uint8_t ring_id)
13649 {
13650 	struct cdp_delay_stats *dstats = NULL;
13651 
13652 	/*
13653 	 * Delay ranges are different for different delay modes
13654 	 * Get the correct index to update delay bucket
13655 	 */
13656 	dstats = dp_fill_delay_buckets(pdev, delay, tid, mode, ring_id);
13657 	if (qdf_unlikely(!dstats))
13658 		return;
13659 
13660 	if (delay != 0) {
13661 		/*
13662 		 * Compute minimum,average and maximum
13663 		 * delay
13664 		 */
13665 		if (delay < dstats->min_delay)
13666 			dstats->min_delay = delay;
13667 
13668 		if (delay > dstats->max_delay)
13669 			dstats->max_delay = delay;
13670 
13671 		/*
13672 		 * Average over delay measured till now
13673 		 */
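		/*
		 * Note: (delay + avg_delay) / 2 halves the weight of every
		 * older sample, so this behaves as an exponentially weighted
		 * average biased toward recent delays, not a true mean.
		 */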
13674 		if (!dstats->avg_delay)
13675 			dstats->avg_delay = delay;
13676 		else
13677 			dstats->avg_delay = ((delay + dstats->avg_delay) / 2);
13678 	}
13679 }
13680 
13681 /**
13682  * dp_get_peer_mac_list(): function to get peer mac list of vdev
13683  * @soc: Datapath soc handle
13684  * @vdev_id: vdev id
13685  * @newmac: Table of the client MAC addresses
13686  * @mac_cnt: No. of MACs required
13687  * @limit: Limit the number of clients
13688  *
13689  * Return: number of clients
13690  */
13691 uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
13692 			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
13693 			      u_int16_t mac_cnt, bool limit)
13694 {
13695 	struct dp_soc *dp_soc = (struct dp_soc *)soc;
13696 	struct dp_vdev *vdev =
13697 		dp_vdev_get_ref_by_id(dp_soc, vdev_id, DP_MOD_ID_CDP);
13698 	struct dp_peer *peer;
13699 	uint16_t new_mac_cnt = 0;
13700 
13701 	if (!vdev)
13702 		return new_mac_cnt;
13703 
13704 	if (limit && (vdev->num_peers > mac_cnt)) {
		/* release the vdev reference taken above before bailing out */
		dp_vdev_unref_delete(dp_soc, vdev, DP_MOD_ID_CDP);
13705 		return 0;
	}
13706 
13707 	qdf_spin_lock_bh(&vdev->peer_list_lock);
13708 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
13709 		if (peer->bss_peer)
13710 			continue;
13711 		if (new_mac_cnt < mac_cnt) {
13712 			WLAN_ADDR_COPY(newmac[new_mac_cnt], peer->mac_addr.raw);
13713 			new_mac_cnt++;
13714 		}
13715 	}
13716 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
13717 	dp_vdev_unref_delete(dp_soc, vdev, DP_MOD_ID_CDP);
13718 	return new_mac_cnt;
13719 }
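
/*
 * Illustrative caller-side sketch (the table size of 16 is hypothetical);
 * the caller owns the MAC table and bounds the copy with mac_cnt:
 *
 *	u_int8_t macs[16][QDF_MAC_ADDR_SIZE];
 *	uint16_t n;
 *
 *	n = dp_get_peer_mac_list(soc, vdev_id, macs, 16, false);
 */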
13720 
13721 #ifdef QCA_SUPPORT_WDS_EXTENDED
13722 uint16_t dp_wds_ext_get_peer_id(ol_txrx_soc_handle soc,
13723 				uint8_t vdev_id,
13724 				uint8_t *mac)
13725 {
13726 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
13727 						       mac, 0, vdev_id,
13728 						       DP_MOD_ID_CDP);
13729 	uint16_t peer_id = HTT_INVALID_PEER;
13730 
13731 	if (!peer) {
13732 		dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
13733 		return peer_id;
13734 	}
13735 
13736 	peer_id = peer->peer_id;
13737 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
13738 	return peer_id;
13739 }
13740 
13741 QDF_STATUS dp_wds_ext_set_peer_rx(ol_txrx_soc_handle soc,
13742 				  uint8_t vdev_id,
13743 				  uint8_t *mac,
13744 				  ol_txrx_rx_fp rx,
13745 				  ol_osif_peer_handle osif_peer)
13746 {
13747 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
13748 						       mac, 0, vdev_id,
13749 						       DP_MOD_ID_CDP);
13750 	QDF_STATUS status = QDF_STATUS_E_INVAL;
13751 
13752 	if (!peer) {
13753 		dp_cdp_debug("%pK: Peer is NULL!\n", (struct dp_soc *)soc);
13754 		return status;
13755 	}
13756 
13757 	if (rx) {
13758 		if (peer->osif_rx) {
13759 			status = QDF_STATUS_E_ALREADY;
13760 		} else {
13761 			peer->osif_rx = rx;
13762 			status = QDF_STATUS_SUCCESS;
13763 		}
13764 	} else {
13765 		if (peer->osif_rx) {
13766 			peer->osif_rx = NULL;
13767 			status = QDF_STATUS_SUCCESS;
13768 		} else {
13769 			status = QDF_STATUS_E_ALREADY;
13770 		}
13771 	}
13772 
13773 	peer->wds_ext.osif_peer = osif_peer;
13774 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
13775 
13776 	return status;
13777 }
13778 #endif /* QCA_SUPPORT_WDS_EXTENDED */
13779 
13780 /**
13781  * dp_pdev_srng_deinit() - de-initialize all pdev srng rings including
13782  *			   monitor rings
13783  * @pdev: Datapath pdev handle
13784  *
13785  */
13786 static void dp_pdev_srng_deinit(struct dp_pdev *pdev)
13787 {
13788 	struct dp_soc *soc = pdev->soc;
13789 	uint8_t i;
13790 
13791 	dp_srng_deinit(soc, &soc->rx_refill_buf_ring[pdev->lmac_id], RXDMA_BUF,
13792 		       pdev->lmac_id);
13793 
13794 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
13795 		dp_deinit_tx_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
13796 		dp_ipa_deinit_alt_tx_ring(soc);
13797 	}
13798 
13799 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
13800 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, pdev->pdev_id);
13801 
13802 		wlan_minidump_remove(soc->rxdma_err_dst_ring[lmac_id].base_vaddr_unaligned,
13803 				     soc->rxdma_err_dst_ring[lmac_id].alloc_size,
13804 				     soc->ctrl_psoc,
13805 				     WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
13806 				     "rxdma_err_dst");
13807 		dp_srng_deinit(soc, &soc->rxdma_err_dst_ring[lmac_id],
13808 			       RXDMA_DST, lmac_id);
13809 	}
13810 
13811 	dp_mon_rings_deinit(pdev);
13812 }
13813 
13814 /**
13815  * dp_pdev_srng_init() - initialize all pdev srng rings including
13816  *			   monitor rings
13817  * @pdev: Datapath pdev handle
13818  *
13819  * Return: QDF_STATUS_SUCCESS on success
13820  *	   QDF_STATUS_E_NOMEM on failure
13821  */
13822 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev)
13823 {
13824 	struct dp_soc *soc = pdev->soc;
13825 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
13826 	uint32_t i;
13827 
13828 	soc_cfg_ctx = soc->wlan_cfg_ctx;
13829 
13830 	if (dp_srng_init(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
13831 			 RXDMA_BUF, 0, pdev->lmac_id)) {
13832 		dp_init_err("%pK: dp_srng_init failed rx refill ring", soc);
13833 		goto fail1;
13834 	}
13835 
13836 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
13837 		if (dp_init_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
13838 			goto fail1;
13839 
13840 		if (dp_ipa_init_alt_tx_ring(soc))
13841 			goto fail1;
13842 	}
13843 
13844 	if (dp_mon_rings_init(soc, pdev)) {
13845 		dp_init_err("%pK: MONITOR rings setup failed", soc);
13846 		goto fail1;
13847 	}
13848 
13849 	/* LMAC RxDMA to SW Rings configuration */
13850 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
13851 		/* Only valid for MCL */
13852 		pdev = soc->pdev_list[0];
13853 
13854 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
13855 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, pdev->pdev_id);
13856 		struct dp_srng *srng = &soc->rxdma_err_dst_ring[lmac_id];
13857 
13858 		if (srng->hal_srng)
13859 			continue;
13860 
13861 		if (dp_srng_init(soc, srng, RXDMA_DST, 0, lmac_id)) {
13862 			dp_init_err("%pK: " RNG_ERR "rxdma_err_dst_ring", soc);
13863 			goto fail1;
13864 		}
13865 		wlan_minidump_log(soc->rxdma_err_dst_ring[lmac_id].base_vaddr_unaligned,
13866 				  soc->rxdma_err_dst_ring[lmac_id].alloc_size,
13867 				  soc->ctrl_psoc,
13868 				  WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
13869 				  "rxdma_err_dst");
13870 	}
13871 	return QDF_STATUS_SUCCESS;
13872 
13873 fail1:
13874 	dp_pdev_srng_deinit(pdev);
13875 	return QDF_STATUS_E_NOMEM;
13876 }
13877 
13878 /**
13879  * dp_pdev_srng_free() - free all pdev srng rings including monitor rings
13880  * @pdev: Datapath pdev handle
13881  *
13882  */
13883 static void dp_pdev_srng_free(struct dp_pdev *pdev)
13884 {
13885 	struct dp_soc *soc = pdev->soc;
13886 	uint8_t i;
13887 
13888 	dp_srng_free(soc, &soc->rx_refill_buf_ring[pdev->lmac_id]);
13889 	dp_mon_rings_free(pdev);
13890 
13891 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
13892 		dp_free_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
13893 		dp_ipa_free_alt_tx_ring(soc);
13894 	}
13895 
13896 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
13897 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, pdev->pdev_id);
13898 
13899 		dp_srng_free(soc, &soc->rxdma_err_dst_ring[lmac_id]);
13900 	}
13901 }
13902 
13903 /**
13904  * dp_pdev_srng_alloc() - allocate memory for all pdev srng rings including
13905  *			  monitor rings
13906  * @pdev: Datapath pdev handle
13907  *
13908  * Return: QDF_STATUS_SUCCESS on success
13909  *	   QDF_STATUS_E_NOMEM on failure
13910  */
13911 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev)
13912 {
13913 	struct dp_soc *soc = pdev->soc;
13914 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
13915 	uint32_t ring_size;
13916 	uint32_t i;
13917 
13918 	soc_cfg_ctx = soc->wlan_cfg_ctx;
13919 
13920 	ring_size = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
13921 	if (dp_srng_alloc(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
13922 			  RXDMA_BUF, ring_size, 0)) {
13923 		dp_init_err("%pK: dp_srng_alloc failed rx refill ring", soc);
13924 		goto fail1;
13925 	}
13926 
13927 	if (dp_mon_rings_alloc(soc, pdev)) {
13928 		dp_init_err("%pK: MONITOR rings setup failed", soc);
13929 		goto fail1;
13930 	}
13931 
13932 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
13933 		if (dp_alloc_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
13934 			goto fail1;
13935 
13936 		if (dp_ipa_alloc_alt_tx_ring(soc))
13937 			goto fail1;
13938 	}
13939 
13940 	ring_size = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
13941 	/* LMAC RxDMA to SW Rings configuration */
13942 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
13943 		/* Only valid for MCL */
13944 		pdev = soc->pdev_list[0];
13945 
13946 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
13947 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, pdev->pdev_id);
13948 		struct dp_srng *srng = &soc->rxdma_err_dst_ring[lmac_id];
13949 
13950 		if (srng->base_vaddr_unaligned)
13951 			continue;
13952 
13953 		if (dp_srng_alloc(soc, srng, RXDMA_DST, ring_size, 0)) {
13954 			dp_init_err("%pK: " RNG_ERR "rxdma_err_dst_ring", soc);
13955 			goto fail1;
13956 		}
13957 	}
13958 
13959 	return QDF_STATUS_SUCCESS;
13960 fail1:
13961 	dp_pdev_srng_free(pdev);
13962 	return QDF_STATUS_E_NOMEM;
13963 }
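
/*
 * Note: ring memory allocation (dp_pdev_srng_alloc) is kept separate from
 * ring initialization (dp_pdev_srng_init), mirrored by the deinit/free pair
 * above, allowing ring buffers to persist across init/deinit cycles.
 */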
13964 
13965 /**
13966  * dp_soc_srng_deinit() - de-initialize soc srng rings
13967  * @soc: Datapath soc handle
13968  *
13969  */
13970 static void dp_soc_srng_deinit(struct dp_soc *soc)
13971 {
13972 	uint32_t i;
13973 	/* Free the ring memories */
13974 	/* Common rings */
13975 	wlan_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
13976 			     soc->wbm_desc_rel_ring.alloc_size,
13977 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_WBM_DESC_REL,
13978 			     "wbm_desc_rel_ring");
13979 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
13980 
13981 	/* Tx data rings */
13982 	for (i = 0; i < soc->num_tcl_data_rings; i++)
13983 		dp_deinit_tx_pair_by_index(soc, i);
13984 
13985 	/* TCL command and status rings */
13986 	if (soc->init_tcl_cmd_cred_ring) {
13987 		wlan_minidump_remove(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
13988 				     soc->tcl_cmd_credit_ring.alloc_size,
13989 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_CMD,
13990 				     "tcl_cmd_credit_ring");
13991 		dp_srng_deinit(soc, &soc->tcl_cmd_credit_ring,
13992 			       TCL_CMD_CREDIT, 0);
13993 	}
13994 
13995 	wlan_minidump_remove(soc->tcl_status_ring.base_vaddr_unaligned,
13996 			     soc->tcl_status_ring.alloc_size,
13997 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_TCL_STATUS,
13998 			     "tcl_status_ring");
13999 	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
14000 
14001 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
14002 		/* TODO: Get number of rings and ring sizes
14003 		 * from wlan_cfg
14004 		 */
14005 		wlan_minidump_remove(soc->reo_dest_ring[i].base_vaddr_unaligned,
14006 				     soc->reo_dest_ring[i].alloc_size,
14007 				     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_DEST,
14008 				     "reo_dest_ring");
14009 		dp_srng_deinit(soc, &soc->reo_dest_ring[i], REO_DST, i);
14010 	}
14011 
14012 	/* REO reinjection ring */
14013 	wlan_minidump_remove(soc->reo_reinject_ring.base_vaddr_unaligned,
14014 			     soc->reo_reinject_ring.alloc_size,
14015 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_REINJECT,
14016 			     "reo_reinject_ring");
14017 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
14018 
14019 	/* Rx release ring */
14020 	wlan_minidump_remove(soc->rx_rel_ring.base_vaddr_unaligned,
14021 			     soc->rx_rel_ring.alloc_size,
14022 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_RX_REL,
14023 			     "reo_release_ring");
14024 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
14025 
14026 	/* Rx exception ring */
14027 	/* TODO: Better to store ring_type and ring_num in
14028 	 * dp_srng during setup
14029 	 */
14030 	wlan_minidump_remove(soc->reo_exception_ring.base_vaddr_unaligned,
14031 			     soc->reo_exception_ring.alloc_size,
14032 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_EXCEPTION,
14033 			     "reo_exception_ring");
14034 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
14035 
14036 	/* REO command and status rings */
14037 	wlan_minidump_remove(soc->reo_cmd_ring.base_vaddr_unaligned,
14038 			     soc->reo_cmd_ring.alloc_size,
14039 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_CMD,
14040 			     "reo_cmd_ring");
14041 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
14042 	wlan_minidump_remove(soc->reo_status_ring.base_vaddr_unaligned,
14043 			     soc->reo_status_ring.alloc_size,
14044 			     soc->ctrl_psoc, WLAN_MD_DP_SRNG_REO_STATUS,
14045 			     "reo_status_ring");
14046 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
14047 }
14048 
14049 /**
14050  * dp_soc_srng_init() - Initialize soc level srng rings
14051  * @soc: Datapath soc handle
14052  *
14053  * Return: QDF_STATUS_SUCCESS on success
14054  *	   QDF_STATUS_E_FAILURE on failure
14055  */
14056 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc)
14057 {
14058 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
14059 	uint8_t i;
14060 
14061 	soc_cfg_ctx = soc->wlan_cfg_ctx;
14062 
14063 	dp_enable_verbose_debug(soc);
14064 
14065 	/* WBM descriptor release ring */
14066 	if (dp_srng_init(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0)) {
14067 		dp_init_err("%pK: dp_srng_init failed for wbm_desc_rel_ring", soc);
14068 		goto fail1;
14069 	}
14070 
14071 	wlan_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
14072 			  soc->wbm_desc_rel_ring.alloc_size,
14073 			  soc->ctrl_psoc,
14074 			  WLAN_MD_DP_SRNG_WBM_DESC_REL,
14075 			  "wbm_desc_rel_ring");
14076 
14077 	if (soc->init_tcl_cmd_cred_ring) {
14078 		/* TCL command and status rings */
14079 		if (dp_srng_init(soc, &soc->tcl_cmd_credit_ring,
14080 				 TCL_CMD_CREDIT, 0, 0)) {
14081 			dp_init_err("%pK: dp_srng_init failed for tcl_cmd_ring", soc);
14082 			goto fail1;
14083 		}
14084 
14085 		wlan_minidump_log(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
14086 				  soc->tcl_cmd_credit_ring.alloc_size,
14087 				  soc->ctrl_psoc,
14088 				  WLAN_MD_DP_SRNG_TCL_CMD,
14089 				  "tcl_cmd_credit_ring");
14090 	}
14091 
14092 	if (dp_srng_init(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0)) {
14093 		dp_init_err("%pK: dp_srng_init failed for tcl_status_ring", soc);
14094 		goto fail1;
14095 	}
14096 
14097 	wlan_minidump_log(soc->tcl_status_ring.base_vaddr_unaligned,
14098 			  soc->tcl_status_ring.alloc_size,
14099 			  soc->ctrl_psoc,
14100 			  WLAN_MD_DP_SRNG_TCL_STATUS,
14101 			  "tcl_status_ring");
14102 
14103 	/* REO reinjection ring */
14104 	if (dp_srng_init(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0)) {
14105 		dp_init_err("%pK: dp_srng_init failed for reo_reinject_ring", soc);
14106 		goto fail1;
14107 	}
14108 
14109 	wlan_minidump_log(soc->reo_reinject_ring.base_vaddr_unaligned,
14110 			  soc->reo_reinject_ring.alloc_size,
14111 			  soc->ctrl_psoc,
14112 			  WLAN_MD_DP_SRNG_REO_REINJECT,
14113 			  "reo_reinject_ring");
14114 
14115 	/* Rx release ring */
14116 	if (dp_srng_init(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0)) {
14117 		dp_init_err("%pK: dp_srng_init failed for rx_rel_ring", soc);
14118 		goto fail1;
14119 	}
14120 
14121 	wlan_minidump_log(soc->rx_rel_ring.base_vaddr_unaligned,
14122 			  soc->rx_rel_ring.alloc_size,
14123 			  soc->ctrl_psoc,
14124 			  WLAN_MD_DP_SRNG_RX_REL,
14125 			  "reo_release_ring");
14126 
14127 	/* Rx exception ring */
14128 	if (dp_srng_init(soc, &soc->reo_exception_ring,
14129 			 REO_EXCEPTION, 0, MAX_REO_DEST_RINGS)) {
14130 		dp_init_err("%pK: dp_srng_init failed - reo_exception", soc);
14131 		goto fail1;
14132 	}
14133 
14134 	wlan_minidump_log(soc->reo_exception_ring.base_vaddr_unaligned,
14135 			  soc->reo_exception_ring.alloc_size,
14136 			  soc->ctrl_psoc,
14137 			  WLAN_MD_DP_SRNG_REO_EXCEPTION,
14138 			  "reo_exception_ring");
14139 
14140 	/* REO command and status rings */
14141 	if (dp_srng_init(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0)) {
14142 		dp_init_err("%pK: dp_srng_init failed for reo_cmd_ring", soc);
14143 		goto fail1;
14144 	}
14145 
14146 	wlan_minidump_log(soc->reo_cmd_ring.base_vaddr_unaligned,
14147 			  soc->reo_cmd_ring.alloc_size,
14148 			  soc->ctrl_psoc,
14149 			  WLAN_MD_DP_SRNG_REO_CMD,
14150 			  "reo_cmd_ring");
14151 
14152 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
14153 	TAILQ_INIT(&soc->rx.reo_cmd_list);
14154 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
14155 
14156 	if (dp_srng_init(soc, &soc->reo_status_ring, REO_STATUS, 0, 0)) {
14157 		dp_init_err("%pK: dp_srng_init failed for reo_status_ring", soc);
14158 		goto fail1;
14159 	}
14160 
14161 	wlan_minidump_log(soc->reo_status_ring.base_vaddr_unaligned,
14162 			  soc->reo_status_ring.alloc_size,
14163 			  soc->ctrl_psoc,
14164 			  WLAN_MD_DP_SRNG_REO_STATUS,
14165 			  "reo_status_ring");
14166 
14167 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
14168 		if (dp_init_tx_ring_pair_by_index(soc, i))
14169 			goto fail1;
14170 	}
14171 
14172 	dp_create_ext_stats_event(soc);
14173 
14174 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
14175 		/* Initialize REO destination ring */
14176 		if (dp_srng_init(soc, &soc->reo_dest_ring[i], REO_DST, i, 0)) {
14177 			dp_init_err("%pK: dp_srng_init failed for reo_dest_ring", soc);
14178 			goto fail1;
14179 		}
14180 
14181 		wlan_minidump_log(soc->reo_dest_ring[i].base_vaddr_unaligned,
14182 				  soc->reo_dest_ring[i].alloc_size,
14183 				  soc->ctrl_psoc,
14184 				  WLAN_MD_DP_SRNG_REO_DEST,
14185 				  "reo_dest_ring");
14186 	}
14187 
14188 	return QDF_STATUS_SUCCESS;
14189 fail1:
14190 	/*
14191 	 * Cleanup will be done as part of soc_detach, which will
14192 	 * be called on pdev attach failure
14193 	 */
14194 	dp_soc_srng_deinit(soc);
14195 	return QDF_STATUS_E_FAILURE;
14196 }
14197 
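/*
 * The init-side mirror of the teardown pairing: every ring that comes
 * up successfully is immediately registered via wlan_minidump_log() so
 * its descriptors can be captured in a minidump. A minimal sketch of
 * that pairing as a helper (hypothetical name, not an existing API;
 * the trailing 0 mirrors the final argument passed by most soc-level
 * dp_srng_init() call sites above):
 */
static QDF_STATUS dp_srng_minidump_init(struct dp_soc *soc,
					struct dp_srng *srng,
					int ring_type, int ring_num,
					uint32_t md_type, const char *md_name)
{
	if (dp_srng_init(soc, srng, ring_type, ring_num, 0))
		return QDF_STATUS_E_FAILURE;

	/* register the ring memory so a crash dump can capture it */
	wlan_minidump_log(srng->base_vaddr_unaligned, srng->alloc_size,
			  soc->ctrl_psoc, md_type, md_name);
	return QDF_STATUS_SUCCESS;
}
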
14198 /**
14199  * dp_soc_srng_free() - Free soc level srng rings
14200  * @soc: Datapath soc handle
14201  *
14202  */
14203 static void dp_soc_srng_free(struct dp_soc *soc)
14204 {
14205 	uint32_t i;
14206 
14207 	dp_srng_free(soc, &soc->wbm_desc_rel_ring);
14208 
14209 	for (i = 0; i < soc->num_tcl_data_rings; i++)
14210 		dp_free_tx_ring_pair_by_index(soc, i);
14211 
14212 	if (soc->init_tcl_cmd_cred_ring)
14213 		dp_srng_free(soc, &soc->tcl_cmd_credit_ring);
14214 
14215 	dp_srng_free(soc, &soc->tcl_status_ring);
14216 
14217 	for (i = 0; i < soc->num_reo_dest_rings; i++)
14218 		dp_srng_free(soc, &soc->reo_dest_ring[i]);
14219 
14220 	dp_srng_free(soc, &soc->reo_reinject_ring);
14221 	dp_srng_free(soc, &soc->rx_rel_ring);
14222 
14223 	dp_srng_free(soc, &soc->reo_exception_ring);
14224 
14225 	dp_srng_free(soc, &soc->reo_cmd_ring);
14226 	dp_srng_free(soc, &soc->reo_status_ring);
14227 }
14228 
14229 /**
14230  * dp_soc_srng_alloc() - Allocate memory for soc level srng rings
14231  * @soc: Datapath soc handle
14232  *
14233  * Return: QDF_STATUS_SUCCESS on success
14234  *	   QDF_STATUS_E_NOMEM on failure
14235  */
14236 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc)
14237 {
14238 	uint32_t entries;
14239 	uint32_t i;
14240 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
14241 	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
14242 	uint32_t tx_comp_ring_size, tx_ring_size, reo_dst_ring_size;
14243 
14244 	soc_cfg_ctx = soc->wlan_cfg_ctx;
14245 
14246 	/* sw2wbm link descriptor release ring */
14247 	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
14248 	if (dp_srng_alloc(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE,
14249 			  entries, 0)) {
14250 		dp_init_err("%pK: dp_srng_alloc failed for wbm_desc_rel_ring", soc);
14251 		goto fail1;
14252 	}
14253 
14254 	entries = wlan_cfg_get_dp_soc_tcl_cmd_credit_ring_size(soc_cfg_ctx);
14255 	/* TCL command and status rings */
14256 	if (soc->init_tcl_cmd_cred_ring) {
14257 		if (dp_srng_alloc(soc, &soc->tcl_cmd_credit_ring,
14258 				  TCL_CMD_CREDIT, entries, 0)) {
14259 			dp_init_err("%pK: dp_srng_alloc failed for tcl_cmd_ring", soc);
14260 			goto fail1;
14261 		}
14262 	}
14263 
14264 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
14265 	if (dp_srng_alloc(soc, &soc->tcl_status_ring, TCL_STATUS, entries,
14266 			  0)) {
14267 		dp_init_err("%pK: dp_srng_alloc failed for tcl_status_ring", soc);
14268 		goto fail1;
14269 	}
14270 
14271 	/* REO reinjection ring */
14272 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
14273 	if (dp_srng_alloc(soc, &soc->reo_reinject_ring, REO_REINJECT,
14274 			  entries, 0)) {
14275 		dp_init_err("%pK: dp_srng_alloc failed for reo_reinject_ring", soc);
14276 		goto fail1;
14277 	}
14278 
14279 	/* Rx release ring */
14280 	entries = wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx);
14281 	if (dp_srng_alloc(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
14282 			  entries, 0)) {
14283 		dp_init_err("%pK: dp_srng_alloc failed for rx_rel_ring", soc);
14284 		goto fail1;
14285 	}
14286 
14287 	/* Rx exception ring */
14288 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
14289 	if (dp_srng_alloc(soc, &soc->reo_exception_ring, REO_EXCEPTION,
14290 			  entries, 0)) {
14291 		dp_init_err("%pK: dp_srng_alloc failed - reo_exception", soc);
14292 		goto fail1;
14293 	}
14294 
14295 	/* REO command and status rings */
14296 	entries = wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx);
14297 	if (dp_srng_alloc(soc, &soc->reo_cmd_ring, REO_CMD, entries, 0)) {
14298 		dp_init_err("%pK: dp_srng_alloc failed for reo_cmd_ring", soc);
14299 		goto fail1;
14300 	}
14301 
14302 	entries = wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx);
14303 	if (dp_srng_alloc(soc, &soc->reo_status_ring, REO_STATUS,
14304 			  entries, 0)) {
14305 		dp_init_err("%pK: dp_srng_alloc failed for reo_status_ring", soc);
14306 		goto fail1;
14307 	}
14308 
14309 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
14310 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
14311 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc_cfg_ctx);
14312 
14313 	/* Disable cached desc if NSS offload is enabled */
14314 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
14315 		cached = 0;
14316 
14317 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
14318 		if (dp_alloc_tx_ring_pair_by_index(soc, i))
14319 			goto fail1;
14320 	}
14321 
14322 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
14323 		/* Setup REO destination ring */
14324 		if (dp_srng_alloc(soc, &soc->reo_dest_ring[i], REO_DST,
14325 				  reo_dst_ring_size, cached)) {
14326 			dp_init_err("%pK: dp_srng_alloc failed for reo_dest_ring", soc);
14327 			goto fail1;
14328 		}
14329 	}
14330 
14331 	return QDF_STATUS_SUCCESS;
14332 
14333 fail1:
14334 	dp_soc_srng_free(soc);
14335 	return QDF_STATUS_E_NOMEM;
14336 }
14337 
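/*
 * Taken together, the four routines above form the soc SRNG life
 * cycle: ring memory is allocated first, HAL setup happens in a
 * separate init step, and teardown runs in reverse order (deinit
 * before free). A minimal ordering sketch, assuming a hypothetical
 * caller that owns a valid soc (in the driver proper this sequencing
 * is spread across soc attach/init/deinit/detach):
 */
static QDF_STATUS dp_soc_srng_lifecycle_sketch(struct dp_soc *soc)
{
	if (dp_soc_srng_alloc(soc) != QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_NOMEM;

	if (dp_soc_srng_init(soc) != QDF_STATUS_SUCCESS) {
		/* dp_soc_srng_init() already deinited partial state */
		dp_soc_srng_free(soc);
		return QDF_STATUS_E_FAILURE;
	}

	/* ... rings are live here ... */

	dp_soc_srng_deinit(soc);
	dp_soc_srng_free(soc);
	return QDF_STATUS_SUCCESS;
}
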
14338 static void dp_soc_cfg_dump(struct dp_soc *soc, uint32_t target_type)
14339 {
14340 	dp_init_info("DP soc Dump for Target = %d", target_type);
14341 	dp_init_info("ast_override_support = %d, da_war_enabled = %d",
14342 		     soc->ast_override_support, soc->da_war_enabled);
14343 	dp_init_info("hw_nac_monitor_support = %d",
14344 		     soc->hw_nac_monitor_support);
14345 
14346 	wlan_cfg_dp_soc_ctx_dump(soc->wlan_cfg_ctx);
14347 }
14348 
14349 /**
14350  * dp_soc_cfg_init() - initialize target-specific configuration
14351  *		       during dp_soc_init
14352  * @soc: dp soc handle
14353  */
14354 static void dp_soc_cfg_init(struct dp_soc *soc)
14355 {
14356 	uint32_t target_type;
14357 
14358 	target_type = hal_get_target_type(soc->hal_soc);
14359 	switch (target_type) {
14360 	case TARGET_TYPE_QCA6290:
14361 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
14362 					       REO_DST_RING_SIZE_QCA6290);
14363 		soc->ast_override_support = 1;
14364 		soc->da_war_enabled = false;
14365 		break;
14366 	case TARGET_TYPE_QCA6390:
14367 	case TARGET_TYPE_QCA6490:
14368 	case TARGET_TYPE_QCA6750:
14369 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
14370 					       REO_DST_RING_SIZE_QCA6290);
14371 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
14372 		soc->ast_override_support = 1;
14373 		if (soc->cdp_soc.ol_ops->get_con_mode &&
14374 		    soc->cdp_soc.ol_ops->get_con_mode() ==
14375 		    QDF_GLOBAL_MONITOR_MODE) {
14376 			int int_ctx;
14377 
14378 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
14379 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
14380 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
14381 			}
14382 		}
14383 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
14384 		break;
14385 	case TARGET_TYPE_WCN7850:
14386 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
14387 					       REO_DST_RING_SIZE_QCA6290);
14388 		soc->ast_override_support = 1;
14389 
14390 		if (soc->cdp_soc.ol_ops->get_con_mode &&
14391 		    soc->cdp_soc.ol_ops->get_con_mode() ==
14392 		    QDF_GLOBAL_MONITOR_MODE) {
14393 			int int_ctx;
14394 
14395 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS;
14396 			     int_ctx++) {
14397 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
14398 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
14399 			}
14400 		}
14401 
14402 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
14403 		break;
14404 	case TARGET_TYPE_QCA8074:
14405 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
14406 							   MON_BUF_MIN_ENTRIES);
14407 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
14408 					       REO_DST_RING_SIZE_QCA8074);
14409 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
14410 		soc->da_war_enabled = true;
14411 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
14412 		break;
14413 	case TARGET_TYPE_QCA8074V2:
14414 	case TARGET_TYPE_QCA6018:
14415 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
14416 							   MON_BUF_MIN_ENTRIES);
14417 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
14418 					       REO_DST_RING_SIZE_QCA8074);
14419 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
14420 		soc->hw_nac_monitor_support = 1;
14421 		soc->ast_override_support = 1;
14422 		soc->per_tid_basize_max_tid = 8;
14423 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
14424 		soc->da_war_enabled = false;
14425 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
14426 		break;
14427 	case TARGET_TYPE_QCN9000:
14428 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
14429 							   MON_BUF_MIN_ENTRIES);
14430 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
14431 					       REO_DST_RING_SIZE_QCN9000);
14432 		soc->ast_override_support = 1;
14433 		soc->da_war_enabled = false;
14434 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
14435 		soc->hw_nac_monitor_support = 1;
14436 		soc->per_tid_basize_max_tid = 8;
14437 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
14438 		soc->lmac_polled_mode = 0;
14439 		soc->wbm_release_desc_rx_sg_support = 1;
14440 		if (cfg_get(soc->ctrl_psoc, CFG_DP_FULL_MON_MODE))
14441 			dp_config_full_mon_mode((struct cdp_soc_t *)soc, 1);
14442 		break;
14443 	case TARGET_TYPE_QCA5018:
14444 	case TARGET_TYPE_QCN6122:
14445 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
14446 							   MON_BUF_MIN_ENTRIES);
14447 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
14448 					       REO_DST_RING_SIZE_QCA8074);
14449 		soc->ast_override_support = 1;
14450 		soc->da_war_enabled = false;
14451 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
14452 		soc->hw_nac_monitor_support = 1;
14453 		soc->per_tid_basize_max_tid = 8;
14454 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS_11AX;
14455 		soc->disable_mac1_intr = 1;
14456 		soc->disable_mac2_intr = 1;
14457 		soc->wbm_release_desc_rx_sg_support = 1;
14458 		break;
14459 	default:
14460 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
14461 		qdf_assert_always(0);
14462 		break;
14463 	}
14464 	dp_soc_cfg_dump(soc, target_type);
14465 }
14466 
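/*
 * Every case above follows the same recipe: pick a REO destination
 * ring size, then set the per-target capability flags on the soc.
 * A minimal sketch of what one more entry would look like, using a
 * hypothetical TARGET_TYPE_FOO and flag values chosen purely for
 * illustration:
 *
 *	case TARGET_TYPE_FOO:
 *		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
 *					       REO_DST_RING_SIZE_QCN9000);
 *		soc->ast_override_support = 1;
 *		soc->da_war_enabled = false;
 *		break;
 */
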
14467 /**
14468  * dp_soc_cfg_attach() - set target-specific configuration in
14469  *			 dp soc cfg.
14470  * @soc: dp soc handle
14471  */
14472 static void dp_soc_cfg_attach(struct dp_soc *soc)
14473 {
14474 	int target_type;
14475 	int nss_cfg = 0;
14476 
14477 	target_type = hal_get_target_type(soc->hal_soc);
14478 	switch (target_type) {
14479 	case TARGET_TYPE_QCA6290:
14480 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
14481 					       REO_DST_RING_SIZE_QCA6290);
14482 		break;
14483 	case TARGET_TYPE_QCA6390:
14484 	case TARGET_TYPE_QCA6490:
14485 	case TARGET_TYPE_QCA6750:
14486 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
14487 					       REO_DST_RING_SIZE_QCA6290);
14488 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
14489 		break;
14490 	case TARGET_TYPE_WCN7850:
14491 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
14492 					       REO_DST_RING_SIZE_QCA6290);
14493 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
14494 		break;
14495 	case TARGET_TYPE_QCA8074:
14496 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
14497 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
14498 					       REO_DST_RING_SIZE_QCA8074);
14499 		break;
14500 	case TARGET_TYPE_QCA8074V2:
14501 	case TARGET_TYPE_QCA6018:
14502 	case TARGET_TYPE_QCN6122:
14503 	case TARGET_TYPE_QCA5018:
14504 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
14505 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
14506 					       REO_DST_RING_SIZE_QCA8074);
14507 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
14508 		break;
14509 	case TARGET_TYPE_QCN9000:
14510 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
14511 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
14512 					       REO_DST_RING_SIZE_QCN9000);
14513 		wlan_cfg_set_rxdma1_enable(soc->wlan_cfg_ctx);
14514 		break;
14515 	default:
14516 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
14517 		qdf_assert_always(0);
14518 		break;
14519 	}
14520 
14521 	if (soc->cdp_soc.ol_ops->get_soc_nss_cfg)
14522 		nss_cfg = soc->cdp_soc.ol_ops->get_soc_nss_cfg(soc->ctrl_psoc);
14523 
14524 	wlan_cfg_set_dp_soc_nss_cfg(soc->wlan_cfg_ctx, nss_cfg);
14525 
14526 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
14527 		wlan_cfg_set_num_tx_desc_pool(soc->wlan_cfg_ctx, 0);
14528 		wlan_cfg_set_num_tx_ext_desc_pool(soc->wlan_cfg_ctx, 0);
14529 		wlan_cfg_set_num_tx_desc(soc->wlan_cfg_ctx, 0);
14530 		wlan_cfg_set_num_tx_ext_desc(soc->wlan_cfg_ctx, 0);
14531 		soc->init_tcl_cmd_cred_ring = false;
14532 		soc->num_tcl_data_rings =
14533 			wlan_cfg_num_nss_tcl_data_rings(soc->wlan_cfg_ctx);
14534 		soc->num_reo_dest_rings =
14535 			wlan_cfg_num_nss_reo_dest_rings(soc->wlan_cfg_ctx);
14536 
14537 	} else {
14538 		soc->init_tcl_cmd_cred_ring = true;
14539 		soc->num_tcl_data_rings =
14540 			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
14541 		soc->num_reo_dest_rings =
14542 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
14543 	}
14544 }
14545 
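/*
 * The NSS offload configuration fetched above is a per-radio bitmask:
 * bit N set means pdev N runs its datapath in the NSS offload engine
 * instead of on the host. A worked example with a hypothetical
 * two-radio value:
 *
 *	nss_cfg = 0x2;             binary 10 -> only pdev 1 offloaded
 *	nss_cfg & (1 << 0) == 0    pdev 0 stays on the host datapath
 *	nss_cfg & (1 << 1) != 0    pdev 1 is NSS offloaded
 *
 * The same per-radio test is applied in dp_pdev_init() below when it
 * calls wlan_cfg_set_dp_pdev_nss_enabled().
 */
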
14546 static inline void dp_pdev_set_default_reo(struct dp_pdev *pdev)
14547 {
14548 	struct dp_soc *soc = pdev->soc;
14549 
14550 	switch (pdev->pdev_id) {
14551 	case 0:
14552 		pdev->reo_dest =
14553 			wlan_cfg_radio0_default_reo_get(soc->wlan_cfg_ctx);
14554 		break;
14555 
14556 	case 1:
14557 		pdev->reo_dest =
14558 			wlan_cfg_radio1_default_reo_get(soc->wlan_cfg_ctx);
14559 		break;
14560 
14561 	case 2:
14562 		pdev->reo_dest =
14563 			wlan_cfg_radio2_default_reo_get(soc->wlan_cfg_ctx);
14564 		break;
14565 
14566 	default:
14567 		dp_init_err("%pK: Invalid pdev_id %d for reo selection",
14568 			    soc, pdev->pdev_id);
14569 		break;
14570 	}
14571 }
14572 
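/*
 * dp_pdev_set_default_reo() is a straight pdev_id -> per-radio default
 * REO destination mapping. A minimal usage sketch (the resulting value
 * is an assumption for illustration; at runtime it comes from the
 * wlan_cfg entries):
 *
 *	pdev->pdev_id = 1;
 *	dp_pdev_set_default_reo(pdev);
 *	// pdev->reo_dest == wlan_cfg_radio1_default_reo_get(soc->wlan_cfg_ctx)
 */
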
14573 static QDF_STATUS dp_pdev_init(struct cdp_soc_t *txrx_soc,
14574 				      HTC_HANDLE htc_handle,
14575 				      qdf_device_t qdf_osdev,
14576 				      uint8_t pdev_id)
14577 {
14578 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
14579 	int nss_cfg;
14580 	void *sojourn_buf;
14581 	QDF_STATUS ret;
14582 
14583 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
14584 	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
14585 
14586 	soc_cfg_ctx = soc->wlan_cfg_ctx;
14587 	pdev->soc = soc;
14588 	pdev->pdev_id = pdev_id;
14589 
14590 	pdev->filter = dp_mon_filter_alloc(pdev);
14591 	if (!pdev->filter) {
14592 		dp_init_err("%pK: Memory allocation failed for monitor filters",
14593 			    soc);
14594 		ret = QDF_STATUS_E_NOMEM;
14595 		goto fail0;
14596 	}
14597 
14598 	/*
14599 	 * Variable to prevent double pdev deinitialization during
14600 	 * radio detach execution, i.e. in the absence of any vdev.
14601 	 */
14602 	pdev->pdev_deinit = 0;
14603 
14604 	if (dp_wdi_event_attach(pdev)) {
14605 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
14606 			  "dp_wdi_event_attach failed");
14607 		goto fail1;
14608 	}
14609 
14610 	if (dp_pdev_srng_init(pdev)) {
14611 		dp_init_err("%pK: Failed to initialize pdev srng rings", soc);
14612 		goto fail2;
14613 	}
14614 
14615 	/* Initialize descriptors in TCL Rings used by IPA */
14616 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
14617 		hal_tx_init_data_ring(soc->hal_soc,
14618 				      soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng);
14619 		dp_ipa_hal_tx_init_alt_data_ring(soc);
14620 	}
14621 
14622 	/*
14623 	 * Initialize command/credit ring descriptor
14624 	 * Command/CREDIT ring also used for sending DATA cmds
14625 	 */
14626 	if (soc->init_tcl_cmd_cred_ring)
14627 		hal_tx_init_cmd_credit_ring(soc->hal_soc,
14628 					    soc->tcl_cmd_credit_ring.hal_srng);
14629 
14630 	dp_tx_pdev_init(pdev);
14631 	/*
14632 	 * Allocate a placeholder peer object, used to handle frames
14633 	 * received for peers that are not found in the peer tables.
14634 	 */
14635 	pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer));
14636 
14637 	if (!pdev->invalid_peer) {
14638 		dp_init_err("%pK: Invalid peer memory allocation failed", soc);
14639 		goto fail3;
14640 	}
14641 
14642 	/*
14643 	 * set nss pdev config based on soc config
14644 	 */
14645 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
14646 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
14647 					 (nss_cfg & (1 << pdev_id)));
14648 	pdev->target_pdev_id =
14649 		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
14650 
14651 	if (soc->preferred_hw_mode == WMI_HOST_HW_MODE_2G_PHYB &&
14652 	    pdev->lmac_id == PHYB_2G_LMAC_ID) {
14653 		pdev->target_pdev_id = PHYB_2G_TARGET_PDEV_ID;
14654 	}
14655 
14656 	/* Reset the cpu ring map if radio is NSS offloaded */
14657 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
14658 		dp_soc_reset_cpu_ring_map(soc);
14659 		dp_soc_reset_intr_mask(soc);
14660 	}
14661 
14662 	TAILQ_INIT(&pdev->vdev_list);
14663 	qdf_spinlock_create(&pdev->vdev_list_lock);
14664 	qdf_spinlock_create(&pdev->ppdu_stats_lock);
14665 	pdev->vdev_count = 0;
14666 
14667 	qdf_spinlock_create(&pdev->tx_mutex);
14668 	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
14669 	TAILQ_INIT(&pdev->neighbour_peers_list);
14670 	pdev->neighbour_peers_added = false;
14671 	pdev->monitor_configured = false;
14672 	pdev->mon_chan_band = REG_BAND_UNKNOWN;
14673 	pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_INVALID_LMAC_ID;
14674 	pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_INVALID_LMAC_ID;
14675 	pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_INVALID_LMAC_ID;
14676 
14677 	DP_STATS_INIT(pdev);
14678 
14679 	/* Monitor filter init */
14680 	pdev->mon_filter_mode = MON_FILTER_ALL;
14681 	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
14682 	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
14683 	pdev->fp_data_filter = FILTER_DATA_ALL;
14684 	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
14685 	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
14686 	pdev->mo_data_filter = FILTER_DATA_ALL;
14687 
14688 	dp_local_peer_id_pool_init(pdev);
14689 
14690 	dp_dscp_tid_map_setup(pdev);
14691 	dp_pcp_tid_map_setup(pdev);
14692 
14693 	/* set the reo destination during initialization */
14694 	dp_pdev_set_default_reo(pdev);
14695 
14696 	/*
14697 	 * initialize ppdu tlv list
14698 	 */
14699 	TAILQ_INIT(&pdev->ppdu_info_list);
14700 	TAILQ_INIT(&pdev->sched_comp_ppdu_list);
14701 	pdev->tlv_count = 0;
14702 	pdev->list_depth = 0;
14703 
14704 	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
14705 
14706 	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
14707 			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
14708 			      TRUE);
14709 
14710 	if (!pdev->sojourn_buf) {
14711 		dp_init_err("%pK: Failed to allocate sojourn buf", soc);
14712 		goto fail4;
14713 	}
14714 	sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
14715 	qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));
14716 
14717 	/* initialize cal client timer */
14718 	dp_cal_client_attach(&pdev->cal_client_ctx,
14719 			     dp_pdev_to_cdp_pdev(pdev),
14720 			     pdev->soc->osdev,
14721 			     &dp_iterate_update_peer_list);
14722 	qdf_event_create(&pdev->fw_peer_stats_event);
14723 
14724 	pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
14725 	if (dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS)
14726 		goto fail5;
14727 
14728 	if (dp_rxdma_ring_setup(soc, pdev)) {
14729 		dp_init_err("%pK: RXDMA ring config failed", soc);
14730 		goto fail6;
14731 	}
14732 
14733 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
14734 		goto fail7;
14735 
14736 	if (dp_ipa_ring_resource_setup(soc, pdev))
14737 		goto fail8;
14738 
14739 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
14740 		dp_init_err("%pK: dp_ipa_uc_attach failed", soc);
14741 		goto fail8;
14742 	}
14743 
14744 	ret = dp_rx_fst_attach(soc, pdev);
14745 	if ((ret != QDF_STATUS_SUCCESS) &&
14746 	    (ret != QDF_STATUS_E_NOSUPPORT)) {
14747 		dp_init_err("%pK: RX Flow Search Table attach failed: pdev %d err %d",
14748 			    soc, pdev_id, ret);
14749 		goto fail9;
14750 	}
14751 
14752 	if (dp_pdev_bkp_stats_attach(pdev) != QDF_STATUS_SUCCESS) {
14753 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
14754 			  FL("dp_pdev_bkp_stats_attach failed"));
14755 		goto fail10;
14756 	}
14757 
14758 	/* initialize sw rx descriptors */
14759 	dp_rx_pdev_desc_pool_init(pdev);
14760 	/* initialize sw monitor rx descriptors */
14761 	dp_rx_pdev_mon_desc_pool_init(pdev);
14762 	/* allocate buffers and replenish the RxDMA ring */
14763 	dp_rx_pdev_buffers_alloc(pdev);
14764 	/* allocate buffers and replenish the monitor RxDMA ring */
14765 	dp_rx_pdev_mon_buffers_alloc(pdev);
14766 
14767 	dp_init_tso_stats(pdev);
14768 	dp_tx_ppdu_stats_attach(pdev);
14769 
14770 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
14771 		qdf_dma_mem_stats_read(),
14772 		qdf_heap_mem_stats_read(),
14773 		qdf_skb_total_mem_stats_read());
14774 
14775 	return QDF_STATUS_SUCCESS;
14776 fail10:
14777 	dp_rx_fst_detach(soc, pdev);
14778 fail9:
14779 	dp_ipa_uc_detach(soc, pdev);
14780 fail8:
14781 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
14782 fail7:
14783 	dp_rxdma_ring_cleanup(soc, pdev);
14784 fail6:
14785 	dp_htt_ppdu_stats_detach(pdev);
14786 fail5:
14787 	qdf_nbuf_free(pdev->sojourn_buf);
14788 fail4:
14789 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
14790 	qdf_spinlock_destroy(&pdev->tx_mutex);
14791 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
14792 	qdf_spinlock_destroy(&pdev->ppdu_stats_lock);
14793 	qdf_mem_free(pdev->invalid_peer);
14794 fail3:
14795 	dp_pdev_srng_deinit(pdev);
14796 fail2:
14797 	dp_wdi_event_detach(pdev);
14798 fail1:
14799 	dp_mon_filter_dealloc(pdev);
14800 fail0:
14801 	return QDF_STATUS_E_FAILURE;
14802 }
14803 
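/*
 * dp_pdev_init() relies on the kernel-style "goto ladder" for error
 * handling: each failure jumps to a label that unwinds only what was
 * set up before it, in reverse order, so no step is torn down twice.
 * A minimal self-contained sketch of the idiom (the two allocations
 * are placeholders, not real pdev state):
 */
static QDF_STATUS dp_goto_ladder_sketch(void)
{
	void *first, *second;

	first = qdf_mem_malloc(16);
	if (!first)
		goto fail0;

	second = qdf_mem_malloc(32);
	if (!second)
		goto fail1;

	/* ... use first and second ... */

	qdf_mem_free(second);
	qdf_mem_free(first);
	return QDF_STATUS_SUCCESS;

fail1:
	qdf_mem_free(first);
fail0:
	return QDF_STATUS_E_FAILURE;
}
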
14804 /*
14805  * dp_pdev_init_wifi3() - Init txrx pdev
14806  * @txrx_soc: Datapath soc handle
14807  * @htc_handle: HTC handle for host-target interface
14808  * @qdf_osdev: QDF OS device
14809  * @pdev_id: pdev id of the pdev to be initialized
14810  * Return: QDF_STATUS
14811  */
14812 static QDF_STATUS dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
14813 				     HTC_HANDLE htc_handle,
14814 				     qdf_device_t qdf_osdev,
14815 				     uint8_t pdev_id)
14816 {
14817 	return dp_pdev_init(txrx_soc, htc_handle, qdf_osdev, pdev_id);
14818 }
14819 
14820