xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision a86b23ee68a2491aede2e03991f3fb37046f4e41)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <qdf_net_types.h>
22 #include <qdf_lro.h>
23 #include <qdf_module.h>
24 #include <hal_hw_headers.h>
25 #include <hal_api.h>
26 #include <hif.h>
27 #include <htt.h>
28 #include <wdi_event.h>
29 #include <queue.h>
30 #include "dp_types.h"
31 #include "dp_internal.h"
32 #include "dp_tx.h"
33 #include "dp_tx_desc.h"
34 #include "dp_rx.h"
35 #include "dp_rx_mon.h"
36 #ifdef DP_RATETABLE_SUPPORT
37 #include "dp_ratetable.h"
38 #endif
39 #include <cdp_txrx_handle.h>
40 #include <wlan_cfg.h>
41 #include <wlan_utility.h>
42 #include "cdp_txrx_cmn_struct.h"
43 #include "cdp_txrx_stats_struct.h"
44 #include "cdp_txrx_cmn_reg.h"
45 #include <qdf_util.h>
46 #include "dp_peer.h"
48 #include "htt_stats.h"
49 #include "dp_htt.h"
50 #ifdef WLAN_SUPPORT_RX_FISA
51 #include <dp_fisa_rx.h>
52 #endif
53 #include "htt_ppdu_stats.h"
54 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
55 #include "cfg_ucfg_api.h"
56 #include "dp_mon_filter.h"
57 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
58 #include "cdp_txrx_flow_ctrl_v2.h"
59 #else
60 static inline void
61 cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
62 {
63 	return;
64 }
65 #endif
66 #include "dp_ipa.h"
67 #include "dp_cal_client_api.h"
68 #ifdef FEATURE_WDS
69 #include "dp_txrx_wds.h"
70 #endif
71 #ifdef ATH_SUPPORT_IQUE
72 #include "dp_txrx_me.h"
73 #endif
74 #if defined(DP_CON_MON)
75 #ifndef REMOVE_PKT_LOG
76 #include <pktlog_ac_api.h>
77 #include <pktlog_ac.h>
78 #endif
79 #endif
80 
81 #ifdef WLAN_FEATURE_STATS_EXT
82 #define INIT_RX_HW_STATS_LOCK(_soc) \
83 	qdf_spinlock_create(&(_soc)->rx_hw_stats_lock)
84 #define DEINIT_RX_HW_STATS_LOCK(_soc) \
85 	qdf_spinlock_destroy(&(_soc)->rx_hw_stats_lock)
86 #else
87 #define INIT_RX_HW_STATS_LOCK(_soc)  /* no op */
88 #define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */
89 #endif
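/*
 * Illustrative usage (a sketch, not taken from this file): the no-op
 * variants above let callers bracket RX HW stats access unconditionally,
 * without an #ifdef WLAN_FEATURE_STATS_EXT at every call site:
 *
 *	INIT_RX_HW_STATS_LOCK(soc);	// creates rx_hw_stats_lock, or no-op
 *	...critical section guarded by rx_hw_stats_lock...
 *	DEINIT_RX_HW_STATS_LOCK(soc);	// destroys the spinlock, or no-op
 */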
90 
91 #ifdef DP_PEER_EXTENDED_API
92 #define SET_PEER_REF_CNT_ONE(_peer) \
93 	qdf_atomic_set(&(_peer)->ref_cnt, 1)
94 #else
95 #define SET_PEER_REF_CNT_ONE(_peer)
96 #endif
97 
98 /*
99  * The max size of cdp_peer_stats_param_t is limited to 16 bytes.
100  * If the requested stat exceeds this size limit,
101  * dp_txrx_get_peer_stats must be used instead.
102  */
103 QDF_COMPILE_TIME_ASSERT(cdp_peer_stats_param_t_max_size,
104 			(sizeof(cdp_peer_stats_param_t) <= 16));
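/*
 * Sketch of how a compile-time assert of this kind typically works (an
 * assumption; the exact qdf_types.h definition may differ): the predicate
 * selects the size of a typedef'd array, so a false predicate yields a
 * negative array size and the build fails at this line:
 *
 *	#define COMPILE_TIME_ASSERT_SKETCH(name, predicate) \
 *		typedef char name[(predicate) ? 1 : -1]
 */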
105 
106 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
107 /*
108  * If WLAN_CFG_INT_NUM_CONTEXTS is changed, HIF_NUM_INT_CONTEXTS
109  * should also be updated accordingly
110  */
111 QDF_COMPILE_TIME_ASSERT(num_intr_grps,
112 			HIF_NUM_INT_CONTEXTS == WLAN_CFG_INT_NUM_CONTEXTS);
113 
114 /*
115  * HIF_EVENT_HIST_MAX should always be a power of 2
116  */
117 QDF_COMPILE_TIME_ASSERT(hif_event_history_size,
118 			(HIF_EVENT_HIST_MAX & (HIF_EVENT_HIST_MAX - 1)) == 0);
119 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
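/*
 * Why the check above detects powers of 2: for x = 2^n, x - 1 sets only
 * the bits below bit n, so x & (x - 1) == 0. A power-of-2 history depth
 * allows the (presumably hif-side) writer to wrap its index with a cheap
 * mask, e.g. idx & (HIF_EVENT_HIST_MAX - 1), instead of a modulo.
 */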
120 
121 /*
122  * If WLAN_CFG_INT_NUM_CONTEXTS is changed,
123  * WLAN_CFG_INT_NUM_CONTEXTS_MAX should also be updated
124  */
125 QDF_COMPILE_TIME_ASSERT(wlan_cfg_num_int_ctxs,
126 			WLAN_CFG_INT_NUM_CONTEXTS_MAX >=
127 			WLAN_CFG_INT_NUM_CONTEXTS);
128 
129 #ifdef WLAN_RX_PKT_CAPTURE_ENH
130 #include "dp_rx_mon_feature.h"
131 #else
132 /*
133  * dp_config_enh_rx_capture()- API to enable/disable enhanced rx capture
134  * @pdev_handle: DP_PDEV handle
135  * @val: user provided value
136  *
137  * Return: QDF_STATUS
138  */
139 static QDF_STATUS
140 dp_config_enh_rx_capture(struct dp_pdev *pdev_handle, uint8_t val)
141 {
142 	return QDF_STATUS_E_INVAL;
143 }
144 #endif /* WLAN_RX_PKT_CAPTURE_ENH */
145 
146 #ifdef WLAN_TX_PKT_CAPTURE_ENH
147 #include "dp_tx_capture.h"
148 #else
149 /*
150  * dp_config_enh_tx_capture()- API to enable/disable enhanced tx capture
151  * @pdev_handle: DP_PDEV handle
152  * @val: user provided value
153  *
154  * Return: QDF_STATUS
155  */
156 static QDF_STATUS
157 dp_config_enh_tx_capture(struct dp_pdev *pdev_handle, uint8_t val)
158 {
159 	return QDF_STATUS_E_INVAL;
160 }
161 #endif
162 
163 static void dp_pdev_srng_deinit(struct dp_pdev *pdev);
164 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev);
165 static void dp_pdev_srng_free(struct dp_pdev *pdev);
166 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev);
167 
168 static void dp_soc_srng_deinit(struct dp_soc *soc);
169 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc);
170 static void dp_soc_srng_free(struct dp_soc *soc);
171 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc);
172 
173 static void dp_soc_cfg_init(struct dp_soc *soc);
174 static void dp_soc_cfg_attach(struct dp_soc *soc);
175 
176 static inline
177 QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
178 				HTC_HANDLE htc_handle,
179 				qdf_device_t qdf_osdev,
180 				uint8_t pdev_id);
181 static QDF_STATUS
182 dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
183 		   HTC_HANDLE htc_handle,
184 		   qdf_device_t qdf_osdev,
185 		   uint8_t pdev_id);
186 
187 static QDF_STATUS
188 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id, int force);
189 
190 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc);
191 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc);
192 
193 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
194 		  struct hif_opaque_softc *hif_handle);
195 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
196 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc,
197 				       uint8_t pdev_id,
198 				       int force);
199 static struct dp_soc *
200 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
201 	      struct hif_opaque_softc *hif_handle,
202 	      HTC_HANDLE htc_handle,
203 	      qdf_device_t qdf_osdev,
204 	      struct ol_if_ops *ol_ops, uint16_t device_id);
205 static void dp_pktlogmod_exit(struct dp_pdev *handle);
206 static inline QDF_STATUS dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl,
207 					      uint8_t vdev_id,
208 					      uint8_t *peer_mac_addr);
209 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
210 				       uint8_t vdev_id,
211 				       uint8_t *peer_mac, uint32_t bitmap);
212 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
213 				bool unmap_only);
214 #ifdef ENABLE_VERBOSE_DEBUG
215 bool is_dp_verbose_debug_enabled;
216 #endif
217 
218 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
219 static void dp_cfr_filter(struct cdp_soc_t *soc_hdl,
220 			  uint8_t pdev_id,
221 			  bool enable,
222 			  struct cdp_monitor_filter *filter_val);
223 static bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
224 static void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
225 			   bool enable);
226 static inline void
227 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
228 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats);
229 static inline void
230 dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
231 static inline void
232 dp_enable_mon_reap_timer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
233 			 bool enable);
234 #endif
235 static inline bool
236 dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev);
237 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
238 					    enum hal_ring_type ring_type,
239 					    int ring_num);
240 #define DP_INTR_POLL_TIMER_MS	5
241 
242 /* Generic AST entry aging timer value */
243 #define DP_AST_AGING_TIMER_DEFAULT_MS	1000
244 #define DP_MCS_LENGTH (6*MAX_MCS)
245 
246 #define DP_CURR_FW_STATS_AVAIL 19
247 #define DP_HTT_DBG_EXT_STATS_MAX 256
248 #define DP_MAX_SLEEP_TIME 100
249 #ifndef QCA_WIFI_3_0_EMU
250 #define SUSPEND_DRAIN_WAIT 500
251 #else
252 #define SUSPEND_DRAIN_WAIT 3000
253 #endif
254 
255 #ifdef IPA_OFFLOAD
256 /* Exclude IPA rings from the interrupt context */
257 #define TX_RING_MASK_VAL	0xb
258 #define RX_RING_MASK_VAL	0x7
259 #else
260 #define TX_RING_MASK_VAL	0xF
261 #define RX_RING_MASK_VAL	0xF
262 #endif
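/*
 * Reading the masks as bitmaps (bit i == ring i): 0xb (0b1011) leaves TX
 * ring 2 out of the host interrupt context and 0x7 (0b0111) leaves RX
 * ring 3 out, on the assumption that those rings are serviced by the IPA
 * offload path instead. 0xF enables all four rings on the host.
 */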
263 
264 #define STR_MAXLEN	64
265 
266 #define RNG_ERR		"SRNG setup failed for"
267 
268 /* Threshold for peer's cached buf queue beyond which frames are dropped */
269 #define DP_RX_CACHED_BUFQ_THRESH 64
270 
271 /* Budget to reap monitor status ring */
272 #define DP_MON_REAP_BUDGET 1024
273 
274 /**
275  * default_dscp_tid_map - Default DSCP-TID mapping
276  *
277  * DSCP        TID
278  * 000000      0
279  * 001000      1
280  * 010000      2
281  * 011000      3
282  * 100000      4
283  * 101000      5
284  * 110000      6
285  * 111000      7
286  */
287 static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
288 	0, 0, 0, 0, 0, 0, 0, 0,
289 	1, 1, 1, 1, 1, 1, 1, 1,
290 	2, 2, 2, 2, 2, 2, 2, 2,
291 	3, 3, 3, 3, 3, 3, 3, 3,
292 	4, 4, 4, 4, 4, 4, 4, 4,
293 	5, 5, 5, 5, 5, 5, 5, 5,
294 	6, 6, 6, 6, 6, 6, 6, 6,
295 	7, 7, 7, 7, 7, 7, 7, 7,
296 };
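/*
 * Worked example: DSCP 46 (Expedited Forwarding, 0b101110) indexes entry
 * 46 above, which falls in the 40..47 block and therefore maps to TID 5;
 * effectively only the three high-order DSCP bits (46 >> 3 == 5) select
 * the TID in this default table.
 */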
297 
298 /**
299  * default_pcp_tid_map - Default PCP-TID mapping
300  *
301  * PCP     TID
302  * 000      0
303  * 001      1
304  * 010      2
305  * 011      3
306  * 100      4
307  * 101      5
308  * 110      6
309  * 111      7
310  */
311 static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
312 	0, 1, 2, 3, 4, 5, 6, 7,
313 };
314 
315 /**
316  * @brief Cpu to tx ring map
317  */
318 uint8_t
319 dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
320 	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
321 	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
322 	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
323 	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
324 	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
325 #ifdef WLAN_TX_PKT_CAPTURE_ENH
326 	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
327 #endif
328 };
329 
330 /**
331  * @brief Select the type of statistics
332  */
333 enum dp_stats_type {
334 	STATS_FW = 0,
335 	STATS_HOST = 1,
336 	STATS_TYPE_MAX = 2,
337 };
338 
339 /**
340  * @brief General Firmware statistics options
341  *
342  */
343 enum dp_fw_stats {
344 	TXRX_FW_STATS_INVALID	= -1,
345 };
346 
347 /**
348  * dp_stats_mapping_table - Firmware and Host statistics
349  * currently supported
350  */
351 const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
352 	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
353 	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
354 	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
355 	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
356 	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
357 	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
358 	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
359 	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
360 	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
361 	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
362 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
363 	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
364 	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
365 	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
366 	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
367 	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
368 	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
369 	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
370 	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
371 	/* Last ENUM for HTT FW STATS */
372 	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
373 	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
374 	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
375 	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
376 	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
377 	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
378 	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
379 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
380 	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
381 	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
382 	{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
383 	{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
384 	{TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
385 	{TXRX_FW_STATS_INVALID, TXRX_SOC_FSE_STATS},
386 	{TXRX_FW_STATS_INVALID, TXRX_HAL_REG_WRITE_STATS},
387 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT, TXRX_HOST_STATS_INVALID}
388 };
389 
390 /* MCL specific functions */
391 #if defined(DP_CON_MON)
392 /**
393  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
394  * @soc: pointer to dp_soc handle
395  * @intr_ctx_num: interrupt context number for which mon mask is needed
396  *
397  * For MCL, monitor mode rings are processed in timer contexts (polled).
398  * This function returns 0, since in interrupt mode (softirq based RX)
399  * we do not want to process monitor mode rings in a softirq.
400  *
401  * So, in case packet log is enabled for SAP/STA/P2P modes,
402  * regular interrupt processing will not process monitor mode rings. It would be
403  * done in a separate timer context.
404  *
405  * Return: 0
406  */
407 static inline
408 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
409 {
410 	return 0;
411 }
412 
413 /*
414  * dp_service_mon_rings()- service monitor rings
415  * @soc: soc dp handle
416  * @quota: number of ring entries that can be serviced
417  *
418  * Return: None
419  *
420  */
421 static void dp_service_mon_rings(struct  dp_soc *soc, uint32_t quota)
422 {
423 	int ring = 0, work_done;
424 	struct dp_pdev *pdev = NULL;
425 
426 	for (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
427 		pdev = dp_get_pdev_for_lmac_id(soc, ring);
428 		if (!pdev)
429 			continue;
430 		work_done = dp_mon_process(soc, ring, quota);
431 
432 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
433 			  FL("Reaped %d descs from Monitor rings"),
434 			  work_done);
435 	}
436 }
437 
438 /*
439  * dp_mon_reap_timer_handler()- timer to reap monitor rings,
440  * required as we are not getting ppdu end interrupts
441  * @arg: SoC Handle
442  *
443  * Return: None
444  *
445  */
446 static void dp_mon_reap_timer_handler(void *arg)
447 {
448 	struct dp_soc *soc = (struct dp_soc *)arg;
449 
450 	dp_service_mon_rings(soc, QCA_NAPI_BUDGET);
451 
452 	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
453 }
454 
455 #ifndef REMOVE_PKT_LOG
456 /**
457  * dp_pkt_log_init() - API to initialize packet log
458  * @soc_hdl: Datapath soc handle
459  * @pdev_id: id of data path pdev handle
460  * @scn: HIF context
461  *
462  * Return: none
463  */
464 void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn)
465 {
466 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
467 	struct dp_pdev *handle =
468 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
469 
470 	if (!handle) {
471 		dp_err("pdev handle is NULL");
472 		return;
473 	}
474 
475 	if (handle->pkt_log_init) {
476 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
477 			  "%s: Packet log not initialized", __func__);
478 		return;
479 	}
480 
481 	pktlog_sethandle(&handle->pl_dev, scn);
482 	pktlog_set_pdev_id(handle->pl_dev, pdev_id);
483 	pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);
484 
485 	if (pktlogmod_init(scn)) {
486 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
487 			  "%s: pktlogmod_init failed", __func__);
488 		handle->pkt_log_init = false;
489 	} else {
490 		handle->pkt_log_init = true;
491 	}
492 }
493 
494 /**
495  * dp_pkt_log_con_service() - connect packet log service
496  * @soc_hdl: Datapath soc handle
497  * @pdev_id: id of data path pdev handle
498  * @scn: device context
499  *
500  * Return: none
501  */
502 static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
503 				   uint8_t pdev_id, void *scn)
504 {
505 	dp_pkt_log_init(soc_hdl, pdev_id, scn);
506 	pktlog_htc_attach();
507 }
508 
509 /**
510  * dp_get_num_rx_contexts() - get number of RX contexts
511  * @soc_hdl: cdp opaque soc handle
512  *
513  * Return: number of RX contexts
514  */
515 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
516 {
517 	int i;
518 	int num_rx_contexts = 0;
519 
520 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
521 
522 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
523 		if (wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i))
524 			num_rx_contexts++;
525 
526 	return num_rx_contexts;
527 }
528 
529 /**
530  * dp_pktlogmod_exit() - API to cleanup pktlog info
531  * @pdev: Pdev handle
532  *
533  * Return: none
534  */
535 static void dp_pktlogmod_exit(struct dp_pdev *pdev)
536 {
537 	struct dp_soc *soc = pdev->soc;
538 	struct hif_opaque_softc *scn = soc->hif_handle;
539 
540 	if (!scn) {
541 		dp_err("Invalid hif(scn) handle");
542 		return;
543 	}
544 
545 	/* stop mon_reap_timer if it has been started */
546 	if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED &&
547 	    soc->reap_timer_init && (!dp_is_enable_reap_timer_non_pkt(pdev)))
548 		qdf_timer_sync_cancel(&soc->mon_reap_timer);
549 
550 	pktlogmod_exit(scn);
551 	pdev->pkt_log_init = false;
552 }
553 #endif
554 #else
555 static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
556 
557 /**
558  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
559  * @soc: pointer to dp_soc handle
560  * @intr_ctx_num: interrupt context number for which mon mask is needed
561  *
562  * Return: mon mask value
563  */
564 static inline
565 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
566 {
567 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
568 }
569 
570 /*
571  * dp_service_lmac_rings()- timer to reap lmac rings
572  * @arg: SoC Handle
573  *
574  * Return: None
575  *
576  */
577 static void dp_service_lmac_rings(void *arg)
578 {
579 	struct dp_soc *soc = (struct dp_soc *)arg;
580 	int ring = 0, i;
581 	struct dp_pdev *pdev = NULL;
582 	union dp_rx_desc_list_elem_t *desc_list = NULL;
583 	union dp_rx_desc_list_elem_t *tail = NULL;
584 
585 	/* Process LMAC interrupts */
586 	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
587 		int mac_for_pdev = ring;
588 		struct dp_srng *rx_refill_buf_ring;
589 
590 		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
591 		if (!pdev)
592 			continue;
593 
594 		rx_refill_buf_ring = &soc->rx_refill_buf_ring[mac_for_pdev];
595 
596 		dp_mon_process(soc, mac_for_pdev,
597 			       QCA_NAPI_BUDGET);
598 
599 		for (i = 0;
600 		     i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
601 			dp_rxdma_err_process(&soc->intr_ctx[i], soc,
602 					     mac_for_pdev,
603 					     QCA_NAPI_BUDGET);
604 
605 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF,
606 						  mac_for_pdev))
607 			dp_rx_buffers_replenish(soc, mac_for_pdev,
608 						rx_refill_buf_ring,
609 						&soc->rx_desc_buf[mac_for_pdev],
610 						0, &desc_list, &tail);
611 	}
612 
613 	qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
614 }
615 
616 #endif
617 
618 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
619 				 uint8_t vdev_id,
620 				 uint8_t *peer_mac,
621 				 uint8_t *mac_addr,
622 				 enum cdp_txrx_ast_entry_type type,
623 				 uint32_t flags)
624 {
625 	int ret = -1;
626 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
627 						       peer_mac, 0, vdev_id);
628 
629 	if (!peer || peer->delete_in_progress) {
630 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
631 			  "%s: Peer is NULL!\n", __func__);
632 		goto fail;
633 	}
634 
635 	ret = dp_peer_add_ast((struct dp_soc *)soc_hdl,
636 			      peer,
637 			      mac_addr,
638 			      type,
639 			      flags);
640 fail:
641 	if (peer)
642 		dp_peer_unref_delete(peer);
643 
644 	return ret;
645 }
646 
647 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
648 						uint8_t vdev_id,
649 						uint8_t *peer_mac,
650 						uint8_t *wds_macaddr,
651 						uint32_t flags)
652 {
653 	int status = -1;
654 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
655 	struct dp_ast_entry  *ast_entry = NULL;
656 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
657 						       peer_mac, 0, vdev_id);
658 
659 	if (!peer || peer->delete_in_progress) {
660 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
661 			  "%s: Peer is NULL!\n", __func__);
662 		goto fail;
663 	}
664 
665 	qdf_spin_lock_bh(&soc->ast_lock);
666 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
667 						    peer->vdev->pdev->pdev_id);
668 
669 	if (ast_entry) {
670 		status = dp_peer_update_ast(soc,
671 					    peer,
672 					    ast_entry, flags);
673 	}
674 	qdf_spin_unlock_bh(&soc->ast_lock);
675 
676 fail:
677 	if (peer)
678 		dp_peer_unref_delete(peer);
679 
680 	return status;
681 }
682 
683 /*
684  * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry
685  * @soc_hdl:		Datapath SOC handle
686  * @wds_macaddr:	WDS entry MAC Address
687  * @peer_mac_addr:	MAC address of the peer the WDS entry belongs to
688  * @vdev_id:		id of vdev handle
689  * Return: QDF_STATUS
690  */
691 static QDF_STATUS dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
692 					 uint8_t *wds_macaddr,
693 					 uint8_t *peer_mac_addr,
694 					 uint8_t vdev_id)
695 {
696 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
697 	struct dp_ast_entry *ast_entry = NULL;
698 	struct dp_ast_entry *tmp_ast_entry;
699 	struct dp_peer *peer;
700 	struct dp_pdev *pdev;
701 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
702 
703 	if (!vdev)
704 		return QDF_STATUS_E_FAILURE;
705 
706 	pdev = vdev->pdev;
707 
708 	if (peer_mac_addr) {
709 		peer = dp_peer_find_hash_find(soc, peer_mac_addr,
710 					      0, vdev->vdev_id);
711 		if (!peer) {
712 			return QDF_STATUS_E_FAILURE;
713 		}
714 
715 		if (peer->delete_in_progress) {
716 			dp_peer_unref_delete(peer);
717 			return QDF_STATUS_E_FAILURE;
718 		}
719 
720 		qdf_spin_lock_bh(&soc->ast_lock);
721 		DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) {
722 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
723 			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
724 				dp_peer_del_ast(soc, ast_entry);
725 		}
726 		qdf_spin_unlock_bh(&soc->ast_lock);
727 		dp_peer_unref_delete(peer);
728 
729 		return QDF_STATUS_SUCCESS;
730 	} else if (wds_macaddr) {
731 		qdf_spin_lock_bh(&soc->ast_lock);
732 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
733 							    pdev->pdev_id);
734 
735 		if (ast_entry) {
736 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
737 			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
738 				dp_peer_del_ast(soc, ast_entry);
739 		}
740 		qdf_spin_unlock_bh(&soc->ast_lock);
741 	}
742 
743 	return QDF_STATUS_SUCCESS;
744 }
745 
746 /*
747  * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entries
748  * @soc_hdl:		Datapath SOC handle
749  * @vdev_id:		id of vdev handle
750  * Return: QDF_STATUS
751  */
752 static QDF_STATUS
753 dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
754 			     uint8_t vdev_id)
755 {
756 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
757 	struct dp_pdev *pdev;
758 	struct dp_vdev *vdev;
759 	struct dp_peer *peer;
760 	struct dp_ast_entry *ase, *temp_ase;
761 	int i;
762 
763 	qdf_spin_lock_bh(&soc->ast_lock);
764 
765 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
766 		pdev = soc->pdev_list[i];
767 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
768 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
769 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
770 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
771 					if ((ase->type ==
772 						CDP_TXRX_AST_TYPE_WDS_HM) ||
773 					    (ase->type ==
774 						CDP_TXRX_AST_TYPE_WDS_HM_SEC))
775 						dp_peer_del_ast(soc, ase);
776 				}
777 			}
778 		}
779 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
780 	}
781 
782 	qdf_spin_unlock_bh(&soc->ast_lock);
783 
784 	return QDF_STATUS_SUCCESS;
785 }
786 
787 /*
788  * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entries
789  * @soc_hdl:		Datapath SOC handle
790  *
791  * Return: None
792  */
793 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
794 {
795 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
796 	struct dp_pdev *pdev;
797 	struct dp_vdev *vdev;
798 	struct dp_peer *peer;
799 	struct dp_ast_entry *ase, *temp_ase;
800 	int i;
801 
802 	qdf_spin_lock_bh(&soc->ast_lock);
803 
804 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
805 		pdev = soc->pdev_list[i];
806 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
807 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
808 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
809 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
810 					if ((ase->type ==
811 						CDP_TXRX_AST_TYPE_STATIC) ||
812 						(ase->type ==
813 						 CDP_TXRX_AST_TYPE_SELF) ||
814 						(ase->type ==
815 						 CDP_TXRX_AST_TYPE_STA_BSS))
816 						continue;
817 					dp_peer_del_ast(soc, ase);
818 				}
819 			}
820 		}
821 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
822 	}
823 
824 	qdf_spin_unlock_bh(&soc->ast_lock);
825 }
826 
827 /**
828  * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
829  *                                       and return ast entry information
830  *                                       of the first ast entry found in
831  *                                       the table with the given mac address
832  *
833  * @soc_hdl : data path soc handle
834  * @ast_mac_addr : AST entry mac address
835  * @ast_entry_info : ast entry information
836  *
837  * return : true if ast entry found with ast_mac_addr
838  *          false if ast entry not found
839  */
840 static bool dp_peer_get_ast_info_by_soc_wifi3
841 	(struct cdp_soc_t *soc_hdl,
842 	 uint8_t *ast_mac_addr,
843 	 struct cdp_ast_entry_info *ast_entry_info)
844 {
845 	struct dp_ast_entry *ast_entry = NULL;
846 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
847 
848 	qdf_spin_lock_bh(&soc->ast_lock);
849 
850 	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
851 	if (!ast_entry || !ast_entry->peer) {
852 		qdf_spin_unlock_bh(&soc->ast_lock);
853 		return false;
854 	}
855 	if (ast_entry->delete_in_progress && !ast_entry->callback) {
856 		qdf_spin_unlock_bh(&soc->ast_lock);
857 		return false;
858 	}
859 	ast_entry_info->type = ast_entry->type;
860 	ast_entry_info->pdev_id = ast_entry->pdev_id;
861 	ast_entry_info->vdev_id = ast_entry->peer->vdev->vdev_id;
862 	ast_entry_info->peer_id = ast_entry->peer->peer_id;
863 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
864 		     &ast_entry->peer->mac_addr.raw[0],
865 		     QDF_MAC_ADDR_SIZE);
866 	qdf_spin_unlock_bh(&soc->ast_lock);
867 	return true;
868 }
869 
870 /**
871  * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
872  *                                          and return ast entry information
873  *                                          if mac address and pdev_id match
874  *
875  * @soc_hdl : data path soc handle
876  * @ast_mac_addr : AST entry mac address
877  * @pdev_id : pdev_id
878  * @ast_entry_info : ast entry information
879  *
880  * return : true if ast entry found with ast_mac_addr
881  *          false if ast entry not found
882  */
883 static bool dp_peer_get_ast_info_by_pdevid_wifi3
884 		(struct cdp_soc_t *soc_hdl,
885 		 uint8_t *ast_mac_addr,
886 		 uint8_t pdev_id,
887 		 struct cdp_ast_entry_info *ast_entry_info)
888 {
889 	struct dp_ast_entry *ast_entry;
890 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
891 
892 	qdf_spin_lock_bh(&soc->ast_lock);
893 
894 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr, pdev_id);
895 
896 	if (!ast_entry || !ast_entry->peer) {
897 		qdf_spin_unlock_bh(&soc->ast_lock);
898 		return false;
899 	}
900 	if (ast_entry->delete_in_progress && !ast_entry->callback) {
901 		qdf_spin_unlock_bh(&soc->ast_lock);
902 		return false;
903 	}
904 	ast_entry_info->type = ast_entry->type;
905 	ast_entry_info->pdev_id = ast_entry->pdev_id;
906 	ast_entry_info->vdev_id = ast_entry->peer->vdev->vdev_id;
907 	ast_entry_info->peer_id = ast_entry->peer->peer_id;
908 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
909 		     &ast_entry->peer->mac_addr.raw[0],
910 		     QDF_MAC_ADDR_SIZE);
911 	qdf_spin_unlock_bh(&soc->ast_lock);
912 	return true;
913 }
914 
915 /**
916  * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table
917  *                            with the given mac address
918  *
919  * @soc_handle : data path soc handle
920  * @mac_addr : AST entry mac address
921  * @callback : callback function to be called on ast delete response from FW
922  * @cookie : argument to be passed to callback
923  *
924  * return : QDF_STATUS_SUCCESS if ast entry found with mac_addr and delete
925  *          is sent
926  *          QDF_STATUS_E_INVAL if ast entry not found
927  */
928 static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
929 					       uint8_t *mac_addr,
930 					       txrx_ast_free_cb callback,
931 					       void *cookie)
932 
933 {
934 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
935 	struct dp_ast_entry *ast_entry = NULL;
936 	txrx_ast_free_cb cb = NULL;
937 	void *arg = NULL;
938 
939 	qdf_spin_lock_bh(&soc->ast_lock);
940 	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
941 	if (!ast_entry) {
942 		qdf_spin_unlock_bh(&soc->ast_lock);
943 		return QDF_STATUS_E_INVAL;
944 	}
945 
946 	if (ast_entry->callback) {
947 		cb = ast_entry->callback;
948 		arg = ast_entry->cookie;
949 	}
950 
951 	ast_entry->callback = callback;
952 	ast_entry->cookie = cookie;
953 
954 	/*
955 	 * If delete_in_progress is set, an AST delete was already sent to
956 	 * the target and the host is waiting for the response; do not send
957 	 * a delete again
958 	 */
959 	if (!ast_entry->delete_in_progress)
960 		dp_peer_del_ast(soc, ast_entry);
961 
962 	qdf_spin_unlock_bh(&soc->ast_lock);
963 	if (cb) {
964 		cb(soc->ctrl_psoc,
965 		   dp_soc_to_cdp_soc(soc),
966 		   arg,
967 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
968 	}
969 	return QDF_STATUS_SUCCESS;
970 }
971 
972 /**
973  * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
974  *                                   table if mac address and pdev_id match
975  *
976  * @soc_handle : data path soc handle
977  * @mac_addr : AST entry mac address
978  * @pdev_id : pdev id
979  * @callback : callback function to be called on ast delete response from FW
980  * @cookie : argument to be passed to callback
981  *
982  * return : QDF_STATUS_SUCCESS if ast entry found with mac_addr and delete
983  *          is sent
984  *          QDF_STATUS_E_INVAL if ast entry not found
985  */
986 
987 static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
988 						uint8_t *mac_addr,
989 						uint8_t pdev_id,
990 						txrx_ast_free_cb callback,
991 						void *cookie)
992 
993 {
994 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
995 	struct dp_ast_entry *ast_entry;
996 	txrx_ast_free_cb cb = NULL;
997 	void *arg = NULL;
998 
999 	qdf_spin_lock_bh(&soc->ast_lock);
1000 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);
1001 
1002 	if (!ast_entry) {
1003 		qdf_spin_unlock_bh(&soc->ast_lock);
1004 		return QDF_STATUS_E_INVAL;
1005 	}
1006 
1007 	if (ast_entry->callback) {
1008 		cb = ast_entry->callback;
1009 		arg = ast_entry->cookie;
1010 	}
1011 
1012 	ast_entry->callback = callback;
1013 	ast_entry->cookie = cookie;
1014 
1015 	/*
1016 	 * If delete_in_progress is set, an AST delete was already sent to
1017 	 * the target and the host is waiting for the response; do not send
1018 	 * a delete again
1019 	 */
1020 	if (!ast_entry->delete_in_progress)
1021 		dp_peer_del_ast(soc, ast_entry);
1022 
1023 	qdf_spin_unlock_bh(&soc->ast_lock);
1024 
1025 	if (cb) {
1026 		cb(soc->ctrl_psoc,
1027 		   dp_soc_to_cdp_soc(soc),
1028 		   arg,
1029 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1030 	}
1031 	return QDF_STATUS_SUCCESS;
1032 }
1033 
1034 /**
1035  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
1036  * @ring_num: ring num of the ring being queried
1037  * @grp_mask: the grp_mask array for the ring type in question.
1038  *
1039  * The grp_mask array is indexed by group number and the bit fields correspond
1040  * to ring numbers.  We are finding which interrupt group a ring belongs to.
1041  *
1042  * Return: the index in the grp_mask array with the ring number.
1043  * -QDF_STATUS_E_NOENT if no entry is found
1044  */
1045 static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
1046 {
1047 	int ext_group_num;
1048 	int mask = 1 << ring_num;
1049 
1050 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
1051 	     ext_group_num++) {
1052 		if (mask & grp_mask[ext_group_num])
1053 			return ext_group_num;
1054 	}
1055 
1056 	return -QDF_STATUS_E_NOENT;
1057 }
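/*
 * Worked example with hypothetical masks: for ring_num = 2, mask is
 * 1 << 2 = 0x4. With grp_mask = {0x3, 0x4, 0x0, ...}, group 0 (0x3) does
 * not contain bit 2 but group 1 (0x4) does, so 1 is returned as the
 * ext_group servicing that ring.
 */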
1058 
1059 static int dp_srng_calculate_msi_group(struct dp_soc *soc,
1060 				       enum hal_ring_type ring_type,
1061 				       int ring_num)
1062 {
1063 	int *grp_mask;
1064 
1065 	switch (ring_type) {
1066 	case WBM2SW_RELEASE:
1067 		/* dp_tx_comp_handler - soc->tx_comp_ring */
1068 		if (ring_num < 3)
1069 			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
1070 
1071 		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
1072 		else if (ring_num == 3) {
1073 			/* sw treats this as a separate ring type */
1074 			grp_mask = &soc->wlan_cfg_ctx->
1075 				int_rx_wbm_rel_ring_mask[0];
1076 			ring_num = 0;
1077 		} else {
1078 			qdf_assert(0);
1079 			return -QDF_STATUS_E_NOENT;
1080 		}
1081 	break;
1082 
1083 	case REO_EXCEPTION:
1084 		/* dp_rx_err_process - &soc->reo_exception_ring */
1085 		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
1086 	break;
1087 
1088 	case REO_DST:
1089 		/* dp_rx_process - soc->reo_dest_ring */
1090 		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
1091 	break;
1092 
1093 	case REO_STATUS:
1094 		/* dp_reo_status_ring_handler - soc->reo_status_ring */
1095 		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
1096 	break;
1097 
1098 	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
1099 	case RXDMA_MONITOR_STATUS:
1100 	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
1101 	case RXDMA_MONITOR_DST:
1102 		/* dp_mon_process */
1103 		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
1104 	break;
1105 	case RXDMA_DST:
1106 		/* dp_rxdma_err_process */
1107 		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
1108 	break;
1109 
1110 	case RXDMA_BUF:
1111 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
1112 	break;
1113 
1114 	case RXDMA_MONITOR_BUF:
1115 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
1116 	break;
1117 
1118 	case TCL_DATA:
1119 	/* CMD_CREDIT_RING is used as command in 8074 and credit in 9000 */
1120 	case TCL_CMD_CREDIT:
1121 	case REO_CMD:
1122 	case SW2WBM_RELEASE:
1123 	case WBM_IDLE_LINK:
1124 		/* normally empty SW_TO_HW rings */
1125 		return -QDF_STATUS_E_NOENT;
1126 	break;
1127 
1128 	case TCL_STATUS:
1129 	case REO_REINJECT:
1130 		/* misc unused rings */
1131 		return -QDF_STATUS_E_NOENT;
1132 	break;
1133 
1134 	case CE_SRC:
1135 	case CE_DST:
1136 	case CE_DST_STATUS:
1137 		/* CE_rings - currently handled by hif */
1138 	default:
1139 		return -QDF_STATUS_E_NOENT;
1140 	break;
1141 	}
1142 
1143 	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
1144 }
1145 
1146 static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
1147 			      *ring_params, int ring_type, int ring_num)
1148 {
1149 	int msi_group_number;
1150 	int msi_data_count;
1151 	int ret;
1152 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
1153 
1154 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1155 					    &msi_data_count, &msi_data_start,
1156 					    &msi_irq_start);
1157 
1158 	if (ret)
1159 		return;
1160 
1161 	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
1162 						       ring_num);
1163 	if (msi_group_number < 0) {
1164 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
1165 			FL("ring not part of an ext_group; ring_type: %d,ring_num %d"),
1166 			ring_type, ring_num);
1167 		ring_params->msi_addr = 0;
1168 		ring_params->msi_data = 0;
1169 		return;
1170 	}
1171 
1172 	if (msi_group_number > msi_data_count) {
1173 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
1174 			FL("2 msi_groups will share an msi; msi_group_num %d"),
1175 			msi_group_number);
1176 
1177 		QDF_ASSERT(0);
1178 	}
1179 
1180 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
1181 
1182 	ring_params->msi_addr = addr_low;
1183 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
1184 	ring_params->msi_data = (msi_group_number % msi_data_count)
1185 		+ msi_data_start;
1186 	ring_params->flags |= HAL_SRNG_MSI_INTR;
1187 }
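/*
 * Example of the vector assignment above (hypothetical numbers): with
 * msi_data_count = 4, msi_data_start = 1 and msi_group_number = 5, the
 * warning path fires (5 > 4, so two groups alias onto one vector) and
 * msi_data = (5 % 4) + 1 = 2.
 */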
1188 
1189 /**
1190  * dp_print_ast_stats() - Dump AST table contents
1191  * @soc: Datapath soc handle
1192  *
1193  * return void
1194  */
1195 #ifdef FEATURE_AST
1196 void dp_print_ast_stats(struct dp_soc *soc)
1197 {
1198 	uint8_t i;
1199 	uint8_t num_entries = 0;
1200 	struct dp_vdev *vdev;
1201 	struct dp_pdev *pdev;
1202 	struct dp_peer *peer;
1203 	struct dp_ast_entry *ase, *tmp_ase;
1204 	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
1205 			"NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS", "BSS",
1206 			"DA", "HMWDS_SEC"};
1207 
1208 	DP_PRINT_STATS("AST Stats:");
1209 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
1210 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
1211 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
1212 	DP_PRINT_STATS("	Entries MAP ERR  = %d", soc->stats.ast.map_err);
1213 	DP_PRINT_STATS("	Entries Mismatch ERR  = %d",
1214 		       soc->stats.ast.ast_mismatch);
1215 
1216 	DP_PRINT_STATS("AST Table:");
1217 
1218 	qdf_spin_lock_bh(&soc->ast_lock);
1219 	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
1220 		pdev = soc->pdev_list[i];
1221 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
1222 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
1223 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
1224 				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
1225 				    DP_PRINT_STATS("%6d mac_addr = %pM"
1226 					    " peer_mac_addr = %pM"
1227 					    " peer_id = %u"
1228 					    " type = %s"
1229 					    " next_hop = %d"
1230 					    " is_active = %d"
1231 					    " ast_idx = %d"
1232 					    " ast_hash = %d"
1233 					    " delete_in_progress = %d"
1234 					    " pdev_id = %d"
1235 					    " vdev_id = %d",
1236 					    ++num_entries,
1237 					    ase->mac_addr.raw,
1238 					    ase->peer->mac_addr.raw,
1239 					    ase->peer->peer_id,
1240 					    type[ase->type],
1241 					    ase->next_hop,
1242 					    ase->is_active,
1243 					    ase->ast_idx,
1244 					    ase->ast_hash_value,
1245 					    ase->delete_in_progress,
1246 					    ase->pdev_id,
1247 					    vdev->vdev_id);
1248 				}
1249 			}
1250 		}
1251 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
1252 	}
1253 	qdf_spin_unlock_bh(&soc->ast_lock);
1254 }
1255 #else
1256 void dp_print_ast_stats(struct dp_soc *soc)
1257 {
1258 	DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST");
1259 	return;
1260 }
1261 #endif
1262 
1263 /**
1264  *  dp_print_peer_table() - Dump all Peer stats
1265  * @vdev: Datapath Vdev handle
1266  *
1267  * return void
1268  */
1269 static void dp_print_peer_table(struct dp_vdev *vdev)
1270 {
1271 	struct dp_peer *peer = NULL;
1272 
1273 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
1274 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
1275 		if (!peer) {
1276 			DP_PRINT_STATS("Invalid Peer");
1277 			return;
1278 		}
1279 		DP_PRINT_STATS("    peer_mac_addr = %pM"
1280 			       " nawds_enabled = %d"
1281 			       " bss_peer = %d"
1282 			       " wds_enabled = %d"
1283 			       " tx_cap_enabled = %d"
1284 			       " rx_cap_enabled = %d"
1285 			       " delete in progress = %d"
1286 			       " peer id = %d",
1287 			       peer->mac_addr.raw,
1288 			       peer->nawds_enabled,
1289 			       peer->bss_peer,
1290 			       peer->wds_enabled,
1291 			       peer->tx_cap_enabled,
1292 			       peer->rx_cap_enabled,
1293 			       peer->delete_in_progress,
1294 			       peer->peer_id);
1295 	}
1296 }
1297 
1298 #ifdef WLAN_DP_PER_RING_TYPE_CONFIG
1299 /**
1300  * dp_srng_configure_interrupt_thresholds() - Retrieve interrupt
1301  * threshold values from the wlan_srng_cfg table for each ring type
1302  * @soc: device handle
1303  * @ring_params: per ring specific parameters
1304  * @ring_type: Ring type
1305  * @ring_num: Ring number for a given ring type
1306  *
1307  * Fill the ring params with the interrupt threshold
1308  * configuration parameters available in the per ring type wlan_srng_cfg
1309  * table.
1310  *
1311  * Return: None
1312  */
1313 static void
1314 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
1315 				       struct hal_srng_params *ring_params,
1316 				       int ring_type, int ring_num,
1317 				       int num_entries)
1318 {
1319 	if (ring_type == WBM2SW_RELEASE && (ring_num == 3)) {
1320 		ring_params->intr_timer_thres_us =
1321 				wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1322 		ring_params->intr_batch_cntr_thres_entries =
1323 				wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1324 	} else {
1325 		ring_params->intr_timer_thres_us =
1326 				soc->wlan_srng_cfg[ring_type].timer_threshold;
1327 		ring_params->intr_batch_cntr_thres_entries =
1328 				soc->wlan_srng_cfg[ring_type].batch_count_threshold;
1329 	}
1330 	ring_params->low_threshold =
1331 			soc->wlan_srng_cfg[ring_type].low_threshold;
1332 	if (ring_params->low_threshold)
1333 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1334 }
1335 #else
1336 static void
1337 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
1338 				       struct hal_srng_params *ring_params,
1339 				       int ring_type, int ring_num,
1340 				       int num_entries)
1341 {
1342 	if (ring_type == REO_DST) {
1343 		ring_params->intr_timer_thres_us =
1344 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1345 		ring_params->intr_batch_cntr_thres_entries =
1346 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1347 	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
1348 		ring_params->intr_timer_thres_us =
1349 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
1350 		ring_params->intr_batch_cntr_thres_entries =
1351 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
1352 	} else {
1353 		ring_params->intr_timer_thres_us =
1354 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1355 		ring_params->intr_batch_cntr_thres_entries =
1356 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1357 	}
1358 
1359 	/* Enable low threshold interrupts for rx buffer rings (regular and
1360 	 * monitor buffer rings).
1361 	 * TODO: See if this is required for any other ring
1362 	 */
1363 	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
1364 	    (ring_type == RXDMA_MONITOR_STATUS)) {
1365 		/* TODO: Setting low threshold to 1/8th of ring size
1366 		 * see if this needs to be configurable
1367 		 */
1368 		ring_params->low_threshold = num_entries >> 3;
1369 		ring_params->intr_timer_thres_us =
1370 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1371 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1372 		ring_params->intr_batch_cntr_thres_entries = 0;
1373 	}
1374 
1375 	/* During initialisation monitor rings are only filled with
1376 	 * MON_BUF_MIN_ENTRIES entries. So low threshold needs to be set to
1377 	 * a value less than that. Low threshold value is reconfigured again
1378 	 * to 1/8th of the ring size when monitor vap is created.
1379 	 */
1380 	if (ring_type == RXDMA_MONITOR_BUF)
1381 		ring_params->low_threshold = MON_BUF_MIN_ENTRIES >> 1;
1382 
1383 	/* In case of PCI chipsets, we don't have PPDU end interrupts,
1384 	 * so the MONITOR STATUS ring is reaped by receiving MSI from srng.
1385 	 * Keep the batch threshold at 4 so that an interrupt is received
1386 	 * for every 4 entries reaped from the MONITOR_STATUS ring
1387 	 */
1388 	if ((ring_type == RXDMA_MONITOR_STATUS) &&
1389 	    (soc->intr_mode == DP_INTR_MSI))
1390 		ring_params->intr_batch_cntr_thres_entries = 4;
1391 }
1392 #endif
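/*
 * Worked example for the default (non-per-ring-type) path above: a
 * 4096-entry RXDMA_BUF ring gets low_threshold = 4096 >> 3 = 512, so a
 * low-threshold interrupt is raised as the entries available to the
 * hardware drop below 512, prompting a refill through the host2rxdma
 * handling in dp_process_lmac_rings().
 */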
1393 
1394 /*
1395  * dp_srng_free() - Free SRNG memory
1396  * @soc  : Data path soc handle
1397  * @srng : SRNG pointer
1398  *
1399  * return: None
1400  */
1401 
1402 static void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng)
1403 {
1404 	if (srng->alloc_size && srng->base_vaddr_unaligned) {
1405 		if (!srng->cached) {
1406 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1407 						srng->alloc_size,
1408 						srng->base_vaddr_unaligned,
1409 						srng->base_paddr_unaligned, 0);
1410 		} else {
1411 			qdf_mem_free(srng->base_vaddr_unaligned);
1412 		}
1413 		srng->alloc_size = 0;
1414 		srng->base_vaddr_unaligned = NULL;
1415 	}
1416 	srng->hal_srng = NULL;
1417 }
1418 
1419 /*
1420  * dp_srng_init() - Initialize SRNG
1421  * @soc  : Data path soc handle
1422  * @srng : SRNG pointer
1423  * @ring_type : Ring Type
1424  * @ring_num: Ring number
1425  * @mac_id: mac_id
1426  *
1427  * return: QDF_STATUS
1428  */
1429 static QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng,
1430 			       int ring_type, int ring_num, int mac_id)
1431 {
1432 	hal_soc_handle_t hal_soc = soc->hal_soc;
1433 	struct hal_srng_params ring_params;
1434 
1435 	if (srng->hal_srng) {
1436 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1437 			  FL("Ring type: %d, num:%d is already initialized"),
1438 			  ring_type, ring_num);
1439 		return QDF_STATUS_SUCCESS;
1440 	}
1441 
1442 	/* memset the srng ring to zero */
1443 	qdf_mem_zero(srng->base_vaddr_unaligned, srng->alloc_size);
1444 
1445 	ring_params.flags = 0;
1446 	ring_params.ring_base_paddr = srng->base_paddr_aligned;
1447 	ring_params.ring_base_vaddr = srng->base_vaddr_aligned;
1448 
1449 	ring_params.num_entries = srng->num_entries;
1450 
1451 	dp_verbose_debug("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
1452 			 ring_type, ring_num,
1453 			 (void *)ring_params.ring_base_vaddr,
1454 			 (void *)ring_params.ring_base_paddr,
1455 			 ring_params.num_entries);
1456 
1457 	if (soc->intr_mode == DP_INTR_MSI) {
1458 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
1459 		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
1460 				 ring_type, ring_num);
1461 
1462 	} else {
1463 		ring_params.msi_data = 0;
1464 		ring_params.msi_addr = 0;
1465 		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
1466 				 ring_type, ring_num);
1467 	}
1468 
1469 	dp_srng_configure_interrupt_thresholds(soc, &ring_params,
1470 					       ring_type, ring_num,
1471 					       srng->num_entries);
1472 
1473 	if (srng->cached)
1474 		ring_params.flags |= HAL_SRNG_CACHED_DESC;
1475 
1476 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
1477 					mac_id, &ring_params);
1478 
1479 	if (!srng->hal_srng) {
1480 		dp_srng_free(soc, srng);
1481 		return QDF_STATUS_E_FAILURE;
1482 	}
1483 
1484 	return QDF_STATUS_SUCCESS;
1485 }
1486 
1487 /*
1488  * dp_srng_alloc() - Allocate memory for SRNG
1489  * @soc  : Data path soc handle
1490  * @srng : SRNG pointer
1491  * @ring_type : Ring Type
1492  * @num_entries: Number of entries
1493  * @cached: cached flag variable
1494  *
1495  * return: QDF_STATUS
1496  */
1497 static QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
1498 				int ring_type, uint32_t num_entries,
1499 				bool cached)
1500 {
1501 	hal_soc_handle_t hal_soc = soc->hal_soc;
1502 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
1503 	uint32_t ring_base_align = 32;
1504 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
1505 
1506 	if (srng->base_vaddr_unaligned) {
1507 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1508 			  FL("Ring type: %d, is already allocated"), ring_type);
1509 		return QDF_STATUS_SUCCESS;
1510 	}
1511 
1512 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
1513 	srng->hal_srng = NULL;
1514 	srng->alloc_size = num_entries * entry_size;
1515 	srng->num_entries = num_entries;
1516 	srng->cached = cached;
1517 
1518 	if (!cached) {
1519 		srng->base_vaddr_aligned =
1520 		    qdf_aligned_mem_alloc_consistent(
1521 					soc->osdev, &srng->alloc_size,
1522 					&srng->base_vaddr_unaligned,
1523 					&srng->base_paddr_unaligned,
1524 					&srng->base_paddr_aligned,
1525 					ring_base_align);
1526 	} else {
1527 		srng->base_vaddr_aligned = qdf_aligned_malloc(
1528 					&srng->alloc_size,
1529 					&srng->base_vaddr_unaligned,
1530 					&srng->base_paddr_unaligned,
1531 					&srng->base_paddr_aligned,
1532 					ring_base_align);
1533 	}
1534 
1535 	if (!srng->base_vaddr_aligned)
1536 		return QDF_STATUS_E_NOMEM;
1537 
1538 	return QDF_STATUS_SUCCESS;
1539 }
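/*
 * Typical call sequence (a sketch; num_entries stands in for the
 * configured ring size): memory is reserved first and only later handed
 * to HAL, e.g. for a soc-level ring:
 *
 *	if (dp_srng_alloc(soc, &soc->reo_status_ring, REO_STATUS,
 *			  num_entries, false) != QDF_STATUS_SUCCESS)
 *		goto fail;
 *	if (dp_srng_init(soc, &soc->reo_status_ring, REO_STATUS, 0, 0) !=
 *	    QDF_STATUS_SUCCESS)
 *		goto fail;
 *
 * The alloc/init split lets dp_srng_deinit() and dp_srng_free() unwind
 * each stage independently.
 */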
1540 
1541 /*
1542  * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
1543  * @soc: DP SOC handle
1544  * @srng: source ring structure
1545  * @ring_type: type of ring
1546  * @ring_num: ring number
1547  *
1548  * Return: None
1549  */
1550 static void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
1551 			   int ring_type, int ring_num)
1552 {
1553 	if (!srng->hal_srng) {
1554 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1555 			  FL("Ring type: %d, num:%d not setup"),
1556 			  ring_type, ring_num);
1557 		return;
1558 	}
1559 
1560 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
1561 	srng->hal_srng = NULL;
1562 }
1563 
1564 /* TODO: Need this interface from HIF */
1565 void *hif_get_hal_handle(struct hif_opaque_softc *hif_handle);
1566 
1567 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
1568 int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
1569 			 hal_ring_handle_t hal_ring_hdl)
1570 {
1571 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
1572 	uint32_t hp, tp;
1573 	uint8_t ring_id;
1574 
1575 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
1576 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
1577 
1578 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
1579 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_START);
1580 
1581 	return hal_srng_access_start(hal_soc, hal_ring_hdl);
1582 }
1583 
1584 void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
1585 			hal_ring_handle_t hal_ring_hdl)
1586 {
1587 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
1588 	uint32_t hp, tp;
1589 	uint8_t ring_id;
1590 
1591 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
1592 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
1593 
1594 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
1595 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_END);
1596 
1597 	return hal_srng_access_end(hal_soc, hal_ring_hdl);
1598 }
1599 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
1600 
1601 /*
1602  * dp_should_timer_irq_yield() - Decide if the bottom half should yield
1603  * @soc: DP soc handle
1604  * @work_done: work done in softirq context
1605  * @start_time: start time for the softirq
1606  *
1607  * Return: enum with yield code
1608  */
1609 static enum timer_yield_status
1610 dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
1611 			  uint64_t start_time)
1612 {
1613 	uint64_t cur_time = qdf_get_log_timestamp();
1614 
1615 	if (!work_done)
1616 		return DP_TIMER_WORK_DONE;
1617 
1618 	if (cur_time - start_time > DP_MAX_TIMER_EXEC_TIME_TICKS)
1619 		return DP_TIMER_TIME_EXHAUST;
1620 
1621 	return DP_TIMER_NO_YIELD;
1622 }
1623 
1624 /**
1625  * dp_process_lmac_rings() - Process LMAC rings
1626  * @int_ctx: interrupt context
1627  * @total_budget: budget of work which can be done
1628  *
1629  * Return: work done
1630  */
1631 static int dp_process_lmac_rings(struct dp_intr *int_ctx, int total_budget)
1632 {
1633 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
1634 	struct dp_soc *soc = int_ctx->soc;
1635 	uint32_t remaining_quota = total_budget;
1636 	struct dp_pdev *pdev = NULL;
1637 	uint32_t work_done  = 0;
1638 	int budget = total_budget;
1639 	int ring = 0;
1640 
1641 	/* Process LMAC interrupts */
1642 	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
1643 		int mac_for_pdev = ring;
1644 
1645 		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
1646 		if (!pdev)
1647 			continue;
1648 		if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
1649 			work_done = dp_mon_process(soc, mac_for_pdev,
1650 						   remaining_quota);
1651 			if (work_done)
1652 				intr_stats->num_rx_mon_ring_masks++;
1653 			budget -= work_done;
1654 			if (budget <= 0)
1655 				goto budget_done;
1656 			remaining_quota = budget;
1657 		}
1658 
1659 		if (int_ctx->rxdma2host_ring_mask &
1660 				(1 << mac_for_pdev)) {
1661 			work_done = dp_rxdma_err_process(int_ctx, soc,
1662 							 mac_for_pdev,
1663 							 remaining_quota);
1664 			if (work_done)
1665 				intr_stats->num_rxdma2host_ring_masks++;
1666 			budget -=  work_done;
1667 			if (budget <= 0)
1668 				goto budget_done;
1669 			remaining_quota = budget;
1670 		}
1671 
1672 		if (int_ctx->host2rxdma_ring_mask &
1673 					(1 << mac_for_pdev)) {
1674 			union dp_rx_desc_list_elem_t *desc_list = NULL;
1675 			union dp_rx_desc_list_elem_t *tail = NULL;
1676 			struct dp_srng *rx_refill_buf_ring;
1677 
1678 			if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
1679 				rx_refill_buf_ring =
1680 					&soc->rx_refill_buf_ring[mac_for_pdev];
1681 			else
1682 				rx_refill_buf_ring =
1683 					&soc->rx_refill_buf_ring[pdev->lmac_id];
1684 
1685 			intr_stats->num_host2rxdma_ring_masks++;
1686 			DP_STATS_INC(pdev, replenish.low_thresh_intrs,
1687 				     1);
1688 			dp_rx_buffers_replenish(soc, mac_for_pdev,
1689 						rx_refill_buf_ring,
1690 						&soc->rx_desc_buf[mac_for_pdev],
1691 						0, &desc_list, &tail);
1692 		}
1693 	}
1694 
1695 budget_done:
1696 	return total_budget - budget;
1697 }
1698 
1699 /*
1700  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
1701  * @dp_ctx: DP interrupt context (struct dp_intr)
1702  * @dp_budget: Number of frames/descriptors that can be processed in one shot
1703  *
1704  * Return: remaining budget/quota for the soc device
1705  */
1706 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
1707 {
1708 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
1709 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
1710 	struct dp_soc *soc = int_ctx->soc;
1711 	int ring = 0;
1712 	uint32_t work_done  = 0;
1713 	int budget = dp_budget;
1714 	uint8_t tx_mask = int_ctx->tx_ring_mask;
1715 	uint8_t rx_mask = int_ctx->rx_ring_mask;
1716 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
1717 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
1718 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
1719 	uint32_t remaining_quota = dp_budget;
1720 
1721 	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x\n",
1722 			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
1723 			 reo_status_mask,
1724 			 int_ctx->rx_mon_ring_mask,
1725 			 int_ctx->host2rxdma_ring_mask,
1726 			 int_ctx->rxdma2host_ring_mask);
1727 
1728 	/* Process Tx completion interrupts first to return back buffers */
1729 	while (tx_mask) {
1730 		if (tx_mask & 0x1) {
1731 			work_done = dp_tx_comp_handler(int_ctx,
1732 						       soc,
1733 						       soc->tx_comp_ring[ring].hal_srng,
1734 						       ring, remaining_quota);
1735 
1736 			if (work_done) {
1737 				intr_stats->num_tx_ring_masks[ring]++;
1738 				dp_verbose_debug("tx mask 0x%x ring %d, budget %d, work_done %d",
1739 						 tx_mask, ring, budget,
1740 						 work_done);
1741 			}
1742 
1743 			budget -= work_done;
1744 			if (budget <= 0)
1745 				goto budget_done;
1746 
1747 			remaining_quota = budget;
1748 		}
1749 		tx_mask = tx_mask >> 1;
1750 		ring++;
1751 	}
1752 
1753 	/* Process REO Exception ring interrupt */
1754 	if (rx_err_mask) {
1755 		work_done = dp_rx_err_process(int_ctx, soc,
1756 					      soc->reo_exception_ring.hal_srng,
1757 					      remaining_quota);
1758 
1759 		if (work_done) {
1760 			intr_stats->num_rx_err_ring_masks++;
1761 			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
1762 					 work_done, budget);
1763 		}
1764 
		budget -= work_done;
		if (budget <= 0)
			goto budget_done;
		remaining_quota = budget;
1770 	}
1771 
1772 	/* Process Rx WBM release ring interrupt */
1773 	if (rx_wbm_rel_mask) {
1774 		work_done = dp_rx_wbm_err_process(int_ctx, soc,
1775 						  soc->rx_rel_ring.hal_srng,
1776 						  remaining_quota);
1777 
1778 		if (work_done) {
1779 			intr_stats->num_rx_wbm_rel_ring_masks++;
1780 			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
1781 					 work_done, budget);
1782 		}
1783 
		budget -= work_done;
		if (budget <= 0)
			goto budget_done;
		remaining_quota = budget;
1789 	}
1790 
1791 	/* Process Rx interrupts */
1792 	if (rx_mask) {
1793 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
1794 			if (!(rx_mask & (1 << ring)))
1795 				continue;
1796 			work_done = dp_rx_process(int_ctx,
1797 						  soc->reo_dest_ring[ring].hal_srng,
1798 						  ring,
1799 						  remaining_quota);
1800 			if (work_done) {
1801 				intr_stats->num_rx_ring_masks[ring]++;
1802 				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
1803 						 rx_mask, ring,
1804 						 work_done, budget);
				budget -= work_done;
1806 				if (budget <= 0)
1807 					goto budget_done;
1808 				remaining_quota = budget;
1809 			}
1810 		}
1811 	}
1812 
1813 	if (reo_status_mask) {
1814 		if (dp_reo_status_ring_handler(int_ctx, soc))
1815 			int_ctx->intr_stats.num_reo_status_ring_masks++;
1816 	}
1817 
1818 	work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
1819 	if (work_done) {
		budget -= work_done;
1821 		if (budget <= 0)
1822 			goto budget_done;
1823 		remaining_quota = budget;
1824 	}
1825 
1826 	qdf_lro_flush(int_ctx->lro_ctx);
1827 	intr_stats->num_masks++;
1828 
1829 budget_done:
1830 	return dp_budget - budget;
1831 }
1832 
/*
 * dp_interrupt_timer() - timer poll for interrupts
 * @arg: SoC Handle
 *
 * Return: none
 */
1840 static void dp_interrupt_timer(void *arg)
1841 {
1842 	struct dp_soc *soc = (struct dp_soc *) arg;
1843 	enum timer_yield_status yield = DP_TIMER_NO_YIELD;
	uint32_t work_done = 0, total_work_done = 0;
1845 	int budget = 0xffff;
1846 	uint32_t remaining_quota = budget;
1847 	uint64_t start_time;
1848 	int i;
1849 
1850 	if (!qdf_atomic_read(&soc->cmn_init_done))
1851 		return;
1852 
1853 	start_time = qdf_get_log_timestamp();
1854 
1855 	while (yield == DP_TIMER_NO_YIELD) {
1856 		for (i = 0;
1857 		     i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1858 			if (!soc->intr_ctx[i].rx_mon_ring_mask)
1859 				continue;
1860 
1861 			work_done = dp_process_lmac_rings(&soc->intr_ctx[i],
1862 							  remaining_quota);
1863 			if (work_done) {
				budget -= work_done;
1865 				if (budget <= 0) {
1866 					yield = DP_TIMER_WORK_EXHAUST;
1867 					goto budget_done;
1868 				}
1869 				remaining_quota = budget;
1870 				total_work_done += work_done;
1871 			}
1872 		}
1873 
1874 		yield = dp_should_timer_irq_yield(soc, total_work_done,
1875 						  start_time);
1876 		total_work_done = 0;
1877 	}
1878 
1879 budget_done:
1880 	if (yield == DP_TIMER_WORK_EXHAUST ||
1881 	    yield == DP_TIMER_TIME_EXHAUST)
1882 		qdf_timer_mod(&soc->int_timer, 1);
1883 	else
1884 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
1885 }
1886 
1887 /*
1888  * dp_soc_attach_poll() - Register handlers for DP interrupts
1889  * @txrx_soc: DP SOC handle
1890  *
 * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
 * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
 * rx_monitor_ring mask to indicate the rings that are processed by the
 * handler.
1894  *
1895  * Return: 0 for success, nonzero for failure.
1896  */
1897 static QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc)
1898 {
1899 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1900 	int i;
1901 
1902 	soc->intr_mode = DP_INTR_POLL;
1903 
1904 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1905 		soc->intr_ctx[i].dp_intr_id = i;
1906 		soc->intr_ctx[i].tx_ring_mask =
1907 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1908 		soc->intr_ctx[i].rx_ring_mask =
1909 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1910 		soc->intr_ctx[i].rx_mon_ring_mask =
1911 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
1912 		soc->intr_ctx[i].rx_err_ring_mask =
1913 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1914 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
1915 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1916 		soc->intr_ctx[i].reo_status_ring_mask =
1917 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1918 		soc->intr_ctx[i].rxdma2host_ring_mask =
1919 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1920 		soc->intr_ctx[i].soc = soc;
1921 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1922 	}
1923 
1924 	qdf_timer_init(soc->osdev, &soc->int_timer,
1925 			dp_interrupt_timer, (void *)soc,
1926 			QDF_TIMER_TYPE_WAKE_APPS);
1927 
1928 	return QDF_STATUS_SUCCESS;
1929 }
1930 
1931 /**
1932  * dp_soc_set_interrupt_mode() - Set the interrupt mode in soc
 * @soc: DP soc handle
 *
 * Set the appropriate interrupt mode flag in the soc
 *
 * Return: none
 */
1937 static void dp_soc_set_interrupt_mode(struct dp_soc *soc)
1938 {
1939 	uint32_t msi_base_data, msi_vector_start;
1940 	int msi_vector_count, ret;
1941 
1942 	soc->intr_mode = DP_INTR_INTEGRATED;
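
	/*
	 * Selection order: integrated is the default; fall back to poll
	 * mode when NAPI is disabled or the driver runs in monitor mode;
	 * switch to MSI only when the platform driver reports a valid
	 * MSI assignment for "DP".
	 */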
1943 
1944 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
1945 	    (soc->cdp_soc.ol_ops->get_con_mode &&
1946 	     soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)) {
1947 		soc->intr_mode = DP_INTR_POLL;
1948 	} else {
1949 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1950 						  &msi_vector_count,
1951 						  &msi_base_data,
1952 						  &msi_vector_start);
1953 		if (ret)
1954 			return;
1955 
1956 		soc->intr_mode = DP_INTR_MSI;
1957 	}
1958 }
1959 
1960 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc);
1961 #if defined(DP_INTR_POLL_BOTH)
1962 /*
1963  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
1964  * @txrx_soc: DP SOC handle
1965  *
1966  * Call the appropriate attach function based on the mode of operation.
1967  * This is a WAR for enabling monitor mode.
1968  *
1969  * Return: 0 for success. nonzero for failure.
1970  */
1971 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
1972 {
1973 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1974 
1975 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
1976 	    (soc->cdp_soc.ol_ops->get_con_mode &&
1977 	     soc->cdp_soc.ol_ops->get_con_mode() ==
1978 	     QDF_GLOBAL_MONITOR_MODE)) {
1979 		dp_info("Poll mode");
1980 		return dp_soc_attach_poll(txrx_soc);
1981 	} else {
1982 		dp_info("Interrupt  mode");
1983 		return dp_soc_interrupt_attach(txrx_soc);
1984 	}
1985 }
1986 #else
1987 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
1988 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
1989 {
1990 	return dp_soc_attach_poll(txrx_soc);
1991 }
1992 #else
1993 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
1994 {
1995 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1996 
1997 	if (hif_is_polled_mode_enabled(soc->hif_handle))
1998 		return dp_soc_attach_poll(txrx_soc);
1999 	else
2000 		return dp_soc_interrupt_attach(txrx_soc);
2001 }
2002 #endif
2003 #endif
2004 
2005 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
2006 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
2007 {
2008 	int j;
2009 	int num_irq = 0;
2010 
2011 	int tx_mask =
2012 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
2013 	int rx_mask =
2014 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
2015 	int rx_mon_mask =
2016 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
2017 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
2018 					soc->wlan_cfg_ctx, intr_ctx_num);
2019 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
2020 					soc->wlan_cfg_ctx, intr_ctx_num);
2021 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
2022 					soc->wlan_cfg_ctx, intr_ctx_num);
2023 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
2024 					soc->wlan_cfg_ctx, intr_ctx_num);
2025 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
2026 					soc->wlan_cfg_ctx, intr_ctx_num);
2027 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
2028 					soc->wlan_cfg_ctx, intr_ctx_num);
2029 
2030 	soc->intr_mode = DP_INTR_INTEGRATED;
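
	/*
	 * In integrated (non-MSI) mode each enabled mask bit maps to a
	 * dedicated hw IRQ id. The IRQ enums for a ring group count down
	 * from the ring-1 id, so ring j resolves to (<group>_ring1 - j).
	 */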
2031 
2032 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
2033 
2034 		if (tx_mask & (1 << j)) {
2035 			irq_id_map[num_irq++] =
2036 				(wbm2host_tx_completions_ring1 - j);
2037 		}
2038 
2039 		if (rx_mask & (1 << j)) {
2040 			irq_id_map[num_irq++] =
2041 				(reo2host_destination_ring1 - j);
2042 		}
2043 
2044 		if (rxdma2host_ring_mask & (1 << j)) {
2045 			irq_id_map[num_irq++] =
2046 				rxdma2host_destination_ring_mac1 - j;
2047 		}
2048 
2049 		if (host2rxdma_ring_mask & (1 << j)) {
2050 			irq_id_map[num_irq++] =
				host2rxdma_host_buf_ring_mac1 - j;
2052 		}
2053 
2054 		if (host2rxdma_mon_ring_mask & (1 << j)) {
2055 			irq_id_map[num_irq++] =
2056 				host2rxdma_monitor_ring1 - j;
2057 		}
2058 
2059 		if (rx_mon_mask & (1 << j)) {
2060 			irq_id_map[num_irq++] =
2061 				ppdu_end_interrupts_mac1 - j;
2062 			irq_id_map[num_irq++] =
2063 				rxdma2host_monitor_status_ring_mac1 - j;
2064 			irq_id_map[num_irq++] =
2065 				rxdma2host_monitor_destination_mac1 - j;
2066 		}
2067 
2068 		if (rx_wbm_rel_ring_mask & (1 << j))
2069 			irq_id_map[num_irq++] = wbm2host_rx_release;
2070 
2071 		if (rx_err_ring_mask & (1 << j))
2072 			irq_id_map[num_irq++] = reo2host_exception;
2073 
2074 		if (reo_status_ring_mask & (1 << j))
2075 			irq_id_map[num_irq++] = reo2host_status;
2076 
2077 	}
2078 	*num_irq_r = num_irq;
2079 }
2080 
2081 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
2082 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
2083 		int msi_vector_count, int msi_vector_start)
2084 {
2085 	int tx_mask = wlan_cfg_get_tx_ring_mask(
2086 					soc->wlan_cfg_ctx, intr_ctx_num);
2087 	int rx_mask = wlan_cfg_get_rx_ring_mask(
2088 					soc->wlan_cfg_ctx, intr_ctx_num);
2089 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
2090 					soc->wlan_cfg_ctx, intr_ctx_num);
2091 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
2092 					soc->wlan_cfg_ctx, intr_ctx_num);
2093 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
2094 					soc->wlan_cfg_ctx, intr_ctx_num);
2095 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
2096 					soc->wlan_cfg_ctx, intr_ctx_num);
2097 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
2098 					soc->wlan_cfg_ctx, intr_ctx_num);
2099 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
2100 					soc->wlan_cfg_ctx, intr_ctx_num);
2101 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
2102 					soc->wlan_cfg_ctx, intr_ctx_num);
2103 
2104 	unsigned int vector =
2105 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
2106 	int num_irq = 0;
2107 
2108 	soc->intr_mode = DP_INTR_MSI;
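
	/*
	 * All rings of one interrupt context share a single MSI vector;
	 * contexts are spread round-robin across the available vectors.
	 * As a purely illustrative example, with msi_vector_start = 1 and
	 * msi_vector_count = 8, context 10 would land on (10 % 8) + 1 = 3.
	 */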
2109 
2110 	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
2111 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask |
2112 	    host2rxdma_ring_mask | host2rxdma_mon_ring_mask)
2113 		irq_id_map[num_irq++] =
2114 			pld_get_msi_irq(soc->osdev->dev, vector);
2115 
2116 	*num_irq_r = num_irq;
2117 }
2118 
2119 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
2120 				    int *irq_id_map, int *num_irq)
2121 {
2122 	int msi_vector_count, ret;
2123 	uint32_t msi_base_data, msi_vector_start;
2124 
2125 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
2126 					    &msi_vector_count,
2127 					    &msi_base_data,
2128 					    &msi_vector_start);
	if (ret) {
		dp_soc_interrupt_map_calculate_integrated(soc,
				intr_ctx_num, irq_id_map, num_irq);
		return;
	}

	dp_soc_interrupt_map_calculate_msi(soc,
			intr_ctx_num, irq_id_map, num_irq,
			msi_vector_count, msi_vector_start);
2137 }
2138 
2139 /*
2140  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
2141  * @txrx_soc: DP SOC handle
2142  *
 * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
 * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
 * rx_monitor_ring mask to indicate the rings that are processed by the
 * handler.
2146  *
2147  * Return: 0 for success. nonzero for failure.
2148  */
2149 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
2150 {
2151 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2152 
2153 	int i = 0;
2154 	int num_irq = 0;
2155 
2156 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
2157 		int ret = 0;
2158 
2159 		/* Map of IRQ ids registered with one interrupt context */
2160 		int irq_id_map[HIF_MAX_GRP_IRQ];
2161 
2162 		int tx_mask =
2163 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
2164 		int rx_mask =
2165 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
2166 		int rx_mon_mask =
2167 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
2168 		int rx_err_ring_mask =
2169 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
2170 		int rx_wbm_rel_ring_mask =
2171 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
2172 		int reo_status_ring_mask =
2173 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
2174 		int rxdma2host_ring_mask =
2175 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
2176 		int host2rxdma_ring_mask =
2177 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
2178 		int host2rxdma_mon_ring_mask =
2179 			wlan_cfg_get_host2rxdma_mon_ring_mask(
2180 				soc->wlan_cfg_ctx, i);
2181 
2182 		soc->intr_ctx[i].dp_intr_id = i;
2183 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
2184 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
2185 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
2186 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
2187 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
2188 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
2189 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
2190 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
2191 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
2192 			 host2rxdma_mon_ring_mask;
2193 
2194 		soc->intr_ctx[i].soc = soc;
2195 
2196 		num_irq = 0;
2197 
2198 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
2199 					       &num_irq);
2200 
2201 		ret = hif_register_ext_group(soc->hif_handle,
2202 				num_irq, irq_id_map, dp_service_srngs,
2203 				&soc->intr_ctx[i], "dp_intr",
2204 				HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
2205 
2206 		if (ret) {
2207 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2208 			FL("failed, ret = %d"), ret);
2209 
2210 			return QDF_STATUS_E_FAILURE;
2211 		}
2212 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
2213 	}
2214 
2215 	hif_configure_ext_group_interrupts(soc->hif_handle);
2216 	hif_config_irq_set_perf_affinity_hint(soc->hif_handle);
2217 
2218 	return QDF_STATUS_SUCCESS;
2219 }
2220 
2221 /*
2222  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
2223  * @txrx_soc: DP SOC handle
2224  *
2225  * Return: none
2226  */
2227 static void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
2228 {
2229 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2230 	int i;
2231 
2232 	if (soc->intr_mode == DP_INTR_POLL) {
2233 		qdf_timer_free(&soc->int_timer);
2234 	} else {
2235 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
2236 	}
2237 
2238 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
2239 		soc->intr_ctx[i].tx_ring_mask = 0;
2240 		soc->intr_ctx[i].rx_ring_mask = 0;
2241 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
2242 		soc->intr_ctx[i].rx_err_ring_mask = 0;
2243 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
2244 		soc->intr_ctx[i].reo_status_ring_mask = 0;
2245 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
2246 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
2247 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
2248 
2249 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
2250 	}
2251 }
2252 
2253 #define AVG_MAX_MPDUS_PER_TID 128
2254 #define AVG_TIDS_PER_CLIENT 2
2255 #define AVG_FLOWS_PER_TID 2
2256 #define AVG_MSDUS_PER_FLOW 128
2257 #define AVG_MSDUS_PER_MPDU 4
2258 
2259 /*
2260  * dp_hw_link_desc_pool_banks_free() - Free h/w link desc pool banks
2261  * @soc: DP SOC handle
2262  * @mac_id: mac id
2263  *
2264  * Return: none
2265  */
2266 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id)
2267 {
2268 	struct qdf_mem_multi_page_t *pages;
2269 
2270 	if (mac_id != WLAN_INVALID_PDEV_ID)
2271 		pages = &soc->mon_link_desc_pages[mac_id];
2272 	else
2273 		pages = &soc->link_desc_pages;
2274 
2275 	if (pages->dma_pages) {
2276 		wlan_minidump_remove((void *)
2277 				     pages->dma_pages->page_v_addr_start);
2278 		qdf_mem_multi_pages_free(soc->osdev, pages, 0, false);
2279 	}
2280 }
2281 
2282 /*
2283  * dp_hw_link_desc_pool_banks_alloc() - Allocate h/w link desc pool banks
2284  * @soc: DP SOC handle
2285  * @mac_id: mac id
2286  *
 * Allocates memory pages for link descriptors; the page size is 4K for
 * MCL and 2MB for WIN. If the mac_id is invalid, link descriptor pages
 * are allocated for regular RX/TX, and if a valid mac_id is given, link
 * descriptors are allocated for RX monitor mode.
2291  *
2292  * Return: QDF_STATUS_SUCCESS: Success
2293  *	   QDF_STATUS_E_FAILURE: Failure
2294  */
2295 QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc, uint32_t mac_id)
2296 {
2297 	hal_soc_handle_t hal_soc = soc->hal_soc;
2298 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
2299 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
2300 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
2301 	uint32_t num_mpdus_per_link_desc = hal_num_mpdus_per_link_desc(hal_soc);
2302 	uint32_t num_msdus_per_link_desc = hal_num_msdus_per_link_desc(hal_soc);
2303 	uint32_t num_mpdu_links_per_queue_desc =
2304 		hal_num_mpdu_links_per_queue_desc(hal_soc);
2305 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
2306 	uint32_t *total_link_descs, total_mem_size;
2307 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
2308 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
2309 	uint32_t num_entries;
2310 	struct qdf_mem_multi_page_t *pages;
2311 	struct dp_srng *dp_srng;
2312 	uint8_t minidump_str[MINIDUMP_STR_SIZE];
2313 
	/* Only Tx queue descriptors are allocated from the common link
	 * descriptor pool. Rx queue descriptors (REO queue extension
	 * descriptors) are not included because they are expected to be
	 * allocated contiguously with the REO queue descriptors.
	 */
2319 	if (mac_id != WLAN_INVALID_PDEV_ID) {
2320 		pages = &soc->mon_link_desc_pages[mac_id];
2321 		dp_srng = &soc->rxdma_mon_desc_ring[mac_id];
2322 		num_entries = dp_srng->alloc_size /
2323 			hal_srng_get_entrysize(soc->hal_soc,
2324 					       RXDMA_MONITOR_DESC);
2325 		total_link_descs = &soc->total_mon_link_descs[mac_id];
2326 		qdf_str_lcopy(minidump_str, "mon_link_desc_bank",
2327 			      MINIDUMP_STR_SIZE);
2328 	} else {
2329 		num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
2330 			AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
2331 
2332 		num_mpdu_queue_descs = num_mpdu_link_descs /
2333 			num_mpdu_links_per_queue_desc;
2334 
2335 		num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
2336 			AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
2337 			num_msdus_per_link_desc;
2338 
2339 		num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
2340 			AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
2341 
2342 		num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
2343 			num_tx_msdu_link_descs + num_rx_msdu_link_descs;
2344 
2345 		pages = &soc->link_desc_pages;
2346 		total_link_descs = &soc->total_link_descs;
2347 		qdf_str_lcopy(minidump_str, "link_desc_bank",
2348 			      MINIDUMP_STR_SIZE);
2349 	}
2350 
2351 	/* Round up to power of 2 */
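	/* e.g. a num_entries of 3000 rounds up to 4096 */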
2352 	*total_link_descs = 1;
2353 	while (*total_link_descs < num_entries)
2354 		*total_link_descs <<= 1;
2355 
2356 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2357 		  FL("total_link_descs: %u, link_desc_size: %d"),
2358 		  *total_link_descs, link_desc_size);
	total_mem_size = *total_link_descs * link_desc_size;
2360 	total_mem_size += link_desc_align;
2361 
2362 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2363 		  FL("total_mem_size: %d"), total_mem_size);
2364 
2365 	dp_set_max_page_size(pages, max_alloc_size);
2366 	qdf_mem_multi_pages_alloc(soc->osdev,
2367 				  pages,
2368 				  link_desc_size,
2369 				  *total_link_descs,
2370 				  0, false);
2371 	if (!pages->num_pages) {
2372 		dp_err("Multi page alloc fail for hw link desc pool");
2373 		return QDF_STATUS_E_FAULT;
2374 	}
2375 
2376 	wlan_minidump_log(pages->dma_pages->page_v_addr_start,
2377 			  pages->num_pages * pages->page_size,
2378 			  soc->ctrl_psoc,
2379 			  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
2380 			  "hw_link_desc_bank");
2381 
2382 	return QDF_STATUS_SUCCESS;
2383 }
2384 
2385 /*
2386  * dp_hw_link_desc_ring_free() - Free h/w link desc rings
2387  * @soc: DP SOC handle
2388  *
2389  * Return: none
2390  */
2391 static void dp_hw_link_desc_ring_free(struct dp_soc *soc)
2392 {
2393 	uint32_t i;
2394 	uint32_t size = soc->wbm_idle_scatter_buf_size;
2395 	void *vaddr = soc->wbm_idle_link_ring.base_vaddr_unaligned;
2396 	qdf_dma_addr_t paddr;
2397 
2398 	if (soc->wbm_idle_scatter_buf_base_vaddr[0]) {
2399 		for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2400 			vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
2401 			paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
2402 			if (vaddr) {
2403 				qdf_mem_free_consistent(soc->osdev,
2404 							soc->osdev->dev,
2405 							size,
2406 							vaddr,
2407 							paddr,
2408 							0);
2409 				vaddr = NULL;
2410 			}
2411 		}
2412 	} else {
2413 		wlan_minidump_remove(vaddr);
2414 		dp_srng_free(soc, &soc->wbm_idle_link_ring);
2415 	}
2416 }
2417 
2418 /*
2419  * dp_hw_link_desc_ring_alloc() - Allocate hw link desc rings
2420  * @soc: DP SOC handle
2421  *
 * Allocate memory for the WBM_IDLE_LINK srng ring if the total size of
 * the link descriptors is less than the max alloc size; else allocate
 * memory for the wbm_idle_scatter_buffers.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_NOMEM: No memory (Failure)
2428  */
2429 static QDF_STATUS dp_hw_link_desc_ring_alloc(struct dp_soc *soc)
2430 {
2431 	uint32_t entry_size, i;
2432 	uint32_t total_mem_size;
2433 	qdf_dma_addr_t *baseaddr = NULL;
2434 	struct dp_srng *dp_srng;
2435 	uint32_t ring_type;
2436 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
2437 	uint32_t tlds;
2438 
2439 	ring_type = WBM_IDLE_LINK;
2440 	dp_srng = &soc->wbm_idle_link_ring;
2441 	tlds = soc->total_link_descs;
2442 
2443 	entry_size = hal_srng_get_entrysize(soc->hal_soc, ring_type);
2444 	total_mem_size = entry_size * tlds;
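
	/*
	 * If all link descriptor pointers fit within the max alloc size,
	 * back the WBM idle link list with a single srng ring; otherwise
	 * fall back to a scatter list of DMA buffers that HW walks.
	 */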
2445 
2446 	if (total_mem_size <= max_alloc_size) {
2447 		if (dp_srng_alloc(soc, dp_srng, ring_type, tlds, 0)) {
2448 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2449 				  FL("Link desc idle ring setup failed"));
2450 			goto fail;
2451 		}
2452 
2453 		wlan_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
2454 				  soc->wbm_idle_link_ring.alloc_size,
2455 				  soc->ctrl_psoc,
2456 				  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
2457 				  "wbm_idle_link_ring");
2458 	} else {
2459 		uint32_t num_scatter_bufs;
2460 		uint32_t num_entries_per_buf;
2461 		uint32_t buf_size = 0;
2462 
2463 		soc->wbm_idle_scatter_buf_size =
2464 			hal_idle_list_scatter_buf_size(soc->hal_soc);
2465 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
2466 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
2467 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
2468 					soc->hal_soc, total_mem_size,
2469 					soc->wbm_idle_scatter_buf_size);
2470 
2471 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
2472 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2473 				  FL("scatter bufs size out of bounds"));
2474 			goto fail;
2475 		}
2476 
2477 		for (i = 0; i < num_scatter_bufs; i++) {
2478 			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
2479 			buf_size = soc->wbm_idle_scatter_buf_size;
2480 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
2481 				qdf_mem_alloc_consistent(soc->osdev,
2482 							 soc->osdev->dev,
2483 							 buf_size,
2484 							 baseaddr);
2485 
2486 			if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
2487 				QDF_TRACE(QDF_MODULE_ID_DP,
2488 					  QDF_TRACE_LEVEL_ERROR,
2489 					  FL("Scatter lst memory alloc fail"));
2490 				goto fail;
2491 			}
2492 		}
2493 		soc->num_scatter_bufs = num_scatter_bufs;
2494 	}
2495 	return QDF_STATUS_SUCCESS;
2496 
2497 fail:
2498 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2499 		void *vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
2500 		qdf_dma_addr_t paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
2501 
2502 		if (vaddr) {
2503 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2504 						soc->wbm_idle_scatter_buf_size,
2505 						vaddr,
2506 						paddr, 0);
2507 			vaddr = NULL;
2508 		}
2509 	}
2510 	return QDF_STATUS_E_NOMEM;
2511 }
2512 
2513 /*
2514  * dp_hw_link_desc_ring_init() - Initialize hw link desc rings
2515  * @soc: DP SOC handle
2516  *
2517  * Return: QDF_STATUS_SUCCESS: success
2518  *         QDF_STATUS_E_FAILURE: failure
2519  */
2520 static QDF_STATUS dp_hw_link_desc_ring_init(struct dp_soc *soc)
2521 {
2522 	struct dp_srng *dp_srng = &soc->wbm_idle_link_ring;
2523 
2524 	if (dp_srng->base_vaddr_unaligned) {
2525 		if (dp_srng_init(soc, dp_srng, WBM_IDLE_LINK, 0, 0))
2526 			return QDF_STATUS_E_FAILURE;
2527 	}
2528 	return QDF_STATUS_SUCCESS;
2529 }
2530 
2531 /*
2532  * dp_hw_link_desc_ring_deinit() - Reset hw link desc rings
2533  * @soc: DP SOC handle
2534  *
2535  * Return: None
2536  */
2537 static void dp_hw_link_desc_ring_deinit(struct dp_soc *soc)
2538 {
2539 	dp_srng_deinit(soc, &soc->wbm_idle_link_ring, WBM_IDLE_LINK, 0);
2540 }
2541 
2542 /*
 * dp_link_desc_ring_replenish() - Replenish hw link desc rings
2544  * @soc: DP SOC handle
2545  * @mac_id: mac id
2546  *
2547  * Return: None
2548  */
2549 void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id)
2550 {
2551 	uint32_t cookie = 0;
2552 	uint32_t page_idx = 0;
2553 	struct qdf_mem_multi_page_t *pages;
2554 	struct qdf_mem_dma_page_t *dma_pages;
2555 	uint32_t offset = 0;
2556 	uint32_t count = 0;
2557 	void *desc_srng;
2558 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
2559 	uint32_t total_link_descs;
2560 	uint32_t scatter_buf_num;
2561 	uint32_t num_entries_per_buf = 0;
2562 	uint32_t rem_entries;
2563 	uint32_t num_descs_per_page;
2564 	uint32_t num_scatter_bufs = 0;
2565 	uint8_t *scatter_buf_ptr;
2566 	void *desc;
2567 
2568 	num_scatter_bufs = soc->num_scatter_bufs;
2569 
2570 	if (mac_id == WLAN_INVALID_PDEV_ID) {
2571 		pages = &soc->link_desc_pages;
2572 		total_link_descs = soc->total_link_descs;
2573 		desc_srng = soc->wbm_idle_link_ring.hal_srng;
2574 	} else {
2575 		pages = &soc->mon_link_desc_pages[mac_id];
2576 		total_link_descs = soc->total_mon_link_descs[mac_id];
2577 		desc_srng = soc->rxdma_mon_desc_ring[mac_id].hal_srng;
2578 	}
2579 
2580 	dma_pages = pages->dma_pages;
2581 	do {
2582 		qdf_mem_zero(dma_pages[page_idx].page_v_addr_start,
2583 			     pages->page_size);
2584 		page_idx++;
2585 	} while (page_idx < pages->num_pages);
2586 
2587 	if (desc_srng) {
2588 		hal_srng_access_start_unlocked(soc->hal_soc, desc_srng);
2589 		page_idx = 0;
2590 		count = 0;
2591 		offset = 0;
2592 		pages = &soc->link_desc_pages;
2593 		while ((desc = hal_srng_src_get_next(soc->hal_soc,
2594 						     desc_srng)) &&
2595 			(count < total_link_descs)) {
2596 			page_idx = count / pages->num_element_per_page;
2597 			offset = count % pages->num_element_per_page;
2598 			cookie = LINK_DESC_COOKIE(count, page_idx);
2599 
2600 			hal_set_link_desc_addr(desc, cookie,
2601 					       dma_pages[page_idx].page_p_addr
2602 					       + (offset * link_desc_size));
2603 			count++;
2604 		}
2605 		hal_srng_access_end_unlocked(soc->hal_soc, desc_srng);
2606 	} else {
2607 		/* Populate idle list scatter buffers with link descriptor
2608 		 * pointers
2609 		 */
2610 		scatter_buf_num = 0;
2611 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
2612 					soc->hal_soc,
2613 					soc->wbm_idle_scatter_buf_size);
2614 
2615 		scatter_buf_ptr = (uint8_t *)(
2616 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
2617 		rem_entries = num_entries_per_buf;
2618 		pages = &soc->link_desc_pages;
		page_idx = 0;
		count = 0;
		offset = 0;
2621 		num_descs_per_page = pages->num_element_per_page;
2622 
2623 		while (count < total_link_descs) {
2624 			page_idx = count / num_descs_per_page;
2625 			offset = count % num_descs_per_page;
2626 			cookie = LINK_DESC_COOKIE(count, page_idx);
2627 			hal_set_link_desc_addr((void *)scatter_buf_ptr,
2628 					       cookie,
2629 					       dma_pages[page_idx].page_p_addr +
2630 					       (offset * link_desc_size));
2631 			rem_entries--;
2632 			if (rem_entries) {
2633 				scatter_buf_ptr += link_desc_size;
2634 			} else {
2635 				rem_entries = num_entries_per_buf;
2636 				scatter_buf_num++;
2637 				if (scatter_buf_num >= num_scatter_bufs)
2638 					break;
2639 				scatter_buf_ptr = (uint8_t *)
2640 					(soc->wbm_idle_scatter_buf_base_vaddr[
2641 					 scatter_buf_num]);
2642 			}
2643 			count++;
2644 		}
2645 		/* Setup link descriptor idle list in HW */
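		/* The (scatter_buf_ptr - buffer base) term tells HW how many
		 * bytes of the last scatter buffer are actually populated.
		 */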
2646 		hal_setup_link_idle_list(soc->hal_soc,
2647 			soc->wbm_idle_scatter_buf_base_paddr,
2648 			soc->wbm_idle_scatter_buf_base_vaddr,
2649 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
2650 			(uint32_t)(scatter_buf_ptr -
2651 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
2652 			scatter_buf_num-1])), total_link_descs);
2653 	}
2654 }
2655 
2656 #ifdef IPA_OFFLOAD
2657 #define REO_DST_RING_SIZE_QCA6290 1023
2658 #ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
2659 #define REO_DST_RING_SIZE_QCA8074 1023
2660 #define REO_DST_RING_SIZE_QCN9000 2048
2661 #else
2662 #define REO_DST_RING_SIZE_QCA8074 8
2663 #define REO_DST_RING_SIZE_QCN9000 8
2664 #endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
2665 
2666 #else
2667 
2668 #define REO_DST_RING_SIZE_QCA6290 1024
2669 #ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
2670 #define REO_DST_RING_SIZE_QCA8074 2048
2671 #define REO_DST_RING_SIZE_QCN9000 2048
2672 #else
2673 #define REO_DST_RING_SIZE_QCA8074 8
2674 #define REO_DST_RING_SIZE_QCN9000 8
2675 #endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
2676 #endif /* IPA_OFFLOAD */
2677 
2678 #ifndef FEATURE_WDS
2679 static void dp_soc_wds_attach(struct dp_soc *soc)
2680 {
2681 }
2682 
2683 static void dp_soc_wds_detach(struct dp_soc *soc)
2684 {
2685 }
2686 #endif
2687 /*
 * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
 * @soc: Datapath soc handle
 *
 * This API resets the default cpu ring map
2692  */
2693 
2694 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
2695 {
2696 	uint8_t i;
2697 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
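
	/*
	 * tx_ring_map[i] selects, per interrupt context, which TCL ring
	 * the host may transmit on; the precomputed dp_cpu_ring_map
	 * tables are laid out so host Tx avoids rings owned by the NSS
	 * offload for the given nss_config.
	 */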
2698 
2699 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
2700 		switch (nss_config) {
2701 		case dp_nss_cfg_first_radio:
2702 			/*
2703 			 * Setting Tx ring map for one nss offloaded radio
2704 			 */
2705 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
2706 			break;
2707 
2708 		case dp_nss_cfg_second_radio:
2709 			/*
2710 			 * Setting Tx ring for two nss offloaded radios
2711 			 */
2712 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
2713 			break;
2714 
2715 		case dp_nss_cfg_dbdc:
2716 			/*
2717 			 * Setting Tx ring map for 2 nss offloaded radios
2718 			 */
2719 			soc->tx_ring_map[i] =
2720 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
2721 			break;
2722 
2723 		case dp_nss_cfg_dbtc:
2724 			/*
2725 			 * Setting Tx ring map for 3 nss offloaded radios
2726 			 */
2727 			soc->tx_ring_map[i] =
2728 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
2729 			break;
2730 
2731 		default:
2732 			dp_err("tx_ring_map failed due to invalid nss cfg");
2733 			break;
2734 		}
2735 	}
2736 }
2737 
2738 /*
2739  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
 * @soc: DP soc handle
 * @ring_type: ring type
 * @ring_num: ring number
 *
 * Return: nonzero if the ring is offloaded to NSS, 0 otherwise
 */
static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
					    enum hal_ring_type ring_type,
					    int ring_num)
2747 {
2748 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2749 	uint8_t status = 0;
2750 
2751 	switch (ring_type) {
2752 	case WBM2SW_RELEASE:
2753 	case REO_DST:
2754 	case RXDMA_BUF:
2755 		status = ((nss_config) & (1 << ring_num));
2756 		break;
2757 	default:
2758 		break;
2759 	}
2760 
2761 	return status;
2762 }
2763 
2764 /*
2765  * dp_soc_disable_unused_mac_intr_mask() - reset interrupt mask for
2766  *					  unused WMAC hw rings
 * @soc: DP Soc handle
 * @mac_num: wmac num
 *
 * Return: none
2771  */
2772 static void dp_soc_disable_unused_mac_intr_mask(struct dp_soc *soc,
2773 						int mac_num)
2774 {
2775 	int *grp_mask = NULL;
2776 	int group_number;
2777 
2778 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2779 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
2780 	wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2781 					  group_number, 0x0);
2782 
2783 	grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
2784 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
2785 	wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
2786 				      group_number, 0x0);
2787 
2788 	grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
2789 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
2790 	wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
2791 					  group_number, 0x0);
2792 
2793 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
2794 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
2795 	wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
2796 					      group_number, 0x0);
2797 }
2798 
2799 /*
2800  * dp_soc_reset_intr_mask() - reset interrupt mask
 * @soc: DP Soc handle
 *
 * Return: none
2804  */
2805 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
2806 {
2807 	uint8_t j;
2808 	int *grp_mask = NULL;
2809 	int group_number, mask, num_ring;
2810 
2811 	/* number of tx ring */
2812 	num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
2813 
2814 	/*
2815 	 * group mask for tx completion  ring.
2816 	 */
2817 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
2818 
2819 	/* loop and reset the mask for only offloaded ring */
2820 	for (j = 0; j < num_ring; j++) {
2821 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
2822 			continue;
2823 		}
2824 
2825 		/*
2826 		 * Group number corresponding to tx offloaded ring.
2827 		 */
2828 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2829 		if (group_number < 0) {
2830 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2831 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2832 					WBM2SW_RELEASE, j);
2833 			return;
2834 		}
2835 
2836 		/* reset the tx mask for offloaded ring */
2837 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
2838 		mask &= (~(1 << j));
2839 
2840 		/*
2841 		 * reset the interrupt mask for offloaded ring.
2842 		 */
2843 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2844 	}
2845 
2846 	/* number of rx rings */
2847 	num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2848 
2849 	/*
2850 	 * group mask for reo destination ring.
2851 	 */
2852 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2853 
2854 	/* loop and reset the mask for only offloaded ring */
2855 	for (j = 0; j < num_ring; j++) {
2856 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
2857 			continue;
2858 		}
2859 
2860 		/*
2861 		 * Group number corresponding to rx offloaded ring.
2862 		 */
2863 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2864 		if (group_number < 0) {
2865 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2866 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2867 					REO_DST, j);
2868 			return;
2869 		}
2870 
		/* clear the rx interrupt mask bit for the offloaded ring */
		mask = wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
		mask &= (~(1 << j));

		/*
		 * write the updated mask back so the rx offloaded radio
		 * raises no host interrupt.
		 */
		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2879 	}
2880 
2881 	/*
2882 	 * group mask for Rx buffer refill ring
2883 	 */
2884 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2885 
2886 	/* loop and reset the mask for only offloaded ring */
2887 	for (j = 0; j < MAX_PDEV_CNT; j++) {
2888 		int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
2889 
2890 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
2891 			continue;
2892 		}
2893 
2894 		/*
2895 		 * Group number corresponding to rx offloaded ring.
2896 		 */
2897 		group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask);
2898 		if (group_number < 0) {
2899 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2900 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
					RXDMA_BUF, lmac_id);
2902 			return;
2903 		}
2904 
		/* clear the refill interrupt mask bit for the offloaded
		 * ring
		 */
		mask = wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
				group_number);
		mask &= (~(1 << lmac_id));

		/*
		 * write the updated mask back so the rx offloaded radio
		 * raises no host interrupt.
		 */
2913 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2914 			group_number, mask);
2915 	}
2916 }
2917 
2918 #ifdef IPA_OFFLOAD
2919 /**
 * dp_reo_remap_config() - configure reo remap register value based
 *                         on nss configuration.
 * @soc: DP soc handle
 * @remap1: output parameter indicating reo remap 1 register value
 * @remap2: output parameter indicating reo remap 2 register value
 *
 * Based on the offload_radio value below, the remap configuration
 * is applied:
 *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
 *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
 *		2 - 2nd Radio handled by NSS (remap rings 1, 3 & 4)
 *		3 - both Radios handled by NSS (remap not required)
 *		4 - IPA OFFLOAD enabled (remap rings 1, 2 & 3)
 *
 * Return: bool type, true if remap is configured else false.
2933  */
2934 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap1, uint32_t *remap2)
2935 {
2936 	uint32_t ring[4] = {REO_REMAP_SW1, REO_REMAP_SW2,
2937 						REO_REMAP_SW3};
2938 	hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
2939 				      3, remap1, remap2);
2940 	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
2941 
2942 	return true;
2943 }
2944 
2945 /**
2946  * dp_ipa_get_tx_ring_size() - Get Tx ring size for IPA
2947  *
2948  * @tx_ring_num: Tx ring number
2949  * @tx_ipa_ring_sz: Return param only updated for IPA.
2950  *
2951  * Return: None
2952  */
2953 static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz)
2954 {
2955 	if (tx_ring_num == IPA_TCL_DATA_RING_IDX)
2956 		*tx_ipa_ring_sz = WLAN_CFG_IPA_TX_RING_SIZE;
2957 }
2958 
2959 /**
2960  * dp_ipa_get_tx_comp_ring_size() - Get Tx comp ring size for IPA
2961  *
2962  * @tx_comp_ring_num: Tx comp ring number
2963  * @tx_comp_ipa_ring_sz: Return param only updated for IPA.
2964  *
2965  * Return: None
2966  */
2967 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
2968 					 int *tx_comp_ipa_ring_sz)
2969 {
2970 	if (tx_comp_ring_num == IPA_TCL_DATA_RING_IDX)
2971 		*tx_comp_ipa_ring_sz = WLAN_CFG_IPA_TX_COMP_RING_SIZE;
2972 }
2973 #else
2974 static bool dp_reo_remap_config(struct dp_soc *soc,
2975 				uint32_t *remap1,
2976 				uint32_t *remap2)
2977 {
2978 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2979 	uint8_t target_type;
2980 	uint32_t ring[4];
2981 
2982 	target_type = hal_get_target_type(soc->hal_soc);
2983 
2984 	switch (offload_radio) {
2985 	case dp_nss_cfg_default:
2986 		ring[0] = REO_REMAP_SW1;
2987 		ring[1] = REO_REMAP_SW2;
2988 		ring[2] = REO_REMAP_SW3;
2989 		ring[3] = REO_REMAP_SW4;
2990 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
2991 					      4, remap1, remap2);
2992 
2993 		break;
2994 	case dp_nss_cfg_first_radio:
2995 		ring[0] = REO_REMAP_SW2;
2996 		ring[1] = REO_REMAP_SW3;
2997 		ring[2] = REO_REMAP_SW4;
2998 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
2999 					      3, remap1, remap2);
3000 		break;
3001 	case dp_nss_cfg_second_radio:
3002 		ring[0] = REO_REMAP_SW1;
3003 		ring[1] = REO_REMAP_SW3;
3004 		ring[2] = REO_REMAP_SW4;
3005 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
3006 					      3, remap1, remap2);
3007 
3008 		break;
3009 	case dp_nss_cfg_dbdc:
3010 	case dp_nss_cfg_dbtc:
3011 		/* return false if both or all are offloaded to NSS */
3012 		return false;
3013 	}
3014 
3015 	dp_debug("remap1 %x remap2 %x offload_radio %u",
3016 		 *remap1, *remap2, offload_radio);
3017 	return true;
3018 }
3019 
3020 static void dp_ipa_get_tx_ring_size(int ring_num, int *tx_ipa_ring_sz)
3021 {
3022 }
3023 
3024 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
3025 					 int *tx_comp_ipa_ring_sz)
3026 {
3027 }
3028 #endif /* IPA_OFFLOAD */
3029 
3030 /*
3031  * dp_reo_frag_dst_set() - configure reo register to set the
3032  *                        fragment destination ring
 * @soc: Datapath soc
 * @frag_dst_ring: output parameter to set fragment destination ring
 *
 * Based on offload_radio below, the fragment destination ring is selected:
3037  * 0 - TCL
3038  * 1 - SW1
3039  * 2 - SW2
3040  * 3 - SW3
3041  * 4 - SW4
3042  * 5 - Release
3043  * 6 - FW
3044  * 7 - alternate select
3045  *
 * Return: void
3047  */
3048 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
3049 {
3050 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
3051 
3052 	switch (offload_radio) {
3053 	case dp_nss_cfg_default:
3054 		*frag_dst_ring = REO_REMAP_TCL;
3055 		break;
3056 	case dp_nss_cfg_first_radio:
3057 		/*
3058 		 * This configuration is valid for single band radio which
3059 		 * is also NSS offload.
3060 		 */
3061 	case dp_nss_cfg_dbdc:
3062 	case dp_nss_cfg_dbtc:
3063 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
3064 		break;
3065 	default:
3066 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3067 				FL("dp_reo_frag_dst_set invalid offload radio config"));
3068 		break;
3069 	}
3070 }
3071 
3072 #ifdef ENABLE_VERBOSE_DEBUG
3073 static void dp_enable_verbose_debug(struct dp_soc *soc)
3074 {
3075 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3076 
3077 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3078 
3079 	if (soc_cfg_ctx->per_pkt_trace & dp_verbose_debug_mask)
3080 		is_dp_verbose_debug_enabled = true;
3081 
3082 	if (soc_cfg_ctx->per_pkt_trace & hal_verbose_debug_mask)
3083 		hal_set_verbose_debug(true);
3084 	else
3085 		hal_set_verbose_debug(false);
3086 }
3087 #else
3088 static void dp_enable_verbose_debug(struct dp_soc *soc)
3089 {
3090 }
3091 #endif
3092 
3093 #ifdef WLAN_FEATURE_STATS_EXT
3094 static inline void dp_create_ext_stats_event(struct dp_soc *soc)
3095 {
3096 	qdf_event_create(&soc->rx_hw_stats_event);
3097 }
3098 #else
3099 static inline void dp_create_ext_stats_event(struct dp_soc *soc)
3100 {
3101 }
3102 #endif
3103 
3104 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index)
3105 {
3106 	wlan_minidump_remove(soc->tcl_data_ring[index].base_vaddr_unaligned);
3107 	dp_srng_deinit(soc, &soc->tcl_data_ring[index], TCL_DATA, index);
3108 
3109 	wlan_minidump_remove(soc->tx_comp_ring[index].base_vaddr_unaligned);
3110 	dp_srng_deinit(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE, index);
3111 }
3112 
3113 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
3114 						uint8_t index)
3115 {
3116 	if (dp_srng_init(soc, &soc->tcl_data_ring[index], TCL_DATA, index, 0)) {
3117 		dp_err("dp_srng_init failed for tcl_data_ring");
3118 		goto fail1;
3119 	}
3120 	wlan_minidump_log(soc->tcl_data_ring[index].base_vaddr_unaligned,
3121 			  soc->tcl_data_ring[index].alloc_size,
3122 			  soc->ctrl_psoc,
3123 			  WLAN_MD_DP_SRNG_TCL_DATA,
3124 			  "tcl_data_ring");
3125 
3126 	if (dp_srng_init(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
3127 			 index, 0)) {
3128 		dp_err("dp_srng_init failed for tx_comp_ring");
3129 		goto fail1;
3130 	}
3131 	wlan_minidump_log(soc->tx_comp_ring[index].base_vaddr_unaligned,
3132 			  soc->tx_comp_ring[index].alloc_size,
3133 			  soc->ctrl_psoc,
3134 			  WLAN_MD_DP_SRNG_TX_COMP,
3135 			  "tcl_comp_ring");
3136 
3137 	return QDF_STATUS_SUCCESS;
3138 
3139 fail1:
3140 	return QDF_STATUS_E_FAILURE;
3141 }
3142 
3143 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index)
3144 {
3145 	dp_srng_free(soc, &soc->tcl_data_ring[index]);
3146 	dp_srng_free(soc, &soc->tx_comp_ring[index]);
3147 }
3148 
3149 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
3150 						 uint8_t index)
3151 {
3152 	int tx_ring_size;
3153 	int tx_comp_ring_size;
3154 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
3155 	int cached = 0;
3156 
3157 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
3158 	dp_ipa_get_tx_ring_size(index, &tx_ring_size);
3159 
3160 	if (dp_srng_alloc(soc, &soc->tcl_data_ring[index], TCL_DATA,
3161 			  tx_ring_size, cached)) {
3162 		dp_err("dp_srng_alloc failed for tcl_data_ring");
3163 		goto fail1;
3164 	}
3165 
3166 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
3167 	dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size);
	/* Enable cached Tx completion ring desc if NSS offload is disabled */
3169 	if (!wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
3170 		cached = WLAN_CFG_DST_RING_CACHED_DESC;
3171 
3172 	if (dp_srng_alloc(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
3173 			  tx_comp_ring_size, cached)) {
3174 		dp_err("dp_srng_alloc failed for tx_comp_ring");
3175 		goto fail1;
3176 	}
3177 
3178 	return QDF_STATUS_SUCCESS;
3179 
3180 fail1:
3181 	return QDF_STATUS_E_FAILURE;
3182 }
3183 
3184 static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3185 {
3186 	struct cdp_lro_hash_config lro_hash;
3187 	QDF_STATUS status;
3188 
3189 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
3190 	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
3191 	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
3192 		dp_err("LRO, GRO and RX hash disabled");
3193 		return QDF_STATUS_E_FAILURE;
3194 	}
3195 
3196 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
3197 
3198 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
3199 	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
3200 		lro_hash.lro_enable = 1;
3201 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
3202 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
3203 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
3204 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
3205 	}
3206 
3207 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
3208 			     (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
3209 			      LRO_IPV4_SEED_ARR_SZ));
3210 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
3211 			     (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
3212 			      LRO_IPV6_SEED_ARR_SZ));
3213 
3214 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
3215 
3216 	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
3217 		QDF_BUG(0);
3218 		dp_err("lro_hash_config not configured");
3219 		return QDF_STATUS_E_FAILURE;
3220 	}
3221 
3222 	status = soc->cdp_soc.ol_ops->lro_hash_config(soc->ctrl_psoc,
3223 						      pdev->pdev_id,
3224 						      &lro_hash);
3225 	if (!QDF_IS_STATUS_SUCCESS(status)) {
3226 		dp_err("failed to send lro_hash_config to FW %u", status);
3227 		return status;
3228 	}
3229 
3230 	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
3231 		lro_hash.lro_enable, lro_hash.tcp_flag,
3232 		lro_hash.tcp_flag_mask);
3233 
3234 	dp_info("toeplitz_hash_ipv4:");
3235 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3236 			   lro_hash.toeplitz_hash_ipv4,
3237 			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
3238 			   LRO_IPV4_SEED_ARR_SZ));
3239 
3240 	dp_info("toeplitz_hash_ipv6:");
3241 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3242 			   lro_hash.toeplitz_hash_ipv6,
3243 			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
3244 			   LRO_IPV6_SEED_ARR_SZ));
3245 
3246 	return status;
3247 }
3248 
3249 /*
3250  * dp_rxdma_ring_setup() - configure the RX DMA rings
3251  * @soc: data path SoC handle
3252  * @pdev: Physical device handle
3253  *
 * Return: QDF_STATUS_SUCCESS - success, QDF_STATUS_E_FAILURE - failure
3255  */
3256 #ifdef QCA_HOST2FW_RXBUF_RING
3257 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3258 {
3259 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3260 	int max_mac_rings;
3261 	int i;
3262 	int ring_size;
3263 
3264 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3265 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
3266 	ring_size =  wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx);
3267 
3268 	for (i = 0; i < max_mac_rings; i++) {
3269 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
3270 		if (dp_srng_alloc(soc, &pdev->rx_mac_buf_ring[i],
3271 				  RXDMA_BUF, ring_size, 0)) {
3272 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3273 				  FL("failed rx mac ring setup"));
3274 			return QDF_STATUS_E_FAILURE;
3275 		}
3276 
3277 		if (dp_srng_init(soc, &pdev->rx_mac_buf_ring[i],
3278 				 RXDMA_BUF, 1, i)) {
3279 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3280 				  FL("failed rx mac ring setup"));
3281 
3282 			dp_srng_free(soc, &pdev->rx_mac_buf_ring[i]);
3283 			return QDF_STATUS_E_FAILURE;
3284 		}
3285 	}
3286 	return QDF_STATUS_SUCCESS;
3287 }
3288 #else
3289 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3290 {
3291 	return QDF_STATUS_SUCCESS;
3292 }
3293 #endif
3294 
3295 /**
3296  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
 * @pdev: DP_PDEV handle
3298  *
3299  * Return: void
3300  */
3301 static inline void
3302 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
3303 {
3304 	uint8_t map_id;
3305 	struct dp_soc *soc = pdev->soc;
3306 
3307 	if (!soc)
3308 		return;
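
	/*
	 * Keep a per-pdev copy of every DSCP->TID map (a base that later
	 * per-map updates can modify), then program the HW register
	 * copies with the default map.
	 */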
3309 
3310 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
3311 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
3312 			     default_dscp_tid_map,
3313 			     sizeof(default_dscp_tid_map));
3314 	}
3315 
3316 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
3317 		hal_tx_set_dscp_tid_map(soc->hal_soc,
3318 					default_dscp_tid_map,
3319 					map_id);
3320 	}
3321 }
3322 
3323 /**
3324  * dp_pcp_tid_map_setup(): Initialize the pcp-tid maps
 * @pdev: DP_PDEV handle
3326  *
3327  * Return: void
3328  */
3329 static inline void
3330 dp_pcp_tid_map_setup(struct dp_pdev *pdev)
3331 {
3332 	struct dp_soc *soc = pdev->soc;
3333 
3334 	if (!soc)
3335 		return;
3336 
3337 	qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
3338 		     sizeof(default_pcp_tid_map));
3339 	hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
3340 }
3341 
3342 #ifdef IPA_OFFLOAD
3343 /**
3344  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
3345  * @soc: data path instance
3346  * @pdev: core txrx pdev context
3347  *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_FAILURE: failure
3350  */
3351 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3352 					   struct dp_pdev *pdev)
3353 {
3354 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3355 	int entries;
3356 
3357 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3358 	entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
3359 
3360 	/* Setup second Rx refill buffer ring */
3361 	if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
3362 			  entries, 0)) {
3363 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3364 			FL("dp_srng_alloc failed second rx refill ring"));
3365 		return QDF_STATUS_E_FAILURE;
3366 	}
3367 
3368 	if (dp_srng_init(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
3369 			 IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id)) {
3370 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3371 			  FL("dp_srng_init failed second rx refill ring"));
3372 		return QDF_STATUS_E_FAILURE;
3373 	}
3374 
3375 	return QDF_STATUS_SUCCESS;
3376 }
3377 
3378 /**
3379  * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
3380  * @soc: data path instance
3381  * @pdev: core txrx pdev context
3382  *
3383  * Return: void
3384  */
3385 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3386 					      struct dp_pdev *pdev)
3387 {
3388 	dp_srng_deinit(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 0);
3389 	dp_srng_free(soc, &pdev->rx_refill_buf_ring2);
3390 }
3391 
3392 #else
3393 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3394 					   struct dp_pdev *pdev)
3395 {
3396 	return QDF_STATUS_SUCCESS;
3397 }
3398 
3399 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3400 					      struct dp_pdev *pdev)
3401 {
3402 }
3403 #endif
3404 
3405 #if !defined(DISABLE_MON_CONFIG)
3406 /**
 * dp_mon_rings_deinit() - Deinitialize monitor rings
3408  * @pdev: DP pdev handle
3409  *
3410  */
3411 static void dp_mon_rings_deinit(struct dp_pdev *pdev)
3412 {
3413 	int mac_id = 0;
3414 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3415 	struct dp_soc *soc = pdev->soc;
3416 
3417 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3418 
3419 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3420 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
3421 							 pdev->pdev_id);
3422 
3423 		dp_srng_deinit(soc, &soc->rxdma_mon_status_ring[lmac_id],
3424 			       RXDMA_MONITOR_STATUS, 0);
3425 
3426 		if (!soc->wlan_cfg_ctx->rxdma1_enable)
3427 			continue;
3428 
3429 		dp_srng_deinit(soc, &soc->rxdma_mon_buf_ring[lmac_id],
3430 			       RXDMA_MONITOR_BUF, 0);
3431 		dp_srng_deinit(soc, &soc->rxdma_mon_dst_ring[lmac_id],
3432 			       RXDMA_MONITOR_DST, 0);
3433 		dp_srng_deinit(soc, &soc->rxdma_mon_desc_ring[lmac_id],
3434 			       RXDMA_MONITOR_DESC, 0);
3435 	}
3436 }
3437 
3438 /**
3439  * dp_mon_rings_free() - free monitor rings
3440  * @pdev: Datapath pdev handle
3441  *
3442  */
3443 static void dp_mon_rings_free(struct dp_pdev *pdev)
3444 {
3445 	int mac_id = 0;
3446 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3447 	struct dp_soc *soc = pdev->soc;
3448 
3449 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3450 
3451 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3452 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
3453 							 pdev->pdev_id);
3454 
3455 		dp_srng_free(soc, &soc->rxdma_mon_status_ring[lmac_id]);
3456 
3457 		if (!soc->wlan_cfg_ctx->rxdma1_enable)
3458 			continue;
3459 
3460 		dp_srng_free(soc, &soc->rxdma_mon_buf_ring[lmac_id]);
3461 		dp_srng_free(soc, &soc->rxdma_mon_dst_ring[lmac_id]);
3462 		dp_srng_free(soc, &soc->rxdma_mon_desc_ring[lmac_id]);
3463 	}
3464 }
3465 
3466 /**
3467  * dp_mon_rings_init() - Initialize monitor srng rings
 * @soc: Datapath soc handle
 * @pdev: Datapath pdev handle
 *
 * Return: QDF_STATUS_SUCCESS on success
3471  *	   QDF_STATUS_E_NOMEM on failure
3472  */
3473 static
3474 QDF_STATUS dp_mon_rings_init(struct dp_soc *soc, struct dp_pdev *pdev)
3475 {
3476 	int mac_id = 0;
3477 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3478 
3479 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3480 
3481 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3482 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
3483 							 pdev->pdev_id);
3484 
3485 		if (dp_srng_init(soc, &soc->rxdma_mon_status_ring[lmac_id],
3486 				 RXDMA_MONITOR_STATUS, 0, lmac_id)) {
3487 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3488 				  FL(RNG_ERR "rxdma_mon_status_ring"));
3489 			goto fail1;
3490 		}
3491 
3492 		if (!soc->wlan_cfg_ctx->rxdma1_enable)
3493 			continue;
3494 
3495 		if (dp_srng_init(soc, &soc->rxdma_mon_buf_ring[lmac_id],
3496 				 RXDMA_MONITOR_BUF, 0, lmac_id)) {
3497 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3498 				  FL(RNG_ERR "rxdma_mon_buf_ring"));
3499 			goto fail1;
3500 		}
3501 
3502 		if (dp_srng_init(soc, &soc->rxdma_mon_dst_ring[lmac_id],
3503 				 RXDMA_MONITOR_DST, 0, lmac_id)) {
3504 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3505 				  FL(RNG_ERR "rxdma_mon_dst_ring"));
3506 			goto fail1;
3507 		}
3508 
3509 		if (dp_srng_init(soc, &soc->rxdma_mon_desc_ring[lmac_id],
3510 				 RXDMA_MONITOR_DESC, 0, lmac_id)) {
3511 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3512 				  FL(RNG_ERR "rxdma_mon_desc_ring"));
3513 			goto fail1;
3514 		}
3515 	}
3516 	return QDF_STATUS_SUCCESS;
3517 
3518 fail1:
3519 	dp_mon_rings_deinit(pdev);
3520 	return QDF_STATUS_E_NOMEM;
3521 }
3522 
3523 /**
3524  * dp_mon_rings_alloc() - Allocate memory for monitor srng rings
3525  * @soc: Datapath soc handle
3526  * @pdev: Datapath pdev handle
3527  *
3528  * Return: QDF_STATUS_SUCCESS on success
3529  *	   QDF_STATUS_E_NOMEM on failure
3530  */
3531 static
3532 QDF_STATUS dp_mon_rings_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
3533 {
3534 	int mac_id = 0;
3535 	int entries;
3536 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3537 
3538 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3539 
3540 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3541 		int lmac_id =
3542 		dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id);
3543 		entries = wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
3544 		if (dp_srng_alloc(soc, &soc->rxdma_mon_status_ring[lmac_id],
3545 				  RXDMA_MONITOR_STATUS, entries, 0)) {
3546 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3547 				  FL(RNG_ERR "rxdma_mon_status_ring"));
3548 			goto fail1;
3549 		}
3550 
3551 		if (!soc->wlan_cfg_ctx->rxdma1_enable)
3552 			continue;
3553 
3554 		entries = wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
3555 		if (dp_srng_alloc(soc, &soc->rxdma_mon_buf_ring[lmac_id],
3556 				  RXDMA_MONITOR_BUF, entries, 0)) {
3557 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3558 				  FL(RNG_ERR "rxdma_mon_buf_ring"));
3559 			goto fail1;
3560 		}
3561 
3562 		entries = wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
3563 		if (dp_srng_alloc(soc, &soc->rxdma_mon_dst_ring[lmac_id],
3564 				  RXDMA_MONITOR_DST, entries, 0)) {
3565 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3566 				  FL(RNG_ERR "rxdma_mon_dst_ring"));
3567 			goto fail1;
3568 		}
3569 
3570 		entries = wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
3571 		if (dp_srng_alloc(soc, &soc->rxdma_mon_desc_ring[lmac_id],
3572 				  RXDMA_MONITOR_DESC, entries, 0)) {
3573 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3574 				  FL(RNG_ERR "rxdma_mon_desc_ring"));
3575 			goto fail1;
3576 		}
3577 	}
3578 	return QDF_STATUS_SUCCESS;
3579 
3580 fail1:
3581 	dp_mon_rings_free(pdev);
3582 	return QDF_STATUS_E_NOMEM;
3583 }
3584 #else
3585 static void dp_mon_rings_free(struct dp_pdev *pdev)
3586 {
3587 }
3588 
3589 static void dp_mon_rings_deinit(struct dp_pdev *pdev)
3590 {
3591 }
3592 
3593 static
3594 QDF_STATUS dp_mon_rings_init(struct dp_soc *soc, struct dp_pdev *pdev)
3595 {
3596 	return QDF_STATUS_SUCCESS;
3597 }
3598 
3599 static
3600 QDF_STATUS dp_mon_rings_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
3601 {
3602 	return QDF_STATUS_SUCCESS;
3603 }
3604 #endif
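
/*
 * Example (illustrative sketch, not part of the driver): the monitor rings
 * above follow the two-phase alloc/init scheme used throughout this file,
 * so a hypothetical caller pairs the phases in reverse order on teardown:
 *
 *	if (dp_mon_rings_alloc(soc, pdev) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_NOMEM;
 *	if (dp_mon_rings_init(soc, pdev) != QDF_STATUS_SUCCESS) {
 *		dp_mon_rings_free(pdev);
 *		return QDF_STATUS_E_FAILURE;
 *	}
 *	...
 *	dp_mon_rings_deinit(pdev);
 *	dp_mon_rings_free(pdev);
 */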
3605 
3606 /* dp_iterate_update_peer_list() - update peer stats on cal client timer
3607  * @pdev_hdl: pdev handle
3608  */
3609 #ifdef ATH_SUPPORT_EXT_STAT
3610 void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
3611 {
3612 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
3613 	struct dp_soc *soc = pdev->soc;
3614 	struct dp_vdev *vdev = NULL;
3615 	struct dp_peer *peer = NULL;
3616 
3617 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3618 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3619 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
3620 		DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
3621 			dp_cal_client_update_peer_stats(&peer->stats);
3622 		}
3623 	}
3624 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3625 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3626 }
3627 #else
3628 void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
3629 {
3630 }
3631 #endif
3632 
3633 /*
3634  * dp_htt_ppdu_stats_attach() - attach resources for HTT PPDU stats processing
3635  * @pdev: Datapath PDEV handle
3636  *
3637  * Return: QDF_STATUS_SUCCESS: Success
3638  *         QDF_STATUS_E_NOMEM: Error
3639  */
3640 static QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev)
3641 {
3642 	pdev->ppdu_tlv_buf = qdf_mem_malloc(HTT_T2H_MAX_MSG_SIZE);
3643 
3644 	if (!pdev->ppdu_tlv_buf) {
3645 		QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "ppdu_tlv_buf alloc fail");
3646 		return QDF_STATUS_E_NOMEM;
3647 	}
3648 
3649 	return QDF_STATUS_SUCCESS;
3650 }
3651 
3652 /*
3653  * dp_pdev_attach_wifi3() - attach txrx pdev
3654  * @txrx_soc: Datapath SOC handle
3655  * @htc_handle: HTC handle for host-target interface
3656  * @qdf_osdev: QDF OS device
3657  * @pdev_id: PDEV ID
3658  *
3659  * Return: QDF_STATUS
3660  */
3661 static inline QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
3662 					      HTC_HANDLE htc_handle,
3663 					      qdf_device_t qdf_osdev,
3664 					      uint8_t pdev_id)
3665 {
3666 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3667 	struct dp_pdev *pdev = NULL;
3668 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3669 	int nss_cfg;
3670 
3671 	pdev = qdf_mem_malloc(sizeof(*pdev));
3672 	if (!pdev) {
3673 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3674 			  FL("DP PDEV memory allocation failed"));
3675 		goto fail0;
3676 	}
3677 	wlan_minidump_log(pdev, sizeof(*pdev), soc->ctrl_psoc,
3678 			  WLAN_MD_DP_PDEV, "dp_pdev");
3679 
3680 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3681 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
3682 
3683 	if (!pdev->wlan_cfg_ctx) {
3684 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3685 			  FL("pdev cfg_attach failed"));
3686 		goto fail1;
3687 	}
3688 
3689 	/*
3690 	 * set nss pdev config based on soc config
3691 	 */
3692 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
3693 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
3694 					 (nss_cfg & (1 << pdev_id)));
3695 
3696 	pdev->soc = soc;
3697 	pdev->pdev_id = pdev_id;
3698 	soc->pdev_list[pdev_id] = pdev;
3699 
3700 	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
3701 	soc->pdev_count++;
3702 
3703 	/* Allocate memory for pdev srng rings */
3704 	if (dp_pdev_srng_alloc(pdev)) {
3705 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3706 			  FL("dp_pdev_srng_alloc failed"));
3707 		goto fail2;
3708 	}
3709 
3710 	/* Rx specific init */
3711 	if (dp_rx_pdev_desc_pool_alloc(pdev)) {
3712 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3713 			  FL("dp_rx_pdev_desc_pool_alloc failed"));
3714 		goto fail3;
3715 	}
3716 
3717 	/* Rx monitor mode specific init */
3718 	if (dp_rx_pdev_mon_desc_pool_alloc(pdev)) {
3719 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3720 			  "dp_rx_pdev_mon_desc_pool_alloc failed");
3721 		goto fail4;
3722 	}
3723 
3724 	return QDF_STATUS_SUCCESS;
3725 fail4:
3726 	dp_rx_pdev_desc_pool_free(pdev);
3727 fail3:
3728 	dp_pdev_srng_free(pdev);
3729 fail2:
3730 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
3731 fail1:
3732 	qdf_mem_free(pdev);
3733 fail0:
3734 	return QDF_STATUS_E_FAILURE;
3735 }
3736 
3737 /*
3738  * dp_rxdma_ring_cleanup() - clean up the RX DMA rings
3739  * @soc: data path SoC handle
3740  * @pdev: Physical device handle
3741  *
3742  * Return: void
3743  */
3744 #ifdef QCA_HOST2FW_RXBUF_RING
3745 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
3746 {
3747 	int i;
3748 
3749 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
3750 		dp_srng_deinit(soc, &pdev->rx_mac_buf_ring[i], RXDMA_BUF, 1);
3751 		dp_srng_free(soc, &pdev->rx_mac_buf_ring[i]);
3752 	}
3753 
3754 	if (soc->reap_timer_init) {
3755 		qdf_timer_free(&soc->mon_reap_timer);
3756 		soc->reap_timer_init = 0;
3757 	}
3758 }
3759 #else
3760 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
3761 {
3762 	if (soc->lmac_timer_init) {
3763 		qdf_timer_stop(&soc->lmac_reap_timer);
3764 		qdf_timer_free(&soc->lmac_reap_timer);
3765 		soc->lmac_timer_init = 0;
3766 	}
3767 }
3768 #endif
3769 
3770 /*
3771  * dp_neighbour_peers_detach() - Detach neighbour peers (NAC clients)
3772  * @pdev: device object
3773  *
3774  * Return: void
3775  */
3776 static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
3777 {
3778 	struct dp_neighbour_peer *peer = NULL;
3779 	struct dp_neighbour_peer *temp_peer = NULL;
3780 
3781 	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
3782 			   neighbour_peer_list_elem, temp_peer) {
3783 		/* delete this peer from the list */
3784 		TAILQ_REMOVE(&pdev->neighbour_peers_list,
3785 			     peer, neighbour_peer_list_elem);
3786 		qdf_mem_free(peer);
3787 	}
3788 
3789 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
3790 }
3791 
3792 /**
3793  * dp_htt_ppdu_stats_detach() - detach stats resources
3794  * @pdev: Datapath PDEV handle
3795  *
3796  * Return: void
3797  */
3798 static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
3799 {
3800 	struct ppdu_info *ppdu_info, *ppdu_info_next;
3801 
3802 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
3803 			ppdu_info_list_elem, ppdu_info_next) {
3804 		if (!ppdu_info)
3805 			break;
3806 		qdf_assert_always(ppdu_info->nbuf);
3807 		qdf_nbuf_free(ppdu_info->nbuf);
3808 		qdf_mem_free(ppdu_info);
3809 	}
3810 
3811 	if (pdev->ppdu_tlv_buf)
3812 		qdf_mem_free(pdev->ppdu_tlv_buf);
3813 
3814 }
3815 
3816 #ifdef WLAN_DP_PENDING_MEM_FLUSH
3817 /**
3818  * dp_pdev_flush_pending_vdevs() - Flush all delete pending vdevs in pdev
3819  * @pdev: Datapath PDEV handle
3820  *
3821  * This is the last chance to flush all pending dp vdevs/peers;
3822  * peer/vdev leak cases such as non-SSR with missing peer unmap
3823  * are covered here.
3824  *
3825  * Return: None
3826  */
3827 static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
3828 {
3829 	struct dp_vdev *vdev = NULL;
3830 
3831 	while (true) {
3832 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
3833 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
3834 			if (vdev->delete.pending)
3835 				break;
3836 		}
3837 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3838 
3839 		/*
3840 		 * The vdev will be freed once all its peers are cleaned up;
3841 		 * dp_delete_pending_vdev() removes the vdev from the
3842 		 * pdev's vdev_list.
3843 		 */
3844 		if (vdev)
3845 			dp_vdev_flush_peers((struct cdp_vdev *)vdev, 0);
3846 		else
3847 			break;
3848 	}
3849 }
3850 #else
3851 static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
3852 {
3853 }
3854 #endif
3855 
3856 /**
3857  * dp_pdev_deinit() - Deinit txrx pdev
3858  * @txrx_pdev: Datapath PDEV handle
3859  * @force: Force deinit
3860  *
3861  * Return: None
3862  */
3863 static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
3864 {
3865 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3866 	qdf_nbuf_t curr_nbuf, next_nbuf;
3867 
3868 	if (pdev->pdev_deinit)
3869 		return;
3870 
3871 	dp_tx_me_exit(pdev);
3872 	dp_rx_fst_detach(pdev->soc, pdev);
3873 	dp_rx_pdev_mon_buffers_free(pdev);
3874 	dp_rx_pdev_buffers_free(pdev);
3875 	dp_rx_pdev_mon_desc_pool_deinit(pdev);
3876 	dp_rx_pdev_desc_pool_deinit(pdev);
3877 	dp_htt_ppdu_stats_detach(pdev);
3878 	dp_tx_ppdu_stats_detach(pdev);
3879 	qdf_event_destroy(&pdev->fw_peer_stats_event);
3880 	dp_cal_client_detach(&pdev->cal_client_ctx);
3881 	if (pdev->sojourn_buf)
3882 		qdf_nbuf_free(pdev->sojourn_buf);
3883 
3884 	dp_pdev_flush_pending_vdevs(pdev);
3885 	dp_tx_pdev_detach(pdev);
3886 	dp_pktlogmod_exit(pdev);
3887 	dp_neighbour_peers_detach(pdev);
3888 
3889 	qdf_spinlock_destroy(&pdev->tx_mutex);
3890 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
3891 
3892 	if (pdev->invalid_peer)
3893 		qdf_mem_free(pdev->invalid_peer);
3894 
3895 	if (pdev->filter)
3896 		dp_mon_filter_dealloc(pdev);
3897 
3898 	dp_pdev_srng_deinit(pdev);
3899 
3900 	dp_ipa_uc_detach(pdev->soc, pdev);
3901 	dp_cleanup_ipa_rx_refill_buf_ring(pdev->soc, pdev);
3902 	dp_rxdma_ring_cleanup(pdev->soc, pdev);
3903 
3904 	curr_nbuf = pdev->invalid_peer_head_msdu;
3905 	while (curr_nbuf) {
3906 		next_nbuf = qdf_nbuf_next(curr_nbuf);
3907 		qdf_nbuf_free(curr_nbuf);
3908 		curr_nbuf = next_nbuf;
3909 	}
3910 	pdev->invalid_peer_head_msdu = NULL;
3911 	pdev->invalid_peer_tail_msdu = NULL;
3912 
3913 	dp_wdi_event_detach(pdev);
3914 	pdev->pdev_deinit = 1;
3915 }
3916 
3917 /**
3918  * dp_pdev_deinit_wifi3() - Deinit txrx pdev
3919  * @psoc: Datapath psoc handle
3920  * @pdev_id: Id of datapath PDEV handle
3921  * @force: Force deinit
3922  *
3923  * Return: QDF_STATUS
3924  */
3925 static QDF_STATUS
3926 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
3927 		     int force)
3928 {
3929 	struct dp_pdev *txrx_pdev;
3930 
3931 	txrx_pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
3932 						       pdev_id);
3933 
3934 	if (!txrx_pdev)
3935 		return QDF_STATUS_E_FAILURE;
3936 
3937 	dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force);
3938 
3939 	return QDF_STATUS_SUCCESS;
3940 }
3941 
3942 /*
3943  * dp_pdev_detach() - Complete rest of pdev detach
3944  * @txrx_pdev: Datapath PDEV handle
3945  * @force: Force deinit
3946  *
3947  * Return: None
3948  */
3949 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
3950 {
3951 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3952 	struct dp_soc *soc = pdev->soc;
3953 
3954 	dp_rx_pdev_mon_desc_pool_free(pdev);
3955 	dp_rx_pdev_desc_pool_free(pdev);
3956 	dp_pdev_srng_free(pdev);
3957 
3958 	soc->pdev_count--;
3959 	soc->pdev_list[pdev->pdev_id] = NULL;
3960 
3961 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
3962 	wlan_minidump_remove(pdev);
3963 	qdf_mem_free(pdev);
3964 }
3965 
3966 /*
3967  * dp_pdev_detach_wifi3() - detach txrx pdev
3968  * @psoc: Datapath soc handle
3969  * @pdev_id: pdev id of pdev
3970  * @force: Force detach
3971  *
3972  * Return: QDF_STATUS
3973  */
3974 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
3975 				       int force)
3976 {
3977 	struct dp_pdev *pdev;
3978 
3979 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
3980 						  pdev_id);
3981 
3982 	if (!pdev) {
3983 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3984 			  FL("DP PDEV is Null for pdev id %d"), pdev_id);
3985 		return QDF_STATUS_E_FAILURE;
3986 	}
3987 
3988 	dp_pdev_detach((struct cdp_pdev *)pdev, force);
3989 	return QDF_STATUS_SUCCESS;
3990 }
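
/*
 * Example (illustrative sketch): pdev teardown is split into a deinit
 * phase and a detach/free phase, mirroring the attach path above. A
 * hypothetical control-path caller invokes them in order:
 *
 *	dp_pdev_deinit_wifi3(psoc, pdev_id, force);
 *	dp_pdev_detach_wifi3(psoc, pdev_id, force);
 */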
3991 
3992 /*
3993  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
3994  * @soc: DP SOC handle
3995  */
3996 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
3997 {
3998 	struct reo_desc_list_node *desc;
3999 	struct dp_rx_tid *rx_tid;
4000 
4001 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
4002 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
4003 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
4004 		rx_tid = &desc->rx_tid;
4005 		qdf_mem_unmap_nbytes_single(soc->osdev,
4006 			rx_tid->hw_qdesc_paddr,
4007 			QDF_DMA_BIDIRECTIONAL,
4008 			rx_tid->hw_qdesc_alloc_size);
4009 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
4010 		qdf_mem_free(desc);
4011 	}
4012 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
4013 	qdf_list_destroy(&soc->reo_desc_freelist);
4014 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
4015 }
4016 
4017 /*
4018  * dp_soc_reset_txrx_ring_map() - reset tx ring map
4019  * @soc: DP SOC handle
4020  *
4021  */
4022 static void dp_soc_reset_txrx_ring_map(struct dp_soc *soc)
4023 {
4024 	uint32_t i;
4025 
4026 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
4027 		soc->tx_ring_map[i] = 0;
4028 }
4029 
4030 /**
4031  * dp_soc_deinit() - Deinitialize txrx SOC
4032  * @txrx_soc: Opaque DP SOC handle
4033  *
4034  * Return: None
4035  */
4036 static void dp_soc_deinit(void *txrx_soc)
4037 {
4038 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4039 	struct htt_soc *htt_soc = soc->htt_handle;
4040 
4041 	qdf_atomic_set(&soc->cmn_init_done, 0);
4042 
4043 	/* free peer tables & AST tables allocated during peer_map_attach */
4044 	if (soc->peer_map_attach_success) {
4045 		dp_peer_find_detach(soc);
4046 		soc->peer_map_attach_success = FALSE;
4047 	}
4048 
4049 	qdf_flush_work(&soc->htt_stats.work);
4050 	qdf_disable_work(&soc->htt_stats.work);
4051 
4052 	qdf_spinlock_destroy(&soc->htt_stats.lock);
4053 
4054 	dp_soc_reset_txrx_ring_map(soc);
4055 
4056 	dp_reo_desc_freelist_destroy(soc);
4057 
4058 	DEINIT_RX_HW_STATS_LOCK(soc);
4059 
4060 	qdf_spinlock_destroy(&soc->ast_lock);
4061 	qdf_spinlock_destroy(&soc->peer_ref_mutex);
4062 
4063 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
4064 
4065 	dp_soc_wds_detach(soc);
4066 
4067 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
4068 
4069 	dp_reo_cmdlist_destroy(soc);
4070 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
4071 
4072 	dp_soc_tx_desc_sw_pools_deinit(soc);
4073 
4074 	dp_soc_srng_deinit(soc);
4075 
4076 	dp_hw_link_desc_ring_deinit(soc);
4077 
4078 	htt_soc_htc_dealloc(soc->htt_handle);
4079 
4080 	htt_soc_detach(htt_soc);
4081 
4082 	/* Free wbm sg list and reset flags in down path */
4083 	dp_rx_wbm_sg_list_deinit(soc);
4084 
4085 	wlan_minidump_remove(soc);
4086 }
4087 
4088 /**
4089  * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
4090  * @txrx_soc: Opaque DP SOC handle
4091  *
4092  * Return: None
4093  */
4094 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc)
4095 {
4096 	dp_soc_deinit(txrx_soc);
4097 }
4098 
4099 /*
4100  * dp_soc_detach() - Detach rest of txrx SOC
4101  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
4102  *
4103  * Return: None
4104  */
4105 static void dp_soc_detach(struct cdp_soc_t *txrx_soc)
4106 {
4107 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4108 
4109 	dp_soc_tx_desc_sw_pools_free(soc);
4110 	dp_soc_srng_free(soc);
4111 	dp_hw_link_desc_ring_free(soc);
4112 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
4113 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
4114 
4115 	qdf_mem_free(soc);
4116 }
4117 
4118 /*
4119  * dp_soc_detach_wifi3() - Detach txrx SOC
4120  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
4121  *
4122  * Return: None
4123  */
4124 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc)
4125 {
4126 	dp_soc_detach(txrx_soc);
4127 }
4128 
4129 #if !defined(DISABLE_MON_CONFIG)
4130 /**
4131  * dp_mon_htt_srng_setup() - Prepare HTT messages for Monitor rings
4132  * @soc: soc handle
4133  * @pdev: physical device handle
4134  * @mac_id: LMAC ring index
4135  * @mac_for_pdev: MAC id mapped to this pdev, used in HTT setup messages
4136  *
4137  * Return: non-zero for failure, zero for success
4138  */
4139 static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
4140 					struct dp_pdev *pdev,
4141 					int mac_id,
4142 					int mac_for_pdev)
4143 {
4144 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4145 
4146 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
4147 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4148 					soc->rxdma_mon_buf_ring[mac_id]
4149 					.hal_srng,
4150 					RXDMA_MONITOR_BUF);
4151 
4152 		if (status != QDF_STATUS_SUCCESS) {
4153 			dp_err("Failed to send htt srng setup message for Rxdma mon buf ring");
4154 			return status;
4155 		}
4156 
4157 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4158 					soc->rxdma_mon_dst_ring[mac_id]
4159 					.hal_srng,
4160 					RXDMA_MONITOR_DST);
4161 
4162 		if (status != QDF_STATUS_SUCCESS) {
4163 			dp_err("Failed to send htt srng setup message for Rxdma mon dst ring");
4164 			return status;
4165 		}
4166 
4167 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4168 					soc->rxdma_mon_status_ring[mac_id]
4169 					.hal_srng,
4170 					RXDMA_MONITOR_STATUS);
4171 
4172 		if (status != QDF_STATUS_SUCCESS) {
4173 			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
4174 			return status;
4175 		}
4176 
4177 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4178 				soc->rxdma_mon_desc_ring[mac_id]
4179 					.hal_srng,
4180 					RXDMA_MONITOR_DESC);
4181 
4182 		if (status != QDF_STATUS_SUCCESS) {
4183 			dp_err("Failed to send htt srng message for Rxdma mon desc ring");
4184 			return status;
4185 		}
4186 	} else {
4187 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4188 					soc->rxdma_mon_status_ring[mac_id]
4189 					.hal_srng,
4190 					RXDMA_MONITOR_STATUS);
4191 
4192 		if (status != QDF_STATUS_SUCCESS) {
4193 			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
4194 			return status;
4195 		}
4196 	}
4197 
4198 	return status;
4199 
4200 }
4201 #else
4202 static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
4203 					struct dp_pdev *pdev,
4204 					int mac_id,
4205 					int mac_for_pdev)
4206 {
4207 	return QDF_STATUS_SUCCESS;
4208 }
4209 #endif
4210 
4211 /*
4212  * dp_rxdma_ring_config() - configure the RX DMA rings
4213  *
4214  * This function is used to configure the MAC rings.
4215  * On MCL, the host provides buffers in the Host2FW ring;
4216  * FW refills (copies) buffers into the ring and updates
4217  * ring_idx in the register.
4218  *
4219  * @soc: data path SoC handle
4220  *
4221  * Return: zero on success, non-zero on failure
4222  */
4223 #ifdef QCA_HOST2FW_RXBUF_RING
4224 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
4225 {
4226 	int i;
4227 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4228 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4229 		struct dp_pdev *pdev = soc->pdev_list[i];
4230 
4231 		if (pdev) {
4232 			int mac_id;
4233 			bool dbs_enable = 0;
4234 			int max_mac_rings =
4235 				 wlan_cfg_get_num_mac_rings
4236 				(pdev->wlan_cfg_ctx);
4237 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);
4238 
4239 			htt_srng_setup(soc->htt_handle, 0,
4240 				       soc->rx_refill_buf_ring[lmac_id]
4241 				       .hal_srng,
4242 				       RXDMA_BUF);
4243 
4244 			if (pdev->rx_refill_buf_ring2.hal_srng)
4245 				htt_srng_setup(soc->htt_handle, 0,
4246 					pdev->rx_refill_buf_ring2.hal_srng,
4247 					RXDMA_BUF);
4248 
4249 			if (soc->cdp_soc.ol_ops->
4250 				is_hw_dbs_2x2_capable) {
4251 				dbs_enable = soc->cdp_soc.ol_ops->
4252 					is_hw_dbs_2x2_capable(
4253 							(void *)soc->ctrl_psoc);
4254 			}
4255 
4256 			if (dbs_enable) {
4257 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4258 				QDF_TRACE_LEVEL_ERROR,
4259 				FL("DBS enabled max_mac_rings %d"),
4260 					 max_mac_rings);
4261 			} else {
4262 				max_mac_rings = 1;
4263 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4264 					 QDF_TRACE_LEVEL_ERROR,
4265 					 FL("DBS disabled, max_mac_rings %d"),
4266 					 max_mac_rings);
4267 			}
4268 
4269 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4270 					 FL("pdev_id %d max_mac_rings %d"),
4271 					 pdev->pdev_id, max_mac_rings);
4272 
4273 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
4274 				int mac_for_pdev =
4275 					dp_get_mac_id_for_pdev(mac_id,
4276 							       pdev->pdev_id);
4277 				/*
4278 				 * Obtain lmac id from pdev to access the LMAC
4279 				 * ring in soc context
4280 				 */
4281 				lmac_id =
4282 				dp_get_lmac_id_for_pdev_id(soc,
4283 							   mac_id,
4284 							   pdev->pdev_id);
4285 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4286 					 QDF_TRACE_LEVEL_ERROR,
4287 					 FL("mac_id %d"), mac_for_pdev);
4288 
4289 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
4290 					 pdev->rx_mac_buf_ring[mac_id]
4291 						.hal_srng,
4292 					 RXDMA_BUF);
4293 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
4294 				soc->rxdma_err_dst_ring[lmac_id]
4295 					.hal_srng,
4296 					RXDMA_DST);
4297 
4298 				/* Configure monitor mode rings */
4299 				status = dp_mon_htt_srng_setup(soc, pdev,
4300 							       lmac_id,
4301 							       mac_for_pdev);
4302 				if (status != QDF_STATUS_SUCCESS) {
4303 					dp_err("Failed to send htt monitor messages to target");
4304 					return status;
4305 				}
4306 
4307 			}
4308 		}
4309 	}
4310 
4311 	/*
4312 	 * Timer to reap rxdma status rings.
4313 	 * Needed until we enable ppdu end interrupts
4314 	 */
4315 	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
4316 		       dp_mon_reap_timer_handler, (void *)soc,
4317 		       QDF_TIMER_TYPE_WAKE_APPS);
4318 	soc->reap_timer_init = 1;
4319 	return status;
4320 }
4321 #else
4322 /* This is only for WIN */
4323 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
4324 {
4325 	int i;
4326 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4327 	int mac_for_pdev;
4328 	int lmac_id;
4329 
4330 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4331 		struct dp_pdev *pdev = soc->pdev_list[i];
4332 
4333 		if (!pdev)
4334 			continue;
4335 
4336 		mac_for_pdev = i;
4337 		lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);
4338 
4339 		htt_srng_setup(soc->htt_handle, mac_for_pdev,
4340 			       soc->rx_refill_buf_ring[lmac_id].
4341 			       hal_srng, RXDMA_BUF);
4342 #ifndef DISABLE_MON_CONFIG
4343 
4344 		htt_srng_setup(soc->htt_handle, mac_for_pdev,
4345 			       soc->rxdma_mon_buf_ring[lmac_id].hal_srng,
4346 			       RXDMA_MONITOR_BUF);
4347 		htt_srng_setup(soc->htt_handle, mac_for_pdev,
4348 			       soc->rxdma_mon_dst_ring[lmac_id].hal_srng,
4349 			       RXDMA_MONITOR_DST);
4350 		htt_srng_setup(soc->htt_handle, mac_for_pdev,
4351 			       soc->rxdma_mon_status_ring[lmac_id].hal_srng,
4352 			       RXDMA_MONITOR_STATUS);
4353 		htt_srng_setup(soc->htt_handle, mac_for_pdev,
4354 			       soc->rxdma_mon_desc_ring[lmac_id].hal_srng,
4355 			       RXDMA_MONITOR_DESC);
4356 #endif
4357 		htt_srng_setup(soc->htt_handle, mac_for_pdev,
4358 			       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
4359 			       RXDMA_DST);
4360 	}
4361 
4362 	/* Configure LMAC rings in Polled mode */
4363 	if (soc->lmac_polled_mode) {
4364 		/*
4365 		 * Timer to reap lmac rings.
4366 		 */
4367 		qdf_timer_init(soc->osdev, &soc->lmac_reap_timer,
4368 			       dp_service_lmac_rings, (void *)soc,
4369 			       QDF_TIMER_TYPE_WAKE_APPS);
4370 		soc->lmac_timer_init = 1;
4371 		qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
4372 	}
4373 	return status;
4374 }
4375 #endif
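
/*
 * Example (illustrative sketch): both dp_rxdma_ring_config() variants arm
 * a reap timer whose teardown lives in dp_rxdma_ring_cleanup(). The
 * assumed pairing of the qdf timer calls is:
 *
 *	qdf_timer_init(osdev, &timer, handler, ctx, QDF_TIMER_TYPE_WAKE_APPS);
 *	qdf_timer_mod(&timer, DP_INTR_POLL_TIMER_MS);
 *	...
 *	qdf_timer_stop(&timer);
 *	qdf_timer_free(&timer);
 */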
4376 
4377 #ifdef NO_RX_PKT_HDR_TLV
4378 static QDF_STATUS
4379 dp_rxdma_ring_sel_cfg(struct dp_soc *soc)
4380 {
4381 	int i;
4382 	int mac_id;
4383 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
4384 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4385 
4386 	htt_tlv_filter.mpdu_start = 1;
4387 	htt_tlv_filter.msdu_start = 1;
4388 	htt_tlv_filter.mpdu_end = 1;
4389 	htt_tlv_filter.msdu_end = 1;
4390 	htt_tlv_filter.attention = 1;
4391 	htt_tlv_filter.packet = 1;
4392 	htt_tlv_filter.packet_header = 0;
4393 
4394 	htt_tlv_filter.ppdu_start = 0;
4395 	htt_tlv_filter.ppdu_end = 0;
4396 	htt_tlv_filter.ppdu_end_user_stats = 0;
4397 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4398 	htt_tlv_filter.ppdu_end_status_done = 0;
4399 	htt_tlv_filter.enable_fp = 1;
4400 	htt_tlv_filter.enable_md = 0;
4402 	htt_tlv_filter.enable_mo = 0;
4403 
4404 	htt_tlv_filter.fp_mgmt_filter = 0;
4405 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
4406 	htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
4407 					 FILTER_DATA_MCAST |
4408 					 FILTER_DATA_DATA);
4409 	htt_tlv_filter.mo_mgmt_filter = 0;
4410 	htt_tlv_filter.mo_ctrl_filter = 0;
4411 	htt_tlv_filter.mo_data_filter = 0;
4412 	htt_tlv_filter.md_data_filter = 0;
4413 
4414 	htt_tlv_filter.offset_valid = true;
4415 
4416 	htt_tlv_filter.rx_packet_offset = RX_PKT_TLVS_LEN;
4417 	/* Not subscribing rx_pkt_header */
4418 	htt_tlv_filter.rx_header_offset = 0;
4419 	htt_tlv_filter.rx_mpdu_start_offset =
4420 				hal_rx_mpdu_start_offset_get(soc->hal_soc);
4421 	htt_tlv_filter.rx_mpdu_end_offset =
4422 				hal_rx_mpdu_end_offset_get(soc->hal_soc);
4423 	htt_tlv_filter.rx_msdu_start_offset =
4424 				hal_rx_msdu_start_offset_get(soc->hal_soc);
4425 	htt_tlv_filter.rx_msdu_end_offset =
4426 				hal_rx_msdu_end_offset_get(soc->hal_soc);
4427 	htt_tlv_filter.rx_attn_offset =
4428 				hal_rx_attn_offset_get(soc->hal_soc);
4429 
4430 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4431 		struct dp_pdev *pdev = soc->pdev_list[i];
4432 
4433 		if (!pdev)
4434 			continue;
4435 
4436 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4437 			int mac_for_pdev =
4438 				dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
4439 			/*
4440 			 * Obtain lmac id from pdev to access the LMAC ring
4441 			 * in soc context
4442 			 */
4443 			int lmac_id =
4444 				dp_get_lmac_id_for_pdev_id(soc, mac_id,
4445 							   pdev->pdev_id);
4446 
4447 			htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4448 					    soc->rx_refill_buf_ring[lmac_id].
4449 					    hal_srng,
4450 					    RXDMA_BUF, RX_DATA_BUFFER_SIZE,
4451 					    &htt_tlv_filter);
4452 		}
4453 	}
4454 	return status;
4455 }
4456 #else
4457 static QDF_STATUS
4458 dp_rxdma_ring_sel_cfg(struct dp_soc *soc)
4459 {
4460 	return QDF_STATUS_SUCCESS;
4461 }
4462 #endif
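
/*
 * Example (illustrative sketch; the chosen field values are hypothetical):
 * a minimal htt_rx_ring_tlv_filter that subscribes only to the attention
 * TLV for FP unicast data would be built the same way as above:
 *
 *	struct htt_rx_ring_tlv_filter filter = {0};
 *
 *	filter.attention = 1;
 *	filter.enable_fp = 1;
 *	filter.fp_data_filter = FILTER_DATA_UCAST;
 *	htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev, hal_srng,
 *			    RXDMA_BUF, RX_DATA_BUFFER_SIZE, &filter);
 */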
4463 
4464 /*
4465  * dp_rx_target_fst_config() - configure the RXOLE Flow Search Engine
4466  *
4467  * This function is used to configure the FSE HW block in RX OLE on a
4468  * per pdev basis. Here, we will be programming parameters related to
4469  * the Flow Search Table.
4470  *
4471  * @soc: data path SoC handle
4472  *
4473  * Return: zero on success, non-zero on failure
4474  */
4475 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
4476 static QDF_STATUS
4477 dp_rx_target_fst_config(struct dp_soc *soc)
4478 {
4479 	int i;
4480 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4481 
4482 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4483 		struct dp_pdev *pdev = soc->pdev_list[i];
4484 
4485 		/* Flow search is not enabled if NSS offload is enabled */
4486 		if (pdev &&
4487 		    !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
4488 			status = dp_rx_flow_send_fst_fw_setup(pdev->soc, pdev);
4489 			if (status != QDF_STATUS_SUCCESS)
4490 				break;
4491 		}
4492 	}
4493 	return status;
4494 }
4495 #elif defined(WLAN_SUPPORT_RX_FISA)
4496 /**
4497  * dp_rx_target_fst_config() - Configure RX OLE FSE engine in HW
4498  * @soc: SoC handle
4499  *
4500  * Return: Success
4501  */
4502 static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
4503 {
4504 	/* Check if it is enabled in the INI */
4505 	if (!soc->fisa_enable) {
4506 		dp_err("RX FISA feature is disabled");
4507 		return QDF_STATUS_E_NOSUPPORT;
4508 	}
4509 
4510 	return dp_rx_flow_send_fst_fw_setup(soc, soc->pdev_list[0]);
4511 }
4512 
4513 #define FISA_MAX_TIMEOUT 0xffffffff
4514 #define FISA_DISABLE_TIMEOUT 0
4515 static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
4516 {
4517 	struct dp_htt_rx_fisa_cfg fisa_config;
4518 
4519 	fisa_config.pdev_id = 0;
4520 	fisa_config.fisa_timeout = FISA_MAX_TIMEOUT;
4521 
4522 	return dp_htt_rx_fisa_config(soc->pdev_list[0], &fisa_config);
4523 }
4524 #else /* !WLAN_SUPPORT_RX_FISA */
4525 static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
4526 {
4527 	return QDF_STATUS_SUCCESS;
4528 }
4529 #endif /* !WLAN_SUPPORT_RX_FISA */
4530 
4531 #ifndef WLAN_SUPPORT_RX_FISA
4532 static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
4533 {
4534 	return QDF_STATUS_SUCCESS;
4535 }
4536 
4537 static QDF_STATUS dp_rx_dump_fisa_stats(struct dp_soc *soc)
4538 {
4539 	return QDF_STATUS_SUCCESS;
4540 }
4541 
4542 static void dp_rx_dump_fisa_table(struct dp_soc *soc)
4543 {
4544 }
4545 #endif /* !WLAN_SUPPORT_RX_FISA */
4546 
4547 /*
4548  * dp_soc_attach_target_wifi3() - SOC initialization in the target
4549  * @cdp_soc: Opaque Datapath SOC handle
4550  *
4551  * Return: zero on success, non-zero on failure
4552  */
4553 static QDF_STATUS
4554 dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
4555 {
4556 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
4557 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4558 
4559 	htt_soc_attach_target(soc->htt_handle);
4560 
4561 	status = dp_rxdma_ring_config(soc);
4562 	if (status != QDF_STATUS_SUCCESS) {
4563 		dp_err("Failed to send htt srng setup messages to target");
4564 		return status;
4565 	}
4566 
4567 	status = dp_rxdma_ring_sel_cfg(soc);
4568 	if (status != QDF_STATUS_SUCCESS) {
4569 		dp_err("Failed to send htt ring config message to target");
4570 		return status;
4571 	}
4572 
4573 	status = dp_rx_target_fst_config(soc);
4574 	if (status != QDF_STATUS_SUCCESS &&
4575 	    status != QDF_STATUS_E_NOSUPPORT) {
4576 		dp_err("Failed to send htt fst setup config message to target");
4577 		return status;
4578 	}
4579 
4580 	if (status == QDF_STATUS_SUCCESS) {
4581 		status = dp_rx_fisa_config(soc);
4582 		if (status != QDF_STATUS_SUCCESS) {
4583 			dp_err("Failed to send htt FISA config message to target");
4584 			return status;
4585 		}
4586 	}
4587 
4588 	DP_STATS_INIT(soc);
4589 
4590 	/* initialize work queue for stats processing */
4591 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
4592 
4593 	return QDF_STATUS_SUCCESS;
4594 }
4595 
4596 #ifdef QCA_SUPPORT_FULL_MON
4597 static inline QDF_STATUS
4598 dp_soc_config_full_mon_mode(struct dp_pdev *pdev, enum dp_full_mon_config val)
4599 {
4600 	struct dp_soc *soc = pdev->soc;
4601 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4602 
4603 	if (!soc->full_mon_mode)
4604 		return QDF_STATUS_SUCCESS;
4605 
4606 	if ((htt_h2t_full_mon_cfg(soc->htt_handle,
4607 				  pdev->pdev_id,
4608 				  val)) != QDF_STATUS_SUCCESS) {
4609 		status = QDF_STATUS_E_FAILURE;
4610 	}
4611 
4612 	return status;
4613 }
4614 #else
4615 static inline QDF_STATUS
4616 dp_soc_config_full_mon_mode(struct dp_pdev *pdev, enum dp_full_mon_config val)
4617 {
4618 	return QDF_STATUS_SUCCESS;
4619 }
4620 #endif
4621 
4622 /*
4623  * dp_vdev_attach_wifi3() - attach txrx vdev
4624  * @cdp_soc: Datapath SOC handle
4625  * @pdev_id: PDEV ID
4626  * @vdev_mac_addr: MAC address of the virtual interface
4627  * @vdev_id: VDEV Id
4628  * @op_mode: VDEV operating mode
4629  * @subtype: VDEV operating subtype
4630  * Return: QDF_STATUS
4631  */
4632 static QDF_STATUS dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc,
4633 				       uint8_t pdev_id,
4634 				       uint8_t *vdev_mac_addr,
4635 				       uint8_t vdev_id,
4636 				       enum wlan_op_mode op_mode,
4637 				       enum wlan_op_subtype subtype)
4638 {
4639 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
4640 	struct dp_pdev *pdev =
4641 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
4642 						   pdev_id);
4643 	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
4644 
4645 	if (!pdev) {
4646 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4647 			  FL("DP PDEV is Null for pdev id %d"), pdev_id);
4648 		qdf_mem_free(vdev);
4649 		goto fail0;
4650 	}
4651 
4652 	if (!vdev) {
4653 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4654 			FL("DP VDEV memory allocation failed"));
4655 		goto fail0;
4656 	}
4657 
4658 	wlan_minidump_log(vdev, sizeof(*vdev), soc->ctrl_psoc,
4659 			  WLAN_MD_DP_VDEV, "dp_vdev");
4660 
4661 	vdev->pdev = pdev;
4662 	vdev->vdev_id = vdev_id;
4663 	vdev->opmode = op_mode;
4664 	vdev->subtype = subtype;
4665 	vdev->osdev = soc->osdev;
4666 
4667 	vdev->osif_rx = NULL;
4668 	vdev->osif_rsim_rx_decap = NULL;
4669 	vdev->osif_get_key = NULL;
4670 	vdev->osif_rx_mon = NULL;
4671 	vdev->osif_tx_free_ext = NULL;
4672 	vdev->osif_vdev = NULL;
4673 
4674 	vdev->delete.pending = 0;
4675 	vdev->safemode = 0;
4676 	vdev->drop_unenc = 1;
4677 	vdev->sec_type = cdp_sec_type_none;
4678 	vdev->multipass_en = false;
4679 #ifdef notyet
4680 	vdev->filters_num = 0;
4681 #endif
4682 	vdev->lmac_id = pdev->lmac_id;
4683 
4684 	qdf_mem_copy(
4685 		&vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE);
4686 
4687 	/* TODO: Initialize default HTT meta data that will be used in
4688 	 * TCL descriptors for packets transmitted from this VDEV
4689 	 */
4690 
4691 	TAILQ_INIT(&vdev->peer_list);
4692 	dp_peer_multipass_list_init(vdev);
4693 
4694 	if ((soc->intr_mode == DP_INTR_POLL) &&
4695 	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
4696 		if ((pdev->vdev_count == 0) ||
4697 		    (wlan_op_mode_monitor == vdev->opmode))
4698 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
4699 	}
4700 
4701 	soc->vdev_id_map[vdev_id] = vdev;
4702 
4703 	if (wlan_op_mode_monitor == vdev->opmode) {
4704 		pdev->monitor_vdev = vdev;
4705 		return QDF_STATUS_SUCCESS;
4706 	}
4707 
4708 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
4709 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
4710 	vdev->dscp_tid_map_id = 0;
4711 	vdev->mcast_enhancement_en = 0;
4712 	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
4713 	vdev->prev_tx_enq_tstamp = 0;
4714 	vdev->prev_rx_deliver_tstamp = 0;
4715 
4716 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4717 	/* add this vdev into the pdev's list */
4718 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
4719 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4720 	pdev->vdev_count++;
4721 
4722 	if (wlan_op_mode_sta != vdev->opmode)
4723 		vdev->ap_bridge_enabled = true;
4724 	else
4725 		vdev->ap_bridge_enabled = false;
4726 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4727 		  "%s: wlan_cfg_ap_bridge_enabled %d",
4728 		  __func__, vdev->ap_bridge_enabled);
4729 
4730 	dp_tx_vdev_attach(vdev);
4731 
4732 	if (pdev->vdev_count == 1)
4733 		dp_lro_hash_setup(soc, pdev);
4734 
4735 	dp_info("Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
4736 	DP_STATS_INIT(vdev);
4737 
4738 	if (wlan_op_mode_sta == vdev->opmode)
4739 		dp_peer_create_wifi3((struct cdp_soc_t *)soc, vdev_id,
4740 				     vdev->mac_addr.raw);
4741 
4742 	return QDF_STATUS_SUCCESS;
4743 
4744 fail0:
4745 	return QDF_STATUS_E_FAILURE;
4746 }
4747 
4748 /**
4749  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
4750  * @soc: Datapath soc handle
4751  * @vdev_id: id of Datapath VDEV handle
4752  * @osif_vdev: OSIF vdev handle
4753  * @txrx_ops: Tx and Rx operations
4754  *
4755  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
4756  */
4757 static QDF_STATUS dp_vdev_register_wifi3(struct cdp_soc_t *soc,
4758 					 uint8_t vdev_id,
4759 					 ol_osif_vdev_handle osif_vdev,
4760 					 struct ol_txrx_ops *txrx_ops)
4761 {
4762 	struct dp_vdev *vdev =
4763 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
4764 						   vdev_id);
4765 
4766 	if (!vdev)
4767 		return QDF_STATUS_E_FAILURE;
4768 
4769 	vdev->osif_vdev = osif_vdev;
4770 	vdev->osif_rx = txrx_ops->rx.rx;
4771 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
4772 	vdev->osif_rx_flush = txrx_ops->rx.rx_flush;
4773 	vdev->osif_gro_flush = txrx_ops->rx.rx_gro_flush;
4774 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
4775 	vdev->osif_fisa_rx = txrx_ops->rx.osif_fisa_rx;
4776 	vdev->osif_fisa_flush = txrx_ops->rx.osif_fisa_flush;
4777 	vdev->osif_get_key = txrx_ops->get_key;
4778 	vdev->osif_rx_mon = txrx_ops->rx.mon;
4779 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
4780 	vdev->tx_comp = txrx_ops->tx.tx_comp;
4781 	vdev->stats_cb = txrx_ops->rx.stats_rx;
4782 #ifdef notyet
4783 #if ATH_SUPPORT_WAPI
4784 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
4785 #endif
4786 #endif
4787 #ifdef UMAC_SUPPORT_PROXY_ARP
4788 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
4789 #endif
4790 	vdev->me_convert = txrx_ops->me_convert;
4791 
4792 	/* TODO: Enable the following once Tx code is integrated */
4793 	if (vdev->mesh_vdev)
4794 		txrx_ops->tx.tx = dp_tx_send_mesh;
4795 	else
4796 		txrx_ops->tx.tx = dp_tx_send;
4797 
4798 	txrx_ops->tx.tx_exception = dp_tx_send_exception;
4799 
4800 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
4801 		"DP Vdev Register success");
4802 
4803 	return QDF_STATUS_SUCCESS;
4804 }
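
/*
 * Example (illustrative sketch; the OSIF handler names are hypothetical):
 * an OSIF layer registers a vdev by filling ol_txrx_ops, and this function
 * publishes the DP tx entry points back through the same structure:
 *
 *	struct ol_txrx_ops ops = {0};
 *
 *	ops.rx.rx = osif_rx_deliver;
 *	ops.get_key = osif_get_key_cb;
 *	dp_vdev_register_wifi3(soc, vdev_id, osif_vdev, &ops);
 *
 * On return, ops.tx.tx points at dp_tx_send (or dp_tx_send_mesh for a
 * mesh vdev) and ops.tx.tx_exception at dp_tx_send_exception.
 */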
4805 
4806 /**
4807  * dp_vdev_flush_peers() - Forcibly flush peers of vdev
4808  * @vdev_handle: Datapath VDEV handle
4809  * @unmap_only: Flag to indicate "only unmap"
4810  *
4811  * Return: void
4812  */
4813 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, bool unmap_only)
4814 {
4815 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4816 	struct dp_pdev *pdev = vdev->pdev;
4817 	struct dp_soc *soc = pdev->soc;
4818 	struct dp_peer *peer;
4819 	uint16_t *peer_ids;
4820 	struct dp_peer **peer_array = NULL;
4821 	uint8_t i = 0, j = 0;
4822 	uint8_t m = 0, n = 0;
4823 
4824 	peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(*peer_ids));
4825 	if (!peer_ids) {
4826 		dp_err("DP alloc failure - unable to flush peers");
4827 		return;
4828 	}
4829 
4830 	if (!unmap_only) {
4831 		peer_array = qdf_mem_malloc(
4832 				soc->max_peers * sizeof(struct dp_peer *));
4833 		if (!peer_array) {
4834 			qdf_mem_free(peer_ids);
4835 			dp_err("DP alloc failure - unable to flush peers");
4836 			return;
4837 		}
4838 	}
4839 
4840 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4841 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4842 		if (!unmap_only && n < soc->max_peers)
4843 			peer_array[n++] = peer;
4844 
4845 		if (peer->peer_id != HTT_INVALID_PEER)
4846 			if (j < soc->max_peers)
4847 				peer_ids[j++] = peer->peer_id;
4848 	}
4849 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4850 
4851 	/*
4852 	 * If the peer id is invalid, the peer must still be flushed when
4853 	 * its valid flag is true; this is needed for the NAN + SSR case.
4854 	 */
4855 	if (!unmap_only) {
4856 		for (m = 0; m < n ; m++) {
4857 			peer = peer_array[m];
4858 
4859 			dp_info("peer: %pM is getting deleted",
4860 				peer->mac_addr.raw);
4861 			/* only if peer valid is true */
4862 			if (peer->valid)
4863 				dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
4864 						     vdev->vdev_id,
4865 						     peer->mac_addr.raw, 0);
4866 		}
4867 		qdf_mem_free(peer_array);
4868 	}
4869 
4870 	for (i = 0; i < j ; i++) {
4871 		peer = __dp_peer_find_by_id(soc, peer_ids[i]);
4872 
4873 		if (!peer)
4874 			continue;
4875 
4876 		dp_info("peer ref cnt %d", qdf_atomic_read(&peer->ref_cnt));
4877 		/*
4878 		 * set ref count to one to force delete the peers
4879 		 * with ref count leak
4880 		 */
4881 		SET_PEER_REF_CNT_ONE(peer);
4882 		dp_info("peer: %pM is getting unmapped",
4883 			peer->mac_addr.raw);
4884 
4885 		dp_rx_peer_unmap_handler(soc, peer_ids[i],
4886 					 vdev->vdev_id,
4887 					 peer->mac_addr.raw, 0,
4888 					 DP_PEER_WDS_COUNT_INVALID);
4889 	}
4890 
4891 	qdf_mem_free(peer_ids);
4892 	dp_info("Flushed peers for vdev object %pK ", vdev);
4893 }
4894 
4895 /*
4896  * dp_vdev_detach_wifi3() - Detach txrx vdev
4897  * @cdp_soc: Datapath soc handle
4898  * @vdev_id: VDEV Id
4899  * @callback: Callback OL_IF on completion of detach
4900  * @cb_context:	Callback context
4901  * Return: QDF_STATUS
4902  */
4903 static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc,
4904 				       uint8_t vdev_id,
4905 				       ol_txrx_vdev_delete_cb callback,
4906 				       void *cb_context)
4907 {
4908 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
4909 	struct dp_pdev *pdev;
4910 	struct dp_neighbour_peer *peer = NULL;
4911 	struct dp_neighbour_peer *temp_peer = NULL;
4912 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
4913 
4914 	if (!vdev)
4915 		return QDF_STATUS_E_FAILURE;
4916 
4917 	pdev = vdev->pdev;
4918 
4919 	if (wlan_op_mode_sta == vdev->opmode) {
4920 		if (vdev->vap_self_peer)
4921 			dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
4922 					     vdev->vdev_id,
4923 					     vdev->vap_self_peer->mac_addr.raw,
4924 					     0);
4925 		else
4926 			dp_err("vdev self peer is NULL");
4927 	}
4928 
4929 	/*
4930 	 * If the target is hung, flush all peers before detaching the vdev;
4931 	 * this frees all references held due to missing unmap commands
4932 	 * from the target.
4933 	 */
4934 	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
4935 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, false);
4936 	else if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
4937 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, true);
4938 
4939 	dp_rx_vdev_detach(vdev);
4940 	/*
4941 	 * move it after dp_rx_vdev_detach(),
4942 	 * as the callback done in dp_rx_vdev_detach()
4943 	 * still needs to get the vdev pointer by vdev_id.
4944 	 */
4945 	soc->vdev_id_map[vdev->vdev_id] = NULL;
4946 	/*
4947 	 * Use peer_ref_mutex while accessing peer_list, in case
4948 	 * a peer is in the process of being removed from the list.
4949 	 */
4950 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4951 	/* check that the vdev has no peers allocated */
4952 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
4953 		/* debug print - will be removed later */
4954 		dp_warn("not deleting vdev object %pK (%pM) until deletion finishes for all its peers",
4955 			vdev, vdev->mac_addr.raw);
4956 
4957 		if (vdev->vdev_dp_ext_handle) {
4958 			qdf_mem_free(vdev->vdev_dp_ext_handle);
4959 			vdev->vdev_dp_ext_handle = NULL;
4960 		}
4961 		/* indicate that the vdev needs to be deleted */
4962 		vdev->delete.pending = 1;
4963 		vdev->delete.callback = callback;
4964 		vdev->delete.context = cb_context;
4965 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4966 		return QDF_STATUS_E_FAILURE;
4967 	}
4968 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4969 
4970 	if (wlan_op_mode_monitor == vdev->opmode)
4971 		goto free_vdev;
4972 
4973 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
4974 	if (!soc->hw_nac_monitor_support) {
4975 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
4976 			      neighbour_peer_list_elem) {
4977 			QDF_ASSERT(peer->vdev != vdev);
4978 		}
4979 	} else {
4980 		TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
4981 				   neighbour_peer_list_elem, temp_peer) {
4982 			if (peer->vdev == vdev) {
4983 				TAILQ_REMOVE(&pdev->neighbour_peers_list, peer,
4984 					     neighbour_peer_list_elem);
4985 				qdf_mem_free(peer);
4986 			}
4987 		}
4988 	}
4989 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
4990 
4991 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4992 	/* remove the vdev from its parent pdev's list */
4993 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
4994 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4995 
4996 	dp_tx_vdev_detach(vdev);
4997 	wlan_minidump_remove(vdev);
4998 
4999 free_vdev:
5000 	if (wlan_op_mode_monitor == vdev->opmode) {
5001 		if (soc->intr_mode == DP_INTR_POLL)
5002 			qdf_timer_sync_cancel(&soc->int_timer);
5003 		pdev->monitor_vdev = NULL;
5004 	}
5005 
5006 	if (vdev->vdev_dp_ext_handle) {
5007 		qdf_mem_free(vdev->vdev_dp_ext_handle);
5008 		vdev->vdev_dp_ext_handle = NULL;
5009 	}
5010 
5011 	dp_info("deleting vdev object %pK (%pM)", vdev, vdev->mac_addr.raw);
5012 
5013 	qdf_mem_free(vdev);
5014 
5015 	if (callback)
5016 		callback(cb_context);
5017 
5018 	return QDF_STATUS_SUCCESS;
5019 }
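
/*
 * Example (illustrative sketch): when peers are still attached, this
 * function defers the delete and returns QDF_STATUS_E_FAILURE; the
 * registered callback fires later, once the last peer is cleaned up. A
 * hypothetical OL_IF caller:
 *
 *	static void vdev_deleted_cb(void *ctx)
 *	{
 *		... release OSIF state for the vdev ...
 *	}
 *
 *	dp_vdev_detach_wifi3(cdp_soc, vdev_id, vdev_deleted_cb, osif_ctx);
 */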
5020 
5021 #ifdef FEATURE_AST
5022 /*
5023  * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
5024  * @soc: datapath soc handle
5025  * @peer: datapath peer handle
5026  *
5027  * Delete the AST entries belonging to a peer
5028  */
5029 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
5030 					      struct dp_peer *peer)
5031 {
5032 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
5033 
5034 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
5035 		dp_peer_del_ast(soc, ast_entry);
5036 
5037 	peer->self_ast_entry = NULL;
5038 }
5039 #else
5040 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
5041 					      struct dp_peer *peer)
5042 {
5043 }
5044 #endif
5045 #if ATH_SUPPORT_WRAP
5046 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
5047 						uint8_t *peer_mac_addr)
5048 {
5049 	struct dp_peer *peer;
5050 
5051 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
5052 				      0, vdev->vdev_id);
5053 	if (!peer)
5054 		return NULL;
5055 
5056 	if (peer->bss_peer)
5057 		return peer;
5058 
5059 	dp_peer_unref_delete(peer);
5060 	return NULL;
5061 }
5062 #else
5063 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
5064 						uint8_t *peer_mac_addr)
5065 {
5066 	struct dp_peer *peer;
5067 
5068 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
5069 				      0, vdev->vdev_id);
5070 	if (!peer)
5071 		return NULL;
5072 
5073 	if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
5074 		return peer;
5075 
5076 	dp_peer_unref_delete(peer);
5077 	return NULL;
5078 }
5079 #endif
5080 
5081 #ifdef FEATURE_AST
5082 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
5083 					       struct dp_pdev *pdev,
5084 					       uint8_t *peer_mac_addr)
5085 {
5086 	struct dp_ast_entry *ast_entry;
5087 
5088 	qdf_spin_lock_bh(&soc->ast_lock);
5089 	if (soc->ast_override_support)
5090 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
5091 							    pdev->pdev_id);
5092 	else
5093 		ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
5094 
5095 	if (ast_entry && ast_entry->next_hop && !ast_entry->delete_in_progress)
5096 		dp_peer_del_ast(soc, ast_entry);
5097 
5098 	qdf_spin_unlock_bh(&soc->ast_lock);
5099 }
5100 #endif
5101 
5102 #ifdef PEER_CACHE_RX_PKTS
5103 static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
5104 {
5105 	qdf_spinlock_create(&peer->bufq_info.bufq_lock);
5106 	peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
5107 	qdf_list_create(&peer->bufq_info.cached_bufq, DP_RX_CACHED_BUFQ_THRESH);
5108 }
5109 #else
5110 static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
5111 {
5112 }
5113 #endif
5114 
5115 /*
5116  * dp_peer_create_wifi3() - attach txrx peer
5117  * @soc_hdl: Datapath soc handle
5118  * @vdev_id: id of vdev
5119  * @peer_mac_addr: Peer MAC address
5120  *
5121  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
5122  */
5123 static QDF_STATUS
5124 dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5125 		     uint8_t *peer_mac_addr)
5126 {
5127 	struct dp_peer *peer;
5128 	int i;
5129 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
5130 	struct dp_pdev *pdev;
5131 	struct cdp_peer_cookie peer_cookie;
5132 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
5133 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
5134 
5135 	if (!vdev || !peer_mac_addr)
5136 		return QDF_STATUS_E_FAILURE;
5137 
5138 	pdev = vdev->pdev;
5139 	soc = pdev->soc;
5140 
5141 	/*
5142 	 * If a peer entry with given MAC address already exists,
5143 	 * reuse the peer and reset the state of peer.
5144 	 */
5145 	peer = dp_peer_can_reuse(vdev, peer_mac_addr);
5146 
5147 	if (peer) {
5148 		qdf_atomic_init(&peer->is_default_route_set);
5149 		dp_peer_cleanup(vdev, peer, true);
5150 
5151 		qdf_spin_lock_bh(&soc->ast_lock);
5152 		dp_peer_delete_ast_entries(soc, peer);
5153 		peer->delete_in_progress = false;
5154 		qdf_spin_unlock_bh(&soc->ast_lock);
5155 
5156 		if ((vdev->opmode == wlan_op_mode_sta) &&
5157 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
5158 		     QDF_MAC_ADDR_SIZE)) {
5159 			ast_type = CDP_TXRX_AST_TYPE_SELF;
5160 		}
5161 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
5162 		/*
5163 		 * Control path maintains a node count which is incremented
5164 		 * for every new peer create command. Since a new peer is not
5165 		 * being created and the earlier reference is reused here, a
5166 		 * peer_unref_delete event is sent to the control path to
5167 		 * increment the count back.
5168 		 */
5169 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
5170 			soc->cdp_soc.ol_ops->peer_unref_delete(
5171 				soc->ctrl_psoc,
5172 				pdev->pdev_id,
5173 				peer->mac_addr.raw, vdev->mac_addr.raw,
5174 				vdev->opmode);
5175 		}
5176 
5177 		peer->valid = 1;
5178 		dp_local_peer_id_alloc(pdev, peer);
5179 
5180 		qdf_spinlock_create(&peer->peer_info_lock);
5181 		dp_peer_rx_bufq_resources_init(peer);
5182 
5183 		DP_STATS_INIT(peer);
5184 		DP_STATS_UPD(peer, rx.avg_rssi, INVALID_RSSI);
5185 
5186 		/*
5187 		 * In tx_monitor mode, a filter may be set for an unassociated
5188 		 * peer; when that peer becomes associated, the tx_cap_enabled
5189 		 * flag needs to be updated to support peer filtering.
5190 		 */
5191 		dp_peer_tx_capture_filter_check(pdev, peer);
5192 
5193 		dp_set_peer_isolation(peer, false);
5194 
5195 		return QDF_STATUS_SUCCESS;
5196 	} else {
5197 		/*
5198 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
5199 		 * need to remove the AST entry which was earlier added as a WDS
5200 		 * entry.
5201 		 * If an AST entry exists, but no peer entry exists with a given
5202 		 * If an AST entry exists, but no peer entry exists with the
5203 		 * given MAC address, we can deduce it to be a WDS entry.
5204 		dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
5205 	}
5206 
5207 #ifdef notyet
5208 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
5209 		soc->mempool_ol_ath_peer);
5210 #else
5211 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
5212 #endif
5213 	wlan_minidump_log(peer,
5214 			  sizeof(*peer),
5215 			  soc->ctrl_psoc,
5216 			  WLAN_MD_DP_PEER, "dp_peer");
5217 	if (!peer)
5218 		return QDF_STATUS_E_FAILURE; /* failure */
5219 
5220 	qdf_mem_zero(peer, sizeof(struct dp_peer));
5221 
5222 	TAILQ_INIT(&peer->ast_entry_list);
5223 
5224 	/* store provided params */
5225 	peer->vdev = vdev;
5226 
5227 	if ((vdev->opmode == wlan_op_mode_sta) &&
5228 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
5229 			 QDF_MAC_ADDR_SIZE)) {
5230 		ast_type = CDP_TXRX_AST_TYPE_SELF;
5231 	}
5232 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
5233 	qdf_spinlock_create(&peer->peer_info_lock);
5234 
5235 	dp_peer_rx_bufq_resources_init(peer);
5236 
5237 	qdf_mem_copy(
5238 		&peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE);
5239 
5240 	/* initialize the peer_id */
5241 	peer->peer_id = HTT_INVALID_PEER;
5242 
5243 	/* reset the ast index to flowid table */
5244 	dp_peer_reset_flowq_map(peer);
5245 
5246 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
5247 
5248 	qdf_atomic_init(&peer->ref_cnt);
5249 
5250 	/* keep one reference for attach */
5251 	qdf_atomic_inc(&peer->ref_cnt);
5252 
5253 	/* add this peer into the vdev's list */
5254 	if (wlan_op_mode_sta == vdev->opmode)
5255 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
5256 	else
5257 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
5258 
5259 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5260 
5261 	/* TODO: See if hash based search is required */
5262 	dp_peer_find_hash_add(soc, peer);
5263 
5264 	/* Initialize the peer state */
5265 	peer->state = OL_TXRX_PEER_STATE_DISC;
5266 
5267 	dp_info("vdev %pK created peer %pK (%pM) ref_cnt: %d",
5268 		vdev, peer, peer->mac_addr.raw,
5269 		qdf_atomic_read(&peer->ref_cnt));
5270 	/*
5271 	 * For every peer MAP message, search for and set bss_peer
5272 	 */
5273 	if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
5274 			QDF_MAC_ADDR_SIZE) == 0 &&
5275 			(wlan_op_mode_sta != vdev->opmode)) {
5276 		dp_info("vdev bss_peer!!");
5277 		peer->bss_peer = 1;
5278 		vdev->vap_bss_peer = peer;
5279 	}
5280 
5281 	if (wlan_op_mode_sta == vdev->opmode &&
5282 	    qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
5283 			QDF_MAC_ADDR_SIZE) == 0) {
5284 		vdev->vap_self_peer = peer;
5285 	}
5286 
5287 	for (i = 0; i < DP_MAX_TIDS; i++)
5288 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
5289 
5290 	peer->valid = 1;
5291 	dp_local_peer_id_alloc(pdev, peer);
5292 	DP_STATS_INIT(peer);
5293 	DP_STATS_UPD(peer, rx.avg_rssi, INVALID_RSSI);
5294 
5295 	qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
5296 		     QDF_MAC_ADDR_SIZE);
5297 	peer_cookie.ctx = NULL;
5298 	peer_cookie.pdev_id = pdev->pdev_id;
5299 	peer_cookie.cookie = pdev->next_peer_cookie++;
5300 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
5301 	dp_wdi_event_handler(WDI_EVENT_PEER_CREATE, pdev->soc,
5302 			     (void *)&peer_cookie,
5303 			     peer->peer_id, WDI_NO_VAL, pdev->pdev_id);
5304 #endif
5305 	if (soc->wlanstats_enabled) {
5306 		if (!peer_cookie.ctx) {
5307 			pdev->next_peer_cookie--;
5308 			qdf_err("Failed to initialize peer rate stats");
5309 		} else {
5310 			peer->wlanstats_ctx = (struct cdp_peer_rate_stats_ctx *)
5311 						peer_cookie.ctx;
5312 		}
5313 	}
5314 
5315 	/*
5316 	 * In tx_monitor mode, a filter may be set for an unassociated
5317 	 * peer; when that peer becomes associated, the tx_cap_enabled
5318 	 * flag needs to be updated to support peer filtering.
5319 	 */
5320 	dp_peer_tx_capture_filter_check(pdev, peer);
5321 
5322 	dp_set_peer_isolation(peer, false);
5323 
5324 	return QDF_STATUS_SUCCESS;
5325 }
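
/*
 * Example (illustrative sketch): peer creation follows vdev attach. For a
 * STA vdev, dp_vdev_attach_wifi3() already creates the self peer, so a
 * hypothetical AP-side caller only adds remote peers explicitly:
 *
 *	if (dp_peer_create_wifi3(soc_hdl, vdev_id, sta_mac) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAILURE;
 */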
5326 
5327 /*
5328  * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
5329  * @vdev: Datapath VDEV handle
5330  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5331  * @hash_based: pointer to hash value (enabled/disabled) to be populated
5332  *
5333  * Return: None
5334  */
5335 static
5336 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
5337 				  enum cdp_host_reo_dest_ring *reo_dest,
5338 				  bool *hash_based)
5339 {
5340 	struct dp_soc *soc;
5341 	struct dp_pdev *pdev;
5342 
5343 	pdev = vdev->pdev;
5344 	soc = pdev->soc;
5345 	/*
5346 	 * Hash-based steering is disabled for radios which are offloaded
5347 	 * to NSS.
5348 	 */
5349 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
5350 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
5351 
5352 	/*
5353 	 * The line below ensures the proper reo_dest ring is chosen for
5354 	 * cases where the Toeplitz hash cannot be generated (e.g. non-TCP/UDP)
5355 	 */
5356 	*reo_dest = pdev->reo_dest;
5357 }
5358 
5359 #ifdef IPA_OFFLOAD
5360 /**
5361  * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
5362  * @vdev: Virtual device
5363  *
5364  * Return: true if the vdev is of subtype P2P
5365  *	   false if the vdev is of any other subtype
5366  */
5367 static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
5368 {
5369 	if (vdev->subtype == wlan_op_subtype_p2p_device ||
5370 	    vdev->subtype == wlan_op_subtype_p2p_cli ||
5371 	    vdev->subtype == wlan_op_subtype_p2p_go)
5372 		return true;
5373 
5374 	return false;
5375 }
5376 
5377 /*
5378  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
5379  * @vdev: Datapath VDEV handle
5380  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5381  * @hash_based: pointer to hash value (enabled/disabled) to be populated
5382  *
5383  * If IPA is enabled in the ini, disable hash-based steering for SAP mode
5384  * and use the default reo_dest ring for RX. Use config values for other modes.
5385  * Return: None
5386  */
5387 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
5388 				       enum cdp_host_reo_dest_ring *reo_dest,
5389 				       bool *hash_based)
5390 {
5391 	struct dp_soc *soc;
5392 	struct dp_pdev *pdev;
5393 
5394 	pdev = vdev->pdev;
5395 	soc = pdev->soc;
5396 
5397 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
5398 
5399 	/* For P2P-GO interfaces we do not need to change the REO
5400 	 * configuration even if IPA config is enabled
5401 	 */
5402 	if (dp_is_vdev_subtype_p2p(vdev))
5403 		return;
5404 
5405 	/*
5406 	 * If IPA is enabled, disable hash-based flow steering and set
5407 	 * reo_dest_ring_4 as the REO ring to receive packets on.
5408 	 * IPA is configured to reap reo_dest_ring_4.
5409 	 *
5410 	 * Note - REO DST indexes are from 0 - 3, while the
5411 	 * cdp_host_reo_dest_ring enum values are from 1 - 4.
5412 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
5413 	 */
5414 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5415 		if (vdev->opmode == wlan_op_mode_ap) {
5416 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
5417 			*hash_based = 0;
5418 		} else if (vdev->opmode == wlan_op_mode_sta &&
5419 			   dp_ipa_is_mdm_platform()) {
5420 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
5421 		}
5422 	}
5423 }
5424 
5425 #else
5426 
5427 /*
5428  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
5429  * @vdev: Datapath VDEV handle
5430  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5431  * @hash_based: pointer to hash value (enabled/disabled) to be populated
5432  *
5433  * Use system config values for hash based steering.
5434  * Return: None
5435  */
5436 
5437 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
5438 				       enum cdp_host_reo_dest_ring *reo_dest,
5439 				       bool *hash_based)
5440 {
5441 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
5442 }
5443 #endif /* IPA_OFFLOAD */
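
/*
 * Illustrative sketch (not part of the driver): a caller holding a valid
 * dp_vdev pointer would query the REO destination and hash setting as
 * below; both outputs are populated by the call.
 *
 *	enum cdp_host_reo_dest_ring reo_dest;
 *	bool hash_based = false;
 *
 *	dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based);
 */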
5444 
5445 /*
5446  * dp_peer_setup_wifi3() - initialize the peer
5447  * @soc_hdl: soc handle object
5448  * @vdev_id: vdev_id of vdev object
5449  * @peer_mac: Peer's mac address
5450  *
5451  * Return: QDF_STATUS
5452  */
5453 static QDF_STATUS
5454 dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5455 		    uint8_t *peer_mac)
5456 {
5457 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
5458 	struct dp_pdev *pdev;
5459 	bool hash_based = 0;
5460 	enum cdp_host_reo_dest_ring reo_dest;
5461 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5462 	struct dp_vdev *vdev =
5463 			dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
5464 	struct dp_peer *peer =
5465 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id);
5466 
5467 	if (!vdev || !peer || peer->delete_in_progress) {
5468 		status = QDF_STATUS_E_FAILURE;
5469 		goto fail;
5470 	}
5471 
5472 	pdev = vdev->pdev;
5473 	dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based);
5474 
5475 	dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u",
5476 		pdev->pdev_id, vdev->vdev_id,
5477 		vdev->opmode, hash_based, reo_dest);
5478 
5479 
5480 	/*
5481 	 * There are corner cases where AD1 = AD2 = the VAP's address,
5482 	 * i.e. both devices have the same MAC address. In these
5483 	 * cases we want such pkts to be processed by the NULL Q handler,
5484 	 * which is the REO2TCL ring. For this reason we should not
5485 	 * set up reo_queues and a default route for the bss_peer.
5486 	 */
5487 	dp_peer_tx_init(pdev, peer);
5488 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
5489 		status = QDF_STATUS_E_FAILURE;
5490 		goto fail;
5491 	}
5492 
5493 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
5494 		/* TODO: Check the destination ring number to be passed to FW */
5495 		soc->cdp_soc.ol_ops->peer_set_default_routing(
5496 				soc->ctrl_psoc,
5497 				peer->vdev->pdev->pdev_id,
5498 				peer->mac_addr.raw,
5499 				peer->vdev->vdev_id, hash_based, reo_dest);
5500 	}
5501 
5502 	qdf_atomic_set(&peer->is_default_route_set, 1);
5503 
5504 	dp_peer_rx_init(pdev, peer);
5505 
5506 	dp_peer_ppdu_delayed_ba_init(peer);
5507 
5508 fail:
5509 	if (peer)
5510 		dp_peer_unref_delete(peer);
5511 	return status;
5512 }
5513 
5514 /*
5515  * dp_cp_peer_del_resp_handler - Handle the peer delete response
5516  * @soc_hdl: Datapath SOC handle
5517  * @vdev_id: id of virtual device object
5518  * @mac_addr: Mac address of the peer
5519  *
5520  * Return: QDF_STATUS
5521  */
5522 static QDF_STATUS dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl,
5523 					      uint8_t vdev_id,
5524 					      uint8_t *mac_addr)
5525 {
5526 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
5527 	struct dp_ast_entry  *ast_entry = NULL;
5528 	txrx_ast_free_cb cb = NULL;
5529 	void *cookie;
5530 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
5531 
5532 	if (!vdev)
5533 		return QDF_STATUS_E_FAILURE;
5534 
5535 	qdf_spin_lock_bh(&soc->ast_lock);
5536 
5537 	if (soc->ast_override_support)
5538 		ast_entry =
5539 			dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
5540 							vdev->pdev->pdev_id);
5541 	else
5542 		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
5543 
5544 	/* In case of qwrap we can have multiple BSS peers
5545 	 * with the same mac address.
5546 	 *
5547 	 * The AST entry for this mac address is created for
5548 	 * only one of those peers, hence it can be NULL here.
5549 	 */
5550 	if (!ast_entry || ast_entry->peer || !ast_entry->delete_in_progress) {
5551 		qdf_spin_unlock_bh(&soc->ast_lock);
5552 		return QDF_STATUS_E_FAILURE;
5553 	}
5554 
5555 	if (ast_entry->is_mapped)
5556 		soc->ast_table[ast_entry->ast_idx] = NULL;
5557 
5558 	DP_STATS_INC(soc, ast.deleted, 1);
5559 	dp_peer_ast_hash_remove(soc, ast_entry);
5560 
5561 	cb = ast_entry->callback;
5562 	cookie = ast_entry->cookie;
5563 	ast_entry->callback = NULL;
5564 	ast_entry->cookie = NULL;
5565 
5566 	soc->num_ast_entries--;
5567 	qdf_spin_unlock_bh(&soc->ast_lock);
5568 
5569 	if (cb) {
5570 		cb(soc->ctrl_psoc,
5571 		   dp_soc_to_cdp_soc(soc),
5572 		   cookie,
5573 		   CDP_TXRX_AST_DELETED);
5574 	}
5575 	qdf_mem_free(ast_entry);
5576 
5577 	return QDF_STATUS_SUCCESS;
5578 }
5579 
5580 /*
5581  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
5582  * @txrx_soc: cdp soc handle
5583  * @ac: Access category
5584  * @value: timeout value in milliseconds
5585  *
5586  * Return: void
5587  */
5588 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
5589 				    uint8_t ac, uint32_t value)
5590 {
5591 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5592 
5593 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
5594 }
5595 
5596 /*
5597  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
5598  * @txrx_soc: cdp soc handle
5599  * @ac: access category
5600  * @value: timeout value in milliseconds
5601  *
5602  * Return: void
5603  */
5604 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
5605 				    uint8_t ac, uint32_t *value)
5606 {
5607 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5608 
5609 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
5610 }
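
/*
 * Illustrative sketch: setting and reading back the BA aging timeout
 * through a cdp soc handle. The handle, the AC index 2 (video) and the
 * 5000 ms value are assumptions for illustration only.
 *
 *	uint32_t timeout_ms;
 *
 *	dp_set_ba_aging_timeout(cdp_soc, 2, 5000);
 *	dp_get_ba_aging_timeout(cdp_soc, 2, &timeout_ms);
 */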
5611 
5612 /*
5613  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
5614  * @txrx_soc: cdp soc handle
5615  * @pdev_id: id of physical device object
5616  * @val: reo destination ring index (1 - 4)
5617  *
5618  * Return: QDF_STATUS
5619  */
5620 static QDF_STATUS
5621 dp_set_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id,
5622 		     enum cdp_host_reo_dest_ring val)
5623 {
5624 	struct dp_pdev *pdev =
5625 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
5626 						   pdev_id);
5627 
5628 	if (pdev) {
5629 		pdev->reo_dest = val;
5630 		return QDF_STATUS_SUCCESS;
5631 	}
5632 
5633 	return QDF_STATUS_E_FAILURE;
5634 }
5635 
5636 /*
5637  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
5638  * @txrx_soc: cdp soc handle
5639  * @pdev_id: id of physical device object
5640  *
5641  * Return: reo destination ring index
5642  */
5643 static enum cdp_host_reo_dest_ring
5644 dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id)
5645 {
5646 	struct dp_pdev *pdev =
5647 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
5648 						   pdev_id);
5649 
5650 	if (pdev)
5651 		return pdev->reo_dest;
5652 	else
5653 		return cdp_host_reo_dest_ring_unknown;
5654 }
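
/*
 * Illustrative sketch: steering RX for pdev 0 to REO destination ring 2
 * and reading the setting back; cdp_soc is an assumed valid cdp soc
 * handle and cdp_host_reo_dest_ring_2 the corresponding enum value.
 *
 *	if (dp_set_pdev_reo_dest(cdp_soc, 0, cdp_host_reo_dest_ring_2) ==
 *	    QDF_STATUS_SUCCESS)
 *		reo_dest = dp_get_pdev_reo_dest(cdp_soc, 0);
 */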
5655 
5656 #ifdef ATH_SUPPORT_NAC
5657 /*
5658  * dp_set_filter_neigh_peers() - set filter neighbour peers for smart mesh
5659  * @pdev: Datapath PDEV handle
5660  * @val: value to be set
5661  *
5662  * Return: 0 on success
5663  */
5664 static int dp_set_filter_neigh_peers(struct dp_pdev *pdev,
5665 				     bool val)
5666 {
5667 	/* Enable/Disable smart mesh filtering. This flag will be checked
5668 	 * during rx processing to check if packets are from NAC clients.
5669 	 */
5670 	pdev->filter_neighbour_peers = val;
5671 	return 0;
5672 }
5673 #else
5674 static int dp_set_filter_neigh_peers(struct dp_pdev *pdev,
5675 				     bool val)
5676 {
5677 	return 0;
5678 }
5679 #endif /* ATH_SUPPORT_NAC */
5680 
5681 #if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
5682 /*
5683  * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
5684  * address for smart mesh filtering
5685  * @soc: cdp soc handle
5686  * @vdev_id: id of virtual device object
5687  * @cmd: Add/Del command
5688  * @macaddr: nac client mac address
5689  *
5690  * Return: 1 on success, 0 on failure
5691  */
5692 static int dp_update_filter_neighbour_peers(struct cdp_soc_t *soc,
5693 					    uint8_t vdev_id,
5694 					    uint32_t cmd, uint8_t *macaddr)
5695 {
5696 	struct dp_pdev *pdev;
5697 	struct dp_neighbour_peer *peer = NULL;
5698 	struct dp_vdev *vdev =
5699 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
5700 						   vdev_id);
5701 
5702 	if (!vdev || !macaddr)
5703 		goto fail0;
5704 
5705 	pdev = vdev->pdev;
5706 
5707 	if (!pdev)
5708 		goto fail0;
5709 
5710 	/* Store address of NAC (neighbour peer) which will be checked
5711 	 * against TA of received packets.
5712 	 */
5713 	if (cmd == DP_NAC_PARAM_ADD) {
5714 		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
5715 				sizeof(*peer));
5716 
5717 		if (!peer) {
5718 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5719 				FL("DP neighbour peer node memory allocation failed"));
5720 			goto fail0;
5721 		}
5722 
5723 		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
5724 			macaddr, QDF_MAC_ADDR_SIZE);
5725 		peer->vdev = vdev;
5726 
5727 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
5728 
5729 		/* add this neighbour peer into the list */
5730 		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
5731 				neighbour_peer_list_elem);
5732 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
5733 
5734 		/* first neighbour */
5735 		if (!pdev->neighbour_peers_added) {
5736 			QDF_STATUS status = QDF_STATUS_SUCCESS;
5737 
5738 			pdev->neighbour_peers_added = true;
5739 			dp_mon_filter_setup_smart_monitor(pdev);
5740 			status = dp_mon_filter_update(pdev);
5741 			if (status != QDF_STATUS_SUCCESS) {
5742 				QDF_TRACE(QDF_MODULE_ID_DP,
5743 					  QDF_TRACE_LEVEL_ERROR,
5744 					  FL("smart mon filter setup failed"));
5745 				dp_mon_filter_reset_smart_monitor(pdev);
5746 				pdev->neighbour_peers_added = false;
5747 			}
5748 		}
5749 		return 1;
5750 
5751 	} else if (cmd == DP_NAC_PARAM_DEL) {
5752 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
5753 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
5754 				neighbour_peer_list_elem) {
5755 			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
5756 				macaddr, QDF_MAC_ADDR_SIZE)) {
5757 				/* delete this peer from the list */
5758 				TAILQ_REMOVE(&pdev->neighbour_peers_list,
5759 					peer, neighbour_peer_list_elem);
5760 				qdf_mem_free(peer);
5761 				break;
5762 			}
5763 		}
5764 		/* last neighbour deleted */
5765 		if (TAILQ_EMPTY(&pdev->neighbour_peers_list)) {
5766 			QDF_STATUS status = QDF_STATUS_SUCCESS;
5767 
5768 			pdev->neighbour_peers_added = false;
5769 			dp_mon_filter_reset_smart_monitor(pdev);
5770 			status = dp_mon_filter_update(pdev);
5771 			if (status != QDF_STATUS_SUCCESS) {
5772 				QDF_TRACE(QDF_MODULE_ID_DP,
5773 					  QDF_TRACE_LEVEL_ERROR,
5774 					  FL("smart mon filter clear failed"));
5775 			}
5776 
5777 		}
5778 
5779 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
5780 		return 1;
5781 
5782 	}
5783 
5784 fail0:
5785 	return 0;
5786 }
5787 #endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
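
/*
 * Illustrative sketch: registering and later removing a NAC client for
 * smart mesh filtering; nac_mac is an assumed 6-byte MAC array. The call
 * returns 1 on success and 0 on failure.
 *
 *	dp_update_filter_neighbour_peers(cdp_soc, vdev_id,
 *					 DP_NAC_PARAM_ADD, nac_mac);
 *	...
 *	dp_update_filter_neighbour_peers(cdp_soc, vdev_id,
 *					 DP_NAC_PARAM_DEL, nac_mac);
 */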
5788 
5789 /*
5790  * dp_get_sec_type() - Get the security type
5791  * @soc: soc handle
5792  * @vdev_id: vdev id
5793  * @peer_mac: mac of datapath PEER handle
5794  * @sec_idx: Security id (mcast, ucast)
5795  *
5796  * Return: Security type
5797  */
5798 static int dp_get_sec_type(struct cdp_soc_t *soc, uint8_t vdev_id,
5799 			   uint8_t *peer_mac, uint8_t sec_idx)
5800 {
5801 	int sec_type = 0;
5802 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
5803 						       peer_mac, 0, vdev_id);
5804 
5805 	if (!peer || peer->delete_in_progress) {
5806 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
5807 			  "%s: Peer is NULL!\n", __func__);
5808 		goto fail;
5809 	}
5810 
5811 	sec_type = peer->security[sec_idx].sec_type;
5812 fail:
5813 	if (peer)
5814 		dp_peer_unref_delete(peer);
5815 	return sec_type;
5816 }
5817 
5818 /*
5819  * dp_peer_authorize() - authorize txrx peer
5820  * @soc_hdl: soc handle
5821  * @vdev_id: vdev id
5822  * @peer_mac: mac of datapath PEER handle
5823  * @authorize: 1 to authorize the peer, 0 to deauthorize
5824  * Return: QDF_STATUS
5825  */
5826 static QDF_STATUS
5827 dp_peer_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5828 		  uint8_t *peer_mac, uint32_t authorize)
5829 {
5830 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5831 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
5832 	struct dp_peer *peer = dp_peer_find_hash_find(soc,
5833 						      peer_mac,
5834 						      0, vdev_id);
5835 
5836 	if (!peer || peer->delete_in_progress) {
5837 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
5838 			  "%s: Peer is NULL!\n", __func__);
5839 		status = QDF_STATUS_E_FAILURE;
5840 	} else {
5841 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
5842 		peer->authorize = authorize ? 1 : 0;
5843 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5844 	}
5845 
5846 	if (peer)
5847 		dp_peer_unref_delete(peer);
5848 
5849 	return status;
5850 }
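
/*
 * Illustrative sketch: authorizing a peer once its keys are installed
 * and deauthorizing it on disconnect; peer_mac is an assumed 6-byte MAC
 * array.
 *
 *	dp_peer_authorize(cdp_soc, vdev_id, peer_mac, 1);
 *	...
 *	dp_peer_authorize(cdp_soc, vdev_id, peer_mac, 0);
 */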
5851 
5852 /*
5853  * dp_vdev_reset_peer() - Update peer-related members in vdev
5854  *			   as the peer is going to be freed
5855  * @vdev: datapath vdev handle
5856  * @peer: datapath peer handle
5857  *
5858  * Return: None
5859  */
5860 static void dp_vdev_reset_peer(struct dp_vdev *vdev,
5861 			       struct dp_peer *peer)
5862 {
5863 	if (!vdev) {
5864 		dp_err("vdev is NULL");
5865 		return;
5866 	}
5867 
5868 	if (vdev->vap_bss_peer == peer) {
5869 		vdev->vap_bss_peer = NULL;
5870 		qdf_mem_zero(vdev->vap_bss_peer_mac_addr,
5871 			     QDF_MAC_ADDR_SIZE);
5872 	}
5873 
5874 	/* Fold the departing peer's stats into the vdev while a
5875 	 * bss peer still exists.
5876 	 */
5877 	if (vdev->vap_bss_peer)
5878 		DP_UPDATE_STATS(vdev, peer);
5879 }
5880 
5881 /*
5882  * dp_peer_release_mem() - free dp peer handle memory
5883  * @soc: datapath soc handle
5884  * @pdev: datapath pdev handle
5885  * @peer: datapath peer handle
5886  * @vdev_opmode: Vdev operation mode
5887  * @vdev_mac_addr: Vdev Mac address
5888  *
5889  * Return: None
5890  */
5891 static void dp_peer_release_mem(struct dp_soc *soc,
5892 				struct dp_pdev *pdev,
5893 				struct dp_peer *peer,
5894 				enum wlan_op_mode vdev_opmode,
5895 				uint8_t *vdev_mac_addr)
5896 {
5897 	if (soc->cdp_soc.ol_ops->peer_unref_delete)
5898 		soc->cdp_soc.ol_ops->peer_unref_delete(
5899 				soc->ctrl_psoc,
5900 				pdev->pdev_id,
5901 				peer->mac_addr.raw, vdev_mac_addr,
5902 				vdev_opmode);
5903 
5904 	/*
5905 	 * Peer AST list has to be empty here
5906 	 */
5907 	DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
5908 
5909 	qdf_mem_free(peer);
5910 }
5911 
5912 /**
5913  * dp_delete_pending_vdev() - check and process vdev delete
5914  * @pdev: DP specific pdev pointer
5915  * @vdev: DP specific vdev pointer
5916  * @vdev_id: vdev id corresponding to vdev
5917  *
5918  * This API does the following:
5919  * 1) It releases tx flow pool buffers as the vdev is
5920  *    going down and no peers are associated.
5921  * 2) It also detaches the vdev before freeing the vdev (struct dp_vdev) memory
5922  */
5923 static void dp_delete_pending_vdev(struct dp_pdev *pdev, struct dp_vdev *vdev,
5924 				   uint8_t vdev_id)
5925 {
5926 	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
5927 	void *vdev_delete_context = NULL;
5928 
5929 	vdev_delete_cb = vdev->delete.callback;
5930 	vdev_delete_context = vdev->delete.context;
5931 
5932 	dp_info("deleting vdev object %pK (%pM) - its last peer is done",
5933 		vdev, vdev->mac_addr.raw);
5934 	/* all peers are gone, go ahead and delete it */
5935 	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
5936 			FLOW_TYPE_VDEV, vdev_id);
5937 	dp_tx_vdev_detach(vdev);
5938 
5939 	pdev->soc->vdev_id_map[vdev_id] = NULL;
5940 
5941 	if (wlan_op_mode_monitor == vdev->opmode) {
5942 		pdev->monitor_vdev = NULL;
5943 	} else {
5944 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
5945 		TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
5946 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5947 	}
5948 
5949 	dp_info("deleting vdev object %pK (%pM)",
5950 		vdev, vdev->mac_addr.raw);
5951 	qdf_mem_free(vdev);
5952 	vdev = NULL;
5953 
5954 	if (vdev_delete_cb)
5955 		vdev_delete_cb(vdev_delete_context);
5956 }
5957 
5958 /*
5959  * dp_peer_unref_delete() - unref and delete peer
5960  * @peer: Datapath peer handle
5961  *
5962  */
5963 void dp_peer_unref_delete(struct dp_peer *peer)
5964 {
5965 	struct dp_vdev *vdev = peer->vdev;
5966 	struct dp_pdev *pdev = vdev->pdev;
5967 	struct dp_soc *soc = pdev->soc;
5968 	struct dp_peer *tmppeer;
5969 	int found = 0;
5970 	uint16_t peer_id;
5971 	uint16_t vdev_id;
5972 	bool vdev_delete = false;
5973 	struct cdp_peer_cookie peer_cookie;
5974 	enum wlan_op_mode vdev_opmode;
5975 	uint8_t vdev_mac_addr[QDF_MAC_ADDR_SIZE];
5976 
5977 	/*
5978 	 * Hold the lock all the way from checking if the peer ref count
5979 	 * is zero until the peer references are removed from the hash
5980 	 * table and vdev list (if the peer ref count is zero).
5981 	 * This protects against a new HL tx operation starting to use the
5982 	 * peer object just after this function concludes it's done being used.
5983 	 * Furthermore, the lock needs to be held while checking whether the
5984 	 * vdev's list of peers is empty, to make sure that list is not modified
5985 	 * concurrently with the empty check.
5986 	 */
5987 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
5988 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
5989 		peer_id = peer->peer_id;
5990 		vdev_id = vdev->vdev_id;
5991 
5992 		/*
5993 		 * Make sure that the reference to the peer in
5994 		 * peer object map is removed
5995 		 */
5996 		if (peer_id != HTT_INVALID_PEER)
5997 			soc->peer_id_to_obj_map[peer_id] = NULL;
5998 
5999 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
6000 			  "Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
6001 
6002 		/* remove the reference to the peer from the hash table */
6003 		dp_peer_find_hash_remove(soc, peer);
6004 
6005 		qdf_spin_lock_bh(&soc->ast_lock);
6006 		if (peer->self_ast_entry) {
6007 			dp_peer_del_ast(soc, peer->self_ast_entry);
6008 		}
6009 		qdf_spin_unlock_bh(&soc->ast_lock);
6010 
6011 		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
6012 			if (tmppeer == peer) {
6013 				found = 1;
6014 				break;
6015 			}
6016 		}
6017 
6018 		if (found) {
6019 			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
6020 				peer_list_elem);
6021 		} else {
6022 			/* Ignoring the remove operation as peer not found */
6023 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
6024 				  "peer:%pK not found in vdev:%pK peerlist:%pK",
6025 				  peer, vdev, &peer->vdev->peer_list);
6026 		}
6027 
6028 		/* send peer destroy event to upper layer */
6029 		qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
6030 			     QDF_MAC_ADDR_SIZE);
6031 		peer_cookie.ctx = NULL;
6032 		peer_cookie.ctx = (struct cdp_stats_cookie *)
6033 					peer->wlanstats_ctx;
6034 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6035 		dp_wdi_event_handler(WDI_EVENT_PEER_DESTROY,
6036 				     pdev->soc,
6037 				     (void *)&peer_cookie,
6038 				     peer->peer_id,
6039 				     WDI_NO_VAL,
6040 				     pdev->pdev_id);
6041 #endif
6042 		peer->wlanstats_ctx = NULL;
6043 
6044 		/* cleanup the peer data */
6045 		dp_peer_cleanup(vdev, peer, false);
6046 		/* reset this peer related info in vdev */
6047 		dp_vdev_reset_peer(vdev, peer);
6048 		/* save vdev related member in case vdev freed */
6049 		vdev_opmode = vdev->opmode;
6050 		qdf_mem_copy(vdev_mac_addr, vdev->mac_addr.raw,
6051 			     QDF_MAC_ADDR_SIZE);
6052 		/*
6053 		 * Check whether the parent vdev is pending deletion
6054 		 * with no peers left.
6055 		 */
6056 		if (vdev->delete.pending && TAILQ_EMPTY(&vdev->peer_list))
6057 			vdev_delete = true;
6058 		/*
6059 		 * Now that there are no references to the peer, we can
6060 		 * release the peer reference lock.
6061 		 */
6062 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
6063 
6064 		wlan_minidump_remove(peer);
6065 		/*
6066 		 * Invoke soc.ol_ops->peer_unref_delete outside of
6067 		 * peer_ref_mutex to avoid a deadlock issue.
6068 		 */
6069 		dp_peer_release_mem(soc, pdev, peer,
6070 				    vdev_opmode,
6071 				    vdev_mac_addr);
6072 		/*
6073 		 * Delete the vdev if it was waiting for all of its
6074 		 * peers to be deleted and that is now the case.
6075 		 */
6076 		if (vdev_delete)
6077 			dp_delete_pending_vdev(pdev, vdev, vdev_id);
6078 
6079 	} else {
6080 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
6081 	}
6082 }
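
/*
 * Illustrative sketch of the reference discipline this function
 * completes: dp_peer_find_hash_find() returns the peer with a reference
 * held, and each such lookup must be balanced by exactly one
 * dp_peer_unref_delete() once the caller is done with the peer.
 *
 *	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id);
 *	if (peer) {
 *		... use peer ...
 *		dp_peer_unref_delete(peer);
 *	}
 */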
6083 
6084 #ifdef PEER_CACHE_RX_PKTS
6085 static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
6086 {
6087 	qdf_list_destroy(&peer->bufq_info.cached_bufq);
6088 	qdf_spinlock_destroy(&peer->bufq_info.bufq_lock);
6089 }
6090 #else
6091 static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
6092 {
6093 }
6094 #endif
6095 
6096 /*
6097  * dp_peer_delete_wifi3() - Delete txrx peer
6098  * @soc_hdl: soc handle
6099  * @vdev_id: vdev id
6100  * @peer_mac: mac of datapath PEER handle
6101  * @bitmap: bitmap indicating special handling of request.
6102  * Return: QDF_STATUS
6103  */
6104 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
6105 				       uint8_t vdev_id,
6106 				       uint8_t *peer_mac, uint32_t bitmap)
6107 {
6108 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6109 	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
6110 						      0, vdev_id);
6111 
6112 	/* Peer can be null for monitor vap mac address */
6113 	if (!peer) {
6114 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
6115 			  "%s: Invalid peer\n", __func__);
6116 		return QDF_STATUS_E_FAILURE;
6117 	}
6118 
6119 	if (!peer->valid) {
6120 		dp_peer_unref_delete(peer);
6121 		dp_err("Invalid peer: %pM", peer_mac);
6122 		return QDF_STATUS_E_ALREADY;
6123 	}
6124 
6125 	peer->valid = 0;
6126 
6127 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
6128 		FL("peer %pK (%pM)"),  peer, peer->mac_addr.raw);
6129 
6130 	dp_local_peer_id_free(peer->vdev->pdev, peer);
6131 
6132 	/* Drop all rx packets before deleting peer */
6133 	dp_clear_peer_internal(soc, peer);
6134 
6135 	dp_peer_rx_bufq_resources_deinit(peer);
6136 
6137 	qdf_spinlock_destroy(&peer->peer_info_lock);
6138 	dp_peer_multipass_list_remove(peer);
6139 
6140 	/*
6141 	 * Remove the reference added during peer_attach.
6142 	 * The peer will still be left allocated until the
6143 	 * PEER_UNMAP message arrives to remove the other
6144 	 * reference, added by the PEER_MAP message.
6145 	 */
6146 	dp_peer_unref_delete(peer);
6147 	/*
6148 	 * Remove the reference taken above
6149 	 */
6150 	dp_peer_unref_delete(peer);
6151 
6152 	return QDF_STATUS_SUCCESS;
6153 }
6154 
6155 /*
6156  * dp_get_vdev_mac_addr_wifi3() - Get vdev mac address
6157  * @soc_hdl: Datapath soc handle
6158  * @vdev_id: virtual interface id
6159  *
6160  * Return: MAC address on success, NULL on failure.
6161  *
6162  */
6163 static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_soc_t *soc_hdl,
6164 					 uint8_t vdev_id)
6165 {
6166 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6167 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
6168 
6169 	if (!vdev)
6170 		return NULL;
6171 
6172 	return vdev->mac_addr.raw;
6173 }
6174 
6175 /*
6176  * dp_vdev_set_wds() - Enable/disable WDS for the vdev
6177  * @soc: DP soc handle
6178  * @vdev_id: id of DP VDEV handle
6179  * @val: 1 to enable WDS, 0 to disable
6180  *
6181  * Return: QDF_STATUS
6182  */
6183 static int dp_vdev_set_wds(struct cdp_soc_t *soc, uint8_t vdev_id, uint32_t val)
6184 {
6185 	struct dp_vdev *vdev =
6186 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
6187 						   vdev_id);
6188 
6189 	if (!vdev)
6190 		return QDF_STATUS_E_FAILURE;
6191 
6192 	vdev->wds_enabled = val;
6193 	return QDF_STATUS_SUCCESS;
6194 }
6195 
6196 /*
6197  * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev id of monitor mode
6198  * @soc_hdl: datapath soc handle
6199  * @pdev_id: physical device instance id
6200  *
6201  * Return: virtual interface id
6202  */
6203 static uint8_t dp_get_mon_vdev_from_pdev_wifi3(struct cdp_soc_t *soc_hdl,
6204 					       uint8_t pdev_id)
6205 {
6206 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6207 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
6208 
6209 	if (qdf_unlikely(!pdev))
6210 		return -EINVAL;
6211 
6212 	return pdev->monitor_vdev->vdev_id;
6213 }
6214 
6215 static int dp_get_opmode(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
6216 {
6217 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6218 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
6219 
6220 	if (!vdev) {
6221 		dp_err("vdev for id %d is NULL", vdev_id);
6222 		return -EINVAL;
6223 	}
6224 
6225 	return vdev->opmode;
6226 }
6227 
6228 /**
6229  * dp_get_os_rx_handles_from_vdev_wifi3() - Get os rx handles for a vdev
6230  * @soc_hdl: ol_txrx_soc_handle handle
6231  * @vdev_id: vdev id for which os rx handles are needed
6232  * @stack_fn_p: pointer to stack function pointer
6233  * @osif_vdev_p: pointer to ol_osif_vdev_handle
6234  *
6235  * Return: void
6236  */
6237 static
6238 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_soc_t *soc_hdl,
6239 					  uint8_t vdev_id,
6240 					  ol_txrx_rx_fp *stack_fn_p,
6241 					  ol_osif_vdev_handle *osif_vdev_p)
6242 {
6243 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6244 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
6245 
6246 	if (!vdev)
6247 		return;
6248 
6249 	*stack_fn_p = vdev->osif_rx_stack;
6250 	*osif_vdev_p = vdev->osif_vdev;
6251 }
6252 
6253 /**
6254  * dp_get_ctrl_pdev_from_vdev() - Get control pdev of vdev
6255  * @soc_hdl: datapath soc handle
6256  * @vdev_id: virtual device/interface id
6257  *
6258  * Return: Handle to control pdev
6259  */
6260 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(
6261 						struct cdp_soc_t *soc_hdl,
6262 						uint8_t vdev_id)
6263 {
6264 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6265 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
6266 	struct dp_pdev *pdev;
6267 
6268 	if (!vdev || !vdev->pdev)
6269 		return NULL;
6270 
6271 	pdev = vdev->pdev;
6272 	return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
6273 }
6274 
6275 /**
6276  * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
6277  *                                 ring based on target
6278  * @soc: soc handle
6279  * @mac_for_pdev: WIN- pdev_id, MCL- mac id
6280  * @pdev: physical device handle
6281  * @ring_num: mac id
6282  * @htt_tlv_filter: tlv filter
6283  *
6284  * Return: zero on success, non-zero on failure
6285  */
6286 static inline
6287 QDF_STATUS dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
6288 				       struct dp_pdev *pdev, uint8_t ring_num,
6289 				       struct htt_rx_ring_tlv_filter htt_tlv_filter)
6290 {
6291 	QDF_STATUS status;
6292 
6293 	if (soc->wlan_cfg_ctx->rxdma1_enable)
6294 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6295 					     soc->rxdma_mon_buf_ring[ring_num]
6296 					     .hal_srng,
6297 					     RXDMA_MONITOR_BUF,
6298 					     RX_MONITOR_BUFFER_SIZE,
6299 					     &htt_tlv_filter);
6300 	else
6301 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6302 					     pdev->rx_mac_buf_ring[ring_num]
6303 					     .hal_srng,
6304 					     RXDMA_BUF, RX_DATA_BUFFER_SIZE,
6305 					     &htt_tlv_filter);
6306 
6307 	return status;
6308 }
6309 
6310 static inline void
6311 dp_pdev_disable_mcopy_code(struct dp_pdev *pdev)
6312 {
6313 	pdev->mcopy_mode = M_COPY_DISABLED;
6314 	pdev->monitor_configured = false;
6315 	pdev->monitor_vdev = NULL;
6316 }
6317 
6318 /**
6319  * dp_reset_monitor_mode() - Disable monitor mode
6320  * @soc_hdl: Datapath soc handle
6321  * @pdev_id: id of datapath PDEV handle
6322  * @special_monitor: flag to denote smart monitor mode
6323  * Return: QDF_STATUS
6324  */
6325 QDF_STATUS dp_reset_monitor_mode(struct cdp_soc_t *soc_hdl,
6326 				 uint8_t pdev_id,
6327 				 uint8_t special_monitor)
6328 {
6329 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6330 	struct dp_pdev *pdev =
6331 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
6332 						   pdev_id);
6333 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6334 
6335 	if (!pdev)
6336 		return QDF_STATUS_E_FAILURE;
6337 
6338 	qdf_spin_lock_bh(&pdev->mon_lock);
6339 
6340 	dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_DISABLE);
6341 	pdev->monitor_vdev = NULL;
6342 	pdev->monitor_configured = false;
6343 
6344 	/*
6345 	 * Lite monitor mode, smart monitor mode and monitor
6346 	 * mode use this API for filter reset and mode disable
6347 	 */
6348 	if (pdev->mcopy_mode) {
6349 #if defined(FEATURE_PERPKT_INFO)
6350 		dp_pdev_disable_mcopy_code(pdev);
6351 		dp_mon_filter_reset_mcopy_mode(pdev);
6352 #endif /* FEATURE_PERPKT_INFO */
6353 	} else if (special_monitor) {
6354 #if defined(ATH_SUPPORT_NAC)
6355 		dp_mon_filter_reset_smart_monitor(pdev);
6356 #endif /* ATH_SUPPORT_NAC */
6357 	} else {
6358 		dp_mon_filter_reset_mon_mode(pdev);
6359 	}
6360 
6361 	status = dp_mon_filter_update(pdev);
6362 	if (status != QDF_STATUS_SUCCESS) {
6363 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6364 			  FL("Failed to reset monitor filters"));
6365 	}
6366 
6367 	qdf_spin_unlock_bh(&pdev->mon_lock);
6368 	return QDF_STATUS_SUCCESS;
6369 }
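
/*
 * Illustrative sketch: tearing down plain monitor mode on pdev 0;
 * passing special_monitor as 0 selects the regular monitor filter
 * reset path (the mcopy path is chosen by the pdev's own mcopy state).
 *
 *	dp_reset_monitor_mode(cdp_soc, 0, 0);
 */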
6370 
6371 /**
6372  * dp_get_tx_pending() - read pending tx
6373  * @pdev_handle: Datapath PDEV handle
6374  *
6375  * Return: outstanding tx
6376  */
6377 static uint32_t dp_get_tx_pending(struct cdp_pdev *pdev_handle)
6378 {
6379 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6380 
6381 	return qdf_atomic_read(&pdev->num_tx_outstanding);
6382 }
6383 
6384 /**
6385  * dp_get_peer_mac_from_peer_id() - get peer mac
6386  * @pdev_handle: Datapath PDEV handle
6387  * @peer_id: Peer ID
6388  * @peer_mac: MAC addr of PEER
6389  *
6390  * Return: QDF_STATUS
6391  */
6392 static QDF_STATUS dp_get_peer_mac_from_peer_id(struct cdp_soc_t *soc,
6393 					       uint32_t peer_id,
6394 					       uint8_t *peer_mac)
6395 {
6396 	struct dp_peer *peer;
6397 
6398 	if (soc && peer_mac) {
6399 		peer = dp_peer_find_by_id((struct dp_soc *)soc,
6400 					  (uint16_t)peer_id);
6401 		if (peer) {
6402 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
6403 				     QDF_MAC_ADDR_SIZE);
6404 			dp_peer_unref_del_find_by_id(peer);
6405 			return QDF_STATUS_SUCCESS;
6406 		}
6407 	}
6408 
6409 	return QDF_STATUS_E_FAILURE;
6410 }
6411 
6412 /**
6413  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
6414  * @soc: Datapath soc handle
6415  * @vdev_id: id of Datapath VDEV handle
6416  * @special_monitor: Flag to denote smart monitor mode
6417  * Return: 0 on success, not 0 on failure
6418  */
6419 static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_soc_t *soc,
6420 					   uint8_t vdev_id,
6421 					   uint8_t special_monitor)
6422 {
6423 	uint32_t mac_id;
6424 	uint32_t mac_for_pdev;
6425 	struct dp_pdev *pdev;
6426 	uint32_t num_entries;
6427 	struct dp_srng *mon_buf_ring;
6428 	struct dp_vdev *vdev =
6429 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
6430 						   vdev_id);
6431 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6432 
6433 	if (!vdev)
6434 		return QDF_STATUS_E_FAILURE;
6435 
6436 	pdev = vdev->pdev;
6437 	pdev->monitor_vdev = vdev;
6438 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
6439 		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
6440 		  pdev, pdev->pdev_id, pdev->soc, vdev);
6441 
6442 	/*
6443 	 * Do not configure the monitor buf ring and filter for smart and
6444 	 * lite monitor modes.
6445 	 * For smart monitor, filters are added along with the first NAC;
6446 	 * for lite monitor, the required configuration is done through
6447 	 * dp_set_pdev_param.
6448 	 */
6449 	if (special_monitor)
6450 		return QDF_STATUS_SUCCESS;
6451 
6452 	/*Check if current pdev's monitor_vdev exists */
6453 	if (pdev->monitor_configured) {
6454 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
6455 			  "monitor vap already created vdev=%pK\n", vdev);
6456 		return QDF_STATUS_E_RESOURCES;
6457 	}
6458 
6459 	pdev->monitor_configured = true;
6460 
6461 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6462 		mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id,
6463 							  pdev->pdev_id);
6464 		dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev,
6465 						 FALSE);
6466 		/*
6467 		 * Configure a low interrupt threshold (one eighth of the
6468 		 * ring entries) when monitor mode is configured.
6469 		 */
6470 		mon_buf_ring = &pdev->soc->rxdma_mon_buf_ring[mac_for_pdev];
6471 		if (mon_buf_ring->hal_srng) {
6472 			num_entries = mon_buf_ring->num_entries;
6473 			hal_set_low_threshold(mon_buf_ring->hal_srng,
6474 					      num_entries >> 3);
6475 			htt_srng_setup(pdev->soc->htt_handle,
6476 				       pdev->pdev_id,
6477 				       mon_buf_ring->hal_srng,
6478 				       RXDMA_MONITOR_BUF);
6479 		}
6480 	}
6481 
6482 	dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_ENABLE);
6483 
6484 	dp_mon_filter_setup_mon_mode(pdev);
6485 	status = dp_mon_filter_update(pdev);
6486 	if (status != QDF_STATUS_SUCCESS) {
6487 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6488 			  FL("Failed to set up monitor filters"));
6489 		dp_mon_filter_reset_mon_mode(pdev);
6490 		pdev->monitor_configured = false;
6491 		pdev->monitor_vdev = NULL;
6492 	}
6493 
6494 	return status;
6495 }
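
/*
 * Illustrative sketch: putting a vdev into full monitor mode;
 * special_monitor = 0 requests full buffer-ring and filter programming
 * rather than the smart/lite variants.
 *
 *	status = dp_vdev_set_monitor_mode(cdp_soc, vdev_id, 0);
 */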
6496 
6497 /**
6498  * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
6499  * @soc_hdl: soc handle
6500  * @pdev_id: id of Datapath PDEV handle
6501  * @filter_val: Flag to select filter for monitor mode
6502  * Return: 0 on success, not 0 on failure
6503  */
6504 static QDF_STATUS
6505 dp_pdev_set_advance_monitor_filter(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
6506 				   struct cdp_monitor_filter *filter_val)
6507 {
6508 	/* Many monitor VAPs can exist in a system but only one can be up at
6509 	 * any time
6510 	 */
6511 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6512 	struct dp_vdev *vdev;
6513 	struct dp_pdev *pdev =
6514 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
6515 						   pdev_id);
6516 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6517 
6518 	if (!pdev)
6519 		return QDF_STATUS_E_FAILURE;
6520 
6521 	vdev = pdev->monitor_vdev;
6522 
6523 	if (!vdev)
6524 		return QDF_STATUS_E_FAILURE;
6525 
6526 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
6527 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
6528 		pdev, pdev_id, soc, vdev);
6529 
6530 	/*Check if current pdev's monitor_vdev exists */
6531 	if (!pdev->monitor_vdev) {
6532 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6533 			"vdev=%pK", vdev);
6534 		qdf_assert(vdev);
6535 	}
6536 
6537 	/* update filter mode, type in pdev structure */
6538 	pdev->mon_filter_mode = filter_val->mode;
6539 	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
6540 	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
6541 	pdev->fp_data_filter = filter_val->fp_data;
6542 	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
6543 	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
6544 	pdev->mo_data_filter = filter_val->mo_data;
6545 
6546 	dp_mon_filter_setup_mon_mode(pdev);
6547 	status = dp_mon_filter_update(pdev);
6548 	if (status != QDF_STATUS_SUCCESS) {
6549 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6550 			  FL("Failed to set filter for advanced monitor mode"));
6551 		dp_mon_filter_reset_mon_mode(pdev);
6552 	}
6553 
6554 	return status;
6555 }
6556 
6557 /**
6558  * dp_deliver_tx_mgmt() - Deliver mgmt frame for tx capture
6559  * @cdp_soc: data path soc handle
6560  * @pdev_id: pdev_id
6561  * @nbuf: Management frame buffer
6562  */
6563 static QDF_STATUS
6564 dp_deliver_tx_mgmt(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, qdf_nbuf_t nbuf)
6565 {
6566 	struct dp_pdev *pdev =
6567 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
6568 						   pdev_id);
6569 
6570 	if (!pdev)
6571 		return QDF_STATUS_E_FAILURE;
6572 
6573 	dp_deliver_mgmt_frm(pdev, nbuf);
6574 
6575 	return QDF_STATUS_SUCCESS;
6576 }
6577 
6578 /**
6579  * dp_mon_set_bsscolor() - sets bsscolor for tx capture
6580  * @pdev: Datapath PDEV handle
6581  * @bsscolor: new bsscolor
6582  */
6583 static void
6584 dp_mon_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor)
6585 {
6586 	pdev->rx_mon_recv_status.bsscolor = bsscolor;
6587 }
6588 
6589 /**
6590  * dp_pdev_get_filter_ucast_data() - get DP PDEV monitor ucast filter
6591  * @pdev_handle: Datapath PDEV handle
6592  *
6593  * Return: true on ucast filter flag set
6594  */
6595 static bool dp_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle)
6596 {
6597 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6598 
6599 	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
6600 	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
6601 		return true;
6602 
6603 	return false;
6604 }
6605 
6606 /**
6607  * dp_pdev_get_filter_mcast_data() - get DP PDEV monitor mcast filter
6608  * @pdev_handle: Datapath PDEV handle
6609  * Return: true on mcast filter flag set
6610  */
6611 static bool dp_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle)
6612 {
6613 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6614 
6615 	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
6616 	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
6617 		return true;
6618 
6619 	return false;
6620 }
6621 
6622 /**
6623  * dp_pdev_get_filter_non_data() - get DP PDEV monitor non_data filter
6624  * @pdev_handle: Datapath PDEV handle
6625  * Return: true on non data filter flag set
6626  */
6627 static bool dp_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle)
6628 {
6629 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6630 
6631 	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
6632 	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
6633 		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
6634 		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
6635 			return true;
6636 		}
6637 	}
6638 
6639 	return false;
6640 }
6641 
6642 #ifdef MESH_MODE_SUPPORT
6643 void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
6644 {
6645 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
6646 
6647 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6648 		FL("val %d"), val);
6649 	vdev->mesh_vdev = val;
6650 }
6651 
6652 /*
6653  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
6654  * @vdev_hdl: virtual device object
6655  * @val: value to be set
6656  *
6657  * Return: void
6658  */
6659 void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
6660 {
6661 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
6662 
6663 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6664 		FL("val %d"), val);
6665 	vdev->mesh_rx_filter = val;
6666 }
6667 #endif
6668 
6669 #ifdef VDEV_PEER_PROTOCOL_COUNT
6670 static void dp_enable_vdev_peer_protocol_count(struct cdp_soc_t *soc,
6671 					       int8_t vdev_id,
6672 					       bool enable)
6673 {
6674 	struct dp_vdev *vdev;
6675 
6676 	vdev = dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
6677 						  vdev_id);
6678 	dp_info("enable %d vdev_id %d", enable, vdev_id);
6679 	vdev->peer_protocol_count_track = enable;
6680 }
6681 
6682 static void dp_enable_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc,
6683 						   int8_t vdev_id,
6684 						   int drop_mask)
6685 {
6686 	struct dp_vdev *vdev;
6687 
6688 	vdev = dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
6689 						  vdev_id);
6690 	dp_info("drop_mask %d vdev_id %d", drop_mask, vdev_id);
6691 	vdev->peer_protocol_count_dropmask = drop_mask;
6692 }
6693 
6694 static int dp_is_vdev_peer_protocol_count_enabled(struct cdp_soc_t *soc,
6695 						  int8_t vdev_id)
6696 {
6697 	struct dp_vdev *vdev;
6698 
6699 	vdev = dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
6700 						  vdev_id);
6701 	dp_info("enable %d vdev_id %d", vdev->peer_protocol_count_track,
6702 		vdev_id);
6703 	return vdev->peer_protocol_count_track;
6704 }
6705 
6706 static int dp_get_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc,
6707 					       int8_t vdev_id)
6708 {
6709 	struct dp_vdev *vdev;
6710 
6711 	vdev = dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
6712 						  vdev_id);
6713 	dp_info("drop_mask %d vdev_id %d", vdev->peer_protocol_count_dropmask,
6714 		vdev_id);
6715 	return vdev->peer_protocol_count_dropmask;
6716 }
6717 
6718 #endif
6719 
6720 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data)
6721 {
6722 	uint8_t pdev_count;
6723 
6724 	for (pdev_count = 0; pdev_count < MAX_PDEV_CNT; pdev_count++) {
6725 		if (soc->pdev_list[pdev_count] &&
6726 		    soc->pdev_list[pdev_count] == data)
6727 			return true;
6728 	}
6729 	return false;
6730 }
6731 
6732 /**
6733  * dp_rx_bar_stats_cb(): BAR received stats callback
6734  * @soc: SOC handle
6735  * @cb_ctxt: Call back context
6736  * @reo_status: Reo status
6737  *
6738  * return: void
6739  */
6740 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
6741 	union hal_reo_status *reo_status)
6742 {
6743 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
6744 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
6745 
6746 	if (!dp_check_pdev_exists(soc, pdev)) {
6747 		dp_err_rl("pdev doesn't exist");
6748 		return;
6749 	}
6750 
6751 	if (!qdf_atomic_read(&soc->cmn_init_done))
6752 		return;
6753 
6754 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
6755 		DP_PRINT_STATS("REO stats failure %d",
6756 			       queue_status->header.status);
6757 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
6758 		return;
6759 	}
6760 
6761 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
6762 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
6763 
6764 }
6765 
6766 /**
6767  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
6768  * @vdev: DP VDEV handle
6769  * @vdev_stats: buffer to hold the consolidated vdev stats
6770  * return: void
6771  */
6772 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
6773 			     struct cdp_vdev_stats *vdev_stats)
6774 {
6775 	struct dp_peer *peer = NULL;
6776 	struct dp_soc *soc = NULL;
6777 
6778 	if (!vdev || !vdev->pdev)
6779 		return;
6780 
6781 	soc = vdev->pdev->soc;
6782 
6783 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
6784 
6785 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
6786 		dp_update_vdev_stats(vdev_stats, peer);
6787 
6788 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6789 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
6790 			     vdev_stats, vdev->vdev_id,
6791 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
6792 #endif
6793 }
6794 
6795 void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
6796 {
6797 	struct dp_vdev *vdev = NULL;
6798 	struct dp_soc *soc;
6799 	struct cdp_vdev_stats *vdev_stats =
6800 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
6801 
6802 	if (!vdev_stats) {
6803 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6804 			  "DP alloc failure - unable to alloc vdev stats");
6805 		return;
6806 	}
6807 
6808 	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
6809 	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
6810 	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
6811 
6812 	if (pdev->mcopy_mode)
6813 		DP_UPDATE_STATS(pdev, pdev->invalid_peer);
6814 
6815 	soc = pdev->soc;
6816 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
6817 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
6818 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
6819 
6820 		dp_aggregate_vdev_stats(vdev, vdev_stats);
6821 		dp_update_pdev_stats(pdev, vdev_stats);
6822 		dp_update_pdev_ingress_stats(pdev, vdev);
6823 	}
6824 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6825 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
6826 	qdf_mem_free(vdev_stats);
6827 
6828 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6829 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
6830 			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
6831 #endif
6832 }
6833 
6834 /**
6835  * dp_vdev_getstats() - get vdev packet level stats
6836  * @vdev_handle: Datapath VDEV handle
6837  * @stats: cdp network device stats structure
6838  *
6839  * Return: QDF_STATUS
6840  */
6841 static QDF_STATUS dp_vdev_getstats(struct cdp_vdev *vdev_handle,
6842 				   struct cdp_dev_stats *stats)
6843 {
6844 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6845 	struct dp_pdev *pdev;
6846 	struct dp_soc *soc;
6847 	struct cdp_vdev_stats *vdev_stats;
6848 
6849 	if (!vdev)
6850 		return QDF_STATUS_E_FAILURE;
6851 
6852 	pdev = vdev->pdev;
6853 	if (!pdev)
6854 		return QDF_STATUS_E_FAILURE;
6855 
6856 	soc = pdev->soc;
6857 
6858 	vdev_stats = qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
6859 
6860 	if (!vdev_stats) {
6861 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6862 			  "DP alloc failure - unable to alloc vdev stats");
6863 		return QDF_STATUS_E_FAILURE;
6864 	}
6865 
6866 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
6867 	dp_aggregate_vdev_stats(vdev, vdev_stats);
6868 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
6869 
6870 	stats->tx_packets = vdev_stats->tx_i.rcvd.num;
6871 	stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
6872 
6873 	stats->tx_errors = vdev_stats->tx.tx_failed +
6874 		vdev_stats->tx_i.dropped.dropped_pkt.num;
6875 	stats->tx_dropped = stats->tx_errors;
6876 
6877 	stats->rx_packets = vdev_stats->rx.unicast.num +
6878 		vdev_stats->rx.multicast.num +
6879 		vdev_stats->rx.bcast.num;
6880 	stats->rx_bytes = vdev_stats->rx.unicast.bytes +
6881 		vdev_stats->rx.multicast.bytes +
6882 		vdev_stats->rx.bcast.bytes;
6883 
6884 	qdf_mem_free(vdev_stats);
6885 
6886 	return QDF_STATUS_SUCCESS;
6887 }
6888 
6889 
6890 /**
6891  * dp_pdev_getstats() - get pdev packet level stats
6892  * @pdev_handle: Datapath PDEV handle
6893  * @stats: cdp network device stats structure
6894  *
6895  * Return: void
6896  */
6897 static void dp_pdev_getstats(struct cdp_pdev *pdev_handle,
6898 			     struct cdp_dev_stats *stats)
6899 {
6900 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6901 
6902 	dp_aggregate_pdev_stats(pdev);
6903 
6904 	stats->tx_packets = pdev->stats.tx_i.rcvd.num;
6905 	stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
6906 
6907 	stats->tx_errors = pdev->stats.tx.tx_failed +
6908 		pdev->stats.tx_i.dropped.dropped_pkt.num;
6909 	stats->tx_dropped = stats->tx_errors;
6910 
6911 	stats->rx_packets = pdev->stats.rx.unicast.num +
6912 		pdev->stats.rx.multicast.num +
6913 		pdev->stats.rx.bcast.num;
6914 	stats->rx_bytes = pdev->stats.rx.unicast.bytes +
6915 		pdev->stats.rx.multicast.bytes +
6916 		pdev->stats.rx.bcast.bytes;
6917 	stats->rx_errors = pdev->stats.err.desc_alloc_fail +
6918 		pdev->stats.err.ip_csum_err +
6919 		pdev->stats.err.tcp_udp_csum_err +
6920 		pdev->stats.rx.err.mic_err +
6921 		pdev->stats.rx.err.decrypt_err +
6922 		pdev->stats.err.rxdma_error +
6923 		pdev->stats.err.reo_error;
6924 	stats->rx_dropped = pdev->stats.dropped.msdu_not_done +
6925 		pdev->stats.dropped.mec +
6926 		pdev->stats.dropped.mesh_filter +
6927 		pdev->stats.dropped.wifi_parse +
6928 		pdev->stats.dropped.mon_rx_drop +
6929 		pdev->stats.dropped.mon_radiotap_update_err;
6930 }
6931 
6932 /**
6933  * dp_get_device_stats() - get interface level packet stats
6934  * @soc: soc handle
6935  * @id : vdev_id or pdev_id based on type
6936  * @stats: cdp network device stats structure
6937  * @type: device type pdev/vdev
6938  *
6939  * Return: QDF_STATUS
6940  */
6941 static QDF_STATUS dp_get_device_stats(struct cdp_soc_t *soc, uint8_t id,
6942 				      struct cdp_dev_stats *stats,
6943 				      uint8_t type)
6944 {
6945 	switch (type) {
6946 	case UPDATE_VDEV_STATS:
6947 		return dp_vdev_getstats(
6948 			(struct cdp_vdev *)dp_get_vdev_from_soc_vdev_id_wifi3(
6949 			 (struct dp_soc *)soc, id), stats);
6950 	case UPDATE_PDEV_STATS:
6951 		{
6952 			struct dp_pdev *pdev =
6953 				dp_get_pdev_from_soc_pdev_id_wifi3(
6954 						(struct dp_soc *)soc,
6955 						 id);
6956 			if (pdev) {
6957 				dp_pdev_getstats((struct cdp_pdev *)pdev,
6958 						 stats);
6959 				return QDF_STATUS_SUCCESS;
6960 			}
6961 		}
6962 		break;
6963 	default:
6964 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6965 			"apstats cannot be updated for this input "
6966 			"type %d", type);
6967 		break;
6968 	}
6969 
6970 	return QDF_STATUS_E_FAILURE;
6971 }
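
/*
 * Illustrative sketch: fetching interface-level counters for a vdev and
 * for a pdev through the same entry point; dev_stats is an assumed
 * caller-owned struct cdp_dev_stats.
 *
 *	dp_get_device_stats(cdp_soc, vdev_id, &dev_stats,
 *			    UPDATE_VDEV_STATS);
 *	dp_get_device_stats(cdp_soc, pdev_id, &dev_stats,
 *			    UPDATE_PDEV_STATS);
 */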
6972 
6973 const
6974 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
6975 {
6976 	switch (ring_type) {
6977 	case REO_DST:
6978 		return "Reo_dst";
6979 	case REO_EXCEPTION:
6980 		return "Reo_exception";
6981 	case REO_CMD:
6982 		return "Reo_cmd";
6983 	case REO_REINJECT:
6984 		return "Reo_reinject";
6985 	case REO_STATUS:
6986 		return "Reo_status";
6987 	case WBM2SW_RELEASE:
6988 		return "wbm2sw_release";
6989 	case TCL_DATA:
6990 		return "tcl_data";
6991 	case TCL_CMD_CREDIT:
6992 		return "tcl_cmd_credit";
6993 	case TCL_STATUS:
6994 		return "tcl_status";
6995 	case SW2WBM_RELEASE:
6996 		return "sw2wbm_release";
6997 	case RXDMA_BUF:
6998 		return "Rxdma_buf";
6999 	case RXDMA_DST:
7000 		return "Rxdma_dst";
7001 	case RXDMA_MONITOR_BUF:
7002 		return "Rxdma_monitor_buf";
7003 	case RXDMA_MONITOR_DESC:
7004 		return "Rxdma_monitor_desc";
7005 	case RXDMA_MONITOR_STATUS:
7006 		return "Rxdma_monitor_status";
7007 	default:
7008 		dp_err("Invalid ring type");
7009 		break;
7010 	}
7011 	return "Invalid";
7012 }
7013 
7014 /*
7015  * dp_print_napi_stats(): NAPI stats
7016  * @soc: soc handle
7017  */
7018 void dp_print_napi_stats(struct dp_soc *soc)
7019 {
7020 	hif_print_napi_stats(soc->hif_handle);
7021 }
7022 
7023 /**
7024  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
7025  * @vdev: DP_VDEV handle
7026  *
7027  * Return: QDF_STATUS
7028  */
7029 static inline QDF_STATUS
7030 dp_txrx_host_stats_clr(struct dp_vdev *vdev)
7031 {
7032 	struct dp_peer *peer = NULL;
7033 
7034 	if (!vdev || !vdev->pdev)
7035 		return QDF_STATUS_E_FAILURE;
7036 
7037 	DP_STATS_CLR(vdev->pdev);
7038 	DP_STATS_CLR(vdev->pdev->soc);
7039 	DP_STATS_CLR(vdev);
7040 
7041 	hif_clear_napi_stats(vdev->pdev->soc->hif_handle);
7042 
7043 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
7044 		if (!peer)
7045 			return QDF_STATUS_E_FAILURE;
7046 		DP_STATS_CLR(peer);
7047 
7048 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7049 		dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
7050 				     &peer->stats,  peer->peer_id,
7051 				     UPDATE_PEER_STATS, vdev->pdev->pdev_id);
7052 #endif
7053 	}
7054 
7055 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7056 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
7057 			     &vdev->stats,  vdev->vdev_id,
7058 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
7059 #endif
7060 	return QDF_STATUS_SUCCESS;
7061 }
7062 
7063 /*
7064  * dp_get_host_peer_stats()- function to print peer stats
7065  * @soc: dp_soc handle
7066  * @mac_addr: mac address of the peer
7067  *
7068  * Return: QDF_STATUS
7069  */
7070 static QDF_STATUS
7071 dp_get_host_peer_stats(struct cdp_soc_t *soc, uint8_t *mac_addr)
7072 {
7073 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7074 	struct dp_peer *peer = NULL;
7075 
7076 	if (!mac_addr) {
7077 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7078 			  "%s: NULL peer mac addr\n", __func__);
7079 		status = QDF_STATUS_E_FAILURE;
7080 		goto fail;
7081 	}
7082 
7083 	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
7084 				      mac_addr, 0,
7085 				      DP_VDEV_ALL);
7086 	if (!peer || peer->delete_in_progress) {
7087 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7088 			  "%s: Invalid peer\n", __func__);
7089 		status = QDF_STATUS_E_FAILURE;
7090 		goto fail;
7091 	}
7092 
7093 	dp_print_peer_stats(peer);
7094 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
7095 fail:
7096 	if (peer)
7097 		dp_peer_unref_delete(peer);
7098 
7099 	return status;
7100 }
7101 
7102 /**
7103  * dp_txrx_stats_help() - Helper function for Txrx_Stats
7104  *
7105  * Return: None
7106  */
7107 static void dp_txrx_stats_help(void)
7108 {
7109 	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
7110 	dp_info("stats_option:");
7111 	dp_info("  1 -- HTT Tx Statistics");
7112 	dp_info("  2 -- HTT Rx Statistics");
7113 	dp_info("  3 -- HTT Tx HW Queue Statistics");
7114 	dp_info("  4 -- HTT Tx HW Sched Statistics");
7115 	dp_info("  5 -- HTT Error Statistics");
7116 	dp_info("  6 -- HTT TQM Statistics");
7117 	dp_info("  7 -- HTT TQM CMDQ Statistics");
7118 	dp_info("  8 -- HTT TX_DE_CMN Statistics");
7119 	dp_info("  9 -- HTT Tx Rate Statistics");
7120 	dp_info(" 10 -- HTT Rx Rate Statistics");
7121 	dp_info(" 11 -- HTT Peer Statistics");
7122 	dp_info(" 12 -- HTT Tx SelfGen Statistics");
7123 	dp_info(" 13 -- HTT Tx MU HWQ Statistics");
7124 	dp_info(" 14 -- HTT RING_IF_INFO Statistics");
7125 	dp_info(" 15 -- HTT SRNG Statistics");
7126 	dp_info(" 16 -- HTT SFM Info Statistics");
7127 	dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
7128 	dp_info(" 18 -- HTT Peer List Details");
7129 	dp_info(" 20 -- Clear Host Statistics");
7130 	dp_info(" 21 -- Host Rx Rate Statistics");
7131 	dp_info(" 22 -- Host Tx Rate Statistics");
7132 	dp_info(" 23 -- Host Tx Statistics");
7133 	dp_info(" 24 -- Host Rx Statistics");
7134 	dp_info(" 25 -- Host AST Statistics");
7135 	dp_info(" 26 -- Host SRNG PTR Statistics");
7136 	dp_info(" 27 -- Host Mon Statistics");
7137 	dp_info(" 28 -- Host REO Queue Statistics");
7138 	dp_info(" 29 -- Host Soc cfg param Statistics");
7139 	dp_info(" 30 -- Host pdev cfg param Statistics");
7140 	dp_info(" 31 -- Host FISA stats");
7141 	dp_info(" 32 -- Host Register Work stats");
7142 }
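
/*
 * Illustrative example of the command documented above, dumping host
 * AST statistics (option 25) on mac_id 0; the interface name and mac_id
 * are assumptions:
 *
 *	iwpriv wlan0 txrx_stats 25 0
 */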
7143 
7144 /**
7145  * dp_print_host_stats()- Function to print the stats aggregated at host
7146  * @vdev: DP_VDEV handle
7147  * @req: host stats request containing the stats type
7148  *
7149  * Return: 0 on success, print error message in case of failure
7150  */
7151 static int
7152 dp_print_host_stats(struct dp_vdev *vdev,
7153 		    struct cdp_txrx_stats_req *req)
7154 {
7155 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
7156 	enum cdp_host_txrx_stats type =
7157 			dp_stats_mapping_table[req->stats][STATS_HOST];
7158 
7159 	dp_aggregate_pdev_stats(pdev);
7160 
7161 	switch (type) {
7162 	case TXRX_CLEAR_STATS:
7163 		dp_txrx_host_stats_clr(vdev);
7164 		break;
7165 	case TXRX_RX_RATE_STATS:
7166 		dp_print_rx_rates(vdev);
7167 		break;
7168 	case TXRX_TX_RATE_STATS:
7169 		dp_print_tx_rates(vdev);
7170 		break;
7171 	case TXRX_TX_HOST_STATS:
7172 		dp_print_pdev_tx_stats(pdev);
7173 		dp_print_soc_tx_stats(pdev->soc);
7174 		break;
7175 	case TXRX_RX_HOST_STATS:
7176 		dp_print_pdev_rx_stats(pdev);
7177 		dp_print_soc_rx_stats(pdev->soc);
7178 		break;
7179 	case TXRX_AST_STATS:
7180 		dp_print_ast_stats(pdev->soc);
7181 		dp_print_peer_table(vdev);
7182 		break;
7183 	case TXRX_SRNG_PTR_STATS:
7184 		dp_print_ring_stats(pdev);
7185 		break;
7186 	case TXRX_RX_MON_STATS:
7187 		dp_print_pdev_rx_mon_stats(pdev);
7188 		break;
7189 	case TXRX_REO_QUEUE_STATS:
7190 		dp_get_host_peer_stats((struct cdp_soc_t *)pdev->soc,
7191 				       req->peer_addr);
7192 		break;
7193 	case TXRX_SOC_CFG_PARAMS:
7194 		dp_print_soc_cfg_params(pdev->soc);
7195 		break;
7196 	case TXRX_PDEV_CFG_PARAMS:
7197 		dp_print_pdev_cfg_params(pdev);
7198 		break;
7199 	case TXRX_NAPI_STATS:
7200 		dp_print_napi_stats(pdev->soc);
7201 		break;
7202 	case TXRX_SOC_INTERRUPT_STATS:
7203 		dp_print_soc_interrupt_stats(pdev->soc);
7204 		break;
7205 	case TXRX_SOC_FSE_STATS:
7206 		dp_rx_dump_fisa_table(pdev->soc);
7207 		break;
7208 	case TXRX_HAL_REG_WRITE_STATS:
7209 		hal_dump_reg_write_stats(pdev->soc->hal_soc);
7210 		hal_dump_reg_write_srng_stats(pdev->soc->hal_soc);
7211 		break;
7212 	default:
7213 		dp_info("Wrong Input For TxRx Host Stats");
7214 		dp_txrx_stats_help();
7215 		break;
7216 	}
7217 	return 0;
7218 }
7219 
7220 /*
7221  * is_ppdu_txrx_capture_enabled() - API to check whether any of the pktlog
7222  *                              and debug sniffer (tx sniffer/mcopy) modes are enabled
7223  * @pdev: dp pdev handle.
7224  *
7225  * Return: true if none of the modes is enabled, false otherwise
7226  */
7227 static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
7228 {
7229 	if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
7230 	    !pdev->mcopy_mode)
7231 		return true;
7232 	else
7233 		return false;
7234 }
7235 
7236 /*
7237  * dp_set_bpr_enable() - API to enable/disable bpr feature
7238  * @pdev: DP_PDEV handle.
7239  * @val: user provided value.
7240  *
7241  * Return: QDF_STATUS
7242  */
7243 static QDF_STATUS
7244 dp_set_bpr_enable(struct dp_pdev *pdev, int val)
7245 {
7246 	switch (val) {
7247 	case CDP_BPR_DISABLE:
7248 		pdev->bpr_enable = CDP_BPR_DISABLE;
7249 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
7250 		    !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
7251 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7252 		} else if (pdev->enhanced_stats_en &&
7253 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
7254 			   !pdev->pktlog_ppdu_stats) {
7255 			dp_h2t_cfg_stats_msg_send(pdev,
7256 						  DP_PPDU_STATS_CFG_ENH_STATS,
7257 						  pdev->pdev_id);
7258 		}
7259 		break;
7260 	case CDP_BPR_ENABLE:
7261 		pdev->bpr_enable = CDP_BPR_ENABLE;
7262 		if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
7263 		    !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
7264 			dp_h2t_cfg_stats_msg_send(pdev,
7265 						  DP_PPDU_STATS_CFG_BPR,
7266 						  pdev->pdev_id);
7267 		} else if (pdev->enhanced_stats_en &&
7268 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
7269 			   !pdev->pktlog_ppdu_stats) {
7270 			dp_h2t_cfg_stats_msg_send(pdev,
7271 						  DP_PPDU_STATS_CFG_BPR_ENH,
7272 						  pdev->pdev_id);
7273 		} else if (pdev->pktlog_ppdu_stats) {
7274 			dp_h2t_cfg_stats_msg_send(pdev,
7275 						  DP_PPDU_STATS_CFG_BPR_PKTLOG,
7276 						  pdev->pdev_id);
7277 		}
7278 		break;
7279 	default:
7280 		break;
7281 	}
7282 
7283 	return QDF_STATUS_SUCCESS;
7284 }
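
/*
 * Illustrative summary of the CDP_BPR_ENABLE branch above (a sketch
 * derived from the conditions in dp_set_bpr_enable(); tx_sniffer_enable
 * and mcopy_mode are assumed to be 0 in the first two rows):
 *
 *	enhanced_stats_en  pktlog_ppdu_stats  cfg sent to FW
 *	-----------------  -----------------  ----------------------------
 *	0                  0                  DP_PPDU_STATS_CFG_BPR
 *	1                  0                  DP_PPDU_STATS_CFG_BPR_ENH
 *	any                1                  DP_PPDU_STATS_CFG_BPR_PKTLOG
 */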
7285 
7286 /*
7287  * dp_pdev_tid_stats_ingress_inc() - increment the ingress_stack tid counter
7288  * @pdev: pdev handle
7289  * @val: increase in value
7290  *
7291  * Return: void
7292  */
7293 static void
7294 dp_pdev_tid_stats_ingress_inc(struct dp_pdev *pdev, uint32_t val)
7295 {
7296 	pdev->stats.tid_stats.ingress_stack += val;
7297 }
7298 
7299 /*
7300  * dp_pdev_tid_stats_osif_drop() - increment the osif_drop tid counter
7301  * @pdev: pdev handle
7302  * @val: increase in value
7303  *
7304  * Return: void
7305  */
7306 static void
7307 dp_pdev_tid_stats_osif_drop(struct dp_pdev *pdev, uint32_t val)
7308 {
7309 	pdev->stats.tid_stats.osif_drop += val;
7310 }
7311 
7312 
7313 /*
7314  * dp_config_debug_sniffer()- API to enable/disable debug sniffer
7315  * @pdev: DP_PDEV handle
7316  * @val: user provided value
7317  *
7318  * Return: QDF_STATUS
7319  */
7320 static QDF_STATUS
7321 dp_config_debug_sniffer(struct dp_pdev *pdev, int val)
7322 {
7323 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7324 
7325 	/*
7326 	 * Note: The mirror copy mode cannot co-exist with any other
7327 	 * monitor modes. Hence disabling the filter for this mode will
7328 	 * reset the monitor destination ring filters.
7329 	 */
7330 	if (pdev->mcopy_mode) {
7331 #ifdef FEATURE_PERPKT_INFO
7332 		dp_pdev_disable_mcopy_code(pdev);
7333 		dp_mon_filter_reset_mcopy_mode(pdev);
7334 		status = dp_mon_filter_update(pdev);
7335 		if (status != QDF_STATUS_SUCCESS) {
7336 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7337 				  FL("Failed to reset AM copy mode filters"));
7338 				  FL("Failed to reset M_copy mode filters"));
7339 #endif /* FEATURE_PERPKT_INFO */
7340 	}
7341 	switch (val) {
7342 	case 0:
7343 		pdev->tx_sniffer_enable = 0;
7344 		pdev->monitor_configured = false;
7345 
7346 		/*
7347 		 * We don't need to reset the Rx monitor status ring or call
7348 		 * the API dp_ppdu_ring_reset() when all debug sniffer modes
7349 		 * are disabled. The Rx monitor status ring will be disabled
7350 		 * when the last mode using it gets disabled.
7351 		 */
7352 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
7353 		    !pdev->bpr_enable) {
7354 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7355 		} else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
7356 			dp_h2t_cfg_stats_msg_send(pdev,
7357 				DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
7358 		} else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
7359 			dp_h2t_cfg_stats_msg_send(pdev,
7360 						  DP_PPDU_STATS_CFG_BPR_ENH,
7361 						  pdev->pdev_id);
7362 		} else {
7363 			dp_h2t_cfg_stats_msg_send(pdev,
7364 						  DP_PPDU_STATS_CFG_BPR,
7365 						  pdev->pdev_id);
7366 		}
7367 		break;
7368 
7369 	case 1:
7370 		pdev->tx_sniffer_enable = 1;
7371 		pdev->monitor_configured = false;
7372 
7373 		if (!pdev->pktlog_ppdu_stats)
7374 			dp_h2t_cfg_stats_msg_send(pdev,
7375 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
7376 		break;
7377 	case 2:
7378 	case 4:
7379 		if (pdev->monitor_vdev) {
7380 			status = QDF_STATUS_E_RESOURCES;
7381 			break;
7382 		}
7383 
7384 #ifdef FEATURE_PERPKT_INFO
7385 		pdev->mcopy_mode = val;
7386 		pdev->tx_sniffer_enable = 0;
7387 		pdev->monitor_configured = true;
7388 
7389 		/*
7390 		 * Setup the M copy mode filter.
7391 		 */
7392 		dp_mon_filter_setup_mcopy_mode(pdev);
7393 		status = dp_mon_filter_update(pdev);
7394 		if (status != QDF_STATUS_SUCCESS) {
7395 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7396 				  FL("Failed to set M_copy mode filters"));
7397 			dp_mon_filter_reset_mcopy_mode(pdev);
7398 			dp_pdev_disable_mcopy_code(pdev);
7399 			return status;
7400 		}
7401 
7402 		if (!pdev->pktlog_ppdu_stats)
7403 			dp_h2t_cfg_stats_msg_send(pdev,
7404 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
7405 #endif /* FEATURE_PERPKT_INFO */
7406 		break;
7407 
7408 	default:
7409 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7410 			"Invalid value");
7411 		break;
7412 	}
7413 	return status;
7414 }
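
/*
 * Illustrative usage (a minimal sketch, not part of this file): the debug
 * sniffer is normally driven through dp_set_pdev_param() with
 * CDP_CONFIG_DEBUG_SNIFFER, whose value lands in the switch above
 * (0: disable, 1: tx sniffer, 2/4: M-copy variants); cdp_soc and pdev_id
 * are assumed to be a valid soc handle and pdev id.
 *
 *	cdp_config_param_type val = {0};
 *
 *	val.cdp_pdev_param_dbg_snf = 1;
 *	dp_set_pdev_param(cdp_soc, pdev_id, CDP_CONFIG_DEBUG_SNIFFER, val);
 */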
7415 
7416 #ifdef FEATURE_PERPKT_INFO
7417 /*
7418  * dp_enable_enhanced_stats() - API to enable enhanced statistics
7419  * @soc: DP_SOC handle
7420  * @pdev_id: id of DP_PDEV handle
7421  *
7422  * Return: QDF_STATUS
7423  */
7424 static QDF_STATUS
7425 dp_enable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
7426 {
7427 	struct dp_pdev *pdev = NULL;
7428 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7429 
7430 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7431 						  pdev_id);
7432 
7433 	if (!pdev)
7434 		return QDF_STATUS_E_FAILURE;
7435 
7436 	if (pdev->enhanced_stats_en == 0)
7437 		dp_cal_client_timer_start(pdev->cal_client_ctx);
7438 
7439 	pdev->enhanced_stats_en = 1;
7440 
7441 	dp_mon_filter_setup_enhanced_stats(pdev);
7442 	status = dp_mon_filter_update(pdev);
7443 	if (status != QDF_STATUS_SUCCESS) {
7444 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7445 			  FL("Failed to set enhanced mode filters"));
7446 		dp_mon_filter_reset_enhanced_stats(pdev);
7447 		dp_cal_client_timer_stop(pdev->cal_client_ctx);
7448 		pdev->enhanced_stats_en = 0;
7449 		return QDF_STATUS_E_FAILURE;
7450 	}
7451 
7452 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
7453 		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
7454 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
7455 		dp_h2t_cfg_stats_msg_send(pdev,
7456 					  DP_PPDU_STATS_CFG_BPR_ENH,
7457 					  pdev->pdev_id);
7458 	}
7459 
7460 	return QDF_STATUS_SUCCESS;
7461 }
7462 
7463 /*
7464  * dp_disable_enhanced_stats() - API to disable enhanced statistics
7465  * @soc: the soc handle
7466  * @pdev_id: pdev_id of pdev
7467  *
7468  * Return: QDF_STATUS
7469  */
7470 static QDF_STATUS
7471 dp_disable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
7472 {
7473 	struct dp_pdev *pdev =
7474 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7475 						   pdev_id);
7476 
7477 	if (!pdev)
7478 		return QDF_STATUS_E_FAILURE;
7479 
7480 	if (pdev->enhanced_stats_en == 1)
7481 		dp_cal_client_timer_stop(pdev->cal_client_ctx);
7482 
7483 	pdev->enhanced_stats_en = 0;
7484 
7485 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
7486 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7487 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
7488 		dp_h2t_cfg_stats_msg_send(pdev,
7489 					  DP_PPDU_STATS_CFG_BPR,
7490 					  pdev->pdev_id);
7491 	}
7492 
7493 	dp_mon_filter_reset_enhanced_stats(pdev);
7494 	if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
7495 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7496 			  FL("Failed to reset enhanced mode filters"));
7497 	}
7498 
7499 	return QDF_STATUS_SUCCESS;
7500 }
7501 #endif /* FEATURE_PERPKT_INFO */
7502 
7503 /*
7504  * dp_get_fw_peer_stats() - function to request peer stats from firmware
7505  * @soc: soc handle
7506  * @pdev_id : id of the pdev handle
7507  * @mac_addr: mac address of the peer
7508  * @cap: Type of htt stats requested
7509  * @is_wait: if set, wait on completion from firmware response
7510  *
7511  * Currently supporting only MAC-ID-based requests:
7512  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
7513  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
7514  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
7515  *
7516  * Return: QDF_STATUS
7517  */
7518 static QDF_STATUS
7519 dp_get_fw_peer_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
7520 		     uint8_t *mac_addr,
7521 		     uint32_t cap, uint32_t is_wait)
7522 {
7523 	int i;
7524 	uint32_t config_param0 = 0;
7525 	uint32_t config_param1 = 0;
7526 	uint32_t config_param2 = 0;
7527 	uint32_t config_param3 = 0;
7528 	struct dp_pdev *pdev =
7529 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7530 						   pdev_id);
7531 
7532 	if (!pdev)
7533 		return QDF_STATUS_E_FAILURE;
7534 
7535 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
7536 	config_param0 |= (1 << (cap + 1));
7537 
7538 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
7539 		config_param1 |= (1 << i);
7540 	}
7541 
7542 	config_param2 |= (mac_addr[0] & 0x000000ff);
7543 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
7544 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
7545 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
7546 
7547 	config_param3 |= (mac_addr[4] & 0x000000ff);
7548 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
7549 
7550 	if (is_wait) {
7551 		qdf_event_reset(&pdev->fw_peer_stats_event);
7552 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
7553 					  config_param0, config_param1,
7554 					  config_param2, config_param3,
7555 					  0, 1, 0);
7556 		qdf_wait_single_event(&pdev->fw_peer_stats_event,
7557 				      DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
7558 	} else {
7559 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
7560 					  config_param0, config_param1,
7561 					  config_param2, config_param3,
7562 					  0, 0, 0);
7563 	}
7564 
7565 	return QDF_STATUS_SUCCESS;
7566 
7567 }
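
/*
 * Worked example of the MAC packing above: for peer MAC
 * 00:11:22:33:44:55 the bytes are packed byte 0 first into the low bits,
 * giving
 *
 *	config_param2 = 0x33221100	(mac_addr[3..0])
 *	config_param3 = 0x00005544	(mac_addr[5..4] in the low 16 bits)
 *
 * while config_param1 requests every TLV by setting one bit per entry up
 * to HTT_PEER_STATS_MAX_TLV.
 */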
7568 
7569 /* This struct definition will be removed from here
7570  * once it gets added to the FW headers */
7571 struct httstats_cmd_req {
7572 	uint32_t    config_param0;
7573 	uint32_t    config_param1;
7574 	uint32_t    config_param2;
7575 	uint32_t    config_param3;
7576 	int         cookie;
7577 	u_int8_t    stats_id;
7578 };
7579 
7580 /*
7581  * dp_get_htt_stats: function to process the httstas request
7582  * @soc: DP soc handle
7583  * @pdev_id: id of pdev handle
7584  * @data: pointer to request data
7585  * @data_len: length for request data
7586  *
7587  * return: QDF_STATUS
7588  */
7589 static QDF_STATUS
7590 dp_get_htt_stats(struct cdp_soc_t *soc, uint8_t pdev_id, void *data,
7591 		 uint32_t data_len)
7592 {
7593 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
7594 	struct dp_pdev *pdev =
7595 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7596 						   pdev_id);
7597 
7598 	if (!pdev)
7599 		return QDF_STATUS_E_FAILURE;
7600 
7601 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
7602 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
7603 				req->config_param0, req->config_param1,
7604 				req->config_param2, req->config_param3,
7605 				req->cookie, 0, 0);
7606 
7607 	return QDF_STATUS_SUCCESS;
7608 }
7609 
7610 /**
7611  * dp_set_pdev_tidmap_prty_wifi3(): update tidmap priority in pdev
7612  * @pdev: DP_PDEV handle
7613  * @prio: tidmap priority value passed by the user
7614  *
7615  * Return: QDF_STATUS_SUCCESS on success
7616  */
7617 static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct dp_pdev *pdev,
7618 						uint8_t prio)
7619 {
7620 	struct dp_soc *soc = pdev->soc;
7621 
7622 	soc->tidmap_prty = prio;
7623 
7624 	hal_tx_set_tidmap_prty(soc->hal_soc, prio);
7625 	return QDF_STATUS_SUCCESS;
7626 }
7627 
7628 /*
7629  * dp_get_peer_param: function to get parameters in peer
7630  * @cdp_soc: DP soc handle
7631  * @vdev_id: id of vdev handle
7632  * @peer_mac: peer mac address
7633  * @param: parameter type to get
7634  * @val: address of buffer
7635  *
7636  * Return: QDF_STATUS
7637  */
7638 static QDF_STATUS dp_get_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
7639 				    uint8_t *peer_mac,
7640 				    enum cdp_peer_param_type param,
7641 				    cdp_config_param_type *val)
7642 {
7643 	return QDF_STATUS_SUCCESS;
7644 }
7645 
7646 /*
7647  * dp_set_peer_param: function to set parameters in peer
7648  * @cdp_soc: DP soc handle
7649  * @vdev_id: id of vdev handle
7650  * @peer_mac: peer mac address
7651  * @param: parameter type to be set
7652  * @val: value of parameter to be set
7653  *
7654  * Return: QDF_STATUS
7655  */
7656 static QDF_STATUS dp_set_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
7657 				    uint8_t *peer_mac,
7658 				    enum cdp_peer_param_type param,
7659 				    cdp_config_param_type val)
7660 {
7661 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
7662 						      peer_mac, 0, vdev_id);
7663 
7664 	if (!peer || peer->delete_in_progress)
7665 		goto fail;
7666 
7667 	switch (param) {
7668 	case CDP_CONFIG_NAWDS:
7669 		peer->nawds_enabled = val.cdp_peer_param_nawds;
7670 		break;
7671 	case CDP_CONFIG_NAC:
7672 		peer->nac = !!(val.cdp_peer_param_nac);
7673 		break;
7674 	case CDP_CONFIG_ISOLATION:
7675 		dp_set_peer_isolation(peer, val.cdp_peer_param_isolation);
7676 		break;
7677 	default:
7678 		break;
7679 	}
7680 
7681 fail:
7682 	if (peer)
7683 		dp_peer_unref_delete(peer);
7684 
7685 	return QDF_STATUS_SUCCESS;
7686 }
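
/*
 * Illustrative usage (a sketch; cdp_soc, vdev_id and peer_mac are assumed
 * to be valid): enabling NAWDS on a peer uses the same union-typed value
 * as the other CDP parameter setters.
 *
 *	cdp_config_param_type val = {0};
 *
 *	val.cdp_peer_param_nawds = 1;
 *	dp_set_peer_param(cdp_soc, vdev_id, peer_mac, CDP_CONFIG_NAWDS, val);
 */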
7687 
7688 /*
7689  * dp_get_pdev_param: function to get parameters from pdev
7690  * @cdp_soc: DP soc handle
7691  * @pdev_id: id of pdev handle
7692  * @param: parameter type to get
7693  * @val: address of buffer to hold the value
7694  *
7695  * Return: status
7696  */
7697 static QDF_STATUS dp_get_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
7698 				    enum cdp_pdev_param_type param,
7699 				    cdp_config_param_type *val)
7700 {
7701 	struct cdp_pdev *pdev = (struct cdp_pdev *)
7702 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
7703 						   pdev_id);
7704 	if (!pdev)
7705 		return QDF_STATUS_E_FAILURE;
7706 
7707 	switch (param) {
7708 	case CDP_CONFIG_VOW:
7709 		val->cdp_pdev_param_cfg_vow =
7710 				((struct dp_pdev *)pdev)->delay_stats_flag;
7711 		break;
7712 	case CDP_TX_PENDING:
7713 		val->cdp_pdev_param_tx_pending = dp_get_tx_pending(pdev);
7714 		break;
7715 	case CDP_FILTER_MCAST_DATA:
7716 		val->cdp_pdev_param_fltr_mcast =
7717 					dp_pdev_get_filter_mcast_data(pdev);
7718 		break;
7719 	case CDP_FILTER_NO_DATA:
7720 		val->cdp_pdev_param_fltr_none =
7721 					dp_pdev_get_filter_non_data(pdev);
7722 		break;
7723 	case CDP_FILTER_UCAST_DATA:
7724 		val->cdp_pdev_param_fltr_ucast =
7725 					dp_pdev_get_filter_ucast_data(pdev);
7726 		break;
7727 	default:
7728 		return QDF_STATUS_E_FAILURE;
7729 	}
7730 
7731 	return QDF_STATUS_SUCCESS;
7732 }
7733 
7734 /*
7735  * dp_set_pdev_param: function to set parameters in pdev
7736  * @cdp_soc: DP soc handle
7737  * @pdev_id: id of pdev handle
7738  * @param: parameter type to be set
7739  * @val: value of parameter to be set
7740  *
7741  * Return: QDF_STATUS
7742  */
7743 static QDF_STATUS dp_set_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
7744 				    enum cdp_pdev_param_type param,
7745 				    cdp_config_param_type val)
7746 {
7747 	struct dp_pdev *pdev =
7748 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
7749 						   pdev_id);
7750 	if (!pdev)
7751 		return QDF_STATUS_E_FAILURE;
7752 
7753 	switch (param) {
7754 	case CDP_CONFIG_TX_CAPTURE:
7755 		return dp_config_debug_sniffer(pdev,
7756 					       val.cdp_pdev_param_tx_capture);
7757 	case CDP_CONFIG_DEBUG_SNIFFER:
7758 		return dp_config_debug_sniffer(pdev,
7759 					       val.cdp_pdev_param_dbg_snf);
7760 	case CDP_CONFIG_BPR_ENABLE:
7761 		return dp_set_bpr_enable(pdev, val.cdp_pdev_param_bpr_enable);
7762 	case CDP_CONFIG_PRIMARY_RADIO:
7763 		pdev->is_primary = val.cdp_pdev_param_primary_radio;
7764 		break;
7765 	case CDP_CONFIG_CAPTURE_LATENCY:
7766 		pdev->latency_capture_enable = val.cdp_pdev_param_cptr_latcy;
7767 		break;
7768 	case CDP_INGRESS_STATS:
7769 		dp_pdev_tid_stats_ingress_inc(pdev,
7770 					      val.cdp_pdev_param_ingrs_stats);
7771 		break;
7772 	case CDP_OSIF_DROP:
7773 		dp_pdev_tid_stats_osif_drop(pdev,
7774 					    val.cdp_pdev_param_osif_drop);
7775 		break;
7776 	case CDP_CONFIG_ENH_RX_CAPTURE:
7777 		return dp_config_enh_rx_capture(pdev,
7778 						val.cdp_pdev_param_en_rx_cap);
7779 	case CDP_CONFIG_ENH_TX_CAPTURE:
7780 		return dp_config_enh_tx_capture(pdev,
7781 						val.cdp_pdev_param_en_tx_cap);
7782 	case CDP_CONFIG_HMMC_TID_OVERRIDE:
7783 		pdev->hmmc_tid_override_en = val.cdp_pdev_param_hmmc_tid_ovrd;
7784 		break;
7785 	case CDP_CONFIG_HMMC_TID_VALUE:
7786 		pdev->hmmc_tid = val.cdp_pdev_param_hmmc_tid;
7787 		break;
7788 	case CDP_CHAN_NOISE_FLOOR:
7789 		pdev->chan_noise_floor = val.cdp_pdev_param_chn_noise_flr;
7790 		break;
7791 	case CDP_TIDMAP_PRTY:
7792 		dp_set_pdev_tidmap_prty_wifi3(pdev,
7793 					      val.cdp_pdev_param_tidmap_prty);
7794 		break;
7795 	case CDP_FILTER_NEIGH_PEERS:
7796 		dp_set_filter_neigh_peers(pdev,
7797 					  val.cdp_pdev_param_fltr_neigh_peers);
7798 		break;
7799 	case CDP_MONITOR_CHANNEL:
7800 		pdev->mon_chan_num = val.cdp_pdev_param_monitor_chan;
7801 		break;
7802 	case CDP_MONITOR_FREQUENCY:
7803 		pdev->mon_chan_freq = val.cdp_pdev_param_mon_freq;
7804 		break;
7805 	case CDP_CONFIG_BSS_COLOR:
7806 		dp_mon_set_bsscolor(pdev, val.cdp_pdev_param_bss_color);
7807 		break;
7808 	default:
7809 		return QDF_STATUS_E_INVAL;
7810 	}
7811 	return QDF_STATUS_SUCCESS;
7812 }
7813 
7814 /*
7815  * dp_calculate_delay_stats() - function to calculate rx packet delay stats
7816  * @cdp_soc: DP soc handle
7817  * @vdev_id: id of DP vdev handle
7818  * @nbuf: skb
7819  *
7820  * Return: QDF_STATUS
7821  */
7822 static QDF_STATUS
7823 dp_calculate_delay_stats(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
7824 			 qdf_nbuf_t nbuf)
7825 {
7826 	struct dp_vdev *vdev =
7827 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)cdp_soc,
7828 						   vdev_id);
7829 	if (vdev) {
7830 		dp_rx_compute_delay(vdev, nbuf);
7831 		return QDF_STATUS_SUCCESS;
7832 	}
7833 
7834 	return QDF_STATUS_E_FAILURE;
7835 }
7836 
7837 /*
7838  * dp_get_vdev_param: function to get parameters from vdev
7839  * @cdp_soc : DP soc handle
7840  * @vdev_id: id of DP vdev handle
7841  * @param: parameter type to get value
7842  * @val: buffer address
7843  *
7844  * return: status
7845  */
7846 static QDF_STATUS dp_get_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
7847 				    enum cdp_vdev_param_type param,
7848 				    cdp_config_param_type *val)
7849 {
7850 	struct dp_vdev *vdev =
7851 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)cdp_soc,
7852 						   vdev_id);
7853 	if (!vdev)
7854 		return QDF_STATUS_E_FAILURE;
7855 
7856 	switch (param) {
7857 	case CDP_ENABLE_WDS:
7858 		val->cdp_vdev_param_wds = vdev->wds_enabled;
7859 		break;
7860 	case CDP_ENABLE_MEC:
7861 		val->cdp_vdev_param_mec = vdev->mec_enabled;
7862 		break;
7863 	case CDP_ENABLE_DA_WAR:
7864 		val->cdp_vdev_param_da_war = vdev->pdev->soc->da_war_enabled;
7865 		break;
7866 	default:
7867 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7868 			  "param value %d is wrong\n",
7869 			  param);
7870 		return QDF_STATUS_E_FAILURE;
7871 	}
7872 
7873 	return QDF_STATUS_SUCCESS;
7874 }
7875 
7876 /*
7877  * dp_set_vdev_param: function to set parameters in vdev
7878  * @cdp_soc : DP soc handle
7879  * @vdev_id: id of DP vdev handle
7880  * @param: parameter type to get value
7881  * @val: value
7882  *
7883  * return: QDF_STATUS
7884  */
7885 static QDF_STATUS
7886 dp_set_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
7887 		  enum cdp_vdev_param_type param, cdp_config_param_type val)
7888 {
7889 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
7890 	struct dp_vdev *vdev =
7891 		dp_get_vdev_from_soc_vdev_id_wifi3(dsoc, vdev_id);
7892 	uint32_t var = 0;
7893 
7894 	if (!vdev)
7895 		return QDF_STATUS_E_FAILURE;
7896 
7897 	switch (param) {
7898 	case CDP_ENABLE_WDS:
7899 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7900 			  "wds_enable %d for vdev(%pK) id(%d)\n",
7901 			  val.cdp_vdev_param_wds, vdev, vdev->vdev_id);
7902 		vdev->wds_enabled = val.cdp_vdev_param_wds;
7903 		break;
7904 	case CDP_ENABLE_MEC:
7905 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7906 			  "mec_enable %d for vdev(%pK) id(%d)\n",
7907 			  val.cdp_vdev_param_mec, vdev, vdev->vdev_id);
7908 		vdev->mec_enabled = val.cdp_vdev_param_mec;
7909 		break;
7910 	case CDP_ENABLE_DA_WAR:
7911 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7912 			  "da_war_enable %d for vdev(%pK) id(%d)\n",
7913 			  val.cdp_vdev_param_da_war, vdev, vdev->vdev_id);
7914 		vdev->pdev->soc->da_war_enabled = val.cdp_vdev_param_da_war;
7915 		dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
7916 					     vdev->pdev->soc));
7917 		break;
7918 	case CDP_ENABLE_NAWDS:
7919 		vdev->nawds_enabled = val.cdp_vdev_param_nawds;
7920 		break;
7921 	case CDP_ENABLE_MCAST_EN:
7922 		vdev->mcast_enhancement_en = val.cdp_vdev_param_mcast_en;
7923 		break;
7924 	case CDP_ENABLE_PROXYSTA:
7925 		vdev->proxysta_vdev = val.cdp_vdev_param_proxysta;
7926 		break;
7927 	case CDP_UPDATE_TDLS_FLAGS:
7928 		vdev->tdls_link_connected = val.cdp_vdev_param_tdls_flags;
7929 		break;
7930 	case CDP_CFG_WDS_AGING_TIMER:
7931 		var = val.cdp_vdev_param_aging_tmr;
7932 		if (!var)
7933 			qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
7934 		else if (var != vdev->wds_aging_timer_val)
7935 			qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, var);
7936 
7937 		vdev->wds_aging_timer_val = var;
7938 		break;
7939 	case CDP_ENABLE_AP_BRIDGE:
7940 		if (wlan_op_mode_sta != vdev->opmode)
7941 			vdev->ap_bridge_enabled = val.cdp_vdev_param_ap_brdg_en;
7942 		else
7943 			vdev->ap_bridge_enabled = false;
7944 		break;
7945 	case CDP_ENABLE_CIPHER:
7946 		vdev->sec_type = val.cdp_vdev_param_cipher_en;
7947 		break;
7948 	case CDP_ENABLE_QWRAP_ISOLATION:
7949 		vdev->isolation_vdev = val.cdp_vdev_param_qwrap_isolation;
7950 		break;
7951 	case CDP_UPDATE_MULTIPASS:
7952 		vdev->multipass_en = val.cdp_vdev_param_update_multipass;
7953 		break;
7954 	case CDP_TX_ENCAP_TYPE:
7955 		vdev->tx_encap_type = val.cdp_vdev_param_tx_encap;
7956 		break;
7957 	case CDP_RX_DECAP_TYPE:
7958 		vdev->rx_decap_type = val.cdp_vdev_param_rx_decap;
7959 		break;
7960 	case CDP_TID_VDEV_PRTY:
7961 		vdev->tidmap_prty = val.cdp_vdev_param_tidmap_prty;
7962 		break;
7963 	case CDP_TIDMAP_TBL_ID:
7964 		vdev->tidmap_tbl_id = val.cdp_vdev_param_tidmap_tbl_id;
7965 		break;
7966 #ifdef MESH_MODE_SUPPORT
7967 	case CDP_MESH_RX_FILTER:
7968 		dp_peer_set_mesh_rx_filter((struct cdp_vdev *)vdev,
7969 					   val.cdp_vdev_param_mesh_rx_filter);
7970 		break;
7971 	case CDP_MESH_MODE:
7972 		dp_peer_set_mesh_mode((struct cdp_vdev *)vdev,
7973 				      val.cdp_vdev_param_mesh_mode);
7974 		break;
7975 #endif
7976 	default:
7977 		break;
7978 	}
7979 
7980 	dp_tx_vdev_update_search_flags((struct dp_vdev *)vdev);
7981 
7982 	return QDF_STATUS_SUCCESS;
7983 }
7984 
7985 /*
7986  * dp_set_psoc_param: function to set parameters in psoc
7987  * @cdp_soc : DP soc handle
7988  * @param: parameter type to be set
7989  * @val: value of parameter to be set
7990  *
7991  * return: QDF_STATUS
7992  */
7993 static QDF_STATUS
7994 dp_set_psoc_param(struct cdp_soc_t *cdp_soc,
7995 		  enum cdp_psoc_param_type param, cdp_config_param_type val)
7996 {
7997 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
7998 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->wlan_cfg_ctx;
7999 
8000 	switch (param) {
8001 	case CDP_ENABLE_RATE_STATS:
8002 		soc->wlanstats_enabled = val.cdp_psoc_param_en_rate_stats;
8003 		break;
8004 	case CDP_SET_NSS_CFG:
8005 		wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx,
8006 					    val.cdp_psoc_param_en_nss_cfg);
8007 		/*
8008 		 * TODO: masked out based on the per offloaded radio
8009 		 */
8010 		switch (val.cdp_psoc_param_en_nss_cfg) {
8011 		case dp_nss_cfg_default:
8012 			break;
8013 		case dp_nss_cfg_first_radio:
8014 		/*
8015 		 * This configuration is valid for single band radio which
8016 		 * is also NSS offload.
8017 		 */
8018 		case dp_nss_cfg_dbdc:
8019 		case dp_nss_cfg_dbtc:
8020 			wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
8021 			wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
8022 			wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
8023 			wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
8024 			break;
8025 		default:
8026 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8027 				  "Invalid offload config %d",
8028 				  val.cdp_psoc_param_en_nss_cfg);
8029 		}
8030 
8031 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
8032 			  FL("nss-wifi<0> nss config is enabled"));
8033 		break;
8034 	case CDP_SET_PREFERRED_HW_MODE:
8035 		soc->preferred_hw_mode = val.cdp_psoc_param_preferred_hw_mode;
8036 		break;
8037 	default:
8038 		break;
8039 	}
8040 
8041 	return QDF_STATUS_SUCCESS;
8042 }
8043 
8044 /*
8045  * dp_get_psoc_param: function to get parameters in soc
8046  * @cdp_soc : DP soc handle
8047  * @param: parameter type to get
8048  * @val: address of buffer
8049  *
8050  * return: status
8051  */
8052 static QDF_STATUS dp_get_psoc_param(struct cdp_soc_t *cdp_soc,
8053 				    enum cdp_psoc_param_type param,
8054 				    cdp_config_param_type *val)
8055 {
8056 	return QDF_STATUS_SUCCESS;
8057 }
8058 
8059 /**
8060  * dp_peer_update_pkt_capture_params: Set Rx & Tx Capture flags for a peer
8061  * @soc: DP_SOC handle
8062  * @pdev_id: id of DP_PDEV handle
8063  * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode
8064  * @is_tx_pkt_cap_enable: enable/disable/delete/print
8065  * Tx packet capture in monitor mode
8066  * @peer_mac: MAC address for which the above need to be enabled/disabled
8067  *
8068  * Return: QDF_STATUS
8069  */
8070 QDF_STATUS
8071 dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc,
8072 				  uint8_t pdev_id,
8073 				  bool is_rx_pkt_cap_enable,
8074 				  uint8_t is_tx_pkt_cap_enable,
8075 				  uint8_t *peer_mac)
8076 {
8077 	QDF_STATUS status;
8078 	struct dp_peer *peer;
8079 	struct dp_pdev *pdev =
8080 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8081 						   pdev_id);
8082 	if (!pdev)
8083 		return QDF_STATUS_E_FAILURE;
8084 
8085 	peer = (struct dp_peer *)dp_find_peer_by_addr((struct cdp_pdev *)pdev,
8086 						      peer_mac);
8087 
8088 	/* we need to set tx pkt capture for non associated peer */
8089 	status = dp_peer_set_tx_capture_enabled(pdev, peer,
8090 						is_tx_pkt_cap_enable,
8091 						peer_mac);
8092 
8093 	status = dp_peer_set_rx_capture_enabled(pdev, peer,
8094 						is_rx_pkt_cap_enable,
8095 						peer_mac);
8096 
8097 	return status;
8098 }
8099 
8100 /*
8101  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
8102  * @soc: DP_SOC handle
8103  * @vdev_id: id of DP_VDEV handle
8104  * @map_id:ID of map that needs to be updated
8105  *
8106  * Return: QDF_STATUS
8107  */
8108 static QDF_STATUS dp_set_vdev_dscp_tid_map_wifi3(ol_txrx_soc_handle soc,
8109 						 uint8_t vdev_id,
8110 						 uint8_t map_id)
8111 {
8112 	struct dp_vdev *vdev =
8113 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
8114 						   vdev_id);
8115 	if (vdev) {
8116 		vdev->dscp_tid_map_id = map_id;
8117 		return QDF_STATUS_SUCCESS;
8118 	}
8119 
8120 	return QDF_STATUS_E_FAILURE;
8121 }
8122 
8123 #ifdef DP_RATETABLE_SUPPORT
8124 static int dp_txrx_get_ratekbps(int preamb, int mcs,
8125 				int htflag, int gintval)
8126 {
8127 	uint32_t rix;
8128 	uint16_t ratecode;
8129 
8130 	return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1,
8131 			       (uint8_t)preamb, 1, &rix, &ratecode);
8132 }
8133 #else
8134 static int dp_txrx_get_ratekbps(int preamb, int mcs,
8135 				int htflag, int gintval)
8136 {
8137 	return 0;
8138 }
8139 #endif
8140 
8141 /* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
8142  * @soc: DP soc handle
8143  * @pdev_id: id of DP pdev handle
8144  * @pdev_stats: buffer to copy to
8145  *
8146  * return : status success/failure
8147  */
8148 static QDF_STATUS
8149 dp_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
8150 		       struct cdp_pdev_stats *pdev_stats)
8151 {
8152 	struct dp_pdev *pdev =
8153 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8154 						   pdev_id);
8155 	if (!pdev)
8156 		return QDF_STATUS_E_FAILURE;
8157 
8158 	dp_aggregate_pdev_stats(pdev);
8159 
8160 	qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats));
8161 	return QDF_STATUS_SUCCESS;
8162 }
8163 
8164 /* dp_txrx_update_vdev_me_stats(): Update vdev ME stats sent from CDP
8165  * @vdev: DP vdev handle
8166  * @buf: buffer containing specific stats structure
8167  *
8168  * Returns: void
8169  */
8170 static void dp_txrx_update_vdev_me_stats(struct dp_vdev *vdev,
8171 					 void *buf)
8172 {
8173 	struct cdp_tx_ingress_stats *host_stats = NULL;
8174 
8175 	if (!buf) {
8176 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8177 			  "Invalid host stats buf");
8178 		return;
8179 	}
8180 	host_stats = (struct cdp_tx_ingress_stats *)buf;
8181 
8182 	DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt,
8183 			 host_stats->mcast_en.mcast_pkt.num,
8184 			 host_stats->mcast_en.mcast_pkt.bytes);
8185 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error,
8186 		     host_stats->mcast_en.dropped_map_error);
8187 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_self_mac,
8188 		     host_stats->mcast_en.dropped_self_mac);
8189 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_send_fail,
8190 		     host_stats->mcast_en.dropped_send_fail);
8191 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast,
8192 		     host_stats->mcast_en.ucast);
8193 	DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc,
8194 		     host_stats->mcast_en.fail_seg_alloc);
8195 	DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail,
8196 		     host_stats->mcast_en.clone_fail);
8197 }
8198 
8199 /* dp_txrx_update_vdev_host_stats(): Update stats sent through CDP
8200  * @soc: DP soc handle
8201  * @vdev_id: id of DP vdev handle
8202  * @buf: buffer containing specific stats structure
8203  * @stats_id: stats type
8204  *
8205  * Returns: QDF_STATUS
8206  */
8207 static QDF_STATUS dp_txrx_update_vdev_host_stats(struct cdp_soc_t *soc,
8208 						 uint8_t vdev_id,
8209 						 void *buf,
8210 						 uint16_t stats_id)
8211 {
8212 	struct dp_vdev *vdev =
8213 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
8214 						   vdev_id);
8215 	if (!vdev) {
8216 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8217 			  "Invalid vdev handle");
8218 		return QDF_STATUS_E_FAILURE;
8219 	}
8220 
8221 	switch (stats_id) {
8222 	case DP_VDEV_STATS_PKT_CNT_ONLY:
8223 		break;
8224 	case DP_VDEV_STATS_TX_ME:
8225 		dp_txrx_update_vdev_me_stats(vdev, buf);
8226 		break;
8227 	default:
8228 		qdf_info("Invalid stats_id %d", stats_id);
8229 		break;
8230 	}
8231 
8232 	return QDF_STATUS_SUCCESS;
8233 }
8234 
8235 /* dp_txrx_get_peer_stats - will return cdp_peer_stats
8236  * @soc: soc handle
8237  * @vdev_id: id of vdev handle
8238  * @peer_mac: mac of DP_PEER handle
8239  * @peer_stats: buffer to copy to
8240  * return : status success/failure
8241  */
8242 static QDF_STATUS
8243 dp_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
8244 		       uint8_t *peer_mac, struct cdp_peer_stats *peer_stats)
8245 {
8246 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8247 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
8248 						       peer_mac, 0, vdev_id);
8249 
8250 	if (!peer || peer->delete_in_progress) {
8251 		status = QDF_STATUS_E_FAILURE;
8252 	} else
8253 		qdf_mem_copy(peer_stats, &peer->stats,
8254 			     sizeof(struct cdp_peer_stats));
8255 
8256 	if (peer)
8257 		dp_peer_unref_delete(peer);
8258 
8259 	return status;
8260 }
8261 
8262 /* dp_txrx_get_peer_stats_param - will return specified cdp_peer_stats
8263  * @soc: soc handle
8264  * @vdev_id: vdev_id of vdev object
8265  * @peer_mac: mac address of the peer
8266  * @type: enum of required stats
8267  * @buf: buffer to hold the value
8268  * Return: status success/failure
8269  */
8270 static QDF_STATUS
8271 dp_txrx_get_peer_stats_param(struct cdp_soc_t *soc, uint8_t vdev_id,
8272 			     uint8_t *peer_mac, enum cdp_peer_stats_type type,
8273 			     cdp_peer_stats_param_t *buf)
8274 {
8275 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
8276 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
8277 						      peer_mac, 0, vdev_id);
8278 
8279 	if (!peer || peer->delete_in_progress) {
8280 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8281 			  "Invalid Peer for Mac %pM", peer_mac);
8282 		ret = QDF_STATUS_E_FAILURE;
8283 	} else if (type < cdp_peer_stats_max) {
8284 		switch (type) {
8285 		case cdp_peer_tx_ucast:
8286 			buf->tx_ucast = peer->stats.tx.ucast;
8287 			break;
8288 		case cdp_peer_tx_mcast:
8289 			buf->tx_mcast = peer->stats.tx.mcast;
8290 			break;
8291 		case cdp_peer_tx_rate:
8292 			buf->tx_rate = peer->stats.tx.tx_rate;
8293 			break;
8294 		case cdp_peer_tx_last_tx_rate:
8295 			buf->last_tx_rate = peer->stats.tx.last_tx_rate;
8296 			break;
8297 		case cdp_peer_tx_inactive_time:
8298 			buf->tx_inactive_time = peer->stats.tx.inactive_time;
8299 			break;
8300 		case cdp_peer_tx_ratecode:
8301 			buf->tx_ratecode = peer->stats.tx.tx_ratecode;
8302 			break;
8303 		case cdp_peer_tx_flags:
8304 			buf->tx_flags = peer->stats.tx.tx_flags;
8305 			break;
8306 		case cdp_peer_tx_power:
8307 			buf->tx_power = peer->stats.tx.tx_power;
8308 			break;
8309 		case cdp_peer_rx_rate:
8310 			buf->rx_rate = peer->stats.rx.rx_rate;
8311 			break;
8312 		case cdp_peer_rx_last_rx_rate:
8313 			buf->last_rx_rate = peer->stats.rx.last_rx_rate;
8314 			break;
8315 		case cdp_peer_rx_ratecode:
8316 			buf->rx_ratecode = peer->stats.rx.rx_ratecode;
8317 			break;
8318 		case cdp_peer_rx_ucast:
8319 			buf->rx_ucast = peer->stats.rx.unicast;
8320 			break;
8321 		case cdp_peer_rx_flags:
8322 			buf->rx_flags = peer->stats.rx.rx_flags;
8323 			break;
8324 		case cdp_peer_rx_avg_rssi:
8325 			buf->rx_avg_rssi = peer->stats.rx.avg_rssi;
8326 			break;
8327 		default:
8328 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8329 				  "Invalid value");
8330 			ret = QDF_STATUS_E_FAILURE;
8331 			break;
8332 		}
8333 	} else {
8334 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8335 			  "Invalid value");
8336 		ret = QDF_STATUS_E_FAILURE;
8337 	}
8338 
8339 	if (peer)
8340 		dp_peer_unref_delete(peer);
8341 
8342 	return ret;
8343 }
8344 
8345 /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
8346  * @soc: soc handle
8347  * @vdev_id: id of vdev handle
8348  * @peer_mac: mac of DP_PEER handle
8349  *
8350  * return : QDF_STATUS
8351  */
8352 static QDF_STATUS
8353 dp_txrx_reset_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
8354 			 uint8_t *peer_mac)
8355 {
8356 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8357 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
8358 						       peer_mac, 0, vdev_id);
8359 
8360 	if (!peer || peer->delete_in_progress) {
8361 		status = QDF_STATUS_E_FAILURE;
8362 		goto fail;
8363 	}
8364 
8365 	qdf_mem_zero(&peer->stats, sizeof(peer->stats));
8366 
8367 fail:
8368 	if (peer)
8369 		dp_peer_unref_delete(peer);
8370 
8371 	return status;
8372 }
8373 
8374 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
8375  * @soc: DP soc handle
8376  * @vdev_id: id of DP vdev handle
8377  * @buf: buffer for vdev stats
8378  * @is_aggregate: aggregate vdev stats over its peers if true
8379  * Return: int */
8380 static int dp_txrx_get_vdev_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
8381 				   void *buf, bool is_aggregate)
8382 {
8383 	struct cdp_vdev_stats *vdev_stats;
8384 	struct dp_pdev *pdev;
8385 	struct dp_vdev *vdev =
8386 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
8387 						   vdev_id);
8388 
8389 	if (!vdev)
8390 		return 1;
8391 
8392 	pdev = vdev->pdev;
8393 	if (!pdev)
8394 		return 1;
8395 
8396 	vdev_stats = (struct cdp_vdev_stats *)buf;
8397 
8398 	if (is_aggregate) {
8399 		qdf_spin_lock_bh(&((struct dp_soc *)soc)->peer_ref_mutex);
8400 		dp_aggregate_vdev_stats(vdev, buf);
8401 		qdf_spin_unlock_bh(&((struct dp_soc *)soc)->peer_ref_mutex);
8402 	} else {
8403 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
8404 	}
8405 
8406 	return 0;
8407 }
8408 
8409 /*
8410  * dp_get_total_per(): get total PER (packet error rate)
8411  * @soc: DP soc handle
8412  * @pdev_id: id of DP_PDEV handle
8413  *
8414  * Return: % error rate using retries per packet and success packets
8415  */
8416 static int dp_get_total_per(struct cdp_soc_t *soc, uint8_t pdev_id)
8417 {
8418 	struct dp_pdev *pdev =
8419 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8420 						   pdev_id);
8421 
8422 	if (!pdev)
8423 		return 0;
8424 
8425 	dp_aggregate_pdev_stats(pdev);
8426 	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
8427 		return 0;
8428 	return ((pdev->stats.tx.retries * 100) /
8429 		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
8430 }
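
/*
 * Worked example for the PER computation above: with
 * pdev->stats.tx.retries = 25 and pdev->stats.tx.tx_success.num = 75,
 *
 *	PER = (25 * 100) / (75 + 25) = 25
 *
 * i.e. 25% of transmit attempts were retries; the integer division
 * truncates the result.
 */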
8431 
8432 /*
8433  * dp_txrx_stats_publish(): publish pdev stats into a buffer
8434  * @soc: DP soc handle
8435  * @pdev_id: id of DP_PDEV handle
8436  * @buf: to hold pdev_stats
8437  *
8438  * Return: int
8439  */
8440 static int
8441 dp_txrx_stats_publish(struct cdp_soc_t *soc, uint8_t pdev_id,
8442 		      struct cdp_stats_extd *buf)
8443 {
8444 	struct cdp_txrx_stats_req req = {0,};
8445 	struct dp_pdev *pdev =
8446 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8447 						   pdev_id);
8448 
8449 	if (!pdev)
8450 		return TXRX_STATS_LEVEL_OFF;
8451 
8452 	dp_aggregate_pdev_stats(pdev);
8453 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
8454 	req.cookie_val = 1;
8455 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
8456 				req.param1, req.param2, req.param3, 0,
8457 				req.cookie_val, 0);
8458 
8459 	msleep(DP_MAX_SLEEP_TIME);
8460 
8461 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
8462 	req.cookie_val = 1;
8463 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
8464 				req.param1, req.param2, req.param3, 0,
8465 				req.cookie_val, 0);
8466 
8467 	msleep(DP_MAX_SLEEP_TIME);
8468 	qdf_mem_copy(buf, &pdev->stats, sizeof(struct cdp_pdev_stats));
8469 
8470 	return TXRX_STATS_LEVEL;
8471 }
8472 
8473 /**
8474  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
8475  * @soc_handle: soc handle
8476  * @pdev_id: id of DP_PDEV handle
8477  * @map_id: ID of map that needs to be updated
8478  * @tos: index value in map
8479  * @tid: tid value passed by the user
8480  *
8481  * Return: QDF_STATUS
8482  */
8483 static QDF_STATUS
8484 dp_set_pdev_dscp_tid_map_wifi3(struct cdp_soc_t *soc_handle,
8485 			       uint8_t pdev_id,
8486 			       uint8_t map_id,
8487 			       uint8_t tos, uint8_t tid)
8488 {
8489 	uint8_t dscp;
8490 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8491 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
8492 
8493 	if (!pdev)
8494 		return QDF_STATUS_E_FAILURE;
8495 
8496 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
8497 	pdev->dscp_tid_map[map_id][dscp] = tid;
8498 
8499 	if (map_id < soc->num_hw_dscp_tid_map)
8500 		hal_tx_update_dscp_tid(soc->hal_soc, tid,
8501 				       map_id, dscp);
8502 	else
8503 		return QDF_STATUS_E_FAILURE;
8504 
8505 	return QDF_STATUS_SUCCESS;
8506 }
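
/*
 * Worked example for the DSCP extraction above, assuming the usual
 * DP_IP_DSCP_SHIFT of 2 and DP_IP_DSCP_MASK of 0x3f from the DP headers:
 * a tos of 0xb8 (DSCP 46, Expedited Forwarding) gives
 *
 *	dscp = (0xb8 >> 2) & 0x3f = 46
 *
 * so dscp_tid_map[map_id][46] is updated, and the HW register copy is
 * refreshed only for maps the hardware implements
 * (map_id < num_hw_dscp_tid_map).
 */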
8507 
8508 /**
8509  * dp_fw_stats_process(): Process TxRX FW stats request
8510  * @vdev_handle: DP VDEV handle
8511  * @req: stats request
8512  *
8513  * return: int
8514  */
8515 static int dp_fw_stats_process(struct dp_vdev *vdev,
8516 			       struct cdp_txrx_stats_req *req)
8517 {
8518 	struct dp_pdev *pdev = NULL;
8519 	uint32_t stats = req->stats;
8520 	uint8_t mac_id = req->mac_id;
8521 
8522 	if (!vdev) {
8523 		DP_TRACE(NONE, "VDEV not found");
8524 		return 1;
8525 	}
8526 	pdev = vdev->pdev;
8527 
8528 	/*
8529 	 * For the HTT_DBG_EXT_STATS_RESET command, the FW expects
8530 	 * param0 to param3 to be configured according to the rule below:
8531 	 *
8532 	 * PARAM:
8533 	 *   - config_param0 : start_offset (stats type)
8534 	 *   - config_param1 : stats bmask from start offset
8535 	 *   - config_param2 : stats bmask from start offset + 32
8536 	 *   - config_param3 : stats bmask from start offset + 64
8537 	 */
8538 	if (req->stats == CDP_TXRX_STATS_0) {
8539 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
8540 		req->param1 = 0xFFFFFFFF;
8541 		req->param2 = 0xFFFFFFFF;
8542 		req->param3 = 0xFFFFFFFF;
8543 	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
8544 		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
8545 	}
8546 
8547 	if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT) {
8548 		return dp_h2t_ext_stats_msg_send(pdev,
8549 				HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT,
8550 				req->param0, req->param1, req->param2,
8551 				req->param3, 0, 0, mac_id);
8552 	} else {
8553 		return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
8554 				req->param1, req->param2, req->param3,
8555 				0, 0, mac_id);
8556 	}
8557 }
8558 
8559 /**
8560  * dp_txrx_stats_request - function to map to firmware and host stats
8561  * @soc_handle: soc handle
8562  * @vdev_id: virtual device ID
8563  * @req: stats request
8564  *
8565  * Return: QDF_STATUS
8566  */
8567 static
8568 QDF_STATUS dp_txrx_stats_request(struct cdp_soc_t *soc_handle,
8569 				 uint8_t vdev_id,
8570 				 struct cdp_txrx_stats_req *req)
8571 {
8572 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_handle);
8573 	int host_stats;
8574 	int fw_stats;
8575 	enum cdp_stats stats;
8576 	int num_stats;
8577 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
8578 								  vdev_id);
8579 
8580 	if (!vdev || !req) {
8581 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8582 				"Invalid vdev/req instance");
8583 		return QDF_STATUS_E_INVAL;
8584 	}
8585 
8586 	if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) {
8587 		dp_err("Invalid mac id request");
8588 		return QDF_STATUS_E_INVAL;
8589 	}
8590 
8591 	stats = req->stats;
8592 	if (stats >= CDP_TXRX_MAX_STATS)
8593 		return QDF_STATUS_E_INVAL;
8594 
8595 	/*
8596 	 * DP_CURR_FW_STATS_AVAIL: no. of FW stats currently available;
8597 	 * it has to be updated whenever new FW HTT stats are added
8598 	 */
8599 	if (stats > CDP_TXRX_STATS_HTT_MAX)
8600 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
8601 
8602 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
8603 
8604 	if (stats >= num_stats) {
8605 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8606 			  "%s: Invalid stats option: %d", __func__, stats);
8607 		return QDF_STATUS_E_INVAL;
8608 	}
8609 
8610 	req->stats = stats;
8611 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
8612 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
8613 
8614 	dp_info("stats: %u fw_stats_type: %d host_stats: %d",
8615 		stats, fw_stats, host_stats);
8616 
8617 	if (fw_stats != TXRX_FW_STATS_INVALID) {
8618 		/* update request with FW stats type */
8619 		req->stats = fw_stats;
8620 		return dp_fw_stats_process(vdev, req);
8621 	}
8622 
8623 	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
8624 			(host_stats <= TXRX_HOST_STATS_MAX))
8625 		return dp_print_host_stats(vdev, req);
8626 	else
8627 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
8628 				"Wrong Input for TxRx Stats");
8629 
8630 	return QDF_STATUS_SUCCESS;
8631 }
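
/*
 * Illustrative usage (a sketch; fields not shown are left zero, and the
 * stats value follows the option numbering from dp_txrx_stats_help()):
 *
 *	struct cdp_txrx_stats_req req = {0};
 *
 *	req.stats = CDP_TXRX_STATS_1;	-- option 1: HTT Tx statistics
 *	req.mac_id = 0;
 *	dp_txrx_stats_request(soc_handle, vdev_id, &req);
 */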
8632 
8633 /*
8634  * dp_txrx_dump_stats() - Dump statistics
8635  * @psoc: CDP soc handle; @value: statistics option; @level: verbosity level
8636  */
8637 static QDF_STATUS dp_txrx_dump_stats(struct cdp_soc_t *psoc, uint16_t value,
8638 				     enum qdf_stats_verbosity_level level)
8639 {
8640 	struct dp_soc *soc =
8641 		(struct dp_soc *)psoc;
8642 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8643 
8644 	if (!soc) {
8645 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8646 			"%s: soc is NULL", __func__);
8647 		return QDF_STATUS_E_INVAL;
8648 	}
8649 
8650 	switch (value) {
8651 	case CDP_TXRX_PATH_STATS:
8652 		dp_txrx_path_stats(soc);
8653 		dp_print_soc_interrupt_stats(soc);
8654 		hal_dump_reg_write_stats(soc->hal_soc);
8655 		break;
8656 
8657 	case CDP_RX_RING_STATS:
8658 		dp_print_per_ring_stats(soc);
8659 		break;
8660 
8661 	case CDP_TXRX_TSO_STATS:
8662 		dp_print_tso_stats(soc, level);
8663 		break;
8664 
8665 	case CDP_DUMP_TX_FLOW_POOL_INFO:
8666 		if (level == QDF_STATS_VERBOSITY_LEVEL_HIGH)
8667 			cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
8668 		break;
8669 
8670 	case CDP_DP_NAPI_STATS:
8671 		dp_print_napi_stats(soc);
8672 		break;
8673 
8674 	case CDP_TXRX_DESC_STATS:
8675 		/* TODO: NOT IMPLEMENTED */
8676 		break;
8677 
8678 	case CDP_DP_RX_FISA_STATS:
8679 		dp_rx_dump_fisa_stats(soc);
8680 		break;
8681 
8682 	default:
8683 		status = QDF_STATUS_E_INVAL;
8684 		break;
8685 	}
8686 
8687 	return status;
8688 
8689 }
8690 
8691 /**
8692  * dp_txrx_clear_dump_stats() - clear dumped stats
8693  * @soc_hdl: soc handle; @pdev_id: id of pdev handle
8694  * @value: stats option
8695  *
8696  * Return: 0 - Success, non-zero - failure
8697  */
8698 static
8699 QDF_STATUS dp_txrx_clear_dump_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
8700 				    uint8_t value)
8701 {
8702 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8703 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8704 
8705 	if (!soc) {
8706 		dp_err("%s: soc is NULL", __func__);
8707 		return QDF_STATUS_E_INVAL;
8708 	}
8709 
8710 	switch (value) {
8711 	case CDP_TXRX_TSO_STATS:
8712 		dp_txrx_clear_tso_stats(soc);
8713 		break;
8714 
8715 	default:
8716 		status = QDF_STATUS_E_INVAL;
8717 		break;
8718 	}
8719 
8720 	return status;
8721 }
8722 
8723 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
8724 /**
8725  * dp_update_flow_control_parameters() - API to store datapath
8726  *                            config parameters
8727  * @soc: soc handle
8728  * @params: ini parameter handle
8729  *
8730  * Return: void
8731  */
8732 static inline
8733 void dp_update_flow_control_parameters(struct dp_soc *soc,
8734 				struct cdp_config_params *params)
8735 {
8736 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
8737 					params->tx_flow_stop_queue_threshold;
8738 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
8739 					params->tx_flow_start_queue_offset;
8740 }
8741 #else
8742 static inline
8743 void dp_update_flow_control_parameters(struct dp_soc *soc,
8744 				struct cdp_config_params *params)
8745 {
8746 }
8747 #endif
8748 
8749 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
8750 /* Max packet limit for TX Comp packet loop (dp_tx_comp_handler) */
8751 #define DP_TX_COMP_LOOP_PKT_LIMIT_MAX 1024
8752 
8753 /* Max packet limit for RX REAP Loop (dp_rx_process) */
8754 #define DP_RX_REAP_LOOP_PKT_LIMIT_MAX 1024
8755 
8756 static
8757 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
8758 					struct cdp_config_params *params)
8759 {
8760 	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit =
8761 				params->tx_comp_loop_pkt_limit;
8762 
8763 	if (params->tx_comp_loop_pkt_limit < DP_TX_COMP_LOOP_PKT_LIMIT_MAX)
8764 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = true;
8765 	else
8766 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = false;
8767 
8768 	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit =
8769 				params->rx_reap_loop_pkt_limit;
8770 
8771 	if (params->rx_reap_loop_pkt_limit < DP_RX_REAP_LOOP_PKT_LIMIT_MAX)
8772 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = true;
8773 	else
8774 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = false;
8775 
8776 	soc->wlan_cfg_ctx->rx_hp_oos_update_limit =
8777 				params->rx_hp_oos_update_limit;
8778 
8779 	dp_info("tx_comp_loop_pkt_limit %u tx_comp_enable_eol_data_check %u rx_reap_loop_pkt_limit %u rx_enable_eol_data_check %u rx_hp_oos_update_limit %u",
8780 		soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit,
8781 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check,
8782 		soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit,
8783 		soc->wlan_cfg_ctx->rx_enable_eol_data_check,
8784 		soc->wlan_cfg_ctx->rx_hp_oos_update_limit);
8785 }
8786 #else
8787 static inline
8788 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
8789 					struct cdp_config_params *params)
8790 { }
8791 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
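
/*
 * Example of the end-of-loop gating above (the limits are illustrative
 * ini values, not defaults taken from this file): an ini setting of
 * tx_comp_loop_pkt_limit = 64 is below DP_TX_COMP_LOOP_PKT_LIMIT_MAX, so
 * tx_comp_enable_eol_data_check becomes true and the tx completion
 * handler re-checks the ring for late arrivals before exiting; a setting
 * of 1024 or more turns that extra check off. The rx reap limit is gated
 * the same way against DP_RX_REAP_LOOP_PKT_LIMIT_MAX.
 */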
8792 
8793 /**
8794  * dp_update_config_parameters() - API to store datapath
8795  *                            config parameters
8796  * @psoc: soc handle
8797  * @params: ini parameter handle
8798  *
8799  * Return: status
8800  */
8801 static
8802 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
8803 				struct cdp_config_params *params)
8804 {
8805 	struct dp_soc *soc = (struct dp_soc *)psoc;
8806 
8807 	if (!(soc)) {
8808 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8809 				"%s: Invalid handle", __func__);
8810 		return QDF_STATUS_E_INVAL;
8811 	}
8812 
8813 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
8814 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
8815 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
8816 	soc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload =
8817 				params->nan_tcp_udp_checksumoffload;
8818 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
8819 				params->tcp_udp_checksumoffload;
8820 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
8821 	soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
8822 	soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
8823 
8824 	dp_update_rx_soft_irq_limit_params(soc, params);
8825 	dp_update_flow_control_parameters(soc, params);
8826 
8827 	return QDF_STATUS_SUCCESS;
8828 }
8829 
8830 static struct cdp_wds_ops dp_ops_wds = {
8831 	.vdev_set_wds = dp_vdev_set_wds,
8832 #ifdef WDS_VENDOR_EXTENSION
8833 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
8834 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
8835 #endif
8836 };
8837 
8838 /*
8839  * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
8840  * @soc_hdl - datapath soc handle
8841  * @vdev_id - virtual interface id
8842  * @callback - callback function
8843  * @ctxt: callback context
8844  *
8845  */
8846 static void
8847 dp_txrx_data_tx_cb_set(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
8848 		       ol_txrx_data_tx_cb callback, void *ctxt)
8849 {
8850 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8851 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
8852 
8853 	if (!vdev)
8854 		return;
8855 
8856 	vdev->tx_non_std_data_callback.func = callback;
8857 	vdev->tx_non_std_data_callback.ctxt = ctxt;
8858 }
8859 
8860 /**
8861  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
8862  * @soc: datapath soc handle
8863  * @pdev_id: id of datapath pdev handle
8864  *
8865  * Return: opaque pointer to dp txrx handle
8866  */
8867 static void *dp_pdev_get_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id)
8868 {
8869 	struct dp_pdev *pdev =
8870 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8871 						   pdev_id);
8872 	if (qdf_unlikely(!pdev))
8873 		return NULL;
8874 
8875 	return pdev->dp_txrx_handle;
8876 }
8877 
8878 /**
8879  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
8880  * @soc: datapath soc handle
8881  * @pdev_id: id of datapath pdev handle
8882  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
8883  *
8884  * Return: void
8885  */
8886 static void
8887 dp_pdev_set_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id,
8888 			   void *dp_txrx_hdl)
8889 {
8890 	struct dp_pdev *pdev =
8891 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8892 						   pdev_id);
8893 
8894 	if (!pdev)
8895 		return;
8896 
8897 	pdev->dp_txrx_handle = dp_txrx_hdl;
8898 }
8899 
8900 /**
8901  * dp_vdev_get_dp_ext_handle() - get dp handle from vdev
8902  * @soc: datapath soc handle
8903  * @vdev_id: vdev id
8904  *
8905  * Return: opaque pointer to dp txrx handle
8906  */
8907 static void *dp_vdev_get_dp_ext_handle(ol_txrx_soc_handle soc, uint8_t vdev_id)
8908 {
8909 	struct dp_vdev *vdev =
8910 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
8911 						   vdev_id);
8912 
8913 	if (!vdev)
8914 		return NULL;
8915 
8916 	return vdev->vdev_dp_ext_handle;
8917 }
8918 
8919 /**
8920  * dp_vdev_set_dp_ext_handle() - set dp handle in vdev
8921  * @soc: datapath soc handle
8922  * @vdev_id: vdev id
8923  * @size: size of advance dp handle
8924  *
8925  * Return: QDF_STATUS
8926  */
8927 static QDF_STATUS
8928 dp_vdev_set_dp_ext_handle(ol_txrx_soc_handle soc, uint8_t vdev_id,
8929 			  uint16_t size)
8930 {
8931 	struct dp_vdev *vdev =
8932 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
8933 						   vdev_id);
8934 	void *dp_ext_handle;
8935 
8936 	if (!vdev)
8937 		return QDF_STATUS_E_FAILURE;
8938 
8939 	dp_ext_handle = qdf_mem_malloc(size);
8940 
8941 	if (!dp_ext_handle)
8942 		return QDF_STATUS_E_FAILURE;
8943 
8944 	vdev->vdev_dp_ext_handle = dp_ext_handle;
8945 	return QDF_STATUS_SUCCESS;
8946 }
8947 
8948 /**
8949  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
8950  * @soc_handle: datapath soc handle
8951  *
8952  * Return: opaque pointer to external dp (non-core DP)
8953  */
8954 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
8955 {
8956 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8957 
8958 	return soc->external_txrx_handle;
8959 }
8960 
8961 /**
8962  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
8963  * @soc_handle: datapath soc handle
8964  * @txrx_handle: opaque pointer to external dp (non-core DP)
8965  *
8966  * Return: void
8967  */
8968 static void
8969 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
8970 {
8971 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8972 
8973 	soc->external_txrx_handle = txrx_handle;
8974 }
8975 
8976 /**
8977  * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping
8978  * @soc_hdl: datapath soc handle
8979  * @pdev_id: id of the datapath pdev handle
8980  * @lmac_id: lmac id
8981  *
8982  * Return: QDF_STATUS
8983  */
8984 static QDF_STATUS
8985 dp_soc_map_pdev_to_lmac
8986 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
8987 	 uint32_t lmac_id)
8988 {
8989 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8990 
8991 	wlan_cfg_set_hw_mac_idx(soc->wlan_cfg_ctx,
8992 				pdev_id,
8993 				lmac_id);
8994 
8995 	/*Set host PDEV ID for lmac_id*/
8996 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
8997 			      pdev_id,
8998 			      lmac_id);
8999 
9000 	return QDF_STATUS_SUCCESS;
9001 }
9002 
9003 /**
9004  * dp_soc_handle_pdev_mode_change() - Update pdev to lmac mapping
9005  * @soc_hdl: datapath soc handle
9006  * @pdev_id: id of the datapath pdev handle
9007  * @lmac_id: lmac id
9008  *
9009  * In the event of a dynamic mode change, update the pdev to lmac mapping
9010  *
9011  * Return: QDF_STATUS
9012  */
9013 static QDF_STATUS
9014 dp_soc_handle_pdev_mode_change
9015 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
9016 	 uint32_t lmac_id)
9017 {
9018 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9019 	struct dp_vdev *vdev = NULL;
9020 	uint8_t hw_pdev_id, mac_id;
9021 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc,
9022 								  pdev_id);
9023 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
9024 
9025 	if (qdf_unlikely(!pdev))
9026 		return QDF_STATUS_E_FAILURE;
9027 
9028 	pdev->lmac_id = lmac_id;
9029 	dp_info("mode change %d %d", pdev->pdev_id, pdev->lmac_id);
9030 
9031 	/*Set host PDEV ID for lmac_id*/
9032 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
9033 			      pdev->pdev_id,
9034 			      lmac_id);
9035 
9036 	hw_pdev_id =
9037 		dp_get_target_pdev_id_for_host_pdev_id(soc,
9038 						       pdev->pdev_id);
9039 
9040 	/*
9041 	 * When NSS offload is enabled, send the pdev_id->lmac_id and
9042 	 * pdev_id->hw_pdev_id mappings to the NSS FW
9043 	 */
9044 	if (nss_config) {
9045 		mac_id = pdev->lmac_id;
9046 		if (soc->cdp_soc.ol_ops->pdev_update_lmac_n_target_pdev_id)
9047 			soc->cdp_soc.ol_ops->
9048 				pdev_update_lmac_n_target_pdev_id(
9049 				soc->ctrl_psoc,
9050 				&pdev_id, &mac_id, &hw_pdev_id);
9051 	}
9052 
9053 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
9054 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
9055 		HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
9056 						hw_pdev_id);
9057 		vdev->lmac_id = pdev->lmac_id;
9058 	}
9059 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
9060 
9061 	return QDF_STATUS_SUCCESS;
9062 }
9063 
9064 /**
9065  * dp_soc_set_pdev_status_down() - set pdev down/up status
9066  * @soc: datapath soc handle
9067  * @pdev_id: id of datapath pdev handle
9068  * @is_pdev_down: pdev down/up status
9069  *
9070  * Return: QDF_STATUS
9071  */
9072 static QDF_STATUS
9073 dp_soc_set_pdev_status_down(struct cdp_soc_t *soc, uint8_t pdev_id,
9074 			    bool is_pdev_down)
9075 {
9076 	struct dp_pdev *pdev =
9077 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9078 						   pdev_id);
9079 	if (!pdev)
9080 		return QDF_STATUS_E_FAILURE;
9081 
9082 	pdev->is_pdev_down = is_pdev_down;
9083 	return QDF_STATUS_SUCCESS;
9084 }
9085 
9086 /**
9087  * dp_get_cfg_capabilities() - get dp capabilities
9088  * @soc_handle: datapath soc handle
9089  * @dp_caps: enum for dp capabilities
9090  *
9091  * Return: true if the given dp capability is enabled
9092  */
9093 static bool
9094 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
9095 			enum cdp_capabilities dp_caps)
9096 {
9097 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9098 
9099 	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
9100 }
9101 
9102 #ifdef FEATURE_AST
9103 static QDF_STATUS
9104 dp_peer_teardown_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
9105 		       uint8_t *peer_mac)
9106 {
9107 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9108 	QDF_STATUS status = QDF_STATUS_SUCCESS;
9109 	struct dp_peer *peer =
9110 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id);
9111 
9112 	/* Peer can be null for monitor vap mac address */
9113 	if (!peer) {
9114 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
9115 			  "%s: Invalid peer\n", __func__);
9116 		return QDF_STATUS_E_FAILURE;
9117 	}
9118 	/*
9119 	 * For the BSS peer, a new peer is not created on alloc_node if a
9120 	 * peer with the same address already exists; instead, the refcnt of
9121 	 * the existing peer is incremented. Correspondingly, the delete path
9122 	 * only decrements the refcnt, and the peer is deleted only when all
9123 	 * references are released. So delete_in_progress should not be set
9124 	 * for the bss_peer unless only 3 references remain (peer map
9125 	 * reference, peer hash table reference and the above local reference).
9126 	 */
9127 	if ((peer->vdev->opmode == wlan_op_mode_ap) && peer->bss_peer &&
9128 	    (qdf_atomic_read(&peer->ref_cnt) > 3)) {
9129 		status =  QDF_STATUS_E_FAILURE;
9130 		goto fail;
9131 	}
9132 
9133 	qdf_spin_lock_bh(&soc->ast_lock);
9134 	peer->delete_in_progress = true;
9135 	dp_peer_delete_ast_entries(soc, peer);
9136 	qdf_spin_unlock_bh(&soc->ast_lock);
9137 
9138 fail:
9139 	if (peer)
9140 		dp_peer_unref_delete(peer);
9141 	return status;
9142 }
9143 #endif
9144 
9145 #ifdef ATH_SUPPORT_NAC_RSSI
9146 /**
9147  * dp_vdev_get_neighbour_rssi(): Retrieve stored RSSI for a configured NAC
9148  * @soc: DP soc handle
9149  * @vdev_id: id of DP vdev handle
9150  * @mac_addr: neighbour mac
9151  * @rssi: rssi value
9152  *
9153  * Return: QDF_STATUS_SUCCESS if the neighbour was found, error otherwise.
9154  */
9155 static QDF_STATUS  dp_vdev_get_neighbour_rssi(struct cdp_soc_t *soc,
9156 					      uint8_t vdev_id,
9157 					      char *mac_addr,
9158 					      uint8_t *rssi)
9159 {
9160 	struct dp_vdev *vdev =
9161 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
9162 						   vdev_id);
9163 	struct dp_pdev *pdev;
9164 	struct dp_neighbour_peer *peer = NULL;
9165 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
9166 
9167 	if (!vdev)
9168 		return status;
9169 
9170 	pdev = vdev->pdev;
9171 	*rssi = 0;
9172 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
9173 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
9174 		      neighbour_peer_list_elem) {
9175 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
9176 				mac_addr, QDF_MAC_ADDR_SIZE) == 0) {
9177 			*rssi = peer->rssi;
9178 			status = QDF_STATUS_SUCCESS;
9179 			break;
9180 		}
9181 	}
9182 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
9183 	return status;
9184 }
9185 
9186 static QDF_STATUS
9187 dp_config_for_nac_rssi(struct cdp_soc_t *cdp_soc,
9188 		       uint8_t vdev_id,
9189 		       enum cdp_nac_param_cmd cmd, char *bssid,
9190 		       char *client_macaddr,
9191 		       uint8_t chan_num)
9192 {
9193 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
9194 	struct dp_vdev *vdev =
9195 		dp_get_vdev_from_soc_vdev_id_wifi3(soc,
9196 						   vdev_id);
9197 	struct dp_pdev *pdev;
9198 
9199 	if (!vdev)
9200 		return QDF_STATUS_E_FAILURE;
9201 
9202 	pdev = (struct dp_pdev *)vdev->pdev;
9203 	pdev->nac_rssi_filtering = 1;
9204 	/* Store address of NAC (neighbour peer) which will be checked
9205 	 * against TA of received packets.
9206 	 */
9207 
9208 	if (cmd == CDP_NAC_PARAM_ADD) {
9209 		dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id,
9210 						 DP_NAC_PARAM_ADD,
9211 						 (uint8_t *)client_macaddr);
9212 	} else if (cmd == CDP_NAC_PARAM_DEL) {
9213 		dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id,
9214 						 DP_NAC_PARAM_DEL,
9215 						 (uint8_t *)client_macaddr);
9216 	}
9217 
9218 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
9219 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
9220 			(soc->ctrl_psoc, pdev->pdev_id,
9221 			 vdev->vdev_id, cmd, bssid, client_macaddr);
9222 
9223 	return QDF_STATUS_SUCCESS;
9224 }
9225 #endif
9226 
9227 /**
9228  * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering
9229  * for pktlog
9230  * @soc: cdp_soc handle
9231  * @pdev_id: id of dp pdev handle
9232  * @mac_addr: Peer mac address
9233  * @enb_dsb: Enable or disable peer based filtering
9234  *
9235  * Return: QDF_STATUS
9236  */
9237 static int
9238 dp_enable_peer_based_pktlog(struct cdp_soc_t *soc, uint8_t pdev_id,
9239 			    uint8_t *mac_addr, uint8_t enb_dsb)
9240 {
9241 	struct dp_peer *peer;
9242 	struct dp_pdev *pdev =
9243 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9244 						   pdev_id);
9245 
9246 	if (!pdev) {
9247 		dp_err("Invalid Pdev for pdev_id %d", pdev_id);
9248 		return QDF_STATUS_E_FAILURE;
9249 	}
9250 
9251 	peer = (struct dp_peer *)dp_find_peer_by_addr((struct cdp_pdev *)pdev,
9252 						      mac_addr);
9253 
9254 	if (!peer) {
9255 		dp_err("Invalid Peer");
9256 		return QDF_STATUS_E_FAILURE;
9257 	}
9258 
9259 	peer->peer_based_pktlog_filter = enb_dsb;
9260 	pdev->dp_peer_based_pktlog = enb_dsb;
9261 
9262 	return QDF_STATUS_SUCCESS;
9263 }
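
/*
 * Illustrative usage of the filter above (not part of the original file;
 * sta_mac is a hypothetical client address supplied by the caller):
 *
 *	uint8_t sta_mac[QDF_MAC_ADDR_SIZE] = { 0x00, 0x03, 0x7f, 0x01, 0x02, 0x03 };
 *
 *	dp_enable_peer_based_pktlog(soc, pdev_id, sta_mac, 1);	(enable)
 *	dp_enable_peer_based_pktlog(soc, pdev_id, sta_mac, 0);	(disable)
 */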
9264 
9265 #ifndef WLAN_SUPPORT_RX_TAG_STATISTICS
9266 /**
9267  * dp_dump_pdev_rx_protocol_tag_stats() - dump the number of packets tagged
9268  * for a given protocol type (RX_PROTOCOL_TAG_ALL indicates all protocols)
9269  * @soc: cdp_soc handle
9270  * @pdev_id: id of cdp_pdev handle
9271  * @protocol_type: protocol type for which stats should be displayed
9272  *
9273  * Return: none
9274  */
9275 static inline void
9276 dp_dump_pdev_rx_protocol_tag_stats(struct cdp_soc_t  *soc, uint8_t pdev_id,
9277 				   uint16_t protocol_type)
9278 {
9279 }
9280 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
9281 
9282 #ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
9283 /**
9284  * dp_update_pdev_rx_protocol_tag() - Add/remove a protocol tag that should
9285  * be applied to packets of the desired protocol type
9286  * @soc: soc handle
9287  * @pdev_id: id of cdp_pdev handle
9288  * @enable_rx_protocol_tag: bitmask that indicates which protocol types
9289  * are enabled for tagging. Zero disables the feature; a non-zero value
9290  * enables it.
9291  * @protocol_type: new protocol type for which the tag is being added
9292  * @tag: user configured tag for the new protocol
9293  *
9294  * Return: Success
9295  */
9296 static inline QDF_STATUS
9297 dp_update_pdev_rx_protocol_tag(struct cdp_soc_t  *soc, uint8_t pdev_id,
9298 			       uint32_t enable_rx_protocol_tag,
9299 			       uint16_t protocol_type,
9300 			       uint16_t tag)
9301 {
9302 	return QDF_STATUS_SUCCESS;
9303 }
9304 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
9305 
9306 #ifndef WLAN_SUPPORT_RX_FLOW_TAG
9307 /**
9308  * dp_set_rx_flow_tag() - add/delete a flow
9309  * @cdp_soc: soc handle
9310  * @pdev_id: id of cdp_pdev handle
9311  * @flow_info: flow tuple that is to be added to/deleted from flow search table
9312  *
9313  * Return: Success
9314  */
9315 static inline QDF_STATUS
9316 dp_set_rx_flow_tag(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
9317 		   struct cdp_rx_flow_info *flow_info)
9318 {
9319 	return QDF_STATUS_SUCCESS;
9320 }

9321 /**
9322  * dp_dump_rx_flow_tag_stats() - dump the number of packets tagged for a
9323  * given flow 5-tuple
9324  * @cdp_soc: soc handle
9325  * @pdev_id: id of cdp_pdev handle
9326  * @flow_info: flow 5-tuple for which stats should be displayed
9327  *
9328  * Return: Success
9329  */
9330 static inline QDF_STATUS
9331 dp_dump_rx_flow_tag_stats(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
9332 			  struct cdp_rx_flow_info *flow_info)
9333 {
9334 	return QDF_STATUS_SUCCESS;
9335 }
9336 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
9337 
9338 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
9339 					   uint32_t max_peers,
9340 					   uint32_t max_ast_index,
9341 					   bool peer_map_unmap_v2)
9342 {
9343 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9344 
9345 	soc->max_peers = max_peers;
9346 
9347 	qdf_print("%s max_peers %u, max_ast_index: %u\n",
9348 		  __func__, max_peers, max_ast_index);
9349 	wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);
9350 
9351 	if (dp_peer_find_attach(soc))
9352 		return QDF_STATUS_E_FAILURE;
9353 
9354 	soc->is_peer_map_unmap_v2 = peer_map_unmap_v2;
9355 	soc->peer_map_attach_success = TRUE;
9356 
9357 	return QDF_STATUS_SUCCESS;
9358 }
9359 
9360 static QDF_STATUS dp_soc_set_param(struct cdp_soc_t  *soc_hdl,
9361 				   enum cdp_soc_param_t param,
9362 				   uint32_t value)
9363 {
9364 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9365 
9366 	switch (param) {
9367 	case DP_SOC_PARAM_MSDU_EXCEPTION_DESC:
9368 		soc->num_msdu_exception_desc = value;
9369 		dp_info("num_msdu_exception_desc %u",
9370 			value);
9371 		break;
9372 	default:
9373 		dp_info("not handled param %d", param);
9374 		break;
9375 	}
9376 
9377 	return QDF_STATUS_SUCCESS;
9378 }
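
/*
 * Illustrative caller sketch (assumption, not from this file): the control
 * path reaches dp_soc_set_param() through the common ops table registered
 * below; the value 64 is an arbitrary example count:
 *
 *	struct cdp_soc_t *cdp = dp_soc_to_cdp_soc_t(soc);
 *
 *	cdp->ops->cmn_drv_ops->set_soc_param(cdp,
 *			DP_SOC_PARAM_MSDU_EXCEPTION_DESC, 64);
 */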
9379 
9380 static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
9381 				      void *stats_ctx)
9382 {
9383 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9384 
9385 	soc->rate_stats_ctx = (struct cdp_soc_rate_stats_ctx *)stats_ctx;
9386 }
9387 
9388 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9389 static QDF_STATUS dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
9390 					  uint8_t pdev_id)
9391 {
9392 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9393 	struct dp_vdev *vdev = NULL;
9394 	struct dp_peer *peer = NULL;
9395 	struct dp_pdev *pdev =
9396 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9397 						   pdev_id);
9398 
9399 	if (!pdev)
9400 		return QDF_STATUS_E_FAILURE;
9401 
9402 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
9403 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
9404 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
9405 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
9406 			if (peer && !peer->bss_peer)
9407 				dp_wdi_event_handler(
9408 					WDI_EVENT_FLUSH_RATE_STATS_REQ,
9409 					soc, peer->wlanstats_ctx,
9410 					peer->peer_id,
9411 					WDI_NO_VAL, pdev_id);
9412 		}
9413 	}
9414 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
9415 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
9416 
9417 	return QDF_STATUS_SUCCESS;
9418 }
9419 #else
9420 static inline QDF_STATUS
9421 dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
9422 			uint8_t pdev_id)
9423 {
9424 	return QDF_STATUS_SUCCESS;
9425 }
9426 #endif
9427 
9428 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9429 static QDF_STATUS dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
9430 					   uint8_t pdev_id,
9431 					   void *buf)
9432 {
9433 	dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS,
9434 			     (struct dp_soc *)soc, buf, HTT_INVALID_PEER,
9435 			     WDI_NO_VAL, pdev_id);
9436 	return QDF_STATUS_SUCCESS;
9437 }
9438 #else
9439 static inline QDF_STATUS
9440 dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
9441 			 uint8_t pdev_id,
9442 			 void *buf)
9443 {
9444 	return QDF_STATUS_SUCCESS;
9445 }
9446 #endif
9447 
9448 static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle)
9449 {
9450 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9451 
9452 	return soc->rate_stats_ctx;
9453 }
9454 
9455 /**
9456  * dp_get_cfg() - get dp cfg
9457  * @soc: cdp soc handle
9458  * @cfg: cfg enum
9459  *
9460  * Return: cfg value
9461  */
9462 static uint32_t dp_get_cfg(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg)
9463 {
9464 	struct dp_soc *dpsoc = (struct dp_soc *)soc;
9465 	uint32_t value = 0;
9466 
9467 	switch (cfg) {
9468 	case cfg_dp_enable_data_stall:
9469 		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
9470 		break;
9471 	case cfg_dp_enable_nan_ip_tcp_udp_checksum_offload:
9472 		value = dpsoc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload;
9473 		break;
9474 	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
9475 		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
9476 		break;
9477 	case cfg_dp_tso_enable:
9478 		value = dpsoc->wlan_cfg_ctx->tso_enabled;
9479 		break;
9480 	case cfg_dp_lro_enable:
9481 		value = dpsoc->wlan_cfg_ctx->lro_enabled;
9482 		break;
9483 	case cfg_dp_gro_enable:
9484 		value = dpsoc->wlan_cfg_ctx->gro_enabled;
9485 		break;
9486 	case cfg_dp_tx_flow_start_queue_offset:
9487 		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
9488 		break;
9489 	case cfg_dp_tx_flow_stop_queue_threshold:
9490 		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
9491 		break;
9492 	case cfg_dp_disable_intra_bss_fwd:
9493 		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
9494 		break;
9495 	case cfg_dp_pktlog_buffer_size:
9496 		value = dpsoc->wlan_cfg_ctx->pktlog_buffer_size;
9497 		break;
9498 	default:
9499 		value = 0;
9500 	}
9501 
9502 	return value;
9503 }
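
/*
 * Illustrative usage (hypothetical caller): a consumer probes one cfg item
 * through the same ops table that exposes dp_get_cfg() as .txrx_get_cfg:
 *
 *	if (cdp_soc->ops->cmn_drv_ops->txrx_get_cfg(cdp_soc,
 *						    cfg_dp_gro_enable))
 *		... take the GRO path on rx ...
 */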
9504 
9505 #ifdef PEER_FLOW_CONTROL
9506 /**
9507  * dp_tx_flow_ctrl_configure_pdev() - Configure flow control params
9508  * @soc_handle: datapath soc handle
9509  * @pdev_id: id of datapath pdev handle
9510  * @param: ol ath params
9511  * @value: value of the flag
9512  * @buff: Buffer to be passed
9513  *
9514  * This function mirrors the legacy implementation, in which a single
9515  * function is used both to display stats and to update pdev params.
9516  *
9517  * Return: 0 for success. nonzero for failure.
9518  */
9519 static uint32_t dp_tx_flow_ctrl_configure_pdev(struct cdp_soc_t *soc_handle,
9520 					       uint8_t pdev_id,
9521 					       enum _dp_param_t param,
9522 					       uint32_t value, void *buff)
9523 {
9524 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9525 	struct dp_pdev *pdev =
9526 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9527 						   pdev_id);
9528 
9529 	if (qdf_unlikely(!pdev))
9530 		return 1;
9531 
9532 	soc = pdev->soc;
9533 	if (!soc)
9534 		return 1;
9535 
9536 	switch (param) {
9537 #ifdef QCA_ENH_V3_STATS_SUPPORT
9538 	case DP_PARAM_VIDEO_DELAY_STATS_FC:
9539 		if (value)
9540 			pdev->delay_stats_flag = true;
9541 		else
9542 			pdev->delay_stats_flag = false;
9543 		break;
9544 	case DP_PARAM_VIDEO_STATS_FC:
9545 		qdf_print("------- TID Stats ------\n");
9546 		dp_pdev_print_tid_stats(pdev);
9547 		qdf_print("------ Delay Stats ------\n");
9548 		dp_pdev_print_delay_stats(pdev);
9549 		break;
9550 #endif
9551 	case DP_PARAM_TOTAL_Q_SIZE:
9552 		{
9553 			uint32_t tx_min, tx_max;
9554 
9555 			tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx);
9556 			tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
9557 
9558 			if (!buff) {
9559 				if ((value >= tx_min) && (value <= tx_max)) {
9560 					pdev->num_tx_allowed = value;
9561 				} else {
9562 					QDF_TRACE(QDF_MODULE_ID_DP,
9563 						  QDF_TRACE_LEVEL_INFO,
9564 						  "Failed to update num_tx_allowed, Q_min = %d Q_max = %d",
9565 						  tx_min, tx_max);
9566 					break;
9567 				}
9568 			} else {
9569 				*(int *)buff = pdev->num_tx_allowed;
9570 			}
9571 		}
9572 		break;
9573 	default:
9574 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
9575 			  "%s: not handled param %d ", __func__, param);
9576 		break;
9577 	}
9578 
9579 	return 0;
9580 }
9581 #endif
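
/*
 * Illustrative get/set pairing for the dual-purpose entry point above
 * (hypothetical values): a NULL @buff applies @value, while a non-NULL
 * @buff reads the current limit back:
 *
 *	int cur_q_size = 0;
 *
 *	dp_tx_flow_ctrl_configure_pdev(soc, pdev_id,
 *				       DP_PARAM_TOTAL_Q_SIZE, 2048, NULL);
 *	dp_tx_flow_ctrl_configure_pdev(soc, pdev_id,
 *				       DP_PARAM_TOTAL_Q_SIZE, 0, &cur_q_size);
 */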
9582 
9583 /**
9584  * dp_set_pdev_pcp_tid_map_wifi3(): update pcp tid map in pdev
9585  * @psoc: dp soc handle
9586  * @pdev_id: id of DP_PDEV handle
9587  * @pcp: pcp value
9588  * @tid: tid value passed by the user
9589  *
9590  * Return: QDF_STATUS_SUCCESS on success
9591  */
9592 static QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
9593 						uint8_t pdev_id,
9594 						uint8_t pcp, uint8_t tid)
9595 {
9596 	struct dp_soc *soc = (struct dp_soc *)psoc;
9597 
9598 	soc->pcp_tid_map[pcp] = tid;
9599 
9600 	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
9601 	return QDF_STATUS_SUCCESS;
9602 }
9603 
9604 /**
9605  * dp_set_vdev_pcp_tid_map_wifi3(): update pcp tid map in vdev
9606  * @soc: DP soc handle
9607  * @vdev_id: id of DP_VDEV handle
9608  * @pcp: pcp value
9609  * @tid: tid value passed by the user
9610  *
9611  * Return: QDF_STATUS_SUCCESS on success
9612  */
9613 static QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc,
9614 						uint8_t vdev_id,
9615 						uint8_t pcp, uint8_t tid)
9616 {
9617 	struct dp_vdev *vdev =
9618 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
9619 						   vdev_id);
9620 
9621 	if (!vdev)
9622 		return QDF_STATUS_E_FAILURE;
9623 
9624 	vdev->pcp_tid_map[pcp] = tid;
9625 
9626 	return QDF_STATUS_SUCCESS;
9627 }
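
/*
 * Illustrative mapping update (hypothetical values): steer PCP 6 to TID 5
 * soc-wide, then override the same PCP for one vdev:
 *
 *	dp_set_pdev_pcp_tid_map_wifi3(psoc, pdev_id, 6, 5);
 *	dp_set_vdev_pcp_tid_map_wifi3(soc, vdev_id, 6, 4);
 */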
9628 
9629 #ifdef QCA_SUPPORT_FULL_MON
9630 static inline QDF_STATUS
9631 dp_config_full_mon_mode(struct cdp_soc_t *soc_handle,
9632 			uint8_t val)
9633 {
9634 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9635 
9636 	soc->full_mon_mode = val;
9637 	qdf_alert("Configure full monitor mode val: %d", val);
9638 
9639 	return QDF_STATUS_SUCCESS;
9640 }
9641 #else
9642 static inline QDF_STATUS
9643 dp_config_full_mon_mode(struct cdp_soc_t *soc_handle,
9644 			uint8_t val)
9645 {
9646 	return QDF_STATUS_SUCCESS;
9647 }
9648 #endif
9649 
9650 static struct cdp_cmn_ops dp_ops_cmn = {
9651 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
9652 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
9653 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
9654 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
9655 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
9656 	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
9657 	.txrx_peer_create = dp_peer_create_wifi3,
9658 	.txrx_peer_setup = dp_peer_setup_wifi3,
9659 #ifdef FEATURE_AST
9660 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
9661 #else
9662 	.txrx_peer_teardown = NULL,
9663 #endif
9664 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
9665 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
9666 	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
9667 	.txrx_peer_get_ast_info_by_pdev =
9668 		dp_peer_get_ast_info_by_pdevid_wifi3,
9669 	.txrx_peer_ast_delete_by_soc =
9670 		dp_peer_ast_entry_del_by_soc,
9671 	.txrx_peer_ast_delete_by_pdev =
9672 		dp_peer_ast_entry_del_by_pdev,
9673 	.txrx_peer_delete = dp_peer_delete_wifi3,
9674 	.txrx_vdev_register = dp_vdev_register_wifi3,
9675 	.txrx_soc_detach = dp_soc_detach_wifi3,
9676 	.txrx_soc_deinit = dp_soc_deinit_wifi3,
9677 	.txrx_soc_init = dp_soc_init_wifi3,
9678 	.txrx_tso_soc_attach = dp_tso_soc_attach,
9679 	.txrx_tso_soc_detach = dp_tso_soc_detach,
9680 	.txrx_pdev_init = dp_pdev_init_wifi3,
9681 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
9682 	.txrx_get_mon_vdev_from_pdev = dp_get_mon_vdev_from_pdev_wifi3,
9683 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
9684 	.txrx_ath_getstats = dp_get_device_stats,
9685 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
9686 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
9687 	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
9688 	.delba_process = dp_delba_process_wifi3,
9689 	.set_addba_response = dp_set_addba_response,
9690 	.flush_cache_rx_queue = NULL,
9691 	/* TODO: get APIs for dscp-tid need to be added */
9692 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
9693 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
9694 	.txrx_get_total_per = dp_get_total_per,
9695 	.txrx_stats_request = dp_txrx_stats_request,
9696 	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
9697 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
9698 	.display_stats = dp_txrx_dump_stats,
9699 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
9700 	.txrx_intr_detach = dp_soc_interrupt_detach,
9701 	.set_pn_check = dp_set_pn_check_wifi3,
9702 	.set_key_sec_type = dp_set_key_sec_type_wifi3,
9703 	.update_config_parameters = dp_update_config_parameters,
9704 	/* TODO: Add other functions */
9705 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
9706 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
9707 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
9708 	.get_vdev_dp_ext_txrx_handle = dp_vdev_get_dp_ext_handle,
9709 	.set_vdev_dp_ext_txrx_handle = dp_vdev_set_dp_ext_handle,
9710 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
9711 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
9712 	.map_pdev_to_lmac = dp_soc_map_pdev_to_lmac,
9713 	.handle_mode_change = dp_soc_handle_pdev_mode_change,
9714 	.set_pdev_status_down = dp_soc_set_pdev_status_down,
9715 	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
9716 	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
9717 	.tx_send = dp_tx_send,
9718 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
9719 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
9720 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
9721 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
9722 	.set_soc_param = dp_soc_set_param,
9723 	.txrx_get_os_rx_handles_from_vdev =
9724 					dp_get_os_rx_handles_from_vdev_wifi3,
9725 	.delba_tx_completion = dp_delba_tx_completion_wifi3,
9726 	.get_dp_capabilities = dp_get_cfg_capabilities,
9727 	.txrx_get_cfg = dp_get_cfg,
9728 	.set_rate_stats_ctx = dp_soc_set_rate_stats_ctx,
9729 	.get_rate_stats_ctx = dp_soc_get_rate_stats_ctx,
9730 	.txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats,
9731 	.txrx_flush_rate_stats_request = dp_flush_rate_stats_req,
9732 
9733 	.set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3,
9734 	.set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3,
9735 
9736 	.txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler,
9737 #ifdef QCA_MULTIPASS_SUPPORT
9738 	.set_vlan_groupkey = dp_set_vlan_groupkey,
9739 #endif
9740 	.get_peer_mac_list = dp_get_peer_mac_list,
9741 	.tx_send_exc = dp_tx_send_exception,
9742 };
9743 
9744 static struct cdp_ctrl_ops dp_ops_ctrl = {
9745 	.txrx_peer_authorize = dp_peer_authorize,
9746 #ifdef VDEV_PEER_PROTOCOL_COUNT
9747 	.txrx_enable_peer_protocol_count = dp_enable_vdev_peer_protocol_count,
9748 	.txrx_set_peer_protocol_drop_mask =
9749 		dp_enable_vdev_peer_protocol_drop_mask,
9750 	.txrx_is_peer_protocol_count_enabled =
9751 		dp_is_vdev_peer_protocol_count_enabled,
9752 	.txrx_get_peer_protocol_drop_mask = dp_get_vdev_peer_protocol_drop_mask,
9753 #endif
9754 	.txrx_set_vdev_param = dp_set_vdev_param,
9755 	.txrx_set_psoc_param = dp_set_psoc_param,
9756 	.txrx_get_psoc_param = dp_get_psoc_param,
9757 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
9758 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
9759 #if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
9760 	.txrx_update_filter_neighbour_peers =
9761 		dp_update_filter_neighbour_peers,
9762 #endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
9763 	.txrx_get_sec_type = dp_get_sec_type,
9764 	.txrx_wdi_event_sub = dp_wdi_event_sub,
9765 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
9766 #ifdef WDI_EVENT_ENABLE
9767 	.txrx_get_pldev = dp_get_pldev,
9768 #endif
9769 	.txrx_set_pdev_param = dp_set_pdev_param,
9770 	.txrx_get_pdev_param = dp_get_pdev_param,
9771 	.txrx_set_peer_param = dp_set_peer_param,
9772 	.txrx_get_peer_param = dp_get_peer_param,
9773 #ifdef VDEV_PEER_PROTOCOL_COUNT
9774 	.txrx_peer_protocol_cnt = dp_peer_stats_update_protocol_cnt,
9775 #endif
9776 #ifdef ATH_SUPPORT_NAC_RSSI
9777 	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
9778 	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
9779 #endif
9780 	.set_key = dp_set_michael_key,
9781 	.txrx_get_vdev_param = dp_get_vdev_param,
9782 	.enable_peer_based_pktlog = dp_enable_peer_based_pktlog,
9783 	.calculate_delay_stats = dp_calculate_delay_stats,
9784 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
9785 	.txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag,
9786 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
9787 	.txrx_dump_pdev_rx_protocol_tag_stats =
9788 				dp_dump_pdev_rx_protocol_tag_stats,
9789 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
9790 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
9791 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
9792 	.txrx_set_rx_flow_tag = dp_set_rx_flow_tag,
9793 	.txrx_dump_rx_flow_tag_stats = dp_dump_rx_flow_tag_stats,
9794 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
9795 #ifdef QCA_MULTIPASS_SUPPORT
9796 	.txrx_peer_set_vlan_id = dp_peer_set_vlan_id,
9797 #endif /*QCA_MULTIPASS_SUPPORT*/
9798 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
9799 	.txrx_update_peer_pkt_capture_params =
9800 		 dp_peer_update_pkt_capture_params,
9801 #endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
9802 };
9803 
9804 static struct cdp_me_ops dp_ops_me = {
9805 #ifdef ATH_SUPPORT_IQUE
9806 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
9807 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
9808 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
9809 #endif
9810 };
9811 
9812 static struct cdp_mon_ops dp_ops_mon = {
9813 	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
9814 	/* Added support for HK advance filter */
9815 	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
9816 	.txrx_deliver_tx_mgmt = dp_deliver_tx_mgmt,
9817 	.config_full_mon_mode = dp_config_full_mon_mode,
9818 };
9819 
9820 static struct cdp_host_stats_ops dp_ops_host_stats = {
9821 	.txrx_per_peer_stats = dp_get_host_peer_stats,
9822 	.get_fw_peer_stats = dp_get_fw_peer_stats,
9823 	.get_htt_stats = dp_get_htt_stats,
9824 #ifdef FEATURE_PERPKT_INFO
9825 	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
9826 	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
9827 #endif /* FEATURE_PERPKT_INFO */
9828 	.txrx_stats_publish = dp_txrx_stats_publish,
9829 	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
9830 	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
9831 	.txrx_get_peer_stats_param = dp_txrx_get_peer_stats_param,
9832 	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
9833 	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
9834 	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
9835 	.txrx_update_vdev_stats = dp_txrx_update_vdev_host_stats,
9836 	/* TODO */
9837 };
9838 
9839 static struct cdp_raw_ops dp_ops_raw = {
9840 	/* TODO */
9841 };
9842 
9843 #ifdef PEER_FLOW_CONTROL
9844 static struct cdp_pflow_ops dp_ops_pflow = {
9845 	dp_tx_flow_ctrl_configure_pdev,
9846 };
9847 #endif /* PEER_FLOW_CONTROL */
9848 
9849 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
9850 static struct cdp_cfr_ops dp_ops_cfr = {
9851 	.txrx_cfr_filter = dp_cfr_filter,
9852 	.txrx_get_cfr_rcc = dp_get_cfr_rcc,
9853 	.txrx_set_cfr_rcc = dp_set_cfr_rcc,
9854 	.txrx_get_cfr_dbg_stats = dp_get_cfr_dbg_stats,
9855 	.txrx_clear_cfr_dbg_stats = dp_clear_cfr_dbg_stats,
9856 	.txrx_enable_mon_reap_timer = dp_enable_mon_reap_timer,
9857 };
9858 #endif
9859 
9860 #ifdef FEATURE_RUNTIME_PM
9861 /**
9862  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
9863  * @soc_hdl: Datapath soc handle
9864  * @pdev_id: id of data path pdev handle
9865  *
9866  * DP is ready to runtime suspend if there are no pending TX packets.
9867  *
9868  * Return: QDF_STATUS
9869  */
9870 static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
9871 {
9872 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9873 	struct dp_pdev *pdev;
9874 
9875 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
9876 	if (!pdev) {
9877 		dp_err("pdev is NULL");
9878 		return QDF_STATUS_E_INVAL;
9879 	}
9880 
9881 	/* Abort if there are any pending TX packets */
9882 	if (dp_get_tx_pending(dp_pdev_to_cdp_pdev(pdev)) > 0) {
9883 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
9884 			  FL("Abort suspend due to pending TX packets"));
9885 		return QDF_STATUS_E_AGAIN;
9886 	}
9887 
9888 	if (soc->intr_mode == DP_INTR_POLL)
9889 		qdf_timer_stop(&soc->int_timer);
9890 
9891 	return QDF_STATUS_SUCCESS;
9892 }
9893 
9894 /**
9895  * dp_flush_ring_hptp() - Update the ring shadow register HP/TP
9896  *			  address on runtime resume
9897  * @soc: DP soc context
9898  * @hal_srng: srng ring handle whose cached pointers are flushed
9899  *
9900  * Return: None
9901  */
9902 static
9903 void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
9904 {
9905 	if (hal_srng && hal_srng_get_clear_event(hal_srng,
9906 						 HAL_SRNG_FLUSH_EVENT)) {
9907 		/* Acquire the lock */
9908 		hal_srng_access_start(soc->hal_soc, hal_srng);
9909 
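		/* access_end writes the cached HP/TP out to the shadow register */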
9910 		hal_srng_access_end(soc->hal_soc, hal_srng);
9911 
9912 		hal_srng_set_flush_last_ts(hal_srng);
9913 	}
9914 }
9915 
9916 /**
9917  * dp_runtime_resume() - ensure DP is ready to runtime resume
9918  * @soc_hdl: Datapath soc handle
9919  * @pdev_id: id of data path pdev handle
9920  *
9921  * Resume DP for runtime PM.
9922  *
9923  * Return: QDF_STATUS
9924  */
9925 static QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
9926 {
9927 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9928 	int i;
9929 
9930 	if (soc->intr_mode == DP_INTR_POLL)
9931 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
9932 
9933 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
9934 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
9935 	}
9936 
9937 	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
9938 
9939 	return QDF_STATUS_SUCCESS;
9940 }
9941 #endif /* FEATURE_RUNTIME_PM */
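
/*
 * Illustrative runtime-PM contract (assumption about the caller, not from
 * this file): QDF_STATUS_E_AGAIN from dp_runtime_suspend() means TX is
 * still draining and suspend should be retried later:
 *
 *	if (dp_runtime_suspend(soc_hdl, pdev_id) == QDF_STATUS_E_AGAIN)
 *		... defer power-down and retry ...
 *	...
 *	dp_runtime_resume(soc_hdl, pdev_id);
 */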
9942 
9943 /**
9944  * dp_tx_get_success_ack_stats() - get tx success completion count
9945  * @soc_hdl: Datapath soc handle
9946  * @vdevid: vdev identifier
9947  * @vdev_id: vdev identifier
9948  * Return: tx success ack count
9949  */
9950 static uint32_t dp_tx_get_success_ack_stats(struct cdp_soc_t *soc_hdl,
9951 					    uint8_t vdev_id)
9952 {
9953 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9954 	struct cdp_vdev_stats *vdev_stats = NULL;
9955 	uint32_t tx_success;
9956 	struct dp_vdev *vdev =
9957 		(struct dp_vdev *)dp_get_vdev_from_soc_vdev_id_wifi3(soc,
9958 								     vdev_id);
9959 
9960 	if (!vdev) {
9961 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
9962 			  FL("Invalid vdev id %d"), vdev_id);
9963 		return 0;
9964 	}
9965 
9966 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
9967 	if (!vdev_stats) {
9968 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
9969 			  "DP alloc failure - unable to allocate vdev stats");
9970 		return 0;
9971 	}
9972 
9973 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
9974 	dp_aggregate_vdev_stats(vdev, vdev_stats);
9975 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
9976 
9977 	tx_success = vdev_stats->tx.tx_success.num;
9978 	qdf_mem_free(vdev_stats);
9979 
9980 	return tx_success;
9981 }
9982 
9983 #ifdef WLAN_SUPPORT_DATA_STALL
9984 /**
9985  * dp_register_data_stall_detect_cb() - register data stall callback
9986  * @soc_hdl: Datapath soc handle
9987  * @pdev_id: id of data path pdev handle
9988  * @data_stall_detect_callback: data stall callback function
9989  *
9990  * Return: QDF_STATUS Enumeration
9991  */
9992 static
9993 QDF_STATUS dp_register_data_stall_detect_cb(
9994 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
9995 			data_stall_detect_cb data_stall_detect_callback)
9996 {
9997 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9998 	struct dp_pdev *pdev;
9999 
10000 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10001 	if (!pdev) {
10002 		dp_err("pdev NULL!");
10003 		return QDF_STATUS_E_INVAL;
10004 	}
10005 
10006 	pdev->data_stall_detect_callback = data_stall_detect_callback;
10007 	return QDF_STATUS_SUCCESS;
10008 }
10009 
10010 /**
10011  * dp_deregister_data_stall_detect_cb() - de-register data stall callback
10012  * @soc_hdl: Datapath soc handle
10013  * @pdev_id: id of data path pdev handle
10014  * @data_stall_detect_callback: data stall callback function
10015  *
10016  * Return: QDF_STATUS Enumeration
10017  */
10018 static
10019 QDF_STATUS dp_deregister_data_stall_detect_cb(
10020 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
10021 			data_stall_detect_cb data_stall_detect_callback)
10022 {
10023 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10024 	struct dp_pdev *pdev;
10025 
10026 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10027 	if (!pdev) {
10028 		dp_err("pdev NULL!");
10029 		return QDF_STATUS_E_INVAL;
10030 	}
10031 
10032 	pdev->data_stall_detect_callback = NULL;
10033 	return QDF_STATUS_SUCCESS;
10034 }
10035 
10036 /**
10037  * dp_txrx_post_data_stall_event() - post data stall event
10038  * @soc_hdl: Datapath soc handle
10039  * @indicator: Module triggering data stall
10040  * @data_stall_type: data stall event type
10041  * @pdev_id: pdev id
10042  * @vdev_id_bitmap: vdev id bitmap
10043  * @recovery_type: data stall recovery type
10044  *
10045  * Return: None
10046  */
10047 static void
10048 dp_txrx_post_data_stall_event(struct cdp_soc_t *soc_hdl,
10049 			      enum data_stall_log_event_indicator indicator,
10050 			      enum data_stall_log_event_type data_stall_type,
10051 			      uint32_t pdev_id, uint32_t vdev_id_bitmap,
10052 			      enum data_stall_log_recovery_type recovery_type)
10053 {
10054 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10055 	struct data_stall_event_info data_stall_info;
10056 	struct dp_pdev *pdev;
10057 
10058 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10059 	if (!pdev) {
10060 		dp_err("pdev NULL!");
10061 		return;
10062 	}
10063 
10064 	if (!pdev->data_stall_detect_callback) {
10065 		dp_err("data stall cb not registered!");
10066 		return;
10067 	}
10068 
10069 	dp_info("data_stall_type: %x pdev_id: %d",
10070 		data_stall_type, pdev_id);
10071 
10072 	data_stall_info.indicator = indicator;
10073 	data_stall_info.data_stall_type = data_stall_type;
10074 	data_stall_info.vdev_id_bitmap = vdev_id_bitmap;
10075 	data_stall_info.pdev_id = pdev_id;
10076 	data_stall_info.recovery_type = recovery_type;
10077 
10078 	pdev->data_stall_detect_callback(&data_stall_info);
10079 }
10080 #endif /* WLAN_SUPPORT_DATA_STALL */
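
/*
 * Illustrative data stall flow (my_stall_cb, indicator, stall_type and
 * recovery are placeholders, not values defined in this file):
 *
 *	dp_register_data_stall_detect_cb(soc_hdl, pdev_id, my_stall_cb);
 *	...on detecting a stall in the TX path...
 *	dp_txrx_post_data_stall_event(soc_hdl, indicator, stall_type,
 *				      pdev_id, vdev_id_bitmap, recovery);
 *
 * my_stall_cb() then receives the populated struct data_stall_event_info.
 */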
10081 
10082 #ifdef DP_PEER_EXTENDED_API
10083 /**
10084  * dp_peer_get_ref_find_by_addr() - find peer by address and take a reference
10085  * @dev: physical device instance
10086  * @peer_mac_addr: peer mac address
10087  * @debug_id: to track enum peer access
10088  *
10089  * Return: peer instance pointer
10090  */
10091 static void *
10092 dp_peer_get_ref_find_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
10093 			     enum peer_debug_id_type debug_id)
10094 {
10095 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
10096 	struct dp_peer *peer;
10097 
10098 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
10099 
10100 	if (!peer)
10101 		return NULL;
10102 
10103 	if (peer->delete_in_progress) {
10104 		dp_err("Peer deletion in progress");
10105 		dp_peer_unref_delete(peer);
10106 		return NULL;
10107 	}
10108 
10109 	dp_info_rl("peer %pK mac: %pM", peer, peer->mac_addr.raw);
10110 
10111 	return peer;
10112 }
10113 #endif /* DP_PEER_EXTENDED_API */
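
/*
 * Illustrative caller contract for the lookup above: every successful
 * return carries a reference that must be dropped with
 * dp_peer_unref_delete() (debug_id is whichever enum peer_debug_id_type
 * value the caller tracks):
 *
 *	struct dp_peer *peer;
 *
 *	peer = dp_peer_get_ref_find_by_addr(dev, peer_mac_addr, debug_id);
 *	if (peer) {
 *		... use peer ...
 *		dp_peer_unref_delete(peer);
 *	}
 */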
10114 
10115 #ifdef WLAN_FEATURE_STATS_EXT
10116 /* rx hw stats event wait timeout in ms */
10117 #define DP_REO_STATUS_STATS_TIMEOUT 1500
10118 /**
10119  * dp_txrx_ext_stats_request() - request dp txrx extended stats
10120  * @soc_hdl: soc handle
10121  * @pdev_id: pdev id
10122  * @req: stats request
10123  *
10124  * Return: QDF_STATUS
10125  */
10126 static QDF_STATUS
10127 dp_txrx_ext_stats_request(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
10128 			  struct cdp_txrx_ext_stats *req)
10129 {
10130 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10131 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10132 
10133 	if (!pdev) {
10134 		dp_err("pdev is null");
10135 		return QDF_STATUS_E_INVAL;
10136 	}
10137 
10138 	dp_aggregate_pdev_stats(pdev);
10139 
10140 	req->tx_msdu_enqueue = pdev->stats.tx_i.processed.num;
10141 	req->tx_msdu_overflow = pdev->stats.tx_i.dropped.ring_full;
10142 	req->rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
10143 	req->rx_mpdu_delivered = soc->ext_stats.rx_mpdu_received;
10144 	req->rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
10145 	req->rx_mpdu_error = soc->stats.rx.err_ring_pkts -
10146 				soc->stats.rx.rx_frags;
10147 
10148 	return QDF_STATUS_SUCCESS;
10149 }
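
/*
 * Illustrative usage (hypothetical caller): the request synchronously
 * fills a caller-owned structure:
 *
 *	struct cdp_txrx_ext_stats req = {0};
 *
 *	if (dp_txrx_ext_stats_request(soc_hdl, pdev_id, &req) ==
 *	    QDF_STATUS_SUCCESS)
 *		... consume req.rx_mpdu_received, req.rx_mpdu_missed ...
 */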
10150 
10151 /**
10152  * dp_rx_hw_stats_cb() - REO response callback for the rx hw stats request
10153  * @soc: soc handle
10154  * @cb_ctxt: callback context
10155  * @reo_status: reo command response status
10156  *
10157  * Return: None
10158  */
10159 static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
10160 			      union hal_reo_status *reo_status)
10161 {
10162 	struct dp_req_rx_hw_stats_t *rx_hw_stats = cb_ctxt;
10163 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
10164 	bool is_query_timeout;
10165 
10166 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
10167 	is_query_timeout = rx_hw_stats->is_query_timeout;
10168 	/* free the cb_ctxt if all pending tid stats query is received */
10169 	/* free the cb_ctxt when all pending tid stats queries are received */
10170 		if (!is_query_timeout) {
10171 			qdf_event_set(&soc->rx_hw_stats_event);
10172 			soc->is_last_stats_ctx_init = false;
10173 		}
10174 
10175 		qdf_mem_free(rx_hw_stats);
10176 	}
10177 
10178 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
10179 		dp_info("REO stats failure %d",
10180 			queue_status->header.status);
10181 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
10182 		return;
10183 	}
10184 
10185 	if (!is_query_timeout) {
10186 		soc->ext_stats.rx_mpdu_received +=
10187 					queue_status->mpdu_frms_cnt;
10188 		soc->ext_stats.rx_mpdu_missed +=
10189 					queue_status->late_recv_mpdu_cnt;
10190 	}
10191 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
10192 }
10193 
10194 /**
10195  * dp_request_rx_hw_stats() - request rx hardware stats
10196  * @soc_hdl: soc handle
10197  * @vdev_id: vdev id
10198  *
10199  * Return: QDF_STATUS
10200  */
10201 static QDF_STATUS
10202 dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
10203 {
10204 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10205 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
10206 	struct dp_peer *peer;
10207 	QDF_STATUS status;
10208 	struct dp_req_rx_hw_stats_t *rx_hw_stats;
10209 	int rx_stats_sent_cnt = 0;
10210 
10211 	if (!vdev) {
10212 		dp_err("vdev is null for vdev_id: %u", vdev_id);
10213 		return QDF_STATUS_E_INVAL;
10214 	}
10215 
10216 	peer = dp_peer_get_ref_find_by_addr((struct cdp_pdev *)vdev->pdev,
10217 					    vdev->vap_bss_peer_mac_addr, 0);
10218 
10219 	if (!peer) {
10220 		dp_err("Peer is NULL");
10221 		return QDF_STATUS_E_INVAL;
10222 	}
10223 
10224 	rx_hw_stats = qdf_mem_malloc(sizeof(*rx_hw_stats));
10225 
10226 	if (!rx_hw_stats) {
10227 		dp_err("malloc failed for hw stats structure");
10228 		dp_peer_unref_delete(peer);
10229 		return QDF_STATUS_E_NOMEM;
10230 	}
10231 
10232 	qdf_event_reset(&soc->rx_hw_stats_event);
10233 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
10234 	rx_stats_sent_cnt =
10235 		dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, rx_hw_stats);
10236 	if (!rx_stats_sent_cnt) {
10237 		dp_err("no tid stats sent successfully");
10238 		qdf_mem_free(rx_hw_stats);
10239 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
10240 		dp_peer_unref_delete(peer);
10241 		return QDF_STATUS_E_INVAL;
10242 	}
10243 	qdf_atomic_set(&rx_hw_stats->pending_tid_stats_cnt,
10244 		       rx_stats_sent_cnt);
10245 	rx_hw_stats->is_query_timeout = false;
10246 	soc->is_last_stats_ctx_init = true;
10247 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
10248 
10249 	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
10250 				       DP_REO_STATUS_STATS_TIMEOUT);
10251 
10252 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
10253 	if (status != QDF_STATUS_SUCCESS) {
10254 		dp_info("rx hw stats event timeout");
10255 		if (soc->is_last_stats_ctx_init)
10256 			rx_hw_stats->is_query_timeout = true;
10257 	}
10258 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
10259 	dp_peer_unref_delete(peer);
10260 
10261 	return status;
10262 }
10263 #endif /* WLAN_FEATURE_STATS_EXT */
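
/*
 * Illustrative blocking caller of the request above (assumption): on
 * success the aggregates in soc->ext_stats are up to date; on timeout the
 * late REO callbacks only drop their pending counts and free the context:
 *
 *	if (dp_request_rx_hw_stats(soc_hdl, vdev_id) == QDF_STATUS_SUCCESS)
 *		... read soc->ext_stats.rx_mpdu_received ...
 */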
10264 
10265 #ifdef DP_PEER_EXTENDED_API
10266 static struct cdp_misc_ops dp_ops_misc = {
10267 #ifdef FEATURE_WLAN_TDLS
10268 	.tx_non_std = dp_tx_non_std,
10269 #endif /* FEATURE_WLAN_TDLS */
10270 	.get_opmode = dp_get_opmode,
10271 #ifdef FEATURE_RUNTIME_PM
10272 	.runtime_suspend = dp_runtime_suspend,
10273 	.runtime_resume = dp_runtime_resume,
10274 #endif /* FEATURE_RUNTIME_PM */
10275 	.pkt_log_init = dp_pkt_log_init,
10276 	.pkt_log_con_service = dp_pkt_log_con_service,
10277 	.get_num_rx_contexts = dp_get_num_rx_contexts,
10278 	.get_tx_ack_stats = dp_tx_get_success_ack_stats,
10279 #ifdef WLAN_SUPPORT_DATA_STALL
10280 	.txrx_data_stall_cb_register = dp_register_data_stall_detect_cb,
10281 	.txrx_data_stall_cb_deregister = dp_deregister_data_stall_detect_cb,
10282 	.txrx_post_data_stall_event = dp_txrx_post_data_stall_event,
10283 #endif
10284 
10285 #ifdef WLAN_FEATURE_STATS_EXT
10286 	.txrx_ext_stats_request = dp_txrx_ext_stats_request,
10287 	.request_rx_hw_stats = dp_request_rx_hw_stats,
10288 #endif /* WLAN_FEATURE_STATS_EXT */
10289 };
10290 #endif
10291 
10292 #ifdef DP_FLOW_CTL
10293 static struct cdp_flowctl_ops dp_ops_flowctl = {
10294 	/* WIFI 3.0 DP implement as required. */
10295 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
10296 	.flow_pool_map_handler = dp_tx_flow_pool_map,
10297 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
10298 	.register_pause_cb = dp_txrx_register_pause_cb,
10299 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
10300 	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
10301 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
10302 };
10303 
10304 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
10305 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
10306 };
10307 #endif
10308 
10309 #ifdef IPA_OFFLOAD
10310 static struct cdp_ipa_ops dp_ops_ipa = {
10311 	.ipa_get_resource = dp_ipa_get_resource,
10312 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
10313 	.ipa_op_response = dp_ipa_op_response,
10314 	.ipa_register_op_cb = dp_ipa_register_op_cb,
10315 	.ipa_get_stat = dp_ipa_get_stat,
10316 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
10317 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
10318 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
10319 	.ipa_setup = dp_ipa_setup,
10320 	.ipa_cleanup = dp_ipa_cleanup,
10321 	.ipa_setup_iface = dp_ipa_setup_iface,
10322 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
10323 	.ipa_enable_pipes = dp_ipa_enable_pipes,
10324 	.ipa_disable_pipes = dp_ipa_disable_pipes,
10325 	.ipa_set_perf_level = dp_ipa_set_perf_level,
10326 	.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd
10327 };
10328 #endif
10329 
10330 #ifdef DP_POWER_SAVE
10331 static QDF_STATUS dp_bus_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
10332 {
10333 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10334 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10335 	int timeout = SUSPEND_DRAIN_WAIT;
10336 	int drain_wait_delay = 50; /* 50 ms */
10337 
10338 	if (qdf_unlikely(!pdev)) {
10339 		dp_err("pdev is NULL");
10340 		return QDF_STATUS_E_INVAL;
10341 	}
10342 
10343 	/* Abort if there are any pending TX packets */
10344 	while (dp_get_tx_pending((struct cdp_pdev *)pdev) > 0) {
10345 		qdf_sleep(drain_wait_delay);
10346 		if (timeout <= 0) {
10347 			dp_err("TX frames are pending, abort suspend");
10348 			return QDF_STATUS_E_TIMEOUT;
10349 		}
10350 		timeout = timeout - drain_wait_delay;
10351 	}
10352 
10353 	if (soc->intr_mode == DP_INTR_POLL)
10354 		qdf_timer_stop(&soc->int_timer);
10355 
10356 	/* Stop monitor reap timer and reap any pending frames in ring */
10357 	if (((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) ||
10358 	     dp_is_enable_reap_timer_non_pkt(pdev)) &&
10359 	    soc->reap_timer_init) {
10360 		qdf_timer_sync_cancel(&soc->mon_reap_timer);
10361 		dp_service_mon_rings(soc, DP_MON_REAP_BUDGET);
10362 	}
10363 
10364 	return QDF_STATUS_SUCCESS;
10365 }
10366 
10367 static QDF_STATUS dp_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
10368 {
10369 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10370 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10371 
10372 	if (qdf_unlikely(!pdev)) {
10373 		dp_err("pdev is NULL");
10374 		return QDF_STATUS_E_INVAL;
10375 	}
10376 
10377 	if (soc->intr_mode == DP_INTR_POLL)
10378 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
10379 
10380 	/* Start monitor reap timer */
10381 	if (((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) ||
10382 	     dp_is_enable_reap_timer_non_pkt(pdev)) &&
10383 	    soc->reap_timer_init)
10384 		qdf_timer_mod(&soc->mon_reap_timer,
10385 			      DP_INTR_POLL_TIMER_MS);
10386 
10387 	return QDF_STATUS_SUCCESS;
10388 }
10389 
10390 /**
10391  * dp_process_wow_ack_rsp() - process wow ack response
10392  * @soc_hdl: datapath soc handle
10393  * @pdev_id: data path pdev handle id
10394  *
10395  * Return: none
10396  */
10397 static void dp_process_wow_ack_rsp(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
10398 {
10399 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10400 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10401 
10402 	if (qdf_unlikely(!pdev)) {
10403 		dp_err("pdev is NULL");
10404 		return;
10405 	}
10406 
10407 	/*
10408 	 * As part of WoW enable, the FW disables the mon status ring; on the
10409 	 * WoW ack response from the FW, reap the mon status ring to make
10410 	 * sure no packets are left pending in it.
10411 	 */
10412 	if (((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) ||
10413 	     dp_is_enable_reap_timer_non_pkt(pdev)) &&
10414 	    soc->reap_timer_init) {
10415 		dp_service_mon_rings(soc, DP_MON_REAP_BUDGET);
10416 	}
10417 }
10418 
10419 /**
10420  * dp_process_target_suspend_req() - process target suspend request
10421  * @soc_hdl: datapath soc handle
10422  * @pdev_id: data path pdev handle id
10423  *
10424  * Return: none
10425  */
10426 static void dp_process_target_suspend_req(struct cdp_soc_t *soc_hdl,
10427 					  uint8_t pdev_id)
10428 {
10429 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10430 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10431 
10432 	if (qdf_unlikely(!pdev)) {
10433 		dp_err("pdev is NULL");
10434 		return;
10435 	}
10436 
10437 	/* Stop monitor reap timer and reap any pending frames in ring */
10438 	if (((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) ||
10439 	     dp_is_enable_reap_timer_non_pkt(pdev)) &&
10440 	    soc->reap_timer_init) {
10441 		qdf_timer_sync_cancel(&soc->mon_reap_timer);
10442 		dp_service_mon_rings(soc, DP_MON_REAP_BUDGET);
10443 	}
10444 }
10445 
10446 static struct cdp_bus_ops dp_ops_bus = {
10447 	.bus_suspend = dp_bus_suspend,
10448 	.bus_resume = dp_bus_resume,
10449 	.process_wow_ack_rsp = dp_process_wow_ack_rsp,
10450 	.process_target_suspend_req = dp_process_target_suspend_req
10451 };
10452 #endif
10453 
10454 #ifdef DP_FLOW_CTL
10455 static struct cdp_throttle_ops dp_ops_throttle = {
10456 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
10457 };
10458 
10459 static struct cdp_cfg_ops dp_ops_cfg = {
10460 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
10461 };
10462 #endif
10463 
10464 #ifdef DP_PEER_EXTENDED_API
10465 static struct cdp_ocb_ops dp_ops_ocb = {
10466 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
10467 };
10468 
10469 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
10470 	.clear_stats = dp_txrx_clear_dump_stats,
10471 };
10472 
10473 static struct cdp_peer_ops dp_ops_peer = {
10474 	.register_peer = dp_register_peer,
10475 	.clear_peer = dp_clear_peer,
10476 	.find_peer_exist = dp_find_peer_exist,
10477 	.find_peer_exist_on_vdev = dp_find_peer_exist_on_vdev,
10478 	.find_peer_exist_on_other_vdev = dp_find_peer_exist_on_other_vdev,
10479 	.peer_state_update = dp_peer_state_update,
10480 	.get_vdevid = dp_get_vdevid,
10481 	.get_vdev_by_peer_addr = dp_get_vdev_by_peer_addr,
10482 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
10483 	.get_peer_state = dp_get_peer_state,
10484 };
10485 #endif
10486 
10487 static struct cdp_ops dp_txrx_ops = {
10488 	.cmn_drv_ops = &dp_ops_cmn,
10489 	.ctrl_ops = &dp_ops_ctrl,
10490 	.me_ops = &dp_ops_me,
10491 	.mon_ops = &dp_ops_mon,
10492 	.host_stats_ops = &dp_ops_host_stats,
10493 	.wds_ops = &dp_ops_wds,
10494 	.raw_ops = &dp_ops_raw,
10495 #ifdef PEER_FLOW_CONTROL
10496 	.pflow_ops = &dp_ops_pflow,
10497 #endif /* PEER_FLOW_CONTROL */
10498 #ifdef DP_PEER_EXTENDED_API
10499 	.misc_ops = &dp_ops_misc,
10500 	.ocb_ops = &dp_ops_ocb,
10501 	.peer_ops = &dp_ops_peer,
10502 	.mob_stats_ops = &dp_ops_mob_stats,
10503 #endif
10504 #ifdef DP_FLOW_CTL
10505 	.cfg_ops = &dp_ops_cfg,
10506 	.flowctl_ops = &dp_ops_flowctl,
10507 	.l_flowctl_ops = &dp_ops_l_flowctl,
10508 	.throttle_ops = &dp_ops_throttle,
10509 #endif
10510 #ifdef IPA_OFFLOAD
10511 	.ipa_ops = &dp_ops_ipa,
10512 #endif
10513 #ifdef DP_POWER_SAVE
10514 	.bus_ops = &dp_ops_bus,
10515 #endif
10516 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
10517 	.cfr_ops = &dp_ops_cfr,
10518 #endif
10519 };
10520 
10521 /**
10522  * dp_soc_set_txrx_ring_map() - set the default TCL ring map
10523  * @soc: DP soc handle
10524  *
10525  * Return: Void
10526  */
10527 void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
10528 {
10529 	uint32_t i;

10530 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
10531 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
10532 	}
10533 }
10534 
10535 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
10536 	defined(QCA_WIFI_QCA5018)
10537 /**
10538  * dp_soc_attach_wifi3() - Attach txrx SOC
10539  * @ctrl_psoc: Opaque SOC handle from control plane
10540  * @htc_handle: Opaque HTC handle
10541  * @hif_handle: Opaque HIF handle
10542  * @htc_handle: Opaque HTC handle
10543  * @ol_ops: Offload Operations
10544  * @device_id: Device ID
10545  *
10546  * Return: DP SOC handle on success, NULL on failure
10547  */
10548 struct cdp_soc_t *
10549 dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
10550 		    struct hif_opaque_softc *hif_handle,
10551 		    HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
10552 		    struct ol_if_ops *ol_ops, uint16_t device_id)
10553 {
10554 	struct dp_soc *dp_soc = NULL;
10555 
10556 	dp_soc = dp_soc_attach(ctrl_psoc, hif_handle, htc_handle, qdf_osdev,
10557 			       ol_ops, device_id);
10558 	return dp_soc_to_cdp_soc_t(dp_soc);
10559 }
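
/*
 * Illustrative bring-up order (a sketch based on the attach/init split in
 * this file; dp_soc embeds cdp_soc_t as its first member, so the cast
 * below is valid):
 *
 *	struct cdp_soc_t *cdp;
 *
 *	cdp = dp_soc_attach_wifi3(ctrl_psoc, hif_handle, htc_handle,
 *				  qdf_osdev, ol_ops, device_id);
 *	if (cdp)
 *		dp_soc_init((struct dp_soc *)cdp, htc_handle, hif_handle);
 */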
10560 
10561 static inline void dp_soc_set_def_pdev(struct dp_soc *soc)
10562 {
10563 	int lmac_id;
10564 
10565 	for (lmac_id = 0; lmac_id < MAX_NUM_LMAC_HW; lmac_id++) {
10566 		/*Set default host PDEV ID for lmac_id*/
10567 		wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
10568 				      INVALID_PDEV_ID, lmac_id);
10569 	}
10570 }
10571 
10572 /**
10573  * dp_soc_attach() - Attach txrx SOC
10574  * @ctrl_psoc: Opaque SOC handle from control plane
10575  * @hif_handle: Opaque HIF handle
10576  * @htc_handle: Opaque HTC handle
10577  * @qdf_osdev: QDF device
10578  * @ol_ops: Offload Operations
10579  * @device_id: Device ID
10580  *
10581  * Return: DP SOC handle on success, NULL on failure
10582  */
10583 static struct dp_soc *
10584 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
10585 	      struct hif_opaque_softc *hif_handle, HTC_HANDLE htc_handle,
10586 	      qdf_device_t qdf_osdev, struct ol_if_ops *ol_ops,
10587 	      uint16_t device_id)
10588 {
10589 	int int_ctx;
10590 	struct dp_soc *soc =  NULL;
10591 
10592 	if (!hif_handle) {
10593 		dp_err("HIF handle is NULL");
10594 		goto fail0;
10595 	}
10596 
10597 	soc = qdf_mem_malloc(sizeof(*soc));
10598 	if (!soc) {
10599 		dp_err("DP SOC memory allocation failed");
10600 		goto fail0;
10601 	}
10602 
10603 	soc->hif_handle = hif_handle;
10604 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
10605 	if (!soc->hal_soc)
10606 		goto fail1;
10607 
10608 	int_ctx = 0;
10609 	soc->device_id = device_id;
10610 	soc->cdp_soc.ops = &dp_txrx_ops;
10611 	soc->cdp_soc.ol_ops = ol_ops;
10612 	soc->ctrl_psoc = ctrl_psoc;
10613 	soc->osdev = qdf_osdev;
10614 	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
10615 
10616 	/* Reset wbm sg list and flags */
10617 	dp_rx_wbm_sg_list_reset(soc);
10618 
10619 	wlan_set_srng_cfg(&soc->wlan_srng_cfg);
10620 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
10621 	if (!soc->wlan_cfg_ctx) {
10622 		dp_err("wlan_cfg_ctx failed");
10623 		goto fail1;
10624 	}
10625 
10626 	dp_soc_cfg_attach(soc);
10627 
10628 	if (dp_hw_link_desc_pool_banks_alloc(soc, WLAN_INVALID_PDEV_ID)) {
10629 		dp_err("failed to allocate link desc pool banks");
10630 		goto fail2;
10631 	}
10632 
10633 	if (dp_hw_link_desc_ring_alloc(soc)) {
10634 		dp_err("failed to allocate link_desc_ring");
10635 		goto fail3;
10636 	}
10637 
10638 	if (dp_soc_srng_alloc(soc)) {
10639 		dp_err("failed to allocate soc srng rings");
10640 		goto fail4;
10641 	}
10642 
10643 	if (dp_soc_tx_desc_sw_pools_alloc(soc)) {
10644 		dp_err("dp_soc_tx_desc_sw_pools_alloc failed");
10645 		goto fail5;
10646 	}
10647 
10648 	dp_soc_set_interrupt_mode(soc);
10649 	dp_soc_set_def_pdev(soc);
10650 
10651 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
10652 		qdf_dma_mem_stats_read(),
10653 		qdf_heap_mem_stats_read(),
10654 		qdf_skb_mem_stats_read());
10655 
10656 	return soc;
10657 fail5:
10658 	dp_soc_srng_free(soc);
10659 fail4:
10660 	dp_hw_link_desc_ring_free(soc);
10661 fail3:
10662 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
10663 fail2:
10664 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
10665 fail1:
10666 	qdf_mem_free(soc);
10667 fail0:
10668 	return NULL;
10669 }
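
/*
 * Note: dp_soc_attach() above only allocates memory (the soc object, cfg
 * context, link descriptor banks, SRNG rings and tx descriptor pools);
 * dp_soc_init() below attaches HTT and brings the rings up. An
 * illustrative caller sequence (a sketch, not lifted from a real caller):
 *
 *	struct cdp_soc_t *cdp = dp_soc_attach_wifi3(psoc, hif, htc, osdev,
 *						    ol_ops, device_id);
 *	if (cdp)
 *		dp_soc_init_wifi3(cdp, psoc, hif, htc, osdev, ol_ops,
 *				  device_id);
 */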
10670 
10671 /**
10672  * dp_soc_init() - Initialize txrx SOC
 * @soc: DP SOC handle
10674  * @htc_handle: Opaque HTC handle
10675  * @hif_handle: Opaque HIF handle
10676  *
10677  * Return: DP SOC handle on success, NULL on failure
10678  */
10679 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
10680 		  struct hif_opaque_softc *hif_handle)
10681 {
10682 	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
10683 	bool is_monitor_mode = false;
10684 	struct hal_reo_params reo_params;
10685 	uint8_t i;
10686 
10687 	wlan_minidump_log(soc, sizeof(*soc), soc->ctrl_psoc,
10688 			  WLAN_MD_DP_SOC, "dp_soc");
10689 
10690 	htt_soc = htt_soc_attach(soc, htc_handle);
10691 	if (!htt_soc)
10692 		goto fail0;
10693 
10694 	soc->htt_handle = htt_soc;
10695 
10696 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
10697 		goto fail1;
10698 
10699 	htt_set_htc_handle(htt_soc, htc_handle);
10700 	soc->hif_handle = hif_handle;
10701 
10702 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
10703 	if (!soc->hal_soc)
10704 		goto fail2;
10705 
10706 	dp_soc_cfg_init(soc);
10707 
10708 	/* Reset/Initialize wbm sg list and flags */
10709 	dp_rx_wbm_sg_list_reset(soc);
10710 
10711 	/* Note: Any SRNG ring initialization should happen only after
	 * the interrupt mode is set and the interrupt masks are filled in.
	 * IT SHOULD ALWAYS BE IN THIS ORDER.
10714 	 */
10715 	dp_soc_set_interrupt_mode(soc);
10716 	if (soc->cdp_soc.ol_ops->get_con_mode &&
10717 	    soc->cdp_soc.ol_ops->get_con_mode() ==
10718 	    QDF_GLOBAL_MONITOR_MODE)
10719 		is_monitor_mode = true;
10720 
10721 	wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, soc->intr_mode,
10722 				     is_monitor_mode);
10723 
10724 	/* initialize WBM_IDLE_LINK ring */
10725 	if (dp_hw_link_desc_ring_init(soc)) {
10726 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
10727 			  FL("dp_hw_link_desc_ring_init failed"));
10728 		goto fail3;
10729 	}
10730 
10731 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
10732 
10733 	if (dp_soc_srng_init(soc)) {
10734 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
10735 			  FL("dp_soc_srng_init failed"));
10736 		goto fail4;
10737 	}
10738 
10739 	if (htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
10740 			       htt_get_htc_handle(htt_soc),
10741 			       soc->hal_soc, soc->osdev) == NULL)
10742 		goto fail5;
10743 
10744 	/* Initialize descriptors in TCL Rings */
10745 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
10746 		hal_tx_init_data_ring(soc->hal_soc,
10747 				      soc->tcl_data_ring[i].hal_srng);
10748 	}
10749 
10750 	if (dp_soc_tx_desc_sw_pools_init(soc)) {
10751 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
10752 			  FL("dp_tx_soc_attach failed"));
10753 		goto fail6;
10754 	}
10755 
10756 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
10757 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
10758 	soc->cce_disable = false;
10759 
10760 	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
10761 	qdf_atomic_init(&soc->num_tx_outstanding);
10762 	qdf_atomic_init(&soc->num_tx_exception);
10763 	soc->num_tx_allowed =
10764 		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);
10765 
10766 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
10767 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
10768 				CDP_CFG_MAX_PEER_ID);
10769 
10770 		if (ret != -EINVAL)
10771 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
10772 
10773 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
10774 				CDP_CFG_CCE_DISABLE);
10775 		if (ret == 1)
10776 			soc->cce_disable = true;
10777 	}
10778 
10779 	/*
	 * Skip registering hw ring interrupts for WMAC2 on IPQ6018
	 * and IPQ5018, as WMAC2 is not present on these platforms.
10782 	 */
10783 	if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018 ||
10784 	    soc->disable_mac2_intr)
10785 		dp_soc_disable_unused_mac_intr_mask(soc, 0x2);
10786 
10787 	/*
	 * Skip registering hw ring interrupts for WMAC1 on IPQ5018,
	 * as WMAC1 is not present on this platform.
10790 	 */
10791 	if (soc->disable_mac1_intr)
10792 		dp_soc_disable_unused_mac_intr_mask(soc, 0x1);
10793 
10794 	/* Setup HW REO */
10795 	qdf_mem_zero(&reo_params, sizeof(reo_params));
10796 
10797 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
10798 		/*
10799 		 * Reo ring remap is not required if both radios
10800 		 * are offloaded to NSS
10801 		 */
10802 		if (dp_reo_remap_config(soc,
10803 					&reo_params.remap1,
10804 					&reo_params.remap2))
10805 			reo_params.rx_hash_enabled = true;
10806 		else
10807 			reo_params.rx_hash_enabled = false;
10808 	}
10809 
10810 	/* setup the global rx defrag waitlist */
10811 	TAILQ_INIT(&soc->rx.defrag.waitlist);
10812 	soc->rx.defrag.timeout_ms =
10813 		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
10814 	soc->rx.defrag.next_flush_ms = 0;
10815 	soc->rx.flags.defrag_timeout_check =
10816 		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
10817 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
10818 
10819 	/*
10820 	 * set the fragment destination ring
10821 	 */
10822 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
10823 
10824 	hal_reo_setup(soc->hal_soc, &reo_params);
10825 
10826 	hal_reo_set_err_dst_remap(soc->hal_soc);
10827 
10828 	qdf_atomic_set(&soc->cmn_init_done, 1);
10829 
10830 	dp_soc_wds_attach(soc);
10831 
10832 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
10833 
10834 	qdf_spinlock_create(&soc->peer_ref_mutex);
10835 	qdf_spinlock_create(&soc->ast_lock);
10836 
10837 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
10838 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
10839 	INIT_RX_HW_STATS_LOCK(soc);
10840 
10841 	/* fill the tx/rx cpu ring map*/
10842 	dp_soc_set_txrx_ring_map(soc);
10843 
10844 	qdf_spinlock_create(&soc->htt_stats.lock);
10845 	/* initialize work queue for stats processing */
10846 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
10847 
10848 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
10849 		qdf_dma_mem_stats_read(),
10850 		qdf_heap_mem_stats_read(),
10851 		qdf_skb_mem_stats_read());
10852 
10853 	return soc;
10854 fail6:
10855 	htt_soc_htc_dealloc(soc->htt_handle);
10856 fail5:
10857 	dp_soc_srng_deinit(soc);
10858 fail4:
10859 	dp_hw_link_desc_ring_deinit(soc);
10860 fail3:
10861 	dp_hw_link_desc_ring_free(soc);
10862 fail2:
10863 	htt_htc_pkt_pool_free(htt_soc);
10864 fail1:
10865 	htt_soc_detach(htt_soc);
10866 fail0:
10867 	return NULL;
10868 }
10869 
10870 /**
10871  * dp_soc_init_wifi3() - Initialize txrx SOC
10872  * @soc: Opaque DP SOC handle
 * @ctrl_psoc: Opaque SOC handle from control plane (Unused)
10874  * @hif_handle: Opaque HIF handle
10875  * @htc_handle: Opaque HTC handle
10876  * @qdf_osdev: QDF device (Unused)
10877  * @ol_ops: Offload Operations (Unused)
10878  * @device_id: Device ID (Unused)
10879  *
10880  * Return: DP SOC handle on success, NULL on failure
10881  */
10882 void *dp_soc_init_wifi3(struct cdp_soc_t *soc,
10883 			struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
10884 			struct hif_opaque_softc *hif_handle,
10885 			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
10886 			struct ol_if_ops *ol_ops, uint16_t device_id)
10887 {
10888 	return dp_soc_init((struct dp_soc *)soc, htc_handle, hif_handle);
10889 }
10890 
10891 #endif
10892 
10893 /*
10894  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
10895  *
10896  * @soc: handle to DP soc
10897  * @mac_id: MAC id
10898  *
 * Return: pdev corresponding to MAC id
10900  */
10901 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
10902 {
10903 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
10904 		return (mac_id < MAX_PDEV_CNT) ? soc->pdev_list[mac_id] : NULL;
10905 
	/* Typically for MCL, as there is only one PDEV */
10907 	return soc->pdev_list[0];
10908 }
10909 
10910 /*
10911  * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
10912  * @soc:		DP SoC context
 * @max_mac_rings:	Pointer to the number of MAC rings; clamped to 1
 *			when DBS is not supported
10914  *
10915  * Return: None
10916  */
10917 void dp_is_hw_dbs_enable(struct dp_soc *soc,
10918 				int *max_mac_rings)
10919 {
	bool dbs_enable = false;

	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
		dbs_enable = soc->cdp_soc.ol_ops->
			is_hw_dbs_2x2_capable((void *)soc->ctrl_psoc);

	*max_mac_rings = dbs_enable ? *max_mac_rings : 1;
10926 }
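
/*
 * Example: with *max_mac_rings == 2 on entry, the value stays 2 only when
 * the control plane reports 2x2 DBS capability; otherwise it is clamped
 * to 1 so that a single MAC ring is configured.
 */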
10927 
10928 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
10929 /*
10930  * dp_cfr_filter() -  Configure HOST RX monitor status ring for CFR
10931  * @soc_hdl: Datapath soc handle
10932  * @pdev_id: id of data path pdev handle
10933  * @enable: Enable/Disable CFR
10934  * @filter_val: Flag to select Filter for monitor mode
 *
 * Return: none
 */
10936 static void dp_cfr_filter(struct cdp_soc_t *soc_hdl,
10937 			  uint8_t pdev_id,
10938 			  bool enable,
10939 			  struct cdp_monitor_filter *filter_val)
10940 {
10941 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10942 	struct dp_pdev *pdev = NULL;
10943 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
10944 	int max_mac_rings;
10945 	uint8_t mac_id = 0;
10946 
10947 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10948 	if (!pdev) {
10949 		dp_err("pdev is NULL");
10950 		return;
10951 	}
10952 
10953 	if (pdev->monitor_vdev) {
		dp_info("No action is needed since monitor mode is enabled");
10955 		return;
10956 	}
10957 	soc = pdev->soc;
10958 	pdev->cfr_rcc_mode = false;
10959 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
10960 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
10961 
10962 	dp_debug("Max_mac_rings %d", max_mac_rings);
10963 	dp_info("enable : %d, mode: 0x%x", enable, filter_val->mode);
10964 
10965 	if (enable) {
10966 		pdev->cfr_rcc_mode = true;
10967 
10968 		htt_tlv_filter.ppdu_start = 1;
10969 		htt_tlv_filter.ppdu_end = 1;
10970 		htt_tlv_filter.ppdu_end_user_stats = 1;
10971 		htt_tlv_filter.ppdu_end_user_stats_ext = 1;
10972 		htt_tlv_filter.ppdu_end_status_done = 1;
10973 		htt_tlv_filter.mpdu_start = 1;
10974 		htt_tlv_filter.offset_valid = false;
10975 
10976 		htt_tlv_filter.enable_fp =
10977 			(filter_val->mode & MON_FILTER_PASS) ? 1 : 0;
10978 		htt_tlv_filter.enable_md = 0;
10979 		htt_tlv_filter.enable_mo =
10980 			(filter_val->mode & MON_FILTER_OTHER) ? 1 : 0;
10981 		htt_tlv_filter.fp_mgmt_filter = filter_val->fp_mgmt;
10982 		htt_tlv_filter.fp_ctrl_filter = filter_val->fp_ctrl;
10983 		htt_tlv_filter.fp_data_filter = filter_val->fp_data;
10984 		htt_tlv_filter.mo_mgmt_filter = filter_val->mo_mgmt;
10985 		htt_tlv_filter.mo_ctrl_filter = filter_val->mo_ctrl;
10986 		htt_tlv_filter.mo_data_filter = filter_val->mo_data;
10987 	}
10988 
10989 	for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
10990 		int mac_for_pdev =
10991 			dp_get_mac_id_for_pdev(mac_id,
10992 					       pdev->pdev_id);
10993 
10994 		htt_h2t_rx_ring_cfg(soc->htt_handle,
10995 				    mac_for_pdev,
10996 				    soc->rxdma_mon_status_ring[mac_id]
10997 				    .hal_srng,
10998 				    RXDMA_MONITOR_STATUS,
10999 				    RX_DATA_BUFFER_SIZE,
11000 				    &htt_tlv_filter);
11001 	}
11002 }
11003 
11004 /**
11005  * dp_get_cfr_rcc() - get cfr rcc config
11006  * @soc_hdl: Datapath soc handle
11007  * @pdev_id: id of objmgr pdev
11008  *
11009  * Return: true/false based on cfr mode setting
11010  */
11011 static
11012 bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
11013 {
11014 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11015 	struct dp_pdev *pdev = NULL;
11016 
11017 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11018 	if (!pdev) {
11019 		dp_err("pdev is NULL");
11020 		return false;
11021 	}
11022 
11023 	return pdev->cfr_rcc_mode;
11024 }
11025 
11026 /**
11027  * dp_set_cfr_rcc() - enable/disable cfr rcc config
11028  * @soc_hdl: Datapath soc handle
11029  * @pdev_id: id of objmgr pdev
11030  * @enable: Enable/Disable cfr rcc mode
11031  *
11032  * Return: none
11033  */
11034 static
11035 void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, bool enable)
11036 {
11037 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11038 	struct dp_pdev *pdev = NULL;
11039 
11040 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11041 	if (!pdev) {
11042 		dp_err("pdev is NULL");
11043 		return;
11044 	}
11045 
11046 	pdev->cfr_rcc_mode = enable;
11047 }
11048 
11049 /*
 * dp_get_cfr_dbg_stats() - Get the debug statistics for CFR
11051  * @soc_hdl: Datapath soc handle
11052  * @pdev_id: id of data path pdev handle
11053  * @cfr_rcc_stats: CFR RCC debug statistics buffer
11054  *
11055  * Return: none
11056  */
11057 static inline void
11058 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
11059 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats)
11060 {
11061 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11062 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11063 
11064 	if (!pdev) {
11065 		dp_err("Invalid pdev");
11066 		return;
11067 	}
11068 
11069 	qdf_mem_copy(cfr_rcc_stats, &pdev->stats.rcc,
11070 		     sizeof(struct cdp_cfr_rcc_stats));
11071 }
11072 
11073 /*
 * dp_clear_cfr_dbg_stats() - Clear debug statistics for CFR
11075  * @soc_hdl: Datapath soc handle
11076  * @pdev_id: id of data path pdev handle
11077  *
11078  * Return: none
11079  */
11080 static void dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl,
11081 				   uint8_t pdev_id)
11082 {
11083 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11084 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11085 
11086 	if (!pdev) {
11087 		dp_err("dp pdev is NULL");
11088 		return;
11089 	}
11090 
11091 	qdf_mem_zero(&pdev->stats.rcc, sizeof(pdev->stats.rcc));
11092 }
11093 
11094 /*
11095  * dp_enable_mon_reap_timer() - enable/disable reap timer
11096  * @soc_hdl: Datapath soc handle
11097  * @pdev_id: id of objmgr pdev
11098  * @enable: Enable/Disable reap timer of monitor status ring
11099  *
11100  * Return: none
11101  */
11102 static void
11103 dp_enable_mon_reap_timer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
11104 			 bool enable)
11105 {
11106 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11107 	struct dp_pdev *pdev = NULL;
11108 
11109 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11110 	if (!pdev) {
11111 		dp_err("pdev is NULL");
11112 		return;
11113 	}
11114 
11115 	pdev->enable_reap_timer_non_pkt = enable;
11116 	if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
11117 		dp_debug("pktlog enabled %d", pdev->rx_pktlog_mode);
11118 		return;
11119 	}
11120 
11121 	if (!soc->reap_timer_init) {
11122 		dp_err("reap timer not init");
11123 		return;
11124 	}
11125 
11126 	if (enable)
11127 		qdf_timer_mod(&soc->mon_reap_timer,
11128 			      DP_INTR_POLL_TIMER_MS);
11129 	else
11130 		qdf_timer_sync_cancel(&soc->mon_reap_timer);
11131 }
11132 #endif
11133 
11134 /*
11135  * dp_is_enable_reap_timer_non_pkt() - check if mon reap timer is
11136  * enabled by non-pkt log or not
 * @pdev: pointer to dp pdev
11138  *
11139  * Return: true if mon reap timer is enabled by non-pkt log
11140  */
11141 static bool dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev)
11142 {
11143 	if (!pdev) {
11144 		dp_err("null pdev");
11145 		return false;
11146 	}
11147 
11148 	return pdev->enable_reap_timer_non_pkt;
11149 }
11150 
/*
 * dp_set_pktlog_wifi3() - enable/disable packet log for a WDI event
 * @pdev: Datapath PDEV handle
 * @event: which event's notifications are being subscribed to
 * @enable: WDI event subscribe or not (true or false)
 *
 * Return: 0 on success
 */
11159 #ifdef WDI_EVENT_ENABLE
11160 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
11161 		bool enable)
11162 {
11163 	struct dp_soc *soc = NULL;
11164 	int max_mac_rings = wlan_cfg_get_num_mac_rings
11165 					(pdev->wlan_cfg_ctx);
11166 	uint8_t mac_id = 0;
11167 
11168 	soc = pdev->soc;
11169 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
11170 
11171 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
11172 			FL("Max_mac_rings %d "),
11173 			max_mac_rings);
11174 
11175 	if (enable) {
11176 		switch (event) {
11177 		case WDI_EVENT_RX_DESC:
11178 			if (pdev->monitor_vdev) {
11179 				/* Nothing needs to be done if monitor mode is
11180 				 * enabled
11181 				 */
11182 				return 0;
11183 			}
11184 
11185 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
11186 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
11187 				dp_mon_filter_setup_rx_pkt_log_full(pdev);
11188 				if (dp_mon_filter_update(pdev) !=
11189 						QDF_STATUS_SUCCESS) {
11190 					QDF_TRACE(QDF_MODULE_ID_DP,
11191 						  QDF_TRACE_LEVEL_ERROR,
11192 						  FL("Pktlog full filters set failed"));
11193 					dp_mon_filter_reset_rx_pkt_log_full(pdev);
11194 					pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
11195 					return 0;
11196 				}
11197 
11198 				if (soc->reap_timer_init &&
11199 				    (!dp_is_enable_reap_timer_non_pkt(pdev)))
11200 					qdf_timer_mod(&soc->mon_reap_timer,
11201 					DP_INTR_POLL_TIMER_MS);
11202 			}
11203 			break;
11204 
11205 		case WDI_EVENT_LITE_RX:
11206 			if (pdev->monitor_vdev) {
11207 				/* Nothing needs to be done if monitor mode is
11208 				 * enabled
11209 				 */
11210 				return 0;
11211 			}
11212 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
11213 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
11214 
11215 				/*
11216 				 * Set the packet log lite mode filter.
11217 				 */
11218 				dp_mon_filter_setup_rx_pkt_log_lite(pdev);
11219 				if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
11220 					QDF_TRACE(QDF_MODULE_ID_DP,
11221 						  QDF_TRACE_LEVEL_ERROR,
11222 						  FL("Pktlog lite filters set failed"));
11223 					dp_mon_filter_reset_rx_pkt_log_lite(pdev);
11224 					pdev->rx_pktlog_mode =
11225 						DP_RX_PKTLOG_DISABLED;
11226 					return 0;
11227 				}
11228 
11229 				if (soc->reap_timer_init &&
11230 				    (!dp_is_enable_reap_timer_non_pkt(pdev)))
11231 					qdf_timer_mod(&soc->mon_reap_timer,
11232 					DP_INTR_POLL_TIMER_MS);
11233 			}
11234 			break;
11235 
11236 		case WDI_EVENT_LITE_T2H:
11237 			if (pdev->monitor_vdev) {
11238 				/* Nothing needs to be done if monitor mode is
11239 				 * enabled
11240 				 */
11241 				return 0;
11242 			}
11243 
11244 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
11245 				int mac_for_pdev = dp_get_mac_id_for_pdev(
11246 							mac_id,	pdev->pdev_id);
11247 
11248 				pdev->pktlog_ppdu_stats = true;
11249 				dp_h2t_cfg_stats_msg_send(pdev,
11250 					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
11251 					mac_for_pdev);
11252 			}
11253 			break;
11254 
11255 		default:
11256 			/* Nothing needs to be done for other pktlog types */
11257 			break;
11258 		}
11259 	} else {
11260 		switch (event) {
11261 		case WDI_EVENT_RX_DESC:
11262 		case WDI_EVENT_LITE_RX:
11263 			if (pdev->monitor_vdev) {
11264 				/* Nothing needs to be done if monitor mode is
11265 				 * enabled
11266 				 */
11267 				return 0;
11268 			}
11269 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
11270 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
11271 				dp_mon_filter_reset_rx_pkt_log_full(pdev);
11272 				if (dp_mon_filter_update(pdev) !=
11273 						QDF_STATUS_SUCCESS) {
11274 					QDF_TRACE(QDF_MODULE_ID_DP,
11275 						  QDF_TRACE_LEVEL_ERROR,
11276 						  FL("Pktlog filters reset failed"));
11277 					return 0;
11278 				}
11279 
11280 				dp_mon_filter_reset_rx_pkt_log_lite(pdev);
11281 				if (dp_mon_filter_update(pdev) !=
11282 						QDF_STATUS_SUCCESS) {
11283 					QDF_TRACE(QDF_MODULE_ID_DP,
11284 						  QDF_TRACE_LEVEL_ERROR,
11285 						  FL("Pktlog filters reset failed"));
11286 					return 0;
11287 				}
11288 
11289 				if (soc->reap_timer_init &&
11290 				    (!dp_is_enable_reap_timer_non_pkt(pdev)))
11291 					qdf_timer_stop(&soc->mon_reap_timer);
11292 			}
11293 			break;
11294 		case WDI_EVENT_LITE_T2H:
11295 			if (pdev->monitor_vdev) {
11296 				/* Nothing needs to be done if monitor mode is
11297 				 * enabled
11298 				 */
11299 				return 0;
11300 			}
			/*
			 * Pass value 0 to disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG
			 * in FW. Once proper macros are defined in the htt
			 * header file, they should be used here instead.
			 */
11305 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
11306 				int mac_for_pdev =
11307 						dp_get_mac_id_for_pdev(mac_id,
11308 								pdev->pdev_id);
11309 
11310 				pdev->pktlog_ppdu_stats = false;
				if (!pdev->enhanced_stats_en &&
				    !pdev->tx_sniffer_enable &&
				    !pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev, 0,
							mac_for_pdev);
				} else if (pdev->tx_sniffer_enable ||
					   pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev,
						DP_PPDU_STATS_CFG_SNIFFER,
						mac_for_pdev);
				} else if (pdev->enhanced_stats_en) {
					dp_h2t_cfg_stats_msg_send(pdev,
						DP_PPDU_STATS_CFG_ENH_STATS,
						mac_for_pdev);
				}
11321 			}
11322 
11323 			break;
11324 		default:
11325 			/* Nothing needs to be done for other pktlog types */
11326 			break;
11327 		}
11328 	}
11329 	return 0;
11330 }
11331 #endif
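
/*
 * Illustrative use of dp_set_pktlog_wifi3() (a sketch; actual callers
 * live in the pktlog glue code): subscribing installs the matching
 * monitor status ring filters, unsubscribing tears them down.
 *
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, true);
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, false);
 */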
11332 
11333 /**
 * dp_bucket_index() - Map a measured delay to its histogram bucket
 *
 * @delay: delay measured
 * @array: array of bucket boundaries for the given delay type
 *
 * Return: bucket index
11340  */
11341 static uint8_t dp_bucket_index(uint32_t delay, uint16_t *array)
11342 {
11343 	uint8_t i = CDP_DELAY_BUCKET_0;
11344 
	/* Stop one short so array[i + 1] never reads past the table */
	for (; i < CDP_DELAY_BUCKET_MAX - 1; i++) {
		if (delay >= array[i] && delay <= array[i + 1])
			return i;
	}
11349 
11350 	return (CDP_DELAY_BUCKET_MAX - 1);
11351 }
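
/*
 * Worked example: with the fw-to-hw boundaries below
 * ({0, 10, 20, ..., 100, 250, 500} ms), a completion delay of 15 ms
 * satisfies array[1] <= 15 <= array[2] and is counted in bucket 1, while
 * anything above 500 ms falls through to bucket CDP_DELAY_BUCKET_MAX - 1.
 */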
11352 
11353 /**
11354  * dp_fill_delay_buckets() - Fill delay statistics bucket for each
11355  *				type of delay
11356  *
11357  * @pdev: pdev handle
11358  * @delay: delay in ms
11359  * @tid: tid value
11360  * @mode: type of tx delay mode
11361  * @ring_id: ring number
11362  * Return: pointer to cdp_delay_stats structure
11363  */
11364 static struct cdp_delay_stats *
11365 dp_fill_delay_buckets(struct dp_pdev *pdev, uint32_t delay,
11366 		      uint8_t tid, uint8_t mode, uint8_t ring_id)
11367 {
11368 	uint8_t delay_index = 0;
11369 	struct cdp_tid_tx_stats *tstats =
11370 		&pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
11371 	struct cdp_tid_rx_stats *rstats =
11372 		&pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
11373 	/*
11374 	 * cdp_fw_to_hw_delay_range
11375 	 * Fw to hw delay ranges in milliseconds
11376 	 */
11377 	uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
11378 		0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
11379 
11380 	/*
11381 	 * cdp_sw_enq_delay_range
11382 	 * Software enqueue delay ranges in milliseconds
11383 	 */
11384 	uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
11385 		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
11386 
11387 	/*
11388 	 * cdp_intfrm_delay_range
11389 	 * Interframe delay ranges in milliseconds
11390 	 */
11391 	uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
11392 		0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};
11393 
11394 	/*
11395 	 * Update delay stats in proper bucket
11396 	 */
11397 	switch (mode) {
11398 	/* Software Enqueue delay ranges */
11399 	case CDP_DELAY_STATS_SW_ENQ:
11400 
11401 		delay_index = dp_bucket_index(delay, cdp_sw_enq_delay);
11402 		tstats->swq_delay.delay_bucket[delay_index]++;
11403 		return &tstats->swq_delay;
11404 
11405 	/* Tx Completion delay ranges */
11406 	case CDP_DELAY_STATS_FW_HW_TRANSMIT:
11407 
11408 		delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay);
11409 		tstats->hwtx_delay.delay_bucket[delay_index]++;
11410 		return &tstats->hwtx_delay;
11411 
11412 	/* Interframe tx delay ranges */
11413 	case CDP_DELAY_STATS_TX_INTERFRAME:
11414 
11415 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
11416 		tstats->intfrm_delay.delay_bucket[delay_index]++;
11417 		return &tstats->intfrm_delay;
11418 
11419 	/* Interframe rx delay ranges */
11420 	case CDP_DELAY_STATS_RX_INTERFRAME:
11421 
11422 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
11423 		rstats->intfrm_delay.delay_bucket[delay_index]++;
11424 		return &rstats->intfrm_delay;
11425 
11426 	/* Ring reap to indication to network stack */
11427 	case CDP_DELAY_STATS_REAP_STACK:
11428 
11429 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
11430 		rstats->to_stack_delay.delay_bucket[delay_index]++;
11431 		return &rstats->to_stack_delay;
11432 	default:
11433 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
11434 			  "%s Incorrect delay mode: %d", __func__, mode);
11435 	}
11436 
11437 	return NULL;
11438 }
11439 
11440 /**
11441  * dp_update_delay_stats() - Update delay statistics in structure
11442  *				and fill min, max and avg delay
11443  *
11444  * @pdev: pdev handle
11445  * @delay: delay in ms
11446  * @tid: tid value
11447  * @mode: type of tx delay mode
 * @ring_id: ring number
11449  * Return: none
11450  */
11451 void dp_update_delay_stats(struct dp_pdev *pdev, uint32_t delay,
11452 			   uint8_t tid, uint8_t mode, uint8_t ring_id)
11453 {
11454 	struct cdp_delay_stats *dstats = NULL;
11455 
11456 	/*
11457 	 * Delay ranges are different for different delay modes
11458 	 * Get the correct index to update delay bucket
11459 	 */
11460 	dstats = dp_fill_delay_buckets(pdev, delay, tid, mode, ring_id);
11461 	if (qdf_unlikely(!dstats))
11462 		return;
11463 
11464 	if (delay != 0) {
11465 		/*
		 * Compute minimum, average and maximum
11467 		 * delay
11468 		 */
11469 		if (delay < dstats->min_delay)
11470 			dstats->min_delay = delay;
11471 
11472 		if (delay > dstats->max_delay)
11473 			dstats->max_delay = delay;
11474 
11475 		/*
11476 		 * Average over delay measured till now
11477 		 */
11478 		if (!dstats->avg_delay)
11479 			dstats->avg_delay = delay;
11480 		else
11481 			dstats->avg_delay = ((delay + dstats->avg_delay) / 2);
11482 	}
11483 }
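
/*
 * Note on the average above: avg_n = (delay_n + avg_{n-1}) / 2 is an
 * exponentially weighted moving average with weight 1/2, i.e.
 * avg_n = delay_n / 2 + delay_{n-1} / 4 + delay_{n-2} / 8 + ..., so
 * recent samples dominate and older ones decay geometrically.
 */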
11484 
11485 /**
11486  * dp_get_peer_mac_list(): function to get peer mac list of vdev
11487  * @soc: Datapath soc handle
11488  * @vdev_id: vdev id
 * @newmac: Table to be filled with client MAC addresses
 * @mac_cnt: No. of MACs required
 *
 * Return: number of client MACs copied
11493  */
11494 uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
11495 			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
11496 			      u_int16_t mac_cnt)
11497 {
11498 	struct dp_vdev *vdev =
11499 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
11500 						   vdev_id);
11501 	struct dp_soc *dp_soc = (struct dp_soc *)soc;
11502 	struct dp_peer *peer;
11503 	uint16_t new_mac_cnt = 0;
11504 
11505 	if (!vdev)
11506 		return new_mac_cnt;
11507 
11508 	qdf_spin_lock_bh(&dp_soc->peer_ref_mutex);
11509 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
11510 		if (peer->bss_peer)
11511 			continue;
11512 		if (new_mac_cnt < mac_cnt) {
11513 			WLAN_ADDR_COPY(newmac[new_mac_cnt], peer->mac_addr.raw);
11514 			new_mac_cnt++;
11515 		}
11516 	}
11517 	qdf_spin_unlock_bh(&dp_soc->peer_ref_mutex);
11518 	return new_mac_cnt;
11519 }
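
/*
 * Illustrative caller (a sketch, not taken from this driver): gather up
 * to eight connected client MACs on vdev 0, skipping the BSS peer.
 *
 *	u_int8_t macs[8][QDF_MAC_ADDR_SIZE];
 *	uint16_t n = dp_get_peer_mac_list(soc, 0, macs, 8);
 */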
11520 
11521 /**
 * dp_pdev_srng_deinit() - de-initialize all pdev srng rings including
11523  *			   monitor rings
11524  * @pdev: Datapath pdev handle
11525  *
11526  */
11527 static void dp_pdev_srng_deinit(struct dp_pdev *pdev)
11528 {
11529 	struct dp_soc *soc = pdev->soc;
11530 	uint8_t i;
11531 
11532 	dp_srng_deinit(soc, &soc->rx_refill_buf_ring[pdev->lmac_id], RXDMA_BUF,
11533 		       pdev->lmac_id);
11534 
11535 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
11536 		dp_deinit_tx_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
11537 
11538 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
11539 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, pdev->pdev_id);
11540 
11541 		wlan_minidump_remove(soc->rxdma_err_dst_ring[lmac_id].base_vaddr_unaligned);
11542 		dp_srng_deinit(soc, &soc->rxdma_err_dst_ring[lmac_id],
11543 			       RXDMA_DST, lmac_id);
11544 	}
11545 
11546 	dp_mon_rings_deinit(pdev);
11547 }
11548 
11549 /**
11550  * dp_pdev_srng_init() - initialize all pdev srng rings including
11551  *			   monitor rings
11552  * @pdev: Datapath pdev handle
11553  *
11554  * return: QDF_STATUS_SUCCESS on success
11555  *	   QDF_STATUS_E_NOMEM on failure
11556  */
11557 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev)
11558 {
11559 	struct dp_soc *soc = pdev->soc;
11560 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
11561 	uint32_t i;
11562 
11563 	soc_cfg_ctx = soc->wlan_cfg_ctx;
11564 
11565 	if (dp_srng_init(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
11566 			 RXDMA_BUF, 0, pdev->lmac_id)) {
11567 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
11568 			  FL("dp_srng_init failed rx refill ring"));
11569 		goto fail1;
11570 	}
11571 
11572 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
11573 		if (dp_init_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
11574 			goto fail1;
11575 	}
11576 
11577 	if (dp_mon_rings_init(soc, pdev)) {
11578 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
11579 			  FL("MONITOR rings setup failed"));
11580 		goto fail1;
11581 	}
11582 
11583 	/* LMAC RxDMA to SW Rings configuration */
11584 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
11585 		/* Only valid for MCL */
11586 		pdev = soc->pdev_list[0];
11587 
11588 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
11589 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, pdev->pdev_id);
11590 		struct dp_srng *srng = &soc->rxdma_err_dst_ring[lmac_id];
11591 
11592 		if (srng->hal_srng)
11593 			continue;
11594 
11595 		if (dp_srng_init(soc, srng, RXDMA_DST, 0, lmac_id)) {
11596 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
11597 				  FL(RNG_ERR "rxdma_err_dst_ring"));
11598 			goto fail1;
11599 		}
11600 		wlan_minidump_log(soc->rxdma_err_dst_ring[lmac_id].base_vaddr_unaligned,
11601 				  soc->rxdma_err_dst_ring[lmac_id].alloc_size,
11602 				  soc->ctrl_psoc,
11603 				  WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
11604 				  "rxdma_err_dst");
11605 	}
11606 	return QDF_STATUS_SUCCESS;
11607 
11608 fail1:
11609 	dp_pdev_srng_deinit(pdev);
11610 	return QDF_STATUS_E_NOMEM;
11611 }
11612 
11613 /**
11614  * dp_pdev_srng_free() - free all pdev srng rings including monitor rings
 * @pdev: Datapath pdev handle
11616  *
11617  */
11618 static void dp_pdev_srng_free(struct dp_pdev *pdev)
11619 {
11620 	struct dp_soc *soc = pdev->soc;
11621 	uint8_t i;
11622 
11623 	dp_srng_free(soc, &soc->rx_refill_buf_ring[pdev->lmac_id]);
11624 	dp_mon_rings_free(pdev);
11625 
11626 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
11627 		dp_free_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
11628 
11629 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
11630 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, pdev->pdev_id);
11631 
11632 		dp_srng_free(soc, &soc->rxdma_err_dst_ring[lmac_id]);
11633 	}
11634 }
11635 
11636 /**
11637  * dp_pdev_srng_alloc() - allocate memory for all pdev srng rings including
11638  *			  monitor rings
 * @pdev: Datapath pdev handle
11640  *
11641  * return: QDF_STATUS_SUCCESS on success
11642  *	   QDF_STATUS_E_NOMEM on failure
11643  */
11644 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev)
11645 {
11646 	struct dp_soc *soc = pdev->soc;
11647 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
11648 	uint32_t ring_size;
11649 	uint32_t i;
11650 
11651 	soc_cfg_ctx = soc->wlan_cfg_ctx;
11652 
11653 	ring_size = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
11654 	if (dp_srng_alloc(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
11655 			  RXDMA_BUF, ring_size, 0)) {
11656 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
11657 			  FL("dp_srng_alloc failed rx refill ring"));
11658 		goto fail1;
11659 	}
11660 
11661 	if (dp_mon_rings_alloc(soc, pdev)) {
11662 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
11663 			  FL("MONITOR rings setup failed"));
11664 		goto fail1;
11665 	}
11666 
11667 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
11668 		if (dp_alloc_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
11669 			goto fail1;
11670 	}
11671 
11672 	ring_size = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
11673 	/* LMAC RxDMA to SW Rings configuration */
11674 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
11675 		/* Only valid for MCL */
11676 		pdev = soc->pdev_list[0];
11677 
11678 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
11679 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, pdev->pdev_id);
11680 		struct dp_srng *srng = &soc->rxdma_err_dst_ring[lmac_id];
11681 
11682 		if (srng->base_vaddr_unaligned)
11683 			continue;
11684 
11685 		if (dp_srng_alloc(soc, srng, RXDMA_DST, ring_size, 0)) {
11686 			QDF_TRACE(QDF_MODULE_ID_DP,
11687 				  QDF_TRACE_LEVEL_ERROR,
11688 				  FL(RNG_ERR "rxdma_err_dst_ring"));
11689 			goto fail1;
11690 		}
11691 	}
11692 
11693 	return QDF_STATUS_SUCCESS;
11694 fail1:
11695 	dp_pdev_srng_free(pdev);
11696 	return QDF_STATUS_E_NOMEM;
11697 }
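
/*
 * The pdev SRNG helpers above mirror the soc-level ones that follow and
 * are meant to run in alloc -> init -> deinit -> free order: memory is
 * carved out at attach time (alloc), hooked to the hardware at init,
 * unhooked at deinit and released at detach (free).
 */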
11698 
11699 /**
11700  * dp_soc_srng_deinit() - de-initialize soc srng rings
11701  * @soc: Datapath soc handle
11702  *
11703  */
11704 static void dp_soc_srng_deinit(struct dp_soc *soc)
11705 {
11706 	uint32_t i;
11707 	/* Free the ring memories */
11708 	/* Common rings */
11709 	wlan_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned);
11710 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
11711 
11712 	/* Tx data rings */
11713 	for (i = 0; i < soc->num_tcl_data_rings; i++)
11714 		dp_deinit_tx_pair_by_index(soc, i);
11715 
11716 	/* TCL command and status rings */
11717 	wlan_minidump_remove(soc->tcl_cmd_credit_ring.base_vaddr_unaligned);
11718 	dp_srng_deinit(soc, &soc->tcl_cmd_credit_ring, TCL_CMD_CREDIT, 0);
11719 	wlan_minidump_remove(soc->tcl_status_ring.base_vaddr_unaligned);
11720 	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
11721 
11722 	/* Rx data rings */
11723 	soc->num_reo_dest_rings =
11724 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
11725 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
11726 		/* TODO: Get number of rings and ring sizes
11727 		 * from wlan_cfg
11728 		 */
11729 		wlan_minidump_remove(soc->reo_dest_ring[i].base_vaddr_unaligned);
11730 		dp_srng_deinit(soc, &soc->reo_dest_ring[i], REO_DST, i);
11731 	}
11732 
11733 	/* REO reinjection ring */
11734 	wlan_minidump_remove(soc->reo_reinject_ring.base_vaddr_unaligned);
11735 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
11736 
11737 	/* Rx release ring */
11738 	wlan_minidump_remove(soc->rx_rel_ring.base_vaddr_unaligned);
11739 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
11740 
11741 	/* Rx exception ring */
11742 	/* TODO: Better to store ring_type and ring_num in
11743 	 * dp_srng during setup
11744 	 */
11745 	wlan_minidump_remove(soc->reo_exception_ring.base_vaddr_unaligned);
11746 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
11747 
11748 	/* REO command and status rings */
11749 	wlan_minidump_remove(soc->reo_cmd_ring.base_vaddr_unaligned);
11750 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
11751 	wlan_minidump_remove(soc->reo_status_ring.base_vaddr_unaligned);
11752 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
11753 }
11754 
11755 /**
11756  * dp_soc_srng_init() - Initialize soc level srng rings
11757  * @soc: Datapath soc handle
11758  *
11759  * return: QDF_STATUS_SUCCESS on success
11760  *	   QDF_STATUS_E_FAILURE on failure
11761  */
11762 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc)
11763 {
11764 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
11765 	uint32_t num_tcl_data_rings, num_reo_dest_rings;
11766 	uint8_t i;
11767 
11768 	soc_cfg_ctx = soc->wlan_cfg_ctx;
11769 
11770 	dp_enable_verbose_debug(soc);
11771 
11772 	/* WBM descriptor release ring */
11773 	if (dp_srng_init(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0)) {
11774 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
11775 			  FL("dp_srng_init failed for wbm_desc_rel_ring"));
11776 		goto fail1;
11777 	}
11778 
11779 	wlan_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
11780 			  soc->wbm_desc_rel_ring.alloc_size,
11781 			  soc->ctrl_psoc,
11782 			  WLAN_MD_DP_SRNG_WBM_DESC_REL,
11783 			  "wbm_desc_rel_ring");
11784 
11785 	/* TCL command and status rings */
11786 	if (dp_srng_init(soc, &soc->tcl_cmd_credit_ring,
11787 			 TCL_CMD_CREDIT, 0, 0)) {
11788 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
11789 			  FL("dp_srng_init failed for tcl_cmd_ring"));
11790 		goto fail1;
11791 	}
11792 
11793 	wlan_minidump_log(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
11794 			  soc->tcl_cmd_credit_ring.alloc_size,
11795 			  soc->ctrl_psoc,
11796 			  WLAN_MD_DP_SRNG_TCL_CMD,
			  "tcl_cmd_credit_ring");
11798 
11799 	if (dp_srng_init(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0)) {
11800 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
11801 			  FL("dp_srng_init failed for tcl_status_ring"));
11802 		goto fail1;
11803 	}
11804 
11805 	wlan_minidump_log(soc->tcl_status_ring.base_vaddr_unaligned,
11806 			  soc->tcl_status_ring.alloc_size,
11807 			  soc->ctrl_psoc,
11808 			  WLAN_MD_DP_SRNG_TCL_STATUS,
			  "tcl_status_ring");
11810 
11811 	/* REO reinjection ring */
11812 	if (dp_srng_init(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0)) {
11813 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
11814 			  FL("dp_srng_init failed for reo_reinject_ring"));
11815 		goto fail1;
11816 	}
11817 
11818 	wlan_minidump_log(soc->reo_reinject_ring.base_vaddr_unaligned,
11819 			  soc->reo_reinject_ring.alloc_size,
11820 			  soc->ctrl_psoc,
11821 			  WLAN_MD_DP_SRNG_REO_REINJECT,
11822 			  "reo_reinject_ring");
11823 
11824 	/* Rx release ring */
11825 	if (dp_srng_init(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0)) {
11826 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
11827 			  FL("dp_srng_init failed for rx_rel_ring"));
11828 		goto fail1;
11829 	}
11830 
11831 	wlan_minidump_log(soc->rx_rel_ring.base_vaddr_unaligned,
11832 			  soc->rx_rel_ring.alloc_size,
11833 			  soc->ctrl_psoc,
11834 			  WLAN_MD_DP_SRNG_RX_REL,
11835 			  "reo_release_ring");
11836 
11837 	/* Rx exception ring */
11838 	if (dp_srng_init(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0,
11839 			 MAX_REO_DEST_RINGS)) {
11840 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
11841 			  FL("dp_srng_init failed for reo_exception_ring"));
11842 		goto fail1;
11843 	}
11844 
11845 	wlan_minidump_log(soc->reo_exception_ring.base_vaddr_unaligned,
11846 			  soc->reo_exception_ring.alloc_size,
11847 			  soc->ctrl_psoc,
11848 			  WLAN_MD_DP_SRNG_REO_EXCEPTION,
11849 			  "reo_exception_ring");
11850 
11851 	/* REO command and status rings */
11852 	if (dp_srng_init(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0)) {
11853 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
11854 			  FL("dp_srng_init failed for reo_cmd_ring"));
11855 		goto fail1;
11856 	}
11857 
11858 	wlan_minidump_log(soc->reo_cmd_ring.base_vaddr_unaligned,
11859 			  soc->reo_cmd_ring.alloc_size,
11860 			  soc->ctrl_psoc,
11861 			  WLAN_MD_DP_SRNG_REO_CMD,
11862 			  "reo_cmd_ring");
11863 
11864 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
11865 	TAILQ_INIT(&soc->rx.reo_cmd_list);
11866 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
11867 
11868 	if (dp_srng_init(soc, &soc->reo_status_ring, REO_STATUS, 0, 0)) {
11869 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
11870 			  FL("dp_srng_init failed for reo_status_ring"));
11871 		goto fail1;
11872 	}
11873 
11874 	wlan_minidump_log(soc->reo_status_ring.base_vaddr_unaligned,
11875 			  soc->reo_status_ring.alloc_size,
11876 			  soc->ctrl_psoc,
11877 			  WLAN_MD_DP_SRNG_REO_STATUS,
11878 			  "reo_status_ring");
11879 
11880 	num_tcl_data_rings = wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
11881 	num_reo_dest_rings = wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
11882 
11883 	for (i = 0; i < num_tcl_data_rings; i++) {
11884 		if (dp_init_tx_ring_pair_by_index(soc, i))
11885 			goto fail1;
11886 	}
11887 
11888 	dp_create_ext_stats_event(soc);
11889 
11890 	for (i = 0; i < num_reo_dest_rings; i++) {
11891 		/* Initialize REO destination ring */
11892 		if (dp_srng_init(soc, &soc->reo_dest_ring[i], REO_DST, i, 0)) {
11893 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("dp_srng_init failed for reo_dest_ring"));
11895 			goto fail1;
11896 		}
11897 
11898 		wlan_minidump_log(soc->reo_dest_ring[i].base_vaddr_unaligned,
11899 				  soc->reo_dest_ring[i].alloc_size,
11900 				  soc->ctrl_psoc,
11901 				  WLAN_MD_DP_SRNG_REO_DEST,
11902 				  "reo_dest_ring");
11903 	}
11904 
11905 	return QDF_STATUS_SUCCESS;
11906 fail1:
11907 	/*
11908 	 * Cleanup will be done as part of soc_detach, which will
11909 	 * be called on pdev attach failure
11910 	 */
11911 	dp_soc_srng_deinit(soc);
11912 	return QDF_STATUS_E_FAILURE;
11913 }
11914 
11915 /**
11916  * dp_soc_srng_free() - free soc level srng rings
11917  * @soc: Datapath soc handle
11918  *
11919  */
11920 static void dp_soc_srng_free(struct dp_soc *soc)
11921 {
11922 	uint32_t i;
11923 
11924 	dp_srng_free(soc, &soc->wbm_desc_rel_ring);
11925 
11926 	for (i = 0; i < soc->num_tcl_data_rings; i++)
11927 		dp_free_tx_ring_pair_by_index(soc, i);
11928 
11929 	dp_srng_free(soc, &soc->tcl_cmd_credit_ring);
11930 	dp_srng_free(soc, &soc->tcl_status_ring);
11931 
11932 	for (i = 0; i < soc->num_reo_dest_rings; i++)
11933 		dp_srng_free(soc, &soc->reo_dest_ring[i]);
11934 
11935 	dp_srng_free(soc, &soc->reo_reinject_ring);
11936 	dp_srng_free(soc, &soc->rx_rel_ring);
11937 	dp_srng_free(soc, &soc->reo_exception_ring);
11938 	dp_srng_free(soc, &soc->reo_cmd_ring);
11939 	dp_srng_free(soc, &soc->reo_status_ring);
11940 }
11941 
11942 /**
11943  * dp_soc_srng_alloc() - Allocate memory for soc level srng rings
11944  * @soc: Datapath soc handle
11945  *
11946  * return: QDF_STATUS_SUCCESS on success
11947  *	   QDF_STATUS_E_NOMEM on failure
11948  */
11949 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc)
11950 {
11951 	uint32_t entries;
11952 	uint32_t i;
11953 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
11954 	uint32_t num_tcl_data_rings, num_reo_dest_rings;
11955 	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
11956 	uint32_t tx_comp_ring_size, tx_ring_size, reo_dst_ring_size;
11957 
11958 	soc_cfg_ctx = soc->wlan_cfg_ctx;
11959 
11960 	/* sw2wbm link descriptor release ring */
11961 	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
11962 	if (dp_srng_alloc(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE,
11963 			  entries, 0)) {
11964 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
11965 			  FL("dp_srng_alloc failed for wbm_desc_rel_ring"));
11966 		goto fail1;
11967 	}
11968 
11969 	entries = wlan_cfg_get_dp_soc_tcl_cmd_credit_ring_size(soc_cfg_ctx);
11970 	/* TCL command and status rings */
11971 	if (dp_srng_alloc(soc, &soc->tcl_cmd_credit_ring, TCL_CMD_CREDIT,
11972 			  entries, 0)) {
11973 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
11974 			  FL("dp_srng_alloc failed for tcl_cmd_ring"));
11975 		goto fail1;
11976 	}
11977 
11978 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
11979 	if (dp_srng_alloc(soc, &soc->tcl_status_ring, TCL_STATUS, entries,
11980 			  0)) {
11981 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
11982 			  FL("dp_srng_alloc failed for tcl_status_ring"));
11983 		goto fail1;
11984 	}
11985 
11986 	/* REO reinjection ring */
11987 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
11988 	if (dp_srng_alloc(soc, &soc->reo_reinject_ring, REO_REINJECT,
11989 			  entries, 0)) {
11990 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
11991 			  FL("dp_srng_alloc failed for reo_reinject_ring"));
11992 		goto fail1;
11993 	}
11994 
11995 	/* Rx release ring */
11996 	entries = wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx);
11997 	if (dp_srng_alloc(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
11998 			  entries, 0)) {
11999 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12000 			  FL("dp_srng_alloc failed for rx_rel_ring"));
12001 		goto fail1;
12002 	}
12003 
12004 	/* Rx exception ring */
12005 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
12006 	if (dp_srng_alloc(soc, &soc->reo_exception_ring, REO_EXCEPTION,
12007 			  entries, 0)) {
12008 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12009 			  FL("dp_srng_alloc failed for reo_exception_ring"));
12010 		goto fail1;
12011 	}
12012 
12013 	/* REO command and status rings */
12014 	entries = wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx);
12015 	if (dp_srng_alloc(soc, &soc->reo_cmd_ring, REO_CMD, entries, 0)) {
12016 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12017 			  FL("dp_srng_alloc failed for reo_cmd_ring"));
12018 		goto fail1;
12019 	}
12020 
12021 	entries = wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx);
12022 	if (dp_srng_alloc(soc, &soc->reo_status_ring, REO_STATUS,
12023 			  entries, 0)) {
12024 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12025 			  FL("dp_srng_alloc failed for reo_status_ring"));
12026 		goto fail1;
12027 	}
12028 
12029 	num_tcl_data_rings = wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
12030 	num_reo_dest_rings = wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
12031 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
12032 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
12033 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc_cfg_ctx);
12034 
12035 	/* Disable cached desc if NSS offload is enabled */
12036 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
12037 		cached = 0;
12038 
12039 	for (i = 0; i < num_tcl_data_rings; i++) {
12040 		if (dp_alloc_tx_ring_pair_by_index(soc, i))
12041 			goto fail1;
12042 	}
12043 
12044 	soc->num_tcl_data_rings = num_tcl_data_rings;
12045 
12046 	for (i = 0; i < num_reo_dest_rings; i++) {
12047 		/* Setup REO destination ring */
12048 		if (dp_srng_alloc(soc, &soc->reo_dest_ring[i], REO_DST,
12049 				  reo_dst_ring_size, cached)) {
12050 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12051 				  FL("dp_srng_alloc failed for reo_dest_ring"));
12052 			goto fail1;
12053 		}
12054 	}
12055 	soc->num_reo_dest_rings = num_reo_dest_rings;
12056 
12057 	return QDF_STATUS_SUCCESS;
12058 
12059 fail1:
12060 	dp_soc_srng_free(soc);
12061 	return QDF_STATUS_E_NOMEM;
12062 }
12063 
12064 /**
12065  * dp_soc_cfg_init() - initialize target specific configuration
12066  *		       during dp_soc_init
12067  * @soc: dp soc handle
12068  */
12069 static void dp_soc_cfg_init(struct dp_soc *soc)
12070 {
12071 	int target_type;
12072 
12073 	target_type = hal_get_target_type(soc->hal_soc);
12074 	switch (target_type) {
12075 	case TARGET_TYPE_QCA6290:
12076 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12077 					       REO_DST_RING_SIZE_QCA6290);
12078 		soc->ast_override_support = 1;
12079 		soc->da_war_enabled = false;
12080 		break;
12081 	case TARGET_TYPE_QCA6390:
12082 	case TARGET_TYPE_QCA6490:
12083 	case TARGET_TYPE_QCA6750:
12084 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12085 					       REO_DST_RING_SIZE_QCA6290);
12086 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
12087 		soc->ast_override_support = 1;
12088 		if (soc->cdp_soc.ol_ops->get_con_mode &&
12089 		    soc->cdp_soc.ol_ops->get_con_mode() ==
12090 		    QDF_GLOBAL_MONITOR_MODE) {
12091 			int int_ctx;
12092 
12093 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
12094 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
12095 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
12096 			}
12097 		}
12098 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
12099 		break;
12100 	case TARGET_TYPE_QCA8074:
12101 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
12102 							   MON_BUF_MIN_ENTRIES);
12103 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12104 					       REO_DST_RING_SIZE_QCA8074);
12105 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
12106 		soc->da_war_enabled = true;
12107 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
12108 		break;
12109 	case TARGET_TYPE_QCA8074V2:
12110 	case TARGET_TYPE_QCA6018:
12111 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
12112 							   MON_BUF_MIN_ENTRIES);
12113 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12114 					       REO_DST_RING_SIZE_QCA8074);
12115 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
12116 		soc->hw_nac_monitor_support = 1;
12117 		soc->ast_override_support = 1;
12118 		soc->per_tid_basize_max_tid = 8;
12119 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
12120 		soc->da_war_enabled = false;
12121 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
12122 		break;
12123 	case TARGET_TYPE_QCN9000:
12124 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
12125 							   MON_BUF_MIN_ENTRIES);
12126 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12127 					       REO_DST_RING_SIZE_QCN9000);
12128 		soc->ast_override_support = 1;
12129 		soc->da_war_enabled = false;
12130 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
12131 		soc->hw_nac_monitor_support = 1;
12132 		soc->per_tid_basize_max_tid = 8;
12133 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
12134 		soc->lmac_polled_mode = 0;
12135 		soc->wbm_release_desc_rx_sg_support = 1;
12136 		if (cfg_get(soc->ctrl_psoc, CFG_DP_FULL_MON_MODE))
12137 			soc->full_mon_mode = true;
12138 		break;
12139 	case TARGET_TYPE_QCA5018:
12140 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12141 					       REO_DST_RING_SIZE_QCA8074);
12142 		soc->ast_override_support = 1;
12143 		soc->da_war_enabled = false;
12144 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
12145 		soc->hw_nac_monitor_support = 1;
12146 		soc->per_tid_basize_max_tid = 8;
12147 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
12148 		soc->disable_mac1_intr = 1;
12149 		soc->disable_mac2_intr = 1;
12150 		break;
12151 	default:
12152 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
12153 		qdf_assert_always(0);
12154 		break;
12155 	}
12156 }
12157 
12158 /**
12159  * dp_soc_cfg_attach() - set target specific configuration in
12160  *			 dp soc cfg.
12161  * @soc: dp soc handle
12162  */
12163 static void dp_soc_cfg_attach(struct dp_soc *soc)
12164 {
12165 	int target_type;
12166 	int nss_cfg = 0;
12167 
12168 	target_type = hal_get_target_type(soc->hal_soc);
12169 	switch (target_type) {
12170 	case TARGET_TYPE_QCA6290:
12171 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12172 					       REO_DST_RING_SIZE_QCA6290);
12173 		break;
12174 	case TARGET_TYPE_QCA6390:
12175 	case TARGET_TYPE_QCA6490:
12176 	case TARGET_TYPE_QCA6750:
12177 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12178 					       REO_DST_RING_SIZE_QCA6290);
12179 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
12180 		break;
12181 	case TARGET_TYPE_QCA8074:
12182 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
12183 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12184 					       REO_DST_RING_SIZE_QCA8074);
12185 		break;
12186 	case TARGET_TYPE_QCA8074V2:
12187 	case TARGET_TYPE_QCA6018:
12188 	case TARGET_TYPE_QCA5018:
12189 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
12190 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12191 					       REO_DST_RING_SIZE_QCA8074);
12192 		break;
12193 	case TARGET_TYPE_QCN9000:
12194 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
12195 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12196 					       REO_DST_RING_SIZE_QCN9000);
12197 		break;
12198 	default:
12199 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
12200 		qdf_assert_always(0);
12201 		break;
12202 	}
12203 
12204 	if (soc->cdp_soc.ol_ops->get_soc_nss_cfg)
12205 		nss_cfg = soc->cdp_soc.ol_ops->get_soc_nss_cfg(soc->ctrl_psoc);
12206 
12207 	wlan_cfg_set_dp_soc_nss_cfg(soc->wlan_cfg_ctx, nss_cfg);
12208 
12209 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
12210 		wlan_cfg_set_num_tx_desc_pool(soc->wlan_cfg_ctx, 0);
12211 		wlan_cfg_set_num_tx_ext_desc_pool(soc->wlan_cfg_ctx, 0);
12212 		wlan_cfg_set_num_tx_desc(soc->wlan_cfg_ctx, 0);
12213 		wlan_cfg_set_num_tx_ext_desc(soc->wlan_cfg_ctx, 0);
12214 	}
12215 }
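
/*
 * When the SoC is fully NSS-offloaded, the host tx descriptor pool sizes
 * are forced to zero above: transmit is handled by the offloaded (NSS)
 * datapath, so the host pools would go unused.
 */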
12216 
12217 static inline QDF_STATUS dp_pdev_init(struct cdp_soc_t *txrx_soc,
12218 				      HTC_HANDLE htc_handle,
12219 				      qdf_device_t qdf_osdev,
12220 				      uint8_t pdev_id)
12221 {
12222 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
12223 	int nss_cfg;
12224 	void *sojourn_buf;
12225 	QDF_STATUS ret;
12226 
12227 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
12228 	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
12229 
12230 	soc_cfg_ctx = soc->wlan_cfg_ctx;
12231 	pdev->soc = soc;
12232 	pdev->pdev_id = pdev_id;
12233 
12234 	pdev->filter = dp_mon_filter_alloc(pdev);
12235 	if (!pdev->filter) {
12236 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12237 			  FL("Memory allocation failed for monitor filters"));
12238 		ret = QDF_STATUS_E_NOMEM;
12239 		goto fail0;
12240 	}
12241 
12242 	/*
12243 	 * Variable to prevent double pdev deinitialization during
	 * radio detach execution, i.e. in the absence of any vdev.
12245 	 */
12246 	pdev->pdev_deinit = 0;
12247 
12248 	if (dp_wdi_event_attach(pdev)) {
12249 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "dp_wdi_event_attach failed");
12251 		goto fail1;
12252 	}
12253 
12254 	if (dp_pdev_srng_init(pdev)) {
12255 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12256 			  FL("Failed to initialize pdev srng rings"));
12257 		goto fail2;
12258 	}
12259 
12260 	/* Initialize descriptors in TCL Rings used by IPA */
12261 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
12262 		hal_tx_init_data_ring(soc->hal_soc,
12263 				      soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng);
12264 
12265 	/*
12266 	 * Initialize command/credit ring descriptor
12267 	 * Command/CREDIT ring also used for sending DATA cmds
12268 	 */
12269 	hal_tx_init_cmd_credit_ring(soc->hal_soc,
12270 				    soc->tcl_cmd_credit_ring.hal_srng);
12271 
12272 	dp_tx_pdev_init(pdev);
	/*
	 * Allocate a placeholder peer used to account for rx frames
	 * that arrive with an unknown (invalid) peer id.
	 */
12277 	pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer));
12278 
12279 	if (!pdev->invalid_peer) {
12280 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12281 			  FL("Invalid peer memory allocation failed"));
12282 		goto fail3;
12283 	}
12284 
12285 	/*
12286 	 * set nss pdev config based on soc config
12287 	 */
12288 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
12289 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
12290 					 (nss_cfg & (1 << pdev_id)));
12291 
12292 	pdev->target_pdev_id =
12293 		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
12294 
12295 	if (soc->preferred_hw_mode == WMI_HOST_HW_MODE_2G_PHYB &&
12296 	    pdev->lmac_id == PHYB_2G_LMAC_ID) {
12297 		pdev->target_pdev_id = PHYB_2G_TARGET_PDEV_ID;
12298 	}
12299 
12300 	/* Reset the cpu ring map if radio is NSS offloaded */
12301 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
12302 		dp_soc_reset_cpu_ring_map(soc);
12303 		dp_soc_reset_intr_mask(soc);
12304 	}
12305 
12306 	TAILQ_INIT(&pdev->vdev_list);
12307 	qdf_spinlock_create(&pdev->vdev_list_lock);
12308 	pdev->vdev_count = 0;
12309 
12310 	qdf_spinlock_create(&pdev->tx_mutex);
12311 	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
12312 	TAILQ_INIT(&pdev->neighbour_peers_list);
12313 	pdev->neighbour_peers_added = false;
12314 	pdev->monitor_configured = false;
12315 
12316 	DP_STATS_INIT(pdev);
12317 
12318 	/* Monitor filter init */
12319 	pdev->mon_filter_mode = MON_FILTER_ALL;
12320 	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
12321 	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
12322 	pdev->fp_data_filter = FILTER_DATA_ALL;
12323 	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
12324 	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
12325 	pdev->mo_data_filter = FILTER_DATA_ALL;
12326 
12327 	dp_local_peer_id_pool_init(pdev);
12328 
12329 	dp_dscp_tid_map_setup(pdev);
12330 	dp_pcp_tid_map_setup(pdev);
12331 
12332 	/* set the reo destination during initialization */
12333 	pdev->reo_dest = pdev->pdev_id + 1;
12334 
12335 	/*
12336 	 * initialize ppdu tlv list
12337 	 */
12338 	TAILQ_INIT(&pdev->ppdu_info_list);
12339 	pdev->tlv_count = 0;
12340 	pdev->list_depth = 0;
12341 
12342 	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
12343 
12344 	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
12345 			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
12346 			      TRUE);
12347 
12348 	if (!pdev->sojourn_buf) {
12349 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12350 			  FL("Failed to allocate sojourn buf"));
12351 		goto fail4;
12352 	}
12353 	sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
12354 	qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));
12355 
	/* initialize cal client timer */
12357 	dp_cal_client_attach(&pdev->cal_client_ctx,
12358 			     dp_pdev_to_cdp_pdev(pdev),
12359 			     pdev->soc->osdev,
12360 			     &dp_iterate_update_peer_list);
12361 	qdf_event_create(&pdev->fw_peer_stats_event);
12362 
12363 	pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
12364 	if (dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS)
12365 		goto fail5;
12366 
12367 	if (dp_rxdma_ring_setup(soc, pdev)) {
12368 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12369 			  FL("RXDMA ring config failed"));
12370 		goto fail6;
12371 	}
12372 
12373 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
12374 		goto fail7;
12375 
12376 	if (dp_ipa_ring_resource_setup(soc, pdev))
12377 		goto fail8;
12378 
12379 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
12380 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12381 			  FL("dp_ipa_uc_attach failed"));
12382 		goto fail8;
12383 	}
12384 
12385 	ret = dp_rx_fst_attach(soc, pdev);
12386 	if ((ret != QDF_STATUS_SUCCESS) &&
12387 	    (ret != QDF_STATUS_E_NOSUPPORT)) {
12388 		QDF_TRACE(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_ERROR,
12389 			  "RX Flow Search Table attach failed: pdev %d err %d",
12390 			  pdev_id, ret);
12391 		goto fail9;
12392 	}
12393 
12394 	/* initialize sw rx descriptors */
12395 	dp_rx_pdev_desc_pool_init(pdev);
12396 	/* initialize sw monitor rx descriptors */
12397 	dp_rx_pdev_mon_desc_pool_init(pdev);
12398 	/* allocate buffers and replenish the RxDMA ring */
12399 	dp_rx_pdev_buffers_alloc(pdev);
12400 	/* allocate buffers and replenish the monitor RxDMA ring */
12401 	dp_rx_pdev_mon_buffers_alloc(pdev);
12402 
12403 	dp_init_tso_stats(pdev);
12404 	dp_tx_ppdu_stats_attach(pdev);
12405 
12406 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
12407 		qdf_dma_mem_stats_read(),
12408 		qdf_heap_mem_stats_read(),
12409 		qdf_skb_mem_stats_read());
12410 
12411 	return QDF_STATUS_SUCCESS;
12412 fail9:
12413 	dp_ipa_uc_detach(soc, pdev);
12414 fail8:
12415 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
12416 fail7:
12417 	dp_rxdma_ring_cleanup(soc, pdev);
12418 fail6:
12419 	dp_htt_ppdu_stats_detach(pdev);
12420 fail5:
12421 	qdf_nbuf_free(pdev->sojourn_buf);
12422 fail4:
12423 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
12424 	qdf_spinlock_destroy(&pdev->tx_mutex);
12425 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
12426 	qdf_mem_free(pdev->invalid_peer);
12427 fail3:
12428 	dp_pdev_srng_deinit(pdev);
12429 fail2:
12430 	dp_wdi_event_detach(pdev);
12431 fail1:
12432 	dp_mon_filter_dealloc(pdev);
12433 fail0:
12434 	return QDF_STATUS_E_FAILURE;
12435 }
12436 
12437 /*
 * dp_pdev_init_wifi3() - Init txrx pdev
 * @txrx_soc: Datapath soc handle
 * @htc_handle: HTC handle for host-target interface
 * @qdf_osdev: QDF OS device
 * @pdev_id: pdev id
12442  *
12443  * Return: QDF_STATUS
12444  */
12445 static QDF_STATUS dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
12446 				     HTC_HANDLE htc_handle,
12447 				     qdf_device_t qdf_osdev,
12448 				     uint8_t pdev_id)
12449 {
12450 	return dp_pdev_init(txrx_soc, htc_handle, qdf_osdev, pdev_id);
12451 }
12452 
12453