xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision a2d910900d3182481ddd6fa24ef7a7cf04e14f69)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <qdf_net_types.h>
22 #include <qdf_lro.h>
23 #include <qdf_module.h>
24 #include <hal_hw_headers.h>
25 #include <hal_api.h>
26 #include <hif.h>
27 #include <htt.h>
28 #include <wdi_event.h>
29 #include <queue.h>
30 #include "dp_types.h"
31 #include "dp_internal.h"
32 #include "dp_tx.h"
33 #include "dp_tx_desc.h"
34 #include "dp_rx.h"
35 #include "dp_rx_mon.h"
36 #ifdef DP_RATETABLE_SUPPORT
37 #include "dp_ratetable.h"
38 #endif
39 #include <cdp_txrx_handle.h>
40 #include <wlan_cfg.h>
41 #include <wlan_utility.h>
42 #include "cdp_txrx_cmn_struct.h"
43 #include "cdp_txrx_stats_struct.h"
44 #include "cdp_txrx_cmn_reg.h"
45 #include <qdf_util.h>
46 #include "dp_peer.h"
47 #include "dp_rx_mon.h"
48 #include "htt_stats.h"
49 #include "dp_htt.h"
50 #ifdef WLAN_SUPPORT_RX_FISA
51 #include <dp_fisa_rx.h>
52 #endif
53 #include "htt_ppdu_stats.h"
54 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
55 #include "cfg_ucfg_api.h"
56 #include "dp_mon_filter.h"
57 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
58 #include "cdp_txrx_flow_ctrl_v2.h"
59 #else
/**
 * cdp_dump_flow_pool_info() - no-op stub when TX flow control v2 is
 *	compiled out (QCA_LL_TX_FLOW_CONTROL_V2 not defined)
 * @soc: CDP soc handle (unused)
 *
 * Return: none
 */
static inline void
cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
{
}
65 #endif
66 #include "dp_ipa.h"
67 #include "dp_cal_client_api.h"
68 #ifdef FEATURE_WDS
69 #include "dp_txrx_wds.h"
70 #endif
71 #ifdef ATH_SUPPORT_IQUE
72 #include "dp_txrx_me.h"
73 #endif
74 #if defined(DP_CON_MON)
75 #ifndef REMOVE_PKT_LOG
76 #include <pktlog_ac_api.h>
77 #include <pktlog_ac.h>
78 #endif
79 #endif
80 
81 #ifdef WLAN_FEATURE_STATS_EXT
82 #define INIT_RX_HW_STATS_LOCK(_soc) \
83 	qdf_spinlock_create(&(_soc)->rx_hw_stats_lock)
84 #define DEINIT_RX_HW_STATS_LOCK(_soc) \
85 	qdf_spinlock_destroy(&(_soc)->rx_hw_stats_lock)
86 #else
87 #define INIT_RX_HW_STATS_LOCK(_soc)  /* no op */
88 #define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */
89 #endif
90 
91 #ifdef DP_PEER_EXTENDED_API
92 #define SET_PEER_REF_CNT_ONE(_peer) \
93 	qdf_atomic_set(&(_peer)->ref_cnt, 1)
94 #else
95 #define SET_PEER_REF_CNT_ONE(_peer)
96 #endif
97 
98 /*
99  * The max size of cdp_peer_stats_param_t is limited to 16 bytes.
100  * If the buffer size is exceeding this size limit,
101  * dp_txrx_get_peer_stats is to be used instead.
102  */
103 QDF_COMPILE_TIME_ASSERT(cdp_peer_stats_param_t_max_size,
104 			(sizeof(cdp_peer_stats_param_t) <= 16));
105 
106 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
107 /*
108  * If WLAN_CFG_INT_NUM_CONTEXTS is changed, HIF_NUM_INT_CONTEXTS
109  * also should be updated accordingly
110  */
111 QDF_COMPILE_TIME_ASSERT(num_intr_grps,
112 			HIF_NUM_INT_CONTEXTS == WLAN_CFG_INT_NUM_CONTEXTS);
113 
114 /*
115  * HIF_EVENT_HIST_MAX should always be power of 2
116  */
117 QDF_COMPILE_TIME_ASSERT(hif_event_history_size,
118 			(HIF_EVENT_HIST_MAX & (HIF_EVENT_HIST_MAX - 1)) == 0);
119 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
120 
121 /*
122  * If WLAN_CFG_INT_NUM_CONTEXTS is changed,
123  * WLAN_CFG_INT_NUM_CONTEXTS_MAX should also be updated
124  */
125 QDF_COMPILE_TIME_ASSERT(wlan_cfg_num_int_ctxs,
126 			WLAN_CFG_INT_NUM_CONTEXTS_MAX >=
127 			WLAN_CFG_INT_NUM_CONTEXTS);
128 
129 #ifdef WLAN_RX_PKT_CAPTURE_ENH
130 #include "dp_rx_mon_feature.h"
131 #else
/*
 * dp_config_enh_rx_capture()- API to enable/disable enhanced rx capture
 * @pdev_handle: DP_PDEV handle (unused in this stub)
 * @val: user provided value (unused in this stub)
 *
 * Stub compiled when WLAN_RX_PKT_CAPTURE_ENH is disabled.
 *
 * Return: QDF_STATUS_E_INVAL always, since the feature is unavailable
 */
static QDF_STATUS
dp_config_enh_rx_capture(struct dp_pdev *pdev_handle, uint8_t val)
{
	return QDF_STATUS_E_INVAL;
}
144 #endif /* WLAN_RX_PKT_CAPTURE_ENH */
145 
146 #ifdef WLAN_TX_PKT_CAPTURE_ENH
147 #include "dp_tx_capture.h"
148 #else
/*
 * dp_config_enh_tx_capture()- API to enable/disable enhanced tx capture
 * @pdev_handle: DP_PDEV handle (unused in this stub)
 * @val: user provided value (unused in this stub)
 *
 * Stub compiled when WLAN_TX_PKT_CAPTURE_ENH is disabled.
 *
 * Return: QDF_STATUS_E_INVAL always, since the feature is unavailable
 */
static QDF_STATUS
dp_config_enh_tx_capture(struct dp_pdev *pdev_handle, uint8_t val)
{
	return QDF_STATUS_E_INVAL;
}
161 #endif
162 
163 static void dp_pdev_srng_deinit(struct dp_pdev *pdev);
164 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev);
165 static void dp_pdev_srng_free(struct dp_pdev *pdev);
166 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev);
167 
168 static void dp_soc_srng_deinit(struct dp_soc *soc);
169 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc);
170 static void dp_soc_srng_free(struct dp_soc *soc);
171 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc);
172 
173 static void dp_soc_cfg_init(struct dp_soc *soc);
174 static void dp_soc_cfg_attach(struct dp_soc *soc);
175 
176 static inline
177 QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
178 				HTC_HANDLE htc_handle,
179 				qdf_device_t qdf_osdev,
180 				uint8_t pdev_id);
181 
182 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id);
183 
184 static QDF_STATUS
185 dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
186 		   HTC_HANDLE htc_handle,
187 		   qdf_device_t qdf_osdev,
188 		   uint8_t pdev_id);
189 
190 static QDF_STATUS
191 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id, int force);
192 
193 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc);
194 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc);
195 
196 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
197 		  struct hif_opaque_softc *hif_handle);
198 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
199 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc,
200 				       uint8_t pdev_id,
201 				       int force);
202 static struct dp_soc *
203 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
204 	      struct hif_opaque_softc *hif_handle,
205 	      HTC_HANDLE htc_handle,
206 	      qdf_device_t qdf_osdev,
207 	      struct ol_if_ops *ol_ops, uint16_t device_id);
208 static void dp_pktlogmod_exit(struct dp_pdev *handle);
209 static inline QDF_STATUS dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl,
210 					      uint8_t vdev_id,
211 					      uint8_t *peer_mac_addr);
212 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
213 				       uint8_t vdev_id,
214 				       uint8_t *peer_mac, uint32_t bitmap);
215 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
216 				bool unmap_only);
217 #ifdef ENABLE_VERBOSE_DEBUG
218 bool is_dp_verbose_debug_enabled;
219 #endif
220 
221 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
222 static void dp_cfr_filter(struct cdp_soc_t *soc_hdl,
223 			  uint8_t pdev_id,
224 			  bool enable,
225 			  struct cdp_monitor_filter *filter_val);
226 static bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
227 static void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
228 			   bool enable);
229 static inline void
230 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
231 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats);
232 static inline void
233 dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
234 static inline void
235 dp_enable_mon_reap_timer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
236 			 bool enable);
237 #endif
238 static inline bool
239 dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev);
240 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
241 					    enum hal_ring_type ring_type,
242 					    int ring_num);
243 #define DP_INTR_POLL_TIMER_MS	5
244 
245 /* Generic AST entry aging timer value */
246 #define DP_AST_AGING_TIMER_DEFAULT_MS	1000
247 #define DP_MCS_LENGTH (6*MAX_MCS)
248 
249 #define DP_CURR_FW_STATS_AVAIL 19
250 #define DP_HTT_DBG_EXT_STATS_MAX 256
251 #define DP_MAX_SLEEP_TIME 100
252 #ifndef QCA_WIFI_3_0_EMU
253 #define SUSPEND_DRAIN_WAIT 500
254 #else
255 #define SUSPEND_DRAIN_WAIT 3000
256 #endif
257 
258 #ifdef IPA_OFFLOAD
259 /* Exclude IPA rings from the interrupt context */
260 #define TX_RING_MASK_VAL	0xb
261 #define RX_RING_MASK_VAL	0x7
262 #else
263 #define TX_RING_MASK_VAL	0xF
264 #define RX_RING_MASK_VAL	0xF
265 #endif
266 
267 #define STR_MAXLEN	64
268 
269 #define RNG_ERR		"SRNG setup failed for"
270 
271 /* Threshold for peer's cached buf queue beyond which frames are dropped */
272 #define DP_RX_CACHED_BUFQ_THRESH 64
273 
274 /* Budget to reap monitor status ring */
275 #define DP_MON_REAP_BUDGET 1024
276 
277 /**
278  * default_dscp_tid_map - Default DSCP-TID mapping
279  *
280  * DSCP        TID
281  * 000000      0
282  * 001000      1
283  * 010000      2
284  * 011000      3
285  * 100000      4
286  * 101000      5
287  * 110000      6
288  * 111000      7
289  */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
	0, 0, 0, 0, 0, 0, 0, 0,	/* DSCP  0- 7 -> TID 0 */
	1, 1, 1, 1, 1, 1, 1, 1,	/* DSCP  8-15 -> TID 1 */
	2, 2, 2, 2, 2, 2, 2, 2,	/* DSCP 16-23 -> TID 2 */
	3, 3, 3, 3, 3, 3, 3, 3,	/* DSCP 24-31 -> TID 3 */
	4, 4, 4, 4, 4, 4, 4, 4,	/* DSCP 32-39 -> TID 4 */
	5, 5, 5, 5, 5, 5, 5, 5,	/* DSCP 40-47 -> TID 5 */
	6, 6, 6, 6, 6, 6, 6, 6,	/* DSCP 48-55 -> TID 6 */
	7, 7, 7, 7, 7, 7, 7, 7,	/* DSCP 56-63 -> TID 7 */
};
300 
301 /**
302  * default_pcp_tid_map - Default PCP-TID mapping
303  *
304  * PCP     TID
305  * 000      0
306  * 001      1
307  * 010      2
308  * 011      3
309  * 100      4
310  * 101      5
311  * 110      6
312  * 111      7
313  */
static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
	0, 1, 2, 3, 4, 5, 6, 7,	/* PCP n maps 1:1 to TID n */
};
317 
/**
 * dp_cpu_ring_map - CPU-to-TX-ring map: one row per map id
 * (DP_NSS_CPU_RING_MAP_MAX rows), one column per interrupt context
 * (WLAN_CFG_INT_NUM_CONTEXTS_MAX columns); each value is a TCL ring id.
 *
 * NOTE(review): which row is selected for a given NSS offload
 * configuration is decided outside this file — confirm before editing
 * row contents or ordering.
 */
uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	/* Extra row used when TX packet capture enhancement is compiled in */
	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
#endif
};
332 
/**
 * enum dp_stats_type - Select the type of statistics
 * @STATS_FW: firmware-provided statistics
 * @STATS_HOST: host-maintained statistics
 * @STATS_TYPE_MAX: column count of dp_stats_mapping_table
 */
enum dp_stats_type {
	STATS_FW = 0,
	STATS_HOST = 1,
	STATS_TYPE_MAX = 2,
};
341 
/**
 * enum dp_fw_stats - General Firmware statistics options
 * @TXRX_FW_STATS_INVALID: marks table rows that have no firmware statistic
 */
enum dp_fw_stats {
	TXRX_FW_STATS_INVALID	= -1,
};
349 
/**
 * dp_stats_mapping_table - Firmware and Host statistics
 * currently supported
 *
 * Each row pairs a firmware stats id (STATS_FW column) with a host stats
 * id (STATS_HOST column); the *_INVALID sentinels mark the unsupported
 * side of a row.
 *
 * NOTE(review): row position appears to act as an external stats index —
 * confirm with callers before reordering or inserting rows.
 */
const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
	/* Last ENUM for HTT FW STATS */
	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
	{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_FSE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_HAL_REG_WRITE_STATS},
	{HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT, TXRX_HOST_STATS_INVALID}
};
392 
393 /* MCL specific functions */
394 #if defined(DP_CON_MON)
395 /**
396  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
397  * @soc: pointer to dp_soc handle
398  * @intr_ctx_num: interrupt context number for which mon mask is needed
399  *
400  * For MCL, monitor mode rings are being processed in timer contexts (polled).
401  * This function is returning 0, since in interrupt mode(softirq based RX),
402  * we donot want to process monitor mode rings in a softirq.
403  *
404  * So, in case packet log is enabled for SAP/STA/P2P modes,
405  * regular interrupt processing will not process monitor mode rings. It would be
406  * done in a separate timer context.
407  *
408  * Return: 0
409  */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	/* Monitor rings are serviced from a timer in this (DP_CON_MON)
	 * configuration, so no monitor mask is exposed to interrupt contexts.
	 */
	return 0;
}
415 
416 /*
417  * dp_service_mon_rings()- service monitor rings
418  * @soc: soc dp handle
419  * @quota: number of ring entry that can be serviced
420  *
421  * Return: None
422  *
423  */
424 static void dp_service_mon_rings(struct  dp_soc *soc, uint32_t quota)
425 {
426 	int ring = 0, work_done;
427 	struct dp_pdev *pdev = NULL;
428 
429 	for (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
430 		pdev = dp_get_pdev_for_lmac_id(soc, ring);
431 		if (!pdev)
432 			continue;
433 		work_done = dp_mon_process(soc, NULL, ring, quota);
434 
435 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
436 			  FL("Reaped %d descs from Monitor rings"),
437 			  work_done);
438 	}
439 }
440 
441 /*
442  * dp_mon_reap_timer_handler()- timer to reap monitor rings
443  * reqd as we are not getting ppdu end interrupts
444  * @arg: SoC Handle
445  *
446  * Return:
447  *
448  */
449 static void dp_mon_reap_timer_handler(void *arg)
450 {
451 	struct dp_soc *soc = (struct dp_soc *)arg;
452 
453 	dp_service_mon_rings(soc, QCA_NAPI_BUDGET);
454 
455 	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
456 }
457 
458 #ifndef REMOVE_PKT_LOG
459 /**
460  * dp_pkt_log_init() - API to initialize packet log
461  * @soc_hdl: Datapath soc handle
462  * @pdev_id: id of data path pdev handle
463  * @scn: HIF context
464  *
465  * Return: none
466  */
467 void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn)
468 {
469 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
470 	struct dp_pdev *handle =
471 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
472 
473 	if (!handle) {
474 		dp_err("pdev handle is NULL");
475 		return;
476 	}
477 
478 	if (handle->pkt_log_init) {
479 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
480 			  "%s: Packet log not initialized", __func__);
481 		return;
482 	}
483 
484 	pktlog_sethandle(&handle->pl_dev, scn);
485 	pktlog_set_pdev_id(handle->pl_dev, pdev_id);
486 	pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);
487 
488 	if (pktlogmod_init(scn)) {
489 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
490 			  "%s: pktlogmod_init failed", __func__);
491 		handle->pkt_log_init = false;
492 	} else {
493 		handle->pkt_log_init = true;
494 	}
495 }
496 
/**
 * dp_pkt_log_con_service() - connect packet log service
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @scn: device context
 *
 * Initializes pktlog for the pdev, then attaches the pktlog HTC endpoint.
 *
 * Return: none
 */
static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
				   uint8_t pdev_id, void *scn)
{
	dp_pkt_log_init(soc_hdl, pdev_id, scn);
	pktlog_htc_attach();
}
511 
512 /**
513  * dp_pktlogmod_exit() - API to cleanup pktlog info
514  * @pdev: Pdev handle
515  *
516  * Return: none
517  */
518 static void dp_pktlogmod_exit(struct dp_pdev *pdev)
519 {
520 	struct dp_soc *soc = pdev->soc;
521 	struct hif_opaque_softc *scn = soc->hif_handle;
522 
523 	if (!scn) {
524 		dp_err("Invalid hif(scn) handle");
525 		return;
526 	}
527 
528 	/* stop mon_reap_timer if it has been started */
529 	if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED &&
530 	    soc->reap_timer_init && (!dp_is_enable_reap_timer_non_pkt(pdev)))
531 		qdf_timer_sync_cancel(&soc->mon_reap_timer);
532 
533 	pktlogmod_exit(scn);
534 	pdev->pkt_log_init = false;
535 }
536 #else
/* REMOVE_PKT_LOG: packet log support is compiled out; provide no-op stubs */
static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
				   uint8_t pdev_id, void *scn)
{
}

static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
543 #endif
544 /**
545  * dp_get_num_rx_contexts() - get number of RX contexts
546  * @soc_hdl: cdp opaque soc handle
547  *
548  * Return: number of RX contexts
549  */
550 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
551 {
552 	int i;
553 	int num_rx_contexts = 0;
554 
555 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
556 
557 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
558 		if (wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i))
559 			num_rx_contexts++;
560 
561 	return num_rx_contexts;
562 }
563 
564 #else
565 static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
566 
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * Return: the RX monitor ring mask configured for this interrupt context
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
}
579 
580 /*
581  * dp_service_lmac_rings()- timer to reap lmac rings
582  * @arg: SoC Handle
583  *
584  * Return:
585  *
586  */
587 static void dp_service_lmac_rings(void *arg)
588 {
589 	struct dp_soc *soc = (struct dp_soc *)arg;
590 	int ring = 0, i;
591 	struct dp_pdev *pdev = NULL;
592 	union dp_rx_desc_list_elem_t *desc_list = NULL;
593 	union dp_rx_desc_list_elem_t *tail = NULL;
594 
595 	/* Process LMAC interrupts */
596 	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
597 		int mac_for_pdev = ring;
598 		struct dp_srng *rx_refill_buf_ring;
599 
600 		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
601 		if (!pdev)
602 			continue;
603 
604 		rx_refill_buf_ring = &soc->rx_refill_buf_ring[mac_for_pdev];
605 
606 		dp_mon_process(soc, NULL, mac_for_pdev,
607 			       QCA_NAPI_BUDGET);
608 
609 		for (i = 0;
610 		     i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
611 			dp_rxdma_err_process(&soc->intr_ctx[i], soc,
612 					     mac_for_pdev,
613 					     QCA_NAPI_BUDGET);
614 
615 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF,
616 						  mac_for_pdev))
617 			dp_rx_buffers_replenish(soc, mac_for_pdev,
618 						rx_refill_buf_ring,
619 						&soc->rx_desc_buf[mac_for_pdev],
620 						0, &desc_list, &tail);
621 	}
622 
623 	qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
624 }
625 
626 #endif
627 
628 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
629 				 uint8_t vdev_id,
630 				 uint8_t *peer_mac,
631 				 uint8_t *mac_addr,
632 				 enum cdp_txrx_ast_entry_type type,
633 				 uint32_t flags)
634 {
635 	int ret = -1;
636 	QDF_STATUS status = QDF_STATUS_SUCCESS;
637 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
638 						       peer_mac, 0, vdev_id,
639 						       DP_MOD_ID_CDP);
640 
641 	if (!peer) {
642 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
643 			  "%s: Peer is NULL!\n", __func__);
644 		return ret;
645 	}
646 
647 	status = dp_peer_add_ast((struct dp_soc *)soc_hdl,
648 				 peer,
649 				 mac_addr,
650 				 type,
651 				 flags);
652 	if ((status == QDF_STATUS_SUCCESS) ||
653 	    (status == QDF_STATUS_E_ALREADY) ||
654 	    (status == QDF_STATUS_E_AGAIN))
655 		ret = 0;
656 
657 	dp_hmwds_ast_add_notify(peer, mac_addr,
658 				type, status, false);
659 
660 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
661 
662 	return ret;
663 }
664 
665 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
666 						uint8_t vdev_id,
667 						uint8_t *peer_mac,
668 						uint8_t *wds_macaddr,
669 						uint32_t flags)
670 {
671 	int status = -1;
672 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
673 	struct dp_ast_entry  *ast_entry = NULL;
674 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
675 						       peer_mac, 0, vdev_id,
676 						       DP_MOD_ID_CDP);
677 
678 	if (!peer) {
679 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
680 			  "%s: Peer is NULL!\n", __func__);
681 		return status;
682 	}
683 
684 	qdf_spin_lock_bh(&soc->ast_lock);
685 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
686 						    peer->vdev->pdev->pdev_id);
687 
688 	if (ast_entry) {
689 		status = dp_peer_update_ast(soc,
690 					    peer,
691 					    ast_entry, flags);
692 	}
693 	qdf_spin_unlock_bh(&soc->ast_lock);
694 
695 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
696 
697 	return status;
698 }
699 
700 /*
701  * dp_peer_reset_ast_entries() - Deletes all HMWDS entries for a peer
702  * @soc_handle:		Datapath SOC handle
703  * @peer:		DP peer
704  * @arg:		callback argument
705  *
706  * Return: None
707  */
708 static void
709 dp_peer_reset_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
710 {
711 	struct dp_ast_entry *ast_entry = NULL;
712 	struct dp_ast_entry *tmp_ast_entry;
713 
714 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) {
715 		if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
716 		    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
717 			dp_peer_del_ast(soc, ast_entry);
718 	}
719 }
720 
/*
 * dp_wds_reset_ast_wifi3() - Delete HMWDS AST entries, by peer or by address
 * @soc_hdl:		Datapath SOC handle
 * @wds_macaddr:	WDS entry MAC Address (used when peer_mac_addr is NULL)
 * @peer_mac_addr:	peer MAC Address (takes precedence when non-NULL)
 * @vdev_id:		id of vdev handle
 *
 * Despite the historical name, this does not toggle is_active: it deletes
 * HMWDS AST entries — all entries of the peer when peer_mac_addr is given,
 * otherwise the single entry matching wds_macaddr on this pdev.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
					 uint8_t *wds_macaddr,
					 uint8_t *peer_mac_addr,
					 uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_peer *peer;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	pdev = vdev->pdev;

	if (peer_mac_addr) {
		/* Peer MAC supplied: drop every HMWDS AST entry of that peer */
		peer = dp_peer_find_hash_find(soc, peer_mac_addr,
					      0, vdev->vdev_id,
					      DP_MOD_ID_CDP);
		if (!peer) {
			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
			return QDF_STATUS_E_FAILURE;
		}

		qdf_spin_lock_bh(&soc->ast_lock);
		dp_peer_reset_ast_entries(soc, peer, NULL);
		qdf_spin_unlock_bh(&soc->ast_lock);
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	} else if (wds_macaddr) {
		/* Otherwise delete only the entry matching wds_macaddr on
		 * this pdev, and only if it is an HMWDS entry.
		 */
		qdf_spin_lock_bh(&soc->ast_lock);
		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
							    pdev->pdev_id);

		if (ast_entry) {
			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
				dp_peer_del_ast(soc, ast_entry);
		}
		qdf_spin_unlock_bh(&soc->ast_lock);
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}
775 
/*
 * dp_wds_reset_ast_table_wifi3() - Delete HMWDS AST entries of all peers
 * @soc_hdl:		Datapath SOC handle
 * @vdev_id:		id of vdev object (unused; the reset is soc-wide)
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
			     uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *) soc_hdl;

	qdf_spin_lock_bh(&soc->ast_lock);

	/* Walk every peer on the soc, deleting its HMWDS AST entries */
	dp_soc_iterate_peer(soc, dp_peer_reset_ast_entries, NULL,
			    DP_MOD_ID_CDP);
	qdf_spin_unlock_bh(&soc->ast_lock);

	return QDF_STATUS_SUCCESS;
}
797 
798 /*
799  * dp_peer_flush_ast_entries() - Delete all wds and hmwds ast entries of a peer
800  * @soc:		Datapath SOC
801  * @peer:		Datapath peer
802  * @arg:		arg to callback
803  *
804  * Return: None
805  */
806 static void
807 dp_peer_flush_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
808 {
809 	struct dp_ast_entry *ase = NULL;
810 	struct dp_ast_entry *temp_ase;
811 
812 	DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
813 		if ((ase->type ==
814 			CDP_TXRX_AST_TYPE_STATIC) ||
815 			(ase->type ==
816 			 CDP_TXRX_AST_TYPE_SELF) ||
817 			(ase->type ==
818 			 CDP_TXRX_AST_TYPE_STA_BSS))
819 			continue;
820 		dp_peer_del_ast(soc, ase);
821 	}
822 }
823 
/*
 * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entry
 * @soc_hdl:		Datapath SOC handle
 *
 * Removes every AST entry of every peer except STATIC/SELF/STA_BSS types.
 *
 * Return: None
 */
static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
{
	struct dp_soc *soc = (struct dp_soc *) soc_hdl;

	qdf_spin_lock_bh(&soc->ast_lock);

	dp_soc_iterate_peer(soc, dp_peer_flush_ast_entries, NULL,
			    DP_MOD_ID_CDP);

	qdf_spin_unlock_bh(&soc->ast_lock);
}
841 
842 /**
843  * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
844  *                                       and return ast entry information
845  *                                       of first ast entry found in the
846  *                                       table with given mac address
847  *
848  * @soc : data path soc handle
849  * @ast_mac_addr : AST entry mac address
850  * @ast_entry_info : ast entry information
851  *
852  * return : true if ast entry found with ast_mac_addr
853  *          false if ast entry not found
854  */
855 static bool dp_peer_get_ast_info_by_soc_wifi3
856 	(struct cdp_soc_t *soc_hdl,
857 	 uint8_t *ast_mac_addr,
858 	 struct cdp_ast_entry_info *ast_entry_info)
859 {
860 	struct dp_ast_entry *ast_entry = NULL;
861 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
862 	struct dp_peer *peer = NULL;
863 
864 	qdf_spin_lock_bh(&soc->ast_lock);
865 
866 	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
867 	if ((!ast_entry) ||
868 	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
869 		qdf_spin_unlock_bh(&soc->ast_lock);
870 		return false;
871 	}
872 
873 	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
874 				     DP_MOD_ID_AST);
875 	if (!peer) {
876 		qdf_spin_unlock_bh(&soc->ast_lock);
877 		return false;
878 	}
879 
880 	ast_entry_info->type = ast_entry->type;
881 	ast_entry_info->pdev_id = ast_entry->pdev_id;
882 	ast_entry_info->vdev_id = ast_entry->vdev_id;
883 	ast_entry_info->peer_id = ast_entry->peer_id;
884 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
885 		     &peer->mac_addr.raw[0],
886 		     QDF_MAC_ADDR_SIZE);
887 	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
888 	qdf_spin_unlock_bh(&soc->ast_lock);
889 	return true;
890 }
891 
892 /**
893  * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
894  *                                          and return ast entry information
895  *                                          if mac address and pdev_id matches
896  *
897  * @soc : data path soc handle
898  * @ast_mac_addr : AST entry mac address
899  * @pdev_id : pdev_id
900  * @ast_entry_info : ast entry information
901  *
902  * return : true if ast entry found with ast_mac_addr
903  *          false if ast entry not found
904  */
905 static bool dp_peer_get_ast_info_by_pdevid_wifi3
906 		(struct cdp_soc_t *soc_hdl,
907 		 uint8_t *ast_mac_addr,
908 		 uint8_t pdev_id,
909 		 struct cdp_ast_entry_info *ast_entry_info)
910 {
911 	struct dp_ast_entry *ast_entry;
912 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
913 	struct dp_peer *peer = NULL;
914 
915 	qdf_spin_lock_bh(&soc->ast_lock);
916 
917 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr,
918 						    pdev_id);
919 
920 	if ((!ast_entry) ||
921 	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
922 		qdf_spin_unlock_bh(&soc->ast_lock);
923 		return false;
924 	}
925 
926 	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
927 				     DP_MOD_ID_AST);
928 	if (!peer) {
929 		qdf_spin_unlock_bh(&soc->ast_lock);
930 		return false;
931 	}
932 
933 	ast_entry_info->type = ast_entry->type;
934 	ast_entry_info->pdev_id = ast_entry->pdev_id;
935 	ast_entry_info->vdev_id = ast_entry->vdev_id;
936 	ast_entry_info->peer_id = ast_entry->peer_id;
937 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
938 		     &peer->mac_addr.raw[0],
939 		     QDF_MAC_ADDR_SIZE);
940 	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
941 	qdf_spin_unlock_bh(&soc->ast_lock);
942 	return true;
943 }
944 
945 /**
946  * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table
947  *                            with given mac address
948  *
949  * @soc : data path soc handle
950  * @ast_mac_addr : AST entry mac address
951  * @callback : callback function to called on ast delete response from FW
952  * @cookie : argument to be passed to callback
953  *
954  * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
955  *          is sent
956  *          QDF_STATUS_E_INVAL false if ast entry not found
957  */
958 static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
959 					       uint8_t *mac_addr,
960 					       txrx_ast_free_cb callback,
961 					       void *cookie)
962 
963 {
964 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
965 	struct dp_ast_entry *ast_entry = NULL;
966 	txrx_ast_free_cb cb = NULL;
967 	void *arg = NULL;
968 
969 	qdf_spin_lock_bh(&soc->ast_lock);
970 	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
971 	if (!ast_entry) {
972 		qdf_spin_unlock_bh(&soc->ast_lock);
973 		return -QDF_STATUS_E_INVAL;
974 	}
975 
976 	if (ast_entry->callback) {
977 		cb = ast_entry->callback;
978 		arg = ast_entry->cookie;
979 	}
980 
981 	ast_entry->callback = callback;
982 	ast_entry->cookie = cookie;
983 
984 	/*
985 	 * if delete_in_progress is set AST delete is sent to target
986 	 * and host is waiting for response should not send delete
987 	 * again
988 	 */
989 	if (!ast_entry->delete_in_progress)
990 		dp_peer_del_ast(soc, ast_entry);
991 
992 	qdf_spin_unlock_bh(&soc->ast_lock);
993 	if (cb) {
994 		cb(soc->ctrl_psoc,
995 		   dp_soc_to_cdp_soc(soc),
996 		   arg,
997 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
998 	}
999 	return QDF_STATUS_SUCCESS;
1000 }
1001 
1002 /**
1003  * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
1004  *                                   table if mac address and pdev_id matches
1005  *
1006  * @soc : data path soc handle
1007  * @ast_mac_addr : AST entry mac address
1008  * @pdev_id : pdev id
1009  * @callback : callback function to called on ast delete response from FW
1010  * @cookie : argument to be passed to callback
1011  *
1012  * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
1013  *          is sent
1014  *          QDF_STATUS_E_INVAL false if ast entry not found
1015  */
1016 
1017 static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
1018 						uint8_t *mac_addr,
1019 						uint8_t pdev_id,
1020 						txrx_ast_free_cb callback,
1021 						void *cookie)
1022 
1023 {
1024 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1025 	struct dp_ast_entry *ast_entry;
1026 	txrx_ast_free_cb cb = NULL;
1027 	void *arg = NULL;
1028 
1029 	qdf_spin_lock_bh(&soc->ast_lock);
1030 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);
1031 
1032 	if (!ast_entry) {
1033 		qdf_spin_unlock_bh(&soc->ast_lock);
1034 		return -QDF_STATUS_E_INVAL;
1035 	}
1036 
1037 	if (ast_entry->callback) {
1038 		cb = ast_entry->callback;
1039 		arg = ast_entry->cookie;
1040 	}
1041 
1042 	ast_entry->callback = callback;
1043 	ast_entry->cookie = cookie;
1044 
1045 	/*
1046 	 * if delete_in_progress is set AST delete is sent to target
1047 	 * and host is waiting for response should not sent delete
1048 	 * again
1049 	 */
1050 	if (!ast_entry->delete_in_progress)
1051 		dp_peer_del_ast(soc, ast_entry);
1052 
1053 	qdf_spin_unlock_bh(&soc->ast_lock);
1054 
1055 	if (cb) {
1056 		cb(soc->ctrl_psoc,
1057 		   dp_soc_to_cdp_soc(soc),
1058 		   arg,
1059 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1060 	}
1061 	return QDF_STATUS_SUCCESS;
1062 }
1063 
1064 /**
1065  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs
1066  * @ring_num: ring num of the ring being queried
1067  * @grp_mask: the grp_mask array for the ring type in question.
1068  *
1069  * The grp_mask array is indexed by group number and the bit fields correspond
1070  * to ring numbers.  We are finding which interrupt group a ring belongs to.
1071  *
1072  * Return: the index in the grp_mask array with the ring number.
1073  * -QDF_STATUS_E_NOENT if no entry is found
1074  */
1075 static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
1076 {
1077 	int ext_group_num;
1078 	int mask = 1 << ring_num;
1079 
1080 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
1081 	     ext_group_num++) {
1082 		if (mask & grp_mask[ext_group_num])
1083 			return ext_group_num;
1084 	}
1085 
1086 	return -QDF_STATUS_E_NOENT;
1087 }
1088 
/**
 * dp_srng_calculate_msi_group() - map a (ring_type, ring_num) pair to the
 * interrupt ext_group that services it
 * @soc: Datapath soc handle
 * @ring_type: HAL ring type
 * @ring_num: ring number within the ring type
 *
 * Selects the wlan_cfg interrupt-mask array appropriate for the ring type
 * (remapping WBM2SW ring 3 to the rx_wbm_rel mask) and then looks up which
 * group's mask contains the ring.
 *
 * Return: ext_group index on success, -QDF_STATUS_E_NOENT for ring types
 * that have no host interrupt group (SW-to-HW rings, unused rings, CE rings).
 */
static int dp_srng_calculate_msi_group(struct dp_soc *soc,
				       enum hal_ring_type ring_type,
				       int ring_num)
{
	int *grp_mask;

	switch (ring_type) {
	case WBM2SW_RELEASE:
		/* dp_tx_comp_handler - soc->tx_comp_ring */
		if (ring_num < 3)
			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];

		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
		else if (ring_num == 3) {
			/* sw treats this as a separate ring type */
			grp_mask = &soc->wlan_cfg_ctx->
				int_rx_wbm_rel_ring_mask[0];
			/* rx_rel mask uses bit 0, so rewrite the ring num */
			ring_num = 0;
		} else {
			qdf_assert(0);
			return -QDF_STATUS_E_NOENT;
		}
	break;

	case REO_EXCEPTION:
		/* dp_rx_err_process - &soc->reo_exception_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
	break;

	case REO_DST:
		/* dp_rx_process - soc->reo_dest_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
	break;

	case REO_STATUS:
		/* dp_reo_status_ring_handler - soc->reo_status_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
	break;

	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
	case RXDMA_MONITOR_STATUS:
	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
	case RXDMA_MONITOR_DST:
		/* dp_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
	break;
	case RXDMA_DST:
		/* dp_rxdma_err_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
	break;

	case RXDMA_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
	break;

	case RXDMA_MONITOR_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
	break;

	case TCL_DATA:
	/* CMD_CREDIT_RING is used as command in 8074 and credit in 9000 */
	case TCL_CMD_CREDIT:
	case REO_CMD:
	case SW2WBM_RELEASE:
	case WBM_IDLE_LINK:
		/* normally empty SW_TO_HW rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case TCL_STATUS:
	case REO_REINJECT:
		/* misc unused rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case CE_SRC:
	case CE_DST:
	case CE_DST_STATUS:
		/* CE_rings - currently handled by hif */
	default:
		return -QDF_STATUS_E_NOENT;
	break;
	}

	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
}
1175 
/**
 * dp_srng_msi_setup() - program MSI address/data for an SRNG
 * @soc: Datapath soc handle
 * @ring_params: SRNG parameters to be filled in
 * @ring_type: HAL ring type
 * @ring_num: ring number within the ring type
 *
 * Queries the platform for the MSI assignment of the "DP" user and, if the
 * ring maps to an interrupt group, fills msi_addr/msi_data and sets the
 * HAL_SRNG_MSI_INTR flag. On any failure the ring is left with MSI disabled
 * (msi_addr/msi_data zeroed or untouched).
 */
static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
			      *ring_params, int ring_type, int ring_num)
{
	int msi_group_number;
	int msi_data_count;
	int ret;
	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;

	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					    &msi_data_count, &msi_data_start,
					    &msi_irq_start);

	/* no MSI assignment for DP on this platform; keep MSI disabled */
	if (ret)
		return;

	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
						       ring_num);
	if (msi_group_number < 0) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
			FL("ring not part of an ext_group; ring_type: %d,ring_num %d"),
			ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		return;
	}

	/* NOTE(review): groups beyond the MSI count share vectors via the
	 * modulo below; '>' (not '>=') matches existing behavior — confirm
	 * the msi_group_number == msi_data_count boundary is intentional.
	 */
	if (msi_group_number > msi_data_count) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
			FL("2 msi_groups will share an msi; msi_group_num %d"),
			msi_group_number);

		QDF_ASSERT(0);
	}

	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

	/* 64-bit MSI target address is split across two 32-bit halves */
	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = (msi_group_number % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;
}
1218 
1219 #ifdef FEATURE_AST
1220 /**
1221  * dp_print_peer_ast_entries() - Dump AST entries of peer
1222  * @soc: Datapath soc handle
1223  * @peer: Datapath peer
1224  * @arg: argument to iterate function
1225  *
1226  * return void
1227  */
1228 static void
1229 dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
1230 {
1231 	struct dp_ast_entry *ase, *tmp_ase;
1232 	uint32_t num_entries = 0;
1233 	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
1234 			"NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS", "BSS",
1235 			"DA", "HMWDS_SEC"};
1236 
1237 	DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
1238 	    DP_PRINT_STATS("%6d mac_addr = "QDF_MAC_ADDR_FMT
1239 		    " peer_mac_addr = "QDF_MAC_ADDR_FMT
1240 		    " peer_id = %u"
1241 		    " type = %s"
1242 		    " next_hop = %d"
1243 		    " is_active = %d"
1244 		    " ast_idx = %d"
1245 		    " ast_hash = %d"
1246 		    " delete_in_progress = %d"
1247 		    " pdev_id = %d"
1248 		    " vdev_id = %d",
1249 		    ++num_entries,
1250 		    QDF_MAC_ADDR_REF(ase->mac_addr.raw),
1251 		    QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1252 		    ase->peer_id,
1253 		    type[ase->type],
1254 		    ase->next_hop,
1255 		    ase->is_active,
1256 		    ase->ast_idx,
1257 		    ase->ast_hash_value,
1258 		    ase->delete_in_progress,
1259 		    ase->pdev_id,
1260 		    ase->vdev_id);
1261 	}
1262 }
1263 
1264 /**
1265  * dp_print_ast_stats() - Dump AST table contents
1266  * @soc: Datapath soc handle
1267  *
1268  * return void
1269  */
1270 void dp_print_ast_stats(struct dp_soc *soc)
1271 {
1272 	DP_PRINT_STATS("AST Stats:");
1273 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
1274 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
1275 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
1276 	DP_PRINT_STATS("	Entries MAP ERR  = %d", soc->stats.ast.map_err);
1277 	DP_PRINT_STATS("	Entries Mismatch ERR  = %d",
1278 		       soc->stats.ast.ast_mismatch);
1279 
1280 	DP_PRINT_STATS("AST Table:");
1281 
1282 	qdf_spin_lock_bh(&soc->ast_lock);
1283 
1284 	dp_soc_iterate_peer(soc, dp_print_peer_ast_entries, NULL,
1285 			    DP_MOD_ID_GENERIC_STATS);
1286 
1287 	qdf_spin_unlock_bh(&soc->ast_lock);
1288 }
1289 #else
/* Stub when FEATURE_AST is compiled out: just report that stats are absent. */
void dp_print_ast_stats(struct dp_soc *soc)
{
	DP_PRINT_STATS("AST Stats not available. Enable FEATURE_AST");
}
1295 #endif
1296 
1297 /**
1298  * dp_print_peer_info() - Dump peer info
1299  * @soc: Datapath soc handle
1300  * @peer: Datapath peer handle
1301  * @arg: argument to iter function
1302  *
1303  * return void
1304  */
1305 static void
1306 dp_print_peer_info(struct dp_soc *soc, struct dp_peer *peer, void *arg)
1307 {
1308 	DP_PRINT_STATS("    peer_mac_addr = "QDF_MAC_ADDR_FMT
1309 		       " nawds_enabled = %d"
1310 		       " bss_peer = %d"
1311 		       " wds_enabled = %d"
1312 		       " tx_cap_enabled = %d"
1313 		       " rx_cap_enabled = %d"
1314 		       " peer id = %d",
1315 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1316 		       peer->nawds_enabled,
1317 		       peer->bss_peer,
1318 		       peer->wds_enabled,
1319 		       peer->tx_cap_enabled,
1320 		       peer->rx_cap_enabled,
1321 		       peer->peer_id);
1322 }
1323 
1324 /**
1325  * dp_print_peer_table() - Dump all Peer stats
1326  * @vdev: Datapath Vdev handle
1327  *
1328  * return void
1329  */
1330 static void dp_print_peer_table(struct dp_vdev *vdev)
1331 {
1332 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
1333 	dp_vdev_iterate_peer(vdev, dp_print_peer_info, NULL,
1334 			     DP_MOD_ID_GENERIC_STATS);
1335 }
1336 
1337 #ifdef WLAN_DP_PER_RING_TYPE_CONFIG
1338 /**
1339  * dp_srng_configure_interrupt_thresholds() - Retrieve interrupt
1340  * threshold values from the wlan_srng_cfg table for each ring type
1341  * @soc: device handle
1342  * @ring_params: per ring specific parameters
1343  * @ring_type: Ring type
1344  * @ring_num: Ring number for a given ring type
1345  *
1346  * Fill the ring params with the interrupt threshold
1347  * configuration parameters available in the per ring type wlan_srng_cfg
1348  * table.
1349  *
1350  * Return: None
1351  */
1352 static void
1353 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
1354 				       struct hal_srng_params *ring_params,
1355 				       int ring_type, int ring_num,
1356 				       int num_entries)
1357 {
1358 	if (ring_type == WBM2SW_RELEASE && (ring_num == 3)) {
1359 		ring_params->intr_timer_thres_us =
1360 				wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1361 		ring_params->intr_batch_cntr_thres_entries =
1362 				wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1363 	} else {
1364 		ring_params->intr_timer_thres_us =
1365 				soc->wlan_srng_cfg[ring_type].timer_threshold;
1366 		ring_params->intr_batch_cntr_thres_entries =
1367 				soc->wlan_srng_cfg[ring_type].batch_count_threshold;
1368 	}
1369 	ring_params->low_threshold =
1370 			soc->wlan_srng_cfg[ring_type].low_threshold;
1371 	if (ring_params->low_threshold)
1372 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1373 }
1374 #else
/*
 * dp_srng_configure_interrupt_thresholds() - Set interrupt mitigation
 * thresholds (timer, batch counter, low threshold) for a ring based on
 * its type, using the global wlan_cfg values.
 */
static void
dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
				       struct hal_srng_params *ring_params,
				       int ring_type, int ring_num,
				       int num_entries)
{
	if (ring_type == REO_DST) {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
	} else {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
	}

	/* Enable low threshold interrupts for rx buffer rings (regular and
	 * monitor buffer rings).
	 * TODO: See if this is required for any other ring
	 */
	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
	    (ring_type == RXDMA_MONITOR_STATUS)) {
		/* TODO: Setting low threshold to 1/8th of ring size
		 * see if this needs to be configurable
		 */
		ring_params->low_threshold = num_entries >> 3;
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
		ring_params->intr_batch_cntr_thres_entries = 0;
	}

	/* During initialisation monitor rings are only filled with
	 * MON_BUF_MIN_ENTRIES entries. So low threshold needs to be set to
	 * a value less than that. Low threshold value is reconfigured again
	 * to 1/8th of the ring size when monitor vap is created.
	 */
	if (ring_type == RXDMA_MONITOR_BUF)
		ring_params->low_threshold = MON_BUF_MIN_ENTRIES >> 1;

	/* In case of PCI chipsets, we dont have PPDU end interrupts,
	 * so MONITOR STATUS ring is reaped by receiving MSI from srng.
	 * Keep the batch counter threshold at 4 so that an interrupt is
	 * received for every 4 entries in the MONITOR_STATUS ring
	 */
	if ((ring_type == RXDMA_MONITOR_STATUS) &&
	    (soc->intr_mode == DP_INTR_MSI))
		ring_params->intr_batch_cntr_thres_entries = 4;
}
1431 #endif
1432 
1433 /*
1434  * dp_srng_free() - Free SRNG memory
1435  * @soc  : Data path soc handle
1436  * @srng : SRNG pointer
1437  *
1438  * return: None
1439  */
1440 
1441 static void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng)
1442 {
1443 	if (srng->alloc_size && srng->base_vaddr_unaligned) {
1444 		if (!srng->cached) {
1445 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1446 						srng->alloc_size,
1447 						srng->base_vaddr_unaligned,
1448 						srng->base_paddr_unaligned, 0);
1449 		} else {
1450 			qdf_mem_free(srng->base_vaddr_unaligned);
1451 		}
1452 		srng->alloc_size = 0;
1453 		srng->base_vaddr_unaligned = NULL;
1454 	}
1455 	srng->hal_srng = NULL;
1456 }
1457 
1458 /*
1459  * dp_srng_init() - Initialize SRNG
1460  * @soc  : Data path soc handle
1461  * @srng : SRNG pointer
1462  * @ring_type : Ring Type
1463  * @ring_num: Ring number
1464  * @mac_id: mac_id
1465  *
1466  * return: QDF_STATUS
1467  */
1468 static QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng,
1469 			       int ring_type, int ring_num, int mac_id)
1470 {
1471 	hal_soc_handle_t hal_soc = soc->hal_soc;
1472 	struct hal_srng_params ring_params;
1473 
1474 	if (srng->hal_srng) {
1475 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1476 			  FL("Ring type: %d, num:%d is already initialized"),
1477 			  ring_type, ring_num);
1478 		return QDF_STATUS_SUCCESS;
1479 	}
1480 
1481 	/* memset the srng ring to zero */
1482 	qdf_mem_zero(srng->base_vaddr_unaligned, srng->alloc_size);
1483 
1484 	qdf_mem_zero(&ring_params, sizeof(struct hal_srng_params));
1485 	ring_params.ring_base_paddr = srng->base_paddr_aligned;
1486 	ring_params.ring_base_vaddr = srng->base_vaddr_aligned;
1487 
1488 	ring_params.num_entries = srng->num_entries;
1489 
1490 	dp_verbose_debug("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
1491 			 ring_type, ring_num,
1492 			 (void *)ring_params.ring_base_vaddr,
1493 			 (void *)ring_params.ring_base_paddr,
1494 			 ring_params.num_entries);
1495 
1496 	if (soc->intr_mode == DP_INTR_MSI) {
1497 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
1498 		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
1499 				 ring_type, ring_num);
1500 
1501 	} else {
1502 		ring_params.msi_data = 0;
1503 		ring_params.msi_addr = 0;
1504 		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
1505 				 ring_type, ring_num);
1506 	}
1507 
1508 	dp_srng_configure_interrupt_thresholds(soc, &ring_params,
1509 					       ring_type, ring_num,
1510 					       srng->num_entries);
1511 
1512 	if (srng->cached)
1513 		ring_params.flags |= HAL_SRNG_CACHED_DESC;
1514 
1515 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
1516 					mac_id, &ring_params);
1517 
1518 	if (!srng->hal_srng) {
1519 		dp_srng_free(soc, srng);
1520 		return QDF_STATUS_E_FAILURE;
1521 	}
1522 
1523 	return QDF_STATUS_SUCCESS;
1524 }
1525 
1526 /*
1527  * dp_srng_alloc() - Allocate memory for SRNG
1528  * @soc  : Data path soc handle
1529  * @srng : SRNG pointer
1530  * @ring_type : Ring Type
1531  * @num_entries: Number of entries
1532  * @cached: cached flag variable
1533  *
1534  * return: QDF_STATUS
1535  */
1536 static QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
1537 				int ring_type, uint32_t num_entries,
1538 				bool cached)
1539 {
1540 	hal_soc_handle_t hal_soc = soc->hal_soc;
1541 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
1542 	uint32_t ring_base_align = 32;
1543 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
1544 
1545 	if (srng->base_vaddr_unaligned) {
1546 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1547 			  FL("Ring type: %d, is already allocated"), ring_type);
1548 		return QDF_STATUS_SUCCESS;
1549 	}
1550 
1551 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
1552 	srng->hal_srng = NULL;
1553 	srng->alloc_size = num_entries * entry_size;
1554 	srng->num_entries = num_entries;
1555 	srng->cached = cached;
1556 
1557 	if (!cached) {
1558 		srng->base_vaddr_aligned =
1559 		    qdf_aligned_mem_alloc_consistent(
1560 					soc->osdev, &srng->alloc_size,
1561 					&srng->base_vaddr_unaligned,
1562 					&srng->base_paddr_unaligned,
1563 					&srng->base_paddr_aligned,
1564 					ring_base_align);
1565 	} else {
1566 		srng->base_vaddr_aligned = qdf_aligned_malloc(
1567 					&srng->alloc_size,
1568 					&srng->base_vaddr_unaligned,
1569 					&srng->base_paddr_unaligned,
1570 					&srng->base_paddr_aligned,
1571 					ring_base_align);
1572 	}
1573 
1574 	if (!srng->base_vaddr_aligned)
1575 		return QDF_STATUS_E_NOMEM;
1576 
1577 	return QDF_STATUS_SUCCESS;
1578 }
1579 
1580 /*
1581  * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
1582  * @soc: DP SOC handle
1583  * @srng: source ring structure
1584  * @ring_type: type of ring
1585  * @ring_num: ring number
1586  *
1587  * Return: None
1588  */
1589 static void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
1590 			   int ring_type, int ring_num)
1591 {
1592 	if (!srng->hal_srng) {
1593 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1594 			  FL("Ring type: %d, num:%d not setup"),
1595 			  ring_type, ring_num);
1596 		return;
1597 	}
1598 
1599 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
1600 	srng->hal_srng = NULL;
1601 }
1602 
1603 /* TODO: Need this interface from HIF */
1604 void *hif_get_hal_handle(struct hif_opaque_softc *hif_handle);
1605 
1606 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
1607 int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
1608 			 hal_ring_handle_t hal_ring_hdl)
1609 {
1610 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
1611 	uint32_t hp, tp;
1612 	uint8_t ring_id;
1613 
1614 	if (!int_ctx)
1615 		return hal_srng_access_start(hal_soc, hal_ring_hdl);
1616 
1617 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
1618 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
1619 
1620 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
1621 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_START);
1622 
1623 	return hal_srng_access_start(hal_soc, hal_ring_hdl);
1624 }
1625 
1626 void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
1627 			hal_ring_handle_t hal_ring_hdl)
1628 {
1629 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
1630 	uint32_t hp, tp;
1631 	uint8_t ring_id;
1632 
1633 	if (!int_ctx)
1634 		return hal_srng_access_end(hal_soc, hal_ring_hdl);
1635 
1636 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
1637 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
1638 
1639 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
1640 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_END);
1641 
1642 	return hal_srng_access_end(hal_soc, hal_ring_hdl);
1643 }
1644 
/* Record a timer-context entry marker into the hif event history */
static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
					      uint8_t hist_group_id)
{
	hif_record_event(dp_soc->hif_handle, hist_group_id,
			 0, 0, 0, HIF_EVENT_TIMER_ENTRY);
}
1651 
/* Record a timer-context exit marker into the hif event history */
static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
					     uint8_t hist_group_id)
{
	hif_record_event(dp_soc->hif_handle, hist_group_id,
			 0, 0, 0, HIF_EVENT_TIMER_EXIT);
}
1658 #else
1659 
/* No-op stub when WLAN_FEATURE_DP_EVENT_HISTORY is compiled out */
static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
					      uint8_t hist_group_id)
{
}
1664 
/* No-op stub when WLAN_FEATURE_DP_EVENT_HISTORY is compiled out */
static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
					     uint8_t hist_group_id)
{
}
1669 
1670 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
1671 
1672 /*
1673  * dp_should_timer_irq_yield() - Decide if the bottom half should yield
1674  * @soc: DP soc handle
1675  * @work_done: work done in softirq context
1676  * @start_time: start time for the softirq
1677  *
1678  * Return: enum with yield code
1679  */
1680 static enum timer_yield_status
1681 dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
1682 			  uint64_t start_time)
1683 {
1684 	uint64_t cur_time = qdf_get_log_timestamp();
1685 
1686 	if (!work_done)
1687 		return DP_TIMER_WORK_DONE;
1688 
1689 	if (cur_time - start_time > DP_MAX_TIMER_EXEC_TIME_TICKS)
1690 		return DP_TIMER_TIME_EXHAUST;
1691 
1692 	return DP_TIMER_NO_YIELD;
1693 }
1694 
1695 /**
1696  * dp_process_lmac_rings() - Process LMAC rings
1697  * @int_ctx: interrupt context
1698  * @total_budget: budget of work which can be done
1699  *
1700  * Return: work done
1701  */
1702 static int dp_process_lmac_rings(struct dp_intr *int_ctx, int total_budget)
1703 {
1704 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
1705 	struct dp_soc *soc = int_ctx->soc;
1706 	uint32_t remaining_quota = total_budget;
1707 	struct dp_pdev *pdev = NULL;
1708 	uint32_t work_done  = 0;
1709 	int budget = total_budget;
1710 	int ring = 0;
1711 
1712 	/* Process LMAC interrupts */
1713 	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
1714 		int mac_for_pdev = ring;
1715 
1716 		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
1717 		if (!pdev)
1718 			continue;
1719 		if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
1720 			work_done = dp_mon_process(soc, int_ctx, mac_for_pdev,
1721 						   remaining_quota);
1722 			if (work_done)
1723 				intr_stats->num_rx_mon_ring_masks++;
1724 			budget -= work_done;
1725 			if (budget <= 0)
1726 				goto budget_done;
1727 			remaining_quota = budget;
1728 		}
1729 
1730 		if (int_ctx->rxdma2host_ring_mask &
1731 				(1 << mac_for_pdev)) {
1732 			work_done = dp_rxdma_err_process(int_ctx, soc,
1733 							 mac_for_pdev,
1734 							 remaining_quota);
1735 			if (work_done)
1736 				intr_stats->num_rxdma2host_ring_masks++;
1737 			budget -=  work_done;
1738 			if (budget <= 0)
1739 				goto budget_done;
1740 			remaining_quota = budget;
1741 		}
1742 
1743 		if (int_ctx->host2rxdma_ring_mask &
1744 					(1 << mac_for_pdev)) {
1745 			union dp_rx_desc_list_elem_t *desc_list = NULL;
1746 			union dp_rx_desc_list_elem_t *tail = NULL;
1747 			struct dp_srng *rx_refill_buf_ring;
1748 
1749 			if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
1750 				rx_refill_buf_ring =
1751 					&soc->rx_refill_buf_ring[mac_for_pdev];
1752 			else
1753 				rx_refill_buf_ring =
1754 					&soc->rx_refill_buf_ring[pdev->lmac_id];
1755 
1756 			intr_stats->num_host2rxdma_ring_masks++;
1757 			DP_STATS_INC(pdev, replenish.low_thresh_intrs,
1758 				     1);
1759 			dp_rx_buffers_replenish(soc, mac_for_pdev,
1760 						rx_refill_buf_ring,
1761 						&soc->rx_desc_buf[mac_for_pdev],
1762 						0, &desc_list, &tail);
1763 		}
1764 	}
1765 
1766 budget_done:
1767 	return total_budget - budget;
1768 }
1769 
1770 /*
1771  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
1772  * @dp_ctx: DP SOC handle
1773  * @budget: Number of frames/descriptors that can be processed in one shot
1774  *
1775  * Return: remaining budget/quota for the soc device
1776  */
1777 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
1778 {
1779 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
1780 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
1781 	struct dp_soc *soc = int_ctx->soc;
1782 	int ring = 0;
1783 	uint32_t work_done  = 0;
1784 	int budget = dp_budget;
1785 	uint8_t tx_mask = int_ctx->tx_ring_mask;
1786 	uint8_t rx_mask = int_ctx->rx_ring_mask;
1787 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
1788 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
1789 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
1790 	uint32_t remaining_quota = dp_budget;
1791 
1792 	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x\n",
1793 			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
1794 			 reo_status_mask,
1795 			 int_ctx->rx_mon_ring_mask,
1796 			 int_ctx->host2rxdma_ring_mask,
1797 			 int_ctx->rxdma2host_ring_mask);
1798 
1799 	/* Process Tx completion interrupts first to return back buffers */
1800 	while (tx_mask) {
1801 		if (tx_mask & 0x1) {
1802 			work_done = dp_tx_comp_handler(int_ctx,
1803 						       soc,
1804 						       soc->tx_comp_ring[ring].hal_srng,
1805 						       ring, remaining_quota);
1806 
1807 			if (work_done) {
1808 				intr_stats->num_tx_ring_masks[ring]++;
1809 				dp_verbose_debug("tx mask 0x%x ring %d, budget %d, work_done %d",
1810 						 tx_mask, ring, budget,
1811 						 work_done);
1812 			}
1813 
1814 			budget -= work_done;
1815 			if (budget <= 0)
1816 				goto budget_done;
1817 
1818 			remaining_quota = budget;
1819 		}
1820 		tx_mask = tx_mask >> 1;
1821 		ring++;
1822 	}
1823 
1824 	/* Process REO Exception ring interrupt */
1825 	if (rx_err_mask) {
1826 		work_done = dp_rx_err_process(int_ctx, soc,
1827 					      soc->reo_exception_ring.hal_srng,
1828 					      remaining_quota);
1829 
1830 		if (work_done) {
1831 			intr_stats->num_rx_err_ring_masks++;
1832 			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
1833 					 work_done, budget);
1834 		}
1835 
1836 		budget -=  work_done;
1837 		if (budget <= 0) {
1838 			goto budget_done;
1839 		}
1840 		remaining_quota = budget;
1841 	}
1842 
1843 	/* Process Rx WBM release ring interrupt */
1844 	if (rx_wbm_rel_mask) {
1845 		work_done = dp_rx_wbm_err_process(int_ctx, soc,
1846 						  soc->rx_rel_ring.hal_srng,
1847 						  remaining_quota);
1848 
1849 		if (work_done) {
1850 			intr_stats->num_rx_wbm_rel_ring_masks++;
1851 			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
1852 					 work_done, budget);
1853 		}
1854 
1855 		budget -=  work_done;
1856 		if (budget <= 0) {
1857 			goto budget_done;
1858 		}
1859 		remaining_quota = budget;
1860 	}
1861 
1862 	/* Process Rx interrupts */
1863 	if (rx_mask) {
1864 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
1865 			if (!(rx_mask & (1 << ring)))
1866 				continue;
1867 			work_done = dp_rx_process(int_ctx,
1868 						  soc->reo_dest_ring[ring].hal_srng,
1869 						  ring,
1870 						  remaining_quota);
1871 			if (work_done) {
1872 				intr_stats->num_rx_ring_masks[ring]++;
1873 				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
1874 						 rx_mask, ring,
1875 						 work_done, budget);
1876 				budget -=  work_done;
1877 				if (budget <= 0)
1878 					goto budget_done;
1879 				remaining_quota = budget;
1880 			}
1881 		}
1882 	}
1883 
1884 	if (reo_status_mask) {
1885 		if (dp_reo_status_ring_handler(int_ctx, soc))
1886 			int_ctx->intr_stats.num_reo_status_ring_masks++;
1887 	}
1888 
1889 	work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
1890 	if (work_done) {
1891 		budget -=  work_done;
1892 		if (budget <= 0)
1893 			goto budget_done;
1894 		remaining_quota = budget;
1895 	}
1896 
1897 	qdf_lro_flush(int_ctx->lro_ctx);
1898 	intr_stats->num_masks++;
1899 
1900 budget_done:
1901 	return dp_budget - budget;
1902 }
1903 
/* dp_interrupt_timer()- timer poll for interrupts
 *
 * @arg: SoC Handle
 *
 * Services the monitor rings of the LMAC mapped to the monitor channel
 * band in a loop until the work budget or time budget is exhausted, then
 * re-arms the timer (immediately on exhaustion, else after the normal
 * poll interval).
 *
 * Return: None
 */
static void dp_interrupt_timer(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *) arg;
	struct dp_pdev *pdev = soc->pdev_list[0];
	enum timer_yield_status yield = DP_TIMER_NO_YIELD;
	uint32_t work_done  = 0, total_work_done = 0;
	int budget = 0xffff;
	uint32_t remaining_quota = budget;
	uint64_t start_time;
	uint32_t lmac_id;
	uint8_t dp_intr_id;

	if (!qdf_atomic_read(&soc->cmn_init_done))
		return;

	/* no monitor channel configured yet: just re-arm and retry later */
	if (pdev->mon_chan_band == REG_BAND_UNKNOWN) {
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
		return;
	}

	lmac_id = pdev->ch_band_lmac_id_mapping[pdev->mon_chan_band];
	if (qdf_unlikely(lmac_id == DP_MON_INVALID_LMAC_ID)) {
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
		return;
	}

	dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id];
	dp_srng_record_timer_entry(soc, dp_intr_id);
	start_time = qdf_get_log_timestamp();

	while (yield == DP_TIMER_NO_YIELD) {
		work_done = dp_mon_process(soc, &soc->intr_ctx[dp_intr_id],
					   lmac_id, remaining_quota);
		if (work_done) {
			budget -=  work_done;
			if (budget <= 0) {
				yield = DP_TIMER_WORK_EXHAUST;
				goto budget_done;
			}
			remaining_quota = budget;
			total_work_done += work_done;
		}

		yield = dp_should_timer_irq_yield(soc, total_work_done,
						  start_time);
		total_work_done = 0;
	}

budget_done:
	/* re-arm immediately when budget ran out; otherwise use normal poll */
	if (yield == DP_TIMER_WORK_EXHAUST ||
	    yield == DP_TIMER_TIME_EXHAUST)
		qdf_timer_mod(&soc->int_timer, 1);
	else
		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);

	dp_srng_record_timer_exit(soc, dp_intr_id);
}
1968 
1969 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
1970 static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
1971 					struct dp_intr *intr_ctx)
1972 {
1973 	if (intr_ctx->rx_mon_ring_mask)
1974 		return true;
1975 
1976 	return false;
1977 }
1978 #else
/* Stub when WLAN_FEATURE_DP_EVENT_HISTORY is compiled out: never valid */
static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
					struct dp_intr *intr_ctx)
{
	return false;
}
1984 #endif
1985 
1986 /*
1987  * dp_soc_attach_poll() - Register handlers for DP interrupts
1988  * @txrx_soc: DP SOC handle
1989  *
1990  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
1991  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
1992  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1993  *
1994  * Return: 0 for success, nonzero for failure.
1995  */
1996 static QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc)
1997 {
1998 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1999 	int i;
2000 	int lmac_id = 0;
2001 
2002 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
2003 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
2004 	soc->intr_mode = DP_INTR_POLL;
2005 
2006 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
2007 		soc->intr_ctx[i].dp_intr_id = i;
2008 		soc->intr_ctx[i].tx_ring_mask =
2009 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
2010 		soc->intr_ctx[i].rx_ring_mask =
2011 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
2012 		soc->intr_ctx[i].rx_mon_ring_mask =
2013 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
2014 		soc->intr_ctx[i].rx_err_ring_mask =
2015 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
2016 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
2017 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
2018 		soc->intr_ctx[i].reo_status_ring_mask =
2019 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
2020 		soc->intr_ctx[i].rxdma2host_ring_mask =
2021 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
2022 		soc->intr_ctx[i].soc = soc;
2023 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
2024 
2025 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
2026 			hif_event_history_init(soc->hif_handle, i);
2027 			soc->mon_intr_id_lmac_map[lmac_id] = i;
2028 			lmac_id++;
2029 		}
2030 	}
2031 
2032 	qdf_timer_init(soc->osdev, &soc->int_timer,
2033 			dp_interrupt_timer, (void *)soc,
2034 			QDF_TIMER_TYPE_WAKE_APPS);
2035 
2036 	return QDF_STATUS_SUCCESS;
2037 }
2038 
2039 /**
2040  * dp_soc_set_interrupt_mode() - Set the interrupt mode in soc
2041  * soc: DP soc handle
2042  *
2043  * Set the appropriate interrupt mode flag in the soc
2044  */
2045 static void dp_soc_set_interrupt_mode(struct dp_soc *soc)
2046 {
2047 	uint32_t msi_base_data, msi_vector_start;
2048 	int msi_vector_count, ret;
2049 
2050 	soc->intr_mode = DP_INTR_INTEGRATED;
2051 
2052 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
2053 	    (soc->cdp_soc.ol_ops->get_con_mode &&
2054 	     soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)) {
2055 		soc->intr_mode = DP_INTR_POLL;
2056 	} else {
2057 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
2058 						  &msi_vector_count,
2059 						  &msi_base_data,
2060 						  &msi_vector_start);
2061 		if (ret)
2062 			return;
2063 
2064 		soc->intr_mode = DP_INTR_MSI;
2065 	}
2066 }
2067 
2068 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc);
2069 #if defined(DP_INTR_POLL_BOTH)
2070 /*
2071  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
2072  * @txrx_soc: DP SOC handle
2073  *
2074  * Call the appropriate attach function based on the mode of operation.
2075  * This is a WAR for enabling monitor mode.
2076  *
2077  * Return: 0 for success. nonzero for failure.
2078  */
2079 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
2080 {
2081 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2082 
2083 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
2084 	    (soc->cdp_soc.ol_ops->get_con_mode &&
2085 	     soc->cdp_soc.ol_ops->get_con_mode() ==
2086 	     QDF_GLOBAL_MONITOR_MODE)) {
2087 		dp_info("Poll mode");
2088 		return dp_soc_attach_poll(txrx_soc);
2089 	} else {
2090 		dp_info("Interrupt  mode");
2091 		return dp_soc_interrupt_attach(txrx_soc);
2092 	}
2093 }
2094 #else
2095 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
2096 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
2097 {
2098 	return dp_soc_attach_poll(txrx_soc);
2099 }
2100 #else
2101 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
2102 {
2103 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2104 
2105 	if (hif_is_polled_mode_enabled(soc->hif_handle))
2106 		return dp_soc_attach_poll(txrx_soc);
2107 	else
2108 		return dp_soc_interrupt_attach(txrx_soc);
2109 }
2110 #endif
2111 #endif
2112 
/*
 * dp_soc_interrupt_map_calculate_integrated() - Build the hardware IRQ id
 *					map for one interrupt context
 *					(integrated/line interrupts)
 * @soc: DP SOC handle
 * @intr_ctx_num: interrupt context number (index into configured ring masks)
 * @irq_id_map: output array receiving the IRQ ids for this context
 * @num_irq_r: output count of entries written into @irq_id_map
 *
 * For every ring bit set in the context's configured masks, append the
 * matching interrupt id.  Ids are computed as "<ring1 enum> - j": the
 * per-instance interrupt enums descend from the ring-1 value, so the
 * subtraction selects the j-th instance.  A monitor ring bit contributes
 * three ids (ppdu end, status ring, destination ring).
 */
static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
{
	int j;
	int num_irq = 0;

	int tx_mask =
		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mask =
		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_mon_mask =
		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);
	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
					soc->wlan_cfg_ctx, intr_ctx_num);

	soc->intr_mode = DP_INTR_INTEGRATED;

	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {

		if (tx_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(wbm2host_tx_completions_ring1 - j);
		}

		if (rx_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				(reo2host_destination_ring1 - j);
		}

		if (rxdma2host_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				rxdma2host_destination_ring_mac1 - j;
		}

		if (host2rxdma_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				host2rxdma_host_buf_ring_mac1 -	j;
		}

		if (host2rxdma_mon_ring_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				host2rxdma_monitor_ring1 - j;
		}

		/* A monitor WMAC needs three interrupt lines */
		if (rx_mon_mask & (1 << j)) {
			irq_id_map[num_irq++] =
				ppdu_end_interrupts_mac1 - j;
			irq_id_map[num_irq++] =
				rxdma2host_monitor_status_ring_mac1 - j;
			irq_id_map[num_irq++] =
				rxdma2host_monitor_destination_mac1 - j;
		}

		/* Release/exception/status rings have a single instance */
		if (rx_wbm_rel_ring_mask & (1 << j))
			irq_id_map[num_irq++] = wbm2host_rx_release;

		if (rx_err_ring_mask & (1 << j))
			irq_id_map[num_irq++] = reo2host_exception;

		if (reo_status_ring_mask & (1 << j))
			irq_id_map[num_irq++] = reo2host_status;

	}
	*num_irq_r = num_irq;
}
2188 
2189 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
2190 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
2191 		int msi_vector_count, int msi_vector_start)
2192 {
2193 	int tx_mask = wlan_cfg_get_tx_ring_mask(
2194 					soc->wlan_cfg_ctx, intr_ctx_num);
2195 	int rx_mask = wlan_cfg_get_rx_ring_mask(
2196 					soc->wlan_cfg_ctx, intr_ctx_num);
2197 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
2198 					soc->wlan_cfg_ctx, intr_ctx_num);
2199 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
2200 					soc->wlan_cfg_ctx, intr_ctx_num);
2201 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
2202 					soc->wlan_cfg_ctx, intr_ctx_num);
2203 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
2204 					soc->wlan_cfg_ctx, intr_ctx_num);
2205 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
2206 					soc->wlan_cfg_ctx, intr_ctx_num);
2207 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
2208 					soc->wlan_cfg_ctx, intr_ctx_num);
2209 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
2210 					soc->wlan_cfg_ctx, intr_ctx_num);
2211 
2212 	unsigned int vector =
2213 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
2214 	int num_irq = 0;
2215 
2216 	soc->intr_mode = DP_INTR_MSI;
2217 
2218 	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
2219 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask |
2220 	    host2rxdma_ring_mask | host2rxdma_mon_ring_mask)
2221 		irq_id_map[num_irq++] =
2222 			pld_get_msi_irq(soc->osdev->dev, vector);
2223 
2224 	*num_irq_r = num_irq;
2225 }
2226 
2227 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
2228 				    int *irq_id_map, int *num_irq)
2229 {
2230 	int msi_vector_count, ret;
2231 	uint32_t msi_base_data, msi_vector_start;
2232 
2233 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
2234 					    &msi_vector_count,
2235 					    &msi_base_data,
2236 					    &msi_vector_start);
2237 	if (ret)
2238 		return dp_soc_interrupt_map_calculate_integrated(soc,
2239 				intr_ctx_num, irq_id_map, num_irq);
2240 
2241 	else
2242 		dp_soc_interrupt_map_calculate_msi(soc,
2243 				intr_ctx_num, irq_id_map, num_irq,
2244 				msi_vector_count, msi_vector_start);
2245 }
2246 
2247 /*
2248  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
2249  * @txrx_soc: DP SOC handle
2250  *
2251  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
2252  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
2253  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
2254  *
2255  * Return: 0 for success. nonzero for failure.
2256  */
2257 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
2258 {
2259 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2260 
2261 	int i = 0;
2262 	int num_irq = 0;
2263 
2264 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
2265 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
2266 
2267 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
2268 		int ret = 0;
2269 
2270 		/* Map of IRQ ids registered with one interrupt context */
2271 		int irq_id_map[HIF_MAX_GRP_IRQ];
2272 
2273 		int tx_mask =
2274 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
2275 		int rx_mask =
2276 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
2277 		int rx_mon_mask =
2278 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
2279 		int rx_err_ring_mask =
2280 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
2281 		int rx_wbm_rel_ring_mask =
2282 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
2283 		int reo_status_ring_mask =
2284 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
2285 		int rxdma2host_ring_mask =
2286 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
2287 		int host2rxdma_ring_mask =
2288 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
2289 		int host2rxdma_mon_ring_mask =
2290 			wlan_cfg_get_host2rxdma_mon_ring_mask(
2291 				soc->wlan_cfg_ctx, i);
2292 
2293 		soc->intr_ctx[i].dp_intr_id = i;
2294 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
2295 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
2296 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
2297 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
2298 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
2299 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
2300 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
2301 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
2302 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
2303 			 host2rxdma_mon_ring_mask;
2304 
2305 		soc->intr_ctx[i].soc = soc;
2306 
2307 		num_irq = 0;
2308 
2309 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
2310 					       &num_irq);
2311 
2312 		ret = hif_register_ext_group(soc->hif_handle,
2313 				num_irq, irq_id_map, dp_service_srngs,
2314 				&soc->intr_ctx[i], "dp_intr",
2315 				HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
2316 
2317 		if (ret) {
2318 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2319 			FL("failed, ret = %d"), ret);
2320 
2321 			return QDF_STATUS_E_FAILURE;
2322 		}
2323 
2324 		hif_event_history_init(soc->hif_handle, i);
2325 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
2326 	}
2327 
2328 	hif_configure_ext_group_interrupts(soc->hif_handle);
2329 
2330 	return QDF_STATUS_SUCCESS;
2331 }
2332 
2333 /*
2334  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
2335  * @txrx_soc: DP SOC handle
2336  *
2337  * Return: none
2338  */
2339 static void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
2340 {
2341 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2342 	int i;
2343 
2344 	if (soc->intr_mode == DP_INTR_POLL) {
2345 		qdf_timer_free(&soc->int_timer);
2346 	} else {
2347 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
2348 	}
2349 
2350 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
2351 		soc->intr_ctx[i].tx_ring_mask = 0;
2352 		soc->intr_ctx[i].rx_ring_mask = 0;
2353 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
2354 		soc->intr_ctx[i].rx_err_ring_mask = 0;
2355 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
2356 		soc->intr_ctx[i].reo_status_ring_mask = 0;
2357 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
2358 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
2359 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
2360 
2361 		hif_event_history_deinit(soc->hif_handle, i);
2362 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
2363 	}
2364 
2365 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
2366 		    REG_BAND_UNKNOWN * sizeof(int), DP_MON_INVALID_LMAC_ID);
2367 }
2368 
/* Average-case sizing heuristics used by dp_hw_link_desc_pool_banks_alloc()
 * to dimension the common link-descriptor pool.
 */
#define AVG_MAX_MPDUS_PER_TID 128
#define AVG_TIDS_PER_CLIENT 2
#define AVG_FLOWS_PER_TID 2
#define AVG_MSDUS_PER_FLOW 128
#define AVG_MSDUS_PER_MPDU 4
2374 
2375 /*
2376  * dp_hw_link_desc_pool_banks_free() - Free h/w link desc pool banks
2377  * @soc: DP SOC handle
2378  * @mac_id: mac id
2379  *
2380  * Return: none
2381  */
2382 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id)
2383 {
2384 	struct qdf_mem_multi_page_t *pages;
2385 
2386 	if (mac_id != WLAN_INVALID_PDEV_ID)
2387 		pages = &soc->mon_link_desc_pages[mac_id];
2388 	else
2389 		pages = &soc->link_desc_pages;
2390 
2391 	if (pages->dma_pages) {
2392 		wlan_minidump_remove((void *)
2393 				     pages->dma_pages->page_v_addr_start);
2394 		qdf_mem_multi_pages_free(soc->osdev, pages, 0, false);
2395 	}
2396 }
2397 
2398 /*
2399  * dp_hw_link_desc_pool_banks_alloc() - Allocate h/w link desc pool banks
2400  * @soc: DP SOC handle
2401  * @mac_id: mac id
2402  *
2403  * Allocates memory pages for link descriptors, the page size is 4K for
2404  * MCL and 2MB for WIN. if the mac_id is invalid link descriptor pages are
2405  * allocated for regular RX/TX and if the there is a proper mac_id link
2406  * descriptors are allocated for RX monitor mode.
2407  *
2408  * Return: QDF_STATUS_SUCCESS: Success
2409  *	   QDF_STATUS_E_FAILURE: Failure
2410  */
2411 QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc, uint32_t mac_id)
2412 {
2413 	hal_soc_handle_t hal_soc = soc->hal_soc;
2414 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
2415 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
2416 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
2417 	uint32_t num_mpdus_per_link_desc = hal_num_mpdus_per_link_desc(hal_soc);
2418 	uint32_t num_msdus_per_link_desc = hal_num_msdus_per_link_desc(hal_soc);
2419 	uint32_t num_mpdu_links_per_queue_desc =
2420 		hal_num_mpdu_links_per_queue_desc(hal_soc);
2421 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
2422 	uint32_t *total_link_descs, total_mem_size;
2423 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
2424 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
2425 	uint32_t num_entries;
2426 	struct qdf_mem_multi_page_t *pages;
2427 	struct dp_srng *dp_srng;
2428 	uint8_t minidump_str[MINIDUMP_STR_SIZE];
2429 
2430 	/* Only Tx queue descriptors are allocated from common link descriptor
2431 	 * pool Rx queue descriptors are not included in this because (REO queue
2432 	 * extension descriptors) they are expected to be allocated contiguously
2433 	 * with REO queue descriptors
2434 	 */
2435 	if (mac_id != WLAN_INVALID_PDEV_ID) {
2436 		pages = &soc->mon_link_desc_pages[mac_id];
2437 		dp_srng = &soc->rxdma_mon_desc_ring[mac_id];
2438 		num_entries = dp_srng->alloc_size /
2439 			hal_srng_get_entrysize(soc->hal_soc,
2440 					       RXDMA_MONITOR_DESC);
2441 		total_link_descs = &soc->total_mon_link_descs[mac_id];
2442 		qdf_str_lcopy(minidump_str, "mon_link_desc_bank",
2443 			      MINIDUMP_STR_SIZE);
2444 	} else {
2445 		num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
2446 			AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
2447 
2448 		num_mpdu_queue_descs = num_mpdu_link_descs /
2449 			num_mpdu_links_per_queue_desc;
2450 
2451 		num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
2452 			AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
2453 			num_msdus_per_link_desc;
2454 
2455 		num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
2456 			AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
2457 
2458 		num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
2459 			num_tx_msdu_link_descs + num_rx_msdu_link_descs;
2460 
2461 		pages = &soc->link_desc_pages;
2462 		total_link_descs = &soc->total_link_descs;
2463 		qdf_str_lcopy(minidump_str, "link_desc_bank",
2464 			      MINIDUMP_STR_SIZE);
2465 	}
2466 
2467 	/* Round up to power of 2 */
2468 	*total_link_descs = 1;
2469 	while (*total_link_descs < num_entries)
2470 		*total_link_descs <<= 1;
2471 
2472 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2473 		  FL("total_link_descs: %u, link_desc_size: %d"),
2474 		  *total_link_descs, link_desc_size);
2475 	total_mem_size =  *total_link_descs * link_desc_size;
2476 	total_mem_size += link_desc_align;
2477 
2478 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2479 		  FL("total_mem_size: %d"), total_mem_size);
2480 
2481 	dp_set_max_page_size(pages, max_alloc_size);
2482 	qdf_mem_multi_pages_alloc(soc->osdev,
2483 				  pages,
2484 				  link_desc_size,
2485 				  *total_link_descs,
2486 				  0, false);
2487 	if (!pages->num_pages) {
2488 		dp_err("Multi page alloc fail for hw link desc pool");
2489 		return QDF_STATUS_E_FAULT;
2490 	}
2491 
2492 	wlan_minidump_log(pages->dma_pages->page_v_addr_start,
2493 			  pages->num_pages * pages->page_size,
2494 			  soc->ctrl_psoc,
2495 			  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
2496 			  "hw_link_desc_bank");
2497 
2498 	return QDF_STATUS_SUCCESS;
2499 }
2500 
2501 /*
2502  * dp_hw_link_desc_ring_free() - Free h/w link desc rings
2503  * @soc: DP SOC handle
2504  *
2505  * Return: none
2506  */
2507 static void dp_hw_link_desc_ring_free(struct dp_soc *soc)
2508 {
2509 	uint32_t i;
2510 	uint32_t size = soc->wbm_idle_scatter_buf_size;
2511 	void *vaddr = soc->wbm_idle_link_ring.base_vaddr_unaligned;
2512 	qdf_dma_addr_t paddr;
2513 
2514 	if (soc->wbm_idle_scatter_buf_base_vaddr[0]) {
2515 		for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2516 			vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
2517 			paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
2518 			if (vaddr) {
2519 				qdf_mem_free_consistent(soc->osdev,
2520 							soc->osdev->dev,
2521 							size,
2522 							vaddr,
2523 							paddr,
2524 							0);
2525 				vaddr = NULL;
2526 			}
2527 		}
2528 	} else {
2529 		wlan_minidump_remove(vaddr);
2530 		dp_srng_free(soc, &soc->wbm_idle_link_ring);
2531 	}
2532 }
2533 
2534 /*
2535  * dp_hw_link_desc_ring_alloc() - Allocate hw link desc rings
2536  * @soc: DP SOC handle
2537  *
2538  * Allocate memory for WBM_IDLE_LINK srng ring if the number of
2539  * link descriptors is less then the max_allocated size. else
2540  * allocate memory for wbm_idle_scatter_buffer.
2541  *
2542  * Return: QDF_STATUS_SUCCESS: success
2543  *         QDF_STATUS_E_NO_MEM: No memory (Failure)
2544  */
2545 static QDF_STATUS dp_hw_link_desc_ring_alloc(struct dp_soc *soc)
2546 {
2547 	uint32_t entry_size, i;
2548 	uint32_t total_mem_size;
2549 	qdf_dma_addr_t *baseaddr = NULL;
2550 	struct dp_srng *dp_srng;
2551 	uint32_t ring_type;
2552 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
2553 	uint32_t tlds;
2554 
2555 	ring_type = WBM_IDLE_LINK;
2556 	dp_srng = &soc->wbm_idle_link_ring;
2557 	tlds = soc->total_link_descs;
2558 
2559 	entry_size = hal_srng_get_entrysize(soc->hal_soc, ring_type);
2560 	total_mem_size = entry_size * tlds;
2561 
2562 	if (total_mem_size <= max_alloc_size) {
2563 		if (dp_srng_alloc(soc, dp_srng, ring_type, tlds, 0)) {
2564 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2565 				  FL("Link desc idle ring setup failed"));
2566 			goto fail;
2567 		}
2568 
2569 		wlan_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
2570 				  soc->wbm_idle_link_ring.alloc_size,
2571 				  soc->ctrl_psoc,
2572 				  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
2573 				  "wbm_idle_link_ring");
2574 	} else {
2575 		uint32_t num_scatter_bufs;
2576 		uint32_t num_entries_per_buf;
2577 		uint32_t buf_size = 0;
2578 
2579 		soc->wbm_idle_scatter_buf_size =
2580 			hal_idle_list_scatter_buf_size(soc->hal_soc);
2581 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
2582 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
2583 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
2584 					soc->hal_soc, total_mem_size,
2585 					soc->wbm_idle_scatter_buf_size);
2586 
2587 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
2588 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2589 				  FL("scatter bufs size out of bounds"));
2590 			goto fail;
2591 		}
2592 
2593 		for (i = 0; i < num_scatter_bufs; i++) {
2594 			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
2595 			buf_size = soc->wbm_idle_scatter_buf_size;
2596 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
2597 				qdf_mem_alloc_consistent(soc->osdev,
2598 							 soc->osdev->dev,
2599 							 buf_size,
2600 							 baseaddr);
2601 
2602 			if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
2603 				QDF_TRACE(QDF_MODULE_ID_DP,
2604 					  QDF_TRACE_LEVEL_ERROR,
2605 					  FL("Scatter lst memory alloc fail"));
2606 				goto fail;
2607 			}
2608 		}
2609 		soc->num_scatter_bufs = num_scatter_bufs;
2610 	}
2611 	return QDF_STATUS_SUCCESS;
2612 
2613 fail:
2614 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2615 		void *vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
2616 		qdf_dma_addr_t paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
2617 
2618 		if (vaddr) {
2619 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2620 						soc->wbm_idle_scatter_buf_size,
2621 						vaddr,
2622 						paddr, 0);
2623 			vaddr = NULL;
2624 		}
2625 	}
2626 	return QDF_STATUS_E_NOMEM;
2627 }
2628 
2629 /*
2630  * dp_hw_link_desc_ring_init() - Initialize hw link desc rings
2631  * @soc: DP SOC handle
2632  *
2633  * Return: QDF_STATUS_SUCCESS: success
2634  *         QDF_STATUS_E_FAILURE: failure
2635  */
2636 static QDF_STATUS dp_hw_link_desc_ring_init(struct dp_soc *soc)
2637 {
2638 	struct dp_srng *dp_srng = &soc->wbm_idle_link_ring;
2639 
2640 	if (dp_srng->base_vaddr_unaligned) {
2641 		if (dp_srng_init(soc, dp_srng, WBM_IDLE_LINK, 0, 0))
2642 			return QDF_STATUS_E_FAILURE;
2643 	}
2644 	return QDF_STATUS_SUCCESS;
2645 }
2646 
2647 /*
2648  * dp_hw_link_desc_ring_deinit() - Reset hw link desc rings
2649  * @soc: DP SOC handle
2650  *
2651  * Return: None
2652  */
2653 static void dp_hw_link_desc_ring_deinit(struct dp_soc *soc)
2654 {
2655 	dp_srng_deinit(soc, &soc->wbm_idle_link_ring, WBM_IDLE_LINK, 0);
2656 }
2657 
2658 /*
2659  * dp_hw_link_desc_ring_replenish() - Replenish hw link desc rings
2660  * @soc: DP SOC handle
2661  * @mac_id: mac id
2662  *
2663  * Return: None
2664  */
2665 void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id)
2666 {
2667 	uint32_t cookie = 0;
2668 	uint32_t page_idx = 0;
2669 	struct qdf_mem_multi_page_t *pages;
2670 	struct qdf_mem_dma_page_t *dma_pages;
2671 	uint32_t offset = 0;
2672 	uint32_t count = 0;
2673 	void *desc_srng;
2674 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
2675 	uint32_t total_link_descs;
2676 	uint32_t scatter_buf_num;
2677 	uint32_t num_entries_per_buf = 0;
2678 	uint32_t rem_entries;
2679 	uint32_t num_descs_per_page;
2680 	uint32_t num_scatter_bufs = 0;
2681 	uint8_t *scatter_buf_ptr;
2682 	void *desc;
2683 
2684 	num_scatter_bufs = soc->num_scatter_bufs;
2685 
2686 	if (mac_id == WLAN_INVALID_PDEV_ID) {
2687 		pages = &soc->link_desc_pages;
2688 		total_link_descs = soc->total_link_descs;
2689 		desc_srng = soc->wbm_idle_link_ring.hal_srng;
2690 	} else {
2691 		pages = &soc->mon_link_desc_pages[mac_id];
2692 		total_link_descs = soc->total_mon_link_descs[mac_id];
2693 		desc_srng = soc->rxdma_mon_desc_ring[mac_id].hal_srng;
2694 	}
2695 
2696 	dma_pages = pages->dma_pages;
2697 	do {
2698 		qdf_mem_zero(dma_pages[page_idx].page_v_addr_start,
2699 			     pages->page_size);
2700 		page_idx++;
2701 	} while (page_idx < pages->num_pages);
2702 
2703 	if (desc_srng) {
2704 		hal_srng_access_start_unlocked(soc->hal_soc, desc_srng);
2705 		page_idx = 0;
2706 		count = 0;
2707 		offset = 0;
2708 		pages = &soc->link_desc_pages;
2709 		while ((desc = hal_srng_src_get_next(soc->hal_soc,
2710 						     desc_srng)) &&
2711 			(count < total_link_descs)) {
2712 			page_idx = count / pages->num_element_per_page;
2713 			offset = count % pages->num_element_per_page;
2714 			cookie = LINK_DESC_COOKIE(count, page_idx);
2715 
2716 			hal_set_link_desc_addr(desc, cookie,
2717 					       dma_pages[page_idx].page_p_addr
2718 					       + (offset * link_desc_size));
2719 			count++;
2720 		}
2721 		hal_srng_access_end_unlocked(soc->hal_soc, desc_srng);
2722 	} else {
2723 		/* Populate idle list scatter buffers with link descriptor
2724 		 * pointers
2725 		 */
2726 		scatter_buf_num = 0;
2727 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
2728 					soc->hal_soc,
2729 					soc->wbm_idle_scatter_buf_size);
2730 
2731 		scatter_buf_ptr = (uint8_t *)(
2732 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
2733 		rem_entries = num_entries_per_buf;
2734 		pages = &soc->link_desc_pages;
2735 		page_idx = 0; count = 0;
2736 		offset = 0;
2737 		num_descs_per_page = pages->num_element_per_page;
2738 
2739 		while (count < total_link_descs) {
2740 			page_idx = count / num_descs_per_page;
2741 			offset = count % num_descs_per_page;
2742 			cookie = LINK_DESC_COOKIE(count, page_idx);
2743 			hal_set_link_desc_addr((void *)scatter_buf_ptr,
2744 					       cookie,
2745 					       dma_pages[page_idx].page_p_addr +
2746 					       (offset * link_desc_size));
2747 			rem_entries--;
2748 			if (rem_entries) {
2749 				scatter_buf_ptr += link_desc_size;
2750 			} else {
2751 				rem_entries = num_entries_per_buf;
2752 				scatter_buf_num++;
2753 				if (scatter_buf_num >= num_scatter_bufs)
2754 					break;
2755 				scatter_buf_ptr = (uint8_t *)
2756 					(soc->wbm_idle_scatter_buf_base_vaddr[
2757 					 scatter_buf_num]);
2758 			}
2759 			count++;
2760 		}
2761 		/* Setup link descriptor idle list in HW */
2762 		hal_setup_link_idle_list(soc->hal_soc,
2763 			soc->wbm_idle_scatter_buf_base_paddr,
2764 			soc->wbm_idle_scatter_buf_base_vaddr,
2765 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
2766 			(uint32_t)(scatter_buf_ptr -
2767 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
2768 			scatter_buf_num-1])), total_link_descs);
2769 	}
2770 }
2771 
#ifdef IPA_OFFLOAD
/* REO destination ring sizes with IPA offload compiled in.
 * NOTE(review): presumably one entry (1023 vs 1024) is kept back when IPA
 * shares the ring — confirm against the IPA offload design notes.
 */
#define REO_DST_RING_SIZE_QCA6290 1023
#ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
#define REO_DST_RING_SIZE_QCA8074 1023
#define REO_DST_RING_SIZE_QCN9000 2048
#else
/* Emulation builds use minimal rings */
#define REO_DST_RING_SIZE_QCA8074 8
#define REO_DST_RING_SIZE_QCN9000 8
#endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */

#else

/* REO destination ring sizes without IPA offload */
#define REO_DST_RING_SIZE_QCA6290 1024
#ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
#define REO_DST_RING_SIZE_QCA8074 2048
#define REO_DST_RING_SIZE_QCN9000 2048
#else
/* Emulation builds use minimal rings */
#define REO_DST_RING_SIZE_QCA8074 8
#define REO_DST_RING_SIZE_QCN9000 8
#endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
#endif /* IPA_OFFLOAD */
2793 
#ifndef FEATURE_WDS
/* WDS not compiled in: empty stubs so callers need no #ifdef guards */
static void dp_soc_wds_attach(struct dp_soc *soc)
{
}

static void dp_soc_wds_detach(struct dp_soc *soc)
{
}
#endif
2803 /*
2804  * dp_soc_reset_ring_map() - Reset cpu ring map
2805  * @soc: Datapath soc handler
2806  *
2807  * This api resets the default cpu ring map
2808  */
2809 
2810 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
2811 {
2812 	uint8_t i;
2813 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2814 
2815 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
2816 		switch (nss_config) {
2817 		case dp_nss_cfg_first_radio:
2818 			/*
2819 			 * Setting Tx ring map for one nss offloaded radio
2820 			 */
2821 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
2822 			break;
2823 
2824 		case dp_nss_cfg_second_radio:
2825 			/*
2826 			 * Setting Tx ring for two nss offloaded radios
2827 			 */
2828 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
2829 			break;
2830 
2831 		case dp_nss_cfg_dbdc:
2832 			/*
2833 			 * Setting Tx ring map for 2 nss offloaded radios
2834 			 */
2835 			soc->tx_ring_map[i] =
2836 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
2837 			break;
2838 
2839 		case dp_nss_cfg_dbtc:
2840 			/*
2841 			 * Setting Tx ring map for 3 nss offloaded radios
2842 			 */
2843 			soc->tx_ring_map[i] =
2844 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
2845 			break;
2846 
2847 		default:
2848 			dp_err("tx_ring_map failed due to invalid nss cfg");
2849 			break;
2850 		}
2851 	}
2852 }
2853 
2854 /*
2855  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
2856  * @dp_soc - DP soc handle
2857  * @ring_type - ring type
2858  * @ring_num - ring_num
2859  *
2860  * return 0 or 1
2861  */
2862 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
2863 {
2864 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2865 	uint8_t status = 0;
2866 
2867 	switch (ring_type) {
2868 	case WBM2SW_RELEASE:
2869 	case REO_DST:
2870 	case RXDMA_BUF:
2871 		status = ((nss_config) & (1 << ring_num));
2872 		break;
2873 	default:
2874 		break;
2875 	}
2876 
2877 	return status;
2878 }
2879 
2880 /*
2881  * dp_soc_disable_unused_mac_intr_mask() - reset interrupt mask for
2882  *					  unused WMAC hw rings
2883  * @dp_soc - DP Soc handle
2884  * @mac_num - wmac num
2885  *
2886  * Return: Return void
2887  */
2888 static void dp_soc_disable_unused_mac_intr_mask(struct dp_soc *soc,
2889 						int mac_num)
2890 {
2891 	int *grp_mask = NULL;
2892 	int group_number;
2893 
2894 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2895 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
2896 	wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2897 					  group_number, 0x0);
2898 
2899 	grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
2900 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
2901 	wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
2902 				      group_number, 0x0);
2903 
2904 	grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
2905 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
2906 	wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
2907 					  group_number, 0x0);
2908 
2909 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
2910 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
2911 	wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
2912 					      group_number, 0x0);
2913 }
2914 
2915 /*
2916  * dp_soc_reset_intr_mask() - reset interrupt mask
2917  * @dp_soc - DP Soc handle
2918  *
2919  * Return: Return void
2920  */
2921 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
2922 {
2923 	uint8_t j;
2924 	int *grp_mask = NULL;
2925 	int group_number, mask, num_ring;
2926 
2927 	/* number of tx ring */
2928 	num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
2929 
2930 	/*
2931 	 * group mask for tx completion  ring.
2932 	 */
2933 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
2934 
2935 	/* loop and reset the mask for only offloaded ring */
2936 	for (j = 0; j < num_ring; j++) {
2937 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
2938 			continue;
2939 		}
2940 
2941 		/*
2942 		 * Group number corresponding to tx offloaded ring.
2943 		 */
2944 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2945 		if (group_number < 0) {
2946 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2947 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2948 					WBM2SW_RELEASE, j);
2949 			return;
2950 		}
2951 
2952 		/* reset the tx mask for offloaded ring */
2953 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
2954 		mask &= (~(1 << j));
2955 
2956 		/*
2957 		 * reset the interrupt mask for offloaded ring.
2958 		 */
2959 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2960 	}
2961 
2962 	/* number of rx rings */
2963 	num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2964 
2965 	/*
2966 	 * group mask for reo destination ring.
2967 	 */
2968 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2969 
2970 	/* loop and reset the mask for only offloaded ring */
2971 	for (j = 0; j < num_ring; j++) {
2972 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
2973 			continue;
2974 		}
2975 
2976 		/*
2977 		 * Group number corresponding to rx offloaded ring.
2978 		 */
2979 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2980 		if (group_number < 0) {
2981 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2982 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2983 					REO_DST, j);
2984 			return;
2985 		}
2986 
2987 		/* set the interrupt mask for offloaded ring */
2988 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
2989 		mask &= (~(1 << j));
2990 
2991 		/*
2992 		 * set the interrupt mask to zero for rx offloaded radio.
2993 		 */
2994 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2995 	}
2996 
2997 	/*
2998 	 * group mask for Rx buffer refill ring
2999 	 */
3000 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
3001 
3002 	/* loop and reset the mask for only offloaded ring */
3003 	for (j = 0; j < MAX_PDEV_CNT; j++) {
3004 		int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
3005 
3006 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
3007 			continue;
3008 		}
3009 
3010 		/*
3011 		 * Group number corresponding to rx offloaded ring.
3012 		 */
3013 		group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask);
3014 		if (group_number < 0) {
3015 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3016 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
3017 					REO_DST, lmac_id);
3018 			return;
3019 		}
3020 
3021 		/* set the interrupt mask for offloaded ring */
3022 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
3023 				group_number);
3024 		mask &= (~(1 << lmac_id));
3025 
3026 		/*
3027 		 * set the interrupt mask to zero for rx offloaded radio.
3028 		 */
3029 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
3030 			group_number, mask);
3031 	}
3032 }
3033 
3034 #ifdef IPA_OFFLOAD
3035 /**
3036  * dp_reo_remap_config() - configure reo remap register value based
3037  *                         nss configuration.
3038  *		based on offload_radio value below remap configuration
3039  *		get applied.
3040  *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
3041  *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
3042  *		2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
3043  *		3 - both Radios handled by NSS (remap not required)
3044  *		4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
3045  *
3046  * @remap1: output parameter indicates reo remap 1 register value
3047  * @remap2: output parameter indicates reo remap 2 register value
3048  * Return: bool type, true if remap is configured else false.
3049  */
3050 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap1, uint32_t *remap2)
3051 {
3052 	uint32_t ring[4] = {REO_REMAP_SW1, REO_REMAP_SW2,
3053 						REO_REMAP_SW3};
3054 	hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
3055 				      3, remap1, remap2);
3056 	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
3057 
3058 	return true;
3059 }
3060 
3061 /**
3062  * dp_ipa_get_tx_ring_size() - Get Tx ring size for IPA
3063  *
3064  * @tx_ring_num: Tx ring number
3065  * @tx_ipa_ring_sz: Return param only updated for IPA.
3066  *
3067  * Return: None
3068  */
3069 static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz)
3070 {
3071 	if (tx_ring_num == IPA_TCL_DATA_RING_IDX)
3072 		*tx_ipa_ring_sz = WLAN_CFG_IPA_TX_RING_SIZE;
3073 }
3074 
3075 /**
3076  * dp_ipa_get_tx_comp_ring_size() - Get Tx comp ring size for IPA
3077  *
3078  * @tx_comp_ring_num: Tx comp ring number
3079  * @tx_comp_ipa_ring_sz: Return param only updated for IPA.
3080  *
3081  * Return: None
3082  */
3083 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
3084 					 int *tx_comp_ipa_ring_sz)
3085 {
3086 	if (tx_comp_ring_num == IPA_TCL_DATA_RING_IDX)
3087 		*tx_comp_ipa_ring_sz = WLAN_CFG_IPA_TX_COMP_RING_SIZE;
3088 }
3089 #else
3090 static uint8_t dp_reo_ring_selection(uint32_t value, uint32_t *ring)
3091 {
3092 	uint8_t num = 0;
3093 
3094 	switch (value) {
3095 	case 0xF:
3096 		num = 4;
3097 		ring[0] = REO_REMAP_SW1;
3098 		ring[1] = REO_REMAP_SW2;
3099 		ring[2] = REO_REMAP_SW3;
3100 		ring[3] = REO_REMAP_SW4;
3101 		break;
3102 	case 0xE:
3103 		num = 3;
3104 		ring[0] = REO_REMAP_SW2;
3105 		ring[1] = REO_REMAP_SW3;
3106 		ring[2] = REO_REMAP_SW4;
3107 		break;
3108 	case 0xD:
3109 		num = 3;
3110 		ring[0] = REO_REMAP_SW1;
3111 		ring[1] = REO_REMAP_SW3;
3112 		ring[2] = REO_REMAP_SW4;
3113 		break;
3114 	case 0xC:
3115 		num = 2;
3116 		ring[0] = REO_REMAP_SW3;
3117 		ring[1] = REO_REMAP_SW4;
3118 		break;
3119 	case 0xB:
3120 		num = 3;
3121 		ring[0] = REO_REMAP_SW1;
3122 		ring[1] = REO_REMAP_SW2;
3123 		ring[2] = REO_REMAP_SW4;
3124 		break;
3125 	case 0xA:
3126 		num = 2;
3127 		ring[0] = REO_REMAP_SW2;
3128 		ring[1] = REO_REMAP_SW4;
3129 		break;
3130 	case 0x9:
3131 		num = 2;
3132 		ring[0] = REO_REMAP_SW1;
3133 		ring[1] = REO_REMAP_SW4;
3134 		break;
3135 	case 0x8:
3136 		num = 1;
3137 		ring[0] = REO_REMAP_SW4;
3138 		break;
3139 	case 0x7:
3140 		num = 3;
3141 		ring[0] = REO_REMAP_SW1;
3142 		ring[1] = REO_REMAP_SW2;
3143 		ring[2] = REO_REMAP_SW3;
3144 		break;
3145 	case 0x6:
3146 		num = 2;
3147 		ring[0] = REO_REMAP_SW2;
3148 		ring[1] = REO_REMAP_SW3;
3149 		break;
3150 	case 0x5:
3151 		num = 2;
3152 		ring[0] = REO_REMAP_SW1;
3153 		ring[1] = REO_REMAP_SW3;
3154 		break;
3155 	case 0x4:
3156 		num = 1;
3157 		ring[0] = REO_REMAP_SW3;
3158 		break;
3159 	case 0x3:
3160 		num = 2;
3161 		ring[0] = REO_REMAP_SW1;
3162 		ring[1] = REO_REMAP_SW2;
3163 		break;
3164 	case 0x2:
3165 		num = 1;
3166 		ring[0] = REO_REMAP_SW2;
3167 		break;
3168 	case 0x1:
3169 		num = 1;
3170 		ring[0] = REO_REMAP_SW1;
3171 		break;
3172 	}
3173 	return num;
3174 }
3175 
3176 static bool dp_reo_remap_config(struct dp_soc *soc,
3177 				uint32_t *remap1,
3178 				uint32_t *remap2)
3179 {
3180 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
3181 	uint32_t reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
3182 	uint8_t target_type, num;
3183 	uint32_t ring[4];
3184 	uint32_t value;
3185 
3186 	target_type = hal_get_target_type(soc->hal_soc);
3187 
3188 	switch (offload_radio) {
3189 	case dp_nss_cfg_default:
3190 		value = reo_config & 0xF;
3191 		num = dp_reo_ring_selection(value, ring);
3192 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
3193 					      num, remap1, remap2);
3194 
3195 		break;
3196 	case dp_nss_cfg_first_radio:
3197 		value = reo_config & 0xE;
3198 		num = dp_reo_ring_selection(value, ring);
3199 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
3200 					      num, remap1, remap2);
3201 
3202 		break;
3203 	case dp_nss_cfg_second_radio:
3204 		value = reo_config & 0xD;
3205 		num = dp_reo_ring_selection(value, ring);
3206 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
3207 					      num, remap1, remap2);
3208 
3209 		break;
3210 	case dp_nss_cfg_dbdc:
3211 	case dp_nss_cfg_dbtc:
3212 		/* return false if both or all are offloaded to NSS */
3213 		return false;
3214 	}
3215 
3216 	dp_debug("remap1 %x remap2 %x offload_radio %u",
3217 		 *remap1, *remap2, offload_radio);
3218 	return true;
3219 }
3220 
/* no-op: Tx ring size is never overridden when IPA offload is disabled */
static void dp_ipa_get_tx_ring_size(int ring_num, int *tx_ipa_ring_sz)
{
}
3224 
/* no-op: Tx comp ring size is never overridden when IPA offload is disabled */
static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
					 int *tx_comp_ipa_ring_sz)
{
}
3229 #endif /* IPA_OFFLOAD */
3230 
3231 /*
3232  * dp_reo_frag_dst_set() - configure reo register to set the
3233  *                        fragment destination ring
3234  * @soc : Datapath soc
3235  * @frag_dst_ring : output parameter to set fragment destination ring
3236  *
3237  * Based on offload_radio below fragment destination rings is selected
3238  * 0 - TCL
3239  * 1 - SW1
3240  * 2 - SW2
3241  * 3 - SW3
3242  * 4 - SW4
3243  * 5 - Release
3244  * 6 - FW
3245  * 7 - alternate select
3246  *
3247  * return: void
3248  */
3249 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
3250 {
3251 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
3252 
3253 	switch (offload_radio) {
3254 	case dp_nss_cfg_default:
3255 		*frag_dst_ring = REO_REMAP_TCL;
3256 		break;
3257 	case dp_nss_cfg_first_radio:
3258 		/*
3259 		 * This configuration is valid for single band radio which
3260 		 * is also NSS offload.
3261 		 */
3262 	case dp_nss_cfg_dbdc:
3263 	case dp_nss_cfg_dbtc:
3264 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
3265 		break;
3266 	default:
3267 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3268 				FL("dp_reo_frag_dst_set invalid offload radio config"));
3269 		break;
3270 	}
3271 }
3272 
3273 #ifdef ENABLE_VERBOSE_DEBUG
3274 static void dp_enable_verbose_debug(struct dp_soc *soc)
3275 {
3276 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3277 
3278 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3279 
3280 	if (soc_cfg_ctx->per_pkt_trace & dp_verbose_debug_mask)
3281 		is_dp_verbose_debug_enabled = true;
3282 
3283 	if (soc_cfg_ctx->per_pkt_trace & hal_verbose_debug_mask)
3284 		hal_set_verbose_debug(true);
3285 	else
3286 		hal_set_verbose_debug(false);
3287 }
3288 #else
/* no-op stub when ENABLE_VERBOSE_DEBUG is not defined */
static void dp_enable_verbose_debug(struct dp_soc *soc)
{
}
3292 #endif
3293 
3294 #ifdef WLAN_FEATURE_STATS_EXT
/* Create the event used to signal completion of rx hw stats requests */
static inline void dp_create_ext_stats_event(struct dp_soc *soc)
{
	qdf_event_create(&soc->rx_hw_stats_event);
}
3299 #else
/* no-op stub when WLAN_FEATURE_STATS_EXT is not defined */
static inline void dp_create_ext_stats_event(struct dp_soc *soc)
{
}
3303 #endif
3304 
/*
 * dp_deinit_tx_pair_by_index() - Deinit one TCL data / Tx completion ring
 *				  pair and drop their minidump entries
 * @soc: DP SoC handle
 * @index: ring pair index
 */
static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index)
{
	/* remove minidump entry before the backing memory is deinitialized */
	wlan_minidump_remove(soc->tcl_data_ring[index].base_vaddr_unaligned);
	dp_srng_deinit(soc, &soc->tcl_data_ring[index], TCL_DATA, index);

	wlan_minidump_remove(soc->tx_comp_ring[index].base_vaddr_unaligned);
	dp_srng_deinit(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE, index);
}
3313 
3314 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
3315 						uint8_t index)
3316 {
3317 	if (dp_srng_init(soc, &soc->tcl_data_ring[index], TCL_DATA, index, 0)) {
3318 		dp_err("dp_srng_init failed for tcl_data_ring");
3319 		goto fail1;
3320 	}
3321 	wlan_minidump_log(soc->tcl_data_ring[index].base_vaddr_unaligned,
3322 			  soc->tcl_data_ring[index].alloc_size,
3323 			  soc->ctrl_psoc,
3324 			  WLAN_MD_DP_SRNG_TCL_DATA,
3325 			  "tcl_data_ring");
3326 
3327 	if (dp_srng_init(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
3328 			 index, 0)) {
3329 		dp_err("dp_srng_init failed for tx_comp_ring");
3330 		goto fail1;
3331 	}
3332 	wlan_minidump_log(soc->tx_comp_ring[index].base_vaddr_unaligned,
3333 			  soc->tx_comp_ring[index].alloc_size,
3334 			  soc->ctrl_psoc,
3335 			  WLAN_MD_DP_SRNG_TX_COMP,
3336 			  "tcl_comp_ring");
3337 
3338 	return QDF_STATUS_SUCCESS;
3339 
3340 fail1:
3341 	return QDF_STATUS_E_FAILURE;
3342 }
3343 
/* Free the SRNG memory of one TCL data / Tx completion ring pair */
static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index)
{
	dp_srng_free(soc, &soc->tcl_data_ring[index]);
	dp_srng_free(soc, &soc->tx_comp_ring[index]);
}
3349 
3350 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
3351 						 uint8_t index)
3352 {
3353 	int tx_ring_size;
3354 	int tx_comp_ring_size;
3355 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
3356 	int cached = 0;
3357 
3358 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
3359 	dp_ipa_get_tx_ring_size(index, &tx_ring_size);
3360 
3361 	if (dp_srng_alloc(soc, &soc->tcl_data_ring[index], TCL_DATA,
3362 			  tx_ring_size, cached)) {
3363 		dp_err("dp_srng_alloc failed for tcl_data_ring");
3364 		goto fail1;
3365 	}
3366 
3367 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
3368 	dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size);
3369 	/* Enable cached TCL desc if NSS offload is disabled */
3370 	if (!wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
3371 		cached = WLAN_CFG_DST_RING_CACHED_DESC;
3372 
3373 	if (dp_srng_alloc(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
3374 			  tx_comp_ring_size, cached)) {
3375 		dp_err("dp_srng_alloc failed for tx_comp_ring");
3376 		goto fail1;
3377 	}
3378 
3379 	return QDF_STATUS_SUCCESS;
3380 
3381 fail1:
3382 	return QDF_STATUS_E_FAILURE;
3383 }
3384 
/*
 * dp_lro_hash_setup() - build and send the LRO/GRO hash config to target
 * @soc: DP SoC handle
 * @pdev: DP pdev handle (its pdev_id is used in the config message)
 *
 * Return: QDF_STATUS_SUCCESS on success, an error status when LRO/GRO/RX
 *	   hash are all disabled, the lro_hash_config callback is missing,
 *	   or the callback itself fails.
 */
static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct cdp_lro_hash_config lro_hash;
	QDF_STATUS status;

	/* nothing to configure if LRO, GRO and RX hash are all disabled */
	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
		dp_err("LRO, GRO and RX hash disabled");
		return QDF_STATUS_E_FAILURE;
	}

	qdf_mem_zero(&lro_hash, sizeof(lro_hash));

	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
		lro_hash.lro_enable = 1;
		/* flag value to match (ACK) and the mask of flags compared */
		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
	}

	/* random toeplitz seeds for the ipv4/ipv6 flow hash */
	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
			     (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
			      LRO_IPV4_SEED_ARR_SZ));
	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
			     (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
			      LRO_IPV6_SEED_ARR_SZ));

	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);

	/* the lro_hash_config callback is mandatory on this path */
	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
		QDF_BUG(0);
		dp_err("lro_hash_config not configured");
		return QDF_STATUS_E_FAILURE;
	}

	status = soc->cdp_soc.ol_ops->lro_hash_config(soc->ctrl_psoc,
						      pdev->pdev_id,
						      &lro_hash);
	if (!QDF_IS_STATUS_SUCCESS(status)) {
		dp_err("failed to send lro_hash_config to FW %u", status);
		return status;
	}

	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
		lro_hash.lro_enable, lro_hash.tcp_flag,
		lro_hash.tcp_flag_mask);

	dp_info("toeplitz_hash_ipv4:");
	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			   lro_hash.toeplitz_hash_ipv4,
			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
			   LRO_IPV4_SEED_ARR_SZ));

	dp_info("toeplitz_hash_ipv6:");
	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			   lro_hash.toeplitz_hash_ipv6,
			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
			   LRO_IPV6_SEED_ARR_SZ));

	return status;
}
3449 
3450 /*
3451  * dp_rxdma_ring_setup() - configure the RX DMA rings
3452  * @soc: data path SoC handle
3453  * @pdev: Physical device handle
3454  *
 * Return: QDF_STATUS_SUCCESS - success, QDF_STATUS_E_FAILURE - failure
3456  */
3457 #ifdef QCA_HOST2FW_RXBUF_RING
3458 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3459 {
3460 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3461 	int max_mac_rings;
3462 	int i;
3463 	int ring_size;
3464 
3465 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3466 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
3467 	ring_size =  wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx);
3468 
3469 	for (i = 0; i < max_mac_rings; i++) {
3470 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
3471 		if (dp_srng_alloc(soc, &pdev->rx_mac_buf_ring[i],
3472 				  RXDMA_BUF, ring_size, 0)) {
3473 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3474 				  FL("failed rx mac ring setup"));
3475 			return QDF_STATUS_E_FAILURE;
3476 		}
3477 
3478 		if (dp_srng_init(soc, &pdev->rx_mac_buf_ring[i],
3479 				 RXDMA_BUF, 1, i)) {
3480 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3481 				  FL("failed rx mac ring setup"));
3482 
3483 			dp_srng_free(soc, &pdev->rx_mac_buf_ring[i]);
3484 			return QDF_STATUS_E_FAILURE;
3485 		}
3486 	}
3487 	return QDF_STATUS_SUCCESS;
3488 }
3489 #else
/* no-op: host2FW rx buffer rings are not used without QCA_HOST2FW_RXBUF_RING */
static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
3494 #endif
3495 
3496 /**
3497  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
3498  * @pdev - DP_PDEV handle
3499  *
3500  * Return: void
3501  */
3502 static inline void
3503 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
3504 {
3505 	uint8_t map_id;
3506 	struct dp_soc *soc = pdev->soc;
3507 
3508 	if (!soc)
3509 		return;
3510 
3511 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
3512 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
3513 			     default_dscp_tid_map,
3514 			     sizeof(default_dscp_tid_map));
3515 	}
3516 
3517 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
3518 		hal_tx_set_dscp_tid_map(soc->hal_soc,
3519 					default_dscp_tid_map,
3520 					map_id);
3521 	}
3522 }
3523 
3524 /**
3525  * dp_pcp_tid_map_setup(): Initialize the pcp-tid maps
3526  * @pdev - DP_PDEV handle
3527  *
3528  * Return: void
3529  */
static inline void
dp_pcp_tid_map_setup(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	if (!soc)
		return;

	/* copy the default PCP->TID table into soc and program it in HW */
	qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
		     sizeof(default_pcp_tid_map));
	hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
}
3542 
3543 #ifdef IPA_OFFLOAD
3544 /**
3545  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
3546  * @soc: data path instance
3547  * @pdev: core txrx pdev context
3548  *
3549  * Return: QDF_STATUS_SUCCESS: success
3550  *         QDF_STATUS_E_RESOURCES: Error return
3551  */
3552 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3553 					   struct dp_pdev *pdev)
3554 {
3555 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3556 	int entries;
3557 
3558 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3559 	entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
3560 
3561 	/* Setup second Rx refill buffer ring */
3562 	if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
3563 			  entries, 0)) {
3564 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3565 			FL("dp_srng_alloc failed second rx refill ring"));
3566 		return QDF_STATUS_E_FAILURE;
3567 	}
3568 
3569 	if (dp_srng_init(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
3570 			 IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id)) {
3571 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3572 			  FL("dp_srng_init failed second rx refill ring"));
3573 		return QDF_STATUS_E_FAILURE;
3574 	}
3575 
3576 	return QDF_STATUS_SUCCESS;
3577 }
3578 
3579 /**
3580  * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
3581  * @soc: data path instance
3582  * @pdev: core txrx pdev context
3583  *
3584  * Return: void
3585  */
static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					      struct dp_pdev *pdev)
{
	/* deinit then free the second (IPA) rx refill buffer SRNG */
	dp_srng_deinit(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 0);
	dp_srng_free(soc, &pdev->rx_refill_buf_ring2);
}
3592 
3593 #else
/* no-op stub when IPA_OFFLOAD is not defined */
static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					   struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
3599 
/* no-op stub when IPA_OFFLOAD is not defined */
static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
					      struct dp_pdev *pdev)
{
}
3604 #endif
3605 
3606 #if !defined(DISABLE_MON_CONFIG)
3607 /**
 * dp_mon_rings_deinit() - Deinitialize monitor rings
3609  * @pdev: DP pdev handle
3610  *
3611  */
3612 static void dp_mon_rings_deinit(struct dp_pdev *pdev)
3613 {
3614 	int mac_id = 0;
3615 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3616 	struct dp_soc *soc = pdev->soc;
3617 
3618 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3619 
3620 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3621 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
3622 							 pdev->pdev_id);
3623 
3624 		dp_srng_deinit(soc, &soc->rxdma_mon_status_ring[lmac_id],
3625 			       RXDMA_MONITOR_STATUS, 0);
3626 
3627 		if (!soc->wlan_cfg_ctx->rxdma1_enable)
3628 			continue;
3629 
3630 		dp_srng_deinit(soc, &soc->rxdma_mon_buf_ring[lmac_id],
3631 			       RXDMA_MONITOR_BUF, 0);
3632 		dp_srng_deinit(soc, &soc->rxdma_mon_dst_ring[lmac_id],
3633 			       RXDMA_MONITOR_DST, 0);
3634 		dp_srng_deinit(soc, &soc->rxdma_mon_desc_ring[lmac_id],
3635 			       RXDMA_MONITOR_DESC, 0);
3636 	}
3637 }
3638 
3639 /**
3640  * dp_mon_rings_free() - free monitor rings
3641  * @pdev: Datapath pdev handle
3642  *
3643  */
3644 static void dp_mon_rings_free(struct dp_pdev *pdev)
3645 {
3646 	int mac_id = 0;
3647 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3648 	struct dp_soc *soc = pdev->soc;
3649 
3650 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3651 
3652 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3653 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
3654 							 pdev->pdev_id);
3655 
3656 		dp_srng_free(soc, &soc->rxdma_mon_status_ring[lmac_id]);
3657 
3658 		if (!soc->wlan_cfg_ctx->rxdma1_enable)
3659 			continue;
3660 
3661 		dp_srng_free(soc, &soc->rxdma_mon_buf_ring[lmac_id]);
3662 		dp_srng_free(soc, &soc->rxdma_mon_dst_ring[lmac_id]);
3663 		dp_srng_free(soc, &soc->rxdma_mon_desc_ring[lmac_id]);
3664 	}
3665 }
3666 
3667 /**
3668  * dp_mon_rings_init() - Initialize monitor srng rings
3669  * @pdev: Datapath pdev handle
3670  *
3671  * return: QDF_STATUS_SUCCESS on success
3672  *	   QDF_STATUS_E_NOMEM on failure
3673  */
3674 static
3675 QDF_STATUS dp_mon_rings_init(struct dp_soc *soc, struct dp_pdev *pdev)
3676 {
3677 	int mac_id = 0;
3678 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3679 
3680 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3681 
3682 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3683 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
3684 							 pdev->pdev_id);
3685 
3686 		if (dp_srng_init(soc, &soc->rxdma_mon_status_ring[lmac_id],
3687 				 RXDMA_MONITOR_STATUS, 0, lmac_id)) {
3688 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3689 				  FL(RNG_ERR "rxdma_mon_status_ring"));
3690 			goto fail1;
3691 		}
3692 
3693 		if (!soc->wlan_cfg_ctx->rxdma1_enable)
3694 			continue;
3695 
3696 		if (dp_srng_init(soc, &soc->rxdma_mon_buf_ring[lmac_id],
3697 				 RXDMA_MONITOR_BUF, 0, lmac_id)) {
3698 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3699 				  FL(RNG_ERR "rxdma_mon_buf_ring "));
3700 			goto fail1;
3701 		}
3702 
3703 		if (dp_srng_init(soc, &soc->rxdma_mon_dst_ring[lmac_id],
3704 				 RXDMA_MONITOR_DST, 0, lmac_id)) {
3705 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3706 				  FL(RNG_ERR "rxdma_mon_dst_ring"));
3707 			goto fail1;
3708 		}
3709 
3710 		if (dp_srng_init(soc, &soc->rxdma_mon_desc_ring[lmac_id],
3711 				 RXDMA_MONITOR_DESC, 0, lmac_id)) {
3712 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3713 				  FL(RNG_ERR "rxdma_mon_desc_ring"));
3714 			goto fail1;
3715 		}
3716 	}
3717 	return QDF_STATUS_SUCCESS;
3718 
3719 fail1:
3720 	dp_mon_rings_deinit(pdev);
3721 	return QDF_STATUS_E_NOMEM;
3722 }
3723 
3724 /**
3725  * dp_mon_rings_alloc() - Allocate memory for monitor srng rings
3726  * @soc: Datapath soc handle
3727  * @pdev: Datapath pdev handle
3728  *
3729  * return: QDF_STATUS_SUCCESS on success
3730  *	   QDF_STATUS_E_NOMEM on failure
3731  */
static
QDF_STATUS dp_mon_rings_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
{
	int mac_id = 0;
	int entries;
	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;

	pdev_cfg_ctx = pdev->wlan_cfg_ctx;

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int lmac_id =
		dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id);
		entries = wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
		if (dp_srng_alloc(soc, &soc->rxdma_mon_status_ring[lmac_id],
				  RXDMA_MONITOR_STATUS, entries, 0)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL(RNG_ERR "rxdma_mon_status_ring"));
			goto fail1;
		}

		/* buf/dst/desc monitor rings are needed only with RXDMA1 */
		if (!soc->wlan_cfg_ctx->rxdma1_enable)
			continue;

		entries = wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
		if (dp_srng_alloc(soc, &soc->rxdma_mon_buf_ring[lmac_id],
				  RXDMA_MONITOR_BUF, entries, 0)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL(RNG_ERR "rxdma_mon_buf_ring "));
			goto fail1;
		}

		entries = wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
		if (dp_srng_alloc(soc, &soc->rxdma_mon_dst_ring[lmac_id],
				  RXDMA_MONITOR_DST, entries, 0)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL(RNG_ERR "rxdma_mon_dst_ring"));
			goto fail1;
		}

		entries = wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
		if (dp_srng_alloc(soc, &soc->rxdma_mon_desc_ring[lmac_id],
				  RXDMA_MONITOR_DESC, entries, 0)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL(RNG_ERR "rxdma_mon_desc_ring"));
			goto fail1;
		}
	}
	return QDF_STATUS_SUCCESS;

fail1:
	/* free any rings allocated in earlier iterations */
	dp_mon_rings_free(pdev);
	return QDF_STATUS_E_NOMEM;
}
3785 #else
/* no-op stub when DISABLE_MON_CONFIG is defined */
static void dp_mon_rings_free(struct dp_pdev *pdev)
{
}
3789 
/* no-op stub when DISABLE_MON_CONFIG is defined */
static void dp_mon_rings_deinit(struct dp_pdev *pdev)
{
}
3793 
/* no-op stub when DISABLE_MON_CONFIG is defined */
static
QDF_STATUS dp_mon_rings_init(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
3799 
/* no-op stub when DISABLE_MON_CONFIG is defined */
static
QDF_STATUS dp_mon_rings_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
3805 #endif
3806 
3807 #ifdef ATH_SUPPORT_EXT_STAT
3808 /*dp_peer_cal_clients_stats_update - update peer stats on cal client timer
3809  * @soc : Datapath SOC
3810  * @peer : Datapath peer
3811  * @arg : argument to iter function
3812  */
/* Per-peer iterator callback: fold this peer's stats into the cal client */
static void
dp_peer_cal_clients_stats_update(struct dp_soc *soc,
				 struct dp_peer *peer,
				 void *arg)
{
	dp_cal_client_update_peer_stats(&peer->stats);
}
3820 
3821 /*dp_iterate_update_peer_list - update peer stats on cal client timer
3822  * @pdev_hdl: pdev handle
3823  */
void  dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;

	/* walk every peer of this pdev and update its cal-client stats */
	dp_pdev_iterate_peer(pdev, dp_peer_cal_clients_stats_update, NULL,
			     DP_MOD_ID_CDP);
}
3831 #else
/* no-op stub when ATH_SUPPORT_EXT_STAT is not defined */
void  dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
{
}
3835 #endif
3836 
3837 /*
3838  * dp_htt_ppdu_stats_attach() - attach resources for HTT PPDU stats processing
3839  * @pdev: Datapath PDEV handle
3840  *
3841  * Return: QDF_STATUS_SUCCESS: Success
3842  *         QDF_STATUS_E_NOMEM: Error
3843  */
static QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev)
{
	/* scratch buffer sized for the largest HTT T2H message */
	pdev->ppdu_tlv_buf = qdf_mem_malloc(HTT_T2H_MAX_MSG_SIZE);

	if (!pdev->ppdu_tlv_buf) {
		QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "ppdu_tlv_buf alloc fail");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}
3855 
3856 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
3857 /**
3858  * dp_soc_rx_history_attach() - Attach the ring history record buffers
3859  * @soc: DP soc structure
3860  *
3861  * This function allocates the memory for recording the rx ring, rx error
3862  * ring and the reinject ring entries. There is no error returned in case
3863  * of allocation failure since the record function checks if the history is
3864  * initialized or not. We do not want to fail the driver load in case of
3865  * failure to allocate memory for debug history.
3866  *
3867  * Returns: None
3868  */
3869 static void dp_soc_rx_history_attach(struct dp_soc *soc)
3870 {
3871 	int i;
3872 	uint32_t rx_ring_hist_size;
3873 	uint32_t rx_err_ring_hist_size;
3874 	uint32_t rx_reinject_hist_size;
3875 
3876 	rx_ring_hist_size = sizeof(*soc->rx_ring_history[i]);
3877 	rx_err_ring_hist_size = sizeof(*soc->rx_err_ring_history);
3878 	rx_reinject_hist_size = sizeof(*soc->rx_reinject_ring_history);
3879 
3880 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
3881 		soc->rx_ring_history[i] = qdf_mem_malloc(rx_ring_hist_size);
3882 		if (soc->rx_ring_history[i])
3883 			qdf_atomic_init(&soc->rx_ring_history[i]->index);
3884 	}
3885 
3886 	soc->rx_err_ring_history = qdf_mem_malloc(rx_err_ring_hist_size);
3887 	if (soc->rx_err_ring_history)
3888 		qdf_atomic_init(&soc->rx_err_ring_history->index);
3889 
3890 	soc->rx_reinject_ring_history = qdf_mem_malloc(rx_reinject_hist_size);
3891 	if (soc->rx_reinject_ring_history)
3892 		qdf_atomic_init(&soc->rx_reinject_ring_history->index);
3893 }
3894 
3895 static void dp_soc_rx_history_detach(struct dp_soc *soc)
3896 {
3897 	int i;
3898 
3899 	for (i = 0; i < MAX_REO_DEST_RINGS; i++)
3900 		qdf_mem_free(soc->rx_ring_history[i]);
3901 
3902 	qdf_mem_free(soc->rx_err_ring_history);
3903 	qdf_mem_free(soc->rx_reinject_ring_history);
3904 }
3905 
3906 #else
/* no-op stub when WLAN_FEATURE_DP_RX_RING_HISTORY is not defined */
static inline void dp_soc_rx_history_attach(struct dp_soc *soc)
{
}
3910 
/* no-op stub when WLAN_FEATURE_DP_RX_RING_HISTORY is not defined */
static inline void dp_soc_rx_history_detach(struct dp_soc *soc)
{
}
3914 #endif
3915 
3916 /*
3917 * dp_pdev_attach_wifi3() - attach txrx pdev
3918 * @txrx_soc: Datapath SOC handle
3919 * @htc_handle: HTC handle for host-target interface
3920 * @qdf_osdev: QDF OS device
3921 * @pdev_id: PDEV ID
3922 *
3923 * Return: QDF_STATUS
3924 */
3925 static inline QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
3926 					      HTC_HANDLE htc_handle,
3927 					      qdf_device_t qdf_osdev,
3928 					      uint8_t pdev_id)
3929 {
3930 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3931 	struct dp_pdev *pdev = NULL;
3932 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3933 	int nss_cfg;
3934 
3935 	pdev = qdf_mem_malloc(sizeof(*pdev));
3936 	if (!pdev) {
3937 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3938 			  FL("DP PDEV memory allocation failed"));
3939 		goto fail0;
3940 	}
3941 	wlan_minidump_log(pdev, sizeof(*pdev), soc->ctrl_psoc,
3942 			  WLAN_MD_DP_PDEV, "dp_pdev");
3943 
3944 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3945 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
3946 
3947 	if (!pdev->wlan_cfg_ctx) {
3948 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3949 			  FL("pdev cfg_attach failed"));
3950 		goto fail1;
3951 	}
3952 
3953 	/*
3954 	 * set nss pdev config based on soc config
3955 	 */
3956 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
3957 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
3958 					 (nss_cfg & (1 << pdev_id)));
3959 
3960 	pdev->soc = soc;
3961 	pdev->pdev_id = pdev_id;
3962 	soc->pdev_list[pdev_id] = pdev;
3963 
3964 	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
3965 	soc->pdev_count++;
3966 
3967 	/* Allocate memory for pdev srng rings */
3968 	if (dp_pdev_srng_alloc(pdev)) {
3969 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3970 			  FL("dp_pdev_srng_alloc failed"));
3971 		goto fail2;
3972 	}
3973 
3974 	/* Rx specific init */
3975 	if (dp_rx_pdev_desc_pool_alloc(pdev)) {
3976 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3977 			  FL("dp_rx_pdev_attach failed"));
3978 		goto fail3;
3979 	}
3980 
3981 	/* Rx monitor mode specific init */
3982 	if (dp_rx_pdev_mon_desc_pool_alloc(pdev)) {
3983 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3984 			  "dp_rx_pdev_mon_attach failed");
3985 		goto fail4;
3986 	}
3987 
3988 	return QDF_STATUS_SUCCESS;
3989 fail4:
3990 	dp_rx_pdev_desc_pool_free(pdev);
3991 fail3:
3992 	dp_pdev_srng_free(pdev);
3993 fail2:
3994 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
3995 fail1:
3996 	qdf_mem_free(pdev);
3997 fail0:
3998 	return QDF_STATUS_E_FAILURE;
3999 }
4000 
4001 /*
4002  * dp_rxdma_ring_cleanup() - configure the RX DMA rings
4003  * @soc: data path SoC handle
4004  * @pdev: Physical device handle
4005  *
4006  * Return: void
4007  */
4008 #ifdef QCA_HOST2FW_RXBUF_RING
4009 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
4010 {
4011 	int i;
4012 
4013 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
4014 		dp_srng_deinit(soc, &pdev->rx_mac_buf_ring[i], RXDMA_BUF, 1);
4015 		dp_srng_free(soc, &pdev->rx_mac_buf_ring[i]);
4016 	}
4017 
4018 	if (soc->reap_timer_init) {
4019 		qdf_timer_free(&soc->mon_reap_timer);
4020 		soc->reap_timer_init = 0;
4021 	}
4022 }
4023 #else
4024 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
4025 {
4026 	if (soc->lmac_timer_init) {
4027 		qdf_timer_stop(&soc->lmac_reap_timer);
4028 		qdf_timer_free(&soc->lmac_reap_timer);
4029 		soc->lmac_timer_init = 0;
4030 	}
4031 }
4032 #endif
4033 
4034 /*
4035  * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
4036  * @pdev: device object
4037  *
4038  * Return: void
4039  */
4040 static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
4041 {
4042 	struct dp_neighbour_peer *peer = NULL;
4043 	struct dp_neighbour_peer *temp_peer = NULL;
4044 
4045 	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
4046 			   neighbour_peer_list_elem, temp_peer) {
4047 		/* delete this peer from the list */
4048 		TAILQ_REMOVE(&pdev->neighbour_peers_list,
4049 			     peer, neighbour_peer_list_elem);
4050 		qdf_mem_free(peer);
4051 	}
4052 
4053 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
4054 }
4055 
4056 /**
4057 * dp_htt_ppdu_stats_detach() - detach stats resources
4058 * @pdev: Datapath PDEV handle
4059 *
4060 * Return: void
4061 */
4062 static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
4063 {
4064 	struct ppdu_info *ppdu_info, *ppdu_info_next;
4065 
4066 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
4067 			   ppdu_info_list_elem, ppdu_info_next) {
4068 		if (!ppdu_info)
4069 			break;
4070 		qdf_assert_always(ppdu_info->nbuf);
4071 		qdf_nbuf_free(ppdu_info->nbuf);
4072 		qdf_mem_free(ppdu_info);
4073 		pdev->list_depth--;
4074 	}
4075 
4076 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->sched_comp_ppdu_list,
4077 			   ppdu_info_list_elem, ppdu_info_next) {
4078 		if (!ppdu_info)
4079 			break;
4080 		qdf_assert_always(ppdu_info->nbuf);
4081 		qdf_nbuf_free(ppdu_info->nbuf);
4082 		qdf_mem_free(ppdu_info);
4083 		pdev->sched_comp_list_depth--;
4084 	}
4085 
4086 	if (pdev->ppdu_tlv_buf)
4087 		qdf_mem_free(pdev->ppdu_tlv_buf);
4088 
4089 }
4090 
#ifdef WLAN_DP_PENDING_MEM_FLUSH
/**
 * dp_pdev_flush_pending_vdevs() - Flush all delete pending vdevs in pdev
 * @pdev: Datapath PDEV handle
 *
 * This is the last chance to flush all pending dp vdevs/peers,
 * some peer/vdev leak case like Non-SSR + peer unmap missing
 * will be covered here.
 *
 * Return: None
 */
static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
{
	struct dp_vdev *vdev = NULL;

	while (true) {
		/* Find one delete-pending vdev under the list lock;
		 * TAILQ_FOREACH leaves vdev NULL when none matches.
		 * The lock is dropped before flushing peers (which can
		 * free the vdev and remove it from this list).
		 */
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
			if (vdev->delete.pending)
				break;
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);

		/*
		 * vdev will be freed when all peers get cleanup,
		 * dp_delete_pending_vdev will remove vdev from vdev_list
		 * in pdev.
		 */
		if (vdev)
			dp_vdev_flush_peers((struct cdp_vdev *)vdev, 0);
		else
			break;
	}
}
#else
static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
{
}
#endif
4130 
4131 /**
4132  * dp_pdev_deinit() - Deinit txrx pdev
4133  * @txrx_pdev: Datapath PDEV handle
4134  * @force: Force deinit
4135  *
4136  * Return: None
4137  */
4138 static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
4139 {
4140 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
4141 	qdf_nbuf_t curr_nbuf, next_nbuf;
4142 
4143 	if (pdev->pdev_deinit)
4144 		return;
4145 
4146 	dp_tx_me_exit(pdev);
4147 	dp_rx_fst_detach(pdev->soc, pdev);
4148 	dp_rx_pdev_mon_buffers_free(pdev);
4149 	dp_rx_pdev_buffers_free(pdev);
4150 	dp_rx_pdev_mon_desc_pool_deinit(pdev);
4151 	dp_rx_pdev_desc_pool_deinit(pdev);
4152 	dp_htt_ppdu_stats_detach(pdev);
4153 	dp_tx_ppdu_stats_detach(pdev);
4154 	qdf_event_destroy(&pdev->fw_peer_stats_event);
4155 	dp_cal_client_detach(&pdev->cal_client_ctx);
4156 	if (pdev->sojourn_buf)
4157 		qdf_nbuf_free(pdev->sojourn_buf);
4158 
4159 	dp_pdev_flush_pending_vdevs(pdev);
4160 	dp_tx_pdev_detach(pdev);
4161 	dp_pktlogmod_exit(pdev);
4162 	dp_neighbour_peers_detach(pdev);
4163 
4164 	qdf_spinlock_destroy(&pdev->tx_mutex);
4165 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
4166 
4167 	if (pdev->invalid_peer)
4168 		qdf_mem_free(pdev->invalid_peer);
4169 
4170 	if (pdev->filter)
4171 		dp_mon_filter_dealloc(pdev);
4172 
4173 	dp_pdev_srng_deinit(pdev);
4174 
4175 	dp_ipa_uc_detach(pdev->soc, pdev);
4176 	dp_cleanup_ipa_rx_refill_buf_ring(pdev->soc, pdev);
4177 	dp_rxdma_ring_cleanup(pdev->soc, pdev);
4178 
4179 	curr_nbuf = pdev->invalid_peer_head_msdu;
4180 	while (curr_nbuf) {
4181 		next_nbuf = qdf_nbuf_next(curr_nbuf);
4182 		qdf_nbuf_free(curr_nbuf);
4183 		curr_nbuf = next_nbuf;
4184 	}
4185 	pdev->invalid_peer_head_msdu = NULL;
4186 	pdev->invalid_peer_tail_msdu = NULL;
4187 
4188 	dp_wdi_event_detach(pdev);
4189 	pdev->pdev_deinit = 1;
4190 }
4191 
4192 /**
4193  * dp_pdev_deinit_wifi3() - Deinit txrx pdev
4194  * @psoc: Datapath psoc handle
4195  * @pdev_id: Id of datapath PDEV handle
4196  * @force: Force deinit
4197  *
4198  * Return: QDF_STATUS
4199  */
4200 static QDF_STATUS
4201 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
4202 		     int force)
4203 {
4204 	struct dp_pdev *txrx_pdev;
4205 
4206 	txrx_pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
4207 						       pdev_id);
4208 
4209 	if (!txrx_pdev)
4210 		return QDF_STATUS_E_FAILURE;
4211 
4212 	dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force);
4213 
4214 	return QDF_STATUS_SUCCESS;
4215 }
4216 
4217 /*
4218  * dp_pdev_post_attach() - Do post pdev attach after dev_alloc_name
4219  * @txrx_pdev: Datapath PDEV handle
4220  *
4221  * Return: None
4222  */
4223 static void dp_pdev_post_attach(struct cdp_pdev *txrx_pdev)
4224 {
4225 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
4226 
4227 	dp_tx_capture_debugfs_init(pdev);
4228 }
4229 
4230 /*
4231  * dp_pdev_post_attach_wifi3() - attach txrx pdev post
4232  * @psoc: Datapath soc handle
4233  * @pdev_id: pdev id of pdev
4234  *
4235  * Return: QDF_STATUS
4236  */
4237 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *soc,
4238 				     uint8_t pdev_id)
4239 {
4240 	struct dp_pdev *pdev;
4241 
4242 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
4243 						  pdev_id);
4244 
4245 	if (!pdev) {
4246 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4247 			  FL("DP PDEV is Null for pdev id %d"), pdev_id);
4248 		return QDF_STATUS_E_FAILURE;
4249 	}
4250 
4251 	dp_pdev_post_attach((struct cdp_pdev *)pdev);
4252 	return QDF_STATUS_SUCCESS;
4253 }
4254 
4255 /*
4256  * dp_pdev_detach() - Complete rest of pdev detach
4257  * @txrx_pdev: Datapath PDEV handle
4258  * @force: Force deinit
4259  *
4260  * Return: None
4261  */
4262 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
4263 {
4264 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
4265 	struct dp_soc *soc = pdev->soc;
4266 
4267 	dp_rx_pdev_mon_desc_pool_free(pdev);
4268 	dp_rx_pdev_desc_pool_free(pdev);
4269 	dp_pdev_srng_free(pdev);
4270 
4271 	soc->pdev_count--;
4272 	soc->pdev_list[pdev->pdev_id] = NULL;
4273 
4274 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
4275 	wlan_minidump_remove(pdev);
4276 	qdf_mem_free(pdev);
4277 }
4278 
4279 /*
4280  * dp_pdev_detach_wifi3() - detach txrx pdev
4281  * @psoc: Datapath soc handle
4282  * @pdev_id: pdev id of pdev
4283  * @force: Force detach
4284  *
4285  * Return: QDF_STATUS
4286  */
4287 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
4288 				       int force)
4289 {
4290 	struct dp_pdev *pdev;
4291 
4292 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
4293 						  pdev_id);
4294 
4295 	if (!pdev) {
4296 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4297 			  FL("DP PDEV is Null for pdev id %d"), pdev_id);
4298 		return QDF_STATUS_E_FAILURE;
4299 	}
4300 
4301 	dp_pdev_detach((struct cdp_pdev *)pdev, force);
4302 	return QDF_STATUS_SUCCESS;
4303 }
4304 
4305 /*
4306  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
4307  * @soc: DP SOC handle
4308  */
4309 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
4310 {
4311 	struct reo_desc_list_node *desc;
4312 	struct dp_rx_tid *rx_tid;
4313 
4314 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
4315 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
4316 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
4317 		rx_tid = &desc->rx_tid;
4318 		qdf_mem_unmap_nbytes_single(soc->osdev,
4319 			rx_tid->hw_qdesc_paddr,
4320 			QDF_DMA_BIDIRECTIONAL,
4321 			rx_tid->hw_qdesc_alloc_size);
4322 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
4323 		qdf_mem_free(desc);
4324 	}
4325 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
4326 	qdf_list_destroy(&soc->reo_desc_freelist);
4327 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
4328 }
4329 
4330 /*
4331  * dp_soc_reset_txrx_ring_map() - reset tx ring map
4332  * @soc: DP SOC handle
4333  *
4334  */
4335 static void dp_soc_reset_txrx_ring_map(struct dp_soc *soc)
4336 {
4337 	uint32_t i;
4338 
4339 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
4340 		soc->tx_ring_map[i] = 0;
4341 }
4342 
4343 /*
4344  * dp_soc_print_inactive_objects() - prints inactive peer and vdev list
4345  * @soc: DP SOC handle
4346  *
4347  */
4348 static void dp_soc_print_inactive_objects(struct dp_soc *soc)
4349 {
4350 	struct dp_peer *peer = NULL;
4351 	struct dp_peer *tmp_peer = NULL;
4352 	struct dp_vdev *vdev = NULL;
4353 	struct dp_vdev *tmp_vdev = NULL;
4354 	int i = 0;
4355 	uint32_t count;
4356 
4357 	if (TAILQ_EMPTY(&soc->inactive_peer_list) &&
4358 	    TAILQ_EMPTY(&soc->inactive_vdev_list))
4359 		return;
4360 
4361 	TAILQ_FOREACH_SAFE(peer, &soc->inactive_peer_list,
4362 			   inactive_list_elem, tmp_peer) {
4363 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
4364 			count = qdf_atomic_read(&peer->mod_refs[i]);
4365 			if (count)
4366 				DP_PRINT_STATS("peer %pK Module id %u ==> %u",
4367 					       peer, i, count);
4368 		}
4369 	}
4370 
4371 	TAILQ_FOREACH_SAFE(vdev, &soc->inactive_vdev_list,
4372 			   inactive_list_elem, tmp_vdev) {
4373 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
4374 			count = qdf_atomic_read(&vdev->mod_refs[i]);
4375 			if (count)
4376 				DP_PRINT_STATS("vdev %pK Module id %u ==> %u",
4377 					       vdev, i, count);
4378 		}
4379 	}
4380 	QDF_BUG(0);
4381 }
4382 
4383 /**
4384  * dp_soc_deinit() - Deinitialize txrx SOC
4385  * @txrx_soc: Opaque DP SOC handle
4386  *
4387  * Return: None
4388  */
4389 static void dp_soc_deinit(void *txrx_soc)
4390 {
4391 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4392 	struct htt_soc *htt_soc = soc->htt_handle;
4393 
4394 	qdf_atomic_set(&soc->cmn_init_done, 0);
4395 
4396 	/* free peer tables & AST tables allocated during peer_map_attach */
4397 	if (soc->peer_map_attach_success) {
4398 		dp_peer_find_detach(soc);
4399 		soc->peer_map_attach_success = FALSE;
4400 	}
4401 
4402 	qdf_flush_work(&soc->htt_stats.work);
4403 	qdf_disable_work(&soc->htt_stats.work);
4404 
4405 	qdf_spinlock_destroy(&soc->htt_stats.lock);
4406 
4407 	dp_soc_reset_txrx_ring_map(soc);
4408 
4409 	dp_reo_desc_freelist_destroy(soc);
4410 
4411 	DEINIT_RX_HW_STATS_LOCK(soc);
4412 
4413 	qdf_spinlock_destroy(&soc->ast_lock);
4414 
4415 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
4416 
4417 	dp_soc_wds_detach(soc);
4418 
4419 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
4420 
4421 	qdf_spinlock_destroy(&soc->vdev_map_lock);
4422 
4423 	dp_reo_cmdlist_destroy(soc);
4424 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
4425 
4426 	dp_soc_tx_desc_sw_pools_deinit(soc);
4427 
4428 	dp_soc_srng_deinit(soc);
4429 
4430 	dp_hw_link_desc_ring_deinit(soc);
4431 
4432 	dp_soc_print_inactive_objects(soc);
4433 	qdf_spinlock_destroy(&soc->inactive_peer_list_lock);
4434 	qdf_spinlock_destroy(&soc->inactive_vdev_list_lock);
4435 
4436 	htt_soc_htc_dealloc(soc->htt_handle);
4437 
4438 	htt_soc_detach(htt_soc);
4439 
4440 	/* Free wbm sg list and reset flags in down path */
4441 	dp_rx_wbm_sg_list_deinit(soc);
4442 
4443 	wlan_minidump_remove(soc);
4444 }
4445 
4446 /**
4447  * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
4448  * @txrx_soc: Opaque DP SOC handle
4449  *
4450  * Return: None
4451  */
4452 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc)
4453 {
4454 	dp_soc_deinit(txrx_soc);
4455 }
4456 
4457 /*
4458  * dp_soc_detach() - Detach rest of txrx SOC
4459  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
4460  *
4461  * Return: None
4462  */
4463 static void dp_soc_detach(struct cdp_soc_t *txrx_soc)
4464 {
4465 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4466 
4467 	dp_soc_tx_desc_sw_pools_free(soc);
4468 	dp_soc_srng_free(soc);
4469 	dp_hw_link_desc_ring_free(soc);
4470 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
4471 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
4472 	dp_soc_rx_history_detach(soc);
4473 
4474 	qdf_mem_free(soc);
4475 }
4476 
4477 /*
4478  * dp_soc_detach_wifi3() - Detach txrx SOC
4479  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
4480  *
4481  * Return: None
4482  */
4483 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc)
4484 {
4485 	dp_soc_detach(txrx_soc);
4486 }
4487 
#if !defined(DISABLE_MON_CONFIG)
/**
 * dp_mon_htt_srng_setup() - Prepare HTT messages for Monitor rings
 * @soc: soc handle
 * @pdev: physical device handle
 * @mac_id: ring number
 * @mac_for_pdev: mac_id
 *
 * With rxdma1 enabled, announces all four monitor rings
 * (buf/dst/status/desc) to the target; otherwise only the status
 * ring. Bails out on the first HTT message that fails.
 *
 * Return: non-zero for failure, zero for success
 */
static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
					struct dp_pdev *pdev,
					int mac_id,
					int mac_for_pdev)
{
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (soc->wlan_cfg_ctx->rxdma1_enable) {
		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
					soc->rxdma_mon_buf_ring[mac_id]
					.hal_srng,
					RXDMA_MONITOR_BUF);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt srng setup message for Rxdma mon buf ring");
			return status;
		}

		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
					soc->rxdma_mon_dst_ring[mac_id]
					.hal_srng,
					RXDMA_MONITOR_DST);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt srng setup message for Rxdma mon dst ring");
			return status;
		}

		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
					soc->rxdma_mon_status_ring[mac_id]
					.hal_srng,
					RXDMA_MONITOR_STATUS);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
			return status;
		}

		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
				soc->rxdma_mon_desc_ring[mac_id]
					.hal_srng,
					RXDMA_MONITOR_DESC);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt srng message for Rxdma mon desc ring");
			return status;
		}
	} else {
		/* rxdma1 disabled: only the status ring exists */
		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
					soc->rxdma_mon_status_ring[mac_id]
					.hal_srng,
					RXDMA_MONITOR_STATUS);

		if (status != QDF_STATUS_SUCCESS) {
			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
			return status;
		}
	}

	return status;

}
#else
static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
					struct dp_pdev *pdev,
					int mac_id,
					int mac_for_pdev)
{
	return QDF_STATUS_SUCCESS;
}
#endif
4569 
4570 /*
4571  * dp_rxdma_ring_config() - configure the RX DMA rings
4572  *
4573  * This function is used to configure the MAC rings.
4574  * On MCL host provides buffers in Host2FW ring
4575  * FW refills (copies) buffers to the ring and updates
4576  * ring_idx in register
4577  *
4578  * @soc: data path SoC handle
4579  *
4580  * Return: zero on success, non-zero on failure
4581  */
4582 #ifdef QCA_HOST2FW_RXBUF_RING
4583 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
4584 {
4585 	int i;
4586 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4587 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4588 		struct dp_pdev *pdev = soc->pdev_list[i];
4589 
4590 		if (pdev) {
4591 			int mac_id;
4592 			bool dbs_enable = 0;
4593 			int max_mac_rings =
4594 				 wlan_cfg_get_num_mac_rings
4595 				(pdev->wlan_cfg_ctx);
4596 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);
4597 
4598 			htt_srng_setup(soc->htt_handle, 0,
4599 				       soc->rx_refill_buf_ring[lmac_id]
4600 				       .hal_srng,
4601 				       RXDMA_BUF);
4602 
4603 			if (pdev->rx_refill_buf_ring2.hal_srng)
4604 				htt_srng_setup(soc->htt_handle, 0,
4605 					pdev->rx_refill_buf_ring2.hal_srng,
4606 					RXDMA_BUF);
4607 
4608 			if (soc->cdp_soc.ol_ops->
4609 				is_hw_dbs_2x2_capable) {
4610 				dbs_enable = soc->cdp_soc.ol_ops->
4611 					is_hw_dbs_2x2_capable(
4612 							(void *)soc->ctrl_psoc);
4613 			}
4614 
4615 			if (dbs_enable) {
4616 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4617 				QDF_TRACE_LEVEL_ERROR,
4618 				FL("DBS enabled max_mac_rings %d"),
4619 					 max_mac_rings);
4620 			} else {
4621 				max_mac_rings = 1;
4622 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4623 					 QDF_TRACE_LEVEL_ERROR,
4624 					 FL("DBS disabled, max_mac_rings %d"),
4625 					 max_mac_rings);
4626 			}
4627 
4628 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4629 					 FL("pdev_id %d max_mac_rings %d"),
4630 					 pdev->pdev_id, max_mac_rings);
4631 
4632 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
4633 				int mac_for_pdev =
4634 					dp_get_mac_id_for_pdev(mac_id,
4635 							       pdev->pdev_id);
4636 				/*
4637 				 * Obtain lmac id from pdev to access the LMAC
4638 				 * ring in soc context
4639 				 */
4640 				lmac_id =
4641 				dp_get_lmac_id_for_pdev_id(soc,
4642 							   mac_id,
4643 							   pdev->pdev_id);
4644 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4645 					 QDF_TRACE_LEVEL_ERROR,
4646 					 FL("mac_id %d"), mac_for_pdev);
4647 
4648 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
4649 					 pdev->rx_mac_buf_ring[mac_id]
4650 						.hal_srng,
4651 					 RXDMA_BUF);
4652 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
4653 				soc->rxdma_err_dst_ring[lmac_id]
4654 					.hal_srng,
4655 					RXDMA_DST);
4656 
4657 				/* Configure monitor mode rings */
4658 				status = dp_mon_htt_srng_setup(soc, pdev,
4659 							       lmac_id,
4660 							       mac_for_pdev);
4661 				if (status != QDF_STATUS_SUCCESS) {
4662 					dp_err("Failed to send htt monitor messages to target");
4663 					return status;
4664 				}
4665 
4666 			}
4667 		}
4668 	}
4669 
4670 	/*
4671 	 * Timer to reap rxdma status rings.
4672 	 * Needed until we enable ppdu end interrupts
4673 	 */
4674 	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
4675 		       dp_mon_reap_timer_handler, (void *)soc,
4676 		       QDF_TIMER_TYPE_WAKE_APPS);
4677 	soc->reap_timer_init = 1;
4678 	return status;
4679 }
4680 #else
4681 /* This is only for WIN */
4682 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
4683 {
4684 	int i;
4685 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4686 	int mac_for_pdev;
4687 	int lmac_id;
4688 
4689 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4690 		struct dp_pdev *pdev =  soc->pdev_list[i];
4691 
4692 		if (!pdev)
4693 			continue;
4694 
4695 		mac_for_pdev = i;
4696 		lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);
4697 
4698 		htt_srng_setup(soc->htt_handle, mac_for_pdev,
4699 			       soc->rx_refill_buf_ring[lmac_id].
4700 			       hal_srng, RXDMA_BUF);
4701 #ifndef DISABLE_MON_CONFIG
4702 
4703 		htt_srng_setup(soc->htt_handle, mac_for_pdev,
4704 			       soc->rxdma_mon_buf_ring[lmac_id].hal_srng,
4705 			       RXDMA_MONITOR_BUF);
4706 		htt_srng_setup(soc->htt_handle, mac_for_pdev,
4707 			       soc->rxdma_mon_dst_ring[lmac_id].hal_srng,
4708 			       RXDMA_MONITOR_DST);
4709 		htt_srng_setup(soc->htt_handle, mac_for_pdev,
4710 			       soc->rxdma_mon_status_ring[lmac_id].hal_srng,
4711 			       RXDMA_MONITOR_STATUS);
4712 		htt_srng_setup(soc->htt_handle, mac_for_pdev,
4713 			       soc->rxdma_mon_desc_ring[lmac_id].hal_srng,
4714 			       RXDMA_MONITOR_DESC);
4715 #endif
4716 		htt_srng_setup(soc->htt_handle, mac_for_pdev,
4717 			       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
4718 			       RXDMA_DST);
4719 	}
4720 
4721 	/* Configure LMAC rings in Polled mode */
4722 	if (soc->lmac_polled_mode) {
4723 		/*
4724 		 * Timer to reap lmac rings.
4725 		 */
4726 		qdf_timer_init(soc->osdev, &soc->lmac_reap_timer,
4727 			       dp_service_lmac_rings, (void *)soc,
4728 			       QDF_TIMER_TYPE_WAKE_APPS);
4729 		soc->lmac_timer_init = 1;
4730 		qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
4731 	}
4732 	return status;
4733 }
4734 #endif
4735 
4736 #ifdef NO_RX_PKT_HDR_TLV
4737 static QDF_STATUS
4738 dp_rxdma_ring_sel_cfg(struct dp_soc *soc)
4739 {
4740 	int i;
4741 	int mac_id;
4742 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
4743 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4744 
4745 	htt_tlv_filter.mpdu_start = 1;
4746 	htt_tlv_filter.msdu_start = 1;
4747 	htt_tlv_filter.mpdu_end = 1;
4748 	htt_tlv_filter.msdu_end = 1;
4749 	htt_tlv_filter.attention = 1;
4750 	htt_tlv_filter.packet = 1;
4751 	htt_tlv_filter.packet_header = 0;
4752 
4753 	htt_tlv_filter.ppdu_start = 0;
4754 	htt_tlv_filter.ppdu_end = 0;
4755 	htt_tlv_filter.ppdu_end_user_stats = 0;
4756 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4757 	htt_tlv_filter.ppdu_end_status_done = 0;
4758 	htt_tlv_filter.enable_fp = 1;
4759 	htt_tlv_filter.enable_md = 0;
4760 	htt_tlv_filter.enable_md = 0;
4761 	htt_tlv_filter.enable_mo = 0;
4762 
4763 	htt_tlv_filter.fp_mgmt_filter = 0;
4764 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
4765 	htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
4766 					 FILTER_DATA_MCAST |
4767 					 FILTER_DATA_DATA);
4768 	htt_tlv_filter.mo_mgmt_filter = 0;
4769 	htt_tlv_filter.mo_ctrl_filter = 0;
4770 	htt_tlv_filter.mo_data_filter = 0;
4771 	htt_tlv_filter.md_data_filter = 0;
4772 
4773 	htt_tlv_filter.offset_valid = true;
4774 
4775 	htt_tlv_filter.rx_packet_offset = RX_PKT_TLVS_LEN;
4776 	/*Not subscribing rx_pkt_header*/
4777 	htt_tlv_filter.rx_header_offset = 0;
4778 	htt_tlv_filter.rx_mpdu_start_offset =
4779 				hal_rx_mpdu_start_offset_get(soc->hal_soc);
4780 	htt_tlv_filter.rx_mpdu_end_offset =
4781 				hal_rx_mpdu_end_offset_get(soc->hal_soc);
4782 	htt_tlv_filter.rx_msdu_start_offset =
4783 				hal_rx_msdu_start_offset_get(soc->hal_soc);
4784 	htt_tlv_filter.rx_msdu_end_offset =
4785 				hal_rx_msdu_end_offset_get(soc->hal_soc);
4786 	htt_tlv_filter.rx_attn_offset =
4787 				hal_rx_attn_offset_get(soc->hal_soc);
4788 
4789 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4790 		struct dp_pdev *pdev = soc->pdev_list[i];
4791 
4792 		if (!pdev)
4793 			continue;
4794 
4795 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4796 			int mac_for_pdev =
4797 				dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
4798 			/*
4799 			 * Obtain lmac id from pdev to access the LMAC ring
4800 			 * in soc context
4801 			 */
4802 			int lmac_id =
4803 				dp_get_lmac_id_for_pdev_id(soc, mac_id,
4804 							   pdev->pdev_id);
4805 
4806 			htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4807 					    soc->rx_refill_buf_ring[lmac_id].
4808 					    hal_srng,
4809 					    RXDMA_BUF, RX_DATA_BUFFER_SIZE,
4810 					    &htt_tlv_filter);
4811 		}
4812 	}
4813 	return status;
4814 }
4815 #else
4816 static QDF_STATUS
4817 dp_rxdma_ring_sel_cfg(struct dp_soc *soc)
4818 {
4819 	return QDF_STATUS_SUCCESS;
4820 }
4821 #endif
4822 
4823 /*
4824  * dp_rx_target_fst_config() - configure the RXOLE Flow Search Engine
4825  *
4826  * This function is used to configure the FSE HW block in RX OLE on a
4827  * per pdev basis. Here, we will be programming parameters related to
4828  * the Flow Search Table.
4829  *
4830  * @soc: data path SoC handle
4831  *
4832  * Return: zero on success, non-zero on failure
4833  */
4834 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
4835 static QDF_STATUS
4836 dp_rx_target_fst_config(struct dp_soc *soc)
4837 {
4838 	int i;
4839 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4840 
4841 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4842 		struct dp_pdev *pdev = soc->pdev_list[i];
4843 
4844 		/* Flow search is not enabled if NSS offload is enabled */
4845 		if (pdev &&
4846 		    !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
4847 			status = dp_rx_flow_send_fst_fw_setup(pdev->soc, pdev);
4848 			if (status != QDF_STATUS_SUCCESS)
4849 				break;
4850 		}
4851 	}
4852 	return status;
4853 }
4854 #elif defined(WLAN_SUPPORT_RX_FISA)
4855 /**
4856  * dp_rx_target_fst_config() - Configure RX OLE FSE engine in HW
4857  * @soc: SoC handle
4858  *
4859  * Return: Success
4860  */
4861 static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
4862 {
4863 	/* Check if it is enabled in the INI */
4864 	if (!soc->fisa_enable) {
4865 		dp_err("RX FISA feature is disabled");
4866 		return QDF_STATUS_E_NOSUPPORT;
4867 	}
4868 
4869 	return dp_rx_flow_send_fst_fw_setup(soc, soc->pdev_list[0]);
4870 }
4871 
4872 #define FISA_MAX_TIMEOUT 0xffffffff
4873 #define FISA_DISABLE_TIMEOUT 0
4874 static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
4875 {
4876 	struct dp_htt_rx_fisa_cfg fisa_config;
4877 
4878 	fisa_config.pdev_id = 0;
4879 	fisa_config.fisa_timeout = FISA_MAX_TIMEOUT;
4880 
4881 	return dp_htt_rx_fisa_config(soc->pdev_list[0], &fisa_config);
4882 }
4883 #else /* !WLAN_SUPPORT_RX_FISA */
4884 static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
4885 {
4886 	return QDF_STATUS_SUCCESS;
4887 }
4888 #endif /* !WLAN_SUPPORT_RX_FISA */
4889 
#ifndef WLAN_SUPPORT_RX_FISA
/* FISA compiled out: success/no-op stand-ins so callers need no
 * feature checks.
 */
static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS dp_rx_dump_fisa_stats(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

static void dp_rx_dump_fisa_table(struct dp_soc *soc)
{
}
#endif /* !WLAN_SUPPORT_RX_FISA */
4905 
4906 /*
4907  * dp_soc_attach_target_wifi3() - SOC initialization in the target
4908  * @cdp_soc: Opaque Datapath SOC handle
4909  *
4910  * Return: zero on success, non-zero on failure
4911  */
4912 static QDF_STATUS
4913 dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
4914 {
4915 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
4916 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4917 
4918 	htt_soc_attach_target(soc->htt_handle);
4919 
4920 	status = dp_rxdma_ring_config(soc);
4921 	if (status != QDF_STATUS_SUCCESS) {
4922 		dp_err("Failed to send htt srng setup messages to target");
4923 		return status;
4924 	}
4925 
4926 	status = dp_rxdma_ring_sel_cfg(soc);
4927 	if (status != QDF_STATUS_SUCCESS) {
4928 		dp_err("Failed to send htt ring config message to target");
4929 		return status;
4930 	}
4931 
4932 	status = dp_rx_target_fst_config(soc);
4933 	if (status != QDF_STATUS_SUCCESS &&
4934 	    status != QDF_STATUS_E_NOSUPPORT) {
4935 		dp_err("Failed to send htt fst setup config message to target");
4936 		return status;
4937 	}
4938 
4939 	if (status == QDF_STATUS_SUCCESS) {
4940 		status = dp_rx_fisa_config(soc);
4941 		if (status != QDF_STATUS_SUCCESS) {
4942 			dp_err("Failed to send htt FISA config message to target");
4943 			return status;
4944 		}
4945 	}
4946 
4947 	DP_STATS_INIT(soc);
4948 
4949 	/* initialize work queue for stats processing */
4950 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
4951 
4952 	return QDF_STATUS_SUCCESS;
4953 }
4954 
4955 #ifdef QCA_SUPPORT_FULL_MON
4956 static inline QDF_STATUS
4957 dp_soc_config_full_mon_mode(struct dp_pdev *pdev, enum dp_full_mon_config val)
4958 {
4959 	struct dp_soc *soc = pdev->soc;
4960 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4961 
4962 	if (!soc->full_mon_mode)
4963 		return QDF_STATUS_SUCCESS;
4964 
4965 	if ((htt_h2t_full_mon_cfg(soc->htt_handle,
4966 				  pdev->pdev_id,
4967 				  val)) != QDF_STATUS_SUCCESS) {
4968 		status = QDF_STATUS_E_FAILURE;
4969 	}
4970 
4971 	return status;
4972 }
4973 #else
4974 static inline QDF_STATUS
4975 dp_soc_config_full_mon_mode(struct dp_pdev *pdev, enum dp_full_mon_config val)
4976 {
4977 	return 0;
4978 }
4979 #endif
4980 
4981 /*
4982  * dp_vdev_id_map_tbl_add() - Add vdev into vdev_id table
4983  * @soc: SoC handle
4984  * @vdev: vdev handle
4985  * @vdev_id: vdev_id
4986  *
4987  * Return: None
4988  */
4989 static void dp_vdev_id_map_tbl_add(struct dp_soc *soc,
4990 				   struct dp_vdev *vdev,
4991 				   uint8_t vdev_id)
4992 {
4993 	QDF_ASSERT(vdev_id <= MAX_VDEV_CNT);
4994 
4995 	qdf_spin_lock_bh(&soc->vdev_map_lock);
4996 
4997 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
4998 			QDF_STATUS_SUCCESS) {
4999 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5000 			  "unable to get vdev reference at MAP vdev %pK vdev_id %u",
5001 			  vdev, vdev_id);
5002 		qdf_spin_unlock_bh(&soc->vdev_map_lock);
5003 		return;
5004 	}
5005 
5006 	if (!soc->vdev_id_map[vdev_id])
5007 		soc->vdev_id_map[vdev_id] = vdev;
5008 	else
5009 		QDF_ASSERT(0);
5010 
5011 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
5012 }
5013 
5014 /*
5015  * dp_vdev_id_map_tbl_remove() - remove vdev from vdev_id table
5016  * @soc: SoC handle
5017  * @vdev: vdev handle
5018  *
5019  * Return: None
5020  */
5021 static void dp_vdev_id_map_tbl_remove(struct dp_soc *soc,
5022 				      struct dp_vdev *vdev)
5023 {
5024 	qdf_spin_lock_bh(&soc->vdev_map_lock);
5025 	QDF_ASSERT(soc->vdev_id_map[vdev->vdev_id] == vdev);
5026 
5027 	soc->vdev_id_map[vdev->vdev_id] = NULL;
5028 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
5029 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
5030 }
5031 
5032 /*
5033  * dp_vdev_pdev_list_add() - add vdev into pdev's list
5034  * @soc: soc handle
5035  * @pdev: pdev handle
5036  * @vdev: vdev handle
5037  *
5038  * return: none
5039  */
5040 static void dp_vdev_pdev_list_add(struct dp_soc *soc,
5041 				  struct dp_pdev *pdev,
5042 				  struct dp_vdev *vdev)
5043 {
5044 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5045 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
5046 			QDF_STATUS_SUCCESS) {
5047 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5048 			  "unable to get vdev reference at MAP vdev %pK",
5049 			  vdev);
5050 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5051 		return;
5052 	}
5053 	/* add this vdev into the pdev's list */
5054 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
5055 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5056 }
5057 
5058 /*
5059  * dp_vdev_pdev_list_remove() - remove vdev from pdev's list
5060  * @soc: SoC handle
5061  * @pdev: pdev handle
5062  * @vdev: VDEV handle
5063  *
5064  * Return: none
5065  */
5066 static void dp_vdev_pdev_list_remove(struct dp_soc *soc,
5067 				     struct dp_pdev *pdev,
5068 				     struct dp_vdev *vdev)
5069 {
5070 	uint8_t found = 0;
5071 	struct dp_vdev *tmpvdev = NULL;
5072 
5073 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5074 	TAILQ_FOREACH(tmpvdev, &pdev->vdev_list, vdev_list_elem) {
5075 		if (tmpvdev == vdev) {
5076 			found = 1;
5077 			break;
5078 		}
5079 	}
5080 
5081 	if (found) {
5082 		TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
5083 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
5084 	} else {
5085 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
5086 			  "vdev:%pK not found in pdev:%pK vdevlist:%pK",
5087 			  vdev, pdev, &pdev->vdev_list);
5088 		QDF_ASSERT(0);
5089 	}
5090 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5091 }
5092 
5093 /*
5094 * dp_vdev_attach_wifi3() - attach txrx vdev
5095 * @txrx_pdev: Datapath PDEV handle
5096 * @vdev_mac_addr: MAC address of the virtual interface
5097 * @vdev_id: VDEV Id
5098 * @wlan_op_mode: VDEV operating mode
5099 * @subtype: VDEV operating subtype
5100 *
5101 * Return: status
5102 */
static QDF_STATUS dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc,
				       uint8_t pdev_id,
				       uint8_t *vdev_mac_addr,
				       uint8_t vdev_id,
				       enum wlan_op_mode op_mode,
				       enum wlan_op_subtype subtype)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);
	/* vdev is allocated unconditionally before the pdev check below;
	 * the pdev-failure path therefore frees it (freeing NULL is a no-op).
	 */
	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
	int i = 0;

	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("DP PDEV is Null for pdev id %d"), pdev_id);
		qdf_mem_free(vdev);
		goto fail0;
	}

	if (!vdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			FL("DP VDEV memory allocation failed"));
		goto fail0;
	}

	wlan_minidump_log(vdev, sizeof(*vdev), soc->ctrl_psoc,
			  WLAN_MD_DP_VDEV, "dp_vdev");

	/* record the creation parameters on the new vdev */
	vdev->pdev = pdev;
	vdev->vdev_id = vdev_id;
	vdev->opmode = op_mode;
	vdev->subtype = subtype;
	vdev->osdev = soc->osdev;

	/* OS-interface callbacks are filled in later by
	 * dp_vdev_register_wifi3()
	 */
	vdev->osif_rx = NULL;
	vdev->osif_rsim_rx_decap = NULL;
	vdev->osif_get_key = NULL;
	vdev->osif_rx_mon = NULL;
	vdev->osif_tx_free_ext = NULL;
	vdev->osif_vdev = NULL;

	vdev->delete.pending = 0;
	vdev->safemode = 0;
	vdev->drop_unenc = 1;
	vdev->sec_type = cdp_sec_type_none;
	vdev->multipass_en = false;
	qdf_atomic_init(&vdev->ref_cnt);
	for (i = 0; i < DP_MOD_ID_MAX; i++)
		qdf_atomic_init(&vdev->mod_refs[i]);

	/* Take one reference for create*/
	qdf_atomic_inc(&vdev->ref_cnt);
	qdf_atomic_inc(&vdev->mod_refs[DP_MOD_ID_CONFIG]);
	vdev->num_peers = 0;
#ifdef notyet
	vdev->filters_num = 0;
#endif
	vdev->lmac_id = pdev->lmac_id;

	qdf_mem_copy(
		&vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE);

	/* TODO: Initialize default HTT meta data that will be used in
	 * TCL descriptors for packets transmitted from this VDEV
	 */

	qdf_spinlock_create(&vdev->peer_list_lock);
	TAILQ_INIT(&vdev->peer_list);
	dp_peer_multipass_list_init(vdev);

	/* In poll mode, (re)arm the interrupt timer when the first vdev
	 * on this pdev, or a monitor vdev, is created.
	 */
	if ((soc->intr_mode == DP_INTR_POLL) &&
	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
		if ((pdev->vdev_count == 0) ||
		    (wlan_op_mode_monitor == vdev->opmode))
			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
	}

	dp_vdev_id_map_tbl_add(soc, vdev, vdev_id);

	/* Monitor vdevs return early: they are not added to the pdev's
	 * vdev list and skip the tx/rx configuration below.
	 */
	if (wlan_op_mode_monitor == vdev->opmode) {
		pdev->monitor_vdev = vdev;
		return QDF_STATUS_SUCCESS;
	}

	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
	vdev->dscp_tid_map_id = 0;
	vdev->mcast_enhancement_en = 0;
	vdev->igmp_mcast_enhanc_en = 0;
	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
	vdev->prev_tx_enq_tstamp = 0;
	vdev->prev_rx_deliver_tstamp = 0;

	dp_vdev_pdev_list_add(soc, pdev, vdev);
	pdev->vdev_count++;

	/* ap_bridge is enabled for every mode except STA */
	if (wlan_op_mode_sta != vdev->opmode)
		vdev->ap_bridge_enabled = true;
	else
		vdev->ap_bridge_enabled = false;
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		  "%s: wlan_cfg_ap_bridge_enabled %d",
		  __func__, vdev->ap_bridge_enabled);

	dp_tx_vdev_attach(vdev);

	/* LRO hash is configured once, when the first vdev appears */
	if (pdev->vdev_count == 1)
		dp_lro_hash_setup(soc, pdev);

	dp_info("Created vdev %pK ("QDF_MAC_ADDR_FMT")", vdev,
		QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
	DP_STATS_INIT(vdev);

	/* An STA vdev creates its self peer (MAC == vdev MAC) right away */
	if (wlan_op_mode_sta == vdev->opmode)
		dp_peer_create_wifi3((struct cdp_soc_t *)soc, vdev_id,
				     vdev->mac_addr.raw);
	return QDF_STATUS_SUCCESS;

fail0:
	return QDF_STATUS_E_FAILURE;
}
5226 
5227 /**
5228  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
5229  * @soc: Datapath soc handle
5230  * @vdev_id: id of Datapath VDEV handle
5231  * @osif_vdev: OSIF vdev handle
5232  * @txrx_ops: Tx and Rx operations
5233  *
5234  * Return: DP VDEV handle on success, NULL on failure
5235  */
5236 static QDF_STATUS dp_vdev_register_wifi3(struct cdp_soc_t *soc_hdl,
5237 					 uint8_t vdev_id,
5238 					 ol_osif_vdev_handle osif_vdev,
5239 					 struct ol_txrx_ops *txrx_ops)
5240 {
5241 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5242 	struct dp_vdev *vdev =	dp_vdev_get_ref_by_id(soc, vdev_id,
5243 						      DP_MOD_ID_CDP);
5244 
5245 	if (!vdev)
5246 		return QDF_STATUS_E_FAILURE;
5247 
5248 	vdev->osif_vdev = osif_vdev;
5249 	vdev->osif_rx = txrx_ops->rx.rx;
5250 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
5251 	vdev->osif_rx_flush = txrx_ops->rx.rx_flush;
5252 	vdev->osif_gro_flush = txrx_ops->rx.rx_gro_flush;
5253 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
5254 	vdev->osif_fisa_rx = txrx_ops->rx.osif_fisa_rx;
5255 	vdev->osif_fisa_flush = txrx_ops->rx.osif_fisa_flush;
5256 	vdev->osif_get_key = txrx_ops->get_key;
5257 	vdev->osif_rx_mon = txrx_ops->rx.mon;
5258 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
5259 	vdev->tx_comp = txrx_ops->tx.tx_comp;
5260 	vdev->stats_cb = txrx_ops->rx.stats_rx;
5261 #ifdef notyet
5262 #if ATH_SUPPORT_WAPI
5263 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
5264 #endif
5265 #endif
5266 #ifdef UMAC_SUPPORT_PROXY_ARP
5267 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
5268 #endif
5269 	vdev->me_convert = txrx_ops->me_convert;
5270 
5271 	/* TODO: Enable the following once Tx code is integrated */
5272 	if (vdev->mesh_vdev)
5273 		txrx_ops->tx.tx = dp_tx_send_mesh;
5274 	else
5275 		txrx_ops->tx.tx = dp_tx_send;
5276 
5277 	txrx_ops->tx.tx_exception = dp_tx_send_exception;
5278 
5279 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
5280 		"DP Vdev Register success");
5281 
5282 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5283 	return QDF_STATUS_SUCCESS;
5284 }
5285 
5286 /**
5287  * dp_peer_delete() - delete DP peer
5288  *
5289  * @soc: Datatpath soc
5290  * @peer: Datapath peer
5291  * @arg: argument to iter function
5292  *
5293  * Return: void
5294  */
5295 static void
5296 dp_peer_delete(struct dp_soc *soc,
5297 	       struct dp_peer *peer,
5298 	       void *arg)
5299 {
5300 	if (!peer->valid)
5301 		return;
5302 
5303 	dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
5304 			     peer->vdev->vdev_id,
5305 			     peer->mac_addr.raw, 0);
5306 }
5307 
5308 /**
5309  * dp_vdev_flush_peers() - Forcibily Flush peers of vdev
5310  * @vdev: Datapath VDEV handle
5311  * @unmap_only: Flag to indicate "only unmap"
5312  *
5313  * Return: void
5314  */
5315 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, bool unmap_only)
5316 {
5317 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5318 	struct dp_pdev *pdev = vdev->pdev;
5319 	struct dp_soc *soc = pdev->soc;
5320 	struct dp_peer *peer;
5321 	uint32_t i = 0;
5322 
5323 
5324 	if (!unmap_only)
5325 		dp_vdev_iterate_peer(vdev, dp_peer_delete, NULL,
5326 				     DP_MOD_ID_CDP);
5327 
5328 	for (i = 0; i < soc->max_peers ; i++) {
5329 		peer = __dp_peer_get_ref_by_id(soc, i, DP_MOD_ID_CDP);
5330 
5331 		if (!peer)
5332 			continue;
5333 
5334 		if (peer->vdev != vdev) {
5335 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5336 			continue;
5337 		}
5338 
5339 		dp_info("peer: "QDF_MAC_ADDR_FMT" is getting unmap",
5340 			QDF_MAC_ADDR_REF(peer->mac_addr.raw));
5341 
5342 		dp_rx_peer_unmap_handler(soc, i,
5343 					 vdev->vdev_id,
5344 					 peer->mac_addr.raw, 0,
5345 					 DP_PEER_WDS_COUNT_INVALID);
5346 		SET_PEER_REF_CNT_ONE(peer);
5347 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5348 	}
5349 
5350 }
5351 
5352 /*
5353  * dp_vdev_detach_wifi3() - Detach txrx vdev
5354  * @cdp_soc: Datapath soc handle
5355  * @vdev_id: VDEV Id
5356  * @callback: Callback OL_IF on completion of detach
5357  * @cb_context:	Callback context
5358  *
5359  */
static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc,
				       uint8_t vdev_id,
				       ol_txrx_vdev_delete_cb callback,
				       void *cb_context)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_pdev *pdev;
	struct dp_neighbour_peer *peer = NULL;
	struct dp_neighbour_peer *temp_peer = NULL;
	struct dp_peer *vap_self_peer = NULL;
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	pdev = vdev->pdev;

	/* For an STA vdev, delete the self peer (and its self AST entry)
	 * before the vdev itself is torn down.
	 */
	vap_self_peer = dp_sta_vdev_self_peer_ref_n_get(soc, vdev,
							DP_MOD_ID_CONFIG);
	if (vap_self_peer) {
		qdf_spin_lock_bh(&soc->ast_lock);
		if (vap_self_peer->self_ast_entry) {
			dp_peer_del_ast(soc, vap_self_peer->self_ast_entry);
			vap_self_peer->self_ast_entry = NULL;
		}
		qdf_spin_unlock_bh(&soc->ast_lock);

		dp_peer_delete_wifi3((struct cdp_soc_t *)soc, vdev->vdev_id,
				     vap_self_peer->mac_addr.raw, 0);
		dp_peer_unref_delete(vap_self_peer, DP_MOD_ID_CONFIG);
	}

	/*
	 * If Target is hung, flush all peers before detaching vdev
	 * this will free all references held due to missing
	 * unmap commands from Target
	 */
	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
		dp_vdev_flush_peers((struct cdp_vdev *)vdev, false);
	else if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
		dp_vdev_flush_peers((struct cdp_vdev *)vdev, true);

	dp_rx_vdev_detach(vdev);
	/*
	 * move it after dp_rx_vdev_detach(),
	 * as the call back done in dp_rx_vdev_detach()
	 * still need to get vdev pointer by vdev_id.
	 */
	dp_vdev_id_map_tbl_remove(soc, vdev);

	/* Drop neighbour (NAC) peers that reference this vdev; without
	 * HW NAC monitor support none are expected to remain here.
	 */
	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
	if (!soc->hw_nac_monitor_support) {
		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
			      neighbour_peer_list_elem) {
			QDF_ASSERT(peer->vdev != vdev);
		}
	} else {
		TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
				   neighbour_peer_list_elem, temp_peer) {
			if (peer->vdev == vdev) {
				TAILQ_REMOVE(&pdev->neighbour_peers_list, peer,
					     neighbour_peer_list_elem);
				qdf_mem_free(peer);
			}
		}
	}
	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

	if (vdev->vdev_dp_ext_handle) {
		qdf_mem_free(vdev->vdev_dp_ext_handle);
		vdev->vdev_dp_ext_handle = NULL;
	}
	/* indicate that the vdev needs to be deleted */
	vdev->delete.pending = 1;
	vdev->delete.callback = callback;
	vdev->delete.context = cb_context;

	/* monitor vdevs are never on the pdev list (see attach) */
	if (vdev->opmode != wlan_op_mode_monitor)
		dp_vdev_pdev_list_remove(soc, pdev, vdev);

	/* release reference taken above for find */
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);

	/* Park the vdev on the inactive list until the last module
	 * reference is dropped.
	 */
	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
	TAILQ_INSERT_TAIL(&soc->inactive_vdev_list, vdev, inactive_list_elem);
	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);

	/* release reference taken at dp_vdev_create */
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);

	return QDF_STATUS_SUCCESS;
}
5453 
5454 #if ATH_SUPPORT_WRAP
5455 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
5456 						uint8_t *peer_mac_addr)
5457 {
5458 	struct dp_peer *peer;
5459 
5460 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
5461 				      0, vdev->vdev_id,
5462 				      DP_MOD_ID_CONFIG);
5463 	if (!peer)
5464 		return NULL;
5465 
5466 	if (peer->bss_peer)
5467 		return peer;
5468 
5469 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
5470 	return NULL;
5471 }
5472 #else
5473 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
5474 						uint8_t *peer_mac_addr)
5475 {
5476 	struct dp_peer *peer;
5477 
5478 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
5479 				      0, vdev->vdev_id,
5480 				      DP_MOD_ID_CONFIG);
5481 	if (!peer)
5482 		return NULL;
5483 
5484 	if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
5485 		return peer;
5486 
5487 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
5488 	return NULL;
5489 }
5490 #endif
5491 
5492 #ifdef FEATURE_AST
5493 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
5494 					       struct dp_pdev *pdev,
5495 					       uint8_t *peer_mac_addr)
5496 {
5497 	struct dp_ast_entry *ast_entry;
5498 
5499 	qdf_spin_lock_bh(&soc->ast_lock);
5500 	if (soc->ast_override_support)
5501 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
5502 							    pdev->pdev_id);
5503 	else
5504 		ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
5505 
5506 	if (ast_entry && ast_entry->next_hop && !ast_entry->delete_in_progress)
5507 		dp_peer_del_ast(soc, ast_entry);
5508 
5509 	qdf_spin_unlock_bh(&soc->ast_lock);
5510 }
5511 #endif
5512 
5513 #ifdef PEER_CACHE_RX_PKTS
/* Initialize the peer's cached RX buffer queue: its lock, queueing
 * threshold and backing list (all sized by DP_RX_CACHED_BUFQ_THRESH).
 */
static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
{
	qdf_spinlock_create(&peer->bufq_info.bufq_lock);
	peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
	qdf_list_create(&peer->bufq_info.cached_bufq, DP_RX_CACHED_BUFQ_THRESH);
}
5520 #else
/* No-op when PEER_CACHE_RX_PKTS is not compiled in */
static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
{
}
5524 #endif
5525 
5526 /*
5527  * dp_peer_create_wifi3() - attach txrx peer
5528  * @soc_hdl: Datapath soc handle
5529  * @vdev_id: id of vdev
5530  * @peer_mac_addr: Peer MAC address
5531  *
5532  * Return: 0 on success, -1 on failure
5533  */
5534 static QDF_STATUS
5535 dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5536 		     uint8_t *peer_mac_addr)
5537 {
5538 	struct dp_peer *peer;
5539 	int i;
5540 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
5541 	struct dp_pdev *pdev;
5542 	struct cdp_peer_cookie peer_cookie;
5543 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
5544 	struct dp_vdev *vdev = NULL;
5545 
5546 	if (!peer_mac_addr)
5547 		return QDF_STATUS_E_FAILURE;
5548 
5549 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
5550 
5551 	if (!vdev)
5552 		return QDF_STATUS_E_FAILURE;
5553 
5554 	pdev = vdev->pdev;
5555 	soc = pdev->soc;
5556 
5557 	/*
5558 	 * If a peer entry with given MAC address already exists,
5559 	 * reuse the peer and reset the state of peer.
5560 	 */
5561 	peer = dp_peer_can_reuse(vdev, peer_mac_addr);
5562 
5563 	if (peer) {
5564 		qdf_atomic_init(&peer->is_default_route_set);
5565 		dp_peer_cleanup(vdev, peer);
5566 
5567 		qdf_spin_lock_bh(&soc->ast_lock);
5568 		dp_peer_delete_ast_entries(soc, peer);
5569 		qdf_spin_unlock_bh(&soc->ast_lock);
5570 
5571 		if ((vdev->opmode == wlan_op_mode_sta) &&
5572 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
5573 		     QDF_MAC_ADDR_SIZE)) {
5574 			ast_type = CDP_TXRX_AST_TYPE_SELF;
5575 		}
5576 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
5577 
5578 		peer->valid = 1;
5579 		dp_local_peer_id_alloc(pdev, peer);
5580 
5581 		qdf_spinlock_create(&peer->peer_info_lock);
5582 		dp_peer_rx_bufq_resources_init(peer);
5583 
5584 		DP_STATS_INIT(peer);
5585 		DP_STATS_UPD(peer, rx.avg_rssi, INVALID_RSSI);
5586 
5587 		/*
5588 		 * In tx_monitor mode, filter may be set for unassociated peer
5589 		 * when unassociated peer get associated peer need to
5590 		 * update tx_cap_enabled flag to support peer filter.
5591 		 */
5592 		dp_peer_tx_capture_filter_check(pdev, peer);
5593 
5594 		dp_set_peer_isolation(peer, false);
5595 
5596 		for (i = 0; i < DP_MAX_TIDS; i++)
5597 			qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
5598 
5599 		dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
5600 
5601 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5602 		return QDF_STATUS_SUCCESS;
5603 	} else {
5604 		/*
5605 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
5606 		 * need to remove the AST entry which was earlier added as a WDS
5607 		 * entry.
5608 		 * If an AST entry exists, but no peer entry exists with a given
5609 		 * MAC addresses, we could deduce it as a WDS entry
5610 		 */
5611 		dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
5612 	}
5613 
5614 #ifdef notyet
5615 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
5616 		soc->mempool_ol_ath_peer);
5617 #else
5618 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
5619 #endif
5620 	wlan_minidump_log(peer,
5621 			  sizeof(*peer),
5622 			  soc->ctrl_psoc,
5623 			  WLAN_MD_DP_PEER, "dp_peer");
5624 	if (!peer) {
5625 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5626 		return QDF_STATUS_E_FAILURE; /* failure */
5627 	}
5628 
5629 	qdf_mem_zero(peer, sizeof(struct dp_peer));
5630 
5631 	TAILQ_INIT(&peer->ast_entry_list);
5632 
5633 	/* store provided params */
5634 	peer->vdev = vdev;
5635 	/* get the vdev reference for new peer */
5636 	dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CHILD);
5637 
5638 	if ((vdev->opmode == wlan_op_mode_sta) &&
5639 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
5640 			 QDF_MAC_ADDR_SIZE)) {
5641 		ast_type = CDP_TXRX_AST_TYPE_SELF;
5642 	}
5643 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
5644 	qdf_spinlock_create(&peer->peer_info_lock);
5645 
5646 	dp_peer_rx_bufq_resources_init(peer);
5647 
5648 	qdf_mem_copy(
5649 		&peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE);
5650 
5651 	/* initialize the peer_id */
5652 	peer->peer_id = HTT_INVALID_PEER;
5653 
5654 	/* reset the ast index to flowid table */
5655 	dp_peer_reset_flowq_map(peer);
5656 
5657 	qdf_atomic_init(&peer->ref_cnt);
5658 
5659 	for (i = 0; i < DP_MOD_ID_MAX; i++)
5660 		qdf_atomic_init(&peer->mod_refs[i]);
5661 
5662 	/* keep one reference for attach */
5663 	qdf_atomic_inc(&peer->ref_cnt);
5664 	qdf_atomic_inc(&peer->mod_refs[DP_MOD_ID_CONFIG]);
5665 
5666 	dp_peer_vdev_list_add(soc, vdev, peer);
5667 
5668 	/* TODO: See if hash based search is required */
5669 	dp_peer_find_hash_add(soc, peer);
5670 
5671 	/* Initialize the peer state */
5672 	peer->state = OL_TXRX_PEER_STATE_DISC;
5673 
5674 	dp_info("vdev %pK created peer %pK ("QDF_MAC_ADDR_FMT") ref_cnt: %d",
5675 		vdev, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
5676 		qdf_atomic_read(&peer->ref_cnt));
5677 	/*
5678 	 * For every peer MAp message search and set if bss_peer
5679 	 */
5680 	if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
5681 			QDF_MAC_ADDR_SIZE) == 0 &&
5682 			(wlan_op_mode_sta != vdev->opmode)) {
5683 		dp_info("vdev bss_peer!!");
5684 		peer->bss_peer = 1;
5685 	}
5686 
5687 	if (wlan_op_mode_sta == vdev->opmode &&
5688 	    qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
5689 			QDF_MAC_ADDR_SIZE) == 0) {
5690 		peer->sta_self_peer = 1;
5691 	}
5692 
5693 	for (i = 0; i < DP_MAX_TIDS; i++)
5694 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
5695 
5696 	peer->valid = 1;
5697 	dp_local_peer_id_alloc(pdev, peer);
5698 	DP_STATS_INIT(peer);
5699 	DP_STATS_UPD(peer, rx.avg_rssi, INVALID_RSSI);
5700 
5701 	qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
5702 		     QDF_MAC_ADDR_SIZE);
5703 	peer_cookie.ctx = NULL;
5704 	peer_cookie.pdev_id = pdev->pdev_id;
5705 	peer_cookie.cookie = pdev->next_peer_cookie++;
5706 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
5707 	dp_wdi_event_handler(WDI_EVENT_PEER_CREATE, pdev->soc,
5708 			     (void *)&peer_cookie,
5709 			     peer->peer_id, WDI_NO_VAL, pdev->pdev_id);
5710 #endif
5711 	if (soc->wlanstats_enabled) {
5712 		if (!peer_cookie.ctx) {
5713 			pdev->next_peer_cookie--;
5714 			qdf_err("Failed to initialize peer rate stats");
5715 		} else {
5716 			peer->wlanstats_ctx = (struct cdp_peer_rate_stats_ctx *)
5717 						peer_cookie.ctx;
5718 		}
5719 	}
5720 
5721 	/*
5722 	 * Allocate peer extended stats context. Fall through in
5723 	 * case of failure as its not an implicit requirement to have
5724 	 * this object for regular statistics updates.
5725 	 */
5726 	if (dp_peer_ext_stats_ctx_alloc(soc, peer) !=
5727 			QDF_STATUS_SUCCESS)
5728 		dp_warn("peer ext_stats ctx alloc failed");
5729 
5730 	/*
5731 	 * In tx_monitor mode, filter may be set for unassociated peer
5732 	 * when unassociated peer get associated peer need to
5733 	 * update tx_cap_enabled flag to support peer filter.
5734 	 */
5735 	dp_peer_tx_capture_filter_check(pdev, peer);
5736 
5737 	dp_set_peer_isolation(peer, false);
5738 
5739 	dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
5740 
5741 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5742 
5743 	return QDF_STATUS_SUCCESS;
5744 }
5745 
5746 /*
5747  * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
5748  * @vdev: Datapath VDEV handle
5749  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5750  * @hash_based: pointer to hash value (enabled/disabled) to be populated
5751  *
5752  * Return: None
5753  */
5754 static
5755 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
5756 				  enum cdp_host_reo_dest_ring *reo_dest,
5757 				  bool *hash_based)
5758 {
5759 	struct dp_soc *soc;
5760 	struct dp_pdev *pdev;
5761 
5762 	pdev = vdev->pdev;
5763 	soc = pdev->soc;
5764 	/*
5765 	 * hash based steering is disabled for Radios which are offloaded
5766 	 * to NSS
5767 	 */
5768 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
5769 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
5770 
5771 	/*
5772 	 * Below line of code will ensure the proper reo_dest ring is chosen
5773 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
5774 	 */
5775 	*reo_dest = pdev->reo_dest;
5776 }
5777 
5778 #ifdef IPA_OFFLOAD
5779 /**
5780  * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
5781  * @vdev: Virtual device
5782  *
5783  * Return: true if the vdev is of subtype P2P
5784  *	   false if the vdev is of any other subtype
5785  */
5786 static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
5787 {
5788 	if (vdev->subtype == wlan_op_subtype_p2p_device ||
5789 	    vdev->subtype == wlan_op_subtype_p2p_cli ||
5790 	    vdev->subtype == wlan_op_subtype_p2p_go)
5791 		return true;
5792 
5793 	return false;
5794 }
5795 
5796 /*
5797  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
5798  * @vdev: Datapath VDEV handle
5799  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5800  * @hash_based: pointer to hash value (enabled/disabled) to be populated
5801  *
5802  * If IPA is enabled in ini, for SAP mode, disable hash based
5803  * steering, use default reo_dst ring for RX. Use config values for other modes.
5804  * Return: None
5805  */
5806 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
5807 				       enum cdp_host_reo_dest_ring *reo_dest,
5808 				       bool *hash_based)
5809 {
5810 	struct dp_soc *soc;
5811 	struct dp_pdev *pdev;
5812 
5813 	pdev = vdev->pdev;
5814 	soc = pdev->soc;
5815 
5816 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
5817 
5818 	/* For P2P-GO interfaces we do not need to change the REO
5819 	 * configuration even if IPA config is enabled
5820 	 */
5821 	if (dp_is_vdev_subtype_p2p(vdev))
5822 		return;
5823 
5824 	/*
5825 	 * If IPA is enabled, disable hash-based flow steering and set
5826 	 * reo_dest_ring_4 as the REO ring to receive packets on.
5827 	 * IPA is configured to reap reo_dest_ring_4.
5828 	 *
5829 	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
5830 	 * value enum value is from 1 - 4.
5831 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
5832 	 */
5833 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5834 		if (vdev->opmode == wlan_op_mode_ap) {
5835 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
5836 			*hash_based = 0;
5837 		} else if (vdev->opmode == wlan_op_mode_sta &&
5838 			   dp_ipa_is_mdm_platform()) {
5839 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
5840 		}
5841 	}
5842 }
5843 
5844 #else
5845 
5846 /*
5847  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
5848  * @vdev: Datapath VDEV handle
5849  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5850  * @hash_based: pointer to hash value (enabled/disabled) to be populated
5851  *
5852  * Use system config values for hash based steering.
5853  * Return: None
5854  */
5855 
static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
				       enum cdp_host_reo_dest_ring *reo_dest,
				       bool *hash_based)
{
	/* No IPA offload compiled in: the vdev defaults apply unchanged */
	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
}
5862 #endif /* IPA_OFFLOAD */
5863 
5864 /*
5865  * dp_peer_setup_wifi3() - initialize the peer
5866  * @soc_hdl: soc handle object
5867  * @vdev_id : vdev_id of vdev object
5868  * @peer_mac: Peer's mac address
5869  *
5870  * Return: QDF_STATUS
5871  */
static QDF_STATUS
dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		    uint8_t *peer_mac)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev;
	bool hash_based = 0;
	enum cdp_host_reo_dest_ring reo_dest;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_vdev *vdev = NULL;
	struct dp_peer *peer =
			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
					       DP_MOD_ID_CDP);

	if (!peer)
		return QDF_STATUS_E_FAILURE;

	vdev = peer->vdev;
	if (!vdev) {
		status = QDF_STATUS_E_FAILURE;
		goto fail;
	}

	pdev = vdev->pdev;
	/* Select REO destination ring and hash-based steering for this
	 * peer; IPA/NSS configuration may override the vdev defaults.
	 */
	dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based);

	dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u",
		pdev->pdev_id, vdev->vdev_id,
		vdev->opmode, hash_based, reo_dest);

	/*
	 * There are corner cases where the AD1 = AD2 = "VAPs address"
	 * i.e both the devices have same MAC address. In these
	 * cases we want such pkts to be processed in NULL Q handler
	 * which is REO2TCL ring. for this reason we should
	 * not setup reo_queues and default route for bss_peer.
	 */
	dp_peer_tx_init(pdev, peer);
	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
		status = QDF_STATUS_E_FAILURE;
		goto fail;
	}

	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
		/* TODO: Check the destination ring number to be passed to FW */
		soc->cdp_soc.ol_ops->peer_set_default_routing(
				soc->ctrl_psoc,
				peer->vdev->pdev->pdev_id,
				peer->mac_addr.raw,
				peer->vdev->vdev_id, hash_based, reo_dest);
	}

	qdf_atomic_set(&peer->is_default_route_set, 1);

	dp_peer_rx_init(pdev, peer);

	dp_peer_ppdu_delayed_ba_init(peer);

fail:
	/* drop the reference taken by dp_peer_find_hash_find() above */
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return status;
}
5934 
5935 /*
5936  * dp_cp_peer_del_resp_handler - Handle the peer delete response
5937  * @soc_hdl: Datapath SOC handle
5938  * @vdev_id: id of virtual device object
5939  * @mac_addr: Mac address of the peer
5940  *
5941  * Return: QDF_STATUS
5942  */
static QDF_STATUS dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl,
					      uint8_t vdev_id,
					      uint8_t *mac_addr)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry  *ast_entry = NULL;
	txrx_ast_free_cb cb = NULL;
	void *cookie;

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry =
		dp_peer_ast_hash_find_by_vdevid(soc, mac_addr,
						vdev_id);

	/* in case of qwrap we have multiple BSS peers
	 * with same mac address
	 *
	 * AST entry for this mac address will be created
	 * only for one peer hence it will be NULL here
	 */
	if ((!ast_entry || !ast_entry->delete_in_progress) ||
	    (ast_entry->peer_id != HTT_INVALID_PEER)) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return QDF_STATUS_E_FAILURE;
	}

	/* Clear the direct-index table slot before removing the entry */
	if (ast_entry->is_mapped)
		soc->ast_table[ast_entry->ast_idx] = NULL;

	DP_STATS_INC(soc, ast.deleted, 1);
	dp_peer_ast_hash_remove(soc, ast_entry);

	/* Detach the free callback under the lock; it is invoked after
	 * the lock is released below.
	 */
	cb = ast_entry->callback;
	cookie = ast_entry->cookie;
	ast_entry->callback = NULL;
	ast_entry->cookie = NULL;

	soc->num_ast_entries--;
	qdf_spin_unlock_bh(&soc->ast_lock);

	if (cb) {
		cb(soc->ctrl_psoc,
		   dp_soc_to_cdp_soc(soc),
		   cookie,
		   CDP_TXRX_AST_DELETED);
	}
	qdf_mem_free(ast_entry);

	return QDF_STATUS_SUCCESS;
}
5994 
5995 /*
5996  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
5997  * @txrx_soc: cdp soc handle
5998  * @ac: Access category
5999  * @value: timeout value in millisec
6000  *
6001  * Return: void
6002  */
6003 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
6004 				    uint8_t ac, uint32_t value)
6005 {
6006 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6007 
6008 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
6009 }
6010 
6011 /*
6012  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
6013  * @txrx_soc: cdp soc handle
6014  * @ac: access category
6015  * @value: timeout value in millisec
6016  *
6017  * Return: void
6018  */
6019 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
6020 				    uint8_t ac, uint32_t *value)
6021 {
6022 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6023 
6024 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
6025 }
6026 
6027 /*
6028  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
6029  * @txrx_soc: cdp soc handle
6030  * @pdev_id: id of physical device object
6031  * @val: reo destination ring index (1 - 4)
6032  *
6033  * Return: QDF_STATUS
6034  */
6035 static QDF_STATUS
6036 dp_set_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id,
6037 		     enum cdp_host_reo_dest_ring val)
6038 {
6039 	struct dp_pdev *pdev =
6040 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
6041 						   pdev_id);
6042 
6043 	if (pdev) {
6044 		pdev->reo_dest = val;
6045 		return QDF_STATUS_SUCCESS;
6046 	}
6047 
6048 	return QDF_STATUS_E_FAILURE;
6049 }
6050 
6051 /*
6052  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
6053  * @txrx_soc: cdp soc handle
6054  * @pdev_id: id of physical device object
6055  *
6056  * Return: reo destination ring index
6057  */
6058 static enum cdp_host_reo_dest_ring
6059 dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id)
6060 {
6061 	struct dp_pdev *pdev =
6062 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
6063 						   pdev_id);
6064 
6065 	if (pdev)
6066 		return pdev->reo_dest;
6067 	else
6068 		return cdp_host_reo_dest_ring_unknown;
6069 }
6070 
#ifdef ATH_SUPPORT_NAC
/*
 * dp_set_filter_neigh_peers() - set filter neighbour peers for smart mesh
 * @pdev: physical device object
 * @val: true to enable NAC filtering, false to disable
 *
 * Return: 0 always
 */
static int dp_set_filter_neigh_peers(struct dp_pdev *pdev,
				     bool val)
{
	/* Enable/Disable smart mesh filtering. This flag will be checked
	 * during rx processing to check if packets are from NAC clients.
	 */
	pdev->filter_neighbour_peers = val;
	return 0;
}
#else
/* NAC support compiled out: accept and ignore the request */
static int dp_set_filter_neigh_peers(struct dp_pdev *pdev,
				     bool val)
{
	return 0;
}
#endif /* ATH_SUPPORT_NAC */
6095 
#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
/*
 * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
 * address for smart mesh filtering
 * @soc_hdl: cdp soc handle
 * @vdev_id: id of virtual device object
 * @cmd: DP_NAC_PARAM_ADD or DP_NAC_PARAM_DEL
 * @macaddr: nac client mac address
 *
 * Return: 1 on success, 0 on failure
 */
static int dp_update_filter_neighbour_peers(struct cdp_soc_t *soc_hdl,
					    uint8_t vdev_id,
					    uint32_t cmd, uint8_t *macaddr)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_pdev *pdev;
	struct dp_neighbour_peer *peer = NULL;
	/* Hold a CDP-module reference so the vdev cannot be deleted
	 * while we use it; released on every exit path.
	 */
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_CDP);

	if (!vdev || !macaddr)
		goto fail0;

	pdev = vdev->pdev;

	if (!pdev)
		goto fail0;

	/* Store address of NAC (neighbour peer) which will be checked
	 * against TA of received packets.
	 */
	if (cmd == DP_NAC_PARAM_ADD) {
		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
				sizeof(*peer));

		if (!peer) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				FL("DP neighbour peer node memory allocation failed"));
			goto fail0;
		}

		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
			macaddr, QDF_MAC_ADDR_SIZE);
		peer->vdev = vdev;

		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);

		/* add this neighbour peer into the list */
		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
				neighbour_peer_list_elem);
		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);

		/* first neighbour */
		if (!pdev->neighbour_peers_added) {
			QDF_STATUS status = QDF_STATUS_SUCCESS;

			pdev->neighbour_peers_added = true;
			dp_mon_filter_setup_smart_monitor(pdev);
			status = dp_mon_filter_update(pdev);
			if (status != QDF_STATUS_SUCCESS) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("smart mon filter setup failed"));
				/* NOTE(review): the node just queued stays on
				 * the list even though the filter rollback
				 * marks no neighbours added - confirm this is
				 * intended (function still returns success).
				 */
				dp_mon_filter_reset_smart_monitor(pdev);
				pdev->neighbour_peers_added = false;
			}
		}

	} else if (cmd == DP_NAC_PARAM_DEL) {
		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
				neighbour_peer_list_elem) {
			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
				macaddr, QDF_MAC_ADDR_SIZE)) {
				/* delete this peer from the list */
				TAILQ_REMOVE(&pdev->neighbour_peers_list,
					peer, neighbour_peer_list_elem);
				qdf_mem_free(peer);
				break;
			}
		}
		/* last neighbour deleted */
		if (TAILQ_EMPTY(&pdev->neighbour_peers_list)) {
			QDF_STATUS status = QDF_STATUS_SUCCESS;

			pdev->neighbour_peers_added = false;
			dp_mon_filter_reset_smart_monitor(pdev);
			status = dp_mon_filter_update(pdev);
			if (status != QDF_STATUS_SUCCESS) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("smart mon filter clear failed"));
			}

		}

		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
	}
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return 1;

fail0:
	/* drop the vdev reference taken above, if the lookup succeeded */
	if (vdev)
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
	return 0;
}
#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
6204 
#ifdef WLAN_SUPPORT_MSCS
/*
 * dp_record_mscs_params() - record MSCS parameters sent by the STA in
 * the MSCS Request to the AP. The AP makes a note of these
 * parameters while comparing the MSDUs sent by the STA, to
 * send the downlink traffic with correct User priority.
 * @soc_hdl - Datapath soc handle
 * @peer_mac - STA Mac address
 * @vdev_id - ID of the vdev handle
 * @mscs_params - Structure having MSCS parameters obtained
 * from handshake
 * @active - Flag to set MSCS active/inactive
 *
 * Return: QDF_STATUS_SUCCESS when the parameters were recorded (or the
 *	   procedure was terminated), QDF_STATUS_E_INVAL on failure
 */
static QDF_STATUS
dp_record_mscs_params(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
		      uint8_t vdev_id, struct cdp_mscs_params *mscs_params,
		      bool active)
{
	struct dp_peer *peer;
	QDF_STATUS status = QDF_STATUS_E_INVAL;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
				      DP_MOD_ID_CDP);

	if (!peer) {
		dp_err("%s: Peer is NULL!\n", __func__);
		goto fail;
	}
	if (!active) {
		dp_info("MSCS Procedure is terminated");
		peer->mscs_active = active;
		/* Termination succeeded; previously this path fell through
		 * with status still QDF_STATUS_E_INVAL, wrongly reporting
		 * failure to the caller.
		 */
		status = QDF_STATUS_SUCCESS;
		goto fail;
	}

	if (mscs_params->classifier_type == IEEE80211_TCLAS_MASK_CLA_TYPE_4) {
		/* Populate entries inside IPV4 database first */
		peer->mscs_ipv4_parameter.user_priority_bitmap =
			mscs_params->user_pri_bitmap;
		peer->mscs_ipv4_parameter.user_priority_limit =
			mscs_params->user_pri_limit;
		peer->mscs_ipv4_parameter.classifier_mask =
			mscs_params->classifier_mask;

		/* Populate entries inside IPV6 database */
		peer->mscs_ipv6_parameter.user_priority_bitmap =
			mscs_params->user_pri_bitmap;
		peer->mscs_ipv6_parameter.user_priority_limit =
			mscs_params->user_pri_limit;
		peer->mscs_ipv6_parameter.classifier_mask =
			mscs_params->classifier_mask;
		peer->mscs_active = 1;
		dp_info("\n\tMSCS Procedure request based parameters for "QDF_MAC_ADDR_FMT"\n"
			"\tClassifier_type = %d\tUser priority bitmap = %x\n"
			"\tUser priority limit = %x\tClassifier mask = %x",
			QDF_MAC_ADDR_REF(peer_mac),
			mscs_params->classifier_type,
			peer->mscs_ipv4_parameter.user_priority_bitmap,
			peer->mscs_ipv4_parameter.user_priority_limit,
			peer->mscs_ipv4_parameter.classifier_mask);
	}

	status = QDF_STATUS_SUCCESS;
fail:
	if (peer)
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return status;
}
#endif
6275 
6276 /*
6277  * dp_get_sec_type() - Get the security type
6278  * @soc: soc handle
6279  * @vdev_id: id of dp handle
6280  * @peer_mac: mac of datapath PEER handle
6281  * @sec_idx:    Security id (mcast, ucast)
6282  *
6283  * return sec_type: Security type
6284  */
6285 static int dp_get_sec_type(struct cdp_soc_t *soc, uint8_t vdev_id,
6286 			   uint8_t *peer_mac, uint8_t sec_idx)
6287 {
6288 	int sec_type = 0;
6289 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
6290 						       peer_mac, 0, vdev_id,
6291 						       DP_MOD_ID_CDP);
6292 
6293 	if (!peer) {
6294 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
6295 			  "%s: Peer is NULL!\n", __func__);
6296 		return sec_type;
6297 	}
6298 
6299 	sec_type = peer->security[sec_idx].sec_type;
6300 
6301 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6302 	return sec_type;
6303 }
6304 
6305 /*
6306  * dp_peer_authorize() - authorize txrx peer
6307  * @soc: soc handle
6308  * @vdev_id: id of dp handle
6309  * @peer_mac: mac of datapath PEER handle
6310  * @authorize
6311  *
6312  */
6313 static QDF_STATUS
6314 dp_peer_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
6315 		  uint8_t *peer_mac, uint32_t authorize)
6316 {
6317 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6318 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6319 	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
6320 						      0, vdev_id,
6321 						      DP_MOD_ID_CDP);
6322 
6323 	if (!peer) {
6324 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
6325 			  "%s: Peer is NULL!\n", __func__);
6326 		status = QDF_STATUS_E_FAILURE;
6327 	} else {
6328 		peer->authorize = authorize ? 1 : 0;
6329 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6330 	}
6331 
6332 	return status;
6333 }
6334 
6335 /**
6336  * dp_vdev_unref_delete() - check and process vdev delete
6337  * @soc : DP specific soc pointer
6338  * @vdev: DP specific vdev pointer
6339  * @mod_id: module id
6340  *
6341  */
6342 void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev,
6343 			  enum dp_mod_id mod_id)
6344 {
6345 	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
6346 	void *vdev_delete_context = NULL;
6347 	uint8_t vdev_id = vdev->vdev_id;
6348 	struct dp_pdev *pdev = vdev->pdev;
6349 	struct dp_vdev *tmp_vdev = NULL;
6350 	uint8_t found = 0;
6351 
6352 	QDF_ASSERT(qdf_atomic_dec_return(&vdev->mod_refs[mod_id]) >= 0);
6353 
6354 	/* Return if this is not the last reference*/
6355 	if (!qdf_atomic_dec_and_test(&vdev->ref_cnt))
6356 		return;
6357 
6358 	/*
6359 	 * This should be set as last reference need to released
6360 	 * after cdp_vdev_detach() is called
6361 	 *
6362 	 * if this assert is hit there is a ref count issue
6363 	 */
6364 	QDF_ASSERT(vdev->delete.pending);
6365 
6366 	vdev_delete_cb = vdev->delete.callback;
6367 	vdev_delete_context = vdev->delete.context;
6368 
6369 	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")- its last peer is done",
6370 		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
6371 
6372 	if (wlan_op_mode_monitor == vdev->opmode) {
6373 		if (soc->intr_mode == DP_INTR_POLL)
6374 			qdf_timer_sync_cancel(&soc->int_timer);
6375 		pdev->monitor_vdev = NULL;
6376 		goto free_vdev;
6377 	}
6378 	/* all peers are gone, go ahead and delete it */
6379 	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
6380 			FLOW_TYPE_VDEV, vdev_id);
6381 	dp_tx_vdev_detach(vdev);
6382 
6383 free_vdev:
6384 	qdf_spinlock_destroy(&vdev->peer_list_lock);
6385 
6386 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
6387 	TAILQ_FOREACH(tmp_vdev, &soc->inactive_vdev_list,
6388 		      inactive_list_elem) {
6389 		if (tmp_vdev == vdev) {
6390 			found = 1;
6391 			break;
6392 		}
6393 	}
6394 	if (found)
6395 		TAILQ_REMOVE(&soc->inactive_vdev_list, vdev,
6396 			     inactive_list_elem);
6397 	/* delete this peer from the list */
6398 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
6399 
6400 	dp_info("deleting vdev object %pK (%pM)",
6401 		vdev, vdev->mac_addr.raw);
6402 	dp_info("deleting vdev object %pK ("QDF_MAC_ADDR_FMT")",
6403 		vdev, QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
6404 	wlan_minidump_remove(vdev);
6405 	qdf_mem_free(vdev);
6406 	vdev = NULL;
6407 
6408 	if (vdev_delete_cb)
6409 		vdev_delete_cb(vdev_delete_context);
6410 }
6411 
6412 /*
6413  * dp_peer_unref_delete() - unref and delete peer
6414  * @peer_handle:    Datapath peer handle
6415  * @mod_id:         ID of module releasing reference
6416  *
6417  */
6418 void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id mod_id)
6419 {
6420 	struct dp_vdev *vdev = peer->vdev;
6421 	struct dp_pdev *pdev = vdev->pdev;
6422 	struct dp_soc *soc = pdev->soc;
6423 	uint16_t peer_id;
6424 	struct cdp_peer_cookie peer_cookie;
6425 	struct dp_peer *tmp_peer;
6426 	bool found = false;
6427 
6428 	if (mod_id > DP_MOD_ID_RX)
6429 		QDF_ASSERT(qdf_atomic_dec_return(&peer->mod_refs[mod_id]) >= 0);
6430 
6431 	/*
6432 	 * Hold the lock all the way from checking if the peer ref count
6433 	 * is zero until the peer references are removed from the hash
6434 	 * table and vdev list (if the peer ref count is zero).
6435 	 * This protects against a new HL tx operation starting to use the
6436 	 * peer object just after this function concludes it's done being used.
6437 	 * Furthermore, the lock needs to be held while checking whether the
6438 	 * vdev's list of peers is empty, to make sure that list is not modified
6439 	 * concurrently with the empty check.
6440 	 */
6441 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
6442 		peer_id = peer->peer_id;
6443 
6444 		/*
6445 		 * Make sure that the reference to the peer in
6446 		 * peer object map is removed
6447 		 */
6448 		QDF_ASSERT(peer_id == HTT_INVALID_PEER);
6449 
6450 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
6451 			  "Deleting peer %pK ("QDF_MAC_ADDR_FMT")", peer,
6452 			  QDF_MAC_ADDR_REF(peer->mac_addr.raw));
6453 
6454 		/*
6455 		 * Deallocate the extended stats contenxt
6456 		 */
6457 		dp_peer_ext_stats_ctx_dealloc(soc, peer);
6458 
6459 		/* send peer destroy event to upper layer */
6460 		qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
6461 			     QDF_MAC_ADDR_SIZE);
6462 		peer_cookie.ctx = NULL;
6463 		peer_cookie.ctx = (struct cdp_stats_cookie *)
6464 					peer->wlanstats_ctx;
6465 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6466 		dp_wdi_event_handler(WDI_EVENT_PEER_DESTROY,
6467 				     soc,
6468 				     (void *)&peer_cookie,
6469 				     peer->peer_id,
6470 				     WDI_NO_VAL,
6471 				     pdev->pdev_id);
6472 #endif
6473 		peer->wlanstats_ctx = NULL;
6474 		wlan_minidump_remove(peer);
6475 
6476 		qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
6477 		TAILQ_FOREACH(tmp_peer, &soc->inactive_peer_list,
6478 			      inactive_list_elem) {
6479 			if (tmp_peer == peer) {
6480 				found = 1;
6481 				break;
6482 			}
6483 		}
6484 		if (found)
6485 			TAILQ_REMOVE(&soc->inactive_peer_list, peer,
6486 				     inactive_list_elem);
6487 		/* delete this peer from the list */
6488 		qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
6489 		DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
6490 		dp_peer_update_state(soc, peer, DP_PEER_STATE_FREED);
6491 
6492 		qdf_mem_free(peer);
6493 
6494 		/*
6495 		 * Decrement ref count taken at peer create
6496 		 */
6497 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CHILD);
6498 	}
6499 }
6500 
#ifdef PEER_CACHE_RX_PKTS
/*
 * dp_peer_rx_bufq_resources_deinit() - tear down the cached-rx buffer queue
 * @peer: Datapath peer handle
 *
 * Destroys the cached buffer list and its lock. NOTE(review): presumably
 * the queue has been flushed by the caller before this point - confirm.
 */
static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
{
	qdf_list_destroy(&peer->bufq_info.cached_bufq);
	qdf_spinlock_destroy(&peer->bufq_info.bufq_lock);
}
#else
/* Rx packet caching disabled: nothing to tear down */
static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
{
}
#endif
6512 
6513 /*
6514  * dp_peer_detach_wifi3() – Detach txrx peer
6515  * @soc_hdl: soc handle
6516  * @vdev_id: id of dp handle
6517  * @peer_mac: mac of datapath PEER handle
6518  * @bitmap: bitmap indicating special handling of request.
6519  *
6520  */
6521 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
6522 				       uint8_t vdev_id,
6523 				       uint8_t *peer_mac, uint32_t bitmap)
6524 {
6525 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6526 	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
6527 						      0, vdev_id,
6528 						      DP_MOD_ID_CDP);
6529 	struct dp_vdev *vdev = NULL;
6530 
6531 	/* Peer can be null for monitor vap mac address */
6532 	if (!peer) {
6533 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
6534 			  "%s: Invalid peer\n", __func__);
6535 		return QDF_STATUS_E_FAILURE;
6536 	}
6537 
6538 	if (!peer->valid) {
6539 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6540 		dp_err("Invalid peer: "QDF_MAC_ADDR_FMT,
6541 			QDF_MAC_ADDR_REF(peer_mac));
6542 		return QDF_STATUS_E_ALREADY;
6543 	}
6544 
6545 	vdev = peer->vdev;
6546 
6547 	if (!vdev)
6548 		return QDF_STATUS_E_FAILURE;
6549 	peer->valid = 0;
6550 
6551 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
6552 		FL("peer %pK ("QDF_MAC_ADDR_FMT")"),  peer,
6553 		  QDF_MAC_ADDR_REF(peer->mac_addr.raw));
6554 
6555 	dp_local_peer_id_free(peer->vdev->pdev, peer);
6556 
6557 	/* Drop all rx packets before deleting peer */
6558 	dp_clear_peer_internal(soc, peer);
6559 
6560 	dp_peer_rx_bufq_resources_deinit(peer);
6561 
6562 	qdf_spinlock_destroy(&peer->peer_info_lock);
6563 	dp_peer_multipass_list_remove(peer);
6564 
6565 	/* remove the reference to the peer from the hash table */
6566 	dp_peer_find_hash_remove(soc, peer);
6567 
6568 	dp_peer_vdev_list_remove(soc, vdev, peer);
6569 
6570 	/*
6571 	 * Remove the reference added during peer_attach.
6572 	 * The peer will still be left allocated until the
6573 	 * PEER_UNMAP message arrives to remove the other
6574 	 * reference, added by the PEER_MAP message.
6575 	 */
6576 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
6577 	/*
6578 	 * Remove the reference taken above
6579 	 */
6580 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6581 
6582 	return QDF_STATUS_SUCCESS;
6583 }
6584 
6585 /*
6586  * dp_get_vdev_mac_addr_wifi3() – Detach txrx peer
6587  * @soc_hdl: Datapath soc handle
6588  * @vdev_id: virtual interface id
6589  *
6590  * Return: MAC address on success, NULL on failure.
6591  *
6592  */
6593 static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_soc_t *soc_hdl,
6594 					 uint8_t vdev_id)
6595 {
6596 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6597 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
6598 						     DP_MOD_ID_CDP);
6599 	uint8_t *mac = NULL;
6600 
6601 	if (!vdev)
6602 		return NULL;
6603 
6604 	mac = vdev->mac_addr.raw;
6605 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6606 
6607 	return mac;
6608 }
6609 
6610 /*
6611  * dp_vdev_set_wds() - Enable per packet stats
6612  * @soc: DP soc handle
6613  * @vdev_id: id of DP VDEV handle
6614  * @val: value
6615  *
6616  * Return: none
6617  */
6618 static int dp_vdev_set_wds(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
6619 			   uint32_t val)
6620 {
6621 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6622 	struct dp_vdev *vdev =
6623 		dp_vdev_get_ref_by_id((struct dp_soc *)soc, vdev_id,
6624 				      DP_MOD_ID_CDP);
6625 
6626 	if (!vdev)
6627 		return QDF_STATUS_E_FAILURE;
6628 
6629 	vdev->wds_enabled = val;
6630 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6631 
6632 	return QDF_STATUS_SUCCESS;
6633 }
6634 
6635 /*
6636  * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev id of monitor mode
6637  * @soc_hdl: datapath soc handle
6638  * @pdev_id: physical device instance id
6639  *
6640  * Return: virtual interface id
6641  */
6642 static uint8_t dp_get_mon_vdev_from_pdev_wifi3(struct cdp_soc_t *soc_hdl,
6643 					       uint8_t pdev_id)
6644 {
6645 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6646 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
6647 
6648 	if (qdf_unlikely(!pdev))
6649 		return -EINVAL;
6650 
6651 	return pdev->monitor_vdev->vdev_id;
6652 }
6653 
6654 static int dp_get_opmode(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
6655 {
6656 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6657 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
6658 						     DP_MOD_ID_CDP);
6659 	int opmode;
6660 
6661 	if (!vdev) {
6662 		dp_err("vdev for id %d is NULL", vdev_id);
6663 		return -EINVAL;
6664 	}
6665 	opmode = vdev->opmode;
6666 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6667 
6668 	return opmode;
6669 }
6670 
6671 /**
6672  * dp_get_os_rx_handles_from_vdev_wifi3() - Get os rx handles for a vdev
6673  * @soc_hdl: ol_txrx_soc_handle handle
6674  * @vdev_id: vdev id for which os rx handles are needed
6675  * @stack_fn_p: pointer to stack function pointer
6676  * @osif_handle_p: pointer to ol_osif_vdev_handle
6677  *
6678  * Return: void
6679  */
6680 static
6681 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_soc_t *soc_hdl,
6682 					  uint8_t vdev_id,
6683 					  ol_txrx_rx_fp *stack_fn_p,
6684 					  ol_osif_vdev_handle *osif_vdev_p)
6685 {
6686 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6687 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
6688 						     DP_MOD_ID_CDP);
6689 
6690 	if (!vdev)
6691 		return;
6692 
6693 	*stack_fn_p = vdev->osif_rx_stack;
6694 	*osif_vdev_p = vdev->osif_vdev;
6695 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6696 }
6697 
6698 /**
6699  * dp_get_ctrl_pdev_from_vdev() - Get control pdev of vdev
6700  * @soc_hdl: datapath soc handle
6701  * @vdev_id: virtual device/interface id
6702  *
6703  * Return: Handle to control pdev
6704  */
6705 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(
6706 						struct cdp_soc_t *soc_hdl,
6707 						uint8_t vdev_id)
6708 {
6709 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6710 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
6711 						     DP_MOD_ID_CDP);
6712 	struct dp_pdev *pdev;
6713 
6714 	if (!vdev)
6715 		return NULL;
6716 
6717 	pdev = vdev->pdev;
6718 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6719 	return pdev ? (struct cdp_cfg *)pdev->wlan_cfg_ctx : NULL;
6720 }
6721 
6722 /**
6723  * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
6724  *                                 ring based on target
6725  * @soc: soc handle
6726  * @mac_for_pdev: WIN- pdev_id, MCL- mac id
6727  * @pdev: physical device handle
6728  * @ring_num: mac id
6729  * @htt_tlv_filter: tlv filter
6730  *
6731  * Return: zero on success, non-zero on failure
6732  */
6733 static inline
6734 QDF_STATUS dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
6735 				       struct dp_pdev *pdev, uint8_t ring_num,
6736 				       struct htt_rx_ring_tlv_filter htt_tlv_filter)
6737 {
6738 	QDF_STATUS status;
6739 
6740 	if (soc->wlan_cfg_ctx->rxdma1_enable)
6741 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6742 					     soc->rxdma_mon_buf_ring[ring_num]
6743 					     .hal_srng,
6744 					     RXDMA_MONITOR_BUF,
6745 					     RX_MONITOR_BUFFER_SIZE,
6746 					     &htt_tlv_filter);
6747 	else
6748 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6749 					     pdev->rx_mac_buf_ring[ring_num]
6750 					     .hal_srng,
6751 					     RXDMA_BUF, RX_DATA_BUFFER_SIZE,
6752 					     &htt_tlv_filter);
6753 
6754 	return status;
6755 }
6756 
/* Clear all m-copy state on the pdev: disable m-copy mode and mark the
 * monitor path as unconfigured with no monitor vdev attached.
 */
static inline void
dp_pdev_disable_mcopy_code(struct dp_pdev *pdev)
{
	pdev->mcopy_mode = M_COPY_DISABLED;
	pdev->monitor_configured = false;
	pdev->monitor_vdev = NULL;
}
6764 
6765 /**
6766  * dp_reset_monitor_mode() - Disable monitor mode
6767  * @soc_hdl: Datapath soc handle
6768  * @pdev_id: id of datapath PDEV handle
6769  *
6770  * Return: QDF_STATUS
6771  */
6772 QDF_STATUS dp_reset_monitor_mode(struct cdp_soc_t *soc_hdl,
6773 				 uint8_t pdev_id,
6774 				 uint8_t special_monitor)
6775 {
6776 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6777 	struct dp_pdev *pdev =
6778 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
6779 						   pdev_id);
6780 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6781 
6782 	if (!pdev)
6783 		return QDF_STATUS_E_FAILURE;
6784 
6785 	qdf_spin_lock_bh(&pdev->mon_lock);
6786 
6787 	dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_DISABLE);
6788 	pdev->monitor_vdev = NULL;
6789 	pdev->monitor_configured = false;
6790 
6791 	/*
6792 	 * Lite monitor mode, smart monitor mode and monitor
6793 	 * mode uses this APIs to filter reset and mode disable
6794 	 */
6795 	if (pdev->mcopy_mode) {
6796 #if defined(FEATURE_PERPKT_INFO)
6797 		dp_pdev_disable_mcopy_code(pdev);
6798 		dp_mon_filter_reset_mcopy_mode(pdev);
6799 #endif /* FEATURE_PERPKT_INFO */
6800 	} else if (special_monitor) {
6801 #if defined(ATH_SUPPORT_NAC)
6802 		dp_mon_filter_reset_smart_monitor(pdev);
6803 #endif /* ATH_SUPPORT_NAC */
6804 	} else {
6805 		dp_mon_filter_reset_mon_mode(pdev);
6806 	}
6807 
6808 	status = dp_mon_filter_update(pdev);
6809 	if (status != QDF_STATUS_SUCCESS) {
6810 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6811 			  FL("Failed to reset monitor filters"));
6812 	}
6813 
6814 	qdf_spin_unlock_bh(&pdev->mon_lock);
6815 	return QDF_STATUS_SUCCESS;
6816 }
6817 
6818 /**
6819  * dp_get_tx_pending() - read pending tx
6820  * @pdev_handle: Datapath PDEV handle
6821  *
6822  * Return: outstanding tx
6823  */
6824 static uint32_t dp_get_tx_pending(struct cdp_pdev *pdev_handle)
6825 {
6826 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6827 
6828 	return qdf_atomic_read(&pdev->num_tx_outstanding);
6829 }
6830 
6831 /**
6832  * dp_get_peer_mac_from_peer_id() - get peer mac
6833  * @pdev_handle: Datapath PDEV handle
6834  * @peer_id: Peer ID
6835  * @peer_mac: MAC addr of PEER
6836  *
6837  * Return: QDF_STATUS
6838  */
6839 static QDF_STATUS dp_get_peer_mac_from_peer_id(struct cdp_soc_t *soc,
6840 					       uint32_t peer_id,
6841 					       uint8_t *peer_mac)
6842 {
6843 	struct dp_peer *peer;
6844 
6845 	if (soc && peer_mac) {
6846 		peer = dp_peer_get_ref_by_id((struct dp_soc *)soc,
6847 					     (uint16_t)peer_id,
6848 					     DP_MOD_ID_CDP);
6849 		if (peer) {
6850 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
6851 				     QDF_MAC_ADDR_SIZE);
6852 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6853 			return QDF_STATUS_SUCCESS;
6854 		}
6855 	}
6856 
6857 	return QDF_STATUS_E_FAILURE;
6858 }
6859 
6860 /**
6861  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
6862  * @vdev_handle: Datapath VDEV handle
6863  * @smart_monitor: Flag to denote if its smart monitor mode
6864  *
6865  * Return: 0 on success, not 0 on failure
6866  */
6867 static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_soc_t *soc_hdl,
6868 					   uint8_t vdev_id,
6869 					   uint8_t special_monitor)
6870 {
6871 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6872 	uint32_t mac_id;
6873 	uint32_t mac_for_pdev;
6874 	struct dp_pdev *pdev;
6875 	uint32_t num_entries;
6876 	struct dp_srng *mon_buf_ring;
6877 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
6878 						     DP_MOD_ID_CDP);
6879 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6880 
6881 	if (!vdev)
6882 		return QDF_STATUS_E_FAILURE;
6883 
6884 	pdev = vdev->pdev;
6885 	pdev->monitor_vdev = vdev;
6886 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
6887 		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
6888 		  pdev, pdev->pdev_id, pdev->soc, vdev);
6889 
6890 	/*
6891 	 * do not configure monitor buf ring and filter for smart and
6892 	 * lite monitor
6893 	 * for smart monitor filters are added along with first NAC
6894 	 * for lite monitor required configuration done through
6895 	 * dp_set_pdev_param
6896 	 */
6897 	if (special_monitor) {
6898 		status = QDF_STATUS_SUCCESS;
6899 		goto fail;
6900 	}
6901 
6902 	/*Check if current pdev's monitor_vdev exists */
6903 	if (pdev->monitor_configured) {
6904 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
6905 			  "monitor vap already created vdev=%pK\n", vdev);
6906 		status = QDF_STATUS_E_RESOURCES;
6907 		goto fail;
6908 	}
6909 
6910 	pdev->monitor_configured = true;
6911 
6912 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6913 		mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id,
6914 							  pdev->pdev_id);
6915 		dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev,
6916 						 FALSE);
6917 		/*
6918 		 * Configure low interrupt threshld when monitor mode is
6919 		 * configured.
6920 		 */
6921 		mon_buf_ring = &pdev->soc->rxdma_mon_buf_ring[mac_for_pdev];
6922 		if (mon_buf_ring->hal_srng) {
6923 			num_entries = mon_buf_ring->num_entries;
6924 			hal_set_low_threshold(mon_buf_ring->hal_srng,
6925 					      num_entries >> 3);
6926 			htt_srng_setup(pdev->soc->htt_handle,
6927 				       pdev->pdev_id,
6928 				       mon_buf_ring->hal_srng,
6929 				       RXDMA_MONITOR_BUF);
6930 		}
6931 	}
6932 
6933 	dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_ENABLE);
6934 
6935 	dp_mon_filter_setup_mon_mode(pdev);
6936 	status = dp_mon_filter_update(pdev);
6937 	if (status != QDF_STATUS_SUCCESS) {
6938 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6939 			  FL("Failed to reset monitor filters"));
6940 		dp_mon_filter_reset_mon_mode(pdev);
6941 		pdev->monitor_configured = false;
6942 		pdev->monitor_vdev = NULL;
6943 	}
6944 
6945 fail:
6946 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6947 	return status;
6948 }
6949 
6950 /**
6951  * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
6952  * @soc: soc handle
6953  * @pdev_id: id of Datapath PDEV handle
6954  * @filter_val: Flag to select Filter for monitor mode
6955  * Return: 0 on success, not 0 on failure
6956  */
6957 static QDF_STATUS
6958 dp_pdev_set_advance_monitor_filter(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
6959 				   struct cdp_monitor_filter *filter_val)
6960 {
6961 	/* Many monitor VAPs can exists in a system but only one can be up at
6962 	 * anytime
6963 	 */
6964 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6965 	struct dp_vdev *vdev;
6966 	struct dp_pdev *pdev =
6967 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
6968 						   pdev_id);
6969 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6970 
6971 	if (!pdev)
6972 		return QDF_STATUS_E_FAILURE;
6973 
6974 	vdev = pdev->monitor_vdev;
6975 
6976 	if (!vdev)
6977 		return QDF_STATUS_E_FAILURE;
6978 
6979 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
6980 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
6981 		pdev, pdev_id, soc, vdev);
6982 
6983 	/*Check if current pdev's monitor_vdev exists */
6984 	if (!pdev->monitor_vdev) {
6985 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6986 			"vdev=%pK", vdev);
6987 		qdf_assert(vdev);
6988 	}
6989 
6990 	/* update filter mode, type in pdev structure */
6991 	pdev->mon_filter_mode = filter_val->mode;
6992 	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
6993 	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
6994 	pdev->fp_data_filter = filter_val->fp_data;
6995 	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
6996 	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
6997 	pdev->mo_data_filter = filter_val->mo_data;
6998 
6999 	dp_mon_filter_setup_mon_mode(pdev);
7000 	status = dp_mon_filter_update(pdev);
7001 	if (status != QDF_STATUS_SUCCESS) {
7002 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7003 			  FL("Failed to set filter for advance mon mode"));
7004 		dp_mon_filter_reset_mon_mode(pdev);
7005 	}
7006 
7007 	return status;
7008 }
7009 
7010 /**
7011  * dp_deliver_tx_mgmt() - Deliver mgmt frame for tx capture
7012  * @cdp_soc : data path soc handle
7013  * @pdev_id : pdev_id
7014  * @nbuf: Management frame buffer
7015  */
7016 static QDF_STATUS
7017 dp_deliver_tx_mgmt(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, qdf_nbuf_t nbuf)
7018 {
7019 	struct dp_pdev *pdev =
7020 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
7021 						   pdev_id);
7022 
7023 	if (!pdev)
7024 		return QDF_STATUS_E_FAILURE;
7025 
7026 	dp_deliver_mgmt_frm(pdev, nbuf);
7027 
7028 	return QDF_STATUS_SUCCESS;
7029 }
7030 
7031 /**
7032  * dp_set_bsscolor() - sets bsscolor for tx capture
7033  * @pdev: Datapath PDEV handle
7034  * @bsscolor: new bsscolor
7035  */
7036 static void
7037 dp_mon_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor)
7038 {
7039 	pdev->rx_mon_recv_status.bsscolor = bsscolor;
7040 }
7041 
7042 /**
7043  * dp_pdev_get_filter_ucast_data() - get DP PDEV monitor ucast filter
7044  * @soc : data path soc handle
7045  * @pdev_id : pdev_id
7046  * Return: true on ucast filter flag set
7047  */
7048 static bool dp_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle)
7049 {
7050 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7051 
7052 	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
7053 	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
7054 		return true;
7055 
7056 	return false;
7057 }
7058 
7059 /**
7060  * dp_pdev_get_filter_mcast_data() - get DP PDEV monitor mcast filter
7061  * @pdev_handle: Datapath PDEV handle
7062  * Return: true on mcast filter flag set
7063  */
7064 static bool dp_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle)
7065 {
7066 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7067 
7068 	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
7069 	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
7070 		return true;
7071 
7072 	return false;
7073 }
7074 
7075 /**
7076  * dp_pdev_get_filter_non_data() - get DP PDEV monitor non_data filter
7077  * @pdev_handle: Datapath PDEV handle
7078  * Return: true on non data filter flag set
7079  */
7080 static bool dp_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle)
7081 {
7082 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7083 
7084 	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
7085 	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
7086 		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
7087 		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
7088 			return true;
7089 		}
7090 	}
7091 
7092 	return false;
7093 }
7094 
7095 #ifdef MESH_MODE_SUPPORT
/*
 * dp_vdev_set_mesh_mode() - enable/disable mesh mode on a vdev
 * @vdev_hdl: virtual device object
 * @val: value to be set
 *
 * Return: void
 */
static
void dp_vdev_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
{
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
		FL("val %d"), val);
	vdev->mesh_vdev = val;
}
7105 
7106 /*
7107  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
7108  * @vdev_hdl: virtual device object
7109  * @val: value to be set
7110  *
7111  * Return: void
7112  */
7113 static
7114 void dp_vdev_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
7115 {
7116 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
7117 
7118 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7119 		FL("val %d"), val);
7120 	vdev->mesh_rx_filter = val;
7121 }
7122 #endif
7123 
7124 #ifdef VDEV_PEER_PROTOCOL_COUNT
7125 static void dp_enable_vdev_peer_protocol_count(struct cdp_soc_t *soc_hdl,
7126 					       int8_t vdev_id,
7127 					       bool enable)
7128 {
7129 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7130 	struct dp_vdev *vdev;
7131 
7132 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
7133 	if (!vdev)
7134 		return;
7135 
7136 	dp_info("enable %d vdev_id %d", enable, vdev_id);
7137 	vdev->peer_protocol_count_track = enable;
7138 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7139 }
7140 
7141 static void dp_enable_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
7142 						   int8_t vdev_id,
7143 						   int drop_mask)
7144 {
7145 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7146 	struct dp_vdev *vdev;
7147 
7148 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
7149 	if (!vdev)
7150 		return;
7151 
7152 	dp_info("drop_mask %d vdev_id %d", drop_mask, vdev_id);
7153 	vdev->peer_protocol_count_dropmask = drop_mask;
7154 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7155 }
7156 
7157 static int dp_is_vdev_peer_protocol_count_enabled(struct cdp_soc_t *soc_hdl,
7158 						  int8_t vdev_id)
7159 {
7160 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7161 	struct dp_vdev *vdev;
7162 	int peer_protocol_count_track;
7163 
7164 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
7165 	if (!vdev)
7166 		return 0;
7167 
7168 	dp_info("enable %d vdev_id %d", vdev->peer_protocol_count_track,
7169 		vdev_id);
7170 	peer_protocol_count_track =
7171 		vdev->peer_protocol_count_track;
7172 
7173 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7174 	return peer_protocol_count_track;
7175 }
7176 
7177 static int dp_get_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
7178 					       int8_t vdev_id)
7179 {
7180 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7181 	struct dp_vdev *vdev;
7182 	int peer_protocol_count_dropmask;
7183 
7184 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
7185 	if (!vdev)
7186 		return 0;
7187 
7188 	dp_info("drop_mask %d vdev_id %d", vdev->peer_protocol_count_dropmask,
7189 		vdev_id);
7190 	peer_protocol_count_dropmask =
7191 		vdev->peer_protocol_count_dropmask;
7192 
7193 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7194 	return peer_protocol_count_dropmask;
7195 }
7196 
7197 #endif
7198 
7199 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data)
7200 {
7201 	uint8_t pdev_count;
7202 
7203 	for (pdev_count = 0; pdev_count < MAX_PDEV_CNT; pdev_count++) {
7204 		if (soc->pdev_list[pdev_count] &&
7205 		    soc->pdev_list[pdev_count] == data)
7206 			return true;
7207 	}
7208 	return false;
7209 }
7210 
7211 /**
7212  * dp_rx_bar_stats_cb(): BAR received stats callback
7213  * @soc: SOC handle
7214  * @cb_ctxt: Call back context
7215  * @reo_status: Reo status
7216  *
7217  * return: void
7218  */
7219 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
7220 	union hal_reo_status *reo_status)
7221 {
7222 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
7223 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
7224 
7225 	if (!dp_check_pdev_exists(soc, pdev)) {
7226 		dp_err_rl("pdev doesn't exist");
7227 		return;
7228 	}
7229 
7230 	if (!qdf_atomic_read(&soc->cmn_init_done))
7231 		return;
7232 
7233 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
7234 		DP_PRINT_STATS("REO stats failure %d",
7235 			       queue_status->header.status);
7236 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
7237 		return;
7238 	}
7239 
7240 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
7241 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
7242 
7243 }
7244 
7245 /**
7246  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
7247  * @vdev: DP VDEV handle
7248  *
7249  * return: void
7250  */
7251 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
7252 			     struct cdp_vdev_stats *vdev_stats)
7253 {
7254 	struct dp_soc *soc = NULL;
7255 
7256 	if (!vdev || !vdev->pdev)
7257 		return;
7258 
7259 	soc = vdev->pdev->soc;
7260 
7261 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
7262 
7263 	dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, vdev_stats,
7264 			     DP_MOD_ID_GENERIC_STATS);
7265 
7266 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7267 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
7268 			     vdev_stats, vdev->vdev_id,
7269 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
7270 #endif
7271 }
7272 
7273 void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
7274 {
7275 	struct dp_vdev *vdev = NULL;
7276 	struct dp_soc *soc;
7277 	struct cdp_vdev_stats *vdev_stats =
7278 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
7279 
7280 	if (!vdev_stats) {
7281 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7282 			  "DP alloc failure - unable to get alloc vdev stats");
7283 		return;
7284 	}
7285 
7286 	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
7287 	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
7288 	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
7289 
7290 	if (pdev->mcopy_mode)
7291 		DP_UPDATE_STATS(pdev, pdev->invalid_peer);
7292 
7293 	soc = pdev->soc;
7294 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
7295 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
7296 
7297 		dp_aggregate_vdev_stats(vdev, vdev_stats);
7298 		dp_update_pdev_stats(pdev, vdev_stats);
7299 		dp_update_pdev_ingress_stats(pdev, vdev);
7300 	}
7301 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
7302 	qdf_mem_free(vdev_stats);
7303 
7304 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7305 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
7306 			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
7307 #endif
7308 }
7309 
7310 /**
7311  * dp_vdev_getstats() - get vdev packet level stats
7312  * @vdev_handle: Datapath VDEV handle
7313  * @stats: cdp network device stats structure
7314  *
7315  * Return: QDF_STATUS
7316  */
7317 static QDF_STATUS dp_vdev_getstats(struct cdp_vdev *vdev_handle,
7318 				   struct cdp_dev_stats *stats)
7319 {
7320 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7321 	struct dp_pdev *pdev;
7322 	struct dp_soc *soc;
7323 	struct cdp_vdev_stats *vdev_stats;
7324 
7325 	if (!vdev)
7326 		return QDF_STATUS_E_FAILURE;
7327 
7328 	pdev = vdev->pdev;
7329 	if (!pdev)
7330 		return QDF_STATUS_E_FAILURE;
7331 
7332 	soc = pdev->soc;
7333 
7334 	vdev_stats = qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
7335 
7336 	if (!vdev_stats) {
7337 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7338 			  "DP alloc failure - unable to get alloc vdev stats");
7339 		return QDF_STATUS_E_FAILURE;
7340 	}
7341 
7342 	dp_aggregate_vdev_stats(vdev, vdev_stats);
7343 
7344 	stats->tx_packets = vdev_stats->tx_i.rcvd.num;
7345 	stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
7346 
7347 	stats->tx_errors = vdev_stats->tx.tx_failed +
7348 		vdev_stats->tx_i.dropped.dropped_pkt.num;
7349 	stats->tx_dropped = stats->tx_errors;
7350 
7351 	stats->rx_packets = vdev_stats->rx.unicast.num +
7352 		vdev_stats->rx.multicast.num +
7353 		vdev_stats->rx.bcast.num;
7354 	stats->rx_bytes = vdev_stats->rx.unicast.bytes +
7355 		vdev_stats->rx.multicast.bytes +
7356 		vdev_stats->rx.bcast.bytes;
7357 
7358 	qdf_mem_free(vdev_stats);
7359 
7360 	return QDF_STATUS_SUCCESS;
7361 }
7362 
7363 /**
7364  * dp_pdev_getstats() - get pdev packet level stats
7365  * @pdev_handle: Datapath PDEV handle
7366  * @stats: cdp network device stats structure
7367  *
7368  * Return: QDF_STATUS
7369  */
7370 static void dp_pdev_getstats(struct cdp_pdev *pdev_handle,
7371 			     struct cdp_dev_stats *stats)
7372 {
7373 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7374 
7375 	dp_aggregate_pdev_stats(pdev);
7376 
7377 	stats->tx_packets = pdev->stats.tx_i.rcvd.num;
7378 	stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
7379 
7380 	stats->tx_errors = pdev->stats.tx.tx_failed +
7381 		pdev->stats.tx_i.dropped.dropped_pkt.num;
7382 	stats->tx_dropped = stats->tx_errors;
7383 
7384 	stats->rx_packets = pdev->stats.rx.unicast.num +
7385 		pdev->stats.rx.multicast.num +
7386 		pdev->stats.rx.bcast.num;
7387 	stats->rx_bytes = pdev->stats.rx.unicast.bytes +
7388 		pdev->stats.rx.multicast.bytes +
7389 		pdev->stats.rx.bcast.bytes;
7390 	stats->rx_errors = pdev->stats.err.desc_alloc_fail +
7391 		pdev->stats.err.ip_csum_err +
7392 		pdev->stats.err.tcp_udp_csum_err +
7393 		pdev->stats.rx.err.mic_err +
7394 		pdev->stats.rx.err.decrypt_err +
7395 		pdev->stats.err.rxdma_error +
7396 		pdev->stats.err.reo_error;
7397 	stats->rx_dropped = pdev->stats.dropped.msdu_not_done +
7398 		pdev->stats.dropped.mec +
7399 		pdev->stats.dropped.mesh_filter +
7400 		pdev->stats.dropped.wifi_parse +
7401 		pdev->stats.dropped.mon_rx_drop +
7402 		pdev->stats.dropped.mon_radiotap_update_err;
7403 }
7404 
7405 /**
7406  * dp_get_device_stats() - get interface level packet stats
7407  * @soc: soc handle
7408  * @id : vdev_id or pdev_id based on type
7409  * @stats: cdp network device stats structure
7410  * @type: device type pdev/vdev
7411  *
7412  * Return: QDF_STATUS
7413  */
7414 static QDF_STATUS dp_get_device_stats(struct cdp_soc_t *soc_hdl, uint8_t id,
7415 				      struct cdp_dev_stats *stats,
7416 				      uint8_t type)
7417 {
7418 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7419 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
7420 	struct dp_vdev *vdev;
7421 
7422 	switch (type) {
7423 	case UPDATE_VDEV_STATS:
7424 		vdev = dp_vdev_get_ref_by_id(soc, id, DP_MOD_ID_CDP);
7425 
7426 		if (vdev) {
7427 			status = dp_vdev_getstats((struct cdp_vdev *)vdev,
7428 						  stats);
7429 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7430 		}
7431 		return status;
7432 	case UPDATE_PDEV_STATS:
7433 		{
7434 			struct dp_pdev *pdev =
7435 				dp_get_pdev_from_soc_pdev_id_wifi3(
7436 						(struct dp_soc *)soc,
7437 						 id);
7438 			if (pdev) {
7439 				dp_pdev_getstats((struct cdp_pdev *)pdev,
7440 						 stats);
7441 				return QDF_STATUS_SUCCESS;
7442 			}
7443 		}
7444 		break;
7445 	default:
7446 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7447 			"apstats cannot be updated for this input "
7448 			"type %d", type);
7449 		break;
7450 	}
7451 
7452 	return QDF_STATUS_E_FAILURE;
7453 }
7454 
/**
 * dp_srng_get_str_from_hal_ring_type() - get printable name for a ring type
 * @ring_type: hal ring type
 *
 * Return: constant string naming the ring, "Invalid" for unknown types
 */
const
char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
{
	switch (ring_type) {
	case REO_DST:
		return "Reo_dst";
	case REO_EXCEPTION:
		return "Reo_exception";
	case REO_CMD:
		return "Reo_cmd";
	case REO_REINJECT:
		return "Reo_reinject";
	case REO_STATUS:
		return "Reo_status";
	case WBM2SW_RELEASE:
		return "wbm2sw_release";
	case TCL_DATA:
		return "tcl_data";
	case TCL_CMD_CREDIT:
		return "tcl_cmd_credit";
	case TCL_STATUS:
		return "tcl_status";
	case SW2WBM_RELEASE:
		return "sw2wbm_release";
	case RXDMA_BUF:
		return "Rxdma_buf";
	case RXDMA_DST:
		return "Rxdma_dst";
	case RXDMA_MONITOR_BUF:
		return "Rxdma_monitor_buf";
	case RXDMA_MONITOR_DESC:
		return "Rxdma_monitor_desc";
	case RXDMA_MONITOR_STATUS:
		return "Rxdma_monitor_status";
	default:
		dp_err("Invalid ring type");
		break;
	}
	return "Invalid";
}
7495 
7496 /*
7497  * dp_print_napi_stats(): NAPI stats
7498  * @soc - soc handle
7499  */
7500 void dp_print_napi_stats(struct dp_soc *soc)
7501 {
7502 	hif_print_napi_stats(soc->hif_handle);
7503 }
7504 
7505 /**
7506  * dp_txrx_host_peer_stats_clr): Reinitialize the txrx peer stats
7507  * @soc: Datapath soc
7508  * @peer: Datatpath peer
7509  * @arg: argument to iter function
7510  *
7511  * Return: QDF_STATUS
7512  */
7513 static inline void
7514 dp_txrx_host_peer_stats_clr(struct dp_soc *soc,
7515 			    struct dp_peer *peer,
7516 			    void *arg)
7517 {
7518 	struct dp_rx_tid *rx_tid;
7519 	uint8_t tid;
7520 
7521 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
7522 		rx_tid = &peer->rx_tid[tid];
7523 		DP_STATS_CLR(rx_tid);
7524 	}
7525 	DP_STATS_CLR(peer);
7526 
7527 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7528 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, peer->vdev->pdev->soc,
7529 			     &peer->stats,  peer->peer_id,
7530 			     UPDATE_PEER_STATS, peer->vdev->pdev->pdev_id);
7531 #endif
7532 }
7533 
7534 /**
7535  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
7536  * @vdev: DP_VDEV handle
7537  * @dp_soc: DP_SOC handle
7538  *
7539  * Return: QDF_STATUS
7540  */
7541 static inline QDF_STATUS
7542 dp_txrx_host_stats_clr(struct dp_vdev *vdev, struct dp_soc *soc)
7543 {
7544 	if (!vdev || !vdev->pdev)
7545 		return QDF_STATUS_E_FAILURE;
7546 
7547 	/*
7548 	 * if NSS offload is enabled, then send message
7549 	 * to NSS FW to clear the stats. Once NSS FW clears the statistics
7550 	 * then clear host statistics.
7551 	 */
7552 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
7553 		if (soc->cdp_soc.ol_ops->nss_stats_clr)
7554 			soc->cdp_soc.ol_ops->nss_stats_clr(soc->ctrl_psoc,
7555 							   vdev->vdev_id);
7556 	}
7557 
7558 	DP_STATS_CLR(vdev->pdev);
7559 	DP_STATS_CLR(vdev->pdev->soc);
7560 	DP_STATS_CLR(vdev);
7561 
7562 	hif_clear_napi_stats(vdev->pdev->soc->hif_handle);
7563 
7564 	dp_vdev_iterate_peer(vdev, dp_txrx_host_peer_stats_clr, NULL,
7565 			     DP_MOD_ID_GENERIC_STATS);
7566 
7567 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7568 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
7569 			     &vdev->stats,  vdev->vdev_id,
7570 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
7571 #endif
7572 	return QDF_STATUS_SUCCESS;
7573 }
7574 
7575 /*
7576  * dp_get_host_peer_stats()- function to print peer stats
7577  * @soc: dp_soc handle
7578  * @mac_addr: mac address of the peer
7579  *
7580  * Return: QDF_STATUS
7581  */
7582 static QDF_STATUS
7583 dp_get_host_peer_stats(struct cdp_soc_t *soc, uint8_t *mac_addr)
7584 {
7585 	struct dp_peer *peer = NULL;
7586 
7587 	if (!mac_addr) {
7588 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7589 			  "%s: NULL peer mac addr\n", __func__);
7590 		return QDF_STATUS_E_FAILURE;
7591 	}
7592 
7593 	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
7594 				      mac_addr, 0,
7595 				      DP_VDEV_ALL,
7596 				      DP_MOD_ID_CDP);
7597 	if (!peer) {
7598 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7599 			  "%s: Invalid peer\n", __func__);
7600 		return QDF_STATUS_E_FAILURE;
7601 	}
7602 
7603 	dp_print_peer_stats(peer);
7604 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
7605 
7606 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7607 
7608 	return QDF_STATUS_SUCCESS;
7609 }
7610 
7611 /**
7612  * dp_txrx_stats_help() - Helper function for Txrx_Stats
7613  *
7614  * Return: None
7615  */
7616 static void dp_txrx_stats_help(void)
7617 {
7618 	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
7619 	dp_info("stats_option:");
7620 	dp_info("  1 -- HTT Tx Statistics");
7621 	dp_info("  2 -- HTT Rx Statistics");
7622 	dp_info("  3 -- HTT Tx HW Queue Statistics");
7623 	dp_info("  4 -- HTT Tx HW Sched Statistics");
7624 	dp_info("  5 -- HTT Error Statistics");
7625 	dp_info("  6 -- HTT TQM Statistics");
7626 	dp_info("  7 -- HTT TQM CMDQ Statistics");
7627 	dp_info("  8 -- HTT TX_DE_CMN Statistics");
7628 	dp_info("  9 -- HTT Tx Rate Statistics");
7629 	dp_info(" 10 -- HTT Rx Rate Statistics");
7630 	dp_info(" 11 -- HTT Peer Statistics");
7631 	dp_info(" 12 -- HTT Tx SelfGen Statistics");
7632 	dp_info(" 13 -- HTT Tx MU HWQ Statistics");
7633 	dp_info(" 14 -- HTT RING_IF_INFO Statistics");
7634 	dp_info(" 15 -- HTT SRNG Statistics");
7635 	dp_info(" 16 -- HTT SFM Info Statistics");
7636 	dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
7637 	dp_info(" 18 -- HTT Peer List Details");
7638 	dp_info(" 20 -- Clear Host Statistics");
7639 	dp_info(" 21 -- Host Rx Rate Statistics");
7640 	dp_info(" 22 -- Host Tx Rate Statistics");
7641 	dp_info(" 23 -- Host Tx Statistics");
7642 	dp_info(" 24 -- Host Rx Statistics");
7643 	dp_info(" 25 -- Host AST Statistics");
7644 	dp_info(" 26 -- Host SRNG PTR Statistics");
7645 	dp_info(" 27 -- Host Mon Statistics");
7646 	dp_info(" 28 -- Host REO Queue Statistics");
7647 	dp_info(" 29 -- Host Soc cfg param Statistics");
7648 	dp_info(" 30 -- Host pdev cfg param Statistics");
7649 	dp_info(" 31 -- Host FISA stats");
7650 	dp_info(" 32 -- Host Register Work stats");
7651 }
7652 
7653 /**
7654  * dp_print_host_stats()- Function to print the stats aggregated at host
7655  * @vdev_handle: DP_VDEV handle
7656  * @req: host stats type
7657  * @soc: dp soc handler
7658  *
7659  * Return: 0 on success, print error message in case of failure
7660  */
7661 static int
7662 dp_print_host_stats(struct dp_vdev *vdev,
7663 		    struct cdp_txrx_stats_req *req,
7664 		    struct dp_soc *soc)
7665 {
7666 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
7667 	enum cdp_host_txrx_stats type =
7668 			dp_stats_mapping_table[req->stats][STATS_HOST];
7669 
7670 	dp_aggregate_pdev_stats(pdev);
7671 
7672 	switch (type) {
7673 	case TXRX_CLEAR_STATS:
7674 		dp_txrx_host_stats_clr(vdev, soc);
7675 		break;
7676 	case TXRX_RX_RATE_STATS:
7677 		dp_print_rx_rates(vdev);
7678 		break;
7679 	case TXRX_TX_RATE_STATS:
7680 		dp_print_tx_rates(vdev);
7681 		break;
7682 	case TXRX_TX_HOST_STATS:
7683 		dp_print_pdev_tx_stats(pdev);
7684 		dp_print_soc_tx_stats(pdev->soc);
7685 		break;
7686 	case TXRX_RX_HOST_STATS:
7687 		dp_print_pdev_rx_stats(pdev);
7688 		dp_print_soc_rx_stats(pdev->soc);
7689 		break;
7690 	case TXRX_AST_STATS:
7691 		dp_print_ast_stats(pdev->soc);
7692 		dp_print_peer_table(vdev);
7693 		break;
7694 	case TXRX_SRNG_PTR_STATS:
7695 		dp_print_ring_stats(pdev);
7696 		break;
7697 	case TXRX_RX_MON_STATS:
7698 		dp_print_pdev_rx_mon_stats(pdev);
7699 		break;
7700 	case TXRX_REO_QUEUE_STATS:
7701 		dp_get_host_peer_stats((struct cdp_soc_t *)pdev->soc,
7702 				       req->peer_addr);
7703 		break;
7704 	case TXRX_SOC_CFG_PARAMS:
7705 		dp_print_soc_cfg_params(pdev->soc);
7706 		break;
7707 	case TXRX_PDEV_CFG_PARAMS:
7708 		dp_print_pdev_cfg_params(pdev);
7709 		break;
7710 	case TXRX_NAPI_STATS:
7711 		dp_print_napi_stats(pdev->soc);
7712 		break;
7713 	case TXRX_SOC_INTERRUPT_STATS:
7714 		dp_print_soc_interrupt_stats(pdev->soc);
7715 		break;
7716 	case TXRX_SOC_FSE_STATS:
7717 		dp_rx_dump_fisa_table(pdev->soc);
7718 		break;
7719 	case TXRX_HAL_REG_WRITE_STATS:
7720 		hal_dump_reg_write_stats(pdev->soc->hal_soc);
7721 		hal_dump_reg_write_srng_stats(pdev->soc->hal_soc);
7722 		break;
7723 	default:
7724 		dp_info("Wrong Input For TxRx Host Stats");
7725 		dp_txrx_stats_help();
7726 		break;
7727 	}
7728 	return 0;
7729 }
7730 
7731 /*
7732  * is_ppdu_txrx_capture_enabled() - API to check both pktlog and debug_sniffer
7733  *                              modes are enabled or not.
7734  * @dp_pdev: dp pdev handle.
7735  *
7736  * Return: bool
7737  */
7738 static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
7739 {
7740 	if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
7741 	    !pdev->mcopy_mode)
7742 		return true;
7743 	else
7744 		return false;
7745 }
7746 
7747 /*
7748  *dp_set_bpr_enable() - API to enable/disable bpr feature
7749  *@pdev_handle: DP_PDEV handle.
7750  *@val: Provided value.
7751  *
7752  *Return: 0 for success. nonzero for failure.
7753  */
7754 static QDF_STATUS
7755 dp_set_bpr_enable(struct dp_pdev *pdev, int val)
7756 {
7757 	switch (val) {
7758 	case CDP_BPR_DISABLE:
7759 		pdev->bpr_enable = CDP_BPR_DISABLE;
7760 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
7761 		    !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
7762 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7763 		} else if (pdev->enhanced_stats_en &&
7764 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
7765 			   !pdev->pktlog_ppdu_stats) {
7766 			dp_h2t_cfg_stats_msg_send(pdev,
7767 						  DP_PPDU_STATS_CFG_ENH_STATS,
7768 						  pdev->pdev_id);
7769 		}
7770 		break;
7771 	case CDP_BPR_ENABLE:
7772 		pdev->bpr_enable = CDP_BPR_ENABLE;
7773 		if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
7774 		    !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
7775 			dp_h2t_cfg_stats_msg_send(pdev,
7776 						  DP_PPDU_STATS_CFG_BPR,
7777 						  pdev->pdev_id);
7778 		} else if (pdev->enhanced_stats_en &&
7779 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
7780 			   !pdev->pktlog_ppdu_stats) {
7781 			dp_h2t_cfg_stats_msg_send(pdev,
7782 						  DP_PPDU_STATS_CFG_BPR_ENH,
7783 						  pdev->pdev_id);
7784 		} else if (pdev->pktlog_ppdu_stats) {
7785 			dp_h2t_cfg_stats_msg_send(pdev,
7786 						  DP_PPDU_STATS_CFG_BPR_PKTLOG,
7787 						  pdev->pdev_id);
7788 		}
7789 		break;
7790 	default:
7791 		break;
7792 	}
7793 
7794 	return QDF_STATUS_SUCCESS;
7795 }
7796 
7797 /*
7798  * dp_pdev_tid_stats_ingress_inc
7799  * @pdev: pdev handle
7800  * @val: increase in value
7801  *
7802  * Return: void
7803  */
7804 static void
7805 dp_pdev_tid_stats_ingress_inc(struct dp_pdev *pdev, uint32_t val)
7806 {
7807 	pdev->stats.tid_stats.ingress_stack += val;
7808 }
7809 
7810 /*
7811  * dp_pdev_tid_stats_osif_drop
7812  * @pdev: pdev handle
7813  * @val: increase in value
7814  *
7815  * Return: void
7816  */
7817 static void
7818 dp_pdev_tid_stats_osif_drop(struct dp_pdev *pdev, uint32_t val)
7819 {
7820 	pdev->stats.tid_stats.osif_drop += val;
7821 }
7822 
7823 /*
7824  * dp_config_debug_sniffer()- API to enable/disable debug sniffer
7825  * @pdev: DP_PDEV handle
7826  * @val: user provided value
7827  *
7828  * Return: 0 for success. nonzero for failure.
7829  */
7830 static QDF_STATUS
7831 dp_config_debug_sniffer(struct dp_pdev *pdev, int val)
7832 {
7833 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7834 
7835 	/*
7836 	 * Note: The mirror copy mode cannot co-exist with any other
7837 	 * monitor modes. Hence disabling the filter for this mode will
7838 	 * reset the monitor destination ring filters.
7839 	 */
7840 	if (pdev->mcopy_mode) {
7841 #ifdef FEATURE_PERPKT_INFO
7842 		dp_pdev_disable_mcopy_code(pdev);
7843 		dp_mon_filter_reset_mcopy_mode(pdev);
7844 		status = dp_mon_filter_update(pdev);
7845 		if (status != QDF_STATUS_SUCCESS) {
7846 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7847 				  FL("Failed to reset AM copy mode filters"));
7848 		}
7849 #endif /* FEATURE_PERPKT_INFO */
7850 	}
7851 	switch (val) {
7852 	case 0:
7853 		pdev->tx_sniffer_enable = 0;
7854 		pdev->monitor_configured = false;
7855 
7856 		/*
7857 		 * We don't need to reset the Rx monitor status ring  or call
7858 		 * the API dp_ppdu_ring_reset() if all debug sniffer mode is
7859 		 * disabled. The Rx monitor status ring will be disabled when
7860 		 * the last mode using the monitor status ring get disabled.
7861 		 */
7862 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
7863 		    !pdev->bpr_enable) {
7864 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7865 		} else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
7866 			dp_h2t_cfg_stats_msg_send(pdev,
7867 				DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
7868 		} else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
7869 			dp_h2t_cfg_stats_msg_send(pdev,
7870 						  DP_PPDU_STATS_CFG_BPR_ENH,
7871 						  pdev->pdev_id);
7872 		} else {
7873 			dp_h2t_cfg_stats_msg_send(pdev,
7874 						  DP_PPDU_STATS_CFG_BPR,
7875 						  pdev->pdev_id);
7876 		}
7877 		break;
7878 
7879 	case 1:
7880 		pdev->tx_sniffer_enable = 1;
7881 		pdev->monitor_configured = false;
7882 
7883 		if (!pdev->pktlog_ppdu_stats)
7884 			dp_h2t_cfg_stats_msg_send(pdev,
7885 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
7886 		break;
7887 	case 2:
7888 	case 4:
7889 		if (pdev->monitor_vdev) {
7890 			status = QDF_STATUS_E_RESOURCES;
7891 			break;
7892 		}
7893 
7894 #ifdef FEATURE_PERPKT_INFO
7895 		pdev->mcopy_mode = val;
7896 		pdev->tx_sniffer_enable = 0;
7897 		pdev->monitor_configured = true;
7898 
7899 		/*
7900 		 * Setup the M copy mode filter.
7901 		 */
7902 		dp_mon_filter_setup_mcopy_mode(pdev);
7903 		status = dp_mon_filter_update(pdev);
7904 		if (status != QDF_STATUS_SUCCESS) {
7905 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7906 				  FL("Failed to set M_copy mode filters"));
7907 			dp_mon_filter_reset_mcopy_mode(pdev);
7908 			dp_pdev_disable_mcopy_code(pdev);
7909 			return status;
7910 		}
7911 
7912 		if (!pdev->pktlog_ppdu_stats)
7913 			dp_h2t_cfg_stats_msg_send(pdev,
7914 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
7915 #endif /* FEATURE_PERPKT_INFO */
7916 		break;
7917 
7918 	default:
7919 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7920 			"Invalid value");
7921 		break;
7922 	}
7923 	return status;
7924 }
7925 
7926 #ifdef FEATURE_PERPKT_INFO
7927 /*
7928  * dp_enable_enhanced_stats()- API to enable enhanced statistcs
7929  * @soc_handle: DP_SOC handle
7930  * @pdev_id: id of DP_PDEV handle
7931  *
7932  * Return: QDF_STATUS
7933  */
7934 static QDF_STATUS
7935 dp_enable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
7936 {
7937 	struct dp_pdev *pdev = NULL;
7938 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7939 
7940 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7941 						  pdev_id);
7942 
7943 	if (!pdev)
7944 		return QDF_STATUS_E_FAILURE;
7945 
7946 	if (pdev->enhanced_stats_en == 0)
7947 		dp_cal_client_timer_start(pdev->cal_client_ctx);
7948 
7949 	pdev->enhanced_stats_en = 1;
7950 
7951 	dp_mon_filter_setup_enhanced_stats(pdev);
7952 	status = dp_mon_filter_update(pdev);
7953 	if (status != QDF_STATUS_SUCCESS) {
7954 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7955 			  FL("Failed to set enhanced mode filters"));
7956 		dp_mon_filter_reset_enhanced_stats(pdev);
7957 		dp_cal_client_timer_stop(pdev->cal_client_ctx);
7958 		pdev->enhanced_stats_en = 0;
7959 		return QDF_STATUS_E_FAILURE;
7960 	}
7961 
7962 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
7963 		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
7964 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
7965 		dp_h2t_cfg_stats_msg_send(pdev,
7966 					  DP_PPDU_STATS_CFG_BPR_ENH,
7967 					  pdev->pdev_id);
7968 	}
7969 
7970 	return QDF_STATUS_SUCCESS;
7971 }
7972 
7973 /*
7974  * dp_disable_enhanced_stats()- API to disable enhanced statistcs
7975  *
7976  * @param soc - the soc handle
7977  * @param pdev_id - pdev_id of pdev
7978  * @return - QDF_STATUS
7979  */
7980 static QDF_STATUS
7981 dp_disable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
7982 {
7983 	struct dp_pdev *pdev =
7984 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7985 						   pdev_id);
7986 
7987 	if (!pdev)
7988 		return QDF_STATUS_E_FAILURE;
7989 
7990 	if (pdev->enhanced_stats_en == 1)
7991 		dp_cal_client_timer_stop(pdev->cal_client_ctx);
7992 
7993 	pdev->enhanced_stats_en = 0;
7994 
7995 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
7996 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7997 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
7998 		dp_h2t_cfg_stats_msg_send(pdev,
7999 					  DP_PPDU_STATS_CFG_BPR,
8000 					  pdev->pdev_id);
8001 	}
8002 
8003 	dp_mon_filter_reset_enhanced_stats(pdev);
8004 	if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
8005 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8006 			  FL("Failed to reset enhanced mode filters"));
8007 	}
8008 
8009 	return QDF_STATUS_SUCCESS;
8010 }
8011 #endif /* FEATURE_PERPKT_INFO */
8012 
8013 /*
8014  * dp_get_fw_peer_stats()- function to print peer stats
8015  * @soc: soc handle
8016  * @pdev_id : id of the pdev handle
8017  * @mac_addr: mac address of the peer
8018  * @cap: Type of htt stats requested
8019  * @is_wait: if set, wait on completion from firmware response
8020  *
8021  * Currently Supporting only MAC ID based requests Only
8022  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
8023  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
8024  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
8025  *
8026  * Return: QDF_STATUS
8027  */
8028 static QDF_STATUS
8029 dp_get_fw_peer_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
8030 		     uint8_t *mac_addr,
8031 		     uint32_t cap, uint32_t is_wait)
8032 {
8033 	int i;
8034 	uint32_t config_param0 = 0;
8035 	uint32_t config_param1 = 0;
8036 	uint32_t config_param2 = 0;
8037 	uint32_t config_param3 = 0;
8038 	struct dp_pdev *pdev =
8039 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8040 						   pdev_id);
8041 
8042 	if (!pdev)
8043 		return QDF_STATUS_E_FAILURE;
8044 
8045 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
8046 	config_param0 |= (1 << (cap + 1));
8047 
8048 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
8049 		config_param1 |= (1 << i);
8050 	}
8051 
8052 	config_param2 |= (mac_addr[0] & 0x000000ff);
8053 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
8054 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
8055 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
8056 
8057 	config_param3 |= (mac_addr[4] & 0x000000ff);
8058 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
8059 
8060 	if (is_wait) {
8061 		qdf_event_reset(&pdev->fw_peer_stats_event);
8062 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
8063 					  config_param0, config_param1,
8064 					  config_param2, config_param3,
8065 					  0, 1, 0);
8066 		qdf_wait_single_event(&pdev->fw_peer_stats_event,
8067 				      DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
8068 	} else {
8069 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
8070 					  config_param0, config_param1,
8071 					  config_param2, config_param3,
8072 					  0, 0, 0);
8073 	}
8074 
8075 	return QDF_STATUS_SUCCESS;
8076 
8077 }
8078 
/* This struct definition will be removed from here
 * once it get added in FW headers*/
struct httstats_cmd_req {
    uint32_t    config_param0;	/* HTT stats config word 0 */
    uint32_t    config_param1;	/* HTT stats config word 1 */
    uint32_t    config_param2;	/* HTT stats config word 2 */
    uint32_t    config_param3;	/* HTT stats config word 3 */
    int cookie;			/* opaque value echoed back by FW */
    u_int8_t    stats_id;	/* HTT stats type being requested */
};
8089 
8090 /*
8091  * dp_get_htt_stats: function to process the httstas request
8092  * @soc: DP soc handle
8093  * @pdev_id: id of pdev handle
8094  * @data: pointer to request data
8095  * @data_len: length for request data
8096  *
8097  * return: QDF_STATUS
8098  */
8099 static QDF_STATUS
8100 dp_get_htt_stats(struct cdp_soc_t *soc, uint8_t pdev_id, void *data,
8101 		 uint32_t data_len)
8102 {
8103 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
8104 	struct dp_pdev *pdev =
8105 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8106 						   pdev_id);
8107 
8108 	if (!pdev)
8109 		return QDF_STATUS_E_FAILURE;
8110 
8111 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
8112 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
8113 				req->config_param0, req->config_param1,
8114 				req->config_param2, req->config_param3,
8115 				req->cookie, 0, 0);
8116 
8117 	return QDF_STATUS_SUCCESS;
8118 }
8119 
8120 /**
8121  * dp_set_pdev_tidmap_prty_wifi3(): update tidmap priority in pdev
8122  * @pdev: DP_PDEV handle
8123  * @prio: tidmap priority value passed by the user
8124  *
8125  * Return: QDF_STATUS_SUCCESS on success
8126  */
8127 static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct dp_pdev *pdev,
8128 						uint8_t prio)
8129 {
8130 	struct dp_soc *soc = pdev->soc;
8131 
8132 	soc->tidmap_prty = prio;
8133 
8134 	hal_tx_set_tidmap_prty(soc->hal_soc, prio);
8135 	return QDF_STATUS_SUCCESS;
8136 }
8137 
/*
 * dp_get_peer_param: function to get parameters in peer
 * @cdp_soc: DP soc handle
 * @vdev_id: id of vdev handle
 * @peer_mac: peer mac address
 * @param: parameter type to be fetched
 * @val : address of buffer
 *
 * NOTE(review): currently a stub - no peer parameter is read and @val
 * is left untouched; callers always receive QDF_STATUS_SUCCESS.
 *
 * Return: QDF_STATUS_SUCCESS
 */
static QDF_STATUS dp_get_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
				    uint8_t *peer_mac,
				    enum cdp_peer_param_type param,
				    cdp_config_param_type *val)
{
	/* Placeholder: no peer params are retrievable through this path yet */
	return QDF_STATUS_SUCCESS;
}
8155 
#ifdef WLAN_ATF_ENABLE
/**
 * dp_set_atf_stats_enable() - enable/disable ATF stats collection on a pdev
 * @pdev: DP pdev handle
 * @value: true to enable ATF stats, false to disable
 *
 * Return: none
 */
static void dp_set_atf_stats_enable(struct dp_pdev *pdev, bool value)
{
	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid pdev");
		return;
	}

	pdev->dp_atf_stats_enable = value;
}
#else
/* No-op stub when the ATF feature is compiled out */
static void dp_set_atf_stats_enable(struct dp_pdev *pdev, bool value)
{
}
#endif
8172 
8173 /*
8174  * dp_set_peer_param: function to set parameters in peer
8175  * @cdp_soc: DP soc handle
8176  * @vdev_id: id of vdev handle
8177  * @peer_mac: peer mac address
8178  * @param: parameter type to be set
8179  * @val: value of parameter to be set
8180  *
8181  * Return: 0 for success. nonzero for failure.
8182  */
8183 static QDF_STATUS dp_set_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
8184 				    uint8_t *peer_mac,
8185 				    enum cdp_peer_param_type param,
8186 				    cdp_config_param_type val)
8187 {
8188 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
8189 						      peer_mac, 0, vdev_id,
8190 						      DP_MOD_ID_CDP);
8191 
8192 	if (!peer)
8193 		return QDF_STATUS_E_FAILURE;
8194 
8195 	switch (param) {
8196 	case CDP_CONFIG_NAWDS:
8197 		peer->nawds_enabled = val.cdp_peer_param_nawds;
8198 		break;
8199 	case CDP_CONFIG_NAC:
8200 		peer->nac = !!(val.cdp_peer_param_nac);
8201 		break;
8202 	case CDP_CONFIG_ISOLATION:
8203 		dp_set_peer_isolation(peer, val.cdp_peer_param_isolation);
8204 		break;
8205 	case CDP_CONFIG_IN_TWT:
8206 		peer->in_twt = !!(val.cdp_peer_param_in_twt);
8207 		break;
8208 	default:
8209 		break;
8210 	}
8211 
8212 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8213 
8214 	return QDF_STATUS_SUCCESS;
8215 }
8216 
8217 /*
8218  * dp_get_pdev_param: function to get parameters from pdev
8219  * @cdp_soc: DP soc handle
8220  * @pdev_id: id of pdev handle
8221  * @param: parameter type to be get
8222  * @value : buffer for value
8223  *
8224  * Return: status
8225  */
8226 static QDF_STATUS dp_get_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
8227 				    enum cdp_pdev_param_type param,
8228 				    cdp_config_param_type *val)
8229 {
8230 	struct cdp_pdev *pdev = (struct cdp_pdev *)
8231 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
8232 						   pdev_id);
8233 	if (!pdev)
8234 		return QDF_STATUS_E_FAILURE;
8235 
8236 	switch (param) {
8237 	case CDP_CONFIG_VOW:
8238 		val->cdp_pdev_param_cfg_vow =
8239 				((struct dp_pdev *)pdev)->delay_stats_flag;
8240 		break;
8241 	case CDP_TX_PENDING:
8242 		val->cdp_pdev_param_tx_pending = dp_get_tx_pending(pdev);
8243 		break;
8244 	case CDP_FILTER_MCAST_DATA:
8245 		val->cdp_pdev_param_fltr_mcast =
8246 					dp_pdev_get_filter_mcast_data(pdev);
8247 		break;
8248 	case CDP_FILTER_NO_DATA:
8249 		val->cdp_pdev_param_fltr_none =
8250 					dp_pdev_get_filter_non_data(pdev);
8251 		break;
8252 	case CDP_FILTER_UCAST_DATA:
8253 		val->cdp_pdev_param_fltr_ucast =
8254 					dp_pdev_get_filter_ucast_data(pdev);
8255 		break;
8256 	default:
8257 		return QDF_STATUS_E_FAILURE;
8258 	}
8259 
8260 	return QDF_STATUS_SUCCESS;
8261 }
8262 
/*
 * dp_set_pdev_param: function to set parameters in pdev
 * @cdp_soc: DP soc handle
 * @pdev_id: id of pdev handle
 * @param: parameter type to be set
 * @val: value of parameter to be set
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS dp_set_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
				    enum cdp_pdev_param_type param,
				    cdp_config_param_type val)
{
	int target_type;
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
						   pdev_id);
	if (!pdev)
		return QDF_STATUS_E_FAILURE;

	/* Refresh the band -> monitor LMAC mapping on every call:
	 * QCA6750 routes 2G through the 5G LMAC; every other target
	 * uses the per-band defaults.
	 */
	target_type = hal_get_target_type(soc->hal_soc);
	switch (target_type) {
	case TARGET_TYPE_QCA6750:
		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_5G_LMAC_ID;
		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_5G_LMAC_ID;
		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_6G_LMAC_ID;
		break;
	default:
		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_2G_LMAC_ID;
		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_5G_LMAC_ID;
		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_6G_LMAC_ID;
		break;
	}

	switch (param) {
	/* The capture/sniffer cases return the helper's status directly */
	case CDP_CONFIG_TX_CAPTURE:
		return dp_config_debug_sniffer(pdev,
					       val.cdp_pdev_param_tx_capture);
	case CDP_CONFIG_DEBUG_SNIFFER:
		return dp_config_debug_sniffer(pdev,
					       val.cdp_pdev_param_dbg_snf);
	case CDP_CONFIG_BPR_ENABLE:
		return dp_set_bpr_enable(pdev, val.cdp_pdev_param_bpr_enable);
	case CDP_CONFIG_PRIMARY_RADIO:
		pdev->is_primary = val.cdp_pdev_param_primary_radio;
		break;
	case CDP_CONFIG_CAPTURE_LATENCY:
		pdev->latency_capture_enable = val.cdp_pdev_param_cptr_latcy;
		break;
	case CDP_INGRESS_STATS:
		dp_pdev_tid_stats_ingress_inc(pdev,
					      val.cdp_pdev_param_ingrs_stats);
		break;
	case CDP_OSIF_DROP:
		dp_pdev_tid_stats_osif_drop(pdev,
					    val.cdp_pdev_param_osif_drop);
		break;
	case CDP_CONFIG_ENH_RX_CAPTURE:
		return dp_config_enh_rx_capture(pdev,
						val.cdp_pdev_param_en_rx_cap);
	case CDP_CONFIG_ENH_TX_CAPTURE:
		return dp_config_enh_tx_capture(pdev,
						val.cdp_pdev_param_en_tx_cap);
	case CDP_CONFIG_HMMC_TID_OVERRIDE:
		pdev->hmmc_tid_override_en = val.cdp_pdev_param_hmmc_tid_ovrd;
		break;
	case CDP_CONFIG_HMMC_TID_VALUE:
		pdev->hmmc_tid = val.cdp_pdev_param_hmmc_tid;
		break;
	case CDP_CHAN_NOISE_FLOOR:
		pdev->chan_noise_floor = val.cdp_pdev_param_chn_noise_flr;
		break;
	case CDP_TIDMAP_PRTY:
		dp_set_pdev_tidmap_prty_wifi3(pdev,
					      val.cdp_pdev_param_tidmap_prty);
		break;
	case CDP_FILTER_NEIGH_PEERS:
		dp_set_filter_neigh_peers(pdev,
					  val.cdp_pdev_param_fltr_neigh_peers);
		break;
	case CDP_MONITOR_CHANNEL:
		pdev->mon_chan_num = val.cdp_pdev_param_monitor_chan;
		break;
	case CDP_MONITOR_FREQUENCY:
		/* Keep the cached band in sync with the monitor frequency */
		pdev->mon_chan_freq = val.cdp_pdev_param_mon_freq;
		pdev->mon_chan_band =
				wlan_reg_freq_to_band(pdev->mon_chan_freq);
		break;
	case CDP_CONFIG_BSS_COLOR:
		dp_mon_set_bsscolor(pdev, val.cdp_pdev_param_bss_color);
		break;
	case CDP_SET_ATF_STATS_ENABLE:
		dp_set_atf_stats_enable(pdev,
					val.cdp_pdev_param_atf_stats_enable);
		break;
	default:
		return QDF_STATUS_E_INVAL;
	}
	return QDF_STATUS_SUCCESS;
}
8364 
8365 #ifdef QCA_PEER_EXT_STATS
8366 static void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
8367 					  qdf_nbuf_t nbuf)
8368 {
8369 	struct dp_peer *peer = NULL;
8370 	uint16_t peer_id, ring_id;
8371 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
8372 	struct cdp_peer_ext_stats *pext_stats = NULL;
8373 
8374 	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
8375 	if (peer_id > soc->max_peers)
8376 		return;
8377 
8378 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
8379 	if (qdf_unlikely(!peer))
8380 		return;
8381 
8382 	if (qdf_likely(peer->pext_stats)) {
8383 		pext_stats = peer->pext_stats;
8384 		ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
8385 		dp_rx_compute_tid_delay(&pext_stats->delay_stats[tid][ring_id],
8386 					nbuf);
8387 	}
8388 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8389 }
8390 #else
8391 static inline void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
8392 						 qdf_nbuf_t nbuf)
8393 {
8394 }
8395 #endif
8396 
8397 /*
8398  * dp_calculate_delay_stats: function to get rx delay stats
8399  * @cdp_soc: DP soc handle
8400  * @vdev_id: id of DP vdev handle
8401  * @nbuf: skb
8402  *
8403  * Return: QDF_STATUS
8404  */
8405 static QDF_STATUS
8406 dp_calculate_delay_stats(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
8407 			 qdf_nbuf_t nbuf)
8408 {
8409 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
8410 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8411 						     DP_MOD_ID_CDP);
8412 
8413 	if (!vdev)
8414 		return QDF_STATUS_SUCCESS;
8415 
8416 	if (vdev->pdev->delay_stats_flag)
8417 		dp_rx_compute_delay(vdev, nbuf);
8418 	else
8419 		dp_rx_update_peer_delay_stats(soc, nbuf);
8420 
8421 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8422 	return QDF_STATUS_SUCCESS;
8423 }
8424 
8425 /*
8426  * dp_get_vdev_param: function to get parameters from vdev
8427  * @cdp_soc : DP soc handle
8428  * @vdev_id: id of DP vdev handle
8429  * @param: parameter type to get value
8430  * @val: buffer address
8431  *
8432  * return: status
8433  */
8434 static QDF_STATUS dp_get_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
8435 				    enum cdp_vdev_param_type param,
8436 				    cdp_config_param_type *val)
8437 {
8438 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
8439 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8440 						     DP_MOD_ID_CDP);
8441 
8442 	if (!vdev)
8443 		return QDF_STATUS_E_FAILURE;
8444 
8445 	switch (param) {
8446 	case CDP_ENABLE_WDS:
8447 		val->cdp_vdev_param_wds = vdev->wds_enabled;
8448 		break;
8449 	case CDP_ENABLE_MEC:
8450 		val->cdp_vdev_param_mec = vdev->mec_enabled;
8451 		break;
8452 	case CDP_ENABLE_DA_WAR:
8453 		val->cdp_vdev_param_da_war = vdev->pdev->soc->da_war_enabled;
8454 		break;
8455 	case CDP_ENABLE_IGMP_MCAST_EN:
8456 		val->cdp_vdev_param_igmp_mcast_en = vdev->igmp_mcast_enhanc_en;
8457 		break;
8458 	case CDP_ENABLE_MCAST_EN:
8459 		val->cdp_vdev_param_mcast_en = vdev->mcast_enhancement_en;
8460 		break;
8461 	default:
8462 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8463 			  "param value %d is wrong\n",
8464 			  param);
8465 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8466 		return QDF_STATUS_E_FAILURE;
8467 	}
8468 
8469 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8470 	return QDF_STATUS_SUCCESS;
8471 }
8472 
/*
 * dp_set_vdev_param: function to set parameters in vdev
 * @cdp_soc : DP soc handle
 * @vdev_id: id of DP vdev handle
 * @param: parameter type to be set
 * @val: value
 *
 * return: QDF_STATUS
 */
static QDF_STATUS
dp_set_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
		  enum cdp_vdev_param_type param, cdp_config_param_type val)
{
	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
	struct dp_vdev *vdev =
		dp_vdev_get_ref_by_id(dsoc, vdev_id, DP_MOD_ID_CDP);
	uint32_t var = 0;	/* scratch for the WDS aging timer value */

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	switch (param) {
	case CDP_ENABLE_WDS:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "wds_enable %d for vdev(%pK) id(%d)\n",
			  val.cdp_vdev_param_wds, vdev, vdev->vdev_id);
		vdev->wds_enabled = val.cdp_vdev_param_wds;
		break;
	case CDP_ENABLE_MEC:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "mec_enable %d for vdev(%pK) id(%d)\n",
			  val.cdp_vdev_param_mec, vdev, vdev->vdev_id);
		vdev->mec_enabled = val.cdp_vdev_param_mec;
		break;
	case CDP_ENABLE_DA_WAR:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "da_war_enable %d for vdev(%pK) id(%d)\n",
			  val.cdp_vdev_param_da_war, vdev, vdev->vdev_id);
		/* DA WAR is a soc-wide setting; toggling it invalidates
		 * the existing AST entries, so flush the whole table.
		 */
		vdev->pdev->soc->da_war_enabled = val.cdp_vdev_param_da_war;
		dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
					     vdev->pdev->soc));
		break;
	case CDP_ENABLE_NAWDS:
		vdev->nawds_enabled = val.cdp_vdev_param_nawds;
		break;
	case CDP_ENABLE_MCAST_EN:
		vdev->mcast_enhancement_en = val.cdp_vdev_param_mcast_en;
		break;
	case CDP_ENABLE_IGMP_MCAST_EN:
		vdev->igmp_mcast_enhanc_en = val.cdp_vdev_param_igmp_mcast_en;
		break;
	case CDP_ENABLE_PROXYSTA:
		vdev->proxysta_vdev = val.cdp_vdev_param_proxysta;
		break;
	case CDP_UPDATE_TDLS_FLAGS:
		vdev->tdls_link_connected = val.cdp_vdev_param_tdls_flags;
		break;
	case CDP_CFG_WDS_AGING_TIMER:
		/* 0 stops AST aging entirely; a new non-zero value
		 * re-arms the shared soc-level aging timer.
		 */
		var = val.cdp_vdev_param_aging_tmr;
		if (!var)
			qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
		else if (var != vdev->wds_aging_timer_val)
			qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, var);

		vdev->wds_aging_timer_val = var;
		break;
	case CDP_ENABLE_AP_BRIDGE:
		/* AP bridging is never valid in STA mode */
		if (wlan_op_mode_sta != vdev->opmode)
			vdev->ap_bridge_enabled = val.cdp_vdev_param_ap_brdg_en;
		else
			vdev->ap_bridge_enabled = false;
		break;
	case CDP_ENABLE_CIPHER:
		vdev->sec_type = val.cdp_vdev_param_cipher_en;
		break;
	case CDP_ENABLE_QWRAP_ISOLATION:
		vdev->isolation_vdev = val.cdp_vdev_param_qwrap_isolation;
		break;
	case CDP_UPDATE_MULTIPASS:
		vdev->multipass_en = val.cdp_vdev_param_update_multipass;
		break;
	case CDP_TX_ENCAP_TYPE:
		vdev->tx_encap_type = val.cdp_vdev_param_tx_encap;
		break;
	case CDP_RX_DECAP_TYPE:
		vdev->rx_decap_type = val.cdp_vdev_param_rx_decap;
		break;
	case CDP_TID_VDEV_PRTY:
		vdev->tidmap_prty = val.cdp_vdev_param_tidmap_prty;
		break;
	case CDP_TIDMAP_TBL_ID:
		vdev->tidmap_tbl_id = val.cdp_vdev_param_tidmap_tbl_id;
		break;
#ifdef MESH_MODE_SUPPORT
	case CDP_MESH_RX_FILTER:
		dp_vdev_set_mesh_rx_filter((struct cdp_vdev *)vdev,
					   val.cdp_vdev_param_mesh_rx_filter);
		break;
	case CDP_MESH_MODE:
		dp_vdev_set_mesh_mode((struct cdp_vdev *)vdev,
				      val.cdp_vdev_param_mesh_mode);
		break;
#endif
	case CDP_ENABLE_CSUM:
		dp_info("vdev_id %d enable Checksum %d", vdev_id,
			val.cdp_enable_tx_checksum);
		vdev->csum_enabled = val.cdp_enable_tx_checksum;
		break;
	default:
		break;
	}

	/* Several of the params above affect AST/MEC search behavior */
	dp_tx_vdev_update_search_flags((struct dp_vdev *)vdev);
	dp_vdev_unref_delete(dsoc, vdev, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}
8590 
/*
 * dp_set_psoc_param: function to set parameters in psoc
 * @cdp_soc : DP soc handle
 * @param: parameter type to be set
 * @val: value of parameter to be set
 *
 * return: QDF_STATUS
 */
static QDF_STATUS
dp_set_psoc_param(struct cdp_soc_t *cdp_soc,
		  enum cdp_psoc_param_type param, cdp_config_param_type val)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->wlan_cfg_ctx;

	switch (param) {
	case CDP_ENABLE_RATE_STATS:
		soc->wlanstats_enabled = val.cdp_psoc_param_en_rate_stats;
		break;
	case CDP_SET_NSS_CFG:
		wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx,
					    val.cdp_psoc_param_en_nss_cfg);
		/*
		 * TODO: masked out based on the per offloaded radio
		 */
		switch (val.cdp_psoc_param_en_nss_cfg) {
		case dp_nss_cfg_default:
			break;
		case dp_nss_cfg_first_radio:
		/*
		 * This configuration is valid for single band radio which
		 * is also NSS offload.
		 */
		/* fallthrough: first_radio shares the dbdc/dbtc handling */
		case dp_nss_cfg_dbdc:
		case dp_nss_cfg_dbtc:
			/* Host tx descriptors are not needed when all
			 * radios are NSS-offloaded.
			 */
			wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
			wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
			wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
			wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
			break;
		default:
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "Invalid offload config %d",
				  val.cdp_psoc_param_en_nss_cfg);
		}

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  FL("nss-wifi<0> nss config is enabled"));
		break;
	case CDP_SET_PREFERRED_HW_MODE:
		soc->preferred_hw_mode = val.cdp_psoc_param_preferred_hw_mode;
		break;
	default:
		break;
	}

	return QDF_STATUS_SUCCESS;
}
8649 
8650 /*
8651  * dp_get_psoc_param: function to get parameters in soc
8652  * @cdp_soc : DP soc handle
8653  * @param: parameter type to be set
8654  * @val: address of buffer
8655  *
8656  * return: status
8657  */
8658 static QDF_STATUS dp_get_psoc_param(struct cdp_soc_t *cdp_soc,
8659 				    enum cdp_psoc_param_type param,
8660 				    cdp_config_param_type *val)
8661 {
8662 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
8663 
8664 	if (!soc)
8665 		return QDF_STATUS_E_FAILURE;
8666 
8667 	switch (param) {
8668 	case CDP_CFG_PEER_EXT_STATS:
8669 		val->cdp_psoc_param_pext_stats =
8670 			wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
8671 		break;
8672 	default:
8673 		dp_warn("Invalid param");
8674 		break;
8675 	}
8676 
8677 	return QDF_STATUS_SUCCESS;
8678 }
8679 
8680 /**
8681  * dp_peer_update_pkt_capture_params: Set Rx & Tx Capture flags for a peer
8682  * @soc: DP_SOC handle
8683  * @pdev_id: id of DP_PDEV handle
8684  * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode
8685  * @is_tx_pkt_cap_enable: enable/disable/delete/print
8686  * Tx packet capture in monitor mode
8687  * @peer_mac: MAC address for which the above need to be enabled/disabled
8688  *
8689  * Return: Success if Rx & Tx capture is enabled for peer, false otherwise
8690  */
8691 QDF_STATUS
8692 dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc,
8693 				  uint8_t pdev_id,
8694 				  bool is_rx_pkt_cap_enable,
8695 				  uint8_t is_tx_pkt_cap_enable,
8696 				  uint8_t *peer_mac)
8697 {
8698 	struct dp_peer *peer;
8699 	QDF_STATUS status;
8700 	struct dp_pdev *pdev =
8701 			dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8702 							   pdev_id);
8703 	if (!pdev)
8704 		return QDF_STATUS_E_FAILURE;
8705 
8706 	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
8707 				      peer_mac, 0, DP_VDEV_ALL,
8708 				      DP_MOD_ID_CDP);
8709 	if (!peer)
8710 		return QDF_STATUS_E_FAILURE;
8711 
8712 	/* we need to set tx pkt capture for non associated peer */
8713 	status = dp_peer_set_tx_capture_enabled(pdev, peer,
8714 						is_tx_pkt_cap_enable,
8715 						peer_mac);
8716 
8717 	status = dp_peer_set_rx_capture_enabled(pdev, peer,
8718 						is_rx_pkt_cap_enable,
8719 						peer_mac);
8720 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8721 
8722 	return status;
8723 }
8724 
8725 /*
8726  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
8727  * @soc: DP_SOC handle
8728  * @vdev_id: id of DP_VDEV handle
8729  * @map_id:ID of map that needs to be updated
8730  *
8731  * Return: QDF_STATUS
8732  */
8733 static QDF_STATUS dp_set_vdev_dscp_tid_map_wifi3(ol_txrx_soc_handle cdp_soc,
8734 						 uint8_t vdev_id,
8735 						 uint8_t map_id)
8736 {
8737 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
8738 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8739 						     DP_MOD_ID_CDP);
8740 	if (vdev) {
8741 		vdev->dscp_tid_map_id = map_id;
8742 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8743 		return QDF_STATUS_SUCCESS;
8744 	}
8745 
8746 	return QDF_STATUS_E_FAILURE;
8747 }
8748 
#ifdef DP_RATETABLE_SUPPORT
/* dp_txrx_get_ratekbps() - resolve rate parameters via the rate table
 * @preamb: preamble type
 * @mcs: MCS index
 * @htflag: HT/VHT flag (NOTE(review): currently unused - the nss and
 *          extra arguments to dp_getrateindex() are hard-coded to 1;
 *          confirm against the dp_getrateindex() contract)
 * @gintval: guard interval
 *
 * Return: value returned by dp_getrateindex()
 */
static int dp_txrx_get_ratekbps(int preamb, int mcs,
				int htflag, int gintval)
{
	uint32_t rix;		/* rate index out-param (discarded) */
	uint16_t ratecode;	/* rate code out-param (discarded) */

	return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1,
			       (uint8_t)preamb, 1, &rix, &ratecode);
}
#else
/* Stub: rate table support compiled out, always reports 0 */
static int dp_txrx_get_ratekbps(int preamb, int mcs,
				int htflag, int gintval)
{
	return 0;
}
#endif
8766 
8767 /* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
8768  * @soc: DP soc handle
8769  * @pdev_id: id of DP pdev handle
8770  * @pdev_stats: buffer to copy to
8771  *
8772  * return : status success/failure
8773  */
8774 static QDF_STATUS
8775 dp_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
8776 		       struct cdp_pdev_stats *pdev_stats)
8777 {
8778 	struct dp_pdev *pdev =
8779 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8780 						   pdev_id);
8781 	if (!pdev)
8782 		return QDF_STATUS_E_FAILURE;
8783 
8784 	dp_aggregate_pdev_stats(pdev);
8785 
8786 	qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats));
8787 	return QDF_STATUS_SUCCESS;
8788 }
8789 
8790 /* dp_txrx_update_vdev_me_stats(): Update vdev ME stats sent from CDP
8791  * @vdev: DP vdev handle
8792  * @buf: buffer containing specific stats structure
8793  *
8794  * Returns: void
8795  */
8796 static void dp_txrx_update_vdev_me_stats(struct dp_vdev *vdev,
8797 					 void *buf)
8798 {
8799 	struct cdp_tx_ingress_stats *host_stats = NULL;
8800 
8801 	if (!buf) {
8802 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8803 			  "Invalid host stats buf");
8804 		return;
8805 	}
8806 	host_stats = (struct cdp_tx_ingress_stats *)buf;
8807 
8808 	DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt,
8809 			 host_stats->mcast_en.mcast_pkt.num,
8810 			 host_stats->mcast_en.mcast_pkt.bytes);
8811 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error,
8812 		     host_stats->mcast_en.dropped_map_error);
8813 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_self_mac,
8814 		     host_stats->mcast_en.dropped_self_mac);
8815 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_send_fail,
8816 		     host_stats->mcast_en.dropped_send_fail);
8817 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast,
8818 		     host_stats->mcast_en.ucast);
8819 	DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc,
8820 		     host_stats->mcast_en.fail_seg_alloc);
8821 	DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail,
8822 		     host_stats->mcast_en.clone_fail);
8823 }
8824 
8825 /* dp_txrx_update_vdev_host_stats(): Update stats sent through CDP
8826  * @soc: DP soc handle
8827  * @vdev_id: id of DP vdev handle
8828  * @buf: buffer containing specific stats structure
8829  * @stats_id: stats type
8830  *
8831  * Returns: QDF_STATUS
8832  */
8833 static QDF_STATUS dp_txrx_update_vdev_host_stats(struct cdp_soc_t *soc_hdl,
8834 						 uint8_t vdev_id,
8835 						 void *buf,
8836 						 uint16_t stats_id)
8837 {
8838 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8839 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8840 						     DP_MOD_ID_CDP);
8841 
8842 	if (!vdev) {
8843 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8844 			  "Invalid vdev handle");
8845 		return QDF_STATUS_E_FAILURE;
8846 	}
8847 
8848 	switch (stats_id) {
8849 	case DP_VDEV_STATS_PKT_CNT_ONLY:
8850 		break;
8851 	case DP_VDEV_STATS_TX_ME:
8852 		dp_txrx_update_vdev_me_stats(vdev, buf);
8853 		break;
8854 	default:
8855 		qdf_info("Invalid stats_id %d", stats_id);
8856 		break;
8857 	}
8858 
8859 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8860 	return QDF_STATUS_SUCCESS;
8861 }
8862 
8863 /* dp_txrx_get_peer_stats - will return cdp_peer_stats
8864  * @soc: soc handle
8865  * @vdev_id: id of vdev handle
8866  * @peer_mac: mac of DP_PEER handle
8867  * @peer_stats: buffer to copy to
8868  * return : status success/failure
8869  */
8870 static QDF_STATUS
8871 dp_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
8872 		       uint8_t *peer_mac, struct cdp_peer_stats *peer_stats)
8873 {
8874 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8875 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
8876 						       peer_mac, 0, vdev_id,
8877 						       DP_MOD_ID_CDP);
8878 
8879 	if (!peer)
8880 		return QDF_STATUS_E_FAILURE;
8881 
8882 	qdf_mem_copy(peer_stats, &peer->stats,
8883 		     sizeof(struct cdp_peer_stats));
8884 
8885 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8886 
8887 	return status;
8888 }
8889 
8890 /* dp_txrx_get_peer_stats_param - will return specified cdp_peer_stats
8891  * @param soc - soc handle
8892  * @param vdev_id - vdev_id of vdev object
8893  * @param peer_mac - mac address of the peer
8894  * @param type - enum of required stats
8895  * @param buf - buffer to hold the value
8896  * return : status success/failure
8897  */
8898 static QDF_STATUS
8899 dp_txrx_get_peer_stats_param(struct cdp_soc_t *soc, uint8_t vdev_id,
8900 			     uint8_t *peer_mac, enum cdp_peer_stats_type type,
8901 			     cdp_peer_stats_param_t *buf)
8902 {
8903 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
8904 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
8905 						      peer_mac, 0, vdev_id,
8906 						      DP_MOD_ID_CDP);
8907 
8908 	if (!peer) {
8909 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8910 			  "Invalid Peer for Mac "QDF_MAC_ADDR_FMT,
8911 			  QDF_MAC_ADDR_REF(peer_mac));
8912 		return QDF_STATUS_E_FAILURE;
8913 	} else if (type < cdp_peer_stats_max) {
8914 		switch (type) {
8915 		case cdp_peer_tx_ucast:
8916 			buf->tx_ucast = peer->stats.tx.ucast;
8917 			break;
8918 		case cdp_peer_tx_mcast:
8919 			buf->tx_mcast = peer->stats.tx.mcast;
8920 			break;
8921 		case cdp_peer_tx_rate:
8922 			buf->tx_rate = peer->stats.tx.tx_rate;
8923 			break;
8924 		case cdp_peer_tx_last_tx_rate:
8925 			buf->last_tx_rate = peer->stats.tx.last_tx_rate;
8926 			break;
8927 		case cdp_peer_tx_inactive_time:
8928 			buf->tx_inactive_time = peer->stats.tx.inactive_time;
8929 			break;
8930 		case cdp_peer_tx_ratecode:
8931 			buf->tx_ratecode = peer->stats.tx.tx_ratecode;
8932 			break;
8933 		case cdp_peer_tx_flags:
8934 			buf->tx_flags = peer->stats.tx.tx_flags;
8935 			break;
8936 		case cdp_peer_tx_power:
8937 			buf->tx_power = peer->stats.tx.tx_power;
8938 			break;
8939 		case cdp_peer_rx_rate:
8940 			buf->rx_rate = peer->stats.rx.rx_rate;
8941 			break;
8942 		case cdp_peer_rx_last_rx_rate:
8943 			buf->last_rx_rate = peer->stats.rx.last_rx_rate;
8944 			break;
8945 		case cdp_peer_rx_ratecode:
8946 			buf->rx_ratecode = peer->stats.rx.rx_ratecode;
8947 			break;
8948 		case cdp_peer_rx_ucast:
8949 			buf->rx_ucast = peer->stats.rx.unicast;
8950 			break;
8951 		case cdp_peer_rx_flags:
8952 			buf->rx_flags = peer->stats.rx.rx_flags;
8953 			break;
8954 		case cdp_peer_rx_avg_rssi:
8955 			buf->rx_avg_rssi = peer->stats.rx.avg_rssi;
8956 			break;
8957 		default:
8958 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8959 				  "Invalid value");
8960 			ret = QDF_STATUS_E_FAILURE;
8961 			break;
8962 		}
8963 	} else {
8964 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8965 			  "Invalid value");
8966 		ret = QDF_STATUS_E_FAILURE;
8967 	}
8968 
8969 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8970 
8971 	return ret;
8972 }
8973 
8974 /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
8975  * @soc: soc handle
8976  * @vdev_id: id of vdev handle
8977  * @peer_mac: mac of DP_PEER handle
8978  *
8979  * return : QDF_STATUS
8980  */
8981 static QDF_STATUS
8982 dp_txrx_reset_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
8983 			 uint8_t *peer_mac)
8984 {
8985 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8986 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
8987 						      peer_mac, 0, vdev_id,
8988 						      DP_MOD_ID_CDP);
8989 
8990 	if (!peer)
8991 		return QDF_STATUS_E_FAILURE;
8992 
8993 	qdf_mem_zero(&peer->stats, sizeof(peer->stats));
8994 
8995 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8996 
8997 	return status;
8998 }
8999 
9000 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
9001  * @vdev_handle: DP_VDEV handle
9002  * @buf: buffer for vdev stats
9003  *
9004  * return : int
9005  */
9006 static int dp_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
9007 				  void *buf, bool is_aggregate)
9008 {
9009 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9010 	struct cdp_vdev_stats *vdev_stats;
9011 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9012 						     DP_MOD_ID_CDP);
9013 
9014 	if (!vdev)
9015 		return 1;
9016 
9017 	vdev_stats = (struct cdp_vdev_stats *)buf;
9018 
9019 	if (is_aggregate) {
9020 		dp_aggregate_vdev_stats(vdev, buf);
9021 	} else {
9022 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
9023 	}
9024 
9025 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9026 	return 0;
9027 }
9028 
9029 /*
9030  * dp_get_total_per(): get total per
9031  * @soc: DP soc handle
9032  * @pdev_id: id of DP_PDEV handle
9033  *
9034  * Return: % error rate using retries per packet and success packets
9035  */
9036 static int dp_get_total_per(struct cdp_soc_t *soc, uint8_t pdev_id)
9037 {
9038 	struct dp_pdev *pdev =
9039 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9040 						   pdev_id);
9041 
9042 	if (!pdev)
9043 		return 0;
9044 
9045 	dp_aggregate_pdev_stats(pdev);
9046 	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
9047 		return 0;
9048 	return ((pdev->stats.tx.retries * 100) /
9049 		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
9050 }
9051 
/*
 * dp_txrx_stats_publish(): publish pdev stats into a buffer
 * @soc: DP soc handle
 * @pdev_id: id of DP_PDEV handle
 * @buf: to hold pdev_stats
 *
 * Return: int
 */
static int
dp_txrx_stats_publish(struct cdp_soc_t *soc, uint8_t pdev_id,
		      struct cdp_stats_extd *buf)
{
	struct cdp_txrx_stats_req req = {0,};
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						   pdev_id);

	if (!pdev)
		return TXRX_STATS_LEVEL_OFF;

	dp_aggregate_pdev_stats(pdev);

	/* Trigger FW tx stats collection, then sleep to let the HTT
	 * response update pdev->stats before the copy below.
	 */
	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
	req.cookie_val = 1;
	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
				req.param1, req.param2, req.param3, 0,
				req.cookie_val, 0);

	msleep(DP_MAX_SLEEP_TIME);

	/* Same for the FW rx stats */
	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
	req.cookie_val = 1;
	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
				req.param1, req.param2, req.param3, 0,
				req.cookie_val, 0);

	msleep(DP_MAX_SLEEP_TIME);
	/* NOTE(review): copies sizeof(struct cdp_pdev_stats) into a
	 * struct cdp_stats_extd buffer - presumably cdp_stats_extd
	 * starts with (or is at least as large as) cdp_pdev_stats;
	 * confirm against the cdp structs.
	 */
	qdf_mem_copy(buf, &pdev->stats, sizeof(struct cdp_pdev_stats));

	return TXRX_STATS_LEVEL;
}
9092 
9093 /**
9094  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
9095  * @soc: soc handle
9096  * @pdev_id: id of DP_PDEV handle
9097  * @map_id: ID of map that needs to be updated
9098  * @tos: index value in map
9099  * @tid: tid value passed by the user
9100  *
9101  * Return: QDF_STATUS
9102  */
9103 static QDF_STATUS
9104 dp_set_pdev_dscp_tid_map_wifi3(struct cdp_soc_t *soc_handle,
9105 			       uint8_t pdev_id,
9106 			       uint8_t map_id,
9107 			       uint8_t tos, uint8_t tid)
9108 {
9109 	uint8_t dscp;
9110 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9111 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
9112 
9113 	if (!pdev)
9114 		return QDF_STATUS_E_FAILURE;
9115 
9116 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
9117 	pdev->dscp_tid_map[map_id][dscp] = tid;
9118 
9119 	if (map_id < soc->num_hw_dscp_tid_map)
9120 		hal_tx_update_dscp_tid(soc->hal_soc, tid,
9121 				       map_id, dscp);
9122 	else
9123 		return QDF_STATUS_E_FAILURE;
9124 
9125 	return QDF_STATUS_SUCCESS;
9126 }
9127 
9128 /**
9129  * dp_fw_stats_process(): Process TxRX FW stats request
9130  * @vdev_handle: DP VDEV handle
9131  * @req: stats request
9132  *
9133  * return: int
9134  */
9135 static int dp_fw_stats_process(struct dp_vdev *vdev,
9136 			       struct cdp_txrx_stats_req *req)
9137 {
9138 	struct dp_pdev *pdev = NULL;
9139 	uint32_t stats = req->stats;
9140 	uint8_t mac_id = req->mac_id;
9141 
9142 	if (!vdev) {
9143 		DP_TRACE(NONE, "VDEV not found");
9144 		return 1;
9145 	}
9146 	pdev = vdev->pdev;
9147 
9148 	/*
9149 	 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
9150 	 * from param0 to param3 according to below rule:
9151 	 *
9152 	 * PARAM:
9153 	 *   - config_param0 : start_offset (stats type)
9154 	 *   - config_param1 : stats bmask from start offset
9155 	 *   - config_param2 : stats bmask from start offset + 32
9156 	 *   - config_param3 : stats bmask from start offset + 64
9157 	 */
9158 	if (req->stats == CDP_TXRX_STATS_0) {
9159 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
9160 		req->param1 = 0xFFFFFFFF;
9161 		req->param2 = 0xFFFFFFFF;
9162 		req->param3 = 0xFFFFFFFF;
9163 	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
9164 		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
9165 	}
9166 
9167 	if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT) {
9168 		return dp_h2t_ext_stats_msg_send(pdev,
9169 				HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT,
9170 				req->param0, req->param1, req->param2,
9171 				req->param3, 0, 0, mac_id);
9172 	} else {
9173 		return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
9174 				req->param1, req->param2, req->param3,
9175 				0, 0, mac_id);
9176 	}
9177 }
9178 
9179 /**
9180  * dp_txrx_stats_request - function to map to firmware and host stats
9181  * @soc: soc handle
9182  * @vdev_id: virtual device ID
9183  * @req: stats request
9184  *
9185  * Return: QDF_STATUS
9186  */
9187 static
9188 QDF_STATUS dp_txrx_stats_request(struct cdp_soc_t *soc_handle,
9189 				 uint8_t vdev_id,
9190 				 struct cdp_txrx_stats_req *req)
9191 {
9192 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_handle);
9193 	int host_stats;
9194 	int fw_stats;
9195 	enum cdp_stats stats;
9196 	int num_stats;
9197 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9198 						     DP_MOD_ID_CDP);
9199 	QDF_STATUS status = QDF_STATUS_E_INVAL;
9200 
9201 	if (!vdev || !req) {
9202 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
9203 				"Invalid vdev/req instance");
9204 		status = QDF_STATUS_E_INVAL;
9205 		goto fail0;
9206 	}
9207 
9208 	if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) {
9209 		dp_err("Invalid mac id request");
9210 		status = QDF_STATUS_E_INVAL;
9211 		goto fail0;
9212 	}
9213 
9214 	stats = req->stats;
9215 	if (stats >= CDP_TXRX_MAX_STATS) {
9216 		status = QDF_STATUS_E_INVAL;
9217 		goto fail0;
9218 	}
9219 
9220 	/*
9221 	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
9222 	 *			has to be updated if new FW HTT stats added
9223 	 */
9224 	if (stats > CDP_TXRX_STATS_HTT_MAX)
9225 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
9226 
9227 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
9228 
9229 	if (stats >= num_stats) {
9230 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
9231 			  "%s: Invalid stats option: %d", __func__, stats);
9232 		status = QDF_STATUS_E_INVAL;
9233 		goto fail0;
9234 	}
9235 
9236 	req->stats = stats;
9237 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
9238 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
9239 
9240 	dp_info("stats: %u fw_stats_type: %d host_stats: %d",
9241 		stats, fw_stats, host_stats);
9242 
9243 	if (fw_stats != TXRX_FW_STATS_INVALID) {
9244 		/* update request with FW stats type */
9245 		req->stats = fw_stats;
9246 		status = dp_fw_stats_process(vdev, req);
9247 	} else if ((host_stats != TXRX_HOST_STATS_INVALID) &&
9248 			(host_stats <= TXRX_HOST_STATS_MAX))
9249 		status = dp_print_host_stats(vdev, req, soc);
9250 	else
9251 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
9252 				"Wrong Input for TxRx Stats");
9253 fail0:
9254 	if (vdev)
9255 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9256 	return status;
9257 }
9258 
9259 /*
9260  * dp_txrx_dump_stats() -  Dump statistics
9261  * @value - Statistics option
9262  */
9263 static QDF_STATUS dp_txrx_dump_stats(struct cdp_soc_t *psoc, uint16_t value,
9264 				     enum qdf_stats_verbosity_level level)
9265 {
9266 	struct dp_soc *soc =
9267 		(struct dp_soc *)psoc;
9268 	QDF_STATUS status = QDF_STATUS_SUCCESS;
9269 
9270 	if (!soc) {
9271 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
9272 			"%s: soc is NULL", __func__);
9273 		return QDF_STATUS_E_INVAL;
9274 	}
9275 
9276 	switch (value) {
9277 	case CDP_TXRX_PATH_STATS:
9278 		dp_txrx_path_stats(soc);
9279 		dp_print_soc_interrupt_stats(soc);
9280 		hal_dump_reg_write_stats(soc->hal_soc);
9281 		break;
9282 
9283 	case CDP_RX_RING_STATS:
9284 		dp_print_per_ring_stats(soc);
9285 		break;
9286 
9287 	case CDP_TXRX_TSO_STATS:
9288 		dp_print_tso_stats(soc, level);
9289 		break;
9290 
9291 	case CDP_DUMP_TX_FLOW_POOL_INFO:
9292 		if (level == QDF_STATS_VERBOSITY_LEVEL_HIGH)
9293 			cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
9294 		break;
9295 
9296 	case CDP_DP_NAPI_STATS:
9297 		dp_print_napi_stats(soc);
9298 		break;
9299 
9300 	case CDP_TXRX_DESC_STATS:
9301 		/* TODO: NOT IMPLEMENTED */
9302 		break;
9303 
9304 	case CDP_DP_RX_FISA_STATS:
9305 		dp_rx_dump_fisa_stats(soc);
9306 		break;
9307 
9308 	default:
9309 		status = QDF_STATUS_E_INVAL;
9310 		break;
9311 	}
9312 
9313 	return status;
9314 
9315 }
9316 
9317 /**
9318  * dp_txrx_clear_dump_stats() - clear dumpStats
9319  * @soc- soc handle
9320  * @value - stats option
9321  *
9322  * Return: 0 - Success, non-zero - failure
9323  */
9324 static
9325 QDF_STATUS dp_txrx_clear_dump_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
9326 				    uint8_t value)
9327 {
9328 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9329 	QDF_STATUS status = QDF_STATUS_SUCCESS;
9330 
9331 	if (!soc) {
9332 		dp_err("%s: soc is NULL", __func__);
9333 		return QDF_STATUS_E_INVAL;
9334 	}
9335 
9336 	switch (value) {
9337 	case CDP_TXRX_TSO_STATS:
9338 		dp_txrx_clear_tso_stats(soc);
9339 		break;
9340 
9341 	default:
9342 		status = QDF_STATUS_E_INVAL;
9343 		break;
9344 	}
9345 
9346 	return status;
9347 }
9348 
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * dp_update_flow_control_parameters() - record tx flow control ini
 *                            parameters in the soc config
 * @soc: soc handle
 * @params: ini parameter handle
 *
 * Return: void
 */
static inline
void dp_update_flow_control_parameters(struct dp_soc *soc,
				struct cdp_config_params *params)
{
	/* Start offset and stop threshold for the tx flow pool */
	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
					params->tx_flow_start_queue_offset;
	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
					params->tx_flow_stop_queue_threshold;
}
#else
/* Flow control v2 compiled out: nothing to record */
static inline
void dp_update_flow_control_parameters(struct dp_soc *soc,
				struct cdp_config_params *params)
{
}
#endif
9374 
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
/* Max packet limit for TX Comp packet loop (dp_tx_comp_handler) */
#define DP_TX_COMP_LOOP_PKT_LIMIT_MAX 1024

/* Max packet limit for RX REAP Loop (dp_rx_process) */
#define DP_RX_REAP_LOOP_PKT_LIMIT_MAX 1024

/**
 * dp_update_rx_soft_irq_limit_params() - record rx softirq time-limit
 *                            ini parameters in the soc config
 * @soc: soc handle
 * @params: ini parameter handle
 *
 * End-of-loop data checks are enabled only while the configured limit
 * stays below the corresponding hard maximum.
 *
 * Return: void
 */
static
void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
					struct cdp_config_params *params)
{
	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit =
				params->tx_comp_loop_pkt_limit;
	soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check =
		(params->tx_comp_loop_pkt_limit <
		 DP_TX_COMP_LOOP_PKT_LIMIT_MAX);

	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit =
				params->rx_reap_loop_pkt_limit;
	soc->wlan_cfg_ctx->rx_enable_eol_data_check =
		(params->rx_reap_loop_pkt_limit <
		 DP_RX_REAP_LOOP_PKT_LIMIT_MAX);

	soc->wlan_cfg_ctx->rx_hp_oos_update_limit =
				params->rx_hp_oos_update_limit;

	dp_info("tx_comp_loop_pkt_limit %u tx_comp_enable_eol_data_check %u rx_reap_loop_pkt_limit %u rx_enable_eol_data_check %u rx_hp_oos_update_limit %u",
		soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit,
		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check,
		soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit,
		soc->wlan_cfg_ctx->rx_enable_eol_data_check,
		soc->wlan_cfg_ctx->rx_hp_oos_update_limit);
}
#else
static inline
void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
					struct cdp_config_params *params)
{ }
#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
9418 
9419 /**
9420  * dp_update_config_parameters() - API to store datapath
9421  *                            config parameters
9422  * @soc: soc handle
9423  * @cfg: ini parameter handle
9424  *
9425  * Return: status
9426  */
9427 static
9428 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
9429 				struct cdp_config_params *params)
9430 {
9431 	struct dp_soc *soc = (struct dp_soc *)psoc;
9432 
9433 	if (!(soc)) {
9434 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
9435 				"%s: Invalid handle", __func__);
9436 		return QDF_STATUS_E_INVAL;
9437 	}
9438 
9439 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
9440 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
9441 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
9442 	soc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload =
9443 				params->p2p_tcp_udp_checksumoffload;
9444 	soc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload =
9445 				params->nan_tcp_udp_checksumoffload;
9446 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
9447 				params->tcp_udp_checksumoffload;
9448 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
9449 	soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
9450 	soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
9451 
9452 	dp_update_rx_soft_irq_limit_params(soc, params);
9453 	dp_update_flow_control_parameters(soc, params);
9454 
9455 	return QDF_STATUS_SUCCESS;
9456 }
9457 
/* WDS (wireless distribution system) ops table exported through cdp */
static struct cdp_wds_ops dp_ops_wds = {
	.vdev_set_wds = dp_vdev_set_wds,
#ifdef WDS_VENDOR_EXTENSION
	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
#endif
};
9465 
9466 /*
9467  * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
9468  * @soc_hdl - datapath soc handle
9469  * @vdev_id - virtual interface id
9470  * @callback - callback function
9471  * @ctxt: callback context
9472  *
9473  */
9474 static void
9475 dp_txrx_data_tx_cb_set(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
9476 		       ol_txrx_data_tx_cb callback, void *ctxt)
9477 {
9478 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9479 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9480 						     DP_MOD_ID_CDP);
9481 
9482 	if (!vdev)
9483 		return;
9484 
9485 	vdev->tx_non_std_data_callback.func = callback;
9486 	vdev->tx_non_std_data_callback.ctxt = ctxt;
9487 
9488 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9489 }
9490 
9491 /**
9492  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
9493  * @soc: datapath soc handle
9494  * @pdev_id: id of datapath pdev handle
9495  *
9496  * Return: opaque pointer to dp txrx handle
9497  */
9498 static void *dp_pdev_get_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id)
9499 {
9500 	struct dp_pdev *pdev =
9501 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9502 						   pdev_id);
9503 	if (qdf_unlikely(!pdev))
9504 		return NULL;
9505 
9506 	return pdev->dp_txrx_handle;
9507 }
9508 
9509 /**
9510  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
9511  * @soc: datapath soc handle
9512  * @pdev_id: id of datapath pdev handle
9513  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
9514  *
9515  * Return: void
9516  */
9517 static void
9518 dp_pdev_set_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id,
9519 			   void *dp_txrx_hdl)
9520 {
9521 	struct dp_pdev *pdev =
9522 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9523 						   pdev_id);
9524 
9525 	if (!pdev)
9526 		return;
9527 
9528 	pdev->dp_txrx_handle = dp_txrx_hdl;
9529 }
9530 
9531 /**
9532  * dp_vdev_get_dp_ext_handle() - get dp handle from vdev
9533  * @soc: datapath soc handle
9534  * @vdev_id: vdev id
9535  *
9536  * Return: opaque pointer to dp txrx handle
9537  */
9538 static void *dp_vdev_get_dp_ext_handle(ol_txrx_soc_handle soc_hdl,
9539 				       uint8_t vdev_id)
9540 {
9541 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9542 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9543 						     DP_MOD_ID_CDP);
9544 	void *dp_ext_handle;
9545 
9546 	if (!vdev)
9547 		return NULL;
9548 	dp_ext_handle = vdev->vdev_dp_ext_handle;
9549 
9550 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9551 	return dp_ext_handle;
9552 }
9553 
9554 /**
9555  * dp_vdev_set_dp_ext_handle() - set dp handle in vdev
9556  * @soc: datapath soc handle
9557  * @vdev_id: vdev id
9558  * @size: size of advance dp handle
9559  *
9560  * Return: QDF_STATUS
9561  */
9562 static QDF_STATUS
9563 dp_vdev_set_dp_ext_handle(ol_txrx_soc_handle soc_hdl, uint8_t vdev_id,
9564 			  uint16_t size)
9565 {
9566 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9567 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9568 						     DP_MOD_ID_CDP);
9569 	void *dp_ext_handle;
9570 
9571 	if (!vdev)
9572 		return QDF_STATUS_E_FAILURE;
9573 
9574 	dp_ext_handle = qdf_mem_malloc(size);
9575 
9576 	if (!dp_ext_handle) {
9577 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9578 		return QDF_STATUS_E_FAILURE;
9579 	}
9580 
9581 	vdev->vdev_dp_ext_handle = dp_ext_handle;
9582 
9583 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9584 	return QDF_STATUS_SUCCESS;
9585 }
9586 
9587 /**
9588  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
9589  * @soc_handle: datapath soc handle
9590  *
9591  * Return: opaque pointer to external dp (non-core DP)
9592  */
9593 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
9594 {
9595 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9596 
9597 	return soc->external_txrx_handle;
9598 }
9599 
9600 /**
9601  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
9602  * @soc_handle: datapath soc handle
9603  * @txrx_handle: opaque pointer to external dp (non-core DP)
9604  *
9605  * Return: void
9606  */
9607 static void
9608 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
9609 {
9610 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9611 
9612 	soc->external_txrx_handle = txrx_handle;
9613 }
9614 
9615 /**
9616  * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping
9617  * @soc_hdl: datapath soc handle
9618  * @pdev_id: id of the datapath pdev handle
9619  * @lmac_id: lmac id
9620  *
9621  * Return: QDF_STATUS
9622  */
9623 static QDF_STATUS
9624 dp_soc_map_pdev_to_lmac
9625 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
9626 	 uint32_t lmac_id)
9627 {
9628 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9629 
9630 	wlan_cfg_set_hw_mac_idx(soc->wlan_cfg_ctx,
9631 				pdev_id,
9632 				lmac_id);
9633 
9634 	/*Set host PDEV ID for lmac_id*/
9635 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
9636 			      pdev_id,
9637 			      lmac_id);
9638 
9639 	return QDF_STATUS_SUCCESS;
9640 }
9641 
9642 /**
9643  * dp_soc_handle_pdev_mode_change() - Update pdev to lmac mapping
9644  * @soc_hdl: datapath soc handle
9645  * @pdev_id: id of the datapath pdev handle
9646  * @lmac_id: lmac id
9647  *
9648  * In the event of a dynamic mode change, update the pdev to lmac mapping
9649  *
9650  * Return: QDF_STATUS
9651  */
9652 static QDF_STATUS
9653 dp_soc_handle_pdev_mode_change
9654 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
9655 	 uint32_t lmac_id)
9656 {
9657 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9658 	struct dp_vdev *vdev = NULL;
9659 	uint8_t hw_pdev_id, mac_id;
9660 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc,
9661 								  pdev_id);
9662 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
9663 
9664 	if (qdf_unlikely(!pdev))
9665 		return QDF_STATUS_E_FAILURE;
9666 
9667 	pdev->lmac_id = lmac_id;
9668 	dp_info(" mode change %d %d\n", pdev->pdev_id, pdev->lmac_id);
9669 
9670 	/*Set host PDEV ID for lmac_id*/
9671 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
9672 			      pdev->pdev_id,
9673 			      lmac_id);
9674 
9675 	hw_pdev_id =
9676 		dp_get_target_pdev_id_for_host_pdev_id(soc,
9677 						       pdev->pdev_id);
9678 
9679 	/*
9680 	 * When NSS offload is enabled, send pdev_id->lmac_id
9681 	 * and pdev_id to hw_pdev_id to NSS FW
9682 	 */
9683 	if (nss_config) {
9684 		mac_id = pdev->lmac_id;
9685 		if (soc->cdp_soc.ol_ops->pdev_update_lmac_n_target_pdev_id)
9686 			soc->cdp_soc.ol_ops->
9687 				pdev_update_lmac_n_target_pdev_id(
9688 				soc->ctrl_psoc,
9689 				&pdev_id, &mac_id, &hw_pdev_id);
9690 	}
9691 
9692 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
9693 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
9694 		HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
9695 						hw_pdev_id);
9696 		vdev->lmac_id = pdev->lmac_id;
9697 	}
9698 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
9699 
9700 	return QDF_STATUS_SUCCESS;
9701 }
9702 
9703 /**
9704  * dp_soc_set_pdev_status_down() - set pdev down/up status
9705  * @soc: datapath soc handle
9706  * @pdev_id: id of datapath pdev handle
9707  * @is_pdev_down: pdev down/up status
9708  *
9709  * Return: QDF_STATUS
9710  */
9711 static QDF_STATUS
9712 dp_soc_set_pdev_status_down(struct cdp_soc_t *soc, uint8_t pdev_id,
9713 			    bool is_pdev_down)
9714 {
9715 	struct dp_pdev *pdev =
9716 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9717 						   pdev_id);
9718 	if (!pdev)
9719 		return QDF_STATUS_E_FAILURE;
9720 
9721 	pdev->is_pdev_down = is_pdev_down;
9722 	return QDF_STATUS_SUCCESS;
9723 }
9724 
9725 /**
9726  * dp_get_cfg_capabilities() - get dp capabilities
9727  * @soc_handle: datapath soc handle
9728  * @dp_caps: enum for dp capabilities
9729  *
9730  * Return: bool to determine if dp caps is enabled
9731  */
9732 static bool
9733 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
9734 			enum cdp_capabilities dp_caps)
9735 {
9736 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9737 
9738 	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
9739 }
9740 
9741 #ifdef FEATURE_AST
/**
 * dp_peer_teardown_wifi3() - drop AST state for a peer being torn down
 * @soc_hdl: datapath soc handle
 * @vdev_id: id of the vdev the peer belongs to
 * @peer_mac: peer mac address
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_peer_teardown_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		       uint8_t *peer_mac)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_peer *peer =
			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
					       DP_MOD_ID_CDP);

	/* Peer can be null for monitor vap mac address */
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Invalid peer\n", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	/* A peer still in INIT never finished setup; clean it up now */
	if (peer->peer_state == DP_PEER_STATE_INIT)
		dp_peer_cleanup(peer->vdev, peer);

	/* AST removal and the state transition happen under ast_lock */
	qdf_spin_lock_bh(&soc->ast_lock);
	dp_peer_delete_ast_entries(soc, peer);

	dp_peer_update_state(soc, peer, DP_PEER_STATE_LOGICAL_DELETE);
	qdf_spin_unlock_bh(&soc->ast_lock);

	/* Drop the reference taken by dp_peer_find_hash_find() */
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return status;
}
9771 #endif
9772 
9773 #ifdef ATH_SUPPORT_NAC_RSSI
9774 /**
9775  * dp_vdev_get_neighbour_rssi(): Store RSSI for configured NAC
9776  * @soc_hdl: DP soc handle
9777  * @vdev_id: id of DP vdev handle
9778  * @mac_addr: neighbour mac
9779  * @rssi: rssi value
9780  *
9781  * Return: 0 for success. nonzero for failure.
9782  */
9783 static QDF_STATUS  dp_vdev_get_neighbour_rssi(struct cdp_soc_t *soc_hdl,
9784 					      uint8_t vdev_id,
9785 					      char *mac_addr,
9786 					      uint8_t *rssi)
9787 {
9788 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9789 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9790 						     DP_MOD_ID_CDP);
9791 	struct dp_pdev *pdev;
9792 	struct dp_neighbour_peer *peer = NULL;
9793 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
9794 
9795 	if (!vdev)
9796 		return status;
9797 
9798 	pdev = vdev->pdev;
9799 	*rssi = 0;
9800 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
9801 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
9802 		      neighbour_peer_list_elem) {
9803 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
9804 				mac_addr, QDF_MAC_ADDR_SIZE) == 0) {
9805 			*rssi = peer->rssi;
9806 			status = QDF_STATUS_SUCCESS;
9807 			break;
9808 		}
9809 	}
9810 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
9811 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9812 	return status;
9813 }
9814 
9815 static QDF_STATUS
9816 dp_config_for_nac_rssi(struct cdp_soc_t *cdp_soc,
9817 		       uint8_t vdev_id,
9818 		       enum cdp_nac_param_cmd cmd, char *bssid,
9819 		       char *client_macaddr,
9820 		       uint8_t chan_num)
9821 {
9822 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
9823 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9824 						     DP_MOD_ID_CDP);
9825 	struct dp_pdev *pdev;
9826 
9827 	if (!vdev)
9828 		return QDF_STATUS_E_FAILURE;
9829 
9830 	pdev = (struct dp_pdev *)vdev->pdev;
9831 	pdev->nac_rssi_filtering = 1;
9832 	/* Store address of NAC (neighbour peer) which will be checked
9833 	 * against TA of received packets.
9834 	 */
9835 
9836 	if (cmd == CDP_NAC_PARAM_ADD) {
9837 		dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id,
9838 						 DP_NAC_PARAM_ADD,
9839 						 (uint8_t *)client_macaddr);
9840 	} else if (cmd == CDP_NAC_PARAM_DEL) {
9841 		dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id,
9842 						 DP_NAC_PARAM_DEL,
9843 						 (uint8_t *)client_macaddr);
9844 	}
9845 
9846 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
9847 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
9848 			(soc->ctrl_psoc, pdev->pdev_id,
9849 			 vdev->vdev_id, cmd, bssid, client_macaddr);
9850 
9851 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9852 	return QDF_STATUS_SUCCESS;
9853 }
9854 #endif
9855 
9856 /**
9857  * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering
9858  * for pktlog
9859  * @soc: cdp_soc handle
9860  * @pdev_id: id of dp pdev handle
9861  * @mac_addr: Peer mac address
9862  * @enb_dsb: Enable or disable peer based filtering
9863  *
9864  * Return: QDF_STATUS
9865  */
9866 static int
9867 dp_enable_peer_based_pktlog(struct cdp_soc_t *soc, uint8_t pdev_id,
9868 			    uint8_t *mac_addr, uint8_t enb_dsb)
9869 {
9870 	struct dp_peer *peer;
9871 	struct dp_pdev *pdev =
9872 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9873 						   pdev_id);
9874 
9875 	if (!pdev)
9876 		return QDF_STATUS_E_FAILURE;
9877 
9878 	peer = dp_peer_find_hash_find((struct dp_soc *)soc, mac_addr,
9879 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
9880 
9881 	if (!peer) {
9882 		dp_err("Invalid Peer");
9883 		return QDF_STATUS_E_FAILURE;
9884 	}
9885 
9886 	peer->peer_based_pktlog_filter = enb_dsb;
9887 	pdev->dp_peer_based_pktlog = enb_dsb;
9888 
9889 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9890 
9891 	return QDF_STATUS_SUCCESS;
9892 }
9893 
#ifndef WLAN_SUPPORT_RX_TAG_STATISTICS
/**
 * dp_dump_pdev_rx_protocol_tag_stats - dump the number of packets tagged for
 * given protocol type (RX_PROTOCOL_TAG_ALL indicates for all protocol)
 * @soc: cdp_soc handle
 * @pdev_id: id of cdp_pdev handle
 * @protocol_type: protocol type for which stats should be displayed
 *
 * Return: none
 */
static inline void
dp_dump_pdev_rx_protocol_tag_stats(struct cdp_soc_t  *soc, uint8_t pdev_id,
				   uint16_t protocol_type)
{
	/* No-op: protocol tag statistics support is compiled out */
}
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
9910 
#ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
/**
 * dp_update_pdev_rx_protocol_tag - Add/remove a protocol tag that should be
 * applied to the desired protocol type packets
 * @soc: soc handle
 * @pdev_id: id of cdp_pdev handle
 * @enable_rx_protocol_tag: bitmask that indicates what protocol types
 * are enabled for tagging. zero indicates disable feature, non-zero indicates
 * enable feature
 * @protocol_type: new protocol type for which the tag is being added
 * @tag: user configured tag for the new protocol
 *
 * Return: Success
 */
static inline QDF_STATUS
dp_update_pdev_rx_protocol_tag(struct cdp_soc_t  *soc, uint8_t pdev_id,
			       uint32_t enable_rx_protocol_tag,
			       uint16_t protocol_type,
			       uint16_t tag)
{
	/* No-op stub: protocol tagging support is compiled out */
	return QDF_STATUS_SUCCESS;
}
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
9934 
#ifndef WLAN_SUPPORT_RX_FLOW_TAG
/**
 * dp_set_rx_flow_tag - add/delete a flow
 * @cdp_soc: soc handle
 * @pdev_id: id of cdp_pdev handle
 * @flow_info: flow tuple that is to be added to/deleted from flow search table
 *
 * Return: Success
 */
static inline QDF_STATUS
dp_set_rx_flow_tag(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
		   struct cdp_rx_flow_info *flow_info)
{
	/* No-op stub: rx flow tagging support is compiled out */
	return QDF_STATUS_SUCCESS;
}
/**
 * dp_dump_rx_flow_tag_stats - dump the number of packets tagged for
 * given flow 5-tuple
 * @cdp_soc: soc handle
 * @pdev_id: id of cdp_pdev handle
 * @flow_info: flow 5-tuple for which stats should be displayed
 *
 * Return: Success
 */
static inline QDF_STATUS
dp_dump_rx_flow_tag_stats(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
			  struct cdp_rx_flow_info *flow_info)
{
	/* No-op stub: rx flow tagging support is compiled out */
	return QDF_STATUS_SUCCESS;
}
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */
9966 
9967 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
9968 					   uint32_t max_peers,
9969 					   uint32_t max_ast_index,
9970 					   bool peer_map_unmap_v2)
9971 {
9972 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9973 
9974 	soc->max_peers = max_peers;
9975 
9976 	qdf_print ("%s max_peers %u, max_ast_index: %u\n",
9977 		   __func__, max_peers, max_ast_index);
9978 	wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);
9979 
9980 	if (dp_peer_find_attach(soc))
9981 		return QDF_STATUS_E_FAILURE;
9982 
9983 	soc->is_peer_map_unmap_v2 = peer_map_unmap_v2;
9984 	soc->peer_map_attach_success = TRUE;
9985 
9986 	return QDF_STATUS_SUCCESS;
9987 }
9988 
9989 static QDF_STATUS dp_soc_set_param(struct cdp_soc_t  *soc_hdl,
9990 				   enum cdp_soc_param_t param,
9991 				   uint32_t value)
9992 {
9993 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9994 
9995 	switch (param) {
9996 	case DP_SOC_PARAM_MSDU_EXCEPTION_DESC:
9997 		soc->num_msdu_exception_desc = value;
9998 		dp_info("num_msdu exception_desc %u",
9999 			value);
10000 		break;
10001 	default:
10002 		dp_info("not handled param %d ", param);
10003 		break;
10004 	}
10005 
10006 	return QDF_STATUS_SUCCESS;
10007 }
10008 
10009 static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
10010 				      void *stats_ctx)
10011 {
10012 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
10013 
10014 	soc->rate_stats_ctx = (struct cdp_soc_rate_stats_ctx *)stats_ctx;
10015 }
10016 
10017 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
10018 /**
10019  * dp_peer_flush_rate_stats_req(): Flush peer rate stats
10020  * @soc: Datapath SOC handle
10021  * @peer: Datapath peer
10022  * @arg: argument to iter function
10023  *
10024  * Return: QDF_STATUS
10025  */
10026 static void
10027 dp_peer_flush_rate_stats_req(struct dp_soc *soc, struct dp_peer *peer,
10028 			     void *arg)
10029 {
10030 	if (peer->bss_peer)
10031 		return;
10032 
10033 	dp_wdi_event_handler(
10034 		WDI_EVENT_FLUSH_RATE_STATS_REQ,
10035 		soc, peer->wlanstats_ctx,
10036 		peer->peer_id,
10037 		WDI_NO_VAL, peer->vdev->pdev->pdev_id);
10038 }
10039 
10040 /**
10041  * dp_flush_rate_stats_req(): Flush peer rate stats in pdev
10042  * @soc_hdl: Datapath SOC handle
10043  * @pdev_id: pdev_id
10044  *
10045  * Return: QDF_STATUS
10046  */
10047 static QDF_STATUS dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
10048 					  uint8_t pdev_id)
10049 {
10050 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10051 	struct dp_pdev *pdev =
10052 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10053 						   pdev_id);
10054 	if (!pdev)
10055 		return QDF_STATUS_E_FAILURE;
10056 
10057 	dp_pdev_iterate_peer(pdev, dp_peer_flush_rate_stats_req, NULL,
10058 			     DP_MOD_ID_CDP);
10059 
10060 	return QDF_STATUS_SUCCESS;
10061 }
10062 #else
/* Stats flush is a no-op when per-packet rate stats are compiled out */
static inline QDF_STATUS
dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
			uint8_t pdev_id)
{
	return QDF_STATUS_SUCCESS;
}
10069 #endif
10070 
10071 static void *dp_peer_get_wlan_stats_ctx(struct cdp_soc_t *soc_hdl,
10072 					uint8_t vdev_id,
10073 					uint8_t *mac_addr)
10074 {
10075 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10076 	struct dp_peer *peer;
10077 	void *wlanstats_ctx = NULL;
10078 
10079 	if (mac_addr) {
10080 		peer = dp_peer_find_hash_find(soc, mac_addr,
10081 					      0, vdev_id,
10082 					      DP_MOD_ID_CDP);
10083 		if (!peer)
10084 			return NULL;
10085 
10086 		wlanstats_ctx = peer->wlanstats_ctx;
10087 
10088 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10089 	}
10090 
10091 	return wlanstats_ctx;
10092 }
10093 
10094 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
10095 static QDF_STATUS dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
10096 					   uint8_t pdev_id,
10097 					   void *buf)
10098 {
10099 	 dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS,
10100 			      (struct dp_soc *)soc, buf, HTT_INVALID_PEER,
10101 			      WDI_NO_VAL, pdev_id);
10102 	return QDF_STATUS_SUCCESS;
10103 }
10104 #else
/* No WDI consumers compiled in: flush request is a no-op */
static inline QDF_STATUS
dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
			 uint8_t pdev_id,
			 void *buf)
{
	return QDF_STATUS_SUCCESS;
}
10112 #endif
10113 
10114 static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle)
10115 {
10116 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
10117 
10118 	return soc->rate_stats_ctx;
10119 }
10120 
10121 /*
10122  * dp_get_cfg() - get dp cfg
10123  * @soc: cdp soc handle
10124  * @cfg: cfg enum
10125  *
10126  * Return: cfg value
10127  */
10128 static uint32_t dp_get_cfg(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg)
10129 {
10130 	struct dp_soc *dpsoc = (struct dp_soc *)soc;
10131 	uint32_t value = 0;
10132 
10133 	switch (cfg) {
10134 	case cfg_dp_enable_data_stall:
10135 		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
10136 		break;
10137 	case cfg_dp_enable_p2p_ip_tcp_udp_checksum_offload:
10138 		value = dpsoc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload;
10139 		break;
10140 	case cfg_dp_enable_nan_ip_tcp_udp_checksum_offload:
10141 		value = dpsoc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload;
10142 		break;
10143 	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
10144 		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
10145 		break;
10146 	case cfg_dp_disable_legacy_mode_csum_offload:
10147 		value = dpsoc->wlan_cfg_ctx->
10148 					legacy_mode_checksumoffload_disable;
10149 		break;
10150 	case cfg_dp_tso_enable:
10151 		value = dpsoc->wlan_cfg_ctx->tso_enabled;
10152 		break;
10153 	case cfg_dp_lro_enable:
10154 		value = dpsoc->wlan_cfg_ctx->lro_enabled;
10155 		break;
10156 	case cfg_dp_gro_enable:
10157 		value = dpsoc->wlan_cfg_ctx->gro_enabled;
10158 		break;
10159 	case cfg_dp_tx_flow_start_queue_offset:
10160 		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
10161 		break;
10162 	case cfg_dp_tx_flow_stop_queue_threshold:
10163 		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
10164 		break;
10165 	case cfg_dp_disable_intra_bss_fwd:
10166 		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
10167 		break;
10168 	case cfg_dp_pktlog_buffer_size:
10169 		value = dpsoc->wlan_cfg_ctx->pktlog_buffer_size;
10170 		break;
10171 	default:
10172 		value =  0;
10173 	}
10174 
10175 	return value;
10176 }
10177 
10178 #ifdef PEER_FLOW_CONTROL
10179 /**
10180  * dp_tx_flow_ctrl_configure_pdev() - Configure flow control params
10181  * @soc_handle: datapath soc handle
10182  * @pdev_id: id of datapath pdev handle
10183  * @param: ol ath params
10184  * @value: value of the flag
10185  * @buff: Buffer to be passed
10186  *
10187  * Implemented this function same as legacy function. In legacy code, single
10188  * function is used to display stats and update pdev params.
10189  *
10190  * Return: 0 for success. nonzero for failure.
10191  */
10192 static uint32_t dp_tx_flow_ctrl_configure_pdev(struct cdp_soc_t *soc_handle,
10193 					       uint8_t pdev_id,
10194 					       enum _dp_param_t param,
10195 					       uint32_t value, void *buff)
10196 {
10197 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
10198 	struct dp_pdev *pdev =
10199 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10200 						   pdev_id);
10201 
10202 	if (qdf_unlikely(!pdev))
10203 		return 1;
10204 
10205 	soc = pdev->soc;
10206 	if (!soc)
10207 		return 1;
10208 
10209 	switch (param) {
10210 #ifdef QCA_ENH_V3_STATS_SUPPORT
10211 	case DP_PARAM_VIDEO_DELAY_STATS_FC:
10212 		if (value)
10213 			pdev->delay_stats_flag = true;
10214 		else
10215 			pdev->delay_stats_flag = false;
10216 		break;
10217 	case DP_PARAM_VIDEO_STATS_FC:
10218 		qdf_print("------- TID Stats ------\n");
10219 		dp_pdev_print_tid_stats(pdev);
10220 		qdf_print("------ Delay Stats ------\n");
10221 		dp_pdev_print_delay_stats(pdev);
10222 		break;
10223 #endif
10224 	case DP_PARAM_TOTAL_Q_SIZE:
10225 		{
10226 			uint32_t tx_min, tx_max;
10227 
10228 			tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx);
10229 			tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
10230 
10231 			if (!buff) {
10232 				if ((value >= tx_min) && (value <= tx_max)) {
10233 					pdev->num_tx_allowed = value;
10234 				} else {
10235 					QDF_TRACE(QDF_MODULE_ID_DP,
10236 						  QDF_TRACE_LEVEL_INFO,
10237 						  "Failed to update num_tx_allowed, Q_min = %d Q_max = %d",
10238 						  tx_min, tx_max);
10239 					break;
10240 				}
10241 			} else {
10242 				*(int *)buff = pdev->num_tx_allowed;
10243 			}
10244 		}
10245 		break;
10246 	default:
10247 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
10248 			  "%s: not handled param %d ", __func__, param);
10249 		break;
10250 	}
10251 
10252 	return 0;
10253 }
10254 #endif
10255 
10256 /**
10257  * dp_set_pdev_pcp_tid_map_wifi3(): update pcp tid map in pdev
10258  * @psoc: dp soc handle
10259  * @pdev_id: id of DP_PDEV handle
10260  * @pcp: pcp value
10261  * @tid: tid value passed by the user
10262  *
10263  * Return: QDF_STATUS_SUCCESS on success
10264  */
10265 static QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
10266 						uint8_t pdev_id,
10267 						uint8_t pcp, uint8_t tid)
10268 {
10269 	struct dp_soc *soc = (struct dp_soc *)psoc;
10270 
10271 	soc->pcp_tid_map[pcp] = tid;
10272 
10273 	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
10274 	return QDF_STATUS_SUCCESS;
10275 }
10276 
10277 /**
10278  * dp_set_vdev_pcp_tid_map_wifi3(): update pcp tid map in vdev
10279  * @soc: DP soc handle
10280  * @vdev_id: id of DP_VDEV handle
10281  * @pcp: pcp value
10282  * @tid: tid value passed by the user
10283  *
10284  * Return: QDF_STATUS_SUCCESS on success
10285  */
10286 static QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc_hdl,
10287 						uint8_t vdev_id,
10288 						uint8_t pcp, uint8_t tid)
10289 {
10290 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10291 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10292 						     DP_MOD_ID_CDP);
10293 
10294 	if (!vdev)
10295 		return QDF_STATUS_E_FAILURE;
10296 
10297 	vdev->pcp_tid_map[pcp] = tid;
10298 
10299 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10300 	return QDF_STATUS_SUCCESS;
10301 }
10302 
10303 #ifdef QCA_SUPPORT_FULL_MON
/* Cache the full-monitor-mode flag on the soc; the monitor ring
 * processing path reads soc->full_mon_mode.
 */
static inline QDF_STATUS
dp_config_full_mon_mode(struct cdp_soc_t *soc_handle,
			uint8_t val)
{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;

	soc->full_mon_mode = val;
	/* NOTE(review): alert level looks heavy for a routine config
	 * change — confirm this is intentional (always-visible logging).
	 */
	qdf_alert("Configure full monitor mode val: %d ", val);

	return QDF_STATUS_SUCCESS;
}
10315 #else
10316 static inline QDF_STATUS
10317 dp_config_full_mon_mode(struct cdp_soc_t *soc_handle,
10318 			uint8_t val)
10319 {
10320 	return 0;
10321 }
10322 #endif
10323 
/* Common cdp ops table: binds the generic cdp_cmn_ops dispatch interface
 * to the wifi3.0 DP implementations in this file (soc/pdev/vdev/peer
 * lifecycle, AST management, stats, and misc configuration hooks).
 */
static struct cdp_cmn_ops dp_ops_cmn = {
	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
	.txrx_vdev_attach = dp_vdev_attach_wifi3,
	.txrx_vdev_detach = dp_vdev_detach_wifi3,
	.txrx_pdev_attach = dp_pdev_attach_wifi3,
	.txrx_pdev_post_attach = dp_pdev_post_attach_wifi3,
	.txrx_pdev_detach = dp_pdev_detach_wifi3,
	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
	.txrx_peer_create = dp_peer_create_wifi3,
	.txrx_peer_setup = dp_peer_setup_wifi3,
#ifdef FEATURE_AST
	.txrx_peer_teardown = dp_peer_teardown_wifi3,
#else
	.txrx_peer_teardown = NULL,
#endif
	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
	.txrx_peer_get_ast_info_by_pdev =
		dp_peer_get_ast_info_by_pdevid_wifi3,
	.txrx_peer_ast_delete_by_soc =
		dp_peer_ast_entry_del_by_soc,
	.txrx_peer_ast_delete_by_pdev =
		dp_peer_ast_entry_del_by_pdev,
	.txrx_peer_delete = dp_peer_delete_wifi3,
	.txrx_vdev_register = dp_vdev_register_wifi3,
	.txrx_soc_detach = dp_soc_detach_wifi3,
	.txrx_soc_deinit = dp_soc_deinit_wifi3,
	.txrx_soc_init = dp_soc_init_wifi3,
	.txrx_tso_soc_attach = dp_tso_soc_attach,
	.txrx_tso_soc_detach = dp_tso_soc_detach,
	.txrx_pdev_init = dp_pdev_init_wifi3,
	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
	.txrx_get_mon_vdev_from_pdev = dp_get_mon_vdev_from_pdev_wifi3,
	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
	.txrx_ath_getstats = dp_get_device_stats,
	.addba_requestprocess = dp_addba_requestprocess_wifi3,
	.addba_responsesetup = dp_addba_responsesetup_wifi3,
	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
	.delba_process = dp_delba_process_wifi3,
	.set_addba_response = dp_set_addba_response,
	.flush_cache_rx_queue = NULL,
	/* TODO: get API's for dscp-tid need to be added*/
	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
	.txrx_get_total_per = dp_get_total_per,
	.txrx_stats_request = dp_txrx_stats_request,
	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
	.display_stats = dp_txrx_dump_stats,
	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
	.txrx_intr_detach = dp_soc_interrupt_detach,
	.set_pn_check = dp_set_pn_check_wifi3,
	.set_key_sec_type = dp_set_key_sec_type_wifi3,
	.update_config_parameters = dp_update_config_parameters,
	/* TODO: Add other functions */
	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
	.get_vdev_dp_ext_txrx_handle = dp_vdev_get_dp_ext_handle,
	.set_vdev_dp_ext_txrx_handle = dp_vdev_set_dp_ext_handle,
	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
	.map_pdev_to_lmac = dp_soc_map_pdev_to_lmac,
	.handle_mode_change = dp_soc_handle_pdev_mode_change,
	.set_pdev_status_down = dp_soc_set_pdev_status_down,
	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
	.tx_send = dp_tx_send,
	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
	.set_soc_param = dp_soc_set_param,
	.txrx_get_os_rx_handles_from_vdev =
					dp_get_os_rx_handles_from_vdev_wifi3,
	.delba_tx_completion = dp_delba_tx_completion_wifi3,
	.get_dp_capabilities = dp_get_cfg_capabilities,
	.txrx_get_cfg = dp_get_cfg,
	.set_rate_stats_ctx = dp_soc_set_rate_stats_ctx,
	.get_rate_stats_ctx = dp_soc_get_rate_stats_ctx,
	.txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats,
	.txrx_flush_rate_stats_request = dp_flush_rate_stats_req,
	.txrx_peer_get_wlan_stats_ctx = dp_peer_get_wlan_stats_ctx,

	.set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3,
	.set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3,

	.txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler,
#ifdef QCA_MULTIPASS_SUPPORT
	.set_vlan_groupkey = dp_set_vlan_groupkey,
#endif
	.get_peer_mac_list = dp_get_peer_mac_list,
	.tx_send_exc = dp_tx_send_exception,
};
10419 
/* Control-path cdp ops table: parameter get/set, WDI event subscription,
 * protocol tagging/flow tagging, and feature-specific control hooks.
 */
static struct cdp_ctrl_ops dp_ops_ctrl = {
	.txrx_peer_authorize = dp_peer_authorize,
#ifdef VDEV_PEER_PROTOCOL_COUNT
	.txrx_enable_peer_protocol_count = dp_enable_vdev_peer_protocol_count,
	.txrx_set_peer_protocol_drop_mask =
		dp_enable_vdev_peer_protocol_drop_mask,
	.txrx_is_peer_protocol_count_enabled =
		dp_is_vdev_peer_protocol_count_enabled,
	.txrx_get_peer_protocol_drop_mask = dp_get_vdev_peer_protocol_drop_mask,
#endif
	.txrx_set_vdev_param = dp_set_vdev_param,
	.txrx_set_psoc_param = dp_set_psoc_param,
	.txrx_get_psoc_param = dp_get_psoc_param,
	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
#if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
	.txrx_update_filter_neighbour_peers =
		dp_update_filter_neighbour_peers,
#endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
	.txrx_get_sec_type = dp_get_sec_type,
	.txrx_wdi_event_sub = dp_wdi_event_sub,
	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
#ifdef WDI_EVENT_ENABLE
	.txrx_get_pldev = dp_get_pldev,
#endif
	.txrx_set_pdev_param = dp_set_pdev_param,
	.txrx_get_pdev_param = dp_get_pdev_param,
	.txrx_set_peer_param = dp_set_peer_param,
	.txrx_get_peer_param = dp_get_peer_param,
#ifdef VDEV_PEER_PROTOCOL_COUNT
	.txrx_peer_protocol_cnt = dp_peer_stats_update_protocol_cnt,
#endif
#ifdef ATH_SUPPORT_NAC_RSSI
	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
#endif
#ifdef WLAN_SUPPORT_MSCS
	.txrx_record_mscs_params = dp_record_mscs_params,
#endif
	.set_key = dp_set_michael_key,
	.txrx_get_vdev_param = dp_get_vdev_param,
	.enable_peer_based_pktlog = dp_enable_peer_based_pktlog,
	.calculate_delay_stats = dp_calculate_delay_stats,
#ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
	.txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag,
#ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
	.txrx_dump_pdev_rx_protocol_tag_stats =
				dp_dump_pdev_rx_protocol_tag_stats,
#endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
#endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
#ifdef WLAN_SUPPORT_RX_FLOW_TAG
	.txrx_set_rx_flow_tag = dp_set_rx_flow_tag,
	.txrx_dump_rx_flow_tag_stats = dp_dump_rx_flow_tag_stats,
#endif /* WLAN_SUPPORT_RX_FLOW_TAG */
#ifdef QCA_MULTIPASS_SUPPORT
	.txrx_peer_set_vlan_id = dp_peer_set_vlan_id,
#endif /*QCA_MULTIPASS_SUPPORT*/
#if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
	.txrx_update_peer_pkt_capture_params =
		 dp_peer_update_pkt_capture_params,
#endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
};
10482 
/* Multicast-enhancement (IQUE) ops: mcast-to-ucast conversion hooks;
 * table is empty unless ATH_SUPPORT_IQUE is compiled in.
 */
static struct cdp_me_ops dp_ops_me = {
#ifdef ATH_SUPPORT_IQUE
	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
#endif
};
10490 
/* Monitor-mode cdp ops: filter configuration and monitor ring control */
static struct cdp_mon_ops dp_ops_mon = {
	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
	/* Added support for HK advance filter */
	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
	.txrx_deliver_tx_mgmt = dp_deliver_tx_mgmt,
	.config_full_mon_mode = dp_config_full_mon_mode,
};
10498 
/* Host statistics cdp ops: per-peer/vdev/pdev stats query, publish and
 * reset entry points.
 */
static struct cdp_host_stats_ops dp_ops_host_stats = {
	.txrx_per_peer_stats = dp_get_host_peer_stats,
	.get_fw_peer_stats = dp_get_fw_peer_stats,
	.get_htt_stats = dp_get_htt_stats,
#ifdef FEATURE_PERPKT_INFO
	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
#endif /* FEATURE_PERPKT_INFO */
	.txrx_stats_publish = dp_txrx_stats_publish,
	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
	.txrx_get_peer_stats_param = dp_txrx_get_peer_stats_param,
	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
	.txrx_update_vdev_stats = dp_txrx_update_vdev_host_stats,
	/* TODO */
};
10517 
/* Raw-mode (802.11 frame format) ops: no wifi3.0 implementations yet */
static struct cdp_raw_ops dp_ops_raw = {
	/* TODO */
};
10521 
10522 #ifdef PEER_FLOW_CONTROL
/* Peer flow-control ops: positional initializer fills the first member of
 * struct cdp_pflow_ops (presumably the pdev-configure hook — member name
 * is declared in the cdp ops header; confirm there).
 */
static struct cdp_pflow_ops dp_ops_pflow = {
	dp_tx_flow_ctrl_configure_pdev,
};
#endif /* PEER_FLOW_CONTROL */
10527 
10528 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
10529 static struct cdp_cfr_ops dp_ops_cfr = {
10530 	.txrx_cfr_filter = dp_cfr_filter,
10531 	.txrx_get_cfr_rcc = dp_get_cfr_rcc,
10532 	.txrx_set_cfr_rcc = dp_set_cfr_rcc,
10533 	.txrx_get_cfr_dbg_stats = dp_get_cfr_dbg_stats,
10534 	.txrx_clear_cfr_dbg_stats = dp_clear_cfr_dbg_stats,
10535 	.txrx_enable_mon_reap_timer = dp_enable_mon_reap_timer,
10536 };
10537 #endif
10538 
10539 #ifdef FEATURE_RUNTIME_PM
10540 /**
10541  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
10542  * @soc_hdl: Datapath soc handle
10543  * @pdev_id: id of data path pdev handle
10544  *
10545  * DP is ready to runtime suspend if there are no pending TX packets.
10546  *
10547  * Return: QDF_STATUS
10548  */
10549 static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
10550 {
10551 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10552 	struct dp_pdev *pdev;
10553 
10554 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10555 	if (!pdev) {
10556 		dp_err("pdev is NULL");
10557 		return QDF_STATUS_E_INVAL;
10558 	}
10559 
10560 	/* Abort if there are any pending TX packets */
10561 	if (dp_get_tx_pending(dp_pdev_to_cdp_pdev(pdev)) > 0) {
10562 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
10563 			  FL("Abort suspend due to pending TX packets"));
10564 		return QDF_STATUS_E_AGAIN;
10565 	}
10566 
10567 	if (soc->intr_mode == DP_INTR_POLL)
10568 		qdf_timer_stop(&soc->int_timer);
10569 
10570 	return QDF_STATUS_SUCCESS;
10571 }
10572 
10573 /**
10574  * dp_flush_ring_hptp() - Update ring shadow
10575  *			  register HP/TP address when runtime
10576  *                        resume
10577  * @opaque_soc: DP soc context
10578  *
10579  * Return: None
10580  */
10581 static
10582 void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
10583 {
10584 	if (hal_srng && hal_srng_get_clear_event(hal_srng,
10585 						 HAL_SRNG_FLUSH_EVENT)) {
10586 		/* Acquire the lock */
10587 		hal_srng_access_start(soc->hal_soc, hal_srng);
10588 
10589 		hal_srng_access_end(soc->hal_soc, hal_srng);
10590 
10591 		hal_srng_set_flush_last_ts(hal_srng);
10592 	}
10593 }
10594 
10595 /**
10596  * dp_runtime_resume() - ensure DP is ready to runtime resume
10597  * @soc_hdl: Datapath soc handle
10598  * @pdev_id: id of data path pdev handle
10599  *
10600  * Resume DP for runtime PM.
10601  *
10602  * Return: QDF_STATUS
10603  */
10604 static QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
10605 {
10606 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10607 	int i;
10608 
10609 	if (soc->intr_mode == DP_INTR_POLL)
10610 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
10611 
10612 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
10613 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
10614 	}
10615 
10616 	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
10617 
10618 	return QDF_STATUS_SUCCESS;
10619 }
10620 #endif /* FEATURE_RUNTIME_PM */
10621 
10622 /**
10623  * dp_tx_get_success_ack_stats() - get tx success completion count
10624  * @soc_hdl: Datapath soc handle
10625  * @vdevid: vdev identifier
10626  *
10627  * Return: tx success ack count
10628  */
10629 static uint32_t dp_tx_get_success_ack_stats(struct cdp_soc_t *soc_hdl,
10630 					    uint8_t vdev_id)
10631 {
10632 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10633 	struct cdp_vdev_stats *vdev_stats = NULL;
10634 	uint32_t tx_success;
10635 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10636 						     DP_MOD_ID_CDP);
10637 
10638 	if (!vdev) {
10639 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
10640 			  FL("Invalid vdev id %d"), vdev_id);
10641 		return 0;
10642 	}
10643 
10644 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
10645 	if (!vdev_stats) {
10646 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
10647 			  "DP alloc failure - unable to get alloc vdev stats");
10648 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10649 		return 0;
10650 	}
10651 
10652 	dp_aggregate_vdev_stats(vdev, vdev_stats);
10653 
10654 	tx_success = vdev_stats->tx.tx_success.num;
10655 	qdf_mem_free(vdev_stats);
10656 
10657 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10658 	return tx_success;
10659 }
10660 
10661 #ifdef WLAN_SUPPORT_DATA_STALL
10662 /**
10663  * dp_register_data_stall_detect_cb() - register data stall callback
10664  * @soc_hdl: Datapath soc handle
10665  * @pdev_id: id of data path pdev handle
10666  * @data_stall_detect_callback: data stall callback function
10667  *
10668  * Return: QDF_STATUS Enumeration
10669  */
10670 static
10671 QDF_STATUS dp_register_data_stall_detect_cb(
10672 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
10673 			data_stall_detect_cb data_stall_detect_callback)
10674 {
10675 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10676 	struct dp_pdev *pdev;
10677 
10678 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10679 	if (!pdev) {
10680 		dp_err("pdev NULL!");
10681 		return QDF_STATUS_E_INVAL;
10682 	}
10683 
10684 	pdev->data_stall_detect_callback = data_stall_detect_callback;
10685 	return QDF_STATUS_SUCCESS;
10686 }
10687 
10688 /**
10689  * dp_deregister_data_stall_detect_cb() - de-register data stall callback
10690  * @soc_hdl: Datapath soc handle
10691  * @pdev_id: id of data path pdev handle
10692  * @data_stall_detect_callback: data stall callback function
10693  *
10694  * Return: QDF_STATUS Enumeration
10695  */
10696 static
10697 QDF_STATUS dp_deregister_data_stall_detect_cb(
10698 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
10699 			data_stall_detect_cb data_stall_detect_callback)
10700 {
10701 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10702 	struct dp_pdev *pdev;
10703 
10704 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10705 	if (!pdev) {
10706 		dp_err("pdev NULL!");
10707 		return QDF_STATUS_E_INVAL;
10708 	}
10709 
10710 	pdev->data_stall_detect_callback = NULL;
10711 	return QDF_STATUS_SUCCESS;
10712 }
10713 
10714 /**
10715  * dp_txrx_post_data_stall_event() - post data stall event
10716  * @soc_hdl: Datapath soc handle
10717  * @indicator: Module triggering data stall
10718  * @data_stall_type: data stall event type
10719  * @pdev_id: pdev id
10720  * @vdev_id_bitmap: vdev id bitmap
10721  * @recovery_type: data stall recovery type
10722  *
10723  * Return: None
10724  */
10725 static void
10726 dp_txrx_post_data_stall_event(struct cdp_soc_t *soc_hdl,
10727 			      enum data_stall_log_event_indicator indicator,
10728 			      enum data_stall_log_event_type data_stall_type,
10729 			      uint32_t pdev_id, uint32_t vdev_id_bitmap,
10730 			      enum data_stall_log_recovery_type recovery_type)
10731 {
10732 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10733 	struct data_stall_event_info data_stall_info;
10734 	struct dp_pdev *pdev;
10735 
10736 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10737 	if (!pdev) {
10738 		dp_err("pdev NULL!");
10739 		return;
10740 	}
10741 
10742 	if (!pdev->data_stall_detect_callback) {
10743 		dp_err("data stall cb not registered!");
10744 		return;
10745 	}
10746 
10747 	dp_info("data_stall_type: %x pdev_id: %d",
10748 		data_stall_type, pdev_id);
10749 
10750 	data_stall_info.indicator = indicator;
10751 	data_stall_info.data_stall_type = data_stall_type;
10752 	data_stall_info.vdev_id_bitmap = vdev_id_bitmap;
10753 	data_stall_info.pdev_id = pdev_id;
10754 	data_stall_info.recovery_type = recovery_type;
10755 
10756 	pdev->data_stall_detect_callback(&data_stall_info);
10757 }
10758 #endif /* WLAN_SUPPORT_DATA_STALL */
10759 
10760 #ifdef WLAN_FEATURE_STATS_EXT
10761 /* rx hw stats event wait timeout in ms */
10762 #define DP_REO_STATUS_STATS_TIMEOUT 1500
10763 /**
10764  * dp_txrx_ext_stats_request - request dp txrx extended stats request
10765  * @soc_hdl: soc handle
10766  * @pdev_id: pdev id
10767  * @req: stats request
10768  *
10769  * Return: QDF_STATUS
10770  */
10771 static QDF_STATUS
10772 dp_txrx_ext_stats_request(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
10773 			  struct cdp_txrx_ext_stats *req)
10774 {
10775 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10776 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10777 
10778 	if (!pdev) {
10779 		dp_err("pdev is null");
10780 		return QDF_STATUS_E_INVAL;
10781 	}
10782 
10783 	dp_aggregate_pdev_stats(pdev);
10784 
10785 	req->tx_msdu_enqueue = pdev->stats.tx_i.processed.num;
10786 	req->tx_msdu_overflow = pdev->stats.tx_i.dropped.ring_full;
10787 	req->rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
10788 	req->rx_mpdu_delivered = soc->ext_stats.rx_mpdu_received;
10789 	req->rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
10790 	req->rx_mpdu_error = soc->stats.rx.err_ring_pkts -
10791 				soc->stats.rx.rx_frags;
10792 
10793 	return QDF_STATUS_SUCCESS;
10794 }
10795 
10796 /**
10797  * dp_rx_hw_stats_cb - request rx hw stats response callback
10798  * @soc: soc handle
10799  * @cb_ctxt: callback context
10800  * @reo_status: reo command response status
10801  *
10802  * Return: None
10803  */
10804 static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
10805 			      union hal_reo_status *reo_status)
10806 {
10807 	struct dp_req_rx_hw_stats_t *rx_hw_stats = cb_ctxt;
10808 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
10809 	bool is_query_timeout;
10810 
10811 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
10812 	is_query_timeout = rx_hw_stats->is_query_timeout;
10813 	/* free the cb_ctxt if all pending tid stats query is received */
10814 	if (qdf_atomic_dec_and_test(&rx_hw_stats->pending_tid_stats_cnt)) {
10815 		if (!is_query_timeout) {
10816 			qdf_event_set(&soc->rx_hw_stats_event);
10817 			soc->is_last_stats_ctx_init = false;
10818 		}
10819 
10820 		qdf_mem_free(rx_hw_stats);
10821 	}
10822 
10823 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
10824 		dp_info("REO stats failure %d",
10825 			queue_status->header.status);
10826 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
10827 		return;
10828 	}
10829 
10830 	if (!is_query_timeout) {
10831 		soc->ext_stats.rx_mpdu_received +=
10832 					queue_status->mpdu_frms_cnt;
10833 		soc->ext_stats.rx_mpdu_missed +=
10834 					queue_status->late_recv_mpdu_cnt;
10835 	}
10836 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
10837 }
10838 
10839 /**
10840  * dp_request_rx_hw_stats - request rx hardware stats
10841  * @soc_hdl: soc handle
10842  * @vdev_id: vdev id
10843  *
10844  * Return: None
10845  */
10846 static QDF_STATUS
10847 dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
10848 {
10849 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10850 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10851 						     DP_MOD_ID_CDP);
10852 	struct dp_peer *peer = NULL;
10853 	QDF_STATUS status;
10854 	struct dp_req_rx_hw_stats_t *rx_hw_stats;
10855 	int rx_stats_sent_cnt = 0;
10856 	uint32_t last_rx_mpdu_received;
10857 	uint32_t last_rx_mpdu_missed;
10858 
10859 	if (!vdev) {
10860 		dp_err("vdev is null for vdev_id: %u", vdev_id);
10861 		status = QDF_STATUS_E_INVAL;
10862 		goto out;
10863 	}
10864 
10865 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_CDP);
10866 
10867 	if (!peer) {
10868 		dp_err("Peer is NULL");
10869 		status = QDF_STATUS_E_INVAL;
10870 		goto out;
10871 	}
10872 
10873 	rx_hw_stats = qdf_mem_malloc(sizeof(*rx_hw_stats));
10874 
10875 	if (!rx_hw_stats) {
10876 		dp_err("malloc failed for hw stats structure");
10877 		status = QDF_STATUS_E_INVAL;
10878 		goto out;
10879 	}
10880 
10881 	qdf_event_reset(&soc->rx_hw_stats_event);
10882 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
10883 	/* save the last soc cumulative stats and reset it to 0 */
10884 	last_rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
10885 	last_rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
10886 	soc->ext_stats.rx_mpdu_received = 0;
10887 	soc->ext_stats.rx_mpdu_missed = 0;
10888 
10889 	rx_stats_sent_cnt =
10890 		dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, rx_hw_stats);
10891 	if (!rx_stats_sent_cnt) {
10892 		dp_err("no tid stats sent successfully");
10893 		qdf_mem_free(rx_hw_stats);
10894 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
10895 		status = QDF_STATUS_E_INVAL;
10896 		goto out;
10897 	}
10898 	qdf_atomic_set(&rx_hw_stats->pending_tid_stats_cnt,
10899 		       rx_stats_sent_cnt);
10900 	rx_hw_stats->is_query_timeout = false;
10901 	soc->is_last_stats_ctx_init = true;
10902 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
10903 
10904 	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
10905 				       DP_REO_STATUS_STATS_TIMEOUT);
10906 
10907 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
10908 	if (status != QDF_STATUS_SUCCESS) {
10909 		dp_info("rx hw stats event timeout");
10910 		if (soc->is_last_stats_ctx_init)
10911 			rx_hw_stats->is_query_timeout = true;
10912 		/**
10913 		 * If query timeout happened, use the last saved stats
10914 		 * for this time query.
10915 		 */
10916 		soc->ext_stats.rx_mpdu_received = last_rx_mpdu_received;
10917 		soc->ext_stats.rx_mpdu_missed = last_rx_mpdu_missed;
10918 	}
10919 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
10920 
10921 out:
10922 	if (peer)
10923 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10924 	if (vdev)
10925 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10926 
10927 	return status;
10928 }
10929 #endif /* WLAN_FEATURE_STATS_EXT */
10930 
10931 #ifdef DP_PEER_EXTENDED_API
/* Miscellaneous cdp ops: runtime PM, pktlog, data-stall detection and
 * extended stats entry points.
 */
static struct cdp_misc_ops dp_ops_misc = {
#ifdef FEATURE_WLAN_TDLS
	.tx_non_std = dp_tx_non_std,
#endif /* FEATURE_WLAN_TDLS */
	.get_opmode = dp_get_opmode,
#ifdef FEATURE_RUNTIME_PM
	.runtime_suspend = dp_runtime_suspend,
	.runtime_resume = dp_runtime_resume,
#endif /* FEATURE_RUNTIME_PM */
	.pkt_log_init = dp_pkt_log_init,
	.pkt_log_con_service = dp_pkt_log_con_service,
	.get_num_rx_contexts = dp_get_num_rx_contexts,
	.get_tx_ack_stats = dp_tx_get_success_ack_stats,
#ifdef WLAN_SUPPORT_DATA_STALL
	.txrx_data_stall_cb_register = dp_register_data_stall_detect_cb,
	.txrx_data_stall_cb_deregister = dp_deregister_data_stall_detect_cb,
	.txrx_post_data_stall_event = dp_txrx_post_data_stall_event,
#endif

#ifdef WLAN_FEATURE_STATS_EXT
	.txrx_ext_stats_request = dp_txrx_ext_stats_request,
	.request_rx_hw_stats = dp_request_rx_hw_stats,
#endif /* WLAN_FEATURE_STATS_EXT */
};
10956 #endif
10957 
10958 #ifdef DP_FLOW_CTL
/* TX flow-control (v2) ops: per-vdev flow pool management */
static struct cdp_flowctl_ops dp_ops_flowctl = {
	/* WIFI 3.0 DP implement as required. */
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
	.flow_pool_map_handler = dp_tx_flow_pool_map,
	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
	.register_pause_cb = dp_txrx_register_pause_cb,
	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
#endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
};
10969 
/* Legacy (link-layer) flow-control ops: intentionally empty for wifi3.0 */
static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
10973 #endif
10974 
10975 #ifdef IPA_OFFLOAD
/* IPA offload ops: resource/doorbell setup, pipe control and intra-BSS
 * forwarding for the IPA uC datapath.
 */
static struct cdp_ipa_ops dp_ops_ipa = {
	.ipa_get_resource = dp_ipa_get_resource,
	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
	.ipa_op_response = dp_ipa_op_response,
	.ipa_register_op_cb = dp_ipa_register_op_cb,
	.ipa_get_stat = dp_ipa_get_stat,
	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
	.ipa_setup = dp_ipa_setup,
	.ipa_cleanup = dp_ipa_cleanup,
	.ipa_setup_iface = dp_ipa_setup_iface,
	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
	.ipa_enable_pipes = dp_ipa_enable_pipes,
	.ipa_disable_pipes = dp_ipa_disable_pipes,
	.ipa_set_perf_level = dp_ipa_set_perf_level,
	.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd
};
10994 #endif
10995 
10996 #ifdef DP_POWER_SAVE
10997 static QDF_STATUS dp_bus_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
10998 {
10999 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11000 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11001 	int timeout = SUSPEND_DRAIN_WAIT;
11002 	int drain_wait_delay = 50; /* 50 ms */
11003 
11004 	if (qdf_unlikely(!pdev)) {
11005 		dp_err("pdev is NULL");
11006 		return QDF_STATUS_E_INVAL;
11007 	}
11008 
11009 	/* Abort if there are any pending TX packets */
11010 	while (dp_get_tx_pending((struct cdp_pdev *)pdev) > 0) {
11011 		qdf_sleep(drain_wait_delay);
11012 		if (timeout <= 0) {
11013 			dp_err("TX frames are pending, abort suspend");
11014 			return QDF_STATUS_E_TIMEOUT;
11015 		}
11016 		timeout = timeout - drain_wait_delay;
11017 	}
11018 
11019 	if (soc->intr_mode == DP_INTR_POLL)
11020 		qdf_timer_stop(&soc->int_timer);
11021 
11022 	/* Stop monitor reap timer and reap any pending frames in ring */
11023 	if (((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) ||
11024 	     dp_is_enable_reap_timer_non_pkt(pdev)) &&
11025 	    soc->reap_timer_init) {
11026 		qdf_timer_sync_cancel(&soc->mon_reap_timer);
11027 		dp_service_mon_rings(soc, DP_MON_REAP_BUDGET);
11028 	}
11029 
11030 	return QDF_STATUS_SUCCESS;
11031 }
11032 
11033 static QDF_STATUS dp_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
11034 {
11035 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11036 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11037 
11038 	if (qdf_unlikely(!pdev)) {
11039 		dp_err("pdev is NULL");
11040 		return QDF_STATUS_E_INVAL;
11041 	}
11042 
11043 	if (soc->intr_mode == DP_INTR_POLL)
11044 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
11045 
11046 	/* Start monitor reap timer */
11047 	if (((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) ||
11048 	     dp_is_enable_reap_timer_non_pkt(pdev)) &&
11049 	    soc->reap_timer_init)
11050 		qdf_timer_mod(&soc->mon_reap_timer,
11051 			      DP_INTR_POLL_TIMER_MS);
11052 
11053 	return QDF_STATUS_SUCCESS;
11054 }
11055 
11056 /**
11057  * dp_process_wow_ack_rsp() - process wow ack response
11058  * @soc_hdl: datapath soc handle
11059  * @pdev_id: data path pdev handle id
11060  *
11061  * Return: none
11062  */
11063 static void dp_process_wow_ack_rsp(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
11064 {
11065 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11066 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11067 
11068 	if (qdf_unlikely(!pdev)) {
11069 		dp_err("pdev is NULL");
11070 		return;
11071 	}
11072 
11073 	/*
11074 	 * As part of wow enable FW disables the mon status ring and in wow ack
11075 	 * response from FW reap mon status ring to make sure no packets pending
11076 	 * in the ring.
11077 	 */
11078 	if (((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) ||
11079 	     dp_is_enable_reap_timer_non_pkt(pdev)) &&
11080 	    soc->reap_timer_init) {
11081 		dp_service_mon_rings(soc, DP_MON_REAP_BUDGET);
11082 	}
11083 }
11084 
11085 /**
11086  * dp_process_target_suspend_req() - process target suspend request
11087  * @soc_hdl: datapath soc handle
11088  * @pdev_id: data path pdev handle id
11089  *
11090  * Return: none
11091  */
11092 static void dp_process_target_suspend_req(struct cdp_soc_t *soc_hdl,
11093 					  uint8_t pdev_id)
11094 {
11095 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11096 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11097 
11098 	if (qdf_unlikely(!pdev)) {
11099 		dp_err("pdev is NULL");
11100 		return;
11101 	}
11102 
11103 	/* Stop monitor reap timer and reap any pending frames in ring */
11104 	if (((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) ||
11105 	     dp_is_enable_reap_timer_non_pkt(pdev)) &&
11106 	    soc->reap_timer_init) {
11107 		qdf_timer_sync_cancel(&soc->mon_reap_timer);
11108 		dp_service_mon_rings(soc, DP_MON_REAP_BUDGET);
11109 	}
11110 }
11111 
/* Bus power-management ops (compiled under DP_POWER_SAVE) */
static struct cdp_bus_ops dp_ops_bus = {
	.bus_suspend = dp_bus_suspend,
	.bus_resume = dp_bus_resume,
	.process_wow_ack_rsp = dp_process_wow_ack_rsp,
	.process_target_suspend_req = dp_process_target_suspend_req
};
11118 #endif
11119 
#ifdef DP_FLOW_CTL
/* Throttle ops: intentionally left empty for WIFI 3.0 */
static struct cdp_throttle_ops dp_ops_throttle = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

/* Config ops: intentionally left empty for WIFI 3.0 */
static struct cdp_cfg_ops dp_ops_cfg = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};
#endif
11129 
#ifdef DP_PEER_EXTENDED_API
/* OCB ops: intentionally left empty for WIFI 3.0 */
static struct cdp_ocb_ops dp_ops_ocb = {
	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
};

/* Mobile (MCL) stats ops */
static struct cdp_mob_stats_ops dp_ops_mob_stats = {
	.clear_stats = dp_txrx_clear_dump_stats,
};

/* Extended peer ops: registration, lookup and state management */
static struct cdp_peer_ops dp_ops_peer = {
	.register_peer = dp_register_peer,
	.clear_peer = dp_clear_peer,
	.find_peer_exist = dp_find_peer_exist,
	.find_peer_exist_on_vdev = dp_find_peer_exist_on_vdev,
	.find_peer_exist_on_other_vdev = dp_find_peer_exist_on_other_vdev,
	.peer_state_update = dp_peer_state_update,
	.get_vdevid = dp_get_vdevid,
	.get_vdev_by_peer_addr = dp_get_vdev_by_peer_addr,
	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
	.get_peer_state = dp_get_peer_state,
};
#endif
11152 
/* Top-level CDP ops table exported to the control path via
 * soc->cdp_soc.ops; optional sub-tables are included per feature flag.
 */
static struct cdp_ops dp_txrx_ops = {
	.cmn_drv_ops = &dp_ops_cmn,
	.ctrl_ops = &dp_ops_ctrl,
	.me_ops = &dp_ops_me,
	.mon_ops = &dp_ops_mon,
	.host_stats_ops = &dp_ops_host_stats,
	.wds_ops = &dp_ops_wds,
	.raw_ops = &dp_ops_raw,
#ifdef PEER_FLOW_CONTROL
	.pflow_ops = &dp_ops_pflow,
#endif /* PEER_FLOW_CONTROL */
#ifdef DP_PEER_EXTENDED_API
	.misc_ops = &dp_ops_misc,
	.ocb_ops = &dp_ops_ocb,
	.peer_ops = &dp_ops_peer,
	.mob_stats_ops = &dp_ops_mob_stats,
#endif
#ifdef DP_FLOW_CTL
	.cfg_ops = &dp_ops_cfg,
	.flowctl_ops = &dp_ops_flowctl,
	.l_flowctl_ops = &dp_ops_l_flowctl,
	.throttle_ops = &dp_ops_throttle,
#endif
#ifdef IPA_OFFLOAD
	.ipa_ops = &dp_ops_ipa,
#endif
#ifdef DP_POWER_SAVE
	.bus_ops = &dp_ops_bus,
#endif
#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
	.cfr_ops = &dp_ops_cfr,
#endif
};
11186 
11187 /*
11188  * dp_soc_set_txrx_ring_map()
11189  * @dp_soc: DP handler for soc
11190  *
11191  * Return: Void
11192  */
11193 void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
11194 {
11195 	uint32_t i;
11196 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
11197 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
11198 	}
11199 }
11200 
11201 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
11202 	defined(QCA_WIFI_QCA5018)
11203 /**
11204  * dp_soc_attach_wifi3() - Attach txrx SOC
11205  * @ctrl_psoc: Opaque SOC handle from control plane
11206  * @htc_handle: Opaque HTC handle
11207  * @hif_handle: Opaque HIF handle
11208  * @qdf_osdev: QDF device
11209  * @ol_ops: Offload Operations
11210  * @device_id: Device ID
11211  *
11212  * Return: DP SOC handle on success, NULL on failure
11213  */
11214 struct cdp_soc_t *
11215 dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
11216 		    struct hif_opaque_softc *hif_handle,
11217 		    HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
11218 		    struct ol_if_ops *ol_ops, uint16_t device_id)
11219 {
11220 	struct dp_soc *dp_soc = NULL;
11221 
11222 	dp_soc = dp_soc_attach(ctrl_psoc, hif_handle, htc_handle, qdf_osdev,
11223 			       ol_ops, device_id);
11224 	return dp_soc_to_cdp_soc_t(dp_soc);
11225 }
11226 
11227 static inline void dp_soc_set_def_pdev(struct dp_soc *soc)
11228 {
11229 	int lmac_id;
11230 
11231 	for (lmac_id = 0; lmac_id < MAX_NUM_LMAC_HW; lmac_id++) {
11232 		/*Set default host PDEV ID for lmac_id*/
11233 		wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
11234 				      INVALID_PDEV_ID, lmac_id);
11235 	}
11236 }
11237 
11238 /**
11239  * dp_soc_attach() - Attach txrx SOC
11240  * @ctrl_psoc: Opaque SOC handle from control plane
11241  * @hif_handle: Opaque HIF handle
11242  * @htc_handle: Opaque HTC handle
11243  * @qdf_osdev: QDF device
11244  * @ol_ops: Offload Operations
11245  * @device_id: Device ID
11246  *
11247  * Return: DP SOC handle on success, NULL on failure
11248  */
11249 static struct dp_soc *
11250 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
11251 	      struct hif_opaque_softc *hif_handle, HTC_HANDLE htc_handle,
11252 	      qdf_device_t qdf_osdev, struct ol_if_ops *ol_ops,
11253 	      uint16_t device_id)
11254 {
11255 	int int_ctx;
11256 	struct dp_soc *soc =  NULL;
11257 
11258 	if (!hif_handle) {
11259 		dp_err("HIF handle is NULL");
11260 		goto fail0;
11261 	}
11262 
11263 	soc = qdf_mem_malloc(sizeof(*soc));
11264 	if (!soc) {
11265 		dp_err("DP SOC memory allocation failed");
11266 		goto fail0;
11267 	}
11268 
11269 	soc->hif_handle = hif_handle;
11270 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
11271 	if (!soc->hal_soc)
11272 		goto fail1;
11273 
11274 	int_ctx = 0;
11275 	soc->device_id = device_id;
11276 	soc->cdp_soc.ops = &dp_txrx_ops;
11277 	soc->cdp_soc.ol_ops = ol_ops;
11278 	soc->ctrl_psoc = ctrl_psoc;
11279 	soc->osdev = qdf_osdev;
11280 	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
11281 
11282 	/* Reset wbm sg list and flags */
11283 	dp_rx_wbm_sg_list_reset(soc);
11284 
11285 	dp_soc_rx_history_attach(soc);
11286 	wlan_set_srng_cfg(&soc->wlan_srng_cfg);
11287 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
11288 	if (!soc->wlan_cfg_ctx) {
11289 		dp_err("wlan_cfg_ctx failed\n");
11290 		goto fail1;
11291 	}
11292 
11293 	dp_soc_cfg_attach(soc);
11294 
11295 	if (dp_hw_link_desc_pool_banks_alloc(soc, WLAN_INVALID_PDEV_ID)) {
11296 		dp_err("failed to allocate link desc pool banks");
11297 		goto fail2;
11298 	}
11299 
11300 	if (dp_hw_link_desc_ring_alloc(soc)) {
11301 		dp_err("failed to allocate link_desc_ring");
11302 		goto fail3;
11303 	}
11304 
11305 	if (dp_soc_srng_alloc(soc)) {
11306 		dp_err("failed to allocate soc srng rings");
11307 		goto fail4;
11308 	}
11309 
11310 	if (dp_soc_tx_desc_sw_pools_alloc(soc)) {
11311 		dp_err("dp_soc_tx_desc_sw_pools_alloc failed");
11312 		goto fail5;
11313 	}
11314 
11315 	dp_soc_set_interrupt_mode(soc);
11316 	dp_soc_set_def_pdev(soc);
11317 
11318 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
11319 		qdf_dma_mem_stats_read(),
11320 		qdf_heap_mem_stats_read(),
11321 		qdf_skb_mem_stats_read());
11322 
11323 	return soc;
11324 fail5:
11325 	dp_soc_srng_free(soc);
11326 fail4:
11327 	dp_hw_link_desc_ring_free(soc);
11328 fail3:
11329 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
11330 fail2:
11331 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
11332 fail1:
11333 	qdf_mem_free(soc);
11334 fail0:
11335 	return NULL;
11336 }
11337 
11338 /**
11339  * dp_soc_init() - Initialize txrx SOC
11340  * @dp_soc: Opaque DP SOC handle
11341  * @htc_handle: Opaque HTC handle
11342  * @hif_handle: Opaque HIF handle
11343  *
11344  * Return: DP SOC handle on success, NULL on failure
11345  */
11346 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
11347 		  struct hif_opaque_softc *hif_handle)
11348 {
11349 	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
11350 	bool is_monitor_mode = false;
11351 	struct hal_reo_params reo_params;
11352 	uint8_t i;
11353 
11354 	wlan_minidump_log(soc, sizeof(*soc), soc->ctrl_psoc,
11355 			  WLAN_MD_DP_SOC, "dp_soc");
11356 
11357 	htt_soc = htt_soc_attach(soc, htc_handle);
11358 	if (!htt_soc)
11359 		goto fail0;
11360 
11361 	soc->htt_handle = htt_soc;
11362 
11363 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
11364 		goto fail1;
11365 
11366 	htt_set_htc_handle(htt_soc, htc_handle);
11367 	soc->hif_handle = hif_handle;
11368 
11369 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
11370 	if (!soc->hal_soc)
11371 		goto fail2;
11372 
11373 	dp_soc_cfg_init(soc);
11374 
11375 	/* Reset/Initialize wbm sg list and flags */
11376 	dp_rx_wbm_sg_list_reset(soc);
11377 
11378 	/* Note: Any SRNG ring initialization should happen only after
11379 	 * Interrupt mode is set and followed by filling up the
11380 	 * interrupt mask. IT SHOULD ALWAYS BE IN THIS ORDER.
11381 	 */
11382 	dp_soc_set_interrupt_mode(soc);
11383 	if (soc->cdp_soc.ol_ops->get_con_mode &&
11384 	    soc->cdp_soc.ol_ops->get_con_mode() ==
11385 	    QDF_GLOBAL_MONITOR_MODE)
11386 		is_monitor_mode = true;
11387 
11388 	wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, soc->intr_mode,
11389 				     is_monitor_mode);
11390 
11391 	/* initialize WBM_IDLE_LINK ring */
11392 	if (dp_hw_link_desc_ring_init(soc)) {
11393 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
11394 			  FL("dp_hw_link_desc_ring_init failed"));
11395 		goto fail3;
11396 	}
11397 
11398 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
11399 
11400 	if (dp_soc_srng_init(soc)) {
11401 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
11402 			  FL("dp_soc_srng_init failed"));
11403 		goto fail4;
11404 	}
11405 
11406 	if (htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
11407 			       htt_get_htc_handle(htt_soc),
11408 			       soc->hal_soc, soc->osdev) == NULL)
11409 		goto fail5;
11410 
11411 	/* Initialize descriptors in TCL Rings */
11412 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
11413 		hal_tx_init_data_ring(soc->hal_soc,
11414 				      soc->tcl_data_ring[i].hal_srng);
11415 	}
11416 
11417 	if (dp_soc_tx_desc_sw_pools_init(soc)) {
11418 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
11419 			  FL("dp_tx_soc_attach failed"));
11420 		goto fail6;
11421 	}
11422 
11423 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
11424 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
11425 	soc->cce_disable = false;
11426 
11427 	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
11428 	qdf_spinlock_create(&soc->vdev_map_lock);
11429 	qdf_atomic_init(&soc->num_tx_outstanding);
11430 	qdf_atomic_init(&soc->num_tx_exception);
11431 	soc->num_tx_allowed =
11432 		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);
11433 
11434 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
11435 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
11436 				CDP_CFG_MAX_PEER_ID);
11437 
11438 		if (ret != -EINVAL)
11439 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
11440 
11441 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
11442 				CDP_CFG_CCE_DISABLE);
11443 		if (ret == 1)
11444 			soc->cce_disable = true;
11445 	}
11446 
11447 	/*
11448 	 * Skip registering hw ring interrupts for WMAC2 on IPQ6018
11449 	 * and IPQ5018 WMAC2 is not there in these platforms.
11450 	 */
11451 	if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018 ||
11452 	    soc->disable_mac2_intr)
11453 		dp_soc_disable_unused_mac_intr_mask(soc, 0x2);
11454 
11455 	/*
11456 	 * Skip registering hw ring interrupts for WMAC1 on IPQ5018
11457 	 * WMAC1 is not there in this platform.
11458 	 */
11459 	if (soc->disable_mac1_intr)
11460 		dp_soc_disable_unused_mac_intr_mask(soc, 0x1);
11461 
11462 	/* Setup HW REO */
11463 	qdf_mem_zero(&reo_params, sizeof(reo_params));
11464 
11465 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
11466 		/*
11467 		 * Reo ring remap is not required if both radios
11468 		 * are offloaded to NSS
11469 		 */
11470 		if (dp_reo_remap_config(soc,
11471 					&reo_params.remap1,
11472 					&reo_params.remap2))
11473 			reo_params.rx_hash_enabled = true;
11474 		else
11475 			reo_params.rx_hash_enabled = false;
11476 	}
11477 
11478 	/* setup the global rx defrag waitlist */
11479 	TAILQ_INIT(&soc->rx.defrag.waitlist);
11480 	soc->rx.defrag.timeout_ms =
11481 		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
11482 	soc->rx.defrag.next_flush_ms = 0;
11483 	soc->rx.flags.defrag_timeout_check =
11484 		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
11485 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
11486 
11487 	/*
11488 	 * set the fragment destination ring
11489 	 */
11490 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
11491 
11492 	hal_reo_setup(soc->hal_soc, &reo_params);
11493 
11494 	hal_reo_set_err_dst_remap(soc->hal_soc);
11495 
11496 	qdf_atomic_set(&soc->cmn_init_done, 1);
11497 
11498 	dp_soc_wds_attach(soc);
11499 
11500 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
11501 
11502 	qdf_spinlock_create(&soc->ast_lock);
11503 
11504 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
11505 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
11506 	INIT_RX_HW_STATS_LOCK(soc);
11507 
11508 	/* fill the tx/rx cpu ring map*/
11509 	dp_soc_set_txrx_ring_map(soc);
11510 
11511 	TAILQ_INIT(&soc->inactive_peer_list);
11512 	qdf_spinlock_create(&soc->inactive_peer_list_lock);
11513 	TAILQ_INIT(&soc->inactive_vdev_list);
11514 	qdf_spinlock_create(&soc->inactive_vdev_list_lock);
11515 	qdf_spinlock_create(&soc->htt_stats.lock);
11516 	/* initialize work queue for stats processing */
11517 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
11518 
11519 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
11520 		qdf_dma_mem_stats_read(),
11521 		qdf_heap_mem_stats_read(),
11522 		qdf_skb_mem_stats_read());
11523 
11524 	return soc;
11525 fail6:
11526 	htt_soc_htc_dealloc(soc->htt_handle);
11527 fail5:
11528 	dp_soc_srng_deinit(soc);
11529 fail4:
11530 	dp_hw_link_desc_ring_deinit(soc);
11531 fail3:
11532 	dp_hw_link_desc_ring_free(soc);
11533 fail2:
11534 	htt_htc_pkt_pool_free(htt_soc);
11535 fail1:
11536 	htt_soc_detach(htt_soc);
11537 fail0:
11538 	return NULL;
11539 }
11540 
11541 /**
11542  * dp_soc_init_wifi3() - Initialize txrx SOC
11543  * @soc: Opaque DP SOC handle
11544  * @ctrl_psoc: Opaque SOC handle from control plane(Unused)
11545  * @hif_handle: Opaque HIF handle
11546  * @htc_handle: Opaque HTC handle
11547  * @qdf_osdev: QDF device (Unused)
11548  * @ol_ops: Offload Operations (Unused)
11549  * @device_id: Device ID (Unused)
11550  *
11551  * Return: DP SOC handle on success, NULL on failure
11552  */
11553 void *dp_soc_init_wifi3(struct cdp_soc_t *soc,
11554 			struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
11555 			struct hif_opaque_softc *hif_handle,
11556 			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
11557 			struct ol_if_ops *ol_ops, uint16_t device_id)
11558 {
11559 	return dp_soc_init((struct dp_soc *)soc, htc_handle, hif_handle);
11560 }
11561 
11562 #endif
11563 
11564 /*
11565  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
11566  *
11567  * @soc: handle to DP soc
11568  * @mac_id: MAC id
11569  *
11570  * Return: Return pdev corresponding to MAC
11571  */
11572 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
11573 {
11574 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
11575 		return (mac_id < MAX_PDEV_CNT) ? soc->pdev_list[mac_id] : NULL;
11576 
11577 	/* Typically for MCL as there only 1 PDEV*/
11578 	return soc->pdev_list[0];
11579 }
11580 
11581 /*
11582  * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
11583  * @soc:		DP SoC context
11584  * @max_mac_rings:	No of MAC rings
11585  *
11586  * Return: None
11587  */
11588 void dp_is_hw_dbs_enable(struct dp_soc *soc,
11589 				int *max_mac_rings)
11590 {
11591 	bool dbs_enable = false;
11592 	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
11593 		dbs_enable = soc->cdp_soc.ol_ops->
11594 		is_hw_dbs_2x2_capable((void *)soc->ctrl_psoc);
11595 
11596 	*max_mac_rings = (dbs_enable)?(*max_mac_rings):1;
11597 }
11598 
11599 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
11600 /*
11601  * dp_cfr_filter() -  Configure HOST RX monitor status ring for CFR
11602  * @soc_hdl: Datapath soc handle
11603  * @pdev_id: id of data path pdev handle
11604  * @enable: Enable/Disable CFR
11605  * @filter_val: Flag to select Filter for monitor mode
11606  */
11607 static void dp_cfr_filter(struct cdp_soc_t *soc_hdl,
11608 			  uint8_t pdev_id,
11609 			  bool enable,
11610 			  struct cdp_monitor_filter *filter_val)
11611 {
11612 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11613 	struct dp_pdev *pdev = NULL;
11614 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
11615 	int max_mac_rings;
11616 	uint8_t mac_id = 0;
11617 
11618 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11619 	if (!pdev) {
11620 		dp_err("pdev is NULL");
11621 		return;
11622 	}
11623 
11624 	if (pdev->monitor_vdev) {
11625 		dp_info("No action is needed since monitor mode is enabled\n");
11626 		return;
11627 	}
11628 	soc = pdev->soc;
11629 	pdev->cfr_rcc_mode = false;
11630 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
11631 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
11632 
11633 	dp_debug("Max_mac_rings %d", max_mac_rings);
11634 	dp_info("enable : %d, mode: 0x%x", enable, filter_val->mode);
11635 
11636 	if (enable) {
11637 		pdev->cfr_rcc_mode = true;
11638 
11639 		htt_tlv_filter.ppdu_start = 1;
11640 		htt_tlv_filter.ppdu_end = 1;
11641 		htt_tlv_filter.ppdu_end_user_stats = 1;
11642 		htt_tlv_filter.ppdu_end_user_stats_ext = 1;
11643 		htt_tlv_filter.ppdu_end_status_done = 1;
11644 		htt_tlv_filter.mpdu_start = 1;
11645 		htt_tlv_filter.offset_valid = false;
11646 
11647 		htt_tlv_filter.enable_fp =
11648 			(filter_val->mode & MON_FILTER_PASS) ? 1 : 0;
11649 		htt_tlv_filter.enable_md = 0;
11650 		htt_tlv_filter.enable_mo =
11651 			(filter_val->mode & MON_FILTER_OTHER) ? 1 : 0;
11652 		htt_tlv_filter.fp_mgmt_filter = filter_val->fp_mgmt;
11653 		htt_tlv_filter.fp_ctrl_filter = filter_val->fp_ctrl;
11654 		htt_tlv_filter.fp_data_filter = filter_val->fp_data;
11655 		htt_tlv_filter.mo_mgmt_filter = filter_val->mo_mgmt;
11656 		htt_tlv_filter.mo_ctrl_filter = filter_val->mo_ctrl;
11657 		htt_tlv_filter.mo_data_filter = filter_val->mo_data;
11658 	}
11659 
11660 	for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
11661 		int mac_for_pdev =
11662 			dp_get_mac_id_for_pdev(mac_id,
11663 					       pdev->pdev_id);
11664 
11665 		htt_h2t_rx_ring_cfg(soc->htt_handle,
11666 				    mac_for_pdev,
11667 				    soc->rxdma_mon_status_ring[mac_id]
11668 				    .hal_srng,
11669 				    RXDMA_MONITOR_STATUS,
11670 				    RX_MON_STATUS_BUF_SIZE,
11671 				    &htt_tlv_filter);
11672 	}
11673 }
11674 
11675 /**
11676  * dp_get_cfr_rcc() - get cfr rcc config
11677  * @soc_hdl: Datapath soc handle
11678  * @pdev_id: id of objmgr pdev
11679  *
11680  * Return: true/false based on cfr mode setting
11681  */
11682 static
11683 bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
11684 {
11685 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11686 	struct dp_pdev *pdev = NULL;
11687 
11688 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11689 	if (!pdev) {
11690 		dp_err("pdev is NULL");
11691 		return false;
11692 	}
11693 
11694 	return pdev->cfr_rcc_mode;
11695 }
11696 
11697 /**
11698  * dp_set_cfr_rcc() - enable/disable cfr rcc config
11699  * @soc_hdl: Datapath soc handle
11700  * @pdev_id: id of objmgr pdev
11701  * @enable: Enable/Disable cfr rcc mode
11702  *
11703  * Return: none
11704  */
11705 static
11706 void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, bool enable)
11707 {
11708 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11709 	struct dp_pdev *pdev = NULL;
11710 
11711 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11712 	if (!pdev) {
11713 		dp_err("pdev is NULL");
11714 		return;
11715 	}
11716 
11717 	pdev->cfr_rcc_mode = enable;
11718 }
11719 
11720 /*
11721  * dp_get_cfr_dbg_stats - Get the debug statistics for CFR
11722  * @soc_hdl: Datapath soc handle
11723  * @pdev_id: id of data path pdev handle
11724  * @cfr_rcc_stats: CFR RCC debug statistics buffer
11725  *
11726  * Return: none
11727  */
11728 static inline void
11729 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
11730 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats)
11731 {
11732 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11733 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11734 
11735 	if (!pdev) {
11736 		dp_err("Invalid pdev");
11737 		return;
11738 	}
11739 
11740 	qdf_mem_copy(cfr_rcc_stats, &pdev->stats.rcc,
11741 		     sizeof(struct cdp_cfr_rcc_stats));
11742 }
11743 
11744 /*
11745  * dp_clear_cfr_dbg_stats - Clear debug statistics for CFR
11746  * @soc_hdl: Datapath soc handle
11747  * @pdev_id: id of data path pdev handle
11748  *
11749  * Return: none
11750  */
11751 static void dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl,
11752 				   uint8_t pdev_id)
11753 {
11754 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11755 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11756 
11757 	if (!pdev) {
11758 		dp_err("dp pdev is NULL");
11759 		return;
11760 	}
11761 
11762 	qdf_mem_zero(&pdev->stats.rcc, sizeof(pdev->stats.rcc));
11763 }
11764 
11765 /*
11766  * dp_enable_mon_reap_timer() - enable/disable reap timer
11767  * @soc_hdl: Datapath soc handle
11768  * @pdev_id: id of objmgr pdev
11769  * @enable: Enable/Disable reap timer of monitor status ring
11770  *
11771  * Return: none
11772  */
11773 static void
11774 dp_enable_mon_reap_timer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
11775 			 bool enable)
11776 {
11777 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11778 	struct dp_pdev *pdev = NULL;
11779 
11780 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11781 	if (!pdev) {
11782 		dp_err("pdev is NULL");
11783 		return;
11784 	}
11785 
11786 	pdev->enable_reap_timer_non_pkt = enable;
11787 	if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
11788 		dp_debug("pktlog enabled %d", pdev->rx_pktlog_mode);
11789 		return;
11790 	}
11791 
11792 	if (!soc->reap_timer_init) {
11793 		dp_err("reap timer not init");
11794 		return;
11795 	}
11796 
11797 	if (enable)
11798 		qdf_timer_mod(&soc->mon_reap_timer,
11799 			      DP_INTR_POLL_TIMER_MS);
11800 	else
11801 		qdf_timer_sync_cancel(&soc->mon_reap_timer);
11802 }
11803 #endif
11804 
11805 /*
11806  * dp_is_enable_reap_timer_non_pkt() - check if mon reap timer is
11807  * enabled by non-pkt log or not
11808  * @pdev: point to dp pdev
11809  *
11810  * Return: true if mon reap timer is enabled by non-pkt log
11811  */
11812 static bool dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev)
11813 {
11814 	if (!pdev) {
11815 		dp_err("null pdev");
11816 		return false;
11817 	}
11818 
11819 	return pdev->enable_reap_timer_non_pkt;
11820 }
11821 
11822 /*
11823 * dp_set_pktlog_wifi3() - attach txrx vdev
11824 * @pdev: Datapath PDEV handle
11825 * @event: which event's notifications are being subscribed to
11826 * @enable: WDI event subscribe or not. (True or False)
11827 *
11828 * Return: Success, NULL on failure
11829 */
11830 #ifdef WDI_EVENT_ENABLE
11831 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
11832 		bool enable)
11833 {
11834 	struct dp_soc *soc = NULL;
11835 	int max_mac_rings = wlan_cfg_get_num_mac_rings
11836 					(pdev->wlan_cfg_ctx);
11837 	uint8_t mac_id = 0;
11838 
11839 	soc = pdev->soc;
11840 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
11841 
11842 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
11843 			FL("Max_mac_rings %d "),
11844 			max_mac_rings);
11845 
11846 	if (enable) {
11847 		switch (event) {
11848 		case WDI_EVENT_RX_DESC:
11849 			if (pdev->monitor_vdev) {
11850 				/* Nothing needs to be done if monitor mode is
11851 				 * enabled
11852 				 */
11853 				return 0;
11854 			}
11855 
11856 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
11857 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
11858 				dp_mon_filter_setup_rx_pkt_log_full(pdev);
11859 				if (dp_mon_filter_update(pdev) !=
11860 						QDF_STATUS_SUCCESS) {
11861 					QDF_TRACE(QDF_MODULE_ID_DP,
11862 						  QDF_TRACE_LEVEL_ERROR,
11863 						  FL("Pktlog full filters set failed"));
11864 					dp_mon_filter_reset_rx_pkt_log_full(pdev);
11865 					pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
11866 					return 0;
11867 				}
11868 
11869 				if (soc->reap_timer_init &&
11870 				    (!dp_is_enable_reap_timer_non_pkt(pdev)))
11871 					qdf_timer_mod(&soc->mon_reap_timer,
11872 					DP_INTR_POLL_TIMER_MS);
11873 			}
11874 			break;
11875 
11876 		case WDI_EVENT_LITE_RX:
11877 			if (pdev->monitor_vdev) {
11878 				/* Nothing needs to be done if monitor mode is
11879 				 * enabled
11880 				 */
11881 				return 0;
11882 			}
11883 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
11884 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
11885 
11886 				/*
11887 				 * Set the packet log lite mode filter.
11888 				 */
11889 				dp_mon_filter_setup_rx_pkt_log_lite(pdev);
11890 				if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
11891 					QDF_TRACE(QDF_MODULE_ID_DP,
11892 						  QDF_TRACE_LEVEL_ERROR,
11893 						  FL("Pktlog lite filters set failed"));
11894 					dp_mon_filter_reset_rx_pkt_log_lite(pdev);
11895 					pdev->rx_pktlog_mode =
11896 						DP_RX_PKTLOG_DISABLED;
11897 					return 0;
11898 				}
11899 
11900 				if (soc->reap_timer_init &&
11901 				    (!dp_is_enable_reap_timer_non_pkt(pdev)))
11902 					qdf_timer_mod(&soc->mon_reap_timer,
11903 					DP_INTR_POLL_TIMER_MS);
11904 			}
11905 			break;
11906 
11907 		case WDI_EVENT_LITE_T2H:
11908 			if (pdev->monitor_vdev) {
11909 				/* Nothing needs to be done if monitor mode is
11910 				 * enabled
11911 				 */
11912 				return 0;
11913 			}
11914 
11915 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
11916 				int mac_for_pdev = dp_get_mac_id_for_pdev(
11917 							mac_id,	pdev->pdev_id);
11918 
11919 				pdev->pktlog_ppdu_stats = true;
11920 				dp_h2t_cfg_stats_msg_send(pdev,
11921 					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
11922 					mac_for_pdev);
11923 			}
11924 			break;
11925 
11926 		default:
11927 			/* Nothing needs to be done for other pktlog types */
11928 			break;
11929 		}
11930 	} else {
11931 		switch (event) {
11932 		case WDI_EVENT_RX_DESC:
11933 		case WDI_EVENT_LITE_RX:
11934 			if (pdev->monitor_vdev) {
11935 				/* Nothing needs to be done if monitor mode is
11936 				 * enabled
11937 				 */
11938 				return 0;
11939 			}
11940 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
11941 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
11942 				dp_mon_filter_reset_rx_pkt_log_full(pdev);
11943 				if (dp_mon_filter_update(pdev) !=
11944 						QDF_STATUS_SUCCESS) {
11945 					QDF_TRACE(QDF_MODULE_ID_DP,
11946 						  QDF_TRACE_LEVEL_ERROR,
11947 						  FL("Pktlog filters reset failed"));
11948 					return 0;
11949 				}
11950 
11951 				dp_mon_filter_reset_rx_pkt_log_lite(pdev);
11952 				if (dp_mon_filter_update(pdev) !=
11953 						QDF_STATUS_SUCCESS) {
11954 					QDF_TRACE(QDF_MODULE_ID_DP,
11955 						  QDF_TRACE_LEVEL_ERROR,
11956 						  FL("Pktlog filters reset failed"));
11957 					return 0;
11958 				}
11959 
11960 				if (soc->reap_timer_init &&
11961 				    (!dp_is_enable_reap_timer_non_pkt(pdev)))
11962 					qdf_timer_stop(&soc->mon_reap_timer);
11963 			}
11964 			break;
11965 		case WDI_EVENT_LITE_T2H:
11966 			if (pdev->monitor_vdev) {
11967 				/* Nothing needs to be done if monitor mode is
11968 				 * enabled
11969 				 */
11970 				return 0;
11971 			}
11972 			/* To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW
11973 			 * passing value 0. Once these macros will define in htt
11974 			 * header file will use proper macros
11975 			*/
11976 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
11977 				int mac_for_pdev =
11978 						dp_get_mac_id_for_pdev(mac_id,
11979 								pdev->pdev_id);
11980 
11981 				pdev->pktlog_ppdu_stats = false;
11982 				if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
11983 					dp_h2t_cfg_stats_msg_send(pdev, 0,
11984 								mac_for_pdev);
11985 				} else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
11986 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
11987 								mac_for_pdev);
11988 				} else if (pdev->enhanced_stats_en) {
11989 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
11990 								mac_for_pdev);
11991 				}
11992 			}
11993 
11994 			break;
11995 		default:
11996 			/* Nothing needs to be done for other pktlog types */
11997 			break;
11998 		}
11999 	}
12000 	return 0;
12001 }
12002 #endif
12003 
12004 /**
12005  * dp_bucket_index() - Return index from array
12006  *
12007  * @delay: delay measured
12008  * @array: array used to index corresponding delay
12009  *
12010  * Return: index
12011  */
12012 static uint8_t dp_bucket_index(uint32_t delay, uint16_t *array)
12013 {
12014 	uint8_t i = CDP_DELAY_BUCKET_0;
12015 
12016 	for (; i < CDP_DELAY_BUCKET_MAX - 1; i++) {
12017 		if (delay >= array[i] && delay <= array[i + 1])
12018 			return i;
12019 	}
12020 
12021 	return (CDP_DELAY_BUCKET_MAX - 1);
12022 }
12023 
12024 /**
12025  * dp_fill_delay_buckets() - Fill delay statistics bucket for each
12026  *				type of delay
12027  *
12028  * @pdev: pdev handle
12029  * @delay: delay in ms
12030  * @tid: tid value
12031  * @mode: type of tx delay mode
12032  * @ring_id: ring number
12033  * Return: pointer to cdp_delay_stats structure
12034  */
12035 static struct cdp_delay_stats *
12036 dp_fill_delay_buckets(struct dp_pdev *pdev, uint32_t delay,
12037 		      uint8_t tid, uint8_t mode, uint8_t ring_id)
12038 {
12039 	uint8_t delay_index = 0;
12040 	struct cdp_tid_tx_stats *tstats =
12041 		&pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
12042 	struct cdp_tid_rx_stats *rstats =
12043 		&pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
12044 	/*
12045 	 * cdp_fw_to_hw_delay_range
12046 	 * Fw to hw delay ranges in milliseconds
12047 	 */
12048 	uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
12049 		0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
12050 
12051 	/*
12052 	 * cdp_sw_enq_delay_range
12053 	 * Software enqueue delay ranges in milliseconds
12054 	 */
12055 	uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
12056 		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
12057 
12058 	/*
12059 	 * cdp_intfrm_delay_range
12060 	 * Interframe delay ranges in milliseconds
12061 	 */
12062 	uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
12063 		0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};
12064 
12065 	/*
12066 	 * Update delay stats in proper bucket
12067 	 */
12068 	switch (mode) {
12069 	/* Software Enqueue delay ranges */
12070 	case CDP_DELAY_STATS_SW_ENQ:
12071 
12072 		delay_index = dp_bucket_index(delay, cdp_sw_enq_delay);
12073 		tstats->swq_delay.delay_bucket[delay_index]++;
12074 		return &tstats->swq_delay;
12075 
12076 	/* Tx Completion delay ranges */
12077 	case CDP_DELAY_STATS_FW_HW_TRANSMIT:
12078 
12079 		delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay);
12080 		tstats->hwtx_delay.delay_bucket[delay_index]++;
12081 		return &tstats->hwtx_delay;
12082 
12083 	/* Interframe tx delay ranges */
12084 	case CDP_DELAY_STATS_TX_INTERFRAME:
12085 
12086 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
12087 		tstats->intfrm_delay.delay_bucket[delay_index]++;
12088 		return &tstats->intfrm_delay;
12089 
12090 	/* Interframe rx delay ranges */
12091 	case CDP_DELAY_STATS_RX_INTERFRAME:
12092 
12093 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
12094 		rstats->intfrm_delay.delay_bucket[delay_index]++;
12095 		return &rstats->intfrm_delay;
12096 
12097 	/* Ring reap to indication to network stack */
12098 	case CDP_DELAY_STATS_REAP_STACK:
12099 
12100 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
12101 		rstats->to_stack_delay.delay_bucket[delay_index]++;
12102 		return &rstats->to_stack_delay;
12103 	default:
12104 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
12105 			  "%s Incorrect delay mode: %d", __func__, mode);
12106 	}
12107 
12108 	return NULL;
12109 }
12110 
12111 /**
12112  * dp_update_delay_stats() - Update delay statistics in structure
12113  *				and fill min, max and avg delay
12114  *
12115  * @pdev: pdev handle
12116  * @delay: delay in ms
12117  * @tid: tid value
12118  * @mode: type of tx delay mode
12119  * @ring id: ring number
12120  * Return: none
12121  */
12122 void dp_update_delay_stats(struct dp_pdev *pdev, uint32_t delay,
12123 			   uint8_t tid, uint8_t mode, uint8_t ring_id)
12124 {
12125 	struct cdp_delay_stats *dstats = NULL;
12126 
12127 	/*
12128 	 * Delay ranges are different for different delay modes
12129 	 * Get the correct index to update delay bucket
12130 	 */
12131 	dstats = dp_fill_delay_buckets(pdev, delay, tid, mode, ring_id);
12132 	if (qdf_unlikely(!dstats))
12133 		return;
12134 
12135 	if (delay != 0) {
12136 		/*
12137 		 * Compute minimum,average and maximum
12138 		 * delay
12139 		 */
12140 		if (delay < dstats->min_delay)
12141 			dstats->min_delay = delay;
12142 
12143 		if (delay > dstats->max_delay)
12144 			dstats->max_delay = delay;
12145 
12146 		/*
12147 		 * Average over delay measured till now
12148 		 */
12149 		if (!dstats->avg_delay)
12150 			dstats->avg_delay = delay;
12151 		else
12152 			dstats->avg_delay = ((delay + dstats->avg_delay) / 2);
12153 	}
12154 }
12155 
12156 /**
12157  * dp_get_peer_mac_list(): function to get peer mac list of vdev
12158  * @soc: Datapath soc handle
12159  * @vdev_id: vdev id
12160  * @newmac: Table of the clients mac
12161  * @mac_cnt: No. of MACs required
12162  * @limit: Limit the number of clients
12163  *
12164  * return: no of clients
12165  */
12166 uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
12167 			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
12168 			      u_int16_t mac_cnt, bool limit)
12169 {
12170 	struct dp_soc *dp_soc = (struct dp_soc *)soc;
12171 	struct dp_vdev *vdev =
12172 		dp_vdev_get_ref_by_id(dp_soc, vdev_id, DP_MOD_ID_CDP);
12173 	struct dp_peer *peer;
12174 	uint16_t new_mac_cnt = 0;
12175 
12176 	if (!vdev)
12177 		return new_mac_cnt;
12178 
12179 	if (limit && (vdev->num_peers > mac_cnt))
12180 		return 0;
12181 
12182 	qdf_spin_lock_bh(&vdev->peer_list_lock);
12183 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
12184 		if (peer->bss_peer)
12185 			continue;
12186 		if (new_mac_cnt < mac_cnt) {
12187 			WLAN_ADDR_COPY(newmac[new_mac_cnt], peer->mac_addr.raw);
12188 			new_mac_cnt++;
12189 		}
12190 	}
12191 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
12192 	dp_vdev_unref_delete(dp_soc, vdev, DP_MOD_ID_CDP);
12193 	return new_mac_cnt;
12194 }
12195 
12196 /**
12197  * dp_pdev_srng_deinit() - de-initialize all pdev srng ring including
12198  *			   monitor rings
12199  * @pdev: Datapath pdev handle
12200  *
12201  */
12202 static void dp_pdev_srng_deinit(struct dp_pdev *pdev)
12203 {
12204 	struct dp_soc *soc = pdev->soc;
12205 	uint8_t i;
12206 
12207 	dp_srng_deinit(soc, &soc->rx_refill_buf_ring[pdev->lmac_id], RXDMA_BUF,
12208 		       pdev->lmac_id);
12209 
12210 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
12211 		dp_deinit_tx_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
12212 
12213 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
12214 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, pdev->pdev_id);
12215 
12216 		wlan_minidump_remove(soc->rxdma_err_dst_ring[lmac_id].base_vaddr_unaligned);
12217 		dp_srng_deinit(soc, &soc->rxdma_err_dst_ring[lmac_id],
12218 			       RXDMA_DST, lmac_id);
12219 	}
12220 
12221 	dp_mon_rings_deinit(pdev);
12222 }
12223 
12224 /**
12225  * dp_pdev_srng_init() - initialize all pdev srng rings including
12226  *			   monitor rings
12227  * @pdev: Datapath pdev handle
12228  *
12229  * return: QDF_STATUS_SUCCESS on success
12230  *	   QDF_STATUS_E_NOMEM on failure
12231  */
12232 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev)
12233 {
12234 	struct dp_soc *soc = pdev->soc;
12235 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
12236 	uint32_t i;
12237 
12238 	soc_cfg_ctx = soc->wlan_cfg_ctx;
12239 
12240 	if (dp_srng_init(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
12241 			 RXDMA_BUF, 0, pdev->lmac_id)) {
12242 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12243 			  FL("dp_srng_init failed rx refill ring"));
12244 		goto fail1;
12245 	}
12246 
12247 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
12248 		if (dp_init_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
12249 			goto fail1;
12250 	}
12251 
12252 	if (dp_mon_rings_init(soc, pdev)) {
12253 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12254 			  FL("MONITOR rings setup failed"));
12255 		goto fail1;
12256 	}
12257 
12258 	/* LMAC RxDMA to SW Rings configuration */
12259 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
12260 		/* Only valid for MCL */
12261 		pdev = soc->pdev_list[0];
12262 
12263 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
12264 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, pdev->pdev_id);
12265 		struct dp_srng *srng = &soc->rxdma_err_dst_ring[lmac_id];
12266 
12267 		if (srng->hal_srng)
12268 			continue;
12269 
12270 		if (dp_srng_init(soc, srng, RXDMA_DST, 0, lmac_id)) {
12271 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12272 				  FL(RNG_ERR "rxdma_err_dst_ring"));
12273 			goto fail1;
12274 		}
12275 		wlan_minidump_log(soc->rxdma_err_dst_ring[lmac_id].base_vaddr_unaligned,
12276 				  soc->rxdma_err_dst_ring[lmac_id].alloc_size,
12277 				  soc->ctrl_psoc,
12278 				  WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
12279 				  "rxdma_err_dst");
12280 	}
12281 	return QDF_STATUS_SUCCESS;
12282 
12283 fail1:
12284 	dp_pdev_srng_deinit(pdev);
12285 	return QDF_STATUS_E_NOMEM;
12286 }
12287 
12288 /**
12289  * dp_pdev_srng_free() - free all pdev srng rings including monitor rings
12290  * pdev: Datapath pdev handle
12291  *
12292  */
12293 static void dp_pdev_srng_free(struct dp_pdev *pdev)
12294 {
12295 	struct dp_soc *soc = pdev->soc;
12296 	uint8_t i;
12297 
12298 	dp_srng_free(soc, &soc->rx_refill_buf_ring[pdev->lmac_id]);
12299 	dp_mon_rings_free(pdev);
12300 
12301 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
12302 		dp_free_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
12303 
12304 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
12305 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, pdev->pdev_id);
12306 
12307 		dp_srng_free(soc, &soc->rxdma_err_dst_ring[lmac_id]);
12308 	}
12309 }
12310 
12311 /**
12312  * dp_pdev_srng_alloc() - allocate memory for all pdev srng rings including
12313  *			  monitor rings
12314  * pdev: Datapath pdev handle
12315  *
12316  * return: QDF_STATUS_SUCCESS on success
12317  *	   QDF_STATUS_E_NOMEM on failure
12318  */
12319 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev)
12320 {
12321 	struct dp_soc *soc = pdev->soc;
12322 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
12323 	uint32_t ring_size;
12324 	uint32_t i;
12325 
12326 	soc_cfg_ctx = soc->wlan_cfg_ctx;
12327 
12328 	ring_size = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
12329 	if (dp_srng_alloc(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
12330 			  RXDMA_BUF, ring_size, 0)) {
12331 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12332 			  FL("dp_srng_alloc failed rx refill ring"));
12333 		goto fail1;
12334 	}
12335 
12336 	if (dp_mon_rings_alloc(soc, pdev)) {
12337 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12338 			  FL("MONITOR rings setup failed"));
12339 		goto fail1;
12340 	}
12341 
12342 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
12343 		if (dp_alloc_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
12344 			goto fail1;
12345 	}
12346 
12347 	ring_size = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
12348 	/* LMAC RxDMA to SW Rings configuration */
12349 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
12350 		/* Only valid for MCL */
12351 		pdev = soc->pdev_list[0];
12352 
12353 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
12354 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, pdev->pdev_id);
12355 		struct dp_srng *srng = &soc->rxdma_err_dst_ring[lmac_id];
12356 
12357 		if (srng->base_vaddr_unaligned)
12358 			continue;
12359 
12360 		if (dp_srng_alloc(soc, srng, RXDMA_DST, ring_size, 0)) {
12361 			QDF_TRACE(QDF_MODULE_ID_DP,
12362 				  QDF_TRACE_LEVEL_ERROR,
12363 				  FL(RNG_ERR "rxdma_err_dst_ring"));
12364 			goto fail1;
12365 		}
12366 	}
12367 
12368 	return QDF_STATUS_SUCCESS;
12369 fail1:
12370 	dp_pdev_srng_free(pdev);
12371 	return QDF_STATUS_E_NOMEM;
12372 }
12373 
12374 /**
12375  * dp_soc_srng_deinit() - de-initialize soc srng rings
12376  * @soc: Datapath soc handle
12377  *
12378  */
12379 static void dp_soc_srng_deinit(struct dp_soc *soc)
12380 {
12381 	uint32_t i;
12382 	/* Free the ring memories */
12383 	/* Common rings */
12384 	wlan_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned);
12385 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
12386 
12387 	/* Tx data rings */
12388 	for (i = 0; i < soc->num_tcl_data_rings; i++)
12389 		dp_deinit_tx_pair_by_index(soc, i);
12390 
12391 	/* TCL command and status rings */
12392 	wlan_minidump_remove(soc->tcl_cmd_credit_ring.base_vaddr_unaligned);
12393 	dp_srng_deinit(soc, &soc->tcl_cmd_credit_ring, TCL_CMD_CREDIT, 0);
12394 	wlan_minidump_remove(soc->tcl_status_ring.base_vaddr_unaligned);
12395 	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
12396 
12397 	/* Rx data rings */
12398 	soc->num_reo_dest_rings =
12399 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
12400 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
12401 		/* TODO: Get number of rings and ring sizes
12402 		 * from wlan_cfg
12403 		 */
12404 		wlan_minidump_remove(soc->reo_dest_ring[i].base_vaddr_unaligned);
12405 		dp_srng_deinit(soc, &soc->reo_dest_ring[i], REO_DST, i);
12406 	}
12407 
12408 	/* REO reinjection ring */
12409 	wlan_minidump_remove(soc->reo_reinject_ring.base_vaddr_unaligned);
12410 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
12411 
12412 	/* Rx release ring */
12413 	wlan_minidump_remove(soc->rx_rel_ring.base_vaddr_unaligned);
12414 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
12415 
12416 	/* Rx exception ring */
12417 	/* TODO: Better to store ring_type and ring_num in
12418 	 * dp_srng during setup
12419 	 */
12420 	wlan_minidump_remove(soc->reo_exception_ring.base_vaddr_unaligned);
12421 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
12422 
12423 	/* REO command and status rings */
12424 	wlan_minidump_remove(soc->reo_cmd_ring.base_vaddr_unaligned);
12425 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
12426 	wlan_minidump_remove(soc->reo_status_ring.base_vaddr_unaligned);
12427 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
12428 }
12429 
12430 /**
12431  * dp_soc_srng_init() - Initialize soc level srng rings
12432  * @soc: Datapath soc handle
12433  *
12434  * return: QDF_STATUS_SUCCESS on success
12435  *	   QDF_STATUS_E_FAILURE on failure
12436  */
12437 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc)
12438 {
12439 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
12440 	uint32_t num_tcl_data_rings, num_reo_dest_rings;
12441 	uint8_t i;
12442 
12443 	soc_cfg_ctx = soc->wlan_cfg_ctx;
12444 
12445 	dp_enable_verbose_debug(soc);
12446 
12447 	/* WBM descriptor release ring */
12448 	if (dp_srng_init(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0)) {
12449 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12450 			  FL("dp_srng_init failed for wbm_desc_rel_ring"));
12451 		goto fail1;
12452 	}
12453 
12454 	wlan_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
12455 			  soc->wbm_desc_rel_ring.alloc_size,
12456 			  soc->ctrl_psoc,
12457 			  WLAN_MD_DP_SRNG_WBM_DESC_REL,
12458 			  "wbm_desc_rel_ring");
12459 
12460 	/* TCL command and status rings */
12461 	if (dp_srng_init(soc, &soc->tcl_cmd_credit_ring,
12462 			 TCL_CMD_CREDIT, 0, 0)) {
12463 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12464 			  FL("dp_srng_init failed for tcl_cmd_ring"));
12465 		goto fail1;
12466 	}
12467 
12468 	wlan_minidump_log(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
12469 			  soc->tcl_cmd_credit_ring.alloc_size,
12470 			  soc->ctrl_psoc,
12471 			  WLAN_MD_DP_SRNG_TCL_CMD,
12472 			  "wbm_desc_rel_ring");
12473 
12474 	if (dp_srng_init(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0)) {
12475 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12476 			  FL("dp_srng_init failed for tcl_status_ring"));
12477 		goto fail1;
12478 	}
12479 
12480 	wlan_minidump_log(soc->tcl_status_ring.base_vaddr_unaligned,
12481 			  soc->tcl_status_ring.alloc_size,
12482 			  soc->ctrl_psoc,
12483 			  WLAN_MD_DP_SRNG_TCL_STATUS,
12484 			  "wbm_desc_rel_ring");
12485 
12486 	/* REO reinjection ring */
12487 	if (dp_srng_init(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0)) {
12488 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12489 			  FL("dp_srng_init failed for reo_reinject_ring"));
12490 		goto fail1;
12491 	}
12492 
12493 	wlan_minidump_log(soc->reo_reinject_ring.base_vaddr_unaligned,
12494 			  soc->reo_reinject_ring.alloc_size,
12495 			  soc->ctrl_psoc,
12496 			  WLAN_MD_DP_SRNG_REO_REINJECT,
12497 			  "reo_reinject_ring");
12498 
12499 	/* Rx release ring */
12500 	if (dp_srng_init(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0)) {
12501 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12502 			  FL("dp_srng_init failed for rx_rel_ring"));
12503 		goto fail1;
12504 	}
12505 
12506 	wlan_minidump_log(soc->rx_rel_ring.base_vaddr_unaligned,
12507 			  soc->rx_rel_ring.alloc_size,
12508 			  soc->ctrl_psoc,
12509 			  WLAN_MD_DP_SRNG_RX_REL,
12510 			  "reo_release_ring");
12511 
12512 	/* Rx exception ring */
12513 	if (dp_srng_init(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0,
12514 			 MAX_REO_DEST_RINGS)) {
12515 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12516 			  FL("dp_srng_init failed for reo_exception_ring"));
12517 		goto fail1;
12518 	}
12519 
12520 	wlan_minidump_log(soc->reo_exception_ring.base_vaddr_unaligned,
12521 			  soc->reo_exception_ring.alloc_size,
12522 			  soc->ctrl_psoc,
12523 			  WLAN_MD_DP_SRNG_REO_EXCEPTION,
12524 			  "reo_exception_ring");
12525 
12526 	/* REO command and status rings */
12527 	if (dp_srng_init(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0)) {
12528 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12529 			  FL("dp_srng_init failed for reo_cmd_ring"));
12530 		goto fail1;
12531 	}
12532 
12533 	wlan_minidump_log(soc->reo_cmd_ring.base_vaddr_unaligned,
12534 			  soc->reo_cmd_ring.alloc_size,
12535 			  soc->ctrl_psoc,
12536 			  WLAN_MD_DP_SRNG_REO_CMD,
12537 			  "reo_cmd_ring");
12538 
12539 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
12540 	TAILQ_INIT(&soc->rx.reo_cmd_list);
12541 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
12542 
12543 	if (dp_srng_init(soc, &soc->reo_status_ring, REO_STATUS, 0, 0)) {
12544 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12545 			  FL("dp_srng_init failed for reo_status_ring"));
12546 		goto fail1;
12547 	}
12548 
12549 	wlan_minidump_log(soc->reo_status_ring.base_vaddr_unaligned,
12550 			  soc->reo_status_ring.alloc_size,
12551 			  soc->ctrl_psoc,
12552 			  WLAN_MD_DP_SRNG_REO_STATUS,
12553 			  "reo_status_ring");
12554 
12555 	num_tcl_data_rings = wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
12556 	num_reo_dest_rings = wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
12557 
12558 	for (i = 0; i < num_tcl_data_rings; i++) {
12559 		if (dp_init_tx_ring_pair_by_index(soc, i))
12560 			goto fail1;
12561 	}
12562 
12563 	dp_create_ext_stats_event(soc);
12564 
12565 	for (i = 0; i < num_reo_dest_rings; i++) {
12566 		/* Initialize REO destination ring */
12567 		if (dp_srng_init(soc, &soc->reo_dest_ring[i], REO_DST, i, 0)) {
12568 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12569 				  FL("dp_srng_init failed for reo_dest_ringn"));
12570 			goto fail1;
12571 		}
12572 
12573 		wlan_minidump_log(soc->reo_dest_ring[i].base_vaddr_unaligned,
12574 				  soc->reo_dest_ring[i].alloc_size,
12575 				  soc->ctrl_psoc,
12576 				  WLAN_MD_DP_SRNG_REO_DEST,
12577 				  "reo_dest_ring");
12578 	}
12579 
12580 	return QDF_STATUS_SUCCESS;
12581 fail1:
12582 	/*
12583 	 * Cleanup will be done as part of soc_detach, which will
12584 	 * be called on pdev attach failure
12585 	 */
12586 	dp_soc_srng_deinit(soc);
12587 	return QDF_STATUS_E_FAILURE;
12588 }
12589 
12590 /**
12591  * dp_soc_srng_free() - free soc level srng rings
12592  * @soc: Datapath soc handle
12593  *
12594  */
12595 static void dp_soc_srng_free(struct dp_soc *soc)
12596 {
12597 	uint32_t i;
12598 
12599 	dp_srng_free(soc, &soc->wbm_desc_rel_ring);
12600 
12601 	for (i = 0; i < soc->num_tcl_data_rings; i++)
12602 		dp_free_tx_ring_pair_by_index(soc, i);
12603 
12604 	dp_srng_free(soc, &soc->tcl_cmd_credit_ring);
12605 	dp_srng_free(soc, &soc->tcl_status_ring);
12606 
12607 	for (i = 0; i < soc->num_reo_dest_rings; i++)
12608 		dp_srng_free(soc, &soc->reo_dest_ring[i]);
12609 
12610 	dp_srng_free(soc, &soc->reo_reinject_ring);
12611 	dp_srng_free(soc, &soc->rx_rel_ring);
12612 	dp_srng_free(soc, &soc->reo_exception_ring);
12613 	dp_srng_free(soc, &soc->reo_cmd_ring);
12614 	dp_srng_free(soc, &soc->reo_status_ring);
12615 }
12616 
12617 /**
12618  * dp_soc_srng_alloc() - Allocate memory for soc level srng rings
12619  * @soc: Datapath soc handle
12620  *
12621  * return: QDF_STATUS_SUCCESS on success
12622  *	   QDF_STATUS_E_NOMEM on failure
12623  */
12624 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc)
12625 {
12626 	uint32_t entries;
12627 	uint32_t i;
12628 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
12629 	uint32_t num_tcl_data_rings, num_reo_dest_rings;
12630 	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
12631 	uint32_t tx_comp_ring_size, tx_ring_size, reo_dst_ring_size;
12632 
12633 	soc_cfg_ctx = soc->wlan_cfg_ctx;
12634 
12635 	/* sw2wbm link descriptor release ring */
12636 	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
12637 	if (dp_srng_alloc(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE,
12638 			  entries, 0)) {
12639 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12640 			  FL("dp_srng_alloc failed for wbm_desc_rel_ring"));
12641 		goto fail1;
12642 	}
12643 
12644 	entries = wlan_cfg_get_dp_soc_tcl_cmd_credit_ring_size(soc_cfg_ctx);
12645 	/* TCL command and status rings */
12646 	if (dp_srng_alloc(soc, &soc->tcl_cmd_credit_ring, TCL_CMD_CREDIT,
12647 			  entries, 0)) {
12648 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12649 			  FL("dp_srng_alloc failed for tcl_cmd_ring"));
12650 		goto fail1;
12651 	}
12652 
12653 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
12654 	if (dp_srng_alloc(soc, &soc->tcl_status_ring, TCL_STATUS, entries,
12655 			  0)) {
12656 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12657 			  FL("dp_srng_alloc failed for tcl_status_ring"));
12658 		goto fail1;
12659 	}
12660 
12661 	/* REO reinjection ring */
12662 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
12663 	if (dp_srng_alloc(soc, &soc->reo_reinject_ring, REO_REINJECT,
12664 			  entries, 0)) {
12665 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12666 			  FL("dp_srng_alloc failed for reo_reinject_ring"));
12667 		goto fail1;
12668 	}
12669 
12670 	/* Rx release ring */
12671 	entries = wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx);
12672 	if (dp_srng_alloc(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
12673 			  entries, 0)) {
12674 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12675 			  FL("dp_srng_alloc failed for rx_rel_ring"));
12676 		goto fail1;
12677 	}
12678 
12679 	/* Rx exception ring */
12680 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
12681 	if (dp_srng_alloc(soc, &soc->reo_exception_ring, REO_EXCEPTION,
12682 			  entries, 0)) {
12683 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12684 			  FL("dp_srng_alloc failed for reo_exception_ring"));
12685 		goto fail1;
12686 	}
12687 
12688 	/* REO command and status rings */
12689 	entries = wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx);
12690 	if (dp_srng_alloc(soc, &soc->reo_cmd_ring, REO_CMD, entries, 0)) {
12691 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12692 			  FL("dp_srng_alloc failed for reo_cmd_ring"));
12693 		goto fail1;
12694 	}
12695 
12696 	entries = wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx);
12697 	if (dp_srng_alloc(soc, &soc->reo_status_ring, REO_STATUS,
12698 			  entries, 0)) {
12699 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12700 			  FL("dp_srng_alloc failed for reo_status_ring"));
12701 		goto fail1;
12702 	}
12703 
12704 	num_tcl_data_rings = wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
12705 	num_reo_dest_rings = wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
12706 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
12707 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
12708 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc_cfg_ctx);
12709 
12710 	/* Disable cached desc if NSS offload is enabled */
12711 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
12712 		cached = 0;
12713 
12714 	for (i = 0; i < num_tcl_data_rings; i++) {
12715 		if (dp_alloc_tx_ring_pair_by_index(soc, i))
12716 			goto fail1;
12717 	}
12718 
12719 	soc->num_tcl_data_rings = num_tcl_data_rings;
12720 
12721 	for (i = 0; i < num_reo_dest_rings; i++) {
12722 		/* Setup REO destination ring */
12723 		if (dp_srng_alloc(soc, &soc->reo_dest_ring[i], REO_DST,
12724 				  reo_dst_ring_size, cached)) {
12725 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12726 				  FL("dp_srng_alloc failed for reo_dest_ring"));
12727 			goto fail1;
12728 		}
12729 	}
12730 	soc->num_reo_dest_rings = num_reo_dest_rings;
12731 
12732 	return QDF_STATUS_SUCCESS;
12733 
12734 fail1:
12735 	dp_soc_srng_free(soc);
12736 	return QDF_STATUS_E_NOMEM;
12737 }
12738 
12739 /**
12740  * dp_soc_cfg_init() - initialize target specific configuration
12741  *		       during dp_soc_init
12742  * @soc: dp soc handle
12743  */
12744 static void dp_soc_cfg_init(struct dp_soc *soc)
12745 {
12746 	int target_type;
12747 
12748 	target_type = hal_get_target_type(soc->hal_soc);
12749 	switch (target_type) {
12750 	case TARGET_TYPE_QCA6290:
12751 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12752 					       REO_DST_RING_SIZE_QCA6290);
12753 		soc->ast_override_support = 1;
12754 		soc->da_war_enabled = false;
12755 		break;
12756 	case TARGET_TYPE_QCA6390:
12757 	case TARGET_TYPE_QCA6490:
12758 	case TARGET_TYPE_QCA6750:
12759 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12760 					       REO_DST_RING_SIZE_QCA6290);
12761 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
12762 		soc->ast_override_support = 1;
12763 		if (soc->cdp_soc.ol_ops->get_con_mode &&
12764 		    soc->cdp_soc.ol_ops->get_con_mode() ==
12765 		    QDF_GLOBAL_MONITOR_MODE) {
12766 			int int_ctx;
12767 
12768 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
12769 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
12770 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
12771 			}
12772 		}
12773 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
12774 		break;
12775 	case TARGET_TYPE_QCA8074:
12776 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
12777 							   MON_BUF_MIN_ENTRIES);
12778 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12779 					       REO_DST_RING_SIZE_QCA8074);
12780 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
12781 		soc->da_war_enabled = true;
12782 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
12783 		break;
12784 	case TARGET_TYPE_QCA8074V2:
12785 	case TARGET_TYPE_QCA6018:
12786 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
12787 							   MON_BUF_MIN_ENTRIES);
12788 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12789 					       REO_DST_RING_SIZE_QCA8074);
12790 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
12791 		soc->hw_nac_monitor_support = 1;
12792 		soc->ast_override_support = 1;
12793 		soc->per_tid_basize_max_tid = 8;
12794 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
12795 		soc->da_war_enabled = false;
12796 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
12797 		break;
12798 	case TARGET_TYPE_QCN9000:
12799 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
12800 							   MON_BUF_MIN_ENTRIES);
12801 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12802 					       REO_DST_RING_SIZE_QCN9000);
12803 		soc->ast_override_support = 1;
12804 		soc->da_war_enabled = false;
12805 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
12806 		soc->hw_nac_monitor_support = 1;
12807 		soc->per_tid_basize_max_tid = 8;
12808 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
12809 		soc->lmac_polled_mode = 0;
12810 		soc->wbm_release_desc_rx_sg_support = 1;
12811 		if (cfg_get(soc->ctrl_psoc, CFG_DP_FULL_MON_MODE))
12812 			dp_config_full_mon_mode((struct cdp_soc_t *)soc, 1);
12813 		break;
12814 	case TARGET_TYPE_QCA5018:
12815 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12816 					       REO_DST_RING_SIZE_QCA8074);
12817 		soc->ast_override_support = 1;
12818 		soc->da_war_enabled = false;
12819 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
12820 		soc->hw_nac_monitor_support = 1;
12821 		soc->per_tid_basize_max_tid = 8;
12822 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
12823 		soc->disable_mac1_intr = 1;
12824 		soc->disable_mac2_intr = 1;
12825 		soc->wbm_release_desc_rx_sg_support = 1;
12826 		break;
12827 	default:
12828 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
12829 		qdf_assert_always(0);
12830 		break;
12831 	}
12832 }
12833 
12834 /**
12835  * dp_soc_cfg_attach() - set target specific configuration in
12836  *			 dp soc cfg.
12837  * @soc: dp soc handle
12838  */
12839 static void dp_soc_cfg_attach(struct dp_soc *soc)
12840 {
12841 	int target_type;
12842 	int nss_cfg = 0;
12843 
12844 	target_type = hal_get_target_type(soc->hal_soc);
12845 	switch (target_type) {
12846 	case TARGET_TYPE_QCA6290:
12847 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12848 					       REO_DST_RING_SIZE_QCA6290);
12849 		break;
12850 	case TARGET_TYPE_QCA6390:
12851 	case TARGET_TYPE_QCA6490:
12852 	case TARGET_TYPE_QCA6750:
12853 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12854 					       REO_DST_RING_SIZE_QCA6290);
12855 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
12856 		break;
12857 	case TARGET_TYPE_QCA8074:
12858 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
12859 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12860 					       REO_DST_RING_SIZE_QCA8074);
12861 		break;
12862 	case TARGET_TYPE_QCA8074V2:
12863 	case TARGET_TYPE_QCA6018:
12864 	case TARGET_TYPE_QCA5018:
12865 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
12866 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12867 					       REO_DST_RING_SIZE_QCA8074);
12868 		break;
12869 	case TARGET_TYPE_QCN9000:
12870 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
12871 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12872 					       REO_DST_RING_SIZE_QCN9000);
12873 		break;
12874 	default:
12875 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
12876 		qdf_assert_always(0);
12877 		break;
12878 	}
12879 
12880 	if (soc->cdp_soc.ol_ops->get_soc_nss_cfg)
12881 		nss_cfg = soc->cdp_soc.ol_ops->get_soc_nss_cfg(soc->ctrl_psoc);
12882 
12883 	wlan_cfg_set_dp_soc_nss_cfg(soc->wlan_cfg_ctx, nss_cfg);
12884 
12885 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
12886 		wlan_cfg_set_num_tx_desc_pool(soc->wlan_cfg_ctx, 0);
12887 		wlan_cfg_set_num_tx_ext_desc_pool(soc->wlan_cfg_ctx, 0);
12888 		wlan_cfg_set_num_tx_desc(soc->wlan_cfg_ctx, 0);
12889 		wlan_cfg_set_num_tx_ext_desc(soc->wlan_cfg_ctx, 0);
12890 	}
12891 }
12892 
12893 static inline QDF_STATUS dp_pdev_init(struct cdp_soc_t *txrx_soc,
12894 				      HTC_HANDLE htc_handle,
12895 				      qdf_device_t qdf_osdev,
12896 				      uint8_t pdev_id)
12897 {
12898 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
12899 	int nss_cfg;
12900 	void *sojourn_buf;
12901 	QDF_STATUS ret;
12902 
12903 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
12904 	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
12905 
12906 	soc_cfg_ctx = soc->wlan_cfg_ctx;
12907 	pdev->soc = soc;
12908 	pdev->pdev_id = pdev_id;
12909 
12910 	pdev->filter = dp_mon_filter_alloc(pdev);
12911 	if (!pdev->filter) {
12912 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12913 			  FL("Memory allocation failed for monitor filters"));
12914 		ret = QDF_STATUS_E_NOMEM;
12915 		goto fail0;
12916 	}
12917 
12918 	/*
12919 	 * Variable to prevent double pdev deinitialization during
12920 	 * radio detach execution .i.e. in the absence of any vdev.
12921 	 */
12922 	pdev->pdev_deinit = 0;
12923 
12924 	if (dp_wdi_event_attach(pdev)) {
12925 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
12926 			  "dp_wdi_evet_attach failed");
12927 		goto fail1;
12928 	}
12929 
12930 	if (dp_pdev_srng_init(pdev)) {
12931 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12932 			  FL("Failed to initialize pdev srng rings"));
12933 		goto fail2;
12934 	}
12935 
12936 	/* Initialize descriptors in TCL Rings used by IPA */
12937 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
12938 		hal_tx_init_data_ring(soc->hal_soc,
12939 				      soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng);
12940 
12941 	/*
12942 	 * Initialize command/credit ring descriptor
12943 	 * Command/CREDIT ring also used for sending DATA cmds
12944 	 */
12945 	hal_tx_init_cmd_credit_ring(soc->hal_soc,
12946 				    soc->tcl_cmd_credit_ring.hal_srng);
12947 
12948 	dp_tx_pdev_init(pdev);
12949 	/*
12950 	 * Variable to prevent double pdev deinitialization during
12951 	 * radio detach execution .i.e. in the absence of any vdev.
12952 	 */
12953 	pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer));
12954 
12955 	if (!pdev->invalid_peer) {
12956 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12957 			  FL("Invalid peer memory allocation failed"));
12958 		goto fail3;
12959 	}
12960 
12961 	/*
12962 	 * set nss pdev config based on soc config
12963 	 */
12964 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
12965 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
12966 					 (nss_cfg & (1 << pdev_id)));
12967 
12968 	pdev->target_pdev_id =
12969 		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
12970 
12971 	if (soc->preferred_hw_mode == WMI_HOST_HW_MODE_2G_PHYB &&
12972 	    pdev->lmac_id == PHYB_2G_LMAC_ID) {
12973 		pdev->target_pdev_id = PHYB_2G_TARGET_PDEV_ID;
12974 	}
12975 
12976 	/* Reset the cpu ring map if radio is NSS offloaded */
12977 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
12978 		dp_soc_reset_cpu_ring_map(soc);
12979 		dp_soc_reset_intr_mask(soc);
12980 	}
12981 
12982 	TAILQ_INIT(&pdev->vdev_list);
12983 	qdf_spinlock_create(&pdev->vdev_list_lock);
12984 	pdev->vdev_count = 0;
12985 
12986 	qdf_spinlock_create(&pdev->tx_mutex);
12987 	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
12988 	TAILQ_INIT(&pdev->neighbour_peers_list);
12989 	pdev->neighbour_peers_added = false;
12990 	pdev->monitor_configured = false;
12991 	pdev->mon_chan_band = REG_BAND_UNKNOWN;
12992 	pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_INVALID_LMAC_ID;
12993 	pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_INVALID_LMAC_ID;
12994 	pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_INVALID_LMAC_ID;
12995 
12996 	DP_STATS_INIT(pdev);
12997 
12998 	/* Monitor filter init */
12999 	pdev->mon_filter_mode = MON_FILTER_ALL;
13000 	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
13001 	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
13002 	pdev->fp_data_filter = FILTER_DATA_ALL;
13003 	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
13004 	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
13005 	pdev->mo_data_filter = FILTER_DATA_ALL;
13006 
13007 	dp_local_peer_id_pool_init(pdev);
13008 
13009 	dp_dscp_tid_map_setup(pdev);
13010 	dp_pcp_tid_map_setup(pdev);
13011 
13012 	/* set the reo destination during initialization */
13013 	pdev->reo_dest = pdev->pdev_id + 1;
13014 
13015 	/*
13016 	 * initialize ppdu tlv list
13017 	 */
13018 	TAILQ_INIT(&pdev->ppdu_info_list);
13019 	TAILQ_INIT(&pdev->sched_comp_ppdu_list);
13020 	pdev->tlv_count = 0;
13021 	pdev->list_depth = 0;
13022 
13023 	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
13024 
13025 	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
13026 			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
13027 			      TRUE);
13028 
13029 	if (!pdev->sojourn_buf) {
13030 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
13031 			  FL("Failed to allocate sojourn buf"));
13032 		goto fail4;
13033 	}
13034 	sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
13035 	qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));
13036 
13037 	/* initlialize cal client timer */
13038 	dp_cal_client_attach(&pdev->cal_client_ctx,
13039 			     dp_pdev_to_cdp_pdev(pdev),
13040 			     pdev->soc->osdev,
13041 			     &dp_iterate_update_peer_list);
13042 	qdf_event_create(&pdev->fw_peer_stats_event);
13043 
13044 	pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
13045 	if (dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS)
13046 		goto fail5;
13047 
13048 	if (dp_rxdma_ring_setup(soc, pdev)) {
13049 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
13050 			  FL("RXDMA ring config failed"));
13051 		goto fail6;
13052 	}
13053 
13054 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
13055 		goto fail7;
13056 
13057 	if (dp_ipa_ring_resource_setup(soc, pdev))
13058 		goto fail8;
13059 
13060 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
13061 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
13062 			  FL("dp_ipa_uc_attach failed"));
13063 		goto fail8;
13064 	}
13065 
13066 	ret = dp_rx_fst_attach(soc, pdev);
13067 	if ((ret != QDF_STATUS_SUCCESS) &&
13068 	    (ret != QDF_STATUS_E_NOSUPPORT)) {
13069 		QDF_TRACE(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_ERROR,
13070 			  "RX Flow Search Table attach failed: pdev %d err %d",
13071 			  pdev_id, ret);
13072 		goto fail9;
13073 	}
13074 
13075 	/* initialize sw rx descriptors */
13076 	dp_rx_pdev_desc_pool_init(pdev);
13077 	/* initialize sw monitor rx descriptors */
13078 	dp_rx_pdev_mon_desc_pool_init(pdev);
13079 	/* allocate buffers and replenish the RxDMA ring */
13080 	dp_rx_pdev_buffers_alloc(pdev);
13081 	/* allocate buffers and replenish the monitor RxDMA ring */
13082 	dp_rx_pdev_mon_buffers_alloc(pdev);
13083 
13084 	dp_init_tso_stats(pdev);
13085 	dp_tx_ppdu_stats_attach(pdev);
13086 
13087 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
13088 		qdf_dma_mem_stats_read(),
13089 		qdf_heap_mem_stats_read(),
13090 		qdf_skb_mem_stats_read());
13091 
13092 	return QDF_STATUS_SUCCESS;
13093 fail9:
13094 	dp_ipa_uc_detach(soc, pdev);
13095 fail8:
13096 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
13097 fail7:
13098 	dp_rxdma_ring_cleanup(soc, pdev);
13099 fail6:
13100 	dp_htt_ppdu_stats_detach(pdev);
13101 fail5:
13102 	qdf_nbuf_free(pdev->sojourn_buf);
13103 fail4:
13104 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
13105 	qdf_spinlock_destroy(&pdev->tx_mutex);
13106 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
13107 	qdf_mem_free(pdev->invalid_peer);
13108 fail3:
13109 	dp_pdev_srng_deinit(pdev);
13110 fail2:
13111 	dp_wdi_event_detach(pdev);
13112 fail1:
13113 	dp_mon_filter_dealloc(pdev);
13114 fail0:
13115 	return QDF_STATUS_E_FAILURE;
13116 }
13117 
13118 /*
13119  * dp_pdev_init_wifi3() - Init txrx pdev
13120  * @htc_handle: HTC handle for host-target interface
13121  * @qdf_osdev: QDF OS device
13122  * @force: Force deinit
13123  *
13124  * Return: QDF_STATUS
13125  */
13126 static QDF_STATUS dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
13127 				     HTC_HANDLE htc_handle,
13128 				     qdf_device_t qdf_osdev,
13129 				     uint8_t pdev_id)
13130 {
13131 	return dp_pdev_init(txrx_soc, htc_handle, qdf_osdev, pdev_id);
13132 }
13133 
13134