xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision 97b39bfea3401259bed153a56c00d1fddbb9e87d)
1 /*
2  * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for
5  * any purpose with or without fee is hereby granted, provided that the
6  * above copyright notice and this permission notice appear in all
7  * copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
10  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
11  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
12  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
13  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
14  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
15  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
16  * PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <qdf_types.h>
20 #include <qdf_lock.h>
21 #include <qdf_net_types.h>
22 #include <qdf_lro.h>
23 #include <qdf_module.h>
24 #include <hal_hw_headers.h>
25 #include <hal_api.h>
26 #include <hif.h>
27 #include <htt.h>
28 #include <wdi_event.h>
29 #include <queue.h>
30 #include "dp_types.h"
31 #include "dp_internal.h"
32 #include "dp_tx.h"
33 #include "dp_tx_desc.h"
34 #include "dp_rx.h"
35 #include "dp_rx_mon.h"
36 #ifdef DP_RATETABLE_SUPPORT
37 #include "dp_ratetable.h"
38 #endif
39 #include <cdp_txrx_handle.h>
40 #include <wlan_cfg.h>
41 #include <wlan_utility.h>
42 #include "cdp_txrx_cmn_struct.h"
43 #include "cdp_txrx_stats_struct.h"
44 #include "cdp_txrx_cmn_reg.h"
45 #include <qdf_util.h>
46 #include "dp_peer.h"
47 #include "dp_rx_mon.h"
48 #include "htt_stats.h"
49 #include "dp_htt.h"
50 #ifdef WLAN_SUPPORT_RX_FISA
51 #include <dp_fisa_rx.h>
52 #endif
53 #include "htt_ppdu_stats.h"
54 #include "qdf_mem.h"   /* qdf_mem_malloc,free */
55 #include "cfg_ucfg_api.h"
56 #include "dp_mon_filter.h"
57 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
58 #include "cdp_txrx_flow_ctrl_v2.h"
59 #else
60 static inline void
61 cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
62 {
63 	return;
64 }
65 #endif
66 #include "dp_ipa.h"
67 #include "dp_cal_client_api.h"
68 #ifdef FEATURE_WDS
69 #include "dp_txrx_wds.h"
70 #endif
71 #ifdef ATH_SUPPORT_IQUE
72 #include "dp_txrx_me.h"
73 #endif
74 #if defined(DP_CON_MON)
75 #ifndef REMOVE_PKT_LOG
76 #include <pktlog_ac_api.h>
77 #include <pktlog_ac.h>
78 #endif
79 #endif
80 
81 #ifdef WLAN_FEATURE_STATS_EXT
82 #define INIT_RX_HW_STATS_LOCK(_soc) \
83 	qdf_spinlock_create(&(_soc)->rx_hw_stats_lock)
84 #define DEINIT_RX_HW_STATS_LOCK(_soc) \
85 	qdf_spinlock_destroy(&(_soc)->rx_hw_stats_lock)
86 #else
87 #define INIT_RX_HW_STATS_LOCK(_soc)  /* no op */
88 #define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */
89 #endif
90 
91 #ifdef DP_PEER_EXTENDED_API
92 #define SET_PEER_REF_CNT_ONE(_peer) \
93 	qdf_atomic_set(&(_peer)->ref_cnt, 1)
94 #else
95 #define SET_PEER_REF_CNT_ONE(_peer)
96 #endif
97 
98 /*
99  * The max size of cdp_peer_stats_param_t is limited to 16 bytes.
100  * If the buffer size is exceeding this size limit,
101  * dp_txrx_get_peer_stats is to be used instead.
102  */
103 QDF_COMPILE_TIME_ASSERT(cdp_peer_stats_param_t_max_size,
104 			(sizeof(cdp_peer_stats_param_t) <= 16));
105 
106 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
107 /*
108  * If WLAN_CFG_INT_NUM_CONTEXTS is changed, HIF_NUM_INT_CONTEXTS
109  * also should be updated accordingly
110  */
111 QDF_COMPILE_TIME_ASSERT(num_intr_grps,
112 			HIF_NUM_INT_CONTEXTS == WLAN_CFG_INT_NUM_CONTEXTS);
113 
114 /*
115  * HIF_EVENT_HIST_MAX should always be power of 2
116  */
117 QDF_COMPILE_TIME_ASSERT(hif_event_history_size,
118 			(HIF_EVENT_HIST_MAX & (HIF_EVENT_HIST_MAX - 1)) == 0);
119 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
120 
121 /*
122  * If WLAN_CFG_INT_NUM_CONTEXTS is changed,
123  * WLAN_CFG_INT_NUM_CONTEXTS_MAX should also be updated
124  */
125 QDF_COMPILE_TIME_ASSERT(wlan_cfg_num_int_ctxs,
126 			WLAN_CFG_INT_NUM_CONTEXTS_MAX >=
127 			WLAN_CFG_INT_NUM_CONTEXTS);
128 
129 #ifdef WLAN_RX_PKT_CAPTURE_ENH
130 #include "dp_rx_mon_feature.h"
131 #else
132 /*
133  * dp_config_enh_rx_capture()- API to enable/disable enhanced rx capture
134  * @pdev_handle: DP_PDEV handle
135  * @val: user provided value
136  *
137  * Return: QDF_STATUS
138  */
139 static QDF_STATUS
140 dp_config_enh_rx_capture(struct dp_pdev *pdev_handle, uint8_t val)
141 {
142 	return QDF_STATUS_E_INVAL;
143 }
144 #endif /* WLAN_RX_PKT_CAPTURE_ENH */
145 
146 #ifdef WLAN_TX_PKT_CAPTURE_ENH
147 #include "dp_tx_capture.h"
148 #else
149 /*
150  * dp_config_enh_tx_capture()- API to enable/disable enhanced tx capture
151  * @pdev_handle: DP_PDEV handle
152  * @val: user provided value
153  *
154  * Return: QDF_STATUS
155  */
156 static QDF_STATUS
157 dp_config_enh_tx_capture(struct dp_pdev *pdev_handle, uint8_t val)
158 {
159 	return QDF_STATUS_E_INVAL;
160 }
161 #endif
162 
163 static void dp_pdev_srng_deinit(struct dp_pdev *pdev);
164 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev);
165 static void dp_pdev_srng_free(struct dp_pdev *pdev);
166 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev);
167 
168 static void dp_soc_srng_deinit(struct dp_soc *soc);
169 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc);
170 static void dp_soc_srng_free(struct dp_soc *soc);
171 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc);
172 
173 static void dp_soc_cfg_init(struct dp_soc *soc);
174 static void dp_soc_cfg_attach(struct dp_soc *soc);
175 
176 static inline
177 QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
178 				HTC_HANDLE htc_handle,
179 				qdf_device_t qdf_osdev,
180 				uint8_t pdev_id);
181 
182 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id);
183 
184 static QDF_STATUS
185 dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
186 		   HTC_HANDLE htc_handle,
187 		   qdf_device_t qdf_osdev,
188 		   uint8_t pdev_id);
189 
190 static QDF_STATUS
191 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id, int force);
192 
193 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc);
194 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc);
195 
196 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
197 		  struct hif_opaque_softc *hif_handle);
198 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
199 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc,
200 				       uint8_t pdev_id,
201 				       int force);
202 static struct dp_soc *
203 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
204 	      struct hif_opaque_softc *hif_handle,
205 	      HTC_HANDLE htc_handle,
206 	      qdf_device_t qdf_osdev,
207 	      struct ol_if_ops *ol_ops, uint16_t device_id);
208 static void dp_pktlogmod_exit(struct dp_pdev *handle);
209 static inline QDF_STATUS dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl,
210 					      uint8_t vdev_id,
211 					      uint8_t *peer_mac_addr);
212 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
213 				       uint8_t vdev_id,
214 				       uint8_t *peer_mac, uint32_t bitmap);
215 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
216 				bool unmap_only);
217 #ifdef ENABLE_VERBOSE_DEBUG
218 bool is_dp_verbose_debug_enabled;
219 #endif
220 
221 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
222 static void dp_cfr_filter(struct cdp_soc_t *soc_hdl,
223 			  uint8_t pdev_id,
224 			  bool enable,
225 			  struct cdp_monitor_filter *filter_val);
226 static bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
227 static void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
228 			   bool enable);
229 static inline void
230 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
231 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats);
232 static inline void
233 dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
234 static inline void
235 dp_enable_mon_reap_timer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
236 			 bool enable);
237 #endif
238 static inline bool
239 dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev);
240 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
241 					    enum hal_ring_type ring_type,
242 					    int ring_num);
243 #define DP_INTR_POLL_TIMER_MS	5
244 
245 /* Generic AST entry aging timer value */
246 #define DP_AST_AGING_TIMER_DEFAULT_MS	1000
247 #define DP_MCS_LENGTH (6*MAX_MCS)
248 
249 #define DP_CURR_FW_STATS_AVAIL 19
250 #define DP_HTT_DBG_EXT_STATS_MAX 256
251 #define DP_MAX_SLEEP_TIME 100
252 #ifndef QCA_WIFI_3_0_EMU
253 #define SUSPEND_DRAIN_WAIT 500
254 #else
255 #define SUSPEND_DRAIN_WAIT 3000
256 #endif
257 
258 #ifdef IPA_OFFLOAD
259 /* Exclude IPA rings from the interrupt context */
260 #define TX_RING_MASK_VAL	0xb
261 #define RX_RING_MASK_VAL	0x7
262 #else
263 #define TX_RING_MASK_VAL	0xF
264 #define RX_RING_MASK_VAL	0xF
265 #endif
266 
267 #define STR_MAXLEN	64
268 
269 #define RNG_ERR		"SRNG setup failed for"
270 
271 /* Threshold for peer's cached buf queue beyond which frames are dropped */
272 #define DP_RX_CACHED_BUFQ_THRESH 64
273 
274 /* Budget to reap monitor status ring */
275 #define DP_MON_REAP_BUDGET 1024
276 
277 /**
278  * default_dscp_tid_map - Default DSCP-TID mapping
279  *
280  * DSCP        TID
281  * 000000      0
282  * 001000      1
283  * 010000      2
284  * 011000      3
285  * 100000      4
286  * 101000      5
287  * 110000      6
288  * 111000      7
289  */
290 static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
291 	0, 0, 0, 0, 0, 0, 0, 0,
292 	1, 1, 1, 1, 1, 1, 1, 1,
293 	2, 2, 2, 2, 2, 2, 2, 2,
294 	3, 3, 3, 3, 3, 3, 3, 3,
295 	4, 4, 4, 4, 4, 4, 4, 4,
296 	5, 5, 5, 5, 5, 5, 5, 5,
297 	6, 6, 6, 6, 6, 6, 6, 6,
298 	7, 7, 7, 7, 7, 7, 7, 7,
299 };
300 
301 /**
302  * default_pcp_tid_map - Default PCP-TID mapping
303  *
304  * PCP     TID
305  * 000      0
306  * 001      1
307  * 010      2
308  * 011      3
309  * 100      4
310  * 101      5
311  * 110      6
312  * 111      7
313  */
314 static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
315 	0, 1, 2, 3, 4, 5, 6, 7,
316 };
317 
318 /**
319  * @brief Cpu to tx ring map
320  */
321 uint8_t
322 dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
323 	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
324 	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
325 	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
326 	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
327 	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
328 #ifdef WLAN_TX_PKT_CAPTURE_ENH
329 	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
330 #endif
331 };
332 
333 /**
334  * @brief Select the type of statistics
335  */
336 enum dp_stats_type {
337 	STATS_FW = 0,
338 	STATS_HOST = 1,
339 	STATS_TYPE_MAX = 2,
340 };
341 
342 /**
343  * @brief General Firmware statistics options
344  *
345  */
346 enum dp_fw_stats {
347 	TXRX_FW_STATS_INVALID	= -1,
348 };
349 
350 /**
351  * dp_stats_mapping_table - Firmware and Host statistics
352  * currently supported
353  */
354 const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
355 	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
356 	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
357 	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
358 	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
359 	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
360 	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
361 	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
362 	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
363 	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
364 	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
365 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
366 	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
367 	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
368 	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
369 	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
370 	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
371 	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
372 	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
373 	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
374 	/* Last ENUM for HTT FW STATS */
375 	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
376 	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
377 	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
378 	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
379 	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
380 	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
381 	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
382 	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
383 	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
384 	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
385 	{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
386 	{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
387 	{TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
388 	{TXRX_FW_STATS_INVALID, TXRX_SOC_FSE_STATS},
389 	{TXRX_FW_STATS_INVALID, TXRX_HAL_REG_WRITE_STATS},
390 	{HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT, TXRX_HOST_STATS_INVALID}
391 };
392 
393 /* MCL specific functions */
394 #if defined(DP_CON_MON)
395 /**
396  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
397  * @soc: pointer to dp_soc handle
398  * @intr_ctx_num: interrupt context number for which mon mask is needed
399  *
400  * For MCL, monitor mode rings are being processed in timer contexts (polled).
401  * This function is returning 0, since in interrupt mode(softirq based RX),
402  * we donot want to process monitor mode rings in a softirq.
403  *
404  * So, in case packet log is enabled for SAP/STA/P2P modes,
405  * regular interrupt processing will not process monitor mode rings. It would be
406  * done in a separate timer context.
407  *
408  * Return: 0
409  */
410 static inline
411 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
412 {
413 	return 0;
414 }
415 
416 /*
417  * dp_service_mon_rings()- service monitor rings
418  * @soc: soc dp handle
419  * @quota: number of ring entry that can be serviced
420  *
421  * Return: None
422  *
423  */
424 static void dp_service_mon_rings(struct  dp_soc *soc, uint32_t quota)
425 {
426 	int ring = 0, work_done;
427 	struct dp_pdev *pdev = NULL;
428 
429 	for (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
430 		pdev = dp_get_pdev_for_lmac_id(soc, ring);
431 		if (!pdev)
432 			continue;
433 		work_done = dp_mon_process(soc, NULL, ring, quota);
434 
435 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
436 			  FL("Reaped %d descs from Monitor rings"),
437 			  work_done);
438 	}
439 }
440 
441 /*
442  * dp_mon_reap_timer_handler()- timer to reap monitor rings
443  * reqd as we are not getting ppdu end interrupts
444  * @arg: SoC Handle
445  *
446  * Return:
447  *
448  */
449 static void dp_mon_reap_timer_handler(void *arg)
450 {
451 	struct dp_soc *soc = (struct dp_soc *)arg;
452 
453 	dp_service_mon_rings(soc, QCA_NAPI_BUDGET);
454 
455 	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
456 }
457 
458 #ifndef REMOVE_PKT_LOG
459 /**
460  * dp_pkt_log_init() - API to initialize packet log
461  * @soc_hdl: Datapath soc handle
462  * @pdev_id: id of data path pdev handle
463  * @scn: HIF context
464  *
465  * Return: none
466  */
467 void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn)
468 {
469 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
470 	struct dp_pdev *handle =
471 		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
472 
473 	if (!handle) {
474 		dp_err("pdev handle is NULL");
475 		return;
476 	}
477 
478 	if (handle->pkt_log_init) {
479 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
480 			  "%s: Packet log not initialized", __func__);
481 		return;
482 	}
483 
484 	pktlog_sethandle(&handle->pl_dev, scn);
485 	pktlog_set_pdev_id(handle->pl_dev, pdev_id);
486 	pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);
487 
488 	if (pktlogmod_init(scn)) {
489 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
490 			  "%s: pktlogmod_init failed", __func__);
491 		handle->pkt_log_init = false;
492 	} else {
493 		handle->pkt_log_init = true;
494 	}
495 }
496 
497 /**
498  * dp_pkt_log_con_service() - connect packet log service
499  * @soc_hdl: Datapath soc handle
500  * @pdev_id: id of data path pdev handle
501  * @scn: device context
502  *
503  * Return: none
504  */
505 static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
506 				   uint8_t pdev_id, void *scn)
507 {
508 	dp_pkt_log_init(soc_hdl, pdev_id, scn);
509 	pktlog_htc_attach();
510 }
511 
512 /**
513  * dp_pktlogmod_exit() - API to cleanup pktlog info
514  * @pdev: Pdev handle
515  *
516  * Return: none
517  */
518 static void dp_pktlogmod_exit(struct dp_pdev *pdev)
519 {
520 	struct dp_soc *soc = pdev->soc;
521 	struct hif_opaque_softc *scn = soc->hif_handle;
522 
523 	if (!scn) {
524 		dp_err("Invalid hif(scn) handle");
525 		return;
526 	}
527 
528 	/* stop mon_reap_timer if it has been started */
529 	if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED &&
530 	    soc->reap_timer_init && (!dp_is_enable_reap_timer_non_pkt(pdev)))
531 		qdf_timer_sync_cancel(&soc->mon_reap_timer);
532 
533 	pktlogmod_exit(scn);
534 	pdev->pkt_log_init = false;
535 }
536 #else
537 static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
538 				   uint8_t pdev_id, void *scn)
539 {
540 }
541 
542 static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
543 #endif
544 /**
545  * dp_get_num_rx_contexts() - get number of RX contexts
546  * @soc_hdl: cdp opaque soc handle
547  *
548  * Return: number of RX contexts
549  */
550 static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
551 {
552 	int i;
553 	int num_rx_contexts = 0;
554 
555 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
556 
557 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
558 		if (wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i))
559 			num_rx_contexts++;
560 
561 	return num_rx_contexts;
562 }
563 
564 #else
565 static void dp_pktlogmod_exit(struct dp_pdev *handle) { }
566 
567 /**
568  * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
569  * @soc: pointer to dp_soc handle
570  * @intr_ctx_num: interrupt context number for which mon mask is needed
571  *
572  * Return: mon mask value
573  */
574 static inline
575 uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
576 {
577 	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
578 }
579 
580 /*
581  * dp_service_lmac_rings()- timer to reap lmac rings
582  * @arg: SoC Handle
583  *
584  * Return:
585  *
586  */
587 static void dp_service_lmac_rings(void *arg)
588 {
589 	struct dp_soc *soc = (struct dp_soc *)arg;
590 	int ring = 0, i;
591 	struct dp_pdev *pdev = NULL;
592 	union dp_rx_desc_list_elem_t *desc_list = NULL;
593 	union dp_rx_desc_list_elem_t *tail = NULL;
594 
595 	/* Process LMAC interrupts */
596 	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
597 		int mac_for_pdev = ring;
598 		struct dp_srng *rx_refill_buf_ring;
599 
600 		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
601 		if (!pdev)
602 			continue;
603 
604 		rx_refill_buf_ring = &soc->rx_refill_buf_ring[mac_for_pdev];
605 
606 		dp_mon_process(soc, NULL, mac_for_pdev,
607 			       QCA_NAPI_BUDGET);
608 
609 		for (i = 0;
610 		     i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
611 			dp_rxdma_err_process(&soc->intr_ctx[i], soc,
612 					     mac_for_pdev,
613 					     QCA_NAPI_BUDGET);
614 
615 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF,
616 						  mac_for_pdev))
617 			dp_rx_buffers_replenish(soc, mac_for_pdev,
618 						rx_refill_buf_ring,
619 						&soc->rx_desc_buf[mac_for_pdev],
620 						0, &desc_list, &tail);
621 	}
622 
623 	qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
624 }
625 
626 #endif
627 
628 static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
629 				 uint8_t vdev_id,
630 				 uint8_t *peer_mac,
631 				 uint8_t *mac_addr,
632 				 enum cdp_txrx_ast_entry_type type,
633 				 uint32_t flags)
634 {
635 	int ret = -1;
636 	QDF_STATUS status = QDF_STATUS_SUCCESS;
637 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
638 						       peer_mac, 0, vdev_id,
639 						       DP_MOD_ID_CDP);
640 
641 	if (!peer) {
642 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
643 			  "%s: Peer is NULL!\n", __func__);
644 		return ret;
645 	}
646 
647 	status = dp_peer_add_ast((struct dp_soc *)soc_hdl,
648 				 peer,
649 				 mac_addr,
650 				 type,
651 				 flags);
652 	if ((status == QDF_STATUS_SUCCESS) ||
653 	    (status == QDF_STATUS_E_ALREADY) ||
654 	    (status == QDF_STATUS_E_AGAIN))
655 		ret = 0;
656 
657 	dp_hmwds_ast_add_notify(peer, mac_addr,
658 				type, status, false);
659 
660 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
661 
662 	return ret;
663 }
664 
665 static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
666 						uint8_t vdev_id,
667 						uint8_t *peer_mac,
668 						uint8_t *wds_macaddr,
669 						uint32_t flags)
670 {
671 	int status = -1;
672 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
673 	struct dp_ast_entry  *ast_entry = NULL;
674 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
675 						       peer_mac, 0, vdev_id,
676 						       DP_MOD_ID_CDP);
677 
678 	if (!peer) {
679 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
680 			  "%s: Peer is NULL!\n", __func__);
681 		return status;
682 	}
683 
684 	qdf_spin_lock_bh(&soc->ast_lock);
685 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
686 						    peer->vdev->pdev->pdev_id);
687 
688 	if (ast_entry) {
689 		status = dp_peer_update_ast(soc,
690 					    peer,
691 					    ast_entry, flags);
692 	}
693 	qdf_spin_unlock_bh(&soc->ast_lock);
694 
695 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
696 
697 	return status;
698 }
699 
700 /*
701  * dp_peer_reset_ast_entries() - Deletes all HMWDS entries for a peer
702  * @soc_handle:		Datapath SOC handle
703  * @peer:		DP peer
704  * @arg:		callback argument
705  *
706  * Return: None
707  */
708 static void
709 dp_peer_reset_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
710 {
711 	struct dp_ast_entry *ast_entry = NULL;
712 	struct dp_ast_entry *tmp_ast_entry;
713 
714 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) {
715 		if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
716 		    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
717 			dp_peer_del_ast(soc, ast_entry);
718 	}
719 }
720 
721 /*
722  * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry
723  * @soc_handle:		Datapath SOC handle
724  * @wds_macaddr:	WDS entry MAC Address
725  * @peer_macaddr:	WDS entry MAC Address
726  * @vdev_id:		id of vdev handle
727  * Return: QDF_STATUS
728  */
729 static QDF_STATUS dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
730 					 uint8_t *wds_macaddr,
731 					 uint8_t *peer_mac_addr,
732 					 uint8_t vdev_id)
733 {
734 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
735 	struct dp_ast_entry *ast_entry = NULL;
736 	struct dp_peer *peer;
737 	struct dp_pdev *pdev;
738 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
739 						     DP_MOD_ID_CDP);
740 
741 	if (!vdev)
742 		return QDF_STATUS_E_FAILURE;
743 
744 	pdev = vdev->pdev;
745 
746 	if (peer_mac_addr) {
747 		peer = dp_peer_find_hash_find(soc, peer_mac_addr,
748 					      0, vdev->vdev_id,
749 					      DP_MOD_ID_CDP);
750 		if (!peer) {
751 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
752 			return QDF_STATUS_E_FAILURE;
753 		}
754 
755 		qdf_spin_lock_bh(&soc->ast_lock);
756 		dp_peer_reset_ast_entries(soc, peer, NULL);
757 		qdf_spin_unlock_bh(&soc->ast_lock);
758 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
759 	} else if (wds_macaddr) {
760 		qdf_spin_lock_bh(&soc->ast_lock);
761 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
762 							    pdev->pdev_id);
763 
764 		if (ast_entry) {
765 			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
766 			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
767 				dp_peer_del_ast(soc, ast_entry);
768 		}
769 		qdf_spin_unlock_bh(&soc->ast_lock);
770 	}
771 
772 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
773 	return QDF_STATUS_SUCCESS;
774 }
775 
776 /*
777  * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entry
778  * @soc:		Datapath SOC handle
779  * @vdev_id:		id of vdev object
780  *
781  * Return: QDF_STATUS
782  */
783 static QDF_STATUS
784 dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
785 			     uint8_t vdev_id)
786 {
787 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
788 
789 	qdf_spin_lock_bh(&soc->ast_lock);
790 
791 	dp_soc_iterate_peer(soc, dp_peer_reset_ast_entries, NULL,
792 			    DP_MOD_ID_CDP);
793 	qdf_spin_unlock_bh(&soc->ast_lock);
794 
795 	return QDF_STATUS_SUCCESS;
796 }
797 
798 /*
799  * dp_peer_flush_ast_entries() - Delete all wds and hmwds ast entries of a peer
800  * @soc:		Datapath SOC
801  * @peer:		Datapath peer
802  * @arg:		arg to callback
803  *
804  * Return: None
805  */
806 static void
807 dp_peer_flush_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
808 {
809 	struct dp_ast_entry *ase = NULL;
810 	struct dp_ast_entry *temp_ase;
811 
812 	DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
813 		if ((ase->type ==
814 			CDP_TXRX_AST_TYPE_STATIC) ||
815 			(ase->type ==
816 			 CDP_TXRX_AST_TYPE_SELF) ||
817 			(ase->type ==
818 			 CDP_TXRX_AST_TYPE_STA_BSS))
819 			continue;
820 		dp_peer_del_ast(soc, ase);
821 	}
822 }
823 
824 /*
825  * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entry
826  * @soc:		Datapath SOC handle
827  *
828  * Return: None
829  */
830 static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
831 {
832 	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
833 
834 	qdf_spin_lock_bh(&soc->ast_lock);
835 
836 	dp_soc_iterate_peer(soc, dp_peer_flush_ast_entries, NULL,
837 			    DP_MOD_ID_CDP);
838 
839 	qdf_spin_unlock_bh(&soc->ast_lock);
840 }
841 
842 /**
843  * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
844  *                                       and return ast entry information
845  *                                       of first ast entry found in the
846  *                                       table with given mac address
847  *
848  * @soc : data path soc handle
849  * @ast_mac_addr : AST entry mac address
850  * @ast_entry_info : ast entry information
851  *
852  * return : true if ast entry found with ast_mac_addr
853  *          false if ast entry not found
854  */
855 static bool dp_peer_get_ast_info_by_soc_wifi3
856 	(struct cdp_soc_t *soc_hdl,
857 	 uint8_t *ast_mac_addr,
858 	 struct cdp_ast_entry_info *ast_entry_info)
859 {
860 	struct dp_ast_entry *ast_entry = NULL;
861 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
862 	struct dp_peer *peer = NULL;
863 
864 	qdf_spin_lock_bh(&soc->ast_lock);
865 
866 	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
867 	if ((!ast_entry) ||
868 	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
869 		qdf_spin_unlock_bh(&soc->ast_lock);
870 		return false;
871 	}
872 
873 	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
874 				     DP_MOD_ID_AST);
875 	if (!peer) {
876 		qdf_spin_unlock_bh(&soc->ast_lock);
877 		return false;
878 	}
879 
880 	ast_entry_info->type = ast_entry->type;
881 	ast_entry_info->pdev_id = ast_entry->pdev_id;
882 	ast_entry_info->vdev_id = ast_entry->vdev_id;
883 	ast_entry_info->peer_id = ast_entry->peer_id;
884 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
885 		     &peer->mac_addr.raw[0],
886 		     QDF_MAC_ADDR_SIZE);
887 	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
888 	qdf_spin_unlock_bh(&soc->ast_lock);
889 	return true;
890 }
891 
892 /**
893  * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
894  *                                          and return ast entry information
895  *                                          if mac address and pdev_id matches
896  *
897  * @soc : data path soc handle
898  * @ast_mac_addr : AST entry mac address
899  * @pdev_id : pdev_id
900  * @ast_entry_info : ast entry information
901  *
902  * return : true if ast entry found with ast_mac_addr
903  *          false if ast entry not found
904  */
905 static bool dp_peer_get_ast_info_by_pdevid_wifi3
906 		(struct cdp_soc_t *soc_hdl,
907 		 uint8_t *ast_mac_addr,
908 		 uint8_t pdev_id,
909 		 struct cdp_ast_entry_info *ast_entry_info)
910 {
911 	struct dp_ast_entry *ast_entry;
912 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
913 	struct dp_peer *peer = NULL;
914 
915 	qdf_spin_lock_bh(&soc->ast_lock);
916 
917 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr,
918 						    pdev_id);
919 
920 	if ((!ast_entry) ||
921 	    (ast_entry->delete_in_progress && !ast_entry->callback)) {
922 		qdf_spin_unlock_bh(&soc->ast_lock);
923 		return false;
924 	}
925 
926 	peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
927 				     DP_MOD_ID_AST);
928 	if (!peer) {
929 		qdf_spin_unlock_bh(&soc->ast_lock);
930 		return false;
931 	}
932 
933 	ast_entry_info->type = ast_entry->type;
934 	ast_entry_info->pdev_id = ast_entry->pdev_id;
935 	ast_entry_info->vdev_id = ast_entry->vdev_id;
936 	ast_entry_info->peer_id = ast_entry->peer_id;
937 	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
938 		     &peer->mac_addr.raw[0],
939 		     QDF_MAC_ADDR_SIZE);
940 	dp_peer_unref_delete(peer, DP_MOD_ID_AST);
941 	qdf_spin_unlock_bh(&soc->ast_lock);
942 	return true;
943 }
944 
945 /**
946  * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table
947  *                            with given mac address
948  *
949  * @soc : data path soc handle
950  * @ast_mac_addr : AST entry mac address
951  * @callback : callback function to called on ast delete response from FW
952  * @cookie : argument to be passed to callback
953  *
954  * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
955  *          is sent
956  *          QDF_STATUS_E_INVAL false if ast entry not found
957  */
958 static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
959 					       uint8_t *mac_addr,
960 					       txrx_ast_free_cb callback,
961 					       void *cookie)
962 
963 {
964 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
965 	struct dp_ast_entry *ast_entry = NULL;
966 	txrx_ast_free_cb cb = NULL;
967 	void *arg = NULL;
968 
969 	qdf_spin_lock_bh(&soc->ast_lock);
970 	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
971 	if (!ast_entry) {
972 		qdf_spin_unlock_bh(&soc->ast_lock);
973 		return -QDF_STATUS_E_INVAL;
974 	}
975 
976 	if (ast_entry->callback) {
977 		cb = ast_entry->callback;
978 		arg = ast_entry->cookie;
979 	}
980 
981 	ast_entry->callback = callback;
982 	ast_entry->cookie = cookie;
983 
984 	/*
985 	 * if delete_in_progress is set AST delete is sent to target
986 	 * and host is waiting for response should not send delete
987 	 * again
988 	 */
989 	if (!ast_entry->delete_in_progress)
990 		dp_peer_del_ast(soc, ast_entry);
991 
992 	qdf_spin_unlock_bh(&soc->ast_lock);
993 	if (cb) {
994 		cb(soc->ctrl_psoc,
995 		   dp_soc_to_cdp_soc(soc),
996 		   arg,
997 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
998 	}
999 	return QDF_STATUS_SUCCESS;
1000 }
1001 
1002 /**
1003  * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
1004  *                                   table if mac address and pdev_id matches
1005  *
1006  * @soc : data path soc handle
1007  * @ast_mac_addr : AST entry mac address
1008  * @pdev_id : pdev id
1009  * @callback : callback function to called on ast delete response from FW
1010  * @cookie : argument to be passed to callback
1011  *
1012  * return : QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
1013  *          is sent
1014  *          QDF_STATUS_E_INVAL false if ast entry not found
1015  */
1016 
1017 static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
1018 						uint8_t *mac_addr,
1019 						uint8_t pdev_id,
1020 						txrx_ast_free_cb callback,
1021 						void *cookie)
1022 
1023 {
1024 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1025 	struct dp_ast_entry *ast_entry;
1026 	txrx_ast_free_cb cb = NULL;
1027 	void *arg = NULL;
1028 
1029 	qdf_spin_lock_bh(&soc->ast_lock);
1030 	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);
1031 
1032 	if (!ast_entry) {
1033 		qdf_spin_unlock_bh(&soc->ast_lock);
1034 		return -QDF_STATUS_E_INVAL;
1035 	}
1036 
1037 	if (ast_entry->callback) {
1038 		cb = ast_entry->callback;
1039 		arg = ast_entry->cookie;
1040 	}
1041 
1042 	ast_entry->callback = callback;
1043 	ast_entry->cookie = cookie;
1044 
1045 	/*
1046 	 * if delete_in_progress is set AST delete is sent to target
1047 	 * and host is waiting for response should not sent delete
1048 	 * again
1049 	 */
1050 	if (!ast_entry->delete_in_progress)
1051 		dp_peer_del_ast(soc, ast_entry);
1052 
1053 	qdf_spin_unlock_bh(&soc->ast_lock);
1054 
1055 	if (cb) {
1056 		cb(soc->ctrl_psoc,
1057 		   dp_soc_to_cdp_soc(soc),
1058 		   arg,
1059 		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
1060 	}
1061 	return QDF_STATUS_SUCCESS;
1062 }
1063 
1064 /**
1065  * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs
1066  * @ring_num: ring num of the ring being queried
1067  * @grp_mask: the grp_mask array for the ring type in question.
1068  *
1069  * The grp_mask array is indexed by group number and the bit fields correspond
1070  * to ring numbers.  We are finding which interrupt group a ring belongs to.
1071  *
1072  * Return: the index in the grp_mask array with the ring number.
1073  * -QDF_STATUS_E_NOENT if no entry is found
1074  */
1075 static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
1076 {
1077 	int ext_group_num;
1078 	int mask = 1 << ring_num;
1079 
1080 	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
1081 	     ext_group_num++) {
1082 		if (mask & grp_mask[ext_group_num])
1083 			return ext_group_num;
1084 	}
1085 
1086 	return -QDF_STATUS_E_NOENT;
1087 }
1088 
1089 static int dp_srng_calculate_msi_group(struct dp_soc *soc,
1090 				       enum hal_ring_type ring_type,
1091 				       int ring_num)
1092 {
1093 	int *grp_mask;
1094 
1095 	switch (ring_type) {
1096 	case WBM2SW_RELEASE:
1097 		/* dp_tx_comp_handler - soc->tx_comp_ring */
1098 		if (ring_num < 3)
1099 			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
1100 
1101 		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
1102 		else if (ring_num == 3) {
1103 			/* sw treats this as a separate ring type */
1104 			grp_mask = &soc->wlan_cfg_ctx->
1105 				int_rx_wbm_rel_ring_mask[0];
1106 			ring_num = 0;
1107 		} else {
1108 			qdf_assert(0);
1109 			return -QDF_STATUS_E_NOENT;
1110 		}
1111 	break;
1112 
1113 	case REO_EXCEPTION:
1114 		/* dp_rx_err_process - &soc->reo_exception_ring */
1115 		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
1116 	break;
1117 
1118 	case REO_DST:
1119 		/* dp_rx_process - soc->reo_dest_ring */
1120 		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
1121 	break;
1122 
1123 	case REO_STATUS:
1124 		/* dp_reo_status_ring_handler - soc->reo_status_ring */
1125 		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
1126 	break;
1127 
1128 	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
1129 	case RXDMA_MONITOR_STATUS:
1130 	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
1131 	case RXDMA_MONITOR_DST:
1132 		/* dp_mon_process */
1133 		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
1134 	break;
1135 	case RXDMA_DST:
1136 		/* dp_rxdma_err_process */
1137 		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
1138 	break;
1139 
1140 	case RXDMA_BUF:
1141 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
1142 	break;
1143 
1144 	case RXDMA_MONITOR_BUF:
1145 		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
1146 	break;
1147 
1148 	case TCL_DATA:
1149 	/* CMD_CREDIT_RING is used as command in 8074 and credit in 9000 */
1150 	case TCL_CMD_CREDIT:
1151 	case REO_CMD:
1152 	case SW2WBM_RELEASE:
1153 	case WBM_IDLE_LINK:
1154 		/* normally empty SW_TO_HW rings */
1155 		return -QDF_STATUS_E_NOENT;
1156 	break;
1157 
1158 	case TCL_STATUS:
1159 	case REO_REINJECT:
1160 		/* misc unused rings */
1161 		return -QDF_STATUS_E_NOENT;
1162 	break;
1163 
1164 	case CE_SRC:
1165 	case CE_DST:
1166 	case CE_DST_STATUS:
1167 		/* CE_rings - currently handled by hif */
1168 	default:
1169 		return -QDF_STATUS_E_NOENT;
1170 	break;
1171 	}
1172 
1173 	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
1174 }
1175 
1176 static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
1177 			      *ring_params, int ring_type, int ring_num)
1178 {
1179 	int msi_group_number;
1180 	int msi_data_count;
1181 	int ret;
1182 	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
1183 
1184 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1185 					    &msi_data_count, &msi_data_start,
1186 					    &msi_irq_start);
1187 
1188 	if (ret)
1189 		return;
1190 
1191 	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
1192 						       ring_num);
1193 	if (msi_group_number < 0) {
1194 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
1195 			FL("ring not part of an ext_group; ring_type: %d,ring_num %d"),
1196 			ring_type, ring_num);
1197 		ring_params->msi_addr = 0;
1198 		ring_params->msi_data = 0;
1199 		return;
1200 	}
1201 
1202 	if (msi_group_number > msi_data_count) {
1203 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
1204 			FL("2 msi_groups will share an msi; msi_group_num %d"),
1205 			msi_group_number);
1206 
1207 		QDF_ASSERT(0);
1208 	}
1209 
1210 	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
1211 
1212 	ring_params->msi_addr = addr_low;
1213 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
1214 	ring_params->msi_data = (msi_group_number % msi_data_count)
1215 		+ msi_data_start;
1216 	ring_params->flags |= HAL_SRNG_MSI_INTR;
1217 }
1218 
1219 #ifdef FEATURE_AST
1220 /**
1221  * dp_print_peer_ast_entries() - Dump AST entries of peer
1222  * @soc: Datapath soc handle
1223  * @peer: Datapath peer
1224  * @arg: argument to iterate function
1225  *
1226  * return void
1227  */
1228 static void
1229 dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer, void *arg)
1230 {
1231 	struct dp_ast_entry *ase, *tmp_ase;
1232 	uint32_t num_entries = 0;
1233 	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
1234 			"NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS", "BSS",
1235 			"DA", "HMWDS_SEC"};
1236 
1237 	DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
1238 	    DP_PRINT_STATS("%6d mac_addr = %pM"
1239 		    " peer_mac_addr = %pM"
1240 		    " peer_id = %u"
1241 		    " type = %s"
1242 		    " next_hop = %d"
1243 		    " is_active = %d"
1244 		    " ast_idx = %d"
1245 		    " ast_hash = %d"
1246 		    " delete_in_progress = %d"
1247 		    " pdev_id = %d"
1248 		    " vdev_id = %d",
1249 		    ++num_entries,
1250 		    ase->mac_addr.raw,
1251 		    peer->mac_addr.raw,
1252 		    ase->peer_id,
1253 		    type[ase->type],
1254 		    ase->next_hop,
1255 		    ase->is_active,
1256 		    ase->ast_idx,
1257 		    ase->ast_hash_value,
1258 		    ase->delete_in_progress,
1259 		    ase->pdev_id,
1260 		    ase->vdev_id);
1261 	}
1262 }
1263 
1264 /**
1265  * dp_print_ast_stats() - Dump AST table contents
1266  * @soc: Datapath soc handle
1267  *
1268  * return void
1269  */
1270 void dp_print_ast_stats(struct dp_soc *soc)
1271 {
1272 	DP_PRINT_STATS("AST Stats:");
1273 	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
1274 	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
1275 	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
1276 	DP_PRINT_STATS("	Entries MAP ERR  = %d", soc->stats.ast.map_err);
1277 	DP_PRINT_STATS("	Entries Mismatch ERR  = %d",
1278 		       soc->stats.ast.ast_mismatch);
1279 
1280 	DP_PRINT_STATS("AST Table:");
1281 
1282 	qdf_spin_lock_bh(&soc->ast_lock);
1283 
1284 	dp_soc_iterate_peer(soc, dp_print_peer_ast_entries, NULL,
1285 			    DP_MOD_ID_GENERIC_STATS);
1286 
1287 	qdf_spin_unlock_bh(&soc->ast_lock);
1288 }
1289 #else
1290 void dp_print_ast_stats(struct dp_soc *soc)
1291 {
1292 	DP_PRINT_STATS("AST Stats not available.Enable FEATURE_AST");
1293 	return;
1294 }
1295 #endif
1296 
1297 /**
1298  * dp_print_peer_info() - Dump peer info
1299  * @soc: Datapath soc handle
1300  * @peer: Datapath peer handle
1301  * @arg: argument to iter function
1302  *
1303  * return void
1304  */
1305 static void
1306 dp_print_peer_info(struct dp_soc *soc, struct dp_peer *peer, void *arg)
1307 {
1308 	DP_PRINT_STATS("    peer_mac_addr = %pM"
1309 		       " nawds_enabled = %d"
1310 		       " bss_peer = %d"
1311 		       " wds_enabled = %d"
1312 		       " tx_cap_enabled = %d"
1313 		       " rx_cap_enabled = %d"
1314 		       " peer id = %d",
1315 		       peer->mac_addr.raw,
1316 		       peer->nawds_enabled,
1317 		       peer->bss_peer,
1318 		       peer->wds_enabled,
1319 		       peer->tx_cap_enabled,
1320 		       peer->rx_cap_enabled,
1321 		       peer->peer_id);
1322 }
1323 
1324 /**
1325  * dp_print_peer_table() - Dump all Peer stats
1326  * @vdev: Datapath Vdev handle
1327  *
1328  * return void
1329  */
1330 static void dp_print_peer_table(struct dp_vdev *vdev)
1331 {
1332 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
1333 	dp_vdev_iterate_peer(vdev, dp_print_peer_info, NULL,
1334 			     DP_MOD_ID_GENERIC_STATS);
1335 }
1336 
1337 #ifdef WLAN_DP_PER_RING_TYPE_CONFIG
1338 /**
1339  * dp_srng_configure_interrupt_thresholds() - Retrieve interrupt
1340  * threshold values from the wlan_srng_cfg table for each ring type
1341  * @soc: device handle
1342  * @ring_params: per ring specific parameters
1343  * @ring_type: Ring type
1344  * @ring_num: Ring number for a given ring type
1345  *
1346  * Fill the ring params with the interrupt threshold
1347  * configuration parameters available in the per ring type wlan_srng_cfg
1348  * table.
1349  *
1350  * Return: None
1351  */
1352 static void
1353 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
1354 				       struct hal_srng_params *ring_params,
1355 				       int ring_type, int ring_num,
1356 				       int num_entries)
1357 {
1358 	if (ring_type == WBM2SW_RELEASE && (ring_num == 3)) {
1359 		ring_params->intr_timer_thres_us =
1360 				wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1361 		ring_params->intr_batch_cntr_thres_entries =
1362 				wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1363 	} else {
1364 		ring_params->intr_timer_thres_us =
1365 				soc->wlan_srng_cfg[ring_type].timer_threshold;
1366 		ring_params->intr_batch_cntr_thres_entries =
1367 				soc->wlan_srng_cfg[ring_type].batch_count_threshold;
1368 	}
1369 	ring_params->low_threshold =
1370 			soc->wlan_srng_cfg[ring_type].low_threshold;
1371 	if (ring_params->low_threshold)
1372 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1373 }
1374 #else
1375 static void
1376 dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
1377 				       struct hal_srng_params *ring_params,
1378 				       int ring_type, int ring_num,
1379 				       int num_entries)
1380 {
1381 	if (ring_type == REO_DST) {
1382 		ring_params->intr_timer_thres_us =
1383 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1384 		ring_params->intr_batch_cntr_thres_entries =
1385 			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
1386 	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
1387 		ring_params->intr_timer_thres_us =
1388 			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
1389 		ring_params->intr_batch_cntr_thres_entries =
1390 			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
1391 	} else {
1392 		ring_params->intr_timer_thres_us =
1393 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
1394 		ring_params->intr_batch_cntr_thres_entries =
1395 			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
1396 	}
1397 
1398 	/* Enable low threshold interrupts for rx buffer rings (regular and
1399 	 * monitor buffer rings.
1400 	 * TODO: See if this is required for any other ring
1401 	 */
1402 	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
1403 	    (ring_type == RXDMA_MONITOR_STATUS)) {
1404 		/* TODO: Setting low threshold to 1/8th of ring size
1405 		 * see if this needs to be configurable
1406 		 */
1407 		ring_params->low_threshold = num_entries >> 3;
1408 		ring_params->intr_timer_thres_us =
1409 			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
1410 		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
1411 		ring_params->intr_batch_cntr_thres_entries = 0;
1412 	}
1413 
1414 	/* During initialisation monitor rings are only filled with
1415 	 * MON_BUF_MIN_ENTRIES entries. So low threshold needs to be set to
1416 	 * a value less than that. Low threshold value is reconfigured again
1417 	 * to 1/8th of the ring size when monitor vap is created.
1418 	 */
1419 	if (ring_type == RXDMA_MONITOR_BUF)
1420 		ring_params->low_threshold = MON_BUF_MIN_ENTRIES >> 1;
1421 
1422 	/* In case of PCI chipsets, we dont have PPDU end interrupts,
1423 	 * so MONITOR STATUS ring is reaped by receiving MSI from srng.
1424 	 * Keep batch threshold as 8 so that interrupt is received for
1425 	 * every 4 packets in MONITOR_STATUS ring
1426 	 */
1427 	if ((ring_type == RXDMA_MONITOR_STATUS) &&
1428 	    (soc->intr_mode == DP_INTR_MSI))
1429 		ring_params->intr_batch_cntr_thres_entries = 4;
1430 }
1431 #endif
1432 
1433 /*
1434  * dp_srng_free() - Free SRNG memory
1435  * @soc  : Data path soc handle
1436  * @srng : SRNG pointer
1437  *
1438  * return: None
1439  */
1440 
1441 static void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng)
1442 {
1443 	if (srng->alloc_size && srng->base_vaddr_unaligned) {
1444 		if (!srng->cached) {
1445 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
1446 						srng->alloc_size,
1447 						srng->base_vaddr_unaligned,
1448 						srng->base_paddr_unaligned, 0);
1449 		} else {
1450 			qdf_mem_free(srng->base_vaddr_unaligned);
1451 		}
1452 		srng->alloc_size = 0;
1453 		srng->base_vaddr_unaligned = NULL;
1454 	}
1455 	srng->hal_srng = NULL;
1456 }
1457 
1458 /*
1459  * dp_srng_init() - Initialize SRNG
1460  * @soc  : Data path soc handle
1461  * @srng : SRNG pointer
1462  * @ring_type : Ring Type
1463  * @ring_num: Ring number
1464  * @mac_id: mac_id
1465  *
1466  * return: QDF_STATUS
1467  */
1468 static QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng,
1469 			       int ring_type, int ring_num, int mac_id)
1470 {
1471 	hal_soc_handle_t hal_soc = soc->hal_soc;
1472 	struct hal_srng_params ring_params;
1473 
1474 	if (srng->hal_srng) {
1475 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1476 			  FL("Ring type: %d, num:%d is already initialized"),
1477 			  ring_type, ring_num);
1478 		return QDF_STATUS_SUCCESS;
1479 	}
1480 
1481 	/* memset the srng ring to zero */
1482 	qdf_mem_zero(srng->base_vaddr_unaligned, srng->alloc_size);
1483 
1484 	qdf_mem_zero(&ring_params, sizeof(struct hal_srng_params));
1485 	ring_params.ring_base_paddr = srng->base_paddr_aligned;
1486 	ring_params.ring_base_vaddr = srng->base_vaddr_aligned;
1487 
1488 	ring_params.num_entries = srng->num_entries;
1489 
1490 	dp_verbose_debug("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
1491 			 ring_type, ring_num,
1492 			 (void *)ring_params.ring_base_vaddr,
1493 			 (void *)ring_params.ring_base_paddr,
1494 			 ring_params.num_entries);
1495 
1496 	if (soc->intr_mode == DP_INTR_MSI) {
1497 		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
1498 		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
1499 				 ring_type, ring_num);
1500 
1501 	} else {
1502 		ring_params.msi_data = 0;
1503 		ring_params.msi_addr = 0;
1504 		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
1505 				 ring_type, ring_num);
1506 	}
1507 
1508 	dp_srng_configure_interrupt_thresholds(soc, &ring_params,
1509 					       ring_type, ring_num,
1510 					       srng->num_entries);
1511 
1512 	if (srng->cached)
1513 		ring_params.flags |= HAL_SRNG_CACHED_DESC;
1514 
1515 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
1516 					mac_id, &ring_params);
1517 
1518 	if (!srng->hal_srng) {
1519 		dp_srng_free(soc, srng);
1520 		return QDF_STATUS_E_FAILURE;
1521 	}
1522 
1523 	return QDF_STATUS_SUCCESS;
1524 }
1525 
1526 /*
1527  * dp_srng_alloc() - Allocate memory for SRNG
1528  * @soc  : Data path soc handle
1529  * @srng : SRNG pointer
1530  * @ring_type : Ring Type
1531  * @num_entries: Number of entries
1532  * @cached: cached flag variable
1533  *
1534  * return: QDF_STATUS
1535  */
1536 static QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
1537 				int ring_type, uint32_t num_entries,
1538 				bool cached)
1539 {
1540 	hal_soc_handle_t hal_soc = soc->hal_soc;
1541 	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
1542 	uint32_t ring_base_align = 32;
1543 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
1544 
1545 	if (srng->base_vaddr_unaligned) {
1546 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1547 			  FL("Ring type: %d, is already allocated"), ring_type);
1548 		return QDF_STATUS_SUCCESS;
1549 	}
1550 
1551 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
1552 	srng->hal_srng = NULL;
1553 	srng->alloc_size = num_entries * entry_size;
1554 	srng->num_entries = num_entries;
1555 	srng->cached = cached;
1556 
1557 	if (!cached) {
1558 		srng->base_vaddr_aligned =
1559 		    qdf_aligned_mem_alloc_consistent(
1560 					soc->osdev, &srng->alloc_size,
1561 					&srng->base_vaddr_unaligned,
1562 					&srng->base_paddr_unaligned,
1563 					&srng->base_paddr_aligned,
1564 					ring_base_align);
1565 	} else {
1566 		srng->base_vaddr_aligned = qdf_aligned_malloc(
1567 					&srng->alloc_size,
1568 					&srng->base_vaddr_unaligned,
1569 					&srng->base_paddr_unaligned,
1570 					&srng->base_paddr_aligned,
1571 					ring_base_align);
1572 	}
1573 
1574 	if (!srng->base_vaddr_aligned)
1575 		return QDF_STATUS_E_NOMEM;
1576 
1577 	return QDF_STATUS_SUCCESS;
1578 }
1579 
1580 /*
1581  * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
1582  * @soc: DP SOC handle
1583  * @srng: source ring structure
1584  * @ring_type: type of ring
1585  * @ring_num: ring number
1586  *
1587  * Return: None
1588  */
1589 static void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
1590 			   int ring_type, int ring_num)
1591 {
1592 	if (!srng->hal_srng) {
1593 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1594 			  FL("Ring type: %d, num:%d not setup"),
1595 			  ring_type, ring_num);
1596 		return;
1597 	}
1598 
1599 	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
1600 	srng->hal_srng = NULL;
1601 }
1602 
1603 /* TODO: Need this interface from HIF */
1604 void *hif_get_hal_handle(struct hif_opaque_softc *hif_handle);
1605 
1606 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
1607 int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
1608 			 hal_ring_handle_t hal_ring_hdl)
1609 {
1610 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
1611 	uint32_t hp, tp;
1612 	uint8_t ring_id;
1613 
1614 	if (!int_ctx)
1615 		return hal_srng_access_start(hal_soc, hal_ring_hdl);
1616 
1617 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
1618 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
1619 
1620 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
1621 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_START);
1622 
1623 	return hal_srng_access_start(hal_soc, hal_ring_hdl);
1624 }
1625 
1626 void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
1627 			hal_ring_handle_t hal_ring_hdl)
1628 {
1629 	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
1630 	uint32_t hp, tp;
1631 	uint8_t ring_id;
1632 
1633 	if (!int_ctx)
1634 		return hal_srng_access_end(hal_soc, hal_ring_hdl);
1635 
1636 	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
1637 	ring_id = hal_srng_ring_id_get(hal_ring_hdl);
1638 
1639 	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
1640 			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_END);
1641 
1642 	return hal_srng_access_end(hal_soc, hal_ring_hdl);
1643 }
1644 
1645 static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
1646 					      uint8_t hist_group_id)
1647 {
1648 	hif_record_event(dp_soc->hif_handle, hist_group_id,
1649 			 0, 0, 0, HIF_EVENT_TIMER_ENTRY);
1650 }
1651 
1652 static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
1653 					     uint8_t hist_group_id)
1654 {
1655 	hif_record_event(dp_soc->hif_handle, hist_group_id,
1656 			 0, 0, 0, HIF_EVENT_TIMER_EXIT);
1657 }
1658 #else
1659 
1660 static inline void dp_srng_record_timer_entry(struct dp_soc *dp_soc,
1661 					      uint8_t hist_group_id)
1662 {
1663 }
1664 
1665 static inline void dp_srng_record_timer_exit(struct dp_soc *dp_soc,
1666 					     uint8_t hist_group_id)
1667 {
1668 }
1669 
1670 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
1671 
1672 /*
1673  * dp_should_timer_irq_yield() - Decide if the bottom half should yield
1674  * @soc: DP soc handle
1675  * @work_done: work done in softirq context
1676  * @start_time: start time for the softirq
1677  *
1678  * Return: enum with yield code
1679  */
1680 static enum timer_yield_status
1681 dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
1682 			  uint64_t start_time)
1683 {
1684 	uint64_t cur_time = qdf_get_log_timestamp();
1685 
1686 	if (!work_done)
1687 		return DP_TIMER_WORK_DONE;
1688 
1689 	if (cur_time - start_time > DP_MAX_TIMER_EXEC_TIME_TICKS)
1690 		return DP_TIMER_TIME_EXHAUST;
1691 
1692 	return DP_TIMER_NO_YIELD;
1693 }
1694 
1695 /**
1696  * dp_process_lmac_rings() - Process LMAC rings
1697  * @int_ctx: interrupt context
1698  * @total_budget: budget of work which can be done
1699  *
1700  * Return: work done
1701  */
1702 static int dp_process_lmac_rings(struct dp_intr *int_ctx, int total_budget)
1703 {
1704 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
1705 	struct dp_soc *soc = int_ctx->soc;
1706 	uint32_t remaining_quota = total_budget;
1707 	struct dp_pdev *pdev = NULL;
1708 	uint32_t work_done  = 0;
1709 	int budget = total_budget;
1710 	int ring = 0;
1711 
1712 	/* Process LMAC interrupts */
1713 	for  (ring = 0 ; ring < MAX_NUM_LMAC_HW; ring++) {
1714 		int mac_for_pdev = ring;
1715 
1716 		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
1717 		if (!pdev)
1718 			continue;
1719 		if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
1720 			work_done = dp_mon_process(soc, int_ctx, mac_for_pdev,
1721 						   remaining_quota);
1722 			if (work_done)
1723 				intr_stats->num_rx_mon_ring_masks++;
1724 			budget -= work_done;
1725 			if (budget <= 0)
1726 				goto budget_done;
1727 			remaining_quota = budget;
1728 		}
1729 
1730 		if (int_ctx->rxdma2host_ring_mask &
1731 				(1 << mac_for_pdev)) {
1732 			work_done = dp_rxdma_err_process(int_ctx, soc,
1733 							 mac_for_pdev,
1734 							 remaining_quota);
1735 			if (work_done)
1736 				intr_stats->num_rxdma2host_ring_masks++;
1737 			budget -=  work_done;
1738 			if (budget <= 0)
1739 				goto budget_done;
1740 			remaining_quota = budget;
1741 		}
1742 
1743 		if (int_ctx->host2rxdma_ring_mask &
1744 					(1 << mac_for_pdev)) {
1745 			union dp_rx_desc_list_elem_t *desc_list = NULL;
1746 			union dp_rx_desc_list_elem_t *tail = NULL;
1747 			struct dp_srng *rx_refill_buf_ring;
1748 
1749 			if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
1750 				rx_refill_buf_ring =
1751 					&soc->rx_refill_buf_ring[mac_for_pdev];
1752 			else
1753 				rx_refill_buf_ring =
1754 					&soc->rx_refill_buf_ring[pdev->lmac_id];
1755 
1756 			intr_stats->num_host2rxdma_ring_masks++;
1757 			DP_STATS_INC(pdev, replenish.low_thresh_intrs,
1758 				     1);
1759 			dp_rx_buffers_replenish(soc, mac_for_pdev,
1760 						rx_refill_buf_ring,
1761 						&soc->rx_desc_buf[mac_for_pdev],
1762 						0, &desc_list, &tail);
1763 		}
1764 	}
1765 
1766 budget_done:
1767 	return total_budget - budget;
1768 }
1769 
1770 /*
1771  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
1772  * @dp_ctx: DP SOC handle
1773  * @budget: Number of frames/descriptors that can be processed in one shot
1774  *
1775  * Return: remaining budget/quota for the soc device
1776  */
1777 static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
1778 {
1779 	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
1780 	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
1781 	struct dp_soc *soc = int_ctx->soc;
1782 	int ring = 0;
1783 	uint32_t work_done  = 0;
1784 	int budget = dp_budget;
1785 	uint8_t tx_mask = int_ctx->tx_ring_mask;
1786 	uint8_t rx_mask = int_ctx->rx_ring_mask;
1787 	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
1788 	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
1789 	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
1790 	uint32_t remaining_quota = dp_budget;
1791 
1792 	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x\n",
1793 			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
1794 			 reo_status_mask,
1795 			 int_ctx->rx_mon_ring_mask,
1796 			 int_ctx->host2rxdma_ring_mask,
1797 			 int_ctx->rxdma2host_ring_mask);
1798 
1799 	/* Process Tx completion interrupts first to return back buffers */
1800 	while (tx_mask) {
1801 		if (tx_mask & 0x1) {
1802 			work_done = dp_tx_comp_handler(int_ctx,
1803 						       soc,
1804 						       soc->tx_comp_ring[ring].hal_srng,
1805 						       ring, remaining_quota);
1806 
1807 			if (work_done) {
1808 				intr_stats->num_tx_ring_masks[ring]++;
1809 				dp_verbose_debug("tx mask 0x%x ring %d, budget %d, work_done %d",
1810 						 tx_mask, ring, budget,
1811 						 work_done);
1812 			}
1813 
1814 			budget -= work_done;
1815 			if (budget <= 0)
1816 				goto budget_done;
1817 
1818 			remaining_quota = budget;
1819 		}
1820 		tx_mask = tx_mask >> 1;
1821 		ring++;
1822 	}
1823 
1824 	/* Process REO Exception ring interrupt */
1825 	if (rx_err_mask) {
1826 		work_done = dp_rx_err_process(int_ctx, soc,
1827 					      soc->reo_exception_ring.hal_srng,
1828 					      remaining_quota);
1829 
1830 		if (work_done) {
1831 			intr_stats->num_rx_err_ring_masks++;
1832 			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
1833 					 work_done, budget);
1834 		}
1835 
1836 		budget -=  work_done;
1837 		if (budget <= 0) {
1838 			goto budget_done;
1839 		}
1840 		remaining_quota = budget;
1841 	}
1842 
1843 	/* Process Rx WBM release ring interrupt */
1844 	if (rx_wbm_rel_mask) {
1845 		work_done = dp_rx_wbm_err_process(int_ctx, soc,
1846 						  soc->rx_rel_ring.hal_srng,
1847 						  remaining_quota);
1848 
1849 		if (work_done) {
1850 			intr_stats->num_rx_wbm_rel_ring_masks++;
1851 			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
1852 					 work_done, budget);
1853 		}
1854 
1855 		budget -=  work_done;
1856 		if (budget <= 0) {
1857 			goto budget_done;
1858 		}
1859 		remaining_quota = budget;
1860 	}
1861 
1862 	/* Process Rx interrupts */
1863 	if (rx_mask) {
1864 		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
1865 			if (!(rx_mask & (1 << ring)))
1866 				continue;
1867 			work_done = dp_rx_process(int_ctx,
1868 						  soc->reo_dest_ring[ring].hal_srng,
1869 						  ring,
1870 						  remaining_quota);
1871 			if (work_done) {
1872 				intr_stats->num_rx_ring_masks[ring]++;
1873 				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
1874 						 rx_mask, ring,
1875 						 work_done, budget);
1876 				budget -=  work_done;
1877 				if (budget <= 0)
1878 					goto budget_done;
1879 				remaining_quota = budget;
1880 			}
1881 		}
1882 	}
1883 
1884 	if (reo_status_mask) {
1885 		if (dp_reo_status_ring_handler(int_ctx, soc))
1886 			int_ctx->intr_stats.num_reo_status_ring_masks++;
1887 	}
1888 
1889 	work_done = dp_process_lmac_rings(int_ctx, remaining_quota);
1890 	if (work_done) {
1891 		budget -=  work_done;
1892 		if (budget <= 0)
1893 			goto budget_done;
1894 		remaining_quota = budget;
1895 	}
1896 
1897 	qdf_lro_flush(int_ctx->lro_ctx);
1898 	intr_stats->num_masks++;
1899 
1900 budget_done:
1901 	return dp_budget - budget;
1902 }
1903 
1904 /* dp_interrupt_timer()- timer poll for interrupts
1905  *
1906  * @arg: SoC Handle
1907  *
1908  * Return:
1909  *
1910  */
1911 static void dp_interrupt_timer(void *arg)
1912 {
1913 	struct dp_soc *soc = (struct dp_soc *) arg;
1914 	struct dp_pdev *pdev = soc->pdev_list[0];
1915 	enum timer_yield_status yield = DP_TIMER_NO_YIELD;
1916 	uint32_t work_done  = 0, total_work_done = 0;
1917 	int budget = 0xffff;
1918 	uint32_t remaining_quota = budget;
1919 	uint64_t start_time;
1920 	uint32_t lmac_id;
1921 	uint8_t dp_intr_id;
1922 
1923 	if (!qdf_atomic_read(&soc->cmn_init_done))
1924 		return;
1925 
1926 	if (pdev->mon_chan_band == REG_BAND_UNKNOWN) {
1927 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
1928 		return;
1929 	}
1930 
1931 	lmac_id = pdev->ch_band_lmac_id_mapping[pdev->mon_chan_band];
1932 	if (qdf_unlikely(lmac_id == DP_MON_INVALID_LMAC_ID)) {
1933 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
1934 		return;
1935 	}
1936 
1937 	dp_intr_id = soc->mon_intr_id_lmac_map[lmac_id];
1938 	dp_srng_record_timer_entry(soc, dp_intr_id);
1939 	start_time = qdf_get_log_timestamp();
1940 
1941 	while (yield == DP_TIMER_NO_YIELD) {
1942 		work_done = dp_mon_process(soc, &soc->intr_ctx[dp_intr_id],
1943 					   lmac_id, remaining_quota);
1944 		if (work_done) {
1945 			budget -=  work_done;
1946 			if (budget <= 0) {
1947 				yield = DP_TIMER_WORK_EXHAUST;
1948 				goto budget_done;
1949 			}
1950 			remaining_quota = budget;
1951 			total_work_done += work_done;
1952 		}
1953 
1954 		yield = dp_should_timer_irq_yield(soc, total_work_done,
1955 						  start_time);
1956 		total_work_done = 0;
1957 	}
1958 
1959 budget_done:
1960 	if (yield == DP_TIMER_WORK_EXHAUST ||
1961 	    yield == DP_TIMER_TIME_EXHAUST)
1962 		qdf_timer_mod(&soc->int_timer, 1);
1963 	else
1964 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
1965 
1966 	dp_srng_record_timer_exit(soc, dp_intr_id);
1967 }
1968 
1969 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
1970 static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
1971 					struct dp_intr *intr_ctx)
1972 {
1973 	if (intr_ctx->rx_mon_ring_mask)
1974 		return true;
1975 
1976 	return false;
1977 }
1978 #else
1979 static inline bool dp_is_mon_mask_valid(struct dp_soc *soc,
1980 					struct dp_intr *intr_ctx)
1981 {
1982 	return false;
1983 }
1984 #endif
1985 
1986 /*
1987  * dp_soc_attach_poll() - Register handlers for DP interrupts
1988  * @txrx_soc: DP SOC handle
1989  *
1990  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
1991  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
1992  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1993  *
1994  * Return: 0 for success, nonzero for failure.
1995  */
1996 static QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc)
1997 {
1998 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1999 	int i;
2000 	int lmac_id = 0;
2001 
2002 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
2003 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
2004 	soc->intr_mode = DP_INTR_POLL;
2005 
2006 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
2007 		soc->intr_ctx[i].dp_intr_id = i;
2008 		soc->intr_ctx[i].tx_ring_mask =
2009 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
2010 		soc->intr_ctx[i].rx_ring_mask =
2011 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
2012 		soc->intr_ctx[i].rx_mon_ring_mask =
2013 			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
2014 		soc->intr_ctx[i].rx_err_ring_mask =
2015 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
2016 		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
2017 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
2018 		soc->intr_ctx[i].reo_status_ring_mask =
2019 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
2020 		soc->intr_ctx[i].rxdma2host_ring_mask =
2021 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
2022 		soc->intr_ctx[i].soc = soc;
2023 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
2024 
2025 		if (dp_is_mon_mask_valid(soc, &soc->intr_ctx[i])) {
2026 			hif_event_history_init(soc->hif_handle, i);
2027 			soc->mon_intr_id_lmac_map[lmac_id] = i;
2028 			lmac_id++;
2029 		}
2030 	}
2031 
2032 	qdf_timer_init(soc->osdev, &soc->int_timer,
2033 			dp_interrupt_timer, (void *)soc,
2034 			QDF_TIMER_TYPE_WAKE_APPS);
2035 
2036 	return QDF_STATUS_SUCCESS;
2037 }
2038 
2039 /**
2040  * dp_soc_set_interrupt_mode() - Set the interrupt mode in soc
2041  * soc: DP soc handle
2042  *
2043  * Set the appropriate interrupt mode flag in the soc
2044  */
2045 static void dp_soc_set_interrupt_mode(struct dp_soc *soc)
2046 {
2047 	uint32_t msi_base_data, msi_vector_start;
2048 	int msi_vector_count, ret;
2049 
2050 	soc->intr_mode = DP_INTR_INTEGRATED;
2051 
2052 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
2053 	    (soc->cdp_soc.ol_ops->get_con_mode &&
2054 	     soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)) {
2055 		soc->intr_mode = DP_INTR_POLL;
2056 	} else {
2057 		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
2058 						  &msi_vector_count,
2059 						  &msi_base_data,
2060 						  &msi_vector_start);
2061 		if (ret)
2062 			return;
2063 
2064 		soc->intr_mode = DP_INTR_MSI;
2065 	}
2066 }
2067 
2068 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc);
2069 #if defined(DP_INTR_POLL_BOTH)
2070 /*
2071  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
2072  * @txrx_soc: DP SOC handle
2073  *
2074  * Call the appropriate attach function based on the mode of operation.
2075  * This is a WAR for enabling monitor mode.
2076  *
2077  * Return: 0 for success. nonzero for failure.
2078  */
2079 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
2080 {
2081 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2082 
2083 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
2084 	    (soc->cdp_soc.ol_ops->get_con_mode &&
2085 	     soc->cdp_soc.ol_ops->get_con_mode() ==
2086 	     QDF_GLOBAL_MONITOR_MODE)) {
2087 		dp_info("Poll mode");
2088 		return dp_soc_attach_poll(txrx_soc);
2089 	} else {
2090 		dp_info("Interrupt  mode");
2091 		return dp_soc_interrupt_attach(txrx_soc);
2092 	}
2093 }
2094 #else
2095 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
2096 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
2097 {
2098 	return dp_soc_attach_poll(txrx_soc);
2099 }
2100 #else
2101 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
2102 {
2103 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2104 
2105 	if (hif_is_polled_mode_enabled(soc->hif_handle))
2106 		return dp_soc_attach_poll(txrx_soc);
2107 	else
2108 		return dp_soc_interrupt_attach(txrx_soc);
2109 }
2110 #endif
2111 #endif
2112 
2113 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
2114 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
2115 {
2116 	int j;
2117 	int num_irq = 0;
2118 
2119 	int tx_mask =
2120 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
2121 	int rx_mask =
2122 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
2123 	int rx_mon_mask =
2124 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
2125 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
2126 					soc->wlan_cfg_ctx, intr_ctx_num);
2127 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
2128 					soc->wlan_cfg_ctx, intr_ctx_num);
2129 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
2130 					soc->wlan_cfg_ctx, intr_ctx_num);
2131 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
2132 					soc->wlan_cfg_ctx, intr_ctx_num);
2133 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
2134 					soc->wlan_cfg_ctx, intr_ctx_num);
2135 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
2136 					soc->wlan_cfg_ctx, intr_ctx_num);
2137 
2138 	soc->intr_mode = DP_INTR_INTEGRATED;
2139 
2140 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
2141 
2142 		if (tx_mask & (1 << j)) {
2143 			irq_id_map[num_irq++] =
2144 				(wbm2host_tx_completions_ring1 - j);
2145 		}
2146 
2147 		if (rx_mask & (1 << j)) {
2148 			irq_id_map[num_irq++] =
2149 				(reo2host_destination_ring1 - j);
2150 		}
2151 
2152 		if (rxdma2host_ring_mask & (1 << j)) {
2153 			irq_id_map[num_irq++] =
2154 				rxdma2host_destination_ring_mac1 - j;
2155 		}
2156 
2157 		if (host2rxdma_ring_mask & (1 << j)) {
2158 			irq_id_map[num_irq++] =
2159 				host2rxdma_host_buf_ring_mac1 -	j;
2160 		}
2161 
2162 		if (host2rxdma_mon_ring_mask & (1 << j)) {
2163 			irq_id_map[num_irq++] =
2164 				host2rxdma_monitor_ring1 - j;
2165 		}
2166 
2167 		if (rx_mon_mask & (1 << j)) {
2168 			irq_id_map[num_irq++] =
2169 				ppdu_end_interrupts_mac1 - j;
2170 			irq_id_map[num_irq++] =
2171 				rxdma2host_monitor_status_ring_mac1 - j;
2172 			irq_id_map[num_irq++] =
2173 				rxdma2host_monitor_destination_mac1 - j;
2174 		}
2175 
2176 		if (rx_wbm_rel_ring_mask & (1 << j))
2177 			irq_id_map[num_irq++] = wbm2host_rx_release;
2178 
2179 		if (rx_err_ring_mask & (1 << j))
2180 			irq_id_map[num_irq++] = reo2host_exception;
2181 
2182 		if (reo_status_ring_mask & (1 << j))
2183 			irq_id_map[num_irq++] = reo2host_status;
2184 
2185 	}
2186 	*num_irq_r = num_irq;
2187 }
2188 
2189 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
2190 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
2191 		int msi_vector_count, int msi_vector_start)
2192 {
2193 	int tx_mask = wlan_cfg_get_tx_ring_mask(
2194 					soc->wlan_cfg_ctx, intr_ctx_num);
2195 	int rx_mask = wlan_cfg_get_rx_ring_mask(
2196 					soc->wlan_cfg_ctx, intr_ctx_num);
2197 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
2198 					soc->wlan_cfg_ctx, intr_ctx_num);
2199 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
2200 					soc->wlan_cfg_ctx, intr_ctx_num);
2201 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
2202 					soc->wlan_cfg_ctx, intr_ctx_num);
2203 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
2204 					soc->wlan_cfg_ctx, intr_ctx_num);
2205 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
2206 					soc->wlan_cfg_ctx, intr_ctx_num);
2207 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
2208 					soc->wlan_cfg_ctx, intr_ctx_num);
2209 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
2210 					soc->wlan_cfg_ctx, intr_ctx_num);
2211 
2212 	unsigned int vector =
2213 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
2214 	int num_irq = 0;
2215 
2216 	soc->intr_mode = DP_INTR_MSI;
2217 
2218 	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
2219 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask |
2220 	    host2rxdma_ring_mask | host2rxdma_mon_ring_mask)
2221 		irq_id_map[num_irq++] =
2222 			pld_get_msi_irq(soc->osdev->dev, vector);
2223 
2224 	*num_irq_r = num_irq;
2225 }
2226 
2227 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
2228 				    int *irq_id_map, int *num_irq)
2229 {
2230 	int msi_vector_count, ret;
2231 	uint32_t msi_base_data, msi_vector_start;
2232 
2233 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
2234 					    &msi_vector_count,
2235 					    &msi_base_data,
2236 					    &msi_vector_start);
2237 	if (ret)
2238 		return dp_soc_interrupt_map_calculate_integrated(soc,
2239 				intr_ctx_num, irq_id_map, num_irq);
2240 
2241 	else
2242 		dp_soc_interrupt_map_calculate_msi(soc,
2243 				intr_ctx_num, irq_id_map, num_irq,
2244 				msi_vector_count, msi_vector_start);
2245 }
2246 
2247 /*
2248  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
2249  * @txrx_soc: DP SOC handle
2250  *
2251  * Host driver will register for “DP_NUM_INTERRUPT_CONTEXTS” number of NAPI
2252  * contexts. Each NAPI context will have a tx_ring_mask , rx_ring_mask ,and
2253  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
2254  *
2255  * Return: 0 for success. nonzero for failure.
2256  */
2257 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
2258 {
2259 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2260 
2261 	int i = 0;
2262 	int num_irq = 0;
2263 
2264 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
2265 		    sizeof(soc->mon_intr_id_lmac_map), DP_MON_INVALID_LMAC_ID);
2266 
2267 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
2268 		int ret = 0;
2269 
2270 		/* Map of IRQ ids registered with one interrupt context */
2271 		int irq_id_map[HIF_MAX_GRP_IRQ];
2272 
2273 		int tx_mask =
2274 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
2275 		int rx_mask =
2276 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
2277 		int rx_mon_mask =
2278 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
2279 		int rx_err_ring_mask =
2280 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
2281 		int rx_wbm_rel_ring_mask =
2282 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
2283 		int reo_status_ring_mask =
2284 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
2285 		int rxdma2host_ring_mask =
2286 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
2287 		int host2rxdma_ring_mask =
2288 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
2289 		int host2rxdma_mon_ring_mask =
2290 			wlan_cfg_get_host2rxdma_mon_ring_mask(
2291 				soc->wlan_cfg_ctx, i);
2292 
2293 		soc->intr_ctx[i].dp_intr_id = i;
2294 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
2295 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
2296 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
2297 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
2298 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
2299 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
2300 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
2301 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
2302 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
2303 			 host2rxdma_mon_ring_mask;
2304 
2305 		soc->intr_ctx[i].soc = soc;
2306 
2307 		num_irq = 0;
2308 
2309 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
2310 					       &num_irq);
2311 
2312 		ret = hif_register_ext_group(soc->hif_handle,
2313 				num_irq, irq_id_map, dp_service_srngs,
2314 				&soc->intr_ctx[i], "dp_intr",
2315 				HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
2316 
2317 		if (ret) {
2318 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2319 			FL("failed, ret = %d"), ret);
2320 
2321 			return QDF_STATUS_E_FAILURE;
2322 		}
2323 
2324 		hif_event_history_init(soc->hif_handle, i);
2325 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
2326 	}
2327 
2328 	hif_configure_ext_group_interrupts(soc->hif_handle);
2329 
2330 	return QDF_STATUS_SUCCESS;
2331 }
2332 
2333 /*
2334  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
2335  * @txrx_soc: DP SOC handle
2336  *
2337  * Return: none
2338  */
2339 static void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
2340 {
2341 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2342 	int i;
2343 
2344 	if (soc->intr_mode == DP_INTR_POLL) {
2345 		qdf_timer_free(&soc->int_timer);
2346 	} else {
2347 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
2348 	}
2349 
2350 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
2351 		soc->intr_ctx[i].tx_ring_mask = 0;
2352 		soc->intr_ctx[i].rx_ring_mask = 0;
2353 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
2354 		soc->intr_ctx[i].rx_err_ring_mask = 0;
2355 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
2356 		soc->intr_ctx[i].reo_status_ring_mask = 0;
2357 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
2358 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
2359 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
2360 
2361 		hif_event_history_deinit(soc->hif_handle, i);
2362 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
2363 	}
2364 
2365 	qdf_mem_set(&soc->mon_intr_id_lmac_map,
2366 		    REG_BAND_UNKNOWN * sizeof(int), DP_MON_INVALID_LMAC_ID);
2367 }
2368 
2369 #define AVG_MAX_MPDUS_PER_TID 128
2370 #define AVG_TIDS_PER_CLIENT 2
2371 #define AVG_FLOWS_PER_TID 2
2372 #define AVG_MSDUS_PER_FLOW 128
2373 #define AVG_MSDUS_PER_MPDU 4
2374 
2375 /*
2376  * dp_hw_link_desc_pool_banks_free() - Free h/w link desc pool banks
2377  * @soc: DP SOC handle
2378  * @mac_id: mac id
2379  *
2380  * Return: none
2381  */
2382 void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id)
2383 {
2384 	struct qdf_mem_multi_page_t *pages;
2385 
2386 	if (mac_id != WLAN_INVALID_PDEV_ID)
2387 		pages = &soc->mon_link_desc_pages[mac_id];
2388 	else
2389 		pages = &soc->link_desc_pages;
2390 
2391 	if (pages->dma_pages) {
2392 		wlan_minidump_remove((void *)
2393 				     pages->dma_pages->page_v_addr_start);
2394 		qdf_mem_multi_pages_free(soc->osdev, pages, 0, false);
2395 	}
2396 }
2397 
2398 /*
2399  * dp_hw_link_desc_pool_banks_alloc() - Allocate h/w link desc pool banks
2400  * @soc: DP SOC handle
2401  * @mac_id: mac id
2402  *
2403  * Allocates memory pages for link descriptors, the page size is 4K for
2404  * MCL and 2MB for WIN. if the mac_id is invalid link descriptor pages are
2405  * allocated for regular RX/TX and if the there is a proper mac_id link
2406  * descriptors are allocated for RX monitor mode.
2407  *
2408  * Return: QDF_STATUS_SUCCESS: Success
2409  *	   QDF_STATUS_E_FAILURE: Failure
2410  */
2411 QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc, uint32_t mac_id)
2412 {
2413 	hal_soc_handle_t hal_soc = soc->hal_soc;
2414 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
2415 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
2416 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
2417 	uint32_t num_mpdus_per_link_desc = hal_num_mpdus_per_link_desc(hal_soc);
2418 	uint32_t num_msdus_per_link_desc = hal_num_msdus_per_link_desc(hal_soc);
2419 	uint32_t num_mpdu_links_per_queue_desc =
2420 		hal_num_mpdu_links_per_queue_desc(hal_soc);
2421 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
2422 	uint32_t *total_link_descs, total_mem_size;
2423 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
2424 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
2425 	uint32_t num_entries;
2426 	struct qdf_mem_multi_page_t *pages;
2427 	struct dp_srng *dp_srng;
2428 	uint8_t minidump_str[MINIDUMP_STR_SIZE];
2429 
2430 	/* Only Tx queue descriptors are allocated from common link descriptor
2431 	 * pool Rx queue descriptors are not included in this because (REO queue
2432 	 * extension descriptors) they are expected to be allocated contiguously
2433 	 * with REO queue descriptors
2434 	 */
2435 	if (mac_id != WLAN_INVALID_PDEV_ID) {
2436 		pages = &soc->mon_link_desc_pages[mac_id];
2437 		dp_srng = &soc->rxdma_mon_desc_ring[mac_id];
2438 		num_entries = dp_srng->alloc_size /
2439 			hal_srng_get_entrysize(soc->hal_soc,
2440 					       RXDMA_MONITOR_DESC);
2441 		total_link_descs = &soc->total_mon_link_descs[mac_id];
2442 		qdf_str_lcopy(minidump_str, "mon_link_desc_bank",
2443 			      MINIDUMP_STR_SIZE);
2444 	} else {
2445 		num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
2446 			AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
2447 
2448 		num_mpdu_queue_descs = num_mpdu_link_descs /
2449 			num_mpdu_links_per_queue_desc;
2450 
2451 		num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
2452 			AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
2453 			num_msdus_per_link_desc;
2454 
2455 		num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
2456 			AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
2457 
2458 		num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
2459 			num_tx_msdu_link_descs + num_rx_msdu_link_descs;
2460 
2461 		pages = &soc->link_desc_pages;
2462 		total_link_descs = &soc->total_link_descs;
2463 		qdf_str_lcopy(minidump_str, "link_desc_bank",
2464 			      MINIDUMP_STR_SIZE);
2465 	}
2466 
2467 	/* Round up to power of 2 */
2468 	*total_link_descs = 1;
2469 	while (*total_link_descs < num_entries)
2470 		*total_link_descs <<= 1;
2471 
2472 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2473 		  FL("total_link_descs: %u, link_desc_size: %d"),
2474 		  *total_link_descs, link_desc_size);
2475 	total_mem_size =  *total_link_descs * link_desc_size;
2476 	total_mem_size += link_desc_align;
2477 
2478 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2479 		  FL("total_mem_size: %d"), total_mem_size);
2480 
2481 	dp_set_max_page_size(pages, max_alloc_size);
2482 	qdf_mem_multi_pages_alloc(soc->osdev,
2483 				  pages,
2484 				  link_desc_size,
2485 				  *total_link_descs,
2486 				  0, false);
2487 	if (!pages->num_pages) {
2488 		dp_err("Multi page alloc fail for hw link desc pool");
2489 		return QDF_STATUS_E_FAULT;
2490 	}
2491 
2492 	wlan_minidump_log(pages->dma_pages->page_v_addr_start,
2493 			  pages->num_pages * pages->page_size,
2494 			  soc->ctrl_psoc,
2495 			  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
2496 			  "hw_link_desc_bank");
2497 
2498 	return QDF_STATUS_SUCCESS;
2499 }
2500 
2501 /*
2502  * dp_hw_link_desc_ring_free() - Free h/w link desc rings
2503  * @soc: DP SOC handle
2504  *
2505  * Return: none
2506  */
2507 static void dp_hw_link_desc_ring_free(struct dp_soc *soc)
2508 {
2509 	uint32_t i;
2510 	uint32_t size = soc->wbm_idle_scatter_buf_size;
2511 	void *vaddr = soc->wbm_idle_link_ring.base_vaddr_unaligned;
2512 	qdf_dma_addr_t paddr;
2513 
2514 	if (soc->wbm_idle_scatter_buf_base_vaddr[0]) {
2515 		for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2516 			vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
2517 			paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
2518 			if (vaddr) {
2519 				qdf_mem_free_consistent(soc->osdev,
2520 							soc->osdev->dev,
2521 							size,
2522 							vaddr,
2523 							paddr,
2524 							0);
2525 				vaddr = NULL;
2526 			}
2527 		}
2528 	} else {
2529 		wlan_minidump_remove(vaddr);
2530 		dp_srng_free(soc, &soc->wbm_idle_link_ring);
2531 	}
2532 }
2533 
2534 /*
2535  * dp_hw_link_desc_ring_alloc() - Allocate hw link desc rings
2536  * @soc: DP SOC handle
2537  *
2538  * Allocate memory for WBM_IDLE_LINK srng ring if the number of
2539  * link descriptors is less then the max_allocated size. else
2540  * allocate memory for wbm_idle_scatter_buffer.
2541  *
2542  * Return: QDF_STATUS_SUCCESS: success
2543  *         QDF_STATUS_E_NO_MEM: No memory (Failure)
2544  */
2545 static QDF_STATUS dp_hw_link_desc_ring_alloc(struct dp_soc *soc)
2546 {
2547 	uint32_t entry_size, i;
2548 	uint32_t total_mem_size;
2549 	qdf_dma_addr_t *baseaddr = NULL;
2550 	struct dp_srng *dp_srng;
2551 	uint32_t ring_type;
2552 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
2553 	uint32_t tlds;
2554 
2555 	ring_type = WBM_IDLE_LINK;
2556 	dp_srng = &soc->wbm_idle_link_ring;
2557 	tlds = soc->total_link_descs;
2558 
2559 	entry_size = hal_srng_get_entrysize(soc->hal_soc, ring_type);
2560 	total_mem_size = entry_size * tlds;
2561 
2562 	if (total_mem_size <= max_alloc_size) {
2563 		if (dp_srng_alloc(soc, dp_srng, ring_type, tlds, 0)) {
2564 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2565 				  FL("Link desc idle ring setup failed"));
2566 			goto fail;
2567 		}
2568 
2569 		wlan_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
2570 				  soc->wbm_idle_link_ring.alloc_size,
2571 				  soc->ctrl_psoc,
2572 				  WLAN_MD_DP_SRNG_WBM_IDLE_LINK,
2573 				  "wbm_idle_link_ring");
2574 	} else {
2575 		uint32_t num_scatter_bufs;
2576 		uint32_t num_entries_per_buf;
2577 		uint32_t buf_size = 0;
2578 
2579 		soc->wbm_idle_scatter_buf_size =
2580 			hal_idle_list_scatter_buf_size(soc->hal_soc);
2581 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
2582 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
2583 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
2584 					soc->hal_soc, total_mem_size,
2585 					soc->wbm_idle_scatter_buf_size);
2586 
2587 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
2588 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2589 				  FL("scatter bufs size out of bounds"));
2590 			goto fail;
2591 		}
2592 
2593 		for (i = 0; i < num_scatter_bufs; i++) {
2594 			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
2595 			buf_size = soc->wbm_idle_scatter_buf_size;
2596 			soc->wbm_idle_scatter_buf_base_vaddr[i] =
2597 				qdf_mem_alloc_consistent(soc->osdev,
2598 							 soc->osdev->dev,
2599 							 buf_size,
2600 							 baseaddr);
2601 
2602 			if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
2603 				QDF_TRACE(QDF_MODULE_ID_DP,
2604 					  QDF_TRACE_LEVEL_ERROR,
2605 					  FL("Scatter lst memory alloc fail"));
2606 				goto fail;
2607 			}
2608 		}
2609 		soc->num_scatter_bufs = num_scatter_bufs;
2610 	}
2611 	return QDF_STATUS_SUCCESS;
2612 
2613 fail:
2614 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2615 		void *vaddr = soc->wbm_idle_scatter_buf_base_vaddr[i];
2616 		qdf_dma_addr_t paddr = soc->wbm_idle_scatter_buf_base_paddr[i];
2617 
2618 		if (vaddr) {
2619 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2620 						soc->wbm_idle_scatter_buf_size,
2621 						vaddr,
2622 						paddr, 0);
2623 			vaddr = NULL;
2624 		}
2625 	}
2626 	return QDF_STATUS_E_NOMEM;
2627 }
2628 
2629 /*
2630  * dp_hw_link_desc_ring_init() - Initialize hw link desc rings
2631  * @soc: DP SOC handle
2632  *
2633  * Return: QDF_STATUS_SUCCESS: success
2634  *         QDF_STATUS_E_FAILURE: failure
2635  */
2636 static QDF_STATUS dp_hw_link_desc_ring_init(struct dp_soc *soc)
2637 {
2638 	struct dp_srng *dp_srng = &soc->wbm_idle_link_ring;
2639 
2640 	if (dp_srng->base_vaddr_unaligned) {
2641 		if (dp_srng_init(soc, dp_srng, WBM_IDLE_LINK, 0, 0))
2642 			return QDF_STATUS_E_FAILURE;
2643 	}
2644 	return QDF_STATUS_SUCCESS;
2645 }
2646 
2647 /*
2648  * dp_hw_link_desc_ring_deinit() - Reset hw link desc rings
2649  * @soc: DP SOC handle
2650  *
2651  * Return: None
2652  */
2653 static void dp_hw_link_desc_ring_deinit(struct dp_soc *soc)
2654 {
2655 	dp_srng_deinit(soc, &soc->wbm_idle_link_ring, WBM_IDLE_LINK, 0);
2656 }
2657 
2658 /*
2659  * dp_hw_link_desc_ring_replenish() - Replenish hw link desc rings
2660  * @soc: DP SOC handle
2661  * @mac_id: mac id
2662  *
2663  * Return: None
2664  */
2665 void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id)
2666 {
2667 	uint32_t cookie = 0;
2668 	uint32_t page_idx = 0;
2669 	struct qdf_mem_multi_page_t *pages;
2670 	struct qdf_mem_dma_page_t *dma_pages;
2671 	uint32_t offset = 0;
2672 	uint32_t count = 0;
2673 	void *desc_srng;
2674 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
2675 	uint32_t total_link_descs;
2676 	uint32_t scatter_buf_num;
2677 	uint32_t num_entries_per_buf = 0;
2678 	uint32_t rem_entries;
2679 	uint32_t num_descs_per_page;
2680 	uint32_t num_scatter_bufs = 0;
2681 	uint8_t *scatter_buf_ptr;
2682 	void *desc;
2683 
2684 	num_scatter_bufs = soc->num_scatter_bufs;
2685 
2686 	if (mac_id == WLAN_INVALID_PDEV_ID) {
2687 		pages = &soc->link_desc_pages;
2688 		total_link_descs = soc->total_link_descs;
2689 		desc_srng = soc->wbm_idle_link_ring.hal_srng;
2690 	} else {
2691 		pages = &soc->mon_link_desc_pages[mac_id];
2692 		total_link_descs = soc->total_mon_link_descs[mac_id];
2693 		desc_srng = soc->rxdma_mon_desc_ring[mac_id].hal_srng;
2694 	}
2695 
2696 	dma_pages = pages->dma_pages;
2697 	do {
2698 		qdf_mem_zero(dma_pages[page_idx].page_v_addr_start,
2699 			     pages->page_size);
2700 		page_idx++;
2701 	} while (page_idx < pages->num_pages);
2702 
2703 	if (desc_srng) {
2704 		hal_srng_access_start_unlocked(soc->hal_soc, desc_srng);
2705 		page_idx = 0;
2706 		count = 0;
2707 		offset = 0;
2708 		pages = &soc->link_desc_pages;
2709 		while ((desc = hal_srng_src_get_next(soc->hal_soc,
2710 						     desc_srng)) &&
2711 			(count < total_link_descs)) {
2712 			page_idx = count / pages->num_element_per_page;
2713 			offset = count % pages->num_element_per_page;
2714 			cookie = LINK_DESC_COOKIE(count, page_idx);
2715 
2716 			hal_set_link_desc_addr(desc, cookie,
2717 					       dma_pages[page_idx].page_p_addr
2718 					       + (offset * link_desc_size));
2719 			count++;
2720 		}
2721 		hal_srng_access_end_unlocked(soc->hal_soc, desc_srng);
2722 	} else {
2723 		/* Populate idle list scatter buffers with link descriptor
2724 		 * pointers
2725 		 */
2726 		scatter_buf_num = 0;
2727 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
2728 					soc->hal_soc,
2729 					soc->wbm_idle_scatter_buf_size);
2730 
2731 		scatter_buf_ptr = (uint8_t *)(
2732 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
2733 		rem_entries = num_entries_per_buf;
2734 		pages = &soc->link_desc_pages;
2735 		page_idx = 0; count = 0;
2736 		offset = 0;
2737 		num_descs_per_page = pages->num_element_per_page;
2738 
2739 		while (count < total_link_descs) {
2740 			page_idx = count / num_descs_per_page;
2741 			offset = count % num_descs_per_page;
2742 			cookie = LINK_DESC_COOKIE(count, page_idx);
2743 			hal_set_link_desc_addr((void *)scatter_buf_ptr,
2744 					       cookie,
2745 					       dma_pages[page_idx].page_p_addr +
2746 					       (offset * link_desc_size));
2747 			rem_entries--;
2748 			if (rem_entries) {
2749 				scatter_buf_ptr += link_desc_size;
2750 			} else {
2751 				rem_entries = num_entries_per_buf;
2752 				scatter_buf_num++;
2753 				if (scatter_buf_num >= num_scatter_bufs)
2754 					break;
2755 				scatter_buf_ptr = (uint8_t *)
2756 					(soc->wbm_idle_scatter_buf_base_vaddr[
2757 					 scatter_buf_num]);
2758 			}
2759 			count++;
2760 		}
2761 		/* Setup link descriptor idle list in HW */
2762 		hal_setup_link_idle_list(soc->hal_soc,
2763 			soc->wbm_idle_scatter_buf_base_paddr,
2764 			soc->wbm_idle_scatter_buf_base_vaddr,
2765 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
2766 			(uint32_t)(scatter_buf_ptr -
2767 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
2768 			scatter_buf_num-1])), total_link_descs);
2769 	}
2770 }
2771 
2772 #ifdef IPA_OFFLOAD
2773 #define REO_DST_RING_SIZE_QCA6290 1023
2774 #ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
2775 #define REO_DST_RING_SIZE_QCA8074 1023
2776 #define REO_DST_RING_SIZE_QCN9000 2048
2777 #else
2778 #define REO_DST_RING_SIZE_QCA8074 8
2779 #define REO_DST_RING_SIZE_QCN9000 8
2780 #endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
2781 
2782 #else
2783 
2784 #define REO_DST_RING_SIZE_QCA6290 1024
2785 #ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
2786 #define REO_DST_RING_SIZE_QCA8074 2048
2787 #define REO_DST_RING_SIZE_QCN9000 2048
2788 #else
2789 #define REO_DST_RING_SIZE_QCA8074 8
2790 #define REO_DST_RING_SIZE_QCN9000 8
2791 #endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
2792 #endif /* IPA_OFFLOAD */
2793 
2794 #ifndef FEATURE_WDS
2795 static void dp_soc_wds_attach(struct dp_soc *soc)
2796 {
2797 }
2798 
2799 static void dp_soc_wds_detach(struct dp_soc *soc)
2800 {
2801 }
2802 #endif
2803 /*
2804  * dp_soc_reset_ring_map() - Reset cpu ring map
2805  * @soc: Datapath soc handler
2806  *
2807  * This api resets the default cpu ring map
2808  */
2809 
2810 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
2811 {
2812 	uint8_t i;
2813 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2814 
2815 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
2816 		switch (nss_config) {
2817 		case dp_nss_cfg_first_radio:
2818 			/*
2819 			 * Setting Tx ring map for one nss offloaded radio
2820 			 */
2821 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
2822 			break;
2823 
2824 		case dp_nss_cfg_second_radio:
2825 			/*
2826 			 * Setting Tx ring for two nss offloaded radios
2827 			 */
2828 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
2829 			break;
2830 
2831 		case dp_nss_cfg_dbdc:
2832 			/*
2833 			 * Setting Tx ring map for 2 nss offloaded radios
2834 			 */
2835 			soc->tx_ring_map[i] =
2836 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
2837 			break;
2838 
2839 		case dp_nss_cfg_dbtc:
2840 			/*
2841 			 * Setting Tx ring map for 3 nss offloaded radios
2842 			 */
2843 			soc->tx_ring_map[i] =
2844 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
2845 			break;
2846 
2847 		default:
2848 			dp_err("tx_ring_map failed due to invalid nss cfg");
2849 			break;
2850 		}
2851 	}
2852 }
2853 
2854 /*
2855  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
2856  * @dp_soc - DP soc handle
2857  * @ring_type - ring type
2858  * @ring_num - ring_num
2859  *
2860  * return 0 or 1
2861  */
2862 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
2863 {
2864 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2865 	uint8_t status = 0;
2866 
2867 	switch (ring_type) {
2868 	case WBM2SW_RELEASE:
2869 	case REO_DST:
2870 	case RXDMA_BUF:
2871 		status = ((nss_config) & (1 << ring_num));
2872 		break;
2873 	default:
2874 		break;
2875 	}
2876 
2877 	return status;
2878 }
2879 
2880 /*
2881  * dp_soc_disable_unused_mac_intr_mask() - reset interrupt mask for
2882  *					  unused WMAC hw rings
2883  * @dp_soc - DP Soc handle
2884  * @mac_num - wmac num
2885  *
2886  * Return: Return void
2887  */
2888 static void dp_soc_disable_unused_mac_intr_mask(struct dp_soc *soc,
2889 						int mac_num)
2890 {
2891 	int *grp_mask = NULL;
2892 	int group_number;
2893 
2894 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2895 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
2896 	wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2897 					  group_number, 0x0);
2898 
2899 	grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
2900 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
2901 	wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
2902 				      group_number, 0x0);
2903 
2904 	grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
2905 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
2906 	wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
2907 					  group_number, 0x0);
2908 
2909 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
2910 	group_number = dp_srng_find_ring_in_mask(mac_num, grp_mask);
2911 	wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
2912 					      group_number, 0x0);
2913 }
2914 
2915 /*
2916  * dp_soc_reset_intr_mask() - reset interrupt mask
2917  * @dp_soc - DP Soc handle
2918  *
2919  * Return: Return void
2920  */
2921 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
2922 {
2923 	uint8_t j;
2924 	int *grp_mask = NULL;
2925 	int group_number, mask, num_ring;
2926 
2927 	/* number of tx ring */
2928 	num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
2929 
2930 	/*
2931 	 * group mask for tx completion  ring.
2932 	 */
2933 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
2934 
2935 	/* loop and reset the mask for only offloaded ring */
2936 	for (j = 0; j < num_ring; j++) {
2937 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
2938 			continue;
2939 		}
2940 
2941 		/*
2942 		 * Group number corresponding to tx offloaded ring.
2943 		 */
2944 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2945 		if (group_number < 0) {
2946 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2947 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2948 					WBM2SW_RELEASE, j);
2949 			return;
2950 		}
2951 
2952 		/* reset the tx mask for offloaded ring */
2953 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
2954 		mask &= (~(1 << j));
2955 
2956 		/*
2957 		 * reset the interrupt mask for offloaded ring.
2958 		 */
2959 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2960 	}
2961 
2962 	/* number of rx rings */
2963 	num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2964 
2965 	/*
2966 	 * group mask for reo destination ring.
2967 	 */
2968 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2969 
2970 	/* loop and reset the mask for only offloaded ring */
2971 	for (j = 0; j < num_ring; j++) {
2972 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
2973 			continue;
2974 		}
2975 
2976 		/*
2977 		 * Group number corresponding to rx offloaded ring.
2978 		 */
2979 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2980 		if (group_number < 0) {
2981 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2982 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2983 					REO_DST, j);
2984 			return;
2985 		}
2986 
2987 		/* set the interrupt mask for offloaded ring */
2988 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
2989 		mask &= (~(1 << j));
2990 
2991 		/*
2992 		 * set the interrupt mask to zero for rx offloaded radio.
2993 		 */
2994 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2995 	}
2996 
2997 	/*
2998 	 * group mask for Rx buffer refill ring
2999 	 */
3000 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
3001 
3002 	/* loop and reset the mask for only offloaded ring */
3003 	for (j = 0; j < MAX_PDEV_CNT; j++) {
3004 		int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
3005 
3006 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
3007 			continue;
3008 		}
3009 
3010 		/*
3011 		 * Group number corresponding to rx offloaded ring.
3012 		 */
3013 		group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask);
3014 		if (group_number < 0) {
3015 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
3016 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
3017 					REO_DST, lmac_id);
3018 			return;
3019 		}
3020 
3021 		/* set the interrupt mask for offloaded ring */
3022 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
3023 				group_number);
3024 		mask &= (~(1 << lmac_id));
3025 
3026 		/*
3027 		 * set the interrupt mask to zero for rx offloaded radio.
3028 		 */
3029 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
3030 			group_number, mask);
3031 	}
3032 }
3033 
3034 #ifdef IPA_OFFLOAD
3035 /**
3036  * dp_reo_remap_config() - configure reo remap register value based
3037  *                         nss configuration.
3038  *		based on offload_radio value below remap configuration
3039  *		get applied.
3040  *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
3041  *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
3042  *		2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
3043  *		3 - both Radios handled by NSS (remap not required)
3044  *		4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
3045  *
3046  * @remap1: output parameter indicates reo remap 1 register value
3047  * @remap2: output parameter indicates reo remap 2 register value
3048  * Return: bool type, true if remap is configured else false.
3049  */
3050 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap1, uint32_t *remap2)
3051 {
3052 	uint32_t ring[4] = {REO_REMAP_SW1, REO_REMAP_SW2,
3053 						REO_REMAP_SW3};
3054 	hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
3055 				      3, remap1, remap2);
3056 	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
3057 
3058 	return true;
3059 }
3060 
3061 /**
3062  * dp_ipa_get_tx_ring_size() - Get Tx ring size for IPA
3063  *
3064  * @tx_ring_num: Tx ring number
3065  * @tx_ipa_ring_sz: Return param only updated for IPA.
3066  *
3067  * Return: None
3068  */
3069 static void dp_ipa_get_tx_ring_size(int tx_ring_num, int *tx_ipa_ring_sz)
3070 {
3071 	if (tx_ring_num == IPA_TCL_DATA_RING_IDX)
3072 		*tx_ipa_ring_sz = WLAN_CFG_IPA_TX_RING_SIZE;
3073 }
3074 
3075 /**
3076  * dp_ipa_get_tx_comp_ring_size() - Get Tx comp ring size for IPA
3077  *
3078  * @tx_comp_ring_num: Tx comp ring number
3079  * @tx_comp_ipa_ring_sz: Return param only updated for IPA.
3080  *
3081  * Return: None
3082  */
3083 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
3084 					 int *tx_comp_ipa_ring_sz)
3085 {
3086 	if (tx_comp_ring_num == IPA_TCL_DATA_RING_IDX)
3087 		*tx_comp_ipa_ring_sz = WLAN_CFG_IPA_TX_COMP_RING_SIZE;
3088 }
3089 #else
3090 static uint8_t dp_reo_ring_selection(uint32_t value, uint32_t *ring)
3091 {
3092 	uint8_t num = 0;
3093 
3094 	switch (value) {
3095 	case 0xF:
3096 		num = 4;
3097 		ring[0] = REO_REMAP_SW1;
3098 		ring[1] = REO_REMAP_SW2;
3099 		ring[2] = REO_REMAP_SW3;
3100 		ring[3] = REO_REMAP_SW4;
3101 		break;
3102 	case 0xE:
3103 		num = 3;
3104 		ring[0] = REO_REMAP_SW2;
3105 		ring[1] = REO_REMAP_SW3;
3106 		ring[2] = REO_REMAP_SW4;
3107 		break;
3108 	case 0xD:
3109 		num = 3;
3110 		ring[0] = REO_REMAP_SW1;
3111 		ring[1] = REO_REMAP_SW3;
3112 		ring[2] = REO_REMAP_SW4;
3113 		break;
3114 	case 0xC:
3115 		num = 2;
3116 		ring[0] = REO_REMAP_SW3;
3117 		ring[1] = REO_REMAP_SW4;
3118 		break;
3119 	case 0xB:
3120 		num = 3;
3121 		ring[0] = REO_REMAP_SW1;
3122 		ring[1] = REO_REMAP_SW2;
3123 		ring[2] = REO_REMAP_SW4;
3124 		break;
3125 	case 0xA:
3126 		num = 2;
3127 		ring[0] = REO_REMAP_SW2;
3128 		ring[1] = REO_REMAP_SW4;
3129 		break;
3130 	case 0x9:
3131 		num = 2;
3132 		ring[0] = REO_REMAP_SW1;
3133 		ring[1] = REO_REMAP_SW4;
3134 		break;
3135 	case 0x8:
3136 		num = 1;
3137 		ring[0] = REO_REMAP_SW4;
3138 		break;
3139 	case 0x7:
3140 		num = 3;
3141 		ring[0] = REO_REMAP_SW1;
3142 		ring[1] = REO_REMAP_SW2;
3143 		ring[2] = REO_REMAP_SW3;
3144 		break;
3145 	case 0x6:
3146 		num = 2;
3147 		ring[0] = REO_REMAP_SW2;
3148 		ring[1] = REO_REMAP_SW3;
3149 		break;
3150 	case 0x5:
3151 		num = 2;
3152 		ring[0] = REO_REMAP_SW1;
3153 		ring[1] = REO_REMAP_SW3;
3154 		break;
3155 	case 0x4:
3156 		num = 1;
3157 		ring[0] = REO_REMAP_SW3;
3158 		break;
3159 	case 0x3:
3160 		num = 2;
3161 		ring[0] = REO_REMAP_SW1;
3162 		ring[1] = REO_REMAP_SW2;
3163 		break;
3164 	case 0x2:
3165 		num = 1;
3166 		ring[0] = REO_REMAP_SW2;
3167 		break;
3168 	case 0x1:
3169 		num = 1;
3170 		ring[0] = REO_REMAP_SW1;
3171 		break;
3172 	}
3173 	return num;
3174 }
3175 
3176 static bool dp_reo_remap_config(struct dp_soc *soc,
3177 				uint32_t *remap1,
3178 				uint32_t *remap2)
3179 {
3180 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
3181 	uint32_t reo_config = wlan_cfg_get_reo_rings_mapping(soc->wlan_cfg_ctx);
3182 	uint8_t target_type, num;
3183 	uint32_t ring[4];
3184 	uint32_t value;
3185 
3186 	target_type = hal_get_target_type(soc->hal_soc);
3187 
3188 	switch (offload_radio) {
3189 	case dp_nss_cfg_default:
3190 		value = reo_config & 0xF;
3191 		num = dp_reo_ring_selection(value, ring);
3192 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
3193 					      num, remap1, remap2);
3194 
3195 		break;
3196 	case dp_nss_cfg_first_radio:
3197 		value = reo_config & 0xE;
3198 		num = dp_reo_ring_selection(value, ring);
3199 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
3200 					      num, remap1, remap2);
3201 
3202 		break;
3203 	case dp_nss_cfg_second_radio:
3204 		value = reo_config & 0xD;
3205 		num = dp_reo_ring_selection(value, ring);
3206 		hal_compute_reo_remap_ix2_ix3(soc->hal_soc, ring,
3207 					      num, remap1, remap2);
3208 
3209 		break;
3210 	case dp_nss_cfg_dbdc:
3211 	case dp_nss_cfg_dbtc:
3212 		/* return false if both or all are offloaded to NSS */
3213 		return false;
3214 	}
3215 
3216 	dp_debug("remap1 %x remap2 %x offload_radio %u",
3217 		 *remap1, *remap2, offload_radio);
3218 	return true;
3219 }
3220 
3221 static void dp_ipa_get_tx_ring_size(int ring_num, int *tx_ipa_ring_sz)
3222 {
3223 }
3224 
3225 static void dp_ipa_get_tx_comp_ring_size(int tx_comp_ring_num,
3226 					 int *tx_comp_ipa_ring_sz)
3227 {
3228 }
3229 #endif /* IPA_OFFLOAD */
3230 
3231 /*
3232  * dp_reo_frag_dst_set() - configure reo register to set the
3233  *                        fragment destination ring
3234  * @soc : Datapath soc
3235  * @frag_dst_ring : output parameter to set fragment destination ring
3236  *
3237  * Based on offload_radio below fragment destination rings is selected
3238  * 0 - TCL
3239  * 1 - SW1
3240  * 2 - SW2
3241  * 3 - SW3
3242  * 4 - SW4
3243  * 5 - Release
3244  * 6 - FW
3245  * 7 - alternate select
3246  *
3247  * return: void
3248  */
3249 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
3250 {
3251 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
3252 
3253 	switch (offload_radio) {
3254 	case dp_nss_cfg_default:
3255 		*frag_dst_ring = REO_REMAP_TCL;
3256 		break;
3257 	case dp_nss_cfg_first_radio:
3258 		/*
3259 		 * This configuration is valid for single band radio which
3260 		 * is also NSS offload.
3261 		 */
3262 	case dp_nss_cfg_dbdc:
3263 	case dp_nss_cfg_dbtc:
3264 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
3265 		break;
3266 	default:
3267 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3268 				FL("dp_reo_frag_dst_set invalid offload radio config"));
3269 		break;
3270 	}
3271 }
3272 
3273 #ifdef ENABLE_VERBOSE_DEBUG
3274 static void dp_enable_verbose_debug(struct dp_soc *soc)
3275 {
3276 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3277 
3278 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3279 
3280 	if (soc_cfg_ctx->per_pkt_trace & dp_verbose_debug_mask)
3281 		is_dp_verbose_debug_enabled = true;
3282 
3283 	if (soc_cfg_ctx->per_pkt_trace & hal_verbose_debug_mask)
3284 		hal_set_verbose_debug(true);
3285 	else
3286 		hal_set_verbose_debug(false);
3287 }
3288 #else
3289 static void dp_enable_verbose_debug(struct dp_soc *soc)
3290 {
3291 }
3292 #endif
3293 
3294 #ifdef WLAN_FEATURE_STATS_EXT
3295 static inline void dp_create_ext_stats_event(struct dp_soc *soc)
3296 {
3297 	qdf_event_create(&soc->rx_hw_stats_event);
3298 }
3299 #else
3300 static inline void dp_create_ext_stats_event(struct dp_soc *soc)
3301 {
3302 }
3303 #endif
3304 
3305 static void dp_deinit_tx_pair_by_index(struct dp_soc *soc, int index)
3306 {
3307 	wlan_minidump_remove(soc->tcl_data_ring[index].base_vaddr_unaligned);
3308 	dp_srng_deinit(soc, &soc->tcl_data_ring[index], TCL_DATA, index);
3309 
3310 	wlan_minidump_remove(soc->tx_comp_ring[index].base_vaddr_unaligned);
3311 	dp_srng_deinit(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE, index);
3312 }
3313 
3314 static QDF_STATUS dp_init_tx_ring_pair_by_index(struct dp_soc *soc,
3315 						uint8_t index)
3316 {
3317 	if (dp_srng_init(soc, &soc->tcl_data_ring[index], TCL_DATA, index, 0)) {
3318 		dp_err("dp_srng_init failed for tcl_data_ring");
3319 		goto fail1;
3320 	}
3321 	wlan_minidump_log(soc->tcl_data_ring[index].base_vaddr_unaligned,
3322 			  soc->tcl_data_ring[index].alloc_size,
3323 			  soc->ctrl_psoc,
3324 			  WLAN_MD_DP_SRNG_TCL_DATA,
3325 			  "tcl_data_ring");
3326 
3327 	if (dp_srng_init(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
3328 			 index, 0)) {
3329 		dp_err("dp_srng_init failed for tx_comp_ring");
3330 		goto fail1;
3331 	}
3332 	wlan_minidump_log(soc->tx_comp_ring[index].base_vaddr_unaligned,
3333 			  soc->tx_comp_ring[index].alloc_size,
3334 			  soc->ctrl_psoc,
3335 			  WLAN_MD_DP_SRNG_TX_COMP,
3336 			  "tcl_comp_ring");
3337 
3338 	return QDF_STATUS_SUCCESS;
3339 
3340 fail1:
3341 	return QDF_STATUS_E_FAILURE;
3342 }
3343 
3344 static void dp_free_tx_ring_pair_by_index(struct dp_soc *soc, uint8_t index)
3345 {
3346 	dp_srng_free(soc, &soc->tcl_data_ring[index]);
3347 	dp_srng_free(soc, &soc->tx_comp_ring[index]);
3348 }
3349 
3350 static QDF_STATUS dp_alloc_tx_ring_pair_by_index(struct dp_soc *soc,
3351 						 uint8_t index)
3352 {
3353 	int tx_ring_size;
3354 	int tx_comp_ring_size;
3355 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx = soc->wlan_cfg_ctx;
3356 	int cached = 0;
3357 
3358 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
3359 	dp_ipa_get_tx_ring_size(index, &tx_ring_size);
3360 
3361 	if (dp_srng_alloc(soc, &soc->tcl_data_ring[index], TCL_DATA,
3362 			  tx_ring_size, cached)) {
3363 		dp_err("dp_srng_alloc failed for tcl_data_ring");
3364 		goto fail1;
3365 	}
3366 
3367 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
3368 	dp_ipa_get_tx_comp_ring_size(index, &tx_comp_ring_size);
3369 	/* Enable cached TCL desc if NSS offload is disabled */
3370 	if (!wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
3371 		cached = WLAN_CFG_DST_RING_CACHED_DESC;
3372 
3373 	if (dp_srng_alloc(soc, &soc->tx_comp_ring[index], WBM2SW_RELEASE,
3374 			  tx_comp_ring_size, cached)) {
3375 		dp_err("dp_srng_alloc failed for tx_comp_ring");
3376 		goto fail1;
3377 	}
3378 
3379 	return QDF_STATUS_SUCCESS;
3380 
3381 fail1:
3382 	return QDF_STATUS_E_FAILURE;
3383 }
3384 
3385 static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3386 {
3387 	struct cdp_lro_hash_config lro_hash;
3388 	QDF_STATUS status;
3389 
3390 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
3391 	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
3392 	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
3393 		dp_err("LRO, GRO and RX hash disabled");
3394 		return QDF_STATUS_E_FAILURE;
3395 	}
3396 
3397 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
3398 
3399 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
3400 	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
3401 		lro_hash.lro_enable = 1;
3402 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
3403 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
3404 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
3405 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
3406 	}
3407 
3408 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
3409 			     (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
3410 			      LRO_IPV4_SEED_ARR_SZ));
3411 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
3412 			     (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
3413 			      LRO_IPV6_SEED_ARR_SZ));
3414 
3415 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
3416 
3417 	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
3418 		QDF_BUG(0);
3419 		dp_err("lro_hash_config not configured");
3420 		return QDF_STATUS_E_FAILURE;
3421 	}
3422 
3423 	status = soc->cdp_soc.ol_ops->lro_hash_config(soc->ctrl_psoc,
3424 						      pdev->pdev_id,
3425 						      &lro_hash);
3426 	if (!QDF_IS_STATUS_SUCCESS(status)) {
3427 		dp_err("failed to send lro_hash_config to FW %u", status);
3428 		return status;
3429 	}
3430 
3431 	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
3432 		lro_hash.lro_enable, lro_hash.tcp_flag,
3433 		lro_hash.tcp_flag_mask);
3434 
3435 	dp_info("toeplitz_hash_ipv4:");
3436 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3437 			   lro_hash.toeplitz_hash_ipv4,
3438 			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
3439 			   LRO_IPV4_SEED_ARR_SZ));
3440 
3441 	dp_info("toeplitz_hash_ipv6:");
3442 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3443 			   lro_hash.toeplitz_hash_ipv6,
3444 			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
3445 			   LRO_IPV6_SEED_ARR_SZ));
3446 
3447 	return status;
3448 }
3449 
3450 /*
3451  * dp_rxdma_ring_setup() - configure the RX DMA rings
3452  * @soc: data path SoC handle
3453  * @pdev: Physical device handle
3454  *
3455  * Return: 0 - success, > 0 - failure
3456  */
3457 #ifdef QCA_HOST2FW_RXBUF_RING
3458 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3459 {
3460 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3461 	int max_mac_rings;
3462 	int i;
3463 	int ring_size;
3464 
3465 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3466 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
3467 	ring_size =  wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx);
3468 
3469 	for (i = 0; i < max_mac_rings; i++) {
3470 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
3471 		if (dp_srng_alloc(soc, &pdev->rx_mac_buf_ring[i],
3472 				  RXDMA_BUF, ring_size, 0)) {
3473 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3474 				  FL("failed rx mac ring setup"));
3475 			return QDF_STATUS_E_FAILURE;
3476 		}
3477 
3478 		if (dp_srng_init(soc, &pdev->rx_mac_buf_ring[i],
3479 				 RXDMA_BUF, 1, i)) {
3480 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3481 				  FL("failed rx mac ring setup"));
3482 
3483 			dp_srng_free(soc, &pdev->rx_mac_buf_ring[i]);
3484 			return QDF_STATUS_E_FAILURE;
3485 		}
3486 	}
3487 	return QDF_STATUS_SUCCESS;
3488 }
3489 #else
3490 static int dp_rxdma_ring_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3491 {
3492 	return QDF_STATUS_SUCCESS;
3493 }
3494 #endif
3495 
3496 /**
3497  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
3498  * @pdev - DP_PDEV handle
3499  *
3500  * Return: void
3501  */
3502 static inline void
3503 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
3504 {
3505 	uint8_t map_id;
3506 	struct dp_soc *soc = pdev->soc;
3507 
3508 	if (!soc)
3509 		return;
3510 
3511 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
3512 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
3513 			     default_dscp_tid_map,
3514 			     sizeof(default_dscp_tid_map));
3515 	}
3516 
3517 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
3518 		hal_tx_set_dscp_tid_map(soc->hal_soc,
3519 					default_dscp_tid_map,
3520 					map_id);
3521 	}
3522 }
3523 
3524 /**
3525  * dp_pcp_tid_map_setup(): Initialize the pcp-tid maps
3526  * @pdev - DP_PDEV handle
3527  *
3528  * Return: void
3529  */
3530 static inline void
3531 dp_pcp_tid_map_setup(struct dp_pdev *pdev)
3532 {
3533 	struct dp_soc *soc = pdev->soc;
3534 
3535 	if (!soc)
3536 		return;
3537 
3538 	qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
3539 		     sizeof(default_pcp_tid_map));
3540 	hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
3541 }
3542 
3543 #ifdef IPA_OFFLOAD
3544 /**
3545  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
3546  * @soc: data path instance
3547  * @pdev: core txrx pdev context
3548  *
3549  * Return: QDF_STATUS_SUCCESS: success
3550  *         QDF_STATUS_E_RESOURCES: Error return
3551  */
3552 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3553 					   struct dp_pdev *pdev)
3554 {
3555 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3556 	int entries;
3557 
3558 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3559 	entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
3560 
3561 	/* Setup second Rx refill buffer ring */
3562 	if (dp_srng_alloc(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
3563 			  entries, 0)) {
3564 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3565 			FL("dp_srng_alloc failed second rx refill ring"));
3566 		return QDF_STATUS_E_FAILURE;
3567 	}
3568 
3569 	if (dp_srng_init(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
3570 			 IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id)) {
3571 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3572 			  FL("dp_srng_init failed second rx refill ring"));
3573 		return QDF_STATUS_E_FAILURE;
3574 	}
3575 
3576 	return QDF_STATUS_SUCCESS;
3577 }
3578 
3579 /**
3580  * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
3581  * @soc: data path instance
3582  * @pdev: core txrx pdev context
3583  *
3584  * Return: void
3585  */
3586 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3587 					      struct dp_pdev *pdev)
3588 {
3589 	dp_srng_deinit(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF, 0);
3590 	dp_srng_free(soc, &pdev->rx_refill_buf_ring2);
3591 }
3592 
3593 #else
3594 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3595 					   struct dp_pdev *pdev)
3596 {
3597 	return QDF_STATUS_SUCCESS;
3598 }
3599 
3600 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3601 					      struct dp_pdev *pdev)
3602 {
3603 }
3604 #endif
3605 
3606 #if !defined(DISABLE_MON_CONFIG)
3607 /**
3608  * dp_mon_ring_deinit() - Deinitialize monitor rings
3609  * @pdev: DP pdev handle
3610  *
3611  */
3612 static void dp_mon_rings_deinit(struct dp_pdev *pdev)
3613 {
3614 	int mac_id = 0;
3615 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3616 	struct dp_soc *soc = pdev->soc;
3617 
3618 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3619 
3620 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3621 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
3622 							 pdev->pdev_id);
3623 
3624 		dp_srng_deinit(soc, &soc->rxdma_mon_status_ring[lmac_id],
3625 			       RXDMA_MONITOR_STATUS, 0);
3626 
3627 		if (!soc->wlan_cfg_ctx->rxdma1_enable)
3628 			continue;
3629 
3630 		dp_srng_deinit(soc, &soc->rxdma_mon_buf_ring[lmac_id],
3631 			       RXDMA_MONITOR_BUF, 0);
3632 		dp_srng_deinit(soc, &soc->rxdma_mon_dst_ring[lmac_id],
3633 			       RXDMA_MONITOR_DST, 0);
3634 		dp_srng_deinit(soc, &soc->rxdma_mon_desc_ring[lmac_id],
3635 			       RXDMA_MONITOR_DESC, 0);
3636 	}
3637 }
3638 
3639 /**
3640  * dp_mon_rings_free() - free monitor rings
3641  * @pdev: Datapath pdev handle
3642  *
3643  */
3644 static void dp_mon_rings_free(struct dp_pdev *pdev)
3645 {
3646 	int mac_id = 0;
3647 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3648 	struct dp_soc *soc = pdev->soc;
3649 
3650 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3651 
3652 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3653 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
3654 							 pdev->pdev_id);
3655 
3656 		dp_srng_free(soc, &soc->rxdma_mon_status_ring[lmac_id]);
3657 
3658 		if (!soc->wlan_cfg_ctx->rxdma1_enable)
3659 			continue;
3660 
3661 		dp_srng_free(soc, &soc->rxdma_mon_buf_ring[lmac_id]);
3662 		dp_srng_free(soc, &soc->rxdma_mon_dst_ring[lmac_id]);
3663 		dp_srng_free(soc, &soc->rxdma_mon_desc_ring[lmac_id]);
3664 	}
3665 }
3666 
3667 /**
3668  * dp_mon_rings_init() - Initialize monitor srng rings
3669  * @pdev: Datapath pdev handle
3670  *
3671  * return: QDF_STATUS_SUCCESS on success
3672  *	   QDF_STATUS_E_NOMEM on failure
3673  */
3674 static
3675 QDF_STATUS dp_mon_rings_init(struct dp_soc *soc, struct dp_pdev *pdev)
3676 {
3677 	int mac_id = 0;
3678 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3679 
3680 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3681 
3682 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3683 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
3684 							 pdev->pdev_id);
3685 
3686 		if (dp_srng_init(soc, &soc->rxdma_mon_status_ring[lmac_id],
3687 				 RXDMA_MONITOR_STATUS, 0, lmac_id)) {
3688 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3689 				  FL(RNG_ERR "rxdma_mon_status_ring"));
3690 			goto fail1;
3691 		}
3692 
3693 		if (!soc->wlan_cfg_ctx->rxdma1_enable)
3694 			continue;
3695 
3696 		if (dp_srng_init(soc, &soc->rxdma_mon_buf_ring[lmac_id],
3697 				 RXDMA_MONITOR_BUF, 0, lmac_id)) {
3698 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3699 				  FL(RNG_ERR "rxdma_mon_buf_ring "));
3700 			goto fail1;
3701 		}
3702 
3703 		if (dp_srng_init(soc, &soc->rxdma_mon_dst_ring[lmac_id],
3704 				 RXDMA_MONITOR_DST, 0, lmac_id)) {
3705 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3706 				  FL(RNG_ERR "rxdma_mon_dst_ring"));
3707 			goto fail1;
3708 		}
3709 
3710 		if (dp_srng_init(soc, &soc->rxdma_mon_desc_ring[lmac_id],
3711 				 RXDMA_MONITOR_DESC, 0, lmac_id)) {
3712 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3713 				  FL(RNG_ERR "rxdma_mon_desc_ring"));
3714 			goto fail1;
3715 		}
3716 	}
3717 	return QDF_STATUS_SUCCESS;
3718 
3719 fail1:
3720 	dp_mon_rings_deinit(pdev);
3721 	return QDF_STATUS_E_NOMEM;
3722 }
3723 
3724 /**
3725  * dp_mon_rings_alloc() - Allocate memory for monitor srng rings
3726  * @soc: Datapath soc handle
3727  * @pdev: Datapath pdev handle
3728  *
3729  * return: QDF_STATUS_SUCCESS on success
3730  *	   QDF_STATUS_E_NOMEM on failure
3731  */
3732 static
3733 QDF_STATUS dp_mon_rings_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
3734 {
3735 	int mac_id = 0;
3736 	int entries;
3737 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3738 
3739 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3740 
3741 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3742 		int lmac_id =
3743 		dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id);
3744 		entries = wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
3745 		if (dp_srng_alloc(soc, &soc->rxdma_mon_status_ring[lmac_id],
3746 				  RXDMA_MONITOR_STATUS, entries, 0)) {
3747 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3748 				  FL(RNG_ERR "rxdma_mon_status_ring"));
3749 			goto fail1;
3750 		}
3751 
3752 		if (!soc->wlan_cfg_ctx->rxdma1_enable)
3753 			continue;
3754 
3755 		entries = wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
3756 		if (dp_srng_alloc(soc, &soc->rxdma_mon_buf_ring[lmac_id],
3757 				  RXDMA_MONITOR_BUF, entries, 0)) {
3758 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3759 				  FL(RNG_ERR "rxdma_mon_buf_ring "));
3760 			goto fail1;
3761 		}
3762 
3763 		entries = wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
3764 		if (dp_srng_alloc(soc, &soc->rxdma_mon_dst_ring[lmac_id],
3765 				  RXDMA_MONITOR_DST, entries, 0)) {
3766 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3767 				  FL(RNG_ERR "rxdma_mon_dst_ring"));
3768 			goto fail1;
3769 		}
3770 
3771 		entries = wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
3772 		if (dp_srng_alloc(soc, &soc->rxdma_mon_desc_ring[lmac_id],
3773 				  RXDMA_MONITOR_DESC, entries, 0)) {
3774 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3775 				  FL(RNG_ERR "rxdma_mon_desc_ring"));
3776 			goto fail1;
3777 		}
3778 	}
3779 	return QDF_STATUS_SUCCESS;
3780 
3781 fail1:
3782 	dp_mon_rings_free(pdev);
3783 	return QDF_STATUS_E_NOMEM;
3784 }
3785 #else
3786 static void dp_mon_rings_free(struct dp_pdev *pdev)
3787 {
3788 }
3789 
3790 static void dp_mon_rings_deinit(struct dp_pdev *pdev)
3791 {
3792 }
3793 
3794 static
3795 QDF_STATUS dp_mon_rings_init(struct dp_soc *soc, struct dp_pdev *pdev)
3796 {
3797 	return QDF_STATUS_SUCCESS;
3798 }
3799 
3800 static
3801 QDF_STATUS dp_mon_rings_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
3802 {
3803 	return QDF_STATUS_SUCCESS;
3804 }
3805 #endif
3806 
3807 #ifdef ATH_SUPPORT_EXT_STAT
3808 /*dp_peer_cal_clients_stats_update - update peer stats on cal client timer
3809  * @soc : Datapath SOC
3810  * @peer : Datapath peer
3811  * @arg : argument to iter function
3812  */
3813 static void
3814 dp_peer_cal_clients_stats_update(struct dp_soc *soc,
3815 				 struct dp_peer *peer,
3816 				 void *arg)
3817 {
3818 	dp_cal_client_update_peer_stats(&peer->stats);
3819 }
3820 
3821 /*dp_iterate_update_peer_list - update peer stats on cal client timer
3822  * @pdev_hdl: pdev handle
3823  */
3824 void  dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
3825 {
3826 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
3827 
3828 	dp_pdev_iterate_peer(pdev, dp_peer_cal_clients_stats_update, NULL,
3829 			     DP_MOD_ID_CDP);
3830 }
3831 #else
3832 void  dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
3833 {
3834 }
3835 #endif
3836 
3837 /*
3838  * dp_htt_ppdu_stats_attach() - attach resources for HTT PPDU stats processing
3839  * @pdev: Datapath PDEV handle
3840  *
3841  * Return: QDF_STATUS_SUCCESS: Success
3842  *         QDF_STATUS_E_NOMEM: Error
3843  */
3844 static QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev)
3845 {
3846 	pdev->ppdu_tlv_buf = qdf_mem_malloc(HTT_T2H_MAX_MSG_SIZE);
3847 
3848 	if (!pdev->ppdu_tlv_buf) {
3849 		QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "ppdu_tlv_buf alloc fail");
3850 		return QDF_STATUS_E_NOMEM;
3851 	}
3852 
3853 	return QDF_STATUS_SUCCESS;
3854 }
3855 
3856 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
3857 /**
3858  * dp_soc_rx_history_attach() - Attach the ring history record buffers
3859  * @soc: DP soc structure
3860  *
3861  * This function allocates the memory for recording the rx ring, rx error
3862  * ring and the reinject ring entries. There is no error returned in case
3863  * of allocation failure since the record function checks if the history is
3864  * initialized or not. We do not want to fail the driver load in case of
3865  * failure to allocate memory for debug history.
3866  *
3867  * Returns: None
3868  */
3869 static void dp_soc_rx_history_attach(struct dp_soc *soc)
3870 {
3871 	int i;
3872 	uint32_t rx_ring_hist_size;
3873 	uint32_t rx_err_ring_hist_size;
3874 	uint32_t rx_reinject_hist_size;
3875 
3876 	rx_ring_hist_size = sizeof(*soc->rx_ring_history[i]);
3877 	rx_err_ring_hist_size = sizeof(*soc->rx_err_ring_history);
3878 	rx_reinject_hist_size = sizeof(*soc->rx_reinject_ring_history);
3879 
3880 	for (i = 0; i < MAX_REO_DEST_RINGS; i++) {
3881 		soc->rx_ring_history[i] = qdf_mem_malloc(rx_ring_hist_size);
3882 		if (soc->rx_ring_history[i])
3883 			qdf_atomic_init(&soc->rx_ring_history[i]->index);
3884 	}
3885 
3886 	soc->rx_err_ring_history = qdf_mem_malloc(rx_err_ring_hist_size);
3887 	if (soc->rx_err_ring_history)
3888 		qdf_atomic_init(&soc->rx_err_ring_history->index);
3889 
3890 	soc->rx_reinject_ring_history = qdf_mem_malloc(rx_reinject_hist_size);
3891 	if (soc->rx_reinject_ring_history)
3892 		qdf_atomic_init(&soc->rx_reinject_ring_history->index);
3893 }
3894 
3895 static void dp_soc_rx_history_detach(struct dp_soc *soc)
3896 {
3897 	int i;
3898 
3899 	for (i = 0; i < MAX_REO_DEST_RINGS; i++)
3900 		qdf_mem_free(soc->rx_ring_history[i]);
3901 
3902 	qdf_mem_free(soc->rx_err_ring_history);
3903 	qdf_mem_free(soc->rx_reinject_ring_history);
3904 }
3905 
3906 #else
3907 static inline void dp_soc_rx_history_attach(struct dp_soc *soc)
3908 {
3909 }
3910 
3911 static inline void dp_soc_rx_history_detach(struct dp_soc *soc)
3912 {
3913 }
3914 #endif
3915 
3916 /*
3917 * dp_pdev_attach_wifi3() - attach txrx pdev
3918 * @txrx_soc: Datapath SOC handle
3919 * @htc_handle: HTC handle for host-target interface
3920 * @qdf_osdev: QDF OS device
3921 * @pdev_id: PDEV ID
3922 *
3923 * Return: QDF_STATUS
3924 */
3925 static inline QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
3926 					      HTC_HANDLE htc_handle,
3927 					      qdf_device_t qdf_osdev,
3928 					      uint8_t pdev_id)
3929 {
3930 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3931 	struct dp_pdev *pdev = NULL;
3932 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3933 	int nss_cfg;
3934 
3935 	pdev = qdf_mem_malloc(sizeof(*pdev));
3936 	if (!pdev) {
3937 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3938 			  FL("DP PDEV memory allocation failed"));
3939 		goto fail0;
3940 	}
3941 	wlan_minidump_log(pdev, sizeof(*pdev), soc->ctrl_psoc,
3942 			  WLAN_MD_DP_PDEV, "dp_pdev");
3943 
3944 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3945 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
3946 
3947 	if (!pdev->wlan_cfg_ctx) {
3948 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3949 			  FL("pdev cfg_attach failed"));
3950 		goto fail1;
3951 	}
3952 
3953 	/*
3954 	 * set nss pdev config based on soc config
3955 	 */
3956 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
3957 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
3958 					 (nss_cfg & (1 << pdev_id)));
3959 
3960 	pdev->soc = soc;
3961 	pdev->pdev_id = pdev_id;
3962 	soc->pdev_list[pdev_id] = pdev;
3963 
3964 	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
3965 	soc->pdev_count++;
3966 
3967 	/* Allocate memory for pdev srng rings */
3968 	if (dp_pdev_srng_alloc(pdev)) {
3969 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3970 			  FL("dp_pdev_srng_alloc failed"));
3971 		goto fail2;
3972 	}
3973 
3974 	/* Rx specific init */
3975 	if (dp_rx_pdev_desc_pool_alloc(pdev)) {
3976 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3977 			  FL("dp_rx_pdev_attach failed"));
3978 		goto fail3;
3979 	}
3980 
3981 	/* Rx monitor mode specific init */
3982 	if (dp_rx_pdev_mon_desc_pool_alloc(pdev)) {
3983 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3984 			  "dp_rx_pdev_mon_attach failed");
3985 		goto fail4;
3986 	}
3987 
3988 	return QDF_STATUS_SUCCESS;
3989 fail4:
3990 	dp_rx_pdev_desc_pool_free(pdev);
3991 fail3:
3992 	dp_pdev_srng_free(pdev);
3993 fail2:
3994 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
3995 fail1:
3996 	qdf_mem_free(pdev);
3997 fail0:
3998 	return QDF_STATUS_E_FAILURE;
3999 }
4000 
4001 /*
4002  * dp_rxdma_ring_cleanup() - configure the RX DMA rings
4003  * @soc: data path SoC handle
4004  * @pdev: Physical device handle
4005  *
4006  * Return: void
4007  */
4008 #ifdef QCA_HOST2FW_RXBUF_RING
4009 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
4010 {
4011 	int i;
4012 
4013 	for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
4014 		dp_srng_deinit(soc, &pdev->rx_mac_buf_ring[i], RXDMA_BUF, 1);
4015 		dp_srng_free(soc, &pdev->rx_mac_buf_ring[i]);
4016 	}
4017 
4018 	if (soc->reap_timer_init) {
4019 		qdf_timer_free(&soc->mon_reap_timer);
4020 		soc->reap_timer_init = 0;
4021 	}
4022 }
4023 #else
4024 static void dp_rxdma_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
4025 {
4026 	if (soc->lmac_timer_init) {
4027 		qdf_timer_stop(&soc->lmac_reap_timer);
4028 		qdf_timer_free(&soc->lmac_reap_timer);
4029 		soc->lmac_timer_init = 0;
4030 	}
4031 }
4032 #endif
4033 
4034 /*
4035  * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
4036  * @pdev: device object
4037  *
4038  * Return: void
4039  */
4040 static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
4041 {
4042 	struct dp_neighbour_peer *peer = NULL;
4043 	struct dp_neighbour_peer *temp_peer = NULL;
4044 
4045 	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
4046 			   neighbour_peer_list_elem, temp_peer) {
4047 		/* delete this peer from the list */
4048 		TAILQ_REMOVE(&pdev->neighbour_peers_list,
4049 			     peer, neighbour_peer_list_elem);
4050 		qdf_mem_free(peer);
4051 	}
4052 
4053 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
4054 }
4055 
4056 /**
4057 * dp_htt_ppdu_stats_detach() - detach stats resources
4058 * @pdev: Datapath PDEV handle
4059 *
4060 * Return: void
4061 */
4062 static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
4063 {
4064 	struct ppdu_info *ppdu_info, *ppdu_info_next;
4065 
4066 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
4067 			   ppdu_info_list_elem, ppdu_info_next) {
4068 		if (!ppdu_info)
4069 			break;
4070 		qdf_assert_always(ppdu_info->nbuf);
4071 		qdf_nbuf_free(ppdu_info->nbuf);
4072 		qdf_mem_free(ppdu_info);
4073 		pdev->list_depth--;
4074 	}
4075 
4076 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->sched_comp_ppdu_list,
4077 			   ppdu_info_list_elem, ppdu_info_next) {
4078 		if (!ppdu_info)
4079 			break;
4080 		qdf_assert_always(ppdu_info->nbuf);
4081 		qdf_nbuf_free(ppdu_info->nbuf);
4082 		qdf_mem_free(ppdu_info);
4083 		pdev->sched_comp_list_depth--;
4084 	}
4085 
4086 	if (pdev->ppdu_tlv_buf)
4087 		qdf_mem_free(pdev->ppdu_tlv_buf);
4088 
4089 }
4090 
4091 #ifdef WLAN_DP_PENDING_MEM_FLUSH
4092 /**
4093  * dp_pdev_flush_pending_vdevs() - Flush all delete pending vdevs in pdev
4094  * @pdev: Datapath PDEV handle
4095  *
4096  * This is the last chance to flush all pending dp vdevs/peers,
4097  * some peer/vdev leak case like Non-SSR + peer unmap missing
4098  * will be covered here.
4099  *
4100  * Return: None
4101  */
4102 static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
4103 {
4104 	struct dp_vdev *vdev = NULL;
4105 
4106 	while (true) {
4107 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
4108 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4109 			if (vdev->delete.pending)
4110 				break;
4111 		}
4112 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4113 
4114 		/*
4115 		 * vdev will be freed when all peers get cleanup,
4116 		 * dp_delete_pending_vdev will remove vdev from vdev_list
4117 		 * in pdev.
4118 		 */
4119 		if (vdev)
4120 			dp_vdev_flush_peers((struct cdp_vdev *)vdev, 0);
4121 		else
4122 			break;
4123 	}
4124 }
4125 #else
4126 static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
4127 {
4128 }
4129 #endif
4130 
4131 /**
4132  * dp_pdev_deinit() - Deinit txrx pdev
4133  * @txrx_pdev: Datapath PDEV handle
4134  * @force: Force deinit
4135  *
4136  * Return: None
4137  */
4138 static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
4139 {
4140 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
4141 	qdf_nbuf_t curr_nbuf, next_nbuf;
4142 
4143 	if (pdev->pdev_deinit)
4144 		return;
4145 
4146 	dp_tx_me_exit(pdev);
4147 	dp_rx_fst_detach(pdev->soc, pdev);
4148 	dp_rx_pdev_mon_buffers_free(pdev);
4149 	dp_rx_pdev_buffers_free(pdev);
4150 	dp_rx_pdev_mon_desc_pool_deinit(pdev);
4151 	dp_rx_pdev_desc_pool_deinit(pdev);
4152 	dp_htt_ppdu_stats_detach(pdev);
4153 	dp_tx_ppdu_stats_detach(pdev);
4154 	qdf_event_destroy(&pdev->fw_peer_stats_event);
4155 	dp_cal_client_detach(&pdev->cal_client_ctx);
4156 	if (pdev->sojourn_buf)
4157 		qdf_nbuf_free(pdev->sojourn_buf);
4158 
4159 	dp_pdev_flush_pending_vdevs(pdev);
4160 	dp_tx_pdev_detach(pdev);
4161 	dp_pktlogmod_exit(pdev);
4162 	dp_neighbour_peers_detach(pdev);
4163 
4164 	qdf_spinlock_destroy(&pdev->tx_mutex);
4165 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
4166 
4167 	if (pdev->invalid_peer)
4168 		qdf_mem_free(pdev->invalid_peer);
4169 
4170 	if (pdev->filter)
4171 		dp_mon_filter_dealloc(pdev);
4172 
4173 	dp_pdev_srng_deinit(pdev);
4174 
4175 	dp_ipa_uc_detach(pdev->soc, pdev);
4176 	dp_cleanup_ipa_rx_refill_buf_ring(pdev->soc, pdev);
4177 	dp_rxdma_ring_cleanup(pdev->soc, pdev);
4178 
4179 	curr_nbuf = pdev->invalid_peer_head_msdu;
4180 	while (curr_nbuf) {
4181 		next_nbuf = qdf_nbuf_next(curr_nbuf);
4182 		qdf_nbuf_free(curr_nbuf);
4183 		curr_nbuf = next_nbuf;
4184 	}
4185 	pdev->invalid_peer_head_msdu = NULL;
4186 	pdev->invalid_peer_tail_msdu = NULL;
4187 
4188 	dp_wdi_event_detach(pdev);
4189 	pdev->pdev_deinit = 1;
4190 }
4191 
4192 /**
4193  * dp_pdev_deinit_wifi3() - Deinit txrx pdev
4194  * @psoc: Datapath psoc handle
4195  * @pdev_id: Id of datapath PDEV handle
4196  * @force: Force deinit
4197  *
4198  * Return: QDF_STATUS
4199  */
4200 static QDF_STATUS
4201 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
4202 		     int force)
4203 {
4204 	struct dp_pdev *txrx_pdev;
4205 
4206 	txrx_pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
4207 						       pdev_id);
4208 
4209 	if (!txrx_pdev)
4210 		return QDF_STATUS_E_FAILURE;
4211 
4212 	dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force);
4213 
4214 	return QDF_STATUS_SUCCESS;
4215 }
4216 
4217 /*
4218  * dp_pdev_post_attach() - Do post pdev attach after dev_alloc_name
4219  * @txrx_pdev: Datapath PDEV handle
4220  *
4221  * Return: None
4222  */
4223 static void dp_pdev_post_attach(struct cdp_pdev *txrx_pdev)
4224 {
4225 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
4226 
4227 	dp_tx_capture_debugfs_init(pdev);
4228 }
4229 
4230 /*
4231  * dp_pdev_post_attach_wifi3() - attach txrx pdev post
4232  * @psoc: Datapath soc handle
4233  * @pdev_id: pdev id of pdev
4234  *
4235  * Return: QDF_STATUS
4236  */
4237 static int dp_pdev_post_attach_wifi3(struct cdp_soc_t *soc,
4238 				     uint8_t pdev_id)
4239 {
4240 	struct dp_pdev *pdev;
4241 
4242 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
4243 						  pdev_id);
4244 
4245 	if (!pdev) {
4246 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4247 			  FL("DP PDEV is Null for pdev id %d"), pdev_id);
4248 		return QDF_STATUS_E_FAILURE;
4249 	}
4250 
4251 	dp_pdev_post_attach((struct cdp_pdev *)pdev);
4252 	return QDF_STATUS_SUCCESS;
4253 }
4254 
4255 /*
4256  * dp_pdev_detach() - Complete rest of pdev detach
4257  * @txrx_pdev: Datapath PDEV handle
4258  * @force: Force deinit
4259  *
4260  * Return: None
4261  */
4262 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
4263 {
4264 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
4265 	struct dp_soc *soc = pdev->soc;
4266 
4267 	dp_rx_pdev_mon_desc_pool_free(pdev);
4268 	dp_rx_pdev_desc_pool_free(pdev);
4269 	dp_pdev_srng_free(pdev);
4270 
4271 	soc->pdev_count--;
4272 	soc->pdev_list[pdev->pdev_id] = NULL;
4273 
4274 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
4275 	wlan_minidump_remove(pdev);
4276 	qdf_mem_free(pdev);
4277 }
4278 
4279 /*
4280  * dp_pdev_detach_wifi3() - detach txrx pdev
4281  * @psoc: Datapath soc handle
4282  * @pdev_id: pdev id of pdev
4283  * @force: Force detach
4284  *
4285  * Return: QDF_STATUS
4286  */
4287 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
4288 				       int force)
4289 {
4290 	struct dp_pdev *pdev;
4291 
4292 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
4293 						  pdev_id);
4294 
4295 	if (!pdev) {
4296 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4297 			  FL("DP PDEV is Null for pdev id %d"), pdev_id);
4298 		return QDF_STATUS_E_FAILURE;
4299 	}
4300 
4301 	dp_pdev_detach((struct cdp_pdev *)pdev, force);
4302 	return QDF_STATUS_SUCCESS;
4303 }
4304 
4305 /*
4306  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
4307  * @soc: DP SOC handle
4308  */
4309 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
4310 {
4311 	struct reo_desc_list_node *desc;
4312 	struct dp_rx_tid *rx_tid;
4313 
4314 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
4315 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
4316 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
4317 		rx_tid = &desc->rx_tid;
4318 		qdf_mem_unmap_nbytes_single(soc->osdev,
4319 			rx_tid->hw_qdesc_paddr,
4320 			QDF_DMA_BIDIRECTIONAL,
4321 			rx_tid->hw_qdesc_alloc_size);
4322 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
4323 		qdf_mem_free(desc);
4324 	}
4325 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
4326 	qdf_list_destroy(&soc->reo_desc_freelist);
4327 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
4328 }
4329 
4330 /*
4331  * dp_soc_reset_txrx_ring_map() - reset tx ring map
4332  * @soc: DP SOC handle
4333  *
4334  */
4335 static void dp_soc_reset_txrx_ring_map(struct dp_soc *soc)
4336 {
4337 	uint32_t i;
4338 
4339 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
4340 		soc->tx_ring_map[i] = 0;
4341 }
4342 
4343 /*
4344  * dp_soc_print_inactive_objects() - prints inactive peer and vdev list
4345  * @soc: DP SOC handle
4346  *
4347  */
4348 static void dp_soc_print_inactive_objects(struct dp_soc *soc)
4349 {
4350 	struct dp_peer *peer = NULL;
4351 	struct dp_peer *tmp_peer = NULL;
4352 	struct dp_vdev *vdev = NULL;
4353 	struct dp_vdev *tmp_vdev = NULL;
4354 	int i = 0;
4355 	uint32_t count;
4356 
4357 	if (TAILQ_EMPTY(&soc->inactive_peer_list) &&
4358 	    TAILQ_EMPTY(&soc->inactive_vdev_list))
4359 		return;
4360 
4361 	TAILQ_FOREACH_SAFE(peer, &soc->inactive_peer_list,
4362 			   inactive_list_elem, tmp_peer) {
4363 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
4364 			count = qdf_atomic_read(&peer->mod_refs[i]);
4365 			if (count)
4366 				DP_PRINT_STATS("peer %pK Module id %u ==> %u",
4367 					       peer, i, count);
4368 		}
4369 	}
4370 
4371 	TAILQ_FOREACH_SAFE(vdev, &soc->inactive_vdev_list,
4372 			   inactive_list_elem, tmp_vdev) {
4373 		for (i = 0; i < DP_MOD_ID_MAX; i++) {
4374 			count = qdf_atomic_read(&vdev->mod_refs[i]);
4375 			if (count)
4376 				DP_PRINT_STATS("vdev %pK Module id %u ==> %u",
4377 					       vdev, i, count);
4378 		}
4379 	}
4380 	QDF_BUG(0);
4381 }
4382 
4383 /**
4384  * dp_soc_deinit() - Deinitialize txrx SOC
4385  * @txrx_soc: Opaque DP SOC handle
4386  *
4387  * Return: None
4388  */
4389 static void dp_soc_deinit(void *txrx_soc)
4390 {
4391 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4392 	struct htt_soc *htt_soc = soc->htt_handle;
4393 
4394 	qdf_atomic_set(&soc->cmn_init_done, 0);
4395 
4396 	/* free peer tables & AST tables allocated during peer_map_attach */
4397 	if (soc->peer_map_attach_success) {
4398 		dp_peer_find_detach(soc);
4399 		soc->peer_map_attach_success = FALSE;
4400 	}
4401 
4402 	qdf_flush_work(&soc->htt_stats.work);
4403 	qdf_disable_work(&soc->htt_stats.work);
4404 
4405 	qdf_spinlock_destroy(&soc->htt_stats.lock);
4406 
4407 	dp_soc_reset_txrx_ring_map(soc);
4408 
4409 	dp_reo_desc_freelist_destroy(soc);
4410 
4411 	DEINIT_RX_HW_STATS_LOCK(soc);
4412 
4413 	qdf_spinlock_destroy(&soc->ast_lock);
4414 
4415 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
4416 
4417 	dp_soc_wds_detach(soc);
4418 
4419 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
4420 
4421 	qdf_spinlock_destroy(&soc->vdev_map_lock);
4422 
4423 	dp_reo_cmdlist_destroy(soc);
4424 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
4425 
4426 	dp_soc_tx_desc_sw_pools_deinit(soc);
4427 
4428 	dp_soc_srng_deinit(soc);
4429 
4430 	dp_hw_link_desc_ring_deinit(soc);
4431 
4432 	dp_soc_print_inactive_objects(soc);
4433 	qdf_spinlock_destroy(&soc->inactive_peer_list_lock);
4434 	qdf_spinlock_destroy(&soc->inactive_vdev_list_lock);
4435 
4436 	htt_soc_htc_dealloc(soc->htt_handle);
4437 
4438 	htt_soc_detach(htt_soc);
4439 
4440 	/* Free wbm sg list and reset flags in down path */
4441 	dp_rx_wbm_sg_list_deinit(soc);
4442 
4443 	wlan_minidump_remove(soc);
4444 }
4445 
4446 /**
4447  * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
4448  * @txrx_soc: Opaque DP SOC handle
4449  *
4450  * Return: None
4451  */
4452 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc)
4453 {
4454 	dp_soc_deinit(txrx_soc);
4455 }
4456 
4457 /*
4458  * dp_soc_detach() - Detach rest of txrx SOC
4459  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
4460  *
4461  * Return: None
4462  */
4463 static void dp_soc_detach(struct cdp_soc_t *txrx_soc)
4464 {
4465 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4466 
4467 	dp_soc_tx_desc_sw_pools_free(soc);
4468 	dp_soc_srng_free(soc);
4469 	dp_hw_link_desc_ring_free(soc);
4470 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
4471 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
4472 	dp_soc_rx_history_detach(soc);
4473 
4474 	qdf_mem_free(soc);
4475 }
4476 
4477 /*
4478  * dp_soc_detach_wifi3() - Detach txrx SOC
4479  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
4480  *
4481  * Return: None
4482  */
4483 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc)
4484 {
4485 	dp_soc_detach(txrx_soc);
4486 }
4487 
4488 #if !defined(DISABLE_MON_CONFIG)
4489 /**
4490  * dp_mon_htt_srng_setup() - Prepare HTT messages for Monitor rings
4491  * @soc: soc handle
4492  * @pdev: physical device handle
4493  * @mac_id: ring number
4494  * @mac_for_pdev: mac_id
4495  *
4496  * Return: non-zero for failure, zero for success
4497  */
4498 static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
4499 					struct dp_pdev *pdev,
4500 					int mac_id,
4501 					int mac_for_pdev)
4502 {
4503 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4504 
4505 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
4506 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4507 					soc->rxdma_mon_buf_ring[mac_id]
4508 					.hal_srng,
4509 					RXDMA_MONITOR_BUF);
4510 
4511 		if (status != QDF_STATUS_SUCCESS) {
4512 			dp_err("Failed to send htt srng setup message for Rxdma mon buf ring");
4513 			return status;
4514 		}
4515 
4516 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4517 					soc->rxdma_mon_dst_ring[mac_id]
4518 					.hal_srng,
4519 					RXDMA_MONITOR_DST);
4520 
4521 		if (status != QDF_STATUS_SUCCESS) {
4522 			dp_err("Failed to send htt srng setup message for Rxdma mon dst ring");
4523 			return status;
4524 		}
4525 
4526 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4527 					soc->rxdma_mon_status_ring[mac_id]
4528 					.hal_srng,
4529 					RXDMA_MONITOR_STATUS);
4530 
4531 		if (status != QDF_STATUS_SUCCESS) {
4532 			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
4533 			return status;
4534 		}
4535 
4536 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4537 				soc->rxdma_mon_desc_ring[mac_id]
4538 					.hal_srng,
4539 					RXDMA_MONITOR_DESC);
4540 
4541 		if (status != QDF_STATUS_SUCCESS) {
4542 			dp_err("Failed to send htt srng message for Rxdma mon desc ring");
4543 			return status;
4544 		}
4545 	} else {
4546 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4547 					soc->rxdma_mon_status_ring[mac_id]
4548 					.hal_srng,
4549 					RXDMA_MONITOR_STATUS);
4550 
4551 		if (status != QDF_STATUS_SUCCESS) {
4552 			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
4553 			return status;
4554 		}
4555 	}
4556 
4557 	return status;
4558 
4559 }
4560 #else
4561 static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
4562 					struct dp_pdev *pdev,
4563 					int mac_id,
4564 					int mac_for_pdev)
4565 {
4566 	return QDF_STATUS_SUCCESS;
4567 }
4568 #endif
4569 
4570 /*
4571  * dp_rxdma_ring_config() - configure the RX DMA rings
4572  *
4573  * This function is used to configure the MAC rings.
4574  * On MCL host provides buffers in Host2FW ring
4575  * FW refills (copies) buffers to the ring and updates
4576  * ring_idx in register
4577  *
4578  * @soc: data path SoC handle
4579  *
4580  * Return: zero on success, non-zero on failure
4581  */
4582 #ifdef QCA_HOST2FW_RXBUF_RING
4583 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
4584 {
4585 	int i;
4586 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4587 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4588 		struct dp_pdev *pdev = soc->pdev_list[i];
4589 
4590 		if (pdev) {
4591 			int mac_id;
4592 			bool dbs_enable = 0;
4593 			int max_mac_rings =
4594 				 wlan_cfg_get_num_mac_rings
4595 				(pdev->wlan_cfg_ctx);
4596 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);
4597 
4598 			htt_srng_setup(soc->htt_handle, 0,
4599 				       soc->rx_refill_buf_ring[lmac_id]
4600 				       .hal_srng,
4601 				       RXDMA_BUF);
4602 
4603 			if (pdev->rx_refill_buf_ring2.hal_srng)
4604 				htt_srng_setup(soc->htt_handle, 0,
4605 					pdev->rx_refill_buf_ring2.hal_srng,
4606 					RXDMA_BUF);
4607 
4608 			if (soc->cdp_soc.ol_ops->
4609 				is_hw_dbs_2x2_capable) {
4610 				dbs_enable = soc->cdp_soc.ol_ops->
4611 					is_hw_dbs_2x2_capable(
4612 							(void *)soc->ctrl_psoc);
4613 			}
4614 
4615 			if (dbs_enable) {
4616 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4617 				QDF_TRACE_LEVEL_ERROR,
4618 				FL("DBS enabled max_mac_rings %d"),
4619 					 max_mac_rings);
4620 			} else {
4621 				max_mac_rings = 1;
4622 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4623 					 QDF_TRACE_LEVEL_ERROR,
4624 					 FL("DBS disabled, max_mac_rings %d"),
4625 					 max_mac_rings);
4626 			}
4627 
4628 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4629 					 FL("pdev_id %d max_mac_rings %d"),
4630 					 pdev->pdev_id, max_mac_rings);
4631 
4632 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
4633 				int mac_for_pdev =
4634 					dp_get_mac_id_for_pdev(mac_id,
4635 							       pdev->pdev_id);
4636 				/*
4637 				 * Obtain lmac id from pdev to access the LMAC
4638 				 * ring in soc context
4639 				 */
4640 				lmac_id =
4641 				dp_get_lmac_id_for_pdev_id(soc,
4642 							   mac_id,
4643 							   pdev->pdev_id);
4644 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4645 					 QDF_TRACE_LEVEL_ERROR,
4646 					 FL("mac_id %d"), mac_for_pdev);
4647 
4648 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
4649 					 pdev->rx_mac_buf_ring[mac_id]
4650 						.hal_srng,
4651 					 RXDMA_BUF);
4652 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
4653 				soc->rxdma_err_dst_ring[lmac_id]
4654 					.hal_srng,
4655 					RXDMA_DST);
4656 
4657 				/* Configure monitor mode rings */
4658 				status = dp_mon_htt_srng_setup(soc, pdev,
4659 							       lmac_id,
4660 							       mac_for_pdev);
4661 				if (status != QDF_STATUS_SUCCESS) {
4662 					dp_err("Failed to send htt monitor messages to target");
4663 					return status;
4664 				}
4665 
4666 			}
4667 		}
4668 	}
4669 
4670 	/*
4671 	 * Timer to reap rxdma status rings.
4672 	 * Needed until we enable ppdu end interrupts
4673 	 */
4674 	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
4675 		       dp_mon_reap_timer_handler, (void *)soc,
4676 		       QDF_TIMER_TYPE_WAKE_APPS);
4677 	soc->reap_timer_init = 1;
4678 	return status;
4679 }
4680 #else
4681 /* This is only for WIN */
4682 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
4683 {
4684 	int i;
4685 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4686 	int mac_for_pdev;
4687 	int lmac_id;
4688 
4689 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4690 		struct dp_pdev *pdev =  soc->pdev_list[i];
4691 
4692 		if (!pdev)
4693 			continue;
4694 
4695 		mac_for_pdev = i;
4696 		lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);
4697 
4698 		htt_srng_setup(soc->htt_handle, mac_for_pdev,
4699 			       soc->rx_refill_buf_ring[lmac_id].
4700 			       hal_srng, RXDMA_BUF);
4701 #ifndef DISABLE_MON_CONFIG
4702 
4703 		htt_srng_setup(soc->htt_handle, mac_for_pdev,
4704 			       soc->rxdma_mon_buf_ring[lmac_id].hal_srng,
4705 			       RXDMA_MONITOR_BUF);
4706 		htt_srng_setup(soc->htt_handle, mac_for_pdev,
4707 			       soc->rxdma_mon_dst_ring[lmac_id].hal_srng,
4708 			       RXDMA_MONITOR_DST);
4709 		htt_srng_setup(soc->htt_handle, mac_for_pdev,
4710 			       soc->rxdma_mon_status_ring[lmac_id].hal_srng,
4711 			       RXDMA_MONITOR_STATUS);
4712 		htt_srng_setup(soc->htt_handle, mac_for_pdev,
4713 			       soc->rxdma_mon_desc_ring[lmac_id].hal_srng,
4714 			       RXDMA_MONITOR_DESC);
4715 #endif
4716 		htt_srng_setup(soc->htt_handle, mac_for_pdev,
4717 			       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
4718 			       RXDMA_DST);
4719 	}
4720 
4721 	/* Configure LMAC rings in Polled mode */
4722 	if (soc->lmac_polled_mode) {
4723 		/*
4724 		 * Timer to reap lmac rings.
4725 		 */
4726 		qdf_timer_init(soc->osdev, &soc->lmac_reap_timer,
4727 			       dp_service_lmac_rings, (void *)soc,
4728 			       QDF_TIMER_TYPE_WAKE_APPS);
4729 		soc->lmac_timer_init = 1;
4730 		qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
4731 	}
4732 	return status;
4733 }
4734 #endif
4735 
4736 #ifdef NO_RX_PKT_HDR_TLV
4737 static QDF_STATUS
4738 dp_rxdma_ring_sel_cfg(struct dp_soc *soc)
4739 {
4740 	int i;
4741 	int mac_id;
4742 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
4743 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4744 
4745 	htt_tlv_filter.mpdu_start = 1;
4746 	htt_tlv_filter.msdu_start = 1;
4747 	htt_tlv_filter.mpdu_end = 1;
4748 	htt_tlv_filter.msdu_end = 1;
4749 	htt_tlv_filter.attention = 1;
4750 	htt_tlv_filter.packet = 1;
4751 	htt_tlv_filter.packet_header = 0;
4752 
4753 	htt_tlv_filter.ppdu_start = 0;
4754 	htt_tlv_filter.ppdu_end = 0;
4755 	htt_tlv_filter.ppdu_end_user_stats = 0;
4756 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4757 	htt_tlv_filter.ppdu_end_status_done = 0;
4758 	htt_tlv_filter.enable_fp = 1;
4759 	htt_tlv_filter.enable_md = 0;
4760 	htt_tlv_filter.enable_md = 0;
4761 	htt_tlv_filter.enable_mo = 0;
4762 
4763 	htt_tlv_filter.fp_mgmt_filter = 0;
4764 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
4765 	htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
4766 					 FILTER_DATA_MCAST |
4767 					 FILTER_DATA_DATA);
4768 	htt_tlv_filter.mo_mgmt_filter = 0;
4769 	htt_tlv_filter.mo_ctrl_filter = 0;
4770 	htt_tlv_filter.mo_data_filter = 0;
4771 	htt_tlv_filter.md_data_filter = 0;
4772 
4773 	htt_tlv_filter.offset_valid = true;
4774 
4775 	htt_tlv_filter.rx_packet_offset = RX_PKT_TLVS_LEN;
4776 	/*Not subscribing rx_pkt_header*/
4777 	htt_tlv_filter.rx_header_offset = 0;
4778 	htt_tlv_filter.rx_mpdu_start_offset =
4779 				hal_rx_mpdu_start_offset_get(soc->hal_soc);
4780 	htt_tlv_filter.rx_mpdu_end_offset =
4781 				hal_rx_mpdu_end_offset_get(soc->hal_soc);
4782 	htt_tlv_filter.rx_msdu_start_offset =
4783 				hal_rx_msdu_start_offset_get(soc->hal_soc);
4784 	htt_tlv_filter.rx_msdu_end_offset =
4785 				hal_rx_msdu_end_offset_get(soc->hal_soc);
4786 	htt_tlv_filter.rx_attn_offset =
4787 				hal_rx_attn_offset_get(soc->hal_soc);
4788 
4789 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4790 		struct dp_pdev *pdev = soc->pdev_list[i];
4791 
4792 		if (!pdev)
4793 			continue;
4794 
4795 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4796 			int mac_for_pdev =
4797 				dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
4798 			/*
4799 			 * Obtain lmac id from pdev to access the LMAC ring
4800 			 * in soc context
4801 			 */
4802 			int lmac_id =
4803 				dp_get_lmac_id_for_pdev_id(soc, mac_id,
4804 							   pdev->pdev_id);
4805 
4806 			htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4807 					    soc->rx_refill_buf_ring[lmac_id].
4808 					    hal_srng,
4809 					    RXDMA_BUF, RX_DATA_BUFFER_SIZE,
4810 					    &htt_tlv_filter);
4811 		}
4812 	}
4813 	return status;
4814 }
4815 #else
4816 static QDF_STATUS
4817 dp_rxdma_ring_sel_cfg(struct dp_soc *soc)
4818 {
4819 	return QDF_STATUS_SUCCESS;
4820 }
4821 #endif
4822 
4823 /*
4824  * dp_rx_target_fst_config() - configure the RXOLE Flow Search Engine
4825  *
4826  * This function is used to configure the FSE HW block in RX OLE on a
4827  * per pdev basis. Here, we will be programming parameters related to
4828  * the Flow Search Table.
4829  *
4830  * @soc: data path SoC handle
4831  *
4832  * Return: zero on success, non-zero on failure
4833  */
4834 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
4835 static QDF_STATUS
4836 dp_rx_target_fst_config(struct dp_soc *soc)
4837 {
4838 	int i;
4839 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4840 
4841 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4842 		struct dp_pdev *pdev = soc->pdev_list[i];
4843 
4844 		/* Flow search is not enabled if NSS offload is enabled */
4845 		if (pdev &&
4846 		    !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
4847 			status = dp_rx_flow_send_fst_fw_setup(pdev->soc, pdev);
4848 			if (status != QDF_STATUS_SUCCESS)
4849 				break;
4850 		}
4851 	}
4852 	return status;
4853 }
4854 #elif defined(WLAN_SUPPORT_RX_FISA)
4855 /**
4856  * dp_rx_target_fst_config() - Configure RX OLE FSE engine in HW
4857  * @soc: SoC handle
4858  *
4859  * Return: Success
4860  */
4861 static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
4862 {
4863 	/* Check if it is enabled in the INI */
4864 	if (!soc->fisa_enable) {
4865 		dp_err("RX FISA feature is disabled");
4866 		return QDF_STATUS_E_NOSUPPORT;
4867 	}
4868 
4869 	return dp_rx_flow_send_fst_fw_setup(soc, soc->pdev_list[0]);
4870 }
4871 
4872 #define FISA_MAX_TIMEOUT 0xffffffff
4873 #define FISA_DISABLE_TIMEOUT 0
4874 static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
4875 {
4876 	struct dp_htt_rx_fisa_cfg fisa_config;
4877 
4878 	fisa_config.pdev_id = 0;
4879 	fisa_config.fisa_timeout = FISA_MAX_TIMEOUT;
4880 
4881 	return dp_htt_rx_fisa_config(soc->pdev_list[0], &fisa_config);
4882 }
4883 #else /* !WLAN_SUPPORT_RX_FISA */
4884 static inline QDF_STATUS dp_rx_target_fst_config(struct dp_soc *soc)
4885 {
4886 	return QDF_STATUS_SUCCESS;
4887 }
4888 #endif /* !WLAN_SUPPORT_RX_FISA */
4889 
4890 #ifndef WLAN_SUPPORT_RX_FISA
4891 static QDF_STATUS dp_rx_fisa_config(struct dp_soc *soc)
4892 {
4893 	return QDF_STATUS_SUCCESS;
4894 }
4895 
4896 static QDF_STATUS dp_rx_dump_fisa_stats(struct dp_soc *soc)
4897 {
4898 	return QDF_STATUS_SUCCESS;
4899 }
4900 
4901 static void dp_rx_dump_fisa_table(struct dp_soc *soc)
4902 {
4903 }
4904 #endif /* !WLAN_SUPPORT_RX_FISA */
4905 
4906 /*
4907  * dp_soc_attach_target_wifi3() - SOC initialization in the target
4908  * @cdp_soc: Opaque Datapath SOC handle
4909  *
4910  * Return: zero on success, non-zero on failure
4911  */
4912 static QDF_STATUS
4913 dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
4914 {
4915 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
4916 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4917 
4918 	htt_soc_attach_target(soc->htt_handle);
4919 
4920 	status = dp_rxdma_ring_config(soc);
4921 	if (status != QDF_STATUS_SUCCESS) {
4922 		dp_err("Failed to send htt srng setup messages to target");
4923 		return status;
4924 	}
4925 
4926 	status = dp_rxdma_ring_sel_cfg(soc);
4927 	if (status != QDF_STATUS_SUCCESS) {
4928 		dp_err("Failed to send htt ring config message to target");
4929 		return status;
4930 	}
4931 
4932 	status = dp_rx_target_fst_config(soc);
4933 	if (status != QDF_STATUS_SUCCESS &&
4934 	    status != QDF_STATUS_E_NOSUPPORT) {
4935 		dp_err("Failed to send htt fst setup config message to target");
4936 		return status;
4937 	}
4938 
4939 	if (status == QDF_STATUS_SUCCESS) {
4940 		status = dp_rx_fisa_config(soc);
4941 		if (status != QDF_STATUS_SUCCESS) {
4942 			dp_err("Failed to send htt FISA config message to target");
4943 			return status;
4944 		}
4945 	}
4946 
4947 	DP_STATS_INIT(soc);
4948 
4949 	/* initialize work queue for stats processing */
4950 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
4951 
4952 	return QDF_STATUS_SUCCESS;
4953 }
4954 
4955 #ifdef QCA_SUPPORT_FULL_MON
4956 static inline QDF_STATUS
4957 dp_soc_config_full_mon_mode(struct dp_pdev *pdev, enum dp_full_mon_config val)
4958 {
4959 	struct dp_soc *soc = pdev->soc;
4960 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4961 
4962 	if (!soc->full_mon_mode)
4963 		return QDF_STATUS_SUCCESS;
4964 
4965 	if ((htt_h2t_full_mon_cfg(soc->htt_handle,
4966 				  pdev->pdev_id,
4967 				  val)) != QDF_STATUS_SUCCESS) {
4968 		status = QDF_STATUS_E_FAILURE;
4969 	}
4970 
4971 	return status;
4972 }
4973 #else
4974 static inline QDF_STATUS
4975 dp_soc_config_full_mon_mode(struct dp_pdev *pdev, enum dp_full_mon_config val)
4976 {
4977 	return 0;
4978 }
4979 #endif
4980 
4981 /*
4982  * dp_vdev_id_map_tbl_add() - Add vdev into vdev_id table
4983  * @soc: SoC handle
4984  * @vdev: vdev handle
4985  * @vdev_id: vdev_id
4986  *
4987  * Return: None
4988  */
4989 static void dp_vdev_id_map_tbl_add(struct dp_soc *soc,
4990 				   struct dp_vdev *vdev,
4991 				   uint8_t vdev_id)
4992 {
4993 	QDF_ASSERT(vdev_id <= MAX_VDEV_CNT);
4994 
4995 	qdf_spin_lock_bh(&soc->vdev_map_lock);
4996 
4997 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
4998 			QDF_STATUS_SUCCESS) {
4999 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5000 			  "unable to get vdev reference at MAP vdev %pK vdev_id %u",
5001 			  vdev, vdev_id);
5002 		qdf_spin_unlock_bh(&soc->vdev_map_lock);
5003 		return;
5004 	}
5005 
5006 	if (!soc->vdev_id_map[vdev_id])
5007 		soc->vdev_id_map[vdev_id] = vdev;
5008 	else
5009 		QDF_ASSERT(0);
5010 
5011 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
5012 }
5013 
5014 /*
5015  * dp_vdev_id_map_tbl_remove() - remove vdev from vdev_id table
5016  * @soc: SoC handle
5017  * @vdev: vdev handle
5018  *
5019  * Return: None
5020  */
5021 static void dp_vdev_id_map_tbl_remove(struct dp_soc *soc,
5022 				      struct dp_vdev *vdev)
5023 {
5024 	qdf_spin_lock_bh(&soc->vdev_map_lock);
5025 	QDF_ASSERT(soc->vdev_id_map[vdev->vdev_id] == vdev);
5026 
5027 	soc->vdev_id_map[vdev->vdev_id] = NULL;
5028 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
5029 	qdf_spin_unlock_bh(&soc->vdev_map_lock);
5030 }
5031 
5032 /*
5033  * dp_vdev_pdev_list_add() - add vdev into pdev's list
5034  * @soc: soc handle
5035  * @pdev: pdev handle
5036  * @vdev: vdev handle
5037  *
5038  * return: none
5039  */
5040 static void dp_vdev_pdev_list_add(struct dp_soc *soc,
5041 				  struct dp_pdev *pdev,
5042 				  struct dp_vdev *vdev)
5043 {
5044 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5045 	if (dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CONFIG) !=
5046 			QDF_STATUS_SUCCESS) {
5047 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5048 			  "unable to get vdev reference at MAP vdev %pK",
5049 			  vdev);
5050 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5051 		return;
5052 	}
5053 	/* add this vdev into the pdev's list */
5054 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
5055 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5056 }
5057 
5058 /*
5059  * dp_vdev_pdev_list_remove() - remove vdev from pdev's list
5060  * @soc: SoC handle
5061  * @pdev: pdev handle
5062  * @vdev: VDEV handle
5063  *
5064  * Return: none
5065  */
5066 static void dp_vdev_pdev_list_remove(struct dp_soc *soc,
5067 				     struct dp_pdev *pdev,
5068 				     struct dp_vdev *vdev)
5069 {
5070 	uint8_t found = 0;
5071 	struct dp_vdev *tmpvdev = NULL;
5072 
5073 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5074 	TAILQ_FOREACH(tmpvdev, &pdev->vdev_list, vdev_list_elem) {
5075 		if (tmpvdev == vdev) {
5076 			found = 1;
5077 			break;
5078 		}
5079 	}
5080 
5081 	if (found) {
5082 		TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
5083 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
5084 	} else {
5085 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
5086 			  "vdev:%pK not found in pdev:%pK vdevlist:%pK",
5087 			  vdev, pdev, &pdev->vdev_list);
5088 		QDF_ASSERT(0);
5089 	}
5090 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5091 }
5092 
5093 /*
5094 * dp_vdev_attach_wifi3() - attach txrx vdev
5095 * @txrx_pdev: Datapath PDEV handle
5096 * @vdev_mac_addr: MAC address of the virtual interface
5097 * @vdev_id: VDEV Id
5098 * @wlan_op_mode: VDEV operating mode
5099 * @subtype: VDEV operating subtype
5100 *
5101 * Return: status
5102 */
5103 static QDF_STATUS dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc,
5104 				       uint8_t pdev_id,
5105 				       uint8_t *vdev_mac_addr,
5106 				       uint8_t vdev_id,
5107 				       enum wlan_op_mode op_mode,
5108 				       enum wlan_op_subtype subtype)
5109 {
5110 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
5111 	struct dp_pdev *pdev =
5112 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
5113 						   pdev_id);
5114 	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
5115 	int i = 0;
5116 
5117 	if (!pdev) {
5118 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5119 			  FL("DP PDEV is Null for pdev id %d"), pdev_id);
5120 		qdf_mem_free(vdev);
5121 		goto fail0;
5122 	}
5123 
5124 	if (!vdev) {
5125 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5126 			FL("DP VDEV memory allocation failed"));
5127 		goto fail0;
5128 	}
5129 
5130 	wlan_minidump_log(vdev, sizeof(*vdev), soc->ctrl_psoc,
5131 			  WLAN_MD_DP_VDEV, "dp_vdev");
5132 
5133 	vdev->pdev = pdev;
5134 	vdev->vdev_id = vdev_id;
5135 	vdev->opmode = op_mode;
5136 	vdev->subtype = subtype;
5137 	vdev->osdev = soc->osdev;
5138 
5139 	vdev->osif_rx = NULL;
5140 	vdev->osif_rsim_rx_decap = NULL;
5141 	vdev->osif_get_key = NULL;
5142 	vdev->osif_rx_mon = NULL;
5143 	vdev->osif_tx_free_ext = NULL;
5144 	vdev->osif_vdev = NULL;
5145 
5146 	vdev->delete.pending = 0;
5147 	vdev->safemode = 0;
5148 	vdev->drop_unenc = 1;
5149 	vdev->sec_type = cdp_sec_type_none;
5150 	vdev->multipass_en = false;
5151 	qdf_atomic_init(&vdev->ref_cnt);
5152 	for (i = 0; i < DP_MOD_ID_MAX; i++)
5153 		qdf_atomic_init(&vdev->mod_refs[i]);
5154 
5155 	/* Take one reference for create*/
5156 	qdf_atomic_inc(&vdev->ref_cnt);
5157 	qdf_atomic_inc(&vdev->mod_refs[DP_MOD_ID_CONFIG]);
5158 	vdev->num_peers = 0;
5159 #ifdef notyet
5160 	vdev->filters_num = 0;
5161 #endif
5162 	vdev->lmac_id = pdev->lmac_id;
5163 
5164 	qdf_mem_copy(
5165 		&vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE);
5166 
5167 	/* TODO: Initialize default HTT meta data that will be used in
5168 	 * TCL descriptors for packets transmitted from this VDEV
5169 	 */
5170 
5171 	qdf_spinlock_create(&vdev->peer_list_lock);
5172 	TAILQ_INIT(&vdev->peer_list);
5173 	dp_peer_multipass_list_init(vdev);
5174 
5175 	if ((soc->intr_mode == DP_INTR_POLL) &&
5176 	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
5177 		if ((pdev->vdev_count == 0) ||
5178 		    (wlan_op_mode_monitor == vdev->opmode))
5179 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
5180 	}
5181 
5182 	dp_vdev_id_map_tbl_add(soc, vdev, vdev_id);
5183 
5184 	if (wlan_op_mode_monitor == vdev->opmode) {
5185 		pdev->monitor_vdev = vdev;
5186 		return QDF_STATUS_SUCCESS;
5187 	}
5188 
5189 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
5190 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
5191 	vdev->dscp_tid_map_id = 0;
5192 	vdev->mcast_enhancement_en = 0;
5193 	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
5194 	vdev->prev_tx_enq_tstamp = 0;
5195 	vdev->prev_rx_deliver_tstamp = 0;
5196 
5197 	dp_vdev_pdev_list_add(soc, pdev, vdev);
5198 	pdev->vdev_count++;
5199 
5200 	if (wlan_op_mode_sta != vdev->opmode)
5201 		vdev->ap_bridge_enabled = true;
5202 	else
5203 		vdev->ap_bridge_enabled = false;
5204 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5205 		  "%s: wlan_cfg_ap_bridge_enabled %d",
5206 		  __func__, vdev->ap_bridge_enabled);
5207 
5208 	dp_tx_vdev_attach(vdev);
5209 
5210 	if (pdev->vdev_count == 1)
5211 		dp_lro_hash_setup(soc, pdev);
5212 
5213 	dp_info("Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
5214 	DP_STATS_INIT(vdev);
5215 
5216 	if (wlan_op_mode_sta == vdev->opmode)
5217 		dp_peer_create_wifi3((struct cdp_soc_t *)soc, vdev_id,
5218 				     vdev->mac_addr.raw);
5219 	return QDF_STATUS_SUCCESS;
5220 
5221 fail0:
5222 	return QDF_STATUS_E_FAILURE;
5223 }
5224 
5225 /**
5226  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
5227  * @soc: Datapath soc handle
5228  * @vdev_id: id of Datapath VDEV handle
5229  * @osif_vdev: OSIF vdev handle
5230  * @txrx_ops: Tx and Rx operations
5231  *
5232  * Return: DP VDEV handle on success, NULL on failure
5233  */
5234 static QDF_STATUS dp_vdev_register_wifi3(struct cdp_soc_t *soc_hdl,
5235 					 uint8_t vdev_id,
5236 					 ol_osif_vdev_handle osif_vdev,
5237 					 struct ol_txrx_ops *txrx_ops)
5238 {
5239 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
5240 	struct dp_vdev *vdev =	dp_vdev_get_ref_by_id(soc, vdev_id,
5241 						      DP_MOD_ID_CDP);
5242 
5243 	if (!vdev)
5244 		return QDF_STATUS_E_FAILURE;
5245 
5246 	vdev->osif_vdev = osif_vdev;
5247 	vdev->osif_rx = txrx_ops->rx.rx;
5248 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
5249 	vdev->osif_rx_flush = txrx_ops->rx.rx_flush;
5250 	vdev->osif_gro_flush = txrx_ops->rx.rx_gro_flush;
5251 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
5252 	vdev->osif_fisa_rx = txrx_ops->rx.osif_fisa_rx;
5253 	vdev->osif_fisa_flush = txrx_ops->rx.osif_fisa_flush;
5254 	vdev->osif_get_key = txrx_ops->get_key;
5255 	vdev->osif_rx_mon = txrx_ops->rx.mon;
5256 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
5257 	vdev->tx_comp = txrx_ops->tx.tx_comp;
5258 	vdev->stats_cb = txrx_ops->rx.stats_rx;
5259 #ifdef notyet
5260 #if ATH_SUPPORT_WAPI
5261 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
5262 #endif
5263 #endif
5264 #ifdef UMAC_SUPPORT_PROXY_ARP
5265 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
5266 #endif
5267 	vdev->me_convert = txrx_ops->me_convert;
5268 
5269 	/* TODO: Enable the following once Tx code is integrated */
5270 	if (vdev->mesh_vdev)
5271 		txrx_ops->tx.tx = dp_tx_send_mesh;
5272 	else
5273 		txrx_ops->tx.tx = dp_tx_send;
5274 
5275 	txrx_ops->tx.tx_exception = dp_tx_send_exception;
5276 
5277 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
5278 		"DP Vdev Register success");
5279 
5280 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5281 	return QDF_STATUS_SUCCESS;
5282 }
5283 
5284 /**
5285  * dp_peer_delete() - delete DP peer
5286  *
5287  * @soc: Datatpath soc
5288  * @peer: Datapath peer
5289  * @arg: argument to iter function
5290  *
5291  * Return: void
5292  */
5293 static void
5294 dp_peer_delete(struct dp_soc *soc,
5295 	       struct dp_peer *peer,
5296 	       void *arg)
5297 {
5298 	if (!peer->valid)
5299 		return;
5300 
5301 	dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
5302 			     peer->vdev->vdev_id,
5303 			     peer->mac_addr.raw, 0);
5304 }
5305 
5306 /**
5307  * dp_vdev_flush_peers() - Forcibily Flush peers of vdev
5308  * @vdev: Datapath VDEV handle
5309  * @unmap_only: Flag to indicate "only unmap"
5310  *
5311  * Return: void
5312  */
5313 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, bool unmap_only)
5314 {
5315 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5316 	struct dp_pdev *pdev = vdev->pdev;
5317 	struct dp_soc *soc = pdev->soc;
5318 	struct dp_peer *peer;
5319 	uint32_t i = 0;
5320 
5321 
5322 	if (!unmap_only)
5323 		dp_vdev_iterate_peer(vdev, dp_peer_delete, NULL,
5324 				     DP_MOD_ID_CDP);
5325 
5326 	for (i = 0; i < soc->max_peers ; i++) {
5327 		peer = __dp_peer_get_ref_by_id(soc, i, DP_MOD_ID_CDP);
5328 
5329 		if (!peer)
5330 			continue;
5331 
5332 		if (peer->vdev != vdev) {
5333 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5334 			continue;
5335 		}
5336 
5337 		dp_info("peer: %pM is getting unmap",
5338 			peer->mac_addr.raw);
5339 
5340 		dp_rx_peer_unmap_handler(soc, i,
5341 					 vdev->vdev_id,
5342 					 peer->mac_addr.raw, 0,
5343 					 DP_PEER_WDS_COUNT_INVALID);
5344 		SET_PEER_REF_CNT_ONE(peer);
5345 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5346 	}
5347 
5348 }
5349 
5350 /*
5351  * dp_vdev_detach_wifi3() - Detach txrx vdev
5352  * @cdp_soc: Datapath soc handle
5353  * @vdev_id: VDEV Id
5354  * @callback: Callback OL_IF on completion of detach
5355  * @cb_context:	Callback context
5356  *
5357  */
5358 static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc,
5359 				       uint8_t vdev_id,
5360 				       ol_txrx_vdev_delete_cb callback,
5361 				       void *cb_context)
5362 {
5363 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
5364 	struct dp_pdev *pdev;
5365 	struct dp_neighbour_peer *peer = NULL;
5366 	struct dp_neighbour_peer *temp_peer = NULL;
5367 	struct dp_peer *vap_self_peer = NULL;
5368 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
5369 						     DP_MOD_ID_CDP);
5370 
5371 	if (!vdev)
5372 		return QDF_STATUS_E_FAILURE;
5373 
5374 	pdev = vdev->pdev;
5375 
5376 	vap_self_peer = dp_sta_vdev_self_peer_ref_n_get(soc, vdev,
5377 							DP_MOD_ID_CONFIG);
5378 	if (vap_self_peer) {
5379 		qdf_spin_lock_bh(&soc->ast_lock);
5380 		if (vap_self_peer->self_ast_entry) {
5381 			dp_peer_del_ast(soc, vap_self_peer->self_ast_entry);
5382 			vap_self_peer->self_ast_entry = NULL;
5383 		}
5384 		qdf_spin_unlock_bh(&soc->ast_lock);
5385 
5386 		dp_peer_delete_wifi3((struct cdp_soc_t *)soc, vdev->vdev_id,
5387 				     vap_self_peer->mac_addr.raw, 0);
5388 		dp_peer_unref_delete(vap_self_peer, DP_MOD_ID_CONFIG);
5389 	}
5390 
5391 	/*
5392 	 * If Target is hung, flush all peers before detaching vdev
5393 	 * this will free all references held due to missing
5394 	 * unmap commands from Target
5395 	 */
5396 	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
5397 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, false);
5398 	else if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
5399 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, true);
5400 
5401 	dp_rx_vdev_detach(vdev);
5402 	/*
5403 	 * move it after dp_rx_vdev_detach(),
5404 	 * as the call back done in dp_rx_vdev_detach()
5405 	 * still need to get vdev pointer by vdev_id.
5406 	 */
5407 	dp_vdev_id_map_tbl_remove(soc, vdev);
5408 
5409 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
5410 	if (!soc->hw_nac_monitor_support) {
5411 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
5412 			      neighbour_peer_list_elem) {
5413 			QDF_ASSERT(peer->vdev != vdev);
5414 		}
5415 	} else {
5416 		TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
5417 				   neighbour_peer_list_elem, temp_peer) {
5418 			if (peer->vdev == vdev) {
5419 				TAILQ_REMOVE(&pdev->neighbour_peers_list, peer,
5420 					     neighbour_peer_list_elem);
5421 				qdf_mem_free(peer);
5422 			}
5423 		}
5424 	}
5425 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
5426 
5427 	if (vdev->vdev_dp_ext_handle) {
5428 		qdf_mem_free(vdev->vdev_dp_ext_handle);
5429 		vdev->vdev_dp_ext_handle = NULL;
5430 	}
5431 	/* indicate that the vdev needs to be deleted */
5432 	vdev->delete.pending = 1;
5433 	vdev->delete.callback = callback;
5434 	vdev->delete.context = cb_context;
5435 
5436 	if (vdev->opmode != wlan_op_mode_monitor)
5437 		dp_vdev_pdev_list_remove(soc, pdev, vdev);
5438 
5439 	/* release reference taken above for find */
5440 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5441 
5442 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
5443 	TAILQ_INSERT_TAIL(&soc->inactive_vdev_list, vdev, inactive_list_elem);
5444 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
5445 
5446 	/* release reference taken at dp_vdev_create */
5447 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CONFIG);
5448 
5449 	return QDF_STATUS_SUCCESS;
5450 }
5451 
5452 #ifdef FEATURE_AST
5453 /*
5454  * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
5455  * @soc - datapath soc handle
5456  * @peer - datapath peer handle
5457  *
5458  * Delete the AST entries belonging to a peer
5459  */
5460 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
5461 					      struct dp_peer *peer)
5462 {
5463 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
5464 
5465 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
5466 		dp_peer_del_ast(soc, ast_entry);
5467 
5468 	peer->self_ast_entry = NULL;
5469 }
5470 #else
5471 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
5472 					      struct dp_peer *peer)
5473 {
5474 }
5475 #endif
5476 #if ATH_SUPPORT_WRAP
5477 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
5478 						uint8_t *peer_mac_addr)
5479 {
5480 	struct dp_peer *peer;
5481 
5482 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
5483 				      0, vdev->vdev_id,
5484 				      DP_MOD_ID_CONFIG);
5485 	if (!peer)
5486 		return NULL;
5487 
5488 	if (peer->bss_peer)
5489 		return peer;
5490 
5491 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
5492 	return NULL;
5493 }
5494 #else
5495 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
5496 						uint8_t *peer_mac_addr)
5497 {
5498 	struct dp_peer *peer;
5499 
5500 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
5501 				      0, vdev->vdev_id,
5502 				      DP_MOD_ID_CONFIG);
5503 	if (!peer)
5504 		return NULL;
5505 
5506 	if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
5507 		return peer;
5508 
5509 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
5510 	return NULL;
5511 }
5512 #endif
5513 
5514 #ifdef FEATURE_AST
5515 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
5516 					       struct dp_pdev *pdev,
5517 					       uint8_t *peer_mac_addr)
5518 {
5519 	struct dp_ast_entry *ast_entry;
5520 
5521 	qdf_spin_lock_bh(&soc->ast_lock);
5522 	if (soc->ast_override_support)
5523 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
5524 							    pdev->pdev_id);
5525 	else
5526 		ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
5527 
5528 	if (ast_entry && ast_entry->next_hop && !ast_entry->delete_in_progress)
5529 		dp_peer_del_ast(soc, ast_entry);
5530 
5531 	qdf_spin_unlock_bh(&soc->ast_lock);
5532 }
5533 #endif
5534 
5535 #ifdef PEER_CACHE_RX_PKTS
5536 static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
5537 {
5538 	qdf_spinlock_create(&peer->bufq_info.bufq_lock);
5539 	peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
5540 	qdf_list_create(&peer->bufq_info.cached_bufq, DP_RX_CACHED_BUFQ_THRESH);
5541 }
5542 #else
5543 static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
5544 {
5545 }
5546 #endif
5547 
5548 /*
5549  * dp_peer_create_wifi3() - attach txrx peer
5550  * @soc_hdl: Datapath soc handle
5551  * @vdev_id: id of vdev
5552  * @peer_mac_addr: Peer MAC address
5553  *
5554  * Return: 0 on success, -1 on failure
5555  */
5556 static QDF_STATUS
5557 dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5558 		     uint8_t *peer_mac_addr)
5559 {
5560 	struct dp_peer *peer;
5561 	int i;
5562 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
5563 	struct dp_pdev *pdev;
5564 	struct cdp_peer_cookie peer_cookie;
5565 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
5566 	struct dp_vdev *vdev = NULL;
5567 
5568 	if (!peer_mac_addr)
5569 		return QDF_STATUS_E_FAILURE;
5570 
5571 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
5572 
5573 	if (!vdev)
5574 		return QDF_STATUS_E_FAILURE;
5575 
5576 	pdev = vdev->pdev;
5577 	soc = pdev->soc;
5578 
5579 	/*
5580 	 * If a peer entry with given MAC address already exists,
5581 	 * reuse the peer and reset the state of peer.
5582 	 */
5583 	peer = dp_peer_can_reuse(vdev, peer_mac_addr);
5584 
5585 	if (peer) {
5586 		qdf_atomic_init(&peer->is_default_route_set);
5587 		dp_peer_cleanup(vdev, peer);
5588 
5589 		qdf_spin_lock_bh(&soc->ast_lock);
5590 		dp_peer_delete_ast_entries(soc, peer);
5591 		qdf_spin_unlock_bh(&soc->ast_lock);
5592 
5593 		if ((vdev->opmode == wlan_op_mode_sta) &&
5594 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
5595 		     QDF_MAC_ADDR_SIZE)) {
5596 			ast_type = CDP_TXRX_AST_TYPE_SELF;
5597 		}
5598 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
5599 
5600 		peer->valid = 1;
5601 		dp_local_peer_id_alloc(pdev, peer);
5602 
5603 		qdf_spinlock_create(&peer->peer_info_lock);
5604 		dp_peer_rx_bufq_resources_init(peer);
5605 
5606 		DP_STATS_INIT(peer);
5607 		DP_STATS_UPD(peer, rx.avg_rssi, INVALID_RSSI);
5608 
5609 		/*
5610 		 * In tx_monitor mode, filter may be set for unassociated peer
5611 		 * when unassociated peer get associated peer need to
5612 		 * update tx_cap_enabled flag to support peer filter.
5613 		 */
5614 		dp_peer_tx_capture_filter_check(pdev, peer);
5615 
5616 		dp_set_peer_isolation(peer, false);
5617 
5618 		for (i = 0; i < DP_MAX_TIDS; i++)
5619 			qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
5620 
5621 		dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
5622 
5623 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5624 		return QDF_STATUS_SUCCESS;
5625 	} else {
5626 		/*
5627 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
5628 		 * need to remove the AST entry which was earlier added as a WDS
5629 		 * entry.
5630 		 * If an AST entry exists, but no peer entry exists with a given
5631 		 * MAC addresses, we could deduce it as a WDS entry
5632 		 */
5633 		dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
5634 	}
5635 
5636 #ifdef notyet
5637 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
5638 		soc->mempool_ol_ath_peer);
5639 #else
5640 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
5641 #endif
5642 	wlan_minidump_log(peer,
5643 			  sizeof(*peer),
5644 			  soc->ctrl_psoc,
5645 			  WLAN_MD_DP_PEER, "dp_peer");
5646 	if (!peer) {
5647 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5648 		return QDF_STATUS_E_FAILURE; /* failure */
5649 	}
5650 
5651 	qdf_mem_zero(peer, sizeof(struct dp_peer));
5652 
5653 	TAILQ_INIT(&peer->ast_entry_list);
5654 
5655 	/* store provided params */
5656 	peer->vdev = vdev;
5657 	/* get the vdev reference for new peer */
5658 	dp_vdev_get_ref(soc, vdev, DP_MOD_ID_CHILD);
5659 
5660 	if ((vdev->opmode == wlan_op_mode_sta) &&
5661 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
5662 			 QDF_MAC_ADDR_SIZE)) {
5663 		ast_type = CDP_TXRX_AST_TYPE_SELF;
5664 	}
5665 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
5666 	qdf_spinlock_create(&peer->peer_info_lock);
5667 
5668 	dp_peer_rx_bufq_resources_init(peer);
5669 
5670 	qdf_mem_copy(
5671 		&peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE);
5672 
5673 	/* initialize the peer_id */
5674 	peer->peer_id = HTT_INVALID_PEER;
5675 
5676 	/* reset the ast index to flowid table */
5677 	dp_peer_reset_flowq_map(peer);
5678 
5679 	qdf_atomic_init(&peer->ref_cnt);
5680 
5681 	for (i = 0; i < DP_MOD_ID_MAX; i++)
5682 		qdf_atomic_init(&peer->mod_refs[i]);
5683 
5684 	/* keep one reference for attach */
5685 	qdf_atomic_inc(&peer->ref_cnt);
5686 	qdf_atomic_inc(&peer->mod_refs[DP_MOD_ID_CONFIG]);
5687 
5688 	dp_peer_vdev_list_add(soc, vdev, peer);
5689 
5690 	/* TODO: See if hash based search is required */
5691 	dp_peer_find_hash_add(soc, peer);
5692 
5693 	/* Initialize the peer state */
5694 	peer->state = OL_TXRX_PEER_STATE_DISC;
5695 
5696 	dp_info("vdev %pK created peer %pK (%pM) ref_cnt: %d",
5697 		vdev, peer, peer->mac_addr.raw,
5698 		qdf_atomic_read(&peer->ref_cnt));
5699 	/*
5700 	 * For every peer MAp message search and set if bss_peer
5701 	 */
5702 	if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
5703 			QDF_MAC_ADDR_SIZE) == 0 &&
5704 			(wlan_op_mode_sta != vdev->opmode)) {
5705 		dp_info("vdev bss_peer!!");
5706 		peer->bss_peer = 1;
5707 	}
5708 
5709 	if (wlan_op_mode_sta == vdev->opmode &&
5710 	    qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
5711 			QDF_MAC_ADDR_SIZE) == 0) {
5712 		peer->sta_self_peer = 1;
5713 	}
5714 
5715 	for (i = 0; i < DP_MAX_TIDS; i++)
5716 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
5717 
5718 	peer->valid = 1;
5719 	dp_local_peer_id_alloc(pdev, peer);
5720 	DP_STATS_INIT(peer);
5721 	DP_STATS_UPD(peer, rx.avg_rssi, INVALID_RSSI);
5722 
5723 	qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
5724 		     QDF_MAC_ADDR_SIZE);
5725 	peer_cookie.ctx = NULL;
5726 	peer_cookie.pdev_id = pdev->pdev_id;
5727 	peer_cookie.cookie = pdev->next_peer_cookie++;
5728 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
5729 	dp_wdi_event_handler(WDI_EVENT_PEER_CREATE, pdev->soc,
5730 			     (void *)&peer_cookie,
5731 			     peer->peer_id, WDI_NO_VAL, pdev->pdev_id);
5732 #endif
5733 	if (soc->wlanstats_enabled) {
5734 		if (!peer_cookie.ctx) {
5735 			pdev->next_peer_cookie--;
5736 			qdf_err("Failed to initialize peer rate stats");
5737 		} else {
5738 			peer->wlanstats_ctx = (struct cdp_peer_rate_stats_ctx *)
5739 						peer_cookie.ctx;
5740 		}
5741 	}
5742 
5743 	/*
5744 	 * Allocate peer extended stats context. Fall through in
5745 	 * case of failure as its not an implicit requirement to have
5746 	 * this object for regular statistics updates.
5747 	 */
5748 	if (dp_peer_ext_stats_ctx_alloc(soc, peer) !=
5749 			QDF_STATUS_SUCCESS)
5750 		dp_warn("peer ext_stats ctx alloc failed");
5751 
5752 	/*
5753 	 * In tx_monitor mode, filter may be set for unassociated peer
5754 	 * when unassociated peer get associated peer need to
5755 	 * update tx_cap_enabled flag to support peer filter.
5756 	 */
5757 	dp_peer_tx_capture_filter_check(pdev, peer);
5758 
5759 	dp_set_peer_isolation(peer, false);
5760 
5761 	dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
5762 
5763 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
5764 
5765 	return QDF_STATUS_SUCCESS;
5766 }
5767 
5768 /*
5769  * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
5770  * @vdev: Datapath VDEV handle
5771  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5772  * @hash_based: pointer to hash value (enabled/disabled) to be populated
5773  *
5774  * Return: None
5775  */
5776 static
5777 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
5778 				  enum cdp_host_reo_dest_ring *reo_dest,
5779 				  bool *hash_based)
5780 {
5781 	struct dp_soc *soc;
5782 	struct dp_pdev *pdev;
5783 
5784 	pdev = vdev->pdev;
5785 	soc = pdev->soc;
5786 	/*
5787 	 * hash based steering is disabled for Radios which are offloaded
5788 	 * to NSS
5789 	 */
5790 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
5791 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
5792 
5793 	/*
5794 	 * Below line of code will ensure the proper reo_dest ring is chosen
5795 	 * for cases where toeplitz hash cannot be generated (ex: non TCP/UDP)
5796 	 */
5797 	*reo_dest = pdev->reo_dest;
5798 }
5799 
5800 #ifdef IPA_OFFLOAD
5801 /**
5802  * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
5803  * @vdev: Virtual device
5804  *
5805  * Return: true if the vdev is of subtype P2P
5806  *	   false if the vdev is of any other subtype
5807  */
5808 static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
5809 {
5810 	if (vdev->subtype == wlan_op_subtype_p2p_device ||
5811 	    vdev->subtype == wlan_op_subtype_p2p_cli ||
5812 	    vdev->subtype == wlan_op_subtype_p2p_go)
5813 		return true;
5814 
5815 	return false;
5816 }
5817 
5818 /*
5819  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
5820  * @vdev: Datapath VDEV handle
5821  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5822  * @hash_based: pointer to hash value (enabled/disabled) to be populated
5823  *
5824  * If IPA is enabled in ini, for SAP mode, disable hash based
5825  * steering, use default reo_dst ring for RX. Use config values for other modes.
5826  * Return: None
5827  */
5828 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
5829 				       enum cdp_host_reo_dest_ring *reo_dest,
5830 				       bool *hash_based)
5831 {
5832 	struct dp_soc *soc;
5833 	struct dp_pdev *pdev;
5834 
5835 	pdev = vdev->pdev;
5836 	soc = pdev->soc;
5837 
5838 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
5839 
5840 	/* For P2P-GO interfaces we do not need to change the REO
5841 	 * configuration even if IPA config is enabled
5842 	 */
5843 	if (dp_is_vdev_subtype_p2p(vdev))
5844 		return;
5845 
5846 	/*
5847 	 * If IPA is enabled, disable hash-based flow steering and set
5848 	 * reo_dest_ring_4 as the REO ring to receive packets on.
5849 	 * IPA is configured to reap reo_dest_ring_4.
5850 	 *
5851 	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
5852 	 * value enum value is from 1 - 4.
5853 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
5854 	 */
5855 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5856 		if (vdev->opmode == wlan_op_mode_ap) {
5857 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
5858 			*hash_based = 0;
5859 		} else if (vdev->opmode == wlan_op_mode_sta &&
5860 			   dp_ipa_is_mdm_platform()) {
5861 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
5862 		}
5863 	}
5864 }
5865 
5866 #else
5867 
5868 /*
5869  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
5870  * @vdev: Datapath VDEV handle
5871  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5872  * @hash_based: pointer to hash value (enabled/disabled) to be populated
5873  *
5874  * Use system config values for hash based steering.
5875  * Return: None
5876  */
5877 
5878 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
5879 				       enum cdp_host_reo_dest_ring *reo_dest,
5880 				       bool *hash_based)
5881 {
5882 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
5883 }
5884 #endif /* IPA_OFFLOAD */
5885 
5886 /*
5887  * dp_peer_setup_wifi3() - initialize the peer
5888  * @soc_hdl: soc handle object
5889  * @vdev_id : vdev_id of vdev object
5890  * @peer_mac: Peer's mac address
5891  *
5892  * Return: QDF_STATUS
5893  */
5894 static QDF_STATUS
5895 dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5896 		    uint8_t *peer_mac)
5897 {
5898 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
5899 	struct dp_pdev *pdev;
5900 	bool hash_based = 0;
5901 	enum cdp_host_reo_dest_ring reo_dest;
5902 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5903 	struct dp_vdev *vdev = NULL;
5904 	struct dp_peer *peer =
5905 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
5906 					       DP_MOD_ID_CDP);
5907 
5908 	if (!peer)
5909 		return QDF_STATUS_E_FAILURE;
5910 
5911 	vdev = peer->vdev;
5912 	if (!vdev) {
5913 		status = QDF_STATUS_E_FAILURE;
5914 		goto fail;
5915 	}
5916 
5917 	pdev = vdev->pdev;
5918 	dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based);
5919 
5920 	dp_info("pdev: %d vdev :%d opmode:%u hash-based-steering:%d default-reo_dest:%u",
5921 		pdev->pdev_id, vdev->vdev_id,
5922 		vdev->opmode, hash_based, reo_dest);
5923 
5924 	/*
5925 	 * There are corner cases where the AD1 = AD2 = "VAPs address"
5926 	 * i.e both the devices have same MAC address. In these
5927 	 * cases we want such pkts to be processed in NULL Q handler
5928 	 * which is REO2TCL ring. for this reason we should
5929 	 * not setup reo_queues and default route for bss_peer.
5930 	 */
5931 	dp_peer_tx_init(pdev, peer);
5932 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
5933 		status = QDF_STATUS_E_FAILURE;
5934 		goto fail;
5935 	}
5936 
5937 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
5938 		/* TODO: Check the destination ring number to be passed to FW */
5939 		soc->cdp_soc.ol_ops->peer_set_default_routing(
5940 				soc->ctrl_psoc,
5941 				peer->vdev->pdev->pdev_id,
5942 				peer->mac_addr.raw,
5943 				peer->vdev->vdev_id, hash_based, reo_dest);
5944 	}
5945 
5946 	qdf_atomic_set(&peer->is_default_route_set, 1);
5947 
5948 	dp_peer_rx_init(pdev, peer);
5949 
5950 	dp_peer_ppdu_delayed_ba_init(peer);
5951 
5952 fail:
5953 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
5954 	return status;
5955 }
5956 
5957 /*
5958  * dp_cp_peer_del_resp_handler - Handle the peer delete response
5959  * @soc_hdl: Datapath SOC handle
5960  * @vdev_id: id of virtual device object
5961  * @mac_addr: Mac address of the peer
5962  *
5963  * Return: QDF_STATUS
5964  */
5965 static QDF_STATUS dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl,
5966 					      uint8_t vdev_id,
5967 					      uint8_t *mac_addr)
5968 {
5969 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
5970 	struct dp_ast_entry  *ast_entry = NULL;
5971 	txrx_ast_free_cb cb = NULL;
5972 	void *cookie;
5973 
5974 	qdf_spin_lock_bh(&soc->ast_lock);
5975 
5976 	ast_entry =
5977 		dp_peer_ast_hash_find_by_vdevid(soc, mac_addr,
5978 						vdev_id);
5979 
5980 	/* in case of qwrap we have multiple BSS peers
5981 	 * with same mac address
5982 	 *
5983 	 * AST entry for this mac address will be created
5984 	 * only for one peer hence it will be NULL here
5985 	 */
5986 	if ((!ast_entry || !ast_entry->delete_in_progress) ||
5987 	    (ast_entry->peer_id != HTT_INVALID_PEER)) {
5988 		qdf_spin_unlock_bh(&soc->ast_lock);
5989 		return QDF_STATUS_E_FAILURE;
5990 	}
5991 
5992 	if (ast_entry->is_mapped)
5993 		soc->ast_table[ast_entry->ast_idx] = NULL;
5994 
5995 	DP_STATS_INC(soc, ast.deleted, 1);
5996 	dp_peer_ast_hash_remove(soc, ast_entry);
5997 
5998 	cb = ast_entry->callback;
5999 	cookie = ast_entry->cookie;
6000 	ast_entry->callback = NULL;
6001 	ast_entry->cookie = NULL;
6002 
6003 	soc->num_ast_entries--;
6004 	qdf_spin_unlock_bh(&soc->ast_lock);
6005 
6006 	if (cb) {
6007 		cb(soc->ctrl_psoc,
6008 		   dp_soc_to_cdp_soc(soc),
6009 		   cookie,
6010 		   CDP_TXRX_AST_DELETED);
6011 	}
6012 	qdf_mem_free(ast_entry);
6013 
6014 	return QDF_STATUS_SUCCESS;
6015 }
6016 
6017 /*
6018  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
6019  * @txrx_soc: cdp soc handle
6020  * @ac: Access category
6021  * @value: timeout value in millisec
6022  *
6023  * Return: void
6024  */
6025 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
6026 				    uint8_t ac, uint32_t value)
6027 {
6028 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6029 
6030 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
6031 }
6032 
6033 /*
6034  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
6035  * @txrx_soc: cdp soc handle
6036  * @ac: access category
6037  * @value: timeout value in millisec
6038  *
6039  * Return: void
6040  */
6041 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
6042 				    uint8_t ac, uint32_t *value)
6043 {
6044 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
6045 
6046 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
6047 }
6048 
6049 /*
6050  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
6051  * @txrx_soc: cdp soc handle
6052  * @pdev_id: id of physical device object
6053  * @val: reo destination ring index (1 - 4)
6054  *
6055  * Return: QDF_STATUS
6056  */
6057 static QDF_STATUS
6058 dp_set_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id,
6059 		     enum cdp_host_reo_dest_ring val)
6060 {
6061 	struct dp_pdev *pdev =
6062 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
6063 						   pdev_id);
6064 
6065 	if (pdev) {
6066 		pdev->reo_dest = val;
6067 		return QDF_STATUS_SUCCESS;
6068 	}
6069 
6070 	return QDF_STATUS_E_FAILURE;
6071 }
6072 
6073 /*
6074  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
6075  * @txrx_soc: cdp soc handle
6076  * @pdev_id: id of physical device object
6077  *
6078  * Return: reo destination ring index
6079  */
6080 static enum cdp_host_reo_dest_ring
6081 dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id)
6082 {
6083 	struct dp_pdev *pdev =
6084 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
6085 						   pdev_id);
6086 
6087 	if (pdev)
6088 		return pdev->reo_dest;
6089 	else
6090 		return cdp_host_reo_dest_ring_unknown;
6091 }
6092 
6093 #ifdef ATH_SUPPORT_NAC
6094 /*
6095  * dp_set_filter_neigh_peers() - set filter neighbour peers for smart mesh
6096  * @pdev_handle: device object
6097  * @val: value to be set
6098  *
6099  * Return: void
6100  */
6101 static int dp_set_filter_neigh_peers(struct dp_pdev *pdev,
6102 				     bool val)
6103 {
6104 	/* Enable/Disable smart mesh filtering. This flag will be checked
6105 	 * during rx processing to check if packets are from NAC clients.
6106 	 */
6107 	pdev->filter_neighbour_peers = val;
6108 	return 0;
6109 }
6110 #else
6111 static int dp_set_filter_neigh_peers(struct dp_pdev *pdev,
6112 				     bool val)
6113 {
6114 	return 0;
6115 }
6116 #endif /* ATH_SUPPORT_NAC */
6117 
6118 #if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
6119 /*
6120  * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
6121  * address for smart mesh filtering
6122  * @txrx_soc: cdp soc handle
6123  * @vdev_id: id of virtual device object
6124  * @cmd: Add/Del command
6125  * @macaddr: nac client mac address
6126  *
6127  * Return: success/failure
6128  */
6129 static int dp_update_filter_neighbour_peers(struct cdp_soc_t *soc_hdl,
6130 					    uint8_t vdev_id,
6131 					    uint32_t cmd, uint8_t *macaddr)
6132 {
6133 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6134 	struct dp_pdev *pdev;
6135 	struct dp_neighbour_peer *peer = NULL;
6136 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
6137 						     DP_MOD_ID_CDP);
6138 
6139 	if (!vdev || !macaddr)
6140 		goto fail0;
6141 
6142 	pdev = vdev->pdev;
6143 
6144 	if (!pdev)
6145 		goto fail0;
6146 
6147 	/* Store address of NAC (neighbour peer) which will be checked
6148 	 * against TA of received packets.
6149 	 */
6150 	if (cmd == DP_NAC_PARAM_ADD) {
6151 		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
6152 				sizeof(*peer));
6153 
6154 		if (!peer) {
6155 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6156 				FL("DP neighbour peer node memory allocation failed"));
6157 			goto fail0;
6158 		}
6159 
6160 		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
6161 			macaddr, QDF_MAC_ADDR_SIZE);
6162 		peer->vdev = vdev;
6163 
6164 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
6165 
6166 		/* add this neighbour peer into the list */
6167 		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
6168 				neighbour_peer_list_elem);
6169 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
6170 
6171 		/* first neighbour */
6172 		if (!pdev->neighbour_peers_added) {
6173 			QDF_STATUS status = QDF_STATUS_SUCCESS;
6174 
6175 			pdev->neighbour_peers_added = true;
6176 			dp_mon_filter_setup_smart_monitor(pdev);
6177 			status = dp_mon_filter_update(pdev);
6178 			if (status != QDF_STATUS_SUCCESS) {
6179 				QDF_TRACE(QDF_MODULE_ID_DP,
6180 					  QDF_TRACE_LEVEL_ERROR,
6181 					  FL("smart mon filter setup failed"));
6182 				dp_mon_filter_reset_smart_monitor(pdev);
6183 				pdev->neighbour_peers_added = false;
6184 			}
6185 		}
6186 
6187 	} else if (cmd == DP_NAC_PARAM_DEL) {
6188 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
6189 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
6190 				neighbour_peer_list_elem) {
6191 			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
6192 				macaddr, QDF_MAC_ADDR_SIZE)) {
6193 				/* delete this peer from the list */
6194 				TAILQ_REMOVE(&pdev->neighbour_peers_list,
6195 					peer, neighbour_peer_list_elem);
6196 				qdf_mem_free(peer);
6197 				break;
6198 			}
6199 		}
6200 		/* last neighbour deleted */
6201 		if (TAILQ_EMPTY(&pdev->neighbour_peers_list)) {
6202 			QDF_STATUS status = QDF_STATUS_SUCCESS;
6203 
6204 			pdev->neighbour_peers_added = false;
6205 			dp_mon_filter_reset_smart_monitor(pdev);
6206 			status = dp_mon_filter_update(pdev);
6207 			if (status != QDF_STATUS_SUCCESS) {
6208 				QDF_TRACE(QDF_MODULE_ID_DP,
6209 					  QDF_TRACE_LEVEL_ERROR,
6210 					  FL("smart mon filter clear failed"));
6211 			}
6212 
6213 		}
6214 
6215 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
6216 	}
6217 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6218 	return 1;
6219 
6220 fail0:
6221 	if (vdev)
6222 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6223 	return 0;
6224 }
6225 #endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
6226 
6227 #ifdef WLAN_SUPPORT_MSCS
6228 /*
6229  * dp_record_mscs_params - MSCS parameters sent by the STA in
6230  * the MSCS Request to the AP. The AP makes a note of these
6231  * parameters while comparing the MSDUs sent by the STA, to
6232  * send the downlink traffic with correct User priority.
6233  * @soc - Datapath soc handle
6234  * @peer_mac - STA Mac address
6235  * @vdev_id - ID of the vdev handle
6236  * @mscs_params - Structure having MSCS parameters obtained
6237  * from handshake
6238  * @active - Flag to set MSCS active/inactive
6239  * return type - QDF_STATUS - Success/Invalid
6240  */
6241 static QDF_STATUS
6242 dp_record_mscs_params(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
6243 		      uint8_t vdev_id, struct cdp_mscs_params *mscs_params,
6244 		      bool active)
6245 {
6246 	struct dp_peer *peer;
6247 	QDF_STATUS status = QDF_STATUS_E_INVAL;
6248 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6249 
6250 	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
6251 				      DP_MOD_ID_CDP);
6252 
6253 	if (!peer) {
6254 		dp_err("%s: Peer is NULL!\n", __func__);
6255 		goto fail;
6256 	}
6257 	if (!active) {
6258 		dp_info("MSCS Procedure is terminated");
6259 		peer->mscs_active = active;
6260 		goto fail;
6261 	}
6262 
6263 	if (mscs_params->classifier_type == IEEE80211_TCLAS_MASK_CLA_TYPE_4) {
6264 		/* Populate entries inside IPV4 database first */
6265 		peer->mscs_ipv4_parameter.user_priority_bitmap =
6266 			mscs_params->user_pri_bitmap;
6267 		peer->mscs_ipv4_parameter.user_priority_limit =
6268 			mscs_params->user_pri_limit;
6269 		peer->mscs_ipv4_parameter.classifier_mask =
6270 			mscs_params->classifier_mask;
6271 
6272 		/* Populate entries inside IPV6 database */
6273 		peer->mscs_ipv6_parameter.user_priority_bitmap =
6274 			mscs_params->user_pri_bitmap;
6275 		peer->mscs_ipv6_parameter.user_priority_limit =
6276 			mscs_params->user_pri_limit;
6277 		peer->mscs_ipv6_parameter.classifier_mask =
6278 			mscs_params->classifier_mask;
6279 		peer->mscs_active = 1;
6280 		dp_info("\n\tMSCS Procedure request based parameters for %pM\n"
6281 			"\tClassifier_type = %d\tUser priority bitmap = %x\n"
6282 			"\tUser priority limit = %x\tClassifier mask = %x",
6283 			peer_mac,
6284 			mscs_params->classifier_type,
6285 			peer->mscs_ipv4_parameter.user_priority_bitmap,
6286 			peer->mscs_ipv4_parameter.user_priority_limit,
6287 			peer->mscs_ipv4_parameter.classifier_mask);
6288 	}
6289 
6290 	status = QDF_STATUS_SUCCESS;
6291 fail:
6292 	if (peer)
6293 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6294 	return status;
6295 }
6296 #endif
6297 
6298 /*
6299  * dp_get_sec_type() - Get the security type
6300  * @soc: soc handle
6301  * @vdev_id: id of dp handle
6302  * @peer_mac: mac of datapath PEER handle
6303  * @sec_idx:    Security id (mcast, ucast)
6304  *
6305  * return sec_type: Security type
6306  */
6307 static int dp_get_sec_type(struct cdp_soc_t *soc, uint8_t vdev_id,
6308 			   uint8_t *peer_mac, uint8_t sec_idx)
6309 {
6310 	int sec_type = 0;
6311 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
6312 						       peer_mac, 0, vdev_id,
6313 						       DP_MOD_ID_CDP);
6314 
6315 	if (!peer) {
6316 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
6317 			  "%s: Peer is NULL!\n", __func__);
6318 		return sec_type;
6319 	}
6320 
6321 	sec_type = peer->security[sec_idx].sec_type;
6322 
6323 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6324 	return sec_type;
6325 }
6326 
6327 /*
6328  * dp_peer_authorize() - authorize txrx peer
6329  * @soc: soc handle
6330  * @vdev_id: id of dp handle
6331  * @peer_mac: mac of datapath PEER handle
6332  * @authorize
6333  *
6334  */
6335 static QDF_STATUS
6336 dp_peer_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
6337 		  uint8_t *peer_mac, uint32_t authorize)
6338 {
6339 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6340 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6341 	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
6342 						      0, vdev_id,
6343 						      DP_MOD_ID_CDP);
6344 
6345 	if (!peer) {
6346 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
6347 			  "%s: Peer is NULL!\n", __func__);
6348 		status = QDF_STATUS_E_FAILURE;
6349 	} else {
6350 		peer->authorize = authorize ? 1 : 0;
6351 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6352 	}
6353 
6354 	return status;
6355 }
6356 
6357 /**
6358  * dp_vdev_unref_delete() - check and process vdev delete
6359  * @soc : DP specific soc pointer
6360  * @vdev: DP specific vdev pointer
6361  * @mod_id: module id
6362  *
6363  */
6364 void dp_vdev_unref_delete(struct dp_soc *soc, struct dp_vdev *vdev,
6365 			  enum dp_mod_id mod_id)
6366 {
6367 	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
6368 	void *vdev_delete_context = NULL;
6369 	uint8_t vdev_id = vdev->vdev_id;
6370 	struct dp_pdev *pdev = vdev->pdev;
6371 	struct dp_vdev *tmp_vdev = NULL;
6372 	uint8_t found = 0;
6373 
6374 	QDF_ASSERT(qdf_atomic_dec_return(&vdev->mod_refs[mod_id]) >= 0);
6375 
6376 	/* Return if this is not the last reference*/
6377 	if (!qdf_atomic_dec_and_test(&vdev->ref_cnt))
6378 		return;
6379 
6380 	/*
6381 	 * This should be set as last reference need to released
6382 	 * after cdp_vdev_detach() is called
6383 	 *
6384 	 * if this assert is hit there is a ref count issue
6385 	 */
6386 	QDF_ASSERT(vdev->delete.pending);
6387 
6388 	vdev_delete_cb = vdev->delete.callback;
6389 	vdev_delete_context = vdev->delete.context;
6390 
6391 	dp_info("deleting vdev object %pK (%pM)- its last peer is done",
6392 		vdev, vdev->mac_addr.raw);
6393 
6394 	if (wlan_op_mode_monitor == vdev->opmode) {
6395 		if (soc->intr_mode == DP_INTR_POLL)
6396 			qdf_timer_sync_cancel(&soc->int_timer);
6397 		pdev->monitor_vdev = NULL;
6398 		goto free_vdev;
6399 	}
6400 	/* all peers are gone, go ahead and delete it */
6401 	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
6402 			FLOW_TYPE_VDEV, vdev_id);
6403 	dp_tx_vdev_detach(vdev);
6404 
6405 free_vdev:
6406 	qdf_spinlock_destroy(&vdev->peer_list_lock);
6407 
6408 	qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
6409 	TAILQ_FOREACH(tmp_vdev, &soc->inactive_vdev_list,
6410 		      inactive_list_elem) {
6411 		if (tmp_vdev == vdev) {
6412 			found = 1;
6413 			break;
6414 		}
6415 	}
6416 	if (found)
6417 		TAILQ_REMOVE(&soc->inactive_vdev_list, vdev,
6418 			     inactive_list_elem);
6419 	/* delete this peer from the list */
6420 	qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
6421 
6422 	dp_info("deleting vdev object %pK (%pM)",
6423 		vdev, vdev->mac_addr.raw);
6424 	wlan_minidump_remove(vdev);
6425 	qdf_mem_free(vdev);
6426 	vdev = NULL;
6427 
6428 	if (vdev_delete_cb)
6429 		vdev_delete_cb(vdev_delete_context);
6430 }
6431 
6432 /*
6433  * dp_peer_unref_delete() - unref and delete peer
6434  * @peer_handle:    Datapath peer handle
6435  * @mod_id:         ID of module releasing reference
6436  *
6437  */
6438 void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id mod_id)
6439 {
6440 	struct dp_vdev *vdev = peer->vdev;
6441 	struct dp_pdev *pdev = vdev->pdev;
6442 	struct dp_soc *soc = pdev->soc;
6443 	uint16_t peer_id;
6444 	struct cdp_peer_cookie peer_cookie;
6445 	struct dp_peer *tmp_peer;
6446 	bool found = false;
6447 
6448 	if (mod_id > DP_MOD_ID_RX)
6449 		QDF_ASSERT(qdf_atomic_dec_return(&peer->mod_refs[mod_id]) >= 0);
6450 
6451 	/*
6452 	 * Hold the lock all the way from checking if the peer ref count
6453 	 * is zero until the peer references are removed from the hash
6454 	 * table and vdev list (if the peer ref count is zero).
6455 	 * This protects against a new HL tx operation starting to use the
6456 	 * peer object just after this function concludes it's done being used.
6457 	 * Furthermore, the lock needs to be held while checking whether the
6458 	 * vdev's list of peers is empty, to make sure that list is not modified
6459 	 * concurrently with the empty check.
6460 	 */
6461 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
6462 		peer_id = peer->peer_id;
6463 
6464 		/*
6465 		 * Make sure that the reference to the peer in
6466 		 * peer object map is removed
6467 		 */
6468 		QDF_ASSERT(peer_id == HTT_INVALID_PEER);
6469 
6470 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
6471 			  "Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
6472 
6473 		/*
6474 		 * Deallocate the extended stats contenxt
6475 		 */
6476 		dp_peer_ext_stats_ctx_dealloc(soc, peer);
6477 
6478 		/* send peer destroy event to upper layer */
6479 		qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
6480 			     QDF_MAC_ADDR_SIZE);
6481 		peer_cookie.ctx = NULL;
6482 		peer_cookie.ctx = (struct cdp_stats_cookie *)
6483 					peer->wlanstats_ctx;
6484 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6485 		dp_wdi_event_handler(WDI_EVENT_PEER_DESTROY,
6486 				     soc,
6487 				     (void *)&peer_cookie,
6488 				     peer->peer_id,
6489 				     WDI_NO_VAL,
6490 				     pdev->pdev_id);
6491 #endif
6492 		peer->wlanstats_ctx = NULL;
6493 		wlan_minidump_remove(peer);
6494 
6495 		qdf_spin_lock_bh(&soc->inactive_peer_list_lock);
6496 		TAILQ_FOREACH(tmp_peer, &soc->inactive_peer_list,
6497 			      inactive_list_elem) {
6498 			if (tmp_peer == peer) {
6499 				found = 1;
6500 				break;
6501 			}
6502 		}
6503 		if (found)
6504 			TAILQ_REMOVE(&soc->inactive_peer_list, peer,
6505 				     inactive_list_elem);
6506 		/* delete this peer from the list */
6507 		qdf_spin_unlock_bh(&soc->inactive_peer_list_lock);
6508 		DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
6509 		dp_peer_update_state(soc, peer, DP_PEER_STATE_FREED);
6510 
6511 		qdf_mem_free(peer);
6512 
6513 		/*
6514 		 * Decrement ref count taken at peer create
6515 		 */
6516 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CHILD);
6517 	}
6518 }
6519 
6520 #ifdef PEER_CACHE_RX_PKTS
6521 static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
6522 {
6523 	qdf_list_destroy(&peer->bufq_info.cached_bufq);
6524 	qdf_spinlock_destroy(&peer->bufq_info.bufq_lock);
6525 }
6526 #else
6527 static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
6528 {
6529 }
6530 #endif
6531 
6532 /*
6533  * dp_peer_detach_wifi3() – Detach txrx peer
6534  * @soc_hdl: soc handle
6535  * @vdev_id: id of dp handle
6536  * @peer_mac: mac of datapath PEER handle
6537  * @bitmap: bitmap indicating special handling of request.
6538  *
6539  */
6540 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc_hdl,
6541 				       uint8_t vdev_id,
6542 				       uint8_t *peer_mac, uint32_t bitmap)
6543 {
6544 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6545 	struct dp_peer *peer = dp_peer_find_hash_find(soc, peer_mac,
6546 						      0, vdev_id,
6547 						      DP_MOD_ID_CDP);
6548 	struct dp_vdev *vdev = NULL;
6549 
6550 	/* Peer can be null for monitor vap mac address */
6551 	if (!peer) {
6552 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
6553 			  "%s: Invalid peer\n", __func__);
6554 		return QDF_STATUS_E_FAILURE;
6555 	}
6556 
6557 	if (!peer->valid) {
6558 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6559 		dp_err("Invalid peer: %pM", peer_mac);
6560 		return QDF_STATUS_E_ALREADY;
6561 	}
6562 
6563 	vdev = peer->vdev;
6564 
6565 	if (!vdev)
6566 		return QDF_STATUS_E_FAILURE;
6567 	peer->valid = 0;
6568 
6569 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
6570 		FL("peer %pK (%pM)"),  peer, peer->mac_addr.raw);
6571 
6572 	dp_local_peer_id_free(peer->vdev->pdev, peer);
6573 
6574 	/* Drop all rx packets before deleting peer */
6575 	dp_clear_peer_internal(soc, peer);
6576 
6577 	dp_peer_rx_bufq_resources_deinit(peer);
6578 
6579 	qdf_spinlock_destroy(&peer->peer_info_lock);
6580 	dp_peer_multipass_list_remove(peer);
6581 
6582 	/* remove the reference to the peer from the hash table */
6583 	dp_peer_find_hash_remove(soc, peer);
6584 
6585 	dp_peer_vdev_list_remove(soc, vdev, peer);
6586 
6587 	/*
6588 	 * Remove the reference added during peer_attach.
6589 	 * The peer will still be left allocated until the
6590 	 * PEER_UNMAP message arrives to remove the other
6591 	 * reference, added by the PEER_MAP message.
6592 	 */
6593 	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
6594 	/*
6595 	 * Remove the reference taken above
6596 	 */
6597 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6598 
6599 	return QDF_STATUS_SUCCESS;
6600 }
6601 
6602 /*
6603  * dp_get_vdev_mac_addr_wifi3() – Detach txrx peer
6604  * @soc_hdl: Datapath soc handle
6605  * @vdev_id: virtual interface id
6606  *
6607  * Return: MAC address on success, NULL on failure.
6608  *
6609  */
6610 static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_soc_t *soc_hdl,
6611 					 uint8_t vdev_id)
6612 {
6613 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6614 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
6615 						     DP_MOD_ID_CDP);
6616 	uint8_t *mac = NULL;
6617 
6618 	if (!vdev)
6619 		return NULL;
6620 
6621 	mac = vdev->mac_addr.raw;
6622 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6623 
6624 	return mac;
6625 }
6626 
6627 /*
6628  * dp_vdev_set_wds() - Enable per packet stats
6629  * @soc: DP soc handle
6630  * @vdev_id: id of DP VDEV handle
6631  * @val: value
6632  *
6633  * Return: none
6634  */
6635 static int dp_vdev_set_wds(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
6636 			   uint32_t val)
6637 {
6638 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6639 	struct dp_vdev *vdev =
6640 		dp_vdev_get_ref_by_id((struct dp_soc *)soc, vdev_id,
6641 				      DP_MOD_ID_CDP);
6642 
6643 	if (!vdev)
6644 		return QDF_STATUS_E_FAILURE;
6645 
6646 	vdev->wds_enabled = val;
6647 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6648 
6649 	return QDF_STATUS_SUCCESS;
6650 }
6651 
6652 /*
6653  * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev id of monitor mode
6654  * @soc_hdl: datapath soc handle
6655  * @pdev_id: physical device instance id
6656  *
6657  * Return: virtual interface id
6658  */
6659 static uint8_t dp_get_mon_vdev_from_pdev_wifi3(struct cdp_soc_t *soc_hdl,
6660 					       uint8_t pdev_id)
6661 {
6662 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6663 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
6664 
6665 	if (qdf_unlikely(!pdev))
6666 		return -EINVAL;
6667 
6668 	return pdev->monitor_vdev->vdev_id;
6669 }
6670 
6671 static int dp_get_opmode(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
6672 {
6673 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6674 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
6675 						     DP_MOD_ID_CDP);
6676 	int opmode;
6677 
6678 	if (!vdev) {
6679 		dp_err("vdev for id %d is NULL", vdev_id);
6680 		return -EINVAL;
6681 	}
6682 	opmode = vdev->opmode;
6683 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6684 
6685 	return opmode;
6686 }
6687 
6688 /**
6689  * dp_get_os_rx_handles_from_vdev_wifi3() - Get os rx handles for a vdev
6690  * @soc_hdl: ol_txrx_soc_handle handle
6691  * @vdev_id: vdev id for which os rx handles are needed
6692  * @stack_fn_p: pointer to stack function pointer
6693  * @osif_handle_p: pointer to ol_osif_vdev_handle
6694  *
6695  * Return: void
6696  */
6697 static
6698 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_soc_t *soc_hdl,
6699 					  uint8_t vdev_id,
6700 					  ol_txrx_rx_fp *stack_fn_p,
6701 					  ol_osif_vdev_handle *osif_vdev_p)
6702 {
6703 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6704 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
6705 						     DP_MOD_ID_CDP);
6706 
6707 	if (!vdev)
6708 		return;
6709 
6710 	*stack_fn_p = vdev->osif_rx_stack;
6711 	*osif_vdev_p = vdev->osif_vdev;
6712 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6713 }
6714 
6715 /**
6716  * dp_get_ctrl_pdev_from_vdev() - Get control pdev of vdev
6717  * @soc_hdl: datapath soc handle
6718  * @vdev_id: virtual device/interface id
6719  *
6720  * Return: Handle to control pdev
6721  */
6722 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(
6723 						struct cdp_soc_t *soc_hdl,
6724 						uint8_t vdev_id)
6725 {
6726 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6727 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
6728 						     DP_MOD_ID_CDP);
6729 	struct dp_pdev *pdev;
6730 
6731 	if (!vdev)
6732 		return NULL;
6733 
6734 	pdev = vdev->pdev;
6735 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6736 	return pdev ? (struct cdp_cfg *)pdev->wlan_cfg_ctx : NULL;
6737 }
6738 
6739 /**
6740  * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
6741  *                                 ring based on target
6742  * @soc: soc handle
6743  * @mac_for_pdev: WIN- pdev_id, MCL- mac id
6744  * @pdev: physical device handle
6745  * @ring_num: mac id
6746  * @htt_tlv_filter: tlv filter
6747  *
6748  * Return: zero on success, non-zero on failure
6749  */
6750 static inline
6751 QDF_STATUS dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
6752 				       struct dp_pdev *pdev, uint8_t ring_num,
6753 				       struct htt_rx_ring_tlv_filter htt_tlv_filter)
6754 {
6755 	QDF_STATUS status;
6756 
6757 	if (soc->wlan_cfg_ctx->rxdma1_enable)
6758 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6759 					     soc->rxdma_mon_buf_ring[ring_num]
6760 					     .hal_srng,
6761 					     RXDMA_MONITOR_BUF,
6762 					     RX_MONITOR_BUFFER_SIZE,
6763 					     &htt_tlv_filter);
6764 	else
6765 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6766 					     pdev->rx_mac_buf_ring[ring_num]
6767 					     .hal_srng,
6768 					     RXDMA_BUF, RX_DATA_BUFFER_SIZE,
6769 					     &htt_tlv_filter);
6770 
6771 	return status;
6772 }
6773 
6774 static inline void
6775 dp_pdev_disable_mcopy_code(struct dp_pdev *pdev)
6776 {
6777 	pdev->mcopy_mode = M_COPY_DISABLED;
6778 	pdev->monitor_configured = false;
6779 	pdev->monitor_vdev = NULL;
6780 }
6781 
6782 /**
6783  * dp_reset_monitor_mode() - Disable monitor mode
6784  * @soc_hdl: Datapath soc handle
6785  * @pdev_id: id of datapath PDEV handle
6786  *
6787  * Return: QDF_STATUS
6788  */
6789 QDF_STATUS dp_reset_monitor_mode(struct cdp_soc_t *soc_hdl,
6790 				 uint8_t pdev_id,
6791 				 uint8_t special_monitor)
6792 {
6793 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6794 	struct dp_pdev *pdev =
6795 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
6796 						   pdev_id);
6797 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6798 
6799 	if (!pdev)
6800 		return QDF_STATUS_E_FAILURE;
6801 
6802 	qdf_spin_lock_bh(&pdev->mon_lock);
6803 
6804 	dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_DISABLE);
6805 	pdev->monitor_vdev = NULL;
6806 	pdev->monitor_configured = false;
6807 
6808 	/*
6809 	 * Lite monitor mode, smart monitor mode and monitor
6810 	 * mode uses this APIs to filter reset and mode disable
6811 	 */
6812 	if (pdev->mcopy_mode) {
6813 #if defined(FEATURE_PERPKT_INFO)
6814 		dp_pdev_disable_mcopy_code(pdev);
6815 		dp_mon_filter_reset_mcopy_mode(pdev);
6816 #endif /* FEATURE_PERPKT_INFO */
6817 	} else if (special_monitor) {
6818 #if defined(ATH_SUPPORT_NAC)
6819 		dp_mon_filter_reset_smart_monitor(pdev);
6820 #endif /* ATH_SUPPORT_NAC */
6821 	} else {
6822 		dp_mon_filter_reset_mon_mode(pdev);
6823 	}
6824 
6825 	status = dp_mon_filter_update(pdev);
6826 	if (status != QDF_STATUS_SUCCESS) {
6827 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6828 			  FL("Failed to reset monitor filters"));
6829 	}
6830 
6831 	qdf_spin_unlock_bh(&pdev->mon_lock);
6832 	return QDF_STATUS_SUCCESS;
6833 }
6834 
6835 /**
6836  * dp_get_tx_pending() - read pending tx
6837  * @pdev_handle: Datapath PDEV handle
6838  *
6839  * Return: outstanding tx
6840  */
6841 static uint32_t dp_get_tx_pending(struct cdp_pdev *pdev_handle)
6842 {
6843 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6844 
6845 	return qdf_atomic_read(&pdev->num_tx_outstanding);
6846 }
6847 
6848 /**
6849  * dp_get_peer_mac_from_peer_id() - get peer mac
6850  * @pdev_handle: Datapath PDEV handle
6851  * @peer_id: Peer ID
6852  * @peer_mac: MAC addr of PEER
6853  *
6854  * Return: QDF_STATUS
6855  */
6856 static QDF_STATUS dp_get_peer_mac_from_peer_id(struct cdp_soc_t *soc,
6857 					       uint32_t peer_id,
6858 					       uint8_t *peer_mac)
6859 {
6860 	struct dp_peer *peer;
6861 
6862 	if (soc && peer_mac) {
6863 		peer = dp_peer_get_ref_by_id((struct dp_soc *)soc,
6864 					     (uint16_t)peer_id,
6865 					     DP_MOD_ID_CDP);
6866 		if (peer) {
6867 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
6868 				     QDF_MAC_ADDR_SIZE);
6869 			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
6870 			return QDF_STATUS_SUCCESS;
6871 		}
6872 	}
6873 
6874 	return QDF_STATUS_E_FAILURE;
6875 }
6876 
6877 /**
6878  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
6879  * @vdev_handle: Datapath VDEV handle
6880  * @smart_monitor: Flag to denote if its smart monitor mode
6881  *
6882  * Return: 0 on success, not 0 on failure
6883  */
6884 static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_soc_t *soc_hdl,
6885 					   uint8_t vdev_id,
6886 					   uint8_t special_monitor)
6887 {
6888 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6889 	uint32_t mac_id;
6890 	uint32_t mac_for_pdev;
6891 	struct dp_pdev *pdev;
6892 	uint32_t num_entries;
6893 	struct dp_srng *mon_buf_ring;
6894 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
6895 						     DP_MOD_ID_CDP);
6896 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6897 
6898 	if (!vdev)
6899 		return QDF_STATUS_E_FAILURE;
6900 
6901 	pdev = vdev->pdev;
6902 	pdev->monitor_vdev = vdev;
6903 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
6904 		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
6905 		  pdev, pdev->pdev_id, pdev->soc, vdev);
6906 
6907 	/*
6908 	 * do not configure monitor buf ring and filter for smart and
6909 	 * lite monitor
6910 	 * for smart monitor filters are added along with first NAC
6911 	 * for lite monitor required configuration done through
6912 	 * dp_set_pdev_param
6913 	 */
6914 	if (special_monitor) {
6915 		status = QDF_STATUS_SUCCESS;
6916 		goto fail;
6917 	}
6918 
6919 	/*Check if current pdev's monitor_vdev exists */
6920 	if (pdev->monitor_configured) {
6921 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
6922 			  "monitor vap already created vdev=%pK\n", vdev);
6923 		status = QDF_STATUS_E_RESOURCES;
6924 		goto fail;
6925 	}
6926 
6927 	pdev->monitor_configured = true;
6928 
6929 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6930 		mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id,
6931 							  pdev->pdev_id);
6932 		dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev,
6933 						 FALSE);
6934 		/*
6935 		 * Configure low interrupt threshld when monitor mode is
6936 		 * configured.
6937 		 */
6938 		mon_buf_ring = &pdev->soc->rxdma_mon_buf_ring[mac_for_pdev];
6939 		if (mon_buf_ring->hal_srng) {
6940 			num_entries = mon_buf_ring->num_entries;
6941 			hal_set_low_threshold(mon_buf_ring->hal_srng,
6942 					      num_entries >> 3);
6943 			htt_srng_setup(pdev->soc->htt_handle,
6944 				       pdev->pdev_id,
6945 				       mon_buf_ring->hal_srng,
6946 				       RXDMA_MONITOR_BUF);
6947 		}
6948 	}
6949 
6950 	dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_ENABLE);
6951 
6952 	dp_mon_filter_setup_mon_mode(pdev);
6953 	status = dp_mon_filter_update(pdev);
6954 	if (status != QDF_STATUS_SUCCESS) {
6955 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6956 			  FL("Failed to reset monitor filters"));
6957 		dp_mon_filter_reset_mon_mode(pdev);
6958 		pdev->monitor_configured = false;
6959 		pdev->monitor_vdev = NULL;
6960 	}
6961 
6962 fail:
6963 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
6964 	return status;
6965 }
6966 
6967 /**
6968  * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
6969  * @soc: soc handle
6970  * @pdev_id: id of Datapath PDEV handle
6971  * @filter_val: Flag to select Filter for monitor mode
6972  * Return: 0 on success, not 0 on failure
6973  */
6974 static QDF_STATUS
6975 dp_pdev_set_advance_monitor_filter(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
6976 				   struct cdp_monitor_filter *filter_val)
6977 {
6978 	/* Many monitor VAPs can exists in a system but only one can be up at
6979 	 * anytime
6980 	 */
6981 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6982 	struct dp_vdev *vdev;
6983 	struct dp_pdev *pdev =
6984 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
6985 						   pdev_id);
6986 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6987 
6988 	if (!pdev)
6989 		return QDF_STATUS_E_FAILURE;
6990 
6991 	vdev = pdev->monitor_vdev;
6992 
6993 	if (!vdev)
6994 		return QDF_STATUS_E_FAILURE;
6995 
6996 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
6997 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
6998 		pdev, pdev_id, soc, vdev);
6999 
7000 	/*Check if current pdev's monitor_vdev exists */
7001 	if (!pdev->monitor_vdev) {
7002 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7003 			"vdev=%pK", vdev);
7004 		qdf_assert(vdev);
7005 	}
7006 
7007 	/* update filter mode, type in pdev structure */
7008 	pdev->mon_filter_mode = filter_val->mode;
7009 	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
7010 	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
7011 	pdev->fp_data_filter = filter_val->fp_data;
7012 	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
7013 	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
7014 	pdev->mo_data_filter = filter_val->mo_data;
7015 
7016 	dp_mon_filter_setup_mon_mode(pdev);
7017 	status = dp_mon_filter_update(pdev);
7018 	if (status != QDF_STATUS_SUCCESS) {
7019 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7020 			  FL("Failed to set filter for advance mon mode"));
7021 		dp_mon_filter_reset_mon_mode(pdev);
7022 	}
7023 
7024 	return status;
7025 }
7026 
7027 /**
7028  * dp_deliver_tx_mgmt() - Deliver mgmt frame for tx capture
7029  * @cdp_soc : data path soc handle
7030  * @pdev_id : pdev_id
7031  * @nbuf: Management frame buffer
7032  */
7033 static QDF_STATUS
7034 dp_deliver_tx_mgmt(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, qdf_nbuf_t nbuf)
7035 {
7036 	struct dp_pdev *pdev =
7037 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
7038 						   pdev_id);
7039 
7040 	if (!pdev)
7041 		return QDF_STATUS_E_FAILURE;
7042 
7043 	dp_deliver_mgmt_frm(pdev, nbuf);
7044 
7045 	return QDF_STATUS_SUCCESS;
7046 }
7047 
7048 /**
7049  * dp_set_bsscolor() - sets bsscolor for tx capture
7050  * @pdev: Datapath PDEV handle
7051  * @bsscolor: new bsscolor
7052  */
7053 static void
7054 dp_mon_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor)
7055 {
7056 	pdev->rx_mon_recv_status.bsscolor = bsscolor;
7057 }
7058 
7059 /**
7060  * dp_pdev_get_filter_ucast_data() - get DP PDEV monitor ucast filter
7061  * @soc : data path soc handle
7062  * @pdev_id : pdev_id
7063  * Return: true on ucast filter flag set
7064  */
7065 static bool dp_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle)
7066 {
7067 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7068 
7069 	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
7070 	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
7071 		return true;
7072 
7073 	return false;
7074 }
7075 
7076 /**
7077  * dp_pdev_get_filter_mcast_data() - get DP PDEV monitor mcast filter
7078  * @pdev_handle: Datapath PDEV handle
7079  * Return: true on mcast filter flag set
7080  */
7081 static bool dp_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle)
7082 {
7083 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7084 
7085 	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
7086 	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
7087 		return true;
7088 
7089 	return false;
7090 }
7091 
7092 /**
7093  * dp_pdev_get_filter_non_data() - get DP PDEV monitor non_data filter
7094  * @pdev_handle: Datapath PDEV handle
7095  * Return: true on non data filter flag set
7096  */
7097 static bool dp_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle)
7098 {
7099 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7100 
7101 	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
7102 	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
7103 		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
7104 		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
7105 			return true;
7106 		}
7107 	}
7108 
7109 	return false;
7110 }
7111 
7112 #ifdef MESH_MODE_SUPPORT
7113 static
7114 void dp_vdev_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
7115 {
7116 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
7117 
7118 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7119 		FL("val %d"), val);
7120 	vdev->mesh_vdev = val;
7121 }
7122 
7123 /*
7124  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
7125  * @vdev_hdl: virtual device object
7126  * @val: value to be set
7127  *
7128  * Return: void
7129  */
7130 static
7131 void dp_vdev_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
7132 {
7133 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
7134 
7135 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
7136 		FL("val %d"), val);
7137 	vdev->mesh_rx_filter = val;
7138 }
7139 #endif
7140 
7141 #ifdef VDEV_PEER_PROTOCOL_COUNT
7142 static void dp_enable_vdev_peer_protocol_count(struct cdp_soc_t *soc_hdl,
7143 					       int8_t vdev_id,
7144 					       bool enable)
7145 {
7146 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7147 	struct dp_vdev *vdev;
7148 
7149 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
7150 	if (!vdev)
7151 		return;
7152 
7153 	dp_info("enable %d vdev_id %d", enable, vdev_id);
7154 	vdev->peer_protocol_count_track = enable;
7155 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7156 }
7157 
7158 static void dp_enable_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
7159 						   int8_t vdev_id,
7160 						   int drop_mask)
7161 {
7162 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7163 	struct dp_vdev *vdev;
7164 
7165 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
7166 	if (!vdev)
7167 		return;
7168 
7169 	dp_info("drop_mask %d vdev_id %d", drop_mask, vdev_id);
7170 	vdev->peer_protocol_count_dropmask = drop_mask;
7171 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7172 }
7173 
7174 static int dp_is_vdev_peer_protocol_count_enabled(struct cdp_soc_t *soc_hdl,
7175 						  int8_t vdev_id)
7176 {
7177 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7178 	struct dp_vdev *vdev;
7179 	int peer_protocol_count_track;
7180 
7181 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
7182 	if (!vdev)
7183 		return 0;
7184 
7185 	dp_info("enable %d vdev_id %d", vdev->peer_protocol_count_track,
7186 		vdev_id);
7187 	peer_protocol_count_track =
7188 		vdev->peer_protocol_count_track;
7189 
7190 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7191 	return peer_protocol_count_track;
7192 }
7193 
7194 static int dp_get_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc_hdl,
7195 					       int8_t vdev_id)
7196 {
7197 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7198 	struct dp_vdev *vdev;
7199 	int peer_protocol_count_dropmask;
7200 
7201 	vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_CDP);
7202 	if (!vdev)
7203 		return 0;
7204 
7205 	dp_info("drop_mask %d vdev_id %d", vdev->peer_protocol_count_dropmask,
7206 		vdev_id);
7207 	peer_protocol_count_dropmask =
7208 		vdev->peer_protocol_count_dropmask;
7209 
7210 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7211 	return peer_protocol_count_dropmask;
7212 }
7213 
7214 #endif
7215 
7216 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data)
7217 {
7218 	uint8_t pdev_count;
7219 
7220 	for (pdev_count = 0; pdev_count < MAX_PDEV_CNT; pdev_count++) {
7221 		if (soc->pdev_list[pdev_count] &&
7222 		    soc->pdev_list[pdev_count] == data)
7223 			return true;
7224 	}
7225 	return false;
7226 }
7227 
7228 /**
7229  * dp_rx_bar_stats_cb(): BAR received stats callback
7230  * @soc: SOC handle
7231  * @cb_ctxt: Call back context
7232  * @reo_status: Reo status
7233  *
7234  * return: void
7235  */
7236 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
7237 	union hal_reo_status *reo_status)
7238 {
7239 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
7240 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
7241 
7242 	if (!dp_check_pdev_exists(soc, pdev)) {
7243 		dp_err_rl("pdev doesn't exist");
7244 		return;
7245 	}
7246 
7247 	if (!qdf_atomic_read(&soc->cmn_init_done))
7248 		return;
7249 
7250 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
7251 		DP_PRINT_STATS("REO stats failure %d",
7252 			       queue_status->header.status);
7253 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
7254 		return;
7255 	}
7256 
7257 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
7258 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
7259 
7260 }
7261 
7262 /**
7263  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
7264  * @vdev: DP VDEV handle
7265  *
7266  * return: void
7267  */
7268 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
7269 			     struct cdp_vdev_stats *vdev_stats)
7270 {
7271 	struct dp_soc *soc = NULL;
7272 
7273 	if (!vdev || !vdev->pdev)
7274 		return;
7275 
7276 	soc = vdev->pdev->soc;
7277 
7278 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
7279 
7280 	dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, vdev_stats,
7281 			     DP_MOD_ID_GENERIC_STATS);
7282 
7283 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7284 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
7285 			     vdev_stats, vdev->vdev_id,
7286 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
7287 #endif
7288 }
7289 
7290 void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
7291 {
7292 	struct dp_vdev *vdev = NULL;
7293 	struct dp_soc *soc;
7294 	struct cdp_vdev_stats *vdev_stats =
7295 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
7296 
7297 	if (!vdev_stats) {
7298 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7299 			  "DP alloc failure - unable to get alloc vdev stats");
7300 		return;
7301 	}
7302 
7303 	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
7304 	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
7305 	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
7306 
7307 	if (pdev->mcopy_mode)
7308 		DP_UPDATE_STATS(pdev, pdev->invalid_peer);
7309 
7310 	soc = pdev->soc;
7311 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
7312 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
7313 
7314 		dp_aggregate_vdev_stats(vdev, vdev_stats);
7315 		dp_update_pdev_stats(pdev, vdev_stats);
7316 		dp_update_pdev_ingress_stats(pdev, vdev);
7317 	}
7318 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
7319 	qdf_mem_free(vdev_stats);
7320 
7321 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7322 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
7323 			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
7324 #endif
7325 }
7326 
7327 /**
7328  * dp_vdev_getstats() - get vdev packet level stats
7329  * @vdev_handle: Datapath VDEV handle
7330  * @stats: cdp network device stats structure
7331  *
7332  * Return: QDF_STATUS
7333  */
7334 static QDF_STATUS dp_vdev_getstats(struct cdp_vdev *vdev_handle,
7335 				   struct cdp_dev_stats *stats)
7336 {
7337 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7338 	struct dp_pdev *pdev;
7339 	struct dp_soc *soc;
7340 	struct cdp_vdev_stats *vdev_stats;
7341 
7342 	if (!vdev)
7343 		return QDF_STATUS_E_FAILURE;
7344 
7345 	pdev = vdev->pdev;
7346 	if (!pdev)
7347 		return QDF_STATUS_E_FAILURE;
7348 
7349 	soc = pdev->soc;
7350 
7351 	vdev_stats = qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
7352 
7353 	if (!vdev_stats) {
7354 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7355 			  "DP alloc failure - unable to get alloc vdev stats");
7356 		return QDF_STATUS_E_FAILURE;
7357 	}
7358 
7359 	dp_aggregate_vdev_stats(vdev, vdev_stats);
7360 
7361 	stats->tx_packets = vdev_stats->tx_i.rcvd.num;
7362 	stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
7363 
7364 	stats->tx_errors = vdev_stats->tx.tx_failed +
7365 		vdev_stats->tx_i.dropped.dropped_pkt.num;
7366 	stats->tx_dropped = stats->tx_errors;
7367 
7368 	stats->rx_packets = vdev_stats->rx.unicast.num +
7369 		vdev_stats->rx.multicast.num +
7370 		vdev_stats->rx.bcast.num;
7371 	stats->rx_bytes = vdev_stats->rx.unicast.bytes +
7372 		vdev_stats->rx.multicast.bytes +
7373 		vdev_stats->rx.bcast.bytes;
7374 
7375 	qdf_mem_free(vdev_stats);
7376 
7377 	return QDF_STATUS_SUCCESS;
7378 }
7379 
7380 /**
7381  * dp_pdev_getstats() - get pdev packet level stats
7382  * @pdev_handle: Datapath PDEV handle
7383  * @stats: cdp network device stats structure
7384  *
7385  * Return: QDF_STATUS
7386  */
7387 static void dp_pdev_getstats(struct cdp_pdev *pdev_handle,
7388 			     struct cdp_dev_stats *stats)
7389 {
7390 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7391 
7392 	dp_aggregate_pdev_stats(pdev);
7393 
7394 	stats->tx_packets = pdev->stats.tx_i.rcvd.num;
7395 	stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
7396 
7397 	stats->tx_errors = pdev->stats.tx.tx_failed +
7398 		pdev->stats.tx_i.dropped.dropped_pkt.num;
7399 	stats->tx_dropped = stats->tx_errors;
7400 
7401 	stats->rx_packets = pdev->stats.rx.unicast.num +
7402 		pdev->stats.rx.multicast.num +
7403 		pdev->stats.rx.bcast.num;
7404 	stats->rx_bytes = pdev->stats.rx.unicast.bytes +
7405 		pdev->stats.rx.multicast.bytes +
7406 		pdev->stats.rx.bcast.bytes;
7407 	stats->rx_errors = pdev->stats.err.desc_alloc_fail +
7408 		pdev->stats.err.ip_csum_err +
7409 		pdev->stats.err.tcp_udp_csum_err +
7410 		pdev->stats.rx.err.mic_err +
7411 		pdev->stats.rx.err.decrypt_err +
7412 		pdev->stats.err.rxdma_error +
7413 		pdev->stats.err.reo_error;
7414 	stats->rx_dropped = pdev->stats.dropped.msdu_not_done +
7415 		pdev->stats.dropped.mec +
7416 		pdev->stats.dropped.mesh_filter +
7417 		pdev->stats.dropped.wifi_parse +
7418 		pdev->stats.dropped.mon_rx_drop +
7419 		pdev->stats.dropped.mon_radiotap_update_err;
7420 }
7421 
7422 /**
7423  * dp_get_device_stats() - get interface level packet stats
7424  * @soc: soc handle
7425  * @id : vdev_id or pdev_id based on type
7426  * @stats: cdp network device stats structure
7427  * @type: device type pdev/vdev
7428  *
7429  * Return: QDF_STATUS
7430  */
7431 static QDF_STATUS dp_get_device_stats(struct cdp_soc_t *soc_hdl, uint8_t id,
7432 				      struct cdp_dev_stats *stats,
7433 				      uint8_t type)
7434 {
7435 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
7436 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
7437 	struct dp_vdev *vdev;
7438 
7439 	switch (type) {
7440 	case UPDATE_VDEV_STATS:
7441 		vdev = dp_vdev_get_ref_by_id(soc, id, DP_MOD_ID_CDP);
7442 
7443 		if (vdev) {
7444 			status = dp_vdev_getstats((struct cdp_vdev *)vdev,
7445 						  stats);
7446 			dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
7447 		}
7448 		return status;
7449 	case UPDATE_PDEV_STATS:
7450 		{
7451 			struct dp_pdev *pdev =
7452 				dp_get_pdev_from_soc_pdev_id_wifi3(
7453 						(struct dp_soc *)soc,
7454 						 id);
7455 			if (pdev) {
7456 				dp_pdev_getstats((struct cdp_pdev *)pdev,
7457 						 stats);
7458 				return QDF_STATUS_SUCCESS;
7459 			}
7460 		}
7461 		break;
7462 	default:
7463 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7464 			"apstats cannot be updated for this input "
7465 			"type %d", type);
7466 		break;
7467 	}
7468 
7469 	return QDF_STATUS_E_FAILURE;
7470 }
7471 
7472 const
7473 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
7474 {
7475 	switch (ring_type) {
7476 	case REO_DST:
7477 		return "Reo_dst";
7478 	case REO_EXCEPTION:
7479 		return "Reo_exception";
7480 	case REO_CMD:
7481 		return "Reo_cmd";
7482 	case REO_REINJECT:
7483 		return "Reo_reinject";
7484 	case REO_STATUS:
7485 		return "Reo_status";
7486 	case WBM2SW_RELEASE:
7487 		return "wbm2sw_release";
7488 	case TCL_DATA:
7489 		return "tcl_data";
7490 	case TCL_CMD_CREDIT:
7491 		return "tcl_cmd_credit";
7492 	case TCL_STATUS:
7493 		return "tcl_status";
7494 	case SW2WBM_RELEASE:
7495 		return "sw2wbm_release";
7496 	case RXDMA_BUF:
7497 		return "Rxdma_buf";
7498 	case RXDMA_DST:
7499 		return "Rxdma_dst";
7500 	case RXDMA_MONITOR_BUF:
7501 		return "Rxdma_monitor_buf";
7502 	case RXDMA_MONITOR_DESC:
7503 		return "Rxdma_monitor_desc";
7504 	case RXDMA_MONITOR_STATUS:
7505 		return "Rxdma_monitor_status";
7506 	default:
7507 		dp_err("Invalid ring type");
7508 		break;
7509 	}
7510 	return "Invalid";
7511 }
7512 
7513 /*
7514  * dp_print_napi_stats(): NAPI stats
7515  * @soc - soc handle
7516  */
7517 void dp_print_napi_stats(struct dp_soc *soc)
7518 {
7519 	hif_print_napi_stats(soc->hif_handle);
7520 }
7521 
7522 /**
7523  * dp_txrx_host_peer_stats_clr): Reinitialize the txrx peer stats
7524  * @soc: Datapath soc
7525  * @peer: Datatpath peer
7526  * @arg: argument to iter function
7527  *
7528  * Return: QDF_STATUS
7529  */
7530 static inline void
7531 dp_txrx_host_peer_stats_clr(struct dp_soc *soc,
7532 			    struct dp_peer *peer,
7533 			    void *arg)
7534 {
7535 	struct dp_rx_tid *rx_tid;
7536 	uint8_t tid;
7537 
7538 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
7539 		rx_tid = &peer->rx_tid[tid];
7540 		DP_STATS_CLR(rx_tid);
7541 	}
7542 	DP_STATS_CLR(peer);
7543 
7544 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7545 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, peer->vdev->pdev->soc,
7546 			     &peer->stats,  peer->peer_id,
7547 			     UPDATE_PEER_STATS, peer->vdev->pdev->pdev_id);
7548 #endif
7549 }
7550 
7551 /**
7552  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
7553  * @vdev: DP_VDEV handle
7554  * @dp_soc: DP_SOC handle
7555  *
7556  * Return: QDF_STATUS
7557  */
7558 static inline QDF_STATUS
7559 dp_txrx_host_stats_clr(struct dp_vdev *vdev, struct dp_soc *soc)
7560 {
7561 	if (!vdev || !vdev->pdev)
7562 		return QDF_STATUS_E_FAILURE;
7563 
7564 	/*
7565 	 * if NSS offload is enabled, then send message
7566 	 * to NSS FW to clear the stats. Once NSS FW clears the statistics
7567 	 * then clear host statistics.
7568 	 */
7569 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
7570 		if (soc->cdp_soc.ol_ops->nss_stats_clr)
7571 			soc->cdp_soc.ol_ops->nss_stats_clr(soc->ctrl_psoc,
7572 							   vdev->vdev_id);
7573 	}
7574 
7575 	DP_STATS_CLR(vdev->pdev);
7576 	DP_STATS_CLR(vdev->pdev->soc);
7577 	DP_STATS_CLR(vdev);
7578 
7579 	hif_clear_napi_stats(vdev->pdev->soc->hif_handle);
7580 
7581 	dp_vdev_iterate_peer(vdev, dp_txrx_host_peer_stats_clr, NULL,
7582 			     DP_MOD_ID_GENERIC_STATS);
7583 
7584 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7585 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
7586 			     &vdev->stats,  vdev->vdev_id,
7587 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
7588 #endif
7589 	return QDF_STATUS_SUCCESS;
7590 }
7591 
7592 /*
7593  * dp_get_host_peer_stats()- function to print peer stats
7594  * @soc: dp_soc handle
7595  * @mac_addr: mac address of the peer
7596  *
7597  * Return: QDF_STATUS
7598  */
7599 static QDF_STATUS
7600 dp_get_host_peer_stats(struct cdp_soc_t *soc, uint8_t *mac_addr)
7601 {
7602 	struct dp_peer *peer = NULL;
7603 
7604 	if (!mac_addr) {
7605 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7606 			  "%s: NULL peer mac addr\n", __func__);
7607 		return QDF_STATUS_E_FAILURE;
7608 	}
7609 
7610 	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
7611 				      mac_addr, 0,
7612 				      DP_VDEV_ALL,
7613 				      DP_MOD_ID_CDP);
7614 	if (!peer) {
7615 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7616 			  "%s: Invalid peer\n", __func__);
7617 		return QDF_STATUS_E_FAILURE;
7618 	}
7619 
7620 	dp_print_peer_stats(peer);
7621 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
7622 
7623 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
7624 
7625 	return QDF_STATUS_SUCCESS;
7626 }
7627 
7628 /**
7629  * dp_txrx_stats_help() - Helper function for Txrx_Stats
7630  *
7631  * Return: None
7632  */
7633 static void dp_txrx_stats_help(void)
7634 {
7635 	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
7636 	dp_info("stats_option:");
7637 	dp_info("  1 -- HTT Tx Statistics");
7638 	dp_info("  2 -- HTT Rx Statistics");
7639 	dp_info("  3 -- HTT Tx HW Queue Statistics");
7640 	dp_info("  4 -- HTT Tx HW Sched Statistics");
7641 	dp_info("  5 -- HTT Error Statistics");
7642 	dp_info("  6 -- HTT TQM Statistics");
7643 	dp_info("  7 -- HTT TQM CMDQ Statistics");
7644 	dp_info("  8 -- HTT TX_DE_CMN Statistics");
7645 	dp_info("  9 -- HTT Tx Rate Statistics");
7646 	dp_info(" 10 -- HTT Rx Rate Statistics");
7647 	dp_info(" 11 -- HTT Peer Statistics");
7648 	dp_info(" 12 -- HTT Tx SelfGen Statistics");
7649 	dp_info(" 13 -- HTT Tx MU HWQ Statistics");
7650 	dp_info(" 14 -- HTT RING_IF_INFO Statistics");
7651 	dp_info(" 15 -- HTT SRNG Statistics");
7652 	dp_info(" 16 -- HTT SFM Info Statistics");
7653 	dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
7654 	dp_info(" 18 -- HTT Peer List Details");
7655 	dp_info(" 20 -- Clear Host Statistics");
7656 	dp_info(" 21 -- Host Rx Rate Statistics");
7657 	dp_info(" 22 -- Host Tx Rate Statistics");
7658 	dp_info(" 23 -- Host Tx Statistics");
7659 	dp_info(" 24 -- Host Rx Statistics");
7660 	dp_info(" 25 -- Host AST Statistics");
7661 	dp_info(" 26 -- Host SRNG PTR Statistics");
7662 	dp_info(" 27 -- Host Mon Statistics");
7663 	dp_info(" 28 -- Host REO Queue Statistics");
7664 	dp_info(" 29 -- Host Soc cfg param Statistics");
7665 	dp_info(" 30 -- Host pdev cfg param Statistics");
7666 	dp_info(" 31 -- Host FISA stats");
7667 	dp_info(" 32 -- Host Register Work stats");
7668 }
7669 
7670 /**
7671  * dp_print_host_stats()- Function to print the stats aggregated at host
7672  * @vdev_handle: DP_VDEV handle
7673  * @req: host stats type
7674  * @soc: dp soc handler
7675  *
7676  * Return: 0 on success, print error message in case of failure
7677  */
7678 static int
7679 dp_print_host_stats(struct dp_vdev *vdev,
7680 		    struct cdp_txrx_stats_req *req,
7681 		    struct dp_soc *soc)
7682 {
7683 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
7684 	enum cdp_host_txrx_stats type =
7685 			dp_stats_mapping_table[req->stats][STATS_HOST];
7686 
7687 	dp_aggregate_pdev_stats(pdev);
7688 
7689 	switch (type) {
7690 	case TXRX_CLEAR_STATS:
7691 		dp_txrx_host_stats_clr(vdev, soc);
7692 		break;
7693 	case TXRX_RX_RATE_STATS:
7694 		dp_print_rx_rates(vdev);
7695 		break;
7696 	case TXRX_TX_RATE_STATS:
7697 		dp_print_tx_rates(vdev);
7698 		break;
7699 	case TXRX_TX_HOST_STATS:
7700 		dp_print_pdev_tx_stats(pdev);
7701 		dp_print_soc_tx_stats(pdev->soc);
7702 		break;
7703 	case TXRX_RX_HOST_STATS:
7704 		dp_print_pdev_rx_stats(pdev);
7705 		dp_print_soc_rx_stats(pdev->soc);
7706 		break;
7707 	case TXRX_AST_STATS:
7708 		dp_print_ast_stats(pdev->soc);
7709 		dp_print_peer_table(vdev);
7710 		break;
7711 	case TXRX_SRNG_PTR_STATS:
7712 		dp_print_ring_stats(pdev);
7713 		break;
7714 	case TXRX_RX_MON_STATS:
7715 		dp_print_pdev_rx_mon_stats(pdev);
7716 		break;
7717 	case TXRX_REO_QUEUE_STATS:
7718 		dp_get_host_peer_stats((struct cdp_soc_t *)pdev->soc,
7719 				       req->peer_addr);
7720 		break;
7721 	case TXRX_SOC_CFG_PARAMS:
7722 		dp_print_soc_cfg_params(pdev->soc);
7723 		break;
7724 	case TXRX_PDEV_CFG_PARAMS:
7725 		dp_print_pdev_cfg_params(pdev);
7726 		break;
7727 	case TXRX_NAPI_STATS:
7728 		dp_print_napi_stats(pdev->soc);
7729 		break;
7730 	case TXRX_SOC_INTERRUPT_STATS:
7731 		dp_print_soc_interrupt_stats(pdev->soc);
7732 		break;
7733 	case TXRX_SOC_FSE_STATS:
7734 		dp_rx_dump_fisa_table(pdev->soc);
7735 		break;
7736 	case TXRX_HAL_REG_WRITE_STATS:
7737 		hal_dump_reg_write_stats(pdev->soc->hal_soc);
7738 		hal_dump_reg_write_srng_stats(pdev->soc->hal_soc);
7739 		break;
7740 	default:
7741 		dp_info("Wrong Input For TxRx Host Stats");
7742 		dp_txrx_stats_help();
7743 		break;
7744 	}
7745 	return 0;
7746 }
7747 
7748 /*
7749  * is_ppdu_txrx_capture_enabled() - API to check both pktlog and debug_sniffer
7750  *                              modes are enabled or not.
7751  * @dp_pdev: dp pdev handle.
7752  *
7753  * Return: bool
7754  */
7755 static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
7756 {
7757 	if (!pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
7758 	    !pdev->mcopy_mode)
7759 		return true;
7760 	else
7761 		return false;
7762 }
7763 
7764 /*
7765  *dp_set_bpr_enable() - API to enable/disable bpr feature
7766  *@pdev_handle: DP_PDEV handle.
7767  *@val: Provided value.
7768  *
7769  *Return: 0 for success. nonzero for failure.
7770  */
7771 static QDF_STATUS
7772 dp_set_bpr_enable(struct dp_pdev *pdev, int val)
7773 {
7774 	switch (val) {
7775 	case CDP_BPR_DISABLE:
7776 		pdev->bpr_enable = CDP_BPR_DISABLE;
7777 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
7778 		    !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
7779 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7780 		} else if (pdev->enhanced_stats_en &&
7781 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
7782 			   !pdev->pktlog_ppdu_stats) {
7783 			dp_h2t_cfg_stats_msg_send(pdev,
7784 						  DP_PPDU_STATS_CFG_ENH_STATS,
7785 						  pdev->pdev_id);
7786 		}
7787 		break;
7788 	case CDP_BPR_ENABLE:
7789 		pdev->bpr_enable = CDP_BPR_ENABLE;
7790 		if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
7791 		    !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
7792 			dp_h2t_cfg_stats_msg_send(pdev,
7793 						  DP_PPDU_STATS_CFG_BPR,
7794 						  pdev->pdev_id);
7795 		} else if (pdev->enhanced_stats_en &&
7796 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
7797 			   !pdev->pktlog_ppdu_stats) {
7798 			dp_h2t_cfg_stats_msg_send(pdev,
7799 						  DP_PPDU_STATS_CFG_BPR_ENH,
7800 						  pdev->pdev_id);
7801 		} else if (pdev->pktlog_ppdu_stats) {
7802 			dp_h2t_cfg_stats_msg_send(pdev,
7803 						  DP_PPDU_STATS_CFG_BPR_PKTLOG,
7804 						  pdev->pdev_id);
7805 		}
7806 		break;
7807 	default:
7808 		break;
7809 	}
7810 
7811 	return QDF_STATUS_SUCCESS;
7812 }
7813 
7814 /*
7815  * dp_pdev_tid_stats_ingress_inc
7816  * @pdev: pdev handle
7817  * @val: increase in value
7818  *
7819  * Return: void
7820  */
7821 static void
7822 dp_pdev_tid_stats_ingress_inc(struct dp_pdev *pdev, uint32_t val)
7823 {
7824 	pdev->stats.tid_stats.ingress_stack += val;
7825 }
7826 
7827 /*
7828  * dp_pdev_tid_stats_osif_drop
7829  * @pdev: pdev handle
7830  * @val: increase in value
7831  *
7832  * Return: void
7833  */
7834 static void
7835 dp_pdev_tid_stats_osif_drop(struct dp_pdev *pdev, uint32_t val)
7836 {
7837 	pdev->stats.tid_stats.osif_drop += val;
7838 }
7839 
7840 /*
7841  * dp_config_debug_sniffer()- API to enable/disable debug sniffer
7842  * @pdev: DP_PDEV handle
7843  * @val: user provided value
7844  *
7845  * Return: 0 for success. nonzero for failure.
7846  */
7847 static QDF_STATUS
7848 dp_config_debug_sniffer(struct dp_pdev *pdev, int val)
7849 {
7850 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7851 
7852 	/*
7853 	 * Note: The mirror copy mode cannot co-exist with any other
7854 	 * monitor modes. Hence disabling the filter for this mode will
7855 	 * reset the monitor destination ring filters.
7856 	 */
7857 	if (pdev->mcopy_mode) {
7858 #ifdef FEATURE_PERPKT_INFO
7859 		dp_pdev_disable_mcopy_code(pdev);
7860 		dp_mon_filter_reset_mcopy_mode(pdev);
7861 		status = dp_mon_filter_update(pdev);
7862 		if (status != QDF_STATUS_SUCCESS) {
7863 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7864 				  FL("Failed to reset AM copy mode filters"));
7865 		}
7866 #endif /* FEATURE_PERPKT_INFO */
7867 	}
7868 	switch (val) {
7869 	case 0:
7870 		pdev->tx_sniffer_enable = 0;
7871 		pdev->monitor_configured = false;
7872 
7873 		/*
7874 		 * We don't need to reset the Rx monitor status ring  or call
7875 		 * the API dp_ppdu_ring_reset() if all debug sniffer mode is
7876 		 * disabled. The Rx monitor status ring will be disabled when
7877 		 * the last mode using the monitor status ring get disabled.
7878 		 */
7879 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
7880 		    !pdev->bpr_enable) {
7881 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7882 		} else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
7883 			dp_h2t_cfg_stats_msg_send(pdev,
7884 				DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
7885 		} else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
7886 			dp_h2t_cfg_stats_msg_send(pdev,
7887 						  DP_PPDU_STATS_CFG_BPR_ENH,
7888 						  pdev->pdev_id);
7889 		} else {
7890 			dp_h2t_cfg_stats_msg_send(pdev,
7891 						  DP_PPDU_STATS_CFG_BPR,
7892 						  pdev->pdev_id);
7893 		}
7894 		break;
7895 
7896 	case 1:
7897 		pdev->tx_sniffer_enable = 1;
7898 		pdev->monitor_configured = false;
7899 
7900 		if (!pdev->pktlog_ppdu_stats)
7901 			dp_h2t_cfg_stats_msg_send(pdev,
7902 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
7903 		break;
7904 	case 2:
7905 	case 4:
7906 		if (pdev->monitor_vdev) {
7907 			status = QDF_STATUS_E_RESOURCES;
7908 			break;
7909 		}
7910 
7911 #ifdef FEATURE_PERPKT_INFO
7912 		pdev->mcopy_mode = val;
7913 		pdev->tx_sniffer_enable = 0;
7914 		pdev->monitor_configured = true;
7915 
7916 		/*
7917 		 * Setup the M copy mode filter.
7918 		 */
7919 		dp_mon_filter_setup_mcopy_mode(pdev);
7920 		status = dp_mon_filter_update(pdev);
7921 		if (status != QDF_STATUS_SUCCESS) {
7922 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7923 				  FL("Failed to set M_copy mode filters"));
7924 			dp_mon_filter_reset_mcopy_mode(pdev);
7925 			dp_pdev_disable_mcopy_code(pdev);
7926 			return status;
7927 		}
7928 
7929 		if (!pdev->pktlog_ppdu_stats)
7930 			dp_h2t_cfg_stats_msg_send(pdev,
7931 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
7932 #endif /* FEATURE_PERPKT_INFO */
7933 		break;
7934 
7935 	default:
7936 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7937 			"Invalid value");
7938 		break;
7939 	}
7940 	return status;
7941 }
7942 
7943 #ifdef FEATURE_PERPKT_INFO
7944 /*
7945  * dp_enable_enhanced_stats()- API to enable enhanced statistcs
7946  * @soc_handle: DP_SOC handle
7947  * @pdev_id: id of DP_PDEV handle
7948  *
7949  * Return: QDF_STATUS
7950  */
7951 static QDF_STATUS
7952 dp_enable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
7953 {
7954 	struct dp_pdev *pdev = NULL;
7955 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7956 
7957 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7958 						  pdev_id);
7959 
7960 	if (!pdev)
7961 		return QDF_STATUS_E_FAILURE;
7962 
7963 	if (pdev->enhanced_stats_en == 0)
7964 		dp_cal_client_timer_start(pdev->cal_client_ctx);
7965 
7966 	pdev->enhanced_stats_en = 1;
7967 
7968 	dp_mon_filter_setup_enhanced_stats(pdev);
7969 	status = dp_mon_filter_update(pdev);
7970 	if (status != QDF_STATUS_SUCCESS) {
7971 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7972 			  FL("Failed to set enhanced mode filters"));
7973 		dp_mon_filter_reset_enhanced_stats(pdev);
7974 		dp_cal_client_timer_stop(pdev->cal_client_ctx);
7975 		pdev->enhanced_stats_en = 0;
7976 		return QDF_STATUS_E_FAILURE;
7977 	}
7978 
7979 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
7980 		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
7981 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
7982 		dp_h2t_cfg_stats_msg_send(pdev,
7983 					  DP_PPDU_STATS_CFG_BPR_ENH,
7984 					  pdev->pdev_id);
7985 	}
7986 
7987 	return QDF_STATUS_SUCCESS;
7988 }
7989 
7990 /*
7991  * dp_disable_enhanced_stats()- API to disable enhanced statistcs
7992  *
7993  * @param soc - the soc handle
7994  * @param pdev_id - pdev_id of pdev
7995  * @return - QDF_STATUS
7996  */
7997 static QDF_STATUS
7998 dp_disable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
7999 {
8000 	struct dp_pdev *pdev =
8001 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8002 						   pdev_id);
8003 
8004 	if (!pdev)
8005 		return QDF_STATUS_E_FAILURE;
8006 
8007 	if (pdev->enhanced_stats_en == 1)
8008 		dp_cal_client_timer_stop(pdev->cal_client_ctx);
8009 
8010 	pdev->enhanced_stats_en = 0;
8011 
8012 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
8013 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
8014 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
8015 		dp_h2t_cfg_stats_msg_send(pdev,
8016 					  DP_PPDU_STATS_CFG_BPR,
8017 					  pdev->pdev_id);
8018 	}
8019 
8020 	dp_mon_filter_reset_enhanced_stats(pdev);
8021 	if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
8022 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8023 			  FL("Failed to reset enhanced mode filters"));
8024 	}
8025 
8026 	return QDF_STATUS_SUCCESS;
8027 }
8028 #endif /* FEATURE_PERPKT_INFO */
8029 
8030 /*
8031  * dp_get_fw_peer_stats()- function to print peer stats
8032  * @soc: soc handle
8033  * @pdev_id : id of the pdev handle
8034  * @mac_addr: mac address of the peer
8035  * @cap: Type of htt stats requested
8036  * @is_wait: if set, wait on completion from firmware response
8037  *
8038  * Currently Supporting only MAC ID based requests Only
8039  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
8040  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
8041  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
8042  *
8043  * Return: QDF_STATUS
8044  */
8045 static QDF_STATUS
8046 dp_get_fw_peer_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
8047 		     uint8_t *mac_addr,
8048 		     uint32_t cap, uint32_t is_wait)
8049 {
8050 	int i;
8051 	uint32_t config_param0 = 0;
8052 	uint32_t config_param1 = 0;
8053 	uint32_t config_param2 = 0;
8054 	uint32_t config_param3 = 0;
8055 	struct dp_pdev *pdev =
8056 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8057 						   pdev_id);
8058 
8059 	if (!pdev)
8060 		return QDF_STATUS_E_FAILURE;
8061 
8062 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
8063 	config_param0 |= (1 << (cap + 1));
8064 
8065 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
8066 		config_param1 |= (1 << i);
8067 	}
8068 
8069 	config_param2 |= (mac_addr[0] & 0x000000ff);
8070 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
8071 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
8072 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
8073 
8074 	config_param3 |= (mac_addr[4] & 0x000000ff);
8075 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
8076 
8077 	if (is_wait) {
8078 		qdf_event_reset(&pdev->fw_peer_stats_event);
8079 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
8080 					  config_param0, config_param1,
8081 					  config_param2, config_param3,
8082 					  0, 1, 0);
8083 		qdf_wait_single_event(&pdev->fw_peer_stats_event,
8084 				      DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
8085 	} else {
8086 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
8087 					  config_param0, config_param1,
8088 					  config_param2, config_param3,
8089 					  0, 0, 0);
8090 	}
8091 
8092 	return QDF_STATUS_SUCCESS;
8093 
8094 }
8095 
8096 /* This struct definition will be removed from here
8097  * once it get added in FW headers*/
8098 struct httstats_cmd_req {
8099     uint32_t    config_param0;
8100     uint32_t    config_param1;
8101     uint32_t    config_param2;
8102     uint32_t    config_param3;
8103     int cookie;
8104     u_int8_t    stats_id;
8105 };
8106 
8107 /*
8108  * dp_get_htt_stats: function to process the httstas request
8109  * @soc: DP soc handle
8110  * @pdev_id: id of pdev handle
8111  * @data: pointer to request data
8112  * @data_len: length for request data
8113  *
8114  * return: QDF_STATUS
8115  */
8116 static QDF_STATUS
8117 dp_get_htt_stats(struct cdp_soc_t *soc, uint8_t pdev_id, void *data,
8118 		 uint32_t data_len)
8119 {
8120 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
8121 	struct dp_pdev *pdev =
8122 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8123 						   pdev_id);
8124 
8125 	if (!pdev)
8126 		return QDF_STATUS_E_FAILURE;
8127 
8128 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
8129 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
8130 				req->config_param0, req->config_param1,
8131 				req->config_param2, req->config_param3,
8132 				req->cookie, 0, 0);
8133 
8134 	return QDF_STATUS_SUCCESS;
8135 }
8136 
8137 /**
8138  * dp_set_pdev_tidmap_prty_wifi3(): update tidmap priority in pdev
8139  * @pdev: DP_PDEV handle
8140  * @prio: tidmap priority value passed by the user
8141  *
8142  * Return: QDF_STATUS_SUCCESS on success
8143  */
8144 static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct dp_pdev *pdev,
8145 						uint8_t prio)
8146 {
8147 	struct dp_soc *soc = pdev->soc;
8148 
8149 	soc->tidmap_prty = prio;
8150 
8151 	hal_tx_set_tidmap_prty(soc->hal_soc, prio);
8152 	return QDF_STATUS_SUCCESS;
8153 }
8154 
8155 /*
8156  * dp_get_peer_param: function to get parameters in peer
8157  * @cdp_soc: DP soc handle
8158  * @vdev_id: id of vdev handle
8159  * @peer_mac: peer mac address
8160  * @param: parameter type to be set
8161  * @val : address of buffer
8162  *
8163  * Return: val
8164  */
8165 static QDF_STATUS dp_get_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
8166 				    uint8_t *peer_mac,
8167 				    enum cdp_peer_param_type param,
8168 				    cdp_config_param_type *val)
8169 {
8170 	return QDF_STATUS_SUCCESS;
8171 }
8172 
8173 #ifdef WLAN_ATF_ENABLE
8174 static void dp_set_atf_stats_enable(struct dp_pdev *pdev, bool value)
8175 {
8176 	if (!pdev) {
8177 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8178 			  "Invalid pdev");
8179 		return;
8180 	}
8181 
8182 	pdev->dp_atf_stats_enable = value;
8183 }
8184 #else
8185 static void dp_set_atf_stats_enable(struct dp_pdev *pdev, bool value)
8186 {
8187 }
8188 #endif
8189 
8190 /*
8191  * dp_set_peer_param: function to set parameters in peer
8192  * @cdp_soc: DP soc handle
8193  * @vdev_id: id of vdev handle
8194  * @peer_mac: peer mac address
8195  * @param: parameter type to be set
8196  * @val: value of parameter to be set
8197  *
8198  * Return: 0 for success. nonzero for failure.
8199  */
8200 static QDF_STATUS dp_set_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
8201 				    uint8_t *peer_mac,
8202 				    enum cdp_peer_param_type param,
8203 				    cdp_config_param_type val)
8204 {
8205 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
8206 						      peer_mac, 0, vdev_id,
8207 						      DP_MOD_ID_CDP);
8208 
8209 	if (!peer)
8210 		return QDF_STATUS_E_FAILURE;
8211 
8212 	switch (param) {
8213 	case CDP_CONFIG_NAWDS:
8214 		peer->nawds_enabled = val.cdp_peer_param_nawds;
8215 		break;
8216 	case CDP_CONFIG_NAC:
8217 		peer->nac = !!(val.cdp_peer_param_nac);
8218 		break;
8219 	case CDP_CONFIG_ISOLATION:
8220 		dp_set_peer_isolation(peer, val.cdp_peer_param_isolation);
8221 		break;
8222 	case CDP_CONFIG_IN_TWT:
8223 		peer->in_twt = !!(val.cdp_peer_param_in_twt);
8224 		break;
8225 	default:
8226 		break;
8227 	}
8228 
8229 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8230 
8231 	return QDF_STATUS_SUCCESS;
8232 }
8233 
8234 /*
8235  * dp_get_pdev_param: function to get parameters from pdev
8236  * @cdp_soc: DP soc handle
8237  * @pdev_id: id of pdev handle
8238  * @param: parameter type to be get
8239  * @value : buffer for value
8240  *
8241  * Return: status
8242  */
8243 static QDF_STATUS dp_get_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
8244 				    enum cdp_pdev_param_type param,
8245 				    cdp_config_param_type *val)
8246 {
8247 	struct cdp_pdev *pdev = (struct cdp_pdev *)
8248 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
8249 						   pdev_id);
8250 	if (!pdev)
8251 		return QDF_STATUS_E_FAILURE;
8252 
8253 	switch (param) {
8254 	case CDP_CONFIG_VOW:
8255 		val->cdp_pdev_param_cfg_vow =
8256 				((struct dp_pdev *)pdev)->delay_stats_flag;
8257 		break;
8258 	case CDP_TX_PENDING:
8259 		val->cdp_pdev_param_tx_pending = dp_get_tx_pending(pdev);
8260 		break;
8261 	case CDP_FILTER_MCAST_DATA:
8262 		val->cdp_pdev_param_fltr_mcast =
8263 					dp_pdev_get_filter_mcast_data(pdev);
8264 		break;
8265 	case CDP_FILTER_NO_DATA:
8266 		val->cdp_pdev_param_fltr_none =
8267 					dp_pdev_get_filter_non_data(pdev);
8268 		break;
8269 	case CDP_FILTER_UCAST_DATA:
8270 		val->cdp_pdev_param_fltr_ucast =
8271 					dp_pdev_get_filter_ucast_data(pdev);
8272 		break;
8273 	default:
8274 		return QDF_STATUS_E_FAILURE;
8275 	}
8276 
8277 	return QDF_STATUS_SUCCESS;
8278 }
8279 
8280 /*
8281  * dp_set_pdev_param: function to set parameters in pdev
8282  * @cdp_soc: DP soc handle
8283  * @pdev_id: id of pdev handle
8284  * @param: parameter type to be set
8285  * @val: value of parameter to be set
8286  *
8287  * Return: 0 for success. nonzero for failure.
8288  */
8289 static QDF_STATUS dp_set_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
8290 				    enum cdp_pdev_param_type param,
8291 				    cdp_config_param_type val)
8292 {
8293 	int target_type;
8294 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
8295 	struct dp_pdev *pdev =
8296 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
8297 						   pdev_id);
8298 	if (!pdev)
8299 		return QDF_STATUS_E_FAILURE;
8300 
8301 	target_type = hal_get_target_type(soc->hal_soc);
8302 	switch (target_type) {
8303 	case TARGET_TYPE_QCA6750:
8304 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_5G_LMAC_ID;
8305 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_5G_LMAC_ID;
8306 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_6G_LMAC_ID;
8307 		break;
8308 	default:
8309 		pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_2G_LMAC_ID;
8310 		pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_5G_LMAC_ID;
8311 		pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_6G_LMAC_ID;
8312 		break;
8313 	}
8314 
8315 	switch (param) {
8316 	case CDP_CONFIG_TX_CAPTURE:
8317 		return dp_config_debug_sniffer(pdev,
8318 					       val.cdp_pdev_param_tx_capture);
8319 	case CDP_CONFIG_DEBUG_SNIFFER:
8320 		return dp_config_debug_sniffer(pdev,
8321 					       val.cdp_pdev_param_dbg_snf);
8322 	case CDP_CONFIG_BPR_ENABLE:
8323 		return dp_set_bpr_enable(pdev, val.cdp_pdev_param_bpr_enable);
8324 	case CDP_CONFIG_PRIMARY_RADIO:
8325 		pdev->is_primary = val.cdp_pdev_param_primary_radio;
8326 		break;
8327 	case CDP_CONFIG_CAPTURE_LATENCY:
8328 		pdev->latency_capture_enable = val.cdp_pdev_param_cptr_latcy;
8329 		break;
8330 	case CDP_INGRESS_STATS:
8331 		dp_pdev_tid_stats_ingress_inc(pdev,
8332 					      val.cdp_pdev_param_ingrs_stats);
8333 		break;
8334 	case CDP_OSIF_DROP:
8335 		dp_pdev_tid_stats_osif_drop(pdev,
8336 					    val.cdp_pdev_param_osif_drop);
8337 		break;
8338 	case CDP_CONFIG_ENH_RX_CAPTURE:
8339 		return dp_config_enh_rx_capture(pdev,
8340 						val.cdp_pdev_param_en_rx_cap);
8341 	case CDP_CONFIG_ENH_TX_CAPTURE:
8342 		return dp_config_enh_tx_capture(pdev,
8343 						val.cdp_pdev_param_en_tx_cap);
8344 	case CDP_CONFIG_HMMC_TID_OVERRIDE:
8345 		pdev->hmmc_tid_override_en = val.cdp_pdev_param_hmmc_tid_ovrd;
8346 		break;
8347 	case CDP_CONFIG_HMMC_TID_VALUE:
8348 		pdev->hmmc_tid = val.cdp_pdev_param_hmmc_tid;
8349 		break;
8350 	case CDP_CHAN_NOISE_FLOOR:
8351 		pdev->chan_noise_floor = val.cdp_pdev_param_chn_noise_flr;
8352 		break;
8353 	case CDP_TIDMAP_PRTY:
8354 		dp_set_pdev_tidmap_prty_wifi3(pdev,
8355 					      val.cdp_pdev_param_tidmap_prty);
8356 		break;
8357 	case CDP_FILTER_NEIGH_PEERS:
8358 		dp_set_filter_neigh_peers(pdev,
8359 					  val.cdp_pdev_param_fltr_neigh_peers);
8360 		break;
8361 	case CDP_MONITOR_CHANNEL:
8362 		pdev->mon_chan_num = val.cdp_pdev_param_monitor_chan;
8363 		break;
8364 	case CDP_MONITOR_FREQUENCY:
8365 		pdev->mon_chan_freq = val.cdp_pdev_param_mon_freq;
8366 		pdev->mon_chan_band =
8367 				wlan_reg_freq_to_band(pdev->mon_chan_freq);
8368 		break;
8369 	case CDP_CONFIG_BSS_COLOR:
8370 		dp_mon_set_bsscolor(pdev, val.cdp_pdev_param_bss_color);
8371 		break;
8372 	case CDP_SET_ATF_STATS_ENABLE:
8373 		dp_set_atf_stats_enable(pdev,
8374 					val.cdp_pdev_param_atf_stats_enable);
8375 		break;
8376 	default:
8377 		return QDF_STATUS_E_INVAL;
8378 	}
8379 	return QDF_STATUS_SUCCESS;
8380 }
8381 
8382 #ifdef QCA_PEER_EXT_STATS
8383 static void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
8384 					  qdf_nbuf_t nbuf)
8385 {
8386 	struct dp_peer *peer = NULL;
8387 	uint16_t peer_id, ring_id;
8388 	uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
8389 	struct cdp_peer_ext_stats *pext_stats = NULL;
8390 
8391 	peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
8392 	if (peer_id > soc->max_peers)
8393 		return;
8394 
8395 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
8396 	if (qdf_unlikely(!peer))
8397 		return;
8398 
8399 	if (qdf_likely(peer->pext_stats)) {
8400 		pext_stats = peer->pext_stats;
8401 		ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
8402 		dp_rx_compute_tid_delay(&pext_stats->delay_stats[tid][ring_id],
8403 					nbuf);
8404 	}
8405 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8406 }
8407 #else
8408 static inline void dp_rx_update_peer_delay_stats(struct dp_soc *soc,
8409 						 qdf_nbuf_t nbuf)
8410 {
8411 }
8412 #endif
8413 
8414 /*
8415  * dp_calculate_delay_stats: function to get rx delay stats
8416  * @cdp_soc: DP soc handle
8417  * @vdev_id: id of DP vdev handle
8418  * @nbuf: skb
8419  *
8420  * Return: QDF_STATUS
8421  */
8422 static QDF_STATUS
8423 dp_calculate_delay_stats(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
8424 			 qdf_nbuf_t nbuf)
8425 {
8426 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
8427 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8428 						     DP_MOD_ID_CDP);
8429 
8430 	if (!vdev)
8431 		return QDF_STATUS_SUCCESS;
8432 
8433 	if (vdev->pdev->delay_stats_flag)
8434 		dp_rx_compute_delay(vdev, nbuf);
8435 	else
8436 		dp_rx_update_peer_delay_stats(soc, nbuf);
8437 
8438 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8439 	return QDF_STATUS_SUCCESS;
8440 }
8441 
8442 /*
8443  * dp_get_vdev_param: function to get parameters from vdev
8444  * @cdp_soc : DP soc handle
8445  * @vdev_id: id of DP vdev handle
8446  * @param: parameter type to get value
8447  * @val: buffer address
8448  *
8449  * return: status
8450  */
8451 static QDF_STATUS dp_get_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
8452 				    enum cdp_vdev_param_type param,
8453 				    cdp_config_param_type *val)
8454 {
8455 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
8456 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8457 						     DP_MOD_ID_CDP);
8458 
8459 	if (!vdev)
8460 		return QDF_STATUS_E_FAILURE;
8461 
8462 	switch (param) {
8463 	case CDP_ENABLE_WDS:
8464 		val->cdp_vdev_param_wds = vdev->wds_enabled;
8465 		break;
8466 	case CDP_ENABLE_MEC:
8467 		val->cdp_vdev_param_mec = vdev->mec_enabled;
8468 		break;
8469 	case CDP_ENABLE_DA_WAR:
8470 		val->cdp_vdev_param_da_war = vdev->pdev->soc->da_war_enabled;
8471 		break;
8472 	default:
8473 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8474 			  "param value %d is wrong\n",
8475 			  param);
8476 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8477 		return QDF_STATUS_E_FAILURE;
8478 	}
8479 
8480 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8481 	return QDF_STATUS_SUCCESS;
8482 }
8483 
8484 /*
8485  * dp_set_vdev_param: function to set parameters in vdev
8486  * @cdp_soc : DP soc handle
8487  * @vdev_id: id of DP vdev handle
8488  * @param: parameter type to get value
8489  * @val: value
8490  *
8491  * return: QDF_STATUS
8492  */
8493 static QDF_STATUS
8494 dp_set_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
8495 		  enum cdp_vdev_param_type param, cdp_config_param_type val)
8496 {
8497 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
8498 	struct dp_vdev *vdev =
8499 		dp_vdev_get_ref_by_id(dsoc, vdev_id, DP_MOD_ID_CDP);
8500 	uint32_t var = 0;
8501 
8502 	if (!vdev)
8503 		return QDF_STATUS_E_FAILURE;
8504 
8505 	switch (param) {
8506 	case CDP_ENABLE_WDS:
8507 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8508 			  "wds_enable %d for vdev(%pK) id(%d)\n",
8509 			  val.cdp_vdev_param_wds, vdev, vdev->vdev_id);
8510 		vdev->wds_enabled = val.cdp_vdev_param_wds;
8511 		break;
8512 	case CDP_ENABLE_MEC:
8513 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8514 			  "mec_enable %d for vdev(%pK) id(%d)\n",
8515 			  val.cdp_vdev_param_mec, vdev, vdev->vdev_id);
8516 		vdev->mec_enabled = val.cdp_vdev_param_mec;
8517 		break;
8518 	case CDP_ENABLE_DA_WAR:
8519 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8520 			  "da_war_enable %d for vdev(%pK) id(%d)\n",
8521 			  val.cdp_vdev_param_da_war, vdev, vdev->vdev_id);
8522 		vdev->pdev->soc->da_war_enabled = val.cdp_vdev_param_da_war;
8523 		dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
8524 					     vdev->pdev->soc));
8525 		break;
8526 	case CDP_ENABLE_NAWDS:
8527 		vdev->nawds_enabled = val.cdp_vdev_param_nawds;
8528 		break;
8529 	case CDP_ENABLE_MCAST_EN:
8530 		vdev->mcast_enhancement_en = val.cdp_vdev_param_mcast_en;
8531 		break;
8532 	case CDP_ENABLE_PROXYSTA:
8533 		vdev->proxysta_vdev = val.cdp_vdev_param_proxysta;
8534 		break;
8535 	case CDP_UPDATE_TDLS_FLAGS:
8536 		vdev->tdls_link_connected = val.cdp_vdev_param_tdls_flags;
8537 		break;
8538 	case CDP_CFG_WDS_AGING_TIMER:
8539 		var = val.cdp_vdev_param_aging_tmr;
8540 		if (!var)
8541 			qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
8542 		else if (var != vdev->wds_aging_timer_val)
8543 			qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, var);
8544 
8545 		vdev->wds_aging_timer_val = var;
8546 		break;
8547 	case CDP_ENABLE_AP_BRIDGE:
8548 		if (wlan_op_mode_sta != vdev->opmode)
8549 			vdev->ap_bridge_enabled = val.cdp_vdev_param_ap_brdg_en;
8550 		else
8551 			vdev->ap_bridge_enabled = false;
8552 		break;
8553 	case CDP_ENABLE_CIPHER:
8554 		vdev->sec_type = val.cdp_vdev_param_cipher_en;
8555 		break;
8556 	case CDP_ENABLE_QWRAP_ISOLATION:
8557 		vdev->isolation_vdev = val.cdp_vdev_param_qwrap_isolation;
8558 		break;
8559 	case CDP_UPDATE_MULTIPASS:
8560 		vdev->multipass_en = val.cdp_vdev_param_update_multipass;
8561 		break;
8562 	case CDP_TX_ENCAP_TYPE:
8563 		vdev->tx_encap_type = val.cdp_vdev_param_tx_encap;
8564 		break;
8565 	case CDP_RX_DECAP_TYPE:
8566 		vdev->rx_decap_type = val.cdp_vdev_param_rx_decap;
8567 		break;
8568 	case CDP_TID_VDEV_PRTY:
8569 		vdev->tidmap_prty = val.cdp_vdev_param_tidmap_prty;
8570 		break;
8571 	case CDP_TIDMAP_TBL_ID:
8572 		vdev->tidmap_tbl_id = val.cdp_vdev_param_tidmap_tbl_id;
8573 		break;
8574 #ifdef MESH_MODE_SUPPORT
8575 	case CDP_MESH_RX_FILTER:
8576 		dp_vdev_set_mesh_rx_filter((struct cdp_vdev *)vdev,
8577 					   val.cdp_vdev_param_mesh_rx_filter);
8578 		break;
8579 	case CDP_MESH_MODE:
8580 		dp_vdev_set_mesh_mode((struct cdp_vdev *)vdev,
8581 				      val.cdp_vdev_param_mesh_mode);
8582 		break;
8583 #endif
8584 	case CDP_ENABLE_CSUM:
8585 		dp_info("vdev_id %d enable Checksum %d", vdev_id,
8586 			val.cdp_enable_tx_checksum);
8587 		vdev->csum_enabled = val.cdp_enable_tx_checksum;
8588 		break;
8589 	default:
8590 		break;
8591 	}
8592 
8593 	dp_tx_vdev_update_search_flags((struct dp_vdev *)vdev);
8594 	dp_vdev_unref_delete(dsoc, vdev, DP_MOD_ID_CDP);
8595 
8596 	return QDF_STATUS_SUCCESS;
8597 }
8598 
8599 /*
8600  * dp_set_psoc_param: function to set parameters in psoc
8601  * @cdp_soc : DP soc handle
8602  * @param: parameter type to be set
8603  * @val: value of parameter to be set
8604  *
8605  * return: QDF_STATUS
8606  */
8607 static QDF_STATUS
8608 dp_set_psoc_param(struct cdp_soc_t *cdp_soc,
8609 		  enum cdp_psoc_param_type param, cdp_config_param_type val)
8610 {
8611 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
8612 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->wlan_cfg_ctx;
8613 
8614 	switch (param) {
8615 	case CDP_ENABLE_RATE_STATS:
8616 		soc->wlanstats_enabled = val.cdp_psoc_param_en_rate_stats;
8617 		break;
8618 	case CDP_SET_NSS_CFG:
8619 		wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx,
8620 					    val.cdp_psoc_param_en_nss_cfg);
8621 		/*
8622 		 * TODO: masked out based on the per offloaded radio
8623 		 */
8624 		switch (val.cdp_psoc_param_en_nss_cfg) {
8625 		case dp_nss_cfg_default:
8626 			break;
8627 		case dp_nss_cfg_first_radio:
8628 		/*
8629 		 * This configuration is valid for single band radio which
8630 		 * is also NSS offload.
8631 		 */
8632 		case dp_nss_cfg_dbdc:
8633 		case dp_nss_cfg_dbtc:
8634 			wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
8635 			wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
8636 			wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
8637 			wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
8638 			break;
8639 		default:
8640 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8641 				  "Invalid offload config %d",
8642 				  val.cdp_psoc_param_en_nss_cfg);
8643 		}
8644 
8645 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
8646 			  FL("nss-wifi<0> nss config is enabled"));
8647 		break;
8648 	case CDP_SET_PREFERRED_HW_MODE:
8649 		soc->preferred_hw_mode = val.cdp_psoc_param_preferred_hw_mode;
8650 		break;
8651 	default:
8652 		break;
8653 	}
8654 
8655 	return QDF_STATUS_SUCCESS;
8656 }
8657 
8658 /*
8659  * dp_get_psoc_param: function to get parameters in soc
8660  * @cdp_soc : DP soc handle
8661  * @param: parameter type to be set
8662  * @val: address of buffer
8663  *
8664  * return: status
8665  */
8666 static QDF_STATUS dp_get_psoc_param(struct cdp_soc_t *cdp_soc,
8667 				    enum cdp_psoc_param_type param,
8668 				    cdp_config_param_type *val)
8669 {
8670 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
8671 
8672 	if (!soc)
8673 		return QDF_STATUS_E_FAILURE;
8674 
8675 	switch (param) {
8676 	case CDP_CFG_PEER_EXT_STATS:
8677 		val->cdp_psoc_param_pext_stats =
8678 			wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx);
8679 		break;
8680 	default:
8681 		dp_warn("Invalid param");
8682 		break;
8683 	}
8684 
8685 	return QDF_STATUS_SUCCESS;
8686 }
8687 
8688 /**
8689  * dp_peer_update_pkt_capture_params: Set Rx & Tx Capture flags for a peer
8690  * @soc: DP_SOC handle
8691  * @pdev_id: id of DP_PDEV handle
8692  * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode
8693  * @is_tx_pkt_cap_enable: enable/disable/delete/print
8694  * Tx packet capture in monitor mode
8695  * @peer_mac: MAC address for which the above need to be enabled/disabled
8696  *
8697  * Return: Success if Rx & Tx capture is enabled for peer, false otherwise
8698  */
8699 QDF_STATUS
8700 dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc,
8701 				  uint8_t pdev_id,
8702 				  bool is_rx_pkt_cap_enable,
8703 				  uint8_t is_tx_pkt_cap_enable,
8704 				  uint8_t *peer_mac)
8705 {
8706 	struct dp_peer *peer;
8707 	QDF_STATUS status;
8708 	struct dp_pdev *pdev =
8709 			dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8710 							   pdev_id);
8711 	if (!pdev)
8712 		return QDF_STATUS_E_FAILURE;
8713 
8714 	peer = dp_peer_find_hash_find((struct dp_soc *)soc,
8715 				      peer_mac, 0, DP_VDEV_ALL,
8716 				      DP_MOD_ID_CDP);
8717 	if (!peer)
8718 		return QDF_STATUS_E_FAILURE;
8719 
8720 	/* we need to set tx pkt capture for non associated peer */
8721 	status = dp_peer_set_tx_capture_enabled(pdev, peer,
8722 						is_tx_pkt_cap_enable,
8723 						peer_mac);
8724 
8725 	status = dp_peer_set_rx_capture_enabled(pdev, peer,
8726 						is_rx_pkt_cap_enable,
8727 						peer_mac);
8728 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8729 
8730 	return status;
8731 }
8732 
8733 /*
8734  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
8735  * @soc: DP_SOC handle
8736  * @vdev_id: id of DP_VDEV handle
8737  * @map_id:ID of map that needs to be updated
8738  *
8739  * Return: QDF_STATUS
8740  */
8741 static QDF_STATUS dp_set_vdev_dscp_tid_map_wifi3(ol_txrx_soc_handle cdp_soc,
8742 						 uint8_t vdev_id,
8743 						 uint8_t map_id)
8744 {
8745 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(cdp_soc);
8746 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8747 						     DP_MOD_ID_CDP);
8748 	if (vdev) {
8749 		vdev->dscp_tid_map_id = map_id;
8750 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8751 		return QDF_STATUS_SUCCESS;
8752 	}
8753 
8754 	return QDF_STATUS_E_FAILURE;
8755 }
8756 
8757 #ifdef DP_RATETABLE_SUPPORT
8758 static int dp_txrx_get_ratekbps(int preamb, int mcs,
8759 				int htflag, int gintval)
8760 {
8761 	uint32_t rix;
8762 	uint16_t ratecode;
8763 
8764 	return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1,
8765 			       (uint8_t)preamb, 1, &rix, &ratecode);
8766 }
8767 #else
8768 static int dp_txrx_get_ratekbps(int preamb, int mcs,
8769 				int htflag, int gintval)
8770 {
8771 	return 0;
8772 }
8773 #endif
8774 
8775 /* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
8776  * @soc: DP soc handle
8777  * @pdev_id: id of DP pdev handle
8778  * @pdev_stats: buffer to copy to
8779  *
8780  * return : status success/failure
8781  */
8782 static QDF_STATUS
8783 dp_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
8784 		       struct cdp_pdev_stats *pdev_stats)
8785 {
8786 	struct dp_pdev *pdev =
8787 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8788 						   pdev_id);
8789 	if (!pdev)
8790 		return QDF_STATUS_E_FAILURE;
8791 
8792 	dp_aggregate_pdev_stats(pdev);
8793 
8794 	qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats));
8795 	return QDF_STATUS_SUCCESS;
8796 }
8797 
8798 /* dp_txrx_update_vdev_me_stats(): Update vdev ME stats sent from CDP
8799  * @vdev: DP vdev handle
8800  * @buf: buffer containing specific stats structure
8801  *
8802  * Returns: void
8803  */
8804 static void dp_txrx_update_vdev_me_stats(struct dp_vdev *vdev,
8805 					 void *buf)
8806 {
8807 	struct cdp_tx_ingress_stats *host_stats = NULL;
8808 
8809 	if (!buf) {
8810 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8811 			  "Invalid host stats buf");
8812 		return;
8813 	}
8814 	host_stats = (struct cdp_tx_ingress_stats *)buf;
8815 
8816 	DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt,
8817 			 host_stats->mcast_en.mcast_pkt.num,
8818 			 host_stats->mcast_en.mcast_pkt.bytes);
8819 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error,
8820 		     host_stats->mcast_en.dropped_map_error);
8821 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_self_mac,
8822 		     host_stats->mcast_en.dropped_self_mac);
8823 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_send_fail,
8824 		     host_stats->mcast_en.dropped_send_fail);
8825 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast,
8826 		     host_stats->mcast_en.ucast);
8827 	DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc,
8828 		     host_stats->mcast_en.fail_seg_alloc);
8829 	DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail,
8830 		     host_stats->mcast_en.clone_fail);
8831 }
8832 
8833 /* dp_txrx_update_vdev_host_stats(): Update stats sent through CDP
8834  * @soc: DP soc handle
8835  * @vdev_id: id of DP vdev handle
8836  * @buf: buffer containing specific stats structure
8837  * @stats_id: stats type
8838  *
8839  * Returns: QDF_STATUS
8840  */
8841 static QDF_STATUS dp_txrx_update_vdev_host_stats(struct cdp_soc_t *soc_hdl,
8842 						 uint8_t vdev_id,
8843 						 void *buf,
8844 						 uint16_t stats_id)
8845 {
8846 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8847 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
8848 						     DP_MOD_ID_CDP);
8849 
8850 	if (!vdev) {
8851 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8852 			  "Invalid vdev handle");
8853 		return QDF_STATUS_E_FAILURE;
8854 	}
8855 
8856 	switch (stats_id) {
8857 	case DP_VDEV_STATS_PKT_CNT_ONLY:
8858 		break;
8859 	case DP_VDEV_STATS_TX_ME:
8860 		dp_txrx_update_vdev_me_stats(vdev, buf);
8861 		break;
8862 	default:
8863 		qdf_info("Invalid stats_id %d", stats_id);
8864 		break;
8865 	}
8866 
8867 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
8868 	return QDF_STATUS_SUCCESS;
8869 }
8870 
8871 /* dp_txrx_get_peer_stats - will return cdp_peer_stats
8872  * @soc: soc handle
8873  * @vdev_id: id of vdev handle
8874  * @peer_mac: mac of DP_PEER handle
8875  * @peer_stats: buffer to copy to
8876  * return : status success/failure
8877  */
8878 static QDF_STATUS
8879 dp_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
8880 		       uint8_t *peer_mac, struct cdp_peer_stats *peer_stats)
8881 {
8882 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8883 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
8884 						       peer_mac, 0, vdev_id,
8885 						       DP_MOD_ID_CDP);
8886 
8887 	if (!peer)
8888 		return QDF_STATUS_E_FAILURE;
8889 
8890 	qdf_mem_copy(peer_stats, &peer->stats,
8891 		     sizeof(struct cdp_peer_stats));
8892 
8893 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8894 
8895 	return status;
8896 }
8897 
8898 /* dp_txrx_get_peer_stats_param - will return specified cdp_peer_stats
8899  * @param soc - soc handle
8900  * @param vdev_id - vdev_id of vdev object
8901  * @param peer_mac - mac address of the peer
8902  * @param type - enum of required stats
8903  * @param buf - buffer to hold the value
8904  * return : status success/failure
8905  */
8906 static QDF_STATUS
8907 dp_txrx_get_peer_stats_param(struct cdp_soc_t *soc, uint8_t vdev_id,
8908 			     uint8_t *peer_mac, enum cdp_peer_stats_type type,
8909 			     cdp_peer_stats_param_t *buf)
8910 {
8911 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
8912 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
8913 						      peer_mac, 0, vdev_id,
8914 						      DP_MOD_ID_CDP);
8915 
8916 	if (!peer) {
8917 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8918 			  "Invalid Peer for Mac %pM", peer_mac);
8919 		return QDF_STATUS_E_FAILURE;
8920 	} else if (type < cdp_peer_stats_max) {
8921 		switch (type) {
8922 		case cdp_peer_tx_ucast:
8923 			buf->tx_ucast = peer->stats.tx.ucast;
8924 			break;
8925 		case cdp_peer_tx_mcast:
8926 			buf->tx_mcast = peer->stats.tx.mcast;
8927 			break;
8928 		case cdp_peer_tx_rate:
8929 			buf->tx_rate = peer->stats.tx.tx_rate;
8930 			break;
8931 		case cdp_peer_tx_last_tx_rate:
8932 			buf->last_tx_rate = peer->stats.tx.last_tx_rate;
8933 			break;
8934 		case cdp_peer_tx_inactive_time:
8935 			buf->tx_inactive_time = peer->stats.tx.inactive_time;
8936 			break;
8937 		case cdp_peer_tx_ratecode:
8938 			buf->tx_ratecode = peer->stats.tx.tx_ratecode;
8939 			break;
8940 		case cdp_peer_tx_flags:
8941 			buf->tx_flags = peer->stats.tx.tx_flags;
8942 			break;
8943 		case cdp_peer_tx_power:
8944 			buf->tx_power = peer->stats.tx.tx_power;
8945 			break;
8946 		case cdp_peer_rx_rate:
8947 			buf->rx_rate = peer->stats.rx.rx_rate;
8948 			break;
8949 		case cdp_peer_rx_last_rx_rate:
8950 			buf->last_rx_rate = peer->stats.rx.last_rx_rate;
8951 			break;
8952 		case cdp_peer_rx_ratecode:
8953 			buf->rx_ratecode = peer->stats.rx.rx_ratecode;
8954 			break;
8955 		case cdp_peer_rx_ucast:
8956 			buf->rx_ucast = peer->stats.rx.unicast;
8957 			break;
8958 		case cdp_peer_rx_flags:
8959 			buf->rx_flags = peer->stats.rx.rx_flags;
8960 			break;
8961 		case cdp_peer_rx_avg_rssi:
8962 			buf->rx_avg_rssi = peer->stats.rx.avg_rssi;
8963 			break;
8964 		default:
8965 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8966 				  "Invalid value");
8967 			ret = QDF_STATUS_E_FAILURE;
8968 			break;
8969 		}
8970 	} else {
8971 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8972 			  "Invalid value");
8973 		ret = QDF_STATUS_E_FAILURE;
8974 	}
8975 
8976 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
8977 
8978 	return ret;
8979 }
8980 
8981 /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
8982  * @soc: soc handle
8983  * @vdev_id: id of vdev handle
8984  * @peer_mac: mac of DP_PEER handle
8985  *
8986  * return : QDF_STATUS
8987  */
8988 static QDF_STATUS
8989 dp_txrx_reset_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
8990 			 uint8_t *peer_mac)
8991 {
8992 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8993 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
8994 						      peer_mac, 0, vdev_id,
8995 						      DP_MOD_ID_CDP);
8996 
8997 	if (!peer)
8998 		return QDF_STATUS_E_FAILURE;
8999 
9000 	qdf_mem_zero(&peer->stats, sizeof(peer->stats));
9001 
9002 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9003 
9004 	return status;
9005 }
9006 
9007 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
9008  * @vdev_handle: DP_VDEV handle
9009  * @buf: buffer for vdev stats
9010  *
9011  * return : int
9012  */
9013 static int dp_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
9014 				  void *buf, bool is_aggregate)
9015 {
9016 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9017 	struct cdp_vdev_stats *vdev_stats;
9018 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9019 						     DP_MOD_ID_CDP);
9020 
9021 	if (!vdev)
9022 		return 1;
9023 
9024 	vdev_stats = (struct cdp_vdev_stats *)buf;
9025 
9026 	if (is_aggregate) {
9027 		dp_aggregate_vdev_stats(vdev, buf);
9028 	} else {
9029 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
9030 	}
9031 
9032 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9033 	return 0;
9034 }
9035 
9036 /*
9037  * dp_get_total_per(): get total per
9038  * @soc: DP soc handle
9039  * @pdev_id: id of DP_PDEV handle
9040  *
9041  * Return: % error rate using retries per packet and success packets
9042  */
9043 static int dp_get_total_per(struct cdp_soc_t *soc, uint8_t pdev_id)
9044 {
9045 	struct dp_pdev *pdev =
9046 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9047 						   pdev_id);
9048 
9049 	if (!pdev)
9050 		return 0;
9051 
9052 	dp_aggregate_pdev_stats(pdev);
9053 	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
9054 		return 0;
9055 	return ((pdev->stats.tx.retries * 100) /
9056 		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
9057 }
9058 
9059 /*
9060  * dp_txrx_stats_publish(): publish pdev stats into a buffer
9061  * @soc: DP soc handle
9062  * @pdev_id: id of DP_PDEV handle
9063  * @buf: to hold pdev_stats
9064  *
9065  * Return: int
9066  */
9067 static int
9068 dp_txrx_stats_publish(struct cdp_soc_t *soc, uint8_t pdev_id,
9069 		      struct cdp_stats_extd *buf)
9070 {
9071 	struct cdp_txrx_stats_req req = {0,};
9072 	struct dp_pdev *pdev =
9073 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9074 						   pdev_id);
9075 
9076 	if (!pdev)
9077 		return TXRX_STATS_LEVEL_OFF;
9078 
9079 	dp_aggregate_pdev_stats(pdev);
9080 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
9081 	req.cookie_val = 1;
9082 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
9083 				req.param1, req.param2, req.param3, 0,
9084 				req.cookie_val, 0);
9085 
9086 	msleep(DP_MAX_SLEEP_TIME);
9087 
9088 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
9089 	req.cookie_val = 1;
9090 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
9091 				req.param1, req.param2, req.param3, 0,
9092 				req.cookie_val, 0);
9093 
9094 	msleep(DP_MAX_SLEEP_TIME);
9095 	qdf_mem_copy(buf, &pdev->stats, sizeof(struct cdp_pdev_stats));
9096 
9097 	return TXRX_STATS_LEVEL;
9098 }
9099 
9100 /**
9101  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
9102  * @soc: soc handle
9103  * @pdev_id: id of DP_PDEV handle
9104  * @map_id: ID of map that needs to be updated
9105  * @tos: index value in map
9106  * @tid: tid value passed by the user
9107  *
9108  * Return: QDF_STATUS
9109  */
9110 static QDF_STATUS
9111 dp_set_pdev_dscp_tid_map_wifi3(struct cdp_soc_t *soc_handle,
9112 			       uint8_t pdev_id,
9113 			       uint8_t map_id,
9114 			       uint8_t tos, uint8_t tid)
9115 {
9116 	uint8_t dscp;
9117 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9118 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
9119 
9120 	if (!pdev)
9121 		return QDF_STATUS_E_FAILURE;
9122 
9123 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
9124 	pdev->dscp_tid_map[map_id][dscp] = tid;
9125 
9126 	if (map_id < soc->num_hw_dscp_tid_map)
9127 		hal_tx_update_dscp_tid(soc->hal_soc, tid,
9128 				       map_id, dscp);
9129 	else
9130 		return QDF_STATUS_E_FAILURE;
9131 
9132 	return QDF_STATUS_SUCCESS;
9133 }
9134 
9135 /**
9136  * dp_fw_stats_process(): Process TxRX FW stats request
9137  * @vdev_handle: DP VDEV handle
9138  * @req: stats request
9139  *
9140  * return: int
9141  */
9142 static int dp_fw_stats_process(struct dp_vdev *vdev,
9143 			       struct cdp_txrx_stats_req *req)
9144 {
9145 	struct dp_pdev *pdev = NULL;
9146 	uint32_t stats = req->stats;
9147 	uint8_t mac_id = req->mac_id;
9148 
9149 	if (!vdev) {
9150 		DP_TRACE(NONE, "VDEV not found");
9151 		return 1;
9152 	}
9153 	pdev = vdev->pdev;
9154 
9155 	/*
9156 	 * For HTT_DBG_EXT_STATS_RESET command, FW need to config
9157 	 * from param0 to param3 according to below rule:
9158 	 *
9159 	 * PARAM:
9160 	 *   - config_param0 : start_offset (stats type)
9161 	 *   - config_param1 : stats bmask from start offset
9162 	 *   - config_param2 : stats bmask from start offset + 32
9163 	 *   - config_param3 : stats bmask from start offset + 64
9164 	 */
9165 	if (req->stats == CDP_TXRX_STATS_0) {
9166 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
9167 		req->param1 = 0xFFFFFFFF;
9168 		req->param2 = 0xFFFFFFFF;
9169 		req->param3 = 0xFFFFFFFF;
9170 	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
9171 		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
9172 	}
9173 
9174 	if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT) {
9175 		return dp_h2t_ext_stats_msg_send(pdev,
9176 				HTT_DBG_EXT_STATS_PDEV_RX_RATE_EXT,
9177 				req->param0, req->param1, req->param2,
9178 				req->param3, 0, 0, mac_id);
9179 	} else {
9180 		return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
9181 				req->param1, req->param2, req->param3,
9182 				0, 0, mac_id);
9183 	}
9184 }
9185 
9186 /**
9187  * dp_txrx_stats_request - function to map to firmware and host stats
9188  * @soc: soc handle
9189  * @vdev_id: virtual device ID
9190  * @req: stats request
9191  *
9192  * Return: QDF_STATUS
9193  */
9194 static
9195 QDF_STATUS dp_txrx_stats_request(struct cdp_soc_t *soc_handle,
9196 				 uint8_t vdev_id,
9197 				 struct cdp_txrx_stats_req *req)
9198 {
9199 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_handle);
9200 	int host_stats;
9201 	int fw_stats;
9202 	enum cdp_stats stats;
9203 	int num_stats;
9204 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9205 						     DP_MOD_ID_CDP);
9206 	QDF_STATUS status = QDF_STATUS_E_INVAL;
9207 
9208 	if (!vdev || !req) {
9209 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
9210 				"Invalid vdev/req instance");
9211 		status = QDF_STATUS_E_INVAL;
9212 		goto fail0;
9213 	}
9214 
9215 	if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) {
9216 		dp_err("Invalid mac id request");
9217 		status = QDF_STATUS_E_INVAL;
9218 		goto fail0;
9219 	}
9220 
9221 	stats = req->stats;
9222 	if (stats >= CDP_TXRX_MAX_STATS) {
9223 		status = QDF_STATUS_E_INVAL;
9224 		goto fail0;
9225 	}
9226 
9227 	/*
9228 	 * DP_CURR_FW_STATS_AVAIL: no of FW stats currently available
9229 	 *			has to be updated if new FW HTT stats added
9230 	 */
9231 	if (stats > CDP_TXRX_STATS_HTT_MAX)
9232 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
9233 
9234 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
9235 
9236 	if (stats >= num_stats) {
9237 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
9238 			  "%s: Invalid stats option: %d", __func__, stats);
9239 		status = QDF_STATUS_E_INVAL;
9240 		goto fail0;
9241 	}
9242 
9243 	req->stats = stats;
9244 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
9245 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
9246 
9247 	dp_info("stats: %u fw_stats_type: %d host_stats: %d",
9248 		stats, fw_stats, host_stats);
9249 
9250 	if (fw_stats != TXRX_FW_STATS_INVALID) {
9251 		/* update request with FW stats type */
9252 		req->stats = fw_stats;
9253 		status = dp_fw_stats_process(vdev, req);
9254 	} else if ((host_stats != TXRX_HOST_STATS_INVALID) &&
9255 			(host_stats <= TXRX_HOST_STATS_MAX))
9256 		status = dp_print_host_stats(vdev, req, soc);
9257 	else
9258 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
9259 				"Wrong Input for TxRx Stats");
9260 fail0:
9261 	if (vdev)
9262 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9263 	return status;
9264 }
9265 
9266 /*
9267  * dp_txrx_dump_stats() -  Dump statistics
9268  * @value - Statistics option
9269  */
9270 static QDF_STATUS dp_txrx_dump_stats(struct cdp_soc_t *psoc, uint16_t value,
9271 				     enum qdf_stats_verbosity_level level)
9272 {
9273 	struct dp_soc *soc =
9274 		(struct dp_soc *)psoc;
9275 	QDF_STATUS status = QDF_STATUS_SUCCESS;
9276 
9277 	if (!soc) {
9278 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
9279 			"%s: soc is NULL", __func__);
9280 		return QDF_STATUS_E_INVAL;
9281 	}
9282 
9283 	switch (value) {
9284 	case CDP_TXRX_PATH_STATS:
9285 		dp_txrx_path_stats(soc);
9286 		dp_print_soc_interrupt_stats(soc);
9287 		hal_dump_reg_write_stats(soc->hal_soc);
9288 		break;
9289 
9290 	case CDP_RX_RING_STATS:
9291 		dp_print_per_ring_stats(soc);
9292 		break;
9293 
9294 	case CDP_TXRX_TSO_STATS:
9295 		dp_print_tso_stats(soc, level);
9296 		break;
9297 
9298 	case CDP_DUMP_TX_FLOW_POOL_INFO:
9299 		if (level == QDF_STATS_VERBOSITY_LEVEL_HIGH)
9300 			cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
9301 		break;
9302 
9303 	case CDP_DP_NAPI_STATS:
9304 		dp_print_napi_stats(soc);
9305 		break;
9306 
9307 	case CDP_TXRX_DESC_STATS:
9308 		/* TODO: NOT IMPLEMENTED */
9309 		break;
9310 
9311 	case CDP_DP_RX_FISA_STATS:
9312 		dp_rx_dump_fisa_stats(soc);
9313 		break;
9314 
9315 	default:
9316 		status = QDF_STATUS_E_INVAL;
9317 		break;
9318 	}
9319 
9320 	return status;
9321 
9322 }
9323 
9324 /**
9325  * dp_txrx_clear_dump_stats() - clear dumpStats
9326  * @soc- soc handle
9327  * @value - stats option
9328  *
9329  * Return: 0 - Success, non-zero - failure
9330  */
9331 static
9332 QDF_STATUS dp_txrx_clear_dump_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
9333 				    uint8_t value)
9334 {
9335 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9336 	QDF_STATUS status = QDF_STATUS_SUCCESS;
9337 
9338 	if (!soc) {
9339 		dp_err("%s: soc is NULL", __func__);
9340 		return QDF_STATUS_E_INVAL;
9341 	}
9342 
9343 	switch (value) {
9344 	case CDP_TXRX_TSO_STATS:
9345 		dp_txrx_clear_tso_stats(soc);
9346 		break;
9347 
9348 	default:
9349 		status = QDF_STATUS_E_INVAL;
9350 		break;
9351 	}
9352 
9353 	return status;
9354 }
9355 
9356 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
9357 /**
9358  * dp_update_flow_control_parameters() - API to store datapath
9359  *                            config parameters
9360  * @soc: soc handle
9361  * @cfg: ini parameter handle
9362  *
9363  * Return: void
9364  */
9365 static inline
9366 void dp_update_flow_control_parameters(struct dp_soc *soc,
9367 				struct cdp_config_params *params)
9368 {
9369 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
9370 					params->tx_flow_stop_queue_threshold;
9371 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
9372 					params->tx_flow_start_queue_offset;
9373 }
9374 #else
9375 static inline
9376 void dp_update_flow_control_parameters(struct dp_soc *soc,
9377 				struct cdp_config_params *params)
9378 {
9379 }
9380 #endif
9381 
9382 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
9383 /* Max packet limit for TX Comp packet loop (dp_tx_comp_handler) */
9384 #define DP_TX_COMP_LOOP_PKT_LIMIT_MAX 1024
9385 
9386 /* Max packet limit for RX REAP Loop (dp_rx_process) */
9387 #define DP_RX_REAP_LOOP_PKT_LIMIT_MAX 1024
9388 
9389 static
9390 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
9391 					struct cdp_config_params *params)
9392 {
9393 	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit =
9394 				params->tx_comp_loop_pkt_limit;
9395 
9396 	if (params->tx_comp_loop_pkt_limit < DP_TX_COMP_LOOP_PKT_LIMIT_MAX)
9397 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = true;
9398 	else
9399 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = false;
9400 
9401 	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit =
9402 				params->rx_reap_loop_pkt_limit;
9403 
9404 	if (params->rx_reap_loop_pkt_limit < DP_RX_REAP_LOOP_PKT_LIMIT_MAX)
9405 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = true;
9406 	else
9407 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = false;
9408 
9409 	soc->wlan_cfg_ctx->rx_hp_oos_update_limit =
9410 				params->rx_hp_oos_update_limit;
9411 
9412 	dp_info("tx_comp_loop_pkt_limit %u tx_comp_enable_eol_data_check %u rx_reap_loop_pkt_limit %u rx_enable_eol_data_check %u rx_hp_oos_update_limit %u",
9413 		soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit,
9414 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check,
9415 		soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit,
9416 		soc->wlan_cfg_ctx->rx_enable_eol_data_check,
9417 		soc->wlan_cfg_ctx->rx_hp_oos_update_limit);
9418 }
9419 #else
9420 static inline
9421 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
9422 					struct cdp_config_params *params)
9423 { }
9424 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
9425 
9426 /**
9427  * dp_update_config_parameters() - API to store datapath
9428  *                            config parameters
9429  * @soc: soc handle
9430  * @cfg: ini parameter handle
9431  *
9432  * Return: status
9433  */
9434 static
9435 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
9436 				struct cdp_config_params *params)
9437 {
9438 	struct dp_soc *soc = (struct dp_soc *)psoc;
9439 
9440 	if (!(soc)) {
9441 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
9442 				"%s: Invalid handle", __func__);
9443 		return QDF_STATUS_E_INVAL;
9444 	}
9445 
9446 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
9447 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
9448 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
9449 	soc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload =
9450 				params->p2p_tcp_udp_checksumoffload;
9451 	soc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload =
9452 				params->nan_tcp_udp_checksumoffload;
9453 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
9454 				params->tcp_udp_checksumoffload;
9455 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
9456 	soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
9457 	soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
9458 
9459 	dp_update_rx_soft_irq_limit_params(soc, params);
9460 	dp_update_flow_control_parameters(soc, params);
9461 
9462 	return QDF_STATUS_SUCCESS;
9463 }
9464 
9465 static struct cdp_wds_ops dp_ops_wds = {
9466 	.vdev_set_wds = dp_vdev_set_wds,
9467 #ifdef WDS_VENDOR_EXTENSION
9468 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
9469 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
9470 #endif
9471 };
9472 
9473 /*
9474  * dp_txrx_data_tx_cb_set(): set the callback for non standard tx
9475  * @soc_hdl - datapath soc handle
9476  * @vdev_id - virtual interface id
9477  * @callback - callback function
9478  * @ctxt: callback context
9479  *
9480  */
9481 static void
9482 dp_txrx_data_tx_cb_set(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
9483 		       ol_txrx_data_tx_cb callback, void *ctxt)
9484 {
9485 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9486 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9487 						     DP_MOD_ID_CDP);
9488 
9489 	if (!vdev)
9490 		return;
9491 
9492 	vdev->tx_non_std_data_callback.func = callback;
9493 	vdev->tx_non_std_data_callback.ctxt = ctxt;
9494 
9495 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9496 }
9497 
9498 /**
9499  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
9500  * @soc: datapath soc handle
9501  * @pdev_id: id of datapath pdev handle
9502  *
9503  * Return: opaque pointer to dp txrx handle
9504  */
9505 static void *dp_pdev_get_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id)
9506 {
9507 	struct dp_pdev *pdev =
9508 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9509 						   pdev_id);
9510 	if (qdf_unlikely(!pdev))
9511 		return NULL;
9512 
9513 	return pdev->dp_txrx_handle;
9514 }
9515 
9516 /**
9517  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
9518  * @soc: datapath soc handle
9519  * @pdev_id: id of datapath pdev handle
9520  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
9521  *
9522  * Return: void
9523  */
9524 static void
9525 dp_pdev_set_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id,
9526 			   void *dp_txrx_hdl)
9527 {
9528 	struct dp_pdev *pdev =
9529 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9530 						   pdev_id);
9531 
9532 	if (!pdev)
9533 		return;
9534 
9535 	pdev->dp_txrx_handle = dp_txrx_hdl;
9536 }
9537 
9538 /**
9539  * dp_vdev_get_dp_ext_handle() - get dp handle from vdev
9540  * @soc: datapath soc handle
9541  * @vdev_id: vdev id
9542  *
9543  * Return: opaque pointer to dp txrx handle
9544  */
9545 static void *dp_vdev_get_dp_ext_handle(ol_txrx_soc_handle soc_hdl,
9546 				       uint8_t vdev_id)
9547 {
9548 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9549 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9550 						     DP_MOD_ID_CDP);
9551 	void *dp_ext_handle;
9552 
9553 	if (!vdev)
9554 		return NULL;
9555 	dp_ext_handle = vdev->vdev_dp_ext_handle;
9556 
9557 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9558 	return dp_ext_handle;
9559 }
9560 
9561 /**
9562  * dp_vdev_set_dp_ext_handle() - set dp handle in vdev
9563  * @soc: datapath soc handle
9564  * @vdev_id: vdev id
9565  * @size: size of advance dp handle
9566  *
9567  * Return: QDF_STATUS
9568  */
9569 static QDF_STATUS
9570 dp_vdev_set_dp_ext_handle(ol_txrx_soc_handle soc_hdl, uint8_t vdev_id,
9571 			  uint16_t size)
9572 {
9573 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9574 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9575 						     DP_MOD_ID_CDP);
9576 	void *dp_ext_handle;
9577 
9578 	if (!vdev)
9579 		return QDF_STATUS_E_FAILURE;
9580 
9581 	dp_ext_handle = qdf_mem_malloc(size);
9582 
9583 	if (!dp_ext_handle) {
9584 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9585 		return QDF_STATUS_E_FAILURE;
9586 	}
9587 
9588 	vdev->vdev_dp_ext_handle = dp_ext_handle;
9589 
9590 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9591 	return QDF_STATUS_SUCCESS;
9592 }
9593 
9594 /**
9595  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
9596  * @soc_handle: datapath soc handle
9597  *
9598  * Return: opaque pointer to external dp (non-core DP)
9599  */
9600 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
9601 {
9602 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9603 
9604 	return soc->external_txrx_handle;
9605 }
9606 
9607 /**
9608  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
9609  * @soc_handle: datapath soc handle
9610  * @txrx_handle: opaque pointer to external dp (non-core DP)
9611  *
9612  * Return: void
9613  */
9614 static void
9615 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
9616 {
9617 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9618 
9619 	soc->external_txrx_handle = txrx_handle;
9620 }
9621 
9622 /**
9623  * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping
9624  * @soc_hdl: datapath soc handle
9625  * @pdev_id: id of the datapath pdev handle
9626  * @lmac_id: lmac id
9627  *
9628  * Return: QDF_STATUS
9629  */
9630 static QDF_STATUS
9631 dp_soc_map_pdev_to_lmac
9632 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
9633 	 uint32_t lmac_id)
9634 {
9635 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9636 
9637 	wlan_cfg_set_hw_mac_idx(soc->wlan_cfg_ctx,
9638 				pdev_id,
9639 				lmac_id);
9640 
9641 	/*Set host PDEV ID for lmac_id*/
9642 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
9643 			      pdev_id,
9644 			      lmac_id);
9645 
9646 	return QDF_STATUS_SUCCESS;
9647 }
9648 
9649 /**
9650  * dp_soc_handle_pdev_mode_change() - Update pdev to lmac mapping
9651  * @soc_hdl: datapath soc handle
9652  * @pdev_id: id of the datapath pdev handle
9653  * @lmac_id: lmac id
9654  *
9655  * In the event of a dynamic mode change, update the pdev to lmac mapping
9656  *
9657  * Return: QDF_STATUS
9658  */
9659 static QDF_STATUS
9660 dp_soc_handle_pdev_mode_change
9661 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
9662 	 uint32_t lmac_id)
9663 {
9664 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9665 	struct dp_vdev *vdev = NULL;
9666 	uint8_t hw_pdev_id, mac_id;
9667 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc,
9668 								  pdev_id);
9669 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
9670 
9671 	if (qdf_unlikely(!pdev))
9672 		return QDF_STATUS_E_FAILURE;
9673 
9674 	pdev->lmac_id = lmac_id;
9675 	dp_info(" mode change %d %d\n", pdev->pdev_id, pdev->lmac_id);
9676 
9677 	/*Set host PDEV ID for lmac_id*/
9678 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
9679 			      pdev->pdev_id,
9680 			      lmac_id);
9681 
9682 	hw_pdev_id =
9683 		dp_get_target_pdev_id_for_host_pdev_id(soc,
9684 						       pdev->pdev_id);
9685 
9686 	/*
9687 	 * When NSS offload is enabled, send pdev_id->lmac_id
9688 	 * and pdev_id to hw_pdev_id to NSS FW
9689 	 */
9690 	if (nss_config) {
9691 		mac_id = pdev->lmac_id;
9692 		if (soc->cdp_soc.ol_ops->pdev_update_lmac_n_target_pdev_id)
9693 			soc->cdp_soc.ol_ops->
9694 				pdev_update_lmac_n_target_pdev_id(
9695 				soc->ctrl_psoc,
9696 				&pdev_id, &mac_id, &hw_pdev_id);
9697 	}
9698 
9699 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
9700 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
9701 		HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
9702 						hw_pdev_id);
9703 		vdev->lmac_id = pdev->lmac_id;
9704 	}
9705 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
9706 
9707 	return QDF_STATUS_SUCCESS;
9708 }
9709 
9710 /**
9711  * dp_soc_set_pdev_status_down() - set pdev down/up status
9712  * @soc: datapath soc handle
9713  * @pdev_id: id of datapath pdev handle
9714  * @is_pdev_down: pdev down/up status
9715  *
9716  * Return: QDF_STATUS
9717  */
9718 static QDF_STATUS
9719 dp_soc_set_pdev_status_down(struct cdp_soc_t *soc, uint8_t pdev_id,
9720 			    bool is_pdev_down)
9721 {
9722 	struct dp_pdev *pdev =
9723 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9724 						   pdev_id);
9725 	if (!pdev)
9726 		return QDF_STATUS_E_FAILURE;
9727 
9728 	pdev->is_pdev_down = is_pdev_down;
9729 	return QDF_STATUS_SUCCESS;
9730 }
9731 
9732 /**
9733  * dp_get_cfg_capabilities() - get dp capabilities
9734  * @soc_handle: datapath soc handle
9735  * @dp_caps: enum for dp capabilities
9736  *
9737  * Return: bool to determine if dp caps is enabled
9738  */
9739 static bool
9740 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
9741 			enum cdp_capabilities dp_caps)
9742 {
9743 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9744 
9745 	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
9746 }
9747 
9748 #ifdef FEATURE_AST
9749 static QDF_STATUS
9750 dp_peer_teardown_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
9751 		       uint8_t *peer_mac)
9752 {
9753 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9754 	QDF_STATUS status = QDF_STATUS_SUCCESS;
9755 	struct dp_peer *peer =
9756 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
9757 					       DP_MOD_ID_CDP);
9758 
9759 	/* Peer can be null for monitor vap mac address */
9760 	if (!peer) {
9761 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
9762 			  "%s: Invalid peer\n", __func__);
9763 		return QDF_STATUS_E_FAILURE;
9764 	}
9765 
9766 	if (peer->peer_state == DP_PEER_STATE_INIT)
9767 		dp_peer_cleanup(peer->vdev, peer);
9768 
9769 	qdf_spin_lock_bh(&soc->ast_lock);
9770 	dp_peer_delete_ast_entries(soc, peer);
9771 
9772 	dp_peer_update_state(soc, peer, DP_PEER_STATE_LOGICAL_DELETE);
9773 	qdf_spin_unlock_bh(&soc->ast_lock);
9774 
9775 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9776 	return status;
9777 }
9778 #endif
9779 
9780 #ifdef ATH_SUPPORT_NAC_RSSI
9781 /**
9782  * dp_vdev_get_neighbour_rssi(): Store RSSI for configured NAC
9783  * @soc_hdl: DP soc handle
9784  * @vdev_id: id of DP vdev handle
9785  * @mac_addr: neighbour mac
9786  * @rssi: rssi value
9787  *
9788  * Return: 0 for success. nonzero for failure.
9789  */
9790 static QDF_STATUS  dp_vdev_get_neighbour_rssi(struct cdp_soc_t *soc_hdl,
9791 					      uint8_t vdev_id,
9792 					      char *mac_addr,
9793 					      uint8_t *rssi)
9794 {
9795 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9796 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9797 						     DP_MOD_ID_CDP);
9798 	struct dp_pdev *pdev;
9799 	struct dp_neighbour_peer *peer = NULL;
9800 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
9801 
9802 	if (!vdev)
9803 		return status;
9804 
9805 	pdev = vdev->pdev;
9806 	*rssi = 0;
9807 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
9808 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
9809 		      neighbour_peer_list_elem) {
9810 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
9811 				mac_addr, QDF_MAC_ADDR_SIZE) == 0) {
9812 			*rssi = peer->rssi;
9813 			status = QDF_STATUS_SUCCESS;
9814 			break;
9815 		}
9816 	}
9817 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
9818 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9819 	return status;
9820 }
9821 
9822 static QDF_STATUS
9823 dp_config_for_nac_rssi(struct cdp_soc_t *cdp_soc,
9824 		       uint8_t vdev_id,
9825 		       enum cdp_nac_param_cmd cmd, char *bssid,
9826 		       char *client_macaddr,
9827 		       uint8_t chan_num)
9828 {
9829 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
9830 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
9831 						     DP_MOD_ID_CDP);
9832 	struct dp_pdev *pdev;
9833 
9834 	if (!vdev)
9835 		return QDF_STATUS_E_FAILURE;
9836 
9837 	pdev = (struct dp_pdev *)vdev->pdev;
9838 	pdev->nac_rssi_filtering = 1;
9839 	/* Store address of NAC (neighbour peer) which will be checked
9840 	 * against TA of received packets.
9841 	 */
9842 
9843 	if (cmd == CDP_NAC_PARAM_ADD) {
9844 		dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id,
9845 						 DP_NAC_PARAM_ADD,
9846 						 (uint8_t *)client_macaddr);
9847 	} else if (cmd == CDP_NAC_PARAM_DEL) {
9848 		dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id,
9849 						 DP_NAC_PARAM_DEL,
9850 						 (uint8_t *)client_macaddr);
9851 	}
9852 
9853 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
9854 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
9855 			(soc->ctrl_psoc, pdev->pdev_id,
9856 			 vdev->vdev_id, cmd, bssid, client_macaddr);
9857 
9858 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
9859 	return QDF_STATUS_SUCCESS;
9860 }
9861 #endif
9862 
9863 /**
9864  * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering
9865  * for pktlog
9866  * @soc: cdp_soc handle
9867  * @pdev_id: id of dp pdev handle
9868  * @mac_addr: Peer mac address
9869  * @enb_dsb: Enable or disable peer based filtering
9870  *
9871  * Return: QDF_STATUS
9872  */
9873 static int
9874 dp_enable_peer_based_pktlog(struct cdp_soc_t *soc, uint8_t pdev_id,
9875 			    uint8_t *mac_addr, uint8_t enb_dsb)
9876 {
9877 	struct dp_peer *peer;
9878 	struct dp_pdev *pdev =
9879 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9880 						   pdev_id);
9881 
9882 	if (!pdev)
9883 		return QDF_STATUS_E_FAILURE;
9884 
9885 	peer = dp_peer_find_hash_find((struct dp_soc *)soc, mac_addr,
9886 				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
9887 
9888 	if (!peer) {
9889 		dp_err("Invalid Peer");
9890 		return QDF_STATUS_E_FAILURE;
9891 	}
9892 
9893 	peer->peer_based_pktlog_filter = enb_dsb;
9894 	pdev->dp_peer_based_pktlog = enb_dsb;
9895 
9896 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
9897 
9898 	return QDF_STATUS_SUCCESS;
9899 }
9900 
9901 #ifndef WLAN_SUPPORT_RX_TAG_STATISTICS
9902 /**
9903  * dp_dump_pdev_rx_protocol_tag_stats - dump the number of packets tagged for
9904  * given protocol type (RX_PROTOCOL_TAG_ALL indicates for all protocol)
9905  * @soc: cdp_soc handle
9906  * @pdev_id: id of cdp_pdev handle
9907  * @protocol_type: protocol type for which stats should be displayed
9908  *
9909  * Return: none
9910  */
9911 static inline void
9912 dp_dump_pdev_rx_protocol_tag_stats(struct cdp_soc_t  *soc, uint8_t pdev_id,
9913 				   uint16_t protocol_type)
9914 {
9915 }
9916 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
9917 
9918 #ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
9919 /**
9920  * dp_update_pdev_rx_protocol_tag - Add/remove a protocol tag that should be
9921  * applied to the desired protocol type packets
9922  * @soc: soc handle
9923  * @pdev_id: id of cdp_pdev handle
9924  * @enable_rx_protocol_tag - bitmask that indicates what protocol types
9925  * are enabled for tagging. zero indicates disable feature, non-zero indicates
9926  * enable feature
9927  * @protocol_type: new protocol type for which the tag is being added
9928  * @tag: user configured tag for the new protocol
9929  *
9930  * Return: Success
9931  */
9932 static inline QDF_STATUS
9933 dp_update_pdev_rx_protocol_tag(struct cdp_soc_t  *soc, uint8_t pdev_id,
9934 			       uint32_t enable_rx_protocol_tag,
9935 			       uint16_t protocol_type,
9936 			       uint16_t tag)
9937 {
9938 	return QDF_STATUS_SUCCESS;
9939 }
9940 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
9941 
9942 #ifndef WLAN_SUPPORT_RX_FLOW_TAG
9943 /**
9944  * dp_set_rx_flow_tag - add/delete a flow
9945  * @soc: soc handle
9946  * @pdev_id: id of cdp_pdev handle
9947  * @flow_info: flow tuple that is to be added to/deleted from flow search table
9948  *
9949  * Return: Success
9950  */
9951 static inline QDF_STATUS
9952 dp_set_rx_flow_tag(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
9953 		   struct cdp_rx_flow_info *flow_info)
9954 {
9955 	return QDF_STATUS_SUCCESS;
9956 }
9957 /**
9958  * dp_dump_rx_flow_tag_stats - dump the number of packets tagged for
9959  * given flow 5-tuple
9960  * @cdp_soc: soc handle
9961  * @pdev_id: id of cdp_pdev handle
9962  * @flow_info: flow 5-tuple for which stats should be displayed
9963  *
9964  * Return: Success
9965  */
9966 static inline QDF_STATUS
9967 dp_dump_rx_flow_tag_stats(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
9968 			  struct cdp_rx_flow_info *flow_info)
9969 {
9970 	return QDF_STATUS_SUCCESS;
9971 }
9972 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
9973 
9974 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
9975 					   uint32_t max_peers,
9976 					   uint32_t max_ast_index,
9977 					   bool peer_map_unmap_v2)
9978 {
9979 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9980 
9981 	soc->max_peers = max_peers;
9982 
9983 	qdf_print ("%s max_peers %u, max_ast_index: %u\n",
9984 		   __func__, max_peers, max_ast_index);
9985 	wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);
9986 
9987 	if (dp_peer_find_attach(soc))
9988 		return QDF_STATUS_E_FAILURE;
9989 
9990 	soc->is_peer_map_unmap_v2 = peer_map_unmap_v2;
9991 	soc->peer_map_attach_success = TRUE;
9992 
9993 	return QDF_STATUS_SUCCESS;
9994 }
9995 
9996 static QDF_STATUS dp_soc_set_param(struct cdp_soc_t  *soc_hdl,
9997 				   enum cdp_soc_param_t param,
9998 				   uint32_t value)
9999 {
10000 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10001 
10002 	switch (param) {
10003 	case DP_SOC_PARAM_MSDU_EXCEPTION_DESC:
10004 		soc->num_msdu_exception_desc = value;
10005 		dp_info("num_msdu exception_desc %u",
10006 			value);
10007 		break;
10008 	default:
10009 		dp_info("not handled param %d ", param);
10010 		break;
10011 	}
10012 
10013 	return QDF_STATUS_SUCCESS;
10014 }
10015 
10016 static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
10017 				      void *stats_ctx)
10018 {
10019 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
10020 
10021 	soc->rate_stats_ctx = (struct cdp_soc_rate_stats_ctx *)stats_ctx;
10022 }
10023 
10024 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
10025 /**
10026  * dp_peer_flush_rate_stats_req(): Flush peer rate stats
10027  * @soc: Datapath SOC handle
10028  * @peer: Datapath peer
10029  * @arg: argument to iter function
10030  *
10031  * Return: QDF_STATUS
10032  */
10033 static void
10034 dp_peer_flush_rate_stats_req(struct dp_soc *soc, struct dp_peer *peer,
10035 			     void *arg)
10036 {
10037 	if (peer->bss_peer)
10038 		return;
10039 
10040 	dp_wdi_event_handler(
10041 		WDI_EVENT_FLUSH_RATE_STATS_REQ,
10042 		soc, peer->wlanstats_ctx,
10043 		peer->peer_id,
10044 		WDI_NO_VAL, peer->vdev->pdev->pdev_id);
10045 }
10046 
10047 /**
10048  * dp_flush_rate_stats_req(): Flush peer rate stats in pdev
10049  * @soc_hdl: Datapath SOC handle
10050  * @pdev_id: pdev_id
10051  *
10052  * Return: QDF_STATUS
10053  */
10054 static QDF_STATUS dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
10055 					  uint8_t pdev_id)
10056 {
10057 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10058 	struct dp_pdev *pdev =
10059 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10060 						   pdev_id);
10061 	if (!pdev)
10062 		return QDF_STATUS_E_FAILURE;
10063 
10064 	dp_pdev_iterate_peer(pdev, dp_peer_flush_rate_stats_req, NULL,
10065 			     DP_MOD_ID_CDP);
10066 
10067 	return QDF_STATUS_SUCCESS;
10068 }
10069 #else
10070 static inline QDF_STATUS
10071 dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
10072 			uint8_t pdev_id)
10073 {
10074 	return QDF_STATUS_SUCCESS;
10075 }
10076 #endif
10077 
10078 static void *dp_peer_get_wlan_stats_ctx(struct cdp_soc_t *soc_hdl,
10079 					uint8_t vdev_id,
10080 					uint8_t *mac_addr)
10081 {
10082 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10083 	struct dp_peer *peer;
10084 	void *wlanstats_ctx = NULL;
10085 
10086 	if (mac_addr) {
10087 		peer = dp_peer_find_hash_find(soc, mac_addr,
10088 					      0, vdev_id,
10089 					      DP_MOD_ID_CDP);
10090 		if (!peer)
10091 			return NULL;
10092 
10093 		wlanstats_ctx = peer->wlanstats_ctx;
10094 
10095 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10096 	}
10097 
10098 	return wlanstats_ctx;
10099 }
10100 
10101 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
10102 static QDF_STATUS dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
10103 					   uint8_t pdev_id,
10104 					   void *buf)
10105 {
10106 	 dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS,
10107 			      (struct dp_soc *)soc, buf, HTT_INVALID_PEER,
10108 			      WDI_NO_VAL, pdev_id);
10109 	return QDF_STATUS_SUCCESS;
10110 }
10111 #else
10112 static inline QDF_STATUS
10113 dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
10114 			 uint8_t pdev_id,
10115 			 void *buf)
10116 {
10117 	return QDF_STATUS_SUCCESS;
10118 }
10119 #endif
10120 
10121 static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle)
10122 {
10123 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
10124 
10125 	return soc->rate_stats_ctx;
10126 }
10127 
10128 /*
10129  * dp_get_cfg() - get dp cfg
10130  * @soc: cdp soc handle
10131  * @cfg: cfg enum
10132  *
10133  * Return: cfg value
10134  */
10135 static uint32_t dp_get_cfg(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg)
10136 {
10137 	struct dp_soc *dpsoc = (struct dp_soc *)soc;
10138 	uint32_t value = 0;
10139 
10140 	switch (cfg) {
10141 	case cfg_dp_enable_data_stall:
10142 		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
10143 		break;
10144 	case cfg_dp_enable_p2p_ip_tcp_udp_checksum_offload:
10145 		value = dpsoc->wlan_cfg_ctx->p2p_tcp_udp_checksumoffload;
10146 		break;
10147 	case cfg_dp_enable_nan_ip_tcp_udp_checksum_offload:
10148 		value = dpsoc->wlan_cfg_ctx->nan_tcp_udp_checksumoffload;
10149 		break;
10150 	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
10151 		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
10152 		break;
10153 	case cfg_dp_disable_legacy_mode_csum_offload:
10154 		value = dpsoc->wlan_cfg_ctx->
10155 					legacy_mode_checksumoffload_disable;
10156 		break;
10157 	case cfg_dp_tso_enable:
10158 		value = dpsoc->wlan_cfg_ctx->tso_enabled;
10159 		break;
10160 	case cfg_dp_lro_enable:
10161 		value = dpsoc->wlan_cfg_ctx->lro_enabled;
10162 		break;
10163 	case cfg_dp_gro_enable:
10164 		value = dpsoc->wlan_cfg_ctx->gro_enabled;
10165 		break;
10166 	case cfg_dp_tx_flow_start_queue_offset:
10167 		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
10168 		break;
10169 	case cfg_dp_tx_flow_stop_queue_threshold:
10170 		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
10171 		break;
10172 	case cfg_dp_disable_intra_bss_fwd:
10173 		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
10174 		break;
10175 	case cfg_dp_pktlog_buffer_size:
10176 		value = dpsoc->wlan_cfg_ctx->pktlog_buffer_size;
10177 		break;
10178 	default:
10179 		value =  0;
10180 	}
10181 
10182 	return value;
10183 }
10184 
10185 #ifdef PEER_FLOW_CONTROL
10186 /**
10187  * dp_tx_flow_ctrl_configure_pdev() - Configure flow control params
10188  * @soc_handle: datapath soc handle
10189  * @pdev_id: id of datapath pdev handle
10190  * @param: ol ath params
10191  * @value: value of the flag
10192  * @buff: Buffer to be passed
10193  *
10194  * Implemented this function same as legacy function. In legacy code, single
10195  * function is used to display stats and update pdev params.
10196  *
10197  * Return: 0 for success. nonzero for failure.
10198  */
10199 static uint32_t dp_tx_flow_ctrl_configure_pdev(struct cdp_soc_t *soc_handle,
10200 					       uint8_t pdev_id,
10201 					       enum _dp_param_t param,
10202 					       uint32_t value, void *buff)
10203 {
10204 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
10205 	struct dp_pdev *pdev =
10206 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
10207 						   pdev_id);
10208 
10209 	if (qdf_unlikely(!pdev))
10210 		return 1;
10211 
10212 	soc = pdev->soc;
10213 	if (!soc)
10214 		return 1;
10215 
10216 	switch (param) {
10217 #ifdef QCA_ENH_V3_STATS_SUPPORT
10218 	case DP_PARAM_VIDEO_DELAY_STATS_FC:
10219 		if (value)
10220 			pdev->delay_stats_flag = true;
10221 		else
10222 			pdev->delay_stats_flag = false;
10223 		break;
10224 	case DP_PARAM_VIDEO_STATS_FC:
10225 		qdf_print("------- TID Stats ------\n");
10226 		dp_pdev_print_tid_stats(pdev);
10227 		qdf_print("------ Delay Stats ------\n");
10228 		dp_pdev_print_delay_stats(pdev);
10229 		break;
10230 #endif
10231 	case DP_PARAM_TOTAL_Q_SIZE:
10232 		{
10233 			uint32_t tx_min, tx_max;
10234 
10235 			tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx);
10236 			tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
10237 
10238 			if (!buff) {
10239 				if ((value >= tx_min) && (value <= tx_max)) {
10240 					pdev->num_tx_allowed = value;
10241 				} else {
10242 					QDF_TRACE(QDF_MODULE_ID_DP,
10243 						  QDF_TRACE_LEVEL_INFO,
10244 						  "Failed to update num_tx_allowed, Q_min = %d Q_max = %d",
10245 						  tx_min, tx_max);
10246 					break;
10247 				}
10248 			} else {
10249 				*(int *)buff = pdev->num_tx_allowed;
10250 			}
10251 		}
10252 		break;
10253 	default:
10254 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
10255 			  "%s: not handled param %d ", __func__, param);
10256 		break;
10257 	}
10258 
10259 	return 0;
10260 }
10261 #endif
10262 
10263 /**
10264  * dp_set_pdev_pcp_tid_map_wifi3(): update pcp tid map in pdev
10265  * @psoc: dp soc handle
10266  * @pdev_id: id of DP_PDEV handle
10267  * @pcp: pcp value
10268  * @tid: tid value passed by the user
10269  *
10270  * Return: QDF_STATUS_SUCCESS on success
10271  */
10272 static QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
10273 						uint8_t pdev_id,
10274 						uint8_t pcp, uint8_t tid)
10275 {
10276 	struct dp_soc *soc = (struct dp_soc *)psoc;
10277 
10278 	soc->pcp_tid_map[pcp] = tid;
10279 
10280 	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
10281 	return QDF_STATUS_SUCCESS;
10282 }
10283 
10284 /**
10285  * dp_set_vdev_pcp_tid_map_wifi3(): update pcp tid map in vdev
10286  * @soc: DP soc handle
10287  * @vdev_id: id of DP_VDEV handle
10288  * @pcp: pcp value
10289  * @tid: tid value passed by the user
10290  *
10291  * Return: QDF_STATUS_SUCCESS on success
10292  */
10293 static QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc_hdl,
10294 						uint8_t vdev_id,
10295 						uint8_t pcp, uint8_t tid)
10296 {
10297 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10298 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10299 						     DP_MOD_ID_CDP);
10300 
10301 	if (!vdev)
10302 		return QDF_STATUS_E_FAILURE;
10303 
10304 	vdev->pcp_tid_map[pcp] = tid;
10305 
10306 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10307 	return QDF_STATUS_SUCCESS;
10308 }
10309 
10310 #ifdef QCA_SUPPORT_FULL_MON
10311 static inline QDF_STATUS
10312 dp_config_full_mon_mode(struct cdp_soc_t *soc_handle,
10313 			uint8_t val)
10314 {
10315 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
10316 
10317 	soc->full_mon_mode = val;
10318 	qdf_alert("Configure full monitor mode val: %d ", val);
10319 
10320 	return QDF_STATUS_SUCCESS;
10321 }
10322 #else
10323 static inline QDF_STATUS
10324 dp_config_full_mon_mode(struct cdp_soc_t *soc_handle,
10325 			uint8_t val)
10326 {
10327 	return 0;
10328 }
10329 #endif
10330 
10331 static struct cdp_cmn_ops dp_ops_cmn = {
10332 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
10333 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
10334 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
10335 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
10336 	.txrx_pdev_post_attach = dp_pdev_post_attach_wifi3,
10337 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
10338 	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
10339 	.txrx_peer_create = dp_peer_create_wifi3,
10340 	.txrx_peer_setup = dp_peer_setup_wifi3,
10341 #ifdef FEATURE_AST
10342 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
10343 #else
10344 	.txrx_peer_teardown = NULL,
10345 #endif
10346 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
10347 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
10348 	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
10349 	.txrx_peer_get_ast_info_by_pdev =
10350 		dp_peer_get_ast_info_by_pdevid_wifi3,
10351 	.txrx_peer_ast_delete_by_soc =
10352 		dp_peer_ast_entry_del_by_soc,
10353 	.txrx_peer_ast_delete_by_pdev =
10354 		dp_peer_ast_entry_del_by_pdev,
10355 	.txrx_peer_delete = dp_peer_delete_wifi3,
10356 	.txrx_vdev_register = dp_vdev_register_wifi3,
10357 	.txrx_soc_detach = dp_soc_detach_wifi3,
10358 	.txrx_soc_deinit = dp_soc_deinit_wifi3,
10359 	.txrx_soc_init = dp_soc_init_wifi3,
10360 	.txrx_tso_soc_attach = dp_tso_soc_attach,
10361 	.txrx_tso_soc_detach = dp_tso_soc_detach,
10362 	.txrx_pdev_init = dp_pdev_init_wifi3,
10363 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
10364 	.txrx_get_mon_vdev_from_pdev = dp_get_mon_vdev_from_pdev_wifi3,
10365 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
10366 	.txrx_ath_getstats = dp_get_device_stats,
10367 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
10368 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
10369 	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
10370 	.delba_process = dp_delba_process_wifi3,
10371 	.set_addba_response = dp_set_addba_response,
10372 	.flush_cache_rx_queue = NULL,
10373 	/* TODO: get API's for dscp-tid need to be added*/
10374 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
10375 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
10376 	.txrx_get_total_per = dp_get_total_per,
10377 	.txrx_stats_request = dp_txrx_stats_request,
10378 	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
10379 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
10380 	.display_stats = dp_txrx_dump_stats,
10381 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
10382 	.txrx_intr_detach = dp_soc_interrupt_detach,
10383 	.set_pn_check = dp_set_pn_check_wifi3,
10384 	.set_key_sec_type = dp_set_key_sec_type_wifi3,
10385 	.update_config_parameters = dp_update_config_parameters,
10386 	/* TODO: Add other functions */
10387 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
10388 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
10389 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
10390 	.get_vdev_dp_ext_txrx_handle = dp_vdev_get_dp_ext_handle,
10391 	.set_vdev_dp_ext_txrx_handle = dp_vdev_set_dp_ext_handle,
10392 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
10393 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
10394 	.map_pdev_to_lmac = dp_soc_map_pdev_to_lmac,
10395 	.handle_mode_change = dp_soc_handle_pdev_mode_change,
10396 	.set_pdev_status_down = dp_soc_set_pdev_status_down,
10397 	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
10398 	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
10399 	.tx_send = dp_tx_send,
10400 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
10401 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
10402 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
10403 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
10404 	.set_soc_param = dp_soc_set_param,
10405 	.txrx_get_os_rx_handles_from_vdev =
10406 					dp_get_os_rx_handles_from_vdev_wifi3,
10407 	.delba_tx_completion = dp_delba_tx_completion_wifi3,
10408 	.get_dp_capabilities = dp_get_cfg_capabilities,
10409 	.txrx_get_cfg = dp_get_cfg,
10410 	.set_rate_stats_ctx = dp_soc_set_rate_stats_ctx,
10411 	.get_rate_stats_ctx = dp_soc_get_rate_stats_ctx,
10412 	.txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats,
10413 	.txrx_flush_rate_stats_request = dp_flush_rate_stats_req,
10414 	.txrx_peer_get_wlan_stats_ctx = dp_peer_get_wlan_stats_ctx,
10415 
10416 	.set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3,
10417 	.set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3,
10418 
10419 	.txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler,
10420 #ifdef QCA_MULTIPASS_SUPPORT
10421 	.set_vlan_groupkey = dp_set_vlan_groupkey,
10422 #endif
10423 	.get_peer_mac_list = dp_get_peer_mac_list,
10424 	.tx_send_exc = dp_tx_send_exception,
10425 };
10426 
10427 static struct cdp_ctrl_ops dp_ops_ctrl = {
10428 	.txrx_peer_authorize = dp_peer_authorize,
10429 #ifdef VDEV_PEER_PROTOCOL_COUNT
10430 	.txrx_enable_peer_protocol_count = dp_enable_vdev_peer_protocol_count,
10431 	.txrx_set_peer_protocol_drop_mask =
10432 		dp_enable_vdev_peer_protocol_drop_mask,
10433 	.txrx_is_peer_protocol_count_enabled =
10434 		dp_is_vdev_peer_protocol_count_enabled,
10435 	.txrx_get_peer_protocol_drop_mask = dp_get_vdev_peer_protocol_drop_mask,
10436 #endif
10437 	.txrx_set_vdev_param = dp_set_vdev_param,
10438 	.txrx_set_psoc_param = dp_set_psoc_param,
10439 	.txrx_get_psoc_param = dp_get_psoc_param,
10440 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
10441 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
10442 #if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
10443 	.txrx_update_filter_neighbour_peers =
10444 		dp_update_filter_neighbour_peers,
10445 #endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
10446 	.txrx_get_sec_type = dp_get_sec_type,
10447 	.txrx_wdi_event_sub = dp_wdi_event_sub,
10448 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
10449 #ifdef WDI_EVENT_ENABLE
10450 	.txrx_get_pldev = dp_get_pldev,
10451 #endif
10452 	.txrx_set_pdev_param = dp_set_pdev_param,
10453 	.txrx_get_pdev_param = dp_get_pdev_param,
10454 	.txrx_set_peer_param = dp_set_peer_param,
10455 	.txrx_get_peer_param = dp_get_peer_param,
10456 #ifdef VDEV_PEER_PROTOCOL_COUNT
10457 	.txrx_peer_protocol_cnt = dp_peer_stats_update_protocol_cnt,
10458 #endif
10459 #ifdef ATH_SUPPORT_NAC_RSSI
10460 	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
10461 	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
10462 #endif
10463 #ifdef WLAN_SUPPORT_MSCS
10464 	.txrx_record_mscs_params = dp_record_mscs_params,
10465 #endif
10466 	.set_key = dp_set_michael_key,
10467 	.txrx_get_vdev_param = dp_get_vdev_param,
10468 	.enable_peer_based_pktlog = dp_enable_peer_based_pktlog,
10469 	.calculate_delay_stats = dp_calculate_delay_stats,
10470 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
10471 	.txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag,
10472 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
10473 	.txrx_dump_pdev_rx_protocol_tag_stats =
10474 				dp_dump_pdev_rx_protocol_tag_stats,
10475 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
10476 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
10477 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
10478 	.txrx_set_rx_flow_tag = dp_set_rx_flow_tag,
10479 	.txrx_dump_rx_flow_tag_stats = dp_dump_rx_flow_tag_stats,
10480 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
10481 #ifdef QCA_MULTIPASS_SUPPORT
10482 	.txrx_peer_set_vlan_id = dp_peer_set_vlan_id,
10483 #endif /*QCA_MULTIPASS_SUPPORT*/
10484 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
10485 	.txrx_update_peer_pkt_capture_params =
10486 		 dp_peer_update_pkt_capture_params,
10487 #endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
10488 };
10489 
10490 static struct cdp_me_ops dp_ops_me = {
10491 #ifdef ATH_SUPPORT_IQUE
10492 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
10493 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
10494 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
10495 #endif
10496 };
10497 
10498 static struct cdp_mon_ops dp_ops_mon = {
10499 	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
10500 	/* Added support for HK advance filter */
10501 	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
10502 	.txrx_deliver_tx_mgmt = dp_deliver_tx_mgmt,
10503 	.config_full_mon_mode = dp_config_full_mon_mode,
10504 };
10505 
10506 static struct cdp_host_stats_ops dp_ops_host_stats = {
10507 	.txrx_per_peer_stats = dp_get_host_peer_stats,
10508 	.get_fw_peer_stats = dp_get_fw_peer_stats,
10509 	.get_htt_stats = dp_get_htt_stats,
10510 #ifdef FEATURE_PERPKT_INFO
10511 	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
10512 	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
10513 #endif /* FEATURE_PERPKT_INFO */
10514 	.txrx_stats_publish = dp_txrx_stats_publish,
10515 	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
10516 	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
10517 	.txrx_get_peer_stats_param = dp_txrx_get_peer_stats_param,
10518 	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
10519 	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
10520 	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
10521 	.txrx_update_vdev_stats = dp_txrx_update_vdev_host_stats,
10522 	/* TODO */
10523 };
10524 
10525 static struct cdp_raw_ops dp_ops_raw = {
10526 	/* TODO */
10527 };
10528 
10529 #ifdef PEER_FLOW_CONTROL
10530 static struct cdp_pflow_ops dp_ops_pflow = {
10531 	dp_tx_flow_ctrl_configure_pdev,
10532 };
10533 #endif /* CONFIG_WIN */
10534 
10535 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
10536 static struct cdp_cfr_ops dp_ops_cfr = {
10537 	.txrx_cfr_filter = dp_cfr_filter,
10538 	.txrx_get_cfr_rcc = dp_get_cfr_rcc,
10539 	.txrx_set_cfr_rcc = dp_set_cfr_rcc,
10540 	.txrx_get_cfr_dbg_stats = dp_get_cfr_dbg_stats,
10541 	.txrx_clear_cfr_dbg_stats = dp_clear_cfr_dbg_stats,
10542 	.txrx_enable_mon_reap_timer = dp_enable_mon_reap_timer,
10543 };
10544 #endif
10545 
10546 #ifdef FEATURE_RUNTIME_PM
10547 /**
10548  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
10549  * @soc_hdl: Datapath soc handle
10550  * @pdev_id: id of data path pdev handle
10551  *
10552  * DP is ready to runtime suspend if there are no pending TX packets.
10553  *
10554  * Return: QDF_STATUS
10555  */
10556 static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
10557 {
10558 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10559 	struct dp_pdev *pdev;
10560 
10561 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10562 	if (!pdev) {
10563 		dp_err("pdev is NULL");
10564 		return QDF_STATUS_E_INVAL;
10565 	}
10566 
10567 	/* Abort if there are any pending TX packets */
10568 	if (dp_get_tx_pending(dp_pdev_to_cdp_pdev(pdev)) > 0) {
10569 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
10570 			  FL("Abort suspend due to pending TX packets"));
10571 		return QDF_STATUS_E_AGAIN;
10572 	}
10573 
10574 	if (soc->intr_mode == DP_INTR_POLL)
10575 		qdf_timer_stop(&soc->int_timer);
10576 
10577 	return QDF_STATUS_SUCCESS;
10578 }
10579 
10580 /**
10581  * dp_flush_ring_hptp() - Update ring shadow
10582  *			  register HP/TP address when runtime
10583  *                        resume
10584  * @opaque_soc: DP soc context
10585  *
10586  * Return: None
10587  */
10588 static
10589 void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
10590 {
10591 	if (hal_srng && hal_srng_get_clear_event(hal_srng,
10592 						 HAL_SRNG_FLUSH_EVENT)) {
10593 		/* Acquire the lock */
10594 		hal_srng_access_start(soc->hal_soc, hal_srng);
10595 
10596 		hal_srng_access_end(soc->hal_soc, hal_srng);
10597 
10598 		hal_srng_set_flush_last_ts(hal_srng);
10599 	}
10600 }
10601 
10602 /**
10603  * dp_runtime_resume() - ensure DP is ready to runtime resume
10604  * @soc_hdl: Datapath soc handle
10605  * @pdev_id: id of data path pdev handle
10606  *
10607  * Resume DP for runtime PM.
10608  *
10609  * Return: QDF_STATUS
10610  */
10611 static QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
10612 {
10613 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10614 	int i;
10615 
10616 	if (soc->intr_mode == DP_INTR_POLL)
10617 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
10618 
10619 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
10620 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
10621 	}
10622 
10623 	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
10624 
10625 	return QDF_STATUS_SUCCESS;
10626 }
10627 #endif /* FEATURE_RUNTIME_PM */
10628 
10629 /**
10630  * dp_tx_get_success_ack_stats() - get tx success completion count
10631  * @soc_hdl: Datapath soc handle
10632  * @vdevid: vdev identifier
10633  *
10634  * Return: tx success ack count
10635  */
10636 static uint32_t dp_tx_get_success_ack_stats(struct cdp_soc_t *soc_hdl,
10637 					    uint8_t vdev_id)
10638 {
10639 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10640 	struct cdp_vdev_stats *vdev_stats = NULL;
10641 	uint32_t tx_success;
10642 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10643 						     DP_MOD_ID_CDP);
10644 
10645 	if (!vdev) {
10646 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
10647 			  FL("Invalid vdev id %d"), vdev_id);
10648 		return 0;
10649 	}
10650 
10651 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
10652 	if (!vdev_stats) {
10653 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
10654 			  "DP alloc failure - unable to get alloc vdev stats");
10655 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10656 		return 0;
10657 	}
10658 
10659 	dp_aggregate_vdev_stats(vdev, vdev_stats);
10660 
10661 	tx_success = vdev_stats->tx.tx_success.num;
10662 	qdf_mem_free(vdev_stats);
10663 
10664 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10665 	return tx_success;
10666 }
10667 
10668 #ifdef WLAN_SUPPORT_DATA_STALL
10669 /**
10670  * dp_register_data_stall_detect_cb() - register data stall callback
10671  * @soc_hdl: Datapath soc handle
10672  * @pdev_id: id of data path pdev handle
10673  * @data_stall_detect_callback: data stall callback function
10674  *
10675  * Return: QDF_STATUS Enumeration
10676  */
10677 static
10678 QDF_STATUS dp_register_data_stall_detect_cb(
10679 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
10680 			data_stall_detect_cb data_stall_detect_callback)
10681 {
10682 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10683 	struct dp_pdev *pdev;
10684 
10685 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10686 	if (!pdev) {
10687 		dp_err("pdev NULL!");
10688 		return QDF_STATUS_E_INVAL;
10689 	}
10690 
10691 	pdev->data_stall_detect_callback = data_stall_detect_callback;
10692 	return QDF_STATUS_SUCCESS;
10693 }
10694 
10695 /**
10696  * dp_deregister_data_stall_detect_cb() - de-register data stall callback
10697  * @soc_hdl: Datapath soc handle
10698  * @pdev_id: id of data path pdev handle
10699  * @data_stall_detect_callback: data stall callback function
10700  *
10701  * Return: QDF_STATUS Enumeration
10702  */
10703 static
10704 QDF_STATUS dp_deregister_data_stall_detect_cb(
10705 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
10706 			data_stall_detect_cb data_stall_detect_callback)
10707 {
10708 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10709 	struct dp_pdev *pdev;
10710 
10711 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10712 	if (!pdev) {
10713 		dp_err("pdev NULL!");
10714 		return QDF_STATUS_E_INVAL;
10715 	}
10716 
10717 	pdev->data_stall_detect_callback = NULL;
10718 	return QDF_STATUS_SUCCESS;
10719 }
10720 
10721 /**
10722  * dp_txrx_post_data_stall_event() - post data stall event
10723  * @soc_hdl: Datapath soc handle
10724  * @indicator: Module triggering data stall
10725  * @data_stall_type: data stall event type
10726  * @pdev_id: pdev id
10727  * @vdev_id_bitmap: vdev id bitmap
10728  * @recovery_type: data stall recovery type
10729  *
10730  * Return: None
10731  */
10732 static void
10733 dp_txrx_post_data_stall_event(struct cdp_soc_t *soc_hdl,
10734 			      enum data_stall_log_event_indicator indicator,
10735 			      enum data_stall_log_event_type data_stall_type,
10736 			      uint32_t pdev_id, uint32_t vdev_id_bitmap,
10737 			      enum data_stall_log_recovery_type recovery_type)
10738 {
10739 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10740 	struct data_stall_event_info data_stall_info;
10741 	struct dp_pdev *pdev;
10742 
10743 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10744 	if (!pdev) {
10745 		dp_err("pdev NULL!");
10746 		return;
10747 	}
10748 
10749 	if (!pdev->data_stall_detect_callback) {
10750 		dp_err("data stall cb not registered!");
10751 		return;
10752 	}
10753 
10754 	dp_info("data_stall_type: %x pdev_id: %d",
10755 		data_stall_type, pdev_id);
10756 
10757 	data_stall_info.indicator = indicator;
10758 	data_stall_info.data_stall_type = data_stall_type;
10759 	data_stall_info.vdev_id_bitmap = vdev_id_bitmap;
10760 	data_stall_info.pdev_id = pdev_id;
10761 	data_stall_info.recovery_type = recovery_type;
10762 
10763 	pdev->data_stall_detect_callback(&data_stall_info);
10764 }
10765 #endif /* WLAN_SUPPORT_DATA_STALL */
10766 
10767 #ifdef WLAN_FEATURE_STATS_EXT
10768 /* rx hw stats event wait timeout in ms */
10769 #define DP_REO_STATUS_STATS_TIMEOUT 1500
10770 /**
10771  * dp_txrx_ext_stats_request - request dp txrx extended stats request
10772  * @soc_hdl: soc handle
10773  * @pdev_id: pdev id
10774  * @req: stats request
10775  *
10776  * Return: QDF_STATUS
10777  */
10778 static QDF_STATUS
10779 dp_txrx_ext_stats_request(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
10780 			  struct cdp_txrx_ext_stats *req)
10781 {
10782 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10783 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10784 
10785 	if (!pdev) {
10786 		dp_err("pdev is null");
10787 		return QDF_STATUS_E_INVAL;
10788 	}
10789 
10790 	dp_aggregate_pdev_stats(pdev);
10791 
10792 	req->tx_msdu_enqueue = pdev->stats.tx_i.processed.num;
10793 	req->tx_msdu_overflow = pdev->stats.tx_i.dropped.ring_full;
10794 	req->rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
10795 	req->rx_mpdu_delivered = soc->ext_stats.rx_mpdu_received;
10796 	req->rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
10797 	req->rx_mpdu_error = soc->stats.rx.err_ring_pkts -
10798 				soc->stats.rx.rx_frags;
10799 
10800 	return QDF_STATUS_SUCCESS;
10801 }
10802 
10803 /**
10804  * dp_rx_hw_stats_cb - request rx hw stats response callback
10805  * @soc: soc handle
10806  * @cb_ctxt: callback context
10807  * @reo_status: reo command response status
10808  *
10809  * Return: None
10810  */
10811 static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
10812 			      union hal_reo_status *reo_status)
10813 {
10814 	struct dp_req_rx_hw_stats_t *rx_hw_stats = cb_ctxt;
10815 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
10816 	bool is_query_timeout;
10817 
10818 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
10819 	is_query_timeout = rx_hw_stats->is_query_timeout;
10820 	/* free the cb_ctxt if all pending tid stats query is received */
10821 	if (qdf_atomic_dec_and_test(&rx_hw_stats->pending_tid_stats_cnt)) {
10822 		if (!is_query_timeout) {
10823 			qdf_event_set(&soc->rx_hw_stats_event);
10824 			soc->is_last_stats_ctx_init = false;
10825 		}
10826 
10827 		qdf_mem_free(rx_hw_stats);
10828 	}
10829 
10830 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
10831 		dp_info("REO stats failure %d",
10832 			queue_status->header.status);
10833 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
10834 		return;
10835 	}
10836 
10837 	if (!is_query_timeout) {
10838 		soc->ext_stats.rx_mpdu_received +=
10839 					queue_status->mpdu_frms_cnt;
10840 		soc->ext_stats.rx_mpdu_missed +=
10841 					queue_status->late_recv_mpdu_cnt;
10842 	}
10843 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
10844 }
10845 
10846 /**
10847  * dp_request_rx_hw_stats - request rx hardware stats
10848  * @soc_hdl: soc handle
10849  * @vdev_id: vdev id
10850  *
10851  * Return: None
10852  */
10853 static QDF_STATUS
10854 dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
10855 {
10856 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10857 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
10858 						     DP_MOD_ID_CDP);
10859 	struct dp_peer *peer = NULL;
10860 	QDF_STATUS status;
10861 	struct dp_req_rx_hw_stats_t *rx_hw_stats;
10862 	int rx_stats_sent_cnt = 0;
10863 	uint32_t last_rx_mpdu_received;
10864 	uint32_t last_rx_mpdu_missed;
10865 
10866 	if (!vdev) {
10867 		dp_err("vdev is null for vdev_id: %u", vdev_id);
10868 		status = QDF_STATUS_E_INVAL;
10869 		goto out;
10870 	}
10871 
10872 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_CDP);
10873 
10874 	if (!peer) {
10875 		dp_err("Peer is NULL");
10876 		status = QDF_STATUS_E_INVAL;
10877 		goto out;
10878 	}
10879 
10880 	rx_hw_stats = qdf_mem_malloc(sizeof(*rx_hw_stats));
10881 
10882 	if (!rx_hw_stats) {
10883 		dp_err("malloc failed for hw stats structure");
10884 		status = QDF_STATUS_E_INVAL;
10885 		goto out;
10886 	}
10887 
10888 	qdf_event_reset(&soc->rx_hw_stats_event);
10889 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
10890 	/* save the last soc cumulative stats and reset it to 0 */
10891 	last_rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
10892 	last_rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
10893 	soc->ext_stats.rx_mpdu_received = 0;
10894 	soc->ext_stats.rx_mpdu_missed = 0;
10895 
10896 	rx_stats_sent_cnt =
10897 		dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, rx_hw_stats);
10898 	if (!rx_stats_sent_cnt) {
10899 		dp_err("no tid stats sent successfully");
10900 		qdf_mem_free(rx_hw_stats);
10901 		qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
10902 		status = QDF_STATUS_E_INVAL;
10903 		goto out;
10904 	}
10905 	qdf_atomic_set(&rx_hw_stats->pending_tid_stats_cnt,
10906 		       rx_stats_sent_cnt);
10907 	rx_hw_stats->is_query_timeout = false;
10908 	soc->is_last_stats_ctx_init = true;
10909 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
10910 
10911 	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
10912 				       DP_REO_STATUS_STATS_TIMEOUT);
10913 
10914 	qdf_spin_lock_bh(&soc->rx_hw_stats_lock);
10915 	if (status != QDF_STATUS_SUCCESS) {
10916 		dp_info("rx hw stats event timeout");
10917 		if (soc->is_last_stats_ctx_init)
10918 			rx_hw_stats->is_query_timeout = true;
10919 		/**
10920 		 * If query timeout happened, use the last saved stats
10921 		 * for this time query.
10922 		 */
10923 		soc->ext_stats.rx_mpdu_received = last_rx_mpdu_received;
10924 		soc->ext_stats.rx_mpdu_missed = last_rx_mpdu_missed;
10925 	}
10926 	qdf_spin_unlock_bh(&soc->rx_hw_stats_lock);
10927 
10928 out:
10929 	if (peer)
10930 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
10931 	if (vdev)
10932 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
10933 
10934 	return status;
10935 }
10936 #endif /* WLAN_FEATURE_STATS_EXT */
10937 
10938 #ifdef DP_PEER_EXTENDED_API
10939 static struct cdp_misc_ops dp_ops_misc = {
10940 #ifdef FEATURE_WLAN_TDLS
10941 	.tx_non_std = dp_tx_non_std,
10942 #endif /* FEATURE_WLAN_TDLS */
10943 	.get_opmode = dp_get_opmode,
10944 #ifdef FEATURE_RUNTIME_PM
10945 	.runtime_suspend = dp_runtime_suspend,
10946 	.runtime_resume = dp_runtime_resume,
10947 #endif /* FEATURE_RUNTIME_PM */
10948 	.pkt_log_init = dp_pkt_log_init,
10949 	.pkt_log_con_service = dp_pkt_log_con_service,
10950 	.get_num_rx_contexts = dp_get_num_rx_contexts,
10951 	.get_tx_ack_stats = dp_tx_get_success_ack_stats,
10952 #ifdef WLAN_SUPPORT_DATA_STALL
10953 	.txrx_data_stall_cb_register = dp_register_data_stall_detect_cb,
10954 	.txrx_data_stall_cb_deregister = dp_deregister_data_stall_detect_cb,
10955 	.txrx_post_data_stall_event = dp_txrx_post_data_stall_event,
10956 #endif
10957 
10958 #ifdef WLAN_FEATURE_STATS_EXT
10959 	.txrx_ext_stats_request = dp_txrx_ext_stats_request,
10960 	.request_rx_hw_stats = dp_request_rx_hw_stats,
10961 #endif /* WLAN_FEATURE_STATS_EXT */
10962 };
10963 #endif
10964 
10965 #ifdef DP_FLOW_CTL
10966 static struct cdp_flowctl_ops dp_ops_flowctl = {
10967 	/* WIFI 3.0 DP implement as required. */
10968 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
10969 	.flow_pool_map_handler = dp_tx_flow_pool_map,
10970 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
10971 	.register_pause_cb = dp_txrx_register_pause_cb,
10972 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
10973 	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
10974 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
10975 };
10976 
10977 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
10978 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
10979 };
10980 #endif
10981 
10982 #ifdef IPA_OFFLOAD
10983 static struct cdp_ipa_ops dp_ops_ipa = {
10984 	.ipa_get_resource = dp_ipa_get_resource,
10985 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
10986 	.ipa_op_response = dp_ipa_op_response,
10987 	.ipa_register_op_cb = dp_ipa_register_op_cb,
10988 	.ipa_get_stat = dp_ipa_get_stat,
10989 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
10990 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
10991 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
10992 	.ipa_setup = dp_ipa_setup,
10993 	.ipa_cleanup = dp_ipa_cleanup,
10994 	.ipa_setup_iface = dp_ipa_setup_iface,
10995 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
10996 	.ipa_enable_pipes = dp_ipa_enable_pipes,
10997 	.ipa_disable_pipes = dp_ipa_disable_pipes,
10998 	.ipa_set_perf_level = dp_ipa_set_perf_level,
10999 	.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd
11000 };
11001 #endif
11002 
11003 #ifdef DP_POWER_SAVE
11004 static QDF_STATUS dp_bus_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
11005 {
11006 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11007 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11008 	int timeout = SUSPEND_DRAIN_WAIT;
11009 	int drain_wait_delay = 50; /* 50 ms */
11010 
11011 	if (qdf_unlikely(!pdev)) {
11012 		dp_err("pdev is NULL");
11013 		return QDF_STATUS_E_INVAL;
11014 	}
11015 
11016 	/* Abort if there are any pending TX packets */
11017 	while (dp_get_tx_pending((struct cdp_pdev *)pdev) > 0) {
11018 		qdf_sleep(drain_wait_delay);
11019 		if (timeout <= 0) {
11020 			dp_err("TX frames are pending, abort suspend");
11021 			return QDF_STATUS_E_TIMEOUT;
11022 		}
11023 		timeout = timeout - drain_wait_delay;
11024 	}
11025 
11026 	if (soc->intr_mode == DP_INTR_POLL)
11027 		qdf_timer_stop(&soc->int_timer);
11028 
11029 	/* Stop monitor reap timer and reap any pending frames in ring */
11030 	if (((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) ||
11031 	     dp_is_enable_reap_timer_non_pkt(pdev)) &&
11032 	    soc->reap_timer_init) {
11033 		qdf_timer_sync_cancel(&soc->mon_reap_timer);
11034 		dp_service_mon_rings(soc, DP_MON_REAP_BUDGET);
11035 	}
11036 
11037 	return QDF_STATUS_SUCCESS;
11038 }
11039 
11040 static QDF_STATUS dp_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
11041 {
11042 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11043 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11044 
11045 	if (qdf_unlikely(!pdev)) {
11046 		dp_err("pdev is NULL");
11047 		return QDF_STATUS_E_INVAL;
11048 	}
11049 
11050 	if (soc->intr_mode == DP_INTR_POLL)
11051 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
11052 
11053 	/* Start monitor reap timer */
11054 	if (((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) ||
11055 	     dp_is_enable_reap_timer_non_pkt(pdev)) &&
11056 	    soc->reap_timer_init)
11057 		qdf_timer_mod(&soc->mon_reap_timer,
11058 			      DP_INTR_POLL_TIMER_MS);
11059 
11060 	return QDF_STATUS_SUCCESS;
11061 }
11062 
11063 /**
11064  * dp_process_wow_ack_rsp() - process wow ack response
11065  * @soc_hdl: datapath soc handle
11066  * @pdev_id: data path pdev handle id
11067  *
11068  * Return: none
11069  */
11070 static void dp_process_wow_ack_rsp(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
11071 {
11072 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11073 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11074 
11075 	if (qdf_unlikely(!pdev)) {
11076 		dp_err("pdev is NULL");
11077 		return;
11078 	}
11079 
11080 	/*
11081 	 * As part of wow enable FW disables the mon status ring and in wow ack
11082 	 * response from FW reap mon status ring to make sure no packets pending
11083 	 * in the ring.
11084 	 */
11085 	if (((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) ||
11086 	     dp_is_enable_reap_timer_non_pkt(pdev)) &&
11087 	    soc->reap_timer_init) {
11088 		dp_service_mon_rings(soc, DP_MON_REAP_BUDGET);
11089 	}
11090 }
11091 
11092 /**
11093  * dp_process_target_suspend_req() - process target suspend request
11094  * @soc_hdl: datapath soc handle
11095  * @pdev_id: data path pdev handle id
11096  *
11097  * Return: none
11098  */
11099 static void dp_process_target_suspend_req(struct cdp_soc_t *soc_hdl,
11100 					  uint8_t pdev_id)
11101 {
11102 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11103 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11104 
11105 	if (qdf_unlikely(!pdev)) {
11106 		dp_err("pdev is NULL");
11107 		return;
11108 	}
11109 
11110 	/* Stop monitor reap timer and reap any pending frames in ring */
11111 	if (((pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) ||
11112 	     dp_is_enable_reap_timer_non_pkt(pdev)) &&
11113 	    soc->reap_timer_init) {
11114 		qdf_timer_sync_cancel(&soc->mon_reap_timer);
11115 		dp_service_mon_rings(soc, DP_MON_REAP_BUDGET);
11116 	}
11117 }
11118 
11119 static struct cdp_bus_ops dp_ops_bus = {
11120 	.bus_suspend = dp_bus_suspend,
11121 	.bus_resume = dp_bus_resume,
11122 	.process_wow_ack_rsp = dp_process_wow_ack_rsp,
11123 	.process_target_suspend_req = dp_process_target_suspend_req
11124 };
11125 #endif
11126 
11127 #ifdef DP_FLOW_CTL
11128 static struct cdp_throttle_ops dp_ops_throttle = {
11129 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
11130 };
11131 
11132 static struct cdp_cfg_ops dp_ops_cfg = {
11133 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
11134 };
11135 #endif
11136 
11137 #ifdef DP_PEER_EXTENDED_API
11138 static struct cdp_ocb_ops dp_ops_ocb = {
11139 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
11140 };
11141 
11142 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
11143 	.clear_stats = dp_txrx_clear_dump_stats,
11144 };
11145 
11146 static struct cdp_peer_ops dp_ops_peer = {
11147 	.register_peer = dp_register_peer,
11148 	.clear_peer = dp_clear_peer,
11149 	.find_peer_exist = dp_find_peer_exist,
11150 	.find_peer_exist_on_vdev = dp_find_peer_exist_on_vdev,
11151 	.find_peer_exist_on_other_vdev = dp_find_peer_exist_on_other_vdev,
11152 	.peer_state_update = dp_peer_state_update,
11153 	.get_vdevid = dp_get_vdevid,
11154 	.get_vdev_by_peer_addr = dp_get_vdev_by_peer_addr,
11155 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
11156 	.get_peer_state = dp_get_peer_state,
11157 };
11158 #endif
11159 
11160 static struct cdp_ops dp_txrx_ops = {
11161 	.cmn_drv_ops = &dp_ops_cmn,
11162 	.ctrl_ops = &dp_ops_ctrl,
11163 	.me_ops = &dp_ops_me,
11164 	.mon_ops = &dp_ops_mon,
11165 	.host_stats_ops = &dp_ops_host_stats,
11166 	.wds_ops = &dp_ops_wds,
11167 	.raw_ops = &dp_ops_raw,
11168 #ifdef PEER_FLOW_CONTROL
11169 	.pflow_ops = &dp_ops_pflow,
11170 #endif /* PEER_FLOW_CONTROL */
11171 #ifdef DP_PEER_EXTENDED_API
11172 	.misc_ops = &dp_ops_misc,
11173 	.ocb_ops = &dp_ops_ocb,
11174 	.peer_ops = &dp_ops_peer,
11175 	.mob_stats_ops = &dp_ops_mob_stats,
11176 #endif
11177 #ifdef DP_FLOW_CTL
11178 	.cfg_ops = &dp_ops_cfg,
11179 	.flowctl_ops = &dp_ops_flowctl,
11180 	.l_flowctl_ops = &dp_ops_l_flowctl,
11181 	.throttle_ops = &dp_ops_throttle,
11182 #endif
11183 #ifdef IPA_OFFLOAD
11184 	.ipa_ops = &dp_ops_ipa,
11185 #endif
11186 #ifdef DP_POWER_SAVE
11187 	.bus_ops = &dp_ops_bus,
11188 #endif
11189 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
11190 	.cfr_ops = &dp_ops_cfr,
11191 #endif
11192 };
11193 
11194 /*
11195  * dp_soc_set_txrx_ring_map()
11196  * @dp_soc: DP handler for soc
11197  *
11198  * Return: Void
11199  */
11200 void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
11201 {
11202 	uint32_t i;
11203 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
11204 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
11205 	}
11206 }
11207 
11208 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
11209 	defined(QCA_WIFI_QCA5018)
11210 /**
11211  * dp_soc_attach_wifi3() - Attach txrx SOC
11212  * @ctrl_psoc: Opaque SOC handle from control plane
11213  * @htc_handle: Opaque HTC handle
11214  * @hif_handle: Opaque HIF handle
11215  * @qdf_osdev: QDF device
11216  * @ol_ops: Offload Operations
11217  * @device_id: Device ID
11218  *
11219  * Return: DP SOC handle on success, NULL on failure
11220  */
11221 struct cdp_soc_t *
11222 dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
11223 		    struct hif_opaque_softc *hif_handle,
11224 		    HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
11225 		    struct ol_if_ops *ol_ops, uint16_t device_id)
11226 {
11227 	struct dp_soc *dp_soc = NULL;
11228 
11229 	dp_soc = dp_soc_attach(ctrl_psoc, hif_handle, htc_handle, qdf_osdev,
11230 			       ol_ops, device_id);
11231 	return dp_soc_to_cdp_soc_t(dp_soc);
11232 }
11233 
11234 static inline void dp_soc_set_def_pdev(struct dp_soc *soc)
11235 {
11236 	int lmac_id;
11237 
11238 	for (lmac_id = 0; lmac_id < MAX_NUM_LMAC_HW; lmac_id++) {
11239 		/*Set default host PDEV ID for lmac_id*/
11240 		wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
11241 				      INVALID_PDEV_ID, lmac_id);
11242 	}
11243 }
11244 
11245 /**
11246  * dp_soc_attach() - Attach txrx SOC
11247  * @ctrl_psoc: Opaque SOC handle from control plane
11248  * @hif_handle: Opaque HIF handle
11249  * @htc_handle: Opaque HTC handle
11250  * @qdf_osdev: QDF device
11251  * @ol_ops: Offload Operations
11252  * @device_id: Device ID
11253  *
11254  * Return: DP SOC handle on success, NULL on failure
11255  */
11256 static struct dp_soc *
11257 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
11258 	      struct hif_opaque_softc *hif_handle, HTC_HANDLE htc_handle,
11259 	      qdf_device_t qdf_osdev, struct ol_if_ops *ol_ops,
11260 	      uint16_t device_id)
11261 {
11262 	int int_ctx;
11263 	struct dp_soc *soc =  NULL;
11264 
11265 	if (!hif_handle) {
11266 		dp_err("HIF handle is NULL");
11267 		goto fail0;
11268 	}
11269 
11270 	soc = qdf_mem_malloc(sizeof(*soc));
11271 	if (!soc) {
11272 		dp_err("DP SOC memory allocation failed");
11273 		goto fail0;
11274 	}
11275 
11276 	soc->hif_handle = hif_handle;
11277 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
11278 	if (!soc->hal_soc)
11279 		goto fail1;
11280 
11281 	int_ctx = 0;
11282 	soc->device_id = device_id;
11283 	soc->cdp_soc.ops = &dp_txrx_ops;
11284 	soc->cdp_soc.ol_ops = ol_ops;
11285 	soc->ctrl_psoc = ctrl_psoc;
11286 	soc->osdev = qdf_osdev;
11287 	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
11288 
11289 	/* Reset wbm sg list and flags */
11290 	dp_rx_wbm_sg_list_reset(soc);
11291 
11292 	dp_soc_rx_history_attach(soc);
11293 	wlan_set_srng_cfg(&soc->wlan_srng_cfg);
11294 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
11295 	if (!soc->wlan_cfg_ctx) {
11296 		dp_err("wlan_cfg_ctx failed\n");
11297 		goto fail1;
11298 	}
11299 
11300 	dp_soc_cfg_attach(soc);
11301 
11302 	if (dp_hw_link_desc_pool_banks_alloc(soc, WLAN_INVALID_PDEV_ID)) {
11303 		dp_err("failed to allocate link desc pool banks");
11304 		goto fail2;
11305 	}
11306 
11307 	if (dp_hw_link_desc_ring_alloc(soc)) {
11308 		dp_err("failed to allocate link_desc_ring");
11309 		goto fail3;
11310 	}
11311 
11312 	if (dp_soc_srng_alloc(soc)) {
11313 		dp_err("failed to allocate soc srng rings");
11314 		goto fail4;
11315 	}
11316 
11317 	if (dp_soc_tx_desc_sw_pools_alloc(soc)) {
11318 		dp_err("dp_soc_tx_desc_sw_pools_alloc failed");
11319 		goto fail5;
11320 	}
11321 
11322 	dp_soc_set_interrupt_mode(soc);
11323 	dp_soc_set_def_pdev(soc);
11324 
11325 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
11326 		qdf_dma_mem_stats_read(),
11327 		qdf_heap_mem_stats_read(),
11328 		qdf_skb_mem_stats_read());
11329 
11330 	return soc;
11331 fail5:
11332 	dp_soc_srng_free(soc);
11333 fail4:
11334 	dp_hw_link_desc_ring_free(soc);
11335 fail3:
11336 	dp_hw_link_desc_pool_banks_free(soc, WLAN_INVALID_PDEV_ID);
11337 fail2:
11338 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
11339 fail1:
11340 	qdf_mem_free(soc);
11341 fail0:
11342 	return NULL;
11343 }
11344 
11345 /**
11346  * dp_soc_init() - Initialize txrx SOC
11347  * @dp_soc: Opaque DP SOC handle
11348  * @htc_handle: Opaque HTC handle
11349  * @hif_handle: Opaque HIF handle
11350  *
11351  * Return: DP SOC handle on success, NULL on failure
11352  */
11353 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
11354 		  struct hif_opaque_softc *hif_handle)
11355 {
11356 	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
11357 	bool is_monitor_mode = false;
11358 	struct hal_reo_params reo_params;
11359 	uint8_t i;
11360 
11361 	wlan_minidump_log(soc, sizeof(*soc), soc->ctrl_psoc,
11362 			  WLAN_MD_DP_SOC, "dp_soc");
11363 
11364 	htt_soc = htt_soc_attach(soc, htc_handle);
11365 	if (!htt_soc)
11366 		goto fail0;
11367 
11368 	soc->htt_handle = htt_soc;
11369 
11370 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
11371 		goto fail1;
11372 
11373 	htt_set_htc_handle(htt_soc, htc_handle);
11374 	soc->hif_handle = hif_handle;
11375 
11376 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
11377 	if (!soc->hal_soc)
11378 		goto fail2;
11379 
11380 	dp_soc_cfg_init(soc);
11381 
11382 	/* Reset/Initialize wbm sg list and flags */
11383 	dp_rx_wbm_sg_list_reset(soc);
11384 
11385 	/* Note: Any SRNG ring initialization should happen only after
11386 	 * Interrupt mode is set and followed by filling up the
11387 	 * interrupt mask. IT SHOULD ALWAYS BE IN THIS ORDER.
11388 	 */
11389 	dp_soc_set_interrupt_mode(soc);
11390 	if (soc->cdp_soc.ol_ops->get_con_mode &&
11391 	    soc->cdp_soc.ol_ops->get_con_mode() ==
11392 	    QDF_GLOBAL_MONITOR_MODE)
11393 		is_monitor_mode = true;
11394 
11395 	wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, soc->intr_mode,
11396 				     is_monitor_mode);
11397 
11398 	/* initialize WBM_IDLE_LINK ring */
11399 	if (dp_hw_link_desc_ring_init(soc)) {
11400 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
11401 			  FL("dp_hw_link_desc_ring_init failed"));
11402 		goto fail3;
11403 	}
11404 
11405 	dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
11406 
11407 	if (dp_soc_srng_init(soc)) {
11408 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
11409 			  FL("dp_soc_srng_init failed"));
11410 		goto fail4;
11411 	}
11412 
11413 	if (htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
11414 			       htt_get_htc_handle(htt_soc),
11415 			       soc->hal_soc, soc->osdev) == NULL)
11416 		goto fail5;
11417 
11418 	/* Initialize descriptors in TCL Rings */
11419 	for (i = 0; i < soc->num_tcl_data_rings; i++) {
11420 		hal_tx_init_data_ring(soc->hal_soc,
11421 				      soc->tcl_data_ring[i].hal_srng);
11422 	}
11423 
11424 	if (dp_soc_tx_desc_sw_pools_init(soc)) {
11425 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
11426 			  FL("dp_tx_soc_attach failed"));
11427 		goto fail6;
11428 	}
11429 
11430 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
11431 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
11432 	soc->cce_disable = false;
11433 
11434 	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
11435 	qdf_spinlock_create(&soc->vdev_map_lock);
11436 	qdf_atomic_init(&soc->num_tx_outstanding);
11437 	qdf_atomic_init(&soc->num_tx_exception);
11438 	soc->num_tx_allowed =
11439 		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);
11440 
11441 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
11442 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
11443 				CDP_CFG_MAX_PEER_ID);
11444 
11445 		if (ret != -EINVAL)
11446 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
11447 
11448 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
11449 				CDP_CFG_CCE_DISABLE);
11450 		if (ret == 1)
11451 			soc->cce_disable = true;
11452 	}
11453 
11454 	/*
11455 	 * Skip registering hw ring interrupts for WMAC2 on IPQ6018
11456 	 * and IPQ5018 WMAC2 is not there in these platforms.
11457 	 */
11458 	if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018 ||
11459 	    soc->disable_mac2_intr)
11460 		dp_soc_disable_unused_mac_intr_mask(soc, 0x2);
11461 
11462 	/*
11463 	 * Skip registering hw ring interrupts for WMAC1 on IPQ5018
11464 	 * WMAC1 is not there in this platform.
11465 	 */
11466 	if (soc->disable_mac1_intr)
11467 		dp_soc_disable_unused_mac_intr_mask(soc, 0x1);
11468 
11469 	/* Setup HW REO */
11470 	qdf_mem_zero(&reo_params, sizeof(reo_params));
11471 
11472 	if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
11473 		/*
11474 		 * Reo ring remap is not required if both radios
11475 		 * are offloaded to NSS
11476 		 */
11477 		if (dp_reo_remap_config(soc,
11478 					&reo_params.remap1,
11479 					&reo_params.remap2))
11480 			reo_params.rx_hash_enabled = true;
11481 		else
11482 			reo_params.rx_hash_enabled = false;
11483 	}
11484 
11485 	/* setup the global rx defrag waitlist */
11486 	TAILQ_INIT(&soc->rx.defrag.waitlist);
11487 	soc->rx.defrag.timeout_ms =
11488 		wlan_cfg_get_rx_defrag_min_timeout(soc->wlan_cfg_ctx);
11489 	soc->rx.defrag.next_flush_ms = 0;
11490 	soc->rx.flags.defrag_timeout_check =
11491 		wlan_cfg_get_defrag_timeout_check(soc->wlan_cfg_ctx);
11492 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
11493 
11494 	/*
11495 	 * set the fragment destination ring
11496 	 */
11497 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
11498 
11499 	hal_reo_setup(soc->hal_soc, &reo_params);
11500 
11501 	hal_reo_set_err_dst_remap(soc->hal_soc);
11502 
11503 	qdf_atomic_set(&soc->cmn_init_done, 1);
11504 
11505 	dp_soc_wds_attach(soc);
11506 
11507 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
11508 
11509 	qdf_spinlock_create(&soc->ast_lock);
11510 
11511 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
11512 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
11513 	INIT_RX_HW_STATS_LOCK(soc);
11514 
11515 	/* fill the tx/rx cpu ring map*/
11516 	dp_soc_set_txrx_ring_map(soc);
11517 
11518 	TAILQ_INIT(&soc->inactive_peer_list);
11519 	qdf_spinlock_create(&soc->inactive_peer_list_lock);
11520 	TAILQ_INIT(&soc->inactive_vdev_list);
11521 	qdf_spinlock_create(&soc->inactive_vdev_list_lock);
11522 	qdf_spinlock_create(&soc->htt_stats.lock);
11523 	/* initialize work queue for stats processing */
11524 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
11525 
11526 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
11527 		qdf_dma_mem_stats_read(),
11528 		qdf_heap_mem_stats_read(),
11529 		qdf_skb_mem_stats_read());
11530 
11531 	return soc;
11532 fail6:
11533 	htt_soc_htc_dealloc(soc->htt_handle);
11534 fail5:
11535 	dp_soc_srng_deinit(soc);
11536 fail4:
11537 	dp_hw_link_desc_ring_deinit(soc);
11538 fail3:
11539 	dp_hw_link_desc_ring_free(soc);
11540 fail2:
11541 	htt_htc_pkt_pool_free(htt_soc);
11542 fail1:
11543 	htt_soc_detach(htt_soc);
11544 fail0:
11545 	return NULL;
11546 }
11547 
11548 /**
11549  * dp_soc_init_wifi3() - Initialize txrx SOC
11550  * @soc: Opaque DP SOC handle
11551  * @ctrl_psoc: Opaque SOC handle from control plane(Unused)
11552  * @hif_handle: Opaque HIF handle
11553  * @htc_handle: Opaque HTC handle
11554  * @qdf_osdev: QDF device (Unused)
11555  * @ol_ops: Offload Operations (Unused)
11556  * @device_id: Device ID (Unused)
11557  *
11558  * Return: DP SOC handle on success, NULL on failure
11559  */
11560 void *dp_soc_init_wifi3(struct cdp_soc_t *soc,
11561 			struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
11562 			struct hif_opaque_softc *hif_handle,
11563 			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
11564 			struct ol_if_ops *ol_ops, uint16_t device_id)
11565 {
11566 	return dp_soc_init((struct dp_soc *)soc, htc_handle, hif_handle);
11567 }
11568 
11569 #endif
11570 
11571 /*
11572  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
11573  *
11574  * @soc: handle to DP soc
11575  * @mac_id: MAC id
11576  *
11577  * Return: Return pdev corresponding to MAC
11578  */
11579 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
11580 {
11581 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
11582 		return (mac_id < MAX_PDEV_CNT) ? soc->pdev_list[mac_id] : NULL;
11583 
11584 	/* Typically for MCL as there only 1 PDEV*/
11585 	return soc->pdev_list[0];
11586 }
11587 
11588 /*
11589  * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
11590  * @soc:		DP SoC context
11591  * @max_mac_rings:	No of MAC rings
11592  *
11593  * Return: None
11594  */
11595 void dp_is_hw_dbs_enable(struct dp_soc *soc,
11596 				int *max_mac_rings)
11597 {
11598 	bool dbs_enable = false;
11599 	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
11600 		dbs_enable = soc->cdp_soc.ol_ops->
11601 		is_hw_dbs_2x2_capable((void *)soc->ctrl_psoc);
11602 
11603 	*max_mac_rings = (dbs_enable)?(*max_mac_rings):1;
11604 }
11605 
11606 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
11607 /*
11608  * dp_cfr_filter() -  Configure HOST RX monitor status ring for CFR
11609  * @soc_hdl: Datapath soc handle
11610  * @pdev_id: id of data path pdev handle
11611  * @enable: Enable/Disable CFR
11612  * @filter_val: Flag to select Filter for monitor mode
11613  */
11614 static void dp_cfr_filter(struct cdp_soc_t *soc_hdl,
11615 			  uint8_t pdev_id,
11616 			  bool enable,
11617 			  struct cdp_monitor_filter *filter_val)
11618 {
11619 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11620 	struct dp_pdev *pdev = NULL;
11621 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
11622 	int max_mac_rings;
11623 	uint8_t mac_id = 0;
11624 
11625 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11626 	if (!pdev) {
11627 		dp_err("pdev is NULL");
11628 		return;
11629 	}
11630 
11631 	if (pdev->monitor_vdev) {
11632 		dp_info("No action is needed since monitor mode is enabled\n");
11633 		return;
11634 	}
11635 	soc = pdev->soc;
11636 	pdev->cfr_rcc_mode = false;
11637 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
11638 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
11639 
11640 	dp_debug("Max_mac_rings %d", max_mac_rings);
11641 	dp_info("enable : %d, mode: 0x%x", enable, filter_val->mode);
11642 
11643 	if (enable) {
11644 		pdev->cfr_rcc_mode = true;
11645 
11646 		htt_tlv_filter.ppdu_start = 1;
11647 		htt_tlv_filter.ppdu_end = 1;
11648 		htt_tlv_filter.ppdu_end_user_stats = 1;
11649 		htt_tlv_filter.ppdu_end_user_stats_ext = 1;
11650 		htt_tlv_filter.ppdu_end_status_done = 1;
11651 		htt_tlv_filter.mpdu_start = 1;
11652 		htt_tlv_filter.offset_valid = false;
11653 
11654 		htt_tlv_filter.enable_fp =
11655 			(filter_val->mode & MON_FILTER_PASS) ? 1 : 0;
11656 		htt_tlv_filter.enable_md = 0;
11657 		htt_tlv_filter.enable_mo =
11658 			(filter_val->mode & MON_FILTER_OTHER) ? 1 : 0;
11659 		htt_tlv_filter.fp_mgmt_filter = filter_val->fp_mgmt;
11660 		htt_tlv_filter.fp_ctrl_filter = filter_val->fp_ctrl;
11661 		htt_tlv_filter.fp_data_filter = filter_val->fp_data;
11662 		htt_tlv_filter.mo_mgmt_filter = filter_val->mo_mgmt;
11663 		htt_tlv_filter.mo_ctrl_filter = filter_val->mo_ctrl;
11664 		htt_tlv_filter.mo_data_filter = filter_val->mo_data;
11665 	}
11666 
11667 	for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
11668 		int mac_for_pdev =
11669 			dp_get_mac_id_for_pdev(mac_id,
11670 					       pdev->pdev_id);
11671 
11672 		htt_h2t_rx_ring_cfg(soc->htt_handle,
11673 				    mac_for_pdev,
11674 				    soc->rxdma_mon_status_ring[mac_id]
11675 				    .hal_srng,
11676 				    RXDMA_MONITOR_STATUS,
11677 				    RX_MON_STATUS_BUF_SIZE,
11678 				    &htt_tlv_filter);
11679 	}
11680 }
11681 
11682 /**
11683  * dp_get_cfr_rcc() - get cfr rcc config
11684  * @soc_hdl: Datapath soc handle
11685  * @pdev_id: id of objmgr pdev
11686  *
11687  * Return: true/false based on cfr mode setting
11688  */
11689 static
11690 bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
11691 {
11692 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11693 	struct dp_pdev *pdev = NULL;
11694 
11695 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11696 	if (!pdev) {
11697 		dp_err("pdev is NULL");
11698 		return false;
11699 	}
11700 
11701 	return pdev->cfr_rcc_mode;
11702 }
11703 
11704 /**
11705  * dp_set_cfr_rcc() - enable/disable cfr rcc config
11706  * @soc_hdl: Datapath soc handle
11707  * @pdev_id: id of objmgr pdev
11708  * @enable: Enable/Disable cfr rcc mode
11709  *
11710  * Return: none
11711  */
11712 static
11713 void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, bool enable)
11714 {
11715 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11716 	struct dp_pdev *pdev = NULL;
11717 
11718 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11719 	if (!pdev) {
11720 		dp_err("pdev is NULL");
11721 		return;
11722 	}
11723 
11724 	pdev->cfr_rcc_mode = enable;
11725 }
11726 
11727 /*
11728  * dp_get_cfr_dbg_stats - Get the debug statistics for CFR
11729  * @soc_hdl: Datapath soc handle
11730  * @pdev_id: id of data path pdev handle
11731  * @cfr_rcc_stats: CFR RCC debug statistics buffer
11732  *
11733  * Return: none
11734  */
11735 static inline void
11736 dp_get_cfr_dbg_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
11737 		     struct cdp_cfr_rcc_stats *cfr_rcc_stats)
11738 {
11739 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11740 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11741 
11742 	if (!pdev) {
11743 		dp_err("Invalid pdev");
11744 		return;
11745 	}
11746 
11747 	qdf_mem_copy(cfr_rcc_stats, &pdev->stats.rcc,
11748 		     sizeof(struct cdp_cfr_rcc_stats));
11749 }
11750 
11751 /*
11752  * dp_clear_cfr_dbg_stats - Clear debug statistics for CFR
11753  * @soc_hdl: Datapath soc handle
11754  * @pdev_id: id of data path pdev handle
11755  *
11756  * Return: none
11757  */
11758 static void dp_clear_cfr_dbg_stats(struct cdp_soc_t *soc_hdl,
11759 				   uint8_t pdev_id)
11760 {
11761 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11762 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11763 
11764 	if (!pdev) {
11765 		dp_err("dp pdev is NULL");
11766 		return;
11767 	}
11768 
11769 	qdf_mem_zero(&pdev->stats.rcc, sizeof(pdev->stats.rcc));
11770 }
11771 
11772 /*
11773  * dp_enable_mon_reap_timer() - enable/disable reap timer
11774  * @soc_hdl: Datapath soc handle
11775  * @pdev_id: id of objmgr pdev
11776  * @enable: Enable/Disable reap timer of monitor status ring
11777  *
11778  * Return: none
11779  */
11780 static void
11781 dp_enable_mon_reap_timer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
11782 			 bool enable)
11783 {
11784 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11785 	struct dp_pdev *pdev = NULL;
11786 
11787 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11788 	if (!pdev) {
11789 		dp_err("pdev is NULL");
11790 		return;
11791 	}
11792 
11793 	pdev->enable_reap_timer_non_pkt = enable;
11794 	if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
11795 		dp_debug("pktlog enabled %d", pdev->rx_pktlog_mode);
11796 		return;
11797 	}
11798 
11799 	if (!soc->reap_timer_init) {
11800 		dp_err("reap timer not init");
11801 		return;
11802 	}
11803 
11804 	if (enable)
11805 		qdf_timer_mod(&soc->mon_reap_timer,
11806 			      DP_INTR_POLL_TIMER_MS);
11807 	else
11808 		qdf_timer_sync_cancel(&soc->mon_reap_timer);
11809 }
11810 #endif
11811 
11812 /*
11813  * dp_is_enable_reap_timer_non_pkt() - check if mon reap timer is
11814  * enabled by non-pkt log or not
11815  * @pdev: point to dp pdev
11816  *
11817  * Return: true if mon reap timer is enabled by non-pkt log
11818  */
11819 static bool dp_is_enable_reap_timer_non_pkt(struct dp_pdev *pdev)
11820 {
11821 	if (!pdev) {
11822 		dp_err("null pdev");
11823 		return false;
11824 	}
11825 
11826 	return pdev->enable_reap_timer_non_pkt;
11827 }
11828 
11829 /*
11830 * dp_set_pktlog_wifi3() - attach txrx vdev
11831 * @pdev: Datapath PDEV handle
11832 * @event: which event's notifications are being subscribed to
11833 * @enable: WDI event subscribe or not. (True or False)
11834 *
11835 * Return: Success, NULL on failure
11836 */
11837 #ifdef WDI_EVENT_ENABLE
11838 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
11839 		bool enable)
11840 {
11841 	struct dp_soc *soc = NULL;
11842 	int max_mac_rings = wlan_cfg_get_num_mac_rings
11843 					(pdev->wlan_cfg_ctx);
11844 	uint8_t mac_id = 0;
11845 
11846 	soc = pdev->soc;
11847 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
11848 
11849 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
11850 			FL("Max_mac_rings %d "),
11851 			max_mac_rings);
11852 
11853 	if (enable) {
11854 		switch (event) {
11855 		case WDI_EVENT_RX_DESC:
11856 			if (pdev->monitor_vdev) {
11857 				/* Nothing needs to be done if monitor mode is
11858 				 * enabled
11859 				 */
11860 				return 0;
11861 			}
11862 
11863 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
11864 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
11865 				dp_mon_filter_setup_rx_pkt_log_full(pdev);
11866 				if (dp_mon_filter_update(pdev) !=
11867 						QDF_STATUS_SUCCESS) {
11868 					QDF_TRACE(QDF_MODULE_ID_DP,
11869 						  QDF_TRACE_LEVEL_ERROR,
11870 						  FL("Pktlog full filters set failed"));
11871 					dp_mon_filter_reset_rx_pkt_log_full(pdev);
11872 					pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
11873 					return 0;
11874 				}
11875 
11876 				if (soc->reap_timer_init &&
11877 				    (!dp_is_enable_reap_timer_non_pkt(pdev)))
11878 					qdf_timer_mod(&soc->mon_reap_timer,
11879 					DP_INTR_POLL_TIMER_MS);
11880 			}
11881 			break;
11882 
11883 		case WDI_EVENT_LITE_RX:
11884 			if (pdev->monitor_vdev) {
11885 				/* Nothing needs to be done if monitor mode is
11886 				 * enabled
11887 				 */
11888 				return 0;
11889 			}
11890 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
11891 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
11892 
11893 				/*
11894 				 * Set the packet log lite mode filter.
11895 				 */
11896 				dp_mon_filter_setup_rx_pkt_log_lite(pdev);
11897 				if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
11898 					QDF_TRACE(QDF_MODULE_ID_DP,
11899 						  QDF_TRACE_LEVEL_ERROR,
11900 						  FL("Pktlog lite filters set failed"));
11901 					dp_mon_filter_reset_rx_pkt_log_lite(pdev);
11902 					pdev->rx_pktlog_mode =
11903 						DP_RX_PKTLOG_DISABLED;
11904 					return 0;
11905 				}
11906 
11907 				if (soc->reap_timer_init &&
11908 				    (!dp_is_enable_reap_timer_non_pkt(pdev)))
11909 					qdf_timer_mod(&soc->mon_reap_timer,
11910 					DP_INTR_POLL_TIMER_MS);
11911 			}
11912 			break;
11913 
11914 		case WDI_EVENT_LITE_T2H:
11915 			if (pdev->monitor_vdev) {
11916 				/* Nothing needs to be done if monitor mode is
11917 				 * enabled
11918 				 */
11919 				return 0;
11920 			}
11921 
11922 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
11923 				int mac_for_pdev = dp_get_mac_id_for_pdev(
11924 							mac_id,	pdev->pdev_id);
11925 
11926 				pdev->pktlog_ppdu_stats = true;
11927 				dp_h2t_cfg_stats_msg_send(pdev,
11928 					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
11929 					mac_for_pdev);
11930 			}
11931 			break;
11932 
11933 		default:
11934 			/* Nothing needs to be done for other pktlog types */
11935 			break;
11936 		}
11937 	} else {
11938 		switch (event) {
11939 		case WDI_EVENT_RX_DESC:
11940 		case WDI_EVENT_LITE_RX:
11941 			if (pdev->monitor_vdev) {
11942 				/* Nothing needs to be done if monitor mode is
11943 				 * enabled
11944 				 */
11945 				return 0;
11946 			}
11947 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
11948 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
11949 				dp_mon_filter_reset_rx_pkt_log_full(pdev);
11950 				if (dp_mon_filter_update(pdev) !=
11951 						QDF_STATUS_SUCCESS) {
11952 					QDF_TRACE(QDF_MODULE_ID_DP,
11953 						  QDF_TRACE_LEVEL_ERROR,
11954 						  FL("Pktlog filters reset failed"));
11955 					return 0;
11956 				}
11957 
11958 				dp_mon_filter_reset_rx_pkt_log_lite(pdev);
11959 				if (dp_mon_filter_update(pdev) !=
11960 						QDF_STATUS_SUCCESS) {
11961 					QDF_TRACE(QDF_MODULE_ID_DP,
11962 						  QDF_TRACE_LEVEL_ERROR,
11963 						  FL("Pktlog filters reset failed"));
11964 					return 0;
11965 				}
11966 
11967 				if (soc->reap_timer_init &&
11968 				    (!dp_is_enable_reap_timer_non_pkt(pdev)))
11969 					qdf_timer_stop(&soc->mon_reap_timer);
11970 			}
11971 			break;
11972 		case WDI_EVENT_LITE_T2H:
11973 			if (pdev->monitor_vdev) {
11974 				/* Nothing needs to be done if monitor mode is
11975 				 * enabled
11976 				 */
11977 				return 0;
11978 			}
11979 			/* To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in FW
11980 			 * passing value 0. Once these macros will define in htt
11981 			 * header file will use proper macros
11982 			*/
11983 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
11984 				int mac_for_pdev =
11985 						dp_get_mac_id_for_pdev(mac_id,
11986 								pdev->pdev_id);
11987 
11988 				pdev->pktlog_ppdu_stats = false;
11989 				if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
11990 					dp_h2t_cfg_stats_msg_send(pdev, 0,
11991 								mac_for_pdev);
11992 				} else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
11993 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
11994 								mac_for_pdev);
11995 				} else if (pdev->enhanced_stats_en) {
11996 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
11997 								mac_for_pdev);
11998 				}
11999 			}
12000 
12001 			break;
12002 		default:
12003 			/* Nothing needs to be done for other pktlog types */
12004 			break;
12005 		}
12006 	}
12007 	return 0;
12008 }
12009 #endif
12010 
12011 /**
12012  * dp_bucket_index() - Return index from array
12013  *
12014  * @delay: delay measured
12015  * @array: array used to index corresponding delay
12016  *
12017  * Return: index
12018  */
12019 static uint8_t dp_bucket_index(uint32_t delay, uint16_t *array)
12020 {
12021 	uint8_t i = CDP_DELAY_BUCKET_0;
12022 
12023 	for (; i < CDP_DELAY_BUCKET_MAX - 1; i++) {
12024 		if (delay >= array[i] && delay <= array[i + 1])
12025 			return i;
12026 	}
12027 
12028 	return (CDP_DELAY_BUCKET_MAX - 1);
12029 }
12030 
12031 /**
12032  * dp_fill_delay_buckets() - Fill delay statistics bucket for each
12033  *				type of delay
12034  *
12035  * @pdev: pdev handle
12036  * @delay: delay in ms
12037  * @tid: tid value
12038  * @mode: type of tx delay mode
12039  * @ring_id: ring number
12040  * Return: pointer to cdp_delay_stats structure
12041  */
12042 static struct cdp_delay_stats *
12043 dp_fill_delay_buckets(struct dp_pdev *pdev, uint32_t delay,
12044 		      uint8_t tid, uint8_t mode, uint8_t ring_id)
12045 {
12046 	uint8_t delay_index = 0;
12047 	struct cdp_tid_tx_stats *tstats =
12048 		&pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
12049 	struct cdp_tid_rx_stats *rstats =
12050 		&pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
12051 	/*
12052 	 * cdp_fw_to_hw_delay_range
12053 	 * Fw to hw delay ranges in milliseconds
12054 	 */
12055 	uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
12056 		0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
12057 
12058 	/*
12059 	 * cdp_sw_enq_delay_range
12060 	 * Software enqueue delay ranges in milliseconds
12061 	 */
12062 	uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
12063 		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
12064 
12065 	/*
12066 	 * cdp_intfrm_delay_range
12067 	 * Interframe delay ranges in milliseconds
12068 	 */
12069 	uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
12070 		0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};
12071 
12072 	/*
12073 	 * Update delay stats in proper bucket
12074 	 */
12075 	switch (mode) {
12076 	/* Software Enqueue delay ranges */
12077 	case CDP_DELAY_STATS_SW_ENQ:
12078 
12079 		delay_index = dp_bucket_index(delay, cdp_sw_enq_delay);
12080 		tstats->swq_delay.delay_bucket[delay_index]++;
12081 		return &tstats->swq_delay;
12082 
12083 	/* Tx Completion delay ranges */
12084 	case CDP_DELAY_STATS_FW_HW_TRANSMIT:
12085 
12086 		delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay);
12087 		tstats->hwtx_delay.delay_bucket[delay_index]++;
12088 		return &tstats->hwtx_delay;
12089 
12090 	/* Interframe tx delay ranges */
12091 	case CDP_DELAY_STATS_TX_INTERFRAME:
12092 
12093 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
12094 		tstats->intfrm_delay.delay_bucket[delay_index]++;
12095 		return &tstats->intfrm_delay;
12096 
12097 	/* Interframe rx delay ranges */
12098 	case CDP_DELAY_STATS_RX_INTERFRAME:
12099 
12100 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
12101 		rstats->intfrm_delay.delay_bucket[delay_index]++;
12102 		return &rstats->intfrm_delay;
12103 
12104 	/* Ring reap to indication to network stack */
12105 	case CDP_DELAY_STATS_REAP_STACK:
12106 
12107 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
12108 		rstats->to_stack_delay.delay_bucket[delay_index]++;
12109 		return &rstats->to_stack_delay;
12110 	default:
12111 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
12112 			  "%s Incorrect delay mode: %d", __func__, mode);
12113 	}
12114 
12115 	return NULL;
12116 }
12117 
12118 /**
12119  * dp_update_delay_stats() - Update delay statistics in structure
12120  *				and fill min, max and avg delay
12121  *
12122  * @pdev: pdev handle
12123  * @delay: delay in ms
12124  * @tid: tid value
12125  * @mode: type of tx delay mode
12126  * @ring id: ring number
12127  * Return: none
12128  */
12129 void dp_update_delay_stats(struct dp_pdev *pdev, uint32_t delay,
12130 			   uint8_t tid, uint8_t mode, uint8_t ring_id)
12131 {
12132 	struct cdp_delay_stats *dstats = NULL;
12133 
12134 	/*
12135 	 * Delay ranges are different for different delay modes
12136 	 * Get the correct index to update delay bucket
12137 	 */
12138 	dstats = dp_fill_delay_buckets(pdev, delay, tid, mode, ring_id);
12139 	if (qdf_unlikely(!dstats))
12140 		return;
12141 
12142 	if (delay != 0) {
12143 		/*
12144 		 * Compute minimum,average and maximum
12145 		 * delay
12146 		 */
12147 		if (delay < dstats->min_delay)
12148 			dstats->min_delay = delay;
12149 
12150 		if (delay > dstats->max_delay)
12151 			dstats->max_delay = delay;
12152 
12153 		/*
12154 		 * Average over delay measured till now
12155 		 */
12156 		if (!dstats->avg_delay)
12157 			dstats->avg_delay = delay;
12158 		else
12159 			dstats->avg_delay = ((delay + dstats->avg_delay) / 2);
12160 	}
12161 }
12162 
12163 /**
12164  * dp_get_peer_mac_list(): function to get peer mac list of vdev
12165  * @soc: Datapath soc handle
12166  * @vdev_id: vdev id
12167  * @newmac: Table of the clients mac
12168  * @mac_cnt: No. of MACs required
12169  *
12170  * return: no of clients
12171  */
12172 uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
12173 			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
12174 			      u_int16_t mac_cnt)
12175 {
12176 	struct dp_soc *dp_soc = (struct dp_soc *)soc;
12177 	struct dp_vdev *vdev =
12178 		dp_vdev_get_ref_by_id(dp_soc, vdev_id, DP_MOD_ID_CDP);
12179 	struct dp_peer *peer;
12180 	uint16_t new_mac_cnt = 0;
12181 
12182 	if (!vdev)
12183 		return new_mac_cnt;
12184 
12185 	qdf_spin_lock_bh(&vdev->peer_list_lock);
12186 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
12187 		if (peer->bss_peer)
12188 			continue;
12189 		if (new_mac_cnt < mac_cnt) {
12190 			WLAN_ADDR_COPY(newmac[new_mac_cnt], peer->mac_addr.raw);
12191 			new_mac_cnt++;
12192 		}
12193 	}
12194 	qdf_spin_unlock_bh(&vdev->peer_list_lock);
12195 	dp_vdev_unref_delete(dp_soc, vdev, DP_MOD_ID_CDP);
12196 	return new_mac_cnt;
12197 }
12198 
12199 /**
12200  * dp_pdev_srng_deinit() - de-initialize all pdev srng ring including
12201  *			   monitor rings
12202  * @pdev: Datapath pdev handle
12203  *
12204  */
12205 static void dp_pdev_srng_deinit(struct dp_pdev *pdev)
12206 {
12207 	struct dp_soc *soc = pdev->soc;
12208 	uint8_t i;
12209 
12210 	dp_srng_deinit(soc, &soc->rx_refill_buf_ring[pdev->lmac_id], RXDMA_BUF,
12211 		       pdev->lmac_id);
12212 
12213 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
12214 		dp_deinit_tx_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
12215 
12216 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
12217 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, pdev->pdev_id);
12218 
12219 		wlan_minidump_remove(soc->rxdma_err_dst_ring[lmac_id].base_vaddr_unaligned);
12220 		dp_srng_deinit(soc, &soc->rxdma_err_dst_ring[lmac_id],
12221 			       RXDMA_DST, lmac_id);
12222 	}
12223 
12224 	dp_mon_rings_deinit(pdev);
12225 }
12226 
12227 /**
12228  * dp_pdev_srng_init() - initialize all pdev srng rings including
12229  *			   monitor rings
12230  * @pdev: Datapath pdev handle
12231  *
12232  * return: QDF_STATUS_SUCCESS on success
12233  *	   QDF_STATUS_E_NOMEM on failure
12234  */
12235 static QDF_STATUS dp_pdev_srng_init(struct dp_pdev *pdev)
12236 {
12237 	struct dp_soc *soc = pdev->soc;
12238 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
12239 	uint32_t i;
12240 
12241 	soc_cfg_ctx = soc->wlan_cfg_ctx;
12242 
12243 	if (dp_srng_init(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
12244 			 RXDMA_BUF, 0, pdev->lmac_id)) {
12245 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12246 			  FL("dp_srng_init failed rx refill ring"));
12247 		goto fail1;
12248 	}
12249 
12250 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
12251 		if (dp_init_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
12252 			goto fail1;
12253 	}
12254 
12255 	if (dp_mon_rings_init(soc, pdev)) {
12256 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12257 			  FL("MONITOR rings setup failed"));
12258 		goto fail1;
12259 	}
12260 
12261 	/* LMAC RxDMA to SW Rings configuration */
12262 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
12263 		/* Only valid for MCL */
12264 		pdev = soc->pdev_list[0];
12265 
12266 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
12267 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, pdev->pdev_id);
12268 		struct dp_srng *srng = &soc->rxdma_err_dst_ring[lmac_id];
12269 
12270 		if (srng->hal_srng)
12271 			continue;
12272 
12273 		if (dp_srng_init(soc, srng, RXDMA_DST, 0, lmac_id)) {
12274 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12275 				  FL(RNG_ERR "rxdma_err_dst_ring"));
12276 			goto fail1;
12277 		}
12278 		wlan_minidump_log(soc->rxdma_err_dst_ring[lmac_id].base_vaddr_unaligned,
12279 				  soc->rxdma_err_dst_ring[lmac_id].alloc_size,
12280 				  soc->ctrl_psoc,
12281 				  WLAN_MD_DP_SRNG_RXDMA_ERR_DST,
12282 				  "rxdma_err_dst");
12283 	}
12284 	return QDF_STATUS_SUCCESS;
12285 
12286 fail1:
12287 	dp_pdev_srng_deinit(pdev);
12288 	return QDF_STATUS_E_NOMEM;
12289 }
12290 
12291 /**
12292  * dp_pdev_srng_free() - free all pdev srng rings including monitor rings
12293  * pdev: Datapath pdev handle
12294  *
12295  */
12296 static void dp_pdev_srng_free(struct dp_pdev *pdev)
12297 {
12298 	struct dp_soc *soc = pdev->soc;
12299 	uint8_t i;
12300 
12301 	dp_srng_free(soc, &soc->rx_refill_buf_ring[pdev->lmac_id]);
12302 	dp_mon_rings_free(pdev);
12303 
12304 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
12305 		dp_free_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX);
12306 
12307 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
12308 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, pdev->pdev_id);
12309 
12310 		dp_srng_free(soc, &soc->rxdma_err_dst_ring[lmac_id]);
12311 	}
12312 }
12313 
12314 /**
12315  * dp_pdev_srng_alloc() - allocate memory for all pdev srng rings including
12316  *			  monitor rings
12317  * pdev: Datapath pdev handle
12318  *
12319  * return: QDF_STATUS_SUCCESS on success
12320  *	   QDF_STATUS_E_NOMEM on failure
12321  */
12322 static QDF_STATUS dp_pdev_srng_alloc(struct dp_pdev *pdev)
12323 {
12324 	struct dp_soc *soc = pdev->soc;
12325 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
12326 	uint32_t ring_size;
12327 	uint32_t i;
12328 
12329 	soc_cfg_ctx = soc->wlan_cfg_ctx;
12330 
12331 	ring_size = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
12332 	if (dp_srng_alloc(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
12333 			  RXDMA_BUF, ring_size, 0)) {
12334 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12335 			  FL("dp_srng_alloc failed rx refill ring"));
12336 		goto fail1;
12337 	}
12338 
12339 	if (dp_mon_rings_alloc(soc, pdev)) {
12340 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12341 			  FL("MONITOR rings setup failed"));
12342 		goto fail1;
12343 	}
12344 
12345 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
12346 		if (dp_alloc_tx_ring_pair_by_index(soc, IPA_TCL_DATA_RING_IDX))
12347 			goto fail1;
12348 	}
12349 
12350 	ring_size = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
12351 	/* LMAC RxDMA to SW Rings configuration */
12352 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx))
12353 		/* Only valid for MCL */
12354 		pdev = soc->pdev_list[0];
12355 
12356 	for (i = 0; i < NUM_RXDMA_RINGS_PER_PDEV; i++) {
12357 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, i, pdev->pdev_id);
12358 		struct dp_srng *srng = &soc->rxdma_err_dst_ring[lmac_id];
12359 
12360 		if (srng->base_vaddr_unaligned)
12361 			continue;
12362 
12363 		if (dp_srng_alloc(soc, srng, RXDMA_DST, ring_size, 0)) {
12364 			QDF_TRACE(QDF_MODULE_ID_DP,
12365 				  QDF_TRACE_LEVEL_ERROR,
12366 				  FL(RNG_ERR "rxdma_err_dst_ring"));
12367 			goto fail1;
12368 		}
12369 	}
12370 
12371 	return QDF_STATUS_SUCCESS;
12372 fail1:
12373 	dp_pdev_srng_free(pdev);
12374 	return QDF_STATUS_E_NOMEM;
12375 }
12376 
12377 /**
12378  * dp_soc_srng_deinit() - de-initialize soc srng rings
12379  * @soc: Datapath soc handle
12380  *
12381  */
12382 static void dp_soc_srng_deinit(struct dp_soc *soc)
12383 {
12384 	uint32_t i;
12385 	/* Free the ring memories */
12386 	/* Common rings */
12387 	wlan_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned);
12388 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
12389 
12390 	/* Tx data rings */
12391 	for (i = 0; i < soc->num_tcl_data_rings; i++)
12392 		dp_deinit_tx_pair_by_index(soc, i);
12393 
12394 	/* TCL command and status rings */
12395 	wlan_minidump_remove(soc->tcl_cmd_credit_ring.base_vaddr_unaligned);
12396 	dp_srng_deinit(soc, &soc->tcl_cmd_credit_ring, TCL_CMD_CREDIT, 0);
12397 	wlan_minidump_remove(soc->tcl_status_ring.base_vaddr_unaligned);
12398 	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
12399 
12400 	/* Rx data rings */
12401 	soc->num_reo_dest_rings =
12402 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
12403 	for (i = 0; i < soc->num_reo_dest_rings; i++) {
12404 		/* TODO: Get number of rings and ring sizes
12405 		 * from wlan_cfg
12406 		 */
12407 		wlan_minidump_remove(soc->reo_dest_ring[i].base_vaddr_unaligned);
12408 		dp_srng_deinit(soc, &soc->reo_dest_ring[i], REO_DST, i);
12409 	}
12410 
12411 	/* REO reinjection ring */
12412 	wlan_minidump_remove(soc->reo_reinject_ring.base_vaddr_unaligned);
12413 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
12414 
12415 	/* Rx release ring */
12416 	wlan_minidump_remove(soc->rx_rel_ring.base_vaddr_unaligned);
12417 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
12418 
12419 	/* Rx exception ring */
12420 	/* TODO: Better to store ring_type and ring_num in
12421 	 * dp_srng during setup
12422 	 */
12423 	wlan_minidump_remove(soc->reo_exception_ring.base_vaddr_unaligned);
12424 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
12425 
12426 	/* REO command and status rings */
12427 	wlan_minidump_remove(soc->reo_cmd_ring.base_vaddr_unaligned);
12428 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
12429 	wlan_minidump_remove(soc->reo_status_ring.base_vaddr_unaligned);
12430 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
12431 }
12432 
12433 /**
12434  * dp_soc_srng_init() - Initialize soc level srng rings
12435  * @soc: Datapath soc handle
12436  *
12437  * return: QDF_STATUS_SUCCESS on success
12438  *	   QDF_STATUS_E_FAILURE on failure
12439  */
12440 static QDF_STATUS dp_soc_srng_init(struct dp_soc *soc)
12441 {
12442 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
12443 	uint32_t num_tcl_data_rings, num_reo_dest_rings;
12444 	uint8_t i;
12445 
12446 	soc_cfg_ctx = soc->wlan_cfg_ctx;
12447 
12448 	dp_enable_verbose_debug(soc);
12449 
12450 	/* WBM descriptor release ring */
12451 	if (dp_srng_init(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0)) {
12452 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12453 			  FL("dp_srng_init failed for wbm_desc_rel_ring"));
12454 		goto fail1;
12455 	}
12456 
12457 	wlan_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
12458 			  soc->wbm_desc_rel_ring.alloc_size,
12459 			  soc->ctrl_psoc,
12460 			  WLAN_MD_DP_SRNG_WBM_DESC_REL,
12461 			  "wbm_desc_rel_ring");
12462 
12463 	/* TCL command and status rings */
12464 	if (dp_srng_init(soc, &soc->tcl_cmd_credit_ring,
12465 			 TCL_CMD_CREDIT, 0, 0)) {
12466 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12467 			  FL("dp_srng_init failed for tcl_cmd_ring"));
12468 		goto fail1;
12469 	}
12470 
12471 	wlan_minidump_log(soc->tcl_cmd_credit_ring.base_vaddr_unaligned,
12472 			  soc->tcl_cmd_credit_ring.alloc_size,
12473 			  soc->ctrl_psoc,
12474 			  WLAN_MD_DP_SRNG_TCL_CMD,
12475 			  "wbm_desc_rel_ring");
12476 
12477 	if (dp_srng_init(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0)) {
12478 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12479 			  FL("dp_srng_init failed for tcl_status_ring"));
12480 		goto fail1;
12481 	}
12482 
12483 	wlan_minidump_log(soc->tcl_status_ring.base_vaddr_unaligned,
12484 			  soc->tcl_status_ring.alloc_size,
12485 			  soc->ctrl_psoc,
12486 			  WLAN_MD_DP_SRNG_TCL_STATUS,
12487 			  "wbm_desc_rel_ring");
12488 
12489 	/* REO reinjection ring */
12490 	if (dp_srng_init(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0)) {
12491 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12492 			  FL("dp_srng_init failed for reo_reinject_ring"));
12493 		goto fail1;
12494 	}
12495 
12496 	wlan_minidump_log(soc->reo_reinject_ring.base_vaddr_unaligned,
12497 			  soc->reo_reinject_ring.alloc_size,
12498 			  soc->ctrl_psoc,
12499 			  WLAN_MD_DP_SRNG_REO_REINJECT,
12500 			  "reo_reinject_ring");
12501 
12502 	/* Rx release ring */
12503 	if (dp_srng_init(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0)) {
12504 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12505 			  FL("dp_srng_init failed for rx_rel_ring"));
12506 		goto fail1;
12507 	}
12508 
12509 	wlan_minidump_log(soc->rx_rel_ring.base_vaddr_unaligned,
12510 			  soc->rx_rel_ring.alloc_size,
12511 			  soc->ctrl_psoc,
12512 			  WLAN_MD_DP_SRNG_RX_REL,
12513 			  "reo_release_ring");
12514 
12515 	/* Rx exception ring */
12516 	if (dp_srng_init(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0,
12517 			 MAX_REO_DEST_RINGS)) {
12518 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12519 			  FL("dp_srng_init failed for reo_exception_ring"));
12520 		goto fail1;
12521 	}
12522 
12523 	wlan_minidump_log(soc->reo_exception_ring.base_vaddr_unaligned,
12524 			  soc->reo_exception_ring.alloc_size,
12525 			  soc->ctrl_psoc,
12526 			  WLAN_MD_DP_SRNG_REO_EXCEPTION,
12527 			  "reo_exception_ring");
12528 
12529 	/* REO command and status rings */
12530 	if (dp_srng_init(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0)) {
12531 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12532 			  FL("dp_srng_init failed for reo_cmd_ring"));
12533 		goto fail1;
12534 	}
12535 
12536 	wlan_minidump_log(soc->reo_cmd_ring.base_vaddr_unaligned,
12537 			  soc->reo_cmd_ring.alloc_size,
12538 			  soc->ctrl_psoc,
12539 			  WLAN_MD_DP_SRNG_REO_CMD,
12540 			  "reo_cmd_ring");
12541 
12542 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
12543 	TAILQ_INIT(&soc->rx.reo_cmd_list);
12544 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
12545 
12546 	if (dp_srng_init(soc, &soc->reo_status_ring, REO_STATUS, 0, 0)) {
12547 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12548 			  FL("dp_srng_init failed for reo_status_ring"));
12549 		goto fail1;
12550 	}
12551 
12552 	wlan_minidump_log(soc->reo_status_ring.base_vaddr_unaligned,
12553 			  soc->reo_status_ring.alloc_size,
12554 			  soc->ctrl_psoc,
12555 			  WLAN_MD_DP_SRNG_REO_STATUS,
12556 			  "reo_status_ring");
12557 
12558 	num_tcl_data_rings = wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
12559 	num_reo_dest_rings = wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
12560 
12561 	for (i = 0; i < num_tcl_data_rings; i++) {
12562 		if (dp_init_tx_ring_pair_by_index(soc, i))
12563 			goto fail1;
12564 	}
12565 
12566 	dp_create_ext_stats_event(soc);
12567 
12568 	for (i = 0; i < num_reo_dest_rings; i++) {
12569 		/* Initialize REO destination ring */
12570 		if (dp_srng_init(soc, &soc->reo_dest_ring[i], REO_DST, i, 0)) {
12571 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12572 				  FL("dp_srng_init failed for reo_dest_ringn"));
12573 			goto fail1;
12574 		}
12575 
12576 		wlan_minidump_log(soc->reo_dest_ring[i].base_vaddr_unaligned,
12577 				  soc->reo_dest_ring[i].alloc_size,
12578 				  soc->ctrl_psoc,
12579 				  WLAN_MD_DP_SRNG_REO_DEST,
12580 				  "reo_dest_ring");
12581 	}
12582 
12583 	return QDF_STATUS_SUCCESS;
12584 fail1:
12585 	/*
12586 	 * Cleanup will be done as part of soc_detach, which will
12587 	 * be called on pdev attach failure
12588 	 */
12589 	dp_soc_srng_deinit(soc);
12590 	return QDF_STATUS_E_FAILURE;
12591 }
12592 
12593 /**
12594  * dp_soc_srng_free() - free soc level srng rings
12595  * @soc: Datapath soc handle
12596  *
12597  */
12598 static void dp_soc_srng_free(struct dp_soc *soc)
12599 {
12600 	uint32_t i;
12601 
12602 	dp_srng_free(soc, &soc->wbm_desc_rel_ring);
12603 
12604 	for (i = 0; i < soc->num_tcl_data_rings; i++)
12605 		dp_free_tx_ring_pair_by_index(soc, i);
12606 
12607 	dp_srng_free(soc, &soc->tcl_cmd_credit_ring);
12608 	dp_srng_free(soc, &soc->tcl_status_ring);
12609 
12610 	for (i = 0; i < soc->num_reo_dest_rings; i++)
12611 		dp_srng_free(soc, &soc->reo_dest_ring[i]);
12612 
12613 	dp_srng_free(soc, &soc->reo_reinject_ring);
12614 	dp_srng_free(soc, &soc->rx_rel_ring);
12615 	dp_srng_free(soc, &soc->reo_exception_ring);
12616 	dp_srng_free(soc, &soc->reo_cmd_ring);
12617 	dp_srng_free(soc, &soc->reo_status_ring);
12618 }
12619 
12620 /**
12621  * dp_soc_srng_alloc() - Allocate memory for soc level srng rings
12622  * @soc: Datapath soc handle
12623  *
12624  * return: QDF_STATUS_SUCCESS on success
12625  *	   QDF_STATUS_E_NOMEM on failure
12626  */
12627 static QDF_STATUS dp_soc_srng_alloc(struct dp_soc *soc)
12628 {
12629 	uint32_t entries;
12630 	uint32_t i;
12631 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
12632 	uint32_t num_tcl_data_rings, num_reo_dest_rings;
12633 	uint32_t cached = WLAN_CFG_DST_RING_CACHED_DESC;
12634 	uint32_t tx_comp_ring_size, tx_ring_size, reo_dst_ring_size;
12635 
12636 	soc_cfg_ctx = soc->wlan_cfg_ctx;
12637 
12638 	/* sw2wbm link descriptor release ring */
12639 	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
12640 	if (dp_srng_alloc(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE,
12641 			  entries, 0)) {
12642 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12643 			  FL("dp_srng_alloc failed for wbm_desc_rel_ring"));
12644 		goto fail1;
12645 	}
12646 
12647 	entries = wlan_cfg_get_dp_soc_tcl_cmd_credit_ring_size(soc_cfg_ctx);
12648 	/* TCL command and status rings */
12649 	if (dp_srng_alloc(soc, &soc->tcl_cmd_credit_ring, TCL_CMD_CREDIT,
12650 			  entries, 0)) {
12651 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12652 			  FL("dp_srng_alloc failed for tcl_cmd_ring"));
12653 		goto fail1;
12654 	}
12655 
12656 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
12657 	if (dp_srng_alloc(soc, &soc->tcl_status_ring, TCL_STATUS, entries,
12658 			  0)) {
12659 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12660 			  FL("dp_srng_alloc failed for tcl_status_ring"));
12661 		goto fail1;
12662 	}
12663 
12664 	/* REO reinjection ring */
12665 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
12666 	if (dp_srng_alloc(soc, &soc->reo_reinject_ring, REO_REINJECT,
12667 			  entries, 0)) {
12668 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12669 			  FL("dp_srng_alloc failed for reo_reinject_ring"));
12670 		goto fail1;
12671 	}
12672 
12673 	/* Rx release ring */
12674 	entries = wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx);
12675 	if (dp_srng_alloc(soc, &soc->rx_rel_ring, WBM2SW_RELEASE,
12676 			  entries, 0)) {
12677 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12678 			  FL("dp_srng_alloc failed for rx_rel_ring"));
12679 		goto fail1;
12680 	}
12681 
12682 	/* Rx exception ring */
12683 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
12684 	if (dp_srng_alloc(soc, &soc->reo_exception_ring, REO_EXCEPTION,
12685 			  entries, 0)) {
12686 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12687 			  FL("dp_srng_alloc failed for reo_exception_ring"));
12688 		goto fail1;
12689 	}
12690 
12691 	/* REO command and status rings */
12692 	entries = wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx);
12693 	if (dp_srng_alloc(soc, &soc->reo_cmd_ring, REO_CMD, entries, 0)) {
12694 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12695 			  FL("dp_srng_alloc failed for reo_cmd_ring"));
12696 		goto fail1;
12697 	}
12698 
12699 	entries = wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx);
12700 	if (dp_srng_alloc(soc, &soc->reo_status_ring, REO_STATUS,
12701 			  entries, 0)) {
12702 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12703 			  FL("dp_srng_alloc failed for reo_status_ring"));
12704 		goto fail1;
12705 	}
12706 
12707 	num_tcl_data_rings = wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
12708 	num_reo_dest_rings = wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
12709 	tx_comp_ring_size = wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
12710 	tx_ring_size = wlan_cfg_tx_ring_size(soc_cfg_ctx);
12711 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc_cfg_ctx);
12712 
12713 	/* Disable cached desc if NSS offload is enabled */
12714 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
12715 		cached = 0;
12716 
12717 	for (i = 0; i < num_tcl_data_rings; i++) {
12718 		if (dp_alloc_tx_ring_pair_by_index(soc, i))
12719 			goto fail1;
12720 	}
12721 
12722 	soc->num_tcl_data_rings = num_tcl_data_rings;
12723 
12724 	for (i = 0; i < num_reo_dest_rings; i++) {
12725 		/* Setup REO destination ring */
12726 		if (dp_srng_alloc(soc, &soc->reo_dest_ring[i], REO_DST,
12727 				  reo_dst_ring_size, cached)) {
12728 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12729 				  FL("dp_srng_alloc failed for reo_dest_ring"));
12730 			goto fail1;
12731 		}
12732 	}
12733 	soc->num_reo_dest_rings = num_reo_dest_rings;
12734 
12735 	return QDF_STATUS_SUCCESS;
12736 
12737 fail1:
12738 	dp_soc_srng_free(soc);
12739 	return QDF_STATUS_E_NOMEM;
12740 }
12741 
12742 /**
12743  * dp_soc_cfg_init() - initialize target specific configuration
12744  *		       during dp_soc_init
12745  * @soc: dp soc handle
12746  */
12747 static void dp_soc_cfg_init(struct dp_soc *soc)
12748 {
12749 	int target_type;
12750 
12751 	target_type = hal_get_target_type(soc->hal_soc);
12752 	switch (target_type) {
12753 	case TARGET_TYPE_QCA6290:
12754 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12755 					       REO_DST_RING_SIZE_QCA6290);
12756 		soc->ast_override_support = 1;
12757 		soc->da_war_enabled = false;
12758 		break;
12759 	case TARGET_TYPE_QCA6390:
12760 	case TARGET_TYPE_QCA6490:
12761 	case TARGET_TYPE_QCA6750:
12762 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12763 					       REO_DST_RING_SIZE_QCA6290);
12764 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
12765 		soc->ast_override_support = 1;
12766 		if (soc->cdp_soc.ol_ops->get_con_mode &&
12767 		    soc->cdp_soc.ol_ops->get_con_mode() ==
12768 		    QDF_GLOBAL_MONITOR_MODE) {
12769 			int int_ctx;
12770 
12771 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
12772 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
12773 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
12774 			}
12775 		}
12776 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
12777 		break;
12778 	case TARGET_TYPE_QCA8074:
12779 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
12780 							   MON_BUF_MIN_ENTRIES);
12781 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12782 					       REO_DST_RING_SIZE_QCA8074);
12783 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
12784 		soc->da_war_enabled = true;
12785 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
12786 		break;
12787 	case TARGET_TYPE_QCA8074V2:
12788 	case TARGET_TYPE_QCA6018:
12789 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
12790 							   MON_BUF_MIN_ENTRIES);
12791 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12792 					       REO_DST_RING_SIZE_QCA8074);
12793 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
12794 		soc->hw_nac_monitor_support = 1;
12795 		soc->ast_override_support = 1;
12796 		soc->per_tid_basize_max_tid = 8;
12797 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
12798 		soc->da_war_enabled = false;
12799 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
12800 		break;
12801 	case TARGET_TYPE_QCN9000:
12802 		wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
12803 							   MON_BUF_MIN_ENTRIES);
12804 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12805 					       REO_DST_RING_SIZE_QCN9000);
12806 		soc->ast_override_support = 1;
12807 		soc->da_war_enabled = false;
12808 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
12809 		soc->hw_nac_monitor_support = 1;
12810 		soc->per_tid_basize_max_tid = 8;
12811 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
12812 		soc->lmac_polled_mode = 0;
12813 		soc->wbm_release_desc_rx_sg_support = 1;
12814 		if (cfg_get(soc->ctrl_psoc, CFG_DP_FULL_MON_MODE))
12815 			soc->full_mon_mode = true;
12816 		break;
12817 	case TARGET_TYPE_QCA5018:
12818 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12819 					       REO_DST_RING_SIZE_QCA8074);
12820 		soc->ast_override_support = 1;
12821 		soc->da_war_enabled = false;
12822 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
12823 		soc->hw_nac_monitor_support = 1;
12824 		soc->per_tid_basize_max_tid = 8;
12825 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
12826 		soc->disable_mac1_intr = 1;
12827 		soc->disable_mac2_intr = 1;
12828 		soc->wbm_release_desc_rx_sg_support = 1;
12829 		break;
12830 	default:
12831 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
12832 		qdf_assert_always(0);
12833 		break;
12834 	}
12835 }
12836 
12837 /**
12838  * dp_soc_cfg_attach() - set target specific configuration in
12839  *			 dp soc cfg.
12840  * @soc: dp soc handle
12841  */
12842 static void dp_soc_cfg_attach(struct dp_soc *soc)
12843 {
12844 	int target_type;
12845 	int nss_cfg = 0;
12846 
12847 	target_type = hal_get_target_type(soc->hal_soc);
12848 	switch (target_type) {
12849 	case TARGET_TYPE_QCA6290:
12850 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12851 					       REO_DST_RING_SIZE_QCA6290);
12852 		break;
12853 	case TARGET_TYPE_QCA6390:
12854 	case TARGET_TYPE_QCA6490:
12855 	case TARGET_TYPE_QCA6750:
12856 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12857 					       REO_DST_RING_SIZE_QCA6290);
12858 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
12859 		break;
12860 	case TARGET_TYPE_QCA8074:
12861 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
12862 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12863 					       REO_DST_RING_SIZE_QCA8074);
12864 		break;
12865 	case TARGET_TYPE_QCA8074V2:
12866 	case TARGET_TYPE_QCA6018:
12867 	case TARGET_TYPE_QCA5018:
12868 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
12869 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12870 					       REO_DST_RING_SIZE_QCA8074);
12871 		break;
12872 	case TARGET_TYPE_QCN9000:
12873 		wlan_cfg_set_tso_desc_attach_defer(soc->wlan_cfg_ctx, 1);
12874 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
12875 					       REO_DST_RING_SIZE_QCN9000);
12876 		break;
12877 	default:
12878 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
12879 		qdf_assert_always(0);
12880 		break;
12881 	}
12882 
12883 	if (soc->cdp_soc.ol_ops->get_soc_nss_cfg)
12884 		nss_cfg = soc->cdp_soc.ol_ops->get_soc_nss_cfg(soc->ctrl_psoc);
12885 
12886 	wlan_cfg_set_dp_soc_nss_cfg(soc->wlan_cfg_ctx, nss_cfg);
12887 
12888 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
12889 		wlan_cfg_set_num_tx_desc_pool(soc->wlan_cfg_ctx, 0);
12890 		wlan_cfg_set_num_tx_ext_desc_pool(soc->wlan_cfg_ctx, 0);
12891 		wlan_cfg_set_num_tx_desc(soc->wlan_cfg_ctx, 0);
12892 		wlan_cfg_set_num_tx_ext_desc(soc->wlan_cfg_ctx, 0);
12893 	}
12894 }
12895 
12896 static inline QDF_STATUS dp_pdev_init(struct cdp_soc_t *txrx_soc,
12897 				      HTC_HANDLE htc_handle,
12898 				      qdf_device_t qdf_osdev,
12899 				      uint8_t pdev_id)
12900 {
12901 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
12902 	int nss_cfg;
12903 	void *sojourn_buf;
12904 	QDF_STATUS ret;
12905 
12906 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
12907 	struct dp_pdev *pdev = soc->pdev_list[pdev_id];
12908 
12909 	soc_cfg_ctx = soc->wlan_cfg_ctx;
12910 	pdev->soc = soc;
12911 	pdev->pdev_id = pdev_id;
12912 
12913 	pdev->filter = dp_mon_filter_alloc(pdev);
12914 	if (!pdev->filter) {
12915 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12916 			  FL("Memory allocation failed for monitor filters"));
12917 		ret = QDF_STATUS_E_NOMEM;
12918 		goto fail0;
12919 	}
12920 
12921 	/*
12922 	 * Variable to prevent double pdev deinitialization during
12923 	 * radio detach execution .i.e. in the absence of any vdev.
12924 	 */
12925 	pdev->pdev_deinit = 0;
12926 
12927 	if (dp_wdi_event_attach(pdev)) {
12928 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
12929 			  "dp_wdi_evet_attach failed");
12930 		goto fail1;
12931 	}
12932 
12933 	if (dp_pdev_srng_init(pdev)) {
12934 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12935 			  FL("Failed to initialize pdev srng rings"));
12936 		goto fail2;
12937 	}
12938 
12939 	/* Initialize descriptors in TCL Rings used by IPA */
12940 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
12941 		hal_tx_init_data_ring(soc->hal_soc,
12942 				      soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng);
12943 
12944 	/*
12945 	 * Initialize command/credit ring descriptor
12946 	 * Command/CREDIT ring also used for sending DATA cmds
12947 	 */
12948 	hal_tx_init_cmd_credit_ring(soc->hal_soc,
12949 				    soc->tcl_cmd_credit_ring.hal_srng);
12950 
12951 	dp_tx_pdev_init(pdev);
12952 	/*
12953 	 * Variable to prevent double pdev deinitialization during
12954 	 * radio detach execution .i.e. in the absence of any vdev.
12955 	 */
12956 	pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer));
12957 
12958 	if (!pdev->invalid_peer) {
12959 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
12960 			  FL("Invalid peer memory allocation failed"));
12961 		goto fail3;
12962 	}
12963 
12964 	/*
12965 	 * set nss pdev config based on soc config
12966 	 */
12967 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
12968 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
12969 					 (nss_cfg & (1 << pdev_id)));
12970 
12971 	pdev->target_pdev_id =
12972 		dp_calculate_target_pdev_id_from_host_pdev_id(soc, pdev_id);
12973 
12974 	if (soc->preferred_hw_mode == WMI_HOST_HW_MODE_2G_PHYB &&
12975 	    pdev->lmac_id == PHYB_2G_LMAC_ID) {
12976 		pdev->target_pdev_id = PHYB_2G_TARGET_PDEV_ID;
12977 	}
12978 
12979 	/* Reset the cpu ring map if radio is NSS offloaded */
12980 	if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx)) {
12981 		dp_soc_reset_cpu_ring_map(soc);
12982 		dp_soc_reset_intr_mask(soc);
12983 	}
12984 
12985 	TAILQ_INIT(&pdev->vdev_list);
12986 	qdf_spinlock_create(&pdev->vdev_list_lock);
12987 	pdev->vdev_count = 0;
12988 
12989 	qdf_spinlock_create(&pdev->tx_mutex);
12990 	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
12991 	TAILQ_INIT(&pdev->neighbour_peers_list);
12992 	pdev->neighbour_peers_added = false;
12993 	pdev->monitor_configured = false;
12994 	pdev->mon_chan_band = REG_BAND_UNKNOWN;
12995 	pdev->ch_band_lmac_id_mapping[REG_BAND_2G] = DP_MON_INVALID_LMAC_ID;
12996 	pdev->ch_band_lmac_id_mapping[REG_BAND_5G] = DP_MON_INVALID_LMAC_ID;
12997 	pdev->ch_band_lmac_id_mapping[REG_BAND_6G] = DP_MON_INVALID_LMAC_ID;
12998 
12999 	DP_STATS_INIT(pdev);
13000 
13001 	/* Monitor filter init */
13002 	pdev->mon_filter_mode = MON_FILTER_ALL;
13003 	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
13004 	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
13005 	pdev->fp_data_filter = FILTER_DATA_ALL;
13006 	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
13007 	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
13008 	pdev->mo_data_filter = FILTER_DATA_ALL;
13009 
13010 	dp_local_peer_id_pool_init(pdev);
13011 
13012 	dp_dscp_tid_map_setup(pdev);
13013 	dp_pcp_tid_map_setup(pdev);
13014 
13015 	/* set the reo destination during initialization */
13016 	pdev->reo_dest = pdev->pdev_id + 1;
13017 
13018 	/*
13019 	 * initialize ppdu tlv list
13020 	 */
13021 	TAILQ_INIT(&pdev->ppdu_info_list);
13022 	TAILQ_INIT(&pdev->sched_comp_ppdu_list);
13023 	pdev->tlv_count = 0;
13024 	pdev->list_depth = 0;
13025 
13026 	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
13027 
13028 	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
13029 			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
13030 			      TRUE);
13031 
13032 	if (!pdev->sojourn_buf) {
13033 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
13034 			  FL("Failed to allocate sojourn buf"));
13035 		goto fail4;
13036 	}
13037 	sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
13038 	qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));
13039 
13040 	/* initlialize cal client timer */
13041 	dp_cal_client_attach(&pdev->cal_client_ctx,
13042 			     dp_pdev_to_cdp_pdev(pdev),
13043 			     pdev->soc->osdev,
13044 			     &dp_iterate_update_peer_list);
13045 	qdf_event_create(&pdev->fw_peer_stats_event);
13046 
13047 	pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
13048 	if (dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS)
13049 		goto fail5;
13050 
13051 	if (dp_rxdma_ring_setup(soc, pdev)) {
13052 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
13053 			  FL("RXDMA ring config failed"));
13054 		goto fail6;
13055 	}
13056 
13057 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
13058 		goto fail7;
13059 
13060 	if (dp_ipa_ring_resource_setup(soc, pdev))
13061 		goto fail8;
13062 
13063 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
13064 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
13065 			  FL("dp_ipa_uc_attach failed"));
13066 		goto fail8;
13067 	}
13068 
13069 	ret = dp_rx_fst_attach(soc, pdev);
13070 	if ((ret != QDF_STATUS_SUCCESS) &&
13071 	    (ret != QDF_STATUS_E_NOSUPPORT)) {
13072 		QDF_TRACE(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_ERROR,
13073 			  "RX Flow Search Table attach failed: pdev %d err %d",
13074 			  pdev_id, ret);
13075 		goto fail9;
13076 	}
13077 
13078 	/* initialize sw rx descriptors */
13079 	dp_rx_pdev_desc_pool_init(pdev);
13080 	/* initialize sw monitor rx descriptors */
13081 	dp_rx_pdev_mon_desc_pool_init(pdev);
13082 	/* allocate buffers and replenish the RxDMA ring */
13083 	dp_rx_pdev_buffers_alloc(pdev);
13084 	/* allocate buffers and replenish the monitor RxDMA ring */
13085 	dp_rx_pdev_mon_buffers_alloc(pdev);
13086 
13087 	dp_init_tso_stats(pdev);
13088 	dp_tx_ppdu_stats_attach(pdev);
13089 
13090 	dp_info("Mem stats: DMA = %u HEAP = %u SKB = %u",
13091 		qdf_dma_mem_stats_read(),
13092 		qdf_heap_mem_stats_read(),
13093 		qdf_skb_mem_stats_read());
13094 
13095 	return QDF_STATUS_SUCCESS;
13096 fail9:
13097 	dp_ipa_uc_detach(soc, pdev);
13098 fail8:
13099 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
13100 fail7:
13101 	dp_rxdma_ring_cleanup(soc, pdev);
13102 fail6:
13103 	dp_htt_ppdu_stats_detach(pdev);
13104 fail5:
13105 	qdf_nbuf_free(pdev->sojourn_buf);
13106 fail4:
13107 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
13108 	qdf_spinlock_destroy(&pdev->tx_mutex);
13109 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
13110 	qdf_mem_free(pdev->invalid_peer);
13111 fail3:
13112 	dp_pdev_srng_deinit(pdev);
13113 fail2:
13114 	dp_wdi_event_detach(pdev);
13115 fail1:
13116 	dp_mon_filter_dealloc(pdev);
13117 fail0:
13118 	return QDF_STATUS_E_FAILURE;
13119 }
13120 
13121 /*
13122  * dp_pdev_init_wifi3() - Init txrx pdev
13123  * @htc_handle: HTC handle for host-target interface
13124  * @qdf_osdev: QDF OS device
13125  * @force: Force deinit
13126  *
13127  * Return: QDF_STATUS
13128  */
13129 static QDF_STATUS dp_pdev_init_wifi3(struct cdp_soc_t *txrx_soc,
13130 				     HTC_HANDLE htc_handle,
13131 				     qdf_device_t qdf_osdev,
13132 				     uint8_t pdev_id)
13133 {
13134 	return dp_pdev_init(txrx_soc, htc_handle, qdf_osdev, pdev_id);
13135 }
13136 
13137