/* xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision f28396d060cff5c6519f883cb28ae0116ce479f1) */
/*
 * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_types.h>
#include <qdf_lock.h>
#include <qdf_net_types.h>
#include <qdf_lro.h>
#include <qdf_module.h>
#include <hal_hw_headers.h>
#include <hal_api.h>
#include <hif.h>
#include <htt.h>
#include <wdi_event.h>
#include <queue.h>
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_rx.h"
#include "dp_rx_mon.h"
#ifdef DP_RATETABLE_SUPPORT
#include "dp_ratetable.h"
#endif
#include <cdp_txrx_handle.h>
#include <wlan_cfg.h>
#include "cdp_txrx_cmn_struct.h"
#include "cdp_txrx_stats_struct.h"
#include "cdp_txrx_cmn_reg.h"
#include <qdf_util.h>
#include "dp_peer.h"
#include "htt_stats.h"
#include "dp_htt.h"
#include "htt_ppdu_stats.h"
#include "qdf_mem.h"   /* qdf_mem_malloc,free */
#include "cfg_ucfg_api.h"
#include "dp_mon_filter.h"
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#include "cdp_txrx_flow_ctrl_v2.h"
#else
static inline void
cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
{
	return;
}
#endif
#include "dp_ipa.h"
#include "dp_cal_client_api.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#ifdef ATH_SUPPORT_IQUE
#include "dp_txrx_me.h"
#endif
#if defined(DP_CON_MON)
#ifndef REMOVE_PKT_LOG
#include <pktlog_ac_api.h>
#include <pktlog_ac.h>
#endif
#endif

/*
 * The max size of cdp_peer_stats_param_t is limited to 16 bytes.
 * If the buffer size exceeds this limit,
 * dp_txrx_get_peer_stats must be used instead.
 */
QDF_COMPILE_TIME_ASSERT(cdp_peer_stats_param_t_max_size,
			(sizeof(cdp_peer_stats_param_t) <= 16));

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
/*
 * If WLAN_CFG_INT_NUM_CONTEXTS is changed, HIF_NUM_INT_CONTEXTS
 * should also be updated accordingly.
 */
QDF_COMPILE_TIME_ASSERT(num_intr_grps,
			HIF_NUM_INT_CONTEXTS == WLAN_CFG_INT_NUM_CONTEXTS);

/*
 * HIF_EVENT_HIST_MAX should always be a power of 2.
 */
QDF_COMPILE_TIME_ASSERT(hif_event_history_size,
			(HIF_EVENT_HIST_MAX & (HIF_EVENT_HIST_MAX - 1)) == 0);
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */

/*
 * If WLAN_CFG_INT_NUM_CONTEXTS is changed,
 * WLAN_CFG_INT_NUM_CONTEXTS_MAX should also be updated.
 */
QDF_COMPILE_TIME_ASSERT(wlan_cfg_num_int_ctxs,
			WLAN_CFG_INT_NUM_CONTEXTS_MAX >=
			WLAN_CFG_INT_NUM_CONTEXTS);

#ifdef WLAN_RX_PKT_CAPTURE_ENH
#include "dp_rx_mon_feature.h"
#else
/*
 * dp_config_enh_rx_capture() - API to enable/disable enhanced rx capture
 * @pdev_handle: DP_PDEV handle
 * @val: user provided value
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_config_enh_rx_capture(struct dp_pdev *pdev_handle, uint8_t val)
{
	return QDF_STATUS_E_INVAL;
}
#endif /* WLAN_RX_PKT_CAPTURE_ENH */

#ifdef WLAN_TX_PKT_CAPTURE_ENH
#include "dp_tx_capture.h"
#else
/*
 * dp_config_enh_tx_capture() - API to enable/disable enhanced tx capture
 * @pdev_handle: DP_PDEV handle
 * @val: user provided value
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_config_enh_tx_capture(struct dp_pdev *pdev_handle, uint8_t val)
{
	return QDF_STATUS_E_INVAL;
}
#endif

void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
		  struct hif_opaque_softc *hif_handle);
static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
static struct dp_soc *
dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, HTC_HANDLE htc_handle,
	      qdf_device_t qdf_osdev,
	      struct ol_if_ops *ol_ops, uint16_t device_id);
static void dp_pktlogmod_exit(struct dp_pdev *handle);
static inline QDF_STATUS dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl,
					      uint8_t vdev_id,
					      uint8_t *peer_mac_addr);
static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
				       uint8_t *peer_mac, uint32_t bitmap);
static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
				bool unmap_only);
#ifdef ENABLE_VERBOSE_DEBUG
bool is_dp_verbose_debug_enabled;
#endif

#if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
static void dp_cfr_filter(struct cdp_soc_t *soc_hdl,
			  uint8_t pdev_id,
			  bool enable,
			  struct cdp_monitor_filter *filter_val);
static bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
static void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			   bool enable);
#endif
static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
					    enum hal_ring_type ring_type,
					    int ring_num);
#define DP_INTR_POLL_TIMER_MS	10
/* Generic AST entry aging timer value */
#define DP_AST_AGING_TIMER_DEFAULT_MS	1000
#define DP_MCS_LENGTH (6*MAX_MCS)

#define DP_CURR_FW_STATS_AVAIL 19
#define DP_HTT_DBG_EXT_STATS_MAX 256
#define DP_MAX_SLEEP_TIME 100
#ifndef QCA_WIFI_3_0_EMU
#define SUSPEND_DRAIN_WAIT 500
#else
#define SUSPEND_DRAIN_WAIT 3000
#endif

#ifdef IPA_OFFLOAD
/* Exclude IPA rings from the interrupt context */
#define TX_RING_MASK_VAL	0xb
#define RX_RING_MASK_VAL	0x7
#else
#define TX_RING_MASK_VAL	0xF
#define RX_RING_MASK_VAL	0xF
#endif

#define STR_MAXLEN	64

#define RNG_ERR		"SRNG setup failed for"

/* Threshold for peer's cached buf queue beyond which frames are dropped */
#define DP_RX_CACHED_BUFQ_THRESH 64

/* Budget to reap monitor status ring */
#define DP_MON_REAP_BUDGET 1024

/**
 * default_dscp_tid_map - Default DSCP-TID mapping
 *
 * DSCP        TID
 * 000000      0
 * 001000      1
 * 010000      2
 * 011000      3
 * 100000      4
 * 101000      5
 * 110000      6
 * 111000      7
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7,
};
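
/*
 * Illustration (not part of the driver): the table is indexed directly by
 * the 6-bit DSCP value, so each block of eight codepoints shares one TID.
 * A hypothetical lookup for DSCP 46 (Expedited Forwarding, 101110b) would
 * be:
 *
 *	uint8_t dscp = 46;
 *	uint8_t tid = default_dscp_tid_map[dscp];	// tid == 5
 *
 * i.e. equivalent to tid = dscp >> 3 for this default table.
 */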

/**
 * default_pcp_tid_map - Default PCP-TID mapping
 *
 * PCP     TID
 * 000      0
 * 001      1
 * 010      2
 * 011      3
 * 100      4
 * 101      5
 * 110      6
 * 111      7
 */
static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
	0, 1, 2, 3, 4, 5, 6, 7,
};

/**
 * @brief CPU to Tx ring map
 */
uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
#endif
};
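
/*
 * Illustration (not part of the driver): the map appears to be indexed
 * first by the NSS CPU ring map id and then by the CPU/interrupt context,
 * yielding the Tx (TCL data) ring to use. A hypothetical lookup might be:
 *
 *	uint8_t tcl_ring = dp_cpu_ring_map[map_id][cpu_ctx];
 *
 * where map_id selects one of the DP_NSS_CPU_RING_MAP_MAX policies above.
 */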

/**
 * @brief Select the type of statistics
 */
enum dp_stats_type {
	STATS_FW = 0,
	STATS_HOST = 1,
	STATS_TYPE_MAX = 2,
};

/**
 * @brief General Firmware statistics options
 *
 */
enum dp_fw_stats {
	TXRX_FW_STATS_INVALID	= -1,
};

/**
 * dp_stats_mapping_table - Firmware and Host statistics
 * currently supported
 */
const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
	/* Last ENUM for HTT FW STATS */
	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
	{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
};
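
/*
 * Illustration (not part of the driver): each row pairs the firmware stats
 * id with the corresponding host stats id for one user-visible stats type.
 * A hypothetical consumer would pick the column for where the stats live:
 *
 *	int fw_id = dp_stats_mapping_table[stats_idx][STATS_FW];
 *	int host_id = dp_stats_mapping_table[stats_idx][STATS_HOST];
 *
 * treating TXRX_FW_STATS_INVALID / TXRX_HOST_STATS_INVALID as "no request
 * needed on that side".
 */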

/* MCL specific functions */
#if defined(DP_CON_MON)
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * For MCL, monitor mode rings are processed in timer contexts (polled).
 * This function returns 0, since in interrupt mode (softirq based RX),
 * we do not want to process monitor mode rings in a softirq.
 *
 * So, in case packet log is enabled for SAP/STA/P2P modes,
 * regular interrupt processing will not process monitor mode rings. It would
 * be done in a separate timer context.
 *
 * Return: 0
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return 0;
}

/*
 * dp_service_mon_rings() - service monitor rings
 * @soc: soc dp handle
 * @quota: number of ring entries that can be serviced
 *
 * Return: None
 *
 */
static void dp_service_mon_rings(struct dp_soc *soc, uint32_t quota)
{
	int ring = 0, work_done;
	struct dp_pdev *pdev = NULL;

	for (ring = 0; ring < MAX_NUM_LMAC_HW; ring++) {
		pdev = dp_get_pdev_for_lmac_id(soc, ring);
		if (!pdev)
			continue;
		work_done = dp_mon_process(soc, ring, quota);

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  FL("Reaped %d descs from Monitor rings"),
			  work_done);
	}
}

/*
 * dp_mon_reap_timer_handler() - timer to reap monitor rings,
 * required as we are not getting ppdu end interrupts
 * @arg: SoC Handle
 *
 * Return: None
 *
 */
static void dp_mon_reap_timer_handler(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;

	dp_service_mon_rings(soc, QCA_NAPI_BUDGET);

	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
}

#ifndef REMOVE_PKT_LOG
/**
 * dp_pkt_log_init() - API to initialize packet log
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @scn: HIF context
 *
 * Return: none
 */
void dp_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *handle =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!handle) {
		dp_err("pdev handle is NULL");
		return;
	}

	if (handle->pkt_log_init) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Packet log already initialized", __func__);
		return;
	}

	pktlog_sethandle(&handle->pl_dev, scn);
	pktlog_set_pdev_id(handle->pl_dev, pdev_id);
	pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);

	if (pktlogmod_init(scn)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: pktlogmod_init failed", __func__);
		handle->pkt_log_init = false;
	} else {
		handle->pkt_log_init = true;
	}
}

/**
 * dp_pkt_log_con_service() - connect packet log service
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @scn: device context
 *
 * Return: none
 */
static void dp_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
				   uint8_t pdev_id, void *scn)
{
	dp_pkt_log_init(soc_hdl, pdev_id, scn);
	pktlog_htc_attach();
}

/**
 * dp_get_num_rx_contexts() - get number of RX contexts
 * @soc_hdl: cdp opaque soc handle
 *
 * Return: number of RX contexts
 */
static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
{
	int i;
	int num_rx_contexts = 0;

	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
		if (wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i))
			num_rx_contexts++;

	return num_rx_contexts;
}

/**
 * dp_pktlogmod_exit() - API to cleanup pktlog info
 * @pdev: Pdev handle
 *
 * Return: none
 */
static void dp_pktlogmod_exit(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	struct hif_opaque_softc *scn = soc->hif_handle;

	if (!scn) {
		dp_err("Invalid hif(scn) handle");
		return;
	}

	/* stop mon_reap_timer if it has been started */
	if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED &&
	    soc->reap_timer_init)
		qdf_timer_sync_cancel(&soc->mon_reap_timer);

	pktlogmod_exit(scn);
	pdev->pkt_log_init = false;
}
#endif
#else
static void dp_pktlogmod_exit(struct dp_pdev *handle) { }

/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * Return: mon mask value
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
}

/*
 * dp_service_lmac_rings() - timer to reap lmac rings
 * @arg: SoC Handle
 *
 * Return: None
 *
 */
static void dp_service_lmac_rings(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;
	int ring = 0, i;
	struct dp_pdev *pdev = NULL;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;

	/* Process LMAC interrupts */
	for (ring = 0; ring < MAX_NUM_LMAC_HW; ring++) {
		int mac_for_pdev = ring;
		struct dp_srng *rx_refill_buf_ring;

		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
		if (!pdev)
			continue;

		rx_refill_buf_ring = &soc->rx_refill_buf_ring[mac_for_pdev];

		dp_mon_process(soc, mac_for_pdev,
			       QCA_NAPI_BUDGET);

		for (i = 0;
		     i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
			dp_rxdma_err_process(&soc->intr_ctx[i], soc,
					     mac_for_pdev,
					     QCA_NAPI_BUDGET);

		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF,
						  mac_for_pdev))
			dp_rx_buffers_replenish(soc, mac_for_pdev,
						rx_refill_buf_ring,
						&soc->rx_desc_buf[mac_for_pdev],
						0, &desc_list, &tail);
	}

	qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
}

#endif

static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
				 uint8_t vdev_id,
				 uint8_t *peer_mac,
				 uint8_t *mac_addr,
				 enum cdp_txrx_ast_entry_type type,
				 uint32_t flags)
{
	int ret = -1;
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
						       peer_mac, 0, vdev_id);

	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!\n", __func__);
		goto fail;
	}

	ret = dp_peer_add_ast((struct dp_soc *)soc_hdl,
			      peer,
			      mac_addr,
			      type,
			      flags);
fail:
	if (peer)
		dp_peer_unref_delete(peer);

	return ret;
}

static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
						uint8_t vdev_id,
						uint8_t *peer_mac,
						uint8_t *wds_macaddr,
						uint32_t flags)
{
	int status = -1;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc_hdl,
						       peer_mac, 0, vdev_id);

	if (!peer || peer->delete_in_progress) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Peer is NULL!\n", __func__);
		goto fail;
	}

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
						    peer->vdev->pdev->pdev_id);

	if (ast_entry) {
		status = dp_peer_update_ast(soc,
					    peer,
					    ast_entry, flags);
	}
	qdf_spin_unlock_bh(&soc->ast_lock);

fail:
	if (peer)
		dp_peer_unref_delete(peer);

	return status;
}

/*
 * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry
 * @soc_hdl:		Datapath SOC handle
 * @wds_macaddr:	WDS entry MAC Address
 * @peer_mac_addr:	peer MAC Address
 * @vdev_id:		id of vdev handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
					 uint8_t *wds_macaddr,
					 uint8_t *peer_mac_addr,
					 uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_ast_entry *tmp_ast_entry;
	struct dp_peer *peer;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	pdev = vdev->pdev;

	if (peer_mac_addr) {
		peer = dp_peer_find_hash_find(soc, peer_mac_addr,
					      0, vdev->vdev_id);
		if (!peer) {
			return QDF_STATUS_E_FAILURE;
		}

		if (peer->delete_in_progress) {
			dp_peer_unref_delete(peer);
			return QDF_STATUS_E_FAILURE;
		}

		qdf_spin_lock_bh(&soc->ast_lock);
		DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) {
			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
				dp_peer_del_ast(soc, ast_entry);
		}
		qdf_spin_unlock_bh(&soc->ast_lock);
		dp_peer_unref_delete(peer);

		return QDF_STATUS_SUCCESS;
	} else if (wds_macaddr) {
		qdf_spin_lock_bh(&soc->ast_lock);
		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
							    pdev->pdev_id);

		if (ast_entry) {
			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
				dp_peer_del_ast(soc, ast_entry);
		}
		qdf_spin_unlock_bh(&soc->ast_lock);
	}

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all ast entries
 * @soc_hdl:		Datapath SOC handle
 * @vdev_id:		id of vdev handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
			     uint8_t vdev_id)
{
	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_peer *peer;
	struct dp_ast_entry *ase, *temp_ase;
	int i;

	qdf_spin_lock_bh(&soc->ast_lock);

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
					if ((ase->type ==
						CDP_TXRX_AST_TYPE_WDS_HM) ||
					    (ase->type ==
						CDP_TXRX_AST_TYPE_WDS_HM_SEC))
						dp_peer_del_ast(soc, ase);
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}

	qdf_spin_unlock_bh(&soc->ast_lock);

	return QDF_STATUS_SUCCESS;
}

/*
 * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entries
 * @soc_hdl:		Datapath SOC handle
 *
 * Return: None
 */
static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
{
	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_peer *peer;
	struct dp_ast_entry *ase, *temp_ase;
	int i;

	qdf_spin_lock_bh(&soc->ast_lock);

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
					if ((ase->type ==
						CDP_TXRX_AST_TYPE_STATIC) ||
						(ase->type ==
						 CDP_TXRX_AST_TYPE_SELF) ||
						(ase->type ==
						 CDP_TXRX_AST_TYPE_STA_BSS))
						continue;
					dp_peer_del_ast(soc, ase);
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
}

/**
 * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
 *                                       and return ast entry information
 *                                       of first ast entry found in the
 *                                       table with given mac address
 *
 * @soc_hdl: data path soc handle
 * @ast_mac_addr: AST entry mac address
 * @ast_entry_info: ast entry information
 *
 * Return: true if ast entry found with ast_mac_addr
 *         false if ast entry not found
 */
static bool dp_peer_get_ast_info_by_soc_wifi3
	(struct cdp_soc_t *soc_hdl,
	 uint8_t *ast_mac_addr,
	 struct cdp_ast_entry_info *ast_entry_info)
{
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
	if (!ast_entry || !ast_entry->peer) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}
	if (ast_entry->delete_in_progress && !ast_entry->callback) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}
	ast_entry_info->type = ast_entry->type;
	ast_entry_info->pdev_id = ast_entry->pdev_id;
	ast_entry_info->vdev_id = ast_entry->peer->vdev->vdev_id;
	ast_entry_info->peer_id = ast_entry->peer->peer_ids[0];
	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
		     &ast_entry->peer->mac_addr.raw[0],
		     QDF_MAC_ADDR_SIZE);
	qdf_spin_unlock_bh(&soc->ast_lock);
	return true;
}

/**
 * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
 *                                          and return ast entry information
 *                                          if mac address and pdev_id matches
 *
 * @soc_hdl: data path soc handle
 * @ast_mac_addr: AST entry mac address
 * @pdev_id: pdev_id
 * @ast_entry_info: ast entry information
 *
 * Return: true if ast entry found with ast_mac_addr
 *         false if ast entry not found
 */
static bool dp_peer_get_ast_info_by_pdevid_wifi3
		(struct cdp_soc_t *soc_hdl,
		 uint8_t *ast_mac_addr,
		 uint8_t pdev_id,
		 struct cdp_ast_entry_info *ast_entry_info)
{
	struct dp_ast_entry *ast_entry;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr, pdev_id);

	if (!ast_entry || !ast_entry->peer) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}
	if (ast_entry->delete_in_progress && !ast_entry->callback) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}
	ast_entry_info->type = ast_entry->type;
	ast_entry_info->pdev_id = ast_entry->pdev_id;
	ast_entry_info->vdev_id = ast_entry->peer->vdev->vdev_id;
	ast_entry_info->peer_id = ast_entry->peer->peer_ids[0];
	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
		     &ast_entry->peer->mac_addr.raw[0],
		     QDF_MAC_ADDR_SIZE);
	qdf_spin_unlock_bh(&soc->ast_lock);
	return true;
}

/**
 * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash table
 *                            with given mac address
 *
 * @soc_handle: data path soc handle
 * @mac_addr: AST entry mac address
 * @callback: callback function to be called on ast delete response from FW
 * @cookie: argument to be passed to callback
 *
 * Return: QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
 *         is sent
 *         QDF_STATUS_E_INVAL if ast entry not found
 */
static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
					       uint8_t *mac_addr,
					       txrx_ast_free_cb callback,
					       void *cookie)

{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_ast_entry *ast_entry = NULL;
	txrx_ast_free_cb cb = NULL;
	void *arg = NULL;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return -QDF_STATUS_E_INVAL;
	}

	if (ast_entry->callback) {
		cb = ast_entry->callback;
		arg = ast_entry->cookie;
	}

	ast_entry->callback = callback;
	ast_entry->cookie = cookie;

	/*
	 * If delete_in_progress is set, an AST delete was already sent to
	 * the target and the host is waiting for a response; do not send
	 * another delete.
	 */
	if (!ast_entry->delete_in_progress)
		dp_peer_del_ast(soc, ast_entry);

	qdf_spin_unlock_bh(&soc->ast_lock);
	if (cb) {
		cb(soc->ctrl_psoc,
		   dp_soc_to_cdp_soc(soc),
		   arg,
		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
 *                                   table if mac address and pdev_id matches
 *
 * @soc_handle: data path soc handle
 * @mac_addr: AST entry mac address
 * @pdev_id: pdev id
 * @callback: callback function to be called on ast delete response from FW
 * @cookie: argument to be passed to callback
 *
 * Return: QDF_STATUS_SUCCESS if ast entry found with ast_mac_addr and delete
 *         is sent
 *         QDF_STATUS_E_INVAL if ast entry not found
 */

static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
						uint8_t *mac_addr,
						uint8_t pdev_id,
						txrx_ast_free_cb callback,
						void *cookie)

{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_ast_entry *ast_entry;
	txrx_ast_free_cb cb = NULL;
	void *arg = NULL;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);

	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return -QDF_STATUS_E_INVAL;
	}

	if (ast_entry->callback) {
		cb = ast_entry->callback;
		arg = ast_entry->cookie;
	}

	ast_entry->callback = callback;
	ast_entry->cookie = cookie;

	/*
	 * If delete_in_progress is set, an AST delete was already sent to
	 * the target and the host is waiting for a response; do not send
	 * another delete.
	 */
	if (!ast_entry->delete_in_progress)
		dp_peer_del_ast(soc, ast_entry);

	qdf_spin_unlock_bh(&soc->ast_lock);

	if (cb) {
		cb(soc->ctrl_psoc,
		   dp_soc_to_cdp_soc(soc),
		   arg,
		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
 * @ring_num: ring num of the ring being queried
 * @grp_mask: the grp_mask array for the ring type in question.
 *
 * The grp_mask array is indexed by group number and the bit fields correspond
 * to ring numbers.  We are finding which interrupt group a ring belongs to.
 *
 * Return: the index in the grp_mask array with the ring number.
 * -QDF_STATUS_E_NOENT if no entry is found
 */
static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
{
	int ext_group_num;
	int mask = 1 << ring_num;

	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
	     ext_group_num++) {
		if (mask & grp_mask[ext_group_num])
			return ext_group_num;
	}

	return -QDF_STATUS_E_NOENT;
}
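
/*
 * Illustration (not part of the driver): with hypothetical masks
 * grp_mask[] = { 0x3, 0x4 }, ring 2 sets mask = 1 << 2 = 0x4, which is
 * found in group 1, so dp_srng_find_ring_in_mask(2, grp_mask) returns 1;
 * a ring whose bit appears in no group yields -QDF_STATUS_E_NOENT.
 */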

static int dp_srng_calculate_msi_group(struct dp_soc *soc,
				       enum hal_ring_type ring_type,
				       int ring_num)
{
	int *grp_mask;

	switch (ring_type) {
	case WBM2SW_RELEASE:
		/* dp_tx_comp_handler - soc->tx_comp_ring */
		if (ring_num < 3)
			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];

		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
		else if (ring_num == 3) {
			/* sw treats this as a separate ring type */
			grp_mask = &soc->wlan_cfg_ctx->
				int_rx_wbm_rel_ring_mask[0];
			ring_num = 0;
		} else {
			qdf_assert(0);
			return -QDF_STATUS_E_NOENT;
		}
	break;

	case REO_EXCEPTION:
		/* dp_rx_err_process - &soc->reo_exception_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
	break;

	case REO_DST:
		/* dp_rx_process - soc->reo_dest_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
	break;

	case REO_STATUS:
		/* dp_reo_status_ring_handler - soc->reo_status_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
	break;

	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring */
	case RXDMA_MONITOR_STATUS:
	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
	case RXDMA_MONITOR_DST:
		/* dp_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
	break;
	case RXDMA_DST:
		/* dp_rxdma_err_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
	break;

	case RXDMA_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
	break;

	case RXDMA_MONITOR_BUF:
		/* TODO: support low_thresh interrupt */
		return -QDF_STATUS_E_NOENT;
	break;

	case TCL_DATA:
	case TCL_CMD:
	case REO_CMD:
	case SW2WBM_RELEASE:
	case WBM_IDLE_LINK:
		/* normally empty SW_TO_HW rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case TCL_STATUS:
	case REO_REINJECT:
		/* misc unused rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case CE_SRC:
	case CE_DST:
	case CE_DST_STATUS:
		/* CE_rings - currently handled by hif */
	default:
		return -QDF_STATUS_E_NOENT;
	break;
	}

	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
}

static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
			      *ring_params, int ring_type, int ring_num)
{
	int msi_group_number;
	int msi_data_count;
	int ret;
	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;

	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					    &msi_data_count, &msi_data_start,
					    &msi_irq_start);

	if (ret)
		return;

	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
						       ring_num);
	if (msi_group_number < 0) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
			FL("ring not part of an ext_group; ring_type: %d, ring_num %d"),
			ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		return;
	}

	if (msi_group_number > msi_data_count) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
			FL("2 msi_groups will share an msi; msi_group_num %d"),
			msi_group_number);

		QDF_ASSERT(0);
	}

	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = (msi_group_number % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;
}
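
/*
 * Illustration (not part of the driver): with a hypothetical MSI grant of
 * msi_data_count = 4 vectors starting at msi_data_start = 32, a ring mapped
 * to interrupt group 5 ends up sharing a vector:
 *
 *	msi_data = (5 % 4) + 32 = 33
 *
 * which is why the warning above fires when the group number exceeds the
 * number of MSI vectors available.
 */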

/**
 * dp_print_ast_stats() - Dump AST table contents
 * @soc: Datapath soc handle
 *
 * Return: void
 */
#ifdef FEATURE_AST
void dp_print_ast_stats(struct dp_soc *soc)
{
	uint8_t i;
	uint8_t num_entries = 0;
	struct dp_vdev *vdev;
	struct dp_pdev *pdev;
	struct dp_peer *peer;
	struct dp_ast_entry *ase, *tmp_ase;
	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
			"NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS", "BSS",
			"DA", "HMWDS_SEC"};

	DP_PRINT_STATS("AST Stats:");
	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
	DP_PRINT_STATS("	Entries MAP ERR  = %d", soc->stats.ast.map_err);

	DP_PRINT_STATS("AST Table:");

	qdf_spin_lock_bh(&soc->ast_lock);
	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
				    DP_PRINT_STATS("%6d mac_addr = %pM"
					    " peer_mac_addr = %pM"
					    " peer_id = %u"
					    " type = %s"
					    " next_hop = %d"
					    " is_active = %d"
					    " ast_idx = %d"
					    " ast_hash = %d"
					    " delete_in_progress = %d"
					    " pdev_id = %d"
					    " vdev_id = %d",
					    ++num_entries,
					    ase->mac_addr.raw,
					    ase->peer->mac_addr.raw,
					    ase->peer->peer_ids[0],
					    type[ase->type],
					    ase->next_hop,
					    ase->is_active,
					    ase->ast_idx,
					    ase->ast_hash_value,
					    ase->delete_in_progress,
					    ase->pdev_id,
					    vdev->vdev_id);
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
}
#else
void dp_print_ast_stats(struct dp_soc *soc)
{
	DP_PRINT_STATS("AST Stats not available. Enable FEATURE_AST");
	return;
}
#endif

/**
 * dp_print_peer_table() - Dump all Peer stats
 * @vdev: Datapath Vdev handle
 *
 * Return: void
 */
static void dp_print_peer_table(struct dp_vdev *vdev)
{
	struct dp_peer *peer = NULL;

	DP_PRINT_STATS("Dumping Peer Table Stats:");
	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if (!peer) {
			DP_PRINT_STATS("Invalid Peer");
			return;
		}
		DP_PRINT_STATS("    peer_mac_addr = %pM"
			       " nawds_enabled = %d"
			       " bss_peer = %d"
			       " wds_enabled = %d"
			       " tx_cap_enabled = %d"
			       " rx_cap_enabled = %d"
			       " delete in progress = %d"
			       " peer id = %d",
			       peer->mac_addr.raw,
			       peer->nawds_enabled,
			       peer->bss_peer,
			       peer->wds_enabled,
			       peer->tx_cap_enabled,
			       peer->rx_cap_enabled,
			       peer->delete_in_progress,
			       peer->peer_ids[0]);
	}
}

#ifdef WLAN_DP_PER_RING_TYPE_CONFIG
/**
 * dp_srng_configure_interrupt_thresholds() - Retrieve interrupt
 * threshold values from the wlan_srng_cfg table for each ring type
 * @soc: device handle
 * @ring_params: per ring specific parameters
 * @ring_type: Ring type
 * @ring_num: Ring number for a given ring type
 * @num_entries: number of entries in the ring
 *
 * Fill the ring params with the interrupt threshold
 * configuration parameters available in the per ring type wlan_srng_cfg
 * table.
 *
 * Return: None
 */
static void
dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
				       struct hal_srng_params *ring_params,
				       int ring_type, int ring_num,
				       int num_entries)
{
	if (ring_type == WBM2SW_RELEASE && (ring_num == 3)) {
		ring_params->intr_timer_thres_us =
				wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
				wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
	} else {
		ring_params->intr_timer_thres_us =
				soc->wlan_srng_cfg[ring_type].timer_threshold;
		ring_params->intr_batch_cntr_thres_entries =
				soc->wlan_srng_cfg[ring_type].batch_count_threshold;
	}
	ring_params->low_threshold =
			soc->wlan_srng_cfg[ring_type].low_threshold;

	if (ring_params->low_threshold)
		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
}
#else
static void
dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
				       struct hal_srng_params *ring_params,
				       int ring_type, int ring_num,
				       int num_entries)
{
	if (ring_type == REO_DST) {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
	} else {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
	}

	/* Enable low threshold interrupts for rx buffer rings (regular and
	 * monitor buffer rings).
	 * TODO: See if this is required for any other ring
	 */
	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
	    (ring_type == RXDMA_MONITOR_STATUS)) {
		/* TODO: Setting low threshold to 1/8th of ring size;
		 * see if this needs to be configurable
		 */
		ring_params->low_threshold = num_entries >> 3;
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
		ring_params->intr_batch_cntr_thres_entries = 0;
	}
}
#endif

/**
 * dp_srng_setup() - Internal function to setup SRNG rings used by data path
 * @soc: datapath soc handle
 * @srng: srng handle
 * @ring_type: ring that needs to be configured
 * @ring_num: ring number for a given ring type
 * @mac_id: mac number
 * @num_entries: Total number of entries for a given ring
 * @cached: flag to indicate the ring memory is allocated cacheable
 *
 * Return: non-zero - failure/zero - success
 */
static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
			 int ring_type, int ring_num, int mac_id,
			 uint32_t num_entries, bool cached)
{
	hal_soc_handle_t hal_soc = soc->hal_soc;
	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
	/* TODO: See if we should get align size from hal */
	uint32_t ring_base_align = 8;
	struct hal_srng_params ring_params;
	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);

	/* TODO: Currently hal layer takes care of endianness related settings.
	 * See if these settings need to be passed from DP layer
	 */
	ring_params.flags = 0;

	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
	srng->hal_srng = NULL;
	srng->alloc_size = num_entries * entry_size;
	srng->num_entries = num_entries;

	if (!dp_is_soc_reinit(soc)) {
		if (!cached) {
			ring_params.ring_base_vaddr =
			    qdf_aligned_mem_alloc_consistent(
						soc->osdev, &srng->alloc_size,
						&srng->base_vaddr_unaligned,
						&srng->base_paddr_unaligned,
						&ring_params.ring_base_paddr,
						ring_base_align);
		} else {
			ring_params.ring_base_vaddr = qdf_aligned_malloc(
					&srng->alloc_size,
					&srng->base_vaddr_unaligned,
					&srng->base_paddr_unaligned,
					&ring_params.ring_base_paddr,
					ring_base_align);
		}

		if (!ring_params.ring_base_vaddr) {
			dp_err("alloc failed - ring_type: %d, ring_num %d",
					ring_type, ring_num);
			return QDF_STATUS_E_NOMEM;
		}
	}

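	/*
	 * On soc re-init the buffers allocated earlier are reused; in either
	 * case, re-derive the aligned base addresses from the unaligned
	 * ones: round the physical address up to ring_base_align and shift
	 * the virtual address by the same offset.
	 */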
	ring_params.ring_base_paddr = (qdf_dma_addr_t)qdf_align(
			(unsigned long)(srng->base_paddr_unaligned),
			ring_base_align);

	ring_params.ring_base_vaddr = (void *)(
			(unsigned long)(srng->base_vaddr_unaligned) +
			((unsigned long)(ring_params.ring_base_paddr) -
			 (unsigned long)(srng->base_paddr_unaligned)));

	qdf_assert_always(ring_params.ring_base_vaddr);

	ring_params.num_entries = num_entries;

	dp_verbose_debug("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
			 ring_type, ring_num,
			 (void *)ring_params.ring_base_vaddr,
			 (void *)ring_params.ring_base_paddr,
			 ring_params.num_entries);

	if (soc->intr_mode == DP_INTR_MSI) {
		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
				 ring_type, ring_num);

	} else {
		ring_params.msi_data = 0;
		ring_params.msi_addr = 0;
		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
				 ring_type, ring_num);
	}

	dp_srng_configure_interrupt_thresholds(soc, &ring_params,
					       ring_type, ring_num,
					       num_entries);

	if (cached) {
		ring_params.flags |= HAL_SRNG_CACHED_DESC;
		srng->cached = 1;
	}

	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
		mac_id, &ring_params);

	if (!srng->hal_srng) {
		if (cached) {
			qdf_mem_free(srng->base_vaddr_unaligned);
		} else {
			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
						srng->alloc_size,
						srng->base_vaddr_unaligned,
						srng->base_paddr_unaligned, 0);
		}
	}

	return 0;
}

/*
 * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
 * @soc: DP SOC handle
 * @srng: source ring structure
 * @ring_type: type of ring
 * @ring_num: ring number
 *
 * Return: None
 */
static void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
			   int ring_type, int ring_num)
{
	if (!srng->hal_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Ring type: %d, num:%d not setup"),
			  ring_type, ring_num);
		return;
	}

	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
	srng->hal_srng = NULL;
}

/**
 * dp_srng_cleanup() - Internal function to cleanup SRNG rings used by data path
 * Any buffers allocated and attached to ring entries are expected to be freed
 * before calling this function.
 */
static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
	int ring_type, int ring_num)
{
	if (!dp_is_soc_reinit(soc)) {
		if (!srng->hal_srng && (srng->alloc_size == 0)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Ring type: %d, num:%d not setup"),
				  ring_type, ring_num);
			return;
		}

		if (srng->hal_srng) {
			hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
			srng->hal_srng = NULL;
		}
	}

	if (srng->alloc_size && srng->base_vaddr_unaligned) {
		if (!srng->cached) {
			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
						srng->alloc_size,
						srng->base_vaddr_unaligned,
						srng->base_paddr_unaligned, 0);
		} else {
			qdf_mem_free(srng->base_vaddr_unaligned);
		}
		srng->alloc_size = 0;
		srng->base_vaddr_unaligned = NULL;
	}
	srng->hal_srng = NULL;
}

/* TODO: Need this interface from HIF */
void *hif_get_hal_handle(struct hif_opaque_softc *hif_handle);

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
			 hal_ring_handle_t hal_ring_hdl)
{
	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
	uint32_t hp, tp;
	uint8_t ring_id;

	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
	ring_id = hal_srng_ring_id_get(hal_ring_hdl);

	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_START);

	return hal_srng_access_start(hal_soc, hal_ring_hdl);
}

void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
			hal_ring_handle_t hal_ring_hdl)
{
	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
	uint32_t hp, tp;
	uint8_t ring_id;

	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
	ring_id = hal_srng_ring_id_get(hal_ring_hdl);

	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_END);

	return hal_srng_access_end(hal_soc, hal_ring_hdl);
}
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */

/*
 * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
 * @dp_ctx: DP SOC handle
 * @budget: Number of frames/descriptors that can be processed in one shot
 *
 * Return: remaining budget/quota for the soc device
 */
static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
{
	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_soc *soc = int_ctx->soc;
	int ring = 0;
	uint32_t work_done = 0;
	int budget = dp_budget;
	uint8_t tx_mask = int_ctx->tx_ring_mask;
	uint8_t rx_mask = int_ctx->rx_ring_mask;
	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
	uint32_t remaining_quota = dp_budget;
	struct dp_pdev *pdev = NULL;

	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x\n",
			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
			 reo_status_mask,
			 int_ctx->rx_mon_ring_mask,
			 int_ctx->host2rxdma_ring_mask,
			 int_ctx->rxdma2host_ring_mask);

	/* Process Tx completion interrupts first to return back buffers */
	while (tx_mask) {
		if (tx_mask & 0x1) {
			work_done = dp_tx_comp_handler(int_ctx,
						       soc,
						       soc->tx_comp_ring[ring].hal_srng,
						       ring, remaining_quota);

			if (work_done) {
				intr_stats->num_tx_ring_masks[ring]++;
				dp_verbose_debug("tx mask 0x%x ring %d, budget %d, work_done %d",
						 tx_mask, ring, budget,
						 work_done);
			}

			budget -= work_done;
			if (budget <= 0)
				goto budget_done;

			remaining_quota = budget;
		}
		tx_mask = tx_mask >> 1;
		ring++;
	}

	/* Process REO Exception ring interrupt */
	if (rx_err_mask) {
		work_done = dp_rx_err_process(int_ctx, soc,
					      soc->reo_exception_ring.hal_srng,
					      remaining_quota);

		if (work_done) {
			intr_stats->num_rx_err_ring_masks++;
			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -= work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx WBM release ring interrupt */
	if (rx_wbm_rel_mask) {
		work_done = dp_rx_wbm_err_process(int_ctx, soc,
						  soc->rx_rel_ring.hal_srng,
						  remaining_quota);

		if (work_done) {
			intr_stats->num_rx_wbm_rel_ring_masks++;
			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -= work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx interrupts */
	if (rx_mask) {
		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
			if (!(rx_mask & (1 << ring)))
				continue;
			work_done = dp_rx_process(int_ctx,
						  soc->reo_dest_ring[ring].hal_srng,
						  ring,
						  remaining_quota);
			if (work_done) {
				intr_stats->num_rx_ring_masks[ring]++;
				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
						 rx_mask, ring,
						 work_done, budget);
				budget -= work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}
		}
	}

	if (reo_status_mask) {
		if (dp_reo_status_ring_handler(int_ctx, soc))
			int_ctx->intr_stats.num_reo_status_ring_masks++;
	}

	/* Process LMAC interrupts */
	for (ring = 0; ring < MAX_NUM_LMAC_HW; ring++) {
		int mac_for_pdev = ring;

		pdev = dp_get_pdev_for_lmac_id(soc, mac_for_pdev);
		if (!pdev)
			continue;
		if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
			work_done = dp_mon_process(soc, mac_for_pdev,
						   remaining_quota);
			if (work_done)
				intr_stats->num_rx_mon_ring_masks++;
			budget -= work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}

		if (int_ctx->rxdma2host_ring_mask &
				(1 << mac_for_pdev)) {
			work_done = dp_rxdma_err_process(int_ctx, soc,
							 mac_for_pdev,
							 remaining_quota);
			if (work_done)
				intr_stats->num_rxdma2host_ring_masks++;
			budget -= work_done;
			if (budget <= 0)
				goto budget_done;
			remaining_quota = budget;
		}

		if (int_ctx->host2rxdma_ring_mask &
					(1 << mac_for_pdev)) {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;
			struct dp_srng *rx_refill_buf_ring =
				&soc->rx_refill_buf_ring[mac_for_pdev];

			intr_stats->num_host2rxdma_ring_masks++;
			DP_STATS_INC(pdev, replenish.low_thresh_intrs,
				     1);
			dp_rx_buffers_replenish(soc, mac_for_pdev,
						rx_refill_buf_ring,
						&soc->rx_desc_buf[mac_for_pdev],
						0, &desc_list, &tail);

		}
	}

	qdf_lro_flush(int_ctx->lro_ctx);
	intr_stats->num_masks++;

budget_done:
	return dp_budget - budget;
}
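
/*
 * Illustration (not part of the driver): the budget accounting above is a
 * simple work-conserving split. With a hypothetical dp_budget of 64, if Tx
 * completions consume 20 entries and Rx processing consumes 30, budget ends
 * at 14 and the handler returns 64 - 14 = 50, i.e. the total work done,
 * letting the NAPI/ext-group layer decide whether to repoll.
 */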

/*
 * dp_interrupt_timer() - timer poll for interrupts
 * @arg: SoC Handle
 *
 * Return: None
 *
 */
static void dp_interrupt_timer(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *) arg;
	int i;

	if (qdf_atomic_read(&soc->cmn_init_done)) {
		for (i = 0;
			i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
			dp_service_srngs(&soc->intr_ctx[i], 0xffff);

		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
	}
}

/*
 * dp_soc_attach_poll() - Register handlers for DP interrupts
 * @txrx_soc: DP SOC handle
 *
 * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
 * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
 * rx_monitor_ring mask to indicate the rings that are processed by the
 * handler.
 *
 * Return: 0 for success, nonzero for failure.
 */
static QDF_STATUS dp_soc_attach_poll(struct cdp_soc_t *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;

	soc->intr_mode = DP_INTR_POLL;

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].dp_intr_id = i;
		soc->intr_ctx[i].tx_ring_mask =
			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_ring_mask =
			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_mon_ring_mask =
			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_err_ring_mask =
			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].reo_status_ring_mask =
			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rxdma2host_ring_mask =
			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].soc = soc;
		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
	}

	qdf_timer_init(soc->osdev, &soc->int_timer,
			dp_interrupt_timer, (void *)soc,
			QDF_TIMER_TYPE_WAKE_APPS);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_soc_set_interrupt_mode() - Set the interrupt mode in soc
 * @soc: DP soc handle
 *
 * Set the appropriate interrupt mode flag in the soc
 */
static void dp_soc_set_interrupt_mode(struct dp_soc *soc)
{
	uint32_t msi_base_data, msi_vector_start;
	int msi_vector_count, ret;

	soc->intr_mode = DP_INTR_INTEGRATED;

	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
	    (soc->cdp_soc.ol_ops->get_con_mode &&
	     soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)) {
		soc->intr_mode = DP_INTR_POLL;
	} else {
		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
						  &msi_vector_count,
						  &msi_base_data,
						  &msi_vector_start);
		if (ret)
			return;

		soc->intr_mode = DP_INTR_MSI;
	}
}
1784 
1785 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc);
1786 #if defined(DP_INTR_POLL_BOTH)
1787 /*
1788  * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
1789  * @txrx_soc: DP SOC handle
1790  *
1791  * Call the appropriate attach function based on the mode of operation.
1792  * This is a WAR for enabling monitor mode.
1793  *
1794  * Return: 0 for success. nonzero for failure.
1795  */
1796 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
1797 {
1798 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1799 
1800 	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
1801 	    (soc->cdp_soc.ol_ops->get_con_mode &&
1802 	     soc->cdp_soc.ol_ops->get_con_mode() ==
1803 	     QDF_GLOBAL_MONITOR_MODE)) {
1804 		dp_info("Poll mode");
1805 		return dp_soc_attach_poll(txrx_soc);
1806 	} else {
1807 		dp_info("Interrupt mode");
1808 		return dp_soc_interrupt_attach(txrx_soc);
1809 	}
1810 }
1811 #else
1812 #if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
1813 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
1814 {
1815 	return dp_soc_attach_poll(txrx_soc);
1816 }
1817 #else
1818 static QDF_STATUS dp_soc_interrupt_attach_wrapper(struct cdp_soc_t *txrx_soc)
1819 {
1820 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1821 
1822 	if (hif_is_polled_mode_enabled(soc->hif_handle))
1823 		return dp_soc_attach_poll(txrx_soc);
1824 	else
1825 		return dp_soc_interrupt_attach(txrx_soc);
1826 }
1827 #endif
1828 #endif
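
/*
 * Compile-time selection recap (editorial summary of the #if chain above):
 *
 *	DP_INTR_POLL_BOTH        -> decide at runtime: poll mode when NAPI is
 *	                            disabled or con_mode is monitor mode,
 *	                            else register real interrupts
 *	DP_INTR_POLL_BASED != 0  -> always dp_soc_attach_poll()
 *	otherwise                -> dp_soc_interrupt_attach(), unless HIF
 *	                            reports hif_is_polled_mode_enabled()
 */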
1829 
1830 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
1831 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
1832 {
1833 	int j;
1834 	int num_irq = 0;
1835 
1836 	int tx_mask =
1837 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1838 	int rx_mask =
1839 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1840 	int rx_mon_mask =
1841 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1842 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1843 					soc->wlan_cfg_ctx, intr_ctx_num);
1844 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1845 					soc->wlan_cfg_ctx, intr_ctx_num);
1846 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1847 					soc->wlan_cfg_ctx, intr_ctx_num);
1848 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1849 					soc->wlan_cfg_ctx, intr_ctx_num);
1850 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
1851 					soc->wlan_cfg_ctx, intr_ctx_num);
1852 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
1853 					soc->wlan_cfg_ctx, intr_ctx_num);
1854 
1855 	soc->intr_mode = DP_INTR_INTEGRATED;
1856 
1857 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
1858 
1859 		if (tx_mask & (1 << j)) {
1860 			irq_id_map[num_irq++] =
1861 				(wbm2host_tx_completions_ring1 - j);
1862 		}
1863 
1864 		if (rx_mask & (1 << j)) {
1865 			irq_id_map[num_irq++] =
1866 				(reo2host_destination_ring1 - j);
1867 		}
1868 
1869 		if (rxdma2host_ring_mask & (1 << j)) {
1870 			irq_id_map[num_irq++] =
1871 				rxdma2host_destination_ring_mac1 - j;
1872 		}
1873 
1874 		if (host2rxdma_ring_mask & (1 << j)) {
1875 			irq_id_map[num_irq++] =
1876 				host2rxdma_host_buf_ring_mac1 -	j;
1877 		}
1878 
1879 		if (host2rxdma_mon_ring_mask & (1 << j)) {
1880 			irq_id_map[num_irq++] =
1881 				host2rxdma_monitor_ring1 - j;
1882 		}
1883 
1884 		if (rx_mon_mask & (1 << j)) {
1885 			irq_id_map[num_irq++] =
1886 				ppdu_end_interrupts_mac1 - j;
1887 			irq_id_map[num_irq++] =
1888 				rxdma2host_monitor_status_ring_mac1 - j;
1889 		}
1890 
1891 		if (rx_wbm_rel_ring_mask & (1 << j))
1892 			irq_id_map[num_irq++] = wbm2host_rx_release;
1893 
1894 		if (rx_err_ring_mask & (1 << j))
1895 			irq_id_map[num_irq++] = reo2host_exception;
1896 
1897 		if (reo_status_ring_mask & (1 << j))
1898 			irq_id_map[num_irq++] = reo2host_status;
1899 
1900 	}
1901 	*num_irq_r = num_irq;
1902 }
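
/*
 * Worked example (illustrative): if this context has tx_ring_mask = 0x3,
 * bits j = 0 and j = 1 are set, so the loop above records
 *
 *	irq_id_map[num_irq++] = wbm2host_tx_completions_ring1 - 0;
 *	irq_id_map[num_irq++] = wbm2host_tx_completions_ring1 - 1;
 *
 * i.e. one integrated-mode IRQ id per enabled ring, counting down from the
 * ring1 identifier; the same "base id minus bit position" pattern is used
 * for the other ring masks handled in the loop.
 */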
1903 
1904 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
1905 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
1906 		int msi_vector_count, int msi_vector_start)
1907 {
1908 	int tx_mask = wlan_cfg_get_tx_ring_mask(
1909 					soc->wlan_cfg_ctx, intr_ctx_num);
1910 	int rx_mask = wlan_cfg_get_rx_ring_mask(
1911 					soc->wlan_cfg_ctx, intr_ctx_num);
1912 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
1913 					soc->wlan_cfg_ctx, intr_ctx_num);
1914 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1915 					soc->wlan_cfg_ctx, intr_ctx_num);
1916 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1917 					soc->wlan_cfg_ctx, intr_ctx_num);
1918 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1919 					soc->wlan_cfg_ctx, intr_ctx_num);
1920 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1921 					soc->wlan_cfg_ctx, intr_ctx_num);
1922 
1923 	unsigned int vector =
1924 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
1925 	int num_irq = 0;
1926 
1927 	soc->intr_mode = DP_INTR_MSI;
1928 
1929 	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
1930 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
1931 		irq_id_map[num_irq++] =
1932 			pld_get_msi_irq(soc->osdev->dev, vector);
1933 
1934 	*num_irq_r = num_irq;
1935 }
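
/*
 * Worked example (illustrative numbers): with msi_vector_count = 3 and
 * msi_vector_start = 1, interrupt contexts are spread round-robin over the
 * available vectors:
 *
 *	intr_ctx_num: 0  1  2  3  4  5  6
 *	vector:       1  2  3  1  2  3  1
 *
 * so all rings grouped into one interrupt context share the single MSI IRQ
 * returned by pld_get_msi_irq() for that vector.
 */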
1936 
1937 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1938 				    int *irq_id_map, int *num_irq)
1939 {
1940 	int msi_vector_count, ret;
1941 	uint32_t msi_base_data, msi_vector_start;
1942 
1943 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1944 					    &msi_vector_count,
1945 					    &msi_base_data,
1946 					    &msi_vector_start);
1947 	if (ret)
1948 		dp_soc_interrupt_map_calculate_integrated(soc,
1949 				intr_ctx_num, irq_id_map, num_irq);
1950 	else
1952 		dp_soc_interrupt_map_calculate_msi(soc,
1953 				intr_ctx_num, irq_id_map, num_irq,
1954 				msi_vector_count, msi_vector_start);
1955 }
1956 
1957 /*
1958  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
1959  * @txrx_soc: DP SOC handle
1960  *
1961  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1962  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
1963  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1964  *
1965  * Return: 0 for success. nonzero for failure.
1966  */
1967 static QDF_STATUS dp_soc_interrupt_attach(struct cdp_soc_t *txrx_soc)
1968 {
1969 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1970 
1971 	int i = 0;
1972 	int num_irq = 0;
1973 
1974 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1975 		int ret = 0;
1976 
1977 		/* Map of IRQ ids registered with one interrupt context */
1978 		int irq_id_map[HIF_MAX_GRP_IRQ];
1979 
1980 		int tx_mask =
1981 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1982 		int rx_mask =
1983 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1984 		int rx_mon_mask =
1985 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
1986 		int rx_err_ring_mask =
1987 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1988 		int rx_wbm_rel_ring_mask =
1989 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1990 		int reo_status_ring_mask =
1991 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1992 		int rxdma2host_ring_mask =
1993 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1994 		int host2rxdma_ring_mask =
1995 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
1996 		int host2rxdma_mon_ring_mask =
1997 			wlan_cfg_get_host2rxdma_mon_ring_mask(
1998 				soc->wlan_cfg_ctx, i);
1999 
2000 		soc->intr_ctx[i].dp_intr_id = i;
2001 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
2002 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
2003 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
2004 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
2005 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
2006 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
2007 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
2008 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
2009 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
2010 			 host2rxdma_mon_ring_mask;
2011 
2012 		soc->intr_ctx[i].soc = soc;
2013 
2014 		num_irq = 0;
2015 
2016 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
2017 					       &num_irq);
2018 
2019 		ret = hif_register_ext_group(soc->hif_handle,
2020 				num_irq, irq_id_map, dp_service_srngs,
2021 				&soc->intr_ctx[i], "dp_intr",
2022 				HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
2023 
2024 		if (ret) {
2025 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2026 			FL("failed, ret = %d"), ret);
2027 
2028 			return QDF_STATUS_E_FAILURE;
2029 		}
2030 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
2031 	}
2032 
2033 	hif_configure_ext_group_interrupts(soc->hif_handle);
2034 
2035 	return QDF_STATUS_SUCCESS;
2036 }
2037 
2038 /*
2039  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
2040  * @txrx_soc: DP SOC handle
2041  *
2042  * Return: none
2043  */
2044 static void dp_soc_interrupt_detach(struct cdp_soc_t *txrx_soc)
2045 {
2046 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
2047 	int i;
2048 
2049 	if (soc->intr_mode == DP_INTR_POLL) {
2050 		qdf_timer_stop(&soc->int_timer);
2051 		qdf_timer_free(&soc->int_timer);
2052 	} else {
2053 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
2054 	}
2055 
2056 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
2057 		soc->intr_ctx[i].tx_ring_mask = 0;
2058 		soc->intr_ctx[i].rx_ring_mask = 0;
2059 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
2060 		soc->intr_ctx[i].rx_err_ring_mask = 0;
2061 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
2062 		soc->intr_ctx[i].reo_status_ring_mask = 0;
2063 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
2064 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
2065 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
2066 
2067 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
2068 	}
2069 }
2070 
2071 #define AVG_MAX_MPDUS_PER_TID 128
2072 #define AVG_TIDS_PER_CLIENT 2
2073 #define AVG_FLOWS_PER_TID 2
2074 #define AVG_MSDUS_PER_FLOW 128
2075 #define AVG_MSDUS_PER_MPDU 4
2076 
2077 /*
2078  * Allocate and set up the link descriptor pool that will be used by HW for
2079  * various link and queue descriptors and managed by WBM
2080  */
2081 static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
2082 {
2083 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
2084 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
2085 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
2086 	uint32_t num_mpdus_per_link_desc =
2087 		hal_num_mpdus_per_link_desc(soc->hal_soc);
2088 	uint32_t num_msdus_per_link_desc =
2089 		hal_num_msdus_per_link_desc(soc->hal_soc);
2090 	uint32_t num_mpdu_links_per_queue_desc =
2091 		hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
2092 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
2093 	uint32_t total_link_descs, total_mem_size;
2094 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
2095 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
2096 	uint32_t num_link_desc_banks;
2097 	uint32_t last_bank_size = 0;
2098 	uint32_t entry_size, num_entries;
2099 	int i;
2100 	uint32_t desc_id = 0;
2101 	qdf_dma_addr_t *baseaddr = NULL;
2102 
2103 	/* Only Tx queue descriptors are allocated from the common link
2104 	 * descriptor pool. Rx queue descriptors (REO queue extension
2105 	 * descriptors) are not included here because they are expected to be
2106 	 * allocated contiguously with the REO queue descriptors.
2107 	 */
2108 	num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
2109 		AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
2110 
2111 	num_mpdu_queue_descs = num_mpdu_link_descs /
2112 		num_mpdu_links_per_queue_desc;
2113 
2114 	num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
2115 		AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
2116 		num_msdus_per_link_desc;
2117 
2118 	num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
2119 		AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
2120 
2121 	num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
2122 		num_tx_msdu_link_descs + num_rx_msdu_link_descs;
2123 
2124 	/* Round up to power of 2 */
2125 	total_link_descs = 1;
2126 	while (total_link_descs < num_entries)
2127 		total_link_descs <<= 1;
2128 
2129 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2130 		FL("total_link_descs: %u, link_desc_size: %d"),
2131 		total_link_descs, link_desc_size);
2132 	total_mem_size =  total_link_descs * link_desc_size;
2133 
2134 	total_mem_size += link_desc_align;
2135 
2136 	if (total_mem_size <= max_alloc_size) {
2137 		num_link_desc_banks = 0;
2138 		last_bank_size = total_mem_size;
2139 	} else {
2140 		num_link_desc_banks = (total_mem_size) /
2141 			(max_alloc_size - link_desc_align);
2142 		last_bank_size = total_mem_size %
2143 			(max_alloc_size - link_desc_align);
2144 	}
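
	/*
	 * Worked sizing example (illustrative numbers only): with
	 * max_clients = 64, num_mpdus_per_link_desc = 6 and
	 * num_msdus_per_link_desc = 7,
	 *
	 *	num_mpdu_link_descs    = (64 * 2 * 128) / 6     = 2730
	 *	num_tx_msdu_link_descs = (64 * 2 * 2 * 128) / 7 = 4681
	 *
	 * num_entries sums the four descriptor counts, total_link_descs
	 * rounds that sum up to a power of 2, and the pool is then carved
	 * into banks of at most max_alloc_size bytes, with any remainder
	 * placed in last_bank_size.
	 */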
2145 
2146 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2147 		FL("total_mem_size: %d, num_link_desc_banks: %u"),
2148 		total_mem_size, num_link_desc_banks);
2149 
2150 	for (i = 0; i < num_link_desc_banks; i++) {
2151 		if (!dp_is_soc_reinit(soc)) {
2152 			baseaddr = &soc->link_desc_banks[i].
2153 					base_paddr_unaligned;
2154 			soc->link_desc_banks[i].base_vaddr_unaligned =
2155 				qdf_mem_alloc_consistent(soc->osdev,
2156 							 soc->osdev->dev,
2157 							 max_alloc_size,
2158 							 baseaddr);
2159 		}
2160 		soc->link_desc_banks[i].size = max_alloc_size;
2161 
2162 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
2163 			soc->link_desc_banks[i].base_vaddr_unaligned) +
2164 			((unsigned long)(
2165 			soc->link_desc_banks[i].base_vaddr_unaligned) %
2166 			link_desc_align));
2167 
2168 		soc->link_desc_banks[i].base_paddr = (unsigned long)(
2169 			soc->link_desc_banks[i].base_paddr_unaligned) +
2170 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
2171 			(unsigned long)(
2172 			soc->link_desc_banks[i].base_vaddr_unaligned));
2173 
2174 		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
2175 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2176 				FL("Link descriptor memory alloc failed"));
2177 			goto fail;
2178 		}
2179 		if (!dp_is_soc_reinit(soc)) {
2180 			qdf_minidump_log(soc->link_desc_banks[i].base_vaddr,
2181 					 soc->link_desc_banks[i].size,
2182 					 "link_desc_bank");
2183 		}
2187 	}
2188 
2189 	if (last_bank_size) {
2190 		/* Allocate the last bank in case the total memory required is
2191 		 * not an exact multiple of max_alloc_size
2192 		 */
2193 		if (!dp_is_soc_reinit(soc)) {
2194 			baseaddr = &soc->link_desc_banks[i].
2195 					base_paddr_unaligned;
2196 			soc->link_desc_banks[i].base_vaddr_unaligned =
2197 				qdf_mem_alloc_consistent(soc->osdev,
2198 							 soc->osdev->dev,
2199 							 last_bank_size,
2200 							 baseaddr);
2201 		}
2202 		soc->link_desc_banks[i].size = last_bank_size;
2203 
2204 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
2205 			(soc->link_desc_banks[i].base_vaddr_unaligned) +
2206 			((unsigned long)(
2207 			soc->link_desc_banks[i].base_vaddr_unaligned) %
2208 			link_desc_align));
2209 
2210 		soc->link_desc_banks[i].base_paddr =
2211 			(unsigned long)(
2212 			soc->link_desc_banks[i].base_paddr_unaligned) +
2213 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
2214 			(unsigned long)(
2215 			soc->link_desc_banks[i].base_vaddr_unaligned));
2216 
2217 		if (!dp_is_soc_reinit(soc)) {
2218 			qdf_minidump_log(soc->link_desc_banks[i].base_vaddr,
2219 					 soc->link_desc_banks[i].size,
2220 					 "link_desc_bank");
2221 		}
2225 	}
2226 
2227 
2228 	/* Allocate and setup link descriptor idle list for HW internal use */
2229 	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
2230 	total_mem_size = entry_size * total_link_descs;
2231 
2232 	if (total_mem_size <= max_alloc_size) {
2233 		void *desc;
2234 
2235 		if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
2236 				  WBM_IDLE_LINK, 0, 0, total_link_descs, 0)) {
2237 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2238 				FL("Link desc idle ring setup failed"));
2239 			goto fail;
2240 		}
2241 
2242 		qdf_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
2243 				 soc->wbm_idle_link_ring.alloc_size,
2244 				 "wbm_idle_link_ring");
2245 
2246 		hal_srng_access_start_unlocked(soc->hal_soc,
2247 			soc->wbm_idle_link_ring.hal_srng);
2248 
2249 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
2250 			soc->link_desc_banks[i].base_paddr; i++) {
2251 			uint32_t num_entries = (soc->link_desc_banks[i].size -
2252 				((unsigned long)(
2253 				soc->link_desc_banks[i].base_vaddr) -
2254 				(unsigned long)(
2255 				soc->link_desc_banks[i].base_vaddr_unaligned)))
2256 				/ link_desc_size;
2257 			unsigned long paddr = (unsigned long)(
2258 				soc->link_desc_banks[i].base_paddr);
2259 
2260 			while (num_entries && (desc = hal_srng_src_get_next(
2261 				soc->hal_soc,
2262 				soc->wbm_idle_link_ring.hal_srng))) {
2263 				hal_set_link_desc_addr(desc,
2264 					LINK_DESC_COOKIE(desc_id, i), paddr);
2265 				num_entries--;
2266 				desc_id++;
2267 				paddr += link_desc_size;
2268 			}
2269 		}
2270 		hal_srng_access_end_unlocked(soc->hal_soc,
2271 			soc->wbm_idle_link_ring.hal_srng);
2272 	} else {
2273 		uint32_t num_scatter_bufs;
2274 		uint32_t num_entries_per_buf;
2275 		uint32_t rem_entries;
2276 		uint8_t *scatter_buf_ptr;
2277 		uint16_t scatter_buf_num;
2278 		uint32_t buf_size = 0;
2279 
2280 		soc->wbm_idle_scatter_buf_size =
2281 			hal_idle_list_scatter_buf_size(soc->hal_soc);
2282 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
2283 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
2284 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
2285 					soc->hal_soc, total_mem_size,
2286 					soc->wbm_idle_scatter_buf_size);
2287 
2288 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
2289 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2290 					FL("scatter bufs size out of bounds"));
2291 			goto fail;
2292 		}
2293 
2294 		for (i = 0; i < num_scatter_bufs; i++) {
2295 			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
2296 			if (!dp_is_soc_reinit(soc)) {
2297 				buf_size = soc->wbm_idle_scatter_buf_size;
2298 				soc->wbm_idle_scatter_buf_base_vaddr[i] =
2299 					qdf_mem_alloc_consistent(soc->osdev,
2300 								 soc->osdev->
2301 								 dev,
2302 								 buf_size,
2303 								 baseaddr);
2304 			}
2305 			if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
2306 				QDF_TRACE(QDF_MODULE_ID_DP,
2307 					  QDF_TRACE_LEVEL_ERROR,
2308 					  FL("Scatter lst memory alloc fail"));
2309 				goto fail;
2310 			}
2311 		}
2312 
2313 		/* Populate idle list scatter buffers with link descriptor
2314 		 * pointers
2315 		 */
2316 		scatter_buf_num = 0;
2317 		scatter_buf_ptr = (uint8_t *)(
2318 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
2319 		rem_entries = num_entries_per_buf;
2320 
2321 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
2322 			soc->link_desc_banks[i].base_paddr; i++) {
2323 			uint32_t num_link_descs =
2324 				(soc->link_desc_banks[i].size -
2325 				((unsigned long)(
2326 				soc->link_desc_banks[i].base_vaddr) -
2327 				(unsigned long)(
2328 				soc->link_desc_banks[i].base_vaddr_unaligned)))
2329 				/ link_desc_size;
2330 			unsigned long paddr = (unsigned long)(
2331 				soc->link_desc_banks[i].base_paddr);
2332 
2333 			while (num_link_descs) {
2334 				hal_set_link_desc_addr((void *)scatter_buf_ptr,
2335 					LINK_DESC_COOKIE(desc_id, i), paddr);
2336 				num_link_descs--;
2337 				desc_id++;
2338 				paddr += link_desc_size;
2339 				rem_entries--;
2340 				if (rem_entries) {
2341 					scatter_buf_ptr += entry_size;
2342 				} else {
2343 					rem_entries = num_entries_per_buf;
2344 					scatter_buf_num++;
2345 
2346 					if (scatter_buf_num >= num_scatter_bufs)
2347 						break;
2348 
2349 					scatter_buf_ptr = (uint8_t *)(
2350 						soc->wbm_idle_scatter_buf_base_vaddr[
2351 						scatter_buf_num]);
2352 				}
2353 			}
2354 		}
2355 		/* Setup link descriptor idle list in HW */
2356 		hal_setup_link_idle_list(soc->hal_soc,
2357 			soc->wbm_idle_scatter_buf_base_paddr,
2358 			soc->wbm_idle_scatter_buf_base_vaddr,
2359 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
2360 			(uint32_t)(scatter_buf_ptr -
2361 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
2362 			scatter_buf_num-1])), total_link_descs);
2363 	}
2364 	return 0;
2365 
2366 fail:
2367 	if (soc->wbm_idle_link_ring.hal_srng) {
2368 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
2369 				WBM_IDLE_LINK, 0);
2370 	}
2371 
2372 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2373 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
2374 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2375 				soc->wbm_idle_scatter_buf_size,
2376 				soc->wbm_idle_scatter_buf_base_vaddr[i],
2377 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
2378 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
2379 		}
2380 	}
2381 
2382 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
2383 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
2384 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2385 				soc->link_desc_banks[i].size,
2386 				soc->link_desc_banks[i].base_vaddr_unaligned,
2387 				soc->link_desc_banks[i].base_paddr_unaligned,
2388 				0);
2389 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
2390 		}
2391 	}
2392 	return QDF_STATUS_E_FAILURE;
2393 }
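
/*
 * Summary sketch (editorial): dp_hw_link_desc_pool_setup() publishes the
 * idle link descriptors to WBM in one of two ways, chosen purely by size:
 *
 *	if (entry_size * total_link_descs <= max_alloc_size)
 *		-> write each descriptor address into the WBM_IDLE_LINK SRNG
 *	else
 *		-> fill DMA-coherent scatter buffers and hand them to HW via
 *		   hal_setup_link_idle_list()
 *
 * In both paths LINK_DESC_COOKIE(desc_id, i) tags every descriptor with its
 * bank index so software can locate the owning bank later.
 */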
2394 
2395 /*
2396  * Free the link descriptor pool that was set up for use by the HW
2397  */
2398 static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
2399 {
2400 	int i;
2401 
2402 	if (soc->wbm_idle_link_ring.hal_srng) {
2403 		qdf_minidump_remove(
2404 			soc->wbm_idle_link_ring.base_vaddr_unaligned);
2405 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
2406 			WBM_IDLE_LINK, 0);
2407 	}
2408 
2409 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2410 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
2411 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2412 				soc->wbm_idle_scatter_buf_size,
2413 				soc->wbm_idle_scatter_buf_base_vaddr[i],
2414 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
2415 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
2416 		}
2417 	}
2418 
2419 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
2420 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
2421 			qdf_minidump_remove(soc->link_desc_banks[i].base_vaddr);
2422 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2423 				soc->link_desc_banks[i].size,
2424 				soc->link_desc_banks[i].base_vaddr_unaligned,
2425 				soc->link_desc_banks[i].base_paddr_unaligned,
2426 				0);
2427 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
2428 		}
2429 	}
2430 }
2431 
2432 #ifdef IPA_OFFLOAD
2433 #define REO_DST_RING_SIZE_QCA6290 1023
2434 #ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
2435 #define REO_DST_RING_SIZE_QCA8074 1023
2436 #define REO_DST_RING_SIZE_QCN9000 2048
2437 #else
2438 #define REO_DST_RING_SIZE_QCA8074 8
2439 #define REO_DST_RING_SIZE_QCN9000 8
2440 #endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
2441 
2442 #else
2443 
2444 #define REO_DST_RING_SIZE_QCA6290 1024
2445 #ifndef CONFIG_WIFI_EMULATION_WIFI_3_0
2446 #define REO_DST_RING_SIZE_QCA8074 2048
2447 #define REO_DST_RING_SIZE_QCN9000 2048
2448 #else
2449 #define REO_DST_RING_SIZE_QCA8074 8
2450 #define REO_DST_RING_SIZE_QCN9000 8
2451 #endif /* CONFIG_WIFI_EMULATION_WIFI_3_0 */
2452 #endif /* IPA_OFFLOAD */
2453 
2454 #ifndef FEATURE_WDS
2455 static void dp_soc_wds_attach(struct dp_soc *soc)
2456 {
2457 }
2458 
2459 static void dp_soc_wds_detach(struct dp_soc *soc)
2460 {
2461 }
2462 #endif
2463 /*
2464  * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
2465  * @soc: Datapath soc handle
2466  *
2467  * This API resets the default cpu ring map
2468  */
2469 
2470 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
2471 {
2472 	uint8_t i;
2473 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2474 
2475 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
2476 		switch (nss_config) {
2477 		case dp_nss_cfg_first_radio:
2478 			/*
2479 			 * Setting Tx ring map for one nss offloaded radio
2480 			 */
2481 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
2482 			break;
2483 
2484 		case dp_nss_cfg_second_radio:
2485 			/*
2486 			 * Setting Tx ring map when the second radio is NSS offloaded
2487 			 */
2488 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
2489 			break;
2490 
2491 		case dp_nss_cfg_dbdc:
2492 			/*
2493 			 * Setting Tx ring map for 2 nss offloaded radios
2494 			 */
2495 			soc->tx_ring_map[i] =
2496 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
2497 			break;
2498 
2499 		case dp_nss_cfg_dbtc:
2500 			/*
2501 			 * Setting Tx ring map for 3 nss offloaded radios
2502 			 */
2503 			soc->tx_ring_map[i] =
2504 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
2505 			break;
2506 
2507 		default:
2508 			dp_err("tx_ring_map failed due to invalid nss cfg");
2509 			break;
2510 		}
2511 	}
2512 }
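
/*
 * Example (illustrative): for nss_config = dp_nss_cfg_dbdc, every entry of
 * soc->tx_ring_map[] comes from dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP],
 * steering each CPU's Tx enqueues onto the TCL rings still owned by the
 * host once both radios are offloaded to NSS.
 */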
2513 
2514 /*
2515  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
2516  * @soc: DP soc handle
2517  * @ring_type: ring type
2518  * @ring_num: ring number
2519  *
2520  * Return: 1 if the ring is offloaded to NSS, 0 otherwise
2521  */
2522 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
2523 {
2524 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2525 	uint8_t status = 0;
2526 
2527 	switch (ring_type) {
2528 	case WBM2SW_RELEASE:
2529 	case REO_DST:
2530 	case RXDMA_BUF:
2531 		status = ((nss_config) & (1 << ring_num));
2532 		break;
2533 	default:
2534 		break;
2535 	}
2536 
2537 	return status;
2538 }
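
/*
 * Usage example (illustrative): with nss_config = dp_nss_cfg_first_radio,
 * i.e. bit 0 set,
 *
 *	dp_soc_ring_if_nss_offloaded(soc, REO_DST, 0)  -> nonzero (offloaded)
 *	dp_soc_ring_if_nss_offloaded(soc, REO_DST, 1)  -> 0
 *	dp_soc_ring_if_nss_offloaded(soc, TCL_DATA, 0) -> 0 (type not handled)
 *
 * That is, the nss_config bitmap is indexed by ring number, and only
 * WBM2SW_RELEASE, REO_DST and RXDMA_BUF rings are ever treated as NSS
 * offloaded.
 */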
2539 
2540 /*
2541  * dp_soc_disable_mac2_intr_mask() - reset interrupt mask for WMAC2 hw rings
2542  * @soc: DP SoC handle
2543  *
2544  * Return: none
2545  */
2546 static void dp_soc_disable_mac2_intr_mask(struct dp_soc *soc)
2547 {
2548 	int *grp_mask = NULL;
2549 	int group_number;
2550 
2551 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2552 	group_number = dp_srng_find_ring_in_mask(0x2, grp_mask);
2553 	wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2554 					  group_number, 0x0);
2555 
2556 	grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
2557 	group_number = dp_srng_find_ring_in_mask(0x2, grp_mask);
2558 	wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
2559 				      group_number, 0x0);
2560 
2561 	grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
2562 	group_number = dp_srng_find_ring_in_mask(0x2, grp_mask);
2563 	wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
2564 					  group_number, 0x0);
2565 
2566 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
2567 	group_number = dp_srng_find_ring_in_mask(0x2, grp_mask);
2568 	wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
2569 					      group_number, 0x0);
2570 }
2571 
2572 /*
2573  * dp_soc_reset_intr_mask() - reset interrupt mask
2574  * @soc: DP SoC handle
2575  *
2576  * Return: none
2577  */
2578 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
2579 {
2580 	uint8_t j;
2581 	int *grp_mask = NULL;
2582 	int group_number, mask, num_ring;
2583 
2584 	/* number of tx ring */
2585 	num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
2586 
2587 	/*
2588 	 * group mask for tx completion ring.
2589 	 */
2590 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
2591 
2592 	/* loop and reset the mask for only offloaded ring */
2593 	for (j = 0; j < num_ring; j++) {
2594 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
2595 			continue;
2596 		}
2597 
2598 		/*
2599 		 * Group number corresponding to tx offloaded ring.
2600 		 */
2601 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2602 		if (group_number < 0) {
2603 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2604 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2605 					WBM2SW_RELEASE, j);
2606 			return;
2607 		}
2608 
2609 		/* reset the tx mask for offloaded ring */
2610 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
2611 		mask &= (~(1 << j));
2612 
2613 		/*
2614 		 * reset the interrupt mask for offloaded ring.
2615 		 */
2616 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2617 	}
2618 
2619 	/* number of rx rings */
2620 	num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2621 
2622 	/*
2623 	 * group mask for reo destination ring.
2624 	 */
2625 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2626 
2627 	/* loop and reset the mask for only offloaded ring */
2628 	for (j = 0; j < num_ring; j++) {
2629 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
2630 			continue;
2631 		}
2632 
2633 		/*
2634 		 * Group number corresponding to rx offloaded ring.
2635 		 */
2636 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2637 		if (group_number < 0) {
2638 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2639 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2640 					REO_DST, j);
2641 			return;
2642 		}
2643 
2644 		/* clear the rx mask bit for the offloaded ring */
2645 		mask = wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
2646 		mask &= (~(1 << j));
2647 
2648 		/*
2649 		 * set the interrupt mask to zero for rx offloaded radio.
2650 		 */
2651 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2652 	}
2653 
2654 	/*
2655 	 * group mask for Rx buffer refill ring
2656 	 */
2657 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2658 
2659 	/* loop and reset the mask for only offloaded ring */
2660 	for (j = 0; j < MAX_PDEV_CNT; j++) {
2661 		int lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
2662 
2663 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
2664 			continue;
2665 		}
2666 
2667 		/*
2668 		 * Group number corresponding to rx offloaded ring.
2669 		 */
2670 		group_number = dp_srng_find_ring_in_mask(lmac_id, grp_mask);
2671 		if (group_number < 0) {
2672 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2673 					FL("ring not part of any group; ring_type: %d,ring_num %d"),
2674 					RXDMA_BUF, lmac_id);
2675 			return;
2676 		}
2677 
2678 		/* clear the refill ring mask bit for the offloaded ring */
2679 		mask = wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2680 				group_number);
2681 		mask &= (~(1 << lmac_id));
2682 
2683 		/*
2684 		 * set the interrupt mask to zero for rx offloaded radio.
2685 		 */
2686 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2687 			group_number, mask);
2688 	}
2689 }
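
/*
 * Worked example (illustrative): if tx ring 0 is NSS offloaded and the
 * group-mask search places it in interrupt group 0 with tx_ring_mask = 0x7,
 * the loop above clears only that ring's bit:
 *
 *	mask = 0x7 & ~(1 << 0);				-> 0x6
 *	wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, 0, 0x6);
 *
 * so the host stops taking interrupts for rings serviced by NSS while the
 * remaining host-owned rings in the same group keep their mask bits.
 */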
2690 
2691 #ifdef IPA_OFFLOAD
2692 /**
2693  * dp_reo_remap_config() - configure reo remap register value based
2694  *                         on nss configuration.
2695  *		Based on the offload_radio value below, the remap
2696  *		configuration is applied:
2697  *		0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
2698  *		1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
2699  *		2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
2700  *		3 - both Radios handled by NSS (remap not required)
2701  *		4 - IPA OFFLOAD enabled (remap rings 1,2 & 3)
2702  *
2703  * @remap1: output parameter indicates reo remap 1 register value
2704  * @remap2: output parameter indicates reo remap 2 register value
2705  * Return: bool type, true if remap is configured else false.
2706  */
2707 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap1, uint32_t *remap2)
2708 {
2709 	*remap1 = HAL_REO_REMAP_IX2(REO_REMAP_SW1, 16) |
2710 		  HAL_REO_REMAP_IX2(REO_REMAP_SW2, 17) |
2711 		  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 18) |
2712 		  HAL_REO_REMAP_IX2(REO_REMAP_SW1, 19) |
2713 		  HAL_REO_REMAP_IX2(REO_REMAP_SW2, 20) |
2714 		  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 21) |
2715 		  HAL_REO_REMAP_IX2(REO_REMAP_SW1, 22) |
2716 		  HAL_REO_REMAP_IX2(REO_REMAP_SW2, 23);
2717 
2718 	*remap2 = HAL_REO_REMAP_IX3(REO_REMAP_SW3, 24) |
2719 		  HAL_REO_REMAP_IX3(REO_REMAP_SW1, 25) |
2720 		  HAL_REO_REMAP_IX3(REO_REMAP_SW2, 26) |
2721 		  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 27) |
2722 		  HAL_REO_REMAP_IX3(REO_REMAP_SW1, 28) |
2723 		  HAL_REO_REMAP_IX3(REO_REMAP_SW2, 29) |
2724 		  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 30) |
2725 		  HAL_REO_REMAP_IX3(REO_REMAP_SW1, 31);
2726 
2727 	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
2728 
2729 	return true;
2730 }
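
/*
 * Decoding note (editorial): each HAL_REO_REMAP_IX2/IX3 term above programs
 * one destination slot of the REO remap registers. Reading remap1 as slots
 * 16..23 gives the repeating pattern
 *
 *	SW1, SW2, SW3, SW1, SW2, SW3, SW1, SW2
 *
 * i.e. hash-based distribution rotates over REO2SW1..REO2SW3 only, which
 * leaves REO2SW4 for the IPA uC in this IPA_OFFLOAD build.
 */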
2731 #else
2732 static bool dp_reo_remap_config(struct dp_soc *soc,
2733 				uint32_t *remap1,
2734 				uint32_t *remap2)
2735 {
2736 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2737 	uint8_t target_type;
2738 
2739 	target_type = hal_get_target_type(soc->hal_soc);
2740 
2741 	switch (offload_radio) {
2742 	case dp_nss_cfg_default:
2743 		*remap1 = HAL_REO_REMAP_IX2(REO_REMAP_SW1, 16) |
2744 			  HAL_REO_REMAP_IX2(REO_REMAP_SW2, 17) |
2745 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 18) |
2746 			  HAL_REO_REMAP_IX2(REO_REMAP_SW4, 19) |
2747 			  HAL_REO_REMAP_IX2(REO_REMAP_SW1, 20) |
2748 			  HAL_REO_REMAP_IX2(REO_REMAP_SW2, 21) |
2749 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 22) |
2750 			  HAL_REO_REMAP_IX2(REO_REMAP_SW4, 23);
2751 
2752 		*remap2 = HAL_REO_REMAP_IX3(REO_REMAP_SW1, 24) |
2753 			  HAL_REO_REMAP_IX3(REO_REMAP_SW2, 25) |
2754 			  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 26) |
2755 			  HAL_REO_REMAP_IX3(REO_REMAP_SW4, 27) |
2756 			  HAL_REO_REMAP_IX3(REO_REMAP_SW1, 28) |
2757 			  HAL_REO_REMAP_IX3(REO_REMAP_SW2, 29) |
2758 			  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 30) |
2759 			  HAL_REO_REMAP_IX3(REO_REMAP_SW4, 31);
2760 		break;
2761 	case dp_nss_cfg_first_radio:
2762 		*remap1 = HAL_REO_REMAP_IX2(REO_REMAP_SW2, 16) |
2763 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 17) |
2764 			  HAL_REO_REMAP_IX2(REO_REMAP_SW4, 18) |
2765 			  HAL_REO_REMAP_IX2(REO_REMAP_SW2, 19) |
2766 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 20) |
2767 			  HAL_REO_REMAP_IX2(REO_REMAP_SW4, 21) |
2768 			  HAL_REO_REMAP_IX2(REO_REMAP_SW2, 22) |
2769 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 23);
2770 
2771 		*remap2 = HAL_REO_REMAP_IX3(REO_REMAP_SW4, 24) |
2772 			  HAL_REO_REMAP_IX3(REO_REMAP_SW2, 25) |
2773 			  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 26) |
2774 			  HAL_REO_REMAP_IX3(REO_REMAP_SW4, 27) |
2775 			  HAL_REO_REMAP_IX3(REO_REMAP_SW2, 28) |
2776 			  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 29) |
2777 			  HAL_REO_REMAP_IX3(REO_REMAP_SW4, 30) |
2778 			  HAL_REO_REMAP_IX3(REO_REMAP_SW2, 31);
2779 		break;
2780 	case dp_nss_cfg_second_radio:
2781 		*remap1 = HAL_REO_REMAP_IX2(REO_REMAP_SW1, 16) |
2782 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 17) |
2783 			  HAL_REO_REMAP_IX2(REO_REMAP_SW4, 18) |
2784 			  HAL_REO_REMAP_IX2(REO_REMAP_SW1, 19) |
2785 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 20) |
2786 			  HAL_REO_REMAP_IX2(REO_REMAP_SW4, 21) |
2787 			  HAL_REO_REMAP_IX2(REO_REMAP_SW1, 22) |
2788 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 23);
2789 
2790 		*remap2 = HAL_REO_REMAP_IX3(REO_REMAP_SW4, 24) |
2791 			  HAL_REO_REMAP_IX3(REO_REMAP_SW1, 25) |
2792 			  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 26) |
2793 			  HAL_REO_REMAP_IX3(REO_REMAP_SW4, 27) |
2794 			  HAL_REO_REMAP_IX3(REO_REMAP_SW1, 28) |
2795 			  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 29) |
2796 			  HAL_REO_REMAP_IX3(REO_REMAP_SW4, 30) |
2797 			  HAL_REO_REMAP_IX3(REO_REMAP_SW1, 31);
2798 
2799 		break;
2800 	case dp_nss_cfg_dbdc:
2801 	case dp_nss_cfg_dbtc:
2802 		/* return false if both or all are offloaded to NSS */
2803 		return false;
2804 	}
2805 
2806 	dp_debug("remap1 %x remap2 %x offload_radio %u",
2807 		 *remap1, *remap2, offload_radio);
2808 	return true;
2809 }
2810 #endif /* IPA_OFFLOAD */
2811 
2812 /*
2813  * dp_reo_frag_dst_set() - configure reo register to set the
2814  *                        fragment destination ring
2815  * @soc: Datapath soc
2816  * @frag_dst_ring: output parameter to set fragment destination ring
2817  *
2818  * Based on offload_radio below, the fragment destination ring is selected:
2819  * 0 - TCL
2820  * 1 - SW1
2821  * 2 - SW2
2822  * 3 - SW3
2823  * 4 - SW4
2824  * 5 - Release
2825  * 6 - FW
2826  * 7 - alternate select
2827  *
2828  * Return: void
2829  */
2830 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
2831 {
2832 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2833 
2834 	switch (offload_radio) {
2835 	case dp_nss_cfg_default:
2836 		*frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
2837 		break;
2838 	case dp_nss_cfg_first_radio:
2839 		/*
2840 		 * This configuration is valid for single band radio which
2841 		 * is also NSS offload.
2842 		 */
2843 	case dp_nss_cfg_dbdc:
2844 	case dp_nss_cfg_dbtc:
2845 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
2846 		break;
2847 	default:
2848 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2849 				FL("dp_reo_frag_dst_set invalid offload radio config"));
2850 		break;
2851 	}
2852 }
2853 
2854 #ifdef ENABLE_VERBOSE_DEBUG
2855 static void dp_enable_verbose_debug(struct dp_soc *soc)
2856 {
2857 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2858 
2859 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2860 
2861 	if (soc_cfg_ctx->per_pkt_trace & dp_verbose_debug_mask)
2862 		is_dp_verbose_debug_enabled = true;
2863 
2864 	if (soc_cfg_ctx->per_pkt_trace & hal_verbose_debug_mask)
2865 		hal_set_verbose_debug(true);
2866 	else
2867 		hal_set_verbose_debug(false);
2868 }
2869 #else
2870 static void dp_enable_verbose_debug(struct dp_soc *soc)
2871 {
2872 }
2873 #endif
2874 
2875 #ifdef WLAN_FEATURE_STATS_EXT
2876 static inline void dp_create_ext_stats_event(struct dp_soc *soc)
2877 {
2878 	qdf_event_create(&soc->rx_hw_stats_event);
2879 }
2880 #else
2881 static inline void dp_create_ext_stats_event(struct dp_soc *soc)
2882 {
2883 }
2884 #endif
2885 
2886 /*
2887  * dp_soc_cmn_setup() - Common SoC level initialization
2888  * @soc:		Datapath SOC handle
2889  *
2890  * This is an internal function used to set up common SOC data structures,
2891  * to be called from PDEV attach after receiving HW mode capabilities from FW
2892  */
2893 static int dp_soc_cmn_setup(struct dp_soc *soc)
2894 {
2895 	int i, cached;
2896 	struct hal_reo_params reo_params;
2897 	int tx_ring_size;
2898 	int tx_comp_ring_size;
2899 	int reo_dst_ring_size;
2900 	uint32_t entries;
2901 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2902 
2903 	if (qdf_atomic_read(&soc->cmn_init_done))
2904 		return 0;
2905 
2906 	if (dp_hw_link_desc_pool_setup(soc))
2907 		goto fail1;
2908 
2909 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2910 
2911 	dp_enable_verbose_debug(soc);
2912 
2913 	/* Setup SRNG rings */
2914 	/* Common rings */
2915 	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
2916 
2917 	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
2918 			  entries, 0)) {
2919 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2920 			FL("dp_srng_setup failed for wbm_desc_rel_ring"));
2921 		goto fail1;
2922 	}
2923 
2924 	qdf_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
2925 			 soc->wbm_desc_rel_ring.alloc_size,
2926 			 "wbm_desc_rel_ring");
2927 
2928 	soc->num_tcl_data_rings = 0;
2929 	/* Tx data rings */
2930 	if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) {
2931 		soc->num_tcl_data_rings =
2932 			wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
2933 		tx_comp_ring_size =
2934 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
2935 		tx_ring_size =
2936 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
2937 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
2938 			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
2939 					  TCL_DATA, i, 0, tx_ring_size, 0)) {
2940 				QDF_TRACE(QDF_MODULE_ID_DP,
2941 					QDF_TRACE_LEVEL_ERROR,
2942 					FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
2943 				goto fail1;
2944 			}
2945 
2946 			/* Disable cached desc if NSS offload is enabled */
2947 			cached = WLAN_CFG_DST_RING_CACHED_DESC;
2948 			if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
2949 				cached = 0;
2950 			/*
2951 			 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
2952 			 * count
2953 			 */
2954 			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
2955 					  WBM2SW_RELEASE, i, 0,
2956 					  tx_comp_ring_size,
2957 					  cached)) {
2958 				QDF_TRACE(QDF_MODULE_ID_DP,
2959 					QDF_TRACE_LEVEL_ERROR,
2960 					FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
2961 				goto fail1;
2962 			}
2963 		}
2964 	} else {
2965 		/* This will be incremented during per pdev ring setup */
2966 		soc->num_tcl_data_rings = 0;
2967 	}
2968 
2969 	if (dp_tx_soc_attach(soc)) {
2970 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2971 				FL("dp_tx_soc_attach failed"));
2972 		goto fail1;
2973 	}
2974 
2975 	entries = wlan_cfg_get_dp_soc_tcl_cmd_ring_size(soc_cfg_ctx);
2976 	/* TCL command and status rings */
2977 	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
2978 			  entries, 0)) {
2979 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2980 			FL("dp_srng_setup failed for tcl_cmd_ring"));
2981 		goto fail2;
2982 	}
2983 
2984 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
2985 	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
2986 			  entries, 0)) {
2987 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2988 			FL("dp_srng_setup failed for tcl_status_ring"));
2989 		goto fail2;
2990 	}
2991 
2992 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
2993 
2994 	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
2995 	 * descriptors
2996 	 */
2997 
2998 	/* Rx data rings */
2999 	if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
3000 		soc->num_reo_dest_rings =
3001 			wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
3002 		QDF_TRACE(QDF_MODULE_ID_DP,
3003 			QDF_TRACE_LEVEL_INFO,
3004 			FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings);
3005 
3006 		/* Disable cached desc if NSS offload is enabled */
3007 		cached = WLAN_CFG_DST_RING_CACHED_DESC;
3008 		if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
3009 			cached = 0;
3010 
3011 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
3012 			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
3013 					  i, 0, reo_dst_ring_size, cached)) {
3014 				QDF_TRACE(QDF_MODULE_ID_DP,
3015 					  QDF_TRACE_LEVEL_ERROR,
3016 					  FL(RNG_ERR "reo_dest_ring [%d]"), i);
3017 				goto fail2;
3018 			}
3019 		}
3020 	} else {
3021 		/* This will be incremented during per pdev ring setup */
3022 		soc->num_reo_dest_rings = 0;
3023 	}
3024 
3025 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
3026 	/* LMAC RxDMA to SW Rings configuration */
3027 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) {
3028 
3029 		for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
3030 			if (dp_srng_setup(soc, &soc->rxdma_err_dst_ring[i],
3031 					  RXDMA_DST, 0, i, entries, 0)) {
3032 				QDF_TRACE(QDF_MODULE_ID_DP,
3033 					  QDF_TRACE_LEVEL_ERROR,
3034 					  FL(RNG_ERR "rxdma_err_dst_ring"));
3035 				goto fail2;
3036 			}
3037 		}
3038 	}
3039 	/* TBD: call dp_rx_init to setup Rx SW descriptors */
3040 
3041 	/* REO reinjection ring */
3042 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
3043 	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
3044 			  entries, 0)) {
3045 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3046 			  FL("dp_srng_setup failed for reo_reinject_ring"));
3047 		goto fail2;
3048 	}
3049 
3050 
3051 	/* Rx release ring */
3052 	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
3053 			  wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx),
3054 			  0)) {
3055 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3056 			  FL("dp_srng_setup failed for rx_rel_ring"));
3057 		goto fail2;
3058 	}
3059 
3060 
3061 	/* Rx exception ring */
3062 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
3063 	if (dp_srng_setup(soc, &soc->reo_exception_ring,
3064 			  REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries, 0)) {
3065 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3066 			  FL("dp_srng_setup failed for reo_exception_ring"));
3067 		goto fail2;
3068 	}
3069 
3070 
3071 	/* REO command and status rings */
3072 	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
3073 			  wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx),
3074 			  0)) {
3075 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3076 			FL("dp_srng_setup failed for reo_cmd_ring"));
3077 		goto fail2;
3078 	}
3079 
3080 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
3081 	TAILQ_INIT(&soc->rx.reo_cmd_list);
3082 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
3083 
3084 	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
3085 			  wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx),
3086 			  0)) {
3087 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3088 			FL("dp_srng_setup failed for reo_status_ring"));
3089 		goto fail2;
3090 	}
3091 
3092 	/*
3093 	 * Skip registering hw ring interrupts for WMAC2 on IPQ6018,
3094 	 * as WMAC2 is not present on the IPQ6018 platform.
3095 	 */
3096 	if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018) {
3097 		dp_soc_disable_mac2_intr_mask(soc);
3098 	}
3099 
3100 	/* Reset the cpu ring map if radio is NSS offloaded */
3101 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) {
3102 		dp_soc_reset_cpu_ring_map(soc);
3103 		dp_soc_reset_intr_mask(soc);
3104 	}
3105 
3106 	/* Setup HW REO */
3107 	qdf_mem_zero(&reo_params, sizeof(reo_params));
3108 
3109 	if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) {
3110 
3111 		/*
3112 		 * Reo ring remap is not required if both radios
3113 		 * are offloaded to NSS
3114 		 */
3115 		if (!dp_reo_remap_config(soc,
3116 					&reo_params.remap1,
3117 					&reo_params.remap2))
3118 			goto out;
3119 
3120 		reo_params.rx_hash_enabled = true;
3121 	}
3122 
3123 	/* setup the global rx defrag waitlist */
3124 	TAILQ_INIT(&soc->rx.defrag.waitlist);
3125 	soc->rx.defrag.timeout_ms =
3126 		wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx);
3127 	soc->rx.defrag.next_flush_ms = 0;
3128 	soc->rx.flags.defrag_timeout_check =
3129 		wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx);
3130 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
3131 
3132 	dp_create_ext_stats_event(soc);
3133 out:
3134 	/*
3135 	 * set the fragment destination ring
3136 	 */
3137 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
3138 
3139 	hal_reo_setup(soc->hal_soc, &reo_params);
3140 
3141 	qdf_atomic_set(&soc->cmn_init_done, 1);
3142 
3143 	dp_soc_wds_attach(soc);
3144 
3145 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
3146 	return 0;
3147 fail2:
3148 	dp_tx_soc_detach(soc);
3149 fail1:
3150 	/*
3151 	 * Cleanup will be done as part of soc_detach, which will
3152 	 * be called on pdev attach failure
3153 	 */
3154 	return QDF_STATUS_E_FAILURE;
3155 }
3156 
3157 /*
3158  * dp_soc_cmn_cleanup() - Common SoC level de-initialization
3159  *
3160  * @soc: Datapath SOC handle
3161  *
3162  * This function is responsible for cleaning up the DP SoC resources
3163  * initialized in dp_pdev_attach_wifi3-->dp_soc_cmn_setup, since
3164  * dp_soc_detach_wifi3 cannot accurately identify whether some of
3165  * them have been initialized or not.
3166  *
3167  */
3168 static void dp_soc_cmn_cleanup(struct dp_soc *soc)
3169 {
3170 	if (!dp_is_soc_reinit(soc)) {
3171 		dp_tx_soc_detach(soc);
3172 	}
3173 
3174 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
3175 
3176 	dp_reo_cmdlist_destroy(soc);
3177 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
3178 }
3179 
3180 static QDF_STATUS
3181 dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
3182 		     int force);
3183 
3184 static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3185 {
3186 	struct cdp_lro_hash_config lro_hash;
3187 	QDF_STATUS status;
3188 
3189 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
3190 	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
3191 	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
3192 		dp_err("LRO, GRO and RX hash disabled");
3193 		return QDF_STATUS_E_FAILURE;
3194 	}
3195 
3196 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
3197 
3198 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
3199 	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
3200 		lro_hash.lro_enable = 1;
3201 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
3202 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
3203 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
3204 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
3205 	}
3206 
3207 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
3208 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
3209 		 LRO_IPV4_SEED_ARR_SZ));
3210 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
3211 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
3212 		 LRO_IPV6_SEED_ARR_SZ));
3213 
3214 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
3215 
3216 	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
3217 		QDF_BUG(0);
3218 		dp_err("lro_hash_config not configured");
3219 		return QDF_STATUS_E_FAILURE;
3220 	}
3221 
3222 	status = soc->cdp_soc.ol_ops->lro_hash_config(soc->ctrl_psoc,
3223 						      pdev->pdev_id,
3224 						      &lro_hash);
3225 	if (!QDF_IS_STATUS_SUCCESS(status)) {
3226 		dp_err("failed to send lro_hash_config to FW %u", status);
3227 		return status;
3228 	}
3229 
3230 	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
3231 		lro_hash.lro_enable, lro_hash.tcp_flag,
3232 		lro_hash.tcp_flag_mask);
3233 
3234 	dp_info("toeplitz_hash_ipv4:");
3235 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3236 			   lro_hash.toeplitz_hash_ipv4,
3237 			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
3238 			   LRO_IPV4_SEED_ARR_SZ));
3239 
3240 	dp_info("toeplitz_hash_ipv6:");
3241 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3242 			   lro_hash.toeplitz_hash_ipv6,
3243 			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
3244 			   LRO_IPV6_SEED_ARR_SZ));
3245 
3246 	return status;
3247 }
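
/*
 * Eligibility note (editorial, based on the usual FW interpretation of this
 * config): the target masks a packet's TCP flags with tcp_flag_mask and
 * compares the result against tcp_flag, so only plain ACK segments qualify
 * for LRO/GRO aggregation:
 *
 *	ACK        -> (flags & mask) == QDF_TCPHDR_ACK -> eligible
 *	SYN | ACK  -> (flags & mask) != QDF_TCPHDR_ACK -> not eligible
 *
 * The Toeplitz seeds are freshly randomized on every setup, which
 * randomizes the flow-to-hash mapping across boots.
 */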
3248 
3249 /*
3250  * dp_rxdma_ring_setup() - configure the RX DMA rings
3251  * @soc: data path SoC handle
3252  * @pdev: Physical device handle
3253  *
3254  * Return: 0 - success, > 0 - failure
3255  */
3256 #ifdef QCA_HOST2FW_RXBUF_RING
3257 static int dp_rxdma_ring_setup(struct dp_soc *soc,
3258 	 struct dp_pdev *pdev)
3259 {
3260 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3261 	int max_mac_rings;
3262 	int i;
3263 	int ring_size;
3264 
3265 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3266 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
3267 	ring_size =  wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx);
3268 
3269 	for (i = 0; i < max_mac_rings; i++) {
3270 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
3271 		if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
3272 				  RXDMA_BUF, 1, i, ring_size, 0)) {
3273 			QDF_TRACE(QDF_MODULE_ID_DP,
3274 				 QDF_TRACE_LEVEL_ERROR,
3275 				 FL("failed rx mac ring setup"));
3276 			return QDF_STATUS_E_FAILURE;
3277 		}
3278 	}
3279 	return QDF_STATUS_SUCCESS;
3280 }
3281 #else
3282 static int dp_rxdma_ring_setup(struct dp_soc *soc,
3283 	 struct dp_pdev *pdev)
3284 {
3285 	return QDF_STATUS_SUCCESS;
3286 }
3287 #endif
3288 
3289 /**
3290  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
3291  * @pdev: DP_PDEV handle
3292  *
3293  * Return: void
3294  */
3295 static inline void
3296 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
3297 {
3298 	uint8_t map_id;
3299 	struct dp_soc *soc = pdev->soc;
3300 
3301 	if (!soc)
3302 		return;
3303 
3304 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
3305 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
3306 			     default_dscp_tid_map,
3307 			     sizeof(default_dscp_tid_map));
3308 	}
3309 
3310 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
3311 		hal_tx_set_dscp_tid_map(soc->hal_soc,
3312 					default_dscp_tid_map,
3313 					map_id);
3314 	}
3315 }
3316 
3317 /**
3318  * dp_pcp_tid_map_setup(): Initialize the pcp-tid maps
3319  * @pdev: DP_PDEV handle
3320  *
3321  * Return: void
3322  */
3323 static inline void
3324 dp_pcp_tid_map_setup(struct dp_pdev *pdev)
3325 {
3326 	struct dp_soc *soc = pdev->soc;
3327 
3328 	if (!soc)
3329 		return;
3330 
3331 	qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
3332 		     sizeof(default_pcp_tid_map));
3333 	hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
3334 }
3335 
3336 #ifdef IPA_OFFLOAD
3337 /**
3338  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
3339  * @soc: data path instance
3340  * @pdev: core txrx pdev context
3341  *
3342  * Return: QDF_STATUS_SUCCESS: success
3343  *         QDF_STATUS_E_RESOURCES: Error return
3344  */
3345 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3346 					   struct dp_pdev *pdev)
3347 {
3348 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3349 	int entries;
3350 
3351 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3352 	entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
3353 
3354 	/* Setup second Rx refill buffer ring */
3355 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
3356 			  IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id, entries, 0)
3357 	   ) {
3358 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3359 			FL("dp_srng_setup failed second rx refill ring"));
3360 		return QDF_STATUS_E_FAILURE;
3361 	}
3362 	return QDF_STATUS_SUCCESS;
3363 }
3364 
3365 /**
3366  * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
3367  * @soc: data path instance
3368  * @pdev: core txrx pdev context
3369  *
3370  * Return: void
3371  */
3372 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3373 					      struct dp_pdev *pdev)
3374 {
3375 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
3376 			IPA_RX_REFILL_BUF_RING_IDX);
3377 }
3378 
3379 #else
3380 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3381 					   struct dp_pdev *pdev)
3382 {
3383 	return QDF_STATUS_SUCCESS;
3384 }
3385 
3386 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3387 					      struct dp_pdev *pdev)
3388 {
3389 }
3390 #endif
3391 
3392 #if !defined(DISABLE_MON_CONFIG)
3393 /**
3394  * dp_mon_rings_setup() - Initialize Monitor rings based on target
3395  * @soc: soc handle
3396  * @pdev: physical device handle
3397  *
3398  * Return: nonzero on failure and zero on success
3399  */
3400 static
3401 QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3402 {
3403 	int mac_id = 0;
3404 	int pdev_id = pdev->pdev_id;
3405 	int entries;
3406 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3407 
3408 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3409 
3410 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3411 		int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev_id);
3412 
3413 		if (soc->wlan_cfg_ctx->rxdma1_enable) {
3414 			entries =
3415 			   wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
3416 			if (dp_srng_setup(soc,
3417 					  &soc->rxdma_mon_buf_ring[lmac_id],
3418 					  RXDMA_MONITOR_BUF, 0,	lmac_id,
3419 					  entries, 0)) {
3420 				QDF_TRACE(QDF_MODULE_ID_DP,
3421 					  QDF_TRACE_LEVEL_ERROR,
3422 					  FL(RNG_ERR "rxdma_mon_buf_ring "));
3423 				return QDF_STATUS_E_NOMEM;
3424 			}
3425 
3426 			entries =
3427 			   wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
3428 			if (dp_srng_setup(soc,
3429 					  &soc->rxdma_mon_dst_ring[lmac_id],
3430 					  RXDMA_MONITOR_DST, 0, lmac_id,
3431 					  entries, 0)) {
3432 				QDF_TRACE(QDF_MODULE_ID_DP,
3433 					  QDF_TRACE_LEVEL_ERROR,
3434 					  FL(RNG_ERR "rxdma_mon_dst_ring"));
3435 				return QDF_STATUS_E_NOMEM;
3436 			}
3437 
3438 			entries =
3439 			    wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
3440 			if (dp_srng_setup(soc,
3441 					  &soc->rxdma_mon_status_ring[lmac_id],
3442 					  RXDMA_MONITOR_STATUS, 0, lmac_id,
3443 					  entries, 0)) {
3444 				QDF_TRACE(QDF_MODULE_ID_DP,
3445 					  QDF_TRACE_LEVEL_ERROR,
3446 					  FL(RNG_ERR "rxdma_mon_status_ring"));
3447 				return QDF_STATUS_E_NOMEM;
3448 			}
3449 
3450 			entries =
3451 			   wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
3452 			if (dp_srng_setup(soc,
3453 				  &soc->rxdma_mon_desc_ring[lmac_id],
3454 					  RXDMA_MONITOR_DESC, 0, lmac_id,
3455 					  entries, 0)) {
3456 				QDF_TRACE(QDF_MODULE_ID_DP,
3457 					  QDF_TRACE_LEVEL_ERROR,
3458 					  FL(RNG_ERR "rxdma_mon_desc_ring"));
3459 				return QDF_STATUS_E_NOMEM;
3460 			}
3461 		} else {
3462 			entries =
3463 			   wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
3464 			if (dp_srng_setup(soc,
3465 					  &soc->rxdma_mon_status_ring[lmac_id],
3466 					  RXDMA_MONITOR_STATUS, 0, lmac_id,
3467 					  entries, 0)) {
3468 				QDF_TRACE(QDF_MODULE_ID_DP,
3469 					  QDF_TRACE_LEVEL_ERROR,
3470 					  FL(RNG_ERR "rxdma_mon_status_ring"));
3471 				return QDF_STATUS_E_NOMEM;
3472 			}
3473 		}
3474 	}
3475 
3476 	return QDF_STATUS_SUCCESS;
3477 }
3478 #else
3479 static
3480 QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3481 {
3482 	return QDF_STATUS_SUCCESS;
3483 }
3484 #endif
3485 
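/*
 * Note on dp_mon_rings_setup() above (a summary of the code paths, not
 * new behavior): targets with rxdma1_enable set get the full per-LMAC set
 * of monitor rings, others only the status ring:
 *
 *	rxdma1_enable == 1:  RXDMA_MONITOR_BUF/DST/STATUS/DESC rings
 *	rxdma1_enable == 0:  RXDMA_MONITOR_STATUS ring only
 */
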
3486 /* dp_iterate_update_peer_list() - update peer stats on cal client timer
3487  * @pdev_hdl: pdev handle
3488  */
3489 #ifdef ATH_SUPPORT_EXT_STAT
3490 void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
3491 {
3492 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
3493 	struct dp_soc *soc = pdev->soc;
3494 	struct dp_vdev *vdev = NULL;
3495 	struct dp_peer *peer = NULL;
3496 
3497 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3498 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3499 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
3500 		DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
3501 			dp_cal_client_update_peer_stats(&peer->stats);
3502 		}
3503 	}
3504 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3505 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3506 }
3507 #else
3508 void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
3509 {
3510 }
3511 #endif
3512 
3513 /*
3514  * dp_htt_ppdu_stats_attach() - attach resources for HTT PPDU stats processing
3515  * @pdev: Datapath PDEV handle
3516  *
3517  * Return: QDF_STATUS_SUCCESS: Success
3518  *         QDF_STATUS_E_NOMEM: Error
3519  */
3520 static QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev)
3521 {
3522 	pdev->ppdu_tlv_buf = qdf_mem_malloc(HTT_T2H_MAX_MSG_SIZE);
3523 
3524 	if (!pdev->ppdu_tlv_buf) {
3525 		QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "ppdu_tlv_buf alloc fail");
3526 		return QDF_STATUS_E_NOMEM;
3527 	}
3528 
3529 	return QDF_STATUS_SUCCESS;
3530 }
3531 
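/*
 * The TLV buffer allocated above is sized for the largest HTT T2H message
 * (HTT_T2H_MAX_MSG_SIZE) and is released by dp_htt_ppdu_stats_detach()
 * further below. Caller sketch, as used in dp_pdev_attach_wifi3():
 *
 *	if (dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS)
 *		goto fail1;	// detach path frees pdev->ppdu_tlv_buf
 */
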
3532 /*
3533 * dp_pdev_attach_wifi3() - attach txrx pdev
3534 * @txrx_soc: Datapath SOC handle
3535 * @htc_handle: HTC handle for host-target interface
3536 * @qdf_osdev: QDF OS device
3537 * @pdev_id: PDEV ID
3538 *
3539 * Return: QDF_STATUS
3540 */
3541 static inline QDF_STATUS dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
3542 					      HTC_HANDLE htc_handle,
3543 					      qdf_device_t qdf_osdev,
3544 					      uint8_t pdev_id)
3545 {
3546 	int ring_size;
3547 	int entries;
3548 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3549 	int nss_cfg;
3550 	void *sojourn_buf;
3551 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3552 	struct dp_pdev *pdev = NULL;
3553 	QDF_STATUS ret;
3554 
3555 	if (dp_is_soc_reinit(soc)) {
3556 		pdev = soc->pdev_list[pdev_id];
3557 	} else {
3558 		pdev = qdf_mem_malloc(sizeof(*pdev));
3559 		qdf_minidump_log(pdev, sizeof(*pdev), "dp_pdev");
3560 	}
3561 
3562 	if (!pdev) {
3563 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3564 			FL("DP PDEV memory allocation failed"));
3565 		ret = QDF_STATUS_E_NOMEM;
3566 		goto fail0;
3567 	}
3568 
3569 	pdev->filter = dp_mon_filter_alloc(pdev);
3570 	if (!pdev->filter) {
3571 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3572 			  FL("Memory allocation failed for monitor filters"));
3573 		qdf_mem_free(pdev);
3574 		ret = QDF_STATUS_E_NOMEM;
3575 		goto fail0;
3576 	}
3577 
3578 	/*
3579 	 * Variable to prevent double pdev deinitialization during
3580 	 * radio detach execution, i.e. in the absence of any vdev.
3581 	 */
3582 	pdev->pdev_deinit = 0;
3583 	pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer));
3584 
3585 	if (!pdev->invalid_peer) {
3586 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3587 			  FL("Invalid peer memory allocation failed"));
3588 		dp_mon_filter_dealloc(pdev);
3589 		qdf_mem_free(pdev);
3590 		ret = QDF_STATUS_E_NOMEM;
3591 		goto fail0;
3592 	}
3593 
3594 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3595 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
3596 
3597 	if (!pdev->wlan_cfg_ctx) {
3598 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3599 			FL("pdev cfg_attach failed"));
3600 
3601 		qdf_mem_free(pdev->invalid_peer);
3602 		dp_mon_filter_dealloc(pdev);
3603 		qdf_mem_free(pdev);
3604 		ret = QDF_STATUS_E_FAILURE;
3605 		goto fail0;
3606 	}
3607 
3608 	/*
3609 	 * set nss pdev config based on soc config
3610 	 */
3611 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
3612 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
3613 			(nss_cfg & (1 << pdev_id)));
3614 
3615 	pdev->soc = soc;
3616 	pdev->pdev_id = pdev_id;
3617 	soc->pdev_list[pdev_id] = pdev;
3618 
3619 	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
3620 	soc->pdev_count++;
3621 
3622 	TAILQ_INIT(&pdev->vdev_list);
3623 	qdf_spinlock_create(&pdev->vdev_list_lock);
3624 	pdev->vdev_count = 0;
3625 
3626 	qdf_spinlock_create(&pdev->tx_mutex);
3627 	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
3628 	TAILQ_INIT(&pdev->neighbour_peers_list);
3629 	pdev->neighbour_peers_added = false;
3630 	pdev->monitor_configured = false;
3631 
3632 	if (dp_soc_cmn_setup(soc)) {
3633 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3634 			FL("dp_soc_cmn_setup failed"));
3635 		ret = QDF_STATUS_E_FAILURE;
3636 		goto fail1;
3637 	}
3638 
3639 	/* Setup per PDEV TCL rings if configured */
3640 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3641 		ring_size =
3642 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
3643 
3644 		if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
3645 				  pdev_id, pdev_id, ring_size, 0)) {
3646 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3647 				FL("dp_srng_setup failed for tcl_data_ring"));
3648 			ret = QDF_STATUS_E_FAILURE;
3649 			goto fail1;
3650 		}
3651 
3652 		ring_size =
3653 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
3654 
3655 		if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
3656 				  WBM2SW_RELEASE, pdev_id, pdev_id,
3657 				  ring_size, 0)) {
3658 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3659 				FL("dp_srng_setup failed for tx_comp_ring"));
3660 			ret = QDF_STATUS_E_FAILURE;
3661 			goto fail1;
3662 		}
3663 		soc->num_tcl_data_rings++;
3664 	}
3665 
3666 	/* Tx specific init */
3667 	if (dp_tx_pdev_attach(pdev)) {
3668 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3669 			FL("dp_tx_pdev_attach failed"));
3670 		ret = QDF_STATUS_E_FAILURE;
3671 		goto fail1;
3672 	}
3673 
3674 	ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
3675 	/* Setup per PDEV REO rings if configured */
3676 	if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
3677 		if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
3678 				  pdev_id, pdev_id, ring_size, 0)) {
3679 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3680 				FL("dp_srng_setup failed for reo_dest_ring"));
3681 			ret = QDF_STATUS_E_FAILURE;
3682 			goto fail1;
3683 		}
3684 		soc->num_reo_dest_rings++;
3685 	}
3686 
3687 	ring_size =
3688 		wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc->wlan_cfg_ctx);
3689 
3690 	if (dp_srng_setup(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
3691 			  RXDMA_BUF, 0, pdev->lmac_id, ring_size, 0)) {
3692 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3693 			 FL("dp_srng_setup failed for rx refill ring"));
3694 		ret = QDF_STATUS_E_FAILURE;
3695 		goto fail1;
3696 	}
3697 
3698 	if (dp_rxdma_ring_setup(soc, pdev)) {
3699 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3700 			 FL("RXDMA ring config failed"));
3701 		ret = QDF_STATUS_E_FAILURE;
3702 		goto fail1;
3703 	}
3704 
3705 	if (dp_mon_rings_setup(soc, pdev)) {
3706 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3707 			  FL("MONITOR rings setup failed"));
3708 		ret = QDF_STATUS_E_FAILURE;
3709 		goto fail1;
3710 	}
3711 
3712 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
3713 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
3714 		if (dp_srng_setup(soc,
3715 				  &soc->rxdma_err_dst_ring[pdev->lmac_id],
3716 				  RXDMA_DST,
3717 				  0, pdev->lmac_id, entries, 0)) {
3718 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3719 				  FL(RNG_ERR "rxdma_err_dst_ring"));
3720 			ret = QDF_STATUS_E_FAILURE;
3721 			goto fail1;
3722 		}
3723 	}
3724 
3725 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev)) {
3726 		ret = QDF_STATUS_E_FAILURE;
3727 		goto fail1;
3728 	}
3729 
3730 	if (dp_ipa_ring_resource_setup(soc, pdev)) {
3731 		ret = QDF_STATUS_E_FAILURE;
3732 		goto fail1;
3733 	}
3734 
3735 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
3736 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3737 			FL("dp_ipa_uc_attach failed"));
3738 		ret = QDF_STATUS_E_FAILURE;
3739 		goto fail1;
3740 	}
3741 
3742 	/* Rx specific init */
3743 	if (dp_rx_pdev_attach(pdev)) {
3744 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3745 			  FL("dp_rx_pdev_attach failed"));
3746 		ret = QDF_STATUS_E_FAILURE;
3747 		goto fail2;
3748 	}
3749 
3750 	DP_STATS_INIT(pdev);
3751 
3752 	/* Monitor filter init */
3753 	pdev->mon_filter_mode = MON_FILTER_ALL;
3754 	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
3755 	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
3756 	pdev->fp_data_filter = FILTER_DATA_ALL;
3757 	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
3758 	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
3759 	pdev->mo_data_filter = FILTER_DATA_ALL;
3760 
3761 	dp_local_peer_id_pool_init(pdev);
3762 
3763 	dp_dscp_tid_map_setup(pdev);
3764 	dp_pcp_tid_map_setup(pdev);
3765 
3766 	/* Rx monitor mode specific init */
3767 	if (dp_rx_pdev_mon_attach(pdev)) {
3768 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3769 				"dp_rx_pdev_mon_attach failed");
3770 		ret = QDF_STATUS_E_FAILURE;
3771 		goto fail2;
3772 	}
3773 
3774 	if (dp_wdi_event_attach(pdev)) {
3775 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3776 				"dp_wdi_event_attach failed");
3777 		ret = QDF_STATUS_E_FAILURE;
3778 		goto wdi_attach_fail;
3779 	}
3780 
3781 	/* set the reo destination during initialization */
3782 	pdev->reo_dest = pdev->pdev_id + 1;
3783 
3784 	/*
3785 	 * initialize ppdu tlv list
3786 	 */
3787 	TAILQ_INIT(&pdev->ppdu_info_list);
3788 	pdev->tlv_count = 0;
3789 	pdev->list_depth = 0;
3790 
3791 	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
3792 
3793 	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
3794 			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
3795 			      TRUE);
3796 
3797 	if (pdev->sojourn_buf) {
3798 		sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
3799 		qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));
3800 	}
3801 	/* initialize cal client timer */
3802 	dp_cal_client_attach(&pdev->cal_client_ctx,
3803 			     dp_pdev_to_cdp_pdev(pdev),
3804 			     pdev->soc->osdev,
3805 			     &dp_iterate_update_peer_list);
3806 	qdf_event_create(&pdev->fw_peer_stats_event);
3807 
3808 	pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3809 
3810 	dp_init_tso_stats(pdev);
3811 
3812 	if (dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS) {
3813 		ret = QDF_STATUS_E_FAILURE;
3814 		goto fail1;
3815 	}
3816 
3817 	dp_tx_ppdu_stats_attach(pdev);
3818 
3819 	return QDF_STATUS_SUCCESS;
3820 
3821 wdi_attach_fail:
3822 	/*
3823 	 * dp_mon_link_desc_pool_cleanup is done in dp_pdev_detach
3824 	 * and hence need not be done here.
3825 	 */
3826 	dp_rx_pdev_mon_detach(pdev);
3827 
3828 fail2:
3829 	dp_rx_pdev_detach(pdev);
3830 
3831 fail1:
3832 	if (pdev->invalid_peer)
3833 		qdf_mem_free(pdev->invalid_peer);
3834 
3835 	if (pdev->filter)
3836 		dp_mon_filter_dealloc(pdev);
3837 
3838 	dp_pdev_detach((struct cdp_pdev *)pdev, 0);
3839 fail0:
3840 	return ret;
3841 }
3842 
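/*
 * Error-unwind sketch for dp_pdev_attach_wifi3() above: the goto labels
 * form a ladder, each entry releasing what was acquired after the next:
 *
 *	wdi_attach_fail:  dp_rx_pdev_mon_detach()	// undo monitor attach
 *	fail2:            dp_rx_pdev_detach()		// undo rx attach
 *	fail1:            free invalid_peer and the monitor filter, then
 *	                  dp_pdev_detach() for rings and pdev memory
 *	fail0:            nothing to release; just return ret
 */
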
3843 /*
3844 * dp_rxdma_ring_cleanup() - clean up the RX DMA rings
3845 * @soc: data path SoC handle
3846 * @pdev: Physical device handle
3847 *
3848 * Return: void
3849 */
3850 #ifdef QCA_HOST2FW_RXBUF_RING
3851 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3852 	 struct dp_pdev *pdev)
3853 {
3854 	int i;
3855 
3856 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
3857 		dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
3858 			 RXDMA_BUF, 1);
3859 
3860 	if (soc->reap_timer_init) {
3861 		qdf_timer_free(&soc->mon_reap_timer);
3862 		soc->reap_timer_init = 0;
3863 	}
3864 }
3865 #else
3866 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3867 	 struct dp_pdev *pdev)
3868 {
3869 	if (soc->lmac_timer_init) {
3870 		qdf_timer_stop(&soc->lmac_reap_timer);
3871 		qdf_timer_free(&soc->lmac_reap_timer);
3872 		soc->lmac_timer_init = 0;
3873 	}
3874 }
3875 #endif
3876 
3877 /*
3878  * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
3879  * @pdev: device object
3880  *
3881  * Return: void
3882  */
3883 static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
3884 {
3885 	struct dp_neighbour_peer *peer = NULL;
3886 	struct dp_neighbour_peer *temp_peer = NULL;
3887 
3888 	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
3889 			neighbour_peer_list_elem, temp_peer) {
3890 		/* delete this peer from the list */
3891 		TAILQ_REMOVE(&pdev->neighbour_peers_list,
3892 				peer, neighbour_peer_list_elem);
3893 		qdf_mem_free(peer);
3894 	}
3895 
3896 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
3897 }
3898 
3899 /**
3900 * dp_htt_ppdu_stats_detach() - detach stats resources
3901 * @pdev: Datapath PDEV handle
3902 *
3903 * Return: void
3904 */
3905 static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
3906 {
3907 	struct ppdu_info *ppdu_info, *ppdu_info_next;
3908 
3909 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
3910 			ppdu_info_list_elem, ppdu_info_next) {
3911 		if (!ppdu_info)
3912 			break;
3913 		qdf_assert_always(ppdu_info->nbuf);
3914 		qdf_nbuf_free(ppdu_info->nbuf);
3915 		qdf_mem_free(ppdu_info);
3916 	}
3917 
3918 	if (pdev->ppdu_tlv_buf)
3919 		qdf_mem_free(pdev->ppdu_tlv_buf);
3920 
3921 }
3922 
3923 #if !defined(DISABLE_MON_CONFIG)
3924 
3925 static
3926 void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
3927 			 int mac_id)
3928 {
3929 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
3930 		dp_srng_cleanup(soc,
3931 				&soc->rxdma_mon_buf_ring[mac_id],
3932 				RXDMA_MONITOR_BUF, 0);
3933 
3934 		dp_srng_cleanup(soc,
3935 				&soc->rxdma_mon_dst_ring[mac_id],
3936 				RXDMA_MONITOR_DST, 0);
3937 
3938 		dp_srng_cleanup(soc,
3939 				&soc->rxdma_mon_status_ring[mac_id],
3940 				RXDMA_MONITOR_STATUS, 0);
3941 
3942 		dp_srng_cleanup(soc,
3943 				&soc->rxdma_mon_desc_ring[mac_id],
3944 				RXDMA_MONITOR_DESC, 0);
3945 
3946 		dp_srng_cleanup(soc,
3947 				&soc->rxdma_err_dst_ring[mac_id],
3948 				RXDMA_DST, 0);
3949 	} else {
3950 		dp_srng_cleanup(soc,
3951 				&soc->rxdma_mon_status_ring[mac_id],
3952 				RXDMA_MONITOR_STATUS, 0);
3953 
3954 		dp_srng_cleanup(soc,
3955 				&soc->rxdma_err_dst_ring[mac_id],
3956 				RXDMA_DST, 0);
3957 	}
3958 
3959 }
3960 #else
3961 static void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
3962 				int mac_id)
3963 {
3964 }
3965 #endif
3966 
3967 /**
3968  * dp_mon_ring_deinit() - Placeholder to deinitialize Monitor rings
3969  *
3970  * @soc: soc handle
3971  * @pdev: datapath physical dev handle
3972  * @mac_id: mac number
3973  *
3974  * Return: None
3975  */
3976 static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3977 			       int mac_id)
3978 {
3979 }
3980 
3981 /**
3982  * dp_pdev_mem_reset() - Reset txrx pdev memory
3983  * @pdev: dp pdev handle
3984  *
3985  * Return: None
3986  */
3987 static void dp_pdev_mem_reset(struct dp_pdev *pdev)
3988 {
3989 	uint16_t len = 0;
3990 	uint8_t *dp_pdev_offset = (uint8_t *)pdev;
3991 
3992 	len = sizeof(struct dp_pdev) -
3993 		offsetof(struct dp_pdev, pdev_deinit) -
3994 		sizeof(pdev->pdev_deinit);
3995 	dp_pdev_offset = dp_pdev_offset +
3996 			 offsetof(struct dp_pdev, pdev_deinit) +
3997 			 sizeof(pdev->pdev_deinit);
3998 
3999 	qdf_mem_zero(dp_pdev_offset, len);
4000 }
4001 
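/*
 * Layout sketch for dp_pdev_mem_reset() above (the member order shown is
 * an assumption for illustration; the real layout lives in dp_types.h):
 *
 *	struct dp_pdev {
 *		... fields preserved across the reset ...
 *		uint8_t pdev_deinit;	// last preserved field (type assumed)
 *		... fields zeroed by dp_pdev_mem_reset() ...
 *	};
 *
 * Everything up to and including pdev_deinit survives, so the re-init
 * path can still observe the deinit flag.
 */
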
4002 #ifdef WLAN_DP_PENDING_MEM_FLUSH
4003 /**
4004  * dp_pdev_flush_pending_vdevs() - Flush all delete pending vdevs in pdev
4005  * @pdev: Datapath PDEV handle
4006  *
4007  * This is the last chance to flush all pending dp vdevs/peers;
4008  * peer/vdev leak cases such as Non-SSR with a missing peer unmap
4009  * are covered here.
4010  *
4011  * Return: None
4012  */
4013 static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
4014 {
4015 	struct dp_vdev *vdev = NULL;
4016 
4017 	while (true) {
4018 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
4019 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4020 			if (vdev->delete.pending)
4021 				break;
4022 		}
4023 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4024 
4025 		/*
4026 		 * vdev will be freed when all peers get cleanup,
4027 		 * dp_delete_pending_vdev will remove vdev from vdev_list
4028 		 * in pdev.
4029 		 */
4030 		if (vdev)
4031 			dp_vdev_flush_peers((struct cdp_vdev *)vdev, 0);
4032 		else
4033 			break;
4034 	}
4035 }
4036 #else
4037 static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
4038 {
4039 }
4040 #endif
4041 
4042 /**
4043  * dp_pdev_deinit() - Deinit txrx pdev
4044  * @txrx_pdev: Datapath PDEV handle
4045  * @force: Force deinit
4046  *
4047  * Return: None
4048  */
4049 static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
4050 {
4051 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
4052 	struct dp_soc *soc = pdev->soc;
4053 	qdf_nbuf_t curr_nbuf, next_nbuf;
4054 	int mac_id;
4055 
4056 	/*
4057 	 * Prevent double pdev deinitialization during radio detach
4058 	 * execution, i.e. in the absence of any vdev
4059 	 */
4060 	if (pdev->pdev_deinit)
4061 		return;
4062 
4063 	pdev->pdev_deinit = 1;
4064 
4065 	dp_wdi_event_detach(pdev);
4066 
4067 	dp_pdev_flush_pending_vdevs(pdev);
4068 
4069 	dp_tx_pdev_detach(pdev);
4070 
4071 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
4072 		dp_srng_deinit(soc, &soc->tcl_data_ring[pdev->pdev_id],
4073 			       TCL_DATA, pdev->pdev_id);
4074 		dp_srng_deinit(soc, &soc->tx_comp_ring[pdev->pdev_id],
4075 			       WBM2SW_RELEASE, pdev->pdev_id);
4076 	}
4077 
4078 	dp_pktlogmod_exit(pdev);
4079 
4080 	dp_rx_fst_detach(soc, pdev);
4081 	dp_rx_pdev_detach(pdev);
4082 	dp_rx_pdev_mon_detach(pdev);
4083 	dp_neighbour_peers_detach(pdev);
4084 	qdf_spinlock_destroy(&pdev->tx_mutex);
4085 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
4086 
4087 	dp_ipa_uc_detach(soc, pdev);
4088 
4089 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
4090 
4091 	/* Cleanup per PDEV REO rings if configured */
4092 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
4093 		dp_srng_deinit(soc, &soc->reo_dest_ring[pdev->pdev_id],
4094 			       REO_DST, pdev->pdev_id);
4095 	}
4096 
4097 	dp_srng_deinit(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
4098 		       RXDMA_BUF, 0);
4099 
4100 	dp_rxdma_ring_cleanup(soc, pdev);
4101 
4102 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4103 		int lmac_id =
4104 			dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id);
4105 
4106 		dp_mon_ring_deinit(soc, pdev, lmac_id);
4107 
4108 		dp_srng_deinit(soc, &soc->rxdma_err_dst_ring[lmac_id],
4109 			       RXDMA_DST, 0);
4110 	}
4111 
4112 	curr_nbuf = pdev->invalid_peer_head_msdu;
4113 	while (curr_nbuf) {
4114 		next_nbuf = qdf_nbuf_next(curr_nbuf);
4115 		qdf_nbuf_free(curr_nbuf);
4116 		curr_nbuf = next_nbuf;
4117 	}
4118 	pdev->invalid_peer_head_msdu = NULL;
4119 	pdev->invalid_peer_tail_msdu = NULL;
4120 
4121 	dp_htt_ppdu_stats_detach(pdev);
4122 
4123 	dp_tx_ppdu_stats_detach(pdev);
4124 
4125 	qdf_nbuf_free(pdev->sojourn_buf);
4126 	qdf_nbuf_queue_free(&pdev->rx_ppdu_buf_q);
4127 
4128 	dp_cal_client_detach(&pdev->cal_client_ctx);
4129 
4130 	soc->pdev_count--;
4131 
4132 	/* only do soc common cleanup when the last pdev is detached */
4133 	if (!(soc->pdev_count))
4134 		dp_soc_cmn_cleanup(soc);
4135 
4136 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
4137 	if (pdev->invalid_peer)
4138 		qdf_mem_free(pdev->invalid_peer);
4139 
4140 	/*
4141 	 * Free the monitor filter allocated and stored
4142 	 */
4143 	if (pdev->filter)
4144 		dp_mon_filter_dealloc(pdev);
4145 
4146 	qdf_mem_free(pdev->dp_txrx_handle);
4147 	dp_pdev_mem_reset(pdev);
4148 }
4149 
4150 /**
4151  * dp_pdev_deinit_wifi3() - Deinit txrx pdev
4152  * @psoc: Datapath psoc handle
4153  * @pdev_id: Id of datapath PDEV handle
4154  * @force: Force deinit
4155  *
4156  * Return: QDF_STATUS
4157  */
4158 static QDF_STATUS
4159 dp_pdev_deinit_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
4160 		     int force)
4161 {
4162 	struct dp_soc *soc = (struct dp_soc *)psoc;
4163 	struct dp_pdev *txrx_pdev =
4164 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
4165 						   pdev_id);
4166 
4167 	if (!txrx_pdev)
4168 		return QDF_STATUS_E_FAILURE;
4169 
4170 	soc->dp_soc_reinit = TRUE;
4171 
4172 	dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force);
4173 
4174 	return QDF_STATUS_SUCCESS;
4175 }
4176 
4177 /*
4178  * dp_pdev_detach() - Complete rest of pdev detach
4179  * @txrx_pdev: Datapath PDEV handle
4180  * @force: Force deinit
4181  *
4182  * Return: None
4183  */
4184 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
4185 {
4186 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
4187 	struct dp_soc *soc = pdev->soc;
4188 	struct rx_desc_pool *rx_desc_pool;
4189 	int mac_id, mac_for_pdev;
4190 	int lmac_id;
4191 
4192 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
4193 		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
4194 				TCL_DATA, pdev->pdev_id);
4195 		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
4196 				WBM2SW_RELEASE, pdev->pdev_id);
4197 	}
4198 
4199 	dp_mon_link_free(pdev);
4200 
4201 	/* Cleanup per PDEV REO rings if configured */
4202 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
4203 		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
4204 				REO_DST, pdev->pdev_id);
4205 	}
4206 	dp_rxdma_ring_cleanup(soc, pdev);
4207 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
4208 
4209 	dp_srng_cleanup(soc, &soc->rx_refill_buf_ring[pdev->lmac_id],
4210 			RXDMA_BUF, 0);
4211 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
4212 
4213 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4214 		lmac_id =
4215 			dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id);
4216 		dp_mon_ring_cleanup(soc, pdev, lmac_id);
4217 		dp_srng_cleanup(soc, &soc->rxdma_err_dst_ring[lmac_id],
4218 				RXDMA_DST, 0);
4219 
4220 		if (dp_is_soc_reinit(soc)) {
4221 			mac_for_pdev =
4222 				dp_get_lmac_id_for_pdev_id(soc, mac_id,
4223 							   pdev->pdev_id);
4224 			rx_desc_pool = &soc->rx_desc_status[mac_for_pdev];
4225 			dp_rx_desc_pool_free(soc, rx_desc_pool);
4226 			rx_desc_pool = &soc->rx_desc_mon[mac_for_pdev];
4227 			dp_rx_desc_pool_free(soc, rx_desc_pool);
4228 		}
4229 	}
4230 
4231 	if (dp_is_soc_reinit(soc)) {
4232 		rx_desc_pool = &soc->rx_desc_buf[pdev->lmac_id];
4233 		dp_rx_desc_pool_free(soc, rx_desc_pool);
4234 	}
4235 
4236 	soc->pdev_list[pdev->pdev_id] = NULL;
4237 	qdf_minidump_remove(pdev);
4238 	qdf_mem_free(pdev);
4239 }
4240 
4241 /*
4242  * dp_pdev_detach_wifi3() - detach txrx pdev
4243  * @psoc: Datapath soc handle
4244  * @pdev_id: pdev id of pdev
4245  * @force: Force detach
4246  *
4247  * Return: QDF_STATUS
4248  */
4249 static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
4250 				       int force)
4251 {
4252 	struct dp_soc *soc = (struct dp_soc *)psoc;
4253 	struct dp_pdev *txrx_pdev =
4254 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)psoc,
4255 						   pdev_id);
4256 
4257 	if (!txrx_pdev) {
4258 		dp_err("Couldn't find dp pdev");
4259 		return QDF_STATUS_E_FAILURE;
4260 	}
4261 
4262 	if (dp_is_soc_reinit(soc)) {
4263 		dp_pdev_detach((struct cdp_pdev *)txrx_pdev, force);
4264 	} else {
4265 		dp_pdev_deinit((struct cdp_pdev *)txrx_pdev, force);
4266 		dp_pdev_detach((struct cdp_pdev *)txrx_pdev, force);
4267 	}
4268 
4269 	return QDF_STATUS_SUCCESS;
4270 }
4271 
4272 /*
4273  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
4274  * @soc: DP SOC handle
4275  */
4276 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
4277 {
4278 	struct reo_desc_list_node *desc;
4279 	struct dp_rx_tid *rx_tid;
4280 
4281 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
4282 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
4283 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
4284 		rx_tid = &desc->rx_tid;
4285 		qdf_mem_unmap_nbytes_single(soc->osdev,
4286 			rx_tid->hw_qdesc_paddr,
4287 			QDF_DMA_BIDIRECTIONAL,
4288 			rx_tid->hw_qdesc_alloc_size);
4289 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
4290 		qdf_mem_free(desc);
4291 	}
4292 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
4293 	qdf_list_destroy(&soc->reo_desc_freelist);
4294 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
4295 }
4296 
4297 /**
4298  * dp_soc_mem_reset() - Reset Dp Soc memory
4299  * @soc: DP handle
4300  *
4301  * Return: None
4302  */
4303 static void dp_soc_mem_reset(struct dp_soc *soc)
4304 {
4305 	uint16_t len = 0;
4306 	uint8_t *dp_soc_offset = (uint8_t *)soc;
4307 
4308 	len = sizeof(struct dp_soc) -
4309 		offsetof(struct dp_soc, dp_soc_reinit) -
4310 		sizeof(soc->dp_soc_reinit);
4311 	dp_soc_offset = dp_soc_offset +
4312 			offsetof(struct dp_soc, dp_soc_reinit) +
4313 			sizeof(soc->dp_soc_reinit);
4314 
4315 	qdf_mem_zero(dp_soc_offset, len);
4316 }
4317 
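/*
 * dp_soc_mem_reset() applies the same preserve-then-zero pattern as
 * dp_pdev_mem_reset() above, with dp_soc_reinit as the last field kept
 * intact (see the layout sketch after dp_pdev_mem_reset()).
 */
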
4318 /**
4319  * dp_soc_deinit() - Deinitialize txrx SOC
4320  * @txrx_soc: Opaque DP SOC handle
4321  *
4322  * Return: None
4323  */
4324 static void dp_soc_deinit(void *txrx_soc)
4325 {
4326 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4327 	int i;
4328 
4329 	qdf_atomic_set(&soc->cmn_init_done, 0);
4330 
4331 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4332 		if (soc->pdev_list[i])
4333 			dp_pdev_deinit((struct cdp_pdev *)
4334 					soc->pdev_list[i], 1);
4335 	}
4336 
4337 	qdf_flush_work(&soc->htt_stats.work);
4338 	qdf_disable_work(&soc->htt_stats.work);
4339 
4340 	/* Free pending htt stats messages */
4341 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
4342 
4343 	dp_peer_find_detach(soc);
4344 
4345 	/* Free the ring memories */
4346 	/* Common rings */
4347 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
4348 
4349 	/* Tx data rings */
4350 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
4351 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
4352 			dp_srng_deinit(soc, &soc->tcl_data_ring[i],
4353 				       TCL_DATA, i);
4354 			dp_srng_deinit(soc, &soc->tx_comp_ring[i],
4355 				       WBM2SW_RELEASE, i);
4356 		}
4357 	}
4358 
4359 	/* TCL command and status rings */
4360 	dp_srng_deinit(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
4361 	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
4362 
4363 	/* Rx data rings */
4364 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
4365 		soc->num_reo_dest_rings =
4366 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
4367 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
4368 			/* TODO: Get number of rings and ring sizes
4369 			 * from wlan_cfg
4370 			 */
4371 			dp_srng_deinit(soc, &soc->reo_dest_ring[i],
4372 				       REO_DST, i);
4373 		}
4374 	}
4375 	/* REO reinjection ring */
4376 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
4377 
4378 	/* Rx release ring */
4379 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
4380 
4381 	/* Rx exception ring */
4382 	/* TODO: Better to store ring_type and ring_num in
4383 	 * dp_srng during setup
4384 	 */
4385 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
4386 
4387 	/* REO command and status rings */
4388 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
4389 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
4390 
4391 	dp_soc_wds_detach(soc);
4392 
4393 	qdf_spinlock_destroy(&soc->peer_ref_mutex);
4394 	qdf_spinlock_destroy(&soc->htt_stats.lock);
4395 
4396 	htt_soc_htc_dealloc(soc->htt_handle);
4397 
4398 	dp_reo_desc_freelist_destroy(soc);
4399 
4400 	qdf_spinlock_destroy(&soc->ast_lock);
4401 
4402 	dp_soc_mem_reset(soc);
4403 }
4404 
4405 /**
4406  * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
4407  * @txrx_soc: Opaque DP SOC handle
4408  *
4409  * Return: None
4410  */
4411 static void dp_soc_deinit_wifi3(struct cdp_soc_t *txrx_soc)
4412 {
4413 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4414 
4415 	soc->dp_soc_reinit = 1;
4416 	dp_soc_deinit(txrx_soc);
4417 }
4418 
4419 /*
4420  * dp_soc_detach() - Detach rest of txrx SOC
4421  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
4422  *
4423  * Return: None
4424  */
4425 static void dp_soc_detach(struct cdp_soc_t *txrx_soc)
4426 {
4427 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4428 	int i;
4429 
4430 	qdf_atomic_set(&soc->cmn_init_done, 0);
4431 
4432 	/* TBD: Call Tx and Rx cleanup functions to free buffers and
4433 	 * SW descriptors
4434 	 */
4435 
4436 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4437 		if (soc->pdev_list[i])
4438 			dp_pdev_detach((struct cdp_pdev *)
4439 					     soc->pdev_list[i], 1);
4440 	}
4441 
4442 	/* Free the ring memories */
4443 	/* Common rings */
4444 	qdf_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned);
4445 	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
4446 
4447 	if (dp_is_soc_reinit(soc)) {
4448 		dp_tx_soc_detach(soc);
4449 	}
4450 
4451 	/* Tx data rings */
4452 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
4453 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
4454 			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
4455 				TCL_DATA, i);
4456 			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
4457 				WBM2SW_RELEASE, i);
4458 		}
4459 	}
4460 
4461 	/* TCL command and status rings */
4462 	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
4463 	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
4464 
4465 	/* Rx data rings */
4466 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
4467 		soc->num_reo_dest_rings =
4468 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
4469 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
4470 			/* TODO: Get number of rings and ring sizes
4471 			 * from wlan_cfg
4472 			 */
4473 			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
4474 				REO_DST, i);
4475 		}
4476 	}
4477 	/* REO reinjection ring */
4478 	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
4479 
4480 	/* Rx release ring */
4481 	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
4482 	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3);
4483 
4484 	/* Rx exception ring */
4485 	/* TODO: Better to store ring_type and ring_num in
4486 	 * dp_srng during setup
4487 	 */
4488 	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
4489 
4490 	/* REO command and status rings */
4491 	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
4492 	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
4493 	dp_hw_link_desc_pool_cleanup(soc);
4494 
4495 	htt_soc_detach(soc->htt_handle);
4496 	soc->dp_soc_reinit = 0;
4497 
4498 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
4499 
4500 	qdf_minidump_remove(soc);
4501 	qdf_mem_free(soc);
4502 }
4503 
4504 /*
4505  * dp_soc_detach_wifi3() - Detach txrx SOC
4506  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
4507  *
4508  * Return: None
4509  */
4510 static void dp_soc_detach_wifi3(struct cdp_soc_t *txrx_soc)
4511 {
4512 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4513 
4514 	if (dp_is_soc_reinit(soc)) {
4515 		dp_soc_detach(txrx_soc);
4516 	} else {
4517 		dp_soc_deinit(txrx_soc);
4518 		dp_soc_detach(txrx_soc);
4519 	}
4520 }
4521 
4522 #if !defined(DISABLE_MON_CONFIG)
4523 /**
4524  * dp_mon_htt_srng_setup() - Prepare HTT messages for Monitor rings
4525  * @soc: soc handle
4526  * @pdev: physical device handle
4527  * @mac_id: LMAC ring index
4528  * @mac_for_pdev: mac id mapped to this pdev
4529  *
4530  * Return: non-zero for failure, zero for success
4531  */
4532 static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
4533 					struct dp_pdev *pdev,
4534 					int mac_id,
4535 					int mac_for_pdev)
4536 {
4537 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4538 
4539 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
4540 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4541 					soc->rxdma_mon_buf_ring[mac_id]
4542 					.hal_srng,
4543 					RXDMA_MONITOR_BUF);
4544 
4545 		if (status != QDF_STATUS_SUCCESS) {
4546 			dp_err("Failed to send htt srng setup message for Rxdma mon buf ring");
4547 			return status;
4548 		}
4549 
4550 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4551 					soc->rxdma_mon_dst_ring[mac_id]
4552 					.hal_srng,
4553 					RXDMA_MONITOR_DST);
4554 
4555 		if (status != QDF_STATUS_SUCCESS) {
4556 			dp_err("Failed to send htt srng setup message for Rxdma mon dst ring");
4557 			return status;
4558 		}
4559 
4560 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4561 					soc->rxdma_mon_status_ring[mac_id]
4562 					.hal_srng,
4563 					RXDMA_MONITOR_STATUS);
4564 
4565 		if (status != QDF_STATUS_SUCCESS) {
4566 			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
4567 			return status;
4568 		}
4569 
4570 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4571 				soc->rxdma_mon_desc_ring[mac_id]
4572 					.hal_srng,
4573 					RXDMA_MONITOR_DESC);
4574 
4575 		if (status != QDF_STATUS_SUCCESS) {
4576 			dp_err("Failed to send htt srng message for Rxdma mon desc ring");
4577 			return status;
4578 		}
4579 	} else {
4580 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4581 					soc->rxdma_mon_status_ring[mac_id]
4582 					.hal_srng,
4583 					RXDMA_MONITOR_STATUS);
4584 
4585 		if (status != QDF_STATUS_SUCCESS) {
4586 			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
4587 			return status;
4588 		}
4589 	}
4590 
4591 	return status;
4592 
4593 }
4594 #else
4595 static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
4596 					struct dp_pdev *pdev,
4597 					int mac_id,
4598 					int mac_for_pdev)
4599 {
4600 	return QDF_STATUS_SUCCESS;
4601 }
4602 #endif
4603 
4604 /*
4605  * dp_rxdma_ring_config() - configure the RX DMA rings
4606  *
4607  * This function is used to configure the MAC rings.
4608  * On MCL, the host provides buffers in the Host2FW ring; the FW
4609  * refills (copies) buffers into the ring and updates the ring_idx
4610  * register.
4611  *
4612  * @soc: data path SoC handle
4613  *
4614  * Return: zero on success, non-zero on failure
4615  */
4616 #ifdef QCA_HOST2FW_RXBUF_RING
4617 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
4618 {
4619 	int i;
4620 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4621 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4622 		struct dp_pdev *pdev = soc->pdev_list[i];
4623 
4624 		if (pdev) {
4625 			int mac_id;
4626 			bool dbs_enable = 0;
4627 			int max_mac_rings =
4628 				 wlan_cfg_get_num_mac_rings
4629 				(pdev->wlan_cfg_ctx);
4630 			int lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);
4631 
4632 			htt_srng_setup(soc->htt_handle, 0,
4633 				       soc->rx_refill_buf_ring[lmac_id]
4634 				       .hal_srng,
4635 				       RXDMA_BUF);
4636 
4637 			if (pdev->rx_refill_buf_ring2.hal_srng)
4638 				htt_srng_setup(soc->htt_handle, 0,
4639 					pdev->rx_refill_buf_ring2.hal_srng,
4640 					RXDMA_BUF);
4641 
4642 			if (soc->cdp_soc.ol_ops->
4643 				is_hw_dbs_2x2_capable) {
4644 				dbs_enable = soc->cdp_soc.ol_ops->
4645 					is_hw_dbs_2x2_capable(
4646 							(void *)soc->ctrl_psoc);
4647 			}
4648 
4649 			if (dbs_enable) {
4650 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4651 				QDF_TRACE_LEVEL_ERROR,
4652 				FL("DBS enabled max_mac_rings %d"),
4653 					 max_mac_rings);
4654 			} else {
4655 				max_mac_rings = 1;
4656 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4657 					 QDF_TRACE_LEVEL_ERROR,
4658 					 FL("DBS disabled, max_mac_rings %d"),
4659 					 max_mac_rings);
4660 			}
4661 
4662 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4663 					 FL("pdev_id %d max_mac_rings %d"),
4664 					 pdev->pdev_id, max_mac_rings);
4665 
4666 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
4667 				int mac_for_pdev =
4668 					dp_get_mac_id_for_pdev(mac_id,
4669 							       pdev->pdev_id);
4670 				/*
4671 				 * Obtain lmac id from pdev to access the LMAC
4672 				 * ring in soc context
4673 				 */
4674 				lmac_id =
4675 				dp_get_lmac_id_for_pdev_id(soc,
4676 							   mac_id,
4677 							   pdev->pdev_id);
4678 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4679 					 QDF_TRACE_LEVEL_ERROR,
4680 					 FL("mac_id %d"), mac_for_pdev);
4681 
4682 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
4683 					 pdev->rx_mac_buf_ring[mac_id]
4684 						.hal_srng,
4685 					 RXDMA_BUF);
4686 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
4687 				soc->rxdma_err_dst_ring[lmac_id]
4688 					.hal_srng,
4689 					RXDMA_DST);
4690 
4691 				/* Configure monitor mode rings */
4692 				status = dp_mon_htt_srng_setup(soc, pdev,
4693 							       lmac_id,
4694 							       mac_for_pdev);
4695 				if (status != QDF_STATUS_SUCCESS) {
4696 					dp_err("Failed to send htt monitor messages to target");
4697 					return status;
4698 				}
4699 
4700 			}
4701 		}
4702 	}
4703 
4704 	/*
4705 	 * Timer to reap rxdma status rings.
4706 	 * Needed until we enable ppdu end interrupts
4707 	 */
4708 	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
4709 		       dp_mon_reap_timer_handler, (void *)soc,
4710 		       QDF_TIMER_TYPE_WAKE_APPS);
4711 	soc->reap_timer_init = 1;
4712 	return status;
4713 }
4714 #else
4715 /* This is only for WIN */
4716 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
4717 {
4718 	int i;
4719 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4720 	int mac_for_pdev;
4721 	int lmac_id;
4722 
4723 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4724 		struct dp_pdev *pdev =  soc->pdev_list[i];
4725 
4726 		if (!pdev)
4727 			continue;
4728 
4729 		mac_for_pdev = i;
4730 		lmac_id = dp_get_lmac_id_for_pdev_id(soc, 0, i);
4731 
4732 		htt_srng_setup(soc->htt_handle, mac_for_pdev,
4733 			       soc->rx_refill_buf_ring[lmac_id].
4734 			       hal_srng, RXDMA_BUF);
4735 #ifndef DISABLE_MON_CONFIG
4736 
4737 		htt_srng_setup(soc->htt_handle, mac_for_pdev,
4738 			       soc->rxdma_mon_buf_ring[lmac_id].hal_srng,
4739 			       RXDMA_MONITOR_BUF);
4740 		htt_srng_setup(soc->htt_handle, mac_for_pdev,
4741 			       soc->rxdma_mon_dst_ring[lmac_id].hal_srng,
4742 			       RXDMA_MONITOR_DST);
4743 		htt_srng_setup(soc->htt_handle, mac_for_pdev,
4744 			       soc->rxdma_mon_status_ring[lmac_id].hal_srng,
4745 			       RXDMA_MONITOR_STATUS);
4746 		htt_srng_setup(soc->htt_handle, mac_for_pdev,
4747 			       soc->rxdma_mon_desc_ring[lmac_id].hal_srng,
4748 			       RXDMA_MONITOR_DESC);
4749 #endif
4750 		htt_srng_setup(soc->htt_handle, mac_for_pdev,
4751 			       soc->rxdma_err_dst_ring[lmac_id].hal_srng,
4752 			       RXDMA_DST);
4753 	}
4754 
4755 	/* Configure LMAC rings in Polled mode */
4756 	if (soc->lmac_polled_mode) {
4757 		/*
4758 		 * Timer to reap lmac rings.
4759 		 */
4760 		qdf_timer_init(soc->osdev, &soc->lmac_reap_timer,
4761 			       dp_service_lmac_rings, (void *)soc,
4762 			       QDF_TIMER_TYPE_WAKE_APPS);
4763 		soc->lmac_timer_init = 1;
4764 		qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
4765 	}
4766 	return status;
4767 }
4768 #endif
4769 
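/*
 * The two dp_rxdma_ring_config() variants above differ as follows (a
 * summary of the code, not additional behavior):
 *
 *	QCA_HOST2FW_RXBUF_RING (MCL): per-MAC Host2FW buffer rings are
 *	    announced via htt_srng_setup() and mon_reap_timer is armed to
 *	    reap rxdma status rings until ppdu-end interrupts are enabled.
 *	otherwise (WIN): the LMAC rings are programmed directly, and
 *	    lmac_reap_timer is armed only in polled mode.
 */
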
4770 #ifdef NO_RX_PKT_HDR_TLV
4771 static QDF_STATUS
4772 dp_rxdma_ring_sel_cfg(struct dp_soc *soc)
4773 {
4774 	int i;
4775 	int mac_id;
4776 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
4777 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4778 
4779 	htt_tlv_filter.mpdu_start = 1;
4780 	htt_tlv_filter.msdu_start = 1;
4781 	htt_tlv_filter.mpdu_end = 1;
4782 	htt_tlv_filter.msdu_end = 1;
4783 	htt_tlv_filter.attention = 1;
4784 	htt_tlv_filter.packet = 1;
4785 	htt_tlv_filter.packet_header = 0;
4786 
4787 	htt_tlv_filter.ppdu_start = 0;
4788 	htt_tlv_filter.ppdu_end = 0;
4789 	htt_tlv_filter.ppdu_end_user_stats = 0;
4790 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4791 	htt_tlv_filter.ppdu_end_status_done = 0;
4792 	htt_tlv_filter.enable_fp = 1;
4793 	htt_tlv_filter.enable_md = 0;
4795 	htt_tlv_filter.enable_mo = 0;
4796 
4797 	htt_tlv_filter.fp_mgmt_filter = 0;
4798 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
4799 	htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
4800 					 FILTER_DATA_MCAST |
4801 					 FILTER_DATA_DATA);
4802 	htt_tlv_filter.mo_mgmt_filter = 0;
4803 	htt_tlv_filter.mo_ctrl_filter = 0;
4804 	htt_tlv_filter.mo_data_filter = 0;
4805 	htt_tlv_filter.md_data_filter = 0;
4806 
4807 	htt_tlv_filter.offset_valid = true;
4808 
4809 	htt_tlv_filter.rx_packet_offset = RX_PKT_TLVS_LEN;
4810 	/* Not subscribing rx_pkt_header */
4811 	htt_tlv_filter.rx_header_offset = 0;
4812 	htt_tlv_filter.rx_mpdu_start_offset =
4813 				HAL_RX_PKT_TLV_MPDU_START_OFFSET(soc->hal_soc);
4814 	htt_tlv_filter.rx_mpdu_end_offset =
4815 				HAL_RX_PKT_TLV_MPDU_END_OFFSET(soc->hal_soc);
4816 	htt_tlv_filter.rx_msdu_start_offset =
4817 				HAL_RX_PKT_TLV_MSDU_START_OFFSET(soc->hal_soc);
4818 	htt_tlv_filter.rx_msdu_end_offset =
4819 				HAL_RX_PKT_TLV_MSDU_END_OFFSET(soc->hal_soc);
4820 	htt_tlv_filter.rx_attn_offset =
4821 				HAL_RX_PKT_TLV_ATTN_OFFSET(soc->hal_soc);
4822 
4823 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4824 		struct dp_pdev *pdev = soc->pdev_list[i];
4825 
4826 		if (!pdev)
4827 			continue;
4828 
4829 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4830 			int mac_for_pdev =
4831 				dp_get_mac_id_for_pdev(mac_id, pdev->pdev_id);
4832 			/*
4833 			 * Obtain lmac id from pdev to access the LMAC ring
4834 			 * in soc context
4835 			 */
4836 			int lmac_id =
4837 				dp_get_lmac_id_for_pdev_id(soc, mac_id,
4838 							   pdev->pdev_id);
4839 
4840 			htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4841 					    soc->rx_refill_buf_ring[lmac_id].
4842 					    hal_srng,
4843 					    RXDMA_BUF, RX_DATA_BUFFER_SIZE,
4844 					    &htt_tlv_filter);
4845 		}
4846 	}
4847 	return status;
4848 }
4849 #else
4850 static QDF_STATUS
4851 dp_rxdma_ring_sel_cfg(struct dp_soc *soc)
4852 {
4853 	return QDF_STATUS_SUCCESS;
4854 }
4855 #endif
4856 
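/*
 * Intent sketch for dp_rxdma_ring_sel_cfg() above: with NO_RX_PKT_HDR_TLV
 * the rx_packet_header TLV is not subscribed (packet_header = 0,
 * rx_header_offset = 0), so the remaining TLV offsets must be programmed
 * explicitly from the HAL, e.g.:
 *
 *	htt_tlv_filter.rx_mpdu_start_offset =
 *		HAL_RX_PKT_TLV_MPDU_START_OFFSET(soc->hal_soc);
 *
 * Presumably the motivation is to reclaim the rx_pkt_header TLV space in
 * each RX_DATA_BUFFER_SIZE buffer.
 */
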
4857 /*
4858  * dp_rx_target_fst_config() - configure the RXOLE Flow Search Engine
4859  *
4860  * This function is used to configure the FSE HW block in RX OLE on a
4861  * per pdev basis. Here, we will be programming parameters related to
4862  * the Flow Search Table.
4863  *
4864  * @soc: data path SoC handle
4865  *
4866  * Return: zero on success, non-zero on failure
4867  */
4868 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
4869 static QDF_STATUS
4870 dp_rx_target_fst_config(struct dp_soc *soc)
4871 {
4872 	int i;
4873 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4874 
4875 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4876 		struct dp_pdev *pdev = soc->pdev_list[i];
4877 
4878 		/* Flow search is not enabled if NSS offload is enabled */
4879 		if (pdev &&
4880 		    !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
4881 			status = dp_rx_flow_send_fst_fw_setup(pdev->soc, pdev);
4882 			if (status != QDF_STATUS_SUCCESS)
4883 				break;
4884 		}
4885 	}
4886 	return status;
4887 }
4888 #else
4889 /**
4890  * dp_rx_target_fst_config() - Configure RX OLE FSE engine in HW
4891  * @soc: SoC handle
4892  *
4893  * Return: Success
4894  */
4895 static inline QDF_STATUS
4896 dp_rx_target_fst_config(struct dp_soc *soc)
4897 {
4898 	return QDF_STATUS_SUCCESS;
4899 }
4900 
4901 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
4902 
4903 /*
4904  * dp_soc_attach_target_wifi3() - SOC initialization in the target
4905  * @cdp_soc: Opaque Datapath SOC handle
4906  *
4907  * Return: zero on success, non-zero on failure
4908  */
4909 static QDF_STATUS
4910 dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
4911 {
4912 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
4913 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4914 
4915 	htt_soc_attach_target(soc->htt_handle);
4916 
4917 	status = dp_rxdma_ring_config(soc);
4918 	if (status != QDF_STATUS_SUCCESS) {
4919 		dp_err("Failed to send htt srng setup messages to target");
4920 		return status;
4921 	}
4922 
4923 	status = dp_rxdma_ring_sel_cfg(soc);
4924 	if (status != QDF_STATUS_SUCCESS) {
4925 		dp_err("Failed to send htt ring config message to target");
4926 		return status;
4927 	}
4928 
4929 	status = dp_rx_target_fst_config(soc);
4930 	if (status != QDF_STATUS_SUCCESS) {
4931 		dp_err("Failed to send htt fst setup config message to target");
4932 		return status;
4933 	}
4934 
4935 	DP_STATS_INIT(soc);
4936 
4937 	/* initialize work queue for stats processing */
4938 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
4939 
4940 	qdf_minidump_log(soc, sizeof(*soc), "dp_soc");
4941 
4942 	return QDF_STATUS_SUCCESS;
4943 }
4944 
4945 /*
4946 * dp_vdev_attach_wifi3() - attach txrx vdev
4947 * @cdp_soc: Datapath SOC handle
* @pdev_id: PDEV ID
4948 * @vdev_mac_addr: MAC address of the virtual interface
4949 * @vdev_id: VDEV Id
4950 * @op_mode: VDEV operating mode
4951 * @subtype: VDEV operating subtype
4952 *
4953 * Return: status
4954 */
4955 static QDF_STATUS dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc,
4956 				       uint8_t pdev_id,
4957 				       uint8_t *vdev_mac_addr,
4958 				       uint8_t vdev_id,
4959 				       enum wlan_op_mode op_mode,
4960 				       enum wlan_op_subtype subtype)
4961 {
4962 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
4963 	struct dp_pdev *pdev =
4964 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
4965 						   pdev_id);
4966 	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
4967 
4968 	if (!pdev) {
4969 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4970 			  FL("DP PDEV is Null for pdev id %d"), pdev_id);
4971 		qdf_mem_free(vdev);
4972 		goto fail0;
4973 	}
4974 
4975 	if (!vdev) {
4976 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4977 			FL("DP VDEV memory allocation failed"));
4978 		goto fail0;
4979 	}
4980 
4981 	vdev->pdev = pdev;
4982 	vdev->vdev_id = vdev_id;
4983 	vdev->opmode = op_mode;
4984 	vdev->subtype = subtype;
4985 	vdev->osdev = soc->osdev;
4986 
4987 	vdev->osif_rx = NULL;
4988 	vdev->osif_rsim_rx_decap = NULL;
4989 	vdev->osif_get_key = NULL;
4990 	vdev->osif_rx_mon = NULL;
4991 	vdev->osif_tx_free_ext = NULL;
4992 	vdev->osif_vdev = NULL;
4993 
4994 	vdev->delete.pending = 0;
4995 	vdev->safemode = 0;
4996 	vdev->drop_unenc = 1;
4997 	vdev->sec_type = cdp_sec_type_none;
4998 	vdev->multipass_en = false;
4999 #ifdef notyet
5000 	vdev->filters_num = 0;
5001 #endif
5002 
5003 	qdf_mem_copy(
5004 		&vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE);
5005 
5006 	/* TODO: Initialize default HTT meta data that will be used in
5007 	 * TCL descriptors for packets transmitted from this VDEV
5008 	 */
5009 
5010 	TAILQ_INIT(&vdev->peer_list);
5011 	dp_peer_multipass_list_init(vdev);
5012 
5013 	if ((soc->intr_mode == DP_INTR_POLL) &&
5014 	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
5015 		if ((pdev->vdev_count == 0) ||
5016 		    (wlan_op_mode_monitor == vdev->opmode))
5017 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
5018 	}
5019 
5020 	soc->vdev_id_map[vdev_id] = vdev;
5021 
5022 	if (wlan_op_mode_monitor == vdev->opmode) {
5023 		pdev->monitor_vdev = vdev;
5024 		return QDF_STATUS_SUCCESS;
5025 	}
5026 
5027 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
5028 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
5029 	vdev->dscp_tid_map_id = 0;
5030 	vdev->mcast_enhancement_en = 0;
5031 	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
5032 	vdev->prev_tx_enq_tstamp = 0;
5033 	vdev->prev_rx_deliver_tstamp = 0;
5034 
5035 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5036 	/* add this vdev into the pdev's list */
5037 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
5038 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5039 	pdev->vdev_count++;
5040 
5041 	if (wlan_op_mode_sta != vdev->opmode)
5042 		vdev->ap_bridge_enabled = true;
5043 	else
5044 		vdev->ap_bridge_enabled = false;
5045 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
5046 		  "%s: wlan_cfg_ap_bridge_enabled %d",
5047 		  __func__, vdev->ap_bridge_enabled);
5048 
5049 	dp_tx_vdev_attach(vdev);
5050 
5051 	if (pdev->vdev_count == 1)
5052 		dp_lro_hash_setup(soc, pdev);
5053 
5054 	dp_info("Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
5055 	DP_STATS_INIT(vdev);
5056 
5057 	if (wlan_op_mode_sta == vdev->opmode)
5058 		dp_peer_create_wifi3((struct cdp_soc_t *)soc, vdev_id,
5059 				     vdev->mac_addr.raw);
5060 
5061 	return QDF_STATUS_SUCCESS;
5062 
5063 fail0:
5064 	return QDF_STATUS_E_FAILURE;
5065 }
5066 
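/*
 * Caller sketch for dp_vdev_attach_wifi3() above (hypothetical
 * control-path code, shown for illustration only):
 *
 *	if (dp_vdev_attach_wifi3(cdp_soc, pdev_id, mac, vdev_id,
 *				 wlan_op_mode_sta, subtype) !=
 *	    QDF_STATUS_SUCCESS)
 *		return error;	// hypothetical caller error handling
 *
 * Note the two special cases in the function: wlan_op_mode_monitor
 * returns early after recording pdev->monitor_vdev, and wlan_op_mode_sta
 * creates a self peer via dp_peer_create_wifi3().
 */
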
5067 /**
5068  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
5069  * @soc: Datapath soc handle
5070  * @vdev_id: id of Datapath VDEV handle
5071  * @osif_vdev: OSIF vdev handle
5072  * @txrx_ops: Tx and Rx operations
5073  *
5074  * Return: QDF_STATUS
5075  */
5076 static QDF_STATUS dp_vdev_register_wifi3(struct cdp_soc_t *soc,
5077 					 uint8_t vdev_id,
5078 					 ol_osif_vdev_handle osif_vdev,
5079 					 struct ol_txrx_ops *txrx_ops)
5080 {
5081 	struct dp_vdev *vdev =
5082 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
5083 						   vdev_id);
5084 
5085 	if (!vdev)
5086 		return QDF_STATUS_E_FAILURE;
5087 
5088 	vdev->osif_vdev = osif_vdev;
5089 	vdev->osif_rx = txrx_ops->rx.rx;
5090 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
5091 	vdev->osif_rx_flush = txrx_ops->rx.rx_flush;
5092 	vdev->osif_gro_flush = txrx_ops->rx.rx_gro_flush;
5093 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
5094 	vdev->osif_get_key = txrx_ops->get_key;
5095 	vdev->osif_rx_mon = txrx_ops->rx.mon;
5096 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
5097 	vdev->tx_comp = txrx_ops->tx.tx_comp;
5098 #ifdef notyet
5099 #if ATH_SUPPORT_WAPI
5100 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
5101 #endif
5102 #endif
5103 #ifdef UMAC_SUPPORT_PROXY_ARP
5104 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
5105 #endif
5106 	vdev->me_convert = txrx_ops->me_convert;
5107 
5108 	/* TODO: Enable the following once Tx code is integrated */
5109 	if (vdev->mesh_vdev)
5110 		txrx_ops->tx.tx = dp_tx_send_mesh;
5111 	else
5112 		txrx_ops->tx.tx = dp_tx_send;
5113 
5114 	txrx_ops->tx.tx_exception = dp_tx_send_exception;
5115 
5116 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
5117 		"DP Vdev Register success");
5118 
5119 	return QDF_STATUS_SUCCESS;
5120 }
5121 
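/*
 * Registration sketch for dp_vdev_register_wifi3() above (hypothetical
 * OSIF caller; the handler names are placeholders): the OSIF layer
 * supplies its rx hooks and receives the DP tx entry points back through
 * the same ops structure:
 *
 *	struct ol_txrx_ops ops = { 0 };
 *
 *	ops.rx.rx = osif_rx_handler;		// hypothetical handler
 *	ops.get_key = osif_get_key_handler;	// hypothetical handler
 *	if (dp_vdev_register_wifi3(soc, vdev_id, osif_ctx, &ops) ==
 *	    QDF_STATUS_SUCCESS)
 *		;	// ops.tx.tx now points at dp_tx_send (or dp_tx_send_mesh)
 */
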
5122 /**
5123  * dp_peer_flush_ast_entry() - Forcibly flush all AST entries of a peer
5124  * @soc: Datapath soc handle
5125  * @peer: Datapath peer handle
5126  * @peer_id: Peer ID
5127  * @vdev_id: Vdev ID
5128  *
5129  * Return: void
5130  */
5131 static void dp_peer_flush_ast_entry(struct dp_soc *soc,
5132 				    struct dp_peer *peer,
5133 				    uint16_t peer_id,
5134 				    uint8_t vdev_id)
5135 {
5136 	struct dp_ast_entry *ase, *tmp_ase;
5137 
5138 	if (soc->is_peer_map_unmap_v2) {
5139 		DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
5140 			dp_rx_peer_unmap_handler(soc, peer_id, vdev_id,
5141 						 ase->mac_addr.raw, 1);
5145 		}
5146 	}
5147 }
5148 
5149 /**
5150  * dp_vdev_flush_peers() - Forcibly flush peers of vdev
5151  * @vdev: Datapath VDEV handle
5152  * @unmap_only: Flag to indicate "only unmap"
5153  *
5154  * Return: void
5155  */
5156 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, bool unmap_only)
5157 {
5158 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5159 	struct dp_pdev *pdev = vdev->pdev;
5160 	struct dp_soc *soc = pdev->soc;
5161 	struct dp_peer *peer;
5162 	uint16_t *peer_ids;
5163 	struct dp_peer **peer_array = NULL;
5164 	uint8_t i = 0, j = 0;
5165 	uint8_t m = 0, n = 0;
5166 
5167 	peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
5168 	if (!peer_ids) {
5169 		dp_err("DP alloc failure - unable to flush peers");
5170 		return;
5171 	}
5172 
5173 	if (!unmap_only) {
5174 		peer_array = qdf_mem_malloc(
5175 				soc->max_peers * sizeof(struct dp_peer *));
5176 		if (!peer_array) {
5177 			qdf_mem_free(peer_ids);
5178 			dp_err("DP alloc failure - unable to flush peers");
5179 			return;
5180 		}
5181 	}
5182 
5183 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
5184 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
5185 		if (!unmap_only && n < soc->max_peers)
5186 			peer_array[n++] = peer;
5187 
5188 		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
5189 			if (peer->peer_ids[i] != HTT_INVALID_PEER)
5190 				if (j < soc->max_peers)
5191 					peer_ids[j++] = peer->peer_ids[i];
5192 	}
5193 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5194 
5195 	/*
5196 	 * If the peer id is invalid, the peer still needs to be flushed
5197 	 * when its valid flag is set; this is needed for the NAN + SSR case.
5198 	 */
5199 	if (!unmap_only) {
5200 		for (m = 0; m < n ; m++) {
5201 			peer = peer_array[m];
5202 
5203 			dp_info("peer: %pM is getting deleted",
5204 				peer->mac_addr.raw);
5205 			/* only if peer valid is true */
5206 			if (peer->valid)
5207 				dp_peer_delete_wifi3((struct cdp_soc_t *)soc,
5208 						     vdev->vdev_id,
5209 						     peer->mac_addr.raw, 0);
5210 		}
5211 		qdf_mem_free(peer_array);
5212 	}
5213 
5214 	for (i = 0; i < j ; i++) {
5215 		peer = __dp_peer_find_by_id(soc, peer_ids[i]);
5216 
5217 		if (!peer)
5218 			continue;
5219 
5220 		dp_info("peer: %pM is getting unmapped",
5221 			peer->mac_addr.raw);
5222 		/* free AST entries of peer */
5223 		dp_peer_flush_ast_entry(soc, peer,
5224 					peer_ids[i],
5225 					vdev->vdev_id);
5226 
5227 		dp_rx_peer_unmap_handler(soc, peer_ids[i],
5228 					 vdev->vdev_id,
5229 					 peer->mac_addr.raw, 0);
5230 	}
5231 
5232 	qdf_mem_free(peer_ids);
5233 	dp_info("Flushed peers for vdev object %pK ", vdev);
5234 }
5235 
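/*
 * Design note on dp_vdev_flush_peers() above: peer pointers and ids are
 * snapshotted while peer_ref_mutex is held, but dp_peer_delete_wifi3()
 * and dp_rx_peer_unmap_handler() run only after the lock is dropped,
 * presumably because both may take peer references/locks of their own
 * during teardown.
 */
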
5236 /*
5237  * dp_vdev_detach_wifi3() - Detach txrx vdev
5238  * @cdp_soc: Datapath soc handle
5239  * @vdev_id: VDEV Id
5240  * @callback: Callback OL_IF on completion of detach
5241  * @cb_context:	Callback context
5242  *
5243  */
5244 static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc,
5245 				       uint8_t vdev_id,
5246 				       ol_txrx_vdev_delete_cb callback,
5247 				       void *cb_context)
5248 {
5249 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
5250 	struct dp_pdev *pdev;
5251 	struct dp_neighbour_peer *peer = NULL;
5252 	struct dp_neighbour_peer *temp_peer = NULL;
5253 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
5254 
5255 	if (!vdev)
5256 		return QDF_STATUS_E_FAILURE;
5257 
5258 	pdev = vdev->pdev;
5259 
5260 	soc->vdev_id_map[vdev->vdev_id] = NULL;
5261 
5262 	if (wlan_op_mode_sta == vdev->opmode && vdev->vap_self_peer)
5263 		dp_peer_delete_wifi3((struct cdp_soc_t *)soc, vdev->vdev_id,
5264 				     vdev->vap_self_peer->mac_addr.raw, 0);
5265 
5266 	/*
5267 	 * If Target is hung, flush all peers before detaching vdev
5268 	 * this will free all references held due to missing
5269 	 * unmap commands from Target
5270 	 */
5271 	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
5272 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, false);
5273 	else if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
5274 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, true);
5275 
5276 	/*
5277 	 * Use peer_ref_mutex while accessing peer_list, in case
5278 	 * a peer is in the process of being removed from the list.
5279 	 */
5280 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
5281 	/* check that the vdev has no peers allocated */
5282 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
5283 		/* debug print - will be removed later */
5284 		dp_warn("not deleting vdev object %pK (%pM) until deletion finishes for all its peers",
5285 			vdev, vdev->mac_addr.raw);
5286 
5287 		if (vdev->vdev_dp_ext_handle) {
5288 			qdf_mem_free(vdev->vdev_dp_ext_handle);
5289 			vdev->vdev_dp_ext_handle = NULL;
5290 		}
5291 		/* indicate that the vdev needs to be deleted */
5292 		vdev->delete.pending = 1;
5293 		vdev->delete.callback = callback;
5294 		vdev->delete.context = cb_context;
5295 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5296 		return QDF_STATUS_E_FAILURE;
5297 	}
5298 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5299 
5300 	if (wlan_op_mode_monitor == vdev->opmode)
5301 		goto free_vdev;
5302 
5303 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
5304 	if (!soc->hw_nac_monitor_support) {
5305 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
5306 			      neighbour_peer_list_elem) {
5307 			QDF_ASSERT(peer->vdev != vdev);
5308 		}
5309 	} else {
5310 		TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
5311 				   neighbour_peer_list_elem, temp_peer) {
5312 			if (peer->vdev == vdev) {
5313 				TAILQ_REMOVE(&pdev->neighbour_peers_list, peer,
5314 					     neighbour_peer_list_elem);
5315 				qdf_mem_free(peer);
5316 			}
5317 		}
5318 	}
5319 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
5320 
5321 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5322 	/* remove the vdev from its parent pdev's list */
5323 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
5324 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5325 
5326 	dp_tx_vdev_detach(vdev);
5327 	dp_rx_vdev_detach(vdev);
5328 
5329 free_vdev:
5330 	if (wlan_op_mode_monitor == vdev->opmode)
5331 		pdev->monitor_vdev = NULL;
5332 
5333 	if (vdev->vdev_dp_ext_handle) {
5334 		qdf_mem_free(vdev->vdev_dp_ext_handle);
5335 		vdev->vdev_dp_ext_handle = NULL;
5336 	}
5337 
5338 	dp_info("deleting vdev object %pK (%pM)", vdev, vdev->mac_addr.raw);
5339 
5340 	qdf_mem_free(vdev);
5341 
5342 	if (callback)
5343 		callback(cb_context);
5344 
5345 	return QDF_STATUS_SUCCESS;
5346 }
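
/*
 * Usage sketch (illustrative only, not driver code): a hypothetical OL_IF
 * caller passes a completion callback so that a deferred delete - taken
 * above when peers are still pending - can be signalled later from
 * dp_delete_pending_vdev(); my_delete_cb and my_ctx are assumed names:
 *
 *	status = dp_vdev_detach_wifi3(cdp_soc, vdev_id, my_delete_cb, my_ctx);
 *	if (status != QDF_STATUS_SUCCESS)
 *		wait_for_delete_cb();	(my_delete_cb fires once the last
 *					 peer is freed)
 */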
5347 
5348 #ifdef FEATURE_AST
5349 /*
5350  * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
5351  * @soc: datapath soc handle
5352  * @peer: datapath peer handle
5353  *
5354  * Delete the AST entries belonging to a peer
5355  */
5356 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
5357 					      struct dp_peer *peer)
5358 {
5359 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
5360 
5361 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
5362 		dp_peer_del_ast(soc, ast_entry);
5363 
5364 	peer->self_ast_entry = NULL;
5365 }
5366 #else
5367 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
5368 					      struct dp_peer *peer)
5369 {
5370 }
5371 #endif
5372 #if ATH_SUPPORT_WRAP
5373 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
5374 						uint8_t *peer_mac_addr)
5375 {
5376 	struct dp_peer *peer;
5377 
5378 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
5379 				      0, vdev->vdev_id);
5380 	if (!peer)
5381 		return NULL;
5382 
5383 	if (peer->bss_peer)
5384 		return peer;
5385 
5386 	dp_peer_unref_delete(peer);
5387 	return NULL;
5388 }
5389 #else
5390 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
5391 						uint8_t *peer_mac_addr)
5392 {
5393 	struct dp_peer *peer;
5394 
5395 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
5396 				      0, vdev->vdev_id);
5397 	if (!peer)
5398 		return NULL;
5399 
5400 	if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
5401 		return peer;
5402 
5403 	dp_peer_unref_delete(peer);
5404 	return NULL;
5405 }
5406 #endif
5407 
5408 #ifdef FEATURE_AST
5409 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
5410 					       struct dp_pdev *pdev,
5411 					       uint8_t *peer_mac_addr)
5412 {
5413 	struct dp_ast_entry *ast_entry;
5414 
5415 	qdf_spin_lock_bh(&soc->ast_lock);
5416 	if (soc->ast_override_support)
5417 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
5418 							    pdev->pdev_id);
5419 	else
5420 		ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
5421 
5422 	if (ast_entry && ast_entry->next_hop && !ast_entry->delete_in_progress)
5423 		dp_peer_del_ast(soc, ast_entry);
5424 
5425 	qdf_spin_unlock_bh(&soc->ast_lock);
5426 }
5427 #endif
5428 
5429 #ifdef PEER_CACHE_RX_PKTS
5430 static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
5431 {
5432 	qdf_spinlock_create(&peer->bufq_info.bufq_lock);
5433 	peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
5434 	qdf_list_create(&peer->bufq_info.cached_bufq, DP_RX_CACHED_BUFQ_THRESH);
5435 }
5436 #else
5437 static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
5438 {
5439 }
5440 #endif
5441 
5442 #ifdef WLAN_FEATURE_STATS_EXT
5443 /*
5444  * dp_set_ignore_reo_status_cb() - set ignore reo status cb flag
5445  * @soc: dp soc handle
5446  * @flag: flag to set or reset
5447  *
5448  * Return: None
5449  */
5450 static inline void dp_set_ignore_reo_status_cb(struct dp_soc *soc,
5451 					       bool flag)
5452 {
5453 	soc->ignore_reo_status_cb = flag;
5454 }
5455 #else
5456 static inline void dp_set_ignore_reo_status_cb(struct dp_soc *soc,
5457 					       bool flag)
5458 {
5459 }
5460 #endif
5461 
5462 /*
5463  * dp_peer_create_wifi3() - attach txrx peer
5464  * @soc_hdl: Datapath soc handle
5465  * @vdev_id: id of vdev
5466  * @peer_mac_addr: Peer MAC address
5467  *
5468  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
5469  */
5470 static QDF_STATUS
5471 dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5472 		     uint8_t *peer_mac_addr)
5473 {
5474 	struct dp_peer *peer;
5475 	int i;
5476 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
5477 	struct dp_pdev *pdev;
5478 	struct cdp_peer_cookie peer_cookie;
5479 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
5480 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
5481 
5482 	if (!vdev || !peer_mac_addr)
5483 		return QDF_STATUS_E_FAILURE;
5484 
5485 	pdev = vdev->pdev;
5486 	soc = pdev->soc;
5487 
5488 	/*
5489 	 * If a peer entry with given MAC address already exists,
5490 	 * reuse the peer and reset the state of peer.
5491 	 */
5492 	peer = dp_peer_can_reuse(vdev, peer_mac_addr);
5493 
5494 	if (peer) {
5495 		qdf_atomic_init(&peer->is_default_route_set);
5496 		dp_peer_cleanup(vdev, peer, true);
5497 
5498 		qdf_spin_lock_bh(&soc->ast_lock);
5499 		dp_peer_delete_ast_entries(soc, peer);
5500 		peer->delete_in_progress = false;
5501 		qdf_spin_unlock_bh(&soc->ast_lock);
5502 
5503 		if ((vdev->opmode == wlan_op_mode_sta) &&
5504 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
5505 		     QDF_MAC_ADDR_SIZE)) {
5506 			ast_type = CDP_TXRX_AST_TYPE_SELF;
5507 		}
5508 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
5509 		/*
5510 		* Control path maintains a node count which is incremented
5511 		* for every new peer create command. Since a new peer is not
5512 		* being created and the earlier reference is reused here, a
5513 		* peer_unref_delete event is sent to the control path to
5514 		* decrement the count back.
5515 		*/
5516 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
5517 			soc->cdp_soc.ol_ops->peer_unref_delete(
5518 				soc->ctrl_psoc,
5519 				pdev->pdev_id,
5520 				peer->mac_addr.raw, vdev->mac_addr.raw,
5521 				vdev->opmode);
5522 		}
5523 
5524 		dp_local_peer_id_alloc(pdev, peer);
5525 
5526 		qdf_spinlock_create(&peer->peer_info_lock);
5527 		dp_peer_rx_bufq_resources_init(peer);
5528 
5529 		DP_STATS_INIT(peer);
5530 		DP_STATS_UPD(peer, rx.avg_rssi, INVALID_RSSI);
5531 
5532 		return QDF_STATUS_SUCCESS;
5533 	} else {
5534 		/*
5535 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
5536 		 * need to remove the AST entry which was earlier added as a WDS
5537 		 * entry.
5538 		 * If an AST entry exists but no peer entry exists for the given
5539 		 * MAC address, we can deduce that it is a WDS entry.
5540 		 */
5541 		dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
5542 	}
5543 
5544 #ifdef notyet
5545 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
5546 		soc->mempool_ol_ath_peer);
5547 #else
5548 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
5549 #endif
5550 
5551 	if (!peer)
5552 		return QDF_STATUS_E_FAILURE; /* failure */
5553 
5554 	qdf_mem_zero(peer, sizeof(struct dp_peer));
5555 
5556 	TAILQ_INIT(&peer->ast_entry_list);
5557 
5558 	/* store provided params */
5559 	peer->vdev = vdev;
5560 
5561 	if ((vdev->opmode == wlan_op_mode_sta) &&
5562 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
5563 			 QDF_MAC_ADDR_SIZE)) {
5564 		ast_type = CDP_TXRX_AST_TYPE_SELF;
5565 	}
5566 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
5567 	qdf_spinlock_create(&peer->peer_info_lock);
5568 
5569 	dp_peer_rx_bufq_resources_init(peer);
5570 
5571 	qdf_mem_copy(
5572 		&peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE);
5573 
5574 	/* initialize the peer_id */
5575 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
5576 		peer->peer_ids[i] = HTT_INVALID_PEER;
5577 
5578 	/* reset the ast index to flowid table */
5579 	dp_peer_reset_flowq_map(peer);
5580 
5581 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
5582 
5583 	qdf_atomic_init(&peer->ref_cnt);
5584 
5585 	/* keep one reference for attach */
5586 	qdf_atomic_inc(&peer->ref_cnt);
5587 
5588 	/* add this peer into the vdev's list */
5589 	if (wlan_op_mode_sta == vdev->opmode)
5590 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
5591 	else
5592 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
5593 
5594 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5595 
5596 	/* TODO: See if hash based search is required */
5597 	dp_peer_find_hash_add(soc, peer);
5598 
5599 	/* Initialize the peer state */
5600 	peer->state = OL_TXRX_PEER_STATE_DISC;
5601 
5602 	dp_info("vdev %pK created peer %pK (%pM) ref_cnt: %d",
5603 		vdev, peer, peer->mac_addr.raw,
5604 		qdf_atomic_read(&peer->ref_cnt));
5605 	/*
5606 	 * For every peer MAP message, check and set bss_peer if applicable
5607 	 */
5608 	if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
5609 			QDF_MAC_ADDR_SIZE) == 0 &&
5610 			(wlan_op_mode_sta != vdev->opmode)) {
5611 		dp_info("vdev bss_peer!!");
5612 		peer->bss_peer = 1;
5613 		vdev->vap_bss_peer = peer;
5614 	}
5615 
5616 	if (wlan_op_mode_sta == vdev->opmode &&
5617 	    qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
5618 			QDF_MAC_ADDR_SIZE) == 0) {
5619 		vdev->vap_self_peer = peer;
5620 	}
5621 
5622 	if (wlan_op_mode_sta == vdev->opmode &&
5623 	    qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
5624 			QDF_MAC_ADDR_SIZE) != 0) {
5625 		dp_set_ignore_reo_status_cb(soc, false);
5626 	}
5627 
5628 	for (i = 0; i < DP_MAX_TIDS; i++)
5629 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
5630 
5631 	peer->valid = 1;
5632 	dp_local_peer_id_alloc(pdev, peer);
5633 	DP_STATS_INIT(peer);
5634 	DP_STATS_UPD(peer, rx.avg_rssi, INVALID_RSSI);
5635 
5636 	qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
5637 		     QDF_MAC_ADDR_SIZE);
5638 	peer_cookie.ctx = NULL;
5639 	peer_cookie.pdev_id = pdev->pdev_id;
5640 	peer_cookie.cookie = pdev->next_peer_cookie++;
5641 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
5642 	dp_wdi_event_handler(WDI_EVENT_PEER_CREATE, pdev->soc,
5643 			     (void *)&peer_cookie,
5644 			     peer->peer_ids[0], WDI_NO_VAL, pdev->pdev_id);
5645 #endif
5646 	if (soc->wlanstats_enabled) {
5647 		if (!peer_cookie.ctx) {
5648 			pdev->next_peer_cookie--;
5649 			qdf_err("Failed to initialize peer rate stats");
5650 		} else {
5651 			peer->wlanstats_ctx = (struct cdp_peer_rate_stats_ctx *)
5652 						peer_cookie.ctx;
5653 		}
5654 	}
5655 	return QDF_STATUS_SUCCESS;
5656 }
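
/*
 * Usage sketch (illustrative only): peer bring-up is a two-step sequence
 * driven by the control path; everything except the two dp_* calls below
 * is an assumed name:
 *
 *	if (dp_peer_create_wifi3(soc_hdl, vdev_id, mac) == QDF_STATUS_SUCCESS)
 *		status = dp_peer_setup_wifi3(soc_hdl, vdev_id, mac);
 *
 * dp_peer_setup_wifi3() (below) then programs the default route and REO
 * destination for the newly created peer.
 */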
5657 
5658 /*
5659  * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
5660  * @vdev: Datapath VDEV handle
5661  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5662  * @hash_based: pointer to hash value (enabled/disabled) to be populated
5663  *
5664  * Return: None
5665  */
5666 static
5667 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
5668 				  enum cdp_host_reo_dest_ring *reo_dest,
5669 				  bool *hash_based)
5670 {
5671 	struct dp_soc *soc;
5672 	struct dp_pdev *pdev;
5673 
5674 	pdev = vdev->pdev;
5675 	soc = pdev->soc;
5676 	/*
5677 	 * hash based steering is disabled for Radios which are offloaded
5678 	 * to NSS
5679 	 */
5680 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
5681 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
5682 
5683 	/*
5684 	 * The line below ensures the proper reo_dest ring is chosen
5685 	 * for cases where a Toeplitz hash cannot be generated (e.g. non-TCP/UDP)
5686 	 */
5687 	*reo_dest = pdev->reo_dest;
5688 }
5689 
5690 #ifdef IPA_OFFLOAD
5691 /**
5692  * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
5693  * @vdev: Virtual device
5694  *
5695  * Return: true if the vdev is of subtype P2P
5696  *	   false if the vdev is of any other subtype
5697  */
5698 static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
5699 {
5700 	if (vdev->subtype == wlan_op_subtype_p2p_device ||
5701 	    vdev->subtype == wlan_op_subtype_p2p_cli ||
5702 	    vdev->subtype == wlan_op_subtype_p2p_go)
5703 		return true;
5704 
5705 	return false;
5706 }
5707 
5708 /*
5709  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
5710  * @vdev: Datapath VDEV handle
5711  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5712  * @hash_based: pointer to hash value (enabled/disabled) to be populated
5713  *
5714  * If IPA is enabled in the ini, disable hash-based steering for SAP mode
5715  * and use the default reo_dest ring for RX. Use config values for other modes.
5716  * Return: None
5717  */
5718 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
5719 				       enum cdp_host_reo_dest_ring *reo_dest,
5720 				       bool *hash_based)
5721 {
5722 	struct dp_soc *soc;
5723 	struct dp_pdev *pdev;
5724 
5725 	pdev = vdev->pdev;
5726 	soc = pdev->soc;
5727 
5728 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
5729 
5730 	/* For P2P-GO interfaces we do not need to change the REO
5731 	 * configuration even if IPA config is enabled
5732 	 */
5733 	if (dp_is_vdev_subtype_p2p(vdev))
5734 		return;
5735 
5736 	/*
5737 	 * If IPA is enabled, disable hash-based flow steering and set
5738 	 * reo_dest_ring_4 as the REO ring to receive packets on.
5739 	 * IPA is configured to reap reo_dest_ring_4.
5740 	 *
5741 	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
5742 	 * enum values are from 1 - 4.
5743 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
5744 	 */
5745 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5746 		if (vdev->opmode == wlan_op_mode_ap) {
5747 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
5748 			*hash_based = 0;
5749 		} else if (vdev->opmode == wlan_op_mode_sta &&
5750 			   dp_ipa_is_mdm_platform()) {
5751 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
5752 		}
5753 	}
5754 }
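
/*
 * Worked example of the index mapping above (illustrative, assuming
 * IPA_REO_DEST_RING_IDX is 3 on this target): for a SAP vdev with IPA
 * enabled,
 *
 *	*reo_dest   = IPA_REO_DEST_RING_IDX + 1;	(enum value 4)
 *	*hash_based = 0;				(no Toeplitz steering)
 *
 * i.e. HW REO DST indexes 0..3 map to cdp_host_reo_dest_ring values 1..4.
 */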
5755 
5756 #else
5757 
5758 /*
5759  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
5760  * @vdev: Datapath VDEV handle
5761  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5762  * @hash_based: pointer to hash value (enabled/disabled) to be populated
5763  *
5764  * Use system config values for hash based steering.
5765  * Return: None
5766  */
5767 
5768 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
5769 				       enum cdp_host_reo_dest_ring *reo_dest,
5770 				       bool *hash_based)
5771 {
5772 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
5773 }
5774 #endif /* IPA_OFFLOAD */
5775 
5776 /*
5777  * dp_peer_setup_wifi3() - initialize the peer
5778  * @soc_hdl: soc handle object
5779  * @vdev_id : vdev_id of vdev object
5780  * @peer_mac: Peer's mac address
5781  *
5782  * Return: QDF_STATUS
5783  */
5784 static QDF_STATUS
5785 dp_peer_setup_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
5786 		    uint8_t *peer_mac)
5787 {
5788 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
5789 	struct dp_pdev *pdev;
5790 	bool hash_based = 0;
5791 	enum cdp_host_reo_dest_ring reo_dest;
5792 	QDF_STATUS status = QDF_STATUS_SUCCESS;
5793 	struct dp_vdev *vdev =
5794 			dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
5795 	struct dp_peer *peer =
5796 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id);
5797 
5798 	if (!vdev || !peer || peer->delete_in_progress) {
5799 		status = QDF_STATUS_E_FAILURE;
5800 		goto fail;
5801 	}
5802 
5803 	pdev = vdev->pdev;
5804 	dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based);
5805 
5806 	dp_info("pdev: %d vdev: %d opmode: %u hash-based-steering: %d default-reo_dest: %u",
5807 		pdev->pdev_id, vdev->vdev_id,
5808 		vdev->opmode, hash_based, reo_dest);
5809 
5810 
5811 	/*
5812 	 * There are corner cases where AD1 = AD2 = "VAP's address",
5813 	 * i.e. both devices have the same MAC address. In these
5814 	 * cases we want such packets to be processed by the NULL Q handler,
5815 	 * which is the REO2TCL ring. For this reason we should
5816 	 * not set up reo_queues and the default route for the bss_peer.
5817 	 */
5818 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap) {
5819 		status = QDF_STATUS_E_FAILURE;
5820 		goto fail;
5821 	}
5822 
5823 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
5824 		/* TODO: Check the destination ring number to be passed to FW */
5825 		soc->cdp_soc.ol_ops->peer_set_default_routing(
5826 				soc->ctrl_psoc,
5827 				peer->vdev->pdev->pdev_id,
5828 				peer->mac_addr.raw,
5829 				peer->vdev->vdev_id, hash_based, reo_dest);
5830 	}
5831 
5832 	qdf_atomic_set(&peer->is_default_route_set, 1);
5833 
5834 	dp_peer_rx_init(pdev, peer);
5835 	dp_peer_tx_init(pdev, peer);
5836 
5837 	dp_peer_ppdu_delayed_ba_init(peer);
5838 
5839 fail:
5840 	if (peer)
5841 		dp_peer_unref_delete(peer);
5842 	return status;
5843 }
5844 
5845 /*
5846  * dp_cp_peer_del_resp_handler - Handle the peer delete response
5847  * @soc_hdl: Datapath SOC handle
5848  * @vdev_id: id of virtual device object
5849  * @mac_addr: Mac address of the peer
5850  *
5851  * Return: QDF_STATUS
5852  */
5853 static QDF_STATUS dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl,
5854 					      uint8_t vdev_id,
5855 					      uint8_t *mac_addr)
5856 {
5857 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
5858 	struct dp_ast_entry  *ast_entry = NULL;
5859 	txrx_ast_free_cb cb = NULL;
5860 	void *cookie;
5861 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
5862 
5863 	if (!vdev)
5864 		return QDF_STATUS_E_FAILURE;
5865 
5866 	qdf_spin_lock_bh(&soc->ast_lock);
5867 
5868 	if (soc->ast_override_support)
5869 		ast_entry =
5870 			dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
5871 							vdev->pdev->pdev_id);
5872 	else
5873 		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
5874 
5875 	/* In case of qwrap we have multiple BSS peers
5876 	 * with the same MAC address.
5877 	 *
5878 	 * The AST entry for this MAC address is created
5879 	 * for only one peer, hence it will be NULL here.
5880 	 */
5881 	if (!ast_entry || ast_entry->peer || !ast_entry->delete_in_progress) {
5882 		qdf_spin_unlock_bh(&soc->ast_lock);
5883 		return QDF_STATUS_E_FAILURE;
5884 	}
5885 
5886 	if (ast_entry->is_mapped)
5887 		soc->ast_table[ast_entry->ast_idx] = NULL;
5888 
5889 	DP_STATS_INC(soc, ast.deleted, 1);
5890 	dp_peer_ast_hash_remove(soc, ast_entry);
5891 
5892 	cb = ast_entry->callback;
5893 	cookie = ast_entry->cookie;
5894 	ast_entry->callback = NULL;
5895 	ast_entry->cookie = NULL;
5896 
5897 	soc->num_ast_entries--;
5898 	qdf_spin_unlock_bh(&soc->ast_lock);
5899 
5900 	if (cb) {
5901 		cb(soc->ctrl_psoc,
5902 		   dp_soc_to_cdp_soc(soc),
5903 		   cookie,
5904 		   CDP_TXRX_AST_DELETED);
5905 	}
5906 	qdf_mem_free(ast_entry);
5907 
5908 	return QDF_STATUS_SUCCESS;
5909 }
5910 
5911 /*
5912  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
5913  * @txrx_soc: cdp soc handle
5914  * @ac: Access category
5915  * @value: timeout value in millisec
5916  *
5917  * Return: void
5918  */
5919 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
5920 				    uint8_t ac, uint32_t value)
5921 {
5922 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5923 
5924 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
5925 }
5926 
5927 /*
5928  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
5929  * @txrx_soc: cdp soc handle
5930  * @ac: access category
5931  * @value: timeout value in millisec
5932  *
5933  * Return: void
5934  */
5935 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
5936 				    uint8_t ac, uint32_t *value)
5937 {
5938 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5939 
5940 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
5941 }
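
/*
 * Usage sketch (illustrative only): read-modify-write of the BA aging
 * timeout for one access category; the AC value 0 is an assumed example:
 *
 *	uint32_t ms;
 *
 *	dp_get_ba_aging_timeout(txrx_soc, 0, &ms);
 *	dp_set_ba_aging_timeout(txrx_soc, 0, ms * 2);
 */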
5942 
5943 /*
5944  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
5945  * @txrx_soc: cdp soc handle
5946  * @pdev_id: id of physical device object
5947  * @val: reo destination ring index (1 - 4)
5948  *
5949  * Return: QDF_STATUS
5950  */
5951 static QDF_STATUS
5952 dp_set_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id,
5953 		     enum cdp_host_reo_dest_ring val)
5954 {
5955 	struct dp_pdev *pdev =
5956 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
5957 						   pdev_id);
5958 
5959 	if (pdev) {
5960 		pdev->reo_dest = val;
5961 		return QDF_STATUS_SUCCESS;
5962 	}
5963 
5964 	return QDF_STATUS_E_FAILURE;
5965 }
5966 
5967 /*
5968  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
5969  * @txrx_soc: cdp soc handle
5970  * @pdev_id: id of physical device object
5971  *
5972  * Return: reo destination ring index
5973  */
5974 static enum cdp_host_reo_dest_ring
5975 dp_get_pdev_reo_dest(struct cdp_soc_t *txrx_soc, uint8_t pdev_id)
5976 {
5977 	struct dp_pdev *pdev =
5978 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)txrx_soc,
5979 						   pdev_id);
5980 
5981 	if (pdev)
5982 		return pdev->reo_dest;
5983 	else
5984 		return cdp_host_reo_dest_ring_unknown;
5985 }
5986 
5987 #ifdef ATH_SUPPORT_NAC
5988 /*
5989  * dp_set_filter_neigh_peers() - set filter neighbour peers for smart mesh
5990  * @pdev: Datapath PDEV handle
5991  * @val: value to be set
5992  *
5993  * Return: 0 on success
5994  */
5995 static int dp_set_filter_neigh_peers(struct dp_pdev *pdev,
5996 				     bool val)
5997 {
5998 	/* Enable/Disable smart mesh filtering. This flag will be checked
5999 	 * during rx processing to check if packets are from NAC clients.
6000 	 */
6001 	pdev->filter_neighbour_peers = val;
6002 	return 0;
6003 }
6004 #else
6005 static int dp_set_filter_neigh_peers(struct dp_pdev *pdev,
6006 				     bool val)
6007 {
6008 	return 0;
6009 }
6010 #endif /* ATH_SUPPORT_NAC */
6011 
6012 #if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
6013 /*
6014  * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
6015  * address for smart mesh filtering
6016  * @txrx_soc: cdp soc handle
6017  * @vdev_id: id of virtual device object
6018  * @cmd: Add/Del command
6019  * @macaddr: nac client mac address
6020  *
6021  * Return: success/failure
6022  */
6023 static int dp_update_filter_neighbour_peers(struct cdp_soc_t *soc,
6024 					    uint8_t vdev_id,
6025 					    uint32_t cmd, uint8_t *macaddr)
6026 {
6027 	struct dp_pdev *pdev;
6028 	struct dp_neighbour_peer *peer = NULL;
6029 	struct dp_vdev *vdev =
6030 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
6031 						   vdev_id);
6032 
6033 	if (!vdev || !macaddr)
6034 		goto fail0;
6035 
6036 	pdev = vdev->pdev;
6037 
6038 	if (!pdev)
6039 		goto fail0;
6040 
6041 	/* Store address of NAC (neighbour peer) which will be checked
6042 	 * against TA of received packets.
6043 	 */
6044 	if (cmd == DP_NAC_PARAM_ADD) {
6045 		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
6046 				sizeof(*peer));
6047 
6048 		if (!peer) {
6049 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6050 				FL("DP neighbour peer node memory allocation failed"));
6051 			goto fail0;
6052 		}
6053 
6054 		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
6055 			macaddr, QDF_MAC_ADDR_SIZE);
6056 		peer->vdev = vdev;
6057 
6058 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
6059 
6060 		/* add this neighbour peer into the list */
6061 		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
6062 				neighbour_peer_list_elem);
6063 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
6064 
6065 		/* first neighbour */
6066 		if (!pdev->neighbour_peers_added) {
6067 			QDF_STATUS status = QDF_STATUS_SUCCESS;
6068 
6069 			pdev->neighbour_peers_added = true;
6070 			dp_mon_filter_setup_smart_monitor(pdev);
6071 			status = dp_mon_filter_update(pdev);
6072 			if (status != QDF_STATUS_SUCCESS) {
6073 				QDF_TRACE(QDF_MODULE_ID_DP,
6074 					  QDF_TRACE_LEVEL_ERROR,
6075 					  FL("smart mon filter setup failed"));
6076 				dp_mon_filter_reset_smart_monitor(pdev);
6077 				pdev->neighbour_peers_added = false;
6078 			}
6079 		}
6080 		return 1;
6081 
6082 	} else if (cmd == DP_NAC_PARAM_DEL) {
6083 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
6084 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
6085 				neighbour_peer_list_elem) {
6086 			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
6087 				macaddr, QDF_MAC_ADDR_SIZE)) {
6088 				/* delete this peer from the list */
6089 				TAILQ_REMOVE(&pdev->neighbour_peers_list,
6090 					peer, neighbour_peer_list_elem);
6091 				qdf_mem_free(peer);
6092 				break;
6093 			}
6094 		}
6095 		/* last neighbour deleted */
6096 		if (TAILQ_EMPTY(&pdev->neighbour_peers_list)) {
6097 			QDF_STATUS status = QDF_STATUS_SUCCESS;
6098 
6099 			pdev->neighbour_peers_added = false;
6100 			dp_mon_filter_reset_smart_monitor(pdev);
6101 			status = dp_mon_filter_update(pdev);
6102 			if (status != QDF_STATUS_SUCCESS) {
6103 				QDF_TRACE(QDF_MODULE_ID_DP,
6104 					  QDF_TRACE_LEVEL_ERROR,
6105 					  FL("smart mon filter clear failed"));
6106 			}
6107 
6108 		}
6109 
6110 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
6111 		return 1;
6112 
6113 	}
6114 
6115 fail0:
6116 	return 0;
6117 }
6118 #endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
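
/*
 * Usage sketch (illustrative only, NAC builds): registering and removing
 * a NAC client for smart mesh filtering; nac_mac is an assumed array:
 *
 *	dp_update_filter_neighbour_peers(soc, vdev_id, DP_NAC_PARAM_ADD,
 *					 nac_mac);
 *	...
 *	dp_update_filter_neighbour_peers(soc, vdev_id, DP_NAC_PARAM_DEL,
 *					 nac_mac);
 *
 * The first ADD arms the smart monitor filter; the last DEL tears it down
 * again, as handled above.
 */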
6119 
6120 /*
6121  * dp_get_sec_type() - Get the security type
6122  * @soc: soc handle
6123  * @vdev_id: id of dp handle
6124  * @peer_mac: mac of datapath PEER handle
6125  * @sec_idx:    Security id (mcast, ucast)
6126  *
6127  * Return: sec_type: security type
6128  */
6129 static int dp_get_sec_type(struct cdp_soc_t *soc, uint8_t vdev_id,
6130 			   uint8_t *peer_mac, uint8_t sec_idx)
6131 {
6132 	int sec_type = 0;
6133 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
6134 						       peer_mac, 0, vdev_id);
6135 
6136 	if (!peer || peer->delete_in_progress) {
6137 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
6138 			  "%s: Peer is NULL!\n", __func__);
6139 		goto fail;
6140 	}
6141 
6142 	sec_type = peer->security[sec_idx].sec_type;
6143 fail:
6144 	if (peer)
6145 		dp_peer_unref_delete(peer);
6146 	return sec_type;
6147 }
6148 
6149 /*
6150  * dp_peer_authorize() - authorize txrx peer
6151  * @soc_hdl: soc handle
6152  * @vdev_id: id of dp handle
6153  * @peer_mac: mac of datapath PEER handle
6154  * @authorize: authorize flag
6155  * Return: QDF_STATUS
6156  */
6157 static QDF_STATUS
6158 dp_peer_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
6159 		  uint8_t *peer_mac, uint32_t authorize)
6160 {
6161 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6162 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6163 	struct dp_peer *peer = dp_peer_find_hash_find(soc,
6164 						      peer_mac,
6165 						      0, vdev_id);
6166 
6167 	if (!peer || peer->delete_in_progress) {
6168 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
6169 			  "%s: Peer is NULL!\n", __func__);
6170 		status = QDF_STATUS_E_FAILURE;
6171 	} else {
6172 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
6173 		peer->authorize = authorize ? 1 : 0;
6174 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
6175 	}
6176 
6177 	if (peer)
6178 		dp_peer_unref_delete(peer);
6179 
6180 	return status;
6181 }
6182 
6183 /*
6184  * dp_vdev_reset_peer() - Update peer related members in vdev
6185  *			   as the peer is about to be freed
6186  * @vdev: datapath vdev handle
6187  * @peer: datapath peer handle
6188  *
6189  * Return: None
6190  */
6191 static void dp_vdev_reset_peer(struct dp_vdev *vdev,
6192 			       struct dp_peer *peer)
6193 {
6194 	struct dp_peer *bss_peer = NULL;
6195 
6196 	if (!vdev) {
6197 		dp_err("vdev is NULL");
6198 	} else {
6199 		if (vdev->vap_bss_peer == peer)
6200 			vdev->vap_bss_peer = NULL;
6201 
6202 		if (vdev->vap_bss_peer) {
6203 			bss_peer = vdev->vap_bss_peer;
6204 			DP_UPDATE_STATS(vdev, peer);
6205 		}
6206 	}
6207 }
6208 
6209 /*
6210  * dp_peer_release_mem() - free dp peer handle memory
6211  * @soc: datapath soc handle
6212  * @pdev: datapath pdev handle
6213  * @peer: datapath peer handle
6214  * @vdev_opmode: Vdev operation mode
6215  * @vdev_mac_addr: Vdev Mac address
6216  *
6217  * Return: None
6218  */
6219 static void dp_peer_release_mem(struct dp_soc *soc,
6220 				struct dp_pdev *pdev,
6221 				struct dp_peer *peer,
6222 				enum wlan_op_mode vdev_opmode,
6223 				uint8_t *vdev_mac_addr)
6224 {
6225 	if (soc->cdp_soc.ol_ops->peer_unref_delete)
6226 		soc->cdp_soc.ol_ops->peer_unref_delete(
6227 				soc->ctrl_psoc,
6228 				pdev->pdev_id,
6229 				peer->mac_addr.raw, vdev_mac_addr,
6230 				vdev_opmode);
6231 
6232 	/*
6233 	 * Peer AST list has to be empty here
6234 	 */
6235 	DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
6236 
6237 	qdf_mem_free(peer);
6238 }
6239 
6240 /**
6241  * dp_delete_pending_vdev() - check and process vdev delete
6242  * @pdev: DP specific pdev pointer
6243  * @vdev: DP specific vdev pointer
6244  * @vdev_id: vdev id corresponding to vdev
6245  *
6246  * This API does the following:
6247  * 1) It releases tx flow pool buffers as the vdev is
6248  *    going down and no peers are associated.
6249  * 2) It also detaches the vdev before freeing the vdev (struct dp_vdev) memory
6250  */
6251 static void dp_delete_pending_vdev(struct dp_pdev *pdev, struct dp_vdev *vdev,
6252 				   uint8_t vdev_id)
6253 {
6254 	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
6255 	void *vdev_delete_context = NULL;
6256 
6257 	vdev_delete_cb = vdev->delete.callback;
6258 	vdev_delete_context = vdev->delete.context;
6259 
6260 	dp_info("deleting vdev object %pK (%pM)- its last peer is done",
6261 		vdev, vdev->mac_addr.raw);
6262 	/* all peers are gone, go ahead and delete it */
6263 	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
6264 			FLOW_TYPE_VDEV, vdev_id);
6265 	dp_tx_vdev_detach(vdev);
6266 
6267 	pdev->soc->vdev_id_map[vdev_id] = NULL;
6268 
6269 	if (wlan_op_mode_monitor == vdev->opmode) {
6270 		pdev->monitor_vdev = NULL;
6271 	} else {
6272 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
6273 		TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
6274 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6275 	}
6276 
6277 	dp_info("deleting vdev object %pK (%pM)",
6278 		vdev, vdev->mac_addr.raw);
6279 	qdf_mem_free(vdev);
6280 	vdev = NULL;
6281 
6282 	if (vdev_delete_cb)
6283 		vdev_delete_cb(vdev_delete_context);
6284 }
6285 
6286 /*
6287  * dp_peer_unref_delete() - unref and delete peer
6288  * @peer:		Datapath peer handle
6289  *
6290  */
6291 void dp_peer_unref_delete(struct dp_peer *peer)
6292 {
6293 	struct dp_vdev *vdev = peer->vdev;
6294 	struct dp_pdev *pdev = vdev->pdev;
6295 	struct dp_soc *soc = pdev->soc;
6296 	struct dp_peer *tmppeer;
6297 	int found = 0;
6298 	uint16_t peer_id;
6299 	uint16_t vdev_id;
6300 	bool vdev_delete = false;
6301 	struct cdp_peer_cookie peer_cookie;
6302 	enum wlan_op_mode vdev_opmode;
6303 	uint8_t vdev_mac_addr[QDF_MAC_ADDR_SIZE];
6304 
6305 	/*
6306 	 * Hold the lock all the way from checking if the peer ref count
6307 	 * is zero until the peer references are removed from the hash
6308 	 * table and vdev list (if the peer ref count is zero).
6309 	 * This protects against a new HL tx operation starting to use the
6310 	 * peer object just after this function concludes it's done being used.
6311 	 * Furthermore, the lock needs to be held while checking whether the
6312 	 * vdev's list of peers is empty, to make sure that list is not modified
6313 	 * concurrently with the empty check.
6314 	 */
6315 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
6316 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
6317 		peer_id = peer->peer_ids[0];
6318 		vdev_id = vdev->vdev_id;
6319 
6320 		/*
6321 		 * Make sure that the reference to the peer in
6322 		 * peer object map is removed
6323 		 */
6324 		if (peer_id != HTT_INVALID_PEER)
6325 			soc->peer_id_to_obj_map[peer_id] = NULL;
6326 
6327 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
6328 			  "Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
6329 
6330 		/* remove the reference to the peer from the hash table */
6331 		dp_peer_find_hash_remove(soc, peer);
6332 
6333 		qdf_spin_lock_bh(&soc->ast_lock);
6334 		if (peer->self_ast_entry) {
6335 			dp_peer_del_ast(soc, peer->self_ast_entry);
6336 		}
6337 		qdf_spin_unlock_bh(&soc->ast_lock);
6338 
6339 		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
6340 			if (tmppeer == peer) {
6341 				found = 1;
6342 				break;
6343 			}
6344 		}
6345 
6346 		if (found) {
6347 			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
6348 				peer_list_elem);
6349 		} else {
6350 			/*Ignoring the remove operation as peer not found*/
6351 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
6352 				  "peer:%pK not found in vdev:%pK peerlist:%pK",
6353 				  peer, vdev, &peer->vdev->peer_list);
6354 		}
6355 
6356 		/* send peer destroy event to upper layer */
6357 		qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
6358 			     QDF_MAC_ADDR_SIZE);
6359 		peer_cookie.ctx = NULL;
6360 		peer_cookie.ctx = (struct cdp_stats_cookie *)
6361 					peer->wlanstats_ctx;
6362 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6363 		dp_wdi_event_handler(WDI_EVENT_PEER_DESTROY,
6364 				     pdev->soc,
6365 				     (void *)&peer_cookie,
6366 				     peer->peer_ids[0],
6367 				     WDI_NO_VAL,
6368 				     pdev->pdev_id);
6369 #endif
6370 		peer->wlanstats_ctx = NULL;
6371 
6372 		/* cleanup the peer data */
6373 		dp_peer_cleanup(vdev, peer, false);
6374 		/* reset this peer related info in vdev */
6375 		dp_vdev_reset_peer(vdev, peer);
6376 		/* save vdev related member in case vdev freed */
6377 		vdev_opmode = vdev->opmode;
6378 		qdf_mem_copy(vdev_mac_addr, vdev->mac_addr.raw,
6379 			     QDF_MAC_ADDR_SIZE);
6380 		/*
6381 		 * check whether the parent vdev is pending for deleting
6382 		 * and no peers left.
6383 		 */
6384 		if (vdev->delete.pending && TAILQ_EMPTY(&vdev->peer_list))
6385 			vdev_delete = true;
6386 		/*
6387 		 * Now that there are no references to the peer, we can
6388 		 * release the peer reference lock.
6389 		 */
6390 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
6391 
6392 		/*
6393 		 * Invoke soc.ol_ops->peer_unref_delete outside of
6394 		 * peer_ref_mutex to avoid a potential deadlock.
6395 		 */
6396 		dp_peer_release_mem(soc, pdev, peer,
6397 				    vdev_opmode,
6398 				    vdev_mac_addr);
6399 		/*
6400 		 * Delete the vdev if it was waiting for all its peers
6401 		 * to be deleted and that condition is now met.
6402 		 */
6403 		if (vdev_delete)
6404 			dp_delete_pending_vdev(pdev, vdev, vdev_id);
6405 
6406 	} else {
6407 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
6408 	}
6409 }
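
/*
 * Reference pattern recap (illustrative): dp_peer_find_hash_find() returns
 * a peer with an extra reference held, so every successful lookup in this
 * file is balanced by dp_peer_unref_delete():
 *
 *	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id);
 *	if (peer) {
 *		...use peer...
 *		dp_peer_unref_delete(peer);
 *	}
 *
 * When the count drops to zero, the body above frees the peer and, if the
 * parent vdev was pending deletion with no peers left, completes the vdev
 * delete as well.
 */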
6410 
6411 #ifdef PEER_CACHE_RX_PKTS
6412 static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
6413 {
6414 	dp_rx_flush_rx_cached(peer, true);
6415 	qdf_list_destroy(&peer->bufq_info.cached_bufq);
6416 	qdf_spinlock_destroy(&peer->bufq_info.bufq_lock);
6417 }
6418 #else
6419 static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
6420 {
6421 }
6422 #endif
6423 
6424 /*
6425  * dp_peer_delete_wifi3() - Delete txrx peer
6426  * @soc: soc handle
6427  * @vdev_id: id of dp handle
6428  * @peer_mac: mac of datapath PEER handle
6429  * @bitmap: bitmap indicating special handling of request.
6430  *
6431  */
6432 static QDF_STATUS dp_peer_delete_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
6433 				       uint8_t *peer_mac, uint32_t bitmap)
6434 {
6435 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
6436 						      peer_mac, 0, vdev_id);
6437 
6438 	/* Peer can be null for monitor vap mac address */
6439 	if (!peer) {
6440 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
6441 			  "%s: Invalid peer\n", __func__);
6442 		return QDF_STATUS_E_FAILURE;
6443 	}
6444 
6445 	if (!peer->valid) {
6446 		dp_peer_unref_delete(peer);
6447 		dp_err("Invalid peer: %pM", peer_mac);
6448 		return QDF_STATUS_E_ALREADY;
6449 	}
6450 
6451 	peer->valid = 0;
6452 
6453 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
6454 		FL("peer %pK (%pM)"),  peer, peer->mac_addr.raw);
6455 
6456 	dp_local_peer_id_free(peer->vdev->pdev, peer);
6457 
6458 	dp_peer_rx_bufq_resources_deinit(peer);
6459 
6460 	qdf_spinlock_destroy(&peer->peer_info_lock);
6461 	dp_peer_multipass_list_remove(peer);
6462 
6463 	if (wlan_op_mode_sta == peer->vdev->opmode &&
6464 	    qdf_mem_cmp(peer->mac_addr.raw, peer->vdev->mac_addr.raw,
6465 			QDF_MAC_ADDR_SIZE) != 0) {
6466 		dp_set_ignore_reo_status_cb(peer->vdev->pdev->soc, true);
6467 	}
6468 
6469 	/*
6470 	 * Remove the reference added during peer_attach.
6471 	 * The peer will still be left allocated until the
6472 	 * PEER_UNMAP message arrives to remove the other
6473 	 * reference, added by the PEER_MAP message.
6474 	 */
6475 	dp_peer_unref_delete(peer);
6476 	/*
6477 	 * Remove the reference taken above
6478 	 */
6479 	dp_peer_unref_delete(peer);
6480 
6481 	return QDF_STATUS_SUCCESS;
6482 }
6483 
6484 /*
6485  * dp_get_vdev_mac_addr_wifi3() - Get vdev MAC address
6486  * @soc_hdl: Datapath soc handle
6487  * @vdev_id: virtual interface id
6488  *
6489  * Return: MAC address on success, NULL on failure.
6490  *
6491  */
6492 static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_soc_t *soc_hdl,
6493 					 uint8_t vdev_id)
6494 {
6495 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6496 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
6497 
6498 	if (!vdev)
6499 		return NULL;
6500 
6501 	return vdev->mac_addr.raw;
6502 }
6503 
6504 /*
6505  * dp_vdev_set_wds() - Enable/disable WDS for a vdev
6506  * @soc: DP soc handle
6507  * @vdev_id: id of DP VDEV handle
6508  * @val: value
6509  *
6510  * Return: QDF_STATUS
6511  */
6512 static int dp_vdev_set_wds(struct cdp_soc_t *soc, uint8_t vdev_id, uint32_t val)
6513 {
6514 	struct dp_vdev *vdev =
6515 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
6516 						   vdev_id);
6517 
6518 	if (!vdev)
6519 		return QDF_STATUS_E_FAILURE;
6520 
6521 	vdev->wds_enabled = val;
6522 	return QDF_STATUS_SUCCESS;
6523 }
6524 
6525 /*
6526  * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev id of monitor mode
6527  * @soc_hdl: datapath soc handle
6528  * @pdev_id: physical device instance id
6529  *
6530  * Return: virtual interface id
6531  */
6532 static uint8_t dp_get_mon_vdev_from_pdev_wifi3(struct cdp_soc_t *soc_hdl,
6533 					       uint8_t pdev_id)
6534 {
6535 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6536 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
6537 
6538 	if (qdf_unlikely(!pdev || !pdev->monitor_vdev))
6539 		return -EINVAL;
6540 
6541 	return pdev->monitor_vdev->vdev_id;
6542 }
6543 
6544 static int dp_get_opmode(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
6545 {
6546 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6547 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
6548 
6549 	if (!vdev) {
6550 		dp_err("vdev for id %d is NULL", vdev_id);
6551 		return -EINVAL;
6552 	}
6553 
6554 	return vdev->opmode;
6555 }
6556 
6557 /**
6558  * dp_get_os_rx_handles_from_vdev_wifi3() - Get os rx handles for a vdev
6559  * @soc_hdl: ol_txrx_soc_handle handle
6560  * @vdev_id: vdev id for which os rx handles are needed
6561  * @stack_fn_p: pointer to stack function pointer
6562  * @osif_handle_p: pointer to ol_osif_vdev_handle
6563  * @osif_vdev_p: pointer to ol_osif_vdev_handle
6564  * Return: void
6565  */
6566 static
6567 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_soc_t *soc_hdl,
6568 					  uint8_t vdev_id,
6569 					  ol_txrx_rx_fp *stack_fn_p,
6570 					  ol_osif_vdev_handle *osif_vdev_p)
6571 {
6572 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6573 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
6574 
6575 	if (!vdev)
6576 		return;
6577 
6578 	*stack_fn_p = vdev->osif_rx_stack;
6579 	*osif_vdev_p = vdev->osif_vdev;
6580 }
6581 
6582 /**
6583  * dp_get_ctrl_pdev_from_vdev() - Get control pdev of vdev
6584  * @soc_hdl: datapath soc handle
6585  * @vdev_id: virtual device/interface id
6586  *
6587  * Return: Handle to control pdev
6588  */
6589 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(
6590 						struct cdp_soc_t *soc_hdl,
6591 						uint8_t vdev_id)
6592 {
6593 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
6594 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
6595 	struct dp_pdev *pdev;
6596 
6597 	if (!vdev || !vdev->pdev)
6598 		return NULL;
6599 
6600 	pdev = vdev->pdev;
6601 	return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
6602 }
6603 
6604 /**
6605  * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
6606  *                                 ring based on target
6607  * @soc: soc handle
6608  * @mac_for_pdev: WIN- pdev_id, MCL- mac id
6609  * @pdev: physical device handle
6610  * @ring_num: mac id
6611  * @htt_tlv_filter: tlv filter
6612  *
6613  * Return: zero on success, non-zero on failure
6614  */
6615 static inline
6616 QDF_STATUS dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
6617 				       struct dp_pdev *pdev, uint8_t ring_num,
6618 				       struct htt_rx_ring_tlv_filter htt_tlv_filter)
6619 {
6620 	QDF_STATUS status;
6621 
6622 	if (soc->wlan_cfg_ctx->rxdma1_enable)
6623 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6624 					     soc->rxdma_mon_buf_ring[ring_num]
6625 					     .hal_srng,
6626 					     RXDMA_MONITOR_BUF,
6627 					     RX_MONITOR_BUFFER_SIZE,
6628 					     &htt_tlv_filter);
6629 	else
6630 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6631 					     pdev->rx_mac_buf_ring[ring_num]
6632 					     .hal_srng,
6633 					     RXDMA_BUF, RX_DATA_BUFFER_SIZE,
6634 					     &htt_tlv_filter);
6635 
6636 	return status;
6637 }
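
/*
 * Ring selection recap (pseudo-summary of the branch above, not
 * additional driver code): targets with a dedicated monitor DMA engine
 * (rxdma1_enable) program RXDMA_MONITOR_BUF with monitor-sized buffers;
 * other targets reuse the per-MAC RXDMA_BUF ring:
 *
 *	ring = rxdma1_enable ? rxdma_mon_buf_ring[i] : rx_mac_buf_ring[i];
 *	size = rxdma1_enable ? RX_MONITOR_BUFFER_SIZE : RX_DATA_BUFFER_SIZE;
 */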
6638 
6639 static inline void
6640 dp_pdev_disable_mcopy_code(struct dp_pdev *pdev)
6641 {
6642 	pdev->mcopy_mode = 0;
6643 	pdev->monitor_configured = false;
6644 	pdev->monitor_vdev = NULL;
6645 	qdf_nbuf_queue_free(&pdev->rx_ppdu_buf_q);
6646 }
6647 
6648 /**
6649  * dp_reset_monitor_mode() - Disable monitor mode
6650  * @soc_hdl: Datapath soc handle
6651  * @pdev_id: id of datapath PDEV handle
6652  * @special_monitor: flag to denote smart/lite monitor reset
6653  * Return: QDF_STATUS
6654  */
6655 QDF_STATUS dp_reset_monitor_mode(struct cdp_soc_t *soc_hdl,
6656 				 uint8_t pdev_id,
6657 				 uint8_t special_monitor)
6658 {
6659 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6660 	struct dp_pdev *pdev =
6661 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
6662 						   pdev_id);
6663 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6664 
6665 	if (!pdev)
6666 		return QDF_STATUS_E_FAILURE;
6667 
6668 	qdf_spin_lock_bh(&pdev->mon_lock);
6669 
6670 	pdev->monitor_vdev = NULL;
6671 	pdev->monitor_configured = false;
6672 
6673 	/*
6674 	 * Lite monitor mode, smart monitor mode and monitor
6675 	 * mode use this API to reset the filters and disable the mode
6676 	 */
6677 	if (pdev->mcopy_mode) {
6678 #if defined(FEATURE_PERPKT_INFO)
6679 		dp_pdev_disable_mcopy_code(pdev);
6680 		dp_mon_filter_reset_mcopy_mode(pdev);
6681 #endif /* FEATURE_PERPKT_INFO */
6682 	} else if (special_monitor) {
6683 #if defined(ATH_SUPPORT_NAC)
6684 		dp_mon_filter_reset_smart_monitor(pdev);
6685 #endif /* ATH_SUPPORT_NAC */
6686 	} else {
6687 		dp_mon_filter_reset_mon_mode(pdev);
6688 	}
6689 
6690 	status = dp_mon_filter_update(pdev);
6691 	if (status != QDF_STATUS_SUCCESS) {
6692 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6693 			  FL("Failed to reset monitor filters"));
6694 	}
6695 
6696 	qdf_spin_unlock_bh(&pdev->mon_lock);
6697 	return QDF_STATUS_SUCCESS;
6698 }
6699 
6700 /**
6701  * dp_get_tx_pending() - read pending tx
6702  * @pdev_handle: Datapath PDEV handle
6703  *
6704  * Return: outstanding tx
6705  */
6706 static uint32_t dp_get_tx_pending(struct cdp_pdev *pdev_handle)
6707 {
6708 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6709 
6710 	return qdf_atomic_read(&pdev->num_tx_outstanding);
6711 }
6712 
6713 /**
6714  * dp_get_peer_mac_from_peer_id() - get peer mac
6715  * @soc: Datapath SOC handle
6716  * @peer_id: Peer ID
6717  * @peer_mac: MAC addr of PEER
6718  *
6719  * Return: QDF_STATUS
6720  */
6721 static QDF_STATUS dp_get_peer_mac_from_peer_id(struct cdp_soc_t *soc,
6722 					       uint32_t peer_id,
6723 					       uint8_t *peer_mac)
6724 {
6725 	struct dp_peer *peer;
6726 
6727 	if (soc && peer_mac) {
6728 		peer = dp_peer_find_by_id((struct dp_soc *)soc,
6729 					  (uint16_t)peer_id);
6730 		if (peer) {
6731 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
6732 				     QDF_MAC_ADDR_SIZE);
6733 			dp_peer_unref_del_find_by_id(peer);
6734 			return QDF_STATUS_SUCCESS;
6735 		}
6736 	}
6737 
6738 	return QDF_STATUS_E_FAILURE;
6739 }
6740 
6741 /**
6742  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
6743  * @soc: Datapath soc handle
6744  * @vdev_id: id of Datapath VDEV handle
 * @special_monitor: Flag to denote if it is smart monitor mode
6745  *
6746  * Return: 0 on success, not 0 on failure
6747  */
6748 static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_soc_t *soc,
6749 					   uint8_t vdev_id,
6750 					   uint8_t special_monitor)
6751 {
6752 	struct dp_pdev *pdev;
6753 	struct dp_vdev *vdev =
6754 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
6755 						   vdev_id);
6756 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6757 
6758 	if (!vdev)
6759 		return QDF_STATUS_E_FAILURE;
6760 
6761 	pdev = vdev->pdev;
6762 	pdev->monitor_vdev = vdev;
6763 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
6764 		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
6765 		  pdev, pdev->pdev_id, pdev->soc, vdev);
6766 
6767 	/*
6768 	 * Do not configure the monitor buf ring and filter for smart and
6769 	 * lite monitor modes.
6770 	 * For smart monitor, filters are added along with the first NAC.
6771 	 * For lite monitor, the required configuration is done through
6772 	 * dp_set_pdev_param.
6773 	 */
6774 	if (special_monitor)
6775 		return QDF_STATUS_SUCCESS;
6776 
6777 	/*Check if current pdev's monitor_vdev exists */
6778 	if (pdev->monitor_configured) {
6779 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
6780 			  "monitor vap already created vdev=%pK\n", vdev);
6781 		return QDF_STATUS_E_RESOURCES;
6782 	}
6783 
6784 	pdev->monitor_configured = true;
6785 	dp_mon_buf_delayed_replenish(pdev);
6786 
6787 	dp_mon_filter_setup_mon_mode(pdev);
6788 	status = dp_mon_filter_update(pdev);
6789 	if (status != QDF_STATUS_SUCCESS) {
6790 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6791 			  FL("Failed to set up monitor filters"));
6792 		dp_mon_filter_reset_mon_mode(pdev);
6793 		pdev->monitor_configured = false;
6794 		pdev->monitor_vdev = NULL;
6795 	}
6796 
6797 	return status;
6798 }
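
/*
 * Usage sketch (illustrative only): monitor mode enable/disable pairing;
 * a hypothetical caller enables via dp_vdev_set_monitor_mode() and later
 * disables via dp_reset_monitor_mode() above:
 *
 *	status = dp_vdev_set_monitor_mode(soc, vdev_id, 0);
 *	...
 *	if (QDF_IS_STATUS_SUCCESS(status))
 *		dp_reset_monitor_mode(soc, pdev_id, 0);
 */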
6799 
6800 /**
6801  * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
6802  * @soc: soc handle
6803  * @pdev_id: id of Datapath PDEV handle
6804  * @filter_val: Flag to select Filter for monitor mode
6805  * Return: 0 on success, not 0 on failure
6806  */
6807 static QDF_STATUS
6808 dp_pdev_set_advance_monitor_filter(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
6809 				   struct cdp_monitor_filter *filter_val)
6810 {
6811 	/* Many monitor VAPs can exist in a system but only one can be up at
6812 	 * any time
6813 	 */
6814 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
6815 	struct dp_vdev *vdev;
6816 	struct dp_pdev *pdev =
6817 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
6818 						   pdev_id);
6819 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6820 
6821 	if (!pdev)
6822 		return QDF_STATUS_E_FAILURE;
6823 
6824 	vdev = pdev->monitor_vdev;
6825 
6826 	if (!vdev)
6827 		return QDF_STATUS_E_FAILURE;
6828 
6829 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
6830 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
6831 		pdev, pdev_id, soc, vdev);
6832 
6833 	/*Check if current pdev's monitor_vdev exists */
6834 	if (!pdev->monitor_vdev) {
6835 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6836 			"vdev=%pK", vdev);
6837 		qdf_assert(vdev);
6838 	}
6839 
6840 	/* update filter mode, type in pdev structure */
6841 	pdev->mon_filter_mode = filter_val->mode;
6842 	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
6843 	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
6844 	pdev->fp_data_filter = filter_val->fp_data;
6845 	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
6846 	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
6847 	pdev->mo_data_filter = filter_val->mo_data;
6848 
6849 	dp_mon_filter_setup_mon_mode(pdev);
6850 	status = dp_mon_filter_update(pdev);
6851 	if (status != QDF_STATUS_SUCCESS) {
6852 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6853 			  FL("Failed to set filter for advanced monitor mode"));
6854 		dp_mon_filter_reset_mon_mode(pdev);
6855 	}
6856 
6857 	return status;
6858 }
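
/*
 * Usage sketch (illustrative only): populating cdp_monitor_filter before
 * applying it; the specific flag combination and the mode constant
 * MON_FILTER_PASS are assumed example values:
 *
 *	struct cdp_monitor_filter filter = {0};
 *
 *	filter.mode = MON_FILTER_PASS;
 *	filter.fp_mgmt = FILTER_MGMT_ALL;
 *	filter.fp_ctrl = FILTER_CTRL_ALL;
 *	filter.fp_data = FILTER_DATA_UCAST | FILTER_DATA_MCAST;
 *	dp_pdev_set_advance_monitor_filter(soc_hdl, pdev_id, &filter);
 */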
6859 
6860 /**
6861  * dp_deliver_tx_mgmt() - Deliver mgmt frame for tx capture
6862  * @cdp_soc : data path soc handle
6863  * @pdev_id : pdev_id
6864  * @nbuf: Management frame buffer
6865  */
6866 static QDF_STATUS
6867 dp_deliver_tx_mgmt(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, qdf_nbuf_t nbuf)
6868 {
6869 	struct dp_pdev *pdev =
6870 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
6871 						   pdev_id);
6872 
6873 	if (!pdev)
6874 		return QDF_STATUS_E_FAILURE;
6875 
6876 	dp_deliver_mgmt_frm(pdev, nbuf);
6877 
6878 	return QDF_STATUS_SUCCESS;
6879 }
6880 
6881 /**
6882  * dp_set_bsscolor() - sets bsscolor for tx capture
6883  * dp_mon_set_bsscolor() - sets bsscolor for tx capture
6884  * @bsscolor: new bsscolor
6885  */
6886 static void
6887 dp_mon_set_bsscolor(struct dp_pdev *pdev, uint8_t bsscolor)
6888 {
6889 	pdev->rx_mon_recv_status.bsscolor = bsscolor;
6890 }
6891 
6892 /**
6893  * dp_pdev_get_filter_ucast_data() - get DP PDEV monitor ucast filter
6894  * @pdev_handle: Datapath PDEV handle
6896  * Return: true on ucast filter flag set
6897  */
6898 static bool dp_pdev_get_filter_ucast_data(struct cdp_pdev *pdev_handle)
6899 {
6900 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6901 
6902 	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
6903 	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
6904 		return true;
6905 
6906 	return false;
6907 }
6908 
6909 /**
6910  * dp_pdev_get_filter_mcast_data() - get DP PDEV monitor mcast filter
6911  * @pdev_handle: Datapath PDEV handle
6912  * Return: true on mcast filter flag set
6913  */
6914 static bool dp_pdev_get_filter_mcast_data(struct cdp_pdev *pdev_handle)
6915 {
6916 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6917 
6918 	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
6919 	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
6920 		return true;
6921 
6922 	return false;
6923 }
6924 
6925 /**
6926  * dp_pdev_get_filter_non_data() - get DP PDEV monitor non_data filter
6927  * @pdev_handle: Datapath PDEV handle
6928  * Return: true on non data filter flag set
6929  */
6930 static bool dp_pdev_get_filter_non_data(struct cdp_pdev *pdev_handle)
6931 {
6932 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6933 
6934 	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
6935 	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
6936 		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
6937 		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
6938 			return true;
6939 		}
6940 	}
6941 
6942 	return false;
6943 }
6944 
6945 #ifdef MESH_MODE_SUPPORT
6946 void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
6947 {
6948 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
6949 
6950 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6951 		FL("val %d"), val);
6952 	vdev->mesh_vdev = val;
6953 }
6954 
6955 /*
6956  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
6957  * @vdev_hdl: virtual device object
6958  * @val: value to be set
6959  *
6960  * Return: void
6961  */
6962 void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
6963 {
6964 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
6965 
6966 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6967 		FL("val %d"), val);
6968 	vdev->mesh_rx_filter = val;
6969 }
6970 #endif
6971 
6972 #ifdef VDEV_PEER_PROTOCOL_COUNT
6973 static void dp_enable_vdev_peer_protocol_count(struct cdp_soc_t *soc,
6974 					       int8_t vdev_id,
6975 					       bool enable)
6976 {
6977 	struct dp_vdev *vdev;
6978 
6979 	vdev = dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
6980 						  vdev_id);
6981 	dp_info("enable %d vdev_id %d", enable, vdev_id);
6982 	vdev->peer_protocol_count_track = enable;
6983 }
6984 
6985 static void dp_enable_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc,
6986 						   int8_t vdev_id,
6987 						   int drop_mask)
6988 {
6989 	struct dp_vdev *vdev;
6990 
6991 	vdev = dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
6992 						  vdev_id);
6993 	dp_info("drop_mask %d vdev_id %d", drop_mask, vdev_id);
6994 	vdev->peer_protocol_count_dropmask = drop_mask;
6995 }
6996 
6997 static int dp_is_vdev_peer_protocol_count_enabled(struct cdp_soc_t *soc,
6998 						  int8_t vdev_id)
6999 {
7000 	struct dp_vdev *vdev;
7001 
7002 	vdev = dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
7003 						  vdev_id);
7004 	dp_info("enable %d vdev_id %d", vdev->peer_protocol_count_track,
7005 		vdev_id);
7006 	return vdev->peer_protocol_count_track;
7007 }
7008 
7009 static int dp_get_vdev_peer_protocol_drop_mask(struct cdp_soc_t *soc,
7010 					       int8_t vdev_id)
7011 {
7012 	struct dp_vdev *vdev;
7013 
7014 	vdev = dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
7015 						  vdev_id);
7016 	dp_info("drop_mask %d vdev_id %d", vdev->peer_protocol_count_dropmask,
7017 		vdev_id);
7018 	return vdev->peer_protocol_count_dropmask;
7019 }
7020 
7021 #endif
7022 
7023 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data)
7024 {
7025 	uint8_t pdev_count;
7026 
7027 	for (pdev_count = 0; pdev_count < MAX_PDEV_CNT; pdev_count++) {
7028 		if (soc->pdev_list[pdev_count] &&
7029 		    soc->pdev_list[pdev_count] == data)
7030 			return true;
7031 	}
7032 	return false;
7033 }
7034 
7035 /**
7036  * dp_rx_bar_stats_cb(): BAR received stats callback
7037  * @soc: SOC handle
7038  * @cb_ctxt: Call back context
7039  * @reo_status: Reo status
7040  *
7041  * return: void
7042  */
7043 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
7044 	union hal_reo_status *reo_status)
7045 {
7046 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
7047 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
7048 
7049 	if (!dp_check_pdev_exists(soc, pdev)) {
7050 		dp_err_rl("pdev doesn't exist");
7051 		return;
7052 	}
7053 
7054 	if (!qdf_atomic_read(&soc->cmn_init_done))
7055 		return;
7056 
7057 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
7058 		DP_PRINT_STATS("REO stats failure %d",
7059 			       queue_status->header.status);
7060 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
7061 		return;
7062 	}
7063 
7064 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
7065 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
7066 
7067 }
7068 
7069 /**
7070  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
7071  * @vdev: DP VDEV handle
7072  * @vdev_stats: buffer to hold the aggregated stats
7073  * return: void
7074  */
7075 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
7076 			     struct cdp_vdev_stats *vdev_stats)
7077 {
7078 	struct dp_peer *peer = NULL;
7079 	struct dp_soc *soc = NULL;
7080 
7081 	if (!vdev || !vdev->pdev)
7082 		return;
7083 
7084 	soc = vdev->pdev->soc;
7085 
7086 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
7087 
7088 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
7089 		dp_update_vdev_stats(vdev_stats, peer);
7090 
7091 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7092 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
7093 			     vdev_stats, vdev->vdev_id,
7094 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
7095 #endif
7096 }
7097 
7098 void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
7099 {
7100 	struct dp_vdev *vdev = NULL;
7101 	struct dp_soc *soc;
7102 	struct cdp_vdev_stats *vdev_stats =
7103 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
7104 
7105 	if (!vdev_stats) {
7106 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7107 			  "DP alloc failure - unable to allocate vdev stats");
7108 		return;
7109 	}
7110 
7111 	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
7112 	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
7113 	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
7114 
7115 	if (pdev->mcopy_mode)
7116 		DP_UPDATE_STATS(pdev, pdev->invalid_peer);
7117 
7118 	soc = pdev->soc;
7119 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
7120 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
7121 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
7122 
7123 		dp_aggregate_vdev_stats(vdev, vdev_stats);
7124 		dp_update_pdev_stats(pdev, vdev_stats);
7125 		dp_update_pdev_ingress_stats(pdev, vdev);
7126 	}
7127 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
7128 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
7129 	qdf_mem_free(vdev_stats);
7130 
7131 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7132 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
7133 			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
7134 #endif
7135 }
7136 
7137 /**
7138  * dp_vdev_getstats() - get vdev packet level stats
7139  * @vdev_handle: Datapath VDEV handle
7140  * @stats: cdp network device stats structure
7141  *
7142  * Return: QDF_STATUS
7143  */
7144 static QDF_STATUS dp_vdev_getstats(struct cdp_vdev *vdev_handle,
7145 				   struct cdp_dev_stats *stats)
7146 {
7147 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7148 	struct dp_pdev *pdev;
7149 	struct dp_soc *soc;
7150 	struct cdp_vdev_stats *vdev_stats;
7151 
7152 	if (!vdev)
7153 		return QDF_STATUS_E_FAILURE;
7154 
7155 	pdev = vdev->pdev;
7156 	if (!pdev)
7157 		return QDF_STATUS_E_FAILURE;
7158 
7159 	soc = pdev->soc;
7160 
7161 	vdev_stats = qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
7162 
7163 	if (!vdev_stats) {
7164 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "DP alloc failure - unable to allocate vdev stats");
7166 		return QDF_STATUS_E_FAILURE;
7167 	}
7168 
7169 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
7170 	dp_aggregate_vdev_stats(vdev, vdev_stats);
7171 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
7172 
7173 	stats->tx_packets = vdev_stats->tx_i.rcvd.num;
7174 	stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
7175 
7176 	stats->tx_errors = vdev_stats->tx.tx_failed +
7177 		vdev_stats->tx_i.dropped.dropped_pkt.num;
7178 	stats->tx_dropped = stats->tx_errors;
7179 
7180 	stats->rx_packets = vdev_stats->rx.unicast.num +
7181 		vdev_stats->rx.multicast.num +
7182 		vdev_stats->rx.bcast.num;
7183 	stats->rx_bytes = vdev_stats->rx.unicast.bytes +
7184 		vdev_stats->rx.multicast.bytes +
7185 		vdev_stats->rx.bcast.bytes;
7186 
7187 	qdf_mem_free(vdev_stats);
7188 
7189 	return QDF_STATUS_SUCCESS;
7190 }
7191 
7192 
7193 /**
7194  * dp_pdev_getstats() - get pdev packet level stats
7195  * @pdev_handle: Datapath PDEV handle
7196  * @stats: cdp network device stats structure
7197  *
 * Return: void
7199  */
7200 static void dp_pdev_getstats(struct cdp_pdev *pdev_handle,
7201 			     struct cdp_dev_stats *stats)
7202 {
7203 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7204 
7205 	dp_aggregate_pdev_stats(pdev);
7206 
7207 	stats->tx_packets = pdev->stats.tx_i.rcvd.num;
7208 	stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
7209 
7210 	stats->tx_errors = pdev->stats.tx.tx_failed +
7211 		pdev->stats.tx_i.dropped.dropped_pkt.num;
7212 	stats->tx_dropped = stats->tx_errors;
7213 
7214 	stats->rx_packets = pdev->stats.rx.unicast.num +
7215 		pdev->stats.rx.multicast.num +
7216 		pdev->stats.rx.bcast.num;
7217 	stats->rx_bytes = pdev->stats.rx.unicast.bytes +
7218 		pdev->stats.rx.multicast.bytes +
7219 		pdev->stats.rx.bcast.bytes;
7220 	stats->rx_errors = pdev->stats.err.desc_alloc_fail +
7221 		pdev->stats.err.ip_csum_err +
7222 		pdev->stats.err.tcp_udp_csum_err +
7223 		pdev->stats.rx.err.mic_err +
7224 		pdev->stats.rx.err.decrypt_err +
7225 		pdev->stats.err.rxdma_error +
7226 		pdev->stats.err.reo_error;
7227 	stats->rx_dropped = pdev->stats.dropped.msdu_not_done +
7228 		pdev->stats.dropped.mec +
7229 		pdev->stats.dropped.mesh_filter +
7230 		pdev->stats.dropped.wifi_parse +
7231 		pdev->stats.dropped.mon_rx_drop +
7232 		pdev->stats.dropped.mon_radiotap_update_err;
7233 }
7234 
7235 /**
7236  * dp_get_device_stats() - get interface level packet stats
7237  * @soc: soc handle
7238  * @id : vdev_id or pdev_id based on type
7239  * @stats: cdp network device stats structure
7240  * @type: device type pdev/vdev
7241  *
7242  * Return: QDF_STATUS
7243  */
7244 static QDF_STATUS dp_get_device_stats(struct cdp_soc_t *soc, uint8_t id,
7245 				      struct cdp_dev_stats *stats,
7246 				      uint8_t type)
7247 {
7248 	switch (type) {
7249 	case UPDATE_VDEV_STATS:
7250 		return dp_vdev_getstats(
7251 			(struct cdp_vdev *)dp_get_vdev_from_soc_vdev_id_wifi3(
7252 			 (struct dp_soc *)soc, id), stats);
7253 	case UPDATE_PDEV_STATS:
7254 		{
7255 			struct dp_pdev *pdev =
7256 				dp_get_pdev_from_soc_pdev_id_wifi3(
7257 						(struct dp_soc *)soc,
7258 						 id);
7259 			if (pdev) {
7260 				dp_pdev_getstats((struct cdp_pdev *)pdev,
7261 						 stats);
7262 				return QDF_STATUS_SUCCESS;
7263 			}
7264 		}
7265 		break;
7266 	default:
7267 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "apstats cannot be updated for this input type %d",
			  type);
7270 		break;
7271 	}
7272 
7273 	return QDF_STATUS_E_FAILURE;
7274 }
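
/*
 * Example: fetching interface-level counters through
 * dp_get_device_stats(). @id is interpreted as a vdev_id or a pdev_id
 * depending on @type. A minimal sketch; soc, vdev_id and report() are
 * illustrative caller-side names:
 *
 *	struct cdp_dev_stats dev_stats = {0};
 *
 *	if (dp_get_device_stats(soc, vdev_id, &dev_stats,
 *				UPDATE_VDEV_STATS) == QDF_STATUS_SUCCESS)
 *		report(dev_stats.tx_packets, dev_stats.rx_packets);
 */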
7275 
7276 const
7277 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
7278 {
7279 	switch (ring_type) {
7280 	case REO_DST:
7281 		return "Reo_dst";
7282 	case REO_EXCEPTION:
7283 		return "Reo_exception";
7284 	case REO_CMD:
7285 		return "Reo_cmd";
7286 	case REO_REINJECT:
7287 		return "Reo_reinject";
7288 	case REO_STATUS:
7289 		return "Reo_status";
7290 	case WBM2SW_RELEASE:
7291 		return "wbm2sw_release";
7292 	case TCL_DATA:
7293 		return "tcl_data";
7294 	case TCL_CMD:
7295 		return "tcl_cmd";
7296 	case TCL_STATUS:
7297 		return "tcl_status";
7298 	case SW2WBM_RELEASE:
7299 		return "sw2wbm_release";
7300 	case RXDMA_BUF:
7301 		return "Rxdma_buf";
7302 	case RXDMA_DST:
7303 		return "Rxdma_dst";
7304 	case RXDMA_MONITOR_BUF:
7305 		return "Rxdma_monitor_buf";
7306 	case RXDMA_MONITOR_DESC:
7307 		return "Rxdma_monitor_desc";
7308 	case RXDMA_MONITOR_STATUS:
7309 		return "Rxdma_monitor_status";
7310 	default:
7311 		dp_err("Invalid ring type");
7312 		break;
7313 	}
7314 	return "Invalid";
7315 }
7316 
/**
 * dp_print_napi_stats() - print NAPI stats
 * @soc: soc handle
 *
 * Return: void
 */
7321 void dp_print_napi_stats(struct dp_soc *soc)
7322 {
7323 	hif_print_napi_stats(soc->hif_handle);
7324 }
7325 
7326 /**
7327  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
7328  * @vdev: DP_VDEV handle
7329  *
7330  * Return: QDF_STATUS
7331  */
7332 static inline QDF_STATUS
7333 dp_txrx_host_stats_clr(struct dp_vdev *vdev)
7334 {
7335 	struct dp_peer *peer = NULL;
7336 
7337 	if (!vdev || !vdev->pdev)
7338 		return QDF_STATUS_E_FAILURE;
7339 
7340 	DP_STATS_CLR(vdev->pdev);
7341 	DP_STATS_CLR(vdev->pdev->soc);
7342 	DP_STATS_CLR(vdev);
7343 
7344 	hif_clear_napi_stats(vdev->pdev->soc->hif_handle);
7345 
7346 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
7347 		if (!peer)
7348 			return QDF_STATUS_E_FAILURE;
7349 		DP_STATS_CLR(peer);
7350 
7351 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7352 		dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
7353 				     &peer->stats,  peer->peer_ids[0],
7354 				     UPDATE_PEER_STATS, vdev->pdev->pdev_id);
7355 #endif
7356 	}
7357 
7358 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7359 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
7360 			     &vdev->stats,  vdev->vdev_id,
7361 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
7362 #endif
7363 	return QDF_STATUS_SUCCESS;
7364 }
7365 
7366 /*
7367  * dp_get_host_peer_stats()- function to print peer stats
7368  * @soc: dp_soc handle
7369  * @mac_addr: mac address of the peer
7370  *
7371  * Return: QDF_STATUS
7372  */
7373 static QDF_STATUS
7374 dp_get_host_peer_stats(struct cdp_soc_t *soc, uint8_t *mac_addr)
7375 {
7376 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7377 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
7378 						      mac_addr, 0,
7379 						      DP_VDEV_ALL);
7380 	if (!peer || peer->delete_in_progress) {
7381 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7382 			  "%s: Invalid peer\n", __func__);
7383 		status = QDF_STATUS_E_FAILURE;
7384 		goto fail;
7385 	}
7386 
7387 	dp_print_peer_stats(peer);
7388 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
7389 fail:
7390 	if (peer)
7391 		dp_peer_unref_delete(peer);
7392 
7393 	return status;
7394 }
7395 
7396 /**
7397  * dp_txrx_stats_help() - Helper function for Txrx_Stats
7398  *
7399  * Return: None
7400  */
7401 static void dp_txrx_stats_help(void)
7402 {
7403 	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
7404 	dp_info("stats_option:");
7405 	dp_info("  1 -- HTT Tx Statistics");
7406 	dp_info("  2 -- HTT Rx Statistics");
7407 	dp_info("  3 -- HTT Tx HW Queue Statistics");
7408 	dp_info("  4 -- HTT Tx HW Sched Statistics");
7409 	dp_info("  5 -- HTT Error Statistics");
7410 	dp_info("  6 -- HTT TQM Statistics");
7411 	dp_info("  7 -- HTT TQM CMDQ Statistics");
7412 	dp_info("  8 -- HTT TX_DE_CMN Statistics");
7413 	dp_info("  9 -- HTT Tx Rate Statistics");
7414 	dp_info(" 10 -- HTT Rx Rate Statistics");
7415 	dp_info(" 11 -- HTT Peer Statistics");
7416 	dp_info(" 12 -- HTT Tx SelfGen Statistics");
7417 	dp_info(" 13 -- HTT Tx MU HWQ Statistics");
7418 	dp_info(" 14 -- HTT RING_IF_INFO Statistics");
7419 	dp_info(" 15 -- HTT SRNG Statistics");
7420 	dp_info(" 16 -- HTT SFM Info Statistics");
7421 	dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
7422 	dp_info(" 18 -- HTT Peer List Details");
7423 	dp_info(" 20 -- Clear Host Statistics");
7424 	dp_info(" 21 -- Host Rx Rate Statistics");
7425 	dp_info(" 22 -- Host Tx Rate Statistics");
7426 	dp_info(" 23 -- Host Tx Statistics");
7427 	dp_info(" 24 -- Host Rx Statistics");
7428 	dp_info(" 25 -- Host AST Statistics");
7429 	dp_info(" 26 -- Host SRNG PTR Statistics");
7430 	dp_info(" 27 -- Host Mon Statistics");
7431 	dp_info(" 28 -- Host REO Queue Statistics");
7432 	dp_info(" 29 -- Host Soc cfg param Statistics");
7433 	dp_info(" 30 -- Host pdev cfg param Statistics");
7434 }
7435 
7436 /**
7437  * dp_print_host_stats()- Function to print the stats aggregated at host
 * @vdev: DP_VDEV handle
 * @req: stats request, carrying the host stats type
 *
 * Return: 0 on success; prints an error message on invalid input
7442  */
7443 static int
7444 dp_print_host_stats(struct dp_vdev *vdev,
7445 		    struct cdp_txrx_stats_req *req)
7446 {
7447 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
7448 	enum cdp_host_txrx_stats type =
7449 			dp_stats_mapping_table[req->stats][STATS_HOST];
7450 
7451 	dp_aggregate_pdev_stats(pdev);
7452 
7453 	switch (type) {
7454 	case TXRX_CLEAR_STATS:
7455 		dp_txrx_host_stats_clr(vdev);
7456 		break;
7457 	case TXRX_RX_RATE_STATS:
7458 		dp_print_rx_rates(vdev);
7459 		break;
7460 	case TXRX_TX_RATE_STATS:
7461 		dp_print_tx_rates(vdev);
7462 		break;
7463 	case TXRX_TX_HOST_STATS:
7464 		dp_print_pdev_tx_stats(pdev);
7465 		dp_print_soc_tx_stats(pdev->soc);
7466 		break;
7467 	case TXRX_RX_HOST_STATS:
7468 		dp_print_pdev_rx_stats(pdev);
7469 		dp_print_soc_rx_stats(pdev->soc);
7470 		break;
7471 	case TXRX_AST_STATS:
7472 		dp_print_ast_stats(pdev->soc);
7473 		dp_print_peer_table(vdev);
7474 		break;
7475 	case TXRX_SRNG_PTR_STATS:
7476 		dp_print_ring_stats(pdev);
7477 		break;
7478 	case TXRX_RX_MON_STATS:
7479 		dp_print_pdev_rx_mon_stats(pdev);
7480 		break;
7481 	case TXRX_REO_QUEUE_STATS:
7482 		dp_get_host_peer_stats((struct cdp_soc_t *)pdev->soc,
7483 				       req->peer_addr);
7484 		break;
7485 	case TXRX_SOC_CFG_PARAMS:
7486 		dp_print_soc_cfg_params(pdev->soc);
7487 		break;
7488 	case TXRX_PDEV_CFG_PARAMS:
7489 		dp_print_pdev_cfg_params(pdev);
7490 		break;
7491 	case TXRX_NAPI_STATS:
		dp_print_napi_stats(pdev->soc);
		break;
7493 	case TXRX_SOC_INTERRUPT_STATS:
7494 		dp_print_soc_interrupt_stats(pdev->soc);
7495 		break;
7496 	default:
7497 		dp_info("Wrong Input For TxRx Host Stats");
7498 		dp_txrx_stats_help();
7499 		break;
7500 	}
7501 	return 0;
7502 }
7503 
7504 /*
/*
 * is_ppdu_txrx_capture_enabled() - API to check whether any of the
 *                              pktlog, tx sniffer or mcopy capture
 *                              modes is enabled.
 * @pdev: dp pdev handle.
 *
 * Return: true if none of the capture modes is enabled, false otherwise
 */
static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
{
	return !pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
	       !pdev->mcopy_mode;
}
7520 /*
/*
 * dp_set_bpr_enable() - API to enable/disable bpr feature
 * @pdev: DP_PDEV handle.
 * @val: Provided value.
 *
 * Return: 0 for success. nonzero for failure.
 */
7528 dp_set_bpr_enable(struct dp_pdev *pdev, int val)
7529 {
7530 	switch (val) {
7531 	case CDP_BPR_DISABLE:
7532 		pdev->bpr_enable = CDP_BPR_DISABLE;
7533 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
7534 		    !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
7535 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7536 		} else if (pdev->enhanced_stats_en &&
7537 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
7538 			   !pdev->pktlog_ppdu_stats) {
7539 			dp_h2t_cfg_stats_msg_send(pdev,
7540 						  DP_PPDU_STATS_CFG_ENH_STATS,
7541 						  pdev->pdev_id);
7542 		}
7543 		break;
7544 	case CDP_BPR_ENABLE:
7545 		pdev->bpr_enable = CDP_BPR_ENABLE;
7546 		if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
7547 		    !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
7548 			dp_h2t_cfg_stats_msg_send(pdev,
7549 						  DP_PPDU_STATS_CFG_BPR,
7550 						  pdev->pdev_id);
7551 		} else if (pdev->enhanced_stats_en &&
7552 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
7553 			   !pdev->pktlog_ppdu_stats) {
7554 			dp_h2t_cfg_stats_msg_send(pdev,
7555 						  DP_PPDU_STATS_CFG_BPR_ENH,
7556 						  pdev->pdev_id);
7557 		} else if (pdev->pktlog_ppdu_stats) {
7558 			dp_h2t_cfg_stats_msg_send(pdev,
7559 						  DP_PPDU_STATS_CFG_BPR_PKTLOG,
7560 						  pdev->pdev_id);
7561 		}
7562 		break;
7563 	default:
7564 		break;
7565 	}
7566 
7567 	return QDF_STATUS_SUCCESS;
7568 }
7569 
7570 /*
 * dp_pdev_tid_stats_ingress_inc() - increment pdev ingress stack counter
7572  * @pdev: pdev handle
7573  * @val: increase in value
7574  *
7575  * Return: void
7576  */
7577 static void
7578 dp_pdev_tid_stats_ingress_inc(struct dp_pdev *pdev, uint32_t val)
7579 {
7580 	pdev->stats.tid_stats.ingress_stack += val;
7581 }
7582 
7583 /*
 * dp_pdev_tid_stats_osif_drop() - increment pdev OS interface drop counter
7585  * @pdev: pdev handle
7586  * @val: increase in value
7587  *
7588  * Return: void
7589  */
7590 static void
7591 dp_pdev_tid_stats_osif_drop(struct dp_pdev *pdev, uint32_t val)
7592 {
7593 	pdev->stats.tid_stats.osif_drop += val;
7594 }
7595 
7596 
7597 /*
7598  * dp_config_debug_sniffer()- API to enable/disable debug sniffer
7599  * @pdev: DP_PDEV handle
7600  * @val: user provided value
7601  *
7602  * Return: 0 for success. nonzero for failure.
7603  */
7604 static QDF_STATUS
7605 dp_config_debug_sniffer(struct dp_pdev *pdev, int val)
7606 {
7607 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7608 
7609 	/*
7610 	 * Note: The mirror copy mode cannot co-exist with any other
7611 	 * monitor modes. Hence disabling the filter for this mode will
7612 	 * reset the monitor destination ring filters.
7613 	 */
7614 	if (pdev->mcopy_mode) {
7615 #ifdef FEATURE_PERPKT_INFO
7616 		dp_pdev_disable_mcopy_code(pdev);
7617 		dp_mon_filter_reset_mcopy_mode(pdev);
7618 		status = dp_mon_filter_update(pdev);
7619 		if (status != QDF_STATUS_SUCCESS) {
7620 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Failed to reset M_copy mode filters"));
7622 		}
7623 #endif /* FEATURE_PERPKT_INFO */
7624 	}
7625 
7626 	switch (val) {
7627 	case 0:
7628 		pdev->tx_sniffer_enable = 0;
7629 		pdev->monitor_configured = false;
7630 
7631 		/*
		 * We don't need to reset the Rx monitor status ring or call
		 * the API dp_ppdu_ring_reset() when the debug sniffer mode is
		 * disabled. The Rx monitor status ring will be disabled when
		 * the last mode using the monitor status ring gets disabled.
7636 		 */
7637 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
7638 		    !pdev->bpr_enable) {
7639 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7640 		} else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
7641 			dp_h2t_cfg_stats_msg_send(pdev,
7642 				DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
7643 		} else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
7644 			dp_h2t_cfg_stats_msg_send(pdev,
7645 						  DP_PPDU_STATS_CFG_BPR_ENH,
7646 						  pdev->pdev_id);
7647 		} else {
7648 			dp_h2t_cfg_stats_msg_send(pdev,
7649 						  DP_PPDU_STATS_CFG_BPR,
7650 						  pdev->pdev_id);
7651 		}
7652 		break;
7653 
7654 	case 1:
7655 		pdev->tx_sniffer_enable = 1;
7656 		pdev->monitor_configured = false;
7657 
7658 		if (!pdev->pktlog_ppdu_stats)
7659 			dp_h2t_cfg_stats_msg_send(pdev,
7660 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
7661 		break;
7662 	case 2:
7663 		if (pdev->monitor_vdev) {
7664 			status = QDF_STATUS_E_RESOURCES;
7665 			break;
7666 		}
7667 
7668 #ifdef FEATURE_PERPKT_INFO
7669 		pdev->mcopy_mode = 1;
7670 		pdev->tx_sniffer_enable = 0;
7671 		pdev->monitor_configured = true;
7672 
7673 		/*
7674 		 * Setup the M copy mode filter.
7675 		 */
7676 		dp_mon_filter_setup_mcopy_mode(pdev);
7677 		status = dp_mon_filter_update(pdev);
7678 		if (status != QDF_STATUS_SUCCESS) {
7679 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7680 				  FL("Failed to set M_copy mode filters"));
7681 			dp_mon_filter_reset_mcopy_mode(pdev);
7682 			dp_pdev_disable_mcopy_code(pdev);
7683 			return status;
7684 		}
7685 
7686 		if (!pdev->pktlog_ppdu_stats)
7687 			dp_h2t_cfg_stats_msg_send(pdev,
7688 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
7689 #endif /* FEATURE_PERPKT_INFO */
7690 		break;
7691 
7692 	default:
7693 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7694 			"Invalid value");
7695 		break;
7696 	}
7697 	return status;
7698 }
7699 
7700 #ifdef FEATURE_PERPKT_INFO
7701 /*
 * dp_enable_enhanced_stats()- API to enable enhanced statistics
7703  * @soc_handle: DP_SOC handle
7704  * @pdev_id: id of DP_PDEV handle
7705  *
7706  * Return: QDF_STATUS
7707  */
7708 static QDF_STATUS
7709 dp_enable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
7710 {
7711 	struct dp_pdev *pdev = NULL;
7712 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7713 
7714 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7715 						  pdev_id);
7716 
7717 	if (!pdev)
7718 		return QDF_STATUS_E_FAILURE;
7719 
7720 	if (pdev->enhanced_stats_en == 0)
7721 		dp_cal_client_timer_start(pdev->cal_client_ctx);
7722 
7723 	pdev->enhanced_stats_en = 1;
7724 
7725 	dp_mon_filter_setup_enhanced_stats(pdev);
7726 	status = dp_mon_filter_update(pdev);
7727 	if (status != QDF_STATUS_SUCCESS) {
7728 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7729 			  FL("Failed to set enhanced mode filters"));
7730 		dp_mon_filter_reset_enhanced_stats(pdev);
7731 		dp_cal_client_timer_stop(pdev->cal_client_ctx);
7732 		pdev->enhanced_stats_en = 0;
7733 		return QDF_STATUS_E_FAILURE;
7734 	}
7735 
7736 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
7737 		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
7738 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
7739 		dp_h2t_cfg_stats_msg_send(pdev,
7740 					  DP_PPDU_STATS_CFG_BPR_ENH,
7741 					  pdev->pdev_id);
7742 	}
7743 
7744 	return QDF_STATUS_SUCCESS;
7745 }
7746 
7747 /*
 * dp_disable_enhanced_stats()- API to disable enhanced statistics
 * @soc: the soc handle
 * @pdev_id: pdev_id of pdev
 *
 * Return: QDF_STATUS
7753  */
7754 static QDF_STATUS
7755 dp_disable_enhanced_stats(struct cdp_soc_t *soc, uint8_t pdev_id)
7756 {
7757 	struct dp_pdev *pdev =
7758 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7759 						   pdev_id);
7760 
7761 	if (!pdev)
7762 		return QDF_STATUS_E_FAILURE;
7763 
7764 	if (pdev->enhanced_stats_en == 1)
7765 		dp_cal_client_timer_stop(pdev->cal_client_ctx);
7766 
7767 	pdev->enhanced_stats_en = 0;
7768 
7769 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
7770 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7771 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
7772 		dp_h2t_cfg_stats_msg_send(pdev,
7773 					  DP_PPDU_STATS_CFG_BPR,
7774 					  pdev->pdev_id);
7775 	}
7776 
7777 	dp_mon_filter_reset_enhanced_stats(pdev);
7778 	if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
7779 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7780 			  FL("Failed to reset enhanced mode filters"));
7781 	}
7782 
7783 	return QDF_STATUS_SUCCESS;
7784 }
7785 #endif /* FEATURE_PERPKT_INFO */
7786 
7787 /*
7788  * dp_get_fw_peer_stats()- function to print peer stats
7789  * @soc: soc handle
7790  * @pdev_id : id of the pdev handle
7791  * @mac_addr: mac address of the peer
7792  * @cap: Type of htt stats requested
7793  * @is_wait: if set, wait on completion from firmware response
7794  *
 * Currently supporting only MAC ID based requests:
7796  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
7797  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
7798  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
7799  *
7800  * Return: QDF_STATUS
7801  */
7802 static QDF_STATUS
7803 dp_get_fw_peer_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
7804 		     uint8_t *mac_addr,
7805 		     uint32_t cap, uint32_t is_wait)
7806 {
7807 	int i;
7808 	uint32_t config_param0 = 0;
7809 	uint32_t config_param1 = 0;
7810 	uint32_t config_param2 = 0;
7811 	uint32_t config_param3 = 0;
7812 	struct dp_pdev *pdev =
7813 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7814 						   pdev_id);
7815 
7816 	if (!pdev)
7817 		return QDF_STATUS_E_FAILURE;
7818 
7819 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
7820 	config_param0 |= (1 << (cap + 1));
7821 
7822 	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++) {
7823 		config_param1 |= (1 << i);
7824 	}
7825 
7826 	config_param2 |= (mac_addr[0] & 0x000000ff);
7827 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
7828 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
7829 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
7830 
7831 	config_param3 |= (mac_addr[4] & 0x000000ff);
7832 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
7833 
7834 	if (is_wait) {
7835 		qdf_event_reset(&pdev->fw_peer_stats_event);
7836 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
7837 					  config_param0, config_param1,
7838 					  config_param2, config_param3,
7839 					  0, 1, 0);
7840 		qdf_wait_single_event(&pdev->fw_peer_stats_event,
7841 				      DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
7842 	} else {
7843 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
7844 					  config_param0, config_param1,
7845 					  config_param2, config_param3,
7846 					  0, 0, 0);
7847 	}
7848 
7849 	return QDF_STATUS_SUCCESS;
7850 
7851 }
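
/*
 * Example: for peer MAC 00:03:7f:12:34:56 the address bytes are packed
 * little-endian into the two config words:
 *
 *	config_param2 = 0x127f0300	(mac[3] mac[2] mac[1] mac[0])
 *	config_param3 = 0x00005634	(mac[5] mac[4])
 *
 * and cap = HTT_PEER_STATS_REQ_MODE_QUERY_TQM (2) sets bit 3 of
 * config_param0 via (1 << (cap + 1)).
 */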
7852 
/* This struct definition will be removed from here
 * once it gets added to the FW headers
 */
struct httstats_cmd_req {
	uint32_t config_param0;
	uint32_t config_param1;
	uint32_t config_param2;
	uint32_t config_param3;
	int cookie;
	u_int8_t stats_id;
};
7863 
7864 /*
 * dp_get_htt_stats: function to process the htt stats request
 * @soc: DP soc handle
 * @pdev_id: id of pdev handle
 * @data: pointer to request data
 * @data_len: length of request data
 *
 * Return: QDF_STATUS
7872  */
7873 static QDF_STATUS
7874 dp_get_htt_stats(struct cdp_soc_t *soc, uint8_t pdev_id, void *data,
7875 		 uint32_t data_len)
7876 {
7877 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
7878 	struct dp_pdev *pdev =
7879 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
7880 						   pdev_id);
7881 
7882 	if (!pdev)
7883 		return QDF_STATUS_E_FAILURE;
7884 
7885 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
7886 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
7887 				req->config_param0, req->config_param1,
7888 				req->config_param2, req->config_param3,
7889 				req->cookie, 0, 0);
7890 
7891 	return QDF_STATUS_SUCCESS;
7892 }
7893 
7894 /**
7895  * dp_set_pdev_tidmap_prty_wifi3(): update tidmap priority in pdev
7896  * @pdev: DP_PDEV handle
7897  * @prio: tidmap priority value passed by the user
7898  *
7899  * Return: QDF_STATUS_SUCCESS on success
7900  */
7901 static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct dp_pdev *pdev,
7902 						uint8_t prio)
7903 {
7904 	struct dp_soc *soc = pdev->soc;
7905 
7906 	soc->tidmap_prty = prio;
7907 
7908 	hal_tx_set_tidmap_prty(soc->hal_soc, prio);
7909 	return QDF_STATUS_SUCCESS;
7910 }
7911 
7912 /*
7913  * dp_get_peer_param: function to get parameters in peer
7914  * @cdp_soc: DP soc handle
7915  * @vdev_id: id of vdev handle
7916  * @peer_mac: peer mac address
 * @param: parameter type to be retrieved
 * @val: address of buffer to hold the value
 *
 * Return: QDF_STATUS
7921  */
7922 static QDF_STATUS dp_get_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
7923 				    uint8_t *peer_mac,
7924 				    enum cdp_peer_param_type param,
7925 				    cdp_config_param_type *val)
7926 {
7927 	return QDF_STATUS_SUCCESS;
7928 }
7929 
7930 /*
7931  * dp_set_peer_param: function to set parameters in peer
7932  * @cdp_soc: DP soc handle
7933  * @vdev_id: id of vdev handle
7934  * @peer_mac: peer mac address
7935  * @param: parameter type to be set
7936  * @val: value of parameter to be set
7937  *
7938  * Return: 0 for success. nonzero for failure.
7939  */
7940 static QDF_STATUS dp_set_peer_param(struct cdp_soc_t *cdp_soc,  uint8_t vdev_id,
7941 				    uint8_t *peer_mac,
7942 				    enum cdp_peer_param_type param,
7943 				    cdp_config_param_type val)
7944 {
7945 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
7946 						      peer_mac, 0, vdev_id);
7947 
7948 	if (!peer || peer->delete_in_progress)
7949 		goto fail;
7950 
7951 	switch (param) {
7952 	case CDP_CONFIG_NAWDS:
7953 		peer->nawds_enabled = val.cdp_peer_param_nawds;
7954 		break;
7955 	case CDP_CONFIG_NAC:
7956 		peer->nac = !!(val.cdp_peer_param_nac);
7957 		break;
7958 	default:
7959 		break;
7960 	}
7961 
7962 fail:
7963 	if (peer)
7964 		dp_peer_unref_delete(peer);
7965 
7966 	return QDF_STATUS_SUCCESS;
7967 }
7968 
7969 /*
7970  * dp_get_pdev_param: function to get parameters from pdev
7971  * @cdp_soc: DP soc handle
7972  * @pdev_id: id of pdev handle
7973  * @param: parameter type to be get
7974  * @value : buffer for value
7975  *
7976  * Return: status
7977  */
7978 static QDF_STATUS dp_get_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
7979 				    enum cdp_pdev_param_type param,
7980 				    cdp_config_param_type *val)
7981 {
7982 	struct cdp_pdev *pdev = (struct cdp_pdev *)
7983 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
7984 						   pdev_id);
7985 	if (!pdev)
7986 		return QDF_STATUS_E_FAILURE;
7987 
7988 	switch (param) {
7989 	case CDP_CONFIG_VOW:
7990 		val->cdp_pdev_param_cfg_vow =
7991 				((struct dp_pdev *)pdev)->delay_stats_flag;
7992 		break;
7993 	case CDP_TX_PENDING:
7994 		val->cdp_pdev_param_tx_pending = dp_get_tx_pending(pdev);
7995 		break;
7996 	case CDP_FILTER_MCAST_DATA:
7997 		val->cdp_pdev_param_fltr_mcast =
7998 					dp_pdev_get_filter_mcast_data(pdev);
7999 		break;
8000 	case CDP_FILTER_NO_DATA:
8001 		val->cdp_pdev_param_fltr_none =
8002 					dp_pdev_get_filter_non_data(pdev);
8003 		break;
8004 	case CDP_FILTER_UCAST_DATA:
8005 		val->cdp_pdev_param_fltr_ucast =
8006 					dp_pdev_get_filter_ucast_data(pdev);
8007 		break;
8008 	default:
8009 		return QDF_STATUS_E_FAILURE;
8010 	}
8011 
8012 	return QDF_STATUS_SUCCESS;
8013 }
8014 
8015 /*
8016  * dp_set_pdev_param: function to set parameters in pdev
8017  * @cdp_soc: DP soc handle
8018  * @pdev_id: id of pdev handle
8019  * @param: parameter type to be set
8020  * @val: value of parameter to be set
8021  *
8022  * Return: 0 for success. nonzero for failure.
8023  */
8024 static QDF_STATUS dp_set_pdev_param(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
8025 				    enum cdp_pdev_param_type param,
8026 				    cdp_config_param_type val)
8027 {
8028 	struct dp_pdev *pdev =
8029 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
8030 						   pdev_id);
8031 	if (!pdev)
8032 		return QDF_STATUS_E_FAILURE;
8033 
8034 	switch (param) {
8035 	case CDP_CONFIG_TX_CAPTURE:
8036 		return dp_config_debug_sniffer(pdev,
8037 					       val.cdp_pdev_param_tx_capture);
8038 	case CDP_CONFIG_DEBUG_SNIFFER:
8039 		return dp_config_debug_sniffer(pdev,
8040 					       val.cdp_pdev_param_dbg_snf);
8041 	case CDP_CONFIG_BPR_ENABLE:
8042 		return dp_set_bpr_enable(pdev, val.cdp_pdev_param_bpr_enable);
8043 	case CDP_CONFIG_PRIMARY_RADIO:
8044 		pdev->is_primary = val.cdp_pdev_param_primary_radio;
8045 		break;
8046 	case CDP_CONFIG_CAPTURE_LATENCY:
8047 		pdev->latency_capture_enable = val.cdp_pdev_param_cptr_latcy;
8048 		break;
8049 	case CDP_INGRESS_STATS:
8050 		dp_pdev_tid_stats_ingress_inc(pdev,
8051 					      val.cdp_pdev_param_ingrs_stats);
8052 		break;
8053 	case CDP_OSIF_DROP:
8054 		dp_pdev_tid_stats_osif_drop(pdev,
8055 					    val.cdp_pdev_param_osif_drop);
8056 		break;
8057 	case CDP_CONFIG_ENH_RX_CAPTURE:
8058 		return dp_config_enh_rx_capture(pdev,
8059 						val.cdp_pdev_param_en_rx_cap);
8060 	case CDP_CONFIG_ENH_TX_CAPTURE:
8061 		return dp_config_enh_tx_capture(pdev,
8062 						val.cdp_pdev_param_en_tx_cap);
8063 	case CDP_CONFIG_HMMC_TID_OVERRIDE:
8064 		pdev->hmmc_tid_override_en = val.cdp_pdev_param_hmmc_tid_ovrd;
8065 		break;
8066 	case CDP_CONFIG_HMMC_TID_VALUE:
8067 		pdev->hmmc_tid = val.cdp_pdev_param_hmmc_tid;
8068 		break;
8069 	case CDP_CHAN_NOISE_FLOOR:
8070 		pdev->chan_noise_floor = val.cdp_pdev_param_chn_noise_flr;
8071 		break;
8072 	case CDP_TIDMAP_PRTY:
8073 		dp_set_pdev_tidmap_prty_wifi3(pdev,
8074 					      val.cdp_pdev_param_tidmap_prty);
8075 		break;
8076 	case CDP_FILTER_NEIGH_PEERS:
8077 		dp_set_filter_neigh_peers(pdev,
8078 					  val.cdp_pdev_param_fltr_neigh_peers);
8079 		break;
8080 	case CDP_MONITOR_CHANNEL:
8081 		pdev->mon_chan_num = val.cdp_pdev_param_monitor_chan;
8082 		break;
8083 	case CDP_MONITOR_FREQUENCY:
8084 		pdev->mon_chan_freq = val.cdp_pdev_param_mon_freq;
8085 		break;
8086 	case CDP_CONFIG_BSS_COLOR:
8087 		dp_mon_set_bsscolor(pdev, val.cdp_pdev_param_bss_color);
8088 		break;
8089 	default:
8090 		return QDF_STATUS_E_INVAL;
8091 	}
8092 	return QDF_STATUS_SUCCESS;
8093 }
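
/*
 * Example: enabling BPR on pdev 0 through the cdp_config_param_type
 * union, as dp_set_pdev_param() expects it (a minimal sketch; error
 * handling elided and cdp_soc assumed valid):
 *
 *	cdp_config_param_type val = {0};
 *
 *	val.cdp_pdev_param_bpr_enable = CDP_BPR_ENABLE;
 *	dp_set_pdev_param(cdp_soc, 0, CDP_CONFIG_BPR_ENABLE, val);
 */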
8094 
8095 /*
8096  * dp_calculate_delay_stats: function to get rx delay stats
8097  * @cdp_soc: DP soc handle
8098  * @vdev_id: id of DP vdev handle
8099  * @nbuf: skb
8100  *
8101  * Return: QDF_STATUS
8102  */
8103 static QDF_STATUS
8104 dp_calculate_delay_stats(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
8105 			 qdf_nbuf_t nbuf)
8106 {
8107 	struct dp_vdev *vdev =
8108 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)cdp_soc,
8109 						   vdev_id);
	if (vdev) {
		dp_rx_compute_delay(vdev, nbuf);
		return QDF_STATUS_SUCCESS;
	}

	return QDF_STATUS_E_FAILURE;
8116 }
8117 
8118 /*
8119  * dp_get_vdev_param: function to get parameters from vdev
8120  * @cdp_soc : DP soc handle
8121  * @vdev_id: id of DP vdev handle
8122  * @param: parameter type to get value
8123  * @val: buffer address
8124  *
8125  * return: status
8126  */
8127 static QDF_STATUS dp_get_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
8128 				    enum cdp_vdev_param_type param,
8129 				    cdp_config_param_type *val)
8130 {
8131 	struct dp_vdev *vdev =
8132 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)cdp_soc,
8133 						   vdev_id);
8134 	if (!vdev)
8135 		return QDF_STATUS_E_FAILURE;
8136 
8137 	switch (param) {
8138 	case CDP_ENABLE_WDS:
8139 		val->cdp_vdev_param_wds = vdev->wds_enabled;
8140 		break;
8141 	case CDP_ENABLE_MEC:
8142 		val->cdp_vdev_param_mec = vdev->mec_enabled;
8143 		break;
8144 	case CDP_ENABLE_DA_WAR:
8145 		val->cdp_vdev_param_da_war = vdev->pdev->soc->da_war_enabled;
8146 		break;
8147 	default:
8148 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "invalid param value %d\n", param);
8151 		return QDF_STATUS_E_FAILURE;
8152 	}
8153 
8154 	return QDF_STATUS_SUCCESS;
8155 }
8156 
8157 /*
8158  * dp_set_vdev_param: function to set parameters in vdev
8159  * @cdp_soc : DP soc handle
8160  * @vdev_id: id of DP vdev handle
 * @param: parameter type to be set
 * @val: value of parameter to be set
 *
 * Return: QDF_STATUS
8165  */
8166 static QDF_STATUS
8167 dp_set_vdev_param(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
8168 		  enum cdp_vdev_param_type param, cdp_config_param_type val)
8169 {
8170 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
8171 	struct dp_vdev *vdev =
8172 		dp_get_vdev_from_soc_vdev_id_wifi3(dsoc, vdev_id);
8173 	uint32_t var = 0;
8174 
8175 	if (!vdev)
8176 		return QDF_STATUS_E_FAILURE;
8177 
8178 	switch (param) {
8179 	case CDP_ENABLE_WDS:
8180 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8181 			  "wds_enable %d for vdev(%pK) id(%d)\n",
8182 			  val.cdp_vdev_param_wds, vdev, vdev->vdev_id);
8183 		vdev->wds_enabled = val.cdp_vdev_param_wds;
8184 		break;
8185 	case CDP_ENABLE_MEC:
8186 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8187 			  "mec_enable %d for vdev(%pK) id(%d)\n",
8188 			  val.cdp_vdev_param_mec, vdev, vdev->vdev_id);
8189 		vdev->mec_enabled = val.cdp_vdev_param_mec;
8190 		break;
8191 	case CDP_ENABLE_DA_WAR:
8192 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8193 			  "da_war_enable %d for vdev(%pK) id(%d)\n",
8194 			  val.cdp_vdev_param_da_war, vdev, vdev->vdev_id);
8195 		vdev->pdev->soc->da_war_enabled = val.cdp_vdev_param_da_war;
8196 		dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
8197 					     vdev->pdev->soc));
8198 		break;
8199 	case CDP_ENABLE_NAWDS:
8200 		vdev->nawds_enabled = val.cdp_vdev_param_nawds;
8201 		break;
8202 	case CDP_ENABLE_MCAST_EN:
8203 		vdev->mcast_enhancement_en = val.cdp_vdev_param_mcast_en;
8204 		break;
8205 	case CDP_ENABLE_PROXYSTA:
8206 		vdev->proxysta_vdev = val.cdp_vdev_param_proxysta;
8207 		break;
8208 	case CDP_UPDATE_TDLS_FLAGS:
8209 		vdev->tdls_link_connected = val.cdp_vdev_param_tdls_flags;
8210 		break;
8211 	case CDP_CFG_WDS_AGING_TIMER:
8212 		var = val.cdp_vdev_param_aging_tmr;
8213 		if (!var)
8214 			qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
8215 		else if (var != vdev->wds_aging_timer_val)
8216 			qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, var);
8217 
8218 		vdev->wds_aging_timer_val = var;
8219 		break;
8220 	case CDP_ENABLE_AP_BRIDGE:
8221 		if (wlan_op_mode_sta != vdev->opmode)
8222 			vdev->ap_bridge_enabled = val.cdp_vdev_param_ap_brdg_en;
8223 		else
8224 			vdev->ap_bridge_enabled = false;
8225 		break;
8226 	case CDP_ENABLE_CIPHER:
8227 		vdev->sec_type = val.cdp_vdev_param_cipher_en;
8228 		break;
8229 	case CDP_ENABLE_QWRAP_ISOLATION:
8230 		vdev->isolation_vdev = val.cdp_vdev_param_qwrap_isolation;
8231 		break;
8232 	case CDP_UPDATE_MULTIPASS:
8233 		vdev->multipass_en = val.cdp_vdev_param_update_multipass;
8234 		break;
8235 	case CDP_TX_ENCAP_TYPE:
8236 		vdev->tx_encap_type = val.cdp_vdev_param_tx_encap;
8237 		break;
8238 	case CDP_RX_DECAP_TYPE:
8239 		vdev->rx_decap_type = val.cdp_vdev_param_rx_decap;
8240 		break;
8241 	case CDP_TID_VDEV_PRTY:
8242 		vdev->tidmap_prty = val.cdp_vdev_param_tidmap_prty;
8243 		break;
8244 	case CDP_TIDMAP_TBL_ID:
8245 		vdev->tidmap_tbl_id = val.cdp_vdev_param_tidmap_tbl_id;
8246 		break;
8247 #ifdef MESH_MODE_SUPPORT
8248 	case CDP_MESH_RX_FILTER:
8249 		dp_peer_set_mesh_rx_filter((struct cdp_vdev *)vdev,
8250 					   val.cdp_vdev_param_mesh_rx_filter);
8251 		break;
8252 	case CDP_MESH_MODE:
8253 		dp_peer_set_mesh_mode((struct cdp_vdev *)vdev,
8254 				      val.cdp_vdev_param_mesh_mode);
8255 		break;
8256 #endif
8257 	default:
8258 		break;
8259 	}
8260 
8261 	dp_tx_vdev_update_search_flags((struct dp_vdev *)vdev);
8262 
8263 	return QDF_STATUS_SUCCESS;
8264 }
8265 
8266 /*
8267  * dp_set_psoc_param: function to set parameters in psoc
8268  * @cdp_soc : DP soc handle
8269  * @param: parameter type to be set
8270  * @val: value of parameter to be set
8271  *
8272  * return: QDF_STATUS
8273  */
8274 static QDF_STATUS
8275 dp_set_psoc_param(struct cdp_soc_t *cdp_soc,
8276 		  enum cdp_psoc_param_type param, cdp_config_param_type val)
8277 {
8278 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
8279 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = soc->wlan_cfg_ctx;
8280 
8281 	switch (param) {
8282 	case CDP_ENABLE_RATE_STATS:
8283 		soc->wlanstats_enabled = val.cdp_psoc_param_en_rate_stats;
8284 		break;
8285 	case CDP_SET_NSS_CFG:
8286 		wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx,
8287 					    val.cdp_psoc_param_en_nss_cfg);
8288 		/*
8289 		 * TODO: masked out based on the per offloaded radio
8290 		 */
8291 		switch (val.cdp_psoc_param_en_nss_cfg) {
8292 		case dp_nss_cfg_default:
8293 			break;
8294 		case dp_nss_cfg_first_radio:
8295 		/*
8296 		 * This configuration is valid for single band radio which
8297 		 * is also NSS offload.
8298 		 */
8299 		case dp_nss_cfg_dbdc:
8300 		case dp_nss_cfg_dbtc:
8301 			wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
8302 			wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
8303 			wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
8304 			wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
8305 			break;
8306 		default:
8307 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8308 				  "Invalid offload config %d",
8309 				  val.cdp_psoc_param_en_nss_cfg);
8310 		}
8311 
8312 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
8313 			  FL("nss-wifi<0> nss config is enabled"));
8314 		break;
8315 
8316 	default:
8317 		break;
8318 	}
8319 
8320 	return QDF_STATUS_SUCCESS;
8321 }
8322 
8323 /*
8324  * dp_get_psoc_param: function to get parameters in soc
8325  * @cdp_soc : DP soc handle
 * @param: parameter type to be retrieved
8327  * @val: address of buffer
8328  *
8329  * return: status
8330  */
8331 static QDF_STATUS dp_get_psoc_param(struct cdp_soc_t *cdp_soc,
8332 				    enum cdp_psoc_param_type param,
8333 				    cdp_config_param_type *val)
8334 {
8335 	return QDF_STATUS_SUCCESS;
8336 }
8337 
8338 /**
8339  * dp_peer_update_pkt_capture_params: Set Rx & Tx Capture flags for a peer
8340  * @soc: DP_SOC handle
8341  * @pdev_id: id of DP_PDEV handle
8342  * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode
8343  * @is_tx_pkt_cap_enable: enable/disable Tx packet capture in monitor mode
8344  * @peer_mac: MAC address for which the above need to be enabled/disabled
8345  *
 * Return: QDF_STATUS_SUCCESS if the capture flags are updated for the peer,
 *	   QDF_STATUS_E_FAILURE otherwise
8347  */
8348 QDF_STATUS
8349 dp_peer_update_pkt_capture_params(ol_txrx_soc_handle soc,
8350 				  uint8_t pdev_id,
8351 				  bool is_rx_pkt_cap_enable,
8352 				  bool is_tx_pkt_cap_enable,
8353 				  uint8_t *peer_mac)
8354 {
8355 	struct dp_peer *peer;
8356 	struct dp_pdev *pdev =
8357 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8358 						   pdev_id);
8359 
8360 	if (!pdev)
8361 		return QDF_STATUS_E_FAILURE;
8362 
8363 	peer = (struct dp_peer *)dp_find_peer_by_addr((struct cdp_pdev *)pdev,
8364 						      peer_mac);
8365 	if (!peer) {
8366 		dp_err("Invalid Peer");
8367 		return QDF_STATUS_E_FAILURE;
8368 	}
8369 
8370 	dp_peer_set_rx_capture_enabled(peer, is_rx_pkt_cap_enable);
8371 	dp_peer_set_tx_capture_enabled(peer, is_tx_pkt_cap_enable);
8372 
8373 	return QDF_STATUS_SUCCESS;
8374 }
8375 
8376 /*
8377  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
8378  * @soc: DP_SOC handle
8379  * @vdev_id: id of DP_VDEV handle
8380  * @map_id:ID of map that needs to be updated
8381  *
8382  * Return: QDF_STATUS
8383  */
8384 static QDF_STATUS dp_set_vdev_dscp_tid_map_wifi3(ol_txrx_soc_handle soc,
8385 						 uint8_t vdev_id,
8386 						 uint8_t map_id)
8387 {
8388 	struct dp_vdev *vdev =
8389 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
8390 						   vdev_id);
8391 	if (vdev) {
8392 		vdev->dscp_tid_map_id = map_id;
8393 		return QDF_STATUS_SUCCESS;
8394 	}
8395 
8396 	return QDF_STATUS_E_FAILURE;
8397 }
8398 
8399 #ifdef DP_RATETABLE_SUPPORT
8400 static int dp_txrx_get_ratekbps(int preamb, int mcs,
8401 				int htflag, int gintval)
8402 {
8403 	uint32_t rix;
8404 	uint16_t ratecode;
8405 
8406 	return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1,
8407 			       (uint8_t)preamb, 1, &rix, &ratecode);
8408 }
8409 #else
8410 static int dp_txrx_get_ratekbps(int preamb, int mcs,
8411 				int htflag, int gintval)
8412 {
8413 	return 0;
8414 }
8415 #endif
8416 
8417 /* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
8418  * @soc: DP soc handle
8419  * @pdev_id: id of DP pdev handle
8420  * @pdev_stats: buffer to copy to
8421  *
8422  * return : status success/failure
8423  */
8424 static QDF_STATUS
8425 dp_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
8426 		       struct cdp_pdev_stats *pdev_stats)
8427 {
8428 	struct dp_pdev *pdev =
8429 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8430 						   pdev_id);
8431 	if (!pdev)
8432 		return QDF_STATUS_E_FAILURE;
8433 
8434 	dp_aggregate_pdev_stats(pdev);
8435 
8436 	qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats));
8437 	return QDF_STATUS_SUCCESS;
8438 }
8439 
8440 /* dp_txrx_update_vdev_me_stats(): Update vdev ME stats sent from CDP
8441  * @vdev: DP vdev handle
8442  * @buf: buffer containing specific stats structure
8443  *
8444  * Returns: void
8445  */
8446 static void dp_txrx_update_vdev_me_stats(struct dp_vdev *vdev,
8447 					 void *buf)
8448 {
8449 	struct cdp_tx_ingress_stats *host_stats = NULL;
8450 
8451 	if (!buf) {
8452 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8453 			  "Invalid host stats buf");
8454 		return;
8455 	}
8456 	host_stats = (struct cdp_tx_ingress_stats *)buf;
8457 
8458 	DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt,
8459 			 host_stats->mcast_en.mcast_pkt.num,
8460 			 host_stats->mcast_en.mcast_pkt.bytes);
8461 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error,
8462 		     host_stats->mcast_en.dropped_map_error);
8463 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_self_mac,
8464 		     host_stats->mcast_en.dropped_self_mac);
8465 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_send_fail,
8466 		     host_stats->mcast_en.dropped_send_fail);
8467 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast,
8468 		     host_stats->mcast_en.ucast);
8469 	DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc,
8470 		     host_stats->mcast_en.fail_seg_alloc);
8471 	DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail,
8472 		     host_stats->mcast_en.clone_fail);
8473 }
8474 
8475 /* dp_txrx_update_vdev_host_stats(): Update stats sent through CDP
8476  * @soc: DP soc handle
8477  * @vdev_id: id of DP vdev handle
8478  * @buf: buffer containing specific stats structure
8479  * @stats_id: stats type
8480  *
8481  * Returns: QDF_STATUS
8482  */
8483 static QDF_STATUS dp_txrx_update_vdev_host_stats(struct cdp_soc_t *soc,
8484 						 uint8_t vdev_id,
8485 						 void *buf,
8486 						 uint16_t stats_id)
8487 {
8488 	struct dp_vdev *vdev =
8489 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
8490 						   vdev_id);
8491 	if (!vdev) {
8492 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8493 			  "Invalid vdev handle");
8494 		return QDF_STATUS_E_FAILURE;
8495 	}
8496 
8497 	switch (stats_id) {
8498 	case DP_VDEV_STATS_PKT_CNT_ONLY:
8499 		break;
8500 	case DP_VDEV_STATS_TX_ME:
8501 		dp_txrx_update_vdev_me_stats(vdev, buf);
8502 		break;
8503 	default:
8504 		qdf_info("Invalid stats_id %d", stats_id);
8505 		break;
8506 	}
8507 
8508 	return QDF_STATUS_SUCCESS;
8509 }
8510 
8511 /* dp_txrx_get_peer_stats - will return cdp_peer_stats
8512  * @soc: soc handle
8513  * @vdev_id: id of vdev handle
8514  * @peer_mac: mac of DP_PEER handle
8515  * @peer_stats: buffer to copy to
8516  * return : status success/failure
8517  */
8518 static QDF_STATUS
8519 dp_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
8520 		       uint8_t *peer_mac, struct cdp_peer_stats *peer_stats)
8521 {
8522 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8523 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
8524 						       peer_mac, 0, vdev_id);
8525 
	if (!peer || peer->delete_in_progress)
		status = QDF_STATUS_E_FAILURE;
	else
		qdf_mem_copy(peer_stats, &peer->stats,
			     sizeof(struct cdp_peer_stats));
8531 
8532 	if (peer)
8533 		dp_peer_unref_delete(peer);
8534 
8535 	return status;
8536 }
8537 
8538 /* dp_txrx_get_peer_stats_param - will return specified cdp_peer_stats
 * @soc: soc handle
 * @vdev_id: vdev_id of vdev object
 * @peer_mac: mac address of the peer
 * @type: enum of required stats
 * @buf: buffer to hold the value
 *
 * Return: status success/failure
8545  */
8546 static QDF_STATUS
8547 dp_txrx_get_peer_stats_param(struct cdp_soc_t *soc, uint8_t vdev_id,
8548 			     uint8_t *peer_mac, enum cdp_peer_stats_type type,
8549 			     cdp_peer_stats_param_t *buf)
8550 {
8551 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
8552 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
8553 						      peer_mac, 0, vdev_id);
8554 
8555 	if (!peer || peer->delete_in_progress) {
8556 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8557 			  "Invalid Peer for Mac %pM", peer_mac);
8558 		ret = QDF_STATUS_E_FAILURE;
8559 	} else if (type < cdp_peer_stats_max) {
8560 		switch (type) {
8561 		case cdp_peer_tx_ucast:
8562 			buf->tx_ucast = peer->stats.tx.ucast;
8563 			break;
8564 		case cdp_peer_tx_mcast:
8565 			buf->tx_mcast = peer->stats.tx.mcast;
8566 			break;
8567 		case cdp_peer_tx_rate:
8568 			buf->tx_rate = peer->stats.tx.tx_rate;
8569 			break;
8570 		case cdp_peer_tx_last_tx_rate:
8571 			buf->last_tx_rate = peer->stats.tx.last_tx_rate;
8572 			break;
8573 		case cdp_peer_tx_inactive_time:
8574 			buf->tx_inactive_time = peer->stats.tx.inactive_time;
8575 			break;
8576 		case cdp_peer_tx_ratecode:
8577 			buf->tx_ratecode = peer->stats.tx.tx_ratecode;
8578 			break;
8579 		case cdp_peer_tx_flags:
8580 			buf->tx_flags = peer->stats.tx.tx_flags;
8581 			break;
8582 		case cdp_peer_tx_power:
8583 			buf->tx_power = peer->stats.tx.tx_power;
8584 			break;
8585 		case cdp_peer_rx_rate:
8586 			buf->rx_rate = peer->stats.rx.rx_rate;
8587 			break;
8588 		case cdp_peer_rx_last_rx_rate:
8589 			buf->last_rx_rate = peer->stats.rx.last_rx_rate;
8590 			break;
8591 		case cdp_peer_rx_ratecode:
8592 			buf->rx_ratecode = peer->stats.rx.rx_ratecode;
8593 			break;
8594 		case cdp_peer_rx_ucast:
8595 			buf->rx_ucast = peer->stats.rx.unicast;
8596 			break;
8597 		case cdp_peer_rx_flags:
8598 			buf->rx_flags = peer->stats.rx.rx_flags;
8599 			break;
8600 		case cdp_peer_rx_avg_rssi:
8601 			buf->rx_avg_rssi = peer->stats.rx.avg_rssi;
8602 			break;
8603 		default:
8604 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8605 				  "Invalid value");
8606 			ret = QDF_STATUS_E_FAILURE;
8607 			break;
8608 		}
8609 	} else {
8610 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8611 			  "Invalid value");
8612 		ret = QDF_STATUS_E_FAILURE;
8613 	}
8614 
8615 	if (peer)
8616 		dp_peer_unref_delete(peer);
8617 
8618 	return ret;
8619 }
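
/*
 * Example: reading a single peer stat instead of copying the full
 * cdp_peer_stats (a minimal sketch; soc, vdev_id, peer_mac and tx_rate
 * are illustrative caller-side names):
 *
 *	cdp_peer_stats_param_t buf = {0};
 *
 *	if (dp_txrx_get_peer_stats_param(soc, vdev_id, peer_mac,
 *					 cdp_peer_tx_rate, &buf) ==
 *	    QDF_STATUS_SUCCESS)
 *		tx_rate = buf.tx_rate;
 */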
8620 
8621 /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
8622  * @soc: soc handle
8623  * @vdev_id: id of vdev handle
8624  * @peer_mac: mac of DP_PEER handle
8625  *
8626  * return : QDF_STATUS
8627  */
8628 static QDF_STATUS
8629 dp_txrx_reset_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
8630 			 uint8_t *peer_mac)
8631 {
8632 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8633 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
8634 						       peer_mac, 0, vdev_id);
8635 
8636 	if (!peer || peer->delete_in_progress) {
8637 		status = QDF_STATUS_E_FAILURE;
8638 		goto fail;
8639 	}
8640 
8641 	qdf_mem_zero(&peer->stats, sizeof(peer->stats));
8642 
8643 fail:
8644 	if (peer)
8645 		dp_peer_unref_delete(peer);
8646 
8647 	return status;
8648 }
8649 
8650 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
 * @soc: DP soc handle
 * @vdev_id: id of DP vdev handle
 * @buf: buffer for vdev stats
 * @is_aggregate: whether to aggregate per-peer stats into the buffer
8653  *
8654  * return : int
8655  */
8656 static int dp_txrx_get_vdev_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
8657 				   void *buf, bool is_aggregate)
8658 {
8659 	struct cdp_vdev_stats *vdev_stats;
8660 	struct dp_pdev *pdev;
8661 	struct dp_vdev *vdev =
8662 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
8663 						   vdev_id);
8664 
8665 	if (!vdev)
8666 		return 1;
8667 
8668 	pdev = vdev->pdev;
8669 	if (!pdev)
8670 		return 1;
8671 
8672 	vdev_stats = (struct cdp_vdev_stats *)buf;
8673 
8674 	if (is_aggregate) {
8675 		qdf_spin_lock_bh(&((struct dp_soc *)soc)->peer_ref_mutex);
8676 		dp_aggregate_vdev_stats(vdev, buf);
8677 		qdf_spin_unlock_bh(&((struct dp_soc *)soc)->peer_ref_mutex);
8678 	} else {
8679 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
8680 	}
8681 
8682 	return 0;
8683 }
8684 
8685 /*
 * dp_get_total_per(): get total packet error rate (PER)
8687  * @soc: DP soc handle
8688  * @pdev_id: id of DP_PDEV handle
8689  *
8690  * Return: % error rate using retries per packet and success packets
8691  */
8692 static int dp_get_total_per(struct cdp_soc_t *soc, uint8_t pdev_id)
8693 {
8694 	struct dp_pdev *pdev =
8695 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8696 						   pdev_id);
8697 
8698 	if (!pdev)
8699 		return 0;
8700 
8701 	dp_aggregate_pdev_stats(pdev);
8702 	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
8703 		return 0;
8704 	return ((pdev->stats.tx.retries * 100) /
8705 		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
8706 }
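
/*
 * Example: with pdev->stats.tx.tx_success.num = 900 and
 * pdev->stats.tx.retries = 100, dp_get_total_per() returns
 * (100 * 100) / (900 + 100) = 10, i.e. a 10% retry-based error rate.
 */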
8707 
8708 /*
8709  * dp_txrx_stats_publish(): publish pdev stats into a buffer
8710  * @soc: DP soc handle
8711  * @pdev_id: id of DP_PDEV handle
8712  * @buf: to hold pdev_stats
8713  *
8714  * Return: int
8715  */
8716 static int
8717 dp_txrx_stats_publish(struct cdp_soc_t *soc, uint8_t pdev_id,
8718 		      struct cdp_stats_extd *buf)
8719 {
8720 	struct cdp_txrx_stats_req req = {0,};
8721 	struct dp_pdev *pdev =
8722 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
8723 						   pdev_id);
8724 
8725 	if (!pdev)
8726 		return TXRX_STATS_LEVEL_OFF;
8727 
8728 	dp_aggregate_pdev_stats(pdev);
8729 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
8730 	req.cookie_val = 1;
8731 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
8732 				req.param1, req.param2, req.param3, 0,
8733 				req.cookie_val, 0);
8734 
8735 	msleep(DP_MAX_SLEEP_TIME);
8736 
8737 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
8738 	req.cookie_val = 1;
8739 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
8740 				req.param1, req.param2, req.param3, 0,
8741 				req.cookie_val, 0);
8742 
8743 	msleep(DP_MAX_SLEEP_TIME);
8744 	qdf_mem_copy(buf, &pdev->stats, sizeof(struct cdp_pdev_stats));
8745 
8746 	return TXRX_STATS_LEVEL;
8747 }
8748 
8749 /**
8750  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
8751  * @soc: soc handle
8752  * @pdev_id: id of DP_PDEV handle
8753  * @map_id: ID of map that needs to be updated
8754  * @tos: index value in map
8755  * @tid: tid value passed by the user
8756  *
8757  * Return: QDF_STATUS
8758  */
8759 static QDF_STATUS
8760 dp_set_pdev_dscp_tid_map_wifi3(struct cdp_soc_t *soc_handle,
8761 			       uint8_t pdev_id,
8762 			       uint8_t map_id,
8763 			       uint8_t tos, uint8_t tid)
8764 {
8765 	uint8_t dscp;
8766 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8767 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
8768 
8769 	if (!pdev)
8770 		return QDF_STATUS_E_FAILURE;
8771 
8772 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
8773 	pdev->dscp_tid_map[map_id][dscp] = tid;
8774 
8775 	if (map_id < soc->num_hw_dscp_tid_map)
8776 		hal_tx_update_dscp_tid(soc->hal_soc, tid,
8777 				       map_id, dscp);
8778 	else
8779 		return QDF_STATUS_E_FAILURE;
8780 
8781 	return QDF_STATUS_SUCCESS;
8782 }
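
/*
 * Example: @tos carries the full IPv4 TOS byte and the DSCP is taken
 * from its upper six bits. Assuming the usual definitions
 * DP_IP_DSCP_SHIFT = 2 and DP_IP_DSCP_MASK = 0x3f, tos = 0xb8
 * (DSCP EF) yields dscp = (0xb8 >> 2) & 0x3f = 0x2e, so @tid is
 * written to pdev->dscp_tid_map[map_id][0x2e].
 */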
8783 
8784 /**
8785  * dp_fw_stats_process(): Process TxRX FW stats request
8786  * @vdev_handle: DP VDEV handle
8787  * @req: stats request
8788  *
8789  * return: int
8790  */
8791 static int dp_fw_stats_process(struct dp_vdev *vdev,
8792 			       struct cdp_txrx_stats_req *req)
8793 {
8794 	struct dp_pdev *pdev = NULL;
8795 	uint32_t stats = req->stats;
8796 	uint8_t mac_id = req->mac_id;
8797 
8798 	if (!vdev) {
8799 		DP_TRACE(NONE, "VDEV not found");
8800 		return 1;
8801 	}
8802 	pdev = vdev->pdev;
8803 
8804 	/*
	 * For the HTT_DBG_EXT_STATS_RESET command, the FW expects
	 * config_param0 to config_param3 to be filled according to
	 * the rule below:
8807 	 *
8808 	 * PARAM:
8809 	 *   - config_param0 : start_offset (stats type)
8810 	 *   - config_param1 : stats bmask from start offset
8811 	 *   - config_param2 : stats bmask from start offset + 32
8812 	 *   - config_param3 : stats bmask from start offset + 64
8813 	 */
8814 	if (req->stats == CDP_TXRX_STATS_0) {
8815 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
8816 		req->param1 = 0xFFFFFFFF;
8817 		req->param2 = 0xFFFFFFFF;
8818 		req->param3 = 0xFFFFFFFF;
8819 	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
8820 		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
8821 	}
8822 
8823 	return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
8824 				req->param1, req->param2, req->param3,
8825 				0, 0, mac_id);
8826 }
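
/*
 * Example: per the rule above, a request that resets only the first
 * stat of the PDEV_TX group via HTT_DBG_EXT_STATS_RESET would carry
 * (a hedged sketch; the exact bit layout is owned by the FW HTT
 * definitions):
 *
 *	req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
 *	req->param1 = 0x1;
 *	req->param2 = 0;
 *	req->param3 = 0;
 */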
8827 
8828 /**
8829  * dp_txrx_stats_request - function to map to firmware and host stats
8830  * @soc: soc handle
8831  * @vdev_id: virtual device ID
8832  * @req: stats request
8833  *
8834  * Return: QDF_STATUS
8835  */
8836 static
8837 QDF_STATUS dp_txrx_stats_request(struct cdp_soc_t *soc_handle,
8838 				 uint8_t vdev_id,
8839 				 struct cdp_txrx_stats_req *req)
8840 {
8841 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_handle);
8842 	int host_stats;
8843 	int fw_stats;
8844 	enum cdp_stats stats;
8845 	int num_stats;
8846 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc,
8847 								  vdev_id);
8848 
8849 	if (!vdev || !req) {
8850 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8851 				"Invalid vdev/req instance");
8852 		return QDF_STATUS_E_INVAL;
8853 	}
8854 
8855 	if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) {
8856 		dp_err("Invalid mac id request");
8857 		return QDF_STATUS_E_INVAL;
8858 	}
8859 
8860 	stats = req->stats;
8861 	if (stats >= CDP_TXRX_MAX_STATS)
8862 		return QDF_STATUS_E_INVAL;
8863 
8864 	/*
	 * DP_CURR_FW_STATS_AVAIL: number of FW stats currently available;
	 * it has to be updated whenever new FW HTT stats are added
8867 	 */
8868 	if (stats > CDP_TXRX_STATS_HTT_MAX)
8869 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
8870 
8871 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
8872 
8873 	if (stats >= num_stats) {
8874 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8875 			  "%s: Invalid stats option: %d", __func__, stats);
8876 		return QDF_STATUS_E_INVAL;
8877 	}
8878 
8879 	req->stats = stats;
8880 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
8881 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
8882 
8883 	dp_info("stats: %u fw_stats_type: %d host_stats: %d",
8884 		stats, fw_stats, host_stats);
8885 
8886 	if (fw_stats != TXRX_FW_STATS_INVALID) {
8887 		/* update request with FW stats type */
8888 		req->stats = fw_stats;
8889 		return dp_fw_stats_process(vdev, req);
8890 	}
8891 
8892 	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
8893 			(host_stats <= TXRX_HOST_STATS_MAX))
8894 		return dp_print_host_stats(vdev, req);
8895 	else
8896 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
8897 				"Wrong Input for TxRx Stats");
8898 
8899 	return QDF_STATUS_SUCCESS;
8900 }
8901 
8902 /*
 * dp_txrx_dump_stats() - Dump statistics
 * @psoc: CDP soc handle
 * @value: statistics option
 * @level: verbosity level
8905  */
8906 static QDF_STATUS dp_txrx_dump_stats(struct cdp_soc_t *psoc, uint16_t value,
8907 				     enum qdf_stats_verbosity_level level)
8908 {
8909 	struct dp_soc *soc =
8910 		(struct dp_soc *)psoc;
8911 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8912 
8913 	if (!soc) {
8914 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8915 			"%s: soc is NULL", __func__);
8916 		return QDF_STATUS_E_INVAL;
8917 	}
8918 
8919 	switch (value) {
8920 	case CDP_TXRX_PATH_STATS:
8921 		dp_txrx_path_stats(soc);
8922 		dp_print_soc_interrupt_stats(soc);
8923 		break;
8924 
8925 	case CDP_RX_RING_STATS:
8926 		dp_print_per_ring_stats(soc);
8927 		break;
8928 
8929 	case CDP_TXRX_TSO_STATS:
8930 		dp_print_tso_stats(soc, level);
8931 		break;
8932 
8933 	case CDP_DUMP_TX_FLOW_POOL_INFO:
8934 		if (level == QDF_STATS_VERBOSITY_LEVEL_HIGH)
8935 			cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
8936 		break;
8937 
8938 	case CDP_DP_NAPI_STATS:
8939 		dp_print_napi_stats(soc);
8940 		break;
8941 
8942 	case CDP_TXRX_DESC_STATS:
8943 		/* TODO: NOT IMPLEMENTED */
8944 		break;
8945 
8946 	default:
8947 		status = QDF_STATUS_E_INVAL;
8948 		break;
8949 	}
8950 
8951 	return status;
8952 
8953 }
8954 
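/*
 * Illustrative usage sketch, compiled out and not part of the original
 * source: dumping TX/RX path stats at high verbosity through the
 * display_stats hook that dp_ops_cmn (registered later in this file)
 * wires to dp_txrx_dump_stats() above.
 */
#if 0	/* illustrative sketch */
static void example_dump_path_stats(struct cdp_soc_t *soc)
{
	soc->ops->cmn_drv_ops->display_stats(soc, CDP_TXRX_PATH_STATS,
					     QDF_STATS_VERBOSITY_LEVEL_HIGH);
}
#endif	/* illustrative sketch */
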
8955 /**
8956  * dp_txrx_clear_dump_stats() - clear the statistics for the given option
8957  * @soc_hdl: soc handle
8958  * @pdev_id: pdev id, @value: stats option
8959  *
8960  * Return: QDF_STATUS
8961  */
8962 static
8963 QDF_STATUS dp_txrx_clear_dump_stats(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
8964 				    uint8_t value)
8965 {
8966 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
8967 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8968 
8969 	if (!soc) {
8970 		dp_err("%s: soc is NULL", __func__);
8971 		return QDF_STATUS_E_INVAL;
8972 	}
8973 
8974 	switch (value) {
8975 	case CDP_TXRX_TSO_STATS:
8976 		dp_txrx_clear_tso_stats(soc);
8977 		break;
8978 
8979 	default:
8980 		status = QDF_STATUS_E_INVAL;
8981 		break;
8982 	}
8983 
8984 	return status;
8985 }
8986 
8987 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
8988 /**
8989  * dp_update_flow_control_parameters() - API to store datapath
8990  *                            config parameters
8991  * @soc: soc handle
8992  * @params: datapath config parameters
8993  *
8994  * Return: void
8995  */
8996 static inline
8997 void dp_update_flow_control_parameters(struct dp_soc *soc,
8998 				struct cdp_config_params *params)
8999 {
9000 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
9001 					params->tx_flow_stop_queue_threshold;
9002 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
9003 					params->tx_flow_start_queue_offset;
9004 }
9005 #else
9006 static inline
9007 void dp_update_flow_control_parameters(struct dp_soc *soc,
9008 				struct cdp_config_params *params)
9009 {
9010 }
9011 #endif
9012 
9013 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
9014 /* Max packet limit for TX Comp packet loop (dp_tx_comp_handler) */
9015 #define DP_TX_COMP_LOOP_PKT_LIMIT_MAX 1024
9016 
9017 /* Max packet limit for RX REAP Loop (dp_rx_process) */
9018 #define DP_RX_REAP_LOOP_PKT_LIMIT_MAX 1024
9019 
9020 static
9021 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
9022 					struct cdp_config_params *params)
9023 {
9024 	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit =
9025 				params->tx_comp_loop_pkt_limit;
9026 
9027 	if (params->tx_comp_loop_pkt_limit < DP_TX_COMP_LOOP_PKT_LIMIT_MAX)
9028 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = true;
9029 	else
9030 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = false;
9031 
9032 	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit =
9033 				params->rx_reap_loop_pkt_limit;
9034 
9035 	if (params->rx_reap_loop_pkt_limit < DP_RX_REAP_LOOP_PKT_LIMIT_MAX)
9036 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = true;
9037 	else
9038 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = false;
9039 
9040 	soc->wlan_cfg_ctx->rx_hp_oos_update_limit =
9041 				params->rx_hp_oos_update_limit;
9042 
9043 	dp_info("tx_comp_loop_pkt_limit %u tx_comp_enable_eol_data_check %u rx_reap_loop_pkt_limit %u rx_enable_eol_data_check %u rx_hp_oos_update_limit %u",
9044 		soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit,
9045 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check,
9046 		soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit,
9047 		soc->wlan_cfg_ctx->rx_enable_eol_data_check,
9048 		soc->wlan_cfg_ctx->rx_hp_oos_update_limit);
9049 }
9050 #else
9051 static inline
9052 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
9053 					struct cdp_config_params *params)
9054 { }
9055 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
9056 
9057 /**
9058  * dp_update_config_parameters() - API to store datapath
9059  *                            config parameters
9060  * @psoc: soc handle
9061  * @params: datapath config parameters
9062  *
9063  * Return: QDF_STATUS
9064  */
9065 static
9066 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
9067 				struct cdp_config_params *params)
9068 {
9069 	struct dp_soc *soc = (struct dp_soc *)psoc;
9070 
9071 	if (!soc) {
9072 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
9073 				"%s: Invalid handle", __func__);
9074 		return QDF_STATUS_E_INVAL;
9075 	}
9076 
9077 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
9078 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
9079 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
9080 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
9081 				params->tcp_udp_checksumoffload;
9082 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
9083 	soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
9084 	soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
9085 
9086 	dp_update_rx_soft_irq_limit_params(soc, params);
9087 	dp_update_flow_control_parameters(soc, params);
9088 
9089 	return QDF_STATUS_SUCCESS;
9090 }
9091 
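/*
 * Illustrative usage sketch, compiled out and not part of the original
 * source: populating struct cdp_config_params with INI-derived values
 * before handing it to dp_update_config_parameters() above. All values
 * are hypothetical; only fields consumed by the functions above are set.
 */
#if 0	/* illustrative sketch */
static void example_update_dp_config(struct cdp_soc *psoc)
{
	struct cdp_config_params params = {0};

	params.tso_enable = true;
	params.lro_enable = false;
	params.gro_enable = true;
	params.flow_steering_enable = true;
	params.tcp_udp_checksumoffload = true;
	params.napi_enable = true;
	params.ipa_enable = false;
	/* soft-irq budget knobs, see dp_update_rx_soft_irq_limit_params() */
	params.tx_comp_loop_pkt_limit = 64;
	params.rx_reap_loop_pkt_limit = 64;
	params.rx_hp_oos_update_limit = 0;

	if (dp_update_config_parameters(psoc, &params) != QDF_STATUS_SUCCESS)
		dp_err("dp config update failed");
}
#endif	/* illustrative sketch */
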
9092 static struct cdp_wds_ops dp_ops_wds = {
9093 	.vdev_set_wds = dp_vdev_set_wds,
9094 #ifdef WDS_VENDOR_EXTENSION
9095 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
9096 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
9097 #endif
9098 };
9099 
9100 /*
9101  * dp_txrx_data_tx_cb_set() - set the callback for non-standard tx
9102  * @soc_hdl: datapath soc handle
9103  * @vdev_id: virtual interface id
9104  * @callback: callback function
9105  * @ctxt: callback context
9106  *
9107  */
9108 static void
9109 dp_txrx_data_tx_cb_set(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
9110 		       ol_txrx_data_tx_cb callback, void *ctxt)
9111 {
9112 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
9113 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
9114 
9115 	if (!vdev)
9116 		return;
9117 
9118 	vdev->tx_non_std_data_callback.func = callback;
9119 	vdev->tx_non_std_data_callback.ctxt = ctxt;
9120 }
9121 
9122 /**
9123  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
9124  * @soc: datapath soc handle
9125  * @pdev_id: id of datapath pdev handle
9126  *
9127  * Return: opaque pointer to dp txrx handle
9128  */
9129 static void *dp_pdev_get_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id)
9130 {
9131 	struct dp_pdev *pdev =
9132 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9133 						   pdev_id);
9134 	if (qdf_unlikely(!pdev))
9135 		return NULL;
9136 
9137 	return pdev->dp_txrx_handle;
9138 }
9139 
9140 /**
9141  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
9142  * @soc: datapath soc handle
9143  * @pdev_id: id of datapath pdev handle
9144  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
9145  *
9146  * Return: void
9147  */
9148 static void
9149 dp_pdev_set_dp_txrx_handle(struct cdp_soc_t *soc, uint8_t pdev_id,
9150 			   void *dp_txrx_hdl)
9151 {
9152 	struct dp_pdev *pdev =
9153 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9154 						   pdev_id);
9155 
9156 	if (!pdev)
9157 		return;
9158 
9159 	pdev->dp_txrx_handle = dp_txrx_hdl;
9160 }
9161 
9162 /**
9163  * dp_vdev_get_dp_ext_handle() - get dp handle from vdev
9164  * @soc: datapath soc handle
9165  * @vdev_id: vdev id
9166  *
9167  * Return: opaque pointer to dp txrx handle
9168  */
9169 static void *dp_vdev_get_dp_ext_handle(ol_txrx_soc_handle soc, uint8_t vdev_id)
9170 {
9171 	struct dp_vdev *vdev =
9172 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
9173 						   vdev_id);
9174 
9175 	if (!vdev)
9176 		return NULL;
9177 
9178 	return vdev->vdev_dp_ext_handle;
9179 }
9180 
9181 /**
9182  * dp_vdev_set_dp_ext_handle() - set dp handle in vdev
9183  * @soc: datapath soc handle
9184  * @vdev_id: vdev id
9185  * @size: size of advance dp handle
9186  *
9187  * Return: QDF_STATUS
9188  */
9189 static QDF_STATUS
9190 dp_vdev_set_dp_ext_handle(ol_txrx_soc_handle soc, uint8_t vdev_id,
9191 			  uint16_t size)
9192 {
9193 	struct dp_vdev *vdev =
9194 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
9195 						   vdev_id);
9196 	void *dp_ext_handle;
9197 
9198 	if (!vdev)
9199 		return QDF_STATUS_E_FAILURE;
9200 
9201 	dp_ext_handle = qdf_mem_malloc(size);
9202 
9203 	if (!dp_ext_handle)
9204 		return QDF_STATUS_E_FAILURE;
9205 
9206 	vdev->vdev_dp_ext_handle = dp_ext_handle;
9207 	return QDF_STATUS_SUCCESS;
9208 }
9209 
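/*
 * Illustrative usage sketch, compiled out and not part of the original
 * source: an external module reserving per-vdev private storage via the
 * ext handle APIs above and retrieving it later. The struct layout and
 * vdev id are hypothetical.
 */
#if 0	/* illustrative sketch */
struct example_vdev_priv {
	uint32_t token;
};

static void example_vdev_ext_handle(ol_txrx_soc_handle soc, uint8_t vdev_id)
{
	struct example_vdev_priv *priv;

	/* allocates sizeof(*priv) bytes and parks them on the vdev */
	if (dp_vdev_set_dp_ext_handle(soc, vdev_id,
				      sizeof(struct example_vdev_priv)) !=
	    QDF_STATUS_SUCCESS)
		return;

	priv = dp_vdev_get_dp_ext_handle(soc, vdev_id);
	if (priv)
		priv->token = 0x1234;
}
#endif	/* illustrative sketch */
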
9210 /**
9211  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
9212  * @soc_handle: datapath soc handle
9213  *
9214  * Return: opaque pointer to external dp (non-core DP)
9215  */
9216 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
9217 {
9218 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9219 
9220 	return soc->external_txrx_handle;
9221 }
9222 
9223 /**
9224  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
9225  * @soc_handle: datapath soc handle
9226  * @txrx_handle: opaque pointer to external dp (non-core DP)
9227  *
9228  * Return: void
9229  */
9230 static void
9231 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
9232 {
9233 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9234 
9235 	soc->external_txrx_handle = txrx_handle;
9236 }
9237 
9238 /**
9239  * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping
9240  * @soc_hdl: datapath soc handle
9241  * @pdev_id: id of the datapath pdev handle
9242  * @lmac_id: lmac id
9243  *
9244  * Return: QDF_STATUS
9245  */
9246 static QDF_STATUS
9247 dp_soc_map_pdev_to_lmac
9248 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
9249 	 uint32_t lmac_id)
9250 {
9251 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9252 
9253 	wlan_cfg_set_hw_mac_idx(soc->wlan_cfg_ctx,
9254 				pdev_id,
9255 				lmac_id);
9256 
9257 	/* Set host PDEV ID for lmac_id */
9258 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
9259 			      pdev_id,
9260 			      lmac_id);
9261 
9262 	return QDF_STATUS_SUCCESS;
9263 }
9264 
9265 /**
9266  * dp_soc_handle_pdev_mode_change() - Update pdev to lmac mapping
9267  * @soc_hdl: datapath soc handle
9268  * @pdev_id: id of the datapath pdev handle
9269  * @lmac_id: lmac id
9270  *
9271  * In the event of a dynamic mode change, update the pdev to lmac mapping
9272  *
9273  * Return: QDF_STATUS
9274  */
9275 static QDF_STATUS
9276 dp_soc_handle_pdev_mode_change
9277 	(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
9278 	 uint32_t lmac_id)
9279 {
9280 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9281 	struct dp_vdev *vdev = NULL;
9282 	uint8_t hw_pdev_id, mac_id;
9283 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc,
9284 								  pdev_id);
9285 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
9286 
9287 	if (qdf_unlikely(!pdev))
9288 		return QDF_STATUS_E_FAILURE;
9289 
9290 	pdev->lmac_id = lmac_id;
9291 	dp_info("mode change: pdev_id %d lmac_id %d", pdev->pdev_id, pdev->lmac_id);
9292 
9293 	/* Set host PDEV ID for lmac_id */
9294 	wlan_cfg_set_pdev_idx(soc->wlan_cfg_ctx,
9295 			      pdev->pdev_id,
9296 			      lmac_id);
9297 
9298 	hw_pdev_id =
9299 		dp_get_target_pdev_id_for_host_pdev_id(soc,
9300 						       pdev->pdev_id);
9301 
9302 	/*
9303 	 * When NSS offload is enabled, send the pdev_id-to-lmac_id and
9304 	 * pdev_id-to-hw_pdev_id mappings to the NSS FW
9305 	 */
9306 	if (nss_config) {
9307 		mac_id = pdev->lmac_id;
9308 		if (soc->cdp_soc.ol_ops->pdev_update_lmac_n_target_pdev_id)
9309 			soc->cdp_soc.ol_ops->
9310 				pdev_update_lmac_n_target_pdev_id(
9311 				soc->ctrl_psoc,
9312 				&pdev_id, &mac_id, &hw_pdev_id);
9313 	}
9314 
9315 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
9316 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
9317 		HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
9318 						hw_pdev_id);
9319 	}
9320 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
9321 
9322 	return QDF_STATUS_SUCCESS;
9323 }
9324 
9325 /**
9326  * dp_soc_set_pdev_status_down() - set pdev down/up status
9327  * @soc: datapath soc handle
9328  * @pdev_id: id of datapath pdev handle
9329  * @is_pdev_down: pdev down/up status
9330  *
9331  * Return: QDF_STATUS
9332  */
9333 static QDF_STATUS
9334 dp_soc_set_pdev_status_down(struct cdp_soc_t *soc, uint8_t pdev_id,
9335 			    bool is_pdev_down)
9336 {
9337 	struct dp_pdev *pdev =
9338 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9339 						   pdev_id);
9340 	if (!pdev)
9341 		return QDF_STATUS_E_FAILURE;
9342 
9343 	pdev->is_pdev_down = is_pdev_down;
9344 	return QDF_STATUS_SUCCESS;
9345 }
9346 
9347 /**
9348  * dp_get_cfg_capabilities() - get dp capabilities
9349  * @soc_handle: datapath soc handle
9350  * @dp_caps: enum for dp capabilities
9351  *
9352  * Return: bool to determine if dp caps is enabled
9353  */
9354 static bool
9355 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
9356 			enum cdp_capabilities dp_caps)
9357 {
9358 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9359 
9360 	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
9361 }
9362 
9363 #ifdef FEATURE_AST
9364 static QDF_STATUS
9365 dp_peer_teardown_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
9366 		       uint8_t *peer_mac)
9367 {
9368 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9369 	QDF_STATUS status = QDF_STATUS_SUCCESS;
9370 	struct dp_peer *peer =
9371 			dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id);
9372 
9373 	/* Peer can be null for monitor vap mac address */
9374 	if (!peer) {
9375 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
9376 			  "%s: Invalid peer\n", __func__);
9377 		return QDF_STATUS_E_FAILURE;
9378 	}
9379 	/*
9380 	 * For a BSS peer, a new peer is not created on alloc_node if a
9381 	 * peer with the same address already exists; instead the refcnt of
9382 	 * the existing peer is increased. Correspondingly, in the delete
9383 	 * path only the refcnt is decreased, and the peer is deleted only
9384 	 * when all references are gone. So delete_in_progress should not
9385 	 * be set for a bss_peer unless only 3 references remain (peer map
9386 	 * reference, peer hash table reference and the local reference above).
9387 	 */
9388 	if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 3)) {
9389 		status =  QDF_STATUS_E_FAILURE;
9390 		goto fail;
9391 	}
9392 
9393 	qdf_spin_lock_bh(&soc->ast_lock);
9394 	peer->delete_in_progress = true;
9395 	dp_peer_delete_ast_entries(soc, peer);
9396 	qdf_spin_unlock_bh(&soc->ast_lock);
9397 
9398 fail:
9399 	if (peer)
9400 		dp_peer_unref_delete(peer);
9401 	return status;
9402 }
9403 #endif
9404 
9405 #ifdef ATH_SUPPORT_NAC_RSSI
9406 /**
9407  * dp_vdev_get_neighbour_rssi() - fetch the stored RSSI for a configured NAC
9408  * @soc: DP soc handle
9409  * @vdev_id: id of DP vdev handle
9410  * @mac_addr: neighbour mac
9411  * @rssi: out parameter for the rssi value
9412  *
9413  * Return: QDF_STATUS_SUCCESS if the neighbour is found, error otherwise
9414  */
9415 static QDF_STATUS  dp_vdev_get_neighbour_rssi(struct cdp_soc_t *soc,
9416 					      uint8_t vdev_id,
9417 					      char *mac_addr,
9418 					      uint8_t *rssi)
9419 {
9420 	struct dp_vdev *vdev =
9421 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
9422 						   vdev_id);
9423 	struct dp_pdev *pdev;
9424 	struct dp_neighbour_peer *peer = NULL;
9425 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
9426 
9427 	if (!vdev)
9428 		return status;
9429 
9430 	pdev = vdev->pdev;
9431 	*rssi = 0;
9432 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
9433 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
9434 		      neighbour_peer_list_elem) {
9435 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
9436 				mac_addr, QDF_MAC_ADDR_SIZE) == 0) {
9437 			*rssi = peer->rssi;
9438 			status = QDF_STATUS_SUCCESS;
9439 			break;
9440 		}
9441 	}
9442 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
9443 	return status;
9444 }
9445 
9446 static QDF_STATUS
9447 dp_config_for_nac_rssi(struct cdp_soc_t *cdp_soc,
9448 		       uint8_t vdev_id,
9449 		       enum cdp_nac_param_cmd cmd, char *bssid,
9450 		       char *client_macaddr,
9451 		       uint8_t chan_num)
9452 {
9453 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
9454 	struct dp_vdev *vdev =
9455 		dp_get_vdev_from_soc_vdev_id_wifi3(soc,
9456 						   vdev_id);
9457 	struct dp_pdev *pdev;
9458 
9459 	if (!vdev)
9460 		return QDF_STATUS_E_FAILURE;
9461 
9462 	pdev = (struct dp_pdev *)vdev->pdev;
9463 	pdev->nac_rssi_filtering = 1;
9464 	/* Store address of NAC (neighbour peer) which will be checked
9465 	 * against TA of received packets.
9466 	 */
9467 
9468 	if (cmd == CDP_NAC_PARAM_ADD) {
9469 		dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id,
9470 						 DP_NAC_PARAM_ADD,
9471 						 (uint8_t *)client_macaddr);
9472 	} else if (cmd == CDP_NAC_PARAM_DEL) {
9473 		dp_update_filter_neighbour_peers(cdp_soc, vdev->vdev_id,
9474 						 DP_NAC_PARAM_DEL,
9475 						 (uint8_t *)client_macaddr);
9476 	}
9477 
9478 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
9479 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
9480 			(soc->ctrl_psoc, pdev->pdev_id,
9481 			 vdev->vdev_id, cmd, bssid, client_macaddr);
9482 
9483 	return QDF_STATUS_SUCCESS;
9484 }
9485 #endif
9486 
9487 /**
9488  * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering
9489  * for pktlog
9490  * @soc: cdp_soc handle
9491  * @pdev_id: id of dp pdev handle
9492  * @mac_addr: Peer mac address
9493  * @enb_dsb: Enable or disable peer based filtering
9494  *
9495  * Return: QDF_STATUS
9496  */
9497 static int
9498 dp_enable_peer_based_pktlog(struct cdp_soc_t *soc, uint8_t pdev_id,
9499 			    uint8_t *mac_addr, uint8_t enb_dsb)
9500 {
9501 	struct dp_peer *peer;
9502 	struct dp_pdev *pdev =
9503 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9504 						   pdev_id);
9505 
9506 	if (!pdev) {
9507 		dp_err("Invalid Pdev for pdev_id %d", pdev_id);
9508 		return QDF_STATUS_E_FAILURE;
9509 	}
9510 
9511 	peer = (struct dp_peer *)dp_find_peer_by_addr((struct cdp_pdev *)pdev,
9512 						      mac_addr);
9513 
9514 	if (!peer) {
9515 		dp_err("Invalid Peer");
9516 		return QDF_STATUS_E_FAILURE;
9517 	}
9518 
9519 	peer->peer_based_pktlog_filter = enb_dsb;
9520 	pdev->dp_peer_based_pktlog = enb_dsb;
9521 
9522 	return QDF_STATUS_SUCCESS;
9523 }
9524 
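/*
 * Illustrative usage sketch, compiled out and not part of the original
 * source: enabling peer-based pktlog filtering for one station on pdev 0.
 * The station MAC address is hypothetical.
 */
#if 0	/* illustrative sketch */
static void example_enable_peer_pktlog(struct cdp_soc_t *soc)
{
	uint8_t sta_mac[QDF_MAC_ADDR_SIZE] = {
		0x00, 0x03, 0x7f, 0x12, 0x34, 0x56 };

	/* enb_dsb: 1 enables filtering on this peer, 0 disables it */
	dp_enable_peer_based_pktlog(soc, 0, sta_mac, 1);
}
#endif	/* illustrative sketch */
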
9525 #ifndef WLAN_SUPPORT_RX_TAG_STATISTICS
9526 /**
9527  * dp_dump_pdev_rx_protocol_tag_stats - dump the number of packets tagged for
9528  * given protocol type (RX_PROTOCOL_TAG_ALL indicates for all protocol)
9529  * @soc: cdp_soc handle
9530  * @pdev_id: id of cdp_pdev handle
9531  * @protocol_type: protocol type for which stats should be displayed
9532  *
9533  * Return: none
9534  */
9535 static inline void
9536 dp_dump_pdev_rx_protocol_tag_stats(struct cdp_soc_t  *soc, uint8_t pdev_id,
9537 				   uint16_t protocol_type)
9538 {
9539 }
9540 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
9541 
9542 #ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
9543 /**
9544  * dp_update_pdev_rx_protocol_tag - Add/remove a protocol tag that should be
9545  * applied to the desired protocol type packets
9546  * @soc: soc handle
9547  * @pdev_id: id of cdp_pdev handle
9548  * @enable_rx_protocol_tag - bitmask that indicates what protocol types
9549  * are enabled for tagging. zero indicates disable feature, non-zero indicates
9550  * enable feature
9551  * @protocol_type: new protocol type for which the tag is being added
9552  * @tag: user configured tag for the new protocol
9553  *
9554  * Return: Success
9555  */
9556 static inline QDF_STATUS
9557 dp_update_pdev_rx_protocol_tag(struct cdp_soc_t  *soc, uint8_t pdev_id,
9558 			       uint32_t enable_rx_protocol_tag,
9559 			       uint16_t protocol_type,
9560 			       uint16_t tag)
9561 {
9562 	return QDF_STATUS_SUCCESS;
9563 }
9564 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
9565 
9566 #ifndef WLAN_SUPPORT_RX_FLOW_TAG
9567 /**
9568  * dp_set_rx_flow_tag() - add a flow to/delete a flow from the flow search table
9569  * @cdp_soc: soc handle
9570  * @pdev_id: id of cdp_pdev handle
9571  * @flow_info: flow tuple that is to be added to/deleted from flow search table
9572  *
9573  * Return: QDF_STATUS_SUCCESS
9574  */
9575 static inline QDF_STATUS
9576 dp_set_rx_flow_tag(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
9577 		   struct cdp_rx_flow_info *flow_info)
9578 {
9579 	return QDF_STATUS_SUCCESS;
9580 }
9581 /**
9582  * dp_dump_rx_flow_tag_stats - dump the number of packets tagged for
9583  * given flow 5-tuple
9584  * @cdp_soc: soc handle
9585  * @pdev_id: id of cdp_pdev handle
9586  * @flow_info: flow 5-tuple for which stats should be displayed
9587  *
9588  * Return: Success
9589  */
9590 static inline QDF_STATUS
9591 dp_dump_rx_flow_tag_stats(struct cdp_soc_t *cdp_soc, uint8_t pdev_id,
9592 			  struct cdp_rx_flow_info *flow_info)
9593 {
9594 	return QDF_STATUS_SUCCESS;
9595 }
9596 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
9597 
9598 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
9599 					   uint32_t max_peers,
9600 					   uint32_t max_ast_index,
9601 					   bool peer_map_unmap_v2)
9602 {
9603 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9604 
9605 	soc->max_peers = max_peers;
9606 
9607 	qdf_print("%s max_peers %u, max_ast_index: %u\n",
9608 		  __func__, max_peers, max_ast_index);
9609 	wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);
9610 
9611 	if (dp_peer_find_attach(soc))
9612 		return QDF_STATUS_E_FAILURE;
9613 
9614 	soc->is_peer_map_unmap_v2 = peer_map_unmap_v2;
9615 
9616 	return QDF_STATUS_SUCCESS;
9617 }
9618 
9619 static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
9620 				      void *stats_ctx)
9621 {
9622 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9623 
9624 	soc->rate_stats_ctx = (struct cdp_soc_rate_stats_ctx *)stats_ctx;
9625 }
9626 
9627 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9628 static QDF_STATUS dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
9629 					  uint8_t pdev_id)
9630 {
9631 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
9632 	struct dp_vdev *vdev = NULL;
9633 	struct dp_peer *peer = NULL;
9634 	struct dp_pdev *pdev =
9635 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9636 						   pdev_id);
9637 
9638 	if (!pdev)
9639 		return QDF_STATUS_E_FAILURE;
9640 
9641 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
9642 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
9643 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
9644 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
9645 			if (peer && !peer->bss_peer)
9646 				dp_wdi_event_handler(
9647 					WDI_EVENT_FLUSH_RATE_STATS_REQ,
9648 					soc, peer->wlanstats_ctx,
9649 					peer->peer_ids[0],
9650 					WDI_NO_VAL, pdev_id);
9651 		}
9652 	}
9653 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
9654 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
9655 
9656 	return QDF_STATUS_SUCCESS;
9657 }
9658 #else
9659 static inline QDF_STATUS
9660 dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
9661 			uint8_t pdev_id)
9662 {
9663 	return QDF_STATUS_SUCCESS;
9664 }
9665 #endif
9666 
9667 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
9668 static QDF_STATUS dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
9669 					   uint8_t pdev_id,
9670 					   void *buf)
9671 {
9672 	 dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS,
9673 			      (struct dp_soc *)soc, buf, HTT_INVALID_PEER,
9674 			      WDI_NO_VAL, pdev_id);
9675 	return QDF_STATUS_SUCCESS;
9676 }
9677 #else
9678 static inline QDF_STATUS
9679 dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
9680 			 uint8_t pdev_id,
9681 			 void *buf)
9682 {
9683 	return QDF_STATUS_SUCCESS;
9684 }
9685 #endif
9686 
9687 static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle)
9688 {
9689 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9690 
9691 	return soc->rate_stats_ctx;
9692 }
9693 
9694 /*
9695  * dp_get_cfg() - get dp cfg
9696  * @soc: cdp soc handle
9697  * @cfg: cfg enum
9698  *
9699  * Return: cfg value
9700  */
9701 static uint32_t dp_get_cfg(struct cdp_soc_t *soc, enum cdp_dp_cfg cfg)
9702 {
9703 	struct dp_soc *dpsoc = (struct dp_soc *)soc;
9704 	uint32_t value = 0;
9705 
9706 	switch (cfg) {
9707 	case cfg_dp_enable_data_stall:
9708 		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
9709 		break;
9710 	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
9711 		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
9712 		break;
9713 	case cfg_dp_tso_enable:
9714 		value = dpsoc->wlan_cfg_ctx->tso_enabled;
9715 		break;
9716 	case cfg_dp_lro_enable:
9717 		value = dpsoc->wlan_cfg_ctx->lro_enabled;
9718 		break;
9719 	case cfg_dp_gro_enable:
9720 		value = dpsoc->wlan_cfg_ctx->gro_enabled;
9721 		break;
9722 	case cfg_dp_tx_flow_start_queue_offset:
9723 		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
9724 		break;
9725 	case cfg_dp_tx_flow_stop_queue_threshold:
9726 		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
9727 		break;
9728 	case cfg_dp_disable_intra_bss_fwd:
9729 		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
9730 		break;
9731 	case cfg_dp_pktlog_buffer_size:
9732 		value = dpsoc->wlan_cfg_ctx->pktlog_buffer_size;
9733 		break;
9734 	default:
9735 		value = 0;
9736 	}
9737 
9738 	return value;
9739 }
9740 
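/*
 * Illustrative usage sketch, compiled out and not part of the original
 * source: querying one config item through the txrx_get_cfg hook that
 * dp_ops_cmn (registered below) wires to dp_get_cfg() above.
 */
#if 0	/* illustrative sketch */
static bool example_is_tso_enabled(struct cdp_soc_t *soc)
{
	return !!soc->ops->cmn_drv_ops->txrx_get_cfg(soc, cfg_dp_tso_enable);
}
#endif	/* illustrative sketch */
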
9741 #ifdef PEER_FLOW_CONTROL
9742 /**
9743  * dp_tx_flow_ctrl_configure_pdev() - Configure flow control params
9744  * @soc_handle: datapath soc handle
9745  * @pdev_id: id of datapath pdev handle
9746  * @param: ol ath params
9747  * @value: value of the flag
9748  * @buff: Buffer to be passed
9749  *
9750  * Implemented this function same as legacy function. In legacy code, single
9751  * function is used to display stats and update pdev params.
9752  *
9753  * Return: 0 for success. nonzero for failure.
9754  */
9755 static uint32_t dp_tx_flow_ctrl_configure_pdev(struct cdp_soc_t *soc_handle,
9756 					       uint8_t pdev_id,
9757 					       enum _ol_ath_param_t param,
9758 					       uint32_t value, void *buff)
9759 {
9760 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
9761 	struct dp_pdev *pdev =
9762 		dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
9763 						   pdev_id);
9764 
9765 	if (qdf_unlikely(!pdev))
9766 		return 1;
9767 
9768 	soc = pdev->soc;
9769 	if (!soc)
9770 		return 1;
9771 
9772 	switch (param) {
9773 #ifdef QCA_ENH_V3_STATS_SUPPORT
9774 	case OL_ATH_PARAM_VIDEO_DELAY_STATS_FC:
9775 		if (value)
9776 			pdev->delay_stats_flag = true;
9777 		else
9778 			pdev->delay_stats_flag = false;
9779 		break;
9780 	case OL_ATH_PARAM_VIDEO_STATS_FC:
9781 		qdf_print("------- TID Stats ------\n");
9782 		dp_pdev_print_tid_stats(pdev);
9783 		qdf_print("------ Delay Stats ------\n");
9784 		dp_pdev_print_delay_stats(pdev);
9785 		break;
9786 #endif
9787 	case OL_ATH_PARAM_TOTAL_Q_SIZE:
9788 		{
9789 			uint32_t tx_min, tx_max;
9790 
9791 			tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx);
9792 			tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
9793 
9794 			if (!buff) {
9795 				if ((value >= tx_min) && (value <= tx_max)) {
9796 					pdev->num_tx_allowed = value;
9797 				} else {
9798 					QDF_TRACE(QDF_MODULE_ID_DP,
9799 						  QDF_TRACE_LEVEL_INFO,
9800 						  "Failed to update num_tx_allowed, Q_min = %d Q_max = %d",
9801 						  tx_min, tx_max);
9802 					break;
9803 				}
9804 			} else {
9805 				*(int *)buff = pdev->num_tx_allowed;
9806 			}
9807 		}
9808 		break;
9809 	default:
9810 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
9811 			  "%s: not handled param %d ", __func__, param);
9812 		break;
9813 	}
9814 
9815 	return 0;
9816 }
9817 #endif
9818 
9819 /**
9820  * dp_set_pdev_pcp_tid_map_wifi3(): update pcp tid map in pdev
9821  * @psoc: dp soc handle
9822  * @pdev_id: id of DP_PDEV handle
9823  * @pcp: pcp value
9824  * @tid: tid value passed by the user
9825  *
9826  * Return: QDF_STATUS_SUCCESS on success
9827  */
9828 static QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(ol_txrx_soc_handle psoc,
9829 						uint8_t pdev_id,
9830 						uint8_t pcp, uint8_t tid)
9831 {
9832 	struct dp_soc *soc = (struct dp_soc *)psoc;
9833 
9834 	soc->pcp_tid_map[pcp] = tid;
9835 
9836 	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
9837 	return QDF_STATUS_SUCCESS;
9838 }
9839 
9840 /**
9841  * dp_set_vdev_pcp_tid_map_wifi3(): update pcp tid map in vdev
9842  * @soc: DP soc handle
9843  * @vdev_id: id of DP_VDEV handle
9844  * @pcp: pcp value
9845  * @tid: tid value passed by the user
9846  *
9847  * Return: QDF_STATUS_SUCCESS on success
9848  */
9849 static QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_soc_t *soc,
9850 						uint8_t vdev_id,
9851 						uint8_t pcp, uint8_t tid)
9852 {
9853 	struct dp_vdev *vdev =
9854 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
9855 						   vdev_id);
9856 
9857 	if (!vdev)
9858 		return QDF_STATUS_E_FAILURE;
9859 
9860 	vdev->pcp_tid_map[pcp] = tid;
9861 
9862 	return QDF_STATUS_SUCCESS;
9863 }
9864 
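/*
 * Illustrative usage sketch, compiled out and not part of the original
 * source: programming an identity PCP-to-TID map at the pdev level and a
 * single override at the vdev level. Ids and TID values are hypothetical.
 */
#if 0	/* illustrative sketch */
static void example_program_pcp_tid_maps(ol_txrx_soc_handle psoc,
					 uint8_t vdev_id)
{
	uint8_t pcp;

	/* 802.1p PCP values 0..7 mapped one-to-one onto TIDs 0..7 */
	for (pcp = 0; pcp < 8; pcp++)
		dp_set_pdev_pcp_tid_map_wifi3(psoc, 0, pcp, pcp);

	/* demote PCP 7 traffic of this vdev to TID 5 */
	dp_set_vdev_pcp_tid_map_wifi3((struct cdp_soc_t *)psoc, vdev_id, 7, 5);
}
#endif	/* illustrative sketch */
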
9865 static struct cdp_cmn_ops dp_ops_cmn = {
9866 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
9867 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
9868 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
9869 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
9870 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
9871 	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
9872 	.txrx_peer_create = dp_peer_create_wifi3,
9873 	.txrx_peer_setup = dp_peer_setup_wifi3,
9874 #ifdef FEATURE_AST
9875 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
9876 #else
9877 	.txrx_peer_teardown = NULL,
9878 #endif
9879 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
9880 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
9881 	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
9882 	.txrx_peer_get_ast_info_by_pdev =
9883 		dp_peer_get_ast_info_by_pdevid_wifi3,
9884 	.txrx_peer_ast_delete_by_soc =
9885 		dp_peer_ast_entry_del_by_soc,
9886 	.txrx_peer_ast_delete_by_pdev =
9887 		dp_peer_ast_entry_del_by_pdev,
9888 	.txrx_peer_delete = dp_peer_delete_wifi3,
9889 	.txrx_vdev_register = dp_vdev_register_wifi3,
9890 	.txrx_soc_detach = dp_soc_detach_wifi3,
9891 	.txrx_soc_deinit = dp_soc_deinit_wifi3,
9892 	.txrx_soc_init = dp_soc_init_wifi3,
9893 	.txrx_tso_soc_attach = dp_tso_soc_attach,
9894 	.txrx_tso_soc_detach = dp_tso_soc_detach,
9895 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
9896 	.txrx_get_mon_vdev_from_pdev = dp_get_mon_vdev_from_pdev_wifi3,
9897 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
9898 	.txrx_ath_getstats = dp_get_device_stats,
9899 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
9900 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
9901 	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
9902 	.delba_process = dp_delba_process_wifi3,
9903 	.set_addba_response = dp_set_addba_response,
9904 	.flush_cache_rx_queue = NULL,
9905 	/* TODO: get API's for dscp-tid need to be added*/
9906 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
9907 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
9908 	.txrx_get_total_per = dp_get_total_per,
9909 	.txrx_stats_request = dp_txrx_stats_request,
9910 	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
9911 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
9912 	.display_stats = dp_txrx_dump_stats,
9913 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
9914 	.txrx_intr_detach = dp_soc_interrupt_detach,
9915 	.set_pn_check = dp_set_pn_check_wifi3,
9916 	.update_config_parameters = dp_update_config_parameters,
9917 	/* TODO: Add other functions */
9918 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
9919 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
9920 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
9921 	.get_vdev_dp_ext_txrx_handle = dp_vdev_get_dp_ext_handle,
9922 	.set_vdev_dp_ext_txrx_handle = dp_vdev_set_dp_ext_handle,
9923 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
9924 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
9925 	.map_pdev_to_lmac = dp_soc_map_pdev_to_lmac,
9926 	.handle_mode_change = dp_soc_handle_pdev_mode_change,
9927 	.set_pdev_status_down = dp_soc_set_pdev_status_down,
9928 	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
9929 	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
9930 	.tx_send = dp_tx_send,
9931 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
9932 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
9933 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
9934 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
9935 	.txrx_get_os_rx_handles_from_vdev =
9936 					dp_get_os_rx_handles_from_vdev_wifi3,
9937 	.delba_tx_completion = dp_delba_tx_completion_wifi3,
9938 	.get_dp_capabilities = dp_get_cfg_capabilities,
9939 	.txrx_get_cfg = dp_get_cfg,
9940 	.set_rate_stats_ctx = dp_soc_set_rate_stats_ctx,
9941 	.get_rate_stats_ctx = dp_soc_get_rate_stats_ctx,
9942 	.txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats,
9943 	.txrx_flush_rate_stats_request = dp_flush_rate_stats_req,
9944 
9945 	.set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3,
9946 	.set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3,
9947 
9948 	.txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler,
9949 #ifdef QCA_MULTIPASS_SUPPORT
9950 	.set_vlan_groupkey = dp_set_vlan_groupkey,
9951 #endif
9952 	.get_peer_mac_list = dp_get_peer_mac_list,
9953 	.tx_send_exc = dp_tx_send_exception,
9954 };
9955 
9956 static struct cdp_ctrl_ops dp_ops_ctrl = {
9957 	.txrx_peer_authorize = dp_peer_authorize,
9958 #ifdef VDEV_PEER_PROTOCOL_COUNT
9959 	.txrx_enable_peer_protocol_count = dp_enable_vdev_peer_protocol_count,
9960 	.txrx_set_peer_protocol_drop_mask =
9961 		dp_enable_vdev_peer_protocol_drop_mask,
9962 	.txrx_is_peer_protocol_count_enabled =
9963 		dp_is_vdev_peer_protocol_count_enabled,
9964 	.txrx_get_peer_protocol_drop_mask = dp_get_vdev_peer_protocol_drop_mask,
9965 #endif
9966 	.txrx_set_vdev_param = dp_set_vdev_param,
9967 	.txrx_set_psoc_param = dp_set_psoc_param,
9968 	.txrx_get_psoc_param = dp_get_psoc_param,
9969 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
9970 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
9971 #if defined(ATH_SUPPORT_NAC_RSSI) || defined(ATH_SUPPORT_NAC)
9972 	.txrx_update_filter_neighbour_peers =
9973 		dp_update_filter_neighbour_peers,
9974 #endif /* ATH_SUPPORT_NAC_RSSI || ATH_SUPPORT_NAC */
9975 	.txrx_get_sec_type = dp_get_sec_type,
9976 	.txrx_wdi_event_sub = dp_wdi_event_sub,
9977 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
9978 #ifdef WDI_EVENT_ENABLE
9979 	.txrx_get_pldev = dp_get_pldev,
9980 #endif
9981 	.txrx_set_pdev_param = dp_set_pdev_param,
9982 	.txrx_get_pdev_param = dp_get_pdev_param,
9983 	.txrx_set_peer_param = dp_set_peer_param,
9984 	.txrx_get_peer_param = dp_get_peer_param,
9985 #ifdef VDEV_PEER_PROTOCOL_COUNT
9986 	.txrx_peer_protocol_cnt = dp_peer_stats_update_protocol_cnt,
9987 #endif
9988 #ifdef ATH_SUPPORT_NAC_RSSI
9989 	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
9990 	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
9991 #endif
9992 	.set_key = dp_set_michael_key,
9993 	.txrx_get_vdev_param = dp_get_vdev_param,
9994 	.enable_peer_based_pktlog = dp_enable_peer_based_pktlog,
9995 	.calculate_delay_stats = dp_calculate_delay_stats,
9996 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
9997 	.txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag,
9998 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
9999 	.txrx_dump_pdev_rx_protocol_tag_stats =
10000 				dp_dump_pdev_rx_protocol_tag_stats,
10001 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
10002 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
10003 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
10004 	.txrx_set_rx_flow_tag = dp_set_rx_flow_tag,
10005 	.txrx_dump_rx_flow_tag_stats = dp_dump_rx_flow_tag_stats,
10006 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
10007 #ifdef QCA_MULTIPASS_SUPPORT
10008 	.txrx_peer_set_vlan_id = dp_peer_set_vlan_id,
10009 #endif /*QCA_MULTIPASS_SUPPORT*/
10010 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
10011 	.txrx_update_peer_pkt_capture_params =
10012 		 dp_peer_update_pkt_capture_params,
10013 #endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
10014 };
10015 
10016 static struct cdp_me_ops dp_ops_me = {
10017 #ifdef ATH_SUPPORT_IQUE
10018 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
10019 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
10020 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
10021 #endif
10022 };
10023 
10024 static struct cdp_mon_ops dp_ops_mon = {
10025 	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
10026 	/* Added support for HK advance filter */
10027 	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
10028 	.txrx_deliver_tx_mgmt = dp_deliver_tx_mgmt,
10029 };
10030 
10031 static struct cdp_host_stats_ops dp_ops_host_stats = {
10032 	.txrx_per_peer_stats = dp_get_host_peer_stats,
10033 	.get_fw_peer_stats = dp_get_fw_peer_stats,
10034 	.get_htt_stats = dp_get_htt_stats,
10035 #ifdef FEATURE_PERPKT_INFO
10036 	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
10037 	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
10038 #endif /* FEATURE_PERPKT_INFO */
10039 	.txrx_stats_publish = dp_txrx_stats_publish,
10040 	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
10041 	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
10042 	.txrx_get_peer_stats_param = dp_txrx_get_peer_stats_param,
10043 	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
10044 	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
10045 	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
10046 	.txrx_update_vdev_stats = dp_txrx_update_vdev_host_stats,
10047 	/* TODO */
10048 };
10049 
10050 static struct cdp_raw_ops dp_ops_raw = {
10051 	/* TODO */
10052 };
10053 
10054 #ifdef PEER_FLOW_CONTROL
10055 static struct cdp_pflow_ops dp_ops_pflow = {
10056 	dp_tx_flow_ctrl_configure_pdev,
10057 };
10058 #endif /* PEER_FLOW_CONTROL */
10059 
10060 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
10061 static struct cdp_cfr_ops dp_ops_cfr = {
10062 	.txrx_cfr_filter = dp_cfr_filter,
10063 	.txrx_get_cfr_rcc = dp_get_cfr_rcc,
10064 	.txrx_set_cfr_rcc = dp_set_cfr_rcc,
10065 };
10066 #endif
10067 
10068 #ifdef FEATURE_RUNTIME_PM
10069 /**
10070  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
10071  * @soc_hdl: Datapath soc handle
10072  * @pdev_id: id of data path pdev handle
10073  *
10074  * DP is ready to runtime suspend if there are no pending TX packets.
10075  *
10076  * Return: QDF_STATUS
10077  */
10078 static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
10079 {
10080 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10081 	struct dp_pdev *pdev;
10082 
10083 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10084 	if (!pdev) {
10085 		dp_err("pdev is NULL");
10086 		return QDF_STATUS_E_INVAL;
10087 	}
10088 
10089 	/* Abort if there are any pending TX packets */
10090 	if (dp_get_tx_pending(dp_pdev_to_cdp_pdev(pdev)) > 0) {
10091 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
10092 			  FL("Abort suspend due to pending TX packets"));
10093 		return QDF_STATUS_E_AGAIN;
10094 	}
10095 
10096 	if (soc->intr_mode == DP_INTR_POLL)
10097 		qdf_timer_stop(&soc->int_timer);
10098 
10099 	return QDF_STATUS_SUCCESS;
10100 }
10101 
10102 /**
10103  * dp_flush_ring_hptp() - Update the ring shadow register HP/TP
10104  *			  address on runtime resume
10105  * @soc: DP soc context
10106  * @hal_srng: ring whose pending flush event is serviced
10107  *
10108  * Return: None
10109  */
10110 static
10111 void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
10112 {
10113 	if (hal_srng && hal_srng_get_clear_event(hal_srng,
10114 						 HAL_SRNG_FLUSH_EVENT)) {
10115 		/* Acquire the lock */
10116 		hal_srng_access_start(soc->hal_soc, hal_srng);
10117 
10118 		hal_srng_access_end(soc->hal_soc, hal_srng);
10119 
10120 		hal_srng_set_flush_last_ts(hal_srng);
10121 	}
10122 }
10123 
10124 /**
10125  * dp_runtime_resume() - ensure DP is ready to runtime resume
10126  * @soc_hdl: Datapath soc handle
10127  * @pdev_id: id of data path pdev handle
10128  *
10129  * Resume DP for runtime PM.
10130  *
10131  * Return: QDF_STATUS
10132  */
10133 static QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
10134 {
10135 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10136 	int i;
10137 
10138 	if (soc->intr_mode == DP_INTR_POLL)
10139 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
10140 
10141 	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
10142 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
10143 	}
10144 
10145 	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
10146 
10147 	return QDF_STATUS_SUCCESS;
10148 }
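
/*
 * Illustrative usage sketch, compiled out and not part of the original
 * source: how a runtime PM core might drive the two hooks above,
 * retrying later when TX completions are still pending. The caller and
 * pdev id are hypothetical.
 */
#if 0	/* illustrative sketch */
static void example_runtime_pm_cycle(struct cdp_soc_t *soc, uint8_t pdev_id)
{
	/* QDF_STATUS_E_AGAIN means TX packets were pending; retry later */
	if (dp_runtime_suspend(soc, pdev_id) != QDF_STATUS_SUCCESS)
		return;

	/* ... link drops to low power and later comes back ... */

	/* resume restarts the poll timer and flushes ring HP/TP */
	dp_runtime_resume(soc, pdev_id);
}
#endif	/* illustrative sketch */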
10149 #endif /* FEATURE_RUNTIME_PM */
10150 
10151 /**
10152  * dp_tx_get_success_ack_stats() - get tx success completion count
10153  * @soc_hdl: Datapath soc handle
10154  * @vdevid: vdev identifier
10155  *
10156  * Return: tx success ack count
10157  */
10158 static uint32_t dp_tx_get_success_ack_stats(struct cdp_soc_t *soc_hdl,
10159 					    uint8_t vdev_id)
10160 {
10161 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10162 	struct cdp_vdev_stats *vdev_stats = NULL;
10163 	uint32_t tx_success;
10164 	struct dp_vdev *vdev =
10165 		(struct dp_vdev *)dp_get_vdev_from_soc_vdev_id_wifi3(soc,
10166 								     vdev_id);
10167 
10168 	if (!vdev) {
10169 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
10170 			  FL("Invalid vdev id %d"), vdev_id);
10171 		return 0;
10172 	}
10173 
10174 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
10175 	if (!vdev_stats) {
10176 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
10177 			  "DP alloc failure - unable to allocate vdev stats");
10178 		return 0;
10179 	}
10180 
10181 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
10182 	dp_aggregate_vdev_stats(vdev, vdev_stats);
10183 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
10184 
10185 	tx_success = vdev_stats->tx.tx_success.num;
10186 	qdf_mem_free(vdev_stats);
10187 
10188 	return tx_success;
10189 }
10190 
10191 #ifdef WLAN_SUPPORT_DATA_STALL
10192 /**
10193  * dp_register_data_stall_detect_cb() - register data stall callback
10194  * @soc_hdl: Datapath soc handle
10195  * @pdev_id: id of data path pdev handle
10196  * @data_stall_detect_callback: data stall callback function
10197  *
10198  * Return: QDF_STATUS Enumeration
10199  */
10200 static
10201 QDF_STATUS dp_register_data_stall_detect_cb(
10202 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
10203 			data_stall_detect_cb data_stall_detect_callback)
10204 {
10205 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10206 	struct dp_pdev *pdev;
10207 
10208 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10209 	if (!pdev) {
10210 		dp_err("pdev NULL!");
10211 		return QDF_STATUS_E_INVAL;
10212 	}
10213 
10214 	pdev->data_stall_detect_callback = data_stall_detect_callback;
10215 	return QDF_STATUS_SUCCESS;
10216 }
10217 
10218 /**
10219  * dp_deregister_data_stall_detect_cb() - de-register data stall callback
10220  * @soc_hdl: Datapath soc handle
10221  * @pdev_id: id of data path pdev handle
10222  * @data_stall_detect_callback: data stall callback function
10223  *
10224  * Return: QDF_STATUS Enumeration
10225  */
10226 static
10227 QDF_STATUS dp_deregister_data_stall_detect_cb(
10228 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
10229 			data_stall_detect_cb data_stall_detect_callback)
10230 {
10231 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10232 	struct dp_pdev *pdev;
10233 
10234 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10235 	if (!pdev) {
10236 		dp_err("pdev NULL!");
10237 		return QDF_STATUS_E_INVAL;
10238 	}
10239 
10240 	pdev->data_stall_detect_callback = NULL;
10241 	return QDF_STATUS_SUCCESS;
10242 }
10243 
10244 /**
10245  * dp_txrx_post_data_stall_event() - post data stall event
10246  * @soc_hdl: Datapath soc handle
10247  * @indicator: Module triggering data stall
10248  * @data_stall_type: data stall event type
10249  * @pdev_id: pdev id
10250  * @vdev_id_bitmap: vdev id bitmap
10251  * @recovery_type: data stall recovery type
10252  *
10253  * Return: None
10254  */
10255 static void
10256 dp_txrx_post_data_stall_event(struct cdp_soc_t *soc_hdl,
10257 			      enum data_stall_log_event_indicator indicator,
10258 			      enum data_stall_log_event_type data_stall_type,
10259 			      uint32_t pdev_id, uint32_t vdev_id_bitmap,
10260 			      enum data_stall_log_recovery_type recovery_type)
10261 {
10262 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10263 	struct data_stall_event_info data_stall_info;
10264 	struct dp_pdev *pdev;
10265 
10266 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10267 	if (!pdev) {
10268 		dp_err("pdev NULL!");
10269 		return;
10270 	}
10271 
10272 	if (!pdev->data_stall_detect_callback) {
10273 		dp_err("data stall cb not registered!");
10274 		return;
10275 	}
10276 
10277 	dp_info("data_stall_type: %x pdev_id: %d",
10278 		data_stall_type, pdev_id);
10279 
10280 	data_stall_info.indicator = indicator;
10281 	data_stall_info.data_stall_type = data_stall_type;
10282 	data_stall_info.vdev_id_bitmap = vdev_id_bitmap;
10283 	data_stall_info.pdev_id = pdev_id;
10284 	data_stall_info.recovery_type = recovery_type;
10285 
10286 	pdev->data_stall_detect_callback(&data_stall_info);
10287 }
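
/*
 * Illustrative usage sketch, compiled out and not part of the original
 * source: registering a detector callback and posting a stall event to
 * it. The callback's void signature and the specific enum constants are
 * assumptions for the demo.
 */
#if 0	/* illustrative sketch */
static void example_stall_cb(struct data_stall_event_info *info)
{
	dp_info("stall type %x pdev %u recovery %d",
		info->data_stall_type, info->pdev_id, info->recovery_type);
}

static void example_data_stall_usage(struct cdp_soc_t *soc, uint8_t pdev_id)
{
	dp_register_data_stall_detect_cb(soc, pdev_id, example_stall_cb);

	/* a TX module later detects a stall on vdev 0 and reports it */
	dp_txrx_post_data_stall_event(soc,
				      DATA_STALL_LOG_INDICATOR_HOST_DRIVER,
				      DATA_STALL_LOG_NONE,
				      pdev_id, 0x1 /* vdev 0 bitmap */,
				      DATA_STALL_LOG_RECOVERY_TRIGGER_PDR);
}
#endif	/* illustrative sketch */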
10288 #endif /* WLAN_SUPPORT_DATA_STALL */
10289 
10290 #ifdef WLAN_FEATURE_STATS_EXT
10291 /* rx hw stats event wait timeout in ms */
10292 #define DP_REO_STATUS_STATS_TIMEOUT 1000
10293 /**
10294  * dp_txrx_ext_stats_request - request dp txrx extended stats request
10295  * @soc_hdl: soc handle
10296  * @pdev_id: pdev id
10297  * @req: stats request
10298  *
10299  * Return: QDF_STATUS
10300  */
10301 static QDF_STATUS
10302 dp_txrx_ext_stats_request(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
10303 			  struct cdp_txrx_ext_stats *req)
10304 {
10305 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10306 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10307 
10308 	if (!pdev) {
10309 		dp_err("pdev is null");
10310 		return QDF_STATUS_E_INVAL;
10311 	}
10312 
10313 	dp_aggregate_pdev_stats(pdev);
10314 
10315 	req->tx_msdu_enqueue = pdev->stats.tx_i.processed.num;
10316 	req->tx_msdu_overflow = pdev->stats.tx_i.dropped.ring_full;
10317 	req->rx_mpdu_received = soc->ext_stats.rx_mpdu_received;
10318 	req->rx_mpdu_delivered = soc->ext_stats.rx_mpdu_received;
10319 	req->rx_mpdu_missed = soc->ext_stats.rx_mpdu_missed;
10320 	req->rx_mpdu_error = soc->stats.rx.err_ring_pkts -
10321 				soc->stats.rx.rx_frags;
10322 
10323 	return QDF_STATUS_SUCCESS;
10324 }
10325 
10326 /**
10327  * dp_rx_hw_stats_cb - request rx hw stats response callback
10328  * @soc: soc handle
10329  * @cb_ctxt: callback context
10330  * @reo_status: reo command response status
10331  *
10332  * Return: None
10333  */
10334 static void dp_rx_hw_stats_cb(struct dp_soc *soc, void *cb_ctxt,
10335 			      union hal_reo_status *reo_status)
10336 {
10337 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
10338 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
10339 
10340 	if (soc->ignore_reo_status_cb) {
10341 		qdf_event_set(&soc->rx_hw_stats_event);
10342 		return;
10343 	}
10344 
10345 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
10346 		dp_info("REO stats failure %d for TID %d",
10347 			queue_status->header.status, rx_tid->tid);
10348 		return;
10349 	}
10350 
10351 	soc->ext_stats.rx_mpdu_received += queue_status->mpdu_frms_cnt;
10352 	soc->ext_stats.rx_mpdu_missed += queue_status->late_recv_mpdu_cnt;
10353 
10354 	if (rx_tid->tid == (DP_MAX_TIDS - 1))
10355 		qdf_event_set(&soc->rx_hw_stats_event);
10356 }
10357 
10358 /**
10359  * dp_request_rx_hw_stats - request rx hardware stats
10360  * @soc_hdl: soc handle
10361  * @vdev_id: vdev id
10362  *
10363  * Return: None
10364  */
10365 static void
10366 dp_request_rx_hw_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
10367 {
10368 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10369 	struct dp_vdev *vdev = dp_get_vdev_from_soc_vdev_id_wifi3(soc, vdev_id);
10370 	struct dp_peer *peer;
10371 
10372 	if (!vdev) {
10373 		dp_err("vdev is null");
10374 		qdf_event_set(&soc->rx_hw_stats_event);
10375 		return;
10376 	}
10377 
10378 	peer = vdev->vap_bss_peer;
10379 
10380 	if (!peer || peer->delete_in_progress) {
10381 		dp_err("Peer deletion in progress");
10382 		qdf_event_set(&soc->rx_hw_stats_event);
10383 		return;
10384 	}
10385 
10386 	qdf_event_reset(&soc->rx_hw_stats_event);
10387 	dp_peer_rxtid_stats(peer, dp_rx_hw_stats_cb, NULL);
10388 }
10389 
10390 /**
10391  * dp_wait_for_ext_rx_stats - wait for rx reo status for rx stats
10392  * @soc_hdl: cdp opaque soc handle
10393  *
10394  * Return: status
10395  */
10396 static QDF_STATUS
10397 dp_wait_for_ext_rx_stats(struct cdp_soc_t *soc_hdl)
10398 {
10399 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
10400 	QDF_STATUS status;
10401 
10402 	status = qdf_wait_single_event(&soc->rx_hw_stats_event,
10403 				       DP_REO_STATUS_STATS_TIMEOUT);
10404 
10405 	return status;
10406 }
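
/*
 * Illustrative usage sketch, compiled out and not part of the original
 * source: the intended request/wait pairing for the REO-based rx hw
 * stats above. The vdev id is hypothetical.
 */
#if 0	/* illustrative sketch */
static void example_collect_rx_hw_stats(struct cdp_soc_t *soc_hdl,
					uint8_t vdev_id)
{
	/* kicks off per-TID REO queue stats commands */
	dp_request_rx_hw_stats(soc_hdl, vdev_id);

	/* blocks until the last TID callback fires or the wait times out */
	if (dp_wait_for_ext_rx_stats(soc_hdl) != QDF_STATUS_SUCCESS)
		dp_err("rx hw stats wait timed out");
}
#endif	/* illustrative sketch */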
10407 #endif /* WLAN_FEATURE_STATS_EXT */
10408 
10409 #ifdef DP_PEER_EXTENDED_API
10410 static struct cdp_misc_ops dp_ops_misc = {
10411 #ifdef FEATURE_WLAN_TDLS
10412 	.tx_non_std = dp_tx_non_std,
10413 #endif /* FEATURE_WLAN_TDLS */
10414 	.get_opmode = dp_get_opmode,
10415 #ifdef FEATURE_RUNTIME_PM
10416 	.runtime_suspend = dp_runtime_suspend,
10417 	.runtime_resume = dp_runtime_resume,
10418 #endif /* FEATURE_RUNTIME_PM */
10419 	.pkt_log_init = dp_pkt_log_init,
10420 	.pkt_log_con_service = dp_pkt_log_con_service,
10421 	.get_num_rx_contexts = dp_get_num_rx_contexts,
10422 	.get_tx_ack_stats = dp_tx_get_success_ack_stats,
10423 #ifdef WLAN_SUPPORT_DATA_STALL
10424 	.txrx_data_stall_cb_register = dp_register_data_stall_detect_cb,
10425 	.txrx_data_stall_cb_deregister = dp_deregister_data_stall_detect_cb,
10426 	.txrx_post_data_stall_event = dp_txrx_post_data_stall_event,
10427 #endif
10428 
10429 #ifdef WLAN_FEATURE_STATS_EXT
10430 	.txrx_ext_stats_request = dp_txrx_ext_stats_request,
10431 	.request_rx_hw_stats = dp_request_rx_hw_stats,
10432 	.wait_for_ext_rx_stats = dp_wait_for_ext_rx_stats,
10433 #endif
10434 };
10435 #endif
10436 
10437 #ifdef DP_FLOW_CTL
10438 static struct cdp_flowctl_ops dp_ops_flowctl = {
10439 	/* WIFI 3.0 DP implement as required. */
10440 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
10441 	.flow_pool_map_handler = dp_tx_flow_pool_map,
10442 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
10443 	.register_pause_cb = dp_txrx_register_pause_cb,
10444 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
10445 	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
10446 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
10447 };
10448 
10449 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
10450 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
10451 };
10452 #endif
10453 
10454 #ifdef IPA_OFFLOAD
10455 static struct cdp_ipa_ops dp_ops_ipa = {
10456 	.ipa_get_resource = dp_ipa_get_resource,
10457 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
10458 	.ipa_op_response = dp_ipa_op_response,
10459 	.ipa_register_op_cb = dp_ipa_register_op_cb,
10460 	.ipa_get_stat = dp_ipa_get_stat,
10461 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
10462 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
10463 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
10464 	.ipa_setup = dp_ipa_setup,
10465 	.ipa_cleanup = dp_ipa_cleanup,
10466 	.ipa_setup_iface = dp_ipa_setup_iface,
10467 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
10468 	.ipa_enable_pipes = dp_ipa_enable_pipes,
10469 	.ipa_disable_pipes = dp_ipa_disable_pipes,
10470 	.ipa_set_perf_level = dp_ipa_set_perf_level,
10471 	.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd
10472 };
10473 #endif
10474 
10475 #ifdef DP_POWER_SAVE
10476 static QDF_STATUS dp_bus_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
10477 {
10478 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10479 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10480 	int timeout = SUSPEND_DRAIN_WAIT;
10481 	int drain_wait_delay = 50; /* 50 ms */
10482 
10483 	if (qdf_unlikely(!pdev)) {
10484 		dp_err("pdev is NULL");
10485 		return QDF_STATUS_E_INVAL;
10486 	}
10487 
10488 	/* Abort if there are any pending TX packets */
10489 	while (dp_get_tx_pending((struct cdp_pdev *)pdev) > 0) {
10490 		qdf_sleep(drain_wait_delay);
10491 		if (timeout <= 0) {
10492 			dp_err("TX frames are pending, abort suspend");
10493 			return QDF_STATUS_E_TIMEOUT;
10494 		}
10495 		timeout = timeout - drain_wait_delay;
10496 	}
10497 
10498 	if (soc->intr_mode == DP_INTR_POLL)
10499 		qdf_timer_stop(&soc->int_timer);
10500 
10501 	/* Stop monitor reap timer and reap any pending frames in ring */
10502 	if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED &&
10503 	    soc->reap_timer_init) {
10504 		qdf_timer_sync_cancel(&soc->mon_reap_timer);
10505 		dp_service_mon_rings(soc, DP_MON_REAP_BUDGET);
10506 	}
10507 
10508 	return QDF_STATUS_SUCCESS;
10509 }
10510 
10511 static QDF_STATUS dp_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
10512 {
10513 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10514 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10515 
10516 	if (qdf_unlikely(!pdev)) {
10517 		dp_err("pdev is NULL");
10518 		return QDF_STATUS_E_INVAL;
10519 	}
10520 
10521 	if (soc->intr_mode == DP_INTR_POLL)
10522 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
10523 
10524 	/* Start monitor reap timer */
10525 	if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED &&
10526 	    soc->reap_timer_init)
10527 		qdf_timer_mod(&soc->mon_reap_timer,
10528 			      DP_INTR_POLL_TIMER_MS);
10529 
10530 	return QDF_STATUS_SUCCESS;
10531 }
10532 
10533 /**
10534  * dp_process_wow_ack_rsp() - process wow ack response
10535  * @soc_hdl: datapath soc handle
10536  * @pdev_id: data path pdev handle id
10537  *
10538  * Return: none
10539  */
10540 static void dp_process_wow_ack_rsp(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
10541 {
10542 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10543 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10544 
10545 	if (qdf_unlikely(!pdev)) {
10546 		dp_err("pdev is NULL");
10547 		return;
10548 	}
10549 
10550 	/*
10551 	 * As part of wow enable, the FW disables the mon status ring; on the
10552 	 * wow ack response from FW, reap the mon status ring to make sure no
10553 	 * packets are left pending in the ring.
10554 	 */
10555 	if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED &&
10556 	    soc->reap_timer_init) {
10557 		dp_service_mon_rings(soc, DP_MON_REAP_BUDGET);
10558 	}
10559 }
10560 
10561 static struct cdp_bus_ops dp_ops_bus = {
10562 	.bus_suspend = dp_bus_suspend,
10563 	.bus_resume = dp_bus_resume,
10564 	.process_wow_ack_rsp = dp_process_wow_ack_rsp,
10565 };
10566 #endif
10567 
10568 #ifdef DP_FLOW_CTL
10569 static struct cdp_throttle_ops dp_ops_throttle = {
10570 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
10571 };
10572 
10573 static struct cdp_cfg_ops dp_ops_cfg = {
10574 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
10575 };
10576 #endif
10577 
10578 #ifdef DP_PEER_EXTENDED_API
10579 static struct cdp_ocb_ops dp_ops_ocb = {
10580 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
10581 };
10582 
10583 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
10584 	.clear_stats = dp_txrx_clear_dump_stats,
10585 };
10586 
10587 static struct cdp_peer_ops dp_ops_peer = {
10588 	.register_peer = dp_register_peer,
10589 	.clear_peer = dp_clear_peer,
10590 	.find_peer_exist = dp_find_peer_exist,
10591 	.find_peer_exist_on_vdev = dp_find_peer_exist_on_vdev,
10592 	.find_peer_exist_on_other_vdev = dp_find_peer_exist_on_other_vdev,
10593 	.peer_state_update = dp_peer_state_update,
10594 	.get_vdevid = dp_get_vdevid,
10595 	.get_vdev_by_peer_addr = dp_get_vdev_by_peer_addr,
10596 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
10597 	.get_peer_state = dp_get_peer_state,
10598 };
10599 #endif
10600 
10601 static struct cdp_ops dp_txrx_ops = {
10602 	.cmn_drv_ops = &dp_ops_cmn,
10603 	.ctrl_ops = &dp_ops_ctrl,
10604 	.me_ops = &dp_ops_me,
10605 	.mon_ops = &dp_ops_mon,
10606 	.host_stats_ops = &dp_ops_host_stats,
10607 	.wds_ops = &dp_ops_wds,
10608 	.raw_ops = &dp_ops_raw,
10609 #ifdef PEER_FLOW_CONTROL
10610 	.pflow_ops = &dp_ops_pflow,
10611 #endif /* PEER_FLOW_CONTROL */
10612 #ifdef DP_PEER_EXTENDED_API
10613 	.misc_ops = &dp_ops_misc,
10614 	.ocb_ops = &dp_ops_ocb,
10615 	.peer_ops = &dp_ops_peer,
10616 	.mob_stats_ops = &dp_ops_mob_stats,
10617 #endif
10618 #ifdef DP_FLOW_CTL
10619 	.cfg_ops = &dp_ops_cfg,
10620 	.flowctl_ops = &dp_ops_flowctl,
10621 	.l_flowctl_ops = &dp_ops_l_flowctl,
10622 	.throttle_ops = &dp_ops_throttle,
10623 #endif
10624 #ifdef IPA_OFFLOAD
10625 	.ipa_ops = &dp_ops_ipa,
10626 #endif
10627 #ifdef DP_POWER_SAVE
10628 	.bus_ops = &dp_ops_bus,
10629 #endif
10630 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
10631 	.cfr_ops = &dp_ops_cfr,
10632 #endif
10633 };
10634 
10635 /*
10636  * dp_soc_set_txrx_ring_map() - set the default CPU-to-TX-ring map
10637  * @soc: DP handle for soc
10638  *
10639  * Return: Void
10640  */
10641 void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
10642 {
10643 	uint32_t i;
10644 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
10645 		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
10646 	}
10647 }
10648 
10649 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018)
10650 
10651 #ifndef QCA_MEM_ATTACH_ON_WIFI3
10652 
10653 /**
10654  * dp_soc_attach_wifi3() - Attach txrx SOC
10655  * @ctrl_psoc: Opaque SOC handle from control plane
10656  * @hif_handle: Opaque HIF handle
10657  * @htc_handle: Opaque HTC handle
10658  * @qdf_osdev: QDF device
10659  * @ol_ops: Offload Operations
10660  * @device_id: Device ID
10661  *
10662  * Return: DP SOC handle on success, NULL on failure
10663  */
10664 struct cdp_soc_t *
10665 dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
10666 		    struct hif_opaque_softc *hif_handle,
10667 		    HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
10668 		    struct ol_if_ops *ol_ops, uint16_t device_id)
10669 {
10670 	struct dp_soc *dp_soc = NULL;
10671 
10672 	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
10673 			       ol_ops, device_id);
10674 	if (!dp_soc)
10675 		return NULL;
10676 
10677 	if (!dp_soc_init(dp_soc, htc_handle, hif_handle))
10678 		return NULL;
10679 
10680 	return dp_soc_to_cdp_soc_t(dp_soc);
10681 }
10682 #else
10683 
10684 /**
10685  * dp_soc_attach_wifi3() - Attach txrx SOC
10686  * @ctrl_psoc: Opaque SOC handle from control plane
10687  * @hif_handle: Opaque HIF handle
10688  * @htc_handle: Opaque HTC handle
10689  * @qdf_osdev: QDF device
10690  * @ol_ops: Offload Operations
10691  * @device_id: Device ID
10692  *
10693  * Return: DP SOC handle on success, NULL on failure
10694  */
10695 struct cdp_soc_t *
10696 dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
10697 		    struct hif_opaque_softc *hif_handle,
10698 		    HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
10699 		    struct ol_if_ops *ol_ops, uint16_t device_id)
10700 {
10701 	struct dp_soc *dp_soc = NULL;
10702 
10703 	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
10704 			       ol_ops, device_id);
10705 	return dp_soc_to_cdp_soc_t(dp_soc);
10706 }
10707 
10708 #endif
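
/*
 * Illustrative usage sketch (not part of the driver): a typical control
 * path brings up the DP SOC with opaque handles obtained from its own
 * attach sequence; every handle below is a placeholder.
 *
 *	struct cdp_soc_t *cdp_soc;
 *
 *	cdp_soc = dp_soc_attach_wifi3(ctrl_psoc, hif_handle, htc_handle,
 *				      qdf_osdev, ol_ops, device_id);
 *	if (!cdp_soc)
 *		return QDF_STATUS_E_FAILURE;
 *
 * With QCA_MEM_ATTACH_ON_WIFI3 defined, the attach above only allocates;
 * dp_soc_init_wifi3() must then be called to complete initialization.
 */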
10709 
10710 /**
10711  * dp_soc_attach() - Attach txrx SOC
10712  * @ctrl_psoc: Opaque SOC handle from control plane
10713  * @htc_handle: Opaque HTC handle
10714  * @qdf_osdev: QDF device
10715  * @ol_ops: Offload Operations
10716  * @device_id: Device ID
10717  *
10718  * Return: DP SOC handle on success, NULL on failure
10719  */
10720 static struct dp_soc *
10721 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
10722 	      HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
10723 	      struct ol_if_ops *ol_ops, uint16_t device_id)
10724 {
10726 	struct dp_soc *soc = NULL;
10727 	struct htt_soc *htt_soc;
10728 
10729 	soc = qdf_mem_malloc(sizeof(*soc));
10730 
10731 	if (!soc) {
10732 		dp_err("DP SOC memory allocation failed");
10733 		goto fail0;
10734 	}
10735 
10737 	soc->device_id = device_id;
10738 	soc->cdp_soc.ops = &dp_txrx_ops;
10739 	soc->cdp_soc.ol_ops = ol_ops;
10740 	soc->ctrl_psoc = ctrl_psoc;
10741 	soc->osdev = qdf_osdev;
10742 	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
10743 
10744 	wlan_set_srng_cfg(&soc->wlan_srng_cfg);
10745 	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
10746 
10747 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
10748 	if (!soc->wlan_cfg_ctx) {
10749 		dp_err("wlan_cfg_soc_attach failed");
10750 		goto fail1;
10751 	}
10752 
10753 	dp_soc_set_interrupt_mode(soc);
10754 
10755 	htt_soc = htt_soc_attach(soc, htc_handle);
10756 
10757 	if (!htt_soc)
10758 		goto fail1;
10759 
10760 	soc->htt_handle = htt_soc;
10761 
10762 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
10763 		goto fail2;
10764 
10765 	return soc;
10766 fail2:
10767 	htt_soc_detach(htt_soc);
10768 fail1:
10769 	qdf_mem_free(soc);
10770 fail0:
10771 	return NULL;
10772 }
10773 
10774 /**
10775  * dp_soc_init() - Initialize txrx SOC
10776  * @soc: DP SOC handle
10777  * @htc_handle: Opaque HTC handle
10778  * @hif_handle: Opaque HIF handle
10779  *
10780  * Return: DP SOC handle on success, NULL on failure
10781  */
10782 void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
10783 		  struct hif_opaque_softc *hif_handle)
10784 {
10785 	int target_type;
10786 	struct htt_soc *htt_soc = (struct htt_soc *)soc->htt_handle;
10787 	bool is_monitor_mode = false;
10788 
10789 	htt_set_htc_handle(htt_soc, htc_handle);
10790 	soc->hif_handle = hif_handle;
10791 
10792 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
10793 	if (!soc->hal_soc)
10794 		return NULL;
10795 
10796 	htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
10797 			   htt_get_htc_handle(htt_soc),
10798 			   soc->hal_soc, soc->osdev);
10799 	target_type = hal_get_target_type(soc->hal_soc);
10800 	switch (target_type) {
10801 	case TARGET_TYPE_QCA6290:
10802 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
10803 					       REO_DST_RING_SIZE_QCA6290);
10804 		soc->ast_override_support = 1;
10805 		soc->da_war_enabled = false;
10806 		break;
10807 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
10808     defined(QCA_WIFI_QCA6750)
10809 	case TARGET_TYPE_QCA6390:
10810 	case TARGET_TYPE_QCA6490:
10811 	case TARGET_TYPE_QCA6750:
10812 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
10813 					       REO_DST_RING_SIZE_QCA6290);
10814 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
10815 		soc->ast_override_support = 1;
10816 		if (soc->cdp_soc.ol_ops->get_con_mode &&
10817 		    soc->cdp_soc.ol_ops->get_con_mode() ==
10818 		    QDF_GLOBAL_MONITOR_MODE) {
10819 			int int_ctx;
10820 
10821 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
10822 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
10823 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
10824 			}
10825 		}
10826 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
10827 		break;
10828 #endif /* QCA_WIFI_QCA6390 || QCA_WIFI_QCA6490 || QCA_WIFI_QCA6750 */
10829 
10830 	case TARGET_TYPE_QCA8074:
10831 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
10832 					       REO_DST_RING_SIZE_QCA8074);
10833 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
10834 		soc->da_war_enabled = true;
10835 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
10836 		break;
10837 	case TARGET_TYPE_QCA8074V2:
10838 	case TARGET_TYPE_QCA6018:
10839 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
10840 					       REO_DST_RING_SIZE_QCA8074);
10841 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
10842 		soc->hw_nac_monitor_support = 1;
10843 		soc->ast_override_support = 1;
10844 		soc->per_tid_basize_max_tid = 8;
10845 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
10846 		soc->da_war_enabled = false;
10847 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
10848 		break;
10849 	case TARGET_TYPE_QCN9000:
10850 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
10851 					       REO_DST_RING_SIZE_QCN9000);
10852 		soc->ast_override_support = 1;
10853 		soc->da_war_enabled = false;
10854 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
10855 		soc->hw_nac_monitor_support = 1;
10856 		soc->per_tid_basize_max_tid = 8;
10857 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
10858 		soc->lmac_polled_mode = 1;
10859 		break;
10860 	default:
10861 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
10862 		qdf_assert_always(0);
10863 		break;
10864 	}
10865 
10866 	dp_soc_set_interrupt_mode(soc);
10867 	if (soc->cdp_soc.ol_ops->get_con_mode &&
10868 	    soc->cdp_soc.ol_ops->get_con_mode() ==
10869 	    QDF_GLOBAL_MONITOR_MODE)
10870 		is_monitor_mode = true;
10871 
10872 	wlan_cfg_fill_interrupt_mask(soc->wlan_cfg_ctx, soc->intr_mode,
10873 				     is_monitor_mode);
10874 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
10875 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
10876 	soc->cce_disable = false;
10877 
10878 	qdf_atomic_init(&soc->num_tx_outstanding);
10879 	soc->num_tx_allowed =
10880 		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);
10881 
10882 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
10883 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
10884 				CDP_CFG_MAX_PEER_ID);
10885 
10886 		if (ret != -EINVAL) {
10887 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
10888 		}
10889 
10890 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
10891 				CDP_CFG_CCE_DISABLE);
10892 		if (ret == 1)
10893 			soc->cce_disable = true;
10894 	}
10895 
10896 	qdf_spinlock_create(&soc->peer_ref_mutex);
10897 	qdf_spinlock_create(&soc->ast_lock);
10898 
10899 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
10900 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
10901 
10902 	/* fill the tx/rx cpu ring map */
10903 	dp_soc_set_txrx_ring_map(soc);
10904 
10905 	qdf_spinlock_create(&soc->htt_stats.lock);
10906 	/* initialize work queue for stats processing */
10907 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
10908 
10909 	return soc;
10911 }
10912 
10913 /**
10914  * dp_soc_init_wifi3() - Initialize txrx SOC
10915  * @soc: Opaque DP SOC handle
10916  * @ctrl_psoc: Opaque SOC handle from control plane (Unused)
10917  * @hif_handle: Opaque HIF handle
10918  * @htc_handle: Opaque HTC handle
10919  * @qdf_osdev: QDF device (Unused)
10920  * @ol_ops: Offload Operations (Unused)
10921  * @device_id: Device ID (Unused)
10922  *
10923  * Return: DP SOC handle on success, NULL on failure
10924  */
10925 void *dp_soc_init_wifi3(struct cdp_soc_t *soc,
10926 			struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
10927 			struct hif_opaque_softc *hif_handle,
10928 			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
10929 			struct ol_if_ops *ol_ops, uint16_t device_id)
10930 {
10931 	return dp_soc_init((struct dp_soc *)soc, htc_handle, hif_handle);
10932 }
10933 
10934 #endif
10935 
10936 /*
10937  * dp_get_pdev_for_mac_id() - Return pdev for mac_id
10938  *
10939  * @soc: handle to DP soc
10940  * @mac_id: MAC id
10941  *
10942  * Return: pdev corresponding to the MAC id
10943  */
10944 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
10945 {
10946 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
10947 		return soc->pdev_list[mac_id];
10948 
10949 	/* Typically for MCL, as there is only 1 PDEV */
10950 	return soc->pdev_list[0];
10951 }
10952 
10953 /*
10954  * dp_is_hw_dbs_enable() - Procedure to check if DBS is supported
10955  * @soc:		DP SoC context
10956  * @max_mac_rings:	pointer to the number of MAC rings; set to 1
10957  *			when DBS is not supported
10957  *
10958  * Return: None
10959  */
10960 void dp_is_hw_dbs_enable(struct dp_soc *soc, int *max_mac_rings)
10962 {
10963 	bool dbs_enable = false;
10964 	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
10965 		dbs_enable = soc->cdp_soc.ol_ops->
10966 		is_hw_dbs_2x2_capable((void *)soc->ctrl_psoc);
10967 
10968 	*max_mac_rings = dbs_enable ? *max_mac_rings : 1;
10969 }
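
/*
 * Illustrative usage sketch, mirroring the calls in dp_cfr_filter() and
 * dp_set_pktlog_wifi3() below: start from the configured ring count and
 * let dp_is_hw_dbs_enable() clamp it to 1 when DBS is not supported.
 *
 *	int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
 *
 *	dp_is_hw_dbs_enable(soc, &max_mac_rings);
 */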
10970 
10971 #if defined(WLAN_CFR_ENABLE) && defined(WLAN_ENH_CFR_ENABLE)
10972 /*
10973  * dp_cfr_filter() - Configure HOST RX monitor status ring for CFR
10974  * @soc_hdl: Datapath soc handle
10975  * @pdev_id: id of data path pdev handle
10976  * @enable: Enable/Disable CFR
10977  * @filter_val: Flag to select Filter for monitor mode
 */
10979 static void dp_cfr_filter(struct cdp_soc_t *soc_hdl,
10980 			  uint8_t pdev_id,
10981 			  bool enable,
10982 			  struct cdp_monitor_filter *filter_val)
10983 {
10984 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
10985 	struct dp_pdev *pdev = NULL;
10986 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
10987 	int max_mac_rings;
10988 	uint8_t mac_id = 0;
10989 
10990 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
10991 	if (!pdev) {
10992 		dp_err("pdev is NULL");
10993 		return;
10994 	}
10995 
10996 	if (pdev->monitor_vdev) {
10997 		dp_info("No action is needed since monitor mode is enabled\n");
10998 		return;
10999 	}
11000 	soc = pdev->soc;
11001 	pdev->cfr_rcc_mode = false;
11002 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
11003 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
11004 
11005 	dp_debug("Max_mac_rings %d", max_mac_rings);
11006 	dp_info("enable : %d, mode: 0x%x", enable, filter_val->mode);
11007 
11008 	if (enable) {
11009 		pdev->cfr_rcc_mode = true;
11010 
11011 		htt_tlv_filter.ppdu_start = 1;
11012 		htt_tlv_filter.ppdu_end = 1;
11013 		htt_tlv_filter.ppdu_end_user_stats = 1;
11014 		htt_tlv_filter.ppdu_end_user_stats_ext = 1;
11015 		htt_tlv_filter.ppdu_end_status_done = 1;
11016 		htt_tlv_filter.mpdu_start = 1;
11017 		htt_tlv_filter.offset_valid = false;
11018 
11019 		htt_tlv_filter.enable_fp =
11020 			(filter_val->mode & MON_FILTER_PASS) ? 1 : 0;
11021 		htt_tlv_filter.enable_md = 0;
11022 		htt_tlv_filter.enable_mo =
11023 			(filter_val->mode & MON_FILTER_OTHER) ? 1 : 0;
11024 		htt_tlv_filter.fp_mgmt_filter = filter_val->fp_mgmt;
11025 		htt_tlv_filter.fp_ctrl_filter = filter_val->fp_ctrl;
11026 		htt_tlv_filter.fp_data_filter = filter_val->fp_data;
11027 		htt_tlv_filter.mo_mgmt_filter = filter_val->mo_mgmt;
11028 		htt_tlv_filter.mo_ctrl_filter = filter_val->mo_ctrl;
11029 		htt_tlv_filter.mo_data_filter = filter_val->mo_data;
11030 	}
11031 
11032 	for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
11033 		int mac_for_pdev =
11034 			dp_get_mac_id_for_pdev(mac_id,
11035 					       pdev->pdev_id);
11036 
11037 		htt_h2t_rx_ring_cfg(soc->htt_handle,
11038 				    mac_for_pdev,
11039 				    soc->rxdma_mon_status_ring[mac_id]
11040 				    .hal_srng,
11041 				    RXDMA_MONITOR_STATUS,
11042 				    RX_DATA_BUFFER_SIZE,
11043 				    &htt_tlv_filter);
11044 	}
11045 }
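
/*
 * Illustrative usage sketch (not part of the driver): enabling CFR
 * capture for filter-pass frames. The filter bitmap values below are
 * placeholders; real values come from the control-plane request.
 *
 *	struct cdp_monitor_filter filter_val = {0};
 *
 *	filter_val.mode = MON_FILTER_PASS;
 *	filter_val.fp_mgmt = 0xffff;	(placeholder mgmt filter bitmap)
 *	dp_cfr_filter(soc_hdl, pdev_id, true, &filter_val);
 */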
11046 
11047 /**
11048  * dp_get_cfr_rcc() - get cfr rcc config
11049  * @soc_hdl: Datapath soc handle
11050  * @pdev_id: id of objmgr pdev
11051  *
11052  * Return: true/false based on cfr mode setting
11053  */
11054 static
11055 bool dp_get_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
11056 {
11057 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11058 	struct dp_pdev *pdev = NULL;
11059 
11060 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11061 	if (!pdev) {
11062 		dp_err("pdev is NULL");
11063 		return false;
11064 	}
11065 
11066 	return pdev->cfr_rcc_mode;
11067 }
11068 
11069 /**
11070  * dp_set_cfr_rcc() - enable/disable cfr rcc config
11071  * @soc_hdl: Datapath soc handle
11072  * @pdev_id: id of objmgr pdev
11073  * @enable: Enable/Disable cfr rcc mode
11074  *
11075  * Return: none
11076  */
11077 static
11078 void dp_set_cfr_rcc(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, bool enable)
11079 {
11080 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
11081 	struct dp_pdev *pdev = NULL;
11082 
11083 	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
11084 	if (!pdev) {
11085 		dp_err("pdev is NULL");
11086 		return;
11087 	}
11088 
11089 	pdev->cfr_rcc_mode = enable;
11090 }
11091 #endif
11092 
11093 /*
11094  * dp_is_soc_reinit() - Check if soc reinit is true
11095  * @soc: DP SoC context
11096  *
11097  * Return: true or false
11098  */
11099 bool dp_is_soc_reinit(struct dp_soc *soc)
11100 {
11101 	return soc->dp_soc_reinit;
11102 }
11103 
11104 /*
11105  * dp_set_pktlog_wifi3() - enable/disable packet log
11106  * @pdev: Datapath PDEV handle
11107  * @event: which event's notifications are being subscribed to
11108  * @enable: WDI event subscribe or not. (True or False)
11109  *
11110  * Return: 0 on success
11111  */
11112 #ifdef WDI_EVENT_ENABLE
11113 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
11114 		bool enable)
11115 {
11116 	struct dp_soc *soc = NULL;
11117 	int max_mac_rings = wlan_cfg_get_num_mac_rings
11118 					(pdev->wlan_cfg_ctx);
11119 	uint8_t mac_id = 0;
11120 
11121 	soc = pdev->soc;
11122 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
11123 
11124 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
11125 			FL("Max_mac_rings %d "),
11126 			max_mac_rings);
11127 
11128 	if (enable) {
11129 		switch (event) {
11130 		case WDI_EVENT_RX_DESC:
11131 			if (pdev->monitor_vdev) {
11132 				/* Nothing needs to be done if monitor mode is
11133 				 * enabled
11134 				 */
11135 				return 0;
11136 			}
11137 
11138 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
11139 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
11140 				dp_mon_filter_setup_rx_pkt_log_full(pdev);
11141 				if (dp_mon_filter_update(pdev) !=
11142 						QDF_STATUS_SUCCESS) {
11143 					QDF_TRACE(QDF_MODULE_ID_DP,
11144 						  QDF_TRACE_LEVEL_ERROR,
11145 						  FL("Pktlog full filters set failed"));
11146 					dp_mon_filter_reset_rx_pkt_log_full(pdev);
11147 					pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
11148 					return 0;
11149 				}
11150 
11151 				if (soc->reap_timer_init)
11152 					qdf_timer_mod(&soc->mon_reap_timer,
11153 					DP_INTR_POLL_TIMER_MS);
11154 			}
11155 			break;
11156 
11157 		case WDI_EVENT_LITE_RX:
11158 			if (pdev->monitor_vdev) {
11159 				/* Nothing needs to be done if monitor mode is
11160 				 * enabled
11161 				 */
11162 				return 0;
11163 			}
11164 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
11165 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
11166 
11167 				/*
11168 				 * Set the packet log lite mode filter.
11169 				 */
11170 				dp_mon_filter_setup_rx_pkt_log_lite(pdev);
11171 				if (dp_mon_filter_update(pdev) != QDF_STATUS_SUCCESS) {
11172 					QDF_TRACE(QDF_MODULE_ID_DP,
11173 						  QDF_TRACE_LEVEL_ERROR,
11174 						  FL("Pktlog lite filters set failed"));
11175 					dp_mon_filter_reset_rx_pkt_log_lite(pdev);
11176 					pdev->rx_pktlog_mode =
11177 						DP_RX_PKTLOG_DISABLED;
11178 					return 0;
11179 				}
11180 
11181 				if (soc->reap_timer_init)
11182 					qdf_timer_mod(&soc->mon_reap_timer,
11183 					DP_INTR_POLL_TIMER_MS);
11184 			}
11185 			break;
11186 
11187 		case WDI_EVENT_LITE_T2H:
11188 			if (pdev->monitor_vdev) {
11189 				/* Nothing needs to be done if monitor mode is
11190 				 * enabled
11191 				 */
11192 				return 0;
11193 			}
11194 
11195 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
11196 				int mac_for_pdev = dp_get_mac_id_for_pdev(
11197 							mac_id,	pdev->pdev_id);
11198 
11199 				pdev->pktlog_ppdu_stats = true;
11200 				dp_h2t_cfg_stats_msg_send(pdev,
11201 					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
11202 					mac_for_pdev);
11203 			}
11204 			break;
11205 
11206 		default:
11207 			/* Nothing needs to be done for other pktlog types */
11208 			break;
11209 		}
11210 	} else {
11211 		switch (event) {
11212 		case WDI_EVENT_RX_DESC:
11213 		case WDI_EVENT_LITE_RX:
11214 			if (pdev->monitor_vdev) {
11215 				/* Nothing needs to be done if monitor mode is
11216 				 * enabled
11217 				 */
11218 				return 0;
11219 			}
11220 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
11221 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
11222 				dp_mon_filter_reset_rx_pkt_log_full(pdev);
11223 				if (dp_mon_filter_update(pdev) !=
11224 						QDF_STATUS_SUCCESS) {
11225 					QDF_TRACE(QDF_MODULE_ID_DP,
11226 						  QDF_TRACE_LEVEL_ERROR,
11227 						  FL("Pktlog filters reset failed"));
11228 					return 0;
11229 				}
11230 
11231 				dp_mon_filter_reset_rx_pkt_log_lite(pdev);
11232 				if (dp_mon_filter_update(pdev) !=
11233 						QDF_STATUS_SUCCESS) {
11234 					QDF_TRACE(QDF_MODULE_ID_DP,
11235 						  QDF_TRACE_LEVEL_ERROR,
11236 						  FL("Pktlog filters reset failed"));
11237 					return 0;
11238 				}
11239 
11240 				if (soc->reap_timer_init)
11241 					qdf_timer_stop(&soc->mon_reap_timer);
11242 			}
11243 			break;
11244 		case WDI_EVENT_LITE_T2H:
11245 			if (pdev->monitor_vdev) {
11246 				/* Nothing needs to be done if monitor mode is
11247 				 * enabled
11248 				 */
11249 				return 0;
11250 			}
11251 			/* To disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG in the
11252 			 * FW, pass the value 0. Once these macros are defined
11253 			 * in the htt header file, the proper macros will be
11254 			 * used here.
11255 			 */
11255 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
11256 				int mac_for_pdev =
11257 						dp_get_mac_id_for_pdev(mac_id,
11258 								pdev->pdev_id);
11259 
11260 				pdev->pktlog_ppdu_stats = false;
11261 				if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
11262 					dp_h2t_cfg_stats_msg_send(pdev, 0,
11263 								mac_for_pdev);
11264 				} else if (pdev->tx_sniffer_enable || pdev->mcopy_mode) {
11265 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_SNIFFER,
11266 								mac_for_pdev);
11267 				} else if (pdev->enhanced_stats_en) {
11268 					dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
11269 								mac_for_pdev);
11270 				}
11271 			}
11272 
11273 			break;
11274 		default:
11275 			/* Nothing needs to be done for other pktlog types */
11276 			break;
11277 		}
11278 	}
11279 	return 0;
11280 }
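
/*
 * Illustrative usage sketch: a WDI subscriber enabling lite RX packet
 * logging on a pdev and later disabling it.
 *
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, true);
 *	...
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, false);
 */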
11281 #endif
11282 
11283 /**
11284  * dp_bucket_index() - Return delay bucket index for a measured delay
11285  *
11286  * @delay: delay measured, in ms
11287  * @array: array of delay bucket boundaries, in ms
11288  *
11289  * Return: index of the bucket containing @delay
11290  */
11291 static uint8_t dp_bucket_index(uint32_t delay, uint16_t *array)
11292 {
11293 	uint8_t i = CDP_DELAY_BUCKET_0;
11294 
11295 	for (; i < CDP_DELAY_BUCKET_MAX - 1; i++) {
11296 		if (delay >= array[i] && delay <= array[i + 1])
11297 			return i;
11298 	}
11299 
11300 	return (CDP_DELAY_BUCKET_MAX - 1);
11301 }
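
/*
 * Worked example: with the cdp_fw_to_hw_delay boundaries defined below
 * ({0, 10, 20, 30, 40, ...} ms), a measured delay of 35 ms satisfies
 * delay >= array[3] (30) and delay <= array[4] (40), so index 3 is
 * returned. Delays beyond the last boundary fall through to the final
 * bucket, CDP_DELAY_BUCKET_MAX - 1. Note the loop stops one short of
 * the array end so that array[i + 1] never reads out of bounds.
 */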
11302 
11303 /**
11304  * dp_fill_delay_buckets() - Fill delay statistics bucket for each
11305  *				type of delay
11306  *
11307  * @pdev: pdev handle
11308  * @delay: delay in ms
11309  * @tid: tid value
11310  * @mode: type of tx delay mode
11311  * @ring_id: ring number
11312  * Return: pointer to cdp_delay_stats structure
11313  */
11314 static struct cdp_delay_stats *
11315 dp_fill_delay_buckets(struct dp_pdev *pdev, uint32_t delay,
11316 		      uint8_t tid, uint8_t mode, uint8_t ring_id)
11317 {
11318 	uint8_t delay_index = 0;
11319 	struct cdp_tid_tx_stats *tstats =
11320 		&pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
11321 	struct cdp_tid_rx_stats *rstats =
11322 		&pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
11323 	/*
11324 	 * cdp_fw_to_hw_delay_range
11325 	 * Fw to hw delay ranges in milliseconds
11326 	 */
11327 	uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
11328 		0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
11329 
11330 	/*
11331 	 * cdp_sw_enq_delay_range
11332 	 * Software enqueue delay ranges in milliseconds
11333 	 */
11334 	uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
11335 		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
11336 
11337 	/*
11338 	 * cdp_intfrm_delay_range
11339 	 * Interframe delay ranges in milliseconds
11340 	 */
11341 	uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
11342 		0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};
11343 
11344 	/*
11345 	 * Update delay stats in proper bucket
11346 	 */
11347 	switch (mode) {
11348 	/* Software Enqueue delay ranges */
11349 	case CDP_DELAY_STATS_SW_ENQ:
11350 
11351 		delay_index = dp_bucket_index(delay, cdp_sw_enq_delay);
11352 		tstats->swq_delay.delay_bucket[delay_index]++;
11353 		return &tstats->swq_delay;
11354 
11355 	/* Tx Completion delay ranges */
11356 	case CDP_DELAY_STATS_FW_HW_TRANSMIT:
11357 
11358 		delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay);
11359 		tstats->hwtx_delay.delay_bucket[delay_index]++;
11360 		return &tstats->hwtx_delay;
11361 
11362 	/* Interframe tx delay ranges */
11363 	case CDP_DELAY_STATS_TX_INTERFRAME:
11364 
11365 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
11366 		tstats->intfrm_delay.delay_bucket[delay_index]++;
11367 		return &tstats->intfrm_delay;
11368 
11369 	/* Interframe rx delay ranges */
11370 	case CDP_DELAY_STATS_RX_INTERFRAME:
11371 
11372 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
11373 		rstats->intfrm_delay.delay_bucket[delay_index]++;
11374 		return &rstats->intfrm_delay;
11375 
11376 	/* Ring reap to indication to network stack */
11377 	case CDP_DELAY_STATS_REAP_STACK:
11378 
11379 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
11380 		rstats->to_stack_delay.delay_bucket[delay_index]++;
11381 		return &rstats->to_stack_delay;
11382 	default:
11383 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
11384 			  "%s Incorrect delay mode: %d", __func__, mode);
11385 	}
11386 
11387 	return NULL;
11388 }
11389 
11390 /**
11391  * dp_update_delay_stats() - Update delay statistics in structure
11392  *				and fill min, max and avg delay
11393  *
11394  * @pdev: pdev handle
11395  * @delay: delay in ms
11396  * @tid: tid value
11397  * @mode: type of tx delay mode
11398  * @ring_id: ring number
11399  * Return: none
11400  */
11401 void dp_update_delay_stats(struct dp_pdev *pdev, uint32_t delay,
11402 			   uint8_t tid, uint8_t mode, uint8_t ring_id)
11403 {
11404 	struct cdp_delay_stats *dstats = NULL;
11405 
11406 	/*
11407 	 * Delay ranges are different for different delay modes
11408 	 * Get the correct index to update delay bucket
11409 	 */
11410 	dstats = dp_fill_delay_buckets(pdev, delay, tid, mode, ring_id);
11411 	if (qdf_unlikely(!dstats))
11412 		return;
11413 
11414 	if (delay != 0) {
11415 		/*
11416 		 * Compute minimum,average and maximum
11417 		 * delay
11418 		 */
11419 		if (delay < dstats->min_delay)
11420 			dstats->min_delay = delay;
11421 
11422 		if (delay > dstats->max_delay)
11423 			dstats->max_delay = delay;
11424 
11425 		/*
11426 		 * Running average: each new sample gets weight 1/2
11427 		 */
11428 		if (!dstats->avg_delay)
11429 			dstats->avg_delay = delay;
11430 		else
11431 			dstats->avg_delay = ((delay + dstats->avg_delay) / 2);
11432 	}
11433 }
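
/*
 * Worked example of the running average above (integer division):
 * successive samples of 10, 20 and 40 ms give avg_delay = 10, then
 * (20 + 10) / 2 = 15, then (40 + 15) / 2 = 27. Each new sample carries
 * weight 1/2, so the value tracks recent delays rather than being a
 * true mean over all samples.
 */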
11434 
11435 /**
11436  * dp_get_peer_mac_list() - function to get the peer MAC list of a vdev
11437  * @soc: Datapath soc handle
11438  * @vdev_id: vdev id
11439  * @newmac: Table to fill with the clients' MAC addresses
11440  * @mac_cnt: Maximum number of MACs required
11441  *
11442  * Return: number of client MAC addresses filled in @newmac
11443  */
11444 uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
11445 			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
11446 			      u_int16_t mac_cnt)
11447 {
11448 	struct dp_vdev *vdev =
11449 		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
11450 						   vdev_id);
11451 	struct dp_soc *dp_soc = (struct dp_soc *)soc;
11452 	struct dp_peer *peer;
11453 	uint16_t new_mac_cnt = 0;
11454 
11455 	if (!vdev)
11456 		return new_mac_cnt;
11457 
11458 	qdf_spin_lock_bh(&dp_soc->peer_ref_mutex);
11459 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
11460 		if (peer->bss_peer)
11461 			continue;
11462 		if (new_mac_cnt < mac_cnt) {
11463 			WLAN_ADDR_COPY(newmac[new_mac_cnt], peer->mac_addr.raw);
11464 			new_mac_cnt++;
11465 		}
11466 	}
11467 	qdf_spin_unlock_bh(&dp_soc->peer_ref_mutex);
11468 	return new_mac_cnt;
11469 }
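
/*
 * Illustrative usage sketch (not part of the driver): a caller sizing a
 * table for up to 8 clients; vdev_id is a placeholder.
 *
 *	u_int8_t macs[8][QDF_MAC_ADDR_SIZE];
 *	uint16_t n;
 *
 *	n = dp_get_peer_mac_list(soc, vdev_id, macs, 8);
 *
 * n then holds the number of entries actually written to macs.
 */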
11470