xref: /wlan-dirver/qca-wifi-host-cmn/dp/wifi3.0/dp_main.c (revision 11f5a63a6cbdda84849a730de22f0a71e635d58c)
/*
 * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_types.h>
#include <qdf_lock.h>
#include <qdf_net_types.h>
#include <qdf_lro.h>
#include <qdf_module.h>
#include <hal_hw_headers.h>
#include <hal_api.h>
#include <hif.h>
#include <htt.h>
#include <wdi_event.h>
#include <queue.h>
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_tx.h"
#include "dp_tx_desc.h"
#include "dp_rx.h"
#include "dp_rx_mon.h"
#ifdef DP_RATETABLE_SUPPORT
#include "dp_ratetable.h"
#endif
#include <cdp_txrx_handle.h>
#include <wlan_cfg.h>
#include "cdp_txrx_cmn_struct.h"
#include "cdp_txrx_stats_struct.h"
#include "cdp_txrx_cmn_reg.h"
#include <qdf_util.h>
#include "dp_peer.h"
#include "dp_rx_mon.h"
#include "htt_stats.h"
#include "dp_htt.h"
#include "htt_ppdu_stats.h"
#include "qdf_mem.h"   /* qdf_mem_malloc,free */
#include "cfg_ucfg_api.h"
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
#include "cdp_txrx_flow_ctrl_v2.h"
#else
static inline void
cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
{
	return;
}
#endif
#include "dp_ipa.h"
#include "dp_cal_client_api.h"
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#ifdef ATH_SUPPORT_IQUE
#include "dp_txrx_me.h"
#endif
#if defined(DP_CON_MON)
#ifndef REMOVE_PKT_LOG
#include <pktlog_ac_api.h>
#include <pktlog_ac.h>
#endif
#endif

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
/*
 * If WLAN_CFG_INT_NUM_CONTEXTS is changed, HIF_NUM_INT_CONTEXTS
 * should also be updated accordingly.
 */
QDF_COMPILE_TIME_ASSERT(num_intr_grps,
			HIF_NUM_INT_CONTEXTS == WLAN_CFG_INT_NUM_CONTEXTS);

/*
 * HIF_EVENT_HIST_MAX should always be a power of 2.
 */
QDF_COMPILE_TIME_ASSERT(hif_event_history_size,
			(HIF_EVENT_HIST_MAX & (HIF_EVENT_HIST_MAX - 1)) == 0);
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */

/*
 * If WLAN_CFG_INT_NUM_CONTEXTS is changed,
 * WLAN_CFG_INT_NUM_CONTEXTS_MAX should also be updated.
 */
QDF_COMPILE_TIME_ASSERT(wlan_cfg_num_int_ctxs,
			WLAN_CFG_INT_NUM_CONTEXTS_MAX >=
			WLAN_CFG_INT_NUM_CONTEXTS);

#ifdef WLAN_RX_PKT_CAPTURE_ENH
#include "dp_rx_mon_feature.h"
#else
/*
 * dp_config_enh_rx_capture() - API to enable/disable enhanced rx capture
 * @pdev_handle: DP_PDEV handle
 * @val: user provided value
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_config_enh_rx_capture(struct cdp_pdev *pdev_handle, uint8_t val)
{
	return QDF_STATUS_E_INVAL;
}
#endif /* WLAN_RX_PKT_CAPTURE_ENH */

#ifdef WLAN_TX_PKT_CAPTURE_ENH
#include "dp_tx_capture.h"
#else
/*
 * dp_config_enh_tx_capture() - API to enable/disable enhanced tx capture
 * @pdev_handle: DP_PDEV handle
 * @val: user provided value
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_config_enh_tx_capture(struct cdp_pdev *pdev_handle, uint8_t val)
{
	return QDF_STATUS_E_INVAL;
}
#endif

void *dp_soc_init(void *dpsoc, HTC_HANDLE htc_handle,
		  struct hif_opaque_softc *hif_handle);
static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force);
static struct dp_soc *
dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, HTC_HANDLE htc_handle,
	      qdf_device_t qdf_osdev,
	      struct ol_if_ops *ol_ops, uint16_t device_id);
static void dp_pktlogmod_exit(struct dp_pdev *handle);
static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
				uint8_t *peer_mac_addr,
				struct cdp_ctrl_objmgr_peer *ctrl_peer);
static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap);
static void dp_ppdu_ring_reset(struct dp_pdev *pdev);
static void dp_ppdu_ring_cfg(struct dp_pdev *pdev);
static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle,
				bool unmap_only);
#ifdef ENABLE_VERBOSE_DEBUG
bool is_dp_verbose_debug_enabled;
#endif

static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc,
					    enum hal_ring_type ring_type,
					    int ring_num);
#define DP_INTR_POLL_TIMER_MS	10
/* Generic AST entry aging timer value */
#define DP_AST_AGING_TIMER_DEFAULT_MS	1000
#define DP_MCS_LENGTH (6*MAX_MCS)

#define DP_CURR_FW_STATS_AVAIL 19
#define DP_HTT_DBG_EXT_STATS_MAX 256
#define DP_MAX_SLEEP_TIME 100
#ifndef QCA_WIFI_3_0_EMU
#define SUSPEND_DRAIN_WAIT 500
#else
#define SUSPEND_DRAIN_WAIT 3000
#endif

#ifdef IPA_OFFLOAD
/* Exclude IPA rings from the interrupt context */
#define TX_RING_MASK_VAL	0xb
#define RX_RING_MASK_VAL	0x7
#else
#define TX_RING_MASK_VAL	0xF
#define RX_RING_MASK_VAL	0xF
#endif

#define STR_MAXLEN	64

#define RNG_ERR		"SRNG setup failed for"

/* Threshold for peer's cached buf queue beyond which frames are dropped */
#define DP_RX_CACHED_BUFQ_THRESH 64

/**
 * default_dscp_tid_map - Default DSCP-TID mapping
 *
 * DSCP        TID
 * 000000      0
 * 001000      1
 * 010000      2
 * 011000      3
 * 100000      4
 * 101000      5
 * 110000      6
 * 111000      7
 */
static uint8_t default_dscp_tid_map[DSCP_TID_MAP_MAX] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7,
};
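
/*
 * Example (illustrative): the map is indexed by the 6-bit DSCP value,
 * so DSCP 46 (EF, 101110b) falls in the 101xxx block and resolves to
 * default_dscp_tid_map[46] == 5, i.e. TID 5.
 */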

/**
 * default_pcp_tid_map - Default PCP-TID mapping
 *
 * PCP     TID
 * 000      0
 * 001      1
 * 010      2
 * 011      3
 * 100      4
 * 101      5
 * 110      6
 * 111      7
 */
static uint8_t default_pcp_tid_map[PCP_TID_MAP_MAX] = {
	0, 1, 2, 3, 4, 5, 6, 7,
};
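
/*
 * Example (illustrative): PCP is the 3-bit 802.1p priority carried in the
 * VLAN tag, so a frame tagged with PCP 6 maps to default_pcp_tid_map[6] == 6.
 */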

/**
 * @brief Cpu to tx ring map
 */
uint8_t
dp_cpu_ring_map[DP_NSS_CPU_RING_MAP_MAX][WLAN_CFG_INT_NUM_CONTEXTS_MAX] = {
	{0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2, 0x0, 0x0, 0x1, 0x2},
	{0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1, 0x2, 0x1},
	{0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0, 0x2, 0x0},
	{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2},
	{0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3},
#ifdef WLAN_TX_PKT_CAPTURE_ENH
	{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
#endif
};
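
/*
 * Illustration (assumption based on the table layout): the row is selected
 * by the NSS offload configuration and the column by the CPU/interrupt
 * context, e.g. dp_cpu_ring_map[0][3] == 0x0 means that in the default
 * (no NSS offload) mode, context 3 transmits on TCL ring 0.
 */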

/**
 * @brief Select the type of statistics
 */
enum dp_stats_type {
	STATS_FW = 0,
	STATS_HOST = 1,
	STATS_TYPE_MAX = 2,
};

/**
 * @brief General Firmware statistics options
 */
enum dp_fw_stats {
	TXRX_FW_STATS_INVALID	= -1,
};

/**
 * dp_stats_mapping_table - Firmware and Host statistics
 * currently supported
 */
const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
	{HTT_DBG_EXT_STATS_RESET, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_SCHED, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_ERROR, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TQM, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TQM_CMDQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_DE_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_RATE, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_RX_RATE, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_SELFGEN_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_TX_MU_HWQ, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_RING_IF_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SRNG_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_SFM_INFO, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_PDEV_TX_MU, TXRX_HOST_STATS_INVALID},
	{HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST, TXRX_HOST_STATS_INVALID},
	/* Last ENUM for HTT FW STATS */
	{DP_HTT_DBG_EXT_STATS_MAX, TXRX_HOST_STATS_INVALID},
	{TXRX_FW_STATS_INVALID, TXRX_CLEAR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_RATE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_TX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_AST_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SRNG_PTR_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_RX_MON_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_REO_QUEUE_STATS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_CFG_PARAMS},
	{TXRX_FW_STATS_INVALID, TXRX_PDEV_CFG_PARAMS},
	{TXRX_FW_STATS_INVALID, TXRX_SOC_INTERRUPT_STATS},
};
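
/*
 * Illustration (assumption about how callers use this table): a CDP stats
 * request id selects a row, and the STATS_FW/STATS_HOST column determines
 * whether the request is serviced by an HTT message to firmware or by a
 * host-side dump; TXRX_FW_STATS_INVALID/TXRX_HOST_STATS_INVALID mark the
 * combinations that are not supported.
 */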

/* MCL specific functions */
#if defined(DP_CON_MON)
/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * For MCL, monitor mode rings are processed in timer contexts (polled).
 * This function returns 0 since, in interrupt mode (softirq based RX),
 * we do not want to process monitor mode rings in a softirq.
 *
 * So, in case packet log is enabled for SAP/STA/P2P modes,
 * regular interrupt processing will not process monitor mode rings;
 * they are processed in a separate timer context instead.
 *
 * Return: 0
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return 0;
}

/*
 * dp_service_mon_rings() - timer handler to reap monitor rings, required
 * since ppdu-end interrupts are not available
 * @arg: SoC Handle
 *
 * Return: None
 */
static void dp_service_mon_rings(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *)arg;
	int ring = 0, work_done, mac_id;
	struct dp_pdev *pdev = NULL;

	for (ring = 0; ring < MAX_PDEV_CNT; ring++) {
		pdev = soc->pdev_list[ring];
		if (!pdev)
			continue;
		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);
			work_done = dp_mon_process(soc, mac_for_pdev,
						   QCA_NAPI_BUDGET);

			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				  FL("Reaped %d descs from Monitor rings"),
				  work_done);
		}
	}

	qdf_timer_mod(&soc->mon_reap_timer, DP_INTR_POLL_TIMER_MS);
}

#ifndef REMOVE_PKT_LOG
/**
 * dp_pkt_log_init() - API to initialize packet log
 * @ppdev: physical device handle
 * @scn: HIF context
 *
 * Return: none
 */
void dp_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
{
	struct dp_pdev *handle = (struct dp_pdev *)ppdev;

	if (handle->pkt_log_init) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Packet log already initialized", __func__);
		return;
	}

	pktlog_sethandle(&handle->pl_dev, scn);
	pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);

	if (pktlogmod_init(scn)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: pktlogmod_init failed", __func__);
		handle->pkt_log_init = false;
	} else {
		handle->pkt_log_init = true;
	}
}

/**
 * dp_pkt_log_con_service() - connect packet log service
 * @ppdev: physical device handle
 * @scn: device context
 *
 * Return: none
 */
static void dp_pkt_log_con_service(struct cdp_pdev *ppdev, void *scn)
{
	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;

	dp_pkt_log_init((struct cdp_pdev *)pdev, scn);
	pktlog_htc_attach();
}

/**
 * dp_get_num_rx_contexts() - get number of RX contexts
 * @soc_hdl: cdp opaque soc handle
 *
 * Return: number of RX contexts
 */
static int dp_get_num_rx_contexts(struct cdp_soc_t *soc_hdl)
{
	int i;
	int num_rx_contexts = 0;

	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
		if (wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i))
			num_rx_contexts++;

	return num_rx_contexts;
}

/**
 * dp_pktlogmod_exit() - API to cleanup pktlog info
 * @handle: Pdev handle
 *
 * Return: none
 */
static void dp_pktlogmod_exit(struct dp_pdev *handle)
{
	struct hif_opaque_softc *scn = (void *)handle->soc->hif_handle;

	if (!scn) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid hif(scn) handle", __func__);
		return;
	}

	pktlogmod_exit(scn);
	handle->pkt_log_init = false;
}
#endif
#else
static void dp_pktlogmod_exit(struct dp_pdev *handle) { }

/**
 * dp_soc_get_mon_mask_for_interrupt_mode() - get mon mode mask for intr mode
 * @soc: pointer to dp_soc handle
 * @intr_ctx_num: interrupt context number for which mon mask is needed
 *
 * Return: mon mask value
 */
static inline
uint32_t dp_soc_get_mon_mask_for_interrupt_mode(struct dp_soc *soc, int intr_ctx_num)
{
	return wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
}
#endif

/**
 * dp_get_dp_vdev_from_cdp_vdev() - get dp_vdev from cdp_vdev by type-casting
 * @cdp_opaque_vdev: pointer to cdp_vdev
 *
 * Return: pointer to dp_vdev
 */
static
struct dp_vdev *dp_get_dp_vdev_from_cdp_vdev(struct cdp_vdev *cdp_opaque_vdev)
{
	return (struct dp_vdev *)cdp_opaque_vdev;
}


static int dp_peer_add_ast_wifi3(struct cdp_soc_t *soc_hdl,
					struct cdp_peer *peer_hdl,
					uint8_t *mac_addr,
					enum cdp_txrx_ast_entry_type type,
					uint32_t flags)
{
	return dp_peer_add_ast((struct dp_soc *)soc_hdl,
				(struct dp_peer *)peer_hdl,
				mac_addr,
				type,
				flags);
}

static int dp_peer_update_ast_wifi3(struct cdp_soc_t *soc_hdl,
						struct cdp_peer *peer_hdl,
						uint8_t *wds_macaddr,
						uint32_t flags)
{
	int status = -1;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry  *ast_entry = NULL;
	struct dp_peer *peer = (struct dp_peer *)peer_hdl;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
						    peer->vdev->pdev->pdev_id);

	if (ast_entry) {
		status = dp_peer_update_ast(soc,
					    peer,
					    ast_entry, flags);
	}

	qdf_spin_unlock_bh(&soc->ast_lock);

	return status;
}

/*
 * dp_wds_reset_ast_wifi3() - Reset the is_active param for ast entry
 * @soc_hdl: Datapath SOC handle
 * @wds_macaddr: WDS entry MAC Address
 * @peer_mac_addr: peer MAC address; when set, only that peer's HM WDS
 *                 entries are reset
 * @vdev_handle: Datapath vdev handle
 *
 * Return: None
 */
static void dp_wds_reset_ast_wifi3(struct cdp_soc_t *soc_hdl,
				   uint8_t *wds_macaddr,
				   uint8_t *peer_mac_addr,
				   void *vdev_handle)
{
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_ast_entry *tmp_ast_entry;
	struct dp_peer *peer;
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
	struct dp_pdev *pdev;

	if (!vdev)
		return;

	pdev = vdev->pdev;

	if (peer_mac_addr) {
		peer = dp_peer_find_hash_find(soc, peer_mac_addr,
					      0, vdev->vdev_id);
		if (!peer)
			return;
		qdf_spin_lock_bh(&soc->ast_lock);
		DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, tmp_ast_entry) {
			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
				dp_peer_del_ast(soc, ast_entry);
		}
		qdf_spin_unlock_bh(&soc->ast_lock);
		dp_peer_unref_delete(peer);

	} else if (wds_macaddr) {
		qdf_spin_lock_bh(&soc->ast_lock);
		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, wds_macaddr,
							    pdev->pdev_id);

		if (ast_entry) {
			if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
			    (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
				dp_peer_del_ast(soc, ast_entry);
		}
		qdf_spin_unlock_bh(&soc->ast_lock);
	}
}

/*
 * dp_wds_reset_ast_table_wifi3() - Reset the is_active param for all
 * ast entries
 * @soc_hdl: Datapath SOC handle
 * @vdev_hdl: Datapath vdev handle
 *
 * Return: None
 */
static void dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
					 void *vdev_hdl)
{
	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_peer *peer;
	struct dp_ast_entry *ase, *temp_ase;
	int i;

	qdf_spin_lock_bh(&soc->ast_lock);

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
					if ((ase->type ==
						CDP_TXRX_AST_TYPE_WDS_HM) ||
					    (ase->type ==
						CDP_TXRX_AST_TYPE_WDS_HM_SEC))
						dp_peer_del_ast(soc, ase);
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
}

/*
 * dp_wds_flush_ast_table_wifi3() - Delete all wds and hmwds ast entries
 * @soc_hdl: Datapath SOC handle
 *
 * Return: None
 */
static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
{
	struct dp_soc *soc = (struct dp_soc *) soc_hdl;
	struct dp_pdev *pdev;
	struct dp_vdev *vdev;
	struct dp_peer *peer;
	struct dp_ast_entry *ase, *temp_ase;
	int i;

	qdf_spin_lock_bh(&soc->ast_lock);

	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
					if ((ase->type ==
						CDP_TXRX_AST_TYPE_STATIC) ||
						(ase->type ==
						 CDP_TXRX_AST_TYPE_SELF) ||
						(ase->type ==
						 CDP_TXRX_AST_TYPE_STA_BSS))
						continue;
					dp_peer_del_ast(soc, ase);
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}

	qdf_spin_unlock_bh(&soc->ast_lock);
}

/**
 * dp_peer_get_ast_info_by_soc_wifi3() - search the soc AST hash table
 *                                       and return ast entry information
 *                                       of first ast entry found in the
 *                                       table with given mac address
 *
 * @soc_hdl : data path soc handle
 * @ast_mac_addr : AST entry mac address
 * @ast_entry_info : ast entry information
 *
 * return : true if ast entry found with ast_mac_addr
 *          false if ast entry not found
 */
static bool dp_peer_get_ast_info_by_soc_wifi3
	(struct cdp_soc_t *soc_hdl,
	 uint8_t *ast_mac_addr,
	 struct cdp_ast_entry_info *ast_entry_info)
{
	struct dp_ast_entry *ast_entry = NULL;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_soc(soc, ast_mac_addr);
	if (!ast_entry || !ast_entry->peer) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}
	if (ast_entry->delete_in_progress && !ast_entry->callback) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}
	ast_entry_info->type = ast_entry->type;
	ast_entry_info->pdev_id = ast_entry->pdev_id;
	ast_entry_info->vdev_id = ast_entry->vdev_id;
	ast_entry_info->peer_id = ast_entry->peer->peer_ids[0];
	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
		     &ast_entry->peer->mac_addr.raw[0],
		     QDF_MAC_ADDR_SIZE);
	qdf_spin_unlock_bh(&soc->ast_lock);
	return true;
}

/**
 * dp_peer_get_ast_info_by_pdevid_wifi3() - search the soc AST hash table
 *                                          and return ast entry information
 *                                          if mac address and pdev_id match
 *
 * @soc_hdl : data path soc handle
 * @ast_mac_addr : AST entry mac address
 * @pdev_id : pdev_id
 * @ast_entry_info : ast entry information
 *
 * return : true if ast entry found with ast_mac_addr
 *          false if ast entry not found
 */
static bool dp_peer_get_ast_info_by_pdevid_wifi3
		(struct cdp_soc_t *soc_hdl,
		 uint8_t *ast_mac_addr,
		 uint8_t pdev_id,
		 struct cdp_ast_entry_info *ast_entry_info)
{
	struct dp_ast_entry *ast_entry;
	struct dp_soc *soc = (struct dp_soc *)soc_hdl;

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, ast_mac_addr, pdev_id);

	if (!ast_entry || !ast_entry->peer) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}
	if (ast_entry->delete_in_progress && !ast_entry->callback) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return false;
	}
	ast_entry_info->type = ast_entry->type;
	ast_entry_info->pdev_id = ast_entry->pdev_id;
	ast_entry_info->vdev_id = ast_entry->vdev_id;
	ast_entry_info->peer_id = ast_entry->peer->peer_ids[0];
	qdf_mem_copy(&ast_entry_info->peer_mac_addr[0],
		     &ast_entry->peer->mac_addr.raw[0],
		     QDF_MAC_ADDR_SIZE);
	qdf_spin_unlock_bh(&soc->ast_lock);
	return true;
}

/**
 * dp_peer_ast_entry_del_by_soc() - delete the ast entry from soc AST hash
 *                                  table with given mac address
 *
 * @soc_handle : data path soc handle
 * @mac_addr : AST entry mac address
 * @callback : callback function to be called on ast delete response from FW
 * @cookie : argument to be passed to callback
 *
 * return : QDF_STATUS_SUCCESS if ast entry found with mac_addr and delete
 *          is sent
 *          QDF_STATUS_E_INVAL if ast entry not found
 */
static QDF_STATUS dp_peer_ast_entry_del_by_soc(struct cdp_soc_t *soc_handle,
					       uint8_t *mac_addr,
					       txrx_ast_free_cb callback,
					       void *cookie)

{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_ast_entry *ast_entry = NULL;
	txrx_ast_free_cb cb = NULL;
	void *arg = NULL;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return QDF_STATUS_E_INVAL;
	}

	if (ast_entry->callback) {
		cb = ast_entry->callback;
		arg = ast_entry->cookie;
	}

	ast_entry->callback = callback;
	ast_entry->cookie = cookie;

	/*
	 * If delete_in_progress is set, an AST delete was already sent to
	 * the target and the host is waiting for the response; do not send
	 * the delete again.
	 */
	if (!ast_entry->delete_in_progress)
		dp_peer_del_ast(soc, ast_entry);

	qdf_spin_unlock_bh(&soc->ast_lock);
	if (cb) {
		cb(soc->ctrl_psoc,
		   dp_soc_to_cdp_soc(soc),
		   arg,
		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_ast_entry_del_by_pdev() - delete the ast entry from soc AST hash
 *                                   table if mac address and pdev_id match
 *
 * @soc_handle : data path soc handle
 * @mac_addr : AST entry mac address
 * @pdev_id : pdev id
 * @callback : callback function to be called on ast delete response from FW
 * @cookie : argument to be passed to callback
 *
 * return : QDF_STATUS_SUCCESS if ast entry found with mac_addr and delete
 *          is sent
 *          QDF_STATUS_E_INVAL if ast entry not found
 */

static QDF_STATUS dp_peer_ast_entry_del_by_pdev(struct cdp_soc_t *soc_handle,
						uint8_t *mac_addr,
						uint8_t pdev_id,
						txrx_ast_free_cb callback,
						void *cookie)

{
	struct dp_soc *soc = (struct dp_soc *)soc_handle;
	struct dp_ast_entry *ast_entry;
	txrx_ast_free_cb cb = NULL;
	void *arg = NULL;

	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr, pdev_id);

	if (!ast_entry) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		return QDF_STATUS_E_INVAL;
	}

	if (ast_entry->callback) {
		cb = ast_entry->callback;
		arg = ast_entry->cookie;
	}

	ast_entry->callback = callback;
	ast_entry->cookie = cookie;

	/*
	 * If delete_in_progress is set, an AST delete was already sent to
	 * the target and the host is waiting for the response; do not send
	 * the delete again.
	 */
	if (!ast_entry->delete_in_progress)
		dp_peer_del_ast(soc, ast_entry);

	qdf_spin_unlock_bh(&soc->ast_lock);

	if (cb) {
		cb(soc->ctrl_psoc,
		   dp_soc_to_cdp_soc(soc),
		   arg,
		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
	}
	return QDF_STATUS_SUCCESS;
}

/**
 * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
 * @ring_num: ring num of the ring being queried
 * @grp_mask: the grp_mask array for the ring type in question.
 *
 * The grp_mask array is indexed by group number and the bit fields correspond
 * to ring numbers.  We are finding which interrupt group a ring belongs to.
 *
 * Return: the index in the grp_mask array with the ring number.
 * -QDF_STATUS_E_NOENT if no entry is found
 */
static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
{
	int ext_group_num;
	int mask = 1 << ring_num;

	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
	     ext_group_num++) {
		if (mask & grp_mask[ext_group_num])
			return ext_group_num;
	}

	return -QDF_STATUS_E_NOENT;
}
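
/*
 * Example (hypothetical masks): with grp_mask = {0x1, 0x6, 0x8}, ring 2
 * (mask 0x4) is found in grp_mask[1], so the ring is serviced by
 * interrupt ext_group 1; a ring not present in any mask yields
 * -QDF_STATUS_E_NOENT.
 */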

static int dp_srng_calculate_msi_group(struct dp_soc *soc,
				       enum hal_ring_type ring_type,
				       int ring_num)
{
	int *grp_mask;

	switch (ring_type) {
	case WBM2SW_RELEASE:
		/* dp_tx_comp_handler - soc->tx_comp_ring */
		if (ring_num < 3)
			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];

		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
		else if (ring_num == 3) {
			/* sw treats this as a separate ring type */
			grp_mask = &soc->wlan_cfg_ctx->
				int_rx_wbm_rel_ring_mask[0];
			ring_num = 0;
		} else {
			qdf_assert(0);
			return -QDF_STATUS_E_NOENT;
		}
	break;

	case REO_EXCEPTION:
		/* dp_rx_err_process - &soc->reo_exception_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
	break;

	case REO_DST:
		/* dp_rx_process - soc->reo_dest_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
	break;

	case REO_STATUS:
		/* dp_reo_status_ring_handler - soc->reo_status_ring */
		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
	break;

	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring*/
	case RXDMA_MONITOR_STATUS:
	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
	case RXDMA_MONITOR_DST:
		/* dp_mon_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
	break;
	case RXDMA_DST:
		/* dp_rxdma_err_process */
		grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
	break;

	case RXDMA_BUF:
		grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
	break;

	case RXDMA_MONITOR_BUF:
		/* TODO: support low_thresh interrupt */
		return -QDF_STATUS_E_NOENT;
	break;

	case TCL_DATA:
	case TCL_CMD:
	case REO_CMD:
	case SW2WBM_RELEASE:
	case WBM_IDLE_LINK:
		/* normally empty SW_TO_HW rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case TCL_STATUS:
	case REO_REINJECT:
		/* misc unused rings */
		return -QDF_STATUS_E_NOENT;
	break;

	case CE_SRC:
	case CE_DST:
	case CE_DST_STATUS:
		/* CE_rings - currently handled by hif */
	default:
		return -QDF_STATUS_E_NOENT;
	break;
	}

	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
}

static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
			      *ring_params, int ring_type, int ring_num)
{
	int msi_group_number;
	int msi_data_count;
	int ret;
	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;

	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
					    &msi_data_count, &msi_data_start,
					    &msi_irq_start);

	if (ret)
		return;

	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
						       ring_num);
	if (msi_group_number < 0) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
			FL("ring not part of an ext_group; ring_type: %d,ring_num %d"),
			ring_type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		return;
	}

	if (msi_group_number > msi_data_count) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
			FL("2 msi_groups will share an msi; msi_group_num %d"),
			msi_group_number);

		QDF_ASSERT(0);
	}

	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);

	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data = (msi_group_number % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;
}
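
/*
 * Worked example (hypothetical MSI assignment): if pld reports
 * msi_data_count = 3 and msi_data_start = 40, a ring that maps to
 * msi_group_number 4 is programmed with msi_data = (4 % 3) + 40 = 41;
 * note that groups 1 and 4 then share one MSI vector.
 */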

/**
 * dp_print_ast_stats() - Dump AST table contents
 * @soc: Datapath soc handle
 *
 * Return: void
 */
#ifdef FEATURE_AST
void dp_print_ast_stats(struct dp_soc *soc)
{
	uint8_t i;
	uint8_t num_entries = 0;
	struct dp_vdev *vdev;
	struct dp_pdev *pdev;
	struct dp_peer *peer;
	struct dp_ast_entry *ase, *tmp_ase;
	char type[CDP_TXRX_AST_TYPE_MAX][10] = {
			"NONE", "STATIC", "SELF", "WDS", "MEC", "HMWDS", "BSS",
			"DA", "HMWDS_SEC"};

	DP_PRINT_STATS("AST Stats:");
	DP_PRINT_STATS("	Entries Added   = %d", soc->stats.ast.added);
	DP_PRINT_STATS("	Entries Deleted = %d", soc->stats.ast.deleted);
	DP_PRINT_STATS("	Entries Agedout = %d", soc->stats.ast.aged_out);
	DP_PRINT_STATS("	Entries MAP ERR = %d", soc->stats.ast.map_err);

	DP_PRINT_STATS("AST Table:");

	qdf_spin_lock_bh(&soc->ast_lock);
	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
		pdev = soc->pdev_list[i];
		qdf_spin_lock_bh(&pdev->vdev_list_lock);
		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
				    DP_PRINT_STATS("%6d mac_addr = %pM"
					    " peer_mac_addr = %pM"
					    " peer_id = %u"
					    " type = %s"
					    " next_hop = %d"
					    " is_active = %d"
					    " ast_idx = %d"
					    " ast_hash = %d"
					    " delete_in_progress = %d"
					    " pdev_id = %d"
					    " vdev_id = %d",
					    ++num_entries,
					    ase->mac_addr.raw,
					    ase->peer->mac_addr.raw,
					    ase->peer->peer_ids[0],
					    type[ase->type],
					    ase->next_hop,
					    ase->is_active,
					    ase->ast_idx,
					    ase->ast_hash_value,
					    ase->delete_in_progress,
					    ase->pdev_id,
					    ase->vdev_id);
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
	}
	qdf_spin_unlock_bh(&soc->ast_lock);
}
#else
void dp_print_ast_stats(struct dp_soc *soc)
{
	DP_PRINT_STATS("AST Stats not available. Enable FEATURE_AST");
}
#endif

/**
 * dp_print_peer_table() - Dump all Peer stats
 * @vdev: Datapath Vdev handle
 *
 * Return: void
 */
static void dp_print_peer_table(struct dp_vdev *vdev)
{
	struct dp_peer *peer = NULL;

	DP_PRINT_STATS("Dumping Peer Table Stats:");
	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if (!peer) {
			DP_PRINT_STATS("Invalid Peer");
			return;
		}
		DP_PRINT_STATS("    peer_mac_addr = %pM"
			       " nawds_enabled = %d"
			       " bss_peer = %d"
			       " wds_enabled = %d"
			       " tx_cap_enabled = %d"
			       " rx_cap_enabled = %d"
			       " delete in progress = %d"
			       " peer id = %d",
			       peer->mac_addr.raw,
			       peer->nawds_enabled,
			       peer->bss_peer,
			       peer->wds_enabled,
			       peer->tx_cap_enabled,
			       peer->rx_cap_enabled,
			       peer->delete_in_progress,
			       peer->peer_ids[0]);
	}
}

#ifdef WLAN_DP_PER_RING_TYPE_CONFIG
/**
 * dp_srng_configure_interrupt_thresholds() - Retrieve interrupt
 * threshold values from the wlan_srng_cfg table for each ring type
 * @soc: device handle
 * @ring_params: per ring specific parameters
 * @ring_type: Ring type
 * @ring_num: Ring number for a given ring type
 * @num_entries: number of entries in the ring
 *
 * Fill the ring params with the interrupt threshold
 * configuration parameters available in the per ring type wlan_srng_cfg
 * table.
 *
 * Return: None
 */
static void
dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
				       struct hal_srng_params *ring_params,
				       int ring_type, int ring_num,
				       int num_entries)
{
	if (ring_type == WBM2SW_RELEASE && (ring_num == 3)) {
		ring_params->intr_timer_thres_us =
				wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
				wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
	} else {
		ring_params->intr_timer_thres_us =
				soc->wlan_srng_cfg[ring_type].timer_threshold;
		ring_params->intr_batch_cntr_thres_entries =
				soc->wlan_srng_cfg[ring_type].batch_count_threshold;
	}
	ring_params->low_threshold =
			soc->wlan_srng_cfg[ring_type].low_threshold;

	if (ring_params->low_threshold)
		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
}
#else
static void
dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
				       struct hal_srng_params *ring_params,
				       int ring_type, int ring_num,
				       int num_entries)
{
	if (ring_type == REO_DST) {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_rx(soc->wlan_cfg_ctx);
	} else if (ring_type == WBM2SW_RELEASE && (ring_num < 3)) {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_tx(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_tx(soc->wlan_cfg_ctx);
	} else {
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
		ring_params->intr_batch_cntr_thres_entries =
			wlan_cfg_get_int_batch_threshold_other(soc->wlan_cfg_ctx);
	}

	/* Enable low threshold interrupts for rx buffer rings (regular and
	 * monitor buffer rings).
	 * TODO: See if this is required for any other ring
	 */
	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF) ||
	    (ring_type == RXDMA_MONITOR_STATUS)) {
		/* TODO: Setting low threshold to 1/8th of ring size
		 * see if this needs to be configurable
		 */
		ring_params->low_threshold = num_entries >> 3;
		ring_params->intr_timer_thres_us =
			wlan_cfg_get_int_timer_threshold_rx(soc->wlan_cfg_ctx);
		ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
		ring_params->intr_batch_cntr_thres_entries = 0;
	}
}
#endif
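
/*
 * Note (summary of the thresholds configured above, an assumption about the
 * HAL SRNG semantics): a ring raises its interrupt when either
 * intr_batch_cntr_thres_entries descriptors have accumulated or
 * intr_timer_thres_us microseconds have elapsed, whichever happens first;
 * low_threshold additionally fires when a source ring's fill level drops
 * below the configured watermark.
 */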

/**
 * dp_srng_setup() - Internal function to setup SRNG rings used by data path
 * @soc: datapath soc handle
 * @srng: srng handle
 * @ring_type: ring that needs to be configured
 * @ring_num: ring number for a given ring type
 * @mac_id: mac number
 * @num_entries: Total number of entries for a given ring
 * @cached: flag to allocate the ring descriptors in cacheable memory
 *
 * Return: non-zero - failure/zero - success
 */
static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
			 int ring_type, int ring_num, int mac_id,
			 uint32_t num_entries, bool cached)
{
	hal_soc_handle_t hal_soc = soc->hal_soc;
	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
	/* TODO: See if we should get align size from hal */
	uint32_t ring_base_align = 8;
	struct hal_srng_params ring_params;
	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);

	/* TODO: Currently hal layer takes care of endianness related settings.
	 * See if these settings need to be passed from DP layer
	 */
	ring_params.flags = 0;

	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
	srng->hal_srng = NULL;
	srng->alloc_size = num_entries * entry_size;
	srng->num_entries = num_entries;

	if (!dp_is_soc_reinit(soc)) {
		if (!cached) {
			ring_params.ring_base_vaddr =
			    qdf_aligned_mem_alloc_consistent(
						soc->osdev, &srng->alloc_size,
						&srng->base_vaddr_unaligned,
						&srng->base_paddr_unaligned,
						&ring_params.ring_base_paddr,
						ring_base_align);
		} else {
			ring_params.ring_base_vaddr = qdf_aligned_malloc(
					&srng->alloc_size,
					&srng->base_vaddr_unaligned,
					&srng->base_paddr_unaligned,
					&ring_params.ring_base_paddr,
					ring_base_align);
		}

		if (!ring_params.ring_base_vaddr) {
			dp_err("alloc failed - ring_type: %d, ring_num %d",
					ring_type, ring_num);
			return QDF_STATUS_E_NOMEM;
		}
	}

	ring_params.ring_base_paddr = (qdf_dma_addr_t)qdf_align(
			(unsigned long)(srng->base_paddr_unaligned),
			ring_base_align);

	ring_params.ring_base_vaddr = (void *)(
			(unsigned long)(srng->base_vaddr_unaligned) +
			((unsigned long)(ring_params.ring_base_paddr) -
			 (unsigned long)(srng->base_paddr_unaligned)));

	qdf_assert_always(ring_params.ring_base_vaddr);

	ring_params.num_entries = num_entries;

	dp_verbose_debug("Ring type: %d, num:%d vaddr %pK paddr %pK entries %u",
			 ring_type, ring_num,
			 (void *)ring_params.ring_base_vaddr,
			 (void *)ring_params.ring_base_paddr,
			 ring_params.num_entries);

	if (soc->intr_mode == DP_INTR_MSI) {
		dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
		dp_verbose_debug("Using MSI for ring_type: %d, ring_num %d",
				 ring_type, ring_num);

	} else {
		ring_params.msi_data = 0;
		ring_params.msi_addr = 0;
		dp_verbose_debug("Skipping MSI for ring_type: %d, ring_num %d",
				 ring_type, ring_num);
	}

	dp_srng_configure_interrupt_thresholds(soc, &ring_params,
					       ring_type, ring_num,
					       num_entries);

	if (cached) {
		ring_params.flags |= HAL_SRNG_CACHED_DESC;
		srng->cached = 1;
	}

	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
		mac_id, &ring_params);

	if (!srng->hal_srng) {
		if (cached) {
			qdf_mem_free(srng->base_vaddr_unaligned);
		} else {
			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
						srng->alloc_size,
						srng->base_vaddr_unaligned,
						srng->base_paddr_unaligned, 0);
		}
	}

	return 0;
}
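
/*
 * Typical call (illustrative sketch, not copied from this file): setting up
 * REO destination ring i with a configured depth in uncached memory:
 *
 *	if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST, i, 0,
 *			  reo_dst_ring_size, false))
 *		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
 *			  FL(RNG_ERR "reo_dest_ring [%d]"), i);
 *
 * where reo_dst_ring_size is a hypothetical name for the configured depth.
 */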

/*
 * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
 * @soc: DP SOC handle
 * @srng: source ring structure
 * @ring_type: type of ring
 * @ring_num: ring number
 *
 * Return: None
 */
static void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
			   int ring_type, int ring_num)
{
	if (!srng->hal_srng) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  FL("Ring type: %d, num:%d not setup"),
			  ring_type, ring_num);
		return;
	}

	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
	srng->hal_srng = NULL;
}

/**
 * dp_srng_cleanup() - Internal function to cleanup SRNG rings used by data
 * path
 *
 * Any buffers allocated and attached to ring entries are expected to be freed
 * before calling this function.
 */
static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
	int ring_type, int ring_num)
{
	if (!dp_is_soc_reinit(soc)) {
		if (!srng->hal_srng && (srng->alloc_size == 0)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  FL("Ring type: %d, num:%d not setup"),
				  ring_type, ring_num);
			return;
		}

		if (srng->hal_srng) {
			hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
			srng->hal_srng = NULL;
		}
	}

	if (srng->alloc_size && srng->base_vaddr_unaligned) {
		if (!srng->cached) {
			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
						srng->alloc_size,
						srng->base_vaddr_unaligned,
						srng->base_paddr_unaligned, 0);
		} else {
			qdf_mem_free(srng->base_vaddr_unaligned);
		}
		srng->alloc_size = 0;
		srng->base_vaddr_unaligned = NULL;
	}
	srng->hal_srng = NULL;
}

/* TODO: Need this interface from HIF */
void *hif_get_hal_handle(struct hif_opaque_softc *hif_handle);

#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
int dp_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
			 hal_ring_handle_t hal_ring_hdl)
{
	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
	uint32_t hp, tp;
	uint8_t ring_id;

	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
	ring_id = hal_srng_ring_id_get(hal_ring_hdl);

	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_START);

	return hal_srng_access_start(hal_soc, hal_ring_hdl);
}

void dp_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *dp_soc,
			hal_ring_handle_t hal_ring_hdl)
{
	hal_soc_handle_t hal_soc = dp_soc->hal_soc;
	uint32_t hp, tp;
	uint8_t ring_id;

	hal_get_sw_hptp(hal_soc, hal_ring_hdl, &tp, &hp);
	ring_id = hal_srng_ring_id_get(hal_ring_hdl);

	hif_record_event(dp_soc->hif_handle, int_ctx->dp_intr_id,
			 ring_id, hp, tp, HIF_EVENT_SRNG_ACCESS_END);

	return hal_srng_access_end(hal_soc, hal_ring_hdl);
}
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */

/*
 * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
 * @dp_ctx: DP interrupt context handle
 * @dp_budget: Number of frames/descriptors that can be processed in one shot
 *
 * Return: remaining budget/quota for the soc device
 */
static uint32_t dp_service_srngs(void *dp_ctx, uint32_t dp_budget)
{
	struct dp_intr *int_ctx = (struct dp_intr *)dp_ctx;
	struct dp_intr_stats *intr_stats = &int_ctx->intr_stats;
	struct dp_soc *soc = int_ctx->soc;
	int ring = 0;
	uint32_t work_done  = 0;
	int budget = dp_budget;
	uint8_t tx_mask = int_ctx->tx_ring_mask;
	uint8_t rx_mask = int_ctx->rx_ring_mask;
	uint8_t rx_err_mask = int_ctx->rx_err_ring_mask;
	uint8_t rx_wbm_rel_mask = int_ctx->rx_wbm_rel_ring_mask;
	uint8_t reo_status_mask = int_ctx->reo_status_ring_mask;
	uint32_t remaining_quota = dp_budget;
	struct dp_pdev *pdev = NULL;
	int mac_id;

	dp_verbose_debug("tx %x rx %x rx_err %x rx_wbm_rel %x reo_status %x rx_mon_ring %x host2rxdma %x rxdma2host %x\n",
			 tx_mask, rx_mask, rx_err_mask, rx_wbm_rel_mask,
			 reo_status_mask,
			 int_ctx->rx_mon_ring_mask,
			 int_ctx->host2rxdma_ring_mask,
			 int_ctx->rxdma2host_ring_mask);

	/* Process Tx completion interrupts first to return back buffers */
	while (tx_mask) {
		if (tx_mask & 0x1) {
			work_done = dp_tx_comp_handler(int_ctx,
						       soc,
						       soc->tx_comp_ring[ring].hal_srng,
						       ring, remaining_quota);

			if (work_done) {
				intr_stats->num_tx_ring_masks[ring]++;
				dp_verbose_debug("tx mask 0x%x ring %d, budget %d, work_done %d",
						 tx_mask, ring, budget,
						 work_done);
			}

			budget -= work_done;
			if (budget <= 0)
				goto budget_done;

			remaining_quota = budget;
		}
		tx_mask = tx_mask >> 1;
		ring++;
	}

	/* Process REO Exception ring interrupt */
	if (rx_err_mask) {
		work_done = dp_rx_err_process(int_ctx, soc,
					      soc->reo_exception_ring.hal_srng,
					      remaining_quota);

		if (work_done) {
			intr_stats->num_rx_err_ring_masks++;
			dp_verbose_debug("REO Exception Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -=  work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx WBM release ring interrupt */
	if (rx_wbm_rel_mask) {
		work_done = dp_rx_wbm_err_process(int_ctx, soc,
						  soc->rx_rel_ring.hal_srng,
						  remaining_quota);

		if (work_done) {
			intr_stats->num_rx_wbm_rel_ring_masks++;
			dp_verbose_debug("WBM Release Ring: work_done %d budget %d",
					 work_done, budget);
		}

		budget -=  work_done;
		if (budget <= 0) {
			goto budget_done;
		}
		remaining_quota = budget;
	}

	/* Process Rx interrupts */
	if (rx_mask) {
		for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
			if (!(rx_mask & (1 << ring)))
				continue;
			work_done = dp_rx_process(int_ctx,
						  soc->reo_dest_ring[ring].hal_srng,
						  ring,
						  remaining_quota);
			if (work_done) {
				intr_stats->num_rx_ring_masks[ring]++;
				dp_verbose_debug("rx mask 0x%x ring %d, work_done %d budget %d",
						 rx_mask, ring,
						 work_done, budget);
				budget -=  work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}
		}
	}

	if (reo_status_mask) {
		if (dp_reo_status_ring_handler(int_ctx, soc))
			int_ctx->intr_stats.num_reo_status_ring_masks++;
	}

	/* Process LMAC interrupts */
	for (ring = 0; ring < MAX_PDEV_CNT; ring++) {
		pdev = soc->pdev_list[ring];
		if (!pdev)
			continue;
		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
								pdev->pdev_id);
			if (int_ctx->rx_mon_ring_mask & (1 << mac_for_pdev)) {
				work_done = dp_mon_process(soc, mac_for_pdev,
							   remaining_quota);
				if (work_done)
					intr_stats->num_rx_mon_ring_masks++;
				budget -= work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}

			if (int_ctx->rxdma2host_ring_mask &
					(1 << mac_for_pdev)) {
				work_done = dp_rxdma_err_process(int_ctx, soc,
								 mac_for_pdev,
								 remaining_quota);
				if (work_done)
					intr_stats->num_rxdma2host_ring_masks++;
				budget -=  work_done;
				if (budget <= 0)
					goto budget_done;
				remaining_quota = budget;
			}

			if (int_ctx->host2rxdma_ring_mask &
						(1 << mac_for_pdev)) {
				union dp_rx_desc_list_elem_t *desc_list = NULL;
				union dp_rx_desc_list_elem_t *tail = NULL;
				struct dp_srng *rx_refill_buf_ring =
					&pdev->rx_refill_buf_ring;

				intr_stats->num_host2rxdma_ring_masks++;
				DP_STATS_INC(pdev, replenish.low_thresh_intrs,
						1);
				dp_rx_buffers_replenish(soc, mac_for_pdev,
							rx_refill_buf_ring,
							&soc->rx_desc_buf[mac_for_pdev],
							0, &desc_list, &tail);
			}
		}
	}

	qdf_lro_flush(int_ctx->lro_ctx);
	intr_stats->num_masks++;

budget_done:
	return dp_budget - budget;
}
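
/*
 * Note on the budget accounting above: dp_budget is the quota handed in by
 * the NAPI/HIF layer, each ring handler consumes from it via work_done, and
 * the function returns the number of units actually used (dp_budget -
 * budget), so the caller can decide whether the context should be
 * rescheduled.
 */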

/*
 * dp_interrupt_timer() - timer handler to poll for DP interrupts
 * @arg: SoC Handle
 *
 * Return: None
 */
static void dp_interrupt_timer(void *arg)
{
	struct dp_soc *soc = (struct dp_soc *) arg;
	int i;

	if (qdf_atomic_read(&soc->cmn_init_done)) {
		for (i = 0;
			i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++)
			dp_service_srngs(&soc->intr_ctx[i], 0xffff);

		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
	}
}

/*
 * dp_soc_attach_poll() - Register handlers for DP interrupts
 * @txrx_soc: DP SOC handle
 *
 * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
 * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
 * rx_monitor_ring mask to indicate the rings that are processed by the
 * handler.
 *
 * Return: 0 for success, nonzero for failure.
 */
static QDF_STATUS dp_soc_attach_poll(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
	int i;

	soc->intr_mode = DP_INTR_POLL;

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].dp_intr_id = i;
		soc->intr_ctx[i].tx_ring_mask =
			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_ring_mask =
			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_mon_ring_mask =
			wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_err_ring_mask =
			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rx_wbm_rel_ring_mask =
			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].reo_status_ring_mask =
			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].rxdma2host_ring_mask =
			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
		soc->intr_ctx[i].soc = soc;
		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
	}

	qdf_timer_init(soc->osdev, &soc->int_timer,
			dp_interrupt_timer, (void *)soc,
			QDF_TIMER_TYPE_WAKE_APPS);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_soc_set_interrupt_mode() - Set the interrupt mode in soc
 * @soc: DP soc handle
 *
 * Set the appropriate interrupt mode flag in the soc
 *
 * Return: None
 */
static void dp_soc_set_interrupt_mode(struct dp_soc *soc)
{
	uint32_t msi_base_data, msi_vector_start;
	int msi_vector_count, ret;

	soc->intr_mode = DP_INTR_LEGACY;

	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
	    (soc->cdp_soc.ol_ops->get_con_mode &&
	     soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_MONITOR_MODE)) {
		soc->intr_mode = DP_INTR_POLL;
	} else {
		ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
						  &msi_vector_count,
						  &msi_base_data,
						  &msi_vector_start);
		if (ret)
			return;

		soc->intr_mode = DP_INTR_MSI;
	}
}

static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc);
#if defined(DP_INTR_POLL_BOTH)
/*
 * dp_soc_interrupt_attach_wrapper() - Register handlers for DP interrupts
 * @txrx_soc: DP SOC handle
 *
 * Call the appropriate attach function based on the mode of operation.
 * This is a WAR for enabling monitor mode.
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	if (!(soc->wlan_cfg_ctx->napi_enabled) ||
	    (soc->cdp_soc.ol_ops->get_con_mode &&
	     soc->cdp_soc.ol_ops->get_con_mode() ==
	     QDF_GLOBAL_MONITOR_MODE)) {
		dp_info("Poll mode");
		return dp_soc_attach_poll(txrx_soc);
	} else {
		dp_info("Interrupt mode");
		return dp_soc_interrupt_attach(txrx_soc);
	}
}
#else
#if defined(DP_INTR_POLL_BASED) && DP_INTR_POLL_BASED
static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
{
	return dp_soc_attach_poll(txrx_soc);
}
#else
static QDF_STATUS dp_soc_interrupt_attach_wrapper(void *txrx_soc)
{
	struct dp_soc *soc = (struct dp_soc *)txrx_soc;

	if (hif_is_polled_mode_enabled(soc->hif_handle))
		return dp_soc_attach_poll(txrx_soc);
	else
		return dp_soc_interrupt_attach(txrx_soc);
}
#endif
#endif
1712 
1713 static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
1714 		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
1715 {
1716 	int j;
1717 	int num_irq = 0;
1718 
1719 	int tx_mask =
1720 		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1721 	int rx_mask =
1722 		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1723 	int rx_mon_mask =
1724 		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
1725 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1726 					soc->wlan_cfg_ctx, intr_ctx_num);
1727 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1728 					soc->wlan_cfg_ctx, intr_ctx_num);
1729 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1730 					soc->wlan_cfg_ctx, intr_ctx_num);
1731 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1732 					soc->wlan_cfg_ctx, intr_ctx_num);
1733 	int host2rxdma_ring_mask = wlan_cfg_get_host2rxdma_ring_mask(
1734 					soc->wlan_cfg_ctx, intr_ctx_num);
1735 	int host2rxdma_mon_ring_mask = wlan_cfg_get_host2rxdma_mon_ring_mask(
1736 					soc->wlan_cfg_ctx, intr_ctx_num);
1737 
1738 	soc->intr_mode = DP_INTR_LEGACY;
1739 
1740 	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
1741 
1742 		if (tx_mask & (1 << j)) {
1743 			irq_id_map[num_irq++] =
1744 				(wbm2host_tx_completions_ring1 - j);
1745 		}
1746 
1747 		if (rx_mask & (1 << j)) {
1748 			irq_id_map[num_irq++] =
1749 				(reo2host_destination_ring1 - j);
1750 		}
1751 
1752 		if (rxdma2host_ring_mask & (1 << j)) {
1753 			irq_id_map[num_irq++] =
1754 				rxdma2host_destination_ring_mac1 -
1755 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1756 		}
1757 
1758 		if (host2rxdma_ring_mask & (1 << j)) {
1759 			irq_id_map[num_irq++] =
1760 				host2rxdma_host_buf_ring_mac1 -
1761 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1762 		}
1763 
1764 		if (host2rxdma_mon_ring_mask & (1 << j)) {
1765 			irq_id_map[num_irq++] =
1766 				host2rxdma_monitor_ring1 -
1767 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1768 		}
1769 
1770 		if (rx_mon_mask & (1 << j)) {
1771 			irq_id_map[num_irq++] =
1772 				ppdu_end_interrupts_mac1 -
1773 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1774 			irq_id_map[num_irq++] =
1775 				rxdma2host_monitor_status_ring_mac1 -
1776 				wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
1777 		}
1778 
1779 		if (rx_wbm_rel_ring_mask & (1 << j))
1780 			irq_id_map[num_irq++] = wbm2host_rx_release;
1781 
1782 		if (rx_err_ring_mask & (1 << j))
1783 			irq_id_map[num_irq++] = reo2host_exception;
1784 
1785 		if (reo_status_ring_mask & (1 << j))
1786 			irq_id_map[num_irq++] = reo2host_status;
1787 
1788 	}
1789 	*num_irq_r = num_irq;
1790 }
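
/*
 * Illustrative sketch of the mask-to-IRQ translation above (not driver
 * code). For a context whose tx_ring_mask is 0x3, and assuming the
 * interrupt-id enum places wbm2host_tx_completions_ring2 immediately
 * below wbm2host_tx_completions_ring1 (which is why the code subtracts
 * j), the loop emits:
 *
 *	irq_id_map[0] = wbm2host_tx_completions_ring1 - 0;
 *	irq_id_map[1] = wbm2host_tx_completions_ring1 - 1;
 *
 * i.e. one wired IRQ per ring bit, with num_irq counting the entries.
 */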
1791 
1792 static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
1793 		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
1794 		int msi_vector_count, int msi_vector_start)
1795 {
1796 	int tx_mask = wlan_cfg_get_tx_ring_mask(
1797 					soc->wlan_cfg_ctx, intr_ctx_num);
1798 	int rx_mask = wlan_cfg_get_rx_ring_mask(
1799 					soc->wlan_cfg_ctx, intr_ctx_num);
1800 	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
1801 					soc->wlan_cfg_ctx, intr_ctx_num);
1802 	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
1803 					soc->wlan_cfg_ctx, intr_ctx_num);
1804 	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
1805 					soc->wlan_cfg_ctx, intr_ctx_num);
1806 	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
1807 					soc->wlan_cfg_ctx, intr_ctx_num);
1808 	int rxdma2host_ring_mask = wlan_cfg_get_rxdma2host_ring_mask(
1809 					soc->wlan_cfg_ctx, intr_ctx_num);
1810 
1811 	unsigned int vector =
1812 		(intr_ctx_num % msi_vector_count) + msi_vector_start;
1813 	int num_irq = 0;
1814 
1815 	soc->intr_mode = DP_INTR_MSI;
1816 
1817 	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
1818 	    rx_wbm_rel_ring_mask | reo_status_ring_mask | rxdma2host_ring_mask)
1819 		irq_id_map[num_irq++] =
1820 			pld_get_msi_irq(soc->osdev->dev, vector);
1821 
1822 	*num_irq_r = num_irq;
1823 }
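
/*
 * Worked example of the MSI vector assignment above, with hypothetical
 * values msi_vector_count = 3 and msi_vector_start = 1:
 *
 *	intr_ctx_num 0 -> vector = (0 % 3) + 1 = 1
 *	intr_ctx_num 1 -> vector = (1 % 3) + 1 = 2
 *	intr_ctx_num 2 -> vector = (2 % 3) + 1 = 3
 *	intr_ctx_num 3 -> vector = (3 % 3) + 1 = 1	(wraps around)
 *
 * Each context therefore contributes at most one IRQ (all of its ring
 * masks share a single pld_get_msi_irq() vector), and contexts beyond
 * msi_vector_count share vectors round-robin.
 */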
1824 
1825 static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
1826 				    int *irq_id_map, int *num_irq)
1827 {
1828 	int msi_vector_count, ret;
1829 	uint32_t msi_base_data, msi_vector_start;
1830 
1831 	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
1832 					    &msi_vector_count,
1833 					    &msi_base_data,
1834 					    &msi_vector_start);
1835 	if (ret)
1836 		dp_soc_interrupt_map_calculate_integrated(soc,
1837 				intr_ctx_num, irq_id_map, num_irq);
1838 	else
1840 		dp_soc_interrupt_map_calculate_msi(soc,
1841 				intr_ctx_num, irq_id_map, num_irq,
1842 				msi_vector_count, msi_vector_start);
1843 }
1844 
1845 /*
1846  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
1847  * @txrx_soc: DP SOC handle
1848  *
1849  * Host driver will register for "DP_NUM_INTERRUPT_CONTEXTS" number of NAPI
1850  * contexts. Each NAPI context will have a tx_ring_mask, rx_ring_mask, and
1851  * rx_monitor_ring mask to indicate the rings that are processed by the handler.
1852  *
1853  * Return: 0 for success. nonzero for failure.
1854  */
1855 static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
1856 {
1857 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1858 
1859 	int i = 0;
1860 	int num_irq = 0;
1861 
1862 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1863 		int ret = 0;
1864 
1865 		/* Map of IRQ ids registered with one interrupt context */
1866 		int irq_id_map[HIF_MAX_GRP_IRQ];
1867 
1868 		int tx_mask =
1869 			wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, i);
1870 		int rx_mask =
1871 			wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, i);
1872 		int rx_mon_mask =
1873 			dp_soc_get_mon_mask_for_interrupt_mode(soc, i);
1874 		int rx_err_ring_mask =
1875 			wlan_cfg_get_rx_err_ring_mask(soc->wlan_cfg_ctx, i);
1876 		int rx_wbm_rel_ring_mask =
1877 			wlan_cfg_get_rx_wbm_rel_ring_mask(soc->wlan_cfg_ctx, i);
1878 		int reo_status_ring_mask =
1879 			wlan_cfg_get_reo_status_ring_mask(soc->wlan_cfg_ctx, i);
1880 		int rxdma2host_ring_mask =
1881 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
1882 		int host2rxdma_ring_mask =
1883 			wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx, i);
1884 		int host2rxdma_mon_ring_mask =
1885 			wlan_cfg_get_host2rxdma_mon_ring_mask(
1886 				soc->wlan_cfg_ctx, i);
1887 
1888 		soc->intr_ctx[i].dp_intr_id = i;
1889 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
1890 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
1891 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
1892 		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask;
1893 		soc->intr_ctx[i].rxdma2host_ring_mask = rxdma2host_ring_mask;
1894 		soc->intr_ctx[i].host2rxdma_ring_mask = host2rxdma_ring_mask;
1895 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask;
1896 		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask;
1897 		soc->intr_ctx[i].host2rxdma_mon_ring_mask =
1898 			 host2rxdma_mon_ring_mask;
1899 
1900 		soc->intr_ctx[i].soc = soc;
1901 
1902 		num_irq = 0;
1903 
1904 		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
1905 					       &num_irq);
1906 
1907 		ret = hif_register_ext_group(soc->hif_handle,
1908 				num_irq, irq_id_map, dp_service_srngs,
1909 				&soc->intr_ctx[i], "dp_intr",
1910 				HIF_EXEC_NAPI_TYPE, QCA_NAPI_DEF_SCALE_BIN_SHIFT);
1911 
1912 		if (ret) {
1913 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1914 			FL("failed, ret = %d"), ret);
1915 
1916 			return QDF_STATUS_E_FAILURE;
1917 		}
1918 		soc->intr_ctx[i].lro_ctx = qdf_lro_init();
1919 	}
1920 
1921 	hif_configure_ext_group_interrupts(soc->hif_handle);
1922 
1923 	return QDF_STATUS_SUCCESS;
1924 }
1925 
1926 /*
1927  * dp_soc_interrupt_detach() - Deregister any allocations done for interrupts
1928  * @txrx_soc: DP SOC handle
1929  *
1930  * Return: void
1931  */
1932 static void dp_soc_interrupt_detach(void *txrx_soc)
1933 {
1934 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
1935 	int i;
1936 
1937 	if (soc->intr_mode == DP_INTR_POLL) {
1938 		qdf_timer_stop(&soc->int_timer);
1939 		qdf_timer_free(&soc->int_timer);
1940 	} else {
1941 		hif_deregister_exec_group(soc->hif_handle, "dp_intr");
1942 	}
1943 
1944 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
1945 		soc->intr_ctx[i].tx_ring_mask = 0;
1946 		soc->intr_ctx[i].rx_ring_mask = 0;
1947 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
1948 		soc->intr_ctx[i].rx_err_ring_mask = 0;
1949 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
1950 		soc->intr_ctx[i].reo_status_ring_mask = 0;
1951 		soc->intr_ctx[i].rxdma2host_ring_mask = 0;
1952 		soc->intr_ctx[i].host2rxdma_ring_mask = 0;
1953 		soc->intr_ctx[i].host2rxdma_mon_ring_mask = 0;
1954 
1955 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
1956 	}
1957 }
1958 
1959 #define AVG_MAX_MPDUS_PER_TID 128
1960 #define AVG_TIDS_PER_CLIENT 2
1961 #define AVG_FLOWS_PER_TID 2
1962 #define AVG_MSDUS_PER_FLOW 128
1963 #define AVG_MSDUS_PER_MPDU 4
1964 
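/*
 * Worked example of the sizing math below, using the AVG_* defaults and
 * hypothetical values for the HAL-provided terms (max_clients = 64,
 * num_mpdus_per_link_desc = 6, num_msdus_per_link_desc = 7,
 * num_mpdu_links_per_queue_desc = 17):
 *
 *	num_mpdu_link_descs    = (64 * 2 * 128) / 6      =  2730
 *	num_mpdu_queue_descs   = 2730 / 17               =   160
 *	num_tx_msdu_link_descs = (64 * 2 * 2 * 128) / 7  =  4681
 *	num_rx_msdu_link_descs = (64 * 2 * 128 * 4) / 6  = 10922
 *	num_entries            = 18493
 *	total_link_descs       = 32768 (next power of two)
 */
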
1965 /*
1966  * Allocate and set up the link descriptor pool that will be used by HW for
1967  * various link and queue descriptors and managed by WBM
1968  */
1969 static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
1970 {
1971 	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
1972 	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
1973 	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
1974 	uint32_t num_mpdus_per_link_desc =
1975 		hal_num_mpdus_per_link_desc(soc->hal_soc);
1976 	uint32_t num_msdus_per_link_desc =
1977 		hal_num_msdus_per_link_desc(soc->hal_soc);
1978 	uint32_t num_mpdu_links_per_queue_desc =
1979 		hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
1980 	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
1981 	uint32_t total_link_descs, total_mem_size;
1982 	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
1983 	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
1984 	uint32_t num_link_desc_banks;
1985 	uint32_t last_bank_size = 0;
1986 	uint32_t entry_size, num_entries;
1987 	int i;
1988 	uint32_t desc_id = 0;
1989 	qdf_dma_addr_t *baseaddr = NULL;
1990 
1991 	/* Only Tx queue descriptors are allocated from the common link
1992 	 * descriptor pool. Rx queue descriptors (REO queue extension
1993 	 * descriptors) are not included here because they are expected to be
1994 	 * allocated contiguously with the REO queue descriptors.
1995 	 */
1996 	num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
1997 		AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
1998 
1999 	num_mpdu_queue_descs = num_mpdu_link_descs /
2000 		num_mpdu_links_per_queue_desc;
2001 
2002 	num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
2003 		AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
2004 		num_msdus_per_link_desc;
2005 
2006 	num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
2007 		AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
2008 
2009 	num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
2010 		num_tx_msdu_link_descs + num_rx_msdu_link_descs;
2011 
2012 	/* Round up to power of 2 */
2013 	total_link_descs = 1;
2014 	while (total_link_descs < num_entries)
2015 		total_link_descs <<= 1;
2016 
2017 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2018 		FL("total_link_descs: %u, link_desc_size: %d"),
2019 		total_link_descs, link_desc_size);
2020 	total_mem_size = total_link_descs * link_desc_size;
2021 
2022 	total_mem_size += link_desc_align;
2023 
2024 	if (total_mem_size <= max_alloc_size) {
2025 		num_link_desc_banks = 0;
2026 		last_bank_size = total_mem_size;
2027 	} else {
2028 		num_link_desc_banks = (total_mem_size) /
2029 			(max_alloc_size - link_desc_align);
2030 		last_bank_size = total_mem_size %
2031 			(max_alloc_size - link_desc_align);
2032 	}
2033 
2034 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
2035 		FL("total_mem_size: %d, num_link_desc_banks: %u"),
2036 		total_mem_size, num_link_desc_banks);
2037 
2038 	for (i = 0; i < num_link_desc_banks; i++) {
2039 		if (!dp_is_soc_reinit(soc)) {
2040 			baseaddr = &soc->link_desc_banks[i].
2041 					base_paddr_unaligned;
2042 			soc->link_desc_banks[i].base_vaddr_unaligned =
2043 				qdf_mem_alloc_consistent(soc->osdev,
2044 							 soc->osdev->dev,
2045 							 max_alloc_size,
2046 							 baseaddr);
2047 		}
2048 		soc->link_desc_banks[i].size = max_alloc_size;
2049 
2050 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
2051 			soc->link_desc_banks[i].base_vaddr_unaligned) +
2052 			((unsigned long)(
2053 			soc->link_desc_banks[i].base_vaddr_unaligned) %
2054 			link_desc_align));
2055 
2056 		soc->link_desc_banks[i].base_paddr = (unsigned long)(
2057 			soc->link_desc_banks[i].base_paddr_unaligned) +
2058 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
2059 			(unsigned long)(
2060 			soc->link_desc_banks[i].base_vaddr_unaligned));
2061 
2062 		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
2063 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2064 				FL("Link descriptor memory alloc failed"));
2065 			goto fail;
2066 		}
2067 		qdf_minidump_log(soc->link_desc_banks[i].base_vaddr,
2068 				 soc->link_desc_banks[i].size,
2069 				 "link_desc_bank");
2075 	}
2076 
2077 	if (last_bank_size) {
2078 		/* Allocate last bank in case the total memory required is not
2079 		 * an exact multiple of max_alloc_size
2080 		 */
2081 		if (!dp_is_soc_reinit(soc)) {
2082 			baseaddr = &soc->link_desc_banks[i].
2083 					base_paddr_unaligned;
2084 			soc->link_desc_banks[i].base_vaddr_unaligned =
2085 				qdf_mem_alloc_consistent(soc->osdev,
2086 							 soc->osdev->dev,
2087 							 last_bank_size,
2088 							 baseaddr);
2089 		}
2090 		soc->link_desc_banks[i].size = last_bank_size;
2091 
2092 		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
2093 			(soc->link_desc_banks[i].base_vaddr_unaligned) +
2094 			((unsigned long)(
2095 			soc->link_desc_banks[i].base_vaddr_unaligned) %
2096 			link_desc_align));
2097 
2098 		soc->link_desc_banks[i].base_paddr =
2099 			(unsigned long)(
2100 			soc->link_desc_banks[i].base_paddr_unaligned) +
2101 			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
2102 			(unsigned long)(
2103 			soc->link_desc_banks[i].base_vaddr_unaligned));
2104 
2105 		qdf_minidump_log(soc->link_desc_banks[i].base_vaddr,
2106 				 soc->link_desc_banks[i].size,
2107 				 "link_desc_bank");
2113 	}
2114 
2115 
2116 	/* Allocate and setup link descriptor idle list for HW internal use */
2117 	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
2118 	total_mem_size = entry_size * total_link_descs;
2119 
2120 	if (total_mem_size <= max_alloc_size) {
2121 		void *desc;
2122 
2123 		if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
2124 				  WBM_IDLE_LINK, 0, 0, total_link_descs, 0)) {
2125 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2126 				FL("Link desc idle ring setup failed"));
2127 			goto fail;
2128 		}
2129 
2130 		qdf_minidump_log(soc->wbm_idle_link_ring.base_vaddr_unaligned,
2131 				 soc->wbm_idle_link_ring.alloc_size,
2132 				 "wbm_idle_link_ring");
2133 
2134 		hal_srng_access_start_unlocked(soc->hal_soc,
2135 			soc->wbm_idle_link_ring.hal_srng);
2136 
2137 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
2138 			soc->link_desc_banks[i].base_paddr; i++) {
2139 			uint32_t num_entries = (soc->link_desc_banks[i].size -
2140 				((unsigned long)(
2141 				soc->link_desc_banks[i].base_vaddr) -
2142 				(unsigned long)(
2143 				soc->link_desc_banks[i].base_vaddr_unaligned)))
2144 				/ link_desc_size;
2145 			unsigned long paddr = (unsigned long)(
2146 				soc->link_desc_banks[i].base_paddr);
2147 
2148 			while (num_entries && (desc = hal_srng_src_get_next(
2149 				soc->hal_soc,
2150 				soc->wbm_idle_link_ring.hal_srng))) {
2151 				hal_set_link_desc_addr(desc,
2152 					LINK_DESC_COOKIE(desc_id, i), paddr);
2153 				num_entries--;
2154 				desc_id++;
2155 				paddr += link_desc_size;
2156 			}
2157 		}
2158 		hal_srng_access_end_unlocked(soc->hal_soc,
2159 			soc->wbm_idle_link_ring.hal_srng);
2160 	} else {
2161 		uint32_t num_scatter_bufs;
2162 		uint32_t num_entries_per_buf;
2163 		uint32_t rem_entries;
2164 		uint8_t *scatter_buf_ptr;
2165 		uint16_t scatter_buf_num;
2166 		uint32_t buf_size = 0;
2167 
2168 		soc->wbm_idle_scatter_buf_size =
2169 			hal_idle_list_scatter_buf_size(soc->hal_soc);
2170 		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
2171 			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
2172 		num_scatter_bufs = hal_idle_list_num_scatter_bufs(
2173 					soc->hal_soc, total_mem_size,
2174 					soc->wbm_idle_scatter_buf_size);
2175 
2176 		if (num_scatter_bufs > MAX_IDLE_SCATTER_BUFS) {
2177 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2178 					FL("scatter bufs size out of bounds"));
2179 			goto fail;
2180 		}
2181 
2182 		for (i = 0; i < num_scatter_bufs; i++) {
2183 			baseaddr = &soc->wbm_idle_scatter_buf_base_paddr[i];
2184 			if (!dp_is_soc_reinit(soc)) {
2185 				buf_size = soc->wbm_idle_scatter_buf_size;
2186 				soc->wbm_idle_scatter_buf_base_vaddr[i] =
2187 					qdf_mem_alloc_consistent(soc->osdev,
2188 								 soc->osdev->
2189 								 dev,
2190 								 buf_size,
2191 								 baseaddr);
2192 			}
2193 			if (!soc->wbm_idle_scatter_buf_base_vaddr[i]) {
2194 				QDF_TRACE(QDF_MODULE_ID_DP,
2195 					  QDF_TRACE_LEVEL_ERROR,
2196 					  FL("Scatter lst memory alloc fail"));
2197 				goto fail;
2198 			}
2199 		}
2200 
2201 		/* Populate idle list scatter buffers with link descriptor
2202 		 * pointers
2203 		 */
2204 		scatter_buf_num = 0;
2205 		scatter_buf_ptr = (uint8_t *)(
2206 			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
2207 		rem_entries = num_entries_per_buf;
2208 
2209 		for (i = 0; i < MAX_LINK_DESC_BANKS &&
2210 			soc->link_desc_banks[i].base_paddr; i++) {
2211 			uint32_t num_link_descs =
2212 				(soc->link_desc_banks[i].size -
2213 				((unsigned long)(
2214 				soc->link_desc_banks[i].base_vaddr) -
2215 				(unsigned long)(
2216 				soc->link_desc_banks[i].base_vaddr_unaligned)))
2217 				/ link_desc_size;
2218 			unsigned long paddr = (unsigned long)(
2219 				soc->link_desc_banks[i].base_paddr);
2220 
2221 			while (num_link_descs) {
2222 				hal_set_link_desc_addr((void *)scatter_buf_ptr,
2223 					LINK_DESC_COOKIE(desc_id, i), paddr);
2224 				num_link_descs--;
2225 				desc_id++;
2226 				paddr += link_desc_size;
2227 				rem_entries--;
2228 				if (rem_entries) {
2229 					scatter_buf_ptr += entry_size;
2230 				} else {
2231 					rem_entries = num_entries_per_buf;
2232 					scatter_buf_num++;
2233 
2234 					if (scatter_buf_num >= num_scatter_bufs)
2235 						break;
2236 
2237 					scatter_buf_ptr = (uint8_t *)(
2238 						soc->wbm_idle_scatter_buf_base_vaddr[
2239 						scatter_buf_num]);
2240 				}
2241 			}
2242 		}
2243 		/* Setup link descriptor idle list in HW */
2244 		hal_setup_link_idle_list(soc->hal_soc,
2245 			soc->wbm_idle_scatter_buf_base_paddr,
2246 			soc->wbm_idle_scatter_buf_base_vaddr,
2247 			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
2248 			(uint32_t)(scatter_buf_ptr -
2249 			(uint8_t *)(soc->wbm_idle_scatter_buf_base_vaddr[
2250 			scatter_buf_num-1])), total_link_descs);
2251 	}
2252 	return 0;
2253 
2254 fail:
2255 	if (soc->wbm_idle_link_ring.hal_srng) {
2256 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
2257 				WBM_IDLE_LINK, 0);
2258 	}
2259 
2260 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2261 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
2262 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2263 				soc->wbm_idle_scatter_buf_size,
2264 				soc->wbm_idle_scatter_buf_base_vaddr[i],
2265 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
2266 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
2267 		}
2268 	}
2269 
2270 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
2271 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
2272 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2273 				soc->link_desc_banks[i].size,
2274 				soc->link_desc_banks[i].base_vaddr_unaligned,
2275 				soc->link_desc_banks[i].base_paddr_unaligned,
2276 				0);
2277 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
2278 		}
2279 	}
2280 	return QDF_STATUS_E_FAILURE;
2281 }
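
/*
 * In summary, dp_hw_link_desc_pool_setup() picks one of two idle-list
 * strategies by comparing the required memory against max_alloc_size:
 *
 *	if (entry_size * total_link_descs <= max_alloc_size)
 *		-> back the idle list with a single WBM_IDLE_LINK SRNG and
 *		   push each link descriptor address into it directly;
 *	else
 *		-> carve the addresses into scatter buffers of
 *		   hal_idle_list_scatter_buf_size() bytes each and hand the
 *		   buffer array to HW via hal_setup_link_idle_list().
 */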
2282 
2283 /*
2284  * Free the link descriptor pool that was set up for HW use
2285  */
2286 static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
2287 {
2288 	int i;
2289 
2290 	if (soc->wbm_idle_link_ring.hal_srng) {
2291 		qdf_minidump_remove(
2292 			soc->wbm_idle_link_ring.base_vaddr_unaligned);
2293 		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
2294 			WBM_IDLE_LINK, 0);
2295 	}
2296 
2297 	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
2298 		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
2299 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2300 				soc->wbm_idle_scatter_buf_size,
2301 				soc->wbm_idle_scatter_buf_base_vaddr[i],
2302 				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
2303 			soc->wbm_idle_scatter_buf_base_vaddr[i] = NULL;
2304 		}
2305 	}
2306 
2307 	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
2308 		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
2309 			qdf_minidump_remove(soc->link_desc_banks[i].base_vaddr);
2310 			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
2311 				soc->link_desc_banks[i].size,
2312 				soc->link_desc_banks[i].base_vaddr_unaligned,
2313 				soc->link_desc_banks[i].base_paddr_unaligned,
2314 				0);
2315 			soc->link_desc_banks[i].base_vaddr_unaligned = NULL;
2316 		}
2317 	}
2318 }
2319 
2320 #ifdef IPA_OFFLOAD
2321 #define REO_DST_RING_SIZE_QCA6290 1023
2322 #ifndef QCA_WIFI_QCA8074_VP
2323 #define REO_DST_RING_SIZE_QCA8074 1023
2324 #define REO_DST_RING_SIZE_QCN9000 2048
2325 #else
2326 #define REO_DST_RING_SIZE_QCA8074 8
2327 #define REO_DST_RING_SIZE_QCN9000 8
2328 #endif /* QCA_WIFI_QCA8074_VP */
2329 
2330 #else
2331 
2332 #define REO_DST_RING_SIZE_QCA6290 1024
2333 #ifndef QCA_WIFI_QCA8074_VP
2334 #define REO_DST_RING_SIZE_QCA8074 2048
2335 #define REO_DST_RING_SIZE_QCN9000 2048
2336 #else
2337 #define REO_DST_RING_SIZE_QCA8074 8
2338 #define REO_DST_RING_SIZE_QCN9000 8
2339 #endif /* QCA_WIFI_QCA8074_VP */
2340 #endif /* IPA_OFFLOAD */
2341 
2342 #ifndef FEATURE_WDS
2343 static void dp_soc_wds_attach(struct dp_soc *soc)
2344 {
2345 }
2346 
2347 static void dp_soc_wds_detach(struct dp_soc *soc)
2348 {
2349 }
2350 #endif
2351 /*
2352  * dp_soc_reset_cpu_ring_map() - Reset cpu ring map
2353  * @soc: Datapath soc handle
2354  *
2355  * This API resets the default cpu ring map
2356  */
2358 static void dp_soc_reset_cpu_ring_map(struct dp_soc *soc)
2359 {
2360 	uint8_t i;
2361 	int nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2362 
2363 	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++) {
2364 		switch (nss_config) {
2365 		case dp_nss_cfg_first_radio:
2366 			/*
2367 			 * Setting Tx ring map for one nss offloaded radio
2368 			 * Setting Tx ring map for the first nss offloaded radio
2369 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_FIRST_RADIO_OFFLOADED_MAP][i];
2370 			break;
2371 
2372 		case dp_nss_cfg_second_radio:
2373 			/*
2374 			 * Setting Tx ring map for the second nss offloaded radio
2375 			 */
2376 			soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_SECOND_RADIO_OFFLOADED_MAP][i];
2377 			break;
2378 
2379 		case dp_nss_cfg_dbdc:
2380 			/*
2381 			 * Setting Tx ring map for 2 nss offloaded radios
2382 			 */
2383 			soc->tx_ring_map[i] =
2384 				dp_cpu_ring_map[DP_NSS_DBDC_OFFLOADED_MAP][i];
2385 			break;
2386 
2387 		case dp_nss_cfg_dbtc:
2388 			/*
2389 			 * Setting Tx ring map for 3 nss offloaded radios
2390 			 */
2391 			soc->tx_ring_map[i] =
2392 				dp_cpu_ring_map[DP_NSS_DBTC_OFFLOADED_MAP][i];
2393 			break;
2394 
2395 		default:
2396 			dp_err("tx_ring_map failed due to invalid nss cfg");
2397 			break;
2398 		}
2399 	}
2400 }
2401 
2402 /*
2403  * dp_soc_ring_if_nss_offloaded() - find if ring is offloaded to NSS
2404  * @soc: DP soc handle
2405  * @ring_type: ring type
2406  * @ring_num: ring number
2407  *
2408  * Return: nonzero if the ring is offloaded to NSS, 0 otherwise
2409  */
2410 static uint8_t dp_soc_ring_if_nss_offloaded(struct dp_soc *soc, enum hal_ring_type ring_type, int ring_num)
2411 {
2412 	uint8_t nss_config = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2413 	uint8_t status = 0;
2414 
2415 	switch (ring_type) {
2416 	case WBM2SW_RELEASE:
2417 	case REO_DST:
2418 	case RXDMA_BUF:
2419 		status = ((nss_config) & (1 << ring_num));
2420 		break;
2421 	default:
2422 		break;
2423 	}
2424 
2425 	return status;
2426 }
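
/*
 * Example of the bit test above, assuming dp_nss_cfg_first_radio is 0x1:
 *
 *	dp_soc_ring_if_nss_offloaded(soc, REO_DST, 0)
 *		-> 0x1 & (1 << 0) = 1	(ring 0 offloaded to NSS)
 *	dp_soc_ring_if_nss_offloaded(soc, REO_DST, 1)
 *		-> 0x1 & (1 << 1) = 0	(ring 1 serviced by host)
 */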
2427 
2428 /*
2429  * dp_soc_disable_mac2_intr_mask() - reset interrupt mask for WMAC2 hw rings
2430  * @soc: DP SoC handle
2431  *
2432  * Return: void
2433  */
2434 static void dp_soc_disable_mac2_intr_mask(struct dp_soc *soc)
2435 {
2436 	int *grp_mask = NULL;
2437 	int group_number;
2438 
2439 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2440 	group_number = dp_srng_find_ring_in_mask(0x2, grp_mask);
2441 	wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2442 					  group_number, 0x0);
2443 
2444 	grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
2445 	group_number = dp_srng_find_ring_in_mask(0x2, grp_mask);
2446 	wlan_cfg_set_rx_mon_ring_mask(soc->wlan_cfg_ctx,
2447 				      group_number, 0x0);
2448 
2449 	grp_mask = &soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[0];
2450 	group_number = dp_srng_find_ring_in_mask(0x2, grp_mask);
2451 	wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx,
2452 					  group_number, 0x0);
2453 
2454 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_mon_ring_mask[0];
2455 	group_number = dp_srng_find_ring_in_mask(0x2, grp_mask);
2456 	wlan_cfg_set_host2rxdma_mon_ring_mask(soc->wlan_cfg_ctx,
2457 					      group_number, 0x0);
2458 }
2459 
2460 /*
2461  * dp_soc_reset_intr_mask() - reset interrupt mask
2462  * @soc: DP SoC handle
2463  *
2464  * Return: void
2465  */
2466 static void dp_soc_reset_intr_mask(struct dp_soc *soc)
2467 {
2468 	uint8_t j;
2469 	int *grp_mask = NULL;
2470 	int group_number, mask, num_ring;
2471 
2472 	/* number of tx ring */
2473 	num_ring = wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
2474 
2475 	/*
2476 	 * group mask for tx completion  ring.
2477 	 * group mask for tx completion ring.
2478 	grp_mask =  &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
2479 
2480 	/* loop and reset the mask for only offloaded ring */
2481 	/* loop and reset the mask only for offloaded rings */
2482 		if (!dp_soc_ring_if_nss_offloaded(soc, WBM2SW_RELEASE, j)) {
2483 			continue;
2484 		}
2485 
2486 		/*
2487 		 * Group number corresponding to tx offloaded ring.
2488 		 */
2489 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2490 		if (group_number < 0) {
2491 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2492 					FL("ring not part of any group; ring_type: %d, ring_num %d"),
2493 					WBM2SW_RELEASE, j);
2494 			return;
2495 		}
2496 
2497 		/* reset the tx mask for offloaded ring */
2498 		mask = wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, group_number);
2499 		mask &= (~(1 << j));
2500 
2501 		/*
2502 		 * reset the interrupt mask for offloaded ring.
2503 		 */
2504 		wlan_cfg_set_tx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2505 	}
2506 
2507 	/* number of rx rings */
2508 	num_ring = wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
2509 
2510 	/*
2511 	 * group mask for reo destination ring.
2512 	 */
2513 	grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
2514 
2515 	/* loop and reset the mask for only offloaded ring */
2516 	/* loop and reset the mask only for offloaded rings */
2517 		if (!dp_soc_ring_if_nss_offloaded(soc, REO_DST, j)) {
2518 			continue;
2519 		}
2520 
2521 		/*
2522 		 * Group number corresponding to rx offloaded ring.
2523 		 */
2524 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2525 		if (group_number < 0) {
2526 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2527 					FL("ring not part of any group; ring_type: %d, ring_num %d"),
2528 					REO_DST, j);
2529 			return;
2530 		}
2531 
2532 		/* clear the rx interrupt mask for the offloaded ring */
2533 		mask =  wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, group_number);
2534 		mask &= (~(1 << j));
2535 
2536 		/*
2537 		 * set the interrupt mask to zero for rx offloaded radio.
2538 		 */
2539 		wlan_cfg_set_rx_ring_mask(soc->wlan_cfg_ctx, group_number, mask);
2540 	}
2541 
2542 	/*
2543 	 * group mask for Rx buffer refill ring
2544 	 */
2545 	grp_mask = &soc->wlan_cfg_ctx->int_host2rxdma_ring_mask[0];
2546 
2547 	/* loop and reset the mask for only offloaded ring */
2548 	/* loop and reset the mask only for offloaded rings */
2549 		if (!dp_soc_ring_if_nss_offloaded(soc, RXDMA_BUF, j)) {
2550 			continue;
2551 		}
2552 
2553 		/*
2554 		 * Group number corresponding to rx offloaded ring.
2555 		 */
2556 		group_number = dp_srng_find_ring_in_mask(j, grp_mask);
2557 		if (group_number < 0) {
2558 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
2559 					FL("ring not part of any group; ring_type: %d, ring_num %d"),
2560 					RXDMA_BUF, j);
2561 			return;
2562 		}
2563 
2564 		/* clear the refill ring interrupt mask for the offloaded ring */
2565 		mask =  wlan_cfg_get_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2566 				group_number);
2567 		mask &= (~(1 << j));
2568 
2569 		/*
2570 		 * set the interrupt mask to zero for rx offloaded radio.
2571 		 */
2572 		wlan_cfg_set_host2rxdma_ring_mask(soc->wlan_cfg_ctx,
2573 			group_number, mask);
2574 	}
2575 }
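
/*
 * The mask surgery above is plain bit clearing. As an illustration, if a
 * group carries tx rings 0 and 2 (mask 0x5) and ring 2 is NSS offloaded:
 *
 *	mask = 0x5;
 *	mask &= ~(1 << 2);	// mask is now 0x1
 *
 * so the host keeps servicing only ring 0 interrupts for that group.
 */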
2576 
2577 #ifdef IPA_OFFLOAD
2578 /**
2579  * dp_reo_remap_config() - configure reo remap register value based
2580  *                         on nss configuration.
2581  *		Based on the offload_radio value, the remap configuration
2582  *		below gets applied:
2583  *		0 - both radios handled by host (remap rings 1, 2, 3 & 4)
2584  *		1 - 1st radio handled by NSS (remap rings 2, 3 & 4)
2585  *		2 - 2nd radio handled by NSS (remap rings 1, 3 & 4)
2586  *		3 - both radios handled by NSS (remap not required)
2587  *		4 - IPA OFFLOAD enabled (remap rings 1, 2 & 3)
2588  *
2589  * @remap1: output parameter indicating reo remap 1 register value
2590  * @remap2: output parameter indicating reo remap 2 register value
2591  * Return: bool type, true if remap is configured, else false.
2592  */
2593 bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap1, uint32_t *remap2)
2594 {
2595 	*remap1 = HAL_REO_REMAP_IX2(REO_REMAP_SW1, 16) |
2596 		  HAL_REO_REMAP_IX2(REO_REMAP_SW2, 17) |
2597 		  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 18) |
2598 		  HAL_REO_REMAP_IX2(REO_REMAP_SW1, 19) |
2599 		  HAL_REO_REMAP_IX2(REO_REMAP_SW2, 20) |
2600 		  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 21) |
2601 		  HAL_REO_REMAP_IX2(REO_REMAP_SW1, 22) |
2602 		  HAL_REO_REMAP_IX2(REO_REMAP_SW2, 23);
2603 
2604 	*remap2 = HAL_REO_REMAP_IX3(REO_REMAP_SW3, 24) |
2605 		  HAL_REO_REMAP_IX3(REO_REMAP_SW1, 25) |
2606 		  HAL_REO_REMAP_IX3(REO_REMAP_SW2, 26) |
2607 		  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 27) |
2608 		  HAL_REO_REMAP_IX3(REO_REMAP_SW1, 28) |
2609 		  HAL_REO_REMAP_IX3(REO_REMAP_SW2, 29) |
2610 		  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 30) |
2611 		  HAL_REO_REMAP_IX3(REO_REMAP_SW1, 31);
2612 
2613 	dp_debug("remap1 %x remap2 %x", *remap1, *remap2);
2614 
2615 	return true;
2616 }
2617 #else
2618 static bool dp_reo_remap_config(struct dp_soc *soc,
2619 				uint32_t *remap1,
2620 				uint32_t *remap2)
2621 {
2622 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2623 	uint8_t target_type;
2624 
2625 	target_type = hal_get_target_type(soc->hal_soc);
2626 
2627 	switch (offload_radio) {
2628 	case dp_nss_cfg_default:
2629 		*remap1 = HAL_REO_REMAP_IX2(REO_REMAP_SW1, 16) |
2630 			  HAL_REO_REMAP_IX2(REO_REMAP_SW2, 17) |
2631 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 18) |
2632 			  HAL_REO_REMAP_IX2(REO_REMAP_SW4, 19) |
2633 			  HAL_REO_REMAP_IX2(REO_REMAP_SW1, 20) |
2634 			  HAL_REO_REMAP_IX2(REO_REMAP_SW2, 21) |
2635 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 22) |
2636 			  HAL_REO_REMAP_IX2(REO_REMAP_SW4, 23);
2637 
2638 		*remap2 = HAL_REO_REMAP_IX3(REO_REMAP_SW1, 24) |
2639 			  HAL_REO_REMAP_IX3(REO_REMAP_SW2, 25) |
2640 			  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 26) |
2641 			  HAL_REO_REMAP_IX3(REO_REMAP_SW4, 27) |
2642 			  HAL_REO_REMAP_IX3(REO_REMAP_SW1, 28) |
2643 			  HAL_REO_REMAP_IX3(REO_REMAP_SW2, 29) |
2644 			  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 30) |
2645 			  HAL_REO_REMAP_IX3(REO_REMAP_SW4, 31);
2646 		break;
2647 	case dp_nss_cfg_first_radio:
2648 		*remap1 = HAL_REO_REMAP_IX2(REO_REMAP_SW2, 16) |
2649 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 17) |
2650 			  HAL_REO_REMAP_IX2(REO_REMAP_SW4, 18) |
2651 			  HAL_REO_REMAP_IX2(REO_REMAP_SW2, 19) |
2652 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 20) |
2653 			  HAL_REO_REMAP_IX2(REO_REMAP_SW4, 21) |
2654 			  HAL_REO_REMAP_IX2(REO_REMAP_SW2, 22) |
2655 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 23);
2656 
2657 		*remap2 = HAL_REO_REMAP_IX3(REO_REMAP_SW4, 24) |
2658 			  HAL_REO_REMAP_IX3(REO_REMAP_SW2, 25) |
2659 			  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 26) |
2660 			  HAL_REO_REMAP_IX3(REO_REMAP_SW4, 27) |
2661 			  HAL_REO_REMAP_IX3(REO_REMAP_SW2, 28) |
2662 			  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 29) |
2663 			  HAL_REO_REMAP_IX3(REO_REMAP_SW4, 30) |
2664 			  HAL_REO_REMAP_IX3(REO_REMAP_SW2, 31);
2665 		break;
2666 	case dp_nss_cfg_second_radio:
2667 		*remap1 = HAL_REO_REMAP_IX2(REO_REMAP_SW1, 16) |
2668 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 17) |
2669 			  HAL_REO_REMAP_IX2(REO_REMAP_SW4, 18) |
2670 			  HAL_REO_REMAP_IX2(REO_REMAP_SW1, 19) |
2671 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 20) |
2672 			  HAL_REO_REMAP_IX2(REO_REMAP_SW4, 21) |
2673 			  HAL_REO_REMAP_IX2(REO_REMAP_SW1, 22) |
2674 			  HAL_REO_REMAP_IX2(REO_REMAP_SW3, 23);
2675 
2676 		*remap2 = HAL_REO_REMAP_IX3(REO_REMAP_SW4, 24) |
2677 			  HAL_REO_REMAP_IX3(REO_REMAP_SW1, 25) |
2678 			  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 26) |
2679 			  HAL_REO_REMAP_IX3(REO_REMAP_SW4, 27) |
2680 			  HAL_REO_REMAP_IX3(REO_REMAP_SW1, 28) |
2681 			  HAL_REO_REMAP_IX3(REO_REMAP_SW3, 29) |
2682 			  HAL_REO_REMAP_IX3(REO_REMAP_SW4, 30) |
2683 			  HAL_REO_REMAP_IX3(REO_REMAP_SW1, 31);
2684 
2685 		break;
2686 	case dp_nss_cfg_dbdc:
2687 	case dp_nss_cfg_dbtc:
2688 		/* return false if both or all are offloaded to NSS */
2689 		return false;
2690 	}
2691 
2692 	dp_debug("remap1 %x remap2 %x offload_radio %u",
2693 		 *remap1, *remap2, offload_radio);
2694 	return true;
2695 }
2696 #endif /* IPA_OFFLOAD */
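
/*
 * The IX2/IX3 slot assignments above spread RX across the SW rings still
 * owned by the host: dp_nss_cfg_default cycles SW1,SW2,SW3,SW4 across the
 * 16 slots (offsets 16-31); dp_nss_cfg_first_radio drops SW1 and cycles
 * SW2,SW3,SW4; dp_nss_cfg_second_radio drops SW2; and the IPA_OFFLOAD
 * build cycles only SW1,SW2,SW3, presumably because ring 4 is claimed by
 * the IPA datapath.
 */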
2697 
2698 /*
2699  * dp_reo_frag_dst_set() - configure reo register to set the
2700  *                        fragment destination ring
2701  * @soc: Datapath soc handle
2702  * @frag_dst_ring: output parameter to set the fragment destination ring
2703  *
2704  * Based on offload_radio, the fragment destination ring is selected from:
2705  * 0 - TCL
2706  * 1 - SW1
2707  * 2 - SW2
2708  * 3 - SW3
2709  * 4 - SW4
2710  * 5 - Release
2711  * 6 - FW
2712  * 7 - alternate select
2713  *
2714  * Return: void
2715  */
2716 static void dp_reo_frag_dst_set(struct dp_soc *soc, uint8_t *frag_dst_ring)
2717 {
2718 	uint8_t offload_radio = wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx);
2719 
2720 	switch (offload_radio) {
2721 	case dp_nss_cfg_default:
2722 		*frag_dst_ring = HAL_SRNG_REO_EXCEPTION;
2723 		break;
2724 	case dp_nss_cfg_first_radio:
2725 		/*
2726 		 * This configuration is valid for a single-band radio that
2727 		 * is also NSS offloaded.
2728 		 */
2729 	case dp_nss_cfg_dbdc:
2730 	case dp_nss_cfg_dbtc:
2731 		*frag_dst_ring = HAL_SRNG_REO_ALTERNATE_SELECT;
2732 		break;
2733 	default:
2734 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2735 				FL("dp_reo_frag_dst_set invalid offload radio config"));
2736 		break;
2737 	}
2738 }
2739 
2740 #ifdef ENABLE_VERBOSE_DEBUG
2741 static void dp_enable_verbose_debug(struct dp_soc *soc)
2742 {
2743 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2744 
2745 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2746 
2747 	if (soc_cfg_ctx->per_pkt_trace & dp_verbose_debug_mask)
2748 		is_dp_verbose_debug_enabled = true;
2749 
2750 	if (soc_cfg_ctx->per_pkt_trace & hal_verbose_debug_mask)
2751 		hal_set_verbose_debug(true);
2752 	else
2753 		hal_set_verbose_debug(false);
2754 }
2755 #else
2756 static void dp_enable_verbose_debug(struct dp_soc *soc)
2757 {
2758 }
2759 #endif
2760 
2761 /*
2762  * dp_soc_cmn_setup() - Common SoC level initialization
2763  * @soc:		Datapath SOC handle
2764  *
2765  * This is an internal function used to set up common SOC data structures,
2766  * to be called from PDEV attach after receiving HW mode capabilities from FW
2767  */
2768 static int dp_soc_cmn_setup(struct dp_soc *soc)
2769 {
2770 	int i, cached;
2771 	struct hal_reo_params reo_params;
2772 	int tx_ring_size;
2773 	int tx_comp_ring_size;
2774 	int reo_dst_ring_size;
2775 	uint32_t entries;
2776 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
2777 
2778 	if (qdf_atomic_read(&soc->cmn_init_done))
2779 		return 0;
2780 
2781 	if (dp_hw_link_desc_pool_setup(soc))
2782 		goto fail1;
2783 
2784 	soc_cfg_ctx = soc->wlan_cfg_ctx;
2785 
2786 	dp_enable_verbose_debug(soc);
2787 
2788 	/* Setup SRNG rings */
2789 	/* Common rings */
2790 	entries = wlan_cfg_get_dp_soc_wbm_release_ring_size(soc_cfg_ctx);
2791 
2792 	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
2793 			  entries, 0)) {
2794 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2795 			FL("dp_srng_setup failed for wbm_desc_rel_ring"));
2796 		goto fail1;
2797 	}
2798 
2799 	qdf_minidump_log(soc->wbm_desc_rel_ring.base_vaddr_unaligned,
2800 			 soc->wbm_desc_rel_ring.alloc_size,
2801 			 "wbm_desc_rel_ring");
2802 
2803 	soc->num_tcl_data_rings = 0;
2804 	/* Tx data rings */
2805 	if (!wlan_cfg_per_pdev_tx_ring(soc_cfg_ctx)) {
2806 		soc->num_tcl_data_rings =
2807 			wlan_cfg_num_tcl_data_rings(soc_cfg_ctx);
2808 		tx_comp_ring_size =
2809 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
2810 		tx_ring_size =
2811 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
2812 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
2813 			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
2814 					  TCL_DATA, i, 0, tx_ring_size, 0)) {
2815 				QDF_TRACE(QDF_MODULE_ID_DP,
2816 					QDF_TRACE_LEVEL_ERROR,
2817 					FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
2818 				goto fail1;
2819 			}
2820 
2821 			/* Disable cached desc if NSS offload is enabled */
2822 			cached = WLAN_CFG_DST_RING_CACHED_DESC;
2823 			if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
2824 				cached = 0;
2825 			/*
2826 			 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
2827 			 * count
2828 			 */
2829 			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
2830 					  WBM2SW_RELEASE, i, 0,
2831 					  tx_comp_ring_size,
2832 					  cached)) {
2833 				QDF_TRACE(QDF_MODULE_ID_DP,
2834 					QDF_TRACE_LEVEL_ERROR,
2835 					FL("dp_srng_setup failed for tx_comp_ring[%d]"), i);
2836 				goto fail1;
2837 			}
2838 		}
2839 	} else {
2840 		/* This will be incremented during per pdev ring setup */
2841 		soc->num_tcl_data_rings = 0;
2842 	}
2843 
2844 	if (dp_tx_soc_attach(soc)) {
2845 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2846 				FL("dp_tx_soc_attach failed"));
2847 		goto fail1;
2848 	}
2849 
2850 	entries = wlan_cfg_get_dp_soc_tcl_cmd_ring_size(soc_cfg_ctx);
2851 	/* TCL command and status rings */
2852 	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
2853 			  entries, 0)) {
2854 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2855 			FL("dp_srng_setup failed for tcl_cmd_ring"));
2856 		goto fail1;
2857 	}
2858 
2859 	entries = wlan_cfg_get_dp_soc_tcl_status_ring_size(soc_cfg_ctx);
2860 	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
2861 			  entries, 0)) {
2862 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2863 			FL("dp_srng_setup failed for tcl_status_ring"));
2864 		goto fail1;
2865 	}
2866 
2867 	reo_dst_ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
2868 
2869 	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
2870 	 * descriptors
2871 	 */
2872 
2873 	/* Rx data rings */
2874 	if (!wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
2875 		soc->num_reo_dest_rings =
2876 			wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
2877 		QDF_TRACE(QDF_MODULE_ID_DP,
2878 			QDF_TRACE_LEVEL_INFO,
2879 			FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings);
2880 
2881 		/* Disable cached desc if NSS offload is enabled */
2882 		cached = WLAN_CFG_DST_RING_CACHED_DESC;
2883 		if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx))
2884 			cached = 0;
2885 
2886 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
2887 			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
2888 					  i, 0, reo_dst_ring_size, cached)) {
2889 				QDF_TRACE(QDF_MODULE_ID_DP,
2890 					  QDF_TRACE_LEVEL_ERROR,
2891 					  FL(RNG_ERR "reo_dest_ring [%d]"), i);
2892 				goto fail1;
2893 			}
2894 		}
2895 	} else {
2896 		/* This will be incremented during per pdev ring setup */
2897 		soc->num_reo_dest_rings = 0;
2898 	}
2899 
2900 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
2901 	/* LMAC RxDMA to SW Rings configuration */
2902 	if (!wlan_cfg_per_pdev_lmac_ring(soc_cfg_ctx)) {
2903 		/* Only valid for MCL */
2904 		struct dp_pdev *pdev = soc->pdev_list[0];
2905 
2906 		for (i = 0; i < MAX_RX_MAC_RINGS; i++) {
2907 			if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[i],
2908 					  RXDMA_DST, 0, i, entries, 0)) {
2909 				QDF_TRACE(QDF_MODULE_ID_DP,
2910 					  QDF_TRACE_LEVEL_ERROR,
2911 					  FL(RNG_ERR "rxdma_err_dst_ring"));
2912 				goto fail1;
2913 			}
2914 		}
2915 	}
2916 	/* TBD: call dp_rx_init to setup Rx SW descriptors */
2917 
2918 	/* REO reinjection ring */
2919 	entries = wlan_cfg_get_dp_soc_reo_reinject_ring_size(soc_cfg_ctx);
2920 	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
2921 			  entries, 0)) {
2922 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2923 			  FL("dp_srng_setup failed for reo_reinject_ring"));
2924 		goto fail1;
2925 	}
2926 
2927 
2928 	/* Rx release ring */
2929 	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
2930 			  wlan_cfg_get_dp_soc_rx_release_ring_size(soc_cfg_ctx),
2931 			  0)) {
2932 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2933 			  FL("dp_srng_setup failed for rx_rel_ring"));
2934 		goto fail1;
2935 	}
2936 
2937 
2938 	/* Rx exception ring */
2939 	entries = wlan_cfg_get_dp_soc_reo_exception_ring_size(soc_cfg_ctx);
2940 	if (dp_srng_setup(soc, &soc->reo_exception_ring,
2941 			  REO_EXCEPTION, 0, MAX_REO_DEST_RINGS, entries, 0)) {
2942 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2943 			  FL("dp_srng_setup failed for reo_exception_ring"));
2944 		goto fail1;
2945 	}
2946 
2947 
2948 	/* REO command and status rings */
2949 	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
2950 			  wlan_cfg_get_dp_soc_reo_cmd_ring_size(soc_cfg_ctx),
2951 			  0)) {
2952 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2953 			FL("dp_srng_setup failed for reo_cmd_ring"));
2954 		goto fail1;
2955 	}
2956 
2957 	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
2958 	TAILQ_INIT(&soc->rx.reo_cmd_list);
2959 	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
2960 
2961 	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
2962 			  wlan_cfg_get_dp_soc_reo_status_ring_size(soc_cfg_ctx),
2963 			  0)) {
2964 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
2965 			FL("dp_srng_setup failed for reo_status_ring"));
2966 		goto fail1;
2967 	}
2968 
2969 	/*
2970 	 * Skip registering hw ring interrupts for WMAC2 on IPQ6018,
2971 	 * as WMAC2 is not present on the IPQ6018 platform.
2972 	 */
2973 	if (hal_get_target_type(soc->hal_soc) == TARGET_TYPE_QCA6018) {
2974 		dp_soc_disable_mac2_intr_mask(soc);
2975 	}
2976 
2977 	/* Reset the cpu ring map if radio is NSS offloaded */
2978 	if (wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx)) {
2979 		dp_soc_reset_cpu_ring_map(soc);
2980 		dp_soc_reset_intr_mask(soc);
2981 	}
2982 
2983 	/* Setup HW REO */
2984 	qdf_mem_zero(&reo_params, sizeof(reo_params));
2985 
2986 	if (wlan_cfg_is_rx_hash_enabled(soc_cfg_ctx)) {
2987 
2988 		/*
2989 		 * Reo ring remap is not required if both radios
2990 		 * are offloaded to NSS
2991 		 */
2992 		if (!dp_reo_remap_config(soc,
2993 					&reo_params.remap1,
2994 					&reo_params.remap2))
2995 			goto out;
2996 
2997 		reo_params.rx_hash_enabled = true;
2998 	}
2999 
3000 	/* setup the global rx defrag waitlist */
3001 	TAILQ_INIT(&soc->rx.defrag.waitlist);
3002 	soc->rx.defrag.timeout_ms =
3003 		wlan_cfg_get_rx_defrag_min_timeout(soc_cfg_ctx);
3004 	soc->rx.defrag.next_flush_ms = 0;
3005 	soc->rx.flags.defrag_timeout_check =
3006 		wlan_cfg_get_defrag_timeout_check(soc_cfg_ctx);
3007 	qdf_spinlock_create(&soc->rx.defrag.defrag_lock);
3008 
3009 out:
3010 	/*
3011 	 * set the fragment destination ring
3012 	 */
3013 	dp_reo_frag_dst_set(soc, &reo_params.frag_dst_ring);
3014 
3015 	hal_reo_setup(soc->hal_soc, &reo_params);
3016 
3017 	qdf_atomic_set(&soc->cmn_init_done, 1);
3018 
3019 	dp_soc_wds_attach(soc);
3020 
3021 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
3022 	return 0;
3023 fail1:
3024 	/*
3025 	 * Cleanup will be done as part of soc_detach, which will
3026 	 * be called on pdev attach failure
3027 	 */
3028 	return QDF_STATUS_E_FAILURE;
3029 }
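
/*
 * In summary, dp_soc_cmn_setup() brings up, in order: the HW link
 * descriptor pool, the WBM release ring, the TCL data/completion rings
 * (when not per-pdev), Tx SoC state, the TCL command and status rings,
 * the REO destination, rxdma error, reinject, release, exception and
 * command/status rings, NSS-offload mask fixups, REO remap/hash and
 * fragment-destination parameters, and the rx defrag waitlist, before
 * marking cmn_init_done.
 */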
3030 
3031 /*
3032  * dp_soc_cmn_cleanup() - Common SoC level de-initialization
3033  *
3034  * @soc: Datapath SOC handle
3035  *
3036  * This function cleans up the DP SoC resources initialized in
3037  * dp_pdev_attach_wifi3-->dp_soc_cmn_setup, since dp_soc_detach_wifi3
3038  * cannot accurately determine whether each of them has been
3039  * initialized.
3040  *
3041  */
3042 static void dp_soc_cmn_cleanup(struct dp_soc *soc)
3043 {
3044 	qdf_spinlock_destroy(&soc->rx.defrag.defrag_lock);
3045 
3046 	dp_reo_cmdlist_destroy(soc);
3047 	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
3048 }
3049 
3050 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force);
3051 
3052 static QDF_STATUS dp_lro_hash_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3053 {
3054 	struct cdp_lro_hash_config lro_hash;
3055 	QDF_STATUS status;
3056 
3057 	if (!wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) &&
3058 	    !wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx) &&
3059 	    !wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
3060 		dp_err("LRO, GRO and RX hash disabled");
3061 		return QDF_STATUS_E_FAILURE;
3062 	}
3063 
3064 	qdf_mem_zero(&lro_hash, sizeof(lro_hash));
3065 
3066 	if (wlan_cfg_is_lro_enabled(soc->wlan_cfg_ctx) ||
3067 	    wlan_cfg_is_gro_enabled(soc->wlan_cfg_ctx)) {
3068 		lro_hash.lro_enable = 1;
3069 		lro_hash.tcp_flag = QDF_TCPHDR_ACK;
3070 		lro_hash.tcp_flag_mask = QDF_TCPHDR_FIN | QDF_TCPHDR_SYN |
3071 			 QDF_TCPHDR_RST | QDF_TCPHDR_ACK | QDF_TCPHDR_URG |
3072 			 QDF_TCPHDR_ECE | QDF_TCPHDR_CWR;
3073 	}
3074 
3075 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv4,
3076 		 (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
3077 		 LRO_IPV4_SEED_ARR_SZ));
3078 	qdf_get_random_bytes(lro_hash.toeplitz_hash_ipv6,
3079 		 (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
3080 		 LRO_IPV6_SEED_ARR_SZ));
3081 
3082 	qdf_assert(soc->cdp_soc.ol_ops->lro_hash_config);
3083 
3084 	if (!soc->cdp_soc.ol_ops->lro_hash_config) {
3085 		QDF_BUG(0);
3086 		dp_err("lro_hash_config not configured");
3087 		return QDF_STATUS_E_FAILURE;
3088 	}
3089 
3090 	status = soc->cdp_soc.ol_ops->lro_hash_config(pdev->ctrl_pdev,
3091 						      &lro_hash);
3092 	if (!QDF_IS_STATUS_SUCCESS(status)) {
3093 		dp_err("failed to send lro_hash_config to FW %u", status);
3094 		return status;
3095 	}
3096 
3097 	dp_info("LRO CMD config: lro_enable: 0x%x tcp_flag 0x%x tcp_flag_mask 0x%x",
3098 		lro_hash.lro_enable, lro_hash.tcp_flag,
3099 		lro_hash.tcp_flag_mask);
3100 
3101 	dp_info("toeplitz_hash_ipv4:");
3102 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3103 			   lro_hash.toeplitz_hash_ipv4,
3104 			   (sizeof(lro_hash.toeplitz_hash_ipv4[0]) *
3105 			   LRO_IPV4_SEED_ARR_SZ));
3106 
3107 	dp_info("toeplitz_hash_ipv6:");
3108 	qdf_trace_hex_dump(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
3109 			   lro_hash.toeplitz_hash_ipv6,
3110 			   (sizeof(lro_hash.toeplitz_hash_ipv6[0]) *
3111 			   LRO_IPV6_SEED_ARR_SZ));
3112 
3113 	return status;
3114 }
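
/*
 * Sketch of how the programmed flag fields are typically consumed (an
 * assumption; the actual check runs on the target): a TCP segment is
 * LRO-eligible only when its flags, masked by tcp_flag_mask, equal
 * tcp_flag, i.e.
 *
 *	if ((tcp_flags & lro_hash.tcp_flag_mask) == lro_hash.tcp_flag)
 *		-> candidate for aggregation
 *
 * With the values above that admits pure ACK segments (PSH is not in the
 * mask, so it is ignored) and rejects anything carrying
 * SYN/FIN/RST/URG/ECE/CWR.
 */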
3115 
3116 /*
3117  * dp_rxdma_ring_setup() - configure the RX DMA rings
3118  * @soc: data path SoC handle
3119  * @pdev: Physical device handle
3120  *
3121  * Return: 0 - success, > 0 - failure
3122  */
3123 #ifdef QCA_HOST2FW_RXBUF_RING
3124 static int dp_rxdma_ring_setup(struct dp_soc *soc,
3125 	 struct dp_pdev *pdev)
3126 {
3127 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3128 	int max_mac_rings;
3129 	int i;
3130 	int ring_size;
3131 
3132 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3133 	max_mac_rings = wlan_cfg_get_num_mac_rings(pdev_cfg_ctx);
3134 	ring_size =  wlan_cfg_get_rx_dma_buf_ring_size(pdev_cfg_ctx);
3135 
3136 	for (i = 0; i < max_mac_rings; i++) {
3137 		dp_verbose_debug("pdev_id %d mac_id %d", pdev->pdev_id, i);
3138 		if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
3139 				  RXDMA_BUF, 1, i, ring_size, 0)) {
3140 			QDF_TRACE(QDF_MODULE_ID_DP,
3141 				 QDF_TRACE_LEVEL_ERROR,
3142 				 FL("failed rx mac ring setup"));
3143 			return QDF_STATUS_E_FAILURE;
3144 		}
3145 	}
3146 	return QDF_STATUS_SUCCESS;
3147 }
3148 #else
3149 static int dp_rxdma_ring_setup(struct dp_soc *soc,
3150 	 struct dp_pdev *pdev)
3151 {
3152 	return QDF_STATUS_SUCCESS;
3153 }
3154 #endif
3155 
3156 /**
3157  * dp_dscp_tid_map_setup(): Initialize the dscp-tid maps
3158  * @pdev: DP_PDEV handle
3159  *
3160  * Return: void
3161  */
3162 static inline void
3163 dp_dscp_tid_map_setup(struct dp_pdev *pdev)
3164 {
3165 	uint8_t map_id;
3166 	struct dp_soc *soc = pdev->soc;
3167 
3168 	if (!soc)
3169 		return;
3170 
3171 	for (map_id = 0; map_id < DP_MAX_TID_MAPS; map_id++) {
3172 		qdf_mem_copy(pdev->dscp_tid_map[map_id],
3173 			     default_dscp_tid_map,
3174 			     sizeof(default_dscp_tid_map));
3175 	}
3176 
3177 	for (map_id = 0; map_id < soc->num_hw_dscp_tid_map; map_id++) {
3178 		hal_tx_set_dscp_tid_map(soc->hal_soc,
3179 					default_dscp_tid_map,
3180 					map_id);
3181 	}
3182 }
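
/*
 * Illustrative lookup (an assumption about how the tables are consumed
 * elsewhere in the datapath): each of the DP_MAX_TID_MAPS tables holds
 * one TID per 6-bit DSCP codepoint, so classification reduces to
 *
 *	uint8_t tid = pdev->dscp_tid_map[map_id][dscp & 0x3f];
 *
 * with map_id selecting between the per-vdev override tables.
 */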
3183 
3184 /**
3185  * dp_pcp_tid_map_setup(): Initialize the pcp-tid maps
3186  * @pdev: DP_PDEV handle
3187  *
3188  * Return: void
3189  */
3190 static inline void
3191 dp_pcp_tid_map_setup(struct dp_pdev *pdev)
3192 {
3193 	struct dp_soc *soc = pdev->soc;
3194 
3195 	if (!soc)
3196 		return;
3197 
3198 	qdf_mem_copy(soc->pcp_tid_map, default_pcp_tid_map,
3199 		     sizeof(default_pcp_tid_map));
3200 	hal_tx_set_pcp_tid_map_default(soc->hal_soc, default_pcp_tid_map);
3201 }
3202 
3203 #ifdef IPA_OFFLOAD
3204 /**
3205  * dp_setup_ipa_rx_refill_buf_ring - Setup second Rx refill buffer ring
3206  * @soc: data path instance
3207  * @pdev: core txrx pdev context
3208  *
3209  * Return: QDF_STATUS_SUCCESS: success
3210  *         QDF_STATUS_E_FAILURE: Error return
3211  */
3212 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3213 					   struct dp_pdev *pdev)
3214 {
3215 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3216 	int entries;
3217 
3218 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3219 	entries = wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc_cfg_ctx);
3220 
3221 	/* Setup second Rx refill buffer ring */
3222 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
3223 			  IPA_RX_REFILL_BUF_RING_IDX, pdev->pdev_id, entries, 0)
3224 	   ) {
3225 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3226 			FL("dp_srng_setup failed second rx refill ring"));
3227 		return QDF_STATUS_E_FAILURE;
3228 	}
3229 	return QDF_STATUS_SUCCESS;
3230 }
3231 
3232 /**
3233  * dp_cleanup_ipa_rx_refill_buf_ring - Cleanup second Rx refill buffer ring
3234  * @soc: data path instance
3235  * @pdev: core txrx pdev context
3236  *
3237  * Return: void
3238  */
3239 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3240 					      struct dp_pdev *pdev)
3241 {
3242 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring2, RXDMA_BUF,
3243 			IPA_RX_REFILL_BUF_RING_IDX);
3244 }
3245 
3246 #else
3247 static int dp_setup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3248 					   struct dp_pdev *pdev)
3249 {
3250 	return QDF_STATUS_SUCCESS;
3251 }
3252 
3253 static void dp_cleanup_ipa_rx_refill_buf_ring(struct dp_soc *soc,
3254 					      struct dp_pdev *pdev)
3255 {
3256 }
3257 #endif
3258 
3259 #if !defined(DISABLE_MON_CONFIG)
3260 /**
3261  * dp_mon_rings_setup() - Initialize Monitor rings based on target
3262  * @soc: soc handle
3263  * @pdev: physical device handle
3264  *
3265  * Return: nonzero on failure and zero on success
3266  */
3267 static
3268 QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3269 {
3270 	int mac_id = 0;
3271 	int pdev_id = pdev->pdev_id;
3272 	int entries;
3273 	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
3274 
3275 	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
3276 
3277 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3278 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
3279 
3280 		if (soc->wlan_cfg_ctx->rxdma1_enable) {
3281 			entries =
3282 			   wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
3283 			if (dp_srng_setup(soc,
3284 					  &pdev->rxdma_mon_buf_ring[mac_id],
3285 					  RXDMA_MONITOR_BUF, 0, mac_for_pdev,
3286 					  entries, 0)) {
3287 				QDF_TRACE(QDF_MODULE_ID_DP,
3288 					  QDF_TRACE_LEVEL_ERROR,
3289 					  FL(RNG_ERR "rxdma_mon_buf_ring"));
3290 				return QDF_STATUS_E_NOMEM;
3291 			}
3292 
3293 			entries =
3294 			   wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
3295 			if (dp_srng_setup(soc,
3296 					  &pdev->rxdma_mon_dst_ring[mac_id],
3297 					  RXDMA_MONITOR_DST, 0, mac_for_pdev,
3298 					  entries, 0)) {
3299 				QDF_TRACE(QDF_MODULE_ID_DP,
3300 					  QDF_TRACE_LEVEL_ERROR,
3301 					  FL(RNG_ERR "rxdma_mon_dst_ring"));
3302 				return QDF_STATUS_E_NOMEM;
3303 			}
3304 
3305 			entries =
3306 			    wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
3307 			if (dp_srng_setup(soc,
3308 					  &pdev->rxdma_mon_status_ring[mac_id],
3309 					  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
3310 					  entries, 0)) {
3311 				QDF_TRACE(QDF_MODULE_ID_DP,
3312 					  QDF_TRACE_LEVEL_ERROR,
3313 					  FL(RNG_ERR "rxdma_mon_status_ring"));
3314 				return QDF_STATUS_E_NOMEM;
3315 			}
3316 
3317 			entries =
3318 			   wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
3319 			if (dp_srng_setup(soc,
3320 					  &pdev->rxdma_mon_desc_ring[mac_id],
3321 					  RXDMA_MONITOR_DESC, 0, mac_for_pdev,
3322 					  entries, 0)) {
3323 				QDF_TRACE(QDF_MODULE_ID_DP,
3324 					  QDF_TRACE_LEVEL_ERROR,
3325 					  FL(RNG_ERR "rxdma_mon_desc_ring"));
3326 				return QDF_STATUS_E_NOMEM;
3327 			}
3328 		} else {
3329 			entries =
3330 			   wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
3331 			if (dp_srng_setup(soc,
3332 					  &pdev->rxdma_mon_status_ring[mac_id],
3333 					  RXDMA_MONITOR_STATUS, 0, mac_for_pdev,
3334 					  entries, 0)) {
3335 				QDF_TRACE(QDF_MODULE_ID_DP,
3336 					  QDF_TRACE_LEVEL_ERROR,
3337 					  FL(RNG_ERR "rxdma_mon_status_ring"));
3338 				return QDF_STATUS_E_NOMEM;
3339 			}
3340 		}
3341 	}
3342 
3343 	return QDF_STATUS_SUCCESS;
3344 }
3345 #else
3346 static
3347 QDF_STATUS dp_mon_rings_setup(struct dp_soc *soc, struct dp_pdev *pdev)
3348 {
3349 	return QDF_STATUS_SUCCESS;
3350 }
3351 #endif
3352 
3353 /* dp_iterate_update_peer_list() - update peer stats on cal client timer
3354  * @pdev_hdl: pdev handle
3355  */
3356 #ifdef ATH_SUPPORT_EXT_STAT
3357 void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
3358 {
3359 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
3360 	struct dp_soc *soc = pdev->soc;
3361 	struct dp_vdev *vdev = NULL;
3362 	struct dp_peer *peer = NULL;
3363 
3364 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
3365 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
3366 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
3367 		DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
3368 			dp_cal_client_update_peer_stats(&peer->stats);
3369 		}
3370 	}
3371 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3372 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
3373 }
3374 #else
3375 void dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
3376 {
3377 }
3378 #endif
3379 
3380 /*
3381  * dp_htt_ppdu_stats_attach() - attach resources for HTT PPDU stats processing
3382  * @pdev: Datapath PDEV handle
3383  *
3384  * Return: QDF_STATUS_SUCCESS: Success
3385  *         QDF_STATUS_E_NOMEM: Error
3386  */
3387 static QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev)
3388 {
3389 	pdev->ppdu_tlv_buf = qdf_mem_malloc(HTT_T2H_MAX_MSG_SIZE);
3390 
3391 	if (!pdev->ppdu_tlv_buf) {
3392 		QDF_TRACE_ERROR(QDF_MODULE_ID_DP, "ppdu_tlv_buf alloc fail");
3393 		return QDF_STATUS_E_NOMEM;
3394 	}
3395 
3396 	return QDF_STATUS_SUCCESS;
3397 }
3398 
3399 /*
3400  * dp_pdev_attach_wifi3() - attach txrx pdev
3401  * @txrx_soc: Datapath SOC handle
3402  * @ctrl_pdev: Opaque PDEV object
3403  * @htc_handle: HTC handle for host-target interface
3404  * @qdf_osdev: QDF OS device
3405  * @pdev_id: PDEV ID
3406  *
3407  * Return: DP PDEV handle on success, NULL on failure
3408  */
3409 static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
3410 	struct cdp_ctrl_objmgr_pdev *ctrl_pdev,
3411 	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, uint8_t pdev_id)
3412 {
3413 	int ring_size;
3414 	int entries;
3415 	struct wlan_cfg_dp_soc_ctxt *soc_cfg_ctx;
3416 	int nss_cfg;
3417 	void *sojourn_buf;
3418 
3419 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
3420 	struct dp_pdev *pdev = NULL;
3421 
3422 	if (dp_is_soc_reinit(soc)) {
3423 		pdev = soc->pdev_list[pdev_id];
3424 	} else {
3425 		pdev = qdf_mem_malloc(sizeof(*pdev));
3426 		qdf_minidump_log(pdev, sizeof(*pdev), "dp_pdev");
3427 	}
3428 
3429 	if (!pdev) {
3430 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3431 			FL("DP PDEV memory allocation failed"));
3432 		goto fail0;
3433 	}
3434 
3435 	/*
3436 	 * Variable to prevent double pdev deinitialization during
3437 	 * radio detach execution, i.e. in the absence of any vdev.
3438 	 */
3439 	pdev->pdev_deinit = 0;
3440 	pdev->invalid_peer = qdf_mem_malloc(sizeof(struct dp_peer));
3441 
3442 	if (!pdev->invalid_peer) {
3443 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3444 			  FL("Invalid peer memory allocation failed"));
3445 		qdf_mem_free(pdev);
3446 		goto fail0;
3447 	}
3448 
3449 	soc_cfg_ctx = soc->wlan_cfg_ctx;
3450 	pdev->wlan_cfg_ctx = wlan_cfg_pdev_attach(soc->ctrl_psoc);
3451 
3452 	if (!pdev->wlan_cfg_ctx) {
3453 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3454 			FL("pdev cfg_attach failed"));
3455 
3456 		qdf_mem_free(pdev->invalid_peer);
3457 		qdf_mem_free(pdev);
3458 		goto fail0;
3459 	}
3460 
3461 	/*
3462 	 * set nss pdev config based on soc config
3463 	 */
3464 	nss_cfg = wlan_cfg_get_dp_soc_nss_cfg(soc_cfg_ctx);
3465 	wlan_cfg_set_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx,
3466 			(nss_cfg & (1 << pdev_id)));
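
	/*
	 * Worked example (illustrative, assuming the usual encoding where
	 * bit i of nss_cfg marks radio i as NSS-offloaded): with
	 * nss_cfg == 0x3 (DBDC), pdev_id 0 sees (0x3 & (1 << 0)) != 0 and
	 * pdev_id 1 sees (0x3 & (1 << 1)) != 0, so both radios are marked
	 * NSS-offloaded.
	 */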
3467 
3468 	pdev->soc = soc;
3469 	pdev->ctrl_pdev = ctrl_pdev;
3470 	pdev->pdev_id = pdev_id;
3471 	soc->pdev_list[pdev_id] = pdev;
3472 
3473 	pdev->lmac_id = wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, pdev_id);
3474 	soc->pdev_count++;
3475 
3476 	TAILQ_INIT(&pdev->vdev_list);
3477 	qdf_spinlock_create(&pdev->vdev_list_lock);
3478 	pdev->vdev_count = 0;
3479 
3480 	qdf_spinlock_create(&pdev->tx_mutex);
3481 	qdf_spinlock_create(&pdev->neighbour_peer_mutex);
3482 	TAILQ_INIT(&pdev->neighbour_peers_list);
3483 	pdev->neighbour_peers_added = false;
3484 	pdev->monitor_configured = false;
3485 
3486 	if (dp_soc_cmn_setup(soc)) {
3487 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3488 			FL("dp_soc_cmn_setup failed"));
3489 		goto fail1;
3490 	}
3491 
3492 	/* Setup per PDEV TCL rings if configured */
3493 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3494 		ring_size =
3495 			wlan_cfg_tx_ring_size(soc_cfg_ctx);
3496 
3497 		if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
3498 				  pdev_id, pdev_id, ring_size, 0)) {
3499 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3500 				FL("dp_srng_setup failed for tcl_data_ring"));
3501 			goto fail1;
3502 		}
3503 
3504 		ring_size =
3505 			wlan_cfg_tx_comp_ring_size(soc_cfg_ctx);
3506 
3507 		if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
3508 				  WBM2SW_RELEASE, pdev_id, pdev_id,
3509 				  ring_size, 0)) {
3510 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3511 				FL("dp_srng_setup failed for tx_comp_ring"));
3512 			goto fail1;
3513 		}
3514 		soc->num_tcl_data_rings++;
3515 	}
3516 
3517 	/* Tx specific init */
3518 	if (dp_tx_pdev_attach(pdev)) {
3519 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3520 			FL("dp_tx_pdev_attach failed"));
3521 		goto fail1;
3522 	}
3523 
3524 	ring_size = wlan_cfg_get_reo_dst_ring_size(soc->wlan_cfg_ctx);
3525 	/* Setup per PDEV REO rings if configured */
3526 	if (wlan_cfg_per_pdev_rx_ring(soc_cfg_ctx)) {
3527 		if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
3528 				  pdev_id, pdev_id, ring_size, 0)) {
3529 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3530 				FL("dp_srng_setup failed for reo_dest_ringn"));
3531 			goto fail1;
3532 		}
3533 		soc->num_reo_dest_rings++;
3534 	}
3535 
3536 	ring_size =
3537 		wlan_cfg_get_dp_soc_rxdma_refill_ring_size(soc->wlan_cfg_ctx);
3538 
3539 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
3540 			  ring_size, 0)) {
3541 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3542 			 FL("dp_srng_setup failed rx refill ring"));
3543 		goto fail1;
3544 	}
3545 
3546 	if (dp_rxdma_ring_setup(soc, pdev)) {
3547 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3548 			 FL("RXDMA ring config failed"));
3549 		goto fail1;
3550 	}
3551 
3552 	if (dp_mon_rings_setup(soc, pdev)) {
3553 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3554 			  FL("MONITOR rings setup failed"));
3555 		goto fail1;
3556 	}
3557 
3558 	entries = wlan_cfg_get_dp_soc_rxdma_err_dst_ring_size(soc_cfg_ctx);
3559 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx)) {
3560 		if (dp_srng_setup(soc, &pdev->rxdma_err_dst_ring[0], RXDMA_DST,
3561 				  0, pdev_id, entries, 0)) {
3562 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3563 				  FL(RNG_ERR "rxdma_err_dst_ring"));
3564 			goto fail1;
3565 		}
3566 	}
3567 
3568 	if (dp_setup_ipa_rx_refill_buf_ring(soc, pdev))
3569 		goto fail1;
3570 
3571 	if (dp_ipa_ring_resource_setup(soc, pdev))
3572 		goto fail1;
3573 
3574 	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
3575 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3576 			FL("dp_ipa_uc_attach failed"));
3577 		goto fail1;
3578 	}
3579 
3580 	/* Rx specific init */
3581 	if (dp_rx_pdev_attach(pdev)) {
3582 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
3583 			  FL("dp_rx_pdev_attach failed"));
3584 		goto fail2;
3585 	}
3586 
3587 	DP_STATS_INIT(pdev);
3588 
3589 	/* Monitor filter init */
3590 	pdev->mon_filter_mode = MON_FILTER_ALL;
3591 	pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
3592 	pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
3593 	pdev->fp_data_filter = FILTER_DATA_ALL;
3594 	pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
3595 	pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
3596 	pdev->mo_data_filter = FILTER_DATA_ALL;
3597 
3598 	dp_local_peer_id_pool_init(pdev);
3599 
3600 	dp_dscp_tid_map_setup(pdev);
3601 	dp_pcp_tid_map_setup(pdev);
3602 
3603 	/* Rx monitor mode specific init */
3604 	if (dp_rx_pdev_mon_attach(pdev)) {
3605 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3606 				"dp_rx_pdev_mon_attach failed");
3607 		goto fail2;
3608 	}
3609 
3610 	if (dp_wdi_event_attach(pdev)) {
3611 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3612 				"dp_wdi_evet_attach failed");
3613 		goto wdi_attach_fail;
3614 	}
3615 
3616 	/* set the reo destination during initialization */
3617 	pdev->reo_dest = pdev->pdev_id + 1;
3618 
3619 	/*
3620 	 * initialize ppdu tlv list
3621 	 */
3622 	TAILQ_INIT(&pdev->ppdu_info_list);
3623 	pdev->tlv_count = 0;
3624 	pdev->list_depth = 0;
3625 
3626 	qdf_mem_zero(&pdev->sojourn_stats, sizeof(struct cdp_tx_sojourn_stats));
3627 
3628 	pdev->sojourn_buf = qdf_nbuf_alloc(pdev->soc->osdev,
3629 			      sizeof(struct cdp_tx_sojourn_stats), 0, 4,
3630 			      TRUE);
3631 
3632 	if (pdev->sojourn_buf) {
3633 		sojourn_buf = qdf_nbuf_data(pdev->sojourn_buf);
3634 		qdf_mem_zero(sojourn_buf, sizeof(struct cdp_tx_sojourn_stats));
3635 	}
3636 	/* initialize cal client timer */
3637 	dp_cal_client_attach(&pdev->cal_client_ctx,
3638 			     dp_pdev_to_cdp_pdev(pdev),
3639 			     pdev->soc->osdev,
3640 			     &dp_iterate_update_peer_list);
3641 	qdf_event_create(&pdev->fw_peer_stats_event);
3642 
3643 	pdev->num_tx_allowed = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
3644 
3645 	dp_init_tso_stats(pdev);
3646 
3647 	if (dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS)
3648 		goto fail1;
3649 
3650 	dp_tx_ppdu_stats_attach(pdev);
3651 
3652 	return (struct cdp_pdev *)pdev;
3653 
3654 wdi_attach_fail:
3655 	/*
3656 	 * dp_mon_link_desc_pool_cleanup is done in dp_pdev_detach
3657 	 * and hence need not be done here.
3658 	 */
3659 	dp_rx_pdev_mon_detach(pdev);
3660 
3661 fail2:
3662 	dp_rx_pdev_detach(pdev);
3663 
3664 fail1:
3665 	if (pdev->invalid_peer)
3666 		qdf_mem_free(pdev->invalid_peer);
3667 	dp_pdev_detach((struct cdp_pdev *)pdev, 0);
3668 
3669 fail0:
3670 	return NULL;
3671 }
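
/*
 * Illustrative sketch (not part of the driver): dp_pdev_attach_wifi3()
 * above relies on the classic C goto-unwind ladder, where each failure
 * label releases only what was acquired before the jump. A minimal form
 * of the idiom, with hypothetical example_* helpers:
 */
#if 0
static QDF_STATUS example_attach(void)
{
	void *a, *b;

	a = example_alloc_a();		/* hypothetical */
	if (!a)
		goto fail0;

	b = example_alloc_b();		/* hypothetical */
	if (!b)
		goto fail1;

	return QDF_STATUS_SUCCESS;

fail1:
	example_free_a(a);		/* undo in reverse order */
fail0:
	return QDF_STATUS_E_NOMEM;
}
#endif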
3672 
3673 /*
3674 * dp_rxdma_ring_cleanup() - clean up the RX DMA rings
3675 * @soc: data path SoC handle
3676 * @pdev: Physical device handle
3677 *
3678 * Return: void
3679 */
3680 #ifdef QCA_HOST2FW_RXBUF_RING
3681 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3682 	 struct dp_pdev *pdev)
3683 {
3684 	int i;
3685 
3686 	for (i = 0; i < MAX_RX_MAC_RINGS; i++)
3687 		dp_srng_cleanup(soc, &pdev->rx_mac_buf_ring[i],
3688 			 RXDMA_BUF, 1);
3689 
3690 	qdf_timer_free(&soc->mon_reap_timer);
3691 }
3692 #else
3693 static void dp_rxdma_ring_cleanup(struct dp_soc *soc,
3694 	 struct dp_pdev *pdev)
3695 {
3696 }
3697 #endif
3698 
3699 /*
3700  * dp_neighbour_peers_detach() - Detach neighbour peers(nac clients)
3701  * @pdev: device object
3702  *
3703  * Return: void
3704  */
3705 static void dp_neighbour_peers_detach(struct dp_pdev *pdev)
3706 {
3707 	struct dp_neighbour_peer *peer = NULL;
3708 	struct dp_neighbour_peer *temp_peer = NULL;
3709 
3710 	TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
3711 			neighbour_peer_list_elem, temp_peer) {
3712 		/* delete this peer from the list */
3713 		TAILQ_REMOVE(&pdev->neighbour_peers_list,
3714 				peer, neighbour_peer_list_elem);
3715 		qdf_mem_free(peer);
3716 	}
3717 
3718 	qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
3719 }
3720 
3721 /**
3722 * dp_htt_ppdu_stats_detach() - detach stats resources
3723 * @pdev: Datapath PDEV handle
3724 *
3725 * Return: void
3726 */
3727 static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
3728 {
3729 	struct ppdu_info *ppdu_info, *ppdu_info_next;
3730 
3731 	TAILQ_FOREACH_SAFE(ppdu_info, &pdev->ppdu_info_list,
3732 			ppdu_info_list_elem, ppdu_info_next) {
3733 		if (!ppdu_info)
3734 			break;
3735 		qdf_assert_always(ppdu_info->nbuf);
3736 		qdf_nbuf_free(ppdu_info->nbuf);
3737 		qdf_mem_free(ppdu_info);
3738 	}
3739 
3740 	if (pdev->ppdu_tlv_buf)
3741 		qdf_mem_free(pdev->ppdu_tlv_buf);
3742 
3743 }
3744 
3745 #if !defined(DISABLE_MON_CONFIG)
3746 
3747 static
3748 void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
3749 			 int mac_id)
3750 {
3751 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
3752 		dp_srng_cleanup(soc,
3753 				&pdev->rxdma_mon_buf_ring[mac_id],
3754 				RXDMA_MONITOR_BUF, 0);
3755 
3756 		dp_srng_cleanup(soc,
3757 				&pdev->rxdma_mon_dst_ring[mac_id],
3758 				RXDMA_MONITOR_DST, 0);
3759 
3760 		dp_srng_cleanup(soc,
3761 				&pdev->rxdma_mon_status_ring[mac_id],
3762 				RXDMA_MONITOR_STATUS, 0);
3763 
3764 		dp_srng_cleanup(soc,
3765 				&pdev->rxdma_mon_desc_ring[mac_id],
3766 				RXDMA_MONITOR_DESC, 0);
3767 
3768 		dp_srng_cleanup(soc,
3769 				&pdev->rxdma_err_dst_ring[mac_id],
3770 				RXDMA_DST, 0);
3771 	} else {
3772 		dp_srng_cleanup(soc,
3773 				&pdev->rxdma_mon_status_ring[mac_id],
3774 				RXDMA_MONITOR_STATUS, 0);
3775 
3776 		dp_srng_cleanup(soc,
3777 				&pdev->rxdma_err_dst_ring[mac_id],
3778 				RXDMA_DST, 0);
3779 	}
3780 
3781 }
3782 #else
3783 static void dp_mon_ring_cleanup(struct dp_soc *soc, struct dp_pdev *pdev,
3784 				int mac_id)
3785 {
3786 }
3787 #endif
3788 
3789 /**
3790  * dp_mon_ring_deinit() - Placeholder to deinitialize Monitor rings
3791  *
3792  * @soc: soc handle
3793  * @pdev: datapath physical dev handle
3794  * @mac_id: mac number
3795  *
3796  * Return: None
3797  */
3798 static void dp_mon_ring_deinit(struct dp_soc *soc, struct dp_pdev *pdev,
3799 			       int mac_id)
3800 {
3801 }
3802 
3803 /**
3804  * dp_pdev_mem_reset() - Reset txrx pdev memory
3805  * @pdev: dp pdev handle
3806  *
3807  * Return: None
3808  */
3809 static void dp_pdev_mem_reset(struct dp_pdev *pdev)
3810 {
3811 	uint16_t len = 0;
3812 	uint8_t *dp_pdev_offset = (uint8_t *)pdev;
3813 
3814 	len = sizeof(struct dp_pdev) -
3815 		offsetof(struct dp_pdev, pdev_deinit) -
3816 		sizeof(pdev->pdev_deinit);
3817 	dp_pdev_offset = dp_pdev_offset +
3818 			 offsetof(struct dp_pdev, pdev_deinit) +
3819 			 sizeof(pdev->pdev_deinit);
3820 
3821 	qdf_mem_zero(dp_pdev_offset, len);
3822 }
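
/*
 * Illustrative sketch (not part of the driver): dp_pdev_mem_reset()
 * above zeroes only the bytes laid out *after* the pdev_deinit member,
 * so everything placed before it (and pdev_deinit itself) survives a
 * re-init. dp_soc_mem_reset() further below applies the same pattern
 * around dp_soc_reinit. The same offsetof() arithmetic on a toy struct:
 */
#if 0
struct example_ctx {
	int keep_a;		/* preserved across reset */
	int keep_marker;	/* reset boundary, like pdev_deinit */
	int scratch_b;		/* zeroed */
	int scratch_c;		/* zeroed */
};

static void example_ctx_reset(struct example_ctx *ctx)
{
	size_t start = offsetof(struct example_ctx, keep_marker) +
		       sizeof(ctx->keep_marker);

	/* zeroes [start, sizeof(*ctx)), i.e. scratch_b and scratch_c */
	qdf_mem_zero((uint8_t *)ctx + start, sizeof(*ctx) - start);
}
#endif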
3823 
3824 #ifdef WLAN_DP_PENDING_MEM_FLUSH
3825 /**
3826  * dp_pdev_flush_pending_vdevs() - Flush all delete pending vdevs in pdev
3827  * @pdev: Datapath PDEV handle
3828  *
3829  * This is the last chance to flush all pending dp vdevs/peers,
3830  * some peer/vdev leak case like Non-SSR + peer unmap missing
3831  * will be covered here.
3832  *
3833  * Return: None
3834  */
3835 static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
3836 {
3837 	struct dp_vdev *vdev = NULL;
3838 
3839 	while (true) {
3840 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
3841 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
3842 			if (vdev->delete.pending)
3843 				break;
3844 		}
3845 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
3846 
3847 		/*
3848 		 * vdev will be freed once all its peers are cleaned up;
3849 		 * dp_delete_pending_vdev will remove vdev from vdev_list
3850 		 * in pdev.
3851 		 */
3852 		if (vdev)
3853 			dp_vdev_flush_peers((struct cdp_vdev *)vdev, 0);
3854 		else
3855 			break;
3856 	}
3857 }
3858 #else
3859 static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
3860 {
3861 }
3862 #endif
3863 
3864 /**
3865  * dp_pdev_deinit() - Deinit txrx pdev
3866  * @txrx_pdev: Datapath PDEV handle
3867  * @force: Force deinit
3868  *
3869  * Return: None
3870  */
3871 static void dp_pdev_deinit(struct cdp_pdev *txrx_pdev, int force)
3872 {
3873 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3874 	struct dp_soc *soc = pdev->soc;
3875 	qdf_nbuf_t curr_nbuf, next_nbuf;
3876 	int mac_id;
3877 
3878 	/*
3879 	 * Prevent double pdev deinitialization during radio detach
3880 	 * execution .i.e. in the absence of any vdev
3881 	 */
3882 	if (pdev->pdev_deinit)
3883 		return;
3884 
3885 	pdev->pdev_deinit = 1;
3886 
3887 	dp_wdi_event_detach(pdev);
3888 
3889 	dp_pdev_flush_pending_vdevs(pdev);
3890 
3891 	dp_tx_pdev_detach(pdev);
3892 
3893 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3894 		dp_srng_deinit(soc, &soc->tcl_data_ring[pdev->pdev_id],
3895 			       TCL_DATA, pdev->pdev_id);
3896 		dp_srng_deinit(soc, &soc->tx_comp_ring[pdev->pdev_id],
3897 			       WBM2SW_RELEASE, pdev->pdev_id);
3898 	}
3899 
3900 	dp_pktlogmod_exit(pdev);
3901 
3902 	dp_rx_fst_detach(soc, pdev);
3903 	dp_rx_pdev_detach(pdev);
3904 	dp_rx_pdev_mon_detach(pdev);
3905 	dp_neighbour_peers_detach(pdev);
3906 	qdf_spinlock_destroy(&pdev->tx_mutex);
3907 	qdf_spinlock_destroy(&pdev->vdev_list_lock);
3908 
3909 	dp_ipa_uc_detach(soc, pdev);
3910 
3911 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
3912 
3913 	/* Cleanup per PDEV REO rings if configured */
3914 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
3915 		dp_srng_deinit(soc, &soc->reo_dest_ring[pdev->pdev_id],
3916 			       REO_DST, pdev->pdev_id);
3917 	}
3918 
3919 	dp_srng_deinit(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
3920 
3921 	dp_rxdma_ring_cleanup(soc, pdev);
3922 
3923 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
3924 		dp_mon_ring_deinit(soc, pdev, mac_id);
3925 		dp_srng_deinit(soc, &pdev->rxdma_err_dst_ring[mac_id],
3926 			       RXDMA_DST, 0);
3927 	}
3928 
3929 	curr_nbuf = pdev->invalid_peer_head_msdu;
3930 	while (curr_nbuf) {
3931 		next_nbuf = qdf_nbuf_next(curr_nbuf);
3932 		qdf_nbuf_free(curr_nbuf);
3933 		curr_nbuf = next_nbuf;
3934 	}
3935 	pdev->invalid_peer_head_msdu = NULL;
3936 	pdev->invalid_peer_tail_msdu = NULL;
3937 
3938 	dp_htt_ppdu_stats_detach(pdev);
3939 
3940 	dp_tx_ppdu_stats_detach(pdev);
3941 
3942 	qdf_nbuf_free(pdev->sojourn_buf);
3943 	qdf_nbuf_queue_free(&pdev->rx_ppdu_buf_q);
3944 
3945 	dp_cal_client_detach(&pdev->cal_client_ctx);
3946 
3947 	soc->pdev_count--;
3948 
3949 	/* only do soc common cleanup when last pdev do detach */
3950 	if (!(soc->pdev_count))
3951 		dp_soc_cmn_cleanup(soc);
3952 
3953 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
3954 	if (pdev->invalid_peer)
3955 		qdf_mem_free(pdev->invalid_peer);
3956 	qdf_mem_free(pdev->dp_txrx_handle);
3957 	dp_pdev_mem_reset(pdev);
3958 }
3959 
3960 /**
3961  * dp_pdev_deinit_wifi3() - Deinit txrx pdev
3962  * @txrx_pdev: Datapath PDEV handle
3963  * @force: Force deinit
3964  *
3965  * Return: None
3966  */
3967 static void dp_pdev_deinit_wifi3(struct cdp_pdev *txrx_pdev, int force)
3968 {
3969 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3970 	struct dp_soc *soc = pdev->soc;
3971 
3972 	soc->dp_soc_reinit = TRUE;
3973 
3974 	dp_pdev_deinit(txrx_pdev, force);
3975 }
3976 
3977 /*
3978  * dp_pdev_detach() - Complete rest of pdev detach
3979  * @txrx_pdev: Datapath PDEV handle
3980  * @force: Force deinit
3981  *
3982  * Return: None
3983  */
3984 static void dp_pdev_detach(struct cdp_pdev *txrx_pdev, int force)
3985 {
3986 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
3987 	struct dp_soc *soc = pdev->soc;
3988 	struct rx_desc_pool *rx_desc_pool;
3989 	int mac_id, mac_for_pdev;
3990 
3991 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
3992 		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
3993 				TCL_DATA, pdev->pdev_id);
3994 		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
3995 				WBM2SW_RELEASE, pdev->pdev_id);
3996 	}
3997 
3998 	dp_mon_link_free(pdev);
3999 
4000 	/* Cleanup per PDEV REO rings if configured */
4001 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
4002 		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
4003 				REO_DST, pdev->pdev_id);
4004 	}
4005 	dp_rxdma_ring_cleanup(soc, pdev);
4006 	wlan_cfg_pdev_detach(pdev->wlan_cfg_ctx);
4007 
4008 	dp_srng_cleanup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0);
4009 	dp_cleanup_ipa_rx_refill_buf_ring(soc, pdev);
4010 
4011 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4012 		dp_mon_ring_cleanup(soc, pdev, mac_id);
4013 		dp_srng_cleanup(soc, &pdev->rxdma_err_dst_ring[mac_id],
4014 				RXDMA_DST, 0);
4015 		if (dp_is_soc_reinit(soc)) {
4016 			mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
4017 							      pdev->pdev_id);
4018 			rx_desc_pool = &soc->rx_desc_status[mac_for_pdev];
4019 			dp_rx_desc_pool_free(soc, rx_desc_pool);
4020 			rx_desc_pool = &soc->rx_desc_mon[mac_for_pdev];
4021 			dp_rx_desc_pool_free(soc, rx_desc_pool);
4022 		}
4023 	}
4024 
4025 	if (dp_is_soc_reinit(soc)) {
4026 		rx_desc_pool = &soc->rx_desc_buf[pdev->pdev_id];
4027 		dp_rx_desc_pool_free(soc, rx_desc_pool);
4028 	}
4029 
4030 	soc->pdev_list[pdev->pdev_id] = NULL;
4031 	qdf_minidump_remove(pdev);
4032 	qdf_mem_free(pdev);
4033 }
4034 
4035 /*
4036  * dp_pdev_detach_wifi3() - detach txrx pdev
4037  * @txrx_pdev: Datapath PDEV handle
4038  * @force: Force detach
4039  *
4040  * Return: None
4041  */
4042 static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
4043 {
4044 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
4045 	struct dp_soc *soc = pdev->soc;
4046 
4047 	if (dp_is_soc_reinit(soc)) {
4048 		dp_pdev_detach(txrx_pdev, force);
4049 	} else {
4050 		dp_pdev_deinit(txrx_pdev, force);
4051 		dp_pdev_detach(txrx_pdev, force);
4052 	}
4053 }
4054 
4055 /*
4056  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
4057  * @soc: DP SOC handle
4058  */
4059 static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
4060 {
4061 	struct reo_desc_list_node *desc;
4062 	struct dp_rx_tid *rx_tid;
4063 
4064 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
4065 	while (qdf_list_remove_front(&soc->reo_desc_freelist,
4066 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) {
4067 		rx_tid = &desc->rx_tid;
4068 		qdf_mem_unmap_nbytes_single(soc->osdev,
4069 			rx_tid->hw_qdesc_paddr,
4070 			QDF_DMA_BIDIRECTIONAL,
4071 			rx_tid->hw_qdesc_alloc_size);
4072 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
4073 		qdf_mem_free(desc);
4074 	}
4075 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
4076 	qdf_list_destroy(&soc->reo_desc_freelist);
4077 	qdf_spinlock_destroy(&soc->reo_desc_freelist_lock);
4078 }
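
/*
 * Illustrative sketch (not part of the driver): the drain-then-destroy
 * pattern used by dp_reo_desc_freelist_destroy() above -- pop every node
 * under the lock, free it, then destroy the now-empty list and its lock.
 * Shown on a hypothetical qdf_list whose nodes embed the list element
 * first (which is what makes the cast below legal):
 */
#if 0
struct example_node {
	qdf_list_node_t elem;	/* must be the first member */
	int payload;
};

static void example_list_teardown(qdf_list_t *list, qdf_spinlock_t *lock)
{
	struct example_node *node;

	qdf_spin_lock_bh(lock);
	while (qdf_list_remove_front(list,
			(qdf_list_node_t **)&node) == QDF_STATUS_SUCCESS)
		qdf_mem_free(node);
	qdf_spin_unlock_bh(lock);

	qdf_list_destroy(list);
	qdf_spinlock_destroy(lock);
}
#endif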
4079 
4080 /**
4081  * dp_soc_mem_reset() - Reset Dp Soc memory
4082  * @soc: DP handle
4083  *
4084  * Return: None
4085  */
4086 static void dp_soc_mem_reset(struct dp_soc *soc)
4087 {
4088 	uint16_t len = 0;
4089 	uint8_t *dp_soc_offset = (uint8_t *)soc;
4090 
4091 	len = sizeof(struct dp_soc) -
4092 		offsetof(struct dp_soc, dp_soc_reinit) -
4093 		sizeof(soc->dp_soc_reinit);
4094 	dp_soc_offset = dp_soc_offset +
4095 			offsetof(struct dp_soc, dp_soc_reinit) +
4096 			sizeof(soc->dp_soc_reinit);
4097 
4098 	qdf_mem_zero(dp_soc_offset, len);
4099 }
4100 
4101 /**
4102  * dp_soc_deinit() - Deinitialize txrx SOC
4103  * @txrx_soc: Opaque DP SOC handle
4104  *
4105  * Return: None
4106  */
4107 static void dp_soc_deinit(void *txrx_soc)
4108 {
4109 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4110 	int i;
4111 
4112 	qdf_atomic_set(&soc->cmn_init_done, 0);
4113 
4114 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4115 		if (soc->pdev_list[i])
4116 			dp_pdev_deinit((struct cdp_pdev *)
4117 					soc->pdev_list[i], 1);
4118 	}
4119 
4120 	qdf_flush_work(&soc->htt_stats.work);
4121 	qdf_disable_work(&soc->htt_stats.work);
4122 
4123 	/* Free pending htt stats messages */
4124 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
4125 
4126 	dp_peer_find_detach(soc);
4127 
4128 	/* Free the ring memories */
4129 	/* Common rings */
4130 	dp_srng_deinit(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
4131 
4132 	/* Tx data rings */
4133 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
4134 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
4135 			dp_srng_deinit(soc, &soc->tcl_data_ring[i],
4136 				       TCL_DATA, i);
4137 			dp_srng_deinit(soc, &soc->tx_comp_ring[i],
4138 				       WBM2SW_RELEASE, i);
4139 		}
4140 	}
4141 
4142 	/* TCL command and status rings */
4143 	dp_srng_deinit(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
4144 	dp_srng_deinit(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
4145 
4146 	/* Rx data rings */
4147 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
4148 		soc->num_reo_dest_rings =
4149 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
4150 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
4151 			/* TODO: Get number of rings and ring sizes
4152 			 * from wlan_cfg
4153 			 */
4154 			dp_srng_deinit(soc, &soc->reo_dest_ring[i],
4155 				       REO_DST, i);
4156 		}
4157 	}
4158 	/* REO reinjection ring */
4159 	dp_srng_deinit(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
4160 
4161 	/* Rx release ring */
4162 	dp_srng_deinit(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
4163 
4164 	/* Rx exception ring */
4165 	/* TODO: Better to store ring_type and ring_num in
4166 	 * dp_srng during setup
4167 	 */
4168 	dp_srng_deinit(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
4169 
4170 	/* REO command and status rings */
4171 	dp_srng_deinit(soc, &soc->reo_cmd_ring, REO_CMD, 0);
4172 	dp_srng_deinit(soc, &soc->reo_status_ring, REO_STATUS, 0);
4173 
4174 	dp_soc_wds_detach(soc);
4175 
4176 	qdf_spinlock_destroy(&soc->peer_ref_mutex);
4177 	qdf_spinlock_destroy(&soc->htt_stats.lock);
4178 
4179 	htt_soc_htc_dealloc(soc->htt_handle);
4180 
4181 	dp_reo_desc_freelist_destroy(soc);
4182 
4183 	qdf_spinlock_destroy(&soc->ast_lock);
4184 
4185 	dp_soc_mem_reset(soc);
4186 }
4187 
4188 /**
4189  * dp_soc_deinit_wifi3() - Deinitialize txrx SOC
4190  * @txrx_soc: Opaque DP SOC handle
4191  *
4192  * Return: None
4193  */
4194 static void dp_soc_deinit_wifi3(void *txrx_soc)
4195 {
4196 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4197 
4198 	soc->dp_soc_reinit = 1;
4199 	dp_soc_deinit(txrx_soc);
4200 }
4201 
4202 /*
4203  * dp_soc_detach() - Detach rest of txrx SOC
4204  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
4205  *
4206  * Return: None
4207  */
4208 static void dp_soc_detach(void *txrx_soc)
4209 {
4210 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4211 	int i;
4212 
4213 	qdf_atomic_set(&soc->cmn_init_done, 0);
4214 
4215 	/* TBD: Call Tx and Rx cleanup functions to free buffers and
4216 	 * SW descriptors
4217 	 */
4218 
4219 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4220 		if (soc->pdev_list[i])
4221 			dp_pdev_detach((struct cdp_pdev *)
4222 					     soc->pdev_list[i], 1);
4223 	}
4224 
4225 	/* Free the ring memories */
4226 	/* Common rings */
4227 	qdf_minidump_remove(soc->wbm_desc_rel_ring.base_vaddr_unaligned);
4228 	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
4229 
4230 	dp_tx_soc_detach(soc);
4231 
4232 	/* Tx data rings */
4233 	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
4234 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
4235 			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
4236 				TCL_DATA, i);
4237 			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
4238 				WBM2SW_RELEASE, i);
4239 		}
4240 	}
4241 
4242 	/* TCL command and status rings */
4243 	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
4244 	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
4245 
4246 	/* Rx data rings */
4247 	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
4248 		soc->num_reo_dest_rings =
4249 			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
4250 		for (i = 0; i < soc->num_reo_dest_rings; i++) {
4251 			/* TODO: Get number of rings and ring sizes
4252 			 * from wlan_cfg
4253 			 */
4254 			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
4255 				REO_DST, i);
4256 		}
4257 	}
4258 	/* REO reinjection ring */
4259 	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
4260 
4261 	/* Rx release ring */
4262 	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
4263 	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3);
4264 
4265 	/* Rx exception ring */
4266 	/* TODO: Better to store ring_type and ring_num in
4267 	 * dp_srng during setup
4268 	 */
4269 	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
4270 
4271 	/* REO command and status rings */
4272 	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
4273 	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
4274 	dp_hw_link_desc_pool_cleanup(soc);
4275 
4276 	htt_soc_detach(soc->htt_handle);
4277 	soc->dp_soc_reinit = 0;
4278 
4279 	wlan_cfg_soc_detach(soc->wlan_cfg_ctx);
4280 
4281 	qdf_minidump_remove(soc);
4282 	qdf_mem_free(soc);
4283 }
4284 
4285 /*
4286  * dp_soc_detach_wifi3() - Detach txrx SOC
4287  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
4288  *
4289  * Return: None
4290  */
4291 static void dp_soc_detach_wifi3(void *txrx_soc)
4292 {
4293 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
4294 
4295 	if (dp_is_soc_reinit(soc)) {
4296 		dp_soc_detach(txrx_soc);
4297 	} else {
4298 		dp_soc_deinit(txrx_soc);
4299 		dp_soc_detach(txrx_soc);
4300 	}
4301 
4302 }
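
/*
 * Illustrative call-order sketch (an assumption about the SSR flow, not a
 * verbatim trace): dp_soc_deinit_wifi3() sets dp_soc_reinit before tearing
 * state down, so a later dp_soc_detach_wifi3() takes the detach-only
 * branch instead of deinitializing twice.
 */
#if 0
static void example_ssr_soc_teardown(void *txrx_soc)
{
	dp_soc_deinit_wifi3(txrx_soc);	/* dp_soc_reinit = 1, then deinit */
	/* ... target recovery happens here ... */
	dp_soc_detach_wifi3(txrx_soc);	/* reinit set -> detach-only path */
}
#endif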
4303 
4304 #if !defined(DISABLE_MON_CONFIG)
4305 /**
4306  * dp_mon_htt_srng_setup() - Prepare HTT messages for Monitor rings
4307  * @soc: soc handle
4308  * @pdev: physical device handle
4309  * @mac_id: mac (ring) index within the pdev
4310  * @mac_for_pdev: mac id mapped for this pdev
4311  *
4312  * Return: non-zero for failure, zero for success
4313  */
4314 static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
4315 					struct dp_pdev *pdev,
4316 					int mac_id,
4317 					int mac_for_pdev)
4318 {
4319 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4320 
4321 	if (soc->wlan_cfg_ctx->rxdma1_enable) {
4322 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4323 					pdev->rxdma_mon_buf_ring[mac_id]
4324 					.hal_srng,
4325 					RXDMA_MONITOR_BUF);
4326 
4327 		if (status != QDF_STATUS_SUCCESS) {
4328 			dp_err("Failed to send htt srng setup message for Rxdma mon buf ring");
4329 			return status;
4330 		}
4331 
4332 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4333 					pdev->rxdma_mon_dst_ring[mac_id]
4334 					.hal_srng,
4335 					RXDMA_MONITOR_DST);
4336 
4337 		if (status != QDF_STATUS_SUCCESS) {
4338 			dp_err("Failed to send htt srng setup message for Rxdma mon dst ring");
4339 			return status;
4340 		}
4341 
4342 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4343 					pdev->rxdma_mon_status_ring[mac_id]
4344 					.hal_srng,
4345 					RXDMA_MONITOR_STATUS);
4346 
4347 		if (status != QDF_STATUS_SUCCESS) {
4348 			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
4349 			return status;
4350 		}
4351 
4352 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4353 					pdev->rxdma_mon_desc_ring[mac_id]
4354 					.hal_srng,
4355 					RXDMA_MONITOR_DESC);
4356 
4357 		if (status != QDF_STATUS_SUCCESS) {
4358 			dp_err("Failed to send htt srng message for Rxdma mon desc ring");
4359 			return status;
4360 		}
4361 	} else {
4362 		status = htt_srng_setup(soc->htt_handle, mac_for_pdev,
4363 					pdev->rxdma_mon_status_ring[mac_id]
4364 					.hal_srng,
4365 					RXDMA_MONITOR_STATUS);
4366 
4367 		if (status != QDF_STATUS_SUCCESS) {
4368 			dp_err("Failed to send htt srng setup message for Rxdma mon status ring");
4369 			return status;
4370 		}
4371 	}
4372 
4373 	return status;
4374 
4375 }
4376 #else
4377 static QDF_STATUS dp_mon_htt_srng_setup(struct dp_soc *soc,
4378 					struct dp_pdev *pdev,
4379 					int mac_id,
4380 					int mac_for_pdev)
4381 {
4382 	return QDF_STATUS_SUCCESS;
4383 }
4384 #endif
4385 
4386 /*
4387  * dp_rxdma_ring_config() - configure the RX DMA rings
4388  *
4389  * This function is used to configure the MAC rings.
4390  * On MCL, the host provides buffers in the Host2FW ring;
4391  * FW refills (copies) buffers to the ring and updates the
4392  * ring_idx in a register.
4393  *
4394  * @soc: data path SoC handle
4395  *
4396  * Return: zero on success, non-zero on failure
4397  */
4398 #ifdef QCA_HOST2FW_RXBUF_RING
4399 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
4400 {
4401 	int i;
4402 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4403 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4404 		struct dp_pdev *pdev = soc->pdev_list[i];
4405 
4406 		if (pdev) {
4407 			int mac_id;
4408 			bool dbs_enable = 0;
4409 			int max_mac_rings =
4410 				 wlan_cfg_get_num_mac_rings
4411 				(pdev->wlan_cfg_ctx);
4412 
4413 			htt_srng_setup(soc->htt_handle, 0,
4414 				 pdev->rx_refill_buf_ring.hal_srng,
4415 				 RXDMA_BUF);
4416 
4417 			if (pdev->rx_refill_buf_ring2.hal_srng)
4418 				htt_srng_setup(soc->htt_handle, 0,
4419 					pdev->rx_refill_buf_ring2.hal_srng,
4420 					RXDMA_BUF);
4421 
4422 			if (soc->cdp_soc.ol_ops->
4423 				is_hw_dbs_2x2_capable) {
4424 				dbs_enable = soc->cdp_soc.ol_ops->
4425 					is_hw_dbs_2x2_capable(
4426 							(void *)soc->ctrl_psoc);
4427 			}
4428 
4429 			if (dbs_enable) {
4430 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4431 				QDF_TRACE_LEVEL_ERROR,
4432 				FL("DBS enabled max_mac_rings %d"),
4433 					 max_mac_rings);
4434 			} else {
4435 				max_mac_rings = 1;
4436 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4437 					 QDF_TRACE_LEVEL_ERROR,
4438 					 FL("DBS disabled, max_mac_rings %d"),
4439 					 max_mac_rings);
4440 			}
4441 
4442 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4443 					 FL("pdev_id %d max_mac_rings %d"),
4444 					 pdev->pdev_id, max_mac_rings);
4445 
4446 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
4447 				int mac_for_pdev = dp_get_mac_id_for_pdev(
4448 							mac_id, pdev->pdev_id);
4449 
4450 				QDF_TRACE(QDF_MODULE_ID_TXRX,
4451 					 QDF_TRACE_LEVEL_ERROR,
4452 					 FL("mac_id %d"), mac_for_pdev);
4453 
4454 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
4455 					 pdev->rx_mac_buf_ring[mac_id]
4456 						.hal_srng,
4457 					 RXDMA_BUF);
4458 				htt_srng_setup(soc->htt_handle, mac_for_pdev,
4459 					pdev->rxdma_err_dst_ring[mac_id]
4460 						.hal_srng,
4461 					RXDMA_DST);
4462 
4463 				/* Configure monitor mode rings */
4464 				status = dp_mon_htt_srng_setup(soc, pdev,
4465 							       mac_id,
4466 							       mac_for_pdev);
4467 				if (status != QDF_STATUS_SUCCESS) {
4468 					dp_err("Failed to send htt monitor messages to target");
4469 					return status;
4470 				}
4471 
4472 			}
4473 		}
4474 	}
4475 
4476 	/*
4477 	 * Timer to reap rxdma status rings.
4478 	 * Needed until we enable ppdu end interrupts
4479 	 */
4480 	qdf_timer_init(soc->osdev, &soc->mon_reap_timer,
4481 			dp_service_mon_rings, (void *)soc,
4482 			QDF_TIMER_TYPE_WAKE_APPS);
4483 	soc->reap_timer_init = 1;
4484 	return status;
4485 }
4486 #else
4487 /* This is only for WIN */
4488 static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
4489 {
4490 	int i;
4491 	int mac_id;
4492 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4493 
4494 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4495 		struct dp_pdev *pdev = soc->pdev_list[i];
4496 
4497 		if (!pdev)
4498 			continue;
4499 
4500 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4501 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, i);
4502 
4503 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4504 				pdev->rx_refill_buf_ring.hal_srng, RXDMA_BUF);
4505 #ifndef DISABLE_MON_CONFIG
4506 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4507 				pdev->rxdma_mon_buf_ring[mac_id].hal_srng,
4508 				RXDMA_MONITOR_BUF);
4509 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4510 				pdev->rxdma_mon_dst_ring[mac_id].hal_srng,
4511 				RXDMA_MONITOR_DST);
4512 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4513 				pdev->rxdma_mon_status_ring[mac_id].hal_srng,
4514 				RXDMA_MONITOR_STATUS);
4515 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4516 				pdev->rxdma_mon_desc_ring[mac_id].hal_srng,
4517 				RXDMA_MONITOR_DESC);
4518 #endif
4519 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
4520 				pdev->rxdma_err_dst_ring[mac_id].hal_srng,
4521 				RXDMA_DST);
4522 		}
4523 	}
4524 	return status;
4525 }
4526 #endif
4527 
4528 #ifdef NO_RX_PKT_HDR_TLV
4529 static QDF_STATUS
4530 dp_rxdma_ring_sel_cfg(struct dp_soc *soc)
4531 {
4532 	int i;
4533 	int mac_id;
4534 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
4535 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4536 
4537 	htt_tlv_filter.mpdu_start = 1;
4538 	htt_tlv_filter.msdu_start = 1;
4539 	htt_tlv_filter.mpdu_end = 1;
4540 	htt_tlv_filter.msdu_end = 1;
4541 	htt_tlv_filter.attention = 1;
4542 	htt_tlv_filter.packet = 1;
4543 	htt_tlv_filter.packet_header = 0;
4544 
4545 	htt_tlv_filter.ppdu_start = 0;
4546 	htt_tlv_filter.ppdu_end = 0;
4547 	htt_tlv_filter.ppdu_end_user_stats = 0;
4548 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
4549 	htt_tlv_filter.ppdu_end_status_done = 0;
4550 	htt_tlv_filter.enable_fp = 1;
4551 	htt_tlv_filter.enable_md = 0;
4553 	htt_tlv_filter.enable_mo = 0;
4554 
4555 	htt_tlv_filter.fp_mgmt_filter = 0;
4556 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_BA_REQ;
4557 	htt_tlv_filter.fp_data_filter = (FILTER_DATA_UCAST |
4558 					 FILTER_DATA_MCAST |
4559 					 FILTER_DATA_DATA);
4560 	htt_tlv_filter.mo_mgmt_filter = 0;
4561 	htt_tlv_filter.mo_ctrl_filter = 0;
4562 	htt_tlv_filter.mo_data_filter = 0;
4563 	htt_tlv_filter.md_data_filter = 0;
4564 
4565 	htt_tlv_filter.offset_valid = true;
4566 
4567 	htt_tlv_filter.rx_packet_offset = RX_PKT_TLVS_LEN;
4568 	/* Not subscribing rx_pkt_header */
4569 	htt_tlv_filter.rx_header_offset = 0;
4570 	htt_tlv_filter.rx_mpdu_start_offset =
4571 				HAL_RX_PKT_TLV_MPDU_START_OFFSET(soc->hal_soc);
4572 	htt_tlv_filter.rx_mpdu_end_offset =
4573 				HAL_RX_PKT_TLV_MPDU_END_OFFSET(soc->hal_soc);
4574 	htt_tlv_filter.rx_msdu_start_offset =
4575 				HAL_RX_PKT_TLV_MSDU_START_OFFSET(soc->hal_soc);
4576 	htt_tlv_filter.rx_msdu_end_offset =
4577 				HAL_RX_PKT_TLV_MSDU_END_OFFSET(soc->hal_soc);
4578 	htt_tlv_filter.rx_attn_offset =
4579 				HAL_RX_PKT_TLV_ATTN_OFFSET(soc->hal_soc);
4580 
4581 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4582 		struct dp_pdev *pdev = soc->pdev_list[i];
4583 
4584 		if (!pdev)
4585 			continue;
4586 
4587 		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
4588 			int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
4589 					pdev->pdev_id);
4590 
4591 			htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
4592 					    pdev->rx_refill_buf_ring.hal_srng,
4593 					    RXDMA_BUF, RX_BUFFER_SIZE,
4594 					    &htt_tlv_filter);
4595 		}
4596 	}
4597 	return status;
4598 }
4599 #else
4600 static QDF_STATUS
4601 dp_rxdma_ring_sel_cfg(struct dp_soc *soc)
4602 {
4603 	return QDF_STATUS_SUCCESS;
4604 }
4605 #endif
4606 
4607 /*
4608  * dp_rx_target_fst_config() - configure the RXOLE Flow Search Engine
4609  *
4610  * This function is used to configure the FSE HW block in RX OLE on a
4611  * per pdev basis. Here, we will be programming parameters related to
4612  * the Flow Search Table.
4613  *
4614  * @soc: data path SoC handle
4615  *
4616  * Return: zero on success, non-zero on failure
4617  */
4618 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
4619 static QDF_STATUS
4620 dp_rx_target_fst_config(struct dp_soc *soc)
4621 {
4622 	int i;
4623 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4624 
4625 	for (i = 0; i < MAX_PDEV_CNT; i++) {
4626 		struct dp_pdev *pdev = soc->pdev_list[i];
4627 
4628 		/* Flow search is not enabled if NSS offload is enabled */
4629 		if (pdev &&
4630 		    !wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
4631 			status = dp_rx_flow_send_fst_fw_setup(pdev->soc, pdev);
4632 			if (status != QDF_STATUS_SUCCESS)
4633 				break;
4634 		}
4635 	}
4636 	return status;
4637 }
4638 #else
4639 /**
4640  * dp_rx_target_fst_config() - Configure RX OLE FSE engine in HW
4641  * @soc: SoC handle
4642  *
4643  * Return: Success
4644  */
4645 static inline QDF_STATUS
4646 dp_rx_target_fst_config(struct dp_soc *soc)
4647 {
4648 	return QDF_STATUS_SUCCESS;
4649 }
4650 
4651 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
4652 
4653 /*
4654  * dp_soc_attach_target_wifi3() - SOC initialization in the target
4655  * @cdp_soc: Opaque Datapath SOC handle
4656  *
4657  * Return: zero on success, non-zero on failure
4658  */
4659 static QDF_STATUS
4660 dp_soc_attach_target_wifi3(struct cdp_soc_t *cdp_soc)
4661 {
4662 	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
4663 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4664 
4665 	htt_soc_attach_target(soc->htt_handle);
4666 
4667 	status = dp_rxdma_ring_config(soc);
4668 	if (status != QDF_STATUS_SUCCESS) {
4669 		dp_err("Failed to send htt srng setup messages to target");
4670 		return status;
4671 	}
4672 
4673 	status = dp_rxdma_ring_sel_cfg(soc);
4674 	if (status != QDF_STATUS_SUCCESS) {
4675 		dp_err("Failed to send htt ring config message to target");
4676 		return status;
4677 	}
4678 
4679 	status = dp_rx_target_fst_config(soc);
4680 	if (status != QDF_STATUS_SUCCESS) {
4681 		dp_err("Failed to send htt fst setup config message to target");
4682 		return status;
4683 	}
4684 
4685 	DP_STATS_INIT(soc);
4686 
4687 	/* initialize work queue for stats processing */
4688 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
4689 
4690 	qdf_minidump_log(soc, sizeof(*soc), "dp_soc");
4691 
4692 	return QDF_STATUS_SUCCESS;
4693 }
4694 
4695 /*
4696  * dp_soc_get_nss_cfg_wifi3() - SOC get nss config
4697  * @cdp_soc: Datapath SOC handle
4698  */
4699 static int dp_soc_get_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc)
4700 {
4701 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
4702 	return wlan_cfg_get_dp_soc_nss_cfg(dsoc->wlan_cfg_ctx);
4703 }
4704 
4705 /*
4706  * dp_soc_set_nss_cfg_wifi3() - SOC set nss config
4707  * @cdp_soc: Datapath SOC handle
4708  * @config: nss config
4709  */
4710 static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
4711 {
4712 	struct dp_soc *dsoc = (struct dp_soc *)cdp_soc;
4713 	struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx = dsoc->wlan_cfg_ctx;
4714 
4715 	wlan_cfg_set_dp_soc_nss_cfg(wlan_cfg_ctx, config);
4716 
4717 	/*
4718 	 * TODO: mask this out based on the per-radio offload config
4719 	 */
4720 	switch (config) {
4721 	case dp_nss_cfg_default:
4722 		break;
4723 	case dp_nss_cfg_first_radio:
4724 		/*
4725 		 * This configuration is valid for single band radio which
4726 		 * is also NSS offload.
4727 		 */
4728 	case dp_nss_cfg_dbdc:
4729 	case dp_nss_cfg_dbtc:
4730 		wlan_cfg_set_num_tx_desc_pool(wlan_cfg_ctx, 0);
4731 		wlan_cfg_set_num_tx_ext_desc_pool(wlan_cfg_ctx, 0);
4732 		wlan_cfg_set_num_tx_desc(wlan_cfg_ctx, 0);
4733 		wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
4734 		break;
4735 	default:
4736 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4737 			  "Invalid offload config %d", config);
4738 	}
4739 
4740 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4741 		  FL("nss-wifi<0> nss config is enabled"));
4742 }
4743 
4744 /*
4745 * dp_vdev_attach_wifi3() - attach txrx vdev
4746 * @txrx_pdev: Datapath PDEV handle
4747 * @vdev_mac_addr: MAC address of the virtual interface
4748 * @vdev_id: VDEV Id
4749 * @op_mode: VDEV operating mode
4750 * @subtype: VDEV operating subtype
4751 *
4752 * Return: DP VDEV handle on success, NULL on failure
4753 */
4754 static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
4755 	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode,
4756 	enum wlan_op_subtype subtype)
4757 {
4758 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
4759 	struct dp_soc *soc = pdev->soc;
4760 	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
4761 
4762 	if (!vdev) {
4763 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
4764 			FL("DP VDEV memory allocation failed"));
4765 		goto fail0;
4766 	}
4767 
4768 	vdev->pdev = pdev;
4769 	vdev->vdev_id = vdev_id;
4770 	vdev->opmode = op_mode;
4771 	vdev->subtype = subtype;
4772 	vdev->osdev = soc->osdev;
4773 
4774 	vdev->osif_rx = NULL;
4775 	vdev->osif_rsim_rx_decap = NULL;
4776 	vdev->osif_get_key = NULL;
4777 	vdev->osif_rx_mon = NULL;
4778 	vdev->osif_tx_free_ext = NULL;
4779 	vdev->osif_vdev = NULL;
4780 
4781 	vdev->delete.pending = 0;
4782 	vdev->safemode = 0;
4783 	vdev->drop_unenc = 1;
4784 	vdev->sec_type = cdp_sec_type_none;
4785 #ifdef notyet
4786 	vdev->filters_num = 0;
4787 #endif
4788 
4789 	qdf_mem_copy(
4790 		&vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE);
4791 
4792 	/* TODO: Initialize default HTT meta data that will be used in
4793 	 * TCL descriptors for packets transmitted from this VDEV
4794 	 */
4795 
4796 	TAILQ_INIT(&vdev->peer_list);
4797 	dp_peer_multipass_list_init(vdev);
4798 
4799 	if ((soc->intr_mode == DP_INTR_POLL) &&
4800 	    wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx) != 0) {
4801 		if ((pdev->vdev_count == 0) ||
4802 		    (wlan_op_mode_monitor == vdev->opmode))
4803 			qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
4804 	}
4805 
4806 	soc->vdev_id_map[vdev_id] = vdev;
4807 
4808 	if (wlan_op_mode_monitor == vdev->opmode) {
4809 		pdev->monitor_vdev = vdev;
4810 		return (struct cdp_vdev *)vdev;
4811 	}
4812 
4813 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
4814 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
4815 	vdev->dscp_tid_map_id = 0;
4816 	vdev->mcast_enhancement_en = 0;
4817 	vdev->raw_mode_war = wlan_cfg_get_raw_mode_war(soc->wlan_cfg_ctx);
4818 	vdev->prev_tx_enq_tstamp = 0;
4819 	vdev->prev_rx_deliver_tstamp = 0;
4820 
4821 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
4822 	/* add this vdev into the pdev's list */
4823 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
4824 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4825 	pdev->vdev_count++;
4826 
4827 	if (wlan_op_mode_sta != vdev->opmode)
4828 		vdev->ap_bridge_enabled = true;
4829 	else
4830 		vdev->ap_bridge_enabled = false;
4831 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
4832 		  "%s: wlan_cfg_ap_bridge_enabled %d",
4833 		  __func__, vdev->ap_bridge_enabled);
4834 
4835 	dp_tx_vdev_attach(vdev);
4836 
4837 	if (pdev->vdev_count == 1)
4838 		dp_lro_hash_setup(soc, pdev);
4839 
4840 	dp_info("Created vdev %pK (%pM)", vdev, vdev->mac_addr.raw);
4841 	DP_STATS_INIT(vdev);
4842 
4843 	if (wlan_op_mode_sta == vdev->opmode)
4844 		dp_peer_create_wifi3((struct cdp_vdev *)vdev,
4845 							vdev->mac_addr.raw,
4846 							NULL);
4847 
4848 	return (struct cdp_vdev *)vdev;
4849 
4850 fail0:
4851 	return NULL;
4852 }
4853 
4854 /**
4855  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
4856  * @vdev_handle: Datapath VDEV handle
4857  * @osif_vdev: OSIF vdev handle
4858  * @ctrl_vdev: UMAC vdev handle
4859  * @txrx_ops: Tx and Rx operations
4860  *
4861  * Return: None
4862  */
4863 static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
4864 	void *osif_vdev, struct cdp_ctrl_objmgr_vdev *ctrl_vdev,
4865 	struct ol_txrx_ops *txrx_ops)
4866 {
4867 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4868 	vdev->osif_vdev = osif_vdev;
4869 	vdev->ctrl_vdev = ctrl_vdev;
4870 	vdev->osif_rx = txrx_ops->rx.rx;
4871 	vdev->osif_rx_stack = txrx_ops->rx.rx_stack;
4872 	vdev->osif_rx_flush = txrx_ops->rx.rx_flush;
4873 	vdev->osif_gro_flush = txrx_ops->rx.rx_gro_flush;
4874 	vdev->osif_rsim_rx_decap = txrx_ops->rx.rsim_rx_decap;
4875 	vdev->osif_get_key = txrx_ops->get_key;
4876 	vdev->osif_rx_mon = txrx_ops->rx.mon;
4877 	vdev->osif_tx_free_ext = txrx_ops->tx.tx_free_ext;
4878 	vdev->tx_comp = txrx_ops->tx.tx_comp;
4879 #ifdef notyet
4880 #if ATH_SUPPORT_WAPI
4881 	vdev->osif_check_wai = txrx_ops->rx.wai_check;
4882 #endif
4883 #endif
4884 #ifdef UMAC_SUPPORT_PROXY_ARP
4885 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
4886 #endif
4887 	vdev->me_convert = txrx_ops->me_convert;
4888 
4889 	/* TODO: Enable the following once Tx code is integrated */
4890 	if (vdev->mesh_vdev)
4891 		txrx_ops->tx.tx = dp_tx_send_mesh;
4892 	else
4893 		txrx_ops->tx.tx = dp_tx_send;
4894 
4895 	txrx_ops->tx.tx_exception = dp_tx_send_exception;
4896 
4897 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
4898 		"DP Vdev Register success");
4899 }
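
/*
 * Illustrative OSIF-side usage sketch (not part of the driver): the caller
 * fills struct ol_txrx_ops with its rx callbacks and gets the tx entry
 * points written back by dp_vdev_register_wifi3(). example_osif_rx() is a
 * hypothetical handler.
 */
#if 0
static void example_register_vdev(struct cdp_vdev *vdev, void *osif_ctx,
				  struct cdp_ctrl_objmgr_vdev *ctrl_vdev)
{
	struct ol_txrx_ops ops = { 0 };

	ops.rx.rx = example_osif_rx;	/* hypothetical rx handler */
	dp_vdev_register_wifi3(vdev, osif_ctx, ctrl_vdev, &ops);

	/*
	 * On return the DP layer has filled in the tx side: ops.tx.tx now
	 * points at dp_tx_send (or dp_tx_send_mesh for mesh vdevs).
	 */
}
#endif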
4900 
4901 /**
4902  * dp_peer_flush_ast_entry() - Forcibly flush all AST entries of a peer
4903  * @soc: Datapath soc handle
4904  * @peer: Datapath peer handle
4905  * @peer_id: Peer ID
4906  * @vdev_id: Vdev ID
4907  *
4908  * Return: void
4909  */
4910 static void dp_peer_flush_ast_entry(struct dp_soc *soc,
4911 				    struct dp_peer *peer,
4912 				    uint16_t peer_id,
4913 				    uint8_t vdev_id)
4914 {
4915 	struct dp_ast_entry *ase, *tmp_ase;
4916 
4917 	if (soc->is_peer_map_unmap_v2) {
4918 		DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
4919 			dp_rx_peer_unmap_handler(soc,
4920 						 peer_id,
4921 						 vdev_id,
4922 						 ase->mac_addr.raw,
4923 						 1);
4924 		}
4925 	}
4926 }
4927 
4928 /**
4929  * dp_vdev_flush_peers() - Forcibly flush peers of a vdev
4930  * @vdev_handle: Datapath VDEV handle
4931  * @unmap_only: Flag to indicate "only unmap"
4932  *
4933  * Return: void
4934  */
4935 static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, bool unmap_only)
4936 {
4937 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
4938 	struct dp_pdev *pdev = vdev->pdev;
4939 	struct dp_soc *soc = pdev->soc;
4940 	struct dp_peer *peer;
4941 	uint16_t *peer_ids;
4942 	struct dp_peer **peer_array = NULL;
4943 	uint8_t i = 0, j = 0;
4944 	uint8_t m = 0, n = 0;
4945 
4946 	peer_ids = qdf_mem_malloc(soc->max_peers * sizeof(peer_ids[0]));
4947 	if (!peer_ids) {
4948 		dp_err("DP alloc failure - unable to flush peers");
4949 		return;
4950 	}
4951 
4952 	if (!unmap_only) {
4953 		peer_array = qdf_mem_malloc(
4954 				soc->max_peers * sizeof(struct dp_peer *));
4955 		if (!peer_array) {
4956 			qdf_mem_free(peer_ids);
4957 			dp_err("DP alloc failure - unable to flush peers");
4958 			return;
4959 		}
4960 	}
4961 
4962 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
4963 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4964 		if (!unmap_only && n < soc->max_peers)
4965 			peer_array[n++] = peer;
4966 
4967 		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
4968 			if (peer->peer_ids[i] != HTT_INVALID_PEER)
4969 				if (j < soc->max_peers)
4970 					peer_ids[j++] = peer->peer_ids[i];
4971 	}
4972 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
4973 
4974 	/*
4975 	 * If the peer id is invalid, the peer still needs to be flushed
4976 	 * when its valid flag is true; this is needed for the NAN + SSR case.
4977 	 */
4978 	if (!unmap_only) {
4979 		for (m = 0; m < n ; m++) {
4980 			peer = peer_array[m];
4981 
4982 			dp_info("peer: %pM is getting deleted",
4983 				peer->mac_addr.raw);
4984 			/* only if peer valid is true */
4985 			if (peer->valid)
4986 				dp_peer_delete_wifi3(peer, 0);
4987 		}
4988 		qdf_mem_free(peer_array);
4989 	}
4990 
4991 	for (i = 0; i < j ; i++) {
4992 		peer = __dp_peer_find_by_id(soc, peer_ids[i]);
4993 
4994 		if (!peer)
4995 			continue;
4996 
4997 		dp_info("peer: %pM is getting unmap",
4998 			peer->mac_addr.raw);
4999 		/* free AST entries of peer */
5000 		dp_peer_flush_ast_entry(soc, peer,
5001 					peer_ids[i],
5002 					vdev->vdev_id);
5003 
5004 		dp_rx_peer_unmap_handler(soc, peer_ids[i],
5005 					 vdev->vdev_id,
5006 					 peer->mac_addr.raw, 0);
5007 	}
5008 
5009 	qdf_mem_free(peer_ids);
5010 	dp_info("Flushed peers for vdev object %pK ", vdev);
5011 }
5012 
5013 /*
5014  * dp_vdev_detach_wifi3() - Detach txrx vdev
5015  * @vdev_handle:	Datapath VDEV handle
5016  * @callback:		OL_IF callback invoked on completion of detach
5017  * @cb_context:	Callback context
5018  * Return: None
5019  */
5020 static void dp_vdev_detach_wifi3(struct cdp_vdev *vdev_handle,
5021 	ol_txrx_vdev_delete_cb callback, void *cb_context)
5022 {
5023 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5024 	struct dp_pdev *pdev;
5025 	struct dp_soc *soc;
5026 	struct dp_neighbour_peer *peer = NULL;
5027 	struct dp_neighbour_peer *temp_peer = NULL;
5028 
5029 	/* preconditions */
5030 	qdf_assert_always(vdev);
5031 	pdev = vdev->pdev;
5032 	soc = pdev->soc;
5033 
5034 	soc->vdev_id_map[vdev->vdev_id] = NULL;
5035 
5036 	if (wlan_op_mode_monitor == vdev->opmode)
5037 		goto free_vdev;
5038 
5039 	if (wlan_op_mode_sta == vdev->opmode)
5040 		dp_peer_delete_wifi3(vdev->vap_self_peer, 0);
5041 
5042 	/*
5043 	 * If the target is hung, flush all peers before detaching the vdev;
5044 	 * this will free all references held due to missing
5045 	 * unmap commands from the target
5046 	 */
5047 	if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
5048 		dp_vdev_flush_peers((struct cdp_vdev *)vdev, false);
5049 
5050 	/*
5051 	 * Use peer_ref_mutex while accessing peer_list, in case
5052 	 * a peer is in the process of being removed from the list.
5053 	 */
5054 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
5055 	/* check that the vdev has no peers allocated */
5056 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
5057 		/* debug print - will be removed later */
5058 		dp_warn("not deleting vdev object %pK (%pM) until deletion finishes for all its peers",
5059 			vdev, vdev->mac_addr.raw);
5060 		/* indicate that the vdev needs to be deleted */
5061 		vdev->delete.pending = 1;
5062 		vdev->delete.callback = callback;
5063 		vdev->delete.context = cb_context;
5064 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5065 		return;
5066 	}
5067 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5068 
5069 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
5070 	if (!soc->hw_nac_monitor_support) {
5071 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
5072 			      neighbour_peer_list_elem) {
5073 			QDF_ASSERT(peer->vdev != vdev);
5074 		}
5075 	} else {
5076 		TAILQ_FOREACH_SAFE(peer, &pdev->neighbour_peers_list,
5077 				   neighbour_peer_list_elem, temp_peer) {
5078 			if (peer->vdev == vdev) {
5079 				TAILQ_REMOVE(&pdev->neighbour_peers_list, peer,
5080 					     neighbour_peer_list_elem);
5081 				qdf_mem_free(peer);
5082 			}
5083 		}
5084 	}
5085 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
5086 
5087 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5088 	dp_tx_vdev_detach(vdev);
5089 	dp_rx_vdev_detach(vdev);
5090 	/* remove the vdev from its parent pdev's list */
5091 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
5092 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
5093 		FL("deleting vdev object %pK (%pM)"), vdev, vdev->mac_addr.raw);
5094 
5095 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5096 free_vdev:
5097 	if (wlan_op_mode_monitor == vdev->opmode)
5098 		pdev->monitor_vdev = NULL;
5099 
5100 	qdf_mem_free(vdev);
5101 
5102 	if (callback)
5103 		callback(cb_context);
5104 }
5105 
5106 #ifdef FEATURE_AST
5107 /*
5108  * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
5109  * @soc: datapath soc handle
5110  * @peer: datapath peer handle
5111  *
5112  * Delete the AST entries belonging to a peer
5113  */
5114 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
5115 					      struct dp_peer *peer)
5116 {
5117 	struct dp_ast_entry *ast_entry, *temp_ast_entry;
5118 
5119 	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
5120 		dp_peer_del_ast(soc, ast_entry);
5121 
5122 	peer->self_ast_entry = NULL;
5123 }
5124 #else
5125 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
5126 					      struct dp_peer *peer)
5127 {
5128 }
5129 #endif
5130 #if ATH_SUPPORT_WRAP
5131 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
5132 						uint8_t *peer_mac_addr)
5133 {
5134 	struct dp_peer *peer;
5135 
5136 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
5137 				      0, vdev->vdev_id);
5138 	if (!peer)
5139 		return NULL;
5140 
5141 	if (peer->bss_peer)
5142 		return peer;
5143 
5144 	dp_peer_unref_delete(peer);
5145 	return NULL;
5146 }
5147 #else
5148 static inline struct dp_peer *dp_peer_can_reuse(struct dp_vdev *vdev,
5149 						uint8_t *peer_mac_addr)
5150 {
5151 	struct dp_peer *peer;
5152 
5153 	peer = dp_peer_find_hash_find(vdev->pdev->soc, peer_mac_addr,
5154 				      0, vdev->vdev_id);
5155 	if (!peer)
5156 		return NULL;
5157 
5158 	if (peer->bss_peer && (peer->vdev->vdev_id == vdev->vdev_id))
5159 		return peer;
5160 
5161 	dp_peer_unref_delete(peer);
5162 	return NULL;
5163 }
5164 #endif
5165 
5166 #ifdef FEATURE_AST
5167 static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
5168 					       struct dp_pdev *pdev,
5169 					       uint8_t *peer_mac_addr)
5170 {
5171 	struct dp_ast_entry *ast_entry;
5172 
5173 	qdf_spin_lock_bh(&soc->ast_lock);
5174 	if (soc->ast_override_support)
5175 		ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, peer_mac_addr,
5176 							    pdev->pdev_id);
5177 	else
5178 		ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
5179 
5180 	if (ast_entry && ast_entry->next_hop && !ast_entry->delete_in_progress)
5181 		dp_peer_del_ast(soc, ast_entry);
5182 
5183 	qdf_spin_unlock_bh(&soc->ast_lock);
5184 }
5185 #endif
5186 
5187 #ifdef PEER_CACHE_RX_PKTS
5188 static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
5189 {
5190 	qdf_spinlock_create(&peer->bufq_info.bufq_lock);
5191 	peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
5192 	qdf_list_create(&peer->bufq_info.cached_bufq, DP_RX_CACHED_BUFQ_THRESH);
5193 }
5194 #else
5195 static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
5196 {
5197 }
5198 #endif
5199 
5200 /*
5201  * dp_peer_create_wifi3() - attach txrx peer
5202  * @vdev_handle: Datapath VDEV handle
5203  * @peer_mac_addr: Peer MAC address
5204  * @ctrl_peer: Control-plane peer handle
5205  * Return: DP peer handle on success, NULL on failure
5206  */
5207 static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
5208 		uint8_t *peer_mac_addr, struct cdp_ctrl_objmgr_peer *ctrl_peer)
5209 {
5210 	struct dp_peer *peer;
5211 	int i;
5212 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5213 	struct dp_pdev *pdev;
5214 	struct dp_soc *soc;
5215 	struct cdp_peer_cookie peer_cookie;
5216 	enum cdp_txrx_ast_entry_type ast_type = CDP_TXRX_AST_TYPE_STATIC;
5217 
5218 	/* preconditions */
5219 	qdf_assert(vdev);
5220 	qdf_assert(peer_mac_addr);
5221 
5222 	pdev = vdev->pdev;
5223 	soc = pdev->soc;
5224 
5225 	/*
5226 	 * If a peer entry with given MAC address already exists,
5227 	 * reuse the peer and reset the state of peer.
5228 	 */
5229 	peer = dp_peer_can_reuse(vdev, peer_mac_addr);
5230 
5231 	if (peer) {
5232 		qdf_atomic_init(&peer->is_default_route_set);
5233 		dp_peer_cleanup(vdev, peer, true);
5234 
5235 		qdf_spin_lock_bh(&soc->ast_lock);
5236 		dp_peer_delete_ast_entries(soc, peer);
5237 		peer->delete_in_progress = false;
5238 		qdf_spin_unlock_bh(&soc->ast_lock);
5239 
5240 		if ((vdev->opmode == wlan_op_mode_sta) &&
5241 		    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
5242 		     QDF_MAC_ADDR_SIZE)) {
5243 			ast_type = CDP_TXRX_AST_TYPE_SELF;
5244 		}
5245 		dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
5246 		/*
5247 		 * Control path maintains a node count which is incremented
5248 		 * for every new peer create command. Since a new peer is not
5249 		 * being created and the earlier reference is reused here, a
5250 		 * peer_unref_delete event is sent to the control path to
5251 		 * increment the count back.
5252 		 */
5253 		if (soc->cdp_soc.ol_ops->peer_unref_delete) {
5254 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
5255 				peer->mac_addr.raw, vdev->mac_addr.raw,
5256 				vdev->opmode, peer->ctrl_peer, ctrl_peer);
5257 		}
5258 		peer->ctrl_peer = ctrl_peer;
5259 
5260 		dp_local_peer_id_alloc(pdev, peer);
5261 
5262 		qdf_spinlock_create(&peer->peer_info_lock);
5263 		dp_peer_rx_bufq_resources_init(peer);
5264 
5265 		DP_STATS_INIT(peer);
5266 		DP_STATS_UPD(peer, rx.avg_rssi, INVALID_RSSI);
5267 
5268 		return (void *)peer;
5269 	} else {
5270 		/*
5271 		 * When a STA roams from RPTR AP to ROOT AP and vice versa, we
5272 		 * need to remove the AST entry which was earlier added as a WDS
5273 		 * entry.
5274 		 * If an AST entry exists, but no peer entry exists with a given
5275 		 * MAC address, we can deduce it to be a WDS entry
5276 		 */
5277 		dp_peer_ast_handle_roam_del(soc, pdev, peer_mac_addr);
5278 	}
5279 
5280 #ifdef notyet
5281 	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
5282 		soc->mempool_ol_ath_peer);
5283 #else
5284 	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
5285 #endif
5286 
5287 	if (!peer)
5288 		return NULL; /* failure */
5289 
5290 	qdf_mem_zero(peer, sizeof(struct dp_peer));
5291 
5292 	TAILQ_INIT(&peer->ast_entry_list);
5293 
5294 	/* store provided params */
5295 	peer->vdev = vdev;
5296 	peer->ctrl_peer = ctrl_peer;
5297 
5298 	if ((vdev->opmode == wlan_op_mode_sta) &&
5299 	    !qdf_mem_cmp(peer_mac_addr, &vdev->mac_addr.raw[0],
5300 			 QDF_MAC_ADDR_SIZE)) {
5301 		ast_type = CDP_TXRX_AST_TYPE_SELF;
5302 	}
5303 	dp_peer_add_ast(soc, peer, peer_mac_addr, ast_type, 0);
5304 	qdf_spinlock_create(&peer->peer_info_lock);
5305 
5306 	dp_peer_rx_bufq_resources_init(peer);
5307 
5308 	qdf_mem_copy(
5309 		&peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE);
5310 
5311 	/* initialize the peer_id */
5312 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
5313 		peer->peer_ids[i] = HTT_INVALID_PEER;
5314 
5315 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
5316 
5317 	qdf_atomic_init(&peer->ref_cnt);
5318 
5319 	/* keep one reference for attach */
5320 	qdf_atomic_inc(&peer->ref_cnt);
5321 
5322 	/* add this peer into the vdev's list */
5323 	if (wlan_op_mode_sta == vdev->opmode)
5324 		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
5325 	else
5326 		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
5327 
5328 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5329 
5330 	/* TODO: See if hash based search is required */
5331 	dp_peer_find_hash_add(soc, peer);
5332 
5333 	/* Initialize the peer state */
5334 	peer->state = OL_TXRX_PEER_STATE_DISC;
5335 
5336 	dp_info("vdev %pK created peer %pK (%pM) ref_cnt: %d",
5337 		vdev, peer, peer->mac_addr.raw,
5338 		qdf_atomic_read(&peer->ref_cnt));
5339 	/*
5340 	 * For every peer MAP message, search and set if it is a bss_peer
5341 	 */
5342 	if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
5343 			QDF_MAC_ADDR_SIZE) == 0 &&
5344 			(wlan_op_mode_sta != vdev->opmode)) {
5345 		dp_info("vdev bss_peer!!");
5346 		peer->bss_peer = 1;
5347 		vdev->vap_bss_peer = peer;
5348 	}
5349 
5350 	if (wlan_op_mode_sta == vdev->opmode &&
5351 	    qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
5352 			QDF_MAC_ADDR_SIZE) == 0) {
5353 		vdev->vap_self_peer = peer;
5354 	}
5355 
5356 	for (i = 0; i < DP_MAX_TIDS; i++)
5357 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
5358 
5359 	peer->valid = 1;
5360 	dp_local_peer_id_alloc(pdev, peer);
5361 	DP_STATS_INIT(peer);
5362 	DP_STATS_UPD(peer, rx.avg_rssi, INVALID_RSSI);
5363 
5364 	qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
5365 		     QDF_MAC_ADDR_SIZE);
5366 	peer_cookie.ctx = NULL;
5367 	peer_cookie.cookie = pdev->next_peer_cookie++;
5368 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
5369 	dp_wdi_event_handler(WDI_EVENT_PEER_CREATE, pdev->soc,
5370 			     (void *)&peer_cookie,
5371 			     peer->peer_ids[0], WDI_NO_VAL, pdev->pdev_id);
5372 #endif
5373 	if (soc->wlanstats_enabled) {
5374 		if (!peer_cookie.ctx) {
5375 			pdev->next_peer_cookie--;
5376 			qdf_err("Failed to initialize peer rate stats");
5377 		} else {
5378 			peer->wlanstats_ctx = (struct cdp_peer_rate_stats_ctx *)
5379 						peer_cookie.ctx;
5380 		}
5381 	}
5382 	return (void *)peer;
5383 }
5384 
5385 /*
5386  * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
5387  * @vdev: Datapath VDEV handle
5388  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5389  * @hash_based: pointer to hash value (enabled/disabled) to be populated
5390  *
5391  * Return: None
5392  */
5393 static
5394 void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
5395 				  enum cdp_host_reo_dest_ring *reo_dest,
5396 				  bool *hash_based)
5397 {
5398 	struct dp_soc *soc;
5399 	struct dp_pdev *pdev;
5400 
5401 	pdev = vdev->pdev;
5402 	soc = pdev->soc;
5403 	/*
5404 	 * hash based steering is disabled for Radios which are offloaded
5405 	 * to NSS
5406 	 */
5407 	if (!wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx))
5408 		*hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
5409 
5410 	/*
5411 	 * The line below ensures the proper reo_dest ring is chosen for
5412 	 * cases where a Toeplitz hash cannot be generated (e.g. non-TCP/UDP)
5413 	 */
5414 	*reo_dest = pdev->reo_dest;
5415 }
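
/*
 * Note: following the index convention documented in
 * dp_peer_setup_get_reo_hash() below, cdp_host_reo_dest_ring enum
 * values 1 - 4 map to HW REO destination ring indexes 0 - 3; e.g. a
 * configured reo_dest of 2 steers RX to HW REO index 1.
 */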
5416 
5417 #ifdef IPA_OFFLOAD
5418 /**
5419  * dp_is_vdev_subtype_p2p() - Check if the subtype for vdev is P2P
5420  * @vdev: Virtual device
5421  *
5422  * Return: true if the vdev is of subtype P2P
5423  *	   false if the vdev is of any other subtype
5424  */
5425 static inline bool dp_is_vdev_subtype_p2p(struct dp_vdev *vdev)
5426 {
5427 	if (vdev->subtype == wlan_op_subtype_p2p_device ||
5428 	    vdev->subtype == wlan_op_subtype_p2p_cli ||
5429 	    vdev->subtype == wlan_op_subtype_p2p_go)
5430 		return true;
5431 
5432 	return false;
5433 }
5434 
5435 /*
5436  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
5437  * @vdev: Datapath VDEV handle
5438  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5439  * @hash_based: pointer to hash value (enabled/disabled) to be populated
5440  *
5441  * If IPA is enabled in the INI, disable hash-based steering for SAP mode
5442  * and use the default reo_dest ring for RX. Use config values for other modes.
5443  * Return: None
5444  */
5445 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
5446 				       enum cdp_host_reo_dest_ring *reo_dest,
5447 				       bool *hash_based)
5448 {
5449 	struct dp_soc *soc;
5450 	struct dp_pdev *pdev;
5451 
5452 	pdev = vdev->pdev;
5453 	soc = pdev->soc;
5454 
5455 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
5456 
5457 	/* For P2P-GO interfaces we do not need to change the REO
5458 	 * configuration even if IPA config is enabled
5459 	 */
5460 	if (dp_is_vdev_subtype_p2p(vdev))
5461 		return;
5462 
5463 	/*
5464 	 * If IPA is enabled, disable hash-based flow steering and set
5465 	 * reo_dest_ring_4 as the REO ring to receive packets on.
5466 	 * IPA is configured to reap reo_dest_ring_4.
5467 	 *
5468 	 * Note - REO DST indexes are from 0 - 3, while cdp_host_reo_dest_ring
5469 	 * enum values are from 1 - 4.
5470 	 * Hence, *reo_dest = IPA_REO_DEST_RING_IDX + 1
5471 	 */
5472 	if (wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx)) {
5473 		if (vdev->opmode == wlan_op_mode_ap) {
5474 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
5475 			*hash_based = 0;
5476 		} else if (vdev->opmode == wlan_op_mode_sta &&
5477 			   dp_ipa_is_mdm_platform()) {
5478 			*reo_dest = IPA_REO_DEST_RING_IDX + 1;
5479 		}
5480 	}
5481 }
5482 
5483 #else
5484 
5485 /*
5486  * dp_peer_setup_get_reo_hash() - get reo dest ring and hash values for a peer
5487  * @vdev: Datapath VDEV handle
5488  * @reo_dest: pointer to default reo_dest ring for vdev to be populated
5489  * @hash_based: pointer to hash value (enabled/disabled) to be populated
5490  *
5491  * Use system config values for hash based steering.
5492  * Return: None
5493  */
5494 
5495 static void dp_peer_setup_get_reo_hash(struct dp_vdev *vdev,
5496 				       enum cdp_host_reo_dest_ring *reo_dest,
5497 				       bool *hash_based)
5498 {
5499 	dp_vdev_get_default_reo_hash(vdev, reo_dest, hash_based);
5500 }
5501 #endif /* IPA_OFFLOAD */
5502 
5503 /*
5504  * dp_peer_setup_wifi3() - initialize the peer
5505  * @vdev_hdl: virtual device object
5506  * @peer_hdl: Datapath peer handle
5507  *
5508  * Return: void
5509  */
5510 static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
5511 {
5512 	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
5513 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5514 	struct dp_pdev *pdev;
5515 	struct dp_soc *soc;
5516 	bool hash_based = 0;
5517 	enum cdp_host_reo_dest_ring reo_dest;
5518 
5519 	/* preconditions */
5520 	qdf_assert(vdev);
5521 	qdf_assert(peer);
5522 
5523 	pdev = vdev->pdev;
5524 	soc = pdev->soc;
5525 
5526 	dp_peer_setup_get_reo_hash(vdev, &reo_dest, &hash_based);
5527 
5528 	dp_info("pdev: %d vdev: %d opmode:%u hash-based-steering:%d default-reo_dest:%u",
5529 		pdev->pdev_id, vdev->vdev_id,
5530 		vdev->opmode, hash_based, reo_dest);
5531 
5533 	/*
5534 	 * There are corner cases where AD1 = AD2 = "VAP's address",
5535 	 * i.e. both devices have the same MAC address. In these
5536 	 * cases we want such pkts to be processed by the NULL Q handler,
5537 	 * which is the REO2TCL ring. For this reason we should
5538 	 * not set up reo_queues and a default route for the bss_peer.
5539 	 */
5540 	if (peer->bss_peer && vdev->opmode == wlan_op_mode_ap)
5541 		return;
5542 
5543 	if (soc->cdp_soc.ol_ops->peer_set_default_routing) {
5544 		/* TODO: Check the destination ring number to be passed to FW */
5545 		soc->cdp_soc.ol_ops->peer_set_default_routing(
5546 				pdev->ctrl_pdev, peer->mac_addr.raw,
5547 				peer->vdev->vdev_id, hash_based, reo_dest);
5548 	}
5549 
5550 	qdf_atomic_set(&peer->is_default_route_set, 1);
5551 
5552 	dp_peer_rx_init(pdev, peer);
5553 	dp_peer_tx_init(pdev, peer);
5554 
5555 	dp_peer_ppdu_delayed_ba_init(peer);
5558 }
5559 
5560 /*
5561  * dp_cp_peer_del_resp_handler - Handle the peer delete response
5562  * @soc_hdl: Datapath SOC handle
5563  * @vdev_hdl: virtual device object
5564  * @mac_addr: MAC address of the peer
5565  *
5566  * Return: void
5567  */
5568 static void dp_cp_peer_del_resp_handler(struct cdp_soc_t *soc_hdl,
5569 					struct cdp_vdev *vdev_hdl,
5570 					uint8_t *mac_addr)
5571 {
5572 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
5573 	struct dp_ast_entry  *ast_entry = NULL;
5574 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
5575 	txrx_ast_free_cb cb = NULL;
5576 	void *cookie;
5577 
5578 	qdf_spin_lock_bh(&soc->ast_lock);
5579 
5580 	if (soc->ast_override_support)
5581 		ast_entry =
5582 			dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
5583 							vdev->pdev->pdev_id);
5584 	else
5585 		ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
5586 
5587 	/* In the case of qwrap, we have multiple BSS peers
5588 	 * with the same MAC address.
5589 	 *
5590 	 * An AST entry for this MAC address will be created
5591 	 * only for one peer; hence it will be NULL here.
5592 	 */
5593 	if (!ast_entry || ast_entry->peer || !ast_entry->delete_in_progress) {
5594 		qdf_spin_unlock_bh(&soc->ast_lock);
5595 		return;
5596 	}
5597 
5598 	if (ast_entry->is_mapped)
5599 		soc->ast_table[ast_entry->ast_idx] = NULL;
5600 
5601 	DP_STATS_INC(soc, ast.deleted, 1);
5602 	dp_peer_ast_hash_remove(soc, ast_entry);
5603 
5604 	cb = ast_entry->callback;
5605 	cookie = ast_entry->cookie;
5606 	ast_entry->callback = NULL;
5607 	ast_entry->cookie = NULL;
5608 
5609 	soc->num_ast_entries--;
5610 	qdf_spin_unlock_bh(&soc->ast_lock);
5611 
5612 	if (cb) {
5613 		cb(soc->ctrl_psoc,
5614 		   dp_soc_to_cdp_soc(soc),
5615 		   cookie,
5616 		   CDP_TXRX_AST_DELETED);
5617 	}
5618 	qdf_mem_free(ast_entry);
5619 }
5620 
5621 /*
5622  * dp_set_vdev_tx_encap_type() - set the encap type of the vdev
5623  * @vdev_handle: virtual device object
5624  * @val: encap type (enum htt_cmn_pkt_type)
5625  *
5626  * Return: void
5627  */
5628 static void dp_set_vdev_tx_encap_type(struct cdp_vdev *vdev_handle,
5629 	 enum htt_cmn_pkt_type val)
5630 {
5631 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5632 	vdev->tx_encap_type = val;
5633 }
5634 
5635 /*
5636  * dp_set_vdev_rx_decap_type() - set the decap type of the vdev
5637  * @vdev_handle: virtual device object
5638  * @val: decap type (enum htt_cmn_pkt_type)
5639  *
5640  * Return: void
5641  */
5642 static void dp_set_vdev_rx_decap_type(struct cdp_vdev *vdev_handle,
5643 	 enum htt_cmn_pkt_type val)
5644 {
5645 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5646 	vdev->rx_decap_type = val;
5647 }
5648 
5649 /*
5650  * dp_set_ba_aging_timeout() - set ba aging timeout per AC
5651  * @txrx_soc: cdp soc handle
5652  * @ac: Access category
5653  * @value: timeout value in milliseconds
5654  *
5655  * Return: void
5656  */
5657 static void dp_set_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
5658 				    uint8_t ac, uint32_t value)
5659 {
5660 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5661 
5662 	hal_set_ba_aging_timeout(soc->hal_soc, ac, value);
5663 }
5664 
5665 /*
5666  * dp_get_ba_aging_timeout() - get ba aging timeout per AC
5667  * @txrx_soc: cdp soc handle
5668  * @ac: access category
5669  * @value: pointer populated with the timeout value in milliseconds
5670  *
5671  * Return: void
5672  */
5673 static void dp_get_ba_aging_timeout(struct cdp_soc_t *txrx_soc,
5674 				    uint8_t ac, uint32_t *value)
5675 {
5676 	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
5677 
5678 	hal_get_ba_aging_timeout(soc->hal_soc, ac, value);
5679 }
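
/*
 * Example usage (a sketch with illustrative values; txrx_soc stands
 * for any valid struct cdp_soc_t handle): set the BA aging timeout of
 * access category 0 to 100 ms, then read it back:
 *
 *	uint32_t timeout;
 *
 *	dp_set_ba_aging_timeout(txrx_soc, 0, 100);
 *	dp_get_ba_aging_timeout(txrx_soc, 0, &timeout);
 */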
5680 
5681 /*
5682  * dp_set_pdev_reo_dest() - set the reo destination ring for this pdev
5683  * @pdev_handle: physical device object
5684  * @val: reo destination ring index (1 - 4)
5685  *
5686  * Return: void
5687  */
5688 static void dp_set_pdev_reo_dest(struct cdp_pdev *pdev_handle,
5689 	 enum cdp_host_reo_dest_ring val)
5690 {
5691 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5692 
5693 	if (pdev)
5694 		pdev->reo_dest = val;
5695 }
5696 
5697 /*
5698  * dp_get_pdev_reo_dest() - get the reo destination for this pdev
5699  * @pdev_handle: physical device object
5700  *
5701  * Return: reo destination ring index
5702  */
5703 static enum cdp_host_reo_dest_ring
5704 dp_get_pdev_reo_dest(struct cdp_pdev *pdev_handle)
5705 {
5706 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5707 
5708 	if (pdev)
5709 		return pdev->reo_dest;
5710 	else
5711 		return cdp_host_reo_dest_ring_unknown;
5712 }
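
/*
 * Example usage (a sketch; assumes the cdp_host_reo_dest_ring_2
 * enumerator from cdp_txrx_cmn_struct.h and a valid pdev_handle):
 *
 *	dp_set_pdev_reo_dest(pdev_handle, cdp_host_reo_dest_ring_2);
 *	if (dp_get_pdev_reo_dest(pdev_handle) != cdp_host_reo_dest_ring_2)
 *		dp_err("reo_dest not applied");
 */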
5713 
5714 /*
5715  * dp_set_filter_neighbour_peers() - set filter neighbour peers for smart mesh
5716  * @pdev_handle: device object
5717  * @val: value to be set
5718  *
5719  * Return: 0 on success
5720  */
5721 static int dp_set_filter_neighbour_peers(struct cdp_pdev *pdev_handle,
5722 	 uint32_t val)
5723 {
5724 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
5725 
5726 	/* Enable/Disable smart mesh filtering. This flag will be checked
5727 	 * during rx processing to check if packets are from NAC clients.
5728 	 */
5729 	pdev->filter_neighbour_peers = val;
5730 	return 0;
5731 }
5732 
5733 /*
5734  * dp_update_filter_neighbour_peers() - set neighbour peers(nac clients)
5735  * address for smart mesh filtering
5736  * @vdev_handle: virtual device object
5737  * @cmd: Add/Del command
5738  * @macaddr: nac client mac address
5739  *
5740  * Return: 1 on success, 0 on failure
5741  */
5742 static int dp_update_filter_neighbour_peers(struct cdp_vdev *vdev_handle,
5743 					    uint32_t cmd, uint8_t *macaddr)
5744 {
5745 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
5746 	struct dp_pdev *pdev = vdev->pdev;
5747 	struct dp_neighbour_peer *peer = NULL;
5748 
5749 	if (!macaddr)
5750 		goto fail0;
5751 
5752 	/* Store address of NAC (neighbour peer) which will be checked
5753 	 * against TA of received packets.
5754 	 */
5755 	if (cmd == DP_NAC_PARAM_ADD) {
5756 		peer = (struct dp_neighbour_peer *) qdf_mem_malloc(
5757 				sizeof(*peer));
5758 
5759 		if (!peer) {
5760 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5761 				FL("DP neighbour peer node memory allocation failed"));
5762 			goto fail0;
5763 		}
5764 
5765 		qdf_mem_copy(&peer->neighbour_peers_macaddr.raw[0],
5766 			macaddr, QDF_MAC_ADDR_SIZE);
5767 		peer->vdev = vdev;
5768 
5769 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
5770 
5771 		/* add this neighbour peer into the list */
5772 		TAILQ_INSERT_TAIL(&pdev->neighbour_peers_list, peer,
5773 				neighbour_peer_list_elem);
5774 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
5775 
5776 		/* first neighbour */
5777 		if (!pdev->neighbour_peers_added) {
5778 			pdev->neighbour_peers_added = true;
5779 			dp_ppdu_ring_cfg(pdev);
5780 		}
5781 		return 1;
5782 
5783 	} else if (cmd == DP_NAC_PARAM_DEL) {
5784 		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
5785 		TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
5786 				neighbour_peer_list_elem) {
5787 			if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
5788 				macaddr, QDF_MAC_ADDR_SIZE)) {
5789 				/* delete this peer from the list */
5790 				TAILQ_REMOVE(&pdev->neighbour_peers_list,
5791 					peer, neighbour_peer_list_elem);
5792 				qdf_mem_free(peer);
5793 				break;
5794 			}
5795 		}
5796 		/* last neighbour deleted */
5797 		if (TAILQ_EMPTY(&pdev->neighbour_peers_list)) {
5798 			pdev->neighbour_peers_added = false;
5799 			dp_ppdu_ring_cfg(pdev);
5800 		}
5801 
5802 		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
5803 
5804 		if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
5805 		    !pdev->enhanced_stats_en)
5806 			dp_ppdu_ring_reset(pdev);
5807 		return 1;
5808 
5809 	}
5810 
5811 fail0:
5812 	return 0;
5813 }
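
/*
 * Example usage (a sketch; nac_mac is a hypothetical client address):
 * enable smart mesh filtering, register a NAC client, and remove it
 * again later:
 *
 *	uint8_t nac_mac[QDF_MAC_ADDR_SIZE] = {0x00, 0x11, 0x22,
 *					      0x33, 0x44, 0x55};
 *
 *	dp_set_filter_neighbour_peers(pdev_handle, 1);
 *	dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
 *					 nac_mac);
 *	...
 *	dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_DEL,
 *					 nac_mac);
 */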
5814 
5815 /*
5816  * dp_get_sec_type() - Get the security type
5817  * @peer:		Datapath peer handle
5818  * @sec_idx:    Security id (mcast, ucast)
5819  *
5820  * Return: sec_type (security type of the peer)
5821  */
5822 static int dp_get_sec_type(struct cdp_peer *peer, uint8_t sec_idx)
5823 {
5824 	struct dp_peer *dpeer = (struct dp_peer *)peer;
5825 
5826 	return dpeer->security[sec_idx].sec_type;
5827 }
5828 
5829 /*
5830  * dp_peer_authorize() - authorize txrx peer
5831  * @peer_handle:		Datapath peer handle
5832  * @authorize: authorize flag (1 - authorize, 0 - unauthorize)
5833  * Return: void
5834  */
5835 static void dp_peer_authorize(struct cdp_peer *peer_handle, uint32_t authorize)
5836 {
5837 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
5838 	struct dp_soc *soc;
5839 
5840 	if (peer) {
5841 		soc = peer->vdev->pdev->soc;
5842 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
5843 		peer->authorize = authorize ? 1 : 0;
5844 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
5845 	}
5846 }
5847 
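/*
 * dp_reset_and_release_peer_mem() - notify control path and free the peer
 * @soc: Datapath SOC handle
 * @pdev: Datapath PDEV handle
 * @peer: peer whose memory is being released
 * @vdev: Datapath VDEV handle the peer belongs to (may be NULL)
 *
 * Return: void
 */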
5848 static void dp_reset_and_release_peer_mem(struct dp_soc *soc,
5849 					  struct dp_pdev *pdev,
5850 					  struct dp_peer *peer,
5851 					  struct dp_vdev *vdev)
5852 {
5853 	struct dp_peer *bss_peer = NULL;
5854 	uint8_t *m_addr = NULL;
5855 
5856 	if (!vdev) {
5857 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
5858 			  "vdev is NULL");
5859 	} else {
5860 		if (vdev->vap_bss_peer == peer)
5861 			vdev->vap_bss_peer = NULL;
5862 		m_addr = peer->mac_addr.raw;
5863 		if (soc->cdp_soc.ol_ops->peer_unref_delete)
5864 			soc->cdp_soc.ol_ops->peer_unref_delete(pdev->ctrl_pdev,
5865 				m_addr, vdev->mac_addr.raw, vdev->opmode,
5866 				peer->ctrl_peer, NULL);
5867 
5868 		if (vdev->vap_bss_peer) {
5869 			bss_peer = vdev->vap_bss_peer;
5870 			DP_UPDATE_STATS(vdev, peer);
5871 		}
5872 	}
5873 	/*
5874 	 * Peer AST list has to be empty here
5875 	 */
5876 	DP_AST_ASSERT(TAILQ_EMPTY(&peer->ast_entry_list));
5877 
5878 	qdf_mem_free(peer);
5879 }
5880 
5881 /**
5882  * dp_delete_pending_vdev() - check and process vdev delete
5883  * @pdev: DP specific pdev pointer
5884  * @vdev: DP specific vdev pointer
5885  * @vdev_id: vdev id corresponding to vdev
5886  *
5887  * This API does the following:
5888  * 1) It releases tx flow pool buffers, as the vdev is
5889  *    going down and no peers are associated.
5890  * 2) It also detaches the vdev before cleaning the vdev (struct dp_vdev) memory
5891  */
5892 static void dp_delete_pending_vdev(struct dp_pdev *pdev, struct dp_vdev *vdev,
5893 				   uint8_t vdev_id)
5894 {
5895 	ol_txrx_vdev_delete_cb vdev_delete_cb = NULL;
5896 	void *vdev_delete_context = NULL;
5897 
5898 	vdev_delete_cb = vdev->delete.callback;
5899 	vdev_delete_context = vdev->delete.context;
5900 
5901 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
5902 		  FL("deleting vdev object %pK (%pM)- its last peer is done"),
5903 		  vdev, vdev->mac_addr.raw);
5904 	/* all peers are gone, go ahead and delete it */
5905 	dp_tx_flow_pool_unmap_handler(pdev, vdev_id,
5906 			FLOW_TYPE_VDEV, vdev_id);
5907 	dp_tx_vdev_detach(vdev);
5908 
5909 	pdev->soc->vdev_id_map[vdev_id] = NULL;
5910 
5911 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
5912 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
5913 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
5914 
5915 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
5916 		  FL("deleting vdev object %pK (%pM)"),
5917 		  vdev, vdev->mac_addr.raw);
5918 	qdf_mem_free(vdev);
5919 	vdev = NULL;
5920 
5921 	if (vdev_delete_cb)
5922 		vdev_delete_cb(vdev_delete_context);
5923 }
5924 
5925 /*
5926  * dp_peer_unref_delete() - unref and delete peer
5927  * @peer:		Datapath peer handle
5928  * Return: void
5929  */
5930 void dp_peer_unref_delete(struct dp_peer *peer)
5931 {
5932 	struct dp_vdev *vdev = peer->vdev;
5933 	struct dp_pdev *pdev = vdev->pdev;
5934 	struct dp_soc *soc = pdev->soc;
5935 	struct dp_peer *tmppeer;
5936 	int found = 0;
5937 	uint16_t peer_id;
5938 	uint16_t vdev_id;
5939 	bool delete_vdev;
5940 	struct cdp_peer_cookie peer_cookie;
5941 
5942 	/*
5943 	 * Hold the lock all the way from checking if the peer ref count
5944 	 * is zero until the peer references are removed from the hash
5945 	 * table and vdev list (if the peer ref count is zero).
5946 	 * This protects against a new HL tx operation starting to use the
5947 	 * peer object just after this function concludes it's done being used.
5948 	 * Furthermore, the lock needs to be held while checking whether the
5949 	 * vdev's list of peers is empty, to make sure that list is not modified
5950 	 * concurrently with the empty check.
5951 	 */
5952 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
5953 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
5954 		peer_id = peer->peer_ids[0];
5955 		vdev_id = vdev->vdev_id;
5956 
5957 		/*
5958 		 * Make sure that the reference to the peer in
5959 		 * peer object map is removed
5960 		 */
5961 		if (peer_id != HTT_INVALID_PEER)
5962 			soc->peer_id_to_obj_map[peer_id] = NULL;
5963 
5964 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
5965 			  "Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
5966 
5967 		/* remove the reference to the peer from the hash table */
5968 		dp_peer_find_hash_remove(soc, peer);
5969 
5970 		qdf_spin_lock_bh(&soc->ast_lock);
5971 		if (peer->self_ast_entry) {
5972 			dp_peer_del_ast(soc, peer->self_ast_entry);
5973 			peer->self_ast_entry = NULL;
5974 		}
5975 		qdf_spin_unlock_bh(&soc->ast_lock);
5976 
5977 		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
5978 			if (tmppeer == peer) {
5979 				found = 1;
5980 				break;
5981 			}
5982 		}
5983 
5984 		if (found) {
5985 			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
5986 				peer_list_elem);
5987 		} else {
5988 			/* Ignoring the remove operation as peer not found */
5989 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
5990 				  "peer:%pK not found in vdev:%pK peerlist:%pK",
5991 				  peer, vdev, &peer->vdev->peer_list);
5992 		}
5993 
5994 		/* send peer destroy event to upper layer */
5995 		qdf_mem_copy(peer_cookie.mac_addr, peer->mac_addr.raw,
5996 			     QDF_MAC_ADDR_SIZE);
5997 		peer_cookie.ctx = NULL;
5998 		peer_cookie.ctx = (struct cdp_stats_cookie *)
5999 					peer->wlanstats_ctx;
6000 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6001 		dp_wdi_event_handler(WDI_EVENT_PEER_DESTROY,
6002 				     pdev->soc,
6003 				     (void *)&peer_cookie,
6004 				     peer->peer_ids[0],
6005 				     WDI_NO_VAL,
6006 				     pdev->pdev_id);
6007 #endif
6008 		peer->wlanstats_ctx = NULL;
6009 
6010 		/* cleanup the peer data */
6011 		dp_peer_cleanup(vdev, peer, false);
6012 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
6013 		dp_reset_and_release_peer_mem(soc, pdev, peer, vdev);
6014 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
6015 
6016 		/* check whether the parent vdev has no peers left */
6017 		if (TAILQ_EMPTY(&vdev->peer_list)) {
6018 			/*
6019 			 * capture vdev delete pending flag's status
6020 			 * while holding peer_ref_mutex lock
6021 			 */
6022 			delete_vdev = vdev->delete.pending;
6023 			/*
6024 			 * Now that there are no references to the peer, we can
6025 			 * release the peer reference lock.
6026 			 */
6027 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
6028 			/*
6029 			 * Check if the parent vdev was waiting for its peers
6030 			 * to be deleted, in order for it to be deleted too.
6031 			 */
6032 			if (delete_vdev)
6033 				dp_delete_pending_vdev(pdev, vdev, vdev_id);
6034 		} else {
6035 			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
6036 		}
6037 
6038 	} else {
6039 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
6040 	}
6041 }
6042 
6043 #ifdef PEER_CACHE_RX_PKTS
6044 static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
6045 {
6046 	dp_rx_flush_rx_cached(peer, true);
6047 	qdf_list_destroy(&peer->bufq_info.cached_bufq);
6048 	qdf_spinlock_destroy(&peer->bufq_info.bufq_lock);
6049 }
6050 #else
6051 static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
6052 {
6053 }
6054 #endif
6055 
6056 /*
6057  * dp_peer_delete_wifi3() - detach txrx peer
6058  * @peer_handle: Datapath peer handle
6059  * @bitmap: bitmap indicating special handling of request.
6060  * Return: void
6061  */
6062 static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
6063 {
6064 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6065 
6066 	/* redirect the peer's rx delivery function to point to a
6067 	 * discard func
6068 	 */
6069 
6070 	/* Do not make ctrl_peer to NULL for connected sta peers.
6071 	 * We need ctrl_peer to release the reference during dp
6072 	 * peer free. This reference was held for
6073 	 * obj_mgr peer during the creation of dp peer.
6074 	 */
6075 	if (!(peer->vdev && (peer->vdev->opmode != wlan_op_mode_sta) &&
6076 	      !peer->bss_peer))
6077 		peer->ctrl_peer = NULL;
6078 
6079 	peer->valid = 0;
6080 
6081 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
6082 		FL("peer %pK (%pM)"),  peer, peer->mac_addr.raw);
6083 
6084 	dp_local_peer_id_free(peer->vdev->pdev, peer);
6085 
6086 	dp_peer_rx_bufq_resources_deinit(peer);
6087 
6088 	qdf_spinlock_destroy(&peer->peer_info_lock);
6089 	dp_peer_multipass_list_remove(peer);
6090 
6091 	/*
6092 	 * Remove the reference added during peer_attach.
6093 	 * The peer will still be left allocated until the
6094 	 * PEER_UNMAP message arrives to remove the other
6095 	 * reference, added by the PEER_MAP message.
6096 	 */
6097 	dp_peer_unref_delete(peer_handle);
6098 }
6099 
6100 /*
6101  * dp_get_vdev_mac_addr_wifi3() - get vdev MAC address
6102  * @pvdev:		Datapath VDEV handle
6103  * Return: pointer to the raw MAC address of the vdev
6104  */
6105 static uint8 *dp_get_vdev_mac_addr_wifi3(struct cdp_vdev *pvdev)
6106 {
6107 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
6108 	return vdev->mac_addr.raw;
6109 }
6110 
6111 /*
6112  * dp_vdev_set_wds() - Enable/Disable WDS on the vdev
6113  * @vdev_handle: DP VDEV handle
6114  * @val: value to enable (1) or disable (0) WDS
6115  *
6116  * Return: 0 on success
6117  */
6118 static int dp_vdev_set_wds(void *vdev_handle, uint32_t val)
6119 {
6120 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6121 
6122 	vdev->wds_enabled = val;
6123 	return 0;
6124 }
6125 
6126 /*
6127  * dp_get_vdev_from_vdev_id_wifi3() - get vdev from vdev_id
6128  * @dev: Datapath PDEV handle
6129  * @vdev_id: id of the vdev to look up
6130  */
6131 static struct cdp_vdev *dp_get_vdev_from_vdev_id_wifi3(struct cdp_pdev *dev,
6132 						uint8_t vdev_id)
6133 {
6134 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
6135 	struct dp_vdev *vdev = NULL;
6136 
6137 	if (qdf_unlikely(!pdev))
6138 		return NULL;
6139 
6140 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
6141 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
6142 		if (vdev->delete.pending)
6143 			continue;
6144 
6145 		if (vdev->vdev_id == vdev_id)
6146 			break;
6147 	}
6148 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6149 
6150 	return (struct cdp_vdev *)vdev;
6151 }
6152 
6153 /*
6154  * dp_get_mon_vdev_from_pdev_wifi3() - Get vdev handle of monitor mode
6155  * @dev: PDEV handle
6156  *
6157  * Return: VDEV handle of monitor mode
6158  */
6159 
6160 static struct cdp_vdev *dp_get_mon_vdev_from_pdev_wifi3(struct cdp_pdev *dev)
6161 {
6162 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
6163 
6164 	if (qdf_unlikely(!pdev))
6165 		return NULL;
6166 
6167 	return (struct cdp_vdev *)pdev->monitor_vdev;
6168 }
6169 
6170 static int dp_get_opmode(struct cdp_vdev *vdev_handle)
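/*
 * dp_get_opmode() - get the operating mode of the vdev
 * @vdev_handle: Datapath VDEV handle
 *
 * Return: operating mode (enum wlan_op_mode value)
 */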
6171 {
6172 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6173 
6174 	return vdev->opmode;
6175 }
6176 
6177 static
6178 void dp_get_os_rx_handles_from_vdev_wifi3(struct cdp_vdev *pvdev,
6179 					  ol_txrx_rx_fp *stack_fn_p,
6180 					  ol_osif_vdev_handle *osif_vdev_p)
6181 {
6182 	struct dp_vdev *vdev = dp_get_dp_vdev_from_cdp_vdev(pvdev);
6183 
6184 	qdf_assert(vdev);
6185 	*stack_fn_p = vdev->osif_rx_stack;
6186 	*osif_vdev_p = vdev->osif_vdev;
6187 }
6188 
6189 static struct cdp_cfg *dp_get_ctrl_pdev_from_vdev_wifi3(struct cdp_vdev *pvdev)
6190 {
6191 	struct dp_vdev *vdev = (struct dp_vdev *)pvdev;
6192 	struct dp_pdev *pdev = vdev->pdev;
6193 
6194 	return (struct cdp_cfg *)pdev->wlan_cfg_ctx;
6195 }
6196 
6197 /**
6198  * dp_monitor_mode_ring_config() - Send the tlv config to fw for monitor buffer
6199  *                                 ring based on target
6200  * @soc: soc handle
6201  * @mac_for_pdev: pdev_id
6202  * @pdev: physical device handle
6203  * @ring_num: mac id
6204  * @htt_tlv_filter: tlv filter
6205  *
6206  * Return: zero on success, non-zero on failure
6207  */
6208 static inline
6209 QDF_STATUS dp_monitor_mode_ring_config(struct dp_soc *soc, uint8_t mac_for_pdev,
6210 				       struct dp_pdev *pdev, uint8_t ring_num,
6211 				       struct htt_rx_ring_tlv_filter htt_tlv_filter)
6212 {
6213 	QDF_STATUS status;
6214 
6215 	if (soc->wlan_cfg_ctx->rxdma1_enable)
6216 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6217 					     pdev->rxdma_mon_buf_ring[ring_num]
6218 					     .hal_srng,
6219 					     RXDMA_MONITOR_BUF, RX_BUFFER_SIZE,
6220 					     &htt_tlv_filter);
6221 	else
6222 		status = htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6223 					     pdev->rx_mac_buf_ring[ring_num]
6224 					     .hal_srng,
6225 					     RXDMA_BUF, RX_BUFFER_SIZE,
6226 					     &htt_tlv_filter);
6227 
6228 	return status;
6229 }
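
/*
 * Note: on targets where RXDMA1 is available
 * (wlan_cfg_ctx->rxdma1_enable), the filter is programmed on the
 * RXDMA_MONITOR_BUF ring; otherwise the per-MAC RXDMA_BUF ring is
 * reused for monitor delivery.
 */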
6230 
6231 /**
6232  * dp_reset_monitor_mode() - Disable monitor mode
6233  * @pdev_handle: Datapath PDEV handle
6234  *
6235  * Return: QDF_STATUS
6236  */
6237 QDF_STATUS dp_reset_monitor_mode(struct cdp_pdev *pdev_handle)
6238 {
6239 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6240 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
6241 	struct dp_soc *soc = pdev->soc;
6242 	uint8_t pdev_id;
6243 	int mac_id;
6244 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6245 
6246 	pdev_id = pdev->pdev_id;
6247 	soc = pdev->soc;
6248 
6249 	qdf_spin_lock_bh(&pdev->mon_lock);
6250 
6251 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
6252 
6253 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6254 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
6255 
6256 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
6257 						     pdev, mac_id,
6258 						     htt_tlv_filter);
6259 
6260 		if (status != QDF_STATUS_SUCCESS) {
6261 			dp_err("Failed to send tlv filter for monitor mode rings");
6262 			qdf_spin_unlock_bh(&pdev->mon_lock);
6263 			return status;
6264 		}
6265 
6266 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6267 			    pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6268 			    RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE,
6269 			    &htt_tlv_filter);
6270 	}
6271 
6272 	pdev->monitor_vdev = NULL;
6273 	pdev->mcopy_mode = 0;
6274 	pdev->monitor_configured = false;
6275 
6276 	qdf_spin_unlock_bh(&pdev->mon_lock);
6277 
6278 	return QDF_STATUS_SUCCESS;
6279 }
6280 
6281 /**
6282  * dp_set_nac() - set peer_nac
6283  * @peer_handle: Datapath PEER handle
6284  *
6285  * Return: void
6286  */
6287 static void dp_set_nac(struct cdp_peer *peer_handle)
6288 {
6289 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
6290 
6291 	peer->nac = 1;
6292 }
6293 
6294 /**
6295  * dp_get_tx_pending() - read pending tx
6296  * @pdev_handle: Datapath PDEV handle
6297  *
6298  * Return: outstanding tx
6299  */
6300 static int dp_get_tx_pending(struct cdp_pdev *pdev_handle)
6301 {
6302 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6303 
6304 	return qdf_atomic_read(&pdev->num_tx_outstanding);
6305 }
6306 
6307 /**
6308  * dp_get_peer_mac_from_peer_id() - get peer mac
6309  * @pdev_handle: Datapath PDEV handle
6310  * @peer_id: Peer ID
6311  * @peer_mac: MAC addr of PEER
6312  *
6313  * Return: void
6314  */
6315 static void dp_get_peer_mac_from_peer_id(struct cdp_pdev *pdev_handle,
6316 	uint32_t peer_id, uint8_t *peer_mac)
6317 {
6318 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6319 	struct dp_peer *peer;
6320 
6321 	if (pdev && peer_mac) {
6322 		peer = dp_peer_find_by_id(pdev->soc, (uint16_t)peer_id);
6323 		if (peer) {
6324 			qdf_mem_copy(peer_mac, peer->mac_addr.raw,
6325 				     QDF_MAC_ADDR_SIZE);
6326 			dp_peer_unref_del_find_by_id(peer);
6327 		}
6328 	}
6329 }
6330 
6331 /**
6332  * dp_pdev_configure_monitor_rings() - configure monitor rings
6333  * @pdev: Datapath PDEV handle
6334  *
6335  * Return: QDF_STATUS
6336  */
6337 QDF_STATUS dp_pdev_configure_monitor_rings(struct dp_pdev *pdev)
6338 {
6339 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
6340 	struct dp_soc *soc;
6341 	uint8_t pdev_id;
6342 	int mac_id;
6343 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6344 
6345 	pdev_id = pdev->pdev_id;
6346 	soc = pdev->soc;
6347 
6348 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
6349 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
6350 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
6351 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
6352 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
6353 		pdev->mo_data_filter);
6354 
6355 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
6356 
6357 	htt_tlv_filter.mpdu_start = 1;
6358 	htt_tlv_filter.msdu_start = 1;
6359 	htt_tlv_filter.packet = 1;
6360 	htt_tlv_filter.msdu_end = 1;
6361 	htt_tlv_filter.mpdu_end = 1;
6362 	htt_tlv_filter.packet_header = 1;
6363 	htt_tlv_filter.attention = 1;
6364 	htt_tlv_filter.ppdu_start = 0;
6365 	htt_tlv_filter.ppdu_end = 0;
6366 	htt_tlv_filter.ppdu_end_user_stats = 0;
6367 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
6368 	htt_tlv_filter.ppdu_end_status_done = 0;
6369 	htt_tlv_filter.header_per_msdu = 1;
6370 	htt_tlv_filter.enable_fp =
6371 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
6372 	htt_tlv_filter.enable_md = 0;
6373 	htt_tlv_filter.enable_mo =
6374 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
6375 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
6376 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
6377 
6378 	if (pdev->mcopy_mode) {
6379 		htt_tlv_filter.fp_data_filter = 0;
6380 		htt_tlv_filter.mo_data_filter = 0;
6381 	} else {
6382 		htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
6383 		htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
6384 	}
6385 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
6386 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
6387 	htt_tlv_filter.offset_valid = false;
6388 
6389 	if ((pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU) ||
6390 	    (pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU_MSDU)) {
6391 		htt_tlv_filter.fp_mgmt_filter = 0;
6392 		htt_tlv_filter.fp_ctrl_filter = 0;
6393 		htt_tlv_filter.fp_data_filter = 0;
6394 		htt_tlv_filter.mo_mgmt_filter = 0;
6395 		htt_tlv_filter.mo_ctrl_filter = 0;
6396 		htt_tlv_filter.mo_data_filter = 0;
6397 	}
6398 
6399 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6400 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
6401 
6402 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
6403 						     pdev, mac_id,
6404 						     htt_tlv_filter);
6405 
6406 		if (status != QDF_STATUS_SUCCESS) {
6407 			dp_err("Failed to send tlv filter for monitor mode rings");
6408 			return status;
6409 		}
6410 	}
6411 
6412 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
6413 
6414 	htt_tlv_filter.mpdu_start = 1;
6415 	htt_tlv_filter.msdu_start = 0;
6416 	htt_tlv_filter.packet = 0;
6417 	htt_tlv_filter.msdu_end = 0;
6418 	htt_tlv_filter.mpdu_end = 0;
6419 	if ((pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU) ||
6420 	    (pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU_MSDU)) {
6421 		htt_tlv_filter.mpdu_end = 1;
6422 	}
6423 	htt_tlv_filter.attention = 0;
6424 	htt_tlv_filter.ppdu_start = 1;
6425 	htt_tlv_filter.ppdu_end = 1;
6426 	htt_tlv_filter.ppdu_end_user_stats = 1;
6427 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
6428 	htt_tlv_filter.ppdu_end_status_done = 1;
6429 	htt_tlv_filter.enable_fp = 1;
6430 	htt_tlv_filter.enable_md = 0;
6431 	htt_tlv_filter.enable_mo = 1;
6432 	if (pdev->mcopy_mode ||
6433 	    (pdev->rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED)) {
6434 		htt_tlv_filter.packet_header = 1;
6435 		if (pdev->rx_enh_capture_mode == CDP_RX_ENH_CAPTURE_MPDU) {
6436 			htt_tlv_filter.header_per_msdu = 0;
6437 			htt_tlv_filter.enable_mo = 0;
6438 		} else if (pdev->rx_enh_capture_mode ==
6439 			   CDP_RX_ENH_CAPTURE_MPDU_MSDU) {
6440 			bool is_rx_mon_proto_flow_tag_enabled =
6441 			    wlan_cfg_is_rx_mon_protocol_flow_tag_enabled(
6442 						    soc->wlan_cfg_ctx);
6443 			htt_tlv_filter.header_per_msdu = 1;
6444 			htt_tlv_filter.enable_mo = 0;
6445 			if (pdev->is_rx_enh_capture_trailer_enabled ||
6446 			    is_rx_mon_proto_flow_tag_enabled)
6447 				htt_tlv_filter.msdu_end = 1;
6448 		}
6449 	}
6450 
6451 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
6452 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
6453 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
6454 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
6455 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
6456 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
6457 	htt_tlv_filter.offset_valid = false;
6458 
6459 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6460 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6461 						pdev->pdev_id);
6462 
6463 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6464 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6465 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6466 	}
6467 
6468 	return status;
6469 }
6470 
6471 /**
6472  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
6473  * @vdev_handle: Datapath VDEV handle
6474  * @special_monitor: Flag to denote if it is a smart/special monitor mode
6475  *
6476  * Return: 0 on success, not 0 on failure
6477  */
6478 static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
6479 					   uint8_t special_monitor)
6480 {
6481 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6482 	struct dp_pdev *pdev;
6483 
6484 	qdf_assert(vdev);
6485 
6486 	pdev = vdev->pdev;
6487 	pdev->monitor_vdev = vdev;
6488 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
6489 		  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
6490 		  pdev, pdev->pdev_id, pdev->soc, vdev);
6491 
6492 	/*
6493 	 * Do not configure the monitor buf ring and filter for smart and
6494 	 * lite monitor modes.
6495 	 * For smart monitor, filters are added along with the first NAC.
6496 	 * For lite monitor, the required configuration is done through
6497 	 * dp_set_pdev_param.
6498 	 */
6499 	if (special_monitor)
6500 		return QDF_STATUS_SUCCESS;
6501 
6502 	/* Check if current pdev's monitor_vdev exists */
6503 	if (pdev->monitor_configured) {
6504 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
6505 			  "monitor vap already created vdev=%pK\n", vdev);
6506 		return QDF_STATUS_E_RESOURCES;
6507 	}
6508 
6509 	pdev->monitor_configured = true;
6510 
6511 	dp_mon_buf_delayed_replenish(pdev);
6512 
6513 	return dp_pdev_configure_monitor_rings(pdev);
6514 }
6515 
6516 /**
6517  * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
6518  * @pdev_handle: Datapath PDEV handle
6519  * @filter_val: Flag to select Filter for monitor mode
6520  * Return: 0 on success, not 0 on failure
6521  */
6522 static QDF_STATUS
6523 dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
6524 				   struct cdp_monitor_filter *filter_val)
6525 {
6526 	/* Many monitor VAPs can exist in a system, but only one can be up at
6527 	 * any time
6528 	 */
6529 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6530 	struct dp_vdev *vdev = pdev->monitor_vdev;
6531 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
6532 	struct dp_soc *soc;
6533 	uint8_t pdev_id;
6534 	int mac_id;
6535 	QDF_STATUS status = QDF_STATUS_SUCCESS;
6536 
6537 	pdev_id = pdev->pdev_id;
6538 	soc = pdev->soc;
6539 
6540 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
6541 		"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
6542 		pdev, pdev_id, soc, vdev);
6543 
6544 	/* Check if current pdev's monitor_vdev exists */
6545 	if (!pdev->monitor_vdev) {
6546 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
6547 			"vdev=%pK", vdev);
6548 		qdf_assert(vdev);
6549 	}
6550 
6551 	/* update filter mode, type in pdev structure */
6552 	pdev->mon_filter_mode = filter_val->mode;
6553 	pdev->fp_mgmt_filter = filter_val->fp_mgmt;
6554 	pdev->fp_ctrl_filter = filter_val->fp_ctrl;
6555 	pdev->fp_data_filter = filter_val->fp_data;
6556 	pdev->mo_mgmt_filter = filter_val->mo_mgmt;
6557 	pdev->mo_ctrl_filter = filter_val->mo_ctrl;
6558 	pdev->mo_data_filter = filter_val->mo_data;
6559 
6560 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
6561 		"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
6562 		pdev->mon_filter_mode, pdev->fp_mgmt_filter,
6563 		pdev->fp_ctrl_filter, pdev->fp_data_filter,
6564 		pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
6565 		pdev->mo_data_filter);
6566 
6567 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
6568 
6569 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6570 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
6571 
6572 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
6573 						     pdev, mac_id,
6574 						     htt_tlv_filter);
6575 
6576 		if (status != QDF_STATUS_SUCCESS) {
6577 			dp_err("Failed to send tlv filter for monitor mode rings");
6578 			return status;
6579 		}
6580 
6581 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6582 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6583 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6584 	}
6585 
6586 	htt_tlv_filter.mpdu_start = 1;
6587 	htt_tlv_filter.msdu_start = 1;
6588 	htt_tlv_filter.packet = 1;
6589 	htt_tlv_filter.msdu_end = 1;
6590 	htt_tlv_filter.mpdu_end = 1;
6591 	htt_tlv_filter.packet_header = 1;
6592 	htt_tlv_filter.attention = 1;
6593 	htt_tlv_filter.ppdu_start = 0;
6594 	htt_tlv_filter.ppdu_end = 0;
6595 	htt_tlv_filter.ppdu_end_user_stats = 0;
6596 	htt_tlv_filter.ppdu_end_user_stats_ext = 0;
6597 	htt_tlv_filter.ppdu_end_status_done = 0;
6598 	htt_tlv_filter.header_per_msdu = 1;
6599 	htt_tlv_filter.enable_fp =
6600 		(pdev->mon_filter_mode & MON_FILTER_PASS) ? 1 : 0;
6601 	htt_tlv_filter.enable_md = 0;
6602 	htt_tlv_filter.enable_mo =
6603 		(pdev->mon_filter_mode & MON_FILTER_OTHER) ? 1 : 0;
6604 	htt_tlv_filter.fp_mgmt_filter = pdev->fp_mgmt_filter;
6605 	htt_tlv_filter.fp_ctrl_filter = pdev->fp_ctrl_filter;
6606 	if (pdev->mcopy_mode)
6607 		htt_tlv_filter.fp_data_filter = 0;
6608 	else
6609 		htt_tlv_filter.fp_data_filter = pdev->fp_data_filter;
6610 	htt_tlv_filter.mo_mgmt_filter = pdev->mo_mgmt_filter;
6611 	htt_tlv_filter.mo_ctrl_filter = pdev->mo_ctrl_filter;
6612 	htt_tlv_filter.mo_data_filter = pdev->mo_data_filter;
6613 	htt_tlv_filter.offset_valid = false;
6614 
6615 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6616 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
6617 
6618 		status = dp_monitor_mode_ring_config(soc, mac_for_pdev,
6619 						     pdev, mac_id,
6620 						     htt_tlv_filter);
6621 
6622 		if (status != QDF_STATUS_SUCCESS) {
6623 			dp_err("Failed to send tlv filter for monitor mode rings");
6624 			return status;
6625 		}
6626 	}
6627 
6628 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
6629 
6630 	htt_tlv_filter.mpdu_start = 1;
6631 	htt_tlv_filter.msdu_start = 0;
6632 	htt_tlv_filter.packet = 0;
6633 	htt_tlv_filter.msdu_end = 0;
6634 	htt_tlv_filter.mpdu_end = 0;
6635 	htt_tlv_filter.attention = 0;
6636 	htt_tlv_filter.ppdu_start = 1;
6637 	htt_tlv_filter.ppdu_end = 1;
6638 	htt_tlv_filter.ppdu_end_user_stats = 1;
6639 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
6640 	htt_tlv_filter.ppdu_end_status_done = 1;
6641 	htt_tlv_filter.enable_fp = 1;
6642 	htt_tlv_filter.enable_md = 0;
6643 	htt_tlv_filter.enable_mo = 1;
6644 	if (pdev->mcopy_mode) {
6645 		htt_tlv_filter.packet_header = 1;
6646 	}
6647 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
6648 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
6649 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
6650 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
6651 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
6652 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
6653 	htt_tlv_filter.offset_valid = false;
6654 
6655 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
6656 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
6657 						pdev->pdev_id);
6658 
6659 		htt_h2t_rx_ring_cfg(soc->htt_handle, mac_for_pdev,
6660 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
6661 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
6662 	}
6663 
6664 	return QDF_STATUS_SUCCESS;
6665 }
6666 
6667 /**
6668  * dp_pdev_set_monitor_channel() - set monitor channel num in pdev
6669  * @pdev_handle: Datapath PDEV handle
6670  * @chan_num: channel number to be set
6671  * Return: None
6672  */
6673 static
6674 void dp_pdev_set_monitor_channel(struct cdp_pdev *pdev_handle, int chan_num)
6675 {
6676 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6677 
6678 	pdev->mon_chan_num = chan_num;
6679 }
6680 
6681 /**
6682  * dp_deliver_tx_mgmt() - Deliver mgmt frame for tx capture
6683  * @pdev_handle: Datapath PDEV handle
6684  * @nbuf: Management frame buffer
6685  */
6686 static void
6687 dp_deliver_tx_mgmt(struct cdp_pdev *pdev_handle, qdf_nbuf_t nbuf)
6688 {
6689 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6690 
6691 	dp_deliver_mgmt_frm(pdev, nbuf);
6692 }
6693 
6694 /**
6695  * dp_get_pdev_id_frm_pdev() - get pdev_id
6696  * @pdev_handle: Datapath PDEV handle
6697  *
6698  * Return: pdev_id
6699  */
6700 static
6701 uint8_t dp_get_pdev_id_frm_pdev(struct cdp_pdev *pdev_handle)
6702 {
6703 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6704 
6705 	return pdev->pdev_id;
6706 }
6707 
6708 /**
6709  * dp_get_delay_stats_flag() - get delay stats flag
6710  * @pdev_handle: Datapath PDEV handle
6711  *
6712  * Return: 0 if flag is disabled else 1
6713  */
6714 static
6715 bool dp_get_delay_stats_flag(struct cdp_pdev *pdev_handle)
6716 {
6717 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6718 
6719 	return pdev->delay_stats_flag;
6720 }
6721 
6722 /**
6723  * dp_pdev_set_chan_noise_floor() - set channel noise floor
6724  * @pdev_handle: Datapath PDEV handle
6725  * @chan_noise_floor: Channel Noise Floor
6726  *
6727  * Return: void
6728  */
6729 static
6730 void dp_pdev_set_chan_noise_floor(struct cdp_pdev *pdev_handle,
6731 				  int16_t chan_noise_floor)
6732 {
6733 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
6734 
6735 	pdev->chan_noise_floor = chan_noise_floor;
6736 }
6737 
6738 /**
6739  * dp_vdev_get_filter_ucast_data() - get DP VDEV monitor ucast filter
6740  * @vdev_handle: Datapath VDEV handle
6741  * Return: true on ucast filter flag set
6742  */
6743 static bool dp_vdev_get_filter_ucast_data(struct cdp_vdev *vdev_handle)
6744 {
6745 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6746 	struct dp_pdev *pdev;
6747 
6748 	pdev = vdev->pdev;
6749 
6750 	if ((pdev->fp_data_filter & FILTER_DATA_UCAST) ||
6751 	    (pdev->mo_data_filter & FILTER_DATA_UCAST))
6752 		return true;
6753 
6754 	return false;
6755 }
6756 
6757 /**
6758  * dp_vdev_get_filter_mcast_data() - get DP VDEV monitor mcast filter
6759  * @vdev_handle: Datapath VDEV handle
6760  * Return: true on mcast filter flag set
6761  */
6762 static bool dp_vdev_get_filter_mcast_data(struct cdp_vdev *vdev_handle)
6763 {
6764 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6765 	struct dp_pdev *pdev;
6766 
6767 	pdev = vdev->pdev;
6768 
6769 	if ((pdev->fp_data_filter & FILTER_DATA_MCAST) ||
6770 	    (pdev->mo_data_filter & FILTER_DATA_MCAST))
6771 		return true;
6772 
6773 	return false;
6774 }
6775 
6776 /**
6777  * dp_vdev_get_filter_non_data() - get DP VDEV monitor non_data filter
6778  * @vdev_handle: Datapath VDEV handle
6779  * Return: true on non data filter flag set
6780  */
6781 static bool dp_vdev_get_filter_non_data(struct cdp_vdev *vdev_handle)
6782 {
6783 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6784 	struct dp_pdev *pdev;
6785 
6786 	pdev = vdev->pdev;
6787 
6788 	if ((pdev->fp_mgmt_filter & FILTER_MGMT_ALL) ||
6789 	    (pdev->mo_mgmt_filter & FILTER_MGMT_ALL)) {
6790 		if ((pdev->fp_ctrl_filter & FILTER_CTRL_ALL) ||
6791 		    (pdev->mo_ctrl_filter & FILTER_CTRL_ALL)) {
6792 			return true;
6793 		}
6794 	}
6795 
6796 	return false;
6797 }
6798 
6799 #ifdef MESH_MODE_SUPPORT
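/*
 * dp_peer_set_mesh_mode() - set mesh mode flag on the vdev
 * @vdev_hdl: virtual device object
 * @val: value to be set
 *
 * Return: void
 */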
6800 void dp_peer_set_mesh_mode(struct cdp_vdev *vdev_hdl, uint32_t val)
6801 {
6802 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
6803 
6804 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6805 		FL("val %d"), val);
6806 	vdev->mesh_vdev = val;
6807 }
6808 
6809 /*
6810  * dp_peer_set_mesh_rx_filter() - to set the mesh rx filter
6811  * @vdev_hdl: virtual device object
6812  * @val: value to be set
6813  *
6814  * Return: void
6815  */
6816 void dp_peer_set_mesh_rx_filter(struct cdp_vdev *vdev_hdl, uint32_t val)
6817 {
6818 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
6819 
6820 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
6821 		FL("val %d"), val);
6822 	vdev->mesh_rx_filter = val;
6823 }
6824 #endif
6825 
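/**
 * dp_check_pdev_exists() - check whether the pdev is attached to the soc
 * @soc: DP SOC handle
 * @data: pdev handle to look for
 *
 * Return: true if the pdev is present in soc->pdev_list, false otherwise
 */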
6826 bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data)
6827 {
6828 	uint8_t pdev_count;
6829 
6830 	for (pdev_count = 0; pdev_count < MAX_PDEV_CNT; pdev_count++) {
6831 		if (soc->pdev_list[pdev_count] &&
6832 		    soc->pdev_list[pdev_count] == data)
6833 			return true;
6834 	}
6835 	return false;
6836 }
6837 
6838 /**
6839  * dp_rx_bar_stats_cb(): BAR received stats callback
6840  * @soc: SOC handle
6841  * @cb_ctxt: Call back context
6842  * @reo_status: Reo status
6843  *
6844  * Return: void
6845  */
6846 void dp_rx_bar_stats_cb(struct dp_soc *soc, void *cb_ctxt,
6847 	union hal_reo_status *reo_status)
6848 {
6849 	struct dp_pdev *pdev = (struct dp_pdev *)cb_ctxt;
6850 	struct hal_reo_queue_status *queue_status = &(reo_status->queue_status);
6851 
6852 	if (!dp_check_pdev_exists(soc, pdev)) {
6853 		dp_err_rl("pdev doesn't exist");
6854 		return;
6855 	}
6856 
6857 	if (!qdf_atomic_read(&soc->cmn_init_done))
6858 		return;
6859 
6860 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
6861 		DP_PRINT_STATS("REO stats failure %d",
6862 			       queue_status->header.status);
6863 		qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
6864 		return;
6865 	}
6866 
6867 	pdev->stats.rx.bar_recv_cnt += queue_status->bar_rcvd_cnt;
6868 	qdf_atomic_set(&(pdev->stats_cmd_complete), 1);
6869 
6870 }
6871 
6872 /**
6873  * dp_aggregate_vdev_stats(): Consolidate stats at VDEV level
6874  * @vdev: DP VDEV handle
6875  * @vdev_stats: buffer to be populated with the aggregated vdev stats
6876  * Return: void
6877  */
6878 void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
6879 			     struct cdp_vdev_stats *vdev_stats)
6880 {
6881 	struct dp_peer *peer = NULL;
6882 	struct dp_soc *soc = NULL;
6883 
6884 	if (!vdev || !vdev->pdev)
6885 		return;
6886 
6887 	soc = vdev->pdev->soc;
6888 
6889 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
6890 
6891 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
6892 		dp_update_vdev_stats(vdev_stats, peer);
6893 
6894 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6895 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
6896 			     vdev_stats, vdev->vdev_id,
6897 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
6898 #endif
6899 }
6900 
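/**
 * dp_aggregate_pdev_stats(): Consolidate stats at PDEV level
 * @pdev: DP PDEV handle
 *
 * Return: void
 */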
6901 void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
6902 {
6903 	struct dp_vdev *vdev = NULL;
6904 	struct dp_soc *soc;
6905 	struct cdp_vdev_stats *vdev_stats =
6906 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
6907 
6908 	if (!vdev_stats) {
6909 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6910 			  "DP alloc failure - unable to get alloc vdev stats");
6911 		return;
6912 	}
6913 
6914 	qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
6915 	qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
6916 	qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
6917 
6918 	if (pdev->mcopy_mode)
6919 		DP_UPDATE_STATS(pdev, pdev->invalid_peer);
6920 
6921 	soc = pdev->soc;
6922 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
6923 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
6924 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
6925 
6926 		dp_aggregate_vdev_stats(vdev, vdev_stats);
6927 		dp_update_pdev_stats(pdev, vdev_stats);
6928 		dp_update_pdev_ingress_stats(pdev, vdev);
6929 	}
6930 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
6931 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
6932 	qdf_mem_free(vdev_stats);
6933 
6934 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
6935 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc, &pdev->stats,
6936 			     pdev->pdev_id, UPDATE_PDEV_STATS, pdev->pdev_id);
6937 #endif
6938 }
6939 
6940 /**
6941  * dp_vdev_getstats() - get vdev packet level stats
6942  * @vdev_handle: Datapath VDEV handle
6943  * @stats: cdp network device stats structure
6944  *
6945  * Return: void
6946  */
6947 static void dp_vdev_getstats(void *vdev_handle,
6948 		struct cdp_dev_stats *stats)
6949 {
6950 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
6951 	struct dp_pdev *pdev;
6952 	struct dp_soc *soc;
6953 	struct cdp_vdev_stats *vdev_stats;
6954 
6955 	if (!vdev)
6956 		return;
6957 
6958 	pdev = vdev->pdev;
6959 	if (!pdev)
6960 		return;
6961 
6962 	soc = pdev->soc;
6963 
6964 	vdev_stats = qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
6965 
6966 	if (!vdev_stats) {
6967 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
6968 			  "DP alloc failure - unable to get alloc vdev stats");
6969 		return;
6970 	}
6971 
6972 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
6973 	dp_aggregate_vdev_stats(vdev, vdev_stats);
6974 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
6975 
6976 	stats->tx_packets = vdev_stats->tx_i.rcvd.num;
6977 	stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
6978 
6979 	stats->tx_errors = vdev_stats->tx.tx_failed +
6980 		vdev_stats->tx_i.dropped.dropped_pkt.num;
6981 	stats->tx_dropped = stats->tx_errors;
6982 
6983 	stats->rx_packets = vdev_stats->rx.unicast.num +
6984 		vdev_stats->rx.multicast.num +
6985 		vdev_stats->rx.bcast.num;
6986 	stats->rx_bytes = vdev_stats->rx.unicast.bytes +
6987 		vdev_stats->rx.multicast.bytes +
6988 		vdev_stats->rx.bcast.bytes;
6989 
6990 	qdf_mem_free(vdev_stats);
6992 }
6993 
6994 
6995 /**
6996  * dp_pdev_getstats() - get pdev packet level stats
6997  * @pdev_handle: Datapath PDEV handle
6998  * @stats: cdp network device stats structure
6999  *
7000  * Return: void
7001  */
7002 static void dp_pdev_getstats(void *pdev_handle,
7003 		struct cdp_dev_stats *stats)
7004 {
7005 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7006 
7007 	dp_aggregate_pdev_stats(pdev);
7008 
7009 	stats->tx_packets = pdev->stats.tx_i.rcvd.num;
7010 	stats->tx_bytes = pdev->stats.tx_i.rcvd.bytes;
7011 
7012 	stats->tx_errors = pdev->stats.tx.tx_failed +
7013 		pdev->stats.tx_i.dropped.dropped_pkt.num;
7014 	stats->tx_dropped = stats->tx_errors;
7015 
7016 	stats->rx_packets = pdev->stats.rx.unicast.num +
7017 		pdev->stats.rx.multicast.num +
7018 		pdev->stats.rx.bcast.num;
7019 	stats->rx_bytes = pdev->stats.rx.unicast.bytes +
7020 		pdev->stats.rx.multicast.bytes +
7021 		pdev->stats.rx.bcast.bytes;
7022 	stats->rx_errors = pdev->stats.err.desc_alloc_fail +
7023 		pdev->stats.err.ip_csum_err +
7024 		pdev->stats.err.tcp_udp_csum_err +
7025 		pdev->stats.rx.err.mic_err +
7026 		pdev->stats.rx.err.decrypt_err +
7027 		pdev->stats.err.rxdma_error +
7028 		pdev->stats.err.reo_error;
7029 	stats->rx_dropped = pdev->stats.dropped.msdu_not_done +
7030 		pdev->stats.dropped.mec +
7031 		pdev->stats.dropped.mesh_filter +
7032 		pdev->stats.dropped.wifi_parse +
7033 		pdev->stats.dropped.mon_rx_drop +
7034 		pdev->stats.dropped.mon_radiotap_update_err;
7035 }
7036 
7037 /**
7038  * dp_get_device_stats() - get interface level packet stats
7039  * @handle: device handle
7040  * @stats: cdp network device stats structure
7041  * @type: device type pdev/vdev
7042  *
7043  * Return: void
7044  */
7045 static void dp_get_device_stats(void *handle,
7046 		struct cdp_dev_stats *stats, uint8_t type)
7047 {
7048 	switch (type) {
7049 	case UPDATE_VDEV_STATS:
7050 		dp_vdev_getstats(handle, stats);
7051 		break;
7052 	case UPDATE_PDEV_STATS:
7053 		dp_pdev_getstats(handle, stats);
7054 		break;
7055 	default:
7056 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7057 			"apstats cannot be updated for this input "
7058 			"type %d", type);
7059 		break;
7060 	}
7062 }
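
/*
 * Example usage (a sketch; pdev_handle stands for any valid cdp pdev
 * handle): fetch pdev-level interface stats through the common entry
 * point above:
 *
 *	struct cdp_dev_stats dev_stats;
 *
 *	dp_get_device_stats(pdev_handle, &dev_stats, UPDATE_PDEV_STATS);
 */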
7063 
7064 const
7065 char *dp_srng_get_str_from_hal_ring_type(enum hal_ring_type ring_type)
7066 {
7067 	switch (ring_type) {
7068 	case REO_DST:
7069 		return "Reo_dst";
7070 	case REO_EXCEPTION:
7071 		return "Reo_exception";
7072 	case REO_CMD:
7073 		return "Reo_cmd";
7074 	case REO_REINJECT:
7075 		return "Reo_reinject";
7076 	case REO_STATUS:
7077 		return "Reo_status";
7078 	case WBM2SW_RELEASE:
7079 		return "wbm2sw_release";
7080 	case TCL_DATA:
7081 		return "tcl_data";
7082 	case TCL_CMD:
7083 		return "tcl_cmd";
7084 	case TCL_STATUS:
7085 		return "tcl_status";
7086 	case SW2WBM_RELEASE:
7087 		return "sw2wbm_release";
7088 	case RXDMA_BUF:
7089 		return "Rxdma_buf";
7090 	case RXDMA_DST:
7091 		return "Rxdma_dst";
7092 	case RXDMA_MONITOR_BUF:
7093 		return "Rxdma_monitor_buf";
7094 	case RXDMA_MONITOR_DESC:
7095 		return "Rxdma_monitor_desc";
7096 	case RXDMA_MONITOR_STATUS:
7097 		return "Rxdma_monitor_status";
7098 	default:
7099 		dp_err("Invalid ring type");
7100 		break;
7101 	}
7102 	return "Invalid";
7103 }
7104 
7105 /*
 * dp_print_napi_stats(): print NAPI stats
 * @soc: soc handle
7108  */
7109 void dp_print_napi_stats(struct dp_soc *soc)
7110 {
7111 	hif_print_napi_stats(soc->hif_handle);
7112 }
7113 
7114 /**
7115  * dp_txrx_host_stats_clr(): Reinitialize the txrx stats
7116  * @vdev: DP_VDEV handle
7117  *
7118  * Return:void
7119  */
7120 static inline void
7121 dp_txrx_host_stats_clr(struct dp_vdev *vdev)
7122 {
7123 	struct dp_peer *peer = NULL;
7124 
7125 	if (!vdev || !vdev->pdev)
7126 		return;
7127 
7128 	DP_STATS_CLR(vdev->pdev);
7129 	DP_STATS_CLR(vdev->pdev->soc);
7130 	DP_STATS_CLR(vdev);
7131 
7132 	hif_clear_napi_stats(vdev->pdev->soc->hif_handle);
7133 
7134 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if (!peer)
			continue;
7137 		DP_STATS_CLR(peer);
7138 
7139 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7140 		dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
7141 				     &peer->stats,  peer->peer_ids[0],
7142 				     UPDATE_PEER_STATS, vdev->pdev->pdev_id);
7143 #endif
7144 	}
7145 
7146 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
7147 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
7148 			     &vdev->stats,  vdev->vdev_id,
7149 			     UPDATE_VDEV_STATS, vdev->pdev->pdev_id);
7150 #endif
7151 }
7152 
7153 /*
7154  * dp_get_host_peer_stats()- function to print peer stats
7155  * @pdev_handle: DP_PDEV handle
7156  * @mac_addr: mac address of the peer
7157  *
7158  * Return: void
7159  */
7160 static void
7161 dp_get_host_peer_stats(struct cdp_pdev *pdev_handle, char *mac_addr)
7162 {
7163 	struct dp_peer *peer;
7164 	uint8_t local_id;
7165 
7166 	if (!mac_addr) {
7167 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7168 			  "Invalid MAC address\n");
7169 		return;
7170 	}
7171 
7172 	peer = (struct dp_peer *)dp_find_peer_by_addr(pdev_handle, mac_addr,
7173 			&local_id);
7174 
7175 	if (!peer) {
7176 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7177 			  "%s: Invalid peer\n", __func__);
7178 		return;
7179 	}
7180 
7181 	/* Making sure the peer is for the specific pdev */
7182 	if ((struct dp_pdev *)pdev_handle != peer->vdev->pdev) {
7183 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
7184 			  "%s: Peer is not for this pdev\n", __func__);
7185 		return;
7186 	}
7187 
7188 	dp_print_peer_stats(peer);
7189 	dp_peer_rxtid_stats(peer, dp_rx_tid_stats_cb, NULL);
7190 }
7191 
7192 /**
7193  * dp_txrx_stats_help() - Helper function for Txrx_Stats
7194  *
7195  * Return: None
7196  */
7197 static void dp_txrx_stats_help(void)
7198 {
7199 	dp_info("Command: iwpriv wlan0 txrx_stats <stats_option> <mac_id>");
7200 	dp_info("stats_option:");
7201 	dp_info("  1 -- HTT Tx Statistics");
7202 	dp_info("  2 -- HTT Rx Statistics");
7203 	dp_info("  3 -- HTT Tx HW Queue Statistics");
7204 	dp_info("  4 -- HTT Tx HW Sched Statistics");
7205 	dp_info("  5 -- HTT Error Statistics");
7206 	dp_info("  6 -- HTT TQM Statistics");
7207 	dp_info("  7 -- HTT TQM CMDQ Statistics");
7208 	dp_info("  8 -- HTT TX_DE_CMN Statistics");
7209 	dp_info("  9 -- HTT Tx Rate Statistics");
7210 	dp_info(" 10 -- HTT Rx Rate Statistics");
7211 	dp_info(" 11 -- HTT Peer Statistics");
7212 	dp_info(" 12 -- HTT Tx SelfGen Statistics");
7213 	dp_info(" 13 -- HTT Tx MU HWQ Statistics");
7214 	dp_info(" 14 -- HTT RING_IF_INFO Statistics");
7215 	dp_info(" 15 -- HTT SRNG Statistics");
7216 	dp_info(" 16 -- HTT SFM Info Statistics");
7217 	dp_info(" 17 -- HTT PDEV_TX_MU_MIMO_SCHED INFO Statistics");
7218 	dp_info(" 18 -- HTT Peer List Details");
7219 	dp_info(" 20 -- Clear Host Statistics");
7220 	dp_info(" 21 -- Host Rx Rate Statistics");
7221 	dp_info(" 22 -- Host Tx Rate Statistics");
7222 	dp_info(" 23 -- Host Tx Statistics");
7223 	dp_info(" 24 -- Host Rx Statistics");
7224 	dp_info(" 25 -- Host AST Statistics");
7225 	dp_info(" 26 -- Host SRNG PTR Statistics");
7226 	dp_info(" 27 -- Host Mon Statistics");
7227 	dp_info(" 28 -- Host REO Queue Statistics");
7228 	dp_info(" 29 -- Host Soc cfg param Statistics");
7229 	dp_info(" 30 -- Host pdev cfg param Statistics");
7230 }
7231 
7232 /**
7233  * dp_print_host_stats()- Function to print the stats aggregated at host
7234  * @vdev_handle: DP_VDEV handle
 * @req: stats request containing the host stats type to print
7236  *
7237  * Return: 0 on success, print error message in case of failure
7238  */
7239 static int
7240 dp_print_host_stats(struct cdp_vdev *vdev_handle,
7241 		    struct cdp_txrx_stats_req *req)
7242 {
7243 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7244 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
7245 	enum cdp_host_txrx_stats type =
7246 			dp_stats_mapping_table[req->stats][STATS_HOST];
7247 
7248 	dp_aggregate_pdev_stats(pdev);
7249 
7250 	switch (type) {
7251 	case TXRX_CLEAR_STATS:
7252 		dp_txrx_host_stats_clr(vdev);
7253 		break;
7254 	case TXRX_RX_RATE_STATS:
7255 		dp_print_rx_rates(vdev);
7256 		break;
7257 	case TXRX_TX_RATE_STATS:
7258 		dp_print_tx_rates(vdev);
7259 		break;
7260 	case TXRX_TX_HOST_STATS:
7261 		dp_print_pdev_tx_stats(pdev);
7262 		dp_print_soc_tx_stats(pdev->soc);
7263 		break;
7264 	case TXRX_RX_HOST_STATS:
7265 		dp_print_pdev_rx_stats(pdev);
7266 		dp_print_soc_rx_stats(pdev->soc);
7267 		break;
7268 	case TXRX_AST_STATS:
7269 		dp_print_ast_stats(pdev->soc);
7270 		dp_print_peer_table(vdev);
7271 		break;
7272 	case TXRX_SRNG_PTR_STATS:
7273 		dp_print_ring_stats(pdev);
7274 		break;
7275 	case TXRX_RX_MON_STATS:
7276 		dp_print_pdev_rx_mon_stats(pdev);
7277 		break;
7278 	case TXRX_REO_QUEUE_STATS:
7279 		dp_get_host_peer_stats((struct cdp_pdev *)pdev, req->peer_addr);
7280 		break;
7281 	case TXRX_SOC_CFG_PARAMS:
7282 		dp_print_soc_cfg_params(pdev->soc);
7283 		break;
7284 	case TXRX_PDEV_CFG_PARAMS:
7285 		dp_print_pdev_cfg_params(pdev);
7286 		break;
	case TXRX_NAPI_STATS:
		dp_print_napi_stats(pdev->soc);
		break;
7289 	case TXRX_SOC_INTERRUPT_STATS:
7290 		dp_print_soc_interrupt_stats(pdev->soc);
7291 		break;
7292 	default:
7293 		dp_info("Wrong Input For TxRx Host Stats");
7294 		dp_txrx_stats_help();
7295 		break;
7296 	}
7297 	return 0;
7298 }
7299 
7300 /*
7301  * dp_ppdu_ring_reset()- Reset PPDU Stats ring
7302  * @pdev: DP_PDEV handle
7303  *
7304  * Return: void
7305  */
7306 static void
7307 dp_ppdu_ring_reset(struct dp_pdev *pdev)
7308 {
7309 	struct htt_rx_ring_tlv_filter htt_tlv_filter;
7310 	int mac_id;
7311 
7312 	qdf_mem_zero(&(htt_tlv_filter), sizeof(htt_tlv_filter));
7313 
7314 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
7315 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
7316 							pdev->pdev_id);
7317 
7318 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
7319 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
7320 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
7321 	}
7322 }
7323 
7324 /*
7325  * dp_ppdu_ring_cfg()- Configure PPDU Stats ring
7326  * @pdev: DP_PDEV handle
7327  *
7328  * Return: void
7329  */
7330 static void
7331 dp_ppdu_ring_cfg(struct dp_pdev *pdev)
7332 {
7333 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
7334 	int mac_id;
7335 
7336 	htt_tlv_filter.mpdu_start = 1;
7337 	htt_tlv_filter.msdu_start = 0;
7338 	htt_tlv_filter.packet = 0;
7339 	htt_tlv_filter.msdu_end = 0;
7340 	htt_tlv_filter.mpdu_end = 0;
7341 	htt_tlv_filter.attention = 0;
7342 	htt_tlv_filter.ppdu_start = 1;
7343 	htt_tlv_filter.ppdu_end = 1;
7344 	htt_tlv_filter.ppdu_end_user_stats = 1;
7345 	htt_tlv_filter.ppdu_end_user_stats_ext = 1;
7346 	htt_tlv_filter.ppdu_end_status_done = 1;
7347 	htt_tlv_filter.enable_fp = 1;
7348 	htt_tlv_filter.enable_md = 0;
7349 	if (pdev->neighbour_peers_added &&
7350 	    pdev->soc->hw_nac_monitor_support) {
7351 		htt_tlv_filter.enable_md = 1;
7352 		htt_tlv_filter.packet_header = 1;
7353 	}
7354 	if (pdev->mcopy_mode) {
7355 		htt_tlv_filter.packet_header = 1;
7356 		htt_tlv_filter.enable_mo = 1;
7357 	}
7358 	htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
7359 	htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
7360 	htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
7361 	htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
7362 	htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
7363 	htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
7364 	if (pdev->neighbour_peers_added &&
7365 	    pdev->soc->hw_nac_monitor_support)
7366 		htt_tlv_filter.md_data_filter = FILTER_DATA_ALL;
7367 
7368 	htt_tlv_filter.offset_valid = false;
7369 
7370 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
7371 		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id,
7372 						pdev->pdev_id);
7373 
7374 		htt_h2t_rx_ring_cfg(pdev->soc->htt_handle, mac_for_pdev,
7375 			pdev->rxdma_mon_status_ring[mac_id].hal_srng,
7376 			RXDMA_MONITOR_STATUS, RX_BUFFER_SIZE, &htt_tlv_filter);
7377 	}
7378 }
7379 
7380 /*
 * is_ppdu_txrx_capture_enabled() - API to check whether any of the pktlog
 *                              ppdu stats, tx sniffer or mcopy capture
 *                              modes is enabled.
 * @pdev: dp pdev handle.
 *
 * Return: true if none of the capture modes is enabled, false otherwise.
7386  */
7387 static inline bool is_ppdu_txrx_capture_enabled(struct dp_pdev *pdev)
7388 {
	return !pdev->pktlog_ppdu_stats && !pdev->tx_sniffer_enable &&
	       !pdev->mcopy_mode;
7394 }
7395 
7396 /*
 * dp_set_bpr_enable() - API to enable/disable bpr feature
 * @pdev_handle: DP_PDEV handle.
 * @val: Provided value.
 *
 * Return: 0 for success. nonzero for failure.
7402  */
7403 static QDF_STATUS
7404 dp_set_bpr_enable(struct cdp_pdev *pdev_handle, int val)
7405 {
7406 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7407 
7408 	switch (val) {
7409 	case CDP_BPR_DISABLE:
7410 		pdev->bpr_enable = CDP_BPR_DISABLE;
7411 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
7412 		    !pdev->tx_sniffer_enable && !pdev->mcopy_mode) {
7413 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7414 		} else if (pdev->enhanced_stats_en &&
7415 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
7416 			   !pdev->pktlog_ppdu_stats) {
7417 			dp_h2t_cfg_stats_msg_send(pdev,
7418 						  DP_PPDU_STATS_CFG_ENH_STATS,
7419 						  pdev->pdev_id);
7420 		}
7421 		break;
7422 	case CDP_BPR_ENABLE:
7423 		pdev->bpr_enable = CDP_BPR_ENABLE;
7424 		if (!pdev->enhanced_stats_en && !pdev->tx_sniffer_enable &&
7425 		    !pdev->mcopy_mode && !pdev->pktlog_ppdu_stats) {
7426 			dp_h2t_cfg_stats_msg_send(pdev,
7427 						  DP_PPDU_STATS_CFG_BPR,
7428 						  pdev->pdev_id);
7429 		} else if (pdev->enhanced_stats_en &&
7430 			   !pdev->tx_sniffer_enable && !pdev->mcopy_mode &&
7431 			   !pdev->pktlog_ppdu_stats) {
7432 			dp_h2t_cfg_stats_msg_send(pdev,
7433 						  DP_PPDU_STATS_CFG_BPR_ENH,
7434 						  pdev->pdev_id);
7435 		} else if (pdev->pktlog_ppdu_stats) {
7436 			dp_h2t_cfg_stats_msg_send(pdev,
7437 						  DP_PPDU_STATS_CFG_BPR_PKTLOG,
7438 						  pdev->pdev_id);
7439 		}
7440 		break;
7441 	default:
7442 		break;
7443 	}
7444 
7445 	return QDF_STATUS_SUCCESS;
7446 }
7447 
7448 /*
 * dp_pdev_tid_stats_ingress_inc() - increment ingress stack counter by @val
7450  * @pdev: pdev handle
7451  * @val: increase in value
7452  *
7453  * Return: void
7454  */
7455 static void
7456 dp_pdev_tid_stats_ingress_inc(struct cdp_pdev *pdev, uint32_t val)
7457 {
7458 	struct dp_pdev *dp_pdev = (struct dp_pdev *)pdev;
7459 
7460 	dp_pdev->stats.tid_stats.ingress_stack += val;
7461 }
7462 
7463 /*
 * dp_pdev_tid_stats_osif_drop() - increment osif drop counter by @val
7465  * @pdev: pdev handle
7466  * @val: increase in value
7467  *
7468  * Return: void
7469  */
7470 static void
7471 dp_pdev_tid_stats_osif_drop(struct cdp_pdev *pdev, uint32_t val)
7472 {
7473 	struct dp_pdev *dp_pdev = (struct dp_pdev *)pdev;
7474 
7475 	dp_pdev->stats.tid_stats.osif_drop += val;
7476 }
7477 
7478 static inline void
7479 dp_pdev_disable_mcopy_code(struct dp_pdev *pdev)
7480 {
7481 	pdev->mcopy_mode = 0;
7482 	qdf_nbuf_queue_free(&pdev->rx_ppdu_buf_q);
7483 }
7484 
7485 /*
7486  * dp_config_debug_sniffer()- API to enable/disable debug sniffer
7487  * @pdev_handle: DP_PDEV handle
7488  * @val: user provided value
7489  *
7490  * Return: 0 for success. nonzero for failure.
7491  */
7492 static QDF_STATUS
7493 dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
7494 {
7495 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7496 	QDF_STATUS status = QDF_STATUS_SUCCESS;
7497 
7498 	if (pdev->mcopy_mode)
7499 		dp_reset_monitor_mode(pdev_handle);
7500 
7501 	switch (val) {
7502 	case 0:
7503 		pdev->tx_sniffer_enable = 0;
7504 		if (pdev->mcopy_mode)
7505 			dp_pdev_disable_mcopy_code(pdev);
7506 
7507 		pdev->monitor_configured = false;
7508 
7509 		if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
7510 		    !pdev->bpr_enable) {
7511 			dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7512 			dp_ppdu_ring_reset(pdev);
7513 		} else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
7514 			dp_h2t_cfg_stats_msg_send(pdev,
7515 				DP_PPDU_STATS_CFG_ENH_STATS, pdev->pdev_id);
7516 		} else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
7517 			dp_h2t_cfg_stats_msg_send(pdev,
7518 						  DP_PPDU_STATS_CFG_BPR_ENH,
7519 						  pdev->pdev_id);
7520 		} else {
7521 			dp_h2t_cfg_stats_msg_send(pdev,
7522 						  DP_PPDU_STATS_CFG_BPR,
7523 						  pdev->pdev_id);
7524 		}
7525 		break;
7526 
7527 	case 1:
7528 		pdev->tx_sniffer_enable = 1;
7529 		if (pdev->mcopy_mode)
7530 			dp_pdev_disable_mcopy_code(pdev);
7531 		pdev->monitor_configured = false;
7532 
7533 		if (!pdev->pktlog_ppdu_stats)
7534 			dp_h2t_cfg_stats_msg_send(pdev,
7535 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
7536 		break;
7537 	case 2:
7538 		if (pdev->monitor_vdev) {
7539 			status = QDF_STATUS_E_RESOURCES;
7540 			break;
7541 		}
7542 
7543 		pdev->mcopy_mode = 1;
7544 		dp_pdev_configure_monitor_rings(pdev);
7545 		pdev->monitor_configured = true;
7546 		pdev->tx_sniffer_enable = 0;
7547 
7548 		if (!pdev->pktlog_ppdu_stats)
7549 			dp_h2t_cfg_stats_msg_send(pdev,
7550 				DP_PPDU_STATS_CFG_SNIFFER, pdev->pdev_id);
7551 		break;
7552 
7553 	default:
7554 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7555 			"Invalid value");
7556 		break;
7557 	}
7558 	return status;
7559 }
7560 
7561 /*
 * dp_enable_enhanced_stats()- API to enable enhanced statistics
7563  * @pdev_handle: DP_PDEV handle
7564  *
7565  * Return: void
7566  */
7567 static void
7568 dp_enable_enhanced_stats(struct cdp_pdev *pdev_handle)
7569 {
7570 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7571 
7572 	if (pdev->enhanced_stats_en == 0)
7573 		dp_cal_client_timer_start(pdev->cal_client_ctx);
7574 
7575 	pdev->enhanced_stats_en = 1;
7576 
7577 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
7578 	    !pdev->monitor_vdev)
7579 		dp_ppdu_ring_cfg(pdev);
7580 
7581 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
		dp_h2t_cfg_stats_msg_send(pdev, DP_PPDU_STATS_CFG_ENH_STATS,
					  pdev->pdev_id);
7583 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
7584 		dp_h2t_cfg_stats_msg_send(pdev,
7585 					  DP_PPDU_STATS_CFG_BPR_ENH,
7586 					  pdev->pdev_id);
7587 	}
7588 }
7589 
7590 /*
 * dp_disable_enhanced_stats()- API to disable enhanced statistics
7592  * @pdev_handle: DP_PDEV handle
7593  *
7594  * Return: void
7595  */
7596 static void
7597 dp_disable_enhanced_stats(struct cdp_pdev *pdev_handle)
7598 {
7599 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7600 
7601 	if (pdev->enhanced_stats_en == 1)
7602 		dp_cal_client_timer_stop(pdev->cal_client_ctx);
7603 
7604 	pdev->enhanced_stats_en = 0;
7605 
7606 	if (is_ppdu_txrx_capture_enabled(pdev) && !pdev->bpr_enable) {
7607 		dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
7608 	} else if (is_ppdu_txrx_capture_enabled(pdev) && pdev->bpr_enable) {
7609 		dp_h2t_cfg_stats_msg_send(pdev,
7610 					  DP_PPDU_STATS_CFG_BPR,
7611 					  pdev->pdev_id);
7612 	}
7613 
7614 	if (!pdev->mcopy_mode && !pdev->neighbour_peers_added &&
7615 	    !pdev->monitor_vdev)
7616 		dp_ppdu_ring_reset(pdev);
7617 }
7618 
7619 /*
7620  * dp_get_fw_peer_stats()- function to print peer stats
7621  * @pdev_handle: DP_PDEV handle
7622  * @mac_addr: mac address of the peer
7623  * @cap: Type of htt stats requested
7624  * @is_wait: if set, wait on completion from firmware response
7625  *
 * Currently supports only MAC ID based requests:
7627  *	1: HTT_PEER_STATS_REQ_MODE_NO_QUERY
7628  *	2: HTT_PEER_STATS_REQ_MODE_QUERY_TQM
7629  *	3: HTT_PEER_STATS_REQ_MODE_FLUSH_TQM
7630  *
7631  * Return: void
7632  */
7633 static void
7634 dp_get_fw_peer_stats(struct cdp_pdev *pdev_handle, uint8_t *mac_addr,
7635 		uint32_t cap, uint32_t is_wait)
7636 {
7637 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7638 	int i;
7639 	uint32_t config_param0 = 0;
7640 	uint32_t config_param1 = 0;
7641 	uint32_t config_param2 = 0;
7642 	uint32_t config_param3 = 0;
7643 
7644 	HTT_DBG_EXT_STATS_PEER_INFO_IS_MAC_ADDR_SET(config_param0, 1);
7645 	config_param0 |= (1 << (cap + 1));
7646 
	for (i = 0; i < HTT_PEER_STATS_MAX_TLV; i++)
		config_param1 |= (1 << i);
7650 
7651 	config_param2 |= (mac_addr[0] & 0x000000ff);
7652 	config_param2 |= ((mac_addr[1] << 8) & 0x0000ff00);
7653 	config_param2 |= ((mac_addr[2] << 16) & 0x00ff0000);
7654 	config_param2 |= ((mac_addr[3] << 24) & 0xff000000);
7655 
7656 	config_param3 |= (mac_addr[4] & 0x000000ff);
7657 	config_param3 |= ((mac_addr[5] << 8) & 0x0000ff00);
7658 
7659 	if (is_wait) {
7660 		qdf_event_reset(&pdev->fw_peer_stats_event);
7661 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
7662 					  config_param0, config_param1,
7663 					  config_param2, config_param3,
7664 					  0, 1, 0);
7665 		qdf_wait_single_event(&pdev->fw_peer_stats_event,
7666 				      DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC);
7667 	} else {
7668 		dp_h2t_ext_stats_msg_send(pdev, HTT_DBG_EXT_STATS_PEER_INFO,
7669 					  config_param0, config_param1,
7670 					  config_param2, config_param3,
7671 					  0, 0, 0);
7672 	}
7673 
7674 }
7675 
/* This struct definition will be removed from here
 * once it gets added to the FW headers.
 */
struct httstats_cmd_req {
	uint32_t    config_param0;
	uint32_t    config_param1;
	uint32_t    config_param2;
	uint32_t    config_param3;
	int         cookie;
	u_int8_t    stats_id;
};
7686 
7687 /*
 * dp_get_htt_stats: function to process the httstats request
 * @pdev_handle: DP pdev handle
 * @data: pointer to request data
 * @data_len: length of request data
 *
 * Return: void
7694  */
7695 static void
7696 dp_get_htt_stats(struct cdp_pdev *pdev_handle, void *data, uint32_t data_len)
7697 {
7698 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7699 	struct httstats_cmd_req *req = (struct httstats_cmd_req *)data;
7700 
7701 	QDF_ASSERT(data_len == sizeof(struct httstats_cmd_req));
7702 	dp_h2t_ext_stats_msg_send(pdev, req->stats_id,
7703 				req->config_param0, req->config_param1,
7704 				req->config_param2, req->config_param3,
7705 				req->cookie, 0, 0);
7706 }
7707 
7708 /*
7709  * dp_set_pdev_param: function to set parameters in pdev
7710  * @pdev_handle: DP pdev handle
7711  * @param: parameter type to be set
7712  * @val: value of parameter to be set
7713  *
7714  * Return: 0 for success. nonzero for failure.
7715  */
7716 static QDF_STATUS dp_set_pdev_param(struct cdp_pdev *pdev_handle,
7717 				    enum cdp_pdev_param_type param,
7718 				    uint32_t val)
7719 {
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;

	switch (param) {
7722 	case CDP_CONFIG_DEBUG_SNIFFER:
7723 		return dp_config_debug_sniffer(pdev_handle, val);
7724 	case CDP_CONFIG_BPR_ENABLE:
7725 		return dp_set_bpr_enable(pdev_handle, val);
7726 	case CDP_CONFIG_PRIMARY_RADIO:
7727 		pdev->is_primary = val;
7728 		break;
7729 	case CDP_CONFIG_CAPTURE_LATENCY:
7730 		if (val == 1)
7731 			pdev->latency_capture_enable = true;
7732 		else
7733 			pdev->latency_capture_enable = false;
7734 		break;
7735 	case CDP_INGRESS_STATS:
7736 		dp_pdev_tid_stats_ingress_inc(pdev_handle, val);
7737 		break;
7738 	case CDP_OSIF_DROP:
7739 		dp_pdev_tid_stats_osif_drop(pdev_handle, val);
7740 		break;
7741 	case CDP_CONFIG_ENH_RX_CAPTURE:
7742 		return dp_config_enh_rx_capture(pdev_handle, val);
7743 	case CDP_CONFIG_TX_CAPTURE:
7744 		return dp_config_enh_tx_capture(pdev_handle, val);
7745 	default:
7746 		return QDF_STATUS_E_INVAL;
7747 	}
7748 	return QDF_STATUS_SUCCESS;
7749 }
7750 
7751 /*
7752  * dp_calculate_delay_stats: function to get rx delay stats
7753  * @vdev_handle: DP vdev handle
7754  * @nbuf: skb
7755  *
7756  * Return: void
7757  */
7758 static void dp_calculate_delay_stats(struct cdp_vdev *vdev_handle,
7759 				     qdf_nbuf_t nbuf)
7760 {
7761 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7762 
7763 	dp_rx_compute_delay(vdev, nbuf);
7764 }
7765 
7766 /*
 * dp_get_vdev_param: function to get parameters from vdev
 * @vdev_handle: DP vdev handle
 * @param: parameter type whose value is to be fetched
 *
 * Return: parameter value
7771  */
7772 static uint32_t dp_get_vdev_param(struct cdp_vdev *vdev_handle,
7773 				  enum cdp_vdev_param_type param)
7774 {
7775 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
7776 	uint32_t val;
7777 
7778 	switch (param) {
7779 	case CDP_ENABLE_WDS:
7780 		val = vdev->wds_enabled;
7781 		break;
7782 	case CDP_ENABLE_MEC:
7783 		val = vdev->mec_enabled;
7784 		break;
7785 	case CDP_ENABLE_DA_WAR:
7786 		val = vdev->pdev->soc->da_war_enabled;
7787 		break;
7788 	default:
7789 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7790 			  "param value %d is wrong\n",
7791 			  param);
7792 		val = -1;
7793 		break;
7794 	}
7795 
7796 	return val;
7797 }
7798 
7799 /*
 * dp_set_vdev_param: function to set parameters in vdev
 * @vdev_handle: DP vdev handle
 * @param: parameter type to be set
 * @val: value of parameter to be set
 *
 * Return: void
7805  */
7806 static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
7807 		enum cdp_vdev_param_type param, uint32_t val)
7808 {
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;

	switch (param) {
7811 	case CDP_ENABLE_WDS:
7812 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7813 			  "wds_enable %d for vdev(%pK) id(%d)\n",
7814 			  val, vdev, vdev->vdev_id);
7815 		vdev->wds_enabled = val;
7816 		break;
7817 	case CDP_ENABLE_MEC:
7818 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7819 			  "mec_enable %d for vdev(%pK) id(%d)\n",
7820 			  val, vdev, vdev->vdev_id);
7821 		vdev->mec_enabled = val;
7822 		break;
7823 	case CDP_ENABLE_DA_WAR:
7824 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
7825 			  "da_war_enable %d for vdev(%pK) id(%d)\n",
7826 			  val, vdev, vdev->vdev_id);
7827 		vdev->pdev->soc->da_war_enabled = val;
7828 		dp_wds_flush_ast_table_wifi3(((struct cdp_soc_t *)
7829 					     vdev->pdev->soc));
7830 		break;
7831 	case CDP_ENABLE_NAWDS:
7832 		vdev->nawds_enabled = val;
7833 		break;
7834 	case CDP_ENABLE_MCAST_EN:
7835 		vdev->mcast_enhancement_en = val;
7836 		break;
7837 	case CDP_ENABLE_PROXYSTA:
7838 		vdev->proxysta_vdev = val;
7839 		break;
7840 	case CDP_UPDATE_TDLS_FLAGS:
7841 		vdev->tdls_link_connected = val;
7842 		break;
7843 	case CDP_CFG_WDS_AGING_TIMER:
7844 		if (val == 0)
7845 			qdf_timer_stop(&vdev->pdev->soc->ast_aging_timer);
7846 		else if (val != vdev->wds_aging_timer_val)
7847 			qdf_timer_mod(&vdev->pdev->soc->ast_aging_timer, val);
7848 
7849 		vdev->wds_aging_timer_val = val;
7850 		break;
7851 	case CDP_ENABLE_AP_BRIDGE:
7852 		if (wlan_op_mode_sta != vdev->opmode)
7853 			vdev->ap_bridge_enabled = val;
7854 		else
7855 			vdev->ap_bridge_enabled = false;
7856 		break;
7857 	case CDP_ENABLE_CIPHER:
7858 		vdev->sec_type = val;
7859 		break;
7860 	case CDP_ENABLE_QWRAP_ISOLATION:
7861 		vdev->isolation_vdev = val;
7862 		break;
7863 	case CDP_UPDATE_MULTIPASS:
7864 		vdev->multipass_en = val;
7865 		break;
7866 	default:
7867 		break;
7868 	}
7869 
7870 	dp_tx_vdev_update_search_flags(vdev);
7871 }
7872 
7873 /**
7874  * dp_peer_set_nawds: set nawds bit in peer
7875  * @peer_handle: pointer to peer
7876  * @value: enable/disable nawds
7877  *
7878  * return: void
7879  */
7880 static void dp_peer_set_nawds(struct cdp_peer *peer_handle, uint8_t value)
7881 {
	struct dp_peer *peer = (struct dp_peer *)peer_handle;

	peer->nawds_enabled = value;
7884 }
7885 
7886 /**
7887  * dp_peer_set_tx_capture_enabled: Set tx_cap_enabled bit in peer
7888  * @peer_handle: Peer handle
7889  * @value: Enable/disable setting for tx_cap_enabled
7890  *
7891  * Return: None
7892  */
7893 static void
7894 dp_peer_set_tx_capture_enabled(struct cdp_peer *peer_handle, bool value)
7895 {
7896 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
7897 
7898 	peer->tx_cap_enabled = value;
7899 }
7900 
7901 /**
7902  * dp_peer_set_rx_capture_enabled: Set rx_cap_enabled bit in peer
7903  * @peer_handle: Peer handle
7904  * @value: Enable/disable setting for rx_cap_enabled
7905  *
7906  * Return: None
7907  */
7908 static void
7909 dp_peer_set_rx_capture_enabled(struct cdp_peer *peer_handle, bool value)
7910 {
7911 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
7912 
7913 	peer->rx_cap_enabled = value;
7914 }
7915 
7916 /**
 * dp_peer_update_pkt_capture_params: Set Rx & Tx Capture flags for a peer
 * @pdev: DP pdev handle
 * @is_rx_pkt_cap_enable: enable/disable Rx packet capture in monitor mode
 * @is_tx_pkt_cap_enable: enable/disable Tx packet capture in monitor mode
 * @peer_mac: MAC address for which the above need to be enabled/disabled
 *
 * Return: QDF_STATUS_SUCCESS if the capture flags were updated,
 *	   QDF_STATUS_E_FAILURE if the peer could not be found
7923  */
7924 QDF_STATUS
7925 dp_peer_update_pkt_capture_params(struct cdp_pdev *pdev,
7926 				  bool is_rx_pkt_cap_enable,
7927 				  bool is_tx_pkt_cap_enable,
				  uint8_t *peer_mac)
{
7931 	struct dp_peer *peer;
7932 	uint8_t local_id;
7933 
7934 	peer = (struct dp_peer *)dp_find_peer_by_addr(pdev,
7935 			peer_mac, &local_id);
7936 
7937 	if (!peer) {
7938 		dp_err("Invalid Peer");
7939 		return QDF_STATUS_E_FAILURE;
7940 	}
7941 
7942 	dp_peer_set_rx_capture_enabled((struct cdp_peer *)peer,
7943 				       is_rx_pkt_cap_enable);
7944 	dp_peer_set_tx_capture_enabled((struct cdp_peer *)peer,
7945 				       is_tx_pkt_cap_enable);
7946 	return QDF_STATUS_SUCCESS;
7947 }
7948 
7949 /*
7950  * dp_set_vdev_dscp_tid_map_wifi3(): Update Map ID selected for particular vdev
7951  * @vdev_handle: DP_VDEV handle
7952  * @map_id:ID of map that needs to be updated
7953  *
7954  * Return: void
7955  */
7956 static void dp_set_vdev_dscp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
7957 		uint8_t map_id)
7958 {
	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;

	vdev->dscp_tid_map_id = map_id;
7962 }
7963 
7964 #ifdef DP_RATETABLE_SUPPORT
7965 static int dp_txrx_get_ratekbps(int preamb, int mcs,
7966 				int htflag, int gintval)
7967 {
7968 	uint32_t rix;
7969 	uint16_t ratecode;
7970 
7971 	return dp_getrateindex((uint32_t)gintval, (uint16_t)mcs, 1,
7972 			       (uint8_t)preamb, 1, &rix, &ratecode);
7973 }
7974 #else
7975 static int dp_txrx_get_ratekbps(int preamb, int mcs,
7976 				int htflag, int gintval)
7977 {
7978 	return 0;
7979 }
7980 #endif
7981 
7982 /* dp_txrx_get_pdev_stats - Returns cdp_pdev_stats
 * @pdev_handle: DP pdev handle
7984  *
7985  * return : cdp_pdev_stats pointer
7986  */
7987 static struct cdp_pdev_stats*
7988 dp_txrx_get_pdev_stats(struct cdp_pdev *pdev_handle)
7989 {
7990 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
7991 
7992 	dp_aggregate_pdev_stats(pdev);
7993 
7994 	return &pdev->stats;
7995 }
7996 
7997 /* dp_txrx_update_vdev_me_stats(): Update vdev ME stats sent from CDP
7998  * @vdev_handle: DP vdev handle
7999  * @buf: buffer containing specific stats structure
8000  *
8001  * Returns: void
8002  */
8003 static void dp_txrx_update_vdev_me_stats(struct cdp_vdev *vdev_handle,
8004 					 void *buf)
8005 {
8006 	struct dp_vdev *vdev = NULL;
8007 	struct cdp_tx_ingress_stats *host_stats = NULL;
8008 
8009 	if (!vdev_handle) {
8010 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8011 			  "Invalid vdev handle");
8012 		return;
8013 	}
8014 	vdev = (struct dp_vdev *)vdev_handle;
8015 
8016 	if (!buf) {
8017 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8018 			  "Invalid host stats buf");
8019 		return;
8020 	}
8021 	host_stats = (struct cdp_tx_ingress_stats *)buf;
8022 
8023 	DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt,
8024 			 host_stats->mcast_en.mcast_pkt.num,
8025 			 host_stats->mcast_en.mcast_pkt.bytes);
8026 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error,
8027 		     host_stats->mcast_en.dropped_map_error);
8028 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_self_mac,
8029 		     host_stats->mcast_en.dropped_self_mac);
8030 	DP_STATS_INC(vdev, tx_i.mcast_en.dropped_send_fail,
8031 		     host_stats->mcast_en.dropped_send_fail);
8032 	DP_STATS_INC(vdev, tx_i.mcast_en.ucast,
8033 		     host_stats->mcast_en.ucast);
8034 	DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc,
8035 		     host_stats->mcast_en.fail_seg_alloc);
8036 	DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail,
8037 		     host_stats->mcast_en.clone_fail);
8038 }
8039 
8040 /* dp_txrx_update_vdev_host_stats(): Update stats sent through CDP
8041  * @vdev_handle: DP vdev handle
8042  * @buf: buffer containing specific stats structure
8043  * @stats_id: stats type
8044  *
8045  * Returns: void
8046  */
8047 static void dp_txrx_update_vdev_host_stats(struct cdp_vdev *vdev_handle,
8048 					   void *buf,
8049 					   uint16_t stats_id)
8050 {
8051 	switch (stats_id) {
8052 	case DP_VDEV_STATS_PKT_CNT_ONLY:
8053 		break;
8054 	case DP_VDEV_STATS_TX_ME:
8055 		dp_txrx_update_vdev_me_stats(vdev_handle, buf);
8056 		break;
8057 	default:
8058 		qdf_info("Invalid stats_id %d", stats_id);
8059 		break;
8060 	}
8061 }
8062 
/* dp_txrx_get_peer_stats - return cdp_peer_stats of the given peer
8064  * @peer_handle: DP_PEER handle
8065  *
8066  * return : cdp_peer_stats pointer
8067  */
8068 static struct cdp_peer_stats*
8069 		dp_txrx_get_peer_stats(struct cdp_peer *peer_handle)
8070 {
8071 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
8072 
8073 	qdf_assert(peer);
8074 
8075 	return &peer->stats;
8076 }
8077 
8078 /* dp_txrx_reset_peer_stats - reset cdp_peer_stats for particular peer
8079  * @peer_handle: DP_PEER handle
8080  *
8081  * return : void
8082  */
8083 static void dp_txrx_reset_peer_stats(struct cdp_peer *peer_handle)
8084 {
8085 	struct dp_peer *peer = (struct dp_peer *)peer_handle;
8086 
8087 	qdf_assert(peer);
8088 
8089 	qdf_mem_zero(&peer->stats, sizeof(peer->stats));
8090 }
8091 
8092 /* dp_txrx_get_vdev_stats - Update buffer with cdp_vdev_stats
8093  * @vdev_handle: DP_VDEV handle
 * @buf: buffer for vdev stats
 * @is_aggregate: if true, aggregate per-peer stats into the buffer;
 *                otherwise copy the cached vdev stats
 *
 * return : 0 on success, 1 if the vdev or pdev handle is invalid
8097  */
8098 static int  dp_txrx_get_vdev_stats(struct cdp_vdev *vdev_handle, void *buf,
8099 				   bool is_aggregate)
8100 {
8101 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8102 	struct cdp_vdev_stats *vdev_stats;
8103 	struct dp_pdev *pdev;
8104 	struct dp_soc *soc;
8105 
8106 	if (!vdev)
8107 		return 1;
8108 
8109 	pdev = vdev->pdev;
8110 	if (!pdev)
8111 		return 1;
8112 
8113 	soc = pdev->soc;
8114 	vdev_stats = (struct cdp_vdev_stats *)buf;
8115 
8116 	if (is_aggregate) {
8117 		qdf_spin_lock_bh(&soc->peer_ref_mutex);
8118 		dp_aggregate_vdev_stats(vdev, buf);
8119 		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
8120 	} else {
8121 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
8122 	}
8123 
8124 	return 0;
8125 }
8126 
8127 /*
 * dp_get_total_per(): get total packet error rate (PER)
8129  * @pdev_handle: DP_PDEV handle
8130  *
8131  * Return: % error rate using retries per packet and success packets
8132  */
8133 static int dp_get_total_per(struct cdp_pdev *pdev_handle)
8134 {
8135 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8136 
8137 	dp_aggregate_pdev_stats(pdev);
8138 	if ((pdev->stats.tx.tx_success.num + pdev->stats.tx.retries) == 0)
8139 		return 0;
8140 	return ((pdev->stats.tx.retries * 100) /
8141 		((pdev->stats.tx.tx_success.num) + (pdev->stats.tx.retries)));
8142 }
8143 
8144 /*
8145  * dp_txrx_stats_publish(): publish pdev stats into a buffer
8146  * @pdev_handle: DP_PDEV handle
8147  * @buf: to hold pdev_stats
8148  *
 * Return: TXRX_STATS_LEVEL
8150  */
8151 static int
8152 dp_txrx_stats_publish(struct cdp_pdev *pdev_handle, struct cdp_stats_extd *buf)
8153 {
8154 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8155 	struct cdp_pdev_stats *buffer = (struct cdp_pdev_stats *) buf;
8156 	struct cdp_txrx_stats_req req = {0,};
8157 
8158 	dp_aggregate_pdev_stats(pdev);
8159 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_TX;
8160 	req.cookie_val = 1;
8161 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
8162 				req.param1, req.param2, req.param3, 0,
8163 				req.cookie_val, 0);
8164 
8165 	msleep(DP_MAX_SLEEP_TIME);
8166 
8167 	req.stats = (enum cdp_stats)HTT_DBG_EXT_STATS_PDEV_RX;
8168 	req.cookie_val = 1;
8169 	dp_h2t_ext_stats_msg_send(pdev, req.stats, req.param0,
8170 				req.param1, req.param2, req.param3, 0,
8171 				req.cookie_val, 0);
8172 
8173 	msleep(DP_MAX_SLEEP_TIME);
8174 	qdf_mem_copy(buffer, &pdev->stats, sizeof(pdev->stats));
8175 
8176 	return TXRX_STATS_LEVEL;
8177 }
8178 
8179 /**
8180  * dp_set_pdev_dscp_tid_map_wifi3(): update dscp tid map in pdev
 * @pdev_handle: DP_PDEV handle
8182  * @map_id: ID of map that needs to be updated
8183  * @tos: index value in map
8184  * @tid: tid value passed by the user
8185  *
8186  * Return: void
8187  */
8188 static void dp_set_pdev_dscp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
8189 		uint8_t map_id, uint8_t tos, uint8_t tid)
8190 {
8191 	uint8_t dscp;
8192 	struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
8193 	struct dp_soc *soc = pdev->soc;
8194 
8195 	if (!soc)
8196 		return;
8197 
8198 	dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
8199 	pdev->dscp_tid_map[map_id][dscp] = tid;
8200 
8201 	if (map_id < soc->num_hw_dscp_tid_map)
8202 		hal_tx_update_dscp_tid(soc->hal_soc, tid,
8203 				       map_id, dscp);
8205 }
8206 
8207 /**
8208  * dp_hmmc_tid_override_en_wifi3(): Function to enable hmmc tid override.
8209  * @pdev_handle: pdev handle
8210  * @val: hmmc-dscp flag value
8211  *
8212  * Return: void
8213  */
8214 static void dp_hmmc_tid_override_en_wifi3(struct cdp_pdev *pdev_handle,
8215 					  bool val)
8216 {
8217 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8218 
8219 	pdev->hmmc_tid_override_en = val;
8220 }
8221 
8222 /**
8223  * dp_set_hmmc_tid_val_wifi3(): Function to set hmmc tid value.
8224  * @pdev_handle: pdev handle
8225  * @tid: tid value
8226  *
8227  * Return: void
8228  */
8229 static void dp_set_hmmc_tid_val_wifi3(struct cdp_pdev *pdev_handle,
8230 				      uint8_t tid)
8231 {
8232 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8233 
8234 	pdev->hmmc_tid = tid;
8235 }
8236 
8237 /**
8238  * dp_fw_stats_process(): Process TxRX FW stats request
8239  * @vdev_handle: DP VDEV handle
8240  * @req: stats request
8241  *
8242  * return: int
8243  */
8244 static int dp_fw_stats_process(struct cdp_vdev *vdev_handle,
8245 		struct cdp_txrx_stats_req *req)
8246 {
8247 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8248 	struct dp_pdev *pdev = NULL;
8249 	uint32_t stats = req->stats;
8250 	uint8_t mac_id = req->mac_id;
8251 
8252 	if (!vdev) {
8253 		DP_TRACE(NONE, "VDEV not found");
8254 		return 1;
8255 	}
8256 	pdev = vdev->pdev;
8257 
8258 	/*
	 * For the HTT_DBG_EXT_STATS_RESET command, the FW needs
	 * config_param0 to config_param3 set according to the rule below:
8261 	 *
8262 	 * PARAM:
8263 	 *   - config_param0 : start_offset (stats type)
8264 	 *   - config_param1 : stats bmask from start offset
8265 	 *   - config_param2 : stats bmask from start offset + 32
8266 	 *   - config_param3 : stats bmask from start offset + 64
8267 	 */
8268 	if (req->stats == CDP_TXRX_STATS_0) {
8269 		req->param0 = HTT_DBG_EXT_STATS_PDEV_TX;
8270 		req->param1 = 0xFFFFFFFF;
8271 		req->param2 = 0xFFFFFFFF;
8272 		req->param3 = 0xFFFFFFFF;
8273 	} else if (req->stats == (uint8_t)HTT_DBG_EXT_STATS_PDEV_TX_MU) {
8274 		req->param0 = HTT_DBG_EXT_STATS_SET_VDEV_MASK(vdev->vdev_id);
8275 	}
8276 
8277 	return dp_h2t_ext_stats_msg_send(pdev, stats, req->param0,
8278 				req->param1, req->param2, req->param3,
8279 				0, 0, mac_id);
8280 }
8281 
8282 /**
8283  * dp_txrx_stats_request - function to map to firmware and host stats
8284  * @vdev: virtual handle
8285  * @req: stats request
8286  *
8287  * Return: QDF_STATUS
8288  */
8289 static
8290 QDF_STATUS dp_txrx_stats_request(struct cdp_vdev *vdev,
8291 				 struct cdp_txrx_stats_req *req)
8292 {
8293 	int host_stats;
8294 	int fw_stats;
8295 	enum cdp_stats stats;
8296 	int num_stats;
8297 
8298 	if (!vdev || !req) {
8299 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8300 				"Invalid vdev/req instance");
8301 		return QDF_STATUS_E_INVAL;
8302 	}
8303 
8304 	if (req->mac_id >= WLAN_CFG_MAC_PER_TARGET) {
8305 		dp_err("Invalid mac id request");
8306 		return QDF_STATUS_E_INVAL;
8307 	}
8308 
8309 	stats = req->stats;
8310 	if (stats >= CDP_TXRX_MAX_STATS)
8311 		return QDF_STATUS_E_INVAL;
8312 
8313 	/*
	 * DP_CURR_FW_STATS_AVAIL: number of FW stats currently available;
	 *			it has to be updated whenever new FW HTT
	 *			stats are added
8316 	 */
8317 	if (stats > CDP_TXRX_STATS_HTT_MAX)
8318 		stats = stats + DP_CURR_FW_STATS_AVAIL - DP_HTT_DBG_EXT_STATS_MAX;
8319 
8320 	num_stats  = QDF_ARRAY_SIZE(dp_stats_mapping_table);
8321 
8322 	if (stats >= num_stats) {
8323 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8324 			  "%s: Invalid stats option: %d", __func__, stats);
8325 		return QDF_STATUS_E_INVAL;
8326 	}
8327 
8328 	req->stats = stats;
8329 	fw_stats = dp_stats_mapping_table[stats][STATS_FW];
8330 	host_stats = dp_stats_mapping_table[stats][STATS_HOST];
8331 
8332 	dp_info("stats: %u fw_stats_type: %d host_stats: %d",
8333 		stats, fw_stats, host_stats);
8334 
8335 	if (fw_stats != TXRX_FW_STATS_INVALID) {
8336 		/* update request with FW stats type */
8337 		req->stats = fw_stats;
8338 		return dp_fw_stats_process(vdev, req);
8339 	}
8340 
8341 	if ((host_stats != TXRX_HOST_STATS_INVALID) &&
8342 			(host_stats <= TXRX_HOST_STATS_MAX))
8343 		return dp_print_host_stats(vdev, req);
8344 	else
8345 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
8346 				"Wrong Input for TxRx Stats");
8347 
8348 	return QDF_STATUS_SUCCESS;
8349 }
8350 
8351 /*
 * dp_txrx_dump_stats() - Dump statistics
 * @psoc: soc handle
 * @value: statistics option
 * @level: verbosity level for the dump
8354  */
8355 static QDF_STATUS dp_txrx_dump_stats(void *psoc, uint16_t value,
8356 				     enum qdf_stats_verbosity_level level)
8357 {
8358 	struct dp_soc *soc =
8359 		(struct dp_soc *)psoc;
8360 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8361 
8362 	if (!soc) {
8363 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8364 			"%s: soc is NULL", __func__);
8365 		return QDF_STATUS_E_INVAL;
8366 	}
8367 
8368 	switch (value) {
8369 	case CDP_TXRX_PATH_STATS:
8370 		dp_txrx_path_stats(soc);
8371 		dp_print_soc_interrupt_stats(soc);
8372 		break;
8373 
8374 	case CDP_RX_RING_STATS:
8375 		dp_print_per_ring_stats(soc);
8376 		break;
8377 
8378 	case CDP_TXRX_TSO_STATS:
8379 		dp_print_tso_stats(soc, level);
8380 		break;
8381 
8382 	case CDP_DUMP_TX_FLOW_POOL_INFO:
8383 		if (level == QDF_STATS_VERBOSITY_LEVEL_HIGH)
8384 			cdp_dump_flow_pool_info((struct cdp_soc_t *)soc);
8385 		break;
8386 
8387 	case CDP_DP_NAPI_STATS:
8388 		dp_print_napi_stats(soc);
8389 		break;
8390 
8391 	case CDP_TXRX_DESC_STATS:
8392 		/* TODO: NOT IMPLEMENTED */
8393 		break;
8394 
8395 	default:
8396 		status = QDF_STATUS_E_INVAL;
8397 		break;
8398 	}
8399 
	return status;
}
8403 
8404 /**
 * dp_txrx_clear_dump_stats() - clear the dumped statistics
 * @psoc: soc handle
 * @value: stats option
8408  *
8409  * Return: 0 - Success, non-zero - failure
8410  */
8411 static
8412 QDF_STATUS dp_txrx_clear_dump_stats(struct cdp_soc *psoc, uint8_t value)
8413 {
8414 	struct dp_soc *soc =
8415 		(struct dp_soc *)psoc;
8416 	QDF_STATUS status = QDF_STATUS_SUCCESS;
8417 
8418 	if (!soc) {
8419 		dp_err("%s: soc is NULL", __func__);
8420 		return QDF_STATUS_E_INVAL;
8421 	}
8422 
8423 	switch (value) {
8424 	case CDP_TXRX_TSO_STATS:
8425 		dp_txrx_clear_tso_stats(soc);
8426 		break;
8427 
8428 	default:
8429 		status = QDF_STATUS_E_INVAL;
8430 		break;
8431 	}
8432 
8433 	return status;
8434 }
8435 
8436 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
8437 /**
8438  * dp_update_flow_control_parameters() - API to store datapath
8439  *                            config parameters
8440  * @soc: soc handle
 * @params: ini parameter handle
8442  *
8443  * Return: void
8444  */
8445 static inline
8446 void dp_update_flow_control_parameters(struct dp_soc *soc,
8447 				struct cdp_config_params *params)
8448 {
8449 	soc->wlan_cfg_ctx->tx_flow_stop_queue_threshold =
8450 					params->tx_flow_stop_queue_threshold;
8451 	soc->wlan_cfg_ctx->tx_flow_start_queue_offset =
8452 					params->tx_flow_start_queue_offset;
8453 }
8454 #else
8455 static inline
8456 void dp_update_flow_control_parameters(struct dp_soc *soc,
8457 				struct cdp_config_params *params)
8458 {
8459 }
8460 #endif
8461 
8462 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
8463 /* Max packet limit for TX Comp packet loop (dp_tx_comp_handler) */
8464 #define DP_TX_COMP_LOOP_PKT_LIMIT_MAX 1024
8465 
8466 /* Max packet limit for RX REAP Loop (dp_rx_process) */
8467 #define DP_RX_REAP_LOOP_PKT_LIMIT_MAX 1024
8468 
8469 static
8470 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
8471 					struct cdp_config_params *params)
8472 {
8473 	soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit =
8474 				params->tx_comp_loop_pkt_limit;
8475 
8476 	if (params->tx_comp_loop_pkt_limit < DP_TX_COMP_LOOP_PKT_LIMIT_MAX)
8477 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = true;
8478 	else
8479 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check = false;
8480 
8481 	soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit =
8482 				params->rx_reap_loop_pkt_limit;
8483 
8484 	if (params->rx_reap_loop_pkt_limit < DP_RX_REAP_LOOP_PKT_LIMIT_MAX)
8485 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = true;
8486 	else
8487 		soc->wlan_cfg_ctx->rx_enable_eol_data_check = false;
8488 
8489 	soc->wlan_cfg_ctx->rx_hp_oos_update_limit =
8490 				params->rx_hp_oos_update_limit;
8491 
8492 	dp_info("tx_comp_loop_pkt_limit %u tx_comp_enable_eol_data_check %u rx_reap_loop_pkt_limit %u rx_enable_eol_data_check %u rx_hp_oos_update_limit %u",
8493 		soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit,
8494 		soc->wlan_cfg_ctx->tx_comp_enable_eol_data_check,
8495 		soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit,
8496 		soc->wlan_cfg_ctx->rx_enable_eol_data_check,
8497 		soc->wlan_cfg_ctx->rx_hp_oos_update_limit);
8498 }
8499 #else
8500 static inline
8501 void dp_update_rx_soft_irq_limit_params(struct dp_soc *soc,
8502 					struct cdp_config_params *params)
8503 { }
8504 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
8505 
8506 /**
8507  * dp_update_config_parameters() - API to store datapath
8508  *                            config parameters
 * @psoc: soc handle
 * @params: ini parameter handle
8511  *
8512  * Return: status
8513  */
8514 static
8515 QDF_STATUS dp_update_config_parameters(struct cdp_soc *psoc,
8516 				struct cdp_config_params *params)
8517 {
8518 	struct dp_soc *soc = (struct dp_soc *)psoc;
8519 
8520 	if (!(soc)) {
8521 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
8522 				"%s: Invalid handle", __func__);
8523 		return QDF_STATUS_E_INVAL;
8524 	}
8525 
8526 	soc->wlan_cfg_ctx->tso_enabled = params->tso_enable;
8527 	soc->wlan_cfg_ctx->lro_enabled = params->lro_enable;
8528 	soc->wlan_cfg_ctx->rx_hash = params->flow_steering_enable;
8529 	soc->wlan_cfg_ctx->tcp_udp_checksumoffload =
8530 				params->tcp_udp_checksumoffload;
8531 	soc->wlan_cfg_ctx->napi_enabled = params->napi_enable;
8532 	soc->wlan_cfg_ctx->ipa_enabled = params->ipa_enable;
8533 	soc->wlan_cfg_ctx->gro_enabled = params->gro_enable;
8534 
8535 	dp_update_rx_soft_irq_limit_params(soc, params);
8536 	dp_update_flow_control_parameters(soc, params);
8537 
8538 	return QDF_STATUS_SUCCESS;
8539 }
8540 
8541 static struct cdp_wds_ops dp_ops_wds = {
8542 	.vdev_set_wds = dp_vdev_set_wds,
8543 #ifdef WDS_VENDOR_EXTENSION
8544 	.txrx_set_wds_rx_policy = dp_txrx_set_wds_rx_policy,
8545 	.txrx_wds_peer_tx_policy_update = dp_txrx_peer_wds_tx_policy_update,
8546 #endif
8547 };
8548 
8549 /*
 * dp_txrx_data_tx_cb_set(): set the callback for non-standard tx
 * @vdev_handle: datapath vdev handle
 * @callback: callback function
 * @ctxt: callback context
 *
 * Return: void
 */
8556 static void
8557 dp_txrx_data_tx_cb_set(struct cdp_vdev *vdev_handle,
8558 		       ol_txrx_data_tx_cb callback, void *ctxt)
8559 {
8560 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8561 
8562 	vdev->tx_non_std_data_callback.func = callback;
8563 	vdev->tx_non_std_data_callback.ctxt = ctxt;
8564 }
8565 
8566 /**
8567  * dp_pdev_get_dp_txrx_handle() - get dp handle from pdev
8568  * @pdev_hdl: datapath pdev handle
8569  *
8570  * Return: opaque pointer to dp txrx handle
8571  */
8572 static void *dp_pdev_get_dp_txrx_handle(struct cdp_pdev *pdev_hdl)
8573 {
8574 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
8575 
8576 	return pdev->dp_txrx_handle;
8577 }
8578 
8579 /**
8580  * dp_pdev_set_dp_txrx_handle() - set dp handle in pdev
8581  * @pdev_hdl: datapath pdev handle
8582  * @dp_txrx_hdl: opaque pointer for dp_txrx_handle
8583  *
8584  * Return: void
8585  */
8586 static void
8587 dp_pdev_set_dp_txrx_handle(struct cdp_pdev *pdev_hdl, void *dp_txrx_hdl)
8588 {
8589 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
8590 
8591 	pdev->dp_txrx_handle = dp_txrx_hdl;
8592 }
8593 
8594 /**
8595  * dp_soc_get_dp_txrx_handle() - get context for external-dp from dp soc
8596  * @soc_handle: datapath soc handle
8597  *
8598  * Return: opaque pointer to external dp (non-core DP)
8599  */
8600 static void *dp_soc_get_dp_txrx_handle(struct cdp_soc *soc_handle)
8601 {
8602 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8603 
8604 	return soc->external_txrx_handle;
8605 }
8606 
8607 /**
8608  * dp_soc_set_dp_txrx_handle() - set external dp handle in soc
8609  * @soc_handle: datapath soc handle
8610  * @txrx_handle: opaque pointer to external dp (non-core DP)
8611  *
8612  * Return: void
8613  */
8614 static void
8615 dp_soc_set_dp_txrx_handle(struct cdp_soc *soc_handle, void *txrx_handle)
8616 {
8617 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8618 
8619 	soc->external_txrx_handle = txrx_handle;
8620 }
8621 
8622 /**
8623  * dp_soc_map_pdev_to_lmac() - Save pdev_id to lmac_id mapping
8624  * @pdev_hdl: datapath pdev handle
8625  * @lmac_id: lmac id
8626  *
8627  * Return: void
8628  */
8629 static void
8630 dp_soc_map_pdev_to_lmac(struct cdp_pdev *pdev_hdl, uint32_t lmac_id)
8631 {
8632 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
8633 	struct dp_soc *soc = pdev->soc;
8634 
8635 	pdev->lmac_id = lmac_id;
8636 	wlan_cfg_set_hw_macid(soc->wlan_cfg_ctx,
8637 			      pdev->pdev_id,
8638 			      (lmac_id + 1));
8639 }
8640 
8641 /**
8642  * dp_soc_set_pdev_status_down() - set pdev down/up status
8643  * @pdev_hdl: datapath pdev handle
8644  * @is_pdev_down: pdev down/up status
8645  *
8646  * Return: void
8647  */
8648 static void
8649 dp_soc_set_pdev_status_down(struct cdp_pdev *pdev_hdl, bool is_pdev_down)
8650 {
8651 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
8652 
8653 	pdev->is_pdev_down = is_pdev_down;
8654 }
8655 
8656 /**
8657  * dp_get_cfg_capabilities() - get dp capabilities
8658  * @soc_handle: datapath soc handle
8659  * @dp_caps: enum for dp capabilities
8660  *
8661  * Return: bool to determine if dp caps is enabled
8662  */
8663 static bool
8664 dp_get_cfg_capabilities(struct cdp_soc_t *soc_handle,
8665 			enum cdp_capabilities dp_caps)
8666 {
8667 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8668 
8669 	return wlan_cfg_get_dp_caps(soc->wlan_cfg_ctx, dp_caps);
8670 }
8671 
8672 #ifdef FEATURE_AST
8673 static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
8674 {
8675 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
8676 	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
8677 	struct dp_soc *soc = (struct dp_soc *)vdev->pdev->soc;
8678 
8679 	/*
	 * For a BSS peer, a new peer is not created on alloc_node if a
	 * peer with the same address already exists; instead, the refcnt
	 * of the existing peer is increased. Correspondingly, in the delete
	 * path only the refcnt is decreased, and the peer is deleted only
	 * when all references are gone. So delete_in_progress should not be
	 * set for a bss_peer unless only 2 references remain (the peer map
	 * reference and the peer hash table reference).
8687 	 */
8688 	if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2))
8689 		return;
8690 
8691 	qdf_spin_lock_bh(&soc->ast_lock);
8692 	peer->delete_in_progress = true;
8693 	dp_peer_delete_ast_entries(soc, peer);
8694 	qdf_spin_unlock_bh(&soc->ast_lock);
8695 }
8696 #endif
8697 
8698 #ifdef ATH_SUPPORT_NAC_RSSI
8699 /**
 * dp_vdev_get_neighbour_rssi(): fetch the stored RSSI for a configured NAC
 * @vdev_hdl: DP vdev handle
 * @mac_addr: neighbour peer MAC address
 * @rssi: pointer to hold the fetched rssi value
 *
 * Return: QDF_STATUS_SUCCESS if the neighbour peer was found,
 *	   QDF_STATUS_E_FAILURE otherwise.
8705  */
8706 static QDF_STATUS  dp_vdev_get_neighbour_rssi(struct cdp_vdev *vdev_hdl,
8707 					      char *mac_addr,
8708 					      uint8_t *rssi)
8709 {
8710 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
8711 	struct dp_pdev *pdev = vdev->pdev;
8712 	struct dp_neighbour_peer *peer = NULL;
8713 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
8714 
8715 	*rssi = 0;
8716 	qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
8717 	TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
8718 		      neighbour_peer_list_elem) {
8719 		if (qdf_mem_cmp(&peer->neighbour_peers_macaddr.raw[0],
8720 				mac_addr, QDF_MAC_ADDR_SIZE) == 0) {
8721 			*rssi = peer->rssi;
8722 			status = QDF_STATUS_SUCCESS;
8723 			break;
8724 		}
8725 	}
8726 	qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
8727 	return status;
8728 }
8729 
8730 static QDF_STATUS dp_config_for_nac_rssi(struct cdp_vdev *vdev_handle,
8731 		enum cdp_nac_param_cmd cmd, char *bssid, char *client_macaddr,
8732 		uint8_t chan_num)
8733 {
8734 
8735 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
8736 	struct dp_pdev *pdev = (struct dp_pdev *)vdev->pdev;
8737 	struct dp_soc *soc = (struct dp_soc *) vdev->pdev->soc;
8738 
8739 	pdev->nac_rssi_filtering = 1;
8740 	/* Store address of NAC (neighbour peer) which will be checked
8741 	 * against TA of received packets.
8742 	 */
8743 
8744 	if (cmd == CDP_NAC_PARAM_ADD) {
8745 		dp_update_filter_neighbour_peers(vdev_handle, DP_NAC_PARAM_ADD,
8746 						 client_macaddr);
8747 	} else if (cmd == CDP_NAC_PARAM_DEL) {
8748 		dp_update_filter_neighbour_peers(vdev_handle,
8749 						 DP_NAC_PARAM_DEL,
8750 						 client_macaddr);
8751 	}
8752 
8753 	if (soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi)
8754 		soc->cdp_soc.ol_ops->config_bssid_in_fw_for_nac_rssi
8755 			((void *)vdev->pdev->ctrl_pdev,
8756 			 vdev->vdev_id, cmd, bssid, client_macaddr);
8757 
8758 	return QDF_STATUS_SUCCESS;
8759 }
8760 #endif
8761 
8762 /**
8763  * dp_enable_peer_based_pktlog() - Set Flag for peer based filtering
8764  * for pktlog
 * @txrx_pdev_handle: cdp_pdev handle
 * @mac_addr: MAC address of the peer on which to filter
 * @enb_dsb: Enable or disable peer based filtering
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE if the peer is invalid
8769  */
8770 static int
8771 dp_enable_peer_based_pktlog(
8772 	struct cdp_pdev *txrx_pdev_handle,
8773 	char *mac_addr, uint8_t enb_dsb)
8774 {
8775 	struct dp_peer *peer;
8776 	uint8_t local_id;
8777 	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev_handle;
8778 
8779 	peer = (struct dp_peer *)dp_find_peer_by_addr(txrx_pdev_handle,
8780 			mac_addr, &local_id);
8781 
8782 	if (!peer) {
8783 		dp_err("Invalid Peer");
8784 		return QDF_STATUS_E_FAILURE;
8785 	}
8786 
8787 	peer->peer_based_pktlog_filter = enb_dsb;
8788 	pdev->dp_peer_based_pktlog = enb_dsb;
8789 
8790 	return QDF_STATUS_SUCCESS;
8791 }
8792 
8793 #ifndef WLAN_SUPPORT_RX_TAG_STATISTICS
8794 /**
8795  * dp_dump_pdev_rx_protocol_tag_stats - dump the number of packets tagged for
8796  * given protocol type (RX_PROTOCOL_TAG_ALL indicates for all protocol)
8797  * @pdev_handle: cdp_pdev handle
8798  * @protocol_type: protocol type for which stats should be displayed
8799  *
8800  * Return: none
8801  */
8802 static inline void
8803 dp_dump_pdev_rx_protocol_tag_stats(struct cdp_pdev *pdev_handle,
8804 				   uint16_t protocol_type)
8805 {
8806 }
8807 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
8808 
8809 #ifndef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
8810 /**
8811  * dp_update_pdev_rx_protocol_tag - Add/remove a protocol tag that should be
8812  * applied to the desired protocol type packets
 * @pdev_handle: cdp_pdev handle
 * @enable_rx_protocol_tag: bitmask that indicates which protocol types
 * are enabled for tagging. Zero disables the feature, non-zero enables it.
8817  * @protocol_type: new protocol type for which the tag is being added
8818  * @tag: user configured tag for the new protocol
8819  *
8820  * Return: Success
8821  */
8822 static inline QDF_STATUS
8823 dp_update_pdev_rx_protocol_tag(struct cdp_pdev *pdev_handle,
8824 			       uint32_t enable_rx_protocol_tag,
8825 			       uint16_t protocol_type,
8826 			       uint16_t tag)
8827 {
8828 	return QDF_STATUS_SUCCESS;
8829 }
8830 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
8831 
8832 #ifndef WLAN_SUPPORT_RX_FLOW_TAG
8833 /**
8834  * dp_set_rx_flow_tag - add/delete a flow
8835  * @pdev_handle: cdp_pdev handle
8836  * @flow_info: flow tuple that is to be added to/deleted from flow search table
8837  *
8838  * Return: Success
8839  */
8840 static inline QDF_STATUS
8841 dp_set_rx_flow_tag(struct cdp_pdev *pdev_handle,
8842 		   struct cdp_rx_flow_info *flow_info)
8843 {
8844 	return QDF_STATUS_SUCCESS;
8845 }
8846 
8847 /**
8848  * dp_dump_rx_flow_tag_stats - dump the number of packets tagged for
8849  * given flow 5-tuple
8850  * @pdev_handle: cdp_pdev handle
8851  * @flow_info: flow 5-tuple for which stats should be displayed
8852  *
8853  * Return: Success
8854  */
8855 static inline QDF_STATUS
8856 dp_dump_rx_flow_tag_stats(struct cdp_pdev *pdev_handle,
8857 			  struct cdp_rx_flow_info *flow_info)
8858 {
8859 	return QDF_STATUS_SUCCESS;
8860 }
8861 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
8862 
8863 static QDF_STATUS dp_peer_map_attach_wifi3(struct cdp_soc_t  *soc_hdl,
8864 					   uint32_t max_peers,
8865 					   uint32_t max_ast_index,
8866 					   bool peer_map_unmap_v2)
8867 {
8868 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8869 
8870 	soc->max_peers = max_peers;
8871 
	qdf_print("%s max_peers %u, max_ast_index: %u\n",
		  __func__, max_peers, max_ast_index);
8874 	wlan_cfg_set_max_ast_idx(soc->wlan_cfg_ctx, max_ast_index);
8875 
8876 	if (dp_peer_find_attach(soc))
8877 		return QDF_STATUS_E_FAILURE;
8878 
8879 	soc->is_peer_map_unmap_v2 = peer_map_unmap_v2;
8880 
8881 	return QDF_STATUS_SUCCESS;
8882 }
8883 
8884 /**
8885  * dp_pdev_set_ctrl_pdev() - set ctrl pdev handle in dp pdev
8886  * @dp_pdev: dp pdev handle
8887  * @ctrl_pdev: UMAC ctrl pdev handle
8888  *
8889  * Return: void
8890  */
8891 static void dp_pdev_set_ctrl_pdev(struct cdp_pdev *dp_pdev,
8892 				  struct cdp_ctrl_objmgr_pdev *ctrl_pdev)
8893 {
8894 	struct dp_pdev *pdev = (struct dp_pdev *)dp_pdev;
8895 
8896 	pdev->ctrl_pdev = ctrl_pdev;
8897 }
8898 
8899 static void dp_set_rate_stats_cap(struct cdp_soc_t *soc_hdl,
8900 				  uint8_t val)
8901 {
8902 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
8903 
8904 	soc->wlanstats_enabled = val;
8905 }
8906 
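/**
 * dp_soc_set_rate_stats_ctx() - store the rate stats context in soc
 * @soc_handle: opaque DP SOC handle
 * @stats_ctx: rate stats context provided by the control plane
 *
 * Return: none
 */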
8907 static void dp_soc_set_rate_stats_ctx(struct cdp_soc_t *soc_handle,
8908 				      void *stats_ctx)
8909 {
8910 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8911 
8912 	soc->rate_stats_ctx = stats_ctx;
8913 }
8914 
8915 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
8916 static void dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
8917 				    struct cdp_pdev *pdev_hdl)
8918 {
8919 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
8920 	struct dp_soc *soc = (struct dp_soc *)pdev->soc;
8921 	struct dp_vdev *vdev = NULL;
8922 	struct dp_peer *peer = NULL;
8923 
8924 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
8925 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
8926 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
8927 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
8928 			if (peer && !peer->bss_peer)
8929 				dp_wdi_event_handler(
8930 					WDI_EVENT_FLUSH_RATE_STATS_REQ,
8931 					pdev->soc, peer->wlanstats_ctx,
8932 					peer->peer_ids[0],
8933 					WDI_NO_VAL, pdev->pdev_id);
8934 		}
8935 	}
8936 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
8937 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
8938 }
8939 #else
8940 static inline void
8941 dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
8942 			struct cdp_pdev *pdev_hdl)
8943 {
8944 }
8945 #endif
8946 
8947 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
8948 static void dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
8949 				     struct cdp_pdev *pdev_handle,
8950 				     void *buf)
8951 {
8952 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
8953 
	dp_wdi_event_handler(WDI_EVENT_PEER_FLUSH_RATE_STATS,
			     pdev->soc, buf, HTT_INVALID_PEER,
			     WDI_NO_VAL, pdev->pdev_id);
8957 }
8958 #else
8959 static inline void
8960 dp_peer_flush_rate_stats(struct cdp_soc_t *soc,
8961 			 struct cdp_pdev *pdev_handle,
8962 			 void *buf)
8963 {
8964 }
8965 #endif
8966 
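/**
 * dp_soc_get_rate_stats_ctx() - retrieve the rate stats context from soc
 * @soc_handle: opaque DP SOC handle
 *
 * Return: rate stats context stored via dp_soc_set_rate_stats_ctx()
 */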
8967 static void *dp_soc_get_rate_stats_ctx(struct cdp_soc_t *soc_handle)
8968 {
8969 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
8970 
8971 	return soc->rate_stats_ctx;
8972 }
8973 
8974 /*
8975  * dp_get_cfg() - get dp cfg
8976  * @soc: cdp soc handle
8977  * @cfg: cfg enum
8978  *
8979  * Return: cfg value
8980  */
8981 static uint32_t dp_get_cfg(void *soc, enum cdp_dp_cfg cfg)
8982 {
8983 	struct dp_soc *dpsoc = (struct dp_soc *)soc;
8984 	uint32_t value = 0;
8985 
8986 	switch (cfg) {
8987 	case cfg_dp_enable_data_stall:
8988 		value = dpsoc->wlan_cfg_ctx->enable_data_stall_detection;
8989 		break;
8990 	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
8991 		value = dpsoc->wlan_cfg_ctx->tcp_udp_checksumoffload;
8992 		break;
8993 	case cfg_dp_tso_enable:
8994 		value = dpsoc->wlan_cfg_ctx->tso_enabled;
8995 		break;
8996 	case cfg_dp_lro_enable:
8997 		value = dpsoc->wlan_cfg_ctx->lro_enabled;
8998 		break;
8999 	case cfg_dp_gro_enable:
9000 		value = dpsoc->wlan_cfg_ctx->gro_enabled;
9001 		break;
9002 	case cfg_dp_tx_flow_start_queue_offset:
9003 		value = dpsoc->wlan_cfg_ctx->tx_flow_start_queue_offset;
9004 		break;
9005 	case cfg_dp_tx_flow_stop_queue_threshold:
9006 		value = dpsoc->wlan_cfg_ctx->tx_flow_stop_queue_threshold;
9007 		break;
9008 	case cfg_dp_disable_intra_bss_fwd:
9009 		value = dpsoc->wlan_cfg_ctx->disable_intra_bss_fwd;
9010 		break;
9011 	default:
		value = 0;
9013 	}
9014 
9015 	return value;
9016 }
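
/*
 * Illustrative sketch (not driver code): querying a datapath config value
 * through dp_get_cfg(). Unknown enum values fall through the default case
 * and read back as 0.
 *
 *	uint32_t tso_enabled = dp_get_cfg((void *)soc, cfg_dp_tso_enable);
 */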
9017 
9018 #ifdef PEER_FLOW_CONTROL
9019 /**
9020  * dp_tx_flow_ctrl_configure_pdev() - Configure flow control params
9021  * @pdev_hdl: datapath pdev handle
9022  * @param: ol ath params
9023  * @value: value of the flag
9024  * @buff: Buffer to be passed
9025  *
 * This function mirrors the legacy implementation, where a single entry
 * point is used both to display stats and to update pdev params.
9028  *
9029  * Return: 0 for success. nonzero for failure.
9030  */
9031 static uint32_t dp_tx_flow_ctrl_configure_pdev(void *pdev_handle,
9032 					       enum _ol_ath_param_t param,
9033 					       uint32_t value, void *buff)
9034 {
9035 	struct dp_soc *soc = NULL;
9036 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
9037 
9038 	if (qdf_unlikely(!pdev))
9039 		return 1;
9040 
9041 	soc = pdev->soc;
9042 	if (!soc)
9043 		return 1;
9044 
9045 	switch (param) {
9046 #ifdef QCA_ENH_V3_STATS_SUPPORT
9047 	case OL_ATH_PARAM_VIDEO_DELAY_STATS_FC:
9048 		if (value)
9049 			pdev->delay_stats_flag = true;
9050 		else
9051 			pdev->delay_stats_flag = false;
9052 		break;
9053 	case OL_ATH_PARAM_VIDEO_STATS_FC:
		qdf_print("------- TID Stats ------");
		dp_pdev_print_tid_stats(pdev);
		qdf_print("------ Delay Stats ------");
9057 		dp_pdev_print_delay_stats(pdev);
9058 		break;
9059 #endif
9060 	case OL_ATH_PARAM_TOTAL_Q_SIZE:
9061 		{
9062 			uint32_t tx_min, tx_max;
9063 
9064 			tx_min = wlan_cfg_get_min_tx_desc(soc->wlan_cfg_ctx);
9065 			tx_max = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
9066 
9067 			if (!buff) {
9068 				if ((value >= tx_min) && (value <= tx_max)) {
9069 					pdev->num_tx_allowed = value;
9070 				} else {
9071 					QDF_TRACE(QDF_MODULE_ID_DP,
9072 						  QDF_TRACE_LEVEL_INFO,
9073 						  "Failed to update num_tx_allowed, Q_min = %d Q_max = %d",
9074 						  tx_min, tx_max);
9075 					break;
9076 				}
9077 			} else {
9078 				*(int *)buff = pdev->num_tx_allowed;
9079 			}
9080 		}
9081 		break;
9082 	default:
9083 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
9084 			  "%s: not handled param %d ", __func__, param);
9085 		break;
9086 	}
9087 
9088 	return 0;
9089 }
9090 #endif
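
/*
 * Illustrative sketch (not driver code): the dual get/set semantics of
 * dp_tx_flow_ctrl_configure_pdev() for OL_ATH_PARAM_TOTAL_Q_SIZE. With a
 * NULL buff the value is applied (if it lies within [tx_min, tx_max]);
 * with a non-NULL buff the current value is read back and value is ignored.
 *
 *	int cur_q_size;
 *
 *	dp_tx_flow_ctrl_configure_pdev(pdev, OL_ATH_PARAM_TOTAL_Q_SIZE,
 *				       1024, NULL);
 *	dp_tx_flow_ctrl_configure_pdev(pdev, OL_ATH_PARAM_TOTAL_Q_SIZE,
 *				       0, &cur_q_size);
 */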
9091 
9092 /**
9093  * dp_set_pdev_pcp_tid_map_wifi3(): update pcp tid map in pdev
 * @pdev_handle: DP_PDEV handle
9095  * @pcp: pcp value
9096  * @tid: tid value passed by the user
9097  *
9098  * Return: QDF_STATUS_SUCCESS on success
9099  */
9100 static QDF_STATUS dp_set_pdev_pcp_tid_map_wifi3(struct cdp_pdev *pdev_handle,
9101 						uint8_t pcp, uint8_t tid)
9102 {
9103 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
9104 	struct dp_soc *soc = pdev->soc;
9105 
9106 	soc->pcp_tid_map[pcp] = tid;
9107 
9108 	hal_tx_update_pcp_tid_map(soc->hal_soc, pcp, tid);
9109 	return QDF_STATUS_SUCCESS;
9110 }
9111 
9112 /**
9113  * dp_set_pdev_tidmap_prty_wifi3(): update tidmap priority in pdev
 * @pdev_handle: DP_PDEV handle
9115  * @prio: tidmap priority value passed by the user
9116  *
9117  * Return: QDF_STATUS_SUCCESS on success
9118  */
9119 static QDF_STATUS dp_set_pdev_tidmap_prty_wifi3(struct cdp_pdev *pdev_handle,
9120 						uint8_t prio)
9121 {
9122 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
9123 	struct dp_soc *soc = pdev->soc;
9124 
9125 	soc->tidmap_prty = prio;
9126 
9127 	hal_tx_set_tidmap_prty(soc->hal_soc, prio);
9128 	return QDF_STATUS_SUCCESS;
9129 }
9130 
9131 /**
9132  * dp_set_vdev_pcp_tid_map_wifi3(): update pcp tid map in vdev
 * @vdev_handle: DP_VDEV handle
9134  * @pcp: pcp value
9135  * @tid: tid value passed by the user
9136  *
9137  * Return: QDF_STATUS_SUCCESS on success
9138  */
9139 static QDF_STATUS dp_set_vdev_pcp_tid_map_wifi3(struct cdp_vdev *vdev_handle,
9140 						uint8_t pcp, uint8_t tid)
9141 {
9142 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
9143 
9144 	vdev->pcp_tid_map[pcp] = tid;
9145 
9146 	return QDF_STATUS_SUCCESS;
9147 }
9148 
9149 /**
 * dp_set_vdev_tidmap_tbl_id_wifi3(): update tidmap table id in vdev
 * @vdev_handle: DP_VDEV handle
9152  * @mapid: map_id value passed by the user
9153  *
9154  * Return: QDF_STATUS_SUCCESS on success
9155  */
9156 static QDF_STATUS dp_set_vdev_tidmap_tbl_id_wifi3(struct cdp_vdev *vdev_handle,
9157 						  uint8_t mapid)
9158 {
9159 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
9160 
9161 	vdev->tidmap_tbl_id = mapid;
9162 
9163 	return QDF_STATUS_SUCCESS;
9164 }
9165 
9166 /**
9167  * dp_set_vdev_tidmap_prty_wifi3(): update tidmap priority in vdev
 * @vdev_handle: DP_VDEV handle
9169  * @prio: tidmap priority value passed by the user
9170  *
9171  * Return: QDF_STATUS_SUCCESS on success
9172  */
9173 static QDF_STATUS dp_set_vdev_tidmap_prty_wifi3(struct cdp_vdev *vdev_handle,
9174 						uint8_t prio)
9175 {
9176 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
9177 
9178 	vdev->tidmap_prty = prio;
9179 
9180 	return QDF_STATUS_SUCCESS;
9181 }
9182 
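/*
 * Illustrative sketch (not driver code): steering PCP 5 traffic to TID 6
 * soc-wide via the pdev API, then overriding it to TID 3 for one vdev.
 * The PCP and TID values here are arbitrary examples.
 *
 *	dp_set_pdev_pcp_tid_map_wifi3(pdev_handle, 5, 6);
 *	dp_set_vdev_pcp_tid_map_wifi3(vdev_handle, 5, 3);
 */
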
9183 static struct cdp_cmn_ops dp_ops_cmn = {
9184 	.txrx_soc_attach_target = dp_soc_attach_target_wifi3,
9185 	.txrx_vdev_attach = dp_vdev_attach_wifi3,
9186 	.txrx_vdev_detach = dp_vdev_detach_wifi3,
9187 	.txrx_pdev_attach = dp_pdev_attach_wifi3,
9188 	.txrx_pdev_detach = dp_pdev_detach_wifi3,
9189 	.txrx_pdev_deinit = dp_pdev_deinit_wifi3,
9190 	.txrx_peer_create = dp_peer_create_wifi3,
9191 	.txrx_peer_setup = dp_peer_setup_wifi3,
9192 #ifdef FEATURE_AST
9193 	.txrx_peer_teardown = dp_peer_teardown_wifi3,
9194 #else
9195 	.txrx_peer_teardown = NULL,
9196 #endif
9197 	.txrx_peer_add_ast = dp_peer_add_ast_wifi3,
9198 	.txrx_peer_update_ast = dp_peer_update_ast_wifi3,
9199 	.txrx_peer_get_ast_info_by_soc = dp_peer_get_ast_info_by_soc_wifi3,
9200 	.txrx_peer_get_ast_info_by_pdev =
9201 		dp_peer_get_ast_info_by_pdevid_wifi3,
9202 	.txrx_peer_ast_delete_by_soc =
9203 		dp_peer_ast_entry_del_by_soc,
9204 	.txrx_peer_ast_delete_by_pdev =
9205 		dp_peer_ast_entry_del_by_pdev,
9206 	.txrx_peer_delete = dp_peer_delete_wifi3,
9207 	.txrx_vdev_register = dp_vdev_register_wifi3,
9208 	.txrx_vdev_flush_peers = dp_vdev_flush_peers,
9209 	.txrx_soc_detach = dp_soc_detach_wifi3,
9210 	.txrx_soc_deinit = dp_soc_deinit_wifi3,
9211 	.txrx_soc_init = dp_soc_init_wifi3,
9212 	.txrx_tso_soc_attach = dp_tso_soc_attach,
9213 	.txrx_tso_soc_detach = dp_tso_soc_detach,
9214 	.txrx_get_vdev_mac_addr = dp_get_vdev_mac_addr_wifi3,
9215 	.txrx_get_vdev_from_vdev_id = dp_get_vdev_from_vdev_id_wifi3,
9216 	.txrx_get_mon_vdev_from_pdev = dp_get_mon_vdev_from_pdev_wifi3,
9217 	.txrx_get_ctrl_pdev_from_vdev = dp_get_ctrl_pdev_from_vdev_wifi3,
9218 	.txrx_ath_getstats = dp_get_device_stats,
9219 	.addba_requestprocess = dp_addba_requestprocess_wifi3,
9220 	.addba_responsesetup = dp_addba_responsesetup_wifi3,
9221 	.addba_resp_tx_completion = dp_addba_resp_tx_completion_wifi3,
9222 	.delba_process = dp_delba_process_wifi3,
9223 	.set_addba_response = dp_set_addba_response,
9224 	.get_peer_mac_addr_frm_id = dp_get_peer_mac_addr_frm_id,
9225 	.flush_cache_rx_queue = NULL,
	/* TODO: APIs to get dscp-tid map need to be added */
9227 	.set_vdev_dscp_tid_map = dp_set_vdev_dscp_tid_map_wifi3,
9228 	.set_pdev_dscp_tid_map = dp_set_pdev_dscp_tid_map_wifi3,
9229 	.hmmc_tid_override_en = dp_hmmc_tid_override_en_wifi3,
9230 	.set_hmmc_tid_val = dp_set_hmmc_tid_val_wifi3,
9231 	.txrx_get_total_per = dp_get_total_per,
9232 	.txrx_stats_request = dp_txrx_stats_request,
9233 	.txrx_set_monitor_mode = dp_vdev_set_monitor_mode,
9234 	.txrx_get_pdev_id_frm_pdev = dp_get_pdev_id_frm_pdev,
9235 	.txrx_get_vow_config_frm_pdev = dp_get_delay_stats_flag,
9236 	.txrx_pdev_set_chan_noise_floor = dp_pdev_set_chan_noise_floor,
9237 	.txrx_set_nac = dp_set_nac,
9238 	.txrx_get_tx_pending = dp_get_tx_pending,
9239 	.txrx_set_pdev_tx_capture = dp_config_debug_sniffer,
9240 	.txrx_get_peer_mac_from_peer_id = dp_get_peer_mac_from_peer_id,
9241 	.display_stats = dp_txrx_dump_stats,
9242 	.txrx_soc_set_nss_cfg = dp_soc_set_nss_cfg_wifi3,
9243 	.txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
9244 	.txrx_intr_attach = dp_soc_interrupt_attach_wrapper,
9245 	.txrx_intr_detach = dp_soc_interrupt_detach,
9246 	.set_pn_check = dp_set_pn_check_wifi3,
9247 	.update_config_parameters = dp_update_config_parameters,
9248 	/* TODO: Add other functions */
9249 	.txrx_data_tx_cb_set = dp_txrx_data_tx_cb_set,
9250 	.get_dp_txrx_handle = dp_pdev_get_dp_txrx_handle,
9251 	.set_dp_txrx_handle = dp_pdev_set_dp_txrx_handle,
9252 	.get_soc_dp_txrx_handle = dp_soc_get_dp_txrx_handle,
9253 	.set_soc_dp_txrx_handle = dp_soc_set_dp_txrx_handle,
9254 	.map_pdev_to_lmac = dp_soc_map_pdev_to_lmac,
9255 	.set_pdev_status_down = dp_soc_set_pdev_status_down,
9256 	.txrx_set_ba_aging_timeout = dp_set_ba_aging_timeout,
9257 	.txrx_get_ba_aging_timeout = dp_get_ba_aging_timeout,
9258 	.tx_send = dp_tx_send,
9259 	.txrx_peer_reset_ast = dp_wds_reset_ast_wifi3,
9260 	.txrx_peer_reset_ast_table = dp_wds_reset_ast_table_wifi3,
9261 	.txrx_peer_flush_ast_table = dp_wds_flush_ast_table_wifi3,
9262 	.txrx_peer_map_attach = dp_peer_map_attach_wifi3,
9263 	.txrx_pdev_set_ctrl_pdev = dp_pdev_set_ctrl_pdev,
9264 	.txrx_get_os_rx_handles_from_vdev =
9265 					dp_get_os_rx_handles_from_vdev_wifi3,
9266 	.delba_tx_completion = dp_delba_tx_completion_wifi3,
9267 	.get_dp_capabilities = dp_get_cfg_capabilities,
9268 	.txrx_get_cfg = dp_get_cfg,
9269 	.set_rate_stats_ctx = dp_soc_set_rate_stats_ctx,
9270 	.get_rate_stats_ctx = dp_soc_get_rate_stats_ctx,
9271 	.txrx_peer_flush_rate_stats = dp_peer_flush_rate_stats,
9272 	.txrx_flush_rate_stats_request = dp_flush_rate_stats_req,
9273 
9274 	.set_pdev_pcp_tid_map = dp_set_pdev_pcp_tid_map_wifi3,
9275 	.set_pdev_tidmap_prty = dp_set_pdev_tidmap_prty_wifi3,
9276 	.set_vdev_pcp_tid_map = dp_set_vdev_pcp_tid_map_wifi3,
9277 	.set_vdev_tidmap_prty = dp_set_vdev_tidmap_prty_wifi3,
9278 	.set_vdev_tidmap_tbl_id = dp_set_vdev_tidmap_tbl_id_wifi3,
9279 
9280 	.txrx_cp_peer_del_response = dp_cp_peer_del_resp_handler,
9281 #ifdef QCA_MULTIPASS_SUPPORT
9282 	.set_vlan_groupkey = dp_set_vlan_groupkey,
9283 #endif
9284 };
9285 
9286 static struct cdp_ctrl_ops dp_ops_ctrl = {
9287 	.txrx_peer_authorize = dp_peer_authorize,
9288 	.txrx_set_vdev_rx_decap_type = dp_set_vdev_rx_decap_type,
9289 	.txrx_set_tx_encap_type = dp_set_vdev_tx_encap_type,
9290 #ifdef MESH_MODE_SUPPORT
9291 	.txrx_set_mesh_mode  = dp_peer_set_mesh_mode,
9292 	.txrx_set_mesh_rx_filter = dp_peer_set_mesh_rx_filter,
9293 #endif
9294 	.txrx_set_vdev_param = dp_set_vdev_param,
9295 	.txrx_peer_set_nawds = dp_peer_set_nawds,
9296 	.txrx_set_pdev_reo_dest = dp_set_pdev_reo_dest,
9297 	.txrx_get_pdev_reo_dest = dp_get_pdev_reo_dest,
9298 	.txrx_set_filter_neighbour_peers = dp_set_filter_neighbour_peers,
9299 	.txrx_update_filter_neighbour_peers =
9300 		dp_update_filter_neighbour_peers,
9301 	.txrx_get_sec_type = dp_get_sec_type,
9302 	/* TODO: Add other functions */
9303 	.txrx_wdi_event_sub = dp_wdi_event_sub,
9304 	.txrx_wdi_event_unsub = dp_wdi_event_unsub,
9305 #ifdef WDI_EVENT_ENABLE
9306 	.txrx_get_pldev = dp_get_pldev,
9307 #endif
9308 	.txrx_set_pdev_param = dp_set_pdev_param,
9309 #ifdef ATH_SUPPORT_NAC_RSSI
9310 	.txrx_vdev_config_for_nac_rssi = dp_config_for_nac_rssi,
9311 	.txrx_vdev_get_neighbour_rssi = dp_vdev_get_neighbour_rssi,
9312 #endif
9313 	.set_key = dp_set_michael_key,
9314 	.txrx_get_vdev_param = dp_get_vdev_param,
9315 	.enable_peer_based_pktlog = dp_enable_peer_based_pktlog,
9316 	.calculate_delay_stats = dp_calculate_delay_stats,
9317 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
9318 	.txrx_update_pdev_rx_protocol_tag = dp_update_pdev_rx_protocol_tag,
9319 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
9320 	.txrx_dump_pdev_rx_protocol_tag_stats =
9321 				dp_dump_pdev_rx_protocol_tag_stats,
9322 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
9323 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
9324 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
9325 	.txrx_set_rx_flow_tag = dp_set_rx_flow_tag,
9326 	.txrx_dump_rx_flow_tag_stats = dp_dump_rx_flow_tag_stats,
9327 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
9328 #ifdef QCA_MULTIPASS_SUPPORT
9329 	.txrx_peer_set_vlan_id = dp_peer_set_vlan_id,
9330 #endif /*QCA_MULTIPASS_SUPPORT*/
9331 #if defined(WLAN_TX_PKT_CAPTURE_ENH) || defined(WLAN_RX_PKT_CAPTURE_ENH)
9332 	.txrx_update_peer_pkt_capture_params =
9333 		 dp_peer_update_pkt_capture_params,
9334 #endif /* WLAN_TX_PKT_CAPTURE_ENH || WLAN_RX_PKT_CAPTURE_ENH */
9335 };
9336 
9337 static struct cdp_me_ops dp_ops_me = {
9338 #ifdef ATH_SUPPORT_IQUE
9339 	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
9340 	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
9341 	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
9342 #endif
9343 };
9344 
9345 static struct cdp_mon_ops dp_ops_mon = {
9346 	.txrx_monitor_set_filter_ucast_data = NULL,
9347 	.txrx_monitor_set_filter_mcast_data = NULL,
9348 	.txrx_monitor_set_filter_non_data = NULL,
9349 	.txrx_monitor_get_filter_ucast_data = dp_vdev_get_filter_ucast_data,
9350 	.txrx_monitor_get_filter_mcast_data = dp_vdev_get_filter_mcast_data,
9351 	.txrx_monitor_get_filter_non_data = dp_vdev_get_filter_non_data,
9352 	.txrx_reset_monitor_mode = dp_reset_monitor_mode,
	/* Added support for HK advanced filter */
9354 	.txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
9355 	.txrx_monitor_record_channel = dp_pdev_set_monitor_channel,
9356 	.txrx_deliver_tx_mgmt = dp_deliver_tx_mgmt,
9357 };
9358 
9359 static struct cdp_host_stats_ops dp_ops_host_stats = {
9360 	.txrx_per_peer_stats = dp_get_host_peer_stats,
9361 	.get_fw_peer_stats = dp_get_fw_peer_stats,
9362 	.get_htt_stats = dp_get_htt_stats,
9363 	.txrx_enable_enhanced_stats = dp_enable_enhanced_stats,
9364 	.txrx_disable_enhanced_stats = dp_disable_enhanced_stats,
9365 	.txrx_stats_publish = dp_txrx_stats_publish,
9366 	.txrx_get_vdev_stats  = dp_txrx_get_vdev_stats,
9367 	.txrx_get_peer_stats = dp_txrx_get_peer_stats,
9368 	.txrx_reset_peer_stats = dp_txrx_reset_peer_stats,
9369 	.txrx_get_pdev_stats = dp_txrx_get_pdev_stats,
9370 	.txrx_get_ratekbps = dp_txrx_get_ratekbps,
9371 	.configure_rate_stats = dp_set_rate_stats_cap,
9372 	.txrx_update_vdev_stats = dp_txrx_update_vdev_host_stats,
9373 	/* TODO */
9374 };
9375 
9376 static struct cdp_raw_ops dp_ops_raw = {
9377 	/* TODO */
9378 };
9379 
9380 #ifdef PEER_FLOW_CONTROL
9381 static struct cdp_pflow_ops dp_ops_pflow = {
9382 	dp_tx_flow_ctrl_configure_pdev,
9383 };
#endif /* PEER_FLOW_CONTROL */
9385 
9386 #ifdef FEATURE_RUNTIME_PM
9387 /**
9388  * dp_runtime_suspend() - ensure DP is ready to runtime suspend
9389  * @opaque_pdev: DP pdev context
9390  *
9391  * DP is ready to runtime suspend if there are no pending TX packets.
9392  *
9393  * Return: QDF_STATUS
9394  */
9395 static QDF_STATUS dp_runtime_suspend(struct cdp_pdev *opaque_pdev)
9396 {
9397 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
9398 	struct dp_soc *soc = pdev->soc;
9399 
9400 	/* Abort if there are any pending TX packets */
9401 	if (dp_get_tx_pending(opaque_pdev) > 0) {
9402 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
9403 			  FL("Abort suspend due to pending TX packets"));
9404 		return QDF_STATUS_E_AGAIN;
9405 	}
9406 
9407 	if (soc->intr_mode == DP_INTR_POLL)
9408 		qdf_timer_stop(&soc->int_timer);
9409 
9410 	return QDF_STATUS_SUCCESS;
9411 }
9412 
9413 /**
 * dp_flush_ring_hptp() - Update ring shadow register HP/TP address
 *			  on runtime resume
 * @soc: DP soc context
 * @hal_srng: srng handle whose flush event is pending
9418  *
9419  * Return: None
9420  */
9421 static
9422 void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
9423 {
9424 	if (hal_srng && hal_srng_get_clear_event(hal_srng,
9425 						 HAL_SRNG_FLUSH_EVENT)) {
9426 		/* Acquire the lock */
9427 		hal_srng_access_start(soc->hal_soc, hal_srng);
9428 
9429 		hal_srng_access_end(soc->hal_soc, hal_srng);
9430 
9431 		hal_srng_set_flush_last_ts(hal_srng);
9432 	}
9433 }
9434 
9435 /**
9436  * dp_runtime_resume() - ensure DP is ready to runtime resume
9437  * @opaque_pdev: DP pdev context
9438  *
9439  * Resume DP for runtime PM.
9440  *
9441  * Return: QDF_STATUS
9442  */
9443 static QDF_STATUS dp_runtime_resume(struct cdp_pdev *opaque_pdev)
9444 {
9445 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
9446 	struct dp_soc *soc = pdev->soc;
9447 	int i;
9448 
9449 	if (soc->intr_mode == DP_INTR_POLL)
9450 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
9451 
	for (i = 0; i < MAX_TCL_DATA_RINGS; i++)
		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
9455 
9456 	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
9457 
9458 	return QDF_STATUS_SUCCESS;
9459 }
9460 #endif /* FEATURE_RUNTIME_PM */
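
/*
 * Illustrative sketch (not driver code): the runtime PM contract above.
 * Suspend bails out with QDF_STATUS_E_AGAIN while TX completions are
 * outstanding, so a caller is expected to retry later; resume restarts
 * the poll timer and flushes any pending HP/TP shadow-register updates.
 *
 *	if (dp_runtime_suspend(opaque_pdev) == QDF_STATUS_E_AGAIN)
 *		return;		// reschedule, TX is still draining
 *	...
 *	dp_runtime_resume(opaque_pdev);
 */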
9461 
9462 /**
9463  * dp_tx_get_success_ack_stats() - get tx success completion count
 * @pdev: dp pdev context
 * @vdev_id: vdev identifier
9466  *
9467  * Return: tx success ack count
9468  */
9469 static uint32_t dp_tx_get_success_ack_stats(struct cdp_pdev *pdev,
9470 					    uint8_t vdev_id)
9471 {
9472 	struct dp_soc *soc = ((struct dp_pdev *)pdev)->soc;
9473 	struct dp_vdev *vdev =
9474 		(struct dp_vdev *)dp_get_vdev_from_soc_vdev_id_wifi3(soc,
9475 								 vdev_id);
9476 	struct cdp_vdev_stats *vdev_stats = NULL;
9477 	uint32_t tx_success;
9478 
9479 	if (!vdev) {
9480 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
9481 			  FL("Invalid vdev id %d"), vdev_id);
9482 		return 0;
9483 	}
9484 
9485 	vdev_stats = qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
9486 	if (!vdev_stats) {
9487 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
9488 			  "DP alloc failure - unable to get alloc vdev stats");
9489 		return 0;
9490 	}
9491 
9492 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
9493 	dp_aggregate_vdev_stats(vdev, vdev_stats);
9494 	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
9495 
9496 	tx_success = vdev_stats->tx.tx_success.num;
9497 	qdf_mem_free(vdev_stats);
9498 
9499 	return tx_success;
9500 }
9501 
9502 #ifdef DP_PEER_EXTENDED_API
9503 static struct cdp_misc_ops dp_ops_misc = {
9504 #ifdef FEATURE_WLAN_TDLS
9505 	.tx_non_std = dp_tx_non_std,
9506 #endif /* FEATURE_WLAN_TDLS */
9507 	.get_opmode = dp_get_opmode,
9508 #ifdef FEATURE_RUNTIME_PM
9509 	.runtime_suspend = dp_runtime_suspend,
9510 	.runtime_resume = dp_runtime_resume,
9511 #endif /* FEATURE_RUNTIME_PM */
9512 	.pkt_log_init = dp_pkt_log_init,
9513 	.pkt_log_con_service = dp_pkt_log_con_service,
9514 	.get_num_rx_contexts = dp_get_num_rx_contexts,
9515 	.get_tx_ack_stats = dp_tx_get_success_ack_stats,
9516 };
9517 #endif
9518 
9519 #ifdef DP_FLOW_CTL
9520 static struct cdp_flowctl_ops dp_ops_flowctl = {
	/* WIFI 3.0 DP: implement as required */
9522 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
9523 	.flow_pool_map_handler = dp_tx_flow_pool_map,
9524 	.flow_pool_unmap_handler = dp_tx_flow_pool_unmap,
9525 	.register_pause_cb = dp_txrx_register_pause_cb,
9526 	.dump_flow_pool_info = dp_tx_dump_flow_pool_info,
9527 	.tx_desc_thresh_reached = dp_tx_desc_thresh_reached,
9528 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
9529 };
9530 
9531 static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
9532 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
9533 };
9534 #endif
9535 
9536 #ifdef IPA_OFFLOAD
9537 static struct cdp_ipa_ops dp_ops_ipa = {
9538 	.ipa_get_resource = dp_ipa_get_resource,
9539 	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
9540 	.ipa_op_response = dp_ipa_op_response,
9541 	.ipa_register_op_cb = dp_ipa_register_op_cb,
9542 	.ipa_get_stat = dp_ipa_get_stat,
9543 	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
9544 	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
9545 	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
9546 	.ipa_setup = dp_ipa_setup,
9547 	.ipa_cleanup = dp_ipa_cleanup,
9548 	.ipa_setup_iface = dp_ipa_setup_iface,
9549 	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
9550 	.ipa_enable_pipes = dp_ipa_enable_pipes,
9551 	.ipa_disable_pipes = dp_ipa_disable_pipes,
9552 	.ipa_set_perf_level = dp_ipa_set_perf_level,
9553 	.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd
9554 };
9555 #endif
9556 
9557 #ifdef DP_POWER_SAVE
9558 static QDF_STATUS dp_bus_suspend(struct cdp_pdev *opaque_pdev)
9559 {
9560 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
9561 	struct dp_soc *soc = pdev->soc;
9562 	int timeout = SUSPEND_DRAIN_WAIT;
9563 	int drain_wait_delay = 50; /* 50 ms */
9564 
9565 	/* Abort if there are any pending TX packets */
9566 	while (dp_get_tx_pending(opaque_pdev) > 0) {
9567 		qdf_sleep(drain_wait_delay);
9568 		if (timeout <= 0) {
9569 			dp_err("TX frames are pending, abort suspend");
9570 			return QDF_STATUS_E_TIMEOUT;
9571 		}
9572 		timeout = timeout - drain_wait_delay;
9573 	}
9574 
9575 	if (soc->intr_mode == DP_INTR_POLL)
9576 		qdf_timer_stop(&soc->int_timer);
9577 
9578 	return QDF_STATUS_SUCCESS;
9579 }
9580 
9581 static QDF_STATUS dp_bus_resume(struct cdp_pdev *opaque_pdev)
9582 {
9583 	struct dp_pdev *pdev = (struct dp_pdev *)opaque_pdev;
9584 	struct dp_soc *soc = pdev->soc;
9585 
9586 	if (soc->intr_mode == DP_INTR_POLL)
9587 		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
9588 
9589 	return QDF_STATUS_SUCCESS;
9590 }
9591 
9592 static struct cdp_bus_ops dp_ops_bus = {
9593 	.bus_suspend = dp_bus_suspend,
9594 	.bus_resume = dp_bus_resume
9595 };
9596 #endif
9597 
9598 #ifdef DP_FLOW_CTL
9599 static struct cdp_throttle_ops dp_ops_throttle = {
9600 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
9601 };
9602 
9603 static struct cdp_cfg_ops dp_ops_cfg = {
9604 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
9605 };
9606 #endif
9607 
9608 #ifdef DP_PEER_EXTENDED_API
9609 static struct cdp_ocb_ops dp_ops_ocb = {
9610 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
9611 };
9612 
9613 static struct cdp_mob_stats_ops dp_ops_mob_stats = {
9614 	.clear_stats = dp_txrx_clear_dump_stats,
9615 };
9616 
9617 /*
 * dp_peer_get_ref_find_by_addr - find peer by mac address and take a reference
9619  * @dev: physical device instance
9620  * @peer_mac_addr: peer mac address
9621  * @local_id: local id for the peer
9622  * @debug_id: to track enum peer access
9623  *
9624  * Return: peer instance pointer
9625  */
9626 static inline void *
9627 dp_peer_get_ref_find_by_addr(struct cdp_pdev *dev, uint8_t *peer_mac_addr,
9628 			     uint8_t *local_id,
9629 			     enum peer_debug_id_type debug_id)
9630 {
9631 	struct dp_pdev *pdev = (struct dp_pdev *)dev;
9632 	struct dp_peer *peer;
9633 
9634 	peer = dp_peer_find_hash_find(pdev->soc, peer_mac_addr, 0, DP_VDEV_ALL);
9635 
9636 	if (!peer)
9637 		return NULL;
9638 
9639 	*local_id = peer->local_id;
9640 	DP_TRACE(INFO, "%s: peer %pK id %d", __func__, peer, *local_id);
9641 
9642 	return peer;
9643 }
9644 
9645 /*
9646  * dp_peer_release_ref - release peer ref count
9647  * @peer: peer handle
9648  * @debug_id: to track enum peer access
9649  *
9650  * Return: None
9651  */
9652 static inline
9653 void dp_peer_release_ref(void *peer, enum peer_debug_id_type debug_id)
9654 {
9655 	dp_peer_unref_delete(peer);
9656 }
9657 
9658 static struct cdp_peer_ops dp_ops_peer = {
9659 	.register_peer = dp_register_peer,
9660 	.clear_peer = dp_clear_peer,
9661 	.find_peer_by_addr = dp_find_peer_by_addr,
9662 	.find_peer_by_addr_and_vdev = dp_find_peer_by_addr_and_vdev,
9663 	.peer_get_ref_by_addr = dp_peer_get_ref_find_by_addr,
9664 	.peer_release_ref = dp_peer_release_ref,
9665 	.local_peer_id = dp_local_peer_id,
9666 	.peer_find_by_local_id = dp_peer_find_by_local_id,
9667 	.peer_state_update = dp_peer_state_update,
9668 	.get_vdevid = dp_get_vdevid,
9669 	.get_vdev_by_peer_addr = dp_get_vdev_by_peer_addr,
9670 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
9671 	.get_vdev_for_peer = dp_get_vdev_for_peer,
9672 	.get_peer_state = dp_get_peer_state,
9673 };
9674 #endif
9675 
9676 static struct cdp_ops dp_txrx_ops = {
9677 	.cmn_drv_ops = &dp_ops_cmn,
9678 	.ctrl_ops = &dp_ops_ctrl,
9679 	.me_ops = &dp_ops_me,
9680 	.mon_ops = &dp_ops_mon,
9681 	.host_stats_ops = &dp_ops_host_stats,
9682 	.wds_ops = &dp_ops_wds,
9683 	.raw_ops = &dp_ops_raw,
9684 #ifdef PEER_FLOW_CONTROL
9685 	.pflow_ops = &dp_ops_pflow,
9686 #endif /* PEER_FLOW_CONTROL */
9687 #ifdef DP_PEER_EXTENDED_API
9688 	.misc_ops = &dp_ops_misc,
9689 	.ocb_ops = &dp_ops_ocb,
9690 	.peer_ops = &dp_ops_peer,
9691 	.mob_stats_ops = &dp_ops_mob_stats,
9692 #endif
9693 #ifdef DP_FLOW_CTL
9694 	.cfg_ops = &dp_ops_cfg,
9695 	.flowctl_ops = &dp_ops_flowctl,
9696 	.l_flowctl_ops = &dp_ops_l_flowctl,
9697 	.throttle_ops = &dp_ops_throttle,
9698 #endif
9699 #ifdef IPA_OFFLOAD
9700 	.ipa_ops = &dp_ops_ipa,
9701 #endif
9702 #ifdef DP_POWER_SAVE
9703 	.bus_ops = &dp_ops_bus,
9704 #endif
9705 };
9706 
9707 /*
 * dp_soc_set_txrx_ring_map() - set tx ring map from the default CPU ring map
 * @soc: DP handle for soc
9710  *
9711  * Return: Void
9712  */
9713 void dp_soc_set_txrx_ring_map(struct dp_soc *soc)
9714 {
	uint32_t i;

	for (i = 0; i < WLAN_CFG_INT_NUM_CONTEXTS; i++)
		soc->tx_ring_map[i] = dp_cpu_ring_map[DP_NSS_DEFAULT_MAP][i];
9719 }
9720 
9721 #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018)
9722 
9723 #ifndef QCA_MEM_ATTACH_ON_WIFI3
9724 
9725 /**
9726  * dp_soc_attach_wifi3() - Attach txrx SOC
9727  * @ctrl_psoc: Opaque SOC handle from control plane
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
9730  * @qdf_osdev: QDF device
9731  * @ol_ops: Offload Operations
9732  * @device_id: Device ID
9733  *
9734  * Return: DP SOC handle on success, NULL on failure
9735  */
9736 struct cdp_soc_t *
9737 dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
9738 		    struct hif_opaque_softc *hif_handle,
9739 		    HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
9740 		    struct ol_if_ops *ol_ops, uint16_t device_id)
9741 {
	struct dp_soc *dp_soc = NULL;
9743 
9744 	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
9745 			       ol_ops, device_id);
9746 	if (!dp_soc)
9747 		return NULL;
9748 
9749 	if (!dp_soc_init(dp_soc, htc_handle, hif_handle))
9750 		return NULL;
9751 
9752 	return dp_soc_to_cdp_soc_t(dp_soc);
9753 }
9754 #else
9755 
9756 /**
9757  * dp_soc_attach_wifi3() - Attach txrx SOC
9758  * @ctrl_psoc: Opaque SOC handle from control plane
 * @hif_handle: Opaque HIF handle
 * @htc_handle: Opaque HTC handle
9761  * @qdf_osdev: QDF device
9762  * @ol_ops: Offload Operations
9763  * @device_id: Device ID
9764  *
9765  * Return: DP SOC handle on success, NULL on failure
9766  */
9767 struct cdp_soc_t *
9768 dp_soc_attach_wifi3(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
9769 		    struct hif_opaque_softc *hif_handle,
9770 		    HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
9771 		    struct ol_if_ops *ol_ops, uint16_t device_id)
9772 {
9773 	struct dp_soc *dp_soc = NULL;
9774 
9775 	dp_soc = dp_soc_attach(ctrl_psoc, htc_handle, qdf_osdev,
9776 			       ol_ops, device_id);
9777 	return dp_soc_to_cdp_soc_t(dp_soc);
9778 }
9779 
9780 #endif
9781 
9782 /**
9783  * dp_soc_attach() - Attach txrx SOC
9784  * @ctrl_psoc: Opaque SOC handle from control plane
9785  * @htc_handle: Opaque HTC handle
9786  * @qdf_osdev: QDF device
9787  * @ol_ops: Offload Operations
9788  * @device_id: Device ID
9789  *
9790  * Return: DP SOC handle on success, NULL on failure
9791  */
9792 static struct dp_soc *
9793 dp_soc_attach(struct cdp_ctrl_objmgr_psoc *ctrl_psoc, HTC_HANDLE htc_handle,
9794 	      qdf_device_t qdf_osdev,
9795 	      struct ol_if_ops *ol_ops, uint16_t device_id)
9796 {
	struct dp_soc *soc = NULL;
9799 	struct htt_soc *htt_soc;
9800 
9801 	soc = qdf_mem_malloc(sizeof(*soc));
9802 
9803 	if (!soc) {
9804 		dp_err("DP SOC memory allocation failed");
9805 		goto fail0;
9806 	}
9807 
9809 	soc->device_id = device_id;
9810 	soc->cdp_soc.ops = &dp_txrx_ops;
9811 	soc->cdp_soc.ol_ops = ol_ops;
9812 	soc->ctrl_psoc = ctrl_psoc;
9813 	soc->osdev = qdf_osdev;
9814 	soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_MAPS;
9815 
9816 	wlan_set_srng_cfg(&soc->wlan_srng_cfg);
9817 	qdf_mem_zero(&soc->vdev_id_map, sizeof(soc->vdev_id_map));
9818 
9819 	soc->wlan_cfg_ctx = wlan_cfg_soc_attach(soc->ctrl_psoc);
9820 	if (!soc->wlan_cfg_ctx) {
9821 		dp_err("wlan_cfg_ctx failed\n");
9822 		goto fail1;
9823 	}
9824 
9825 	dp_soc_set_interrupt_mode(soc);
9826 	htt_soc = htt_soc_attach(soc, htc_handle);
9827 
9828 	if (!htt_soc)
9829 		goto fail1;
9830 
9831 	soc->htt_handle = htt_soc;
9832 
9833 	if (htt_soc_htc_prealloc(htt_soc) != QDF_STATUS_SUCCESS)
9834 		goto fail2;
9835 
9836 	return soc;
9837 fail2:
9838 	htt_soc_detach(htt_soc);
9839 fail1:
9840 	qdf_mem_free(soc);
9841 fail0:
9842 	return NULL;
9843 }
9844 
9845 /**
9846  * dp_soc_init() - Initialize txrx SOC
9847  * @dp_soc: Opaque DP SOC handle
9848  * @htc_handle: Opaque HTC handle
9849  * @hif_handle: Opaque HIF handle
9850  *
9851  * Return: DP SOC handle on success, NULL on failure
9852  */
9853 void *dp_soc_init(void *dpsoc, HTC_HANDLE htc_handle,
9854 		  struct hif_opaque_softc *hif_handle)
9855 {
9856 	int target_type;
9857 	struct dp_soc *soc = (struct dp_soc *)dpsoc;
9858 	struct htt_soc *htt_soc = soc->htt_handle;
9859 
9860 	htt_set_htc_handle(htt_soc, htc_handle);
9861 	soc->hif_handle = hif_handle;
9862 
9863 	soc->hal_soc = hif_get_hal_handle(soc->hif_handle);
9864 	if (!soc->hal_soc)
9865 		return NULL;
9866 
9867 	htt_soc_initialize(soc->htt_handle, soc->ctrl_psoc,
9868 			   htt_get_htc_handle(htt_soc),
9869 			   soc->hal_soc, soc->osdev);
9870 	target_type = hal_get_target_type(soc->hal_soc);
9871 	switch (target_type) {
9872 	case TARGET_TYPE_QCA6290:
9873 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
9874 					       REO_DST_RING_SIZE_QCA6290);
9875 		soc->ast_override_support = 1;
9876 		soc->da_war_enabled = false;
9877 		break;
9878 #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490)
9879 	case TARGET_TYPE_QCA6390:
9880 	case TARGET_TYPE_QCA6490:
9881 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
9882 					       REO_DST_RING_SIZE_QCA6290);
9883 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
9884 		soc->ast_override_support = 1;
9885 		if (soc->cdp_soc.ol_ops->get_con_mode &&
9886 		    soc->cdp_soc.ol_ops->get_con_mode() ==
9887 		    QDF_GLOBAL_MONITOR_MODE) {
9888 			int int_ctx;
9889 
9890 			for (int_ctx = 0; int_ctx < WLAN_CFG_INT_NUM_CONTEXTS; int_ctx++) {
9891 				soc->wlan_cfg_ctx->int_rx_ring_mask[int_ctx] = 0;
9892 				soc->wlan_cfg_ctx->int_rxdma2host_ring_mask[int_ctx] = 0;
9893 			}
9894 		}
9895 		soc->wlan_cfg_ctx->rxdma1_enable = 0;
9896 		break;
9897 #endif /* QCA_WIFI_QCA6390 || QCA_WIFI_QCA6490 */
9898 
9899 	case TARGET_TYPE_QCA8074:
9900 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
9901 					       REO_DST_RING_SIZE_QCA8074);
9902 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, true);
9903 		soc->da_war_enabled = true;
9904 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
9905 		break;
9906 	case TARGET_TYPE_QCA8074V2:
9907 	case TARGET_TYPE_QCA6018:
9908 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
9909 					       REO_DST_RING_SIZE_QCA8074);
9910 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
9911 		soc->hw_nac_monitor_support = 1;
9912 		soc->ast_override_support = 1;
9913 		soc->per_tid_basize_max_tid = 8;
9914 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
9915 		soc->da_war_enabled = false;
9916 		soc->is_rx_fse_full_cache_invalidate_war_enabled = true;
9917 		break;
9918 	case TARGET_TYPE_QCN9000:
9919 		wlan_cfg_set_reo_dst_ring_size(soc->wlan_cfg_ctx,
9920 					       REO_DST_RING_SIZE_QCN9000);
9921 		soc->ast_override_support = 1;
9922 		soc->da_war_enabled = false;
9923 		wlan_cfg_set_raw_mode_war(soc->wlan_cfg_ctx, false);
9924 		soc->hw_nac_monitor_support = 1;
9925 		soc->per_tid_basize_max_tid = 8;
9926 		soc->num_hw_dscp_tid_map = HAL_MAX_HW_DSCP_TID_V2_MAPS;
9927 		break;
9928 	default:
9929 		qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
9930 		qdf_assert_always(0);
9931 		break;
9932 	}
9933 
9934 	wlan_cfg_set_rx_hash(soc->wlan_cfg_ctx,
9935 			     cfg_get(soc->ctrl_psoc, CFG_DP_RX_HASH));
9936 	soc->cce_disable = false;
9937 
9938 	qdf_atomic_init(&soc->num_tx_outstanding);
9939 	soc->num_tx_allowed =
9940 		wlan_cfg_get_dp_soc_tx_device_limit(soc->wlan_cfg_ctx);
9941 
9942 	if (soc->cdp_soc.ol_ops->get_dp_cfg_param) {
9943 		int ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
9944 				CDP_CFG_MAX_PEER_ID);
9945 
9946 		if (ret != -EINVAL) {
9947 			wlan_cfg_set_max_peer_id(soc->wlan_cfg_ctx, ret);
9948 		}
9949 
9950 		ret = soc->cdp_soc.ol_ops->get_dp_cfg_param(soc->ctrl_psoc,
9951 				CDP_CFG_CCE_DISABLE);
9952 		if (ret == 1)
9953 			soc->cce_disable = true;
9954 	}
9955 
9956 	qdf_spinlock_create(&soc->peer_ref_mutex);
9957 	qdf_spinlock_create(&soc->ast_lock);
9958 
9959 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
9960 	qdf_list_create(&soc->reo_desc_freelist, REO_DESC_FREELIST_SIZE);
9961 
9962 	/* fill the tx/rx cpu ring map*/
9963 	dp_soc_set_txrx_ring_map(soc);
9964 
9965 	qdf_spinlock_create(&soc->htt_stats.lock);
9966 	/* initialize work queue for stats processing */
9967 	qdf_create_work(0, &soc->htt_stats.work, htt_t2h_stats_handler, soc);
9968 
9969 	return soc;
9971 }
9972 
9973 /**
9974  * dp_soc_init_wifi3() - Initialize txrx SOC
9975  * @dp_soc: Opaque DP SOC handle
9976  * @ctrl_psoc: Opaque SOC handle from control plane(Unused)
9977  * @hif_handle: Opaque HIF handle
9978  * @htc_handle: Opaque HTC handle
9979  * @qdf_osdev: QDF device (Unused)
9980  * @ol_ops: Offload Operations (Unused)
9981  * @device_id: Device ID (Unused)
9982  *
9983  * Return: DP SOC handle on success, NULL on failure
9984  */
9985 void *dp_soc_init_wifi3(void *dpsoc, struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
9986 			struct hif_opaque_softc *hif_handle,
9987 			HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
9988 			struct ol_if_ops *ol_ops, uint16_t device_id)
9989 {
9990 	return dp_soc_init(dpsoc, htc_handle, hif_handle);
9991 }
9992 
9993 #endif
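
/*
 * Illustrative sketch (not driver code): the two-phase soc bring-up above.
 * Without QCA_MEM_ATTACH_ON_WIFI3, dp_soc_attach_wifi3() both allocates
 * and initializes; with it defined, attach only allocates and the caller
 * must invoke dp_soc_init_wifi3() separately.
 *
 *	struct cdp_soc_t *soc;
 *
 *	soc = dp_soc_attach_wifi3(ctrl_psoc, hif_handle, htc_handle,
 *				  qdf_osdev, ol_ops, device_id);
 *	// only needed when QCA_MEM_ATTACH_ON_WIFI3 is defined:
 *	dp_soc_init_wifi3(soc, ctrl_psoc, hif_handle, htc_handle,
 *			  qdf_osdev, ol_ops, device_id);
 */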
9994 
9995 /*
9996  * dp_get_pdev_for_mac_id() -  Return pdev for mac_id
9997  *
9998  * @soc: handle to DP soc
9999  * @mac_id: MAC id
10000  *
10001  * Return: Return pdev corresponding to MAC
10002  */
10003 void *dp_get_pdev_for_mac_id(struct dp_soc *soc, uint32_t mac_id)
10004 {
10005 	if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
10006 		return soc->pdev_list[mac_id];
10007 
	/* Typically for MCL, as there is only 1 PDEV */
10009 	return soc->pdev_list[0];
10010 }
10011 
10012 /*
 * dp_is_hw_dbs_enable() - Check if DBS is supported and clamp MAC rings
 * @soc:		DP SoC context
 * @max_mac_rings:	Pointer to number of MAC rings, set to 1 when DBS
 *			is not supported
10016  *
10017  * Return: None
10018  */
10019 static
10020 void dp_is_hw_dbs_enable(struct dp_soc *soc,
10021 				int *max_mac_rings)
10022 {
	bool dbs_enable = false;

10024 	if (soc->cdp_soc.ol_ops->is_hw_dbs_2x2_capable)
10025 		dbs_enable = soc->cdp_soc.ol_ops->
10026 		is_hw_dbs_2x2_capable((void *)soc->ctrl_psoc);
10027 
	*max_mac_rings = dbs_enable ? *max_mac_rings : 1;
10029 }
10030 
10031 /*
 * dp_is_soc_reinit() - Check if soc reinit is true
 * @soc: DP SoC context
 *
 * Return: true or false
 */
10037 bool dp_is_soc_reinit(struct dp_soc *soc)
10038 {
10039 	return soc->dp_soc_reinit;
10040 }
10041 
10042 /*
 * dp_set_pktlog_wifi3() - update rx pktlog filter configuration
 * @pdev: Datapath PDEV handle
 * @event: which event's notifications are being subscribed to
 * @enable: WDI event subscribe or not (true or false)
 *
 * Return: 0 on success
 */
10050 #ifdef WDI_EVENT_ENABLE
10051 int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
10052 		bool enable)
10053 {
10054 	struct dp_soc *soc = NULL;
10055 	struct htt_rx_ring_tlv_filter htt_tlv_filter = {0};
10056 	int max_mac_rings = wlan_cfg_get_num_mac_rings
10057 					(pdev->wlan_cfg_ctx);
10058 	uint8_t mac_id = 0;
10059 
10060 	soc = pdev->soc;
10061 	dp_is_hw_dbs_enable(soc, &max_mac_rings);
10062 
10063 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
10064 			FL("Max_mac_rings %d "),
10065 			max_mac_rings);
10066 
10067 	if (enable) {
10068 		switch (event) {
10069 		case WDI_EVENT_RX_DESC:
10070 			if (pdev->monitor_vdev) {
10071 				/* Nothing needs to be done if monitor mode is
10072 				 * enabled
10073 				 */
10074 				return 0;
10075 			}
10076 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_FULL) {
10077 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_FULL;
10078 				htt_tlv_filter.mpdu_start = 1;
10079 				htt_tlv_filter.msdu_start = 1;
10080 				htt_tlv_filter.msdu_end = 1;
10081 				htt_tlv_filter.mpdu_end = 1;
10082 				htt_tlv_filter.packet_header = 1;
10083 				htt_tlv_filter.attention = 1;
10084 				htt_tlv_filter.ppdu_start = 1;
10085 				htt_tlv_filter.ppdu_end = 1;
10086 				htt_tlv_filter.ppdu_end_user_stats = 1;
10087 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
10088 				htt_tlv_filter.ppdu_end_status_done = 1;
10089 				htt_tlv_filter.enable_fp = 1;
10090 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
10091 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
10092 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
10093 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
10094 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
10095 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
10096 				htt_tlv_filter.offset_valid = false;
10097 
10098 				for (mac_id = 0; mac_id < max_mac_rings;
10099 								mac_id++) {
10100 					int mac_for_pdev =
10101 						dp_get_mac_id_for_pdev(mac_id,
10102 								pdev->pdev_id);
10103 
10104 					htt_h2t_rx_ring_cfg(soc->htt_handle,
10105 					 mac_for_pdev,
10106 					 pdev->rxdma_mon_status_ring[mac_id]
10107 					 .hal_srng,
10108 					 RXDMA_MONITOR_STATUS,
10109 					 RX_BUFFER_SIZE,
10110 					 &htt_tlv_filter);
10111 
10112 				}
10113 
10114 				if (soc->reap_timer_init)
10115 					qdf_timer_mod(&soc->mon_reap_timer,
10116 					DP_INTR_POLL_TIMER_MS);
10117 			}
10118 			break;
10119 
10120 		case WDI_EVENT_LITE_RX:
10121 			if (pdev->monitor_vdev) {
10122 				/* Nothing needs to be done if monitor mode is
10123 				 * enabled
10124 				 */
10125 				return 0;
10126 			}
10127 
10128 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_LITE) {
10129 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_LITE;
10130 
10131 				htt_tlv_filter.ppdu_start = 1;
10132 				htt_tlv_filter.ppdu_end = 1;
10133 				htt_tlv_filter.ppdu_end_user_stats = 1;
10134 				htt_tlv_filter.ppdu_end_user_stats_ext = 1;
10135 				htt_tlv_filter.ppdu_end_status_done = 1;
10136 				htt_tlv_filter.mpdu_start = 1;
10137 				htt_tlv_filter.enable_fp = 1;
10138 				htt_tlv_filter.fp_mgmt_filter = FILTER_MGMT_ALL;
10139 				htt_tlv_filter.fp_ctrl_filter = FILTER_CTRL_ALL;
10140 				htt_tlv_filter.fp_data_filter = FILTER_DATA_ALL;
10141 				htt_tlv_filter.mo_mgmt_filter = FILTER_MGMT_ALL;
10142 				htt_tlv_filter.mo_ctrl_filter = FILTER_CTRL_ALL;
10143 				htt_tlv_filter.mo_data_filter = FILTER_DATA_ALL;
10144 				htt_tlv_filter.offset_valid = false;
10145 
10146 				for (mac_id = 0; mac_id < max_mac_rings;
10147 								mac_id++) {
10148 					int mac_for_pdev =
10149 						dp_get_mac_id_for_pdev(mac_id,
10150 								pdev->pdev_id);
10151 
10152 					htt_h2t_rx_ring_cfg(soc->htt_handle,
10153 					mac_for_pdev,
10154 					pdev->rxdma_mon_status_ring[mac_id]
10155 					.hal_srng,
10156 					RXDMA_MONITOR_STATUS,
10157 					RX_BUFFER_SIZE_PKTLOG_LITE,
10158 					&htt_tlv_filter);
10159 				}
10160 
10161 				if (soc->reap_timer_init)
10162 					qdf_timer_mod(&soc->mon_reap_timer,
10163 					DP_INTR_POLL_TIMER_MS);
10164 			}
10165 			break;
10166 
10167 		case WDI_EVENT_LITE_T2H:
10168 			if (pdev->monitor_vdev) {
10169 				/* Nothing needs to be done if monitor mode is
10170 				 * enabled
10171 				 */
10172 				return 0;
10173 			}
10174 
10175 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
10176 				int mac_for_pdev = dp_get_mac_id_for_pdev(
10177 							mac_id,	pdev->pdev_id);
10178 
10179 				pdev->pktlog_ppdu_stats = true;
10180 				dp_h2t_cfg_stats_msg_send(pdev,
10181 					DP_PPDU_TXLITE_STATS_BITMASK_CFG,
10182 					mac_for_pdev);
10183 			}
10184 			break;
10185 
10186 		default:
10187 			/* Nothing needs to be done for other pktlog types */
10188 			break;
10189 		}
10190 	} else {
10191 		switch (event) {
10192 		case WDI_EVENT_RX_DESC:
10193 		case WDI_EVENT_LITE_RX:
10194 			if (pdev->monitor_vdev) {
10195 				/* Nothing needs to be done if monitor mode is
10196 				 * enabled
10197 				 */
10198 				return 0;
10199 			}
10200 			if (pdev->rx_pktlog_mode != DP_RX_PKTLOG_DISABLED) {
10201 				pdev->rx_pktlog_mode = DP_RX_PKTLOG_DISABLED;
10202 
10203 				for (mac_id = 0; mac_id < max_mac_rings;
10204 								mac_id++) {
10205 					int mac_for_pdev =
10206 						dp_get_mac_id_for_pdev(mac_id,
10207 								pdev->pdev_id);
10208 
10209 					htt_h2t_rx_ring_cfg(soc->htt_handle,
10210 					  mac_for_pdev,
10211 					  pdev->rxdma_mon_status_ring[mac_id]
10212 					  .hal_srng,
10213 					  RXDMA_MONITOR_STATUS,
10214 					  RX_BUFFER_SIZE,
10215 					  &htt_tlv_filter);
10216 				}
10217 
10218 				if (soc->reap_timer_init)
10219 					qdf_timer_stop(&soc->mon_reap_timer);
10220 			}
10221 			break;
10222 		case WDI_EVENT_LITE_T2H:
10223 			if (pdev->monitor_vdev) {
10224 				/* Nothing needs to be done if monitor mode is
10225 				 * enabled
10226 				 */
10227 				return 0;
10228 			}
			/* Pass value 0 to disable HTT_H2T_MSG_TYPE_PPDU_STATS_CFG
			 * in FW. Once proper macros are defined in the htt
			 * header file, use them here instead.
			 */
10233 			for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
10234 				int mac_for_pdev =
10235 						dp_get_mac_id_for_pdev(mac_id,
10236 								pdev->pdev_id);
10237 
10238 				pdev->pktlog_ppdu_stats = false;
				if (!pdev->enhanced_stats_en &&
				    !pdev->tx_sniffer_enable &&
				    !pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(pdev, 0,
								  mac_for_pdev);
				} else if (pdev->tx_sniffer_enable ||
					   pdev->mcopy_mode) {
					dp_h2t_cfg_stats_msg_send(
						pdev,
						DP_PPDU_STATS_CFG_SNIFFER,
						mac_for_pdev);
				} else if (pdev->enhanced_stats_en) {
					dp_h2t_cfg_stats_msg_send(
						pdev,
						DP_PPDU_STATS_CFG_ENH_STATS,
						mac_for_pdev);
				}
10249 			}
10250 
10251 			break;
10252 		default:
10253 			/* Nothing needs to be done for other pktlog types */
10254 			break;
10255 		}
10256 	}
10257 	return 0;
10258 }
10259 #endif
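
/*
 * Illustrative sketch (not driver code): enabling lite rx pktlog and later
 * tearing it down via dp_set_pktlog_wifi3(). Both calls are no-ops while a
 * monitor vdev is active, since monitor mode already owns the status ring
 * filter configuration.
 *
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, true);
 *	...
 *	dp_set_pktlog_wifi3(pdev, WDI_EVENT_LITE_RX, false);
 */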
10260 
10261 /**
10262  * dp_bucket_index() - Return index from array
10263  *
10264  * @delay: delay measured
10265  * @array: array used to index corresponding delay
10266  *
10267  * Return: index
10268  */
10269 static uint8_t dp_bucket_index(uint32_t delay, uint16_t *array)
10270 {
10271 	uint8_t i = CDP_DELAY_BUCKET_0;
10272 
	/* array[i + 1] is read below, so stop before the last bucket */
	for (; i < CDP_DELAY_BUCKET_MAX - 1; i++) {
10274 		if (delay >= array[i] && delay <= array[i + 1])
10275 			return i;
10276 	}
10277 
10278 	return (CDP_DELAY_BUCKET_MAX - 1);
10279 }
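
/*
 * Worked example: with the fw-to-hw ranges {0, 10, 20, 30, 40, ...} defined
 * below, a 35 ms completion delay satisfies 30 <= 35 <= 40 at i = 3, so
 * dp_bucket_index() returns bucket 3; delays beyond the last boundary fall
 * into the final bucket (CDP_DELAY_BUCKET_MAX - 1).
 */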
10280 
10281 /**
10282  * dp_fill_delay_buckets() - Fill delay statistics bucket for each
10283  *				type of delay
10284  *
10285  * @pdev: pdev handle
10286  * @delay: delay in ms
10287  * @tid: tid value
10288  * @mode: type of tx delay mode
 * @ring_id: ring number
 *
 * Return: pointer to cdp_delay_stats structure
10291  */
10292 static struct cdp_delay_stats *
10293 dp_fill_delay_buckets(struct dp_pdev *pdev, uint32_t delay,
10294 		      uint8_t tid, uint8_t mode, uint8_t ring_id)
10295 {
10296 	uint8_t delay_index = 0;
10297 	struct cdp_tid_tx_stats *tstats =
10298 		&pdev->stats.tid_stats.tid_tx_stats[ring_id][tid];
10299 	struct cdp_tid_rx_stats *rstats =
10300 		&pdev->stats.tid_stats.tid_rx_stats[ring_id][tid];
10301 	/*
10302 	 * cdp_fw_to_hw_delay_range
10303 	 * Fw to hw delay ranges in milliseconds
10304 	 */
10305 	uint16_t cdp_fw_to_hw_delay[CDP_DELAY_BUCKET_MAX] = {
10306 		0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 250, 500};
10307 
10308 	/*
10309 	 * cdp_sw_enq_delay_range
10310 	 * Software enqueue delay ranges in milliseconds
10311 	 */
10312 	uint16_t cdp_sw_enq_delay[CDP_DELAY_BUCKET_MAX] = {
10313 		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
10314 
10315 	/*
10316 	 * cdp_intfrm_delay_range
10317 	 * Interframe delay ranges in milliseconds
10318 	 */
10319 	uint16_t cdp_intfrm_delay[CDP_DELAY_BUCKET_MAX] = {
10320 		0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60};
10321 
10322 	/*
10323 	 * Update delay stats in proper bucket
10324 	 */
10325 	switch (mode) {
10326 	/* Software Enqueue delay ranges */
10327 	case CDP_DELAY_STATS_SW_ENQ:
10328 
10329 		delay_index = dp_bucket_index(delay, cdp_sw_enq_delay);
10330 		tstats->swq_delay.delay_bucket[delay_index]++;
10331 		return &tstats->swq_delay;
10332 
10333 	/* Tx Completion delay ranges */
10334 	case CDP_DELAY_STATS_FW_HW_TRANSMIT:
10335 
10336 		delay_index = dp_bucket_index(delay, cdp_fw_to_hw_delay);
10337 		tstats->hwtx_delay.delay_bucket[delay_index]++;
10338 		return &tstats->hwtx_delay;
10339 
10340 	/* Interframe tx delay ranges */
10341 	case CDP_DELAY_STATS_TX_INTERFRAME:
10342 
10343 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
10344 		tstats->intfrm_delay.delay_bucket[delay_index]++;
10345 		return &tstats->intfrm_delay;
10346 
10347 	/* Interframe rx delay ranges */
10348 	case CDP_DELAY_STATS_RX_INTERFRAME:
10349 
10350 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
10351 		rstats->intfrm_delay.delay_bucket[delay_index]++;
10352 		return &rstats->intfrm_delay;
10353 
10354 	/* Ring reap to indication to network stack */
10355 	case CDP_DELAY_STATS_REAP_STACK:
10356 
10357 		delay_index = dp_bucket_index(delay, cdp_intfrm_delay);
10358 		rstats->to_stack_delay.delay_bucket[delay_index]++;
10359 		return &rstats->to_stack_delay;
10360 	default:
10361 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
10362 			  "%s Incorrect delay mode: %d", __func__, mode);
10363 	}
10364 
10365 	return NULL;
10366 }
10367 
10368 /**
10369  * dp_update_delay_stats() - Update delay statistics in structure
10370  *				and fill min, max and avg delay
10371  *
10372  * @pdev: pdev handle
10373  * @delay: delay in ms
10374  * @tid: tid value
10375  * @mode: type of tx delay mode
 * @ring_id: ring number
 *
 * Return: none
10378  */
10379 void dp_update_delay_stats(struct dp_pdev *pdev, uint32_t delay,
10380 			   uint8_t tid, uint8_t mode, uint8_t ring_id)
10381 {
10382 	struct cdp_delay_stats *dstats = NULL;
10383 
10384 	/*
10385 	 * Delay ranges are different for different delay modes
10386 	 * Get the correct index to update delay bucket
10387 	 */
10388 	dstats = dp_fill_delay_buckets(pdev, delay, tid, mode, ring_id);
10389 	if (qdf_unlikely(!dstats))
10390 		return;
10391 
10392 	if (delay != 0) {
10393 		/*
10394 		 * Compute minimum,average and maximum
10395 		 * delay
10396 		 */
10397 		if (delay < dstats->min_delay)
10398 			dstats->min_delay = delay;
10399 
10400 		if (delay > dstats->max_delay)
10401 			dstats->max_delay = delay;
10402 
10403 		/*
10404 		 * Average over delay measured till now
10405 		 */
10406 		if (!dstats->avg_delay)
10407 			dstats->avg_delay = delay;
10408 		else
10409 			dstats->avg_delay = ((delay + dstats->avg_delay) / 2);
10410 	}
10411 }
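
/*
 * Note on the running average above: avg = (delay + avg) / 2 is a cheap
 * exponentially weighted mean that favours recent samples. For example,
 * the samples 10, 20 and 40 ms produce averages of 10, 15 and then 27
 * (integer division), rather than the arithmetic mean of 23.
 */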
10412